aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPablo Neira Ayuso <pablo@netfilter.org>2015-04-08 11:40:17 -0400
committerPablo Neira Ayuso <pablo@netfilter.org>2015-04-08 12:30:21 -0400
commitaadd51aa71f8d013c818a312bb2a0c5714830dbc (patch)
tree28ca52d17183cb1d732b1324fce4f7b5d6b3dfc0
parent68e942e88add0ac8576fc8397e86495edf3dcea7 (diff)
parentee90b81203a91d4e5385622811ee7872b5bcfe76 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Resolve conflicts between 5888b93 ("Merge branch 'nf-hook-compress'") and Florian Westphal br_netfilter works. Conflicts: net/bridge/br_netfilter.c Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
-rw-r--r--Documentation/devicetree/bindings/net/dsa/dsa.txt4
-rw-r--r--Documentation/input/alps.txt8
-rw-r--r--Documentation/input/event-codes.txt6
-rw-r--r--Documentation/input/multi-touch-protocol.txt9
-rw-r--r--Documentation/networking/can.txt20
-rw-r--r--MAINTAINERS45
-rw-r--r--Makefile2
-rw-r--r--arch/arc/kernel/signal.c24
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/boot/dts/dm8168-evm.dts19
-rw-r--r--arch/arm/boot/dts/dm816x.dtsi18
-rw-r--r--arch/arm/boot/dts/dra7.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3.dtsi4
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi1
-rw-r--r--arch/arm/boot/dts/socfpga.dtsi2
-rw-r--r--arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts16
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi3
-rw-r--r--arch/arm/boot/dts/sun5i-a13.dtsi3
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi3
-rw-r--r--arch/arm/mach-omap2/id.c2
-rw-r--r--arch/arm/mach-pxa/irq.c111
-rw-r--r--arch/arm/mach-pxa/zeus.c2
-rw-r--r--arch/arm/mach-sunxi/Kconfig8
-rw-r--r--arch/arm/plat-omap/dmtimer.c15
-rw-r--r--arch/arm64/boot/dts/arm/juno-clocks.dtsi2
-rw-r--r--arch/arm64/include/asm/cmpxchg.h32
-rw-r--r--arch/arm64/include/asm/mmu_context.h9
-rw-r--r--arch/arm64/include/asm/percpu.h44
-rw-r--r--arch/metag/include/asm/io.h1
-rw-r--r--arch/metag/include/asm/pgtable-bits.h104
-rw-r--r--arch/metag/include/asm/pgtable.h95
-rw-r--r--arch/parisc/include/asm/pgalloc.h17
-rw-r--r--arch/parisc/kernel/syscall_table.S9
-rw-r--r--arch/powerpc/include/asm/cputhreads.h2
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h3
-rw-r--r--arch/powerpc/include/asm/reg.h3
-rw-r--r--arch/powerpc/kernel/cputable.c20
-rw-r--r--arch/powerpc/kernel/dbell.c2
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S2
-rw-r--r--arch/powerpc/kvm/book3s_hv.c8
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S1
-rw-r--r--arch/powerpc/platforms/powernv/smp.c14
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c44
-rw-r--r--arch/s390/include/asm/elf.h2
-rw-r--r--arch/s390/kernel/ftrace.c61
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c7
-rw-r--r--arch/s390/kernel/swsusp_asm64.S11
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c10
-rw-r--r--arch/x86/kernel/entry_64.S34
-rw-r--r--arch/x86/kernel/kgdb.c2
-rw-r--r--arch/x86/kernel/reboot.c10
-rw-r--r--arch/x86/kvm/ioapic.c4
-rw-r--r--arch/x86/kvm/lapic.c3
-rw-r--r--arch/x86/kvm/vmx.c7
-rw-r--r--arch/x86/xen/p2m.c10
-rw-r--r--block/blk-merge.c2
-rw-r--r--block/blk-mq-tag.c6
-rw-r--r--block/blk-mq.c6
-rw-r--r--block/blk-settings.c6
-rw-r--r--crypto/af_alg.c2
-rw-r--r--crypto/algif_skcipher.c14
-rw-r--r--drivers/ata/libata-core.c19
-rw-r--r--drivers/base/regmap/internal.h8
-rw-r--r--drivers/base/regmap/regcache.c16
-rw-r--r--drivers/base/regmap/regmap.c32
-rw-r--r--drivers/bcma/Kconfig6
-rw-r--r--drivers/bcma/bcma_private.h20
-rw-r--r--drivers/bcma/driver_gpio.c23
-rw-r--r--drivers/bcma/driver_pci.c33
-rw-r--r--drivers/bcma/host_pci.c34
-rw-r--r--drivers/block/nbd.c8
-rw-r--r--drivers/block/nvme-core.c1
-rw-r--r--drivers/bluetooth/btusb.c12
-rw-r--r--drivers/bluetooth/hci_ldisc.c2
-rw-r--r--drivers/clocksource/Kconfig3
-rw-r--r--drivers/clocksource/timer-sun5i.c7
-rw-r--r--drivers/dma/bcm2835-dma.c1
-rw-r--r--drivers/dma/dma-jz4740.c7
-rw-r--r--drivers/dma/edma.c7
-rw-r--r--drivers/dma/moxart-dma.c4
-rw-r--r--drivers/dma/omap-dma.c1
-rw-r--r--drivers/firmware/dmi_scan.c22
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c2
-rw-r--r--drivers/gpio/gpio-syscon.c2
-rw-r--r--drivers/gpio/gpiolib-acpi.c10
-rw-r--r--drivers/gpu/drm/drm_crtc.c13
-rw-r--r--drivers/gpu/drm/drm_edid_load.c1
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c17
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c38
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c18
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c4
-rw-r--r--drivers/gpu/drm/radeon/cikd.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c4
-rw-r--r--drivers/gpu/drm/radeon/vce_v2_0.c3
-rw-r--r--drivers/iio/accel/bma180.c2
-rw-r--r--drivers/iio/accel/bmc150-accel.c20
-rw-r--r--drivers/iio/accel/kxcjk-1013.c2
-rw-r--r--drivers/iio/adc/Kconfig3
-rw-r--r--drivers/iio/adc/at91_adc.c5
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c3
-rw-r--r--drivers/iio/adc/vf610_adc.c91
-rw-r--r--drivers/iio/gyro/bmg160.c2
-rw-r--r--drivers/iio/imu/adis_trigger.c2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c56
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c25
-rw-r--r--drivers/iio/imu/kmx61.c2
-rw-r--r--drivers/iio/industrialio-core.c5
-rw-r--r--drivers/iio/industrialio-event.c1
-rw-r--r--drivers/iio/proximity/sx9500.c2
-rw-r--r--drivers/infiniband/core/umem.c8
-rw-r--r--drivers/infiniband/hw/mlx4/main.c11
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c2
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c14
-rw-r--r--drivers/infiniband/hw/mlx5/doorbell.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c107
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h5
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c2
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c8
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c4
-rw-r--r--drivers/infiniband/hw/mlx5/user.h2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c1
-rw-r--r--drivers/input/mouse/alps.c48
-rw-r--r--drivers/input/mouse/synaptics.c7
-rw-r--r--drivers/iommu/arm-smmu.c9
-rw-r--r--drivers/iommu/intel-iommu.c7
-rw-r--r--drivers/iommu/ipmmu-vmsa.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c57
-rw-r--r--drivers/lguest/Kconfig2
-rw-r--r--drivers/md/dm.c26
-rw-r--r--drivers/mfd/kempld-core.c2
-rw-r--r--drivers/mfd/rtsx_usb.c30
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/caif/caif_serial.c1
-rw-r--r--drivers/net/can/flexcan.c18
-rw-r--r--drivers/net/can/usb/ems_usb.c11
-rw-r--r--drivers/net/can/usb/gs_usb.c2
-rw-r--r--drivers/net/can/usb/kvaser_usb.c69
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_ucan.h15
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c75
-rw-r--r--drivers/net/dsa/mv88e6123_61_65.c170
-rw-r--r--drivers/net/dsa/mv88e6131.c182
-rw-r--r--drivers/net/dsa/mv88e6171.c166
-rw-r--r--drivers/net/dsa/mv88e6352.c205
-rw-r--r--drivers/net/dsa/mv88e6xxx.c531
-rw-r--r--drivers/net/dsa/mv88e6xxx.h238
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c99
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c162
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c9
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h61
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c915
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c1755
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c781
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h39
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c6
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c30
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c82
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c113
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c43
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c67
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Makefile5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c297
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw_qos.c289
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw_qos.h145
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c133
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mad.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c123
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mcg.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/srq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.c19
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.h19
-rw-r--r--drivers/net/ethernet/rocker/rocker.c8
-rw-r--r--drivers/net/hyperv/hyperv_net.h8
-rw-r--r--drivers/net/hyperv/netvsc.c7
-rw-r--r--drivers/net/hyperv/netvsc_drv.c17
-rw-r--r--drivers/net/hyperv/rndis_filter.c2
-rw-r--r--drivers/net/ipvlan/ipvlan.h4
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c30
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c39
-rw-r--r--drivers/net/macvlan.c9
-rw-r--r--drivers/net/phy/at803x.c11
-rw-r--r--drivers/net/phy/fixed_phy.c29
-rw-r--r--drivers/net/usb/asix_common.c2
-rw-r--r--drivers/net/usb/cdc_ether.c8
-rw-r--r--drivers/net/usb/cdc_ncm.c7
-rw-r--r--drivers/net/usb/r8152.c2
-rw-r--r--drivers/net/usb/sr9800.c2
-rw-r--r--drivers/net/usb/usbnet.c16
-rw-r--r--drivers/net/veth.c15
-rw-r--r--drivers/net/virtio_net.c14
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c4
-rw-r--r--drivers/net/vxlan.c20
-rw-r--r--drivers/net/wan/cosa.c1
-rw-r--r--drivers/net/wan/lmc/lmc_main.c1
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c9
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.h1
-rw-r--r--drivers/net/wireless/ath/ath.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c31
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c24
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c77
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_aic.c599
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_aic.h61
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c84
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h25
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_rtt.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c44
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c34
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c142
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h31
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/reg_aic.h168
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h16
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c34
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c19
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c36
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c302
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h8
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c16
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h23
-rw-r--r--drivers/net/wireless/b43/main.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.c310
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.h12
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/pcie.c24
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio.c199
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c2
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h2
-rw-r--r--drivers/net/wireless/brcm80211/include/chipcommon.h9
-rw-r--r--drivers/net/wireless/cw1200/cw1200_spi.c11
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h1
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c17
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c7
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c411
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.h19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h15
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex.c220
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex_legacy.c61
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c19
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c26
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h47
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h107
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c180
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h63
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c290
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c11
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c120
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sf.c67
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c22
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c33
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h4
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c157
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c63
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/mwifiex/11n.c18
-rw-r--r--drivers/net/wireless/mwifiex/11n.h32
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c16
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c7
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c167
-rw-r--r--drivers/net/wireless/mwifiex/decl.h10
-rw-r--r--drivers/net/wireless/mwifiex/fw.h11
-rw-r--r--drivers/net/wireless/mwifiex/init.c26
-rw-r--r--drivers/net/wireless/mwifiex/main.c76
-rw-r--r--drivers/net/wireless/mwifiex/main.h30
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c31
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c226
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h14
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c61
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c21
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c4
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c128
-rw-r--r--drivers/net/wireless/mwifiex/usb.c6
-rw-r--r--drivers/net/wireless/mwifiex/util.c4
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c48
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h4
-rw-r--r--drivers/net/wireless/rtlwifi/base.h1
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c12
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/hw.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/hw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/trx.c16
-rw-r--r--drivers/net/wireless/rtlwifi/stats.c24
-rw-r--r--drivers/net/wireless/rtlwifi/stats.h1
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.h4
-rw-r--r--drivers/net/xen-netfront.c5
-rw-r--r--drivers/of/address.c11
-rw-r--r--drivers/regulator/palmas-regulator.c4
-rw-r--r--drivers/rtc/rtc-mrst.c17
-rw-r--r--drivers/scsi/ipr.c3
-rw-r--r--drivers/scsi/libsas/sas_ata.c3
-rw-r--r--drivers/spi/spi-dw-mid.c6
-rw-r--r--drivers/spi/spi-qup.c9
-rw-r--r--drivers/spi/spi.c5
-rw-r--r--drivers/staging/iio/Kconfig1
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843_core.c1
-rw-r--r--drivers/tty/serial/fsl_lpuart.c5
-rw-r--r--drivers/tty/serial/samsung.c1
-rw-r--r--drivers/usb/host/xhci-hub.c9
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/isp1760/isp1760-udc.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c9
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/keyspan_pda.c3
-rw-r--r--drivers/watchdog/imgpdc_wdt.c8
-rw-r--r--drivers/watchdog/mtk_wdt.c2
-rw-r--r--drivers/xen/Kconfig17
-rw-r--r--drivers/xen/balloon.c23
-rw-r--r--fs/affs/file.c19
-rw-r--r--fs/cifs/cifsencrypt.c6
-rw-r--r--fs/cifs/connect.c13
-rw-r--r--fs/cifs/file.c1
-rw-r--r--fs/cifs/inode.c2
-rw-r--r--fs/cifs/smb2misc.c2
-rw-r--r--fs/cifs/smb2ops.c3
-rw-r--r--fs/cifs/smb2pdu.c17
-rw-r--r--fs/compat_ioctl.c2
-rw-r--r--fs/fs-writeback.c93
-rw-r--r--fs/hfsplus/brec.c20
-rw-r--r--fs/locks.c5
-rw-r--r--fs/nfsd/blocklayout.c2
-rw-r--r--fs/nfsd/blocklayoutxdr.c6
-rw-r--r--fs/nfsd/nfs4layouts.c12
-rw-r--r--fs/nfsd/nfs4proc.c2
-rw-r--r--fs/nfsd/nfs4state.c4
-rw-r--r--fs/nfsd/nfs4xdr.c20
-rw-r--r--fs/nfsd/nfscache.c6
-rw-r--r--include/linux/bcma/bcma.h9
-rw-r--r--include/linux/bcma/bcma_driver_pci.h8
-rw-r--r--include/linux/fs.h1
-rw-r--r--include/linux/irqchip/arm-gic-v3.h17
-rw-r--r--include/linux/jhash.h17
-rw-r--r--include/linux/lcm.h1
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/mfd/palmas.h3
-rw-r--r--include/linux/mlx4/cmd.h13
-rw-r--r--include/linux/mlx4/device.h20
-rw-r--r--include/linux/mlx4/qp.h8
-rw-r--r--include/linux/mlx5/cmd.h2
-rw-r--r--include/linux/mlx5/cq.h7
-rw-r--r--include/linux/mlx5/device.h2
-rw-r--r--include/linux/mlx5/doorbell.h2
-rw-r--r--include/linux/mlx5/driver.h16
-rw-r--r--include/linux/mlx5/mlx5_ifc.h2
-rw-r--r--include/linux/mlx5/qp.h2
-rw-r--r--include/linux/mlx5/srq.h2
-rw-r--r--include/linux/mmc/sdio_ids.h2
-rw-r--r--include/linux/netdevice.h35
-rw-r--r--include/linux/netfilter.h103
-rw-r--r--include/linux/netfilter_arp/arp_tables.h3
-rw-r--r--include/linux/netfilter_bridge.h2
-rw-r--r--include/linux/netfilter_ipv4/ip_tables.h3
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h3
-rw-r--r--include/linux/phy_fixed.h9
-rw-r--r--include/linux/regulator/driver.h2
-rw-r--r--include/linux/sched.h9
-rw-r--r--include/linux/sunrpc/debug.h18
-rw-r--r--include/linux/tcp.h2
-rw-r--r--include/linux/usb/usbnet.h6
-rw-r--r--include/linux/writeback.h3
-rw-r--r--include/net/bluetooth/bluetooth.h23
-rw-r--r--include/net/bluetooth/hci_core.h7
-rw-r--r--include/net/dn_neigh.h6
-rw-r--r--include/net/ip.h19
-rw-r--r--include/net/ip6_route.h6
-rw-r--r--include/net/ip6_tunnel.h6
-rw-r--r--include/net/ip_tunnels.h1
-rw-r--r--include/net/ipv6.h3
-rw-r--r--include/net/netfilter/nf_nat_l3proto.h48
-rw-r--r--include/net/netfilter/nf_queue.h6
-rw-r--r--include/net/netfilter/nf_tables.h7
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h5
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h5
-rw-r--r--include/net/sock.h2
-rw-r--r--include/net/tcp.h5
-rw-r--r--include/net/udp_tunnel.h5
-rw-r--r--include/net/vxlan.h2
-rw-r--r--include/net/xfrm.h8
-rw-r--r--include/trace/events/regmap.h123
-rw-r--r--include/uapi/linux/bpf.h39
-rw-r--r--include/uapi/linux/can/raw.h1
-rw-r--r--include/uapi/linux/input.h3
-rw-r--r--include/uapi/linux/nfsd/export.h2
-rw-r--r--include/uapi/linux/rtnetlink.h4
-rw-r--r--kernel/events/core.c10
-rw-r--r--kernel/locking/lockdep.c81
-rw-r--r--kernel/module.c8
-rw-r--r--kernel/sched/core.c2
-rw-r--r--kernel/sched/fair.c8
-rw-r--r--kernel/sysctl.c8
-rw-r--r--kernel/time/tick-broadcast-hrtimer.c11
-rw-r--r--lib/lcm.c11
-rw-r--r--lib/nlattr.c2
-rw-r--r--lib/test_rhashtable.c1
-rw-r--r--mm/huge_memory.c26
-rw-r--r--mm/memory.c22
-rw-r--r--mm/memory_hotplug.c13
-rw-r--r--mm/mmap.c4
-rw-r--r--mm/mprotect.c3
-rw-r--r--mm/page-writeback.c7
-rw-r--r--mm/page_isolation.c1
-rw-r--r--mm/pagewalk.c9
-rw-r--r--mm/rmap.c7
-rw-r--r--mm/slub.c6
-rw-r--r--net/8021q/vlan_dev.c9
-rw-r--r--net/batman-adv/hard-interface.c5
-rw-r--r--net/bluetooth/bnep/bnep.h4
-rw-r--r--net/bluetooth/bnep/core.c70
-rw-r--r--net/bluetooth/bnep/sock.c7
-rw-r--r--net/bluetooth/cmtp/capi.c2
-rw-r--r--net/bluetooth/cmtp/core.c15
-rw-r--r--net/bluetooth/hci_core.c120
-rw-r--r--net/bluetooth/hci_debugfs.c26
-rw-r--r--net/bluetooth/hci_event.c175
-rw-r--r--net/bluetooth/hci_request.c20
-rw-r--r--net/bluetooth/hci_request.h5
-rw-r--r--net/bluetooth/hci_sock.c2
-rw-r--r--net/bluetooth/hidp/core.c15
-rw-r--r--net/bluetooth/l2cap_core.c48
-rw-r--r--net/bluetooth/l2cap_sock.c6
-rw-r--r--net/bluetooth/mgmt.c267
-rw-r--r--net/bluetooth/selftest.c35
-rw-r--r--net/bluetooth/smp.c45
-rw-r--r--net/bridge/br_forward.c13
-rw-r--r--net/bridge/br_input.c16
-rw-r--r--net/bridge/br_multicast.c3
-rw-r--r--net/bridge/br_netfilter.c96
-rw-r--r--net/bridge/br_netlink.c4
-rw-r--r--net/bridge/br_private.h6
-rw-r--r--net/bridge/br_stp_bpdu.c5
-rw-r--r--net/bridge/netfilter/ebtable_filter.c14
-rw-r--r--net/bridge/netfilter/ebtable_nat.c14
-rw-r--r--net/bridge/netfilter/nf_tables_bridge.c28
-rw-r--r--net/can/raw.c50
-rw-r--r--net/core/dev.c46
-rw-r--r--net/core/fib_rules.c2
-rw-r--r--net/core/filter.c115
-rw-r--r--net/core/link_watch.c4
-rw-r--r--net/core/net-sysfs.c10
-rw-r--r--net/core/net_namespace.c107
-rw-r--r--net/core/rtnetlink.c12
-rw-r--r--net/core/sock.c19
-rw-r--r--net/decnet/dn_neigh.c35
-rw-r--r--net/decnet/dn_nsp_in.c5
-rw-r--r--net/decnet/dn_route.c26
-rw-r--r--net/decnet/dn_rules.c2
-rw-r--r--net/decnet/netfilter/dn_rtmsg.c4
-rw-r--r--net/dsa/dsa.c23
-rw-r--r--net/dsa/slave.c8
-rw-r--r--net/ipv4/af_inet.c8
-rw-r--r--net/ipv4/arp.c38
-rw-r--r--net/ipv4/cipso_ipv4.c42
-rw-r--r--net/ipv4/devinet.c42
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/fib_frontend.c28
-rw-r--r--net/ipv4/fib_rules.c4
-rw-r--r--net/ipv4/fib_semantics.c24
-rw-r--r--net/ipv4/fib_trie.c12
-rw-r--r--net/ipv4/geneve.c6
-rw-r--r--net/ipv4/gre_offload.c4
-rw-r--r--net/ipv4/icmp.c6
-rw-r--r--net/ipv4/igmp.c20
-rw-r--r--net/ipv4/inet_connection_sock.c8
-rw-r--r--net/ipv4/inet_fragment.c4
-rw-r--r--net/ipv4/inet_hashtables.c2
-rw-r--r--net/ipv4/inet_timewait_sock.c2
-rw-r--r--net/ipv4/ip_forward.c8
-rw-r--r--net/ipv4/ip_fragment.c14
-rw-r--r--net/ipv4/ip_gre.c6
-rw-r--r--net/ipv4/ip_input.c17
-rw-r--r--net/ipv4/ip_options.c2
-rw-r--r--net/ipv4/ip_output.c74
-rw-r--r--net/ipv4/ip_sockglue.c6
-rw-r--r--net/ipv4/ip_tunnel.c21
-rw-r--r--net/ipv4/ip_vti.c4
-rw-r--r--net/ipv4/ipcomp.c2
-rw-r--r--net/ipv4/ipconfig.c6
-rw-r--r--net/ipv4/ipip.c4
-rw-r--r--net/ipv4/ipmr.c73
-rw-r--r--net/ipv4/netfilter.c4
-rw-r--r--net/ipv4/netfilter/arp_tables.c11
-rw-r--r--net/ipv4/netfilter/arptable_filter.c7
-rw-r--r--net/ipv4/netfilter/ip_tables.c13
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c10
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c6
-rw-r--r--net/ipv4/netfilter/iptable_filter.c8
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c19
-rw-r--r--net/ipv4/netfilter/iptable_nat.c29
-rw-r--r--net/ipv4/netfilter/iptable_raw.c7
-rw-r--r--net/ipv4/netfilter/iptable_security.c8
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c20
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_l3proto_ipv4.c33
-rw-r--r--net/ipv4/netfilter/nf_tables_arp.c6
-rw-r--r--net/ipv4/netfilter/nf_tables_ipv4.c12
-rw-r--r--net/ipv4/netfilter/nft_chain_nat_ipv4.c29
-rw-r--r--net/ipv4/netfilter/nft_chain_route_ipv4.c6
-rw-r--r--net/ipv4/ping.c4
-rw-r--r--net/ipv4/raw.c10
-rw-r--r--net/ipv4/route.c22
-rw-r--r--net/ipv4/sysctl_net_ipv4.c6
-rw-r--r--net/ipv4/tcp.c14
-rw-r--r--net/ipv4/tcp_diag.c2
-rw-r--r--net/ipv4/tcp_fastopen.c7
-rw-r--r--net/ipv4/tcp_input.c104
-rw-r--r--net/ipv4/tcp_ipv4.c12
-rw-r--r--net/ipv4/tcp_metrics.c15
-rw-r--r--net/ipv4/tcp_minisocks.c8
-rw-r--r--net/ipv4/tcp_output.c71
-rw-r--r--net/ipv4/tcp_timer.c2
-rw-r--r--net/ipv4/udp.c10
-rw-r--r--net/ipv4/udp_diag.c2
-rw-r--r--net/ipv4/udp_offload.c4
-rw-r--r--net/ipv4/udp_tunnel.c4
-rw-r--r--net/ipv4/xfrm4_input.c7
-rw-r--r--net/ipv4/xfrm4_output.c12
-rw-r--r--net/ipv4/xfrm4_policy.c2
-rw-r--r--net/ipv6/addrconf.c4
-rw-r--r--net/ipv6/fib6_rules.c2
-rw-r--r--net/ipv6/ip6_gre.c10
-rw-r--r--net/ipv6/ip6_input.c11
-rw-r--r--net/ipv6/ip6_output.c36
-rw-r--r--net/ipv6/ip6_tunnel.c12
-rw-r--r--net/ipv6/ip6_udp_tunnel.c5
-rw-r--r--net/ipv6/ip6_vti.c3
-rw-r--r--net/ipv6/ip6mr.c22
-rw-r--r--net/ipv6/mcast.c9
-rw-r--r--net/ipv6/ndisc.c14
-rw-r--r--net/ipv6/netfilter.c4
-rw-r--r--net/ipv6/netfilter/ip6_tables.c13
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c6
-rw-r--r--net/ipv6/netfilter/ip6table_filter.c8
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c19
-rw-r--r--net/ipv6/netfilter/ip6table_nat.c29
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c8
-rw-r--r--net/ipv6/netfilter/ip6table_security.c8
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c20
-rw-r--r--net/ipv6/netfilter/nf_defrag_ipv6_hooks.c10
-rw-r--r--net/ipv6/netfilter/nf_nat_l3proto_ipv6.c32
-rw-r--r--net/ipv6/netfilter/nf_tables_ipv6.c12
-rw-r--r--net/ipv6/netfilter/nft_chain_nat_ipv6.c29
-rw-r--r--net/ipv6/netfilter/nft_chain_route_ipv6.c6
-rw-r--r--net/ipv6/output_core.c23
-rw-r--r--net/ipv6/raw.c4
-rw-r--r--net/ipv6/sit.c3
-rw-r--r--net/ipv6/tcp_ipv6.c13
-rw-r--r--net/ipv6/xfrm6_input.c3
-rw-r--r--net/ipv6/xfrm6_output.c15
-rw-r--r--net/iucv/af_iucv.c4
-rw-r--r--net/l2tp/l2tp_core.c1
-rw-r--r--net/mac80211/agg-rx.c8
-rw-r--r--net/mac80211/rx.c7
-rw-r--r--net/mac80211/sta_info.h2
-rw-r--r--net/netfilter/core.c31
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c32
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c8
-rw-r--r--net/netfilter/nf_internals.h11
-rw-r--r--net/netfilter/nf_queue.c58
-rw-r--r--net/netfilter/nfnetlink_queue_core.c30
-rw-r--r--net/openvswitch/vport-vxlan.c5
-rw-r--r--net/openvswitch/vport.c4
-rw-r--r--net/sched/sch_fq.c4
-rw-r--r--net/sunrpc/clnt.c4
-rw-r--r--net/sunrpc/debugfs.c52
-rw-r--r--net/sunrpc/sunrpc_syms.c7
-rw-r--r--net/sunrpc/xprt.c7
-rw-r--r--net/tipc/bcast.c4
-rw-r--r--net/tipc/core.c2
-rw-r--r--net/tipc/link.c343
-rw-r--r--net/tipc/link.h29
-rw-r--r--net/tipc/msg.c4
-rw-r--r--net/tipc/msg.h10
-rw-r--r--net/tipc/node.c22
-rw-r--r--net/tipc/udp_media.c6
-rw-r--r--net/xfrm/xfrm_output.c16
-rw-r--r--samples/bpf/Makefile1
-rw-r--r--samples/bpf/bpf_helpers.h7
-rw-r--r--samples/bpf/tcbpf1_kern.c71
-rw-r--r--security/selinux/hooks.c28
-rw-r--r--security/selinux/selinuxfs.c2
-rw-r--r--security/smack/smack_netfilter.c8
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/patch_realtek.c3
-rw-r--r--tools/testing/selftests/Makefile8
-rw-r--r--virt/kvm/kvm_main.c14
691 files changed, 13324 insertions, 7276 deletions
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index e124847443f8..f0b4cd72411d 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -19,7 +19,9 @@ the parent DSA node. The maximum number of allowed child nodes is 4
19(DSA_MAX_SWITCHES). 19(DSA_MAX_SWITCHES).
20Each of these switch child nodes should have the following required properties: 20Each of these switch child nodes should have the following required properties:
21 21
22- reg : Describes the switch address on the MII bus 22- reg : Contains two fields. The first one describes the
23 address on the MII bus. The second is the switch
24 number that must be unique in cascaded configurations
23- #address-cells : Must be 1 25- #address-cells : Must be 1
24- #size-cells : Must be 0 26- #size-cells : Must be 0
25 27
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index a63e5e013a8c..92ae734c00c3 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -114,6 +114,9 @@ ALPS Absolute Mode - Protocol Version 2
114 byte 4: 0 y6 y5 y4 y3 y2 y1 y0 114 byte 4: 0 y6 y5 y4 y3 y2 y1 y0
115 byte 5: 0 z6 z5 z4 z3 z2 z1 z0 115 byte 5: 0 z6 z5 z4 z3 z2 z1 z0
116 116
117Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
118the DualPoint Stick.
119
117Dualpoint device -- interleaved packet format 120Dualpoint device -- interleaved packet format
118--------------------------------------------- 121---------------------------------------------
119 122
@@ -127,6 +130,11 @@ Dualpoint device -- interleaved packet format
127 byte 7: 0 y6 y5 y4 y3 y2 y1 y0 130 byte 7: 0 y6 y5 y4 y3 y2 y1 y0
128 byte 8: 0 z6 z5 z4 z3 z2 z1 z0 131 byte 8: 0 z6 z5 z4 z3 z2 z1 z0
129 132
133Devices which use the interleaving format normally send standard PS/2 mouse
134packets for the DualPoint Stick + ALPS Absolute Mode packets for the
135touchpad, switching to the interleaved packet format when both the stick and
136the touchpad are used at the same time.
137
130ALPS Absolute Mode - Protocol Version 3 138ALPS Absolute Mode - Protocol Version 3
131--------------------------------------- 139---------------------------------------
132 140
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index c587a966413e..96705616f582 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -294,6 +294,12 @@ accordingly. This property does not affect kernel behavior.
294The kernel does not provide button emulation for such devices but treats 294The kernel does not provide button emulation for such devices but treats
295them as any other INPUT_PROP_BUTTONPAD device. 295them as any other INPUT_PROP_BUTTONPAD device.
296 296
297INPUT_PROP_ACCELEROMETER
298-------------------------
299Directional axes on this device (absolute and/or relative x, y, z) represent
300accelerometer data. All other axes retain their meaning. A device must not mix
301regular directional axes and accelerometer axes on the same event node.
302
297Guidelines: 303Guidelines:
298========== 304==========
299The guidelines below ensure proper single-touch and multi-finger functionality. 305The guidelines below ensure proper single-touch and multi-finger functionality.
diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt
index 7b4f59c09ee2..b85d000faeb4 100644
--- a/Documentation/input/multi-touch-protocol.txt
+++ b/Documentation/input/multi-touch-protocol.txt
@@ -312,9 +312,12 @@ ABS_MT_TOOL_TYPE
312 312
313The type of approaching tool. A lot of kernel drivers cannot distinguish 313The type of approaching tool. A lot of kernel drivers cannot distinguish
314between different tool types, such as a finger or a pen. In such cases, the 314between different tool types, such as a finger or a pen. In such cases, the
315event should be omitted. The protocol currently supports MT_TOOL_FINGER and 315event should be omitted. The protocol currently supports MT_TOOL_FINGER,
316MT_TOOL_PEN [2]. For type B devices, this event is handled by input core; 316MT_TOOL_PEN, and MT_TOOL_PALM [2]. For type B devices, this event is handled
317drivers should instead use input_mt_report_slot_state(). 317by input core; drivers should instead use input_mt_report_slot_state().
318A contact's ABS_MT_TOOL_TYPE may change over time while still touching the
319device, because the firmware may not be able to determine which tool is being
320used when it first appears.
318 321
319ABS_MT_BLOB_ID 322ABS_MT_BLOB_ID
320 323
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index 0a2859a8ee7e..5abad1e921ca 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -22,7 +22,8 @@ This file contains
22 4.1.3 RAW socket option CAN_RAW_LOOPBACK 22 4.1.3 RAW socket option CAN_RAW_LOOPBACK
23 4.1.4 RAW socket option CAN_RAW_RECV_OWN_MSGS 23 4.1.4 RAW socket option CAN_RAW_RECV_OWN_MSGS
24 4.1.5 RAW socket option CAN_RAW_FD_FRAMES 24 4.1.5 RAW socket option CAN_RAW_FD_FRAMES
25 4.1.6 RAW socket returned message flags 25 4.1.6 RAW socket option CAN_RAW_JOIN_FILTERS
26 4.1.7 RAW socket returned message flags
26 4.2 Broadcast Manager protocol sockets (SOCK_DGRAM) 27 4.2 Broadcast Manager protocol sockets (SOCK_DGRAM)
27 4.2.1 Broadcast Manager operations 28 4.2.1 Broadcast Manager operations
28 4.2.2 Broadcast Manager message flags 29 4.2.2 Broadcast Manager message flags
@@ -601,7 +602,22 @@ solution for a couple of reasons:
601 CAN FD frames by checking if the device maximum transfer unit is CANFD_MTU. 602 CAN FD frames by checking if the device maximum transfer unit is CANFD_MTU.
602 The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall. 603 The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall.
603 604
604 4.1.6 RAW socket returned message flags 605 4.1.6 RAW socket option CAN_RAW_JOIN_FILTERS
606
607 The CAN_RAW socket can set multiple CAN identifier specific filters that
608 lead to multiple filters in the af_can.c filter processing. These filters
609 are indenpendent from each other which leads to logical OR'ed filters when
610 applied (see 4.1.1).
611
612 This socket option joines the given CAN filters in the way that only CAN
613 frames are passed to user space that matched *all* given CAN filters. The
614 semantic for the applied filters is therefore changed to a logical AND.
615
616 This is useful especially when the filterset is a combination of filters
617 where the CAN_INV_FILTER flag is set in order to notch single CAN IDs or
618 CAN ID ranges from the incoming traffic.
619
620 4.1.7 RAW socket returned message flags
605 621
606 When using recvmsg() call, the msg->msg_flags may contain following flags: 622 When using recvmsg() call, the msg->msg_flags may contain following flags:
607 623
diff --git a/MAINTAINERS b/MAINTAINERS
index c2016557b294..9091b4ad1cc3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -637,8 +637,7 @@ F: drivers/gpu/drm/radeon/radeon_kfd.h
637F: include/uapi/linux/kfd_ioctl.h 637F: include/uapi/linux/kfd_ioctl.h
638 638
639AMD MICROCODE UPDATE SUPPORT 639AMD MICROCODE UPDATE SUPPORT
640M: Andreas Herrmann <herrmann.der.user@googlemail.com> 640M: Borislav Petkov <bp@alien8.de>
641L: amd64-microcode@amd64.org
642S: Maintained 641S: Maintained
643F: arch/x86/kernel/cpu/microcode/amd* 642F: arch/x86/kernel/cpu/microcode/amd*
644 643
@@ -1186,7 +1185,7 @@ M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
1186L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1185L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1187S: Maintained 1186S: Maintained
1188F: arch/arm/mach-mvebu/ 1187F: arch/arm/mach-mvebu/
1189F: drivers/rtc/armada38x-rtc 1188F: drivers/rtc/rtc-armada38x.c
1190 1189
1191ARM/Marvell Berlin SoC support 1190ARM/Marvell Berlin SoC support
1192M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> 1191M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
@@ -1362,6 +1361,7 @@ F: drivers/i2c/busses/i2c-rk3x.c
1362F: drivers/*/*rockchip* 1361F: drivers/*/*rockchip*
1363F: drivers/*/*/*rockchip* 1362F: drivers/*/*/*rockchip*
1364F: sound/soc/rockchip/ 1363F: sound/soc/rockchip/
1364N: rockchip
1365 1365
1366ARM/SAMSUNG EXYNOS ARM ARCHITECTURES 1366ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
1367M: Kukjin Kim <kgene@kernel.org> 1367M: Kukjin Kim <kgene@kernel.org>
@@ -1675,8 +1675,8 @@ F: drivers/misc/eeprom/at24.c
1675F: include/linux/platform_data/at24.h 1675F: include/linux/platform_data/at24.h
1676 1676
1677ATA OVER ETHERNET (AOE) DRIVER 1677ATA OVER ETHERNET (AOE) DRIVER
1678M: "Ed L. Cashin" <ecashin@coraid.com> 1678M: "Ed L. Cashin" <ed.cashin@acm.org>
1679W: http://support.coraid.com/support/linux 1679W: http://www.openaoe.org/
1680S: Supported 1680S: Supported
1681F: Documentation/aoe/ 1681F: Documentation/aoe/
1682F: drivers/block/aoe/ 1682F: drivers/block/aoe/
@@ -3252,6 +3252,13 @@ S: Maintained
3252F: Documentation/hwmon/dme1737 3252F: Documentation/hwmon/dme1737
3253F: drivers/hwmon/dme1737.c 3253F: drivers/hwmon/dme1737.c
3254 3254
3255DMI/SMBIOS SUPPORT
3256M: Jean Delvare <jdelvare@suse.de>
3257S: Maintained
3258F: drivers/firmware/dmi-id.c
3259F: drivers/firmware/dmi_scan.c
3260F: include/linux/dmi.h
3261
3255DOCKING STATION DRIVER 3262DOCKING STATION DRIVER
3256M: Shaohua Li <shaohua.li@intel.com> 3263M: Shaohua Li <shaohua.li@intel.com>
3257L: linux-acpi@vger.kernel.org 3264L: linux-acpi@vger.kernel.org
@@ -5087,7 +5094,7 @@ S: Supported
5087F: drivers/platform/x86/intel_menlow.c 5094F: drivers/platform/x86/intel_menlow.c
5088 5095
5089INTEL IA32 MICROCODE UPDATE SUPPORT 5096INTEL IA32 MICROCODE UPDATE SUPPORT
5090M: Tigran Aivazian <tigran@aivazian.fsnet.co.uk> 5097M: Borislav Petkov <bp@alien8.de>
5091S: Maintained 5098S: Maintained
5092F: arch/x86/kernel/cpu/microcode/core* 5099F: arch/x86/kernel/cpu/microcode/core*
5093F: arch/x86/kernel/cpu/microcode/intel* 5100F: arch/x86/kernel/cpu/microcode/intel*
@@ -5128,22 +5135,21 @@ M: Deepak Saxena <dsaxena@plexity.net>
5128S: Maintained 5135S: Maintained
5129F: drivers/char/hw_random/ixp4xx-rng.c 5136F: drivers/char/hw_random/ixp4xx-rng.c
5130 5137
5131INTEL ETHERNET DRIVERS (e100/e1000/e1000e/fm10k/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf) 5138INTEL ETHERNET DRIVERS
5132M: Jeff Kirsher <jeffrey.t.kirsher@intel.com> 5139M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
5133M: Jesse Brandeburg <jesse.brandeburg@intel.com> 5140R: Jesse Brandeburg <jesse.brandeburg@intel.com>
5134M: Bruce Allan <bruce.w.allan@intel.com> 5141R: Shannon Nelson <shannon.nelson@intel.com>
5135M: Carolyn Wyborny <carolyn.wyborny@intel.com> 5142R: Carolyn Wyborny <carolyn.wyborny@intel.com>
5136M: Don Skidmore <donald.c.skidmore@intel.com> 5143R: Don Skidmore <donald.c.skidmore@intel.com>
5137M: Greg Rose <gregory.v.rose@intel.com> 5144R: Matthew Vick <matthew.vick@intel.com>
5138M: Matthew Vick <matthew.vick@intel.com> 5145R: John Ronciak <john.ronciak@intel.com>
5139M: John Ronciak <john.ronciak@intel.com> 5146R: Mitch Williams <mitch.a.williams@intel.com>
5140M: Mitch Williams <mitch.a.williams@intel.com> 5147L: intel-wired-lan@lists.osuosl.org
5141M: Linux NICS <linux.nics@intel.com>
5142L: e1000-devel@lists.sourceforge.net
5143W: http://www.intel.com/support/feedback.htm 5148W: http://www.intel.com/support/feedback.htm
5144W: http://e1000.sourceforge.net/ 5149W: http://e1000.sourceforge.net/
5145T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git 5150Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/
5146T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git 5151T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git
5152T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
5147S: Supported 5153S: Supported
5148F: Documentation/networking/e100.txt 5154F: Documentation/networking/e100.txt
5149F: Documentation/networking/e1000.txt 5155F: Documentation/networking/e1000.txt
@@ -6316,6 +6322,7 @@ F: drivers/scsi/megaraid/
6316 6322
6317MELLANOX ETHERNET DRIVER (mlx4_en) 6323MELLANOX ETHERNET DRIVER (mlx4_en)
6318M: Amir Vadai <amirv@mellanox.com> 6324M: Amir Vadai <amirv@mellanox.com>
6325M: Ido Shamay <idos@mellanox.com>
6319L: netdev@vger.kernel.org 6326L: netdev@vger.kernel.org
6320S: Supported 6327S: Supported
6321W: http://www.mellanox.com 6328W: http://www.mellanox.com
diff --git a/Makefile b/Makefile
index 14c722f96877..54430f933b62 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 0 2PATCHLEVEL = 0
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc5 4EXTRAVERSION = -rc7
5NAME = Hurr durr I'ma sheep 5NAME = Hurr durr I'ma sheep
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 114234e83caa..edda76fae83f 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
67 sigset_t *set) 67 sigset_t *set)
68{ 68{
69 int err; 69 int err;
70 err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs, 70 err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
71 sizeof(sf->uc.uc_mcontext.regs.scratch)); 71 sizeof(sf->uc.uc_mcontext.regs.scratch));
72 err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t)); 72 err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
73 73
@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
83 if (!err) 83 if (!err)
84 set_current_blocked(&set); 84 set_current_blocked(&set);
85 85
86 err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs), 86 err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
87 sizeof(sf->uc.uc_mcontext.regs.scratch)); 87 sizeof(sf->uc.uc_mcontext.regs.scratch));
88 88
89 return err; 89 return err;
@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
131 /* Don't restart from sigreturn */ 131 /* Don't restart from sigreturn */
132 syscall_wont_restart(regs); 132 syscall_wont_restart(regs);
133 133
134 /*
135 * Ensure that sigreturn always returns to user mode (in case the
136 * regs saved on user stack got fudged between save and sigreturn)
137 * Otherwise it is easy to panic the kernel with a custom
138 * signal handler and/or restorer which clobberes the status32/ret
139 * to return to a bogus location in kernel mode.
140 */
141 regs->status32 |= STATUS_U_MASK;
142
134 return regs->r0; 143 return regs->r0;
135 144
136badframe: 145badframe:
@@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
229 238
230 /* 239 /*
231 * handler returns using sigreturn stub provided already by userpsace 240 * handler returns using sigreturn stub provided already by userpsace
241 * If not, nuke the process right away
232 */ 242 */
233 BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER)); 243 if(!(ksig->ka.sa.sa_flags & SA_RESTORER))
244 return 1;
245
234 regs->blink = (unsigned long)ksig->ka.sa.sa_restorer; 246 regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;
235 247
236 /* User Stack for signal handler will be above the frame just carved */ 248 /* User Stack for signal handler will be above the frame just carved */
@@ -296,12 +308,12 @@ static void
296handle_signal(struct ksignal *ksig, struct pt_regs *regs) 308handle_signal(struct ksignal *ksig, struct pt_regs *regs)
297{ 309{
298 sigset_t *oldset = sigmask_to_save(); 310 sigset_t *oldset = sigmask_to_save();
299 int ret; 311 int failed;
300 312
301 /* Set up the stack frame */ 313 /* Set up the stack frame */
302 ret = setup_rt_frame(ksig, oldset, regs); 314 failed = setup_rt_frame(ksig, oldset, regs);
303 315
304 signal_setup_done(ret, ksig, 0); 316 signal_setup_done(failed, ksig, 0);
305} 317}
306 318
307void do_signal(struct pt_regs *regs) 319void do_signal(struct pt_regs *regs)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9f1f09a2bc9b..cf4c0c99aa25 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -619,6 +619,7 @@ config ARCH_PXA
619 select GENERIC_CLOCKEVENTS 619 select GENERIC_CLOCKEVENTS
620 select GPIO_PXA 620 select GPIO_PXA
621 select HAVE_IDE 621 select HAVE_IDE
622 select IRQ_DOMAIN
622 select MULTI_IRQ_HANDLER 623 select MULTI_IRQ_HANDLER
623 select PLAT_PXA 624 select PLAT_PXA
624 select SPARSE_IRQ 625 select SPARSE_IRQ
diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts
index d3a29c1b8417..afe678f6d2e9 100644
--- a/arch/arm/boot/dts/dm8168-evm.dts
+++ b/arch/arm/boot/dts/dm8168-evm.dts
@@ -36,6 +36,20 @@
36 >; 36 >;
37 }; 37 };
38 38
39 mmc_pins: pinmux_mmc_pins {
40 pinctrl-single,pins = <
41 DM816X_IOPAD(0x0a70, MUX_MODE0) /* SD_POW */
42 DM816X_IOPAD(0x0a74, MUX_MODE0) /* SD_CLK */
43 DM816X_IOPAD(0x0a78, MUX_MODE0) /* SD_CMD */
44 DM816X_IOPAD(0x0a7C, MUX_MODE0) /* SD_DAT0 */
45 DM816X_IOPAD(0x0a80, MUX_MODE0) /* SD_DAT1 */
46 DM816X_IOPAD(0x0a84, MUX_MODE0) /* SD_DAT2 */
47 DM816X_IOPAD(0x0a88, MUX_MODE0) /* SD_DAT2 */
48 DM816X_IOPAD(0x0a8c, MUX_MODE2) /* GP1[7] */
49 DM816X_IOPAD(0x0a90, MUX_MODE2) /* GP1[8] */
50 >;
51 };
52
39 usb0_pins: pinmux_usb0_pins { 53 usb0_pins: pinmux_usb0_pins {
40 pinctrl-single,pins = < 54 pinctrl-single,pins = <
41 DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */ 55 DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */
@@ -137,7 +151,12 @@
137}; 151};
138 152
139&mmc1 { 153&mmc1 {
154 pinctrl-names = "default";
155 pinctrl-0 = <&mmc_pins>;
140 vmmc-supply = <&vmmcsd_fixed>; 156 vmmc-supply = <&vmmcsd_fixed>;
157 bus-width = <4>;
158 cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
159 wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
141}; 160};
142 161
143/* At least dm8168-evm rev c won't support multipoint, later may */ 162/* At least dm8168-evm rev c won't support multipoint, later may */
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 3c97b5f2addc..f35715bc6992 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -150,17 +150,27 @@
150 }; 150 };
151 151
152 gpio1: gpio@48032000 { 152 gpio1: gpio@48032000 {
153 compatible = "ti,omap3-gpio"; 153 compatible = "ti,omap4-gpio";
154 ti,hwmods = "gpio1"; 154 ti,hwmods = "gpio1";
155 ti,gpio-always-on;
155 reg = <0x48032000 0x1000>; 156 reg = <0x48032000 0x1000>;
156 interrupts = <97>; 157 interrupts = <96>;
158 gpio-controller;
159 #gpio-cells = <2>;
160 interrupt-controller;
161 #interrupt-cells = <2>;
157 }; 162 };
158 163
159 gpio2: gpio@4804c000 { 164 gpio2: gpio@4804c000 {
160 compatible = "ti,omap3-gpio"; 165 compatible = "ti,omap4-gpio";
161 ti,hwmods = "gpio2"; 166 ti,hwmods = "gpio2";
167 ti,gpio-always-on;
162 reg = <0x4804c000 0x1000>; 168 reg = <0x4804c000 0x1000>;
163 interrupts = <99>; 169 interrupts = <98>;
170 gpio-controller;
171 #gpio-cells = <2>;
172 interrupt-controller;
173 #interrupt-cells = <2>;
164 }; 174 };
165 175
166 gpmc: gpmc@50000000 { 176 gpmc: gpmc@50000000 {
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 127608d79033..c4659a979c41 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1111,7 +1111,6 @@
1111 "wkupclk", "refclk", 1111 "wkupclk", "refclk",
1112 "div-clk", "phy-div"; 1112 "div-clk", "phy-div";
1113 #phy-cells = <0>; 1113 #phy-cells = <0>;
1114 ti,hwmods = "pcie1-phy";
1115 }; 1114 };
1116 1115
1117 pcie2_phy: pciephy@4a095000 { 1116 pcie2_phy: pciephy@4a095000 {
@@ -1130,7 +1129,6 @@
1130 "wkupclk", "refclk", 1129 "wkupclk", "refclk",
1131 "div-clk", "phy-div"; 1130 "div-clk", "phy-div";
1132 #phy-cells = <0>; 1131 #phy-cells = <0>;
1133 ti,hwmods = "pcie2-phy";
1134 status = "disabled"; 1132 status = "disabled";
1135 }; 1133 };
1136 }; 1134 };
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index f4f78c40b564..3fdc84fddb70 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -92,6 +92,8 @@
92 ti,hwmods = "aes"; 92 ti,hwmods = "aes";
93 reg = <0x480c5000 0x50>; 93 reg = <0x480c5000 0x50>;
94 interrupts = <0>; 94 interrupts = <0>;
95 dmas = <&sdma 65 &sdma 66>;
96 dma-names = "tx", "rx";
95 }; 97 };
96 98
97 prm: prm@48306000 { 99 prm: prm@48306000 {
@@ -550,6 +552,8 @@
550 ti,hwmods = "sham"; 552 ti,hwmods = "sham";
551 reg = <0x480c3000 0x64>; 553 reg = <0x480c3000 0x64>;
552 interrupts = <49>; 554 interrupts = <49>;
555 dmas = <&sdma 69>;
556 dma-names = "rx";
553 }; 557 };
554 558
555 smartreflex_core: smartreflex@480cb000 { 559 smartreflex_core: smartreflex@480cb000 {
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index d771f687a13b..eccc78d3220b 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -411,6 +411,7 @@
411 "mac_clk_rx", "mac_clk_tx", 411 "mac_clk_rx", "mac_clk_tx",
412 "clk_mac_ref", "clk_mac_refout", 412 "clk_mac_ref", "clk_mac_refout",
413 "aclk_mac", "pclk_mac"; 413 "aclk_mac", "pclk_mac";
414 status = "disabled";
414 }; 415 };
415 416
416 usb_host0_ehci: usb@ff500000 { 417 usb_host0_ehci: usb@ff500000 {
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 9d8760956752..d9176e606173 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -660,7 +660,7 @@
660 #address-cells = <1>; 660 #address-cells = <1>;
661 #size-cells = <0>; 661 #size-cells = <0>;
662 reg = <0xfff01000 0x1000>; 662 reg = <0xfff01000 0x1000>;
663 interrupts = <0 156 4>; 663 interrupts = <0 155 4>;
664 num-cs = <4>; 664 num-cs = <4>;
665 clocks = <&spi_m_clk>; 665 clocks = <&spi_m_clk>;
666 status = "disabled"; 666 status = "disabled";
diff --git a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
index ab7891c43231..75742f8f96f3 100644
--- a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
+++ b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
@@ -56,6 +56,22 @@
56 model = "Olimex A10-OLinuXino-LIME"; 56 model = "Olimex A10-OLinuXino-LIME";
57 compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10"; 57 compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10";
58 58
59 cpus {
60 cpu0: cpu@0 {
61 /*
62 * The A10-Lime is known to be unstable
63 * when running at 1008 MHz
64 */
65 operating-points = <
66 /* kHz uV */
67 912000 1350000
68 864000 1300000
69 624000 1250000
70 >;
71 cooling-max-level = <2>;
72 };
73 };
74
59 soc@01c00000 { 75 soc@01c00000 {
60 emac: ethernet@01c0b000 { 76 emac: ethernet@01c0b000 {
61 pinctrl-names = "default"; 77 pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 5c2925831f20..eebb7853e00b 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -75,7 +75,6 @@
75 clock-latency = <244144>; /* 8 32k periods */ 75 clock-latency = <244144>; /* 8 32k periods */
76 operating-points = < 76 operating-points = <
77 /* kHz uV */ 77 /* kHz uV */
78 1056000 1500000
79 1008000 1400000 78 1008000 1400000
80 912000 1350000 79 912000 1350000
81 864000 1300000 80 864000 1300000
@@ -83,7 +82,7 @@
83 >; 82 >;
84 #cooling-cells = <2>; 83 #cooling-cells = <2>;
85 cooling-min-level = <0>; 84 cooling-min-level = <0>;
86 cooling-max-level = <4>; 85 cooling-max-level = <3>;
87 }; 86 };
88 }; 87 };
89 88
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index f8818f1edbbe..883cb4873688 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -47,7 +47,6 @@
47 clock-latency = <244144>; /* 8 32k periods */ 47 clock-latency = <244144>; /* 8 32k periods */
48 operating-points = < 48 operating-points = <
49 /* kHz uV */ 49 /* kHz uV */
50 1104000 1500000
51 1008000 1400000 50 1008000 1400000
52 912000 1350000 51 912000 1350000
53 864000 1300000 52 864000 1300000
@@ -57,7 +56,7 @@
57 >; 56 >;
58 #cooling-cells = <2>; 57 #cooling-cells = <2>;
59 cooling-min-level = <0>; 58 cooling-min-level = <0>;
60 cooling-max-level = <6>; 59 cooling-max-level = <5>;
61 }; 60 };
62 }; 61 };
63 62
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 3a8530b79f1c..fdd181792b4b 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -105,7 +105,6 @@
105 clock-latency = <244144>; /* 8 32k periods */ 105 clock-latency = <244144>; /* 8 32k periods */
106 operating-points = < 106 operating-points = <
107 /* kHz uV */ 107 /* kHz uV */
108 1008000 1450000
109 960000 1400000 108 960000 1400000
110 912000 1400000 109 912000 1400000
111 864000 1300000 110 864000 1300000
@@ -116,7 +115,7 @@
116 >; 115 >;
117 #cooling-cells = <2>; 116 #cooling-cells = <2>;
118 cooling-min-level = <0>; 117 cooling-min-level = <0>;
119 cooling-max-level = <7>; 118 cooling-max-level = <6>;
120 }; 119 };
121 120
122 cpu@1 { 121 cpu@1 {
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 2a2f4d56e4c8..25f1beea453e 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -720,6 +720,8 @@ static const char * __init omap_get_family(void)
720 return kasprintf(GFP_KERNEL, "OMAP4"); 720 return kasprintf(GFP_KERNEL, "OMAP4");
721 else if (soc_is_omap54xx()) 721 else if (soc_is_omap54xx())
722 return kasprintf(GFP_KERNEL, "OMAP5"); 722 return kasprintf(GFP_KERNEL, "OMAP5");
723 else if (soc_is_am33xx() || soc_is_am335x())
724 return kasprintf(GFP_KERNEL, "AM33xx");
723 else if (soc_is_am43xx()) 725 else if (soc_is_am43xx())
724 return kasprintf(GFP_KERNEL, "AM43xx"); 726 return kasprintf(GFP_KERNEL, "AM43xx");
725 else if (soc_is_dra7xx()) 727 else if (soc_is_dra7xx())
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 0eecd83c624e..89a7c06570d3 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -11,6 +11,7 @@
11 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14#include <linux/bitops.h>
14#include <linux/init.h> 15#include <linux/init.h>
15#include <linux/module.h> 16#include <linux/module.h>
16#include <linux/interrupt.h> 17#include <linux/interrupt.h>
@@ -40,7 +41,6 @@
40#define ICHP_VAL_IRQ (1 << 31) 41#define ICHP_VAL_IRQ (1 << 31)
41#define ICHP_IRQ(i) (((i) >> 16) & 0x7fff) 42#define ICHP_IRQ(i) (((i) >> 16) & 0x7fff)
42#define IPR_VALID (1 << 31) 43#define IPR_VALID (1 << 31)
43#define IRQ_BIT(n) (((n) - PXA_IRQ(0)) & 0x1f)
44 44
45#define MAX_INTERNAL_IRQS 128 45#define MAX_INTERNAL_IRQS 128
46 46
@@ -51,6 +51,7 @@
51static void __iomem *pxa_irq_base; 51static void __iomem *pxa_irq_base;
52static int pxa_internal_irq_nr; 52static int pxa_internal_irq_nr;
53static bool cpu_has_ipr; 53static bool cpu_has_ipr;
54static struct irq_domain *pxa_irq_domain;
54 55
55static inline void __iomem *irq_base(int i) 56static inline void __iomem *irq_base(int i)
56{ 57{
@@ -66,18 +67,20 @@ static inline void __iomem *irq_base(int i)
66void pxa_mask_irq(struct irq_data *d) 67void pxa_mask_irq(struct irq_data *d)
67{ 68{
68 void __iomem *base = irq_data_get_irq_chip_data(d); 69 void __iomem *base = irq_data_get_irq_chip_data(d);
70 irq_hw_number_t irq = irqd_to_hwirq(d);
69 uint32_t icmr = __raw_readl(base + ICMR); 71 uint32_t icmr = __raw_readl(base + ICMR);
70 72
71 icmr &= ~(1 << IRQ_BIT(d->irq)); 73 icmr &= ~BIT(irq & 0x1f);
72 __raw_writel(icmr, base + ICMR); 74 __raw_writel(icmr, base + ICMR);
73} 75}
74 76
75void pxa_unmask_irq(struct irq_data *d) 77void pxa_unmask_irq(struct irq_data *d)
76{ 78{
77 void __iomem *base = irq_data_get_irq_chip_data(d); 79 void __iomem *base = irq_data_get_irq_chip_data(d);
80 irq_hw_number_t irq = irqd_to_hwirq(d);
78 uint32_t icmr = __raw_readl(base + ICMR); 81 uint32_t icmr = __raw_readl(base + ICMR);
79 82
80 icmr |= 1 << IRQ_BIT(d->irq); 83 icmr |= BIT(irq & 0x1f);
81 __raw_writel(icmr, base + ICMR); 84 __raw_writel(icmr, base + ICMR);
82} 85}
83 86
@@ -118,40 +121,63 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
118 } while (1); 121 } while (1);
119} 122}
120 123
121void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int)) 124static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
125 irq_hw_number_t hw)
122{ 126{
123 int irq, i, n; 127 void __iomem *base = irq_base(hw / 32);
124 128
125 BUG_ON(irq_nr > MAX_INTERNAL_IRQS); 129 /* initialize interrupt priority */
130 if (cpu_has_ipr)
131 __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
132
133 irq_set_chip_and_handler(virq, &pxa_internal_irq_chip,
134 handle_level_irq);
135 irq_set_chip_data(virq, base);
136 set_irq_flags(virq, IRQF_VALID);
137
138 return 0;
139}
140
141static struct irq_domain_ops pxa_irq_ops = {
142 .map = pxa_irq_map,
143 .xlate = irq_domain_xlate_onecell,
144};
145
146static __init void
147pxa_init_irq_common(struct device_node *node, int irq_nr,
148 int (*fn)(struct irq_data *, unsigned int))
149{
150 int n;
126 151
127 pxa_internal_irq_nr = irq_nr; 152 pxa_internal_irq_nr = irq_nr;
128 cpu_has_ipr = !cpu_is_pxa25x(); 153 pxa_irq_domain = irq_domain_add_legacy(node, irq_nr,
129 pxa_irq_base = io_p2v(0x40d00000); 154 PXA_IRQ(0), 0,
155 &pxa_irq_ops, NULL);
156 if (!pxa_irq_domain)
157 panic("Unable to add PXA IRQ domain\n");
158 irq_set_default_host(pxa_irq_domain);
130 159
131 for (n = 0; n < irq_nr; n += 32) { 160 for (n = 0; n < irq_nr; n += 32) {
132 void __iomem *base = irq_base(n >> 5); 161 void __iomem *base = irq_base(n >> 5);
133 162
134 __raw_writel(0, base + ICMR); /* disable all IRQs */ 163 __raw_writel(0, base + ICMR); /* disable all IRQs */
135 __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */ 164 __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */
136 for (i = n; (i < (n + 32)) && (i < irq_nr); i++) {
137 /* initialize interrupt priority */
138 if (cpu_has_ipr)
139 __raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i));
140
141 irq = PXA_IRQ(i);
142 irq_set_chip_and_handler(irq, &pxa_internal_irq_chip,
143 handle_level_irq);
144 irq_set_chip_data(irq, base);
145 set_irq_flags(irq, IRQF_VALID);
146 }
147 } 165 }
148
149 /* only unmasked interrupts kick us out of idle */ 166 /* only unmasked interrupts kick us out of idle */
150 __raw_writel(1, irq_base(0) + ICCR); 167 __raw_writel(1, irq_base(0) + ICCR);
151 168
152 pxa_internal_irq_chip.irq_set_wake = fn; 169 pxa_internal_irq_chip.irq_set_wake = fn;
153} 170}
154 171
172void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
173{
174 BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
175
176 pxa_irq_base = io_p2v(0x40d00000);
177 cpu_has_ipr = !cpu_is_pxa25x();
178 pxa_init_irq_common(NULL, irq_nr, fn);
179}
180
155#ifdef CONFIG_PM 181#ifdef CONFIG_PM
156static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; 182static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32];
157static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; 183static unsigned long saved_ipr[MAX_INTERNAL_IRQS];
@@ -203,30 +229,6 @@ struct syscore_ops pxa_irq_syscore_ops = {
203}; 229};
204 230
205#ifdef CONFIG_OF 231#ifdef CONFIG_OF
206static struct irq_domain *pxa_irq_domain;
207
208static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
209 irq_hw_number_t hw)
210{
211 void __iomem *base = irq_base(hw / 32);
212
213 /* initialize interrupt priority */
214 if (cpu_has_ipr)
215 __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
216
217 irq_set_chip_and_handler(hw, &pxa_internal_irq_chip,
218 handle_level_irq);
219 irq_set_chip_data(hw, base);
220 set_irq_flags(hw, IRQF_VALID);
221
222 return 0;
223}
224
225static struct irq_domain_ops pxa_irq_ops = {
226 .map = pxa_irq_map,
227 .xlate = irq_domain_xlate_onecell,
228};
229
230static const struct of_device_id intc_ids[] __initconst = { 232static const struct of_device_id intc_ids[] __initconst = {
231 { .compatible = "marvell,pxa-intc", }, 233 { .compatible = "marvell,pxa-intc", },
232 {} 234 {}
@@ -236,7 +238,7 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
236{ 238{
237 struct device_node *node; 239 struct device_node *node;
238 struct resource res; 240 struct resource res;
239 int n, ret; 241 int ret;
240 242
241 node = of_find_matching_node(NULL, intc_ids); 243 node = of_find_matching_node(NULL, intc_ids);
242 if (!node) { 244 if (!node) {
@@ -267,23 +269,6 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
267 return; 269 return;
268 } 270 }
269 271
270 pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0, 272 pxa_init_irq_common(node, pxa_internal_irq_nr, fn);
271 &pxa_irq_ops, NULL);
272 if (!pxa_irq_domain)
273 panic("Unable to add PXA IRQ domain\n");
274
275 irq_set_default_host(pxa_irq_domain);
276
277 for (n = 0; n < pxa_internal_irq_nr; n += 32) {
278 void __iomem *base = irq_base(n >> 5);
279
280 __raw_writel(0, base + ICMR); /* disable all IRQs */
281 __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */
282 }
283
284 /* only unmasked interrupts kick us out of idle */
285 __raw_writel(1, irq_base(0) + ICCR);
286
287 pxa_internal_irq_chip.irq_set_wake = fn;
288} 273}
289#endif /* CONFIG_OF */ 274#endif /* CONFIG_OF */
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 205f9bf3821e..ac2ae5c71ab4 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -412,7 +412,7 @@ static struct fixed_voltage_config can_regulator_pdata = {
412}; 412};
413 413
414static struct platform_device can_regulator_device = { 414static struct platform_device can_regulator_device = {
415 .name = "reg-fixed-volage", 415 .name = "reg-fixed-voltage",
416 .id = 0, 416 .id = 0,
417 .dev = { 417 .dev = {
418 .platform_data = &can_regulator_pdata, 418 .platform_data = &can_regulator_pdata,
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index a77604fbaf25..81502b90dd91 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -1,10 +1,12 @@
1menuconfig ARCH_SUNXI 1menuconfig ARCH_SUNXI
2 bool "Allwinner SoCs" if ARCH_MULTI_V7 2 bool "Allwinner SoCs" if ARCH_MULTI_V7
3 select ARCH_REQUIRE_GPIOLIB 3 select ARCH_REQUIRE_GPIOLIB
4 select ARCH_HAS_RESET_CONTROLLER
4 select CLKSRC_MMIO 5 select CLKSRC_MMIO
5 select GENERIC_IRQ_CHIP 6 select GENERIC_IRQ_CHIP
6 select PINCTRL 7 select PINCTRL
7 select SUN4I_TIMER 8 select SUN4I_TIMER
9 select RESET_CONTROLLER
8 10
9if ARCH_SUNXI 11if ARCH_SUNXI
10 12
@@ -20,10 +22,8 @@ config MACH_SUN5I
20config MACH_SUN6I 22config MACH_SUN6I
21 bool "Allwinner A31 (sun6i) SoCs support" 23 bool "Allwinner A31 (sun6i) SoCs support"
22 default ARCH_SUNXI 24 default ARCH_SUNXI
23 select ARCH_HAS_RESET_CONTROLLER
24 select ARM_GIC 25 select ARM_GIC
25 select MFD_SUN6I_PRCM 26 select MFD_SUN6I_PRCM
26 select RESET_CONTROLLER
27 select SUN5I_HSTIMER 27 select SUN5I_HSTIMER
28 28
29config MACH_SUN7I 29config MACH_SUN7I
@@ -37,16 +37,12 @@ config MACH_SUN7I
37config MACH_SUN8I 37config MACH_SUN8I
38 bool "Allwinner A23 (sun8i) SoCs support" 38 bool "Allwinner A23 (sun8i) SoCs support"
39 default ARCH_SUNXI 39 default ARCH_SUNXI
40 select ARCH_HAS_RESET_CONTROLLER
41 select ARM_GIC 40 select ARM_GIC
42 select MFD_SUN6I_PRCM 41 select MFD_SUN6I_PRCM
43 select RESET_CONTROLLER
44 42
45config MACH_SUN9I 43config MACH_SUN9I
46 bool "Allwinner (sun9i) SoCs support" 44 bool "Allwinner (sun9i) SoCs support"
47 default ARCH_SUNXI 45 default ARCH_SUNXI
48 select ARCH_HAS_RESET_CONTROLLER
49 select ARM_GIC 46 select ARM_GIC
50 select RESET_CONTROLLER
51 47
52endif 48endif
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index db10169a08de..8ca94d379bc3 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -799,6 +799,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
799 struct device *dev = &pdev->dev; 799 struct device *dev = &pdev->dev;
800 const struct of_device_id *match; 800 const struct of_device_id *match;
801 const struct dmtimer_platform_data *pdata; 801 const struct dmtimer_platform_data *pdata;
802 int ret;
802 803
803 match = of_match_device(of_match_ptr(omap_timer_match), dev); 804 match = of_match_device(of_match_ptr(omap_timer_match), dev);
804 pdata = match ? match->data : dev->platform_data; 805 pdata = match ? match->data : dev->platform_data;
@@ -860,7 +861,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
860 } 861 }
861 862
862 if (!timer->reserved) { 863 if (!timer->reserved) {
863 pm_runtime_get_sync(dev); 864 ret = pm_runtime_get_sync(dev);
865 if (ret < 0) {
866 dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
867 __func__);
868 goto err_get_sync;
869 }
864 __omap_dm_timer_init_regs(timer); 870 __omap_dm_timer_init_regs(timer);
865 pm_runtime_put(dev); 871 pm_runtime_put(dev);
866 } 872 }
@@ -873,6 +879,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
873 dev_dbg(dev, "Device Probed.\n"); 879 dev_dbg(dev, "Device Probed.\n");
874 880
875 return 0; 881 return 0;
882
883err_get_sync:
884 pm_runtime_put_noidle(dev);
885 pm_runtime_disable(dev);
886 return ret;
876} 887}
877 888
878/** 889/**
@@ -899,6 +910,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
899 } 910 }
900 spin_unlock_irqrestore(&dm_timer_lock, flags); 911 spin_unlock_irqrestore(&dm_timer_lock, flags);
901 912
913 pm_runtime_disable(&pdev->dev);
914
902 return ret; 915 return ret;
903} 916}
904 917
diff --git a/arch/arm64/boot/dts/arm/juno-clocks.dtsi b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
index ea2b5666a16f..c9b89efe0f56 100644
--- a/arch/arm64/boot/dts/arm/juno-clocks.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
@@ -8,7 +8,7 @@
8 */ 8 */
9 9
10 /* SoC fixed clocks */ 10 /* SoC fixed clocks */
11 soc_uartclk: refclk72738khz { 11 soc_uartclk: refclk7273800hz {
12 compatible = "fixed-clock"; 12 compatible = "fixed-clock";
13 #clock-cells = <0>; 13 #clock-cells = <0>;
14 clock-frequency = <7273800>; 14 clock-frequency = <7273800>;
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index cb9593079f29..d8c25b7b18fb 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
246 __ret; \ 246 __ret; \
247}) 247})
248 248
249#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) 249#define _protect_cmpxchg_local(pcp, o, n) \
250#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) 250({ \
251#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) 251 typeof(*raw_cpu_ptr(&(pcp))) __ret; \
252#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) 252 preempt_disable(); \
253 253 __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
254#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \ 254 preempt_enable(); \
255 cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \ 255 __ret; \
256 o1, o2, n1, n2) 256})
257
258#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
259#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
260#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
261#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
262
263#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
264({ \
265 int __ret; \
266 preempt_disable(); \
267 __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \
268 raw_cpu_ptr(&(ptr2)), \
269 o1, o2, n1, n2); \
270 preempt_enable(); \
271 __ret; \
272})
257 273
258#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n)) 274#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n))
259#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n)) 275#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n))
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a9eee33dfa62..101a42bde728 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
151{ 151{
152 unsigned int cpu = smp_processor_id(); 152 unsigned int cpu = smp_processor_id();
153 153
154 /*
155 * init_mm.pgd does not contain any user mappings and it is always
156 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
157 */
158 if (next == &init_mm) {
159 cpu_set_reserved_ttbr0();
160 return;
161 }
162
154 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) 163 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
155 check_and_switch_context(next, tsk); 164 check_and_switch_context(next, tsk);
156} 165}
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 09da25bc596f..4fde8c1df97f 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
204 return ret; 204 return ret;
205} 205}
206 206
207#define _percpu_read(pcp) \
208({ \
209 typeof(pcp) __retval; \
210 preempt_disable(); \
211 __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \
212 sizeof(pcp)); \
213 preempt_enable(); \
214 __retval; \
215})
216
217#define _percpu_write(pcp, val) \
218do { \
219 preempt_disable(); \
220 __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), \
221 sizeof(pcp)); \
222 preempt_enable(); \
223} while(0) \
224
225#define _pcp_protect(operation, pcp, val) \
226({ \
227 typeof(pcp) __retval; \
228 preempt_disable(); \
229 __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \
230 (val), sizeof(pcp)); \
231 preempt_enable(); \
232 __retval; \
233})
234
207#define _percpu_add(pcp, val) \ 235#define _percpu_add(pcp, val) \
208 __percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) 236 _pcp_protect(__percpu_add, pcp, val)
209 237
210#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val)) 238#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
211 239
212#define _percpu_and(pcp, val) \ 240#define _percpu_and(pcp, val) \
213 __percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) 241 _pcp_protect(__percpu_and, pcp, val)
214 242
215#define _percpu_or(pcp, val) \ 243#define _percpu_or(pcp, val) \
216 __percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) 244 _pcp_protect(__percpu_or, pcp, val)
217
218#define _percpu_read(pcp) (typeof(pcp)) \
219 (__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp)))
220
221#define _percpu_write(pcp, val) \
222 __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))
223 245
224#define _percpu_xchg(pcp, val) (typeof(pcp)) \ 246#define _percpu_xchg(pcp, val) (typeof(pcp)) \
225 (__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))) 247 _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))
226 248
227#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val) 249#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
228#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val) 250#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)
diff --git a/arch/metag/include/asm/io.h b/arch/metag/include/asm/io.h
index 9359e5048442..d5779b0ec573 100644
--- a/arch/metag/include/asm/io.h
+++ b/arch/metag/include/asm/io.h
@@ -2,6 +2,7 @@
2#define _ASM_METAG_IO_H 2#define _ASM_METAG_IO_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/pgtable-bits.h>
5 6
6#define IO_SPACE_LIMIT 0 7#define IO_SPACE_LIMIT 0
7 8
diff --git a/arch/metag/include/asm/pgtable-bits.h b/arch/metag/include/asm/pgtable-bits.h
new file mode 100644
index 000000000000..25ba6729f496
--- /dev/null
+++ b/arch/metag/include/asm/pgtable-bits.h
@@ -0,0 +1,104 @@
1/*
2 * Meta page table definitions.
3 */
4
5#ifndef _METAG_PGTABLE_BITS_H
6#define _METAG_PGTABLE_BITS_H
7
8#include <asm/metag_mem.h>
9
10/*
11 * Definitions for MMU descriptors
12 *
13 * These are the hardware bits in the MMCU pte entries.
14 * Derived from the Meta toolkit headers.
15 */
16#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT
17#define _PAGE_WRITE MMCU_ENTRY_WR_BIT
18#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT
19/* Write combine bit - this can cause writes to occur out of order */
20#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT
21/* Sys coherent bit - this bit is never used by Linux */
22#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT
23#define _PAGE_ALWAYS_ZERO_1 0x020
24#define _PAGE_CACHE_CTRL0 0x040
25#define _PAGE_CACHE_CTRL1 0x080
26#define _PAGE_ALWAYS_ZERO_2 0x100
27#define _PAGE_ALWAYS_ZERO_3 0x200
28#define _PAGE_ALWAYS_ZERO_4 0x400
29#define _PAGE_ALWAYS_ZERO_5 0x800
30
31/* These are software bits that we stuff into the gaps in the hardware
32 * pte entries that are not used. Note, these DO get stored in the actual
33 * hardware, but the hardware just does not use them.
34 */
35#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
36#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
37
38/* Pages owned, and protected by, the kernel. */
39#define _PAGE_KERNEL _PAGE_PRIV
40
41/* No cacheing of this page */
42#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
43/* burst cacheing - good for data streaming */
44#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
45/* One cache way per thread */
46#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
47/* Full on cacheing */
48#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
49
50#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
51
52/* which bits are used for cache control ... */
53#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
54 _PAGE_WR_COMBINE)
55
56/* This is a mask of the bits that pte_modify is allowed to change. */
57#define _PAGE_CHG_MASK (PAGE_MASK)
58
59#define _PAGE_SZ_SHIFT 1
60#define _PAGE_SZ_4K (0x0)
61#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT)
62#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT)
63#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT)
64#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT)
65#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT)
66#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT)
67#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT)
68#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT)
69#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT)
70#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT)
71#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT)
72
73#if defined(CONFIG_PAGE_SIZE_4K)
74#define _PAGE_SZ (_PAGE_SZ_4K)
75#elif defined(CONFIG_PAGE_SIZE_8K)
76#define _PAGE_SZ (_PAGE_SZ_8K)
77#elif defined(CONFIG_PAGE_SIZE_16K)
78#define _PAGE_SZ (_PAGE_SZ_16K)
79#endif
80#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT)
81
82#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
83# define _PAGE_SZHUGE (_PAGE_SZ_8K)
84#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
85# define _PAGE_SZHUGE (_PAGE_SZ_16K)
86#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
87# define _PAGE_SZHUGE (_PAGE_SZ_32K)
88#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
89# define _PAGE_SZHUGE (_PAGE_SZ_64K)
90#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
91# define _PAGE_SZHUGE (_PAGE_SZ_128K)
92#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
93# define _PAGE_SZHUGE (_PAGE_SZ_256K)
94#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
95# define _PAGE_SZHUGE (_PAGE_SZ_512K)
96#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
97# define _PAGE_SZHUGE (_PAGE_SZ_1M)
98#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
99# define _PAGE_SZHUGE (_PAGE_SZ_2M)
100#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
101# define _PAGE_SZHUGE (_PAGE_SZ_4M)
102#endif
103
104#endif /* _METAG_PGTABLE_BITS_H */
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h
index d0604c0a8702..ffa3a3a2ecad 100644
--- a/arch/metag/include/asm/pgtable.h
+++ b/arch/metag/include/asm/pgtable.h
@@ -5,6 +5,7 @@
5#ifndef _METAG_PGTABLE_H 5#ifndef _METAG_PGTABLE_H
6#define _METAG_PGTABLE_H 6#define _METAG_PGTABLE_H
7 7
8#include <asm/pgtable-bits.h>
8#include <asm-generic/pgtable-nopmd.h> 9#include <asm-generic/pgtable-nopmd.h>
9 10
10/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */ 11/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
@@ -21,100 +22,6 @@
21#endif 22#endif
22 23
23/* 24/*
24 * Definitions for MMU descriptors
25 *
26 * These are the hardware bits in the MMCU pte entries.
27 * Derived from the Meta toolkit headers.
28 */
29#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT
30#define _PAGE_WRITE MMCU_ENTRY_WR_BIT
31#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT
32/* Write combine bit - this can cause writes to occur out of order */
33#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT
34/* Sys coherent bit - this bit is never used by Linux */
35#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT
36#define _PAGE_ALWAYS_ZERO_1 0x020
37#define _PAGE_CACHE_CTRL0 0x040
38#define _PAGE_CACHE_CTRL1 0x080
39#define _PAGE_ALWAYS_ZERO_2 0x100
40#define _PAGE_ALWAYS_ZERO_3 0x200
41#define _PAGE_ALWAYS_ZERO_4 0x400
42#define _PAGE_ALWAYS_ZERO_5 0x800
43
44/* These are software bits that we stuff into the gaps in the hardware
45 * pte entries that are not used. Note, these DO get stored in the actual
46 * hardware, but the hardware just does not use them.
47 */
48#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
49#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
50
51/* Pages owned, and protected by, the kernel. */
52#define _PAGE_KERNEL _PAGE_PRIV
53
54/* No cacheing of this page */
55#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
56/* burst cacheing - good for data streaming */
57#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
58/* One cache way per thread */
59#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
60/* Full on cacheing */
61#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
62
63#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
64
65/* which bits are used for cache control ... */
66#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
67 _PAGE_WR_COMBINE)
68
69/* This is a mask of the bits that pte_modify is allowed to change. */
70#define _PAGE_CHG_MASK (PAGE_MASK)
71
72#define _PAGE_SZ_SHIFT 1
73#define _PAGE_SZ_4K (0x0)
74#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT)
75#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT)
76#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT)
77#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT)
78#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT)
79#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT)
80#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT)
81#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT)
82#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT)
83#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT)
84#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT)
85
86#if defined(CONFIG_PAGE_SIZE_4K)
87#define _PAGE_SZ (_PAGE_SZ_4K)
88#elif defined(CONFIG_PAGE_SIZE_8K)
89#define _PAGE_SZ (_PAGE_SZ_8K)
90#elif defined(CONFIG_PAGE_SIZE_16K)
91#define _PAGE_SZ (_PAGE_SZ_16K)
92#endif
93#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT)
94
95#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
96# define _PAGE_SZHUGE (_PAGE_SZ_8K)
97#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
98# define _PAGE_SZHUGE (_PAGE_SZ_16K)
99#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
100# define _PAGE_SZHUGE (_PAGE_SZ_32K)
101#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
102# define _PAGE_SZHUGE (_PAGE_SZ_64K)
103#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
104# define _PAGE_SZHUGE (_PAGE_SZ_128K)
105#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
106# define _PAGE_SZHUGE (_PAGE_SZ_256K)
107#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
108# define _PAGE_SZHUGE (_PAGE_SZ_512K)
109#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
110# define _PAGE_SZHUGE (_PAGE_SZ_1M)
111#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
112# define _PAGE_SZHUGE (_PAGE_SZ_2M)
113#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
114# define _PAGE_SZHUGE (_PAGE_SZ_4M)
115#endif
116
117/*
118 * The Linux memory management assumes a three-level page table setup. On 25 * The Linux memory management assumes a three-level page table setup. On
119 * Meta, we use that, but "fold" the mid level into the top-level page 26 * Meta, we use that, but "fold" the mid level into the top-level page
120 * table. 27 * table.
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index f213f5b4c423..d17437238a2c 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
26 26
27 if (likely(pgd != NULL)) { 27 if (likely(pgd != NULL)) {
28 memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER); 28 memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
29#ifdef CONFIG_64BIT 29#if PT_NLEVELS == 3
30 actual_pgd += PTRS_PER_PGD; 30 actual_pgd += PTRS_PER_PGD;
31 /* Populate first pmd with allocated memory. We mark it 31 /* Populate first pmd with allocated memory. We mark it
32 * with PxD_FLAG_ATTACHED as a signal to the system that this 32 * with PxD_FLAG_ATTACHED as a signal to the system that this
@@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
45 45
46static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) 46static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
47{ 47{
48#ifdef CONFIG_64BIT 48#if PT_NLEVELS == 3
49 pgd -= PTRS_PER_PGD; 49 pgd -= PTRS_PER_PGD;
50#endif 50#endif
51 free_pages((unsigned long)pgd, PGD_ALLOC_ORDER); 51 free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
@@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
72 72
73static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) 73static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
74{ 74{
75#ifdef CONFIG_64BIT
76 if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) 75 if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
77 /* This is the permanent pmd attached to the pgd; 76 /*
78 * cannot free it */ 77 * This is the permanent pmd attached to the pgd;
78 * cannot free it.
79 * Increment the counter to compensate for the decrement
80 * done by generic mm code.
81 */
82 mm_inc_nr_pmds(mm);
79 return; 83 return;
80#endif
81 free_pages((unsigned long)pmd, PMD_ORDER); 84 free_pages((unsigned long)pmd, PMD_ORDER);
82} 85}
83 86
@@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
99static inline void 102static inline void
100pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) 103pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
101{ 104{
102#ifdef CONFIG_64BIT 105#if PT_NLEVELS == 3
103 /* preserve the gateway marker if this is the beginning of 106 /* preserve the gateway marker if this is the beginning of
104 * the permanent pmd */ 107 * the permanent pmd */
105 if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) 108 if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 5a8997d63899..8eefb12d1d33 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -55,8 +55,8 @@
55#define ENTRY_COMP(_name_) .word sys_##_name_ 55#define ENTRY_COMP(_name_) .word sys_##_name_
56#endif 56#endif
57 57
58 ENTRY_SAME(restart_syscall) /* 0 */ 5890: ENTRY_SAME(restart_syscall) /* 0 */
59 ENTRY_SAME(exit) 5991: ENTRY_SAME(exit)
60 ENTRY_SAME(fork_wrapper) 60 ENTRY_SAME(fork_wrapper)
61 ENTRY_SAME(read) 61 ENTRY_SAME(read)
62 ENTRY_SAME(write) 62 ENTRY_SAME(write)
@@ -439,7 +439,10 @@
439 ENTRY_SAME(bpf) 439 ENTRY_SAME(bpf)
440 ENTRY_COMP(execveat) 440 ENTRY_COMP(execveat)
441 441
442 /* Nothing yet */ 442
443.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
444.error "size of syscall table does not fit value of __NR_Linux_syscalls"
445.endif
443 446
444#undef ENTRY_SAME 447#undef ENTRY_SAME
445#undef ENTRY_DIFF 448#undef ENTRY_DIFF
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index 2bf8e9307be9..4c8ad592ae33 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -55,7 +55,7 @@ static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
55 55
56static inline int cpu_nr_cores(void) 56static inline int cpu_nr_cores(void)
57{ 57{
58 return NR_CPUS >> threads_shift; 58 return nr_cpu_ids >> threads_shift;
59} 59}
60 60
61static inline cpumask_t cpu_online_cores_map(void) 61static inline cpumask_t cpu_online_cores_map(void)
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 2eadde0b98fb..5c93f691b495 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -153,6 +153,7 @@
153#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff 153#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff
154#define PPC_INST_MFTMR 0x7c0002dc 154#define PPC_INST_MFTMR 0x7c0002dc
155#define PPC_INST_MSGSND 0x7c00019c 155#define PPC_INST_MSGSND 0x7c00019c
156#define PPC_INST_MSGCLR 0x7c0001dc
156#define PPC_INST_MSGSNDP 0x7c00011c 157#define PPC_INST_MSGSNDP 0x7c00011c
157#define PPC_INST_MTTMR 0x7c0003dc 158#define PPC_INST_MTTMR 0x7c0003dc
158#define PPC_INST_NOP 0x60000000 159#define PPC_INST_NOP 0x60000000
@@ -311,6 +312,8 @@
311 ___PPC_RB(b) | __PPC_EH(eh)) 312 ___PPC_RB(b) | __PPC_EH(eh))
312#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ 313#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \
313 ___PPC_RB(b)) 314 ___PPC_RB(b))
315#define PPC_MSGCLR(b) stringify_in_c(.long PPC_INST_MSGCLR | \
316 ___PPC_RB(b))
314#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \ 317#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \
315 ___PPC_RB(b)) 318 ___PPC_RB(b))
316#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \ 319#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 1c874fb533bb..af56b5c6c81a 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -608,13 +608,16 @@
608#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */ 608#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
609#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */ 609#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
610#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ 610#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
611#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */
611#define SRR1_WAKESYSERR 0x00300000 /* System error */ 612#define SRR1_WAKESYSERR 0x00300000 /* System error */
612#define SRR1_WAKEEE 0x00200000 /* External interrupt */ 613#define SRR1_WAKEEE 0x00200000 /* External interrupt */
613#define SRR1_WAKEMT 0x00280000 /* mtctrl */ 614#define SRR1_WAKEMT 0x00280000 /* mtctrl */
614#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */ 615#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
615#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ 616#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
617#define SRR1_WAKEDBELL 0x00140000 /* Privileged doorbell on P8 */
616#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */ 618#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */
617#define SRR1_WAKERESET 0x00100000 /* System reset */ 619#define SRR1_WAKERESET 0x00100000 /* System reset */
620#define SRR1_WAKEHDBELL 0x000c0000 /* Hypervisor doorbell on P8 */
618#define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */ 621#define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */
619#define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained, 622#define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained,
620 * may not be recoverable */ 623 * may not be recoverable */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f337666768a7..f83046878336 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -437,6 +437,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
437 .machine_check_early = __machine_check_early_realmode_p8, 437 .machine_check_early = __machine_check_early_realmode_p8,
438 .platform = "power8", 438 .platform = "power8",
439 }, 439 },
440 { /* Power8NVL */
441 .pvr_mask = 0xffff0000,
442 .pvr_value = 0x004c0000,
443 .cpu_name = "POWER8NVL (raw)",
444 .cpu_features = CPU_FTRS_POWER8,
445 .cpu_user_features = COMMON_USER_POWER8,
446 .cpu_user_features2 = COMMON_USER2_POWER8,
447 .mmu_features = MMU_FTRS_POWER8,
448 .icache_bsize = 128,
449 .dcache_bsize = 128,
450 .num_pmcs = 6,
451 .pmc_type = PPC_PMC_IBM,
452 .oprofile_cpu_type = "ppc64/power8",
453 .oprofile_type = PPC_OPROFILE_INVALID,
454 .cpu_setup = __setup_cpu_power8,
455 .cpu_restore = __restore_cpu_power8,
456 .flush_tlb = __flush_tlb_power8,
457 .machine_check_early = __machine_check_early_realmode_p8,
458 .platform = "power8",
459 },
440 { /* Power8 DD1: Does not support doorbell IPIs */ 460 { /* Power8 DD1: Does not support doorbell IPIs */
441 .pvr_mask = 0xffffff00, 461 .pvr_mask = 0xffffff00,
442 .pvr_value = 0x004d0100, 462 .pvr_value = 0x004d0100,
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index f4217819cc31..2128f3a96c32 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -17,6 +17,7 @@
17 17
18#include <asm/dbell.h> 18#include <asm/dbell.h>
19#include <asm/irq_regs.h> 19#include <asm/irq_regs.h>
20#include <asm/kvm_ppc.h>
20 21
21#ifdef CONFIG_SMP 22#ifdef CONFIG_SMP
22void doorbell_setup_this_cpu(void) 23void doorbell_setup_this_cpu(void)
@@ -41,6 +42,7 @@ void doorbell_exception(struct pt_regs *regs)
41 42
42 may_hard_irq_enable(); 43 may_hard_irq_enable();
43 44
45 kvmppc_set_host_ipi(smp_processor_id(), 0);
44 __this_cpu_inc(irq_stat.doorbell_irqs); 46 __this_cpu_inc(irq_stat.doorbell_irqs);
45 47
46 smp_ipi_demux(); 48 smp_ipi_demux();
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index c2df8150bd7a..9519e6bdc6d7 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1408,7 +1408,7 @@ machine_check_handle_early:
1408 bne 9f /* continue in V mode if we are. */ 1408 bne 9f /* continue in V mode if we are. */
1409 1409
14105: 14105:
1411#ifdef CONFIG_KVM_BOOK3S_64_HV 1411#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1412 /* 1412 /*
1413 * We are coming from kernel context. Check if we are coming from 1413 * We are coming from kernel context. Check if we are coming from
1414 * guest. if yes, then we can continue. We will fall through 1414 * guest. if yes, then we can continue. We will fall through
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index de4018a1bc4b..de747563d29d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
636 spin_lock(&vcpu->arch.vpa_update_lock); 636 spin_lock(&vcpu->arch.vpa_update_lock);
637 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; 637 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
638 if (lppaca) 638 if (lppaca)
639 yield_count = lppaca->yield_count; 639 yield_count = be32_to_cpu(lppaca->yield_count);
640 spin_unlock(&vcpu->arch.vpa_update_lock); 640 spin_unlock(&vcpu->arch.vpa_update_lock);
641 return yield_count; 641 return yield_count;
642} 642}
@@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
942static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, 942static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
943 bool preserve_top32) 943 bool preserve_top32)
944{ 944{
945 struct kvm *kvm = vcpu->kvm;
945 struct kvmppc_vcore *vc = vcpu->arch.vcore; 946 struct kvmppc_vcore *vc = vcpu->arch.vcore;
946 u64 mask; 947 u64 mask;
947 948
949 mutex_lock(&kvm->lock);
948 spin_lock(&vc->lock); 950 spin_lock(&vc->lock);
949 /* 951 /*
950 * If ILE (interrupt little-endian) has changed, update the 952 * If ILE (interrupt little-endian) has changed, update the
951 * MSR_LE bit in the intr_msr for each vcpu in this vcore. 953 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
952 */ 954 */
953 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { 955 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
954 struct kvm *kvm = vcpu->kvm;
955 struct kvm_vcpu *vcpu; 956 struct kvm_vcpu *vcpu;
956 int i; 957 int i;
957 958
958 mutex_lock(&kvm->lock);
959 kvm_for_each_vcpu(i, vcpu, kvm) { 959 kvm_for_each_vcpu(i, vcpu, kvm) {
960 if (vcpu->arch.vcore != vc) 960 if (vcpu->arch.vcore != vc)
961 continue; 961 continue;
@@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
964 else 964 else
965 vcpu->arch.intr_msr &= ~MSR_LE; 965 vcpu->arch.intr_msr &= ~MSR_LE;
966 } 966 }
967 mutex_unlock(&kvm->lock);
968 } 967 }
969 968
970 /* 969 /*
@@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
981 mask &= 0xFFFFFFFF; 980 mask &= 0xFFFFFFFF;
982 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); 981 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
983 spin_unlock(&vc->lock); 982 spin_unlock(&vc->lock);
983 mutex_unlock(&kvm->lock);
984} 984}
985 985
986static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, 986static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bb94e6f20c81..6cbf1630cb70 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1005 /* Save HEIR (HV emulation assist reg) in emul_inst 1005 /* Save HEIR (HV emulation assist reg) in emul_inst
1006 if this is an HEI (HV emulation interrupt, e40) */ 1006 if this is an HEI (HV emulation interrupt, e40) */
1007 li r3,KVM_INST_FETCH_FAILED 1007 li r3,KVM_INST_FETCH_FAILED
1008 stw r3,VCPU_LAST_INST(r9)
1008 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST 1009 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
1009 bne 11f 1010 bne 11f
1010 mfspr r3,SPRN_HEIR 1011 mfspr r3,SPRN_HEIR
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index fc34025ef822..38a45088f633 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -33,6 +33,8 @@
33#include <asm/runlatch.h> 33#include <asm/runlatch.h>
34#include <asm/code-patching.h> 34#include <asm/code-patching.h>
35#include <asm/dbell.h> 35#include <asm/dbell.h>
36#include <asm/kvm_ppc.h>
37#include <asm/ppc-opcode.h>
36 38
37#include "powernv.h" 39#include "powernv.h"
38 40
@@ -149,7 +151,7 @@ static int pnv_smp_cpu_disable(void)
149static void pnv_smp_cpu_kill_self(void) 151static void pnv_smp_cpu_kill_self(void)
150{ 152{
151 unsigned int cpu; 153 unsigned int cpu;
152 unsigned long srr1; 154 unsigned long srr1, wmask;
153 u32 idle_states; 155 u32 idle_states;
154 156
155 /* Standard hot unplug procedure */ 157 /* Standard hot unplug procedure */
@@ -161,6 +163,10 @@ static void pnv_smp_cpu_kill_self(void)
161 generic_set_cpu_dead(cpu); 163 generic_set_cpu_dead(cpu);
162 smp_wmb(); 164 smp_wmb();
163 165
166 wmask = SRR1_WAKEMASK;
167 if (cpu_has_feature(CPU_FTR_ARCH_207S))
168 wmask = SRR1_WAKEMASK_P8;
169
164 idle_states = pnv_get_supported_cpuidle_states(); 170 idle_states = pnv_get_supported_cpuidle_states();
165 /* We don't want to take decrementer interrupts while we are offline, 171 /* We don't want to take decrementer interrupts while we are offline,
166 * so clear LPCR:PECE1. We keep PECE2 enabled. 172 * so clear LPCR:PECE1. We keep PECE2 enabled.
@@ -191,10 +197,14 @@ static void pnv_smp_cpu_kill_self(void)
191 * having finished executing in a KVM guest, then srr1 197 * having finished executing in a KVM guest, then srr1
192 * contains 0. 198 * contains 0.
193 */ 199 */
194 if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) { 200 if ((srr1 & wmask) == SRR1_WAKEEE) {
195 icp_native_flush_interrupt(); 201 icp_native_flush_interrupt();
196 local_paca->irq_happened &= PACA_IRQ_HARD_DIS; 202 local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
197 smp_mb(); 203 smp_mb();
204 } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
205 unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
206 asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
207 kvmppc_set_host_ipi(cpu, 0);
198 } 208 }
199 209
200 if (cpu_core_split_required()) 210 if (cpu_core_split_required())
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 90cf3dcbd9f2..8f35d525cede 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -25,10 +25,10 @@
25static struct kobject *mobility_kobj; 25static struct kobject *mobility_kobj;
26 26
27struct update_props_workarea { 27struct update_props_workarea {
28 u32 phandle; 28 __be32 phandle;
29 u32 state; 29 __be32 state;
30 u64 reserved; 30 __be64 reserved;
31 u32 nprops; 31 __be32 nprops;
32} __packed; 32} __packed;
33 33
34#define NODE_ACTION_MASK 0xff000000 34#define NODE_ACTION_MASK 0xff000000
@@ -54,11 +54,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
54 return rc; 54 return rc;
55} 55}
56 56
57static int delete_dt_node(u32 phandle) 57static int delete_dt_node(__be32 phandle)
58{ 58{
59 struct device_node *dn; 59 struct device_node *dn;
60 60
61 dn = of_find_node_by_phandle(phandle); 61 dn = of_find_node_by_phandle(be32_to_cpu(phandle));
62 if (!dn) 62 if (!dn)
63 return -ENOENT; 63 return -ENOENT;
64 64
@@ -127,7 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
127 return 0; 127 return 0;
128} 128}
129 129
130static int update_dt_node(u32 phandle, s32 scope) 130static int update_dt_node(__be32 phandle, s32 scope)
131{ 131{
132 struct update_props_workarea *upwa; 132 struct update_props_workarea *upwa;
133 struct device_node *dn; 133 struct device_node *dn;
@@ -136,6 +136,7 @@ static int update_dt_node(u32 phandle, s32 scope)
136 char *prop_data; 136 char *prop_data;
137 char *rtas_buf; 137 char *rtas_buf;
138 int update_properties_token; 138 int update_properties_token;
139 u32 nprops;
139 u32 vd; 140 u32 vd;
140 141
141 update_properties_token = rtas_token("ibm,update-properties"); 142 update_properties_token = rtas_token("ibm,update-properties");
@@ -146,7 +147,7 @@ static int update_dt_node(u32 phandle, s32 scope)
146 if (!rtas_buf) 147 if (!rtas_buf)
147 return -ENOMEM; 148 return -ENOMEM;
148 149
149 dn = of_find_node_by_phandle(phandle); 150 dn = of_find_node_by_phandle(be32_to_cpu(phandle));
150 if (!dn) { 151 if (!dn) {
151 kfree(rtas_buf); 152 kfree(rtas_buf);
152 return -ENOENT; 153 return -ENOENT;
@@ -162,6 +163,7 @@ static int update_dt_node(u32 phandle, s32 scope)
162 break; 163 break;
163 164
164 prop_data = rtas_buf + sizeof(*upwa); 165 prop_data = rtas_buf + sizeof(*upwa);
166 nprops = be32_to_cpu(upwa->nprops);
165 167
166 /* On the first call to ibm,update-properties for a node the 168 /* On the first call to ibm,update-properties for a node the
167 * the first property value descriptor contains an empty 169 * the first property value descriptor contains an empty
@@ -170,17 +172,17 @@ static int update_dt_node(u32 phandle, s32 scope)
170 */ 172 */
171 if (*prop_data == 0) { 173 if (*prop_data == 0) {
172 prop_data++; 174 prop_data++;
173 vd = *(u32 *)prop_data; 175 vd = be32_to_cpu(*(__be32 *)prop_data);
174 prop_data += vd + sizeof(vd); 176 prop_data += vd + sizeof(vd);
175 upwa->nprops--; 177 nprops--;
176 } 178 }
177 179
178 for (i = 0; i < upwa->nprops; i++) { 180 for (i = 0; i < nprops; i++) {
179 char *prop_name; 181 char *prop_name;
180 182
181 prop_name = prop_data; 183 prop_name = prop_data;
182 prop_data += strlen(prop_name) + 1; 184 prop_data += strlen(prop_name) + 1;
183 vd = *(u32 *)prop_data; 185 vd = be32_to_cpu(*(__be32 *)prop_data);
184 prop_data += sizeof(vd); 186 prop_data += sizeof(vd);
185 187
186 switch (vd) { 188 switch (vd) {
@@ -212,13 +214,13 @@ static int update_dt_node(u32 phandle, s32 scope)
212 return 0; 214 return 0;
213} 215}
214 216
215static int add_dt_node(u32 parent_phandle, u32 drc_index) 217static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
216{ 218{
217 struct device_node *dn; 219 struct device_node *dn;
218 struct device_node *parent_dn; 220 struct device_node *parent_dn;
219 int rc; 221 int rc;
220 222
221 parent_dn = of_find_node_by_phandle(parent_phandle); 223 parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
222 if (!parent_dn) 224 if (!parent_dn)
223 return -ENOENT; 225 return -ENOENT;
224 226
@@ -237,7 +239,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
237int pseries_devicetree_update(s32 scope) 239int pseries_devicetree_update(s32 scope)
238{ 240{
239 char *rtas_buf; 241 char *rtas_buf;
240 u32 *data; 242 __be32 *data;
241 int update_nodes_token; 243 int update_nodes_token;
242 int rc; 244 int rc;
243 245
@@ -254,17 +256,17 @@ int pseries_devicetree_update(s32 scope)
254 if (rc && rc != 1) 256 if (rc && rc != 1)
255 break; 257 break;
256 258
257 data = (u32 *)rtas_buf + 4; 259 data = (__be32 *)rtas_buf + 4;
258 while (*data & NODE_ACTION_MASK) { 260 while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
259 int i; 261 int i;
260 u32 action = *data & NODE_ACTION_MASK; 262 u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
261 int node_count = *data & NODE_COUNT_MASK; 263 u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
262 264
263 data++; 265 data++;
264 266
265 for (i = 0; i < node_count; i++) { 267 for (i = 0; i < node_count; i++) {
266 u32 phandle = *data++; 268 __be32 phandle = *data++;
267 u32 drc_index; 269 __be32 drc_index;
268 270
269 switch (action) { 271 switch (action) {
270 case DELETE_DT_NODE: 272 case DELETE_DT_NODE:
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index c9df40b5c0ac..c9c875d9ed31 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -211,7 +211,7 @@ do { \
211 211
212extern unsigned long mmap_rnd_mask; 212extern unsigned long mmap_rnd_mask;
213 213
214#define STACK_RND_MASK (mmap_rnd_mask) 214#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
215 215
216#define ARCH_DLINFO \ 216#define ARCH_DLINFO \
217do { \ 217do { \
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 82c19899574f..6c79f1b44fe7 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -57,6 +57,44 @@
57 57
58unsigned long ftrace_plt; 58unsigned long ftrace_plt;
59 59
60static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
61{
62#ifdef CC_USING_HOTPATCH
63 /* brcl 0,0 */
64 insn->opc = 0xc004;
65 insn->disp = 0;
66#else
67 /* stg r14,8(r15) */
68 insn->opc = 0xe3e0;
69 insn->disp = 0xf0080024;
70#endif
71}
72
73static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
74{
75#ifdef CONFIG_KPROBES
76 if (insn->opc == BREAKPOINT_INSTRUCTION)
77 return 1;
78#endif
79 return 0;
80}
81
82static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
83{
84#ifdef CONFIG_KPROBES
85 insn->opc = BREAKPOINT_INSTRUCTION;
86 insn->disp = KPROBE_ON_FTRACE_NOP;
87#endif
88}
89
90static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
91{
92#ifdef CONFIG_KPROBES
93 insn->opc = BREAKPOINT_INSTRUCTION;
94 insn->disp = KPROBE_ON_FTRACE_CALL;
95#endif
96}
97
60int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, 98int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
61 unsigned long addr) 99 unsigned long addr)
62{ 100{
@@ -72,16 +110,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
72 return -EFAULT; 110 return -EFAULT;
73 if (addr == MCOUNT_ADDR) { 111 if (addr == MCOUNT_ADDR) {
74 /* Initial code replacement */ 112 /* Initial code replacement */
75#ifdef CC_USING_HOTPATCH 113 ftrace_generate_orig_insn(&orig);
76 /* We expect to see brcl 0,0 */
77 ftrace_generate_nop_insn(&orig);
78#else
79 /* We expect to see stg r14,8(r15) */
80 orig.opc = 0xe3e0;
81 orig.disp = 0xf0080024;
82#endif
83 ftrace_generate_nop_insn(&new); 114 ftrace_generate_nop_insn(&new);
84 } else if (old.opc == BREAKPOINT_INSTRUCTION) { 115 } else if (is_kprobe_on_ftrace(&old)) {
85 /* 116 /*
86 * If we find a breakpoint instruction, a kprobe has been 117 * If we find a breakpoint instruction, a kprobe has been
87 * placed at the beginning of the function. We write the 118 * placed at the beginning of the function. We write the
@@ -89,9 +120,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
89 * bytes of the original instruction so that the kprobes 120 * bytes of the original instruction so that the kprobes
90 * handler can execute a nop, if it reaches this breakpoint. 121 * handler can execute a nop, if it reaches this breakpoint.
91 */ 122 */
92 new.opc = orig.opc = BREAKPOINT_INSTRUCTION; 123 ftrace_generate_kprobe_call_insn(&orig);
93 orig.disp = KPROBE_ON_FTRACE_CALL; 124 ftrace_generate_kprobe_nop_insn(&new);
94 new.disp = KPROBE_ON_FTRACE_NOP;
95 } else { 125 } else {
96 /* Replace ftrace call with a nop. */ 126 /* Replace ftrace call with a nop. */
97 ftrace_generate_call_insn(&orig, rec->ip); 127 ftrace_generate_call_insn(&orig, rec->ip);
@@ -111,7 +141,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
111 141
112 if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) 142 if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
113 return -EFAULT; 143 return -EFAULT;
114 if (old.opc == BREAKPOINT_INSTRUCTION) { 144 if (is_kprobe_on_ftrace(&old)) {
115 /* 145 /*
116 * If we find a breakpoint instruction, a kprobe has been 146 * If we find a breakpoint instruction, a kprobe has been
117 * placed at the beginning of the function. We write the 147 * placed at the beginning of the function. We write the
@@ -119,9 +149,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
119 * bytes of the original instruction so that the kprobes 149 * bytes of the original instruction so that the kprobes
120 * handler can execute a brasl if it reaches this breakpoint. 150 * handler can execute a brasl if it reaches this breakpoint.
121 */ 151 */
122 new.opc = orig.opc = BREAKPOINT_INSTRUCTION; 152 ftrace_generate_kprobe_nop_insn(&orig);
123 orig.disp = KPROBE_ON_FTRACE_NOP; 153 ftrace_generate_kprobe_call_insn(&new);
124 new.disp = KPROBE_ON_FTRACE_CALL;
125 } else { 154 } else {
126 /* Replace nop with an ftrace call. */ 155 /* Replace nop with an ftrace call. */
127 ftrace_generate_nop_insn(&orig); 156 ftrace_generate_nop_insn(&orig);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index c3f8d157cb0d..e6a1578fc000 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1415,7 +1415,7 @@ CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
1415 1415
1416static struct attribute *cpumsf_pmu_events_attr[] = { 1416static struct attribute *cpumsf_pmu_events_attr[] = {
1417 CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC), 1417 CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
1418 CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG), 1418 NULL,
1419 NULL, 1419 NULL,
1420}; 1420};
1421 1421
@@ -1606,8 +1606,11 @@ static int __init init_cpum_sampling_pmu(void)
1606 return -EINVAL; 1606 return -EINVAL;
1607 } 1607 }
1608 1608
1609 if (si.ad) 1609 if (si.ad) {
1610 sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); 1610 sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
1611 cpumsf_pmu_events_attr[1] =
1612 CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
1613 }
1611 1614
1612 sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); 1615 sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
1613 if (!sfdbg) 1616 if (!sfdbg)
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 6b09fdffbd2f..ca6294645dd3 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -177,6 +177,17 @@ restart_entry:
177 lhi %r1,1 177 lhi %r1,1
178 sigp %r1,%r0,SIGP_SET_ARCHITECTURE 178 sigp %r1,%r0,SIGP_SET_ARCHITECTURE
179 sam64 179 sam64
180#ifdef CONFIG_SMP
181 larl %r1,smp_cpu_mt_shift
182 icm %r1,15,0(%r1)
183 jz smt_done
184 llgfr %r1,%r1
185smt_loop:
186 sigp %r1,%r0,SIGP_SET_MULTI_THREADING
187 brc 8,smt_done /* accepted */
188 brc 2,smt_loop /* busy, try again */
189smt_done:
190#endif
180 larl %r1,.Lnew_pgm_check_psw 191 larl %r1,.Lnew_pgm_check_psw
181 lpswe 0(%r1) 192 lpswe 0(%r1)
182pgm_check_entry: 193pgm_check_entry:
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 498b6d967138..258990688a5e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -212,11 +212,11 @@ static struct event_constraint intel_hsw_event_constraints[] = {
212 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ 212 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
213 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ 213 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
214 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ 214 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
215 INTEL_EVENT_CONSTRAINT(0x08a3, 0x4), 215 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
216 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ 216 /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
217 INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4), 217 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
218 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ 218 /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
219 INTEL_EVENT_CONSTRAINT(0x04a3, 0xf), 219 INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
220 EVENT_CONSTRAINT_END 220 EVENT_CONSTRAINT_END
221}; 221};
222 222
@@ -1649,11 +1649,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
1649 if (c) 1649 if (c)
1650 return c; 1650 return c;
1651 1651
1652 c = intel_pebs_constraints(event); 1652 c = intel_shared_regs_constraints(cpuc, event);
1653 if (c) 1653 if (c)
1654 return c; 1654 return c;
1655 1655
1656 c = intel_shared_regs_constraints(cpuc, event); 1656 c = intel_pebs_constraints(event);
1657 if (c) 1657 if (c)
1658 return c; 1658 return c;
1659 1659
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1d74d161687c..f0095a76c182 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -364,12 +364,21 @@ system_call_fastpath:
364 * Has incomplete stack frame and undefined top of stack. 364 * Has incomplete stack frame and undefined top of stack.
365 */ 365 */
366ret_from_sys_call: 366ret_from_sys_call:
367 testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
368 jnz int_ret_from_sys_call_fixup /* Go the the slow path */
369
370 LOCKDEP_SYS_EXIT 367 LOCKDEP_SYS_EXIT
371 DISABLE_INTERRUPTS(CLBR_NONE) 368 DISABLE_INTERRUPTS(CLBR_NONE)
372 TRACE_IRQS_OFF 369 TRACE_IRQS_OFF
370
371 /*
372 * We must check ti flags with interrupts (or at least preemption)
373 * off because we must *never* return to userspace without
374 * processing exit work that is enqueued if we're preempted here.
375 * In particular, returning to userspace with any of the one-shot
376 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
377 * very bad.
378 */
379 testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
380 jnz int_ret_from_sys_call_fixup /* Go the the slow path */
381
373 CFI_REMEMBER_STATE 382 CFI_REMEMBER_STATE
374 /* 383 /*
375 * sysretq will re-enable interrupts: 384 * sysretq will re-enable interrupts:
@@ -386,7 +395,7 @@ ret_from_sys_call:
386 395
387int_ret_from_sys_call_fixup: 396int_ret_from_sys_call_fixup:
388 FIXUP_TOP_OF_STACK %r11, -ARGOFFSET 397 FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
389 jmp int_ret_from_sys_call 398 jmp int_ret_from_sys_call_irqs_off
390 399
391 /* Do syscall tracing */ 400 /* Do syscall tracing */
392tracesys: 401tracesys:
@@ -432,6 +441,7 @@ tracesys_phase2:
432GLOBAL(int_ret_from_sys_call) 441GLOBAL(int_ret_from_sys_call)
433 DISABLE_INTERRUPTS(CLBR_NONE) 442 DISABLE_INTERRUPTS(CLBR_NONE)
434 TRACE_IRQS_OFF 443 TRACE_IRQS_OFF
444int_ret_from_sys_call_irqs_off:
435 movl $_TIF_ALLWORK_MASK,%edi 445 movl $_TIF_ALLWORK_MASK,%edi
436 /* edi: mask to check */ 446 /* edi: mask to check */
437GLOBAL(int_with_check) 447GLOBAL(int_with_check)
@@ -789,7 +799,21 @@ retint_swapgs: /* return to user-space */
789 cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp) /* R11 == RFLAGS */ 799 cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp) /* R11 == RFLAGS */
790 jne opportunistic_sysret_failed 800 jne opportunistic_sysret_failed
791 801
792 testq $X86_EFLAGS_RF,%r11 /* sysret can't restore RF */ 802 /*
803 * SYSRET can't restore RF. SYSRET can restore TF, but unlike IRET,
804 * restoring TF results in a trap from userspace immediately after
805 * SYSRET. This would cause an infinite loop whenever #DB happens
806 * with register state that satisfies the opportunistic SYSRET
807 * conditions. For example, single-stepping this user code:
808 *
809 * movq $stuck_here,%rcx
810 * pushfq
811 * popq %r11
812 * stuck_here:
813 *
814 * would never get past 'stuck_here'.
815 */
816 testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
793 jnz opportunistic_sysret_failed 817 jnz opportunistic_sysret_failed
794 818
795 /* nothing to check for RSP */ 819 /* nothing to check for RSP */
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 7ec1d5f8d283..25ecd56cefa8 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -72,7 +72,7 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
72 { "bx", 8, offsetof(struct pt_regs, bx) }, 72 { "bx", 8, offsetof(struct pt_regs, bx) },
73 { "cx", 8, offsetof(struct pt_regs, cx) }, 73 { "cx", 8, offsetof(struct pt_regs, cx) },
74 { "dx", 8, offsetof(struct pt_regs, dx) }, 74 { "dx", 8, offsetof(struct pt_regs, dx) },
75 { "si", 8, offsetof(struct pt_regs, dx) }, 75 { "si", 8, offsetof(struct pt_regs, si) },
76 { "di", 8, offsetof(struct pt_regs, di) }, 76 { "di", 8, offsetof(struct pt_regs, di) },
77 { "bp", 8, offsetof(struct pt_regs, bp) }, 77 { "bp", 8, offsetof(struct pt_regs, bp) },
78 { "sp", 8, offsetof(struct pt_regs, sp) }, 78 { "sp", 8, offsetof(struct pt_regs, sp) },
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index bae6c609888e..86db4bcd7ce5 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -183,6 +183,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
183 }, 183 },
184 }, 184 },
185 185
186 /* ASRock */
187 { /* Handle problems with rebooting on ASRock Q1900DC-ITX */
188 .callback = set_pci_reboot,
189 .ident = "ASRock Q1900DC-ITX",
190 .matches = {
191 DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"),
192 DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"),
193 },
194 },
195
186 /* ASUS */ 196 /* ASUS */
187 { /* Handle problems with rebooting on ASUS P4S800 */ 197 { /* Handle problems with rebooting on ASUS P4S800 */
188 .callback = set_bios_reboot, 198 .callback = set_bios_reboot,
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index b1947e0f3e10..46d4449772bc 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -422,6 +422,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
422 struct kvm_ioapic *ioapic, int vector, int trigger_mode) 422 struct kvm_ioapic *ioapic, int vector, int trigger_mode)
423{ 423{
424 int i; 424 int i;
425 struct kvm_lapic *apic = vcpu->arch.apic;
425 426
426 for (i = 0; i < IOAPIC_NUM_PINS; i++) { 427 for (i = 0; i < IOAPIC_NUM_PINS; i++) {
427 union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; 428 union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
@@ -443,7 +444,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
443 kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); 444 kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
444 spin_lock(&ioapic->lock); 445 spin_lock(&ioapic->lock);
445 446
446 if (trigger_mode != IOAPIC_LEVEL_TRIG) 447 if (trigger_mode != IOAPIC_LEVEL_TRIG ||
448 kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
447 continue; 449 continue;
448 450
449 ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); 451 ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index bd4e34de24c7..4ee827d7bf36 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -833,8 +833,7 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
833 833
834static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector) 834static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
835{ 835{
836 if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) && 836 if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
837 kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
838 int trigger_mode; 837 int trigger_mode;
839 if (apic_test_vector(vector, apic->regs + APIC_TMR)) 838 if (apic_test_vector(vector, apic->regs + APIC_TMR))
840 trigger_mode = IOAPIC_LEVEL_TRIG; 839 trigger_mode = IOAPIC_LEVEL_TRIG;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 10a481b7674d..ae4f6d35d19c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2479,8 +2479,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2479 if (enable_ept) { 2479 if (enable_ept) {
2480 /* nested EPT: emulate EPT also to L1 */ 2480 /* nested EPT: emulate EPT also to L1 */
2481 vmx->nested.nested_vmx_secondary_ctls_high |= 2481 vmx->nested.nested_vmx_secondary_ctls_high |=
2482 SECONDARY_EXEC_ENABLE_EPT | 2482 SECONDARY_EXEC_ENABLE_EPT;
2483 SECONDARY_EXEC_UNRESTRICTED_GUEST;
2484 vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | 2483 vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
2485 VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT | 2484 VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
2486 VMX_EPT_INVEPT_BIT; 2485 VMX_EPT_INVEPT_BIT;
@@ -2494,6 +2493,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2494 } else 2493 } else
2495 vmx->nested.nested_vmx_ept_caps = 0; 2494 vmx->nested.nested_vmx_ept_caps = 0;
2496 2495
2496 if (enable_unrestricted_guest)
2497 vmx->nested.nested_vmx_secondary_ctls_high |=
2498 SECONDARY_EXEC_UNRESTRICTED_GUEST;
2499
2497 /* miscellaneous data */ 2500 /* miscellaneous data */
2498 rdmsr(MSR_IA32_VMX_MISC, 2501 rdmsr(MSR_IA32_VMX_MISC,
2499 vmx->nested.nested_vmx_misc_low, 2502 vmx->nested.nested_vmx_misc_low,
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 9f93af56a5fc..b47124d4cd67 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -91,6 +91,12 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
91unsigned long xen_max_p2m_pfn __read_mostly; 91unsigned long xen_max_p2m_pfn __read_mostly;
92EXPORT_SYMBOL_GPL(xen_max_p2m_pfn); 92EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
93 93
94#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
95#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
96#else
97#define P2M_LIMIT 0
98#endif
99
94static DEFINE_SPINLOCK(p2m_update_lock); 100static DEFINE_SPINLOCK(p2m_update_lock);
95 101
96static unsigned long *p2m_mid_missing_mfn; 102static unsigned long *p2m_mid_missing_mfn;
@@ -385,9 +391,11 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
385void __init xen_vmalloc_p2m_tree(void) 391void __init xen_vmalloc_p2m_tree(void)
386{ 392{
387 static struct vm_struct vm; 393 static struct vm_struct vm;
394 unsigned long p2m_limit;
388 395
396 p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
389 vm.flags = VM_ALLOC; 397 vm.flags = VM_ALLOC;
390 vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn, 398 vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
391 PMD_SIZE * PMDS_PER_MID_PAGE); 399 PMD_SIZE * PMDS_PER_MID_PAGE);
392 vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE); 400 vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
393 pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size); 401 pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index fc1ff3b1ea1f..fd3fee81c23c 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -592,7 +592,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
592 if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) { 592 if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
593 struct bio_vec *bprev; 593 struct bio_vec *bprev;
594 594
595 bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1]; 595 bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
596 if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset)) 596 if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
597 return false; 597 return false;
598 } 598 }
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index d53a764b05ea..be3290cc0644 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -278,9 +278,11 @@ static int bt_get(struct blk_mq_alloc_data *data,
278 /* 278 /*
279 * We're out of tags on this hardware queue, kick any 279 * We're out of tags on this hardware queue, kick any
280 * pending IO submits before going to sleep waiting for 280 * pending IO submits before going to sleep waiting for
281 * some to complete. 281 * some to complete. Note that hctx can be NULL here for
282 * reserved tag allocation.
282 */ 283 */
283 blk_mq_run_hw_queue(hctx, false); 284 if (hctx)
285 blk_mq_run_hw_queue(hctx, false);
284 286
285 /* 287 /*
286 * Retry tag allocation after running the hardware queue, 288 * Retry tag allocation after running the hardware queue,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4f4bea21052e..b7b8933ec241 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1938,7 +1938,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1938 */ 1938 */
1939 if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release, 1939 if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
1940 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) 1940 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
1941 goto err_map; 1941 goto err_mq_usage;
1942 1942
1943 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); 1943 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1944 blk_queue_rq_timeout(q, 30000); 1944 blk_queue_rq_timeout(q, 30000);
@@ -1981,7 +1981,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1981 blk_mq_init_cpu_queues(q, set->nr_hw_queues); 1981 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1982 1982
1983 if (blk_mq_init_hw_queues(q, set)) 1983 if (blk_mq_init_hw_queues(q, set))
1984 goto err_hw; 1984 goto err_mq_usage;
1985 1985
1986 mutex_lock(&all_q_mutex); 1986 mutex_lock(&all_q_mutex);
1987 list_add_tail(&q->all_q_node, &all_q_list); 1987 list_add_tail(&q->all_q_node, &all_q_list);
@@ -1993,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1993 1993
1994 return q; 1994 return q;
1995 1995
1996err_hw: 1996err_mq_usage:
1997 blk_cleanup_queue(q); 1997 blk_cleanup_queue(q);
1998err_hctxs: 1998err_hctxs:
1999 kfree(map); 1999 kfree(map);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 6ed2cbe5e8c9..12600bfffca9 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -585,7 +585,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
585 b->physical_block_size); 585 b->physical_block_size);
586 586
587 t->io_min = max(t->io_min, b->io_min); 587 t->io_min = max(t->io_min, b->io_min);
588 t->io_opt = lcm(t->io_opt, b->io_opt); 588 t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
589 589
590 t->cluster &= b->cluster; 590 t->cluster &= b->cluster;
591 t->discard_zeroes_data &= b->discard_zeroes_data; 591 t->discard_zeroes_data &= b->discard_zeroes_data;
@@ -616,7 +616,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
616 b->raid_partial_stripes_expensive); 616 b->raid_partial_stripes_expensive);
617 617
618 /* Find lowest common alignment_offset */ 618 /* Find lowest common alignment_offset */
619 t->alignment_offset = lcm(t->alignment_offset, alignment) 619 t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
620 % max(t->physical_block_size, t->io_min); 620 % max(t->physical_block_size, t->io_min);
621 621
622 /* Verify that new alignment_offset is on a logical block boundary */ 622 /* Verify that new alignment_offset is on a logical block boundary */
@@ -643,7 +643,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
643 b->max_discard_sectors); 643 b->max_discard_sectors);
644 t->discard_granularity = max(t->discard_granularity, 644 t->discard_granularity = max(t->discard_granularity,
645 b->discard_granularity); 645 b->discard_granularity);
646 t->discard_alignment = lcm(t->discard_alignment, alignment) % 646 t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
647 t->discard_granularity; 647 t->discard_granularity;
648 } 648 }
649 649
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 26089d182cb7..f22cc56fd1b3 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -381,7 +381,7 @@ void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new)
381 sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1); 381 sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
382 sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg); 382 sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
383} 383}
384EXPORT_SYMBOL(af_alg_link_sg); 384EXPORT_SYMBOL_GPL(af_alg_link_sg);
385 385
386void af_alg_free_sg(struct af_alg_sgl *sgl) 386void af_alg_free_sg(struct af_alg_sgl *sgl)
387{ 387{
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 60496d405ebf..0aa02635ceda 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -509,11 +509,11 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
509 struct skcipher_async_req *sreq; 509 struct skcipher_async_req *sreq;
510 struct ablkcipher_request *req; 510 struct ablkcipher_request *req;
511 struct skcipher_async_rsgl *last_rsgl = NULL; 511 struct skcipher_async_rsgl *last_rsgl = NULL;
512 unsigned int len = 0, tx_nents = skcipher_all_sg_nents(ctx); 512 unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
513 unsigned int reqlen = sizeof(struct skcipher_async_req) + 513 unsigned int reqlen = sizeof(struct skcipher_async_req) +
514 GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx); 514 GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
515 int i = 0;
516 int err = -ENOMEM; 515 int err = -ENOMEM;
516 bool mark = false;
517 517
518 lock_sock(sk); 518 lock_sock(sk);
519 req = kmalloc(reqlen, GFP_KERNEL); 519 req = kmalloc(reqlen, GFP_KERNEL);
@@ -555,7 +555,7 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
555 iov_iter_count(&msg->msg_iter)); 555 iov_iter_count(&msg->msg_iter));
556 used = min_t(unsigned long, used, sg->length); 556 used = min_t(unsigned long, used, sg->length);
557 557
558 if (i == tx_nents) { 558 if (txbufs == tx_nents) {
559 struct scatterlist *tmp; 559 struct scatterlist *tmp;
560 int x; 560 int x;
561 /* Ran out of tx slots in async request 561 /* Ran out of tx slots in async request
@@ -573,17 +573,18 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
573 kfree(sreq->tsg); 573 kfree(sreq->tsg);
574 sreq->tsg = tmp; 574 sreq->tsg = tmp;
575 tx_nents *= 2; 575 tx_nents *= 2;
576 mark = true;
576 } 577 }
577 /* Need to take over the tx sgl from ctx 578 /* Need to take over the tx sgl from ctx
578 * to the asynch req - these sgls will be freed later */ 579 * to the asynch req - these sgls will be freed later */
579 sg_set_page(sreq->tsg + i++, sg_page(sg), sg->length, 580 sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
580 sg->offset); 581 sg->offset);
581 582
582 if (list_empty(&sreq->list)) { 583 if (list_empty(&sreq->list)) {
583 rsgl = &sreq->first_sgl; 584 rsgl = &sreq->first_sgl;
584 list_add_tail(&rsgl->list, &sreq->list); 585 list_add_tail(&rsgl->list, &sreq->list);
585 } else { 586 } else {
586 rsgl = kzalloc(sizeof(*rsgl), GFP_KERNEL); 587 rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
587 if (!rsgl) { 588 if (!rsgl) {
588 err = -ENOMEM; 589 err = -ENOMEM;
589 goto free; 590 goto free;
@@ -604,6 +605,9 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
604 iov_iter_advance(&msg->msg_iter, used); 605 iov_iter_advance(&msg->msg_iter, used);
605 } 606 }
606 607
608 if (mark)
609 sg_mark_end(sreq->tsg + txbufs - 1);
610
607 ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg, 611 ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
608 len, sreq->iv); 612 len, sreq->iv);
609 err = ctx->enc ? crypto_ablkcipher_encrypt(req) : 613 err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 4c35f0822d06..23dac3babfe3 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4204,9 +4204,18 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4204 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4204 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4205 4205
4206 /* devices that don't properly handle queued TRIM commands */ 4206 /* devices that don't properly handle queued TRIM commands */
4207 { "Micron_M[56]*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4207 { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4208 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4209 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4210 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4211 { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4212 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4213 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4214 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4215 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4216 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4217 { "Samsung SSD 850 PRO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4208 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4218 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4209 { "Crucial_CT*SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4210 4219
4211 /* 4220 /*
4212 * As defined, the DRAT (Deterministic Read After Trim) and RZAT 4221 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
@@ -4226,6 +4235,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4226 */ 4235 */
4227 { "INTEL*SSDSC2MH*", NULL, 0, }, 4236 { "INTEL*SSDSC2MH*", NULL, 0, },
4228 4237
4238 { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4239 { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4229 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4240 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4230 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4241 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4231 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4242 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4737,7 +4748,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
4737 return NULL; 4748 return NULL;
4738 4749
4739 /* libsas case */ 4750 /* libsas case */
4740 if (!ap->scsi_host) { 4751 if (ap->flags & ATA_FLAG_SAS_HOST) {
4741 tag = ata_sas_allocate_tag(ap); 4752 tag = ata_sas_allocate_tag(ap);
4742 if (tag < 0) 4753 if (tag < 0)
4743 return NULL; 4754 return NULL;
@@ -4776,7 +4787,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
4776 tag = qc->tag; 4787 tag = qc->tag;
4777 if (likely(ata_tag_valid(tag))) { 4788 if (likely(ata_tag_valid(tag))) {
4778 qc->tag = ATA_TAG_POISON; 4789 qc->tag = ATA_TAG_POISON;
4779 if (!ap->scsi_host) 4790 if (ap->flags & ATA_FLAG_SAS_HOST)
4780 ata_sas_free_tag(tag, ap); 4791 ata_sas_free_tag(tag, ap);
4781 } 4792 }
4782} 4793}
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index beb8b27d4621..a13587b5c2be 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -243,4 +243,12 @@ extern struct regcache_ops regcache_rbtree_ops;
243extern struct regcache_ops regcache_lzo_ops; 243extern struct regcache_ops regcache_lzo_ops;
244extern struct regcache_ops regcache_flat_ops; 244extern struct regcache_ops regcache_flat_ops;
245 245
246static inline const char *regmap_name(const struct regmap *map)
247{
248 if (map->dev)
249 return dev_name(map->dev);
250
251 return map->name;
252}
253
246#endif 254#endif
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index da84f544c544..87db9893b463 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -218,7 +218,7 @@ int regcache_read(struct regmap *map,
218 ret = map->cache_ops->read(map, reg, value); 218 ret = map->cache_ops->read(map, reg, value);
219 219
220 if (ret == 0) 220 if (ret == 0)
221 trace_regmap_reg_read_cache(map->dev, reg, *value); 221 trace_regmap_reg_read_cache(map, reg, *value);
222 222
223 return ret; 223 return ret;
224 } 224 }
@@ -311,7 +311,7 @@ int regcache_sync(struct regmap *map)
311 dev_dbg(map->dev, "Syncing %s cache\n", 311 dev_dbg(map->dev, "Syncing %s cache\n",
312 map->cache_ops->name); 312 map->cache_ops->name);
313 name = map->cache_ops->name; 313 name = map->cache_ops->name;
314 trace_regcache_sync(map->dev, name, "start"); 314 trace_regcache_sync(map, name, "start");
315 315
316 if (!map->cache_dirty) 316 if (!map->cache_dirty)
317 goto out; 317 goto out;
@@ -346,7 +346,7 @@ out:
346 346
347 regmap_async_complete(map); 347 regmap_async_complete(map);
348 348
349 trace_regcache_sync(map->dev, name, "stop"); 349 trace_regcache_sync(map, name, "stop");
350 350
351 return ret; 351 return ret;
352} 352}
@@ -381,7 +381,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
381 name = map->cache_ops->name; 381 name = map->cache_ops->name;
382 dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max); 382 dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
383 383
384 trace_regcache_sync(map->dev, name, "start region"); 384 trace_regcache_sync(map, name, "start region");
385 385
386 if (!map->cache_dirty) 386 if (!map->cache_dirty)
387 goto out; 387 goto out;
@@ -401,7 +401,7 @@ out:
401 401
402 regmap_async_complete(map); 402 regmap_async_complete(map);
403 403
404 trace_regcache_sync(map->dev, name, "stop region"); 404 trace_regcache_sync(map, name, "stop region");
405 405
406 return ret; 406 return ret;
407} 407}
@@ -428,7 +428,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
428 428
429 map->lock(map->lock_arg); 429 map->lock(map->lock_arg);
430 430
431 trace_regcache_drop_region(map->dev, min, max); 431 trace_regcache_drop_region(map, min, max);
432 432
433 ret = map->cache_ops->drop(map, min, max); 433 ret = map->cache_ops->drop(map, min, max);
434 434
@@ -455,7 +455,7 @@ void regcache_cache_only(struct regmap *map, bool enable)
455 map->lock(map->lock_arg); 455 map->lock(map->lock_arg);
456 WARN_ON(map->cache_bypass && enable); 456 WARN_ON(map->cache_bypass && enable);
457 map->cache_only = enable; 457 map->cache_only = enable;
458 trace_regmap_cache_only(map->dev, enable); 458 trace_regmap_cache_only(map, enable);
459 map->unlock(map->lock_arg); 459 map->unlock(map->lock_arg);
460} 460}
461EXPORT_SYMBOL_GPL(regcache_cache_only); 461EXPORT_SYMBOL_GPL(regcache_cache_only);
@@ -493,7 +493,7 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
493 map->lock(map->lock_arg); 493 map->lock(map->lock_arg);
494 WARN_ON(map->cache_only && enable); 494 WARN_ON(map->cache_only && enable);
495 map->cache_bypass = enable; 495 map->cache_bypass = enable;
496 trace_regmap_cache_bypass(map->dev, enable); 496 trace_regmap_cache_bypass(map, enable);
497 map->unlock(map->lock_arg); 497 map->unlock(map->lock_arg);
498} 498}
499EXPORT_SYMBOL_GPL(regcache_cache_bypass); 499EXPORT_SYMBOL_GPL(regcache_cache_bypass);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index f99b098ddabf..dbfe6a69c3da 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1281,7 +1281,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
1281 if (map->async && map->bus->async_write) { 1281 if (map->async && map->bus->async_write) {
1282 struct regmap_async *async; 1282 struct regmap_async *async;
1283 1283
1284 trace_regmap_async_write_start(map->dev, reg, val_len); 1284 trace_regmap_async_write_start(map, reg, val_len);
1285 1285
1286 spin_lock_irqsave(&map->async_lock, flags); 1286 spin_lock_irqsave(&map->async_lock, flags);
1287 async = list_first_entry_or_null(&map->async_free, 1287 async = list_first_entry_or_null(&map->async_free,
@@ -1339,8 +1339,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
1339 return ret; 1339 return ret;
1340 } 1340 }
1341 1341
1342 trace_regmap_hw_write_start(map->dev, reg, 1342 trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
1343 val_len / map->format.val_bytes);
1344 1343
1345 /* If we're doing a single register write we can probably just 1344 /* If we're doing a single register write we can probably just
1346 * send the work_buf directly, otherwise try to do a gather 1345 * send the work_buf directly, otherwise try to do a gather
@@ -1372,8 +1371,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
1372 kfree(buf); 1371 kfree(buf);
1373 } 1372 }
1374 1373
1375 trace_regmap_hw_write_done(map->dev, reg, 1374 trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
1376 val_len / map->format.val_bytes);
1377 1375
1378 return ret; 1376 return ret;
1379} 1377}
@@ -1407,12 +1405,12 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1407 1405
1408 map->format.format_write(map, reg, val); 1406 map->format.format_write(map, reg, val);
1409 1407
1410 trace_regmap_hw_write_start(map->dev, reg, 1); 1408 trace_regmap_hw_write_start(map, reg, 1);
1411 1409
1412 ret = map->bus->write(map->bus_context, map->work_buf, 1410 ret = map->bus->write(map->bus_context, map->work_buf,
1413 map->format.buf_size); 1411 map->format.buf_size);
1414 1412
1415 trace_regmap_hw_write_done(map->dev, reg, 1); 1413 trace_regmap_hw_write_done(map, reg, 1);
1416 1414
1417 return ret; 1415 return ret;
1418} 1416}
@@ -1470,7 +1468,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
1470 dev_info(map->dev, "%x <= %x\n", reg, val); 1468 dev_info(map->dev, "%x <= %x\n", reg, val);
1471#endif 1469#endif
1472 1470
1473 trace_regmap_reg_write(map->dev, reg, val); 1471 trace_regmap_reg_write(map, reg, val);
1474 1472
1475 return map->reg_write(context, reg, val); 1473 return map->reg_write(context, reg, val);
1476} 1474}
@@ -1773,7 +1771,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
1773 for (i = 0; i < num_regs; i++) { 1771 for (i = 0; i < num_regs; i++) {
1774 int reg = regs[i].reg; 1772 int reg = regs[i].reg;
1775 int val = regs[i].def; 1773 int val = regs[i].def;
1776 trace_regmap_hw_write_start(map->dev, reg, 1); 1774 trace_regmap_hw_write_start(map, reg, 1);
1777 map->format.format_reg(u8, reg, map->reg_shift); 1775 map->format.format_reg(u8, reg, map->reg_shift);
1778 u8 += reg_bytes + pad_bytes; 1776 u8 += reg_bytes + pad_bytes;
1779 map->format.format_val(u8, val, 0); 1777 map->format.format_val(u8, val, 0);
@@ -1788,7 +1786,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
1788 1786
1789 for (i = 0; i < num_regs; i++) { 1787 for (i = 0; i < num_regs; i++) {
1790 int reg = regs[i].reg; 1788 int reg = regs[i].reg;
1791 trace_regmap_hw_write_done(map->dev, reg, 1); 1789 trace_regmap_hw_write_done(map, reg, 1);
1792 } 1790 }
1793 return ret; 1791 return ret;
1794} 1792}
@@ -2059,15 +2057,13 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2059 */ 2057 */
2060 u8[0] |= map->read_flag_mask; 2058 u8[0] |= map->read_flag_mask;
2061 2059
2062 trace_regmap_hw_read_start(map->dev, reg, 2060 trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
2063 val_len / map->format.val_bytes);
2064 2061
2065 ret = map->bus->read(map->bus_context, map->work_buf, 2062 ret = map->bus->read(map->bus_context, map->work_buf,
2066 map->format.reg_bytes + map->format.pad_bytes, 2063 map->format.reg_bytes + map->format.pad_bytes,
2067 val, val_len); 2064 val, val_len);
2068 2065
2069 trace_regmap_hw_read_done(map->dev, reg, 2066 trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
2070 val_len / map->format.val_bytes);
2071 2067
2072 return ret; 2068 return ret;
2073} 2069}
@@ -2123,7 +2119,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
2123 dev_info(map->dev, "%x => %x\n", reg, *val); 2119 dev_info(map->dev, "%x => %x\n", reg, *val);
2124#endif 2120#endif
2125 2121
2126 trace_regmap_reg_read(map->dev, reg, *val); 2122 trace_regmap_reg_read(map, reg, *val);
2127 2123
2128 if (!map->cache_bypass) 2124 if (!map->cache_bypass)
2129 regcache_write(map, reg, *val); 2125 regcache_write(map, reg, *val);
@@ -2480,7 +2476,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
2480 struct regmap *map = async->map; 2476 struct regmap *map = async->map;
2481 bool wake; 2477 bool wake;
2482 2478
2483 trace_regmap_async_io_complete(map->dev); 2479 trace_regmap_async_io_complete(map);
2484 2480
2485 spin_lock(&map->async_lock); 2481 spin_lock(&map->async_lock);
2486 list_move(&async->list, &map->async_free); 2482 list_move(&async->list, &map->async_free);
@@ -2525,7 +2521,7 @@ int regmap_async_complete(struct regmap *map)
2525 if (!map->bus || !map->bus->async_write) 2521 if (!map->bus || !map->bus->async_write)
2526 return 0; 2522 return 0;
2527 2523
2528 trace_regmap_async_complete_start(map->dev); 2524 trace_regmap_async_complete_start(map);
2529 2525
2530 wait_event(map->async_waitq, regmap_async_is_done(map)); 2526 wait_event(map->async_waitq, regmap_async_is_done(map));
2531 2527
@@ -2534,7 +2530,7 @@ int regmap_async_complete(struct regmap *map)
2534 map->async_ret = 0; 2530 map->async_ret = 0;
2535 spin_unlock_irqrestore(&map->async_lock, flags); 2531 spin_unlock_irqrestore(&map->async_lock, flags);
2536 2532
2537 trace_regmap_async_complete_done(map->dev); 2533 trace_regmap_async_complete_done(map);
2538 2534
2539 return ret; 2535 return ret;
2540} 2536}
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 9be17d3431bb..fc6ffcfa8061 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -1,6 +1,6 @@
1config BCMA_POSSIBLE 1config BCMA_POSSIBLE
2 bool 2 bool
3 depends on HAS_IOMEM && HAS_DMA && PCI 3 depends on HAS_IOMEM && HAS_DMA
4 default y 4 default y
5 5
6menu "Broadcom specific AMBA" 6menu "Broadcom specific AMBA"
@@ -45,9 +45,9 @@ config BCMA_HOST_SOC
45 45
46 If unsure, say N 46 If unsure, say N
47 47
48# TODO: make it depend on PCI when ready
49config BCMA_DRIVER_PCI 48config BCMA_DRIVER_PCI
50 bool 49 bool "BCMA Broadcom PCI core driver"
50 depends on BCMA && PCI
51 default y 51 default y
52 help 52 help
53 BCMA bus may have many versions of PCIe core. This driver 53 BCMA bus may have many versions of PCIe core. This driver
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 5a1d22489afc..15f2b2e242ea 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -106,15 +106,35 @@ static inline void __exit bcma_host_soc_unregister_driver(void)
106#endif /* CONFIG_BCMA_HOST_SOC && CONFIG_OF */ 106#endif /* CONFIG_BCMA_HOST_SOC && CONFIG_OF */
107 107
108/* driver_pci.c */ 108/* driver_pci.c */
109#ifdef CONFIG_BCMA_DRIVER_PCI
109u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address); 110u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address);
110void bcma_core_pci_early_init(struct bcma_drv_pci *pc); 111void bcma_core_pci_early_init(struct bcma_drv_pci *pc);
111void bcma_core_pci_init(struct bcma_drv_pci *pc); 112void bcma_core_pci_init(struct bcma_drv_pci *pc);
112void bcma_core_pci_up(struct bcma_drv_pci *pc); 113void bcma_core_pci_up(struct bcma_drv_pci *pc);
113void bcma_core_pci_down(struct bcma_drv_pci *pc); 114void bcma_core_pci_down(struct bcma_drv_pci *pc);
115#else
116static inline void bcma_core_pci_early_init(struct bcma_drv_pci *pc)
117{
118 WARN_ON(pc->core->bus->hosttype == BCMA_HOSTTYPE_PCI);
119}
120static inline void bcma_core_pci_init(struct bcma_drv_pci *pc)
121{
122 /* Initialization is required for PCI hosted bus */
123 WARN_ON(pc->core->bus->hosttype == BCMA_HOSTTYPE_PCI);
124}
125#endif
114 126
115/* driver_pcie2.c */ 127/* driver_pcie2.c */
128#ifdef CONFIG_BCMA_DRIVER_PCI
116void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2); 129void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2);
117void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2); 130void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2);
131#else
132static inline void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
133{
134 /* Initialization is required for PCI hosted bus */
135 WARN_ON(pcie2->core->bus->hosttype == BCMA_HOSTTYPE_PCI);
136}
137#endif
118 138
119extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc); 139extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc);
120 140
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index dce34fb52e27..74ccb02e0f10 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -17,6 +17,8 @@
17 17
18#include "bcma_private.h" 18#include "bcma_private.h"
19 19
20#define BCMA_GPIO_MAX_PINS 32
21
20static inline struct bcma_drv_cc *bcma_gpio_get_cc(struct gpio_chip *chip) 22static inline struct bcma_drv_cc *bcma_gpio_get_cc(struct gpio_chip *chip)
21{ 23{
22 return container_of(chip, struct bcma_drv_cc, gpio); 24 return container_of(chip, struct bcma_drv_cc, gpio);
@@ -204,6 +206,7 @@ static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
204 206
205int bcma_gpio_init(struct bcma_drv_cc *cc) 207int bcma_gpio_init(struct bcma_drv_cc *cc)
206{ 208{
209 struct bcma_bus *bus = cc->core->bus;
207 struct gpio_chip *chip = &cc->gpio; 210 struct gpio_chip *chip = &cc->gpio;
208 int err; 211 int err;
209 212
@@ -222,7 +225,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
222 if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC) 225 if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
223 chip->of_node = cc->core->dev.of_node; 226 chip->of_node = cc->core->dev.of_node;
224#endif 227#endif
225 switch (cc->core->bus->chipinfo.id) { 228 switch (bus->chipinfo.id) {
226 case BCMA_CHIP_ID_BCM5357: 229 case BCMA_CHIP_ID_BCM5357:
227 case BCMA_CHIP_ID_BCM53572: 230 case BCMA_CHIP_ID_BCM53572:
228 chip->ngpio = 32; 231 chip->ngpio = 32;
@@ -231,13 +234,17 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
231 chip->ngpio = 16; 234 chip->ngpio = 16;
232 } 235 }
233 236
234 /* There is just one SoC in one device and its GPIO addresses should be 237 /*
235 * deterministic to address them more easily. The other buses could get 238 * On MIPS we register GPIO devices (LEDs, buttons) using absolute GPIO
236 * a random base number. */ 239 * pin numbers. We don't have Device Tree there and we can't really use
237 if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC) 240 * relative (per chip) numbers.
238 chip->base = 0; 241 * So let's use predictable base for BCM47XX and "random" for all other.
239 else 242 */
240 chip->base = -1; 243#if IS_BUILTIN(CONFIG_BCM47XX)
244 chip->base = bus->num * BCMA_GPIO_MAX_PINS;
245#else
246 chip->base = -1;
247#endif
241 248
242 err = bcma_gpio_irq_domain_init(cc); 249 err = bcma_gpio_irq_domain_init(cc);
243 if (err) 250 if (err)
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index cfd35bc1c5a3..f499a469e66d 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -282,39 +282,6 @@ void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
282} 282}
283EXPORT_SYMBOL_GPL(bcma_core_pci_power_save); 283EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);
284 284
285int bcma_core_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core,
286 bool enable)
287{
288 struct pci_dev *pdev;
289 u32 coremask, tmp;
290 int err = 0;
291
292 if (bus->hosttype != BCMA_HOSTTYPE_PCI) {
293 /* This bcma device is not on a PCI host-bus. So the IRQs are
294 * not routed through the PCI core.
295 * So we must not enable routing through the PCI core. */
296 goto out;
297 }
298
299 pdev = bus->host_pci;
300
301 err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
302 if (err)
303 goto out;
304
305 coremask = BIT(core->core_index) << 8;
306 if (enable)
307 tmp |= coremask;
308 else
309 tmp &= ~coremask;
310
311 err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);
312
313out:
314 return err;
315}
316EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);
317
318static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend) 285static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
319{ 286{
320 u32 w; 287 u32 w;
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index a62a2f9091f5..0856189c065f 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -351,3 +351,37 @@ void bcma_host_pci_down(struct bcma_bus *bus)
351 bcma_core_pci_down(&bus->drv_pci[0]); 351 bcma_core_pci_down(&bus->drv_pci[0]);
352} 352}
353EXPORT_SYMBOL_GPL(bcma_host_pci_down); 353EXPORT_SYMBOL_GPL(bcma_host_pci_down);
354
355/* See also si_pci_setup */
356int bcma_host_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core,
357 bool enable)
358{
359 struct pci_dev *pdev;
360 u32 coremask, tmp;
361 int err = 0;
362
363 if (bus->hosttype != BCMA_HOSTTYPE_PCI) {
364 /* This bcma device is not on a PCI host-bus. So the IRQs are
365 * not routed through the PCI core.
366 * So we must not enable routing through the PCI core. */
367 goto out;
368 }
369
370 pdev = bus->host_pci;
371
372 err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
373 if (err)
374 goto out;
375
376 coremask = BIT(core->core_index) << 8;
377 if (enable)
378 tmp |= coremask;
379 else
380 tmp &= ~coremask;
381
382 err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);
383
384out:
385 return err;
386}
387EXPORT_SYMBOL_GPL(bcma_host_pci_irq_ctl);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4bc2a5cb9935..a98c41f72c63 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -803,10 +803,6 @@ static int __init nbd_init(void)
803 return -EINVAL; 803 return -EINVAL;
804 } 804 }
805 805
806 nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
807 if (!nbd_dev)
808 return -ENOMEM;
809
810 part_shift = 0; 806 part_shift = 0;
811 if (max_part > 0) { 807 if (max_part > 0) {
812 part_shift = fls(max_part); 808 part_shift = fls(max_part);
@@ -828,6 +824,10 @@ static int __init nbd_init(void)
828 if (nbds_max > 1UL << (MINORBITS - part_shift)) 824 if (nbds_max > 1UL << (MINORBITS - part_shift))
829 return -EINVAL; 825 return -EINVAL;
830 826
827 nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
828 if (!nbd_dev)
829 return -ENOMEM;
830
831 for (i = 0; i < nbds_max; i++) { 831 for (i = 0; i < nbds_max; i++) {
832 struct gendisk *disk = alloc_disk(1 << part_shift); 832 struct gendisk *disk = alloc_disk(1 << part_shift);
833 if (!disk) 833 if (!disk)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index ceb32dd52a6c..e23be20a3417 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -3003,6 +3003,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3003 } 3003 }
3004 get_device(dev->device); 3004 get_device(dev->device);
3005 3005
3006 INIT_LIST_HEAD(&dev->node);
3006 INIT_WORK(&dev->probe_work, nvme_async_probe); 3007 INIT_WORK(&dev->probe_work, nvme_async_probe);
3007 schedule_work(&dev->probe_work); 3008 schedule_work(&dev->probe_work);
3008 return 0; 3009 return 0;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 9bf4d6ae6c6b..6e4ff16e487b 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -111,13 +111,7 @@ static const struct usb_device_id btusb_table[] = {
111 { USB_DEVICE(0x0c10, 0x0000) }, 111 { USB_DEVICE(0x0c10, 0x0000) },
112 112
113 /* Broadcom BCM20702A0 */ 113 /* Broadcom BCM20702A0 */
114 { USB_DEVICE(0x0489, 0xe042) },
115 { USB_DEVICE(0x04ca, 0x2003) },
116 { USB_DEVICE(0x0b05, 0x17b5) },
117 { USB_DEVICE(0x0b05, 0x17cb) },
118 { USB_DEVICE(0x413c, 0x8197) }, 114 { USB_DEVICE(0x413c, 0x8197) },
119 { USB_DEVICE(0x13d3, 0x3404),
120 .driver_info = BTUSB_BCM_PATCHRAM },
121 115
122 /* Broadcom BCM20702B0 (Dynex/Insignia) */ 116 /* Broadcom BCM20702B0 (Dynex/Insignia) */
123 { USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM }, 117 { USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM },
@@ -139,10 +133,12 @@ static const struct usb_device_id btusb_table[] = {
139 .driver_info = BTUSB_BCM_PATCHRAM }, 133 .driver_info = BTUSB_BCM_PATCHRAM },
140 134
141 /* Belkin F8065bf - Broadcom based */ 135 /* Belkin F8065bf - Broadcom based */
142 { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) }, 136 { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01),
137 .driver_info = BTUSB_BCM_PATCHRAM },
143 138
144 /* IMC Networks - Broadcom based */ 139 /* IMC Networks - Broadcom based */
145 { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01) }, 140 { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
141 .driver_info = BTUSB_BCM_PATCHRAM },
146 142
147 /* Intel Bluetooth USB Bootloader (RAM module) */ 143 /* Intel Bluetooth USB Bootloader (RAM module) */
148 { USB_DEVICE(0x8087, 0x0a5a), 144 { USB_DEVICE(0x8087, 0x0a5a),
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 48a0c250d5b8..1363dc616ace 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -499,7 +499,7 @@ static int hci_uart_set_flags(struct hci_uart *hu, unsigned long flags)
499 BIT(HCI_UART_INIT_PENDING) | 499 BIT(HCI_UART_INIT_PENDING) |
500 BIT(HCI_UART_EXT_CONFIG); 500 BIT(HCI_UART_EXT_CONFIG);
501 501
502 if ((flags & ~valid_flags)) 502 if (flags & ~valid_flags)
503 return -EINVAL; 503 return -EINVAL;
504 504
505 hu->hdev_flags = flags; 505 hu->hdev_flags = flags;
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 68161f7a07d6..a0b036ccb118 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -192,6 +192,7 @@ config SYS_SUPPORTS_EM_STI
192config SH_TIMER_CMT 192config SH_TIMER_CMT
193 bool "Renesas CMT timer driver" if COMPILE_TEST 193 bool "Renesas CMT timer driver" if COMPILE_TEST
194 depends on GENERIC_CLOCKEVENTS 194 depends on GENERIC_CLOCKEVENTS
195 depends on HAS_IOMEM
195 default SYS_SUPPORTS_SH_CMT 196 default SYS_SUPPORTS_SH_CMT
196 help 197 help
197 This enables build of a clocksource and clockevent driver for 198 This enables build of a clocksource and clockevent driver for
@@ -201,6 +202,7 @@ config SH_TIMER_CMT
201config SH_TIMER_MTU2 202config SH_TIMER_MTU2
202 bool "Renesas MTU2 timer driver" if COMPILE_TEST 203 bool "Renesas MTU2 timer driver" if COMPILE_TEST
203 depends on GENERIC_CLOCKEVENTS 204 depends on GENERIC_CLOCKEVENTS
205 depends on HAS_IOMEM
204 default SYS_SUPPORTS_SH_MTU2 206 default SYS_SUPPORTS_SH_MTU2
205 help 207 help
206 This enables build of a clockevent driver for the Multi-Function 208 This enables build of a clockevent driver for the Multi-Function
@@ -210,6 +212,7 @@ config SH_TIMER_MTU2
210config SH_TIMER_TMU 212config SH_TIMER_TMU
211 bool "Renesas TMU timer driver" if COMPILE_TEST 213 bool "Renesas TMU timer driver" if COMPILE_TEST
212 depends on GENERIC_CLOCKEVENTS 214 depends on GENERIC_CLOCKEVENTS
215 depends on HAS_IOMEM
213 default SYS_SUPPORTS_SH_TMU 216 default SYS_SUPPORTS_SH_TMU
214 help 217 help
215 This enables build of a clocksource and clockevent driver for 218 This enables build of a clocksource and clockevent driver for
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 5dcbf90b8015..58597fbcc046 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -17,7 +17,6 @@
17#include <linux/irq.h> 17#include <linux/irq.h>
18#include <linux/irqreturn.h> 18#include <linux/irqreturn.h>
19#include <linux/reset.h> 19#include <linux/reset.h>
20#include <linux/sched_clock.h>
21#include <linux/of.h> 20#include <linux/of.h>
22#include <linux/of_address.h> 21#include <linux/of_address.h>
23#include <linux/of_irq.h> 22#include <linux/of_irq.h>
@@ -137,11 +136,6 @@ static struct irqaction sun5i_timer_irq = {
137 .dev_id = &sun5i_clockevent, 136 .dev_id = &sun5i_clockevent,
138}; 137};
139 138
140static u64 sun5i_timer_sched_read(void)
141{
142 return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1));
143}
144
145static void __init sun5i_timer_init(struct device_node *node) 139static void __init sun5i_timer_init(struct device_node *node)
146{ 140{
147 struct reset_control *rstc; 141 struct reset_control *rstc;
@@ -172,7 +166,6 @@ static void __init sun5i_timer_init(struct device_node *node)
172 writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, 166 writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
173 timer_base + TIMER_CTL_REG(1)); 167 timer_base + TIMER_CTL_REG(1));
174 168
175 sched_clock_register(sun5i_timer_sched_read, 32, rate);
176 clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name, 169 clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
177 rate, 340, 32, clocksource_mmio_readl_down); 170 rate, 340, 32, clocksource_mmio_readl_down);
178 171
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 0723096fb50a..c92d6a70ccf3 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -475,6 +475,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
475 * c->desc is NULL and exit.) 475 * c->desc is NULL and exit.)
476 */ 476 */
477 if (c->desc) { 477 if (c->desc) {
478 bcm2835_dma_desc_free(&c->desc->vd);
478 c->desc = NULL; 479 c->desc = NULL;
479 bcm2835_dma_abort(c->chan_base); 480 bcm2835_dma_abort(c->chan_base);
480 481
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index 4527a3ebeac4..84884418fd30 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -511,6 +511,9 @@ static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
511 kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc)); 511 kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
512} 512}
513 513
514#define JZ4740_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
515 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
516
514static int jz4740_dma_probe(struct platform_device *pdev) 517static int jz4740_dma_probe(struct platform_device *pdev)
515{ 518{
516 struct jz4740_dmaengine_chan *chan; 519 struct jz4740_dmaengine_chan *chan;
@@ -548,6 +551,10 @@ static int jz4740_dma_probe(struct platform_device *pdev)
548 dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic; 551 dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
549 dd->device_config = jz4740_dma_slave_config; 552 dd->device_config = jz4740_dma_slave_config;
550 dd->device_terminate_all = jz4740_dma_terminate_all; 553 dd->device_terminate_all = jz4740_dma_terminate_all;
554 dd->src_addr_widths = JZ4740_DMA_BUSWIDTHS;
555 dd->dst_addr_widths = JZ4740_DMA_BUSWIDTHS;
556 dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
557 dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
551 dd->dev = &pdev->dev; 558 dd->dev = &pdev->dev;
552 INIT_LIST_HEAD(&dd->channels); 559 INIT_LIST_HEAD(&dd->channels);
553 560
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 276157f22612..53dbd3b3384c 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -260,6 +260,13 @@ static int edma_terminate_all(struct dma_chan *chan)
260 */ 260 */
261 if (echan->edesc) { 261 if (echan->edesc) {
262 int cyclic = echan->edesc->cyclic; 262 int cyclic = echan->edesc->cyclic;
263
264 /*
265 * free the running request descriptor
266 * since it is not in any of the vdesc lists
267 */
268 edma_desc_free(&echan->edesc->vdesc);
269
263 echan->edesc = NULL; 270 echan->edesc = NULL;
264 edma_stop(echan->ch_num); 271 edma_stop(echan->ch_num);
265 /* Move the cyclic channel back to default queue */ 272 /* Move the cyclic channel back to default queue */
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 15cab7d79525..b4634109e010 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -193,8 +193,10 @@ static int moxart_terminate_all(struct dma_chan *chan)
193 193
194 spin_lock_irqsave(&ch->vc.lock, flags); 194 spin_lock_irqsave(&ch->vc.lock, flags);
195 195
196 if (ch->desc) 196 if (ch->desc) {
197 moxart_dma_desc_free(&ch->desc->vd);
197 ch->desc = NULL; 198 ch->desc = NULL;
199 }
198 200
199 ctrl = readl(ch->base + REG_OFF_CTRL); 201 ctrl = readl(ch->base + REG_OFF_CTRL);
200 ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN); 202 ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 7dd6dd121681..167dbaf65742 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -981,6 +981,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
981 * c->desc is NULL and exit.) 981 * c->desc is NULL and exit.)
982 */ 982 */
983 if (c->desc) { 983 if (c->desc) {
984 omap_dma_desc_free(&c->desc->vd);
984 c->desc = NULL; 985 c->desc = NULL;
985 /* Avoid stopping the dma twice */ 986 /* Avoid stopping the dma twice */
986 if (!c->paused) 987 if (!c->paused)
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 69fac068669f..2eebd28b4c40 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -86,10 +86,13 @@ static void dmi_table(u8 *buf, u32 len, int num,
86 int i = 0; 86 int i = 0;
87 87
88 /* 88 /*
89 * Stop when we see all the items the table claimed to have 89 * Stop when we have seen all the items the table claimed to have
90 * OR we run off the end of the table (also happens) 90 * (SMBIOS < 3.0 only) OR we reach an end-of-table marker OR we run
91 * off the end of the table (should never happen but sometimes does
92 * on bogus implementations.)
91 */ 93 */
92 while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) { 94 while ((!num || i < num) &&
95 (data - buf + sizeof(struct dmi_header)) <= len) {
93 const struct dmi_header *dm = (const struct dmi_header *)data; 96 const struct dmi_header *dm = (const struct dmi_header *)data;
94 97
95 /* 98 /*
@@ -529,21 +532,10 @@ static int __init dmi_smbios3_present(const u8 *buf)
529 if (memcmp(buf, "_SM3_", 5) == 0 && 532 if (memcmp(buf, "_SM3_", 5) == 0 &&
530 buf[6] < 32 && dmi_checksum(buf, buf[6])) { 533 buf[6] < 32 && dmi_checksum(buf, buf[6])) {
531 dmi_ver = get_unaligned_be16(buf + 7); 534 dmi_ver = get_unaligned_be16(buf + 7);
535 dmi_num = 0; /* No longer specified */
532 dmi_len = get_unaligned_le32(buf + 12); 536 dmi_len = get_unaligned_le32(buf + 12);
533 dmi_base = get_unaligned_le64(buf + 16); 537 dmi_base = get_unaligned_le64(buf + 16);
534 538
535 /*
536 * The 64-bit SMBIOS 3.0 entry point no longer has a field
537 * containing the number of structures present in the table.
538 * Instead, it defines the table size as a maximum size, and
539 * relies on the end-of-table structure type (#127) to be used
540 * to signal the end of the table.
541 * So let's define dmi_num as an upper bound as well: each
542 * structure has a 4 byte header, so dmi_len / 4 is an upper
543 * bound for the number of structures in the table.
544 */
545 dmi_num = dmi_len / 4;
546
547 if (dmi_walk_early(dmi_decode) == 0) { 539 if (dmi_walk_early(dmi_decode) == 0) {
548 pr_info("SMBIOS %d.%d present.\n", 540 pr_info("SMBIOS %d.%d present.\n",
549 dmi_ver >> 8, dmi_ver & 0xFF); 541 dmi_ver >> 8, dmi_ver & 0xFF);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index a6952ba343a8..a65b75161aa4 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -334,7 +334,7 @@ static struct irq_domain_ops mpc8xxx_gpio_irq_ops = {
334 .xlate = irq_domain_xlate_twocell, 334 .xlate = irq_domain_xlate_twocell,
335}; 335};
336 336
337static struct of_device_id mpc8xxx_gpio_ids[] __initdata = { 337static struct of_device_id mpc8xxx_gpio_ids[] = {
338 { .compatible = "fsl,mpc8349-gpio", }, 338 { .compatible = "fsl,mpc8349-gpio", },
339 { .compatible = "fsl,mpc8572-gpio", }, 339 { .compatible = "fsl,mpc8572-gpio", },
340 { .compatible = "fsl,mpc8610-gpio", }, 340 { .compatible = "fsl,mpc8610-gpio", },
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index 257e2989215c..045a952576c7 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -219,7 +219,7 @@ static int syscon_gpio_probe(struct platform_device *pdev)
219 ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2, 219 ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2,
220 &priv->dir_reg_offset); 220 &priv->dir_reg_offset);
221 if (ret) 221 if (ret)
222 dev_err(dev, "can't read the dir register offset!\n"); 222 dev_dbg(dev, "can't read the dir register offset!\n");
223 223
224 priv->dir_reg_offset <<= 3; 224 priv->dir_reg_offset <<= 3;
225 } 225 }
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index c0929d938ced..df990f29757a 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -201,6 +201,10 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
201 if (!handler) 201 if (!handler)
202 return AE_BAD_PARAMETER; 202 return AE_BAD_PARAMETER;
203 203
204 pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
205 if (pin < 0)
206 return AE_BAD_PARAMETER;
207
204 desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event"); 208 desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event");
205 if (IS_ERR(desc)) { 209 if (IS_ERR(desc)) {
206 dev_err(chip->dev, "Failed to request GPIO\n"); 210 dev_err(chip->dev, "Failed to request GPIO\n");
@@ -551,6 +555,12 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
551 struct gpio_desc *desc; 555 struct gpio_desc *desc;
552 bool found; 556 bool found;
553 557
558 pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
559 if (pin < 0) {
560 status = AE_BAD_PARAMETER;
561 goto out;
562 }
563
554 mutex_lock(&achip->conn_lock); 564 mutex_lock(&achip->conn_lock);
555 565
556 found = false; 566 found = false;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index f6d04c7b5115..679b10e34fb5 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -525,17 +525,6 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb)
525} 525}
526EXPORT_SYMBOL(drm_framebuffer_reference); 526EXPORT_SYMBOL(drm_framebuffer_reference);
527 527
528static void drm_framebuffer_free_bug(struct kref *kref)
529{
530 BUG();
531}
532
533static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
534{
535 DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
536 kref_put(&fb->refcount, drm_framebuffer_free_bug);
537}
538
539/** 528/**
540 * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr 529 * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
541 * @fb: fb to unregister 530 * @fb: fb to unregister
@@ -1320,7 +1309,7 @@ void drm_plane_force_disable(struct drm_plane *plane)
1320 return; 1309 return;
1321 } 1310 }
1322 /* disconnect the plane from the fb and crtc: */ 1311 /* disconnect the plane from the fb and crtc: */
1323 __drm_framebuffer_unreference(plane->old_fb); 1312 drm_framebuffer_unreference(plane->old_fb);
1324 plane->old_fb = NULL; 1313 plane->old_fb = NULL;
1325 plane->fb = NULL; 1314 plane->fb = NULL;
1326 plane->crtc = NULL; 1315 plane->crtc = NULL;
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 732cb6f8e653..4c0aa97aaf03 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -287,6 +287,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
287 287
288 drm_mode_connector_update_edid_property(connector, edid); 288 drm_mode_connector_update_edid_property(connector, edid);
289 ret = drm_add_edid_modes(connector, edid); 289 ret = drm_add_edid_modes(connector, edid);
290 drm_edid_to_eld(connector, edid);
290 kfree(edid); 291 kfree(edid);
291 292
292 return ret; 293 return ret;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 6591d48c1b9d..3fee587bc284 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -174,6 +174,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
174 struct edid *edid = (struct edid *) connector->edid_blob_ptr->data; 174 struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
175 175
176 count = drm_add_edid_modes(connector, edid); 176 count = drm_add_edid_modes(connector, edid);
177 drm_edid_to_eld(connector, edid);
177 } else 178 } else
178 count = (*connector_funcs->get_modes)(connector); 179 count = (*connector_funcs->get_modes)(connector);
179 } 180 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index c300e22da8ac..33a10ce967ea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -147,6 +147,7 @@ struct fimd_win_data {
147 unsigned int ovl_height; 147 unsigned int ovl_height;
148 unsigned int fb_width; 148 unsigned int fb_width;
149 unsigned int fb_height; 149 unsigned int fb_height;
150 unsigned int fb_pitch;
150 unsigned int bpp; 151 unsigned int bpp;
151 unsigned int pixel_format; 152 unsigned int pixel_format;
152 dma_addr_t dma_addr; 153 dma_addr_t dma_addr;
@@ -532,13 +533,14 @@ static void fimd_win_mode_set(struct exynos_drm_crtc *crtc,
532 win_data->offset_y = plane->crtc_y; 533 win_data->offset_y = plane->crtc_y;
533 win_data->ovl_width = plane->crtc_width; 534 win_data->ovl_width = plane->crtc_width;
534 win_data->ovl_height = plane->crtc_height; 535 win_data->ovl_height = plane->crtc_height;
536 win_data->fb_pitch = plane->pitch;
535 win_data->fb_width = plane->fb_width; 537 win_data->fb_width = plane->fb_width;
536 win_data->fb_height = plane->fb_height; 538 win_data->fb_height = plane->fb_height;
537 win_data->dma_addr = plane->dma_addr[0] + offset; 539 win_data->dma_addr = plane->dma_addr[0] + offset;
538 win_data->bpp = plane->bpp; 540 win_data->bpp = plane->bpp;
539 win_data->pixel_format = plane->pixel_format; 541 win_data->pixel_format = plane->pixel_format;
540 win_data->buf_offsize = (plane->fb_width - plane->crtc_width) * 542 win_data->buf_offsize =
541 (plane->bpp >> 3); 543 plane->pitch - (plane->crtc_width * (plane->bpp >> 3));
542 win_data->line_size = plane->crtc_width * (plane->bpp >> 3); 544 win_data->line_size = plane->crtc_width * (plane->bpp >> 3);
543 545
544 DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n", 546 DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
@@ -704,7 +706,7 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
704 writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); 706 writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
705 707
706 /* buffer end address */ 708 /* buffer end address */
707 size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3); 709 size = win_data->fb_pitch * win_data->ovl_height * (win_data->bpp >> 3);
708 val = (unsigned long)(win_data->dma_addr + size); 710 val = (unsigned long)(win_data->dma_addr + size);
709 writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); 711 writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
710 712
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 3518bc4654c5..2e3bc57ea50e 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -55,6 +55,7 @@ struct hdmi_win_data {
55 unsigned int fb_x; 55 unsigned int fb_x;
56 unsigned int fb_y; 56 unsigned int fb_y;
57 unsigned int fb_width; 57 unsigned int fb_width;
58 unsigned int fb_pitch;
58 unsigned int fb_height; 59 unsigned int fb_height;
59 unsigned int src_width; 60 unsigned int src_width;
60 unsigned int src_height; 61 unsigned int src_height;
@@ -438,7 +439,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
438 } else { 439 } else {
439 luma_addr[0] = win_data->dma_addr; 440 luma_addr[0] = win_data->dma_addr;
440 chroma_addr[0] = win_data->dma_addr 441 chroma_addr[0] = win_data->dma_addr
441 + (win_data->fb_width * win_data->fb_height); 442 + (win_data->fb_pitch * win_data->fb_height);
442 } 443 }
443 444
444 if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) { 445 if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
@@ -447,8 +448,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
447 luma_addr[1] = luma_addr[0] + 0x40; 448 luma_addr[1] = luma_addr[0] + 0x40;
448 chroma_addr[1] = chroma_addr[0] + 0x40; 449 chroma_addr[1] = chroma_addr[0] + 0x40;
449 } else { 450 } else {
450 luma_addr[1] = luma_addr[0] + win_data->fb_width; 451 luma_addr[1] = luma_addr[0] + win_data->fb_pitch;
451 chroma_addr[1] = chroma_addr[0] + win_data->fb_width; 452 chroma_addr[1] = chroma_addr[0] + win_data->fb_pitch;
452 } 453 }
453 } else { 454 } else {
454 ctx->interlace = false; 455 ctx->interlace = false;
@@ -469,10 +470,10 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
469 vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK); 470 vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
470 471
471 /* setting size of input image */ 472 /* setting size of input image */
472 vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) | 473 vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_pitch) |
473 VP_IMG_VSIZE(win_data->fb_height)); 474 VP_IMG_VSIZE(win_data->fb_height));
474 /* chroma height has to reduced by 2 to avoid chroma distorions */ 475 /* chroma height has to reduced by 2 to avoid chroma distorions */
475 vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) | 476 vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_pitch) |
476 VP_IMG_VSIZE(win_data->fb_height / 2)); 477 VP_IMG_VSIZE(win_data->fb_height / 2));
477 478
478 vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width); 479 vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
@@ -559,7 +560,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
559 /* converting dma address base and source offset */ 560 /* converting dma address base and source offset */
560 dma_addr = win_data->dma_addr 561 dma_addr = win_data->dma_addr
561 + (win_data->fb_x * win_data->bpp >> 3) 562 + (win_data->fb_x * win_data->bpp >> 3)
562 + (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3); 563 + (win_data->fb_y * win_data->fb_pitch);
563 src_x_offset = 0; 564 src_x_offset = 0;
564 src_y_offset = 0; 565 src_y_offset = 0;
565 566
@@ -576,7 +577,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
576 MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK); 577 MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
577 578
578 /* setup geometry */ 579 /* setup geometry */
579 mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width); 580 mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
581 win_data->fb_pitch / (win_data->bpp >> 3));
580 582
581 /* setup display size */ 583 /* setup display size */
582 if (ctx->mxr_ver == MXR_VER_128_0_0_184 && 584 if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
@@ -961,6 +963,7 @@ static void mixer_win_mode_set(struct exynos_drm_crtc *crtc,
961 win_data->fb_y = plane->fb_y; 963 win_data->fb_y = plane->fb_y;
962 win_data->fb_width = plane->fb_width; 964 win_data->fb_width = plane->fb_width;
963 win_data->fb_height = plane->fb_height; 965 win_data->fb_height = plane->fb_height;
966 win_data->fb_pitch = plane->pitch;
964 win_data->src_width = plane->src_width; 967 win_data->src_width = plane->src_width;
965 win_data->src_height = plane->src_height; 968 win_data->src_height = plane->src_height;
966 969
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5b205863b659..27ea6bdebce7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2737,24 +2737,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2737 2737
2738 WARN_ON(i915_verify_lists(ring->dev)); 2738 WARN_ON(i915_verify_lists(ring->dev));
2739 2739
2740 /* Move any buffers on the active list that are no longer referenced 2740 /* Retire requests first as we use it above for the early return.
2741 * by the ringbuffer to the flushing/inactive lists as appropriate, 2741 * If we retire requests last, we may use a later seqno and so clear
2742 * before we free the context associated with the requests. 2742 * the requests lists without clearing the active list, leading to
2743 * confusion.
2743 */ 2744 */
2744 while (!list_empty(&ring->active_list)) {
2745 struct drm_i915_gem_object *obj;
2746
2747 obj = list_first_entry(&ring->active_list,
2748 struct drm_i915_gem_object,
2749 ring_list);
2750
2751 if (!i915_gem_request_completed(obj->last_read_req, true))
2752 break;
2753
2754 i915_gem_object_move_to_inactive(obj);
2755 }
2756
2757
2758 while (!list_empty(&ring->request_list)) { 2745 while (!list_empty(&ring->request_list)) {
2759 struct drm_i915_gem_request *request; 2746 struct drm_i915_gem_request *request;
2760 struct intel_ringbuffer *ringbuf; 2747 struct intel_ringbuffer *ringbuf;
@@ -2789,6 +2776,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2789 i915_gem_free_request(request); 2776 i915_gem_free_request(request);
2790 } 2777 }
2791 2778
2779 /* Move any buffers on the active list that are no longer referenced
2780 * by the ringbuffer to the flushing/inactive lists as appropriate,
2781 * before we free the context associated with the requests.
2782 */
2783 while (!list_empty(&ring->active_list)) {
2784 struct drm_i915_gem_object *obj;
2785
2786 obj = list_first_entry(&ring->active_list,
2787 struct drm_i915_gem_object,
2788 ring_list);
2789
2790 if (!i915_gem_request_completed(obj->last_read_req, true))
2791 break;
2792
2793 i915_gem_object_move_to_inactive(obj);
2794 }
2795
2792 if (unlikely(ring->trace_irq_req && 2796 if (unlikely(ring->trace_irq_req &&
2793 i915_gem_request_completed(ring->trace_irq_req, true))) { 2797 i915_gem_request_completed(ring->trace_irq_req, true))) {
2794 ring->irq_put(ring); 2798 ring->irq_put(ring);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b773368fc62c..38a742532c4f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1487,7 +1487,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1487 goto err; 1487 goto err;
1488 } 1488 }
1489 1489
1490 if (i915_needs_cmd_parser(ring)) { 1490 if (i915_needs_cmd_parser(ring) && args->batch_len) {
1491 batch_obj = i915_gem_execbuffer_parse(ring, 1491 batch_obj = i915_gem_execbuffer_parse(ring,
1492 &shadow_exec_entry, 1492 &shadow_exec_entry,
1493 eb, 1493 eb,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6d22128d97b1..f75173c20f47 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2438,8 +2438,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
2438 if (!intel_crtc->base.primary->fb) 2438 if (!intel_crtc->base.primary->fb)
2439 return; 2439 return;
2440 2440
2441 if (intel_alloc_plane_obj(intel_crtc, plane_config)) 2441 if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
2442 struct drm_plane *primary = intel_crtc->base.primary;
2443
2444 primary->state->crtc = &intel_crtc->base;
2445 primary->crtc = &intel_crtc->base;
2446 update_state_fb(primary);
2447
2442 return; 2448 return;
2449 }
2443 2450
2444 kfree(intel_crtc->base.primary->fb); 2451 kfree(intel_crtc->base.primary->fb);
2445 intel_crtc->base.primary->fb = NULL; 2452 intel_crtc->base.primary->fb = NULL;
@@ -2462,11 +2469,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
2462 continue; 2469 continue;
2463 2470
2464 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { 2471 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2472 struct drm_plane *primary = intel_crtc->base.primary;
2473
2465 if (obj->tiling_mode != I915_TILING_NONE) 2474 if (obj->tiling_mode != I915_TILING_NONE)
2466 dev_priv->preserve_bios_swizzle = true; 2475 dev_priv->preserve_bios_swizzle = true;
2467 2476
2468 drm_framebuffer_reference(c->primary->fb); 2477 drm_framebuffer_reference(c->primary->fb);
2469 intel_crtc->base.primary->fb = c->primary->fb; 2478 primary->fb = c->primary->fb;
2479 primary->state->crtc = &intel_crtc->base;
2480 primary->crtc = &intel_crtc->base;
2470 obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); 2481 obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
2471 break; 2482 break;
2472 } 2483 }
@@ -6663,7 +6674,6 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
6663 plane_config->size); 6674 plane_config->size);
6664 6675
6665 crtc->base.primary->fb = fb; 6676 crtc->base.primary->fb = fb;
6666 update_state_fb(crtc->base.primary);
6667} 6677}
6668 6678
6669static void chv_crtc_clock_get(struct intel_crtc *crtc, 6679static void chv_crtc_clock_get(struct intel_crtc *crtc,
@@ -7704,7 +7714,6 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
7704 plane_config->size); 7714 plane_config->size);
7705 7715
7706 crtc->base.primary->fb = fb; 7716 crtc->base.primary->fb = fb;
7707 update_state_fb(crtc->base.primary);
7708 return; 7717 return;
7709 7718
7710error: 7719error:
@@ -7798,7 +7807,6 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
7798 plane_config->size); 7807 plane_config->size);
7799 7808
7800 crtc->base.primary->fb = fb; 7809 crtc->base.primary->fb = fb;
7801 update_state_fb(crtc->base.primary);
7802} 7810}
7803 7811
7804static bool ironlake_get_pipe_config(struct intel_crtc *crtc, 7812static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 0a52c44ad03d..9c5451c97942 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -1322,7 +1322,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
1322 drm_modeset_lock_all(dev); 1322 drm_modeset_lock_all(dev);
1323 1323
1324 plane = drm_plane_find(dev, set->plane_id); 1324 plane = drm_plane_find(dev, set->plane_id);
1325 if (!plane) { 1325 if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
1326 ret = -ENOENT; 1326 ret = -ENOENT;
1327 goto out_unlock; 1327 goto out_unlock;
1328 } 1328 }
@@ -1349,7 +1349,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
1349 drm_modeset_lock_all(dev); 1349 drm_modeset_lock_all(dev);
1350 1350
1351 plane = drm_plane_find(dev, get->plane_id); 1351 plane = drm_plane_find(dev, get->plane_id);
1352 if (!plane) { 1352 if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
1353 ret = -ENOENT; 1353 ret = -ENOENT;
1354 goto out_unlock; 1354 goto out_unlock;
1355 } 1355 }
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index c648e1996dab..243a36c93b8f 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -2129,6 +2129,7 @@
2129#define VCE_UENC_REG_CLOCK_GATING 0x207c0 2129#define VCE_UENC_REG_CLOCK_GATING 0x207c0
2130#define VCE_SYS_INT_EN 0x21300 2130#define VCE_SYS_INT_EN 0x21300
2131# define VCE_SYS_INT_TRAP_INTERRUPT_EN (1 << 3) 2131# define VCE_SYS_INT_TRAP_INTERRUPT_EN (1 << 3)
2132#define VCE_LMI_VCPU_CACHE_40BIT_BAR 0x2145c
2132#define VCE_LMI_CTRL2 0x21474 2133#define VCE_LMI_CTRL2 0x21474
2133#define VCE_LMI_CTRL 0x21498 2134#define VCE_LMI_CTRL 0x21498
2134#define VCE_LMI_VM_CTRL 0x214a0 2135#define VCE_LMI_VM_CTRL 0x214a0
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5587603b4a89..33d5a4f4eebd 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1565,6 +1565,7 @@ struct radeon_dpm {
1565 int new_active_crtc_count; 1565 int new_active_crtc_count;
1566 u32 current_active_crtcs; 1566 u32 current_active_crtcs;
1567 int current_active_crtc_count; 1567 int current_active_crtc_count;
1568 bool single_display;
1568 struct radeon_dpm_dynamic_state dyn_state; 1569 struct radeon_dpm_dynamic_state dyn_state;
1569 struct radeon_dpm_fan fan; 1570 struct radeon_dpm_fan fan;
1570 u32 tdp_limit; 1571 u32 tdp_limit;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 63ccb8fa799c..d27e4ccb848c 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
76 76
77static bool radeon_read_bios(struct radeon_device *rdev) 77static bool radeon_read_bios(struct radeon_device *rdev)
78{ 78{
79 uint8_t __iomem *bios; 79 uint8_t __iomem *bios, val1, val2;
80 size_t size; 80 size_t size;
81 81
82 rdev->bios = NULL; 82 rdev->bios = NULL;
@@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev)
86 return false; 86 return false;
87 } 87 }
88 88
89 if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { 89 val1 = readb(&bios[0]);
90 val2 = readb(&bios[1]);
91
92 if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
90 pci_unmap_rom(rdev->pdev, bios); 93 pci_unmap_rom(rdev->pdev, bios);
91 return false; 94 return false;
92 } 95 }
93 rdev->bios = kmemdup(bios, size, GFP_KERNEL); 96 rdev->bios = kzalloc(size, GFP_KERNEL);
94 if (rdev->bios == NULL) { 97 if (rdev->bios == NULL) {
95 pci_unmap_rom(rdev->pdev, bios); 98 pci_unmap_rom(rdev->pdev, bios);
96 return false; 99 return false;
97 } 100 }
101 memcpy_fromio(rdev->bios, bios, size);
98 pci_unmap_rom(rdev->pdev, bios); 102 pci_unmap_rom(rdev->pdev, bios);
99 return true; 103 return true;
100} 104}
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index a69bd441dd2d..572b4dbec186 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -122,7 +122,6 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
122 it = interval_tree_iter_first(&rmn->objects, start, end); 122 it = interval_tree_iter_first(&rmn->objects, start, end);
123 while (it) { 123 while (it) {
124 struct radeon_bo *bo; 124 struct radeon_bo *bo;
125 struct fence *fence;
126 int r; 125 int r;
127 126
128 bo = container_of(it, struct radeon_bo, mn_it); 127 bo = container_of(it, struct radeon_bo, mn_it);
@@ -134,12 +133,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
134 continue; 133 continue;
135 } 134 }
136 135
137 fence = reservation_object_get_excl(bo->tbo.resv); 136 r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true,
138 if (fence) { 137 false, MAX_SCHEDULE_TIMEOUT);
139 r = radeon_fence_wait((struct radeon_fence *)fence, false); 138 if (r)
140 if (r) 139 DRM_ERROR("(%d) failed to wait for user bo\n", r);
141 DRM_ERROR("(%d) failed to wait for user bo\n", r);
142 }
143 140
144 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); 141 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
145 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 142 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 33cf4108386d..c1ba83a8dd8c 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -837,12 +837,8 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work)
837 radeon_pm_compute_clocks(rdev); 837 radeon_pm_compute_clocks(rdev);
838} 838}
839 839
840static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, 840static bool radeon_dpm_single_display(struct radeon_device *rdev)
841 enum radeon_pm_state_type dpm_state)
842{ 841{
843 int i;
844 struct radeon_ps *ps;
845 u32 ui_class;
846 bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ? 842 bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
847 true : false; 843 true : false;
848 844
@@ -858,6 +854,17 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
858 if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120)) 854 if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
859 single_display = false; 855 single_display = false;
860 856
857 return single_display;
858}
859
860static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
861 enum radeon_pm_state_type dpm_state)
862{
863 int i;
864 struct radeon_ps *ps;
865 u32 ui_class;
866 bool single_display = radeon_dpm_single_display(rdev);
867
861 /* certain older asics have a separare 3D performance state, 868 /* certain older asics have a separare 3D performance state,
862 * so try that first if the user selected performance 869 * so try that first if the user selected performance
863 */ 870 */
@@ -983,6 +990,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
983 struct radeon_ps *ps; 990 struct radeon_ps *ps;
984 enum radeon_pm_state_type dpm_state; 991 enum radeon_pm_state_type dpm_state;
985 int ret; 992 int ret;
993 bool single_display = radeon_dpm_single_display(rdev);
986 994
987 /* if dpm init failed */ 995 /* if dpm init failed */
988 if (!rdev->pm.dpm_enabled) 996 if (!rdev->pm.dpm_enabled)
@@ -1007,6 +1015,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
1007 /* vce just modifies an existing state so force a change */ 1015 /* vce just modifies an existing state so force a change */
1008 if (ps->vce_active != rdev->pm.dpm.vce_active) 1016 if (ps->vce_active != rdev->pm.dpm.vce_active)
1009 goto force; 1017 goto force;
1018 /* user has made a display change (such as timing) */
1019 if (rdev->pm.dpm.single_display != single_display)
1020 goto force;
1010 if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) { 1021 if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
1011 /* for pre-BTC and APUs if the num crtcs changed but state is the same, 1022 /* for pre-BTC and APUs if the num crtcs changed but state is the same,
1012 * all we need to do is update the display configuration. 1023 * all we need to do is update the display configuration.
@@ -1069,6 +1080,7 @@ force:
1069 1080
1070 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; 1081 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
1071 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; 1082 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
1083 rdev->pm.dpm.single_display = single_display;
1072 1084
1073 /* wait for the rings to drain */ 1085 /* wait for the rings to drain */
1074 for (i = 0; i < RADEON_NUM_RINGS; i++) { 1086 for (i = 0; i < RADEON_NUM_RINGS; i++) {
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 2456f69efd23..8c7872339c2a 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -495,7 +495,7 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
495 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); 495 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
496 seq_printf(m, "%u dwords in ring\n", count); 496 seq_printf(m, "%u dwords in ring\n", count);
497 497
498 if (!ring->ready) 498 if (!ring->ring)
499 return 0; 499 return 0;
500 500
501 /* print 8 dw before current rptr as often it's the last executed 501 /* print 8 dw before current rptr as often it's the last executed
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d02aa1d0f588..b292aca0f342 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -598,6 +598,10 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
598 enum dma_data_direction direction = write ? 598 enum dma_data_direction direction = write ?
599 DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 599 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
600 600
601 /* double check that we don't free the table twice */
602 if (!ttm->sg->sgl)
603 return;
604
601 /* free the sg table and pages again */ 605 /* free the sg table and pages again */
602 dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); 606 dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
603 607
diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c
index 1ac7bb825a1b..fbbe78fbd087 100644
--- a/drivers/gpu/drm/radeon/vce_v2_0.c
+++ b/drivers/gpu/drm/radeon/vce_v2_0.c
@@ -156,6 +156,9 @@ int vce_v2_0_resume(struct radeon_device *rdev)
156 WREG32(VCE_LMI_SWAP_CNTL1, 0); 156 WREG32(VCE_LMI_SWAP_CNTL1, 0);
157 WREG32(VCE_LMI_VM_CTRL, 0); 157 WREG32(VCE_LMI_VM_CTRL, 0);
158 158
159 WREG32(VCE_LMI_VCPU_CACHE_40BIT_BAR, addr >> 8);
160
161 addr &= 0xff;
159 size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size); 162 size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size);
160 WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff); 163 WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
161 WREG32(VCE_VCPU_CACHE_SIZE0, size); 164 WREG32(VCE_VCPU_CACHE_SIZE0, size);
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 1096da327130..75c6d2103e07 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -659,7 +659,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
659 659
660 mutex_lock(&data->mutex); 660 mutex_lock(&data->mutex);
661 661
662 for_each_set_bit(bit, indio_dev->buffer->scan_mask, 662 for_each_set_bit(bit, indio_dev->active_scan_mask,
663 indio_dev->masklength) { 663 indio_dev->masklength) {
664 ret = bma180_get_data_reg(data, bit); 664 ret = bma180_get_data_reg(data, bit);
665 if (ret < 0) { 665 if (ret < 0) {
diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c
index 066d0c04072c..75567fd457dc 100644
--- a/drivers/iio/accel/bmc150-accel.c
+++ b/drivers/iio/accel/bmc150-accel.c
@@ -168,14 +168,14 @@ static const struct {
168 int val; 168 int val;
169 int val2; 169 int val2;
170 u8 bw_bits; 170 u8 bw_bits;
171} bmc150_accel_samp_freq_table[] = { {7, 810000, 0x08}, 171} bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08},
172 {15, 630000, 0x09}, 172 {31, 260000, 0x09},
173 {31, 250000, 0x0A}, 173 {62, 500000, 0x0A},
174 {62, 500000, 0x0B}, 174 {125, 0, 0x0B},
175 {125, 0, 0x0C}, 175 {250, 0, 0x0C},
176 {250, 0, 0x0D}, 176 {500, 0, 0x0D},
177 {500, 0, 0x0E}, 177 {1000, 0, 0x0E},
178 {1000, 0, 0x0F} }; 178 {2000, 0, 0x0F} };
179 179
180static const struct { 180static const struct {
181 int bw_bits; 181 int bw_bits;
@@ -840,7 +840,7 @@ static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev,
840} 840}
841 841
842static IIO_CONST_ATTR_SAMP_FREQ_AVAIL( 842static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
843 "7.810000 15.630000 31.250000 62.500000 125 250 500 1000"); 843 "15.620000 31.260000 62.50000 125 250 500 1000 2000");
844 844
845static struct attribute *bmc150_accel_attributes[] = { 845static struct attribute *bmc150_accel_attributes[] = {
846 &iio_const_attr_sampling_frequency_available.dev_attr.attr, 846 &iio_const_attr_sampling_frequency_available.dev_attr.attr,
@@ -986,7 +986,7 @@ static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p)
986 int bit, ret, i = 0; 986 int bit, ret, i = 0;
987 987
988 mutex_lock(&data->mutex); 988 mutex_lock(&data->mutex);
989 for_each_set_bit(bit, indio_dev->buffer->scan_mask, 989 for_each_set_bit(bit, indio_dev->active_scan_mask,
990 indio_dev->masklength) { 990 indio_dev->masklength) {
991 ret = i2c_smbus_read_word_data(data->client, 991 ret = i2c_smbus_read_word_data(data->client,
992 BMC150_ACCEL_AXIS_TO_REG(bit)); 992 BMC150_ACCEL_AXIS_TO_REG(bit));
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 567de269cc00..1a6379525fa4 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -956,7 +956,7 @@ static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p)
956 956
957 mutex_lock(&data->mutex); 957 mutex_lock(&data->mutex);
958 958
959 for_each_set_bit(bit, indio_dev->buffer->scan_mask, 959 for_each_set_bit(bit, indio_dev->active_scan_mask,
960 indio_dev->masklength) { 960 indio_dev->masklength) {
961 ret = kxcjk1013_get_acc_reg(data, bit); 961 ret = kxcjk1013_get_acc_reg(data, bit);
962 if (ret < 0) { 962 if (ret < 0) {
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 202daf889be2..46379b1fb25b 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -137,7 +137,8 @@ config AXP288_ADC
137 137
138config CC10001_ADC 138config CC10001_ADC
139 tristate "Cosmic Circuits 10001 ADC driver" 139 tristate "Cosmic Circuits 10001 ADC driver"
140 depends on HAS_IOMEM || HAVE_CLK || REGULATOR 140 depends on HAVE_CLK || REGULATOR
141 depends on HAS_IOMEM
141 select IIO_BUFFER 142 select IIO_BUFFER
142 select IIO_TRIGGERED_BUFFER 143 select IIO_TRIGGERED_BUFFER
143 help 144 help
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index ff61ae55dd3f..8a0eb4a04fb5 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -544,7 +544,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
544{ 544{
545 struct iio_dev *idev = iio_trigger_get_drvdata(trig); 545 struct iio_dev *idev = iio_trigger_get_drvdata(trig);
546 struct at91_adc_state *st = iio_priv(idev); 546 struct at91_adc_state *st = iio_priv(idev);
547 struct iio_buffer *buffer = idev->buffer;
548 struct at91_adc_reg_desc *reg = st->registers; 547 struct at91_adc_reg_desc *reg = st->registers;
549 u32 status = at91_adc_readl(st, reg->trigger_register); 548 u32 status = at91_adc_readl(st, reg->trigger_register);
550 int value; 549 int value;
@@ -564,7 +563,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
564 at91_adc_writel(st, reg->trigger_register, 563 at91_adc_writel(st, reg->trigger_register,
565 status | value); 564 status | value);
566 565
567 for_each_set_bit(bit, buffer->scan_mask, 566 for_each_set_bit(bit, idev->active_scan_mask,
568 st->num_channels) { 567 st->num_channels) {
569 struct iio_chan_spec const *chan = idev->channels + bit; 568 struct iio_chan_spec const *chan = idev->channels + bit;
570 at91_adc_writel(st, AT91_ADC_CHER, 569 at91_adc_writel(st, AT91_ADC_CHER,
@@ -579,7 +578,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
579 at91_adc_writel(st, reg->trigger_register, 578 at91_adc_writel(st, reg->trigger_register,
580 status & ~value); 579 status & ~value);
581 580
582 for_each_set_bit(bit, buffer->scan_mask, 581 for_each_set_bit(bit, idev->active_scan_mask,
583 st->num_channels) { 582 st->num_channels) {
584 struct iio_chan_spec const *chan = idev->channels + bit; 583 struct iio_chan_spec const *chan = idev->channels + bit;
585 at91_adc_writel(st, AT91_ADC_CHDR, 584 at91_adc_writel(st, AT91_ADC_CHDR,
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 2e5cc4409f78..a0e7161f040c 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -188,12 +188,11 @@ static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
188static int tiadc_buffer_postenable(struct iio_dev *indio_dev) 188static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
189{ 189{
190 struct tiadc_device *adc_dev = iio_priv(indio_dev); 190 struct tiadc_device *adc_dev = iio_priv(indio_dev);
191 struct iio_buffer *buffer = indio_dev->buffer;
192 unsigned int enb = 0; 191 unsigned int enb = 0;
193 u8 bit; 192 u8 bit;
194 193
195 tiadc_step_config(indio_dev); 194 tiadc_step_config(indio_dev);
196 for_each_set_bit(bit, buffer->scan_mask, adc_dev->channels) 195 for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels)
197 enb |= (get_adc_step_bit(adc_dev, bit) << 1); 196 enb |= (get_adc_step_bit(adc_dev, bit) << 1);
198 adc_dev->buffer_en_ch_steps = enb; 197 adc_dev->buffer_en_ch_steps = enb;
199 198
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 8ec353c01d98..e63b8e76d4c3 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -141,9 +141,13 @@ struct vf610_adc {
141 struct regulator *vref; 141 struct regulator *vref;
142 struct vf610_adc_feature adc_feature; 142 struct vf610_adc_feature adc_feature;
143 143
144 u32 sample_freq_avail[5];
145
144 struct completion completion; 146 struct completion completion;
145}; 147};
146 148
149static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 };
150
147#define VF610_ADC_CHAN(_idx, _chan_type) { \ 151#define VF610_ADC_CHAN(_idx, _chan_type) { \
148 .type = (_chan_type), \ 152 .type = (_chan_type), \
149 .indexed = 1, \ 153 .indexed = 1, \
@@ -180,35 +184,47 @@ static const struct iio_chan_spec vf610_adc_iio_channels[] = {
180 /* sentinel */ 184 /* sentinel */
181}; 185};
182 186
183/* 187static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
184 * ADC sample frequency, unit is ADCK cycles. 188{
185 * ADC clk source is ipg clock, which is the same as bus clock. 189 unsigned long adck_rate, ipg_rate = clk_get_rate(info->clk);
186 * 190 int i;
187 * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder) 191
188 * SFCAdder: fixed to 6 ADCK cycles 192 /*
189 * AverageNum: 1, 4, 8, 16, 32 samples for hardware average. 193 * Calculate ADC sample frequencies
190 * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode 194 * Sample time unit is ADCK cycles. ADCK clk source is ipg clock,
191 * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles 195 * which is the same as bus clock.
192 * 196 *
193 * By default, enable 12 bit resolution mode, clock source 197 * ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
194 * set to ipg clock, So get below frequency group: 198 * SFCAdder: fixed to 6 ADCK cycles
195 */ 199 * AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
196static const u32 vf610_sample_freq_avail[5] = 200 * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
197{1941176, 559332, 286957, 145374, 73171}; 201 * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
202 */
203 adck_rate = ipg_rate / info->adc_feature.clk_div;
204 for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++)
205 info->sample_freq_avail[i] =
206 adck_rate / (6 + vf610_hw_avgs[i] * (25 + 3));
207}
198 208
199static inline void vf610_adc_cfg_init(struct vf610_adc *info) 209static inline void vf610_adc_cfg_init(struct vf610_adc *info)
200{ 210{
211 struct vf610_adc_feature *adc_feature = &info->adc_feature;
212
201 /* set default Configuration for ADC controller */ 213 /* set default Configuration for ADC controller */
202 info->adc_feature.clk_sel = VF610_ADCIOC_BUSCLK_SET; 214 adc_feature->clk_sel = VF610_ADCIOC_BUSCLK_SET;
203 info->adc_feature.vol_ref = VF610_ADCIOC_VR_VREF_SET; 215 adc_feature->vol_ref = VF610_ADCIOC_VR_VREF_SET;
216
217 adc_feature->calibration = true;
218 adc_feature->ovwren = true;
219
220 adc_feature->res_mode = 12;
221 adc_feature->sample_rate = 1;
222 adc_feature->lpm = true;
204 223
205 info->adc_feature.calibration = true; 224 /* Use a save ADCK which is below 20MHz on all devices */
206 info->adc_feature.ovwren = true; 225 adc_feature->clk_div = 8;
207 226
208 info->adc_feature.clk_div = 1; 227 vf610_adc_calculate_rates(info);
209 info->adc_feature.res_mode = 12;
210 info->adc_feature.sample_rate = 1;
211 info->adc_feature.lpm = true;
212} 228}
213 229
214static void vf610_adc_cfg_post_set(struct vf610_adc *info) 230static void vf610_adc_cfg_post_set(struct vf610_adc *info)
@@ -290,12 +306,10 @@ static void vf610_adc_cfg_set(struct vf610_adc *info)
290 306
291 cfg_data = readl(info->regs + VF610_REG_ADC_CFG); 307 cfg_data = readl(info->regs + VF610_REG_ADC_CFG);
292 308
293 /* low power configuration */
294 cfg_data &= ~VF610_ADC_ADLPC_EN; 309 cfg_data &= ~VF610_ADC_ADLPC_EN;
295 if (adc_feature->lpm) 310 if (adc_feature->lpm)
296 cfg_data |= VF610_ADC_ADLPC_EN; 311 cfg_data |= VF610_ADC_ADLPC_EN;
297 312
298 /* disable high speed */
299 cfg_data &= ~VF610_ADC_ADHSC_EN; 313 cfg_data &= ~VF610_ADC_ADHSC_EN;
300 314
301 writel(cfg_data, info->regs + VF610_REG_ADC_CFG); 315 writel(cfg_data, info->regs + VF610_REG_ADC_CFG);
@@ -435,10 +449,27 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
435 return IRQ_HANDLED; 449 return IRQ_HANDLED;
436} 450}
437 451
438static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1941176, 559332, 286957, 145374, 73171"); 452static ssize_t vf610_show_samp_freq_avail(struct device *dev,
453 struct device_attribute *attr, char *buf)
454{
455 struct vf610_adc *info = iio_priv(dev_to_iio_dev(dev));
456 size_t len = 0;
457 int i;
458
459 for (i = 0; i < ARRAY_SIZE(info->sample_freq_avail); i++)
460 len += scnprintf(buf + len, PAGE_SIZE - len,
461 "%u ", info->sample_freq_avail[i]);
462
463 /* replace trailing space by newline */
464 buf[len - 1] = '\n';
465
466 return len;
467}
468
469static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(vf610_show_samp_freq_avail);
439 470
440static struct attribute *vf610_attributes[] = { 471static struct attribute *vf610_attributes[] = {
441 &iio_const_attr_sampling_frequency_available.dev_attr.attr, 472 &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
442 NULL 473 NULL
443}; 474};
444 475
@@ -502,7 +533,7 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
502 return IIO_VAL_FRACTIONAL_LOG2; 533 return IIO_VAL_FRACTIONAL_LOG2;
503 534
504 case IIO_CHAN_INFO_SAMP_FREQ: 535 case IIO_CHAN_INFO_SAMP_FREQ:
505 *val = vf610_sample_freq_avail[info->adc_feature.sample_rate]; 536 *val = info->sample_freq_avail[info->adc_feature.sample_rate];
506 *val2 = 0; 537 *val2 = 0;
507 return IIO_VAL_INT; 538 return IIO_VAL_INT;
508 539
@@ -525,9 +556,9 @@ static int vf610_write_raw(struct iio_dev *indio_dev,
525 switch (mask) { 556 switch (mask) {
526 case IIO_CHAN_INFO_SAMP_FREQ: 557 case IIO_CHAN_INFO_SAMP_FREQ:
527 for (i = 0; 558 for (i = 0;
528 i < ARRAY_SIZE(vf610_sample_freq_avail); 559 i < ARRAY_SIZE(info->sample_freq_avail);
529 i++) 560 i++)
530 if (val == vf610_sample_freq_avail[i]) { 561 if (val == info->sample_freq_avail[i]) {
531 info->adc_feature.sample_rate = i; 562 info->adc_feature.sample_rate = i;
532 vf610_adc_sample_set(info); 563 vf610_adc_sample_set(info);
533 return 0; 564 return 0;
diff --git a/drivers/iio/gyro/bmg160.c b/drivers/iio/gyro/bmg160.c
index 60451b328242..ccf3ea7e1afa 100644
--- a/drivers/iio/gyro/bmg160.c
+++ b/drivers/iio/gyro/bmg160.c
@@ -822,7 +822,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
822 int bit, ret, i = 0; 822 int bit, ret, i = 0;
823 823
824 mutex_lock(&data->mutex); 824 mutex_lock(&data->mutex);
825 for_each_set_bit(bit, indio_dev->buffer->scan_mask, 825 for_each_set_bit(bit, indio_dev->active_scan_mask,
826 indio_dev->masklength) { 826 indio_dev->masklength) {
827 ret = i2c_smbus_read_word_data(data->client, 827 ret = i2c_smbus_read_word_data(data->client,
828 BMG160_AXIS_TO_REG(bit)); 828 BMG160_AXIS_TO_REG(bit));
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
index e0017c22bb9c..f53e9a803a0e 100644
--- a/drivers/iio/imu/adis_trigger.c
+++ b/drivers/iio/imu/adis_trigger.c
@@ -60,7 +60,7 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
60 iio_trigger_set_drvdata(adis->trig, adis); 60 iio_trigger_set_drvdata(adis->trig, adis);
61 ret = iio_trigger_register(adis->trig); 61 ret = iio_trigger_register(adis->trig);
62 62
63 indio_dev->trig = adis->trig; 63 indio_dev->trig = iio_trigger_get(adis->trig);
64 if (ret) 64 if (ret)
65 goto error_free_irq; 65 goto error_free_irq;
66 66
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index d8d5bed65e07..ef76afe2643c 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -410,42 +410,46 @@ error_read_raw:
410 } 410 }
411} 411}
412 412
413static int inv_mpu6050_write_fsr(struct inv_mpu6050_state *st, int fsr) 413static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
414{ 414{
415 int result; 415 int result, i;
416 u8 d; 416 u8 d;
417 417
418 if (fsr < 0 || fsr > INV_MPU6050_MAX_GYRO_FS_PARAM) 418 for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
419 return -EINVAL; 419 if (gyro_scale_6050[i] == val) {
420 if (fsr == st->chip_config.fsr) 420 d = (i << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
421 return 0; 421 result = inv_mpu6050_write_reg(st,
422 st->reg->gyro_config, d);
423 if (result)
424 return result;
422 425
423 d = (fsr << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT); 426 st->chip_config.fsr = i;
424 result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d); 427 return 0;
425 if (result) 428 }
426 return result; 429 }
427 st->chip_config.fsr = fsr;
428 430
429 return 0; 431 return -EINVAL;
430} 432}
431 433
432static int inv_mpu6050_write_accel_fs(struct inv_mpu6050_state *st, int fs) 434static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
433{ 435{
434 int result; 436 int result, i;
435 u8 d; 437 u8 d;
436 438
437 if (fs < 0 || fs > INV_MPU6050_MAX_ACCL_FS_PARAM) 439 for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) {
438 return -EINVAL; 440 if (accel_scale[i] == val) {
439 if (fs == st->chip_config.accl_fs) 441 d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
440 return 0; 442 result = inv_mpu6050_write_reg(st,
443 st->reg->accl_config, d);
444 if (result)
445 return result;
441 446
442 d = (fs << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT); 447 st->chip_config.accl_fs = i;
443 result = inv_mpu6050_write_reg(st, st->reg->accl_config, d); 448 return 0;
444 if (result) 449 }
445 return result; 450 }
446 st->chip_config.accl_fs = fs;
447 451
448 return 0; 452 return -EINVAL;
449} 453}
450 454
451static int inv_mpu6050_write_raw(struct iio_dev *indio_dev, 455static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
@@ -471,10 +475,10 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
471 case IIO_CHAN_INFO_SCALE: 475 case IIO_CHAN_INFO_SCALE:
472 switch (chan->type) { 476 switch (chan->type) {
473 case IIO_ANGL_VEL: 477 case IIO_ANGL_VEL:
474 result = inv_mpu6050_write_fsr(st, val); 478 result = inv_mpu6050_write_gyro_scale(st, val2);
475 break; 479 break;
476 case IIO_ACCEL: 480 case IIO_ACCEL:
477 result = inv_mpu6050_write_accel_fs(st, val); 481 result = inv_mpu6050_write_accel_scale(st, val2);
478 break; 482 break;
479 default: 483 default:
480 result = -EINVAL; 484 result = -EINVAL;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 0cd306a72a6e..ba27e277511f 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -24,6 +24,16 @@
24#include <linux/poll.h> 24#include <linux/poll.h>
25#include "inv_mpu_iio.h" 25#include "inv_mpu_iio.h"
26 26
27static void inv_clear_kfifo(struct inv_mpu6050_state *st)
28{
29 unsigned long flags;
30
31 /* take the spin lock sem to avoid interrupt kick in */
32 spin_lock_irqsave(&st->time_stamp_lock, flags);
33 kfifo_reset(&st->timestamps);
34 spin_unlock_irqrestore(&st->time_stamp_lock, flags);
35}
36
27int inv_reset_fifo(struct iio_dev *indio_dev) 37int inv_reset_fifo(struct iio_dev *indio_dev)
28{ 38{
29 int result; 39 int result;
@@ -50,6 +60,10 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
50 INV_MPU6050_BIT_FIFO_RST); 60 INV_MPU6050_BIT_FIFO_RST);
51 if (result) 61 if (result)
52 goto reset_fifo_fail; 62 goto reset_fifo_fail;
63
64 /* clear timestamps fifo */
65 inv_clear_kfifo(st);
66
53 /* enable interrupt */ 67 /* enable interrupt */
54 if (st->chip_config.accl_fifo_enable || 68 if (st->chip_config.accl_fifo_enable ||
55 st->chip_config.gyro_fifo_enable) { 69 st->chip_config.gyro_fifo_enable) {
@@ -83,16 +97,6 @@ reset_fifo_fail:
83 return result; 97 return result;
84} 98}
85 99
86static void inv_clear_kfifo(struct inv_mpu6050_state *st)
87{
88 unsigned long flags;
89
90 /* take the spin lock sem to avoid interrupt kick in */
91 spin_lock_irqsave(&st->time_stamp_lock, flags);
92 kfifo_reset(&st->timestamps);
93 spin_unlock_irqrestore(&st->time_stamp_lock, flags);
94}
95
96/** 100/**
97 * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt. 101 * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
98 */ 102 */
@@ -184,7 +188,6 @@ end_session:
184flush_fifo: 188flush_fifo:
185 /* Flush HW and SW FIFOs. */ 189 /* Flush HW and SW FIFOs. */
186 inv_reset_fifo(indio_dev); 190 inv_reset_fifo(indio_dev);
187 inv_clear_kfifo(st);
188 mutex_unlock(&indio_dev->mlock); 191 mutex_unlock(&indio_dev->mlock);
189 iio_trigger_notify_done(indio_dev->trig); 192 iio_trigger_notify_done(indio_dev->trig);
190 193
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index 5cc3692acf37..b3a36376c719 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1227,7 +1227,7 @@ static irqreturn_t kmx61_trigger_handler(int irq, void *p)
1227 base = KMX61_MAG_XOUT_L; 1227 base = KMX61_MAG_XOUT_L;
1228 1228
1229 mutex_lock(&data->lock); 1229 mutex_lock(&data->lock);
1230 for_each_set_bit(bit, indio_dev->buffer->scan_mask, 1230 for_each_set_bit(bit, indio_dev->active_scan_mask,
1231 indio_dev->masklength) { 1231 indio_dev->masklength) {
1232 ret = kmx61_read_measurement(data, base, bit); 1232 ret = kmx61_read_measurement(data, base, bit);
1233 if (ret < 0) { 1233 if (ret < 0) {
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index aaba9d3d980e..4df97f650e44 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -847,8 +847,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
847 * @attr_list: List of IIO device attributes 847 * @attr_list: List of IIO device attributes
848 * 848 *
849 * This function frees the memory allocated for each of the IIO device 849 * This function frees the memory allocated for each of the IIO device
850 * attributes in the list. Note: if you want to reuse the list after calling 850 * attributes in the list.
851 * this function you have to reinitialize it using INIT_LIST_HEAD().
852 */ 851 */
853void iio_free_chan_devattr_list(struct list_head *attr_list) 852void iio_free_chan_devattr_list(struct list_head *attr_list)
854{ 853{
@@ -856,6 +855,7 @@ void iio_free_chan_devattr_list(struct list_head *attr_list)
856 855
857 list_for_each_entry_safe(p, n, attr_list, l) { 856 list_for_each_entry_safe(p, n, attr_list, l) {
858 kfree(p->dev_attr.attr.name); 857 kfree(p->dev_attr.attr.name);
858 list_del(&p->l);
859 kfree(p); 859 kfree(p);
860 } 860 }
861} 861}
@@ -936,6 +936,7 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
936 936
937 iio_free_chan_devattr_list(&indio_dev->channel_attr_list); 937 iio_free_chan_devattr_list(&indio_dev->channel_attr_list);
938 kfree(indio_dev->chan_attr_group.attrs); 938 kfree(indio_dev->chan_attr_group.attrs);
939 indio_dev->chan_attr_group.attrs = NULL;
939} 940}
940 941
941static void iio_dev_release(struct device *device) 942static void iio_dev_release(struct device *device)
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index a4b397048f71..a99692ba91bc 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -500,6 +500,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
500error_free_setup_event_lines: 500error_free_setup_event_lines:
501 iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list); 501 iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
502 kfree(indio_dev->event_interface); 502 kfree(indio_dev->event_interface);
503 indio_dev->event_interface = NULL;
503 return ret; 504 return ret;
504} 505}
505 506
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 74dff4e4a11a..89fca3a70750 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -494,7 +494,7 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private)
494 494
495 mutex_lock(&data->mutex); 495 mutex_lock(&data->mutex);
496 496
497 for_each_set_bit(bit, indio_dev->buffer->scan_mask, 497 for_each_set_bit(bit, indio_dev->active_scan_mask,
498 indio_dev->masklength) { 498 indio_dev->masklength) {
499 ret = sx9500_read_proximity(data, &indio_dev->channels[bit], 499 ret = sx9500_read_proximity(data, &indio_dev->channels[bit],
500 &val); 500 &val);
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index aec7a6aa2951..8c014b5dab4c 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -99,6 +99,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
99 if (dmasync) 99 if (dmasync)
100 dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); 100 dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
101 101
102 /*
103 * If the combination of the addr and size requested for this memory
104 * region causes an integer overflow, return error.
105 */
106 if ((PAGE_ALIGN(addr + size) <= size) ||
107 (PAGE_ALIGN(addr + size) <= addr))
108 return ERR_PTR(-EINVAL);
109
102 if (!can_do_mlock()) 110 if (!can_do_mlock())
103 return ERR_PTR(-EPERM); 111 return ERR_PTR(-EPERM);
104 112
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index b972c0b41799..976bea794b5f 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -587,8 +587,9 @@ static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_vio
587 ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask); 587 ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
588 } 588 }
589 589
590 err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, 590 err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
591 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); 591 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
592 MLX4_CMD_WRAPPED);
592 593
593 mlx4_free_cmd_mailbox(dev->dev, mailbox); 594 mlx4_free_cmd_mailbox(dev->dev, mailbox);
594 return err; 595 return err;
@@ -1525,8 +1526,8 @@ static void update_gids_task(struct work_struct *work)
1525 memcpy(gids, gw->gids, sizeof gw->gids); 1526 memcpy(gids, gw->gids, sizeof gw->gids);
1526 1527
1527 err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port, 1528 err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
1528 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 1529 MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
1529 MLX4_CMD_WRAPPED); 1530 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
1530 if (err) 1531 if (err)
1531 pr_warn("set port command failed\n"); 1532 pr_warn("set port command failed\n");
1532 else 1533 else
@@ -1564,7 +1565,7 @@ static void reset_gids_task(struct work_struct *work)
1564 IB_LINK_LAYER_ETHERNET) { 1565 IB_LINK_LAYER_ETHERNET) {
1565 err = mlx4_cmd(dev, mailbox->dma, 1566 err = mlx4_cmd(dev, mailbox->dma,
1566 MLX4_SET_PORT_GID_TABLE << 8 | gw->port, 1567 MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
1567 1, MLX4_CMD_SET_PORT, 1568 MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
1568 MLX4_CMD_TIME_CLASS_B, 1569 MLX4_CMD_TIME_CLASS_B,
1569 MLX4_CMD_WRAPPED); 1570 MLX4_CMD_WRAPPED);
1570 if (err) 1571 if (err)
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 39ab0caefdf9..66080580e24d 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index c463e7bba5f4..2ee6b1051975 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -572,11 +572,15 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
572 572
573int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) 573int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
574{ 574{
575 struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
576 void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
577
575 mlx5_cq_arm(&to_mcq(ibcq)->mcq, 578 mlx5_cq_arm(&to_mcq(ibcq)->mcq,
576 (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? 579 (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
577 MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT, 580 MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
578 to_mdev(ibcq->device)->mdev->priv.uuari.uars[0].map, 581 uar_page,
579 MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev->priv.cq_uar_lock)); 582 MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
583 to_mcq(ibcq)->mcq.cons_index);
580 584
581 return 0; 585 return 0;
582} 586}
@@ -697,8 +701,6 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
697 701
698 cq->mcq.set_ci_db = cq->db.db; 702 cq->mcq.set_ci_db = cq->db.db;
699 cq->mcq.arm_db = cq->db.db + 1; 703 cq->mcq.arm_db = cq->db.db + 1;
700 *cq->mcq.set_ci_db = 0;
701 *cq->mcq.arm_db = 0;
702 cq->mcq.cqe_sz = cqe_size; 704 cq->mcq.cqe_sz = cqe_size;
703 705
704 err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size); 706 err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
@@ -782,7 +784,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
782 cq->cqe_size = cqe_size; 784 cq->cqe_size = cqe_size;
783 cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5; 785 cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
784 cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index); 786 cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
785 err = mlx5_vector2eqn(dev, vector, &eqn, &irqn); 787 err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
786 if (err) 788 if (err)
787 goto err_cqb; 789 goto err_cqb;
788 790
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index ece028fc47d6..a0e4e6ddb71a 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 657af9a1167c..9cf9a37bb5ff 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index cc4ac1e583b2..57c9809e8b87 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -62,95 +62,6 @@ static char mlx5_version[] =
62 DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v" 62 DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
63 DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; 63 DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
64 64
65int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
66{
67 struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
68 struct mlx5_eq *eq, *n;
69 int err = -ENOENT;
70
71 spin_lock(&table->lock);
72 list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
73 if (eq->index == vector) {
74 *eqn = eq->eqn;
75 *irqn = eq->irqn;
76 err = 0;
77 break;
78 }
79 }
80 spin_unlock(&table->lock);
81
82 return err;
83}
84
85static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
86{
87 struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
88 char name[MLX5_MAX_EQ_NAME];
89 struct mlx5_eq *eq, *n;
90 int ncomp_vec;
91 int nent;
92 int err;
93 int i;
94
95 INIT_LIST_HEAD(&dev->eqs_list);
96 ncomp_vec = table->num_comp_vectors;
97 nent = MLX5_COMP_EQ_SIZE;
98 for (i = 0; i < ncomp_vec; i++) {
99 eq = kzalloc(sizeof(*eq), GFP_KERNEL);
100 if (!eq) {
101 err = -ENOMEM;
102 goto clean;
103 }
104
105 snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
106 err = mlx5_create_map_eq(dev->mdev, eq,
107 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
108 name, &dev->mdev->priv.uuari.uars[0]);
109 if (err) {
110 kfree(eq);
111 goto clean;
112 }
113 mlx5_ib_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
114 eq->index = i;
115 spin_lock(&table->lock);
116 list_add_tail(&eq->list, &dev->eqs_list);
117 spin_unlock(&table->lock);
118 }
119
120 dev->num_comp_vectors = ncomp_vec;
121 return 0;
122
123clean:
124 spin_lock(&table->lock);
125 list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
126 list_del(&eq->list);
127 spin_unlock(&table->lock);
128 if (mlx5_destroy_unmap_eq(dev->mdev, eq))
129 mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
130 kfree(eq);
131 spin_lock(&table->lock);
132 }
133 spin_unlock(&table->lock);
134 return err;
135}
136
137static void free_comp_eqs(struct mlx5_ib_dev *dev)
138{
139 struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
140 struct mlx5_eq *eq, *n;
141
142 spin_lock(&table->lock);
143 list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
144 list_del(&eq->list);
145 spin_unlock(&table->lock);
146 if (mlx5_destroy_unmap_eq(dev->mdev, eq))
147 mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
148 kfree(eq);
149 spin_lock(&table->lock);
150 }
151 spin_unlock(&table->lock);
152}
153
154static int mlx5_ib_query_device(struct ib_device *ibdev, 65static int mlx5_ib_query_device(struct ib_device *ibdev,
155 struct ib_device_attr *props) 66 struct ib_device_attr *props)
156{ 67{
@@ -1291,10 +1202,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
1291 1202
1292 get_ext_port_caps(dev); 1203 get_ext_port_caps(dev);
1293 1204
1294 err = alloc_comp_eqs(dev);
1295 if (err)
1296 goto err_dealloc;
1297
1298 MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock); 1205 MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
1299 1206
1300 strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); 1207 strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
@@ -1303,7 +1210,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
1303 dev->ib_dev.local_dma_lkey = mdev->caps.gen.reserved_lkey; 1210 dev->ib_dev.local_dma_lkey = mdev->caps.gen.reserved_lkey;
1304 dev->num_ports = mdev->caps.gen.num_ports; 1211 dev->num_ports = mdev->caps.gen.num_ports;
1305 dev->ib_dev.phys_port_cnt = dev->num_ports; 1212 dev->ib_dev.phys_port_cnt = dev->num_ports;
1306 dev->ib_dev.num_comp_vectors = dev->num_comp_vectors; 1213 dev->ib_dev.num_comp_vectors =
1214 dev->mdev->priv.eq_table.num_comp_vectors;
1307 dev->ib_dev.dma_device = &mdev->pdev->dev; 1215 dev->ib_dev.dma_device = &mdev->pdev->dev;
1308 1216
1309 dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION; 1217 dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
@@ -1390,13 +1298,13 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
1390 1298
1391 err = init_node_data(dev); 1299 err = init_node_data(dev);
1392 if (err) 1300 if (err)
1393 goto err_eqs; 1301 goto err_dealloc;
1394 1302
1395 mutex_init(&dev->cap_mask_mutex); 1303 mutex_init(&dev->cap_mask_mutex);
1396 1304
1397 err = create_dev_resources(&dev->devr); 1305 err = create_dev_resources(&dev->devr);
1398 if (err) 1306 if (err)
1399 goto err_eqs; 1307 goto err_dealloc;
1400 1308
1401 err = mlx5_ib_odp_init_one(dev); 1309 err = mlx5_ib_odp_init_one(dev);
1402 if (err) 1310 if (err)
@@ -1433,9 +1341,6 @@ err_odp:
1433err_rsrc: 1341err_rsrc:
1434 destroy_dev_resources(&dev->devr); 1342 destroy_dev_resources(&dev->devr);
1435 1343
1436err_eqs:
1437 free_comp_eqs(dev);
1438
1439err_dealloc: 1344err_dealloc:
1440 ib_dealloc_device((struct ib_device *)dev); 1345 ib_dealloc_device((struct ib_device *)dev);
1441 1346
@@ -1450,7 +1355,6 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
1450 destroy_umrc_res(dev); 1355 destroy_umrc_res(dev);
1451 mlx5_ib_odp_remove_one(dev); 1356 mlx5_ib_odp_remove_one(dev);
1452 destroy_dev_resources(&dev->devr); 1357 destroy_dev_resources(&dev->devr);
1453 free_comp_eqs(dev);
1454 ib_dealloc_device(&dev->ib_dev); 1358 ib_dealloc_device(&dev->ib_dev);
1455} 1359}
1456 1360
@@ -1458,6 +1362,7 @@ static struct mlx5_interface mlx5_ib_interface = {
1458 .add = mlx5_ib_add, 1362 .add = mlx5_ib_add,
1459 .remove = mlx5_ib_remove, 1363 .remove = mlx5_ib_remove,
1460 .event = mlx5_ib_event, 1364 .event = mlx5_ib_event,
1365 .protocol = MLX5_INTERFACE_PROTOCOL_IB,
1461}; 1366};
1462 1367
1463static int __init mlx5_ib_init(void) 1368static int __init mlx5_ib_init(void)
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 611a9fdf2f38..40df2cca0609 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 83f22fe297c8..dff1cfcdf476 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -421,9 +421,7 @@ struct mlx5_ib_dev {
421 struct ib_device ib_dev; 421 struct ib_device ib_dev;
422 struct mlx5_core_dev *mdev; 422 struct mlx5_core_dev *mdev;
423 MLX5_DECLARE_DOORBELL_LOCK(uar_lock); 423 MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
424 struct list_head eqs_list;
425 int num_ports; 424 int num_ports;
426 int num_comp_vectors;
427 /* serialize update of capability mask 425 /* serialize update of capability mask
428 */ 426 */
429 struct mutex cap_mask_mutex; 427 struct mutex cap_mask_mutex;
@@ -594,7 +592,6 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
594 struct ib_ucontext *context, 592 struct ib_ucontext *context,
595 struct ib_udata *udata); 593 struct ib_udata *udata);
596int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd); 594int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
597int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn);
598int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset); 595int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
599int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port); 596int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
600int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, 597int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index cd9822eeacae..71c593583864 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index a2c541c4809a..5099db08afd2 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014 Mellanox Technologies. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index be0cd358b080..4d7024b899cb 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -796,9 +796,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
796 goto err_free; 796 goto err_free;
797 } 797 }
798 798
799 qp->db.db[0] = 0;
800 qp->db.db[1] = 0;
801
802 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL); 799 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
803 qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL); 800 qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
804 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL); 801 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
@@ -1162,10 +1159,11 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
1162 in = kzalloc(sizeof(*in), GFP_KERNEL); 1159 in = kzalloc(sizeof(*in), GFP_KERNEL);
1163 if (!in) 1160 if (!in)
1164 return; 1161 return;
1162
1165 if (qp->state != IB_QPS_RESET) { 1163 if (qp->state != IB_QPS_RESET) {
1166 mlx5_ib_qp_disable_pagefaults(qp); 1164 mlx5_ib_qp_disable_pagefaults(qp);
1167 if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state), 1165 if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
1168 MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp)) 1166 MLX5_QP_STATE_RST, in, 0, &qp->mqp))
1169 mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n", 1167 mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
1170 qp->mqp.qpn); 1168 qp->mqp.qpn);
1171 } 1169 }
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 41fec66217dd..02d77a29764d 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -165,8 +165,6 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
165 return err; 165 return err;
166 } 166 }
167 167
168 *srq->db.db = 0;
169
170 if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) { 168 if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
171 mlx5_ib_dbg(dev, "buf alloc failed\n"); 169 mlx5_ib_dbg(dev, "buf alloc failed\n");
172 err = -ENOMEM; 170 err = -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index d0ba264ac1ed..76fb7b927d37 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 58b5aa3b6f2d..657b89b1d291 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -842,6 +842,13 @@ static void ipoib_set_mcast_list(struct net_device *dev)
842 queue_work(ipoib_workqueue, &priv->restart_task); 842 queue_work(ipoib_workqueue, &priv->restart_task);
843} 843}
844 844
845static int ipoib_get_iflink(const struct net_device *dev)
846{
847 struct ipoib_dev_priv *priv = netdev_priv(dev);
848
849 return priv->parent->ifindex;
850}
851
845static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr) 852static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
846{ 853{
847 /* 854 /*
@@ -1341,6 +1348,7 @@ static const struct net_device_ops ipoib_netdev_ops = {
1341 .ndo_start_xmit = ipoib_start_xmit, 1348 .ndo_start_xmit = ipoib_start_xmit,
1342 .ndo_tx_timeout = ipoib_timeout, 1349 .ndo_tx_timeout = ipoib_timeout,
1343 .ndo_set_rx_mode = ipoib_set_mcast_list, 1350 .ndo_set_rx_mode = ipoib_set_mcast_list,
1351 .ndo_get_iflink = ipoib_get_iflink,
1344}; 1352};
1345 1353
1346void ipoib_setup(struct net_device *dev) 1354void ipoib_setup(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 9fad7b5ac8b9..4dd1313056a4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -102,7 +102,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
102 } 102 }
103 103
104 priv->child_type = type; 104 priv->child_type = type;
105 priv->dev->iflink = ppriv->dev->ifindex;
106 list_add_tail(&priv->list, &ppriv->child_intfs); 105 list_add_tail(&priv->list, &ppriv->child_intfs);
107 106
108 return 0; 107 return 0;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 1bd15ebc01f2..27bcdbc950c9 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1154,10 +1154,28 @@ out:
1154 mutex_unlock(&alps_mutex); 1154 mutex_unlock(&alps_mutex);
1155} 1155}
1156 1156
1157static void alps_report_bare_ps2_packet(struct input_dev *dev, 1157static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
1158 unsigned char packet[], 1158 unsigned char packet[],
1159 bool report_buttons) 1159 bool report_buttons)
1160{ 1160{
1161 struct alps_data *priv = psmouse->private;
1162 struct input_dev *dev;
1163
1164 /* Figure out which device to use to report the bare packet */
1165 if (priv->proto_version == ALPS_PROTO_V2 &&
1166 (priv->flags & ALPS_DUALPOINT)) {
1167 /* On V2 devices the DualPoint Stick reports bare packets */
1168 dev = priv->dev2;
1169 } else if (unlikely(IS_ERR_OR_NULL(priv->dev3))) {
1170 /* Register dev3 mouse if we received PS/2 packet first time */
1171 if (!IS_ERR(priv->dev3))
1172 psmouse_queue_work(psmouse, &priv->dev3_register_work,
1173 0);
1174 return;
1175 } else {
1176 dev = priv->dev3;
1177 }
1178
1161 if (report_buttons) 1179 if (report_buttons)
1162 alps_report_buttons(dev, NULL, 1180 alps_report_buttons(dev, NULL,
1163 packet[0] & 1, packet[0] & 2, packet[0] & 4); 1181 packet[0] & 1, packet[0] & 2, packet[0] & 4);
@@ -1232,8 +1250,8 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
1232 * de-synchronization. 1250 * de-synchronization.
1233 */ 1251 */
1234 1252
1235 alps_report_bare_ps2_packet(priv->dev2, 1253 alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3],
1236 &psmouse->packet[3], false); 1254 false);
1237 1255
1238 /* 1256 /*
1239 * Continue with the standard ALPS protocol handling, 1257 * Continue with the standard ALPS protocol handling,
@@ -1289,18 +1307,9 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
1289 * properly we only do this if the device is fully synchronized. 1307 * properly we only do this if the device is fully synchronized.
1290 */ 1308 */
1291 if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) { 1309 if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
1292
1293 /* Register dev3 mouse if we received PS/2 packet first time */
1294 if (unlikely(!priv->dev3))
1295 psmouse_queue_work(psmouse,
1296 &priv->dev3_register_work, 0);
1297
1298 if (psmouse->pktcnt == 3) { 1310 if (psmouse->pktcnt == 3) {
1299 /* Once dev3 mouse device is registered report data */ 1311 alps_report_bare_ps2_packet(psmouse, psmouse->packet,
1300 if (likely(!IS_ERR_OR_NULL(priv->dev3))) 1312 true);
1301 alps_report_bare_ps2_packet(priv->dev3,
1302 psmouse->packet,
1303 true);
1304 return PSMOUSE_FULL_PACKET; 1313 return PSMOUSE_FULL_PACKET;
1305 } 1314 }
1306 return PSMOUSE_GOOD_DATA; 1315 return PSMOUSE_GOOD_DATA;
@@ -2281,10 +2290,12 @@ static int alps_set_protocol(struct psmouse *psmouse,
2281 priv->set_abs_params = alps_set_abs_params_mt; 2290 priv->set_abs_params = alps_set_abs_params_mt;
2282 priv->nibble_commands = alps_v3_nibble_commands; 2291 priv->nibble_commands = alps_v3_nibble_commands;
2283 priv->addr_command = PSMOUSE_CMD_RESET_WRAP; 2292 priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
2284 priv->x_max = 1360;
2285 priv->y_max = 660;
2286 priv->x_bits = 23; 2293 priv->x_bits = 23;
2287 priv->y_bits = 12; 2294 priv->y_bits = 12;
2295
2296 if (alps_dolphin_get_device_area(psmouse, priv))
2297 return -EIO;
2298
2288 break; 2299 break;
2289 2300
2290 case ALPS_PROTO_V6: 2301 case ALPS_PROTO_V6:
@@ -2303,9 +2314,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
2303 priv->set_abs_params = alps_set_abs_params_mt; 2314 priv->set_abs_params = alps_set_abs_params_mt;
2304 priv->nibble_commands = alps_v3_nibble_commands; 2315 priv->nibble_commands = alps_v3_nibble_commands;
2305 priv->addr_command = PSMOUSE_CMD_RESET_WRAP; 2316 priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
2306 2317 priv->x_max = 0xfff;
2307 if (alps_dolphin_get_device_area(psmouse, priv)) 2318 priv->y_max = 0x7ff;
2308 return -EIO;
2309 2319
2310 if (priv->fw_ver[1] != 0xba) 2320 if (priv->fw_ver[1] != 0xba)
2311 priv->flags |= ALPS_BUTTONPAD; 2321 priv->flags |= ALPS_BUTTONPAD;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index dda605836546..3b06c8a360b6 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -154,6 +154,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
154 }, 154 },
155 { 155 {
156 (const char * const []){"LEN2006", NULL}, 156 (const char * const []){"LEN2006", NULL},
157 {2691, 2691},
158 1024, 5045, 2457, 4832
159 },
160 {
161 (const char * const []){"LEN2006", NULL},
157 {ANY_BOARD_ID, ANY_BOARD_ID}, 162 {ANY_BOARD_ID, ANY_BOARD_ID},
158 1264, 5675, 1171, 4688 163 1264, 5675, 1171, 4688
159 }, 164 },
@@ -189,7 +194,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
189 "LEN2003", 194 "LEN2003",
190 "LEN2004", /* L440 */ 195 "LEN2004", /* L440 */
191 "LEN2005", 196 "LEN2005",
192 "LEN2006", 197 "LEN2006", /* Edge E440/E540 */
193 "LEN2007", 198 "LEN2007",
194 "LEN2008", 199 "LEN2008",
195 "LEN2009", 200 "LEN2009",
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index fc13dd56953e..a3adde6519f0 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1288,10 +1288,13 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
1288 return 0; 1288 return 0;
1289 1289
1290 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); 1290 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1291 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS) 1291 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1292 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1292 ret = arm_smmu_iova_to_phys_hard(domain, iova); 1293 ret = arm_smmu_iova_to_phys_hard(domain, iova);
1293 else 1294 } else {
1294 ret = ops->iova_to_phys(ops, iova); 1295 ret = ops->iova_to_phys(ops, iova);
1296 }
1297
1295 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); 1298 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1296 1299
1297 return ret; 1300 return ret;
@@ -1556,7 +1559,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1556 return -ENODEV; 1559 return -ENODEV;
1557 } 1560 }
1558 1561
1559 if (smmu->version == 1 || (!(id & ID0_ATOSNS) && (id & ID0_S1TS))) { 1562 if ((id & ID0_S1TS) && ((smmu->version == 1) || (id & ID0_ATOSNS))) {
1560 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; 1563 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1561 dev_notice(smmu->dev, "\taddress translation ops\n"); 1564 dev_notice(smmu->dev, "\taddress translation ops\n");
1562 } 1565 }
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index ae4c1a854e57..2d1e05bdbb53 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1742,9 +1742,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1742 1742
1743static void domain_exit(struct dmar_domain *domain) 1743static void domain_exit(struct dmar_domain *domain)
1744{ 1744{
1745 struct dmar_drhd_unit *drhd;
1746 struct intel_iommu *iommu;
1747 struct page *freelist = NULL; 1745 struct page *freelist = NULL;
1746 int i;
1748 1747
1749 /* Domain 0 is reserved, so dont process it */ 1748 /* Domain 0 is reserved, so dont process it */
1750 if (!domain) 1749 if (!domain)
@@ -1764,8 +1763,8 @@ static void domain_exit(struct dmar_domain *domain)
1764 1763
1765 /* clear attached or cached domains */ 1764 /* clear attached or cached domains */
1766 rcu_read_lock(); 1765 rcu_read_lock();
1767 for_each_active_iommu(iommu, drhd) 1766 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
1768 iommu_detach_domain(domain, iommu); 1767 iommu_detach_domain(domain, g_iommus[i]);
1769 rcu_read_unlock(); 1768 rcu_read_unlock();
1770 1769
1771 dma_free_pagelist(freelist); 1770 dma_free_pagelist(freelist);
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 10186cac7716..bc39bdf7b99b 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -851,6 +851,7 @@ static int ipmmu_remove(struct platform_device *pdev)
851 851
852static const struct of_device_id ipmmu_of_ids[] = { 852static const struct of_device_id ipmmu_of_ids[] = {
853 { .compatible = "renesas,ipmmu-vmsa", }, 853 { .compatible = "renesas,ipmmu-vmsa", },
854 { }
854}; 855};
855 856
856static struct platform_driver ipmmu_driver = { 857static struct platform_driver ipmmu_driver = {
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 596b0a9eee99..9687f8afebff 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -169,7 +169,7 @@ static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
169 169
170static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) 170static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
171{ 171{
172 cmd->raw_cmd[0] &= ~(0xffffUL << 32); 172 cmd->raw_cmd[0] &= BIT_ULL(32) - 1;
173 cmd->raw_cmd[0] |= ((u64)devid) << 32; 173 cmd->raw_cmd[0] |= ((u64)devid) << 32;
174} 174}
175 175
@@ -802,6 +802,7 @@ static int its_alloc_tables(struct its_node *its)
802 int i; 802 int i;
803 int psz = SZ_64K; 803 int psz = SZ_64K;
804 u64 shr = GITS_BASER_InnerShareable; 804 u64 shr = GITS_BASER_InnerShareable;
805 u64 cache = GITS_BASER_WaWb;
805 806
806 for (i = 0; i < GITS_BASER_NR_REGS; i++) { 807 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
807 u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); 808 u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
@@ -848,7 +849,7 @@ retry_baser:
848 val = (virt_to_phys(base) | 849 val = (virt_to_phys(base) |
849 (type << GITS_BASER_TYPE_SHIFT) | 850 (type << GITS_BASER_TYPE_SHIFT) |
850 ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | 851 ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
851 GITS_BASER_WaWb | 852 cache |
852 shr | 853 shr |
853 GITS_BASER_VALID); 854 GITS_BASER_VALID);
854 855
@@ -874,9 +875,12 @@ retry_baser:
874 * Shareability didn't stick. Just use 875 * Shareability didn't stick. Just use
875 * whatever the read reported, which is likely 876 * whatever the read reported, which is likely
876 * to be the only thing this redistributor 877 * to be the only thing this redistributor
877 * supports. 878 * supports. If that's zero, make it
879 * non-cacheable as well.
878 */ 880 */
879 shr = tmp & GITS_BASER_SHAREABILITY_MASK; 881 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
882 if (!shr)
883 cache = GITS_BASER_nC;
880 goto retry_baser; 884 goto retry_baser;
881 } 885 }
882 886
@@ -980,16 +984,39 @@ static void its_cpu_init_lpis(void)
980 tmp = readq_relaxed(rbase + GICR_PROPBASER); 984 tmp = readq_relaxed(rbase + GICR_PROPBASER);
981 985
982 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { 986 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
987 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
988 /*
989 * The HW reports non-shareable, we must
990 * remove the cacheability attributes as
991 * well.
992 */
993 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
994 GICR_PROPBASER_CACHEABILITY_MASK);
995 val |= GICR_PROPBASER_nC;
996 writeq_relaxed(val, rbase + GICR_PROPBASER);
997 }
983 pr_info_once("GIC: using cache flushing for LPI property table\n"); 998 pr_info_once("GIC: using cache flushing for LPI property table\n");
984 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; 999 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
985 } 1000 }
986 1001
987 /* set PENDBASE */ 1002 /* set PENDBASE */
988 val = (page_to_phys(pend_page) | 1003 val = (page_to_phys(pend_page) |
989 GICR_PROPBASER_InnerShareable | 1004 GICR_PENDBASER_InnerShareable |
990 GICR_PROPBASER_WaWb); 1005 GICR_PENDBASER_WaWb);
991 1006
992 writeq_relaxed(val, rbase + GICR_PENDBASER); 1007 writeq_relaxed(val, rbase + GICR_PENDBASER);
1008 tmp = readq_relaxed(rbase + GICR_PENDBASER);
1009
1010 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
1011 /*
1012 * The HW reports non-shareable, we must remove the
1013 * cacheability attributes as well.
1014 */
1015 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
1016 GICR_PENDBASER_CACHEABILITY_MASK);
1017 val |= GICR_PENDBASER_nC;
1018 writeq_relaxed(val, rbase + GICR_PENDBASER);
1019 }
993 1020
994 /* Enable LPIs */ 1021 /* Enable LPIs */
995 val = readl_relaxed(rbase + GICR_CTLR); 1022 val = readl_relaxed(rbase + GICR_CTLR);
@@ -1026,7 +1053,7 @@ static void its_cpu_init_collection(void)
1026 * This ITS wants a linear CPU number. 1053 * This ITS wants a linear CPU number.
1027 */ 1054 */
1028 target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER); 1055 target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
1029 target = GICR_TYPER_CPU_NUMBER(target); 1056 target = GICR_TYPER_CPU_NUMBER(target) << 16;
1030 } 1057 }
1031 1058
1032 /* Perform collection mapping */ 1059 /* Perform collection mapping */
@@ -1422,14 +1449,26 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
1422 1449
1423 writeq_relaxed(baser, its->base + GITS_CBASER); 1450 writeq_relaxed(baser, its->base + GITS_CBASER);
1424 tmp = readq_relaxed(its->base + GITS_CBASER); 1451 tmp = readq_relaxed(its->base + GITS_CBASER);
1425 writeq_relaxed(0, its->base + GITS_CWRITER);
1426 writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
1427 1452
1428 if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) { 1453 if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
1454 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
1455 /*
1456 * The HW reports non-shareable, we must
1457 * remove the cacheability attributes as
1458 * well.
1459 */
1460 baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
1461 GITS_CBASER_CACHEABILITY_MASK);
1462 baser |= GITS_CBASER_nC;
1463 writeq_relaxed(baser, its->base + GITS_CBASER);
1464 }
1429 pr_info("ITS: using cache flushing for cmd queue\n"); 1465 pr_info("ITS: using cache flushing for cmd queue\n");
1430 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; 1466 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
1431 } 1467 }
1432 1468
1469 writeq_relaxed(0, its->base + GITS_CWRITER);
1470 writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
1471
1433 if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) { 1472 if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) {
1434 its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its); 1473 its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its);
1435 if (!its->domain) { 1474 if (!its->domain) {
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
index ee035ec4526b..169172d2ba05 100644
--- a/drivers/lguest/Kconfig
+++ b/drivers/lguest/Kconfig
@@ -1,6 +1,6 @@
1config LGUEST 1config LGUEST
2 tristate "Linux hypervisor example code" 2 tristate "Linux hypervisor example code"
3 depends on X86_32 && EVENTFD && TTY 3 depends on X86_32 && EVENTFD && TTY && PCI_DIRECT
4 select HVC_DRIVER 4 select HVC_DRIVER
5 ---help--- 5 ---help---
6 This is a very simple module which allows you to run 6 This is a very simple module which allows you to run
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 9b641b38b857..8001fe9e3434 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -433,7 +433,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
433 433
434 dm_get(md); 434 dm_get(md);
435 atomic_inc(&md->open_count); 435 atomic_inc(&md->open_count);
436
437out: 436out:
438 spin_unlock(&_minor_lock); 437 spin_unlock(&_minor_lock);
439 438
@@ -442,16 +441,20 @@ out:
442 441
443static void dm_blk_close(struct gendisk *disk, fmode_t mode) 442static void dm_blk_close(struct gendisk *disk, fmode_t mode)
444{ 443{
445 struct mapped_device *md = disk->private_data; 444 struct mapped_device *md;
446 445
447 spin_lock(&_minor_lock); 446 spin_lock(&_minor_lock);
448 447
448 md = disk->private_data;
449 if (WARN_ON(!md))
450 goto out;
451
449 if (atomic_dec_and_test(&md->open_count) && 452 if (atomic_dec_and_test(&md->open_count) &&
450 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 453 (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
451 queue_work(deferred_remove_workqueue, &deferred_remove_work); 454 queue_work(deferred_remove_workqueue, &deferred_remove_work);
452 455
453 dm_put(md); 456 dm_put(md);
454 457out:
455 spin_unlock(&_minor_lock); 458 spin_unlock(&_minor_lock);
456} 459}
457 460
@@ -2241,7 +2244,6 @@ static void free_dev(struct mapped_device *md)
2241 int minor = MINOR(disk_devt(md->disk)); 2244 int minor = MINOR(disk_devt(md->disk));
2242 2245
2243 unlock_fs(md); 2246 unlock_fs(md);
2244 bdput(md->bdev);
2245 destroy_workqueue(md->wq); 2247 destroy_workqueue(md->wq);
2246 2248
2247 if (md->kworker_task) 2249 if (md->kworker_task)
@@ -2252,19 +2254,22 @@ static void free_dev(struct mapped_device *md)
2252 mempool_destroy(md->rq_pool); 2254 mempool_destroy(md->rq_pool);
2253 if (md->bs) 2255 if (md->bs)
2254 bioset_free(md->bs); 2256 bioset_free(md->bs);
2255 blk_integrity_unregister(md->disk); 2257
2256 del_gendisk(md->disk);
2257 cleanup_srcu_struct(&md->io_barrier); 2258 cleanup_srcu_struct(&md->io_barrier);
2258 free_table_devices(&md->table_devices); 2259 free_table_devices(&md->table_devices);
2259 free_minor(minor); 2260 dm_stats_cleanup(&md->stats);
2260 2261
2261 spin_lock(&_minor_lock); 2262 spin_lock(&_minor_lock);
2262 md->disk->private_data = NULL; 2263 md->disk->private_data = NULL;
2263 spin_unlock(&_minor_lock); 2264 spin_unlock(&_minor_lock);
2264 2265 if (blk_get_integrity(md->disk))
2266 blk_integrity_unregister(md->disk);
2267 del_gendisk(md->disk);
2265 put_disk(md->disk); 2268 put_disk(md->disk);
2266 blk_cleanup_queue(md->queue); 2269 blk_cleanup_queue(md->queue);
2267 dm_stats_cleanup(&md->stats); 2270 bdput(md->bdev);
2271 free_minor(minor);
2272
2268 module_put(THIS_MODULE); 2273 module_put(THIS_MODULE);
2269 kfree(md); 2274 kfree(md);
2270} 2275}
@@ -2642,8 +2647,9 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
2642 2647
2643 might_sleep(); 2648 might_sleep();
2644 2649
2645 spin_lock(&_minor_lock);
2646 map = dm_get_live_table(md, &srcu_idx); 2650 map = dm_get_live_table(md, &srcu_idx);
2651
2652 spin_lock(&_minor_lock);
2647 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2653 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2648 set_bit(DMF_FREEING, &md->flags); 2654 set_bit(DMF_FREEING, &md->flags);
2649 spin_unlock(&_minor_lock); 2655 spin_unlock(&_minor_lock);
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index f38ec424872e..5615522f8d62 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -739,7 +739,7 @@ static int __init kempld_init(void)
739 for (id = kempld_dmi_table; 739 for (id = kempld_dmi_table;
740 id->matches[0].slot != DMI_NONE; id++) 740 id->matches[0].slot != DMI_NONE; id++)
741 if (strstr(id->ident, force_device_id)) 741 if (strstr(id->ident, force_device_id))
742 if (id->callback && id->callback(id)) 742 if (id->callback && !id->callback(id))
743 break; 743 break;
744 if (id->matches[0].slot == DMI_NONE) 744 if (id->matches[0].slot == DMI_NONE)
745 return -ENODEV; 745 return -ENODEV;
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index ede50244f265..dbd907d7170e 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -196,18 +196,27 @@ EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register);
196int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data) 196int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
197{ 197{
198 u16 value; 198 u16 value;
199 u8 *buf;
200 int ret;
199 201
200 if (!data) 202 if (!data)
201 return -EINVAL; 203 return -EINVAL;
202 *data = 0; 204
205 buf = kzalloc(sizeof(u8), GFP_KERNEL);
206 if (!buf)
207 return -ENOMEM;
203 208
204 addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT; 209 addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT;
205 value = swab16(addr); 210 value = swab16(addr);
206 211
207 return usb_control_msg(ucr->pusb_dev, 212 ret = usb_control_msg(ucr->pusb_dev,
208 usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP, 213 usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
209 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 214 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
210 value, 0, data, 1, 100); 215 value, 0, buf, 1, 100);
216 *data = *buf;
217
218 kfree(buf);
219 return ret;
211} 220}
212EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register); 221EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register);
213 222
@@ -288,18 +297,27 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
288int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status) 297int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
289{ 298{
290 int ret; 299 int ret;
300 u16 *buf;
291 301
292 if (!status) 302 if (!status)
293 return -EINVAL; 303 return -EINVAL;
294 304
295 if (polling_pipe == 0) 305 if (polling_pipe == 0) {
306 buf = kzalloc(sizeof(u16), GFP_KERNEL);
307 if (!buf)
308 return -ENOMEM;
309
296 ret = usb_control_msg(ucr->pusb_dev, 310 ret = usb_control_msg(ucr->pusb_dev,
297 usb_rcvctrlpipe(ucr->pusb_dev, 0), 311 usb_rcvctrlpipe(ucr->pusb_dev, 0),
298 RTSX_USB_REQ_POLL, 312 RTSX_USB_REQ_POLL,
299 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 313 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
300 0, 0, status, 2, 100); 314 0, 0, buf, 2, 100);
301 else 315 *status = *buf;
316
317 kfree(buf);
318 } else {
302 ret = rtsx_usb_get_status_with_bulk(ucr, status); 319 ret = rtsx_usb_get_status_with_bulk(ucr, status);
320 }
303 321
304 /* usb_control_msg may return positive when success */ 322 /* usb_control_msg may return positive when success */
305 if (ret < 0) 323 if (ret < 0)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 7b4684ccdb3f..78dde56ae6e6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3881,7 +3881,8 @@ static inline int bond_slave_override(struct bonding *bond,
3881 /* Find out if any slaves have the same mapping as this skb. */ 3881 /* Find out if any slaves have the same mapping as this skb. */
3882 bond_for_each_slave_rcu(bond, slave, iter) { 3882 bond_for_each_slave_rcu(bond, slave, iter) {
3883 if (slave->queue_id == skb->queue_mapping) { 3883 if (slave->queue_id == skb->queue_mapping) {
3884 if (bond_slave_can_tx(slave)) { 3884 if (bond_slave_is_up(slave) &&
3885 slave->link == BOND_LINK_UP) {
3885 bond_dev_queue_xmit(bond, skb, slave->dev); 3886 bond_dev_queue_xmit(bond, skb, slave->dev);
3886 return 0; 3887 return 0;
3887 } 3888 }
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 27bbc56de15f..9da06537237f 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -70,7 +70,6 @@ struct ser_device {
70 struct tty_struct *tty; 70 struct tty_struct *tty;
71 bool tx_started; 71 bool tx_started;
72 unsigned long state; 72 unsigned long state;
73 char *tty_name;
74#ifdef CONFIG_DEBUG_FS 73#ifdef CONFIG_DEBUG_FS
75 struct dentry *debugfs_tty_dir; 74 struct dentry *debugfs_tty_dir;
76 struct debugfs_blob_wrapper tx_blob; 75 struct debugfs_blob_wrapper tx_blob;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 80c46ad4cee4..ad0a7e8c2c2b 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -592,13 +592,12 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
592 rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ? 592 rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ?
593 CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE; 593 CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
594 new_state = max(tx_state, rx_state); 594 new_state = max(tx_state, rx_state);
595 } else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) { 595 } else {
596 __flexcan_get_berr_counter(dev, &bec); 596 __flexcan_get_berr_counter(dev, &bec);
597 new_state = CAN_STATE_ERROR_PASSIVE; 597 new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ?
598 CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF;
598 rx_state = bec.rxerr >= bec.txerr ? new_state : 0; 599 rx_state = bec.rxerr >= bec.txerr ? new_state : 0;
599 tx_state = bec.rxerr <= bec.txerr ? new_state : 0; 600 tx_state = bec.rxerr <= bec.txerr ? new_state : 0;
600 } else {
601 new_state = CAN_STATE_BUS_OFF;
602 } 601 }
603 602
604 /* state hasn't changed */ 603 /* state hasn't changed */
@@ -1158,12 +1157,19 @@ static int flexcan_probe(struct platform_device *pdev)
1158 const struct flexcan_devtype_data *devtype_data; 1157 const struct flexcan_devtype_data *devtype_data;
1159 struct net_device *dev; 1158 struct net_device *dev;
1160 struct flexcan_priv *priv; 1159 struct flexcan_priv *priv;
1160 struct regulator *reg_xceiver;
1161 struct resource *mem; 1161 struct resource *mem;
1162 struct clk *clk_ipg = NULL, *clk_per = NULL; 1162 struct clk *clk_ipg = NULL, *clk_per = NULL;
1163 void __iomem *base; 1163 void __iomem *base;
1164 int err, irq; 1164 int err, irq;
1165 u32 clock_freq = 0; 1165 u32 clock_freq = 0;
1166 1166
1167 reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
1168 if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
1169 return -EPROBE_DEFER;
1170 else if (IS_ERR(reg_xceiver))
1171 reg_xceiver = NULL;
1172
1167 if (pdev->dev.of_node) 1173 if (pdev->dev.of_node)
1168 of_property_read_u32(pdev->dev.of_node, 1174 of_property_read_u32(pdev->dev.of_node,
1169 "clock-frequency", &clock_freq); 1175 "clock-frequency", &clock_freq);
@@ -1224,9 +1230,7 @@ static int flexcan_probe(struct platform_device *pdev)
1224 priv->pdata = dev_get_platdata(&pdev->dev); 1230 priv->pdata = dev_get_platdata(&pdev->dev);
1225 priv->devtype_data = devtype_data; 1231 priv->devtype_data = devtype_data;
1226 1232
1227 priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver"); 1233 priv->reg_xceiver = reg_xceiver;
1228 if (IS_ERR(priv->reg_xceiver))
1229 priv->reg_xceiver = NULL;
1230 1234
1231 netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); 1235 netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
1232 1236
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 9376f5e5b94e..866bac0ae7e9 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -123,7 +123,7 @@ MODULE_LICENSE("GPL v2");
123 * CPC_MSG_TYPE_EXT_CAN_FRAME or CPC_MSG_TYPE_EXT_RTR_FRAME. 123 * CPC_MSG_TYPE_EXT_CAN_FRAME or CPC_MSG_TYPE_EXT_RTR_FRAME.
124 */ 124 */
125struct cpc_can_msg { 125struct cpc_can_msg {
126 u32 id; 126 __le32 id;
127 u8 length; 127 u8 length;
128 u8 msg[8]; 128 u8 msg[8];
129}; 129};
@@ -200,8 +200,8 @@ struct __packed ems_cpc_msg {
200 u8 type; /* type of message */ 200 u8 type; /* type of message */
201 u8 length; /* length of data within union 'msg' */ 201 u8 length; /* length of data within union 'msg' */
202 u8 msgid; /* confirmation handle */ 202 u8 msgid; /* confirmation handle */
203 u32 ts_sec; /* timestamp in seconds */ 203 __le32 ts_sec; /* timestamp in seconds */
204 u32 ts_nsec; /* timestamp in nano seconds */ 204 __le32 ts_nsec; /* timestamp in nano seconds */
205 205
206 union { 206 union {
207 u8 generic[64]; 207 u8 generic[64];
@@ -765,7 +765,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
765 765
766 msg = (struct ems_cpc_msg *)&buf[CPC_HEADER_SIZE]; 766 msg = (struct ems_cpc_msg *)&buf[CPC_HEADER_SIZE];
767 767
768 msg->msg.can_msg.id = cf->can_id & CAN_ERR_MASK; 768 msg->msg.can_msg.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK);
769 msg->msg.can_msg.length = cf->can_dlc; 769 msg->msg.can_msg.length = cf->can_dlc;
770 770
771 if (cf->can_id & CAN_RTR_FLAG) { 771 if (cf->can_id & CAN_RTR_FLAG) {
@@ -783,9 +783,6 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
783 msg->length = CPC_CAN_MSG_MIN_SIZE + cf->can_dlc; 783 msg->length = CPC_CAN_MSG_MIN_SIZE + cf->can_dlc;
784 } 784 }
785 785
786 /* Respect byte order */
787 msg->msg.can_msg.id = cpu_to_le32(msg->msg.can_msg.id);
788
789 for (i = 0; i < MAX_TX_URBS; i++) { 786 for (i = 0; i < MAX_TX_URBS; i++) {
790 if (dev->tx_contexts[i].echo_index == MAX_TX_URBS) { 787 if (dev->tx_contexts[i].echo_index == MAX_TX_URBS) {
791 context = &dev->tx_contexts[i]; 788 context = &dev->tx_contexts[i];
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 009acc8641fc..8b4d3e6875eb 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -901,6 +901,8 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
901 } 901 }
902 902
903 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 903 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
904 if (!dev)
905 return -ENOMEM;
904 init_usb_anchor(&dev->rx_submitted); 906 init_usb_anchor(&dev->rx_submitted);
905 907
906 atomic_set(&dev->active_channels, 0); 908 atomic_set(&dev->active_channels, 0);
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index d269ae0b072a..4643914859b2 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -25,7 +25,6 @@
25#include <linux/can/dev.h> 25#include <linux/can/dev.h>
26#include <linux/can/error.h> 26#include <linux/can/error.h>
27 27
28#define MAX_TX_URBS 16
29#define MAX_RX_URBS 4 28#define MAX_RX_URBS 4
30#define START_TIMEOUT 1000 /* msecs */ 29#define START_TIMEOUT 1000 /* msecs */
31#define STOP_TIMEOUT 1000 /* msecs */ 30#define STOP_TIMEOUT 1000 /* msecs */
@@ -443,6 +442,7 @@ struct kvaser_usb_error_summary {
443 }; 442 };
444}; 443};
445 444
445/* Context for an outstanding, not yet ACKed, transmission */
446struct kvaser_usb_tx_urb_context { 446struct kvaser_usb_tx_urb_context {
447 struct kvaser_usb_net_priv *priv; 447 struct kvaser_usb_net_priv *priv;
448 u32 echo_index; 448 u32 echo_index;
@@ -456,8 +456,13 @@ struct kvaser_usb {
456 struct usb_endpoint_descriptor *bulk_in, *bulk_out; 456 struct usb_endpoint_descriptor *bulk_in, *bulk_out;
457 struct usb_anchor rx_submitted; 457 struct usb_anchor rx_submitted;
458 458
459 /* @max_tx_urbs: Firmware-reported maximum number of oustanding,
460 * not yet ACKed, transmissions on this device. This value is
461 * also used as a sentinel for marking free tx contexts.
462 */
459 u32 fw_version; 463 u32 fw_version;
460 unsigned int nchannels; 464 unsigned int nchannels;
465 unsigned int max_tx_urbs;
461 enum kvaser_usb_family family; 466 enum kvaser_usb_family family;
462 467
463 bool rxinitdone; 468 bool rxinitdone;
@@ -467,19 +472,18 @@ struct kvaser_usb {
467 472
468struct kvaser_usb_net_priv { 473struct kvaser_usb_net_priv {
469 struct can_priv can; 474 struct can_priv can;
470 475 struct can_berr_counter bec;
471 spinlock_t tx_contexts_lock;
472 int active_tx_contexts;
473 struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
474
475 struct usb_anchor tx_submitted;
476 struct completion start_comp, stop_comp;
477 476
478 struct kvaser_usb *dev; 477 struct kvaser_usb *dev;
479 struct net_device *netdev; 478 struct net_device *netdev;
480 int channel; 479 int channel;
481 480
482 struct can_berr_counter bec; 481 struct completion start_comp, stop_comp;
482 struct usb_anchor tx_submitted;
483
484 spinlock_t tx_contexts_lock;
485 int active_tx_contexts;
486 struct kvaser_usb_tx_urb_context tx_contexts[];
483}; 487};
484 488
485static const struct usb_device_id kvaser_usb_table[] = { 489static const struct usb_device_id kvaser_usb_table[] = {
@@ -592,8 +596,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
592 * for further details. 596 * for further details.
593 */ 597 */
594 if (tmp->len == 0) { 598 if (tmp->len == 0) {
595 pos = round_up(pos, 599 pos = round_up(pos, le16_to_cpu(dev->bulk_in->
596 dev->bulk_in->wMaxPacketSize); 600 wMaxPacketSize));
597 continue; 601 continue;
598 } 602 }
599 603
@@ -657,9 +661,13 @@ static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
657 switch (dev->family) { 661 switch (dev->family) {
658 case KVASER_LEAF: 662 case KVASER_LEAF:
659 dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version); 663 dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version);
664 dev->max_tx_urbs =
665 le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx);
660 break; 666 break;
661 case KVASER_USBCAN: 667 case KVASER_USBCAN:
662 dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version); 668 dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version);
669 dev->max_tx_urbs =
670 le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx);
663 break; 671 break;
664 } 672 }
665 673
@@ -715,7 +723,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
715 723
716 stats = &priv->netdev->stats; 724 stats = &priv->netdev->stats;
717 725
718 context = &priv->tx_contexts[tid % MAX_TX_URBS]; 726 context = &priv->tx_contexts[tid % dev->max_tx_urbs];
719 727
720 /* Sometimes the state change doesn't come after a bus-off event */ 728 /* Sometimes the state change doesn't come after a bus-off event */
721 if (priv->can.restart_ms && 729 if (priv->can.restart_ms &&
@@ -744,7 +752,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
744 spin_lock_irqsave(&priv->tx_contexts_lock, flags); 752 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
745 753
746 can_get_echo_skb(priv->netdev, context->echo_index); 754 can_get_echo_skb(priv->netdev, context->echo_index);
747 context->echo_index = MAX_TX_URBS; 755 context->echo_index = dev->max_tx_urbs;
748 --priv->active_tx_contexts; 756 --priv->active_tx_contexts;
749 netif_wake_queue(priv->netdev); 757 netif_wake_queue(priv->netdev);
750 758
@@ -1329,7 +1337,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
1329 * number of events in case of a heavy rx load on the bus. 1337 * number of events in case of a heavy rx load on the bus.
1330 */ 1338 */
1331 if (msg->len == 0) { 1339 if (msg->len == 0) {
1332 pos = round_up(pos, dev->bulk_in->wMaxPacketSize); 1340 pos = round_up(pos, le16_to_cpu(dev->bulk_in->
1341 wMaxPacketSize));
1333 continue; 1342 continue;
1334 } 1343 }
1335 1344
@@ -1512,11 +1521,13 @@ error:
1512 1521
1513static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv) 1522static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
1514{ 1523{
1515 int i; 1524 int i, max_tx_urbs;
1525
1526 max_tx_urbs = priv->dev->max_tx_urbs;
1516 1527
1517 priv->active_tx_contexts = 0; 1528 priv->active_tx_contexts = 0;
1518 for (i = 0; i < MAX_TX_URBS; i++) 1529 for (i = 0; i < max_tx_urbs; i++)
1519 priv->tx_contexts[i].echo_index = MAX_TX_URBS; 1530 priv->tx_contexts[i].echo_index = max_tx_urbs;
1520} 1531}
1521 1532
1522/* This method might sleep. Do not call it in the atomic context 1533/* This method might sleep. Do not call it in the atomic context
@@ -1702,14 +1713,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1702 *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; 1713 *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
1703 1714
1704 spin_lock_irqsave(&priv->tx_contexts_lock, flags); 1715 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
1705 for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) { 1716 for (i = 0; i < dev->max_tx_urbs; i++) {
1706 if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { 1717 if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) {
1707 context = &priv->tx_contexts[i]; 1718 context = &priv->tx_contexts[i];
1708 1719
1709 context->echo_index = i; 1720 context->echo_index = i;
1710 can_put_echo_skb(skb, netdev, context->echo_index); 1721 can_put_echo_skb(skb, netdev, context->echo_index);
1711 ++priv->active_tx_contexts; 1722 ++priv->active_tx_contexts;
1712 if (priv->active_tx_contexts >= MAX_TX_URBS) 1723 if (priv->active_tx_contexts >= dev->max_tx_urbs)
1713 netif_stop_queue(netdev); 1724 netif_stop_queue(netdev);
1714 1725
1715 break; 1726 break;
@@ -1743,7 +1754,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1743 spin_lock_irqsave(&priv->tx_contexts_lock, flags); 1754 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
1744 1755
1745 can_free_echo_skb(netdev, context->echo_index); 1756 can_free_echo_skb(netdev, context->echo_index);
1746 context->echo_index = MAX_TX_URBS; 1757 context->echo_index = dev->max_tx_urbs;
1747 --priv->active_tx_contexts; 1758 --priv->active_tx_contexts;
1748 netif_wake_queue(netdev); 1759 netif_wake_queue(netdev);
1749 1760
@@ -1881,7 +1892,9 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
1881 if (err) 1892 if (err)
1882 return err; 1893 return err;
1883 1894
1884 netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS); 1895 netdev = alloc_candev(sizeof(*priv) +
1896 dev->max_tx_urbs * sizeof(*priv->tx_contexts),
1897 dev->max_tx_urbs);
1885 if (!netdev) { 1898 if (!netdev) {
1886 dev_err(&intf->dev, "Cannot alloc candev\n"); 1899 dev_err(&intf->dev, "Cannot alloc candev\n");
1887 return -ENOMEM; 1900 return -ENOMEM;
@@ -2009,6 +2022,13 @@ static int kvaser_usb_probe(struct usb_interface *intf,
2009 return err; 2022 return err;
2010 } 2023 }
2011 2024
2025 dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
2026 ((dev->fw_version >> 24) & 0xff),
2027 ((dev->fw_version >> 16) & 0xff),
2028 (dev->fw_version & 0xffff));
2029
2030 dev_dbg(&intf->dev, "Max oustanding tx = %d URBs\n", dev->max_tx_urbs);
2031
2012 err = kvaser_usb_get_card_info(dev); 2032 err = kvaser_usb_get_card_info(dev);
2013 if (err) { 2033 if (err) {
2014 dev_err(&intf->dev, 2034 dev_err(&intf->dev,
@@ -2016,11 +2036,6 @@ static int kvaser_usb_probe(struct usb_interface *intf,
2016 return err; 2036 return err;
2017 } 2037 }
2018 2038
2019 dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
2020 ((dev->fw_version >> 24) & 0xff),
2021 ((dev->fw_version >> 16) & 0xff),
2022 (dev->fw_version & 0xffff));
2023
2024 for (i = 0; i < dev->nchannels; i++) { 2039 for (i = 0; i < dev->nchannels; i++) {
2025 err = kvaser_usb_init_one(intf, id, i); 2040 err = kvaser_usb_init_one(intf, id, i);
2026 if (err) { 2041 if (err) {
diff --git a/drivers/net/can/usb/peak_usb/pcan_ucan.h b/drivers/net/can/usb/peak_usb/pcan_ucan.h
index 1ba7c25002e1..e8fc4952c6b0 100644
--- a/drivers/net/can/usb/peak_usb/pcan_ucan.h
+++ b/drivers/net/can/usb/peak_usb/pcan_ucan.h
@@ -26,8 +26,8 @@
26#define PUCAN_CMD_FILTER_STD 0x008 26#define PUCAN_CMD_FILTER_STD 0x008
27#define PUCAN_CMD_TX_ABORT 0x009 27#define PUCAN_CMD_TX_ABORT 0x009
28#define PUCAN_CMD_WR_ERR_CNT 0x00a 28#define PUCAN_CMD_WR_ERR_CNT 0x00a
29#define PUCAN_CMD_RX_FRAME_ENABLE 0x00b 29#define PUCAN_CMD_SET_EN_OPTION 0x00b
30#define PUCAN_CMD_RX_FRAME_DISABLE 0x00c 30#define PUCAN_CMD_CLR_DIS_OPTION 0x00c
31#define PUCAN_CMD_END_OF_COLLECTION 0x3ff 31#define PUCAN_CMD_END_OF_COLLECTION 0x3ff
32 32
33/* uCAN received messages list */ 33/* uCAN received messages list */
@@ -101,14 +101,15 @@ struct __packed pucan_wr_err_cnt {
101 u16 unused; 101 u16 unused;
102}; 102};
103 103
104/* uCAN RX_FRAME_ENABLE command fields */ 104/* uCAN SET_EN/CLR_DIS _OPTION command fields */
105#define PUCAN_FLTEXT_ERROR 0x0001 105#define PUCAN_OPTION_ERROR 0x0001
106#define PUCAN_FLTEXT_BUSLOAD 0x0002 106#define PUCAN_OPTION_BUSLOAD 0x0002
107#define PUCAN_OPTION_CANDFDISO 0x0004
107 108
108struct __packed pucan_filter_ext { 109struct __packed pucan_options {
109 __le16 opcode_channel; 110 __le16 opcode_channel;
110 111
111 __le16 ext_mask; 112 __le16 options;
112 u32 unused; 113 u32 unused;
113}; 114};
114 115
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 0bac0f14edc3..09d14e70abd7 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -110,13 +110,13 @@ struct __packed pcan_ufd_led {
110 u8 unused[5]; 110 u8 unused[5];
111}; 111};
112 112
113/* Extended usage of uCAN commands CMD_RX_FRAME_xxxABLE for PCAN-USB Pro FD */ 113/* Extended usage of uCAN commands CMD_xxx_xx_OPTION for PCAN-USB Pro FD */
114#define PCAN_UFD_FLTEXT_CALIBRATION 0x8000 114#define PCAN_UFD_FLTEXT_CALIBRATION 0x8000
115 115
116struct __packed pcan_ufd_filter_ext { 116struct __packed pcan_ufd_options {
117 __le16 opcode_channel; 117 __le16 opcode_channel;
118 118
119 __le16 ext_mask; 119 __le16 ucan_mask;
120 u16 unused; 120 u16 unused;
121 __le16 usb_mask; 121 __le16 usb_mask;
122}; 122};
@@ -182,7 +182,7 @@ static inline void *pcan_usb_fd_cmd_buffer(struct peak_usb_device *dev)
182static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) 182static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
183{ 183{
184 void *cmd_head = pcan_usb_fd_cmd_buffer(dev); 184 void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
185 int err; 185 int err = 0;
186 u8 *packet_ptr; 186 u8 *packet_ptr;
187 int i, n = 1, packet_len; 187 int i, n = 1, packet_len;
188 ptrdiff_t cmd_len; 188 ptrdiff_t cmd_len;
@@ -251,6 +251,27 @@ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
251 /* moves the pointer forward */ 251 /* moves the pointer forward */
252 pc += sizeof(struct pucan_wr_err_cnt); 252 pc += sizeof(struct pucan_wr_err_cnt);
253 253
254 /* add command to switch from ISO to non-ISO mode, if fw allows it */
255 if (dev->can.ctrlmode_supported & CAN_CTRLMODE_FD_NON_ISO) {
256 struct pucan_options *puo = (struct pucan_options *)pc;
257
258 puo->opcode_channel =
259 (dev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ?
260 pucan_cmd_opcode_channel(dev,
261 PUCAN_CMD_CLR_DIS_OPTION) :
262 pucan_cmd_opcode_channel(dev, PUCAN_CMD_SET_EN_OPTION);
263
264 puo->options = cpu_to_le16(PUCAN_OPTION_CANDFDISO);
265
266 /* to be sure that no other extended bits will be taken into
267 * account
268 */
269 puo->unused = 0;
270
271 /* moves the pointer forward */
272 pc += sizeof(struct pucan_options);
273 }
274
254 /* next, go back to operational mode */ 275 /* next, go back to operational mode */
255 cmd = (struct pucan_command *)pc; 276 cmd = (struct pucan_command *)pc;
256 cmd->opcode_channel = pucan_cmd_opcode_channel(dev, 277 cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
@@ -321,21 +342,21 @@ static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx,
321 return pcan_usb_fd_send_cmd(dev, cmd); 342 return pcan_usb_fd_send_cmd(dev, cmd);
322} 343}
323 344
324/* set/unset notifications filter: 345/* set/unset options
325 * 346 *
326 * onoff sets(1)/unset(0) notifications 347 * onoff set(1)/unset(0) options
327 * mask each bit defines a kind of notification to set/unset 348 * mask each bit defines a kind of options to set/unset
328 */ 349 */
329static int pcan_usb_fd_set_filter_ext(struct peak_usb_device *dev, 350static int pcan_usb_fd_set_options(struct peak_usb_device *dev,
330 bool onoff, u16 ext_mask, u16 usb_mask) 351 bool onoff, u16 ucan_mask, u16 usb_mask)
331{ 352{
332 struct pcan_ufd_filter_ext *cmd = pcan_usb_fd_cmd_buffer(dev); 353 struct pcan_ufd_options *cmd = pcan_usb_fd_cmd_buffer(dev);
333 354
334 cmd->opcode_channel = pucan_cmd_opcode_channel(dev, 355 cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
335 (onoff) ? PUCAN_CMD_RX_FRAME_ENABLE : 356 (onoff) ? PUCAN_CMD_SET_EN_OPTION :
336 PUCAN_CMD_RX_FRAME_DISABLE); 357 PUCAN_CMD_CLR_DIS_OPTION);
337 358
338 cmd->ext_mask = cpu_to_le16(ext_mask); 359 cmd->ucan_mask = cpu_to_le16(ucan_mask);
339 cmd->usb_mask = cpu_to_le16(usb_mask); 360 cmd->usb_mask = cpu_to_le16(usb_mask);
340 361
341 /* send the command */ 362 /* send the command */
@@ -770,9 +791,9 @@ static int pcan_usb_fd_start(struct peak_usb_device *dev)
770 &pcan_usb_pro_fd); 791 &pcan_usb_pro_fd);
771 792
772 /* enable USB calibration messages */ 793 /* enable USB calibration messages */
773 err = pcan_usb_fd_set_filter_ext(dev, 1, 794 err = pcan_usb_fd_set_options(dev, 1,
774 PUCAN_FLTEXT_ERROR, 795 PUCAN_OPTION_ERROR,
775 PCAN_UFD_FLTEXT_CALIBRATION); 796 PCAN_UFD_FLTEXT_CALIBRATION);
776 } 797 }
777 798
778 pdev->usb_if->dev_opened_count++; 799 pdev->usb_if->dev_opened_count++;
@@ -806,9 +827,9 @@ static int pcan_usb_fd_stop(struct peak_usb_device *dev)
806 827
807 /* turn off special msgs for that interface if no other dev opened */ 828 /* turn off special msgs for that interface if no other dev opened */
808 if (pdev->usb_if->dev_opened_count == 1) 829 if (pdev->usb_if->dev_opened_count == 1)
809 pcan_usb_fd_set_filter_ext(dev, 0, 830 pcan_usb_fd_set_options(dev, 0,
810 PUCAN_FLTEXT_ERROR, 831 PUCAN_OPTION_ERROR,
811 PCAN_UFD_FLTEXT_CALIBRATION); 832 PCAN_UFD_FLTEXT_CALIBRATION);
812 pdev->usb_if->dev_opened_count--; 833 pdev->usb_if->dev_opened_count--;
813 834
814 return 0; 835 return 0;
@@ -860,8 +881,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
860 pdev->usb_if->fw_info.fw_version[2], 881 pdev->usb_if->fw_info.fw_version[2],
861 dev->adapter->ctrl_count); 882 dev->adapter->ctrl_count);
862 883
863 /* the currently supported hw is non-ISO */ 884 /* check for ability to switch between ISO/non-ISO modes */
864 dev->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO; 885 if (pdev->usb_if->fw_info.fw_version[0] >= 2) {
886 /* firmware >= 2.x supports ISO/non-ISO switching */
887 dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
888 } else {
889 /* firmware < 2.x only supports fixed(!) non-ISO */
890 dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
891 }
865 892
866 /* tell the hardware the can driver is running */ 893 /* tell the hardware the can driver is running */
867 err = pcan_usb_fd_drv_loaded(dev, 1); 894 err = pcan_usb_fd_drv_loaded(dev, 1);
@@ -937,9 +964,9 @@ static void pcan_usb_fd_exit(struct peak_usb_device *dev)
937 if (dev->ctrl_idx == 0) { 964 if (dev->ctrl_idx == 0) {
938 /* turn off calibration message if any device were opened */ 965 /* turn off calibration message if any device were opened */
939 if (pdev->usb_if->dev_opened_count > 0) 966 if (pdev->usb_if->dev_opened_count > 0)
940 pcan_usb_fd_set_filter_ext(dev, 0, 967 pcan_usb_fd_set_options(dev, 0,
941 PUCAN_FLTEXT_ERROR, 968 PUCAN_OPTION_ERROR,
942 PCAN_UFD_FLTEXT_CALIBRATION); 969 PCAN_UFD_FLTEXT_CALIBRATION);
943 970
944 /* tell USB adapter that the driver is being unloaded */ 971 /* tell USB adapter that the driver is being unloaded */
945 pcan_usb_fd_drv_loaded(dev, 0); 972 pcan_usb_fd_drv_loaded(dev, 0);
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index 2d7e1ffe9fdc..b4af6d5aff7c 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -25,66 +25,33 @@ static char *mv88e6123_61_65_probe(struct device *host_dev, int sw_addr)
25 if (bus == NULL) 25 if (bus == NULL)
26 return NULL; 26 return NULL;
27 27
28 ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); 28 ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
29 if (ret >= 0) { 29 if (ret >= 0) {
30 if (ret == 0x1212) 30 if (ret == PORT_SWITCH_ID_6123_A1)
31 return "Marvell 88E6123 (A1)"; 31 return "Marvell 88E6123 (A1)";
32 if (ret == 0x1213) 32 if (ret == PORT_SWITCH_ID_6123_A2)
33 return "Marvell 88E6123 (A2)"; 33 return "Marvell 88E6123 (A2)";
34 if ((ret & 0xfff0) == 0x1210) 34 if ((ret & 0xfff0) == PORT_SWITCH_ID_6123)
35 return "Marvell 88E6123"; 35 return "Marvell 88E6123";
36 36
37 if (ret == 0x1612) 37 if (ret == PORT_SWITCH_ID_6161_A1)
38 return "Marvell 88E6161 (A1)"; 38 return "Marvell 88E6161 (A1)";
39 if (ret == 0x1613) 39 if (ret == PORT_SWITCH_ID_6161_A2)
40 return "Marvell 88E6161 (A2)"; 40 return "Marvell 88E6161 (A2)";
41 if ((ret & 0xfff0) == 0x1610) 41 if ((ret & 0xfff0) == PORT_SWITCH_ID_6161)
42 return "Marvell 88E6161"; 42 return "Marvell 88E6161";
43 43
44 if (ret == 0x1652) 44 if (ret == PORT_SWITCH_ID_6165_A1)
45 return "Marvell 88E6165 (A1)"; 45 return "Marvell 88E6165 (A1)";
46 if (ret == 0x1653) 46 if (ret == PORT_SWITCH_ID_6165_A2)
47 return "Marvell 88e6165 (A2)"; 47 return "Marvell 88e6165 (A2)";
48 if ((ret & 0xfff0) == 0x1650) 48 if ((ret & 0xfff0) == PORT_SWITCH_ID_6165)
49 return "Marvell 88E6165"; 49 return "Marvell 88E6165";
50 } 50 }
51 51
52 return NULL; 52 return NULL;
53} 53}
54 54
55static int mv88e6123_61_65_switch_reset(struct dsa_switch *ds)
56{
57 int i;
58 int ret;
59 unsigned long timeout;
60
61 /* Set all ports to the disabled state. */
62 for (i = 0; i < 8; i++) {
63 ret = REG_READ(REG_PORT(i), 0x04);
64 REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
65 }
66
67 /* Wait for transmit queues to drain. */
68 usleep_range(2000, 4000);
69
70 /* Reset the switch. */
71 REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
72
73 /* Wait up to one second for reset to complete. */
74 timeout = jiffies + 1 * HZ;
75 while (time_before(jiffies, timeout)) {
76 ret = REG_READ(REG_GLOBAL, 0x00);
77 if ((ret & 0xc800) == 0xc800)
78 break;
79
80 usleep_range(1000, 2000);
81 }
82 if (time_after(jiffies, timeout))
83 return -ETIMEDOUT;
84
85 return 0;
86}
87
88static int mv88e6123_61_65_setup_global(struct dsa_switch *ds) 55static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
89{ 56{
90 int ret; 57 int ret;
@@ -271,6 +238,7 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
271 238
272static int mv88e6123_61_65_setup(struct dsa_switch *ds) 239static int mv88e6123_61_65_setup(struct dsa_switch *ds)
273{ 240{
241 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
274 int i; 242 int i;
275 int ret; 243 int ret;
276 244
@@ -278,7 +246,19 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
278 if (ret < 0) 246 if (ret < 0)
279 return ret; 247 return ret;
280 248
281 ret = mv88e6123_61_65_switch_reset(ds); 249 switch (ps->id) {
250 case PORT_SWITCH_ID_6123:
251 ps->num_ports = 3;
252 break;
253 case PORT_SWITCH_ID_6161:
254 case PORT_SWITCH_ID_6165:
255 ps->num_ports = 6;
256 break;
257 default:
258 return -ENODEV;
259 }
260
261 ret = mv88e6xxx_switch_reset(ds, false);
282 if (ret < 0) 262 if (ret < 0)
283 return ret; 263 return ret;
284 264
@@ -288,7 +268,7 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
288 if (ret < 0) 268 if (ret < 0)
289 return ret; 269 return ret;
290 270
291 for (i = 0; i < 6; i++) { 271 for (i = 0; i < ps->num_ports; i++) {
292 ret = mv88e6123_61_65_setup_port(ds, i); 272 ret = mv88e6123_61_65_setup_port(ds, i);
293 if (ret < 0) 273 if (ret < 0)
294 return ret; 274 return ret;
@@ -297,108 +277,18 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
297 return 0; 277 return 0;
298} 278}
299 279
300static int mv88e6123_61_65_port_to_phy_addr(int port)
301{
302 if (port >= 0 && port <= 4)
303 return port;
304 return -1;
305}
306
307static int
308mv88e6123_61_65_phy_read(struct dsa_switch *ds, int port, int regnum)
309{
310 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
311 int addr = mv88e6123_61_65_port_to_phy_addr(port);
312 int ret;
313
314 mutex_lock(&ps->phy_mutex);
315 ret = mv88e6xxx_phy_read(ds, addr, regnum);
316 mutex_unlock(&ps->phy_mutex);
317 return ret;
318}
319
320static int
321mv88e6123_61_65_phy_write(struct dsa_switch *ds,
322 int port, int regnum, u16 val)
323{
324 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
325 int addr = mv88e6123_61_65_port_to_phy_addr(port);
326 int ret;
327
328 mutex_lock(&ps->phy_mutex);
329 ret = mv88e6xxx_phy_write(ds, addr, regnum, val);
330 mutex_unlock(&ps->phy_mutex);
331 return ret;
332}
333
334static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = {
335 { "in_good_octets", 8, 0x00, },
336 { "in_bad_octets", 4, 0x02, },
337 { "in_unicast", 4, 0x04, },
338 { "in_broadcasts", 4, 0x06, },
339 { "in_multicasts", 4, 0x07, },
340 { "in_pause", 4, 0x16, },
341 { "in_undersize", 4, 0x18, },
342 { "in_fragments", 4, 0x19, },
343 { "in_oversize", 4, 0x1a, },
344 { "in_jabber", 4, 0x1b, },
345 { "in_rx_error", 4, 0x1c, },
346 { "in_fcs_error", 4, 0x1d, },
347 { "out_octets", 8, 0x0e, },
348 { "out_unicast", 4, 0x10, },
349 { "out_broadcasts", 4, 0x13, },
350 { "out_multicasts", 4, 0x12, },
351 { "out_pause", 4, 0x15, },
352 { "excessive", 4, 0x11, },
353 { "collisions", 4, 0x1e, },
354 { "deferred", 4, 0x05, },
355 { "single", 4, 0x14, },
356 { "multiple", 4, 0x17, },
357 { "out_fcs_error", 4, 0x03, },
358 { "late", 4, 0x1f, },
359 { "hist_64bytes", 4, 0x08, },
360 { "hist_65_127bytes", 4, 0x09, },
361 { "hist_128_255bytes", 4, 0x0a, },
362 { "hist_256_511bytes", 4, 0x0b, },
363 { "hist_512_1023bytes", 4, 0x0c, },
364 { "hist_1024_max_bytes", 4, 0x0d, },
365 { "sw_in_discards", 4, 0x110, },
366 { "sw_in_filtered", 2, 0x112, },
367 { "sw_out_filtered", 2, 0x113, },
368};
369
370static void
371mv88e6123_61_65_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
372{
373 mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats),
374 mv88e6123_61_65_hw_stats, port, data);
375}
376
377static void
378mv88e6123_61_65_get_ethtool_stats(struct dsa_switch *ds,
379 int port, uint64_t *data)
380{
381 mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats),
382 mv88e6123_61_65_hw_stats, port, data);
383}
384
385static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds)
386{
387 return ARRAY_SIZE(mv88e6123_61_65_hw_stats);
388}
389
390struct dsa_switch_driver mv88e6123_61_65_switch_driver = { 280struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
391 .tag_protocol = DSA_TAG_PROTO_EDSA, 281 .tag_protocol = DSA_TAG_PROTO_EDSA,
392 .priv_size = sizeof(struct mv88e6xxx_priv_state), 282 .priv_size = sizeof(struct mv88e6xxx_priv_state),
393 .probe = mv88e6123_61_65_probe, 283 .probe = mv88e6123_61_65_probe,
394 .setup = mv88e6123_61_65_setup, 284 .setup = mv88e6123_61_65_setup,
395 .set_addr = mv88e6xxx_set_addr_indirect, 285 .set_addr = mv88e6xxx_set_addr_indirect,
396 .phy_read = mv88e6123_61_65_phy_read, 286 .phy_read = mv88e6xxx_phy_read,
397 .phy_write = mv88e6123_61_65_phy_write, 287 .phy_write = mv88e6xxx_phy_write,
398 .poll_link = mv88e6xxx_poll_link, 288 .poll_link = mv88e6xxx_poll_link,
399 .get_strings = mv88e6123_61_65_get_strings, 289 .get_strings = mv88e6xxx_get_strings,
400 .get_ethtool_stats = mv88e6123_61_65_get_ethtool_stats, 290 .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
401 .get_sset_count = mv88e6123_61_65_get_sset_count, 291 .get_sset_count = mv88e6xxx_get_sset_count,
402#ifdef CONFIG_NET_DSA_HWMON 292#ifdef CONFIG_NET_DSA_HWMON
403 .get_temp = mv88e6xxx_get_temp, 293 .get_temp = mv88e6xxx_get_temp,
404#endif 294#endif
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index 2540ef0142af..e54824fa0d95 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -17,12 +17,6 @@
17#include <net/dsa.h> 17#include <net/dsa.h>
18#include "mv88e6xxx.h" 18#include "mv88e6xxx.h"
19 19
20/* Switch product IDs */
21#define ID_6085 0x04a0
22#define ID_6095 0x0950
23#define ID_6131 0x1060
24#define ID_6131_B2 0x1066
25
26static char *mv88e6131_probe(struct device *host_dev, int sw_addr) 20static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
27{ 21{
28 struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); 22 struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
@@ -31,56 +25,23 @@ static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
31 if (bus == NULL) 25 if (bus == NULL)
32 return NULL; 26 return NULL;
33 27
34 ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); 28 ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
35 if (ret >= 0) { 29 if (ret >= 0) {
36 int ret_masked = ret & 0xfff0; 30 int ret_masked = ret & 0xfff0;
37 31
38 if (ret_masked == ID_6085) 32 if (ret_masked == PORT_SWITCH_ID_6085)
39 return "Marvell 88E6085"; 33 return "Marvell 88E6085";
40 if (ret_masked == ID_6095) 34 if (ret_masked == PORT_SWITCH_ID_6095)
41 return "Marvell 88E6095/88E6095F"; 35 return "Marvell 88E6095/88E6095F";
42 if (ret == ID_6131_B2) 36 if (ret == PORT_SWITCH_ID_6131_B2)
43 return "Marvell 88E6131 (B2)"; 37 return "Marvell 88E6131 (B2)";
44 if (ret_masked == ID_6131) 38 if (ret_masked == PORT_SWITCH_ID_6131)
45 return "Marvell 88E6131"; 39 return "Marvell 88E6131";
46 } 40 }
47 41
48 return NULL; 42 return NULL;
49} 43}
50 44
51static int mv88e6131_switch_reset(struct dsa_switch *ds)
52{
53 int i;
54 int ret;
55 unsigned long timeout;
56
57 /* Set all ports to the disabled state. */
58 for (i = 0; i < 11; i++) {
59 ret = REG_READ(REG_PORT(i), 0x04);
60 REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
61 }
62
63 /* Wait for transmit queues to drain. */
64 usleep_range(2000, 4000);
65
66 /* Reset the switch. */
67 REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
68
69 /* Wait up to one second for reset to complete. */
70 timeout = jiffies + 1 * HZ;
71 while (time_before(jiffies, timeout)) {
72 ret = REG_READ(REG_GLOBAL, 0x00);
73 if ((ret & 0xc800) == 0xc800)
74 break;
75
76 usleep_range(1000, 2000);
77 }
78 if (time_after(jiffies, timeout))
79 return -ETIMEDOUT;
80
81 return 0;
82}
83
84static int mv88e6131_setup_global(struct dsa_switch *ds) 45static int mv88e6131_setup_global(struct dsa_switch *ds)
85{ 46{
86 int ret; 47 int ret;
@@ -174,7 +135,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
174 * (100 Mb/s on 6085) full duplex. 135 * (100 Mb/s on 6085) full duplex.
175 */ 136 */
176 if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p)) 137 if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
177 if (ps->id == ID_6085) 138 if (ps->id == PORT_SWITCH_ID_6085)
178 REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */ 139 REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
179 else 140 else
180 REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */ 141 REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
@@ -201,35 +162,13 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
201 /* On 6085, unknown multicast forward is controlled 162 /* On 6085, unknown multicast forward is controlled
202 * here rather than in Port Control 2 register. 163 * here rather than in Port Control 2 register.
203 */ 164 */
204 if (ps->id == ID_6085) 165 if (ps->id == PORT_SWITCH_ID_6085)
205 val |= 0x0008; 166 val |= 0x0008;
206 } 167 }
207 if (ds->dsa_port_mask & (1 << p)) 168 if (ds->dsa_port_mask & (1 << p))
208 val |= 0x0100; 169 val |= 0x0100;
209 REG_WRITE(addr, 0x04, val); 170 REG_WRITE(addr, 0x04, val);
210 171
211 /* Port Control 1: disable trunking. Also, if this is the
212 * CPU port, enable learn messages to be sent to this port.
213 */
214 REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
215
216 /* Port based VLAN map: give each port its own address
217 * database, allow the CPU port to talk to each of the 'real'
218 * ports, and allow each of the 'real' ports to only talk to
219 * the upstream port.
220 */
221 val = (p & 0xf) << 12;
222 if (dsa_is_cpu_port(ds, p))
223 val |= ds->phys_port_mask;
224 else
225 val |= 1 << dsa_upstream_port(ds);
226 REG_WRITE(addr, 0x06, val);
227
228 /* Default VLAN ID and priority: don't set a default VLAN
229 * ID, and set the default packet priority to zero.
230 */
231 REG_WRITE(addr, 0x07, 0x0000);
232
233 /* Port Control 2: don't force a good FCS, don't use 172 /* Port Control 2: don't force a good FCS, don't use
234 * VLAN-based, source address-based or destination 173 * VLAN-based, source address-based or destination
235 * address-based priority overrides, don't let the switch 174 * address-based priority overrides, don't let the switch
@@ -242,7 +181,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
242 * If this is the upstream port for this switch, enable 181 * If this is the upstream port for this switch, enable
243 * forwarding of unknown multicast addresses. 182 * forwarding of unknown multicast addresses.
244 */ 183 */
245 if (ps->id == ID_6085) 184 if (ps->id == PORT_SWITCH_ID_6085)
246 /* on 6085, bits 3:0 are reserved, bit 6 control ARP 185 /* on 6085, bits 3:0 are reserved, bit 6 control ARP
247 * mirroring, and multicast forward is handled in 186 * mirroring, and multicast forward is handled in
248 * Port Control register. 187 * Port Control register.
@@ -278,7 +217,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
278 */ 217 */
279 REG_WRITE(addr, 0x19, 0x7654); 218 REG_WRITE(addr, 0x19, 0x7654);
280 219
281 return 0; 220 return mv88e6xxx_setup_port_common(ds, p);
282} 221}
283 222
284static int mv88e6131_setup(struct dsa_switch *ds) 223static int mv88e6131_setup(struct dsa_switch *ds)
@@ -287,13 +226,28 @@ static int mv88e6131_setup(struct dsa_switch *ds)
287 int i; 226 int i;
288 int ret; 227 int ret;
289 228
290 mutex_init(&ps->smi_mutex); 229 ret = mv88e6xxx_setup_common(ds);
230 if (ret < 0)
231 return ret;
232
291 mv88e6xxx_ppu_state_init(ds); 233 mv88e6xxx_ppu_state_init(ds);
292 mutex_init(&ps->stats_mutex);
293 234
294 ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0; 235 switch (ps->id) {
236 case PORT_SWITCH_ID_6085:
237 ps->num_ports = 10;
238 break;
239 case PORT_SWITCH_ID_6095:
240 ps->num_ports = 11;
241 break;
242 case PORT_SWITCH_ID_6131:
243 case PORT_SWITCH_ID_6131_B2:
244 ps->num_ports = 8;
245 break;
246 default:
247 return -ENODEV;
248 }
295 249
296 ret = mv88e6131_switch_reset(ds); 250 ret = mv88e6xxx_switch_reset(ds, false);
297 if (ret < 0) 251 if (ret < 0)
298 return ret; 252 return ret;
299 253
@@ -303,7 +257,7 @@ static int mv88e6131_setup(struct dsa_switch *ds)
303 if (ret < 0) 257 if (ret < 0)
304 return ret; 258 return ret;
305 259
306 for (i = 0; i < 11; i++) { 260 for (i = 0; i < ps->num_ports; i++) {
307 ret = mv88e6131_setup_port(ds, i); 261 ret = mv88e6131_setup_port(ds, i);
308 if (ret < 0) 262 if (ret < 0)
309 return ret; 263 return ret;
@@ -312,17 +266,24 @@ static int mv88e6131_setup(struct dsa_switch *ds)
312 return 0; 266 return 0;
313} 267}
314 268
315static int mv88e6131_port_to_phy_addr(int port) 269static int mv88e6131_port_to_phy_addr(struct dsa_switch *ds, int port)
316{ 270{
317 if (port >= 0 && port <= 11) 271 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
272
273 if (port >= 0 && port < ps->num_ports)
318 return port; 274 return port;
319 return -1; 275
276 return -EINVAL;
320} 277}
321 278
322static int 279static int
323mv88e6131_phy_read(struct dsa_switch *ds, int port, int regnum) 280mv88e6131_phy_read(struct dsa_switch *ds, int port, int regnum)
324{ 281{
325 int addr = mv88e6131_port_to_phy_addr(port); 282 int addr = mv88e6131_port_to_phy_addr(ds, port);
283
284 if (addr < 0)
285 return addr;
286
326 return mv88e6xxx_phy_read_ppu(ds, addr, regnum); 287 return mv88e6xxx_phy_read_ppu(ds, addr, regnum);
327} 288}
328 289
@@ -330,61 +291,12 @@ static int
330mv88e6131_phy_write(struct dsa_switch *ds, 291mv88e6131_phy_write(struct dsa_switch *ds,
331 int port, int regnum, u16 val) 292 int port, int regnum, u16 val)
332{ 293{
333 int addr = mv88e6131_port_to_phy_addr(port); 294 int addr = mv88e6131_port_to_phy_addr(ds, port);
334 return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val);
335}
336
337static struct mv88e6xxx_hw_stat mv88e6131_hw_stats[] = {
338 { "in_good_octets", 8, 0x00, },
339 { "in_bad_octets", 4, 0x02, },
340 { "in_unicast", 4, 0x04, },
341 { "in_broadcasts", 4, 0x06, },
342 { "in_multicasts", 4, 0x07, },
343 { "in_pause", 4, 0x16, },
344 { "in_undersize", 4, 0x18, },
345 { "in_fragments", 4, 0x19, },
346 { "in_oversize", 4, 0x1a, },
347 { "in_jabber", 4, 0x1b, },
348 { "in_rx_error", 4, 0x1c, },
349 { "in_fcs_error", 4, 0x1d, },
350 { "out_octets", 8, 0x0e, },
351 { "out_unicast", 4, 0x10, },
352 { "out_broadcasts", 4, 0x13, },
353 { "out_multicasts", 4, 0x12, },
354 { "out_pause", 4, 0x15, },
355 { "excessive", 4, 0x11, },
356 { "collisions", 4, 0x1e, },
357 { "deferred", 4, 0x05, },
358 { "single", 4, 0x14, },
359 { "multiple", 4, 0x17, },
360 { "out_fcs_error", 4, 0x03, },
361 { "late", 4, 0x1f, },
362 { "hist_64bytes", 4, 0x08, },
363 { "hist_65_127bytes", 4, 0x09, },
364 { "hist_128_255bytes", 4, 0x0a, },
365 { "hist_256_511bytes", 4, 0x0b, },
366 { "hist_512_1023bytes", 4, 0x0c, },
367 { "hist_1024_max_bytes", 4, 0x0d, },
368};
369
370static void
371mv88e6131_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
372{
373 mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6131_hw_stats),
374 mv88e6131_hw_stats, port, data);
375}
376 295
377static void 296 if (addr < 0)
378mv88e6131_get_ethtool_stats(struct dsa_switch *ds, 297 return addr;
379 int port, uint64_t *data)
380{
381 mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6131_hw_stats),
382 mv88e6131_hw_stats, port, data);
383}
384 298
385static int mv88e6131_get_sset_count(struct dsa_switch *ds) 299 return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val);
386{
387 return ARRAY_SIZE(mv88e6131_hw_stats);
388} 300}
389 301
390struct dsa_switch_driver mv88e6131_switch_driver = { 302struct dsa_switch_driver mv88e6131_switch_driver = {
@@ -396,9 +308,9 @@ struct dsa_switch_driver mv88e6131_switch_driver = {
396 .phy_read = mv88e6131_phy_read, 308 .phy_read = mv88e6131_phy_read,
397 .phy_write = mv88e6131_phy_write, 309 .phy_write = mv88e6131_phy_write,
398 .poll_link = mv88e6xxx_poll_link, 310 .poll_link = mv88e6xxx_poll_link,
399 .get_strings = mv88e6131_get_strings, 311 .get_strings = mv88e6xxx_get_strings,
400 .get_ethtool_stats = mv88e6131_get_ethtool_stats, 312 .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
401 .get_sset_count = mv88e6131_get_sset_count, 313 .get_sset_count = mv88e6xxx_get_sset_count,
402}; 314};
403 315
404MODULE_ALIAS("platform:mv88e6085"); 316MODULE_ALIAS("platform:mv88e6085");
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index 18cfead83dc9..9104efea0e3e 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -17,10 +17,6 @@
17#include <net/dsa.h> 17#include <net/dsa.h>
18#include "mv88e6xxx.h" 18#include "mv88e6xxx.h"
19 19
20/* Switch product IDs */
21#define ID_6171 0x1710
22#define ID_6172 0x1720
23
24static char *mv88e6171_probe(struct device *host_dev, int sw_addr) 20static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
25{ 21{
26 struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); 22 struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
@@ -29,64 +25,20 @@ static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
29 if (bus == NULL) 25 if (bus == NULL)
30 return NULL; 26 return NULL;
31 27
32 ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); 28 ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
33 if (ret >= 0) { 29 if (ret >= 0) {
34 if ((ret & 0xfff0) == ID_6171) 30 if ((ret & 0xfff0) == PORT_SWITCH_ID_6171)
35 return "Marvell 88E6171"; 31 return "Marvell 88E6171";
36 if ((ret & 0xfff0) == ID_6172) 32 if ((ret & 0xfff0) == PORT_SWITCH_ID_6172)
37 return "Marvell 88E6172"; 33 return "Marvell 88E6172";
38 } 34 }
39 35
40 return NULL; 36 return NULL;
41} 37}
42 38
43static int mv88e6171_switch_reset(struct dsa_switch *ds)
44{
45 int i;
46 int ret;
47 unsigned long timeout;
48
49 /* Set all ports to the disabled state. */
50 for (i = 0; i < 8; i++) {
51 ret = REG_READ(REG_PORT(i), 0x04);
52 REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
53 }
54
55 /* Wait for transmit queues to drain. */
56 usleep_range(2000, 4000);
57
58 /* Reset the switch. Keep PPU active. The PPU needs to be
59 * active to support indirect phy register accesses through
60 * global registers 0x18 and 0x19.
61 */
62 REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
63
64 /* Wait up to one second for reset to complete. */
65 timeout = jiffies + 1 * HZ;
66 while (time_before(jiffies, timeout)) {
67 ret = REG_READ(REG_GLOBAL, 0x00);
68 if ((ret & 0xc800) == 0xc800)
69 break;
70
71 usleep_range(1000, 2000);
72 }
73 if (time_after(jiffies, timeout))
74 return -ETIMEDOUT;
75
76 /* Enable ports not under DSA, e.g. WAN port */
77 for (i = 0; i < 8; i++) {
78 if (dsa_is_cpu_port(ds, i) || ds->phys_port_mask & (1 << i))
79 continue;
80
81 ret = REG_READ(REG_PORT(i), 0x04);
82 REG_WRITE(REG_PORT(i), 0x04, ret | 0x03);
83 }
84
85 return 0;
86}
87
88static int mv88e6171_setup_global(struct dsa_switch *ds) 39static int mv88e6171_setup_global(struct dsa_switch *ds)
89{ 40{
41 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
90 int ret; 42 int ret;
91 int i; 43 int i;
92 44
@@ -151,7 +103,7 @@ static int mv88e6171_setup_global(struct dsa_switch *ds)
151 } 103 }
152 104
153 /* Clear all trunk masks. */ 105 /* Clear all trunk masks. */
154 for (i = 0; i < 8; i++) 106 for (i = 0; i < ps->num_ports; i++)
155 REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff); 107 REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff);
156 108
157 /* Clear all trunk mappings. */ 109 /* Clear all trunk mappings. */
@@ -274,6 +226,7 @@ static int mv88e6171_setup_port(struct dsa_switch *ds, int p)
274 226
275static int mv88e6171_setup(struct dsa_switch *ds) 227static int mv88e6171_setup(struct dsa_switch *ds)
276{ 228{
229 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
277 int i; 230 int i;
278 int ret; 231 int ret;
279 232
@@ -281,7 +234,9 @@ static int mv88e6171_setup(struct dsa_switch *ds)
281 if (ret < 0) 234 if (ret < 0)
282 return ret; 235 return ret;
283 236
284 ret = mv88e6171_switch_reset(ds); 237 ps->num_ports = 7;
238
239 ret = mv88e6xxx_switch_reset(ds, true);
285 if (ret < 0) 240 if (ret < 0)
286 return ret; 241 return ret;
287 242
@@ -291,7 +246,7 @@ static int mv88e6171_setup(struct dsa_switch *ds)
291 if (ret < 0) 246 if (ret < 0)
292 return ret; 247 return ret;
293 248
294 for (i = 0; i < 8; i++) { 249 for (i = 0; i < ps->num_ports; i++) {
295 if (!(dsa_is_cpu_port(ds, i) || ds->phys_port_mask & (1 << i))) 250 if (!(dsa_is_cpu_port(ds, i) || ds->phys_port_mask & (1 << i)))
296 continue; 251 continue;
297 252
@@ -303,99 +258,12 @@ static int mv88e6171_setup(struct dsa_switch *ds)
303 return 0; 258 return 0;
304} 259}
305 260
306static int mv88e6171_port_to_phy_addr(int port)
307{
308 if (port >= 0 && port <= 4)
309 return port;
310 return -1;
311}
312
313static int
314mv88e6171_phy_read(struct dsa_switch *ds, int port, int regnum)
315{
316 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
317 int addr = mv88e6171_port_to_phy_addr(port);
318 int ret;
319
320 mutex_lock(&ps->phy_mutex);
321 ret = mv88e6xxx_phy_read_indirect(ds, addr, regnum);
322 mutex_unlock(&ps->phy_mutex);
323 return ret;
324}
325
326static int
327mv88e6171_phy_write(struct dsa_switch *ds,
328 int port, int regnum, u16 val)
329{
330 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
331 int addr = mv88e6171_port_to_phy_addr(port);
332 int ret;
333
334 mutex_lock(&ps->phy_mutex);
335 ret = mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
336 mutex_unlock(&ps->phy_mutex);
337 return ret;
338}
339
340static struct mv88e6xxx_hw_stat mv88e6171_hw_stats[] = {
341 { "in_good_octets", 8, 0x00, },
342 { "in_bad_octets", 4, 0x02, },
343 { "in_unicast", 4, 0x04, },
344 { "in_broadcasts", 4, 0x06, },
345 { "in_multicasts", 4, 0x07, },
346 { "in_pause", 4, 0x16, },
347 { "in_undersize", 4, 0x18, },
348 { "in_fragments", 4, 0x19, },
349 { "in_oversize", 4, 0x1a, },
350 { "in_jabber", 4, 0x1b, },
351 { "in_rx_error", 4, 0x1c, },
352 { "in_fcs_error", 4, 0x1d, },
353 { "out_octets", 8, 0x0e, },
354 { "out_unicast", 4, 0x10, },
355 { "out_broadcasts", 4, 0x13, },
356 { "out_multicasts", 4, 0x12, },
357 { "out_pause", 4, 0x15, },
358 { "excessive", 4, 0x11, },
359 { "collisions", 4, 0x1e, },
360 { "deferred", 4, 0x05, },
361 { "single", 4, 0x14, },
362 { "multiple", 4, 0x17, },
363 { "out_fcs_error", 4, 0x03, },
364 { "late", 4, 0x1f, },
365 { "hist_64bytes", 4, 0x08, },
366 { "hist_65_127bytes", 4, 0x09, },
367 { "hist_128_255bytes", 4, 0x0a, },
368 { "hist_256_511bytes", 4, 0x0b, },
369 { "hist_512_1023bytes", 4, 0x0c, },
370 { "hist_1024_max_bytes", 4, 0x0d, },
371};
372
373static void
374mv88e6171_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
375{
376 mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6171_hw_stats),
377 mv88e6171_hw_stats, port, data);
378}
379
380static void
381mv88e6171_get_ethtool_stats(struct dsa_switch *ds,
382 int port, uint64_t *data)
383{
384 mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6171_hw_stats),
385 mv88e6171_hw_stats, port, data);
386}
387
388static int mv88e6171_get_sset_count(struct dsa_switch *ds)
389{
390 return ARRAY_SIZE(mv88e6171_hw_stats);
391}
392
393static int mv88e6171_get_eee(struct dsa_switch *ds, int port, 261static int mv88e6171_get_eee(struct dsa_switch *ds, int port,
394 struct ethtool_eee *e) 262 struct ethtool_eee *e)
395{ 263{
396 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 264 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
397 265
398 if (ps->id == ID_6172) 266 if (ps->id == PORT_SWITCH_ID_6172)
399 return mv88e6xxx_get_eee(ds, port, e); 267 return mv88e6xxx_get_eee(ds, port, e);
400 268
401 return -EOPNOTSUPP; 269 return -EOPNOTSUPP;
@@ -406,7 +274,7 @@ static int mv88e6171_set_eee(struct dsa_switch *ds, int port,
406{ 274{
407 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 275 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
408 276
409 if (ps->id == ID_6172) 277 if (ps->id == PORT_SWITCH_ID_6172)
410 return mv88e6xxx_set_eee(ds, port, phydev, e); 278 return mv88e6xxx_set_eee(ds, port, phydev, e);
411 279
412 return -EOPNOTSUPP; 280 return -EOPNOTSUPP;
@@ -418,12 +286,12 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
418 .probe = mv88e6171_probe, 286 .probe = mv88e6171_probe,
419 .setup = mv88e6171_setup, 287 .setup = mv88e6171_setup,
420 .set_addr = mv88e6xxx_set_addr_indirect, 288 .set_addr = mv88e6xxx_set_addr_indirect,
421 .phy_read = mv88e6171_phy_read, 289 .phy_read = mv88e6xxx_phy_read_indirect,
422 .phy_write = mv88e6171_phy_write, 290 .phy_write = mv88e6xxx_phy_write_indirect,
423 .poll_link = mv88e6xxx_poll_link, 291 .poll_link = mv88e6xxx_poll_link,
424 .get_strings = mv88e6171_get_strings, 292 .get_strings = mv88e6xxx_get_strings,
425 .get_ethtool_stats = mv88e6171_get_ethtool_stats, 293 .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
426 .get_sset_count = mv88e6171_get_sset_count, 294 .get_sset_count = mv88e6xxx_get_sset_count,
427 .set_eee = mv88e6171_set_eee, 295 .set_eee = mv88e6171_set_eee,
428 .get_eee = mv88e6171_get_eee, 296 .get_eee = mv88e6171_get_eee,
429#ifdef CONFIG_NET_DSA_HWMON 297#ifdef CONFIG_NET_DSA_HWMON
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index 41fe3a6a72d1..126c11b81e75 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -30,58 +30,24 @@ static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
30 if (bus == NULL) 30 if (bus == NULL)
31 return NULL; 31 return NULL;
32 32
33 ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); 33 ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
34 if (ret >= 0) { 34 if (ret >= 0) {
35 if ((ret & 0xfff0) == 0x1760) 35 if ((ret & 0xfff0) == PORT_SWITCH_ID_6176)
36 return "Marvell 88E6176"; 36 return "Marvell 88E6176";
37 if (ret == 0x3521) 37 if (ret == PORT_SWITCH_ID_6352_A0)
38 return "Marvell 88E6352 (A0)"; 38 return "Marvell 88E6352 (A0)";
39 if (ret == 0x3522) 39 if (ret == PORT_SWITCH_ID_6352_A1)
40 return "Marvell 88E6352 (A1)"; 40 return "Marvell 88E6352 (A1)";
41 if ((ret & 0xfff0) == 0x3520) 41 if ((ret & 0xfff0) == PORT_SWITCH_ID_6352)
42 return "Marvell 88E6352"; 42 return "Marvell 88E6352";
43 } 43 }
44 44
45 return NULL; 45 return NULL;
46} 46}
47 47
48static int mv88e6352_switch_reset(struct dsa_switch *ds)
49{
50 unsigned long timeout;
51 int ret;
52 int i;
53
54 /* Set all ports to the disabled state. */
55 for (i = 0; i < 7; i++) {
56 ret = REG_READ(REG_PORT(i), 0x04);
57 REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
58 }
59
60 /* Wait for transmit queues to drain. */
61 usleep_range(2000, 4000);
62
63 /* Reset the switch. Keep PPU active (bit 14, undocumented).
64 * The PPU needs to be active to support indirect phy register
65 * accesses through global registers 0x18 and 0x19.
66 */
67 REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
68
69 /* Wait up to one second for reset to complete. */
70 timeout = jiffies + 1 * HZ;
71 while (time_before(jiffies, timeout)) {
72 ret = REG_READ(REG_GLOBAL, 0x00);
73 if ((ret & 0x8800) == 0x8800)
74 break;
75 usleep_range(1000, 2000);
76 }
77 if (time_after(jiffies, timeout))
78 return -ETIMEDOUT;
79
80 return 0;
81}
82
83static int mv88e6352_setup_global(struct dsa_switch *ds) 48static int mv88e6352_setup_global(struct dsa_switch *ds)
84{ 49{
50 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
85 int ret; 51 int ret;
86 int i; 52 int i;
87 53
@@ -152,7 +118,7 @@ static int mv88e6352_setup_global(struct dsa_switch *ds)
152 /* Disable ingress rate limiting by resetting all ingress 118 /* Disable ingress rate limiting by resetting all ingress
153 * rate limit registers to their initial state. 119 * rate limit registers to their initial state.
154 */ 120 */
155 for (i = 0; i < 7; i++) 121 for (i = 0; i < ps->num_ports; i++)
156 REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8)); 122 REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
157 123
158 /* Initialise cross-chip port VLAN table to reset defaults. */ 124 /* Initialise cross-chip port VLAN table to reset defaults. */
@@ -264,48 +230,13 @@ static int mv88e6352_setup_port(struct dsa_switch *ds, int p)
264 230
265#ifdef CONFIG_NET_DSA_HWMON 231#ifdef CONFIG_NET_DSA_HWMON
266 232
267static int mv88e6352_phy_page_read(struct dsa_switch *ds,
268 int port, int page, int reg)
269{
270 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
271 int ret;
272
273 mutex_lock(&ps->phy_mutex);
274 ret = mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
275 if (ret < 0)
276 goto error;
277 ret = mv88e6xxx_phy_read_indirect(ds, port, reg);
278error:
279 mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
280 mutex_unlock(&ps->phy_mutex);
281 return ret;
282}
283
284static int mv88e6352_phy_page_write(struct dsa_switch *ds,
285 int port, int page, int reg, int val)
286{
287 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
288 int ret;
289
290 mutex_lock(&ps->phy_mutex);
291 ret = mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
292 if (ret < 0)
293 goto error;
294
295 ret = mv88e6xxx_phy_write_indirect(ds, port, reg, val);
296error:
297 mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
298 mutex_unlock(&ps->phy_mutex);
299 return ret;
300}
301
302static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp) 233static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
303{ 234{
304 int ret; 235 int ret;
305 236
306 *temp = 0; 237 *temp = 0;
307 238
308 ret = mv88e6352_phy_page_read(ds, 0, 6, 27); 239 ret = mv88e6xxx_phy_page_read(ds, 0, 6, 27);
309 if (ret < 0) 240 if (ret < 0)
310 return ret; 241 return ret;
311 242
@@ -320,7 +251,7 @@ static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp)
320 251
321 *temp = 0; 252 *temp = 0;
322 253
323 ret = mv88e6352_phy_page_read(ds, 0, 6, 26); 254 ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
324 if (ret < 0) 255 if (ret < 0)
325 return ret; 256 return ret;
326 257
@@ -333,11 +264,11 @@ static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp)
333{ 264{
334 int ret; 265 int ret;
335 266
336 ret = mv88e6352_phy_page_read(ds, 0, 6, 26); 267 ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
337 if (ret < 0) 268 if (ret < 0)
338 return ret; 269 return ret;
339 temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); 270 temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
340 return mv88e6352_phy_page_write(ds, 0, 6, 26, 271 return mv88e6xxx_phy_page_write(ds, 0, 6, 26,
341 (ret & 0xe0ff) | (temp << 8)); 272 (ret & 0xe0ff) | (temp << 8));
342} 273}
343 274
@@ -347,7 +278,7 @@ static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
347 278
348 *alarm = false; 279 *alarm = false;
349 280
350 ret = mv88e6352_phy_page_read(ds, 0, 6, 26); 281 ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
351 if (ret < 0) 282 if (ret < 0)
352 return ret; 283 return ret;
353 284
@@ -367,9 +298,11 @@ static int mv88e6352_setup(struct dsa_switch *ds)
367 if (ret < 0) 298 if (ret < 0)
368 return ret; 299 return ret;
369 300
301 ps->num_ports = 7;
302
370 mutex_init(&ps->eeprom_mutex); 303 mutex_init(&ps->eeprom_mutex);
371 304
372 ret = mv88e6352_switch_reset(ds); 305 ret = mv88e6xxx_switch_reset(ds, true);
373 if (ret < 0) 306 if (ret < 0)
374 return ret; 307 return ret;
375 308
@@ -379,7 +312,7 @@ static int mv88e6352_setup(struct dsa_switch *ds)
379 if (ret < 0) 312 if (ret < 0)
380 return ret; 313 return ret;
381 314
382 for (i = 0; i < 7; i++) { 315 for (i = 0; i < ps->num_ports; i++) {
383 ret = mv88e6352_setup_port(ds, i); 316 ret = mv88e6352_setup_port(ds, i);
384 if (ret < 0) 317 if (ret < 0)
385 return ret; 318 return ret;
@@ -388,83 +321,6 @@ static int mv88e6352_setup(struct dsa_switch *ds)
388 return 0; 321 return 0;
389} 322}
390 323
391static int mv88e6352_port_to_phy_addr(int port)
392{
393 if (port >= 0 && port <= 4)
394 return port;
395 return -EINVAL;
396}
397
398static int
399mv88e6352_phy_read(struct dsa_switch *ds, int port, int regnum)
400{
401 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
402 int addr = mv88e6352_port_to_phy_addr(port);
403 int ret;
404
405 if (addr < 0)
406 return addr;
407
408 mutex_lock(&ps->phy_mutex);
409 ret = mv88e6xxx_phy_read_indirect(ds, addr, regnum);
410 mutex_unlock(&ps->phy_mutex);
411
412 return ret;
413}
414
415static int
416mv88e6352_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
417{
418 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
419 int addr = mv88e6352_port_to_phy_addr(port);
420 int ret;
421
422 if (addr < 0)
423 return addr;
424
425 mutex_lock(&ps->phy_mutex);
426 ret = mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
427 mutex_unlock(&ps->phy_mutex);
428
429 return ret;
430}
431
432static struct mv88e6xxx_hw_stat mv88e6352_hw_stats[] = {
433 { "in_good_octets", 8, 0x00, },
434 { "in_bad_octets", 4, 0x02, },
435 { "in_unicast", 4, 0x04, },
436 { "in_broadcasts", 4, 0x06, },
437 { "in_multicasts", 4, 0x07, },
438 { "in_pause", 4, 0x16, },
439 { "in_undersize", 4, 0x18, },
440 { "in_fragments", 4, 0x19, },
441 { "in_oversize", 4, 0x1a, },
442 { "in_jabber", 4, 0x1b, },
443 { "in_rx_error", 4, 0x1c, },
444 { "in_fcs_error", 4, 0x1d, },
445 { "out_octets", 8, 0x0e, },
446 { "out_unicast", 4, 0x10, },
447 { "out_broadcasts", 4, 0x13, },
448 { "out_multicasts", 4, 0x12, },
449 { "out_pause", 4, 0x15, },
450 { "excessive", 4, 0x11, },
451 { "collisions", 4, 0x1e, },
452 { "deferred", 4, 0x05, },
453 { "single", 4, 0x14, },
454 { "multiple", 4, 0x17, },
455 { "out_fcs_error", 4, 0x03, },
456 { "late", 4, 0x1f, },
457 { "hist_64bytes", 4, 0x08, },
458 { "hist_65_127bytes", 4, 0x09, },
459 { "hist_128_255bytes", 4, 0x0a, },
460 { "hist_256_511bytes", 4, 0x0b, },
461 { "hist_512_1023bytes", 4, 0x0c, },
462 { "hist_1024_max_bytes", 4, 0x0d, },
463 { "sw_in_discards", 4, 0x110, },
464 { "sw_in_filtered", 2, 0x112, },
465 { "sw_out_filtered", 2, 0x113, },
466};
467
468static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr) 324static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
469{ 325{
470 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 326 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -663,37 +519,18 @@ static int mv88e6352_set_eeprom(struct dsa_switch *ds,
663 return 0; 519 return 0;
664} 520}
665 521
666static void
667mv88e6352_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
668{
669 mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6352_hw_stats),
670 mv88e6352_hw_stats, port, data);
671}
672
673static void
674mv88e6352_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
675{
676 mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6352_hw_stats),
677 mv88e6352_hw_stats, port, data);
678}
679
680static int mv88e6352_get_sset_count(struct dsa_switch *ds)
681{
682 return ARRAY_SIZE(mv88e6352_hw_stats);
683}
684
685struct dsa_switch_driver mv88e6352_switch_driver = { 522struct dsa_switch_driver mv88e6352_switch_driver = {
686 .tag_protocol = DSA_TAG_PROTO_EDSA, 523 .tag_protocol = DSA_TAG_PROTO_EDSA,
687 .priv_size = sizeof(struct mv88e6xxx_priv_state), 524 .priv_size = sizeof(struct mv88e6xxx_priv_state),
688 .probe = mv88e6352_probe, 525 .probe = mv88e6352_probe,
689 .setup = mv88e6352_setup, 526 .setup = mv88e6352_setup,
690 .set_addr = mv88e6xxx_set_addr_indirect, 527 .set_addr = mv88e6xxx_set_addr_indirect,
691 .phy_read = mv88e6352_phy_read, 528 .phy_read = mv88e6xxx_phy_read_indirect,
692 .phy_write = mv88e6352_phy_write, 529 .phy_write = mv88e6xxx_phy_write_indirect,
693 .poll_link = mv88e6xxx_poll_link, 530 .poll_link = mv88e6xxx_poll_link,
694 .get_strings = mv88e6352_get_strings, 531 .get_strings = mv88e6xxx_get_strings,
695 .get_ethtool_stats = mv88e6352_get_ethtool_stats, 532 .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
696 .get_sset_count = mv88e6352_get_sset_count, 533 .get_sset_count = mv88e6xxx_get_sset_count,
697 .set_eee = mv88e6xxx_set_eee, 534 .set_eee = mv88e6xxx_set_eee,
698 .get_eee = mv88e6xxx_get_eee, 535 .get_eee = mv88e6xxx_get_eee,
699#ifdef CONFIG_NET_DSA_HWMON 536#ifdef CONFIG_NET_DSA_HWMON
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 13572cc24c6d..fc8d3b6ffe8e 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -33,11 +33,11 @@ static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
33 int i; 33 int i;
34 34
35 for (i = 0; i < 16; i++) { 35 for (i = 0; i < 16; i++) {
36 ret = mdiobus_read(bus, sw_addr, 0); 36 ret = mdiobus_read(bus, sw_addr, SMI_CMD);
37 if (ret < 0) 37 if (ret < 0)
38 return ret; 38 return ret;
39 39
40 if ((ret & 0x8000) == 0) 40 if ((ret & SMI_CMD_BUSY) == 0)
41 return 0; 41 return 0;
42 } 42 }
43 43
@@ -57,7 +57,8 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
57 return ret; 57 return ret;
58 58
59 /* Transmit the read command. */ 59 /* Transmit the read command. */
60 ret = mdiobus_write(bus, sw_addr, 0, 0x9800 | (addr << 5) | reg); 60 ret = mdiobus_write(bus, sw_addr, SMI_CMD,
61 SMI_CMD_OP_22_READ | (addr << 5) | reg);
61 if (ret < 0) 62 if (ret < 0)
62 return ret; 63 return ret;
63 64
@@ -67,7 +68,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
67 return ret; 68 return ret;
68 69
69 /* Read the data. */ 70 /* Read the data. */
70 ret = mdiobus_read(bus, sw_addr, 1); 71 ret = mdiobus_read(bus, sw_addr, SMI_DATA);
71 if (ret < 0) 72 if (ret < 0)
72 return ret; 73 return ret;
73 74
@@ -119,12 +120,13 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
119 return ret; 120 return ret;
120 121
121 /* Transmit the data to write. */ 122 /* Transmit the data to write. */
122 ret = mdiobus_write(bus, sw_addr, 1, val); 123 ret = mdiobus_write(bus, sw_addr, SMI_DATA, val);
123 if (ret < 0) 124 if (ret < 0)
124 return ret; 125 return ret;
125 126
126 /* Transmit the write command. */ 127 /* Transmit the write command. */
127 ret = mdiobus_write(bus, sw_addr, 0, 0x9400 | (addr << 5) | reg); 128 ret = mdiobus_write(bus, sw_addr, SMI_CMD,
129 SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
128 if (ret < 0) 130 if (ret < 0)
129 return ret; 131 return ret;
130 132
@@ -166,26 +168,26 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
166int mv88e6xxx_config_prio(struct dsa_switch *ds) 168int mv88e6xxx_config_prio(struct dsa_switch *ds)
167{ 169{
168 /* Configure the IP ToS mapping registers. */ 170 /* Configure the IP ToS mapping registers. */
169 REG_WRITE(REG_GLOBAL, 0x10, 0x0000); 171 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
170 REG_WRITE(REG_GLOBAL, 0x11, 0x0000); 172 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
171 REG_WRITE(REG_GLOBAL, 0x12, 0x5555); 173 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
172 REG_WRITE(REG_GLOBAL, 0x13, 0x5555); 174 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
173 REG_WRITE(REG_GLOBAL, 0x14, 0xaaaa); 175 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
174 REG_WRITE(REG_GLOBAL, 0x15, 0xaaaa); 176 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
175 REG_WRITE(REG_GLOBAL, 0x16, 0xffff); 177 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
176 REG_WRITE(REG_GLOBAL, 0x17, 0xffff); 178 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
177 179
178 /* Configure the IEEE 802.1p priority mapping register. */ 180 /* Configure the IEEE 802.1p priority mapping register. */
179 REG_WRITE(REG_GLOBAL, 0x18, 0xfa41); 181 REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
180 182
181 return 0; 183 return 0;
182} 184}
183 185
184int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr) 186int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
185{ 187{
186 REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]); 188 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
187 REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]); 189 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
188 REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]); 190 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
189 191
190 return 0; 192 return 0;
191} 193}
@@ -199,12 +201,13 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
199 int j; 201 int j;
200 202
201 /* Write the MAC address byte. */ 203 /* Write the MAC address byte. */
202 REG_WRITE(REG_GLOBAL2, 0x0d, 0x8000 | (i << 8) | addr[i]); 204 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
205 GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);
203 206
204 /* Wait for the write to complete. */ 207 /* Wait for the write to complete. */
205 for (j = 0; j < 16; j++) { 208 for (j = 0; j < 16; j++) {
206 ret = REG_READ(REG_GLOBAL2, 0x0d); 209 ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
207 if ((ret & 0x8000) == 0) 210 if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
208 break; 211 break;
209 } 212 }
210 if (j == 16) 213 if (j == 16)
@@ -214,14 +217,17 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
214 return 0; 217 return 0;
215} 218}
216 219
217int mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum) 220/* Must be called with phy mutex held */
221static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
218{ 222{
219 if (addr >= 0) 223 if (addr >= 0)
220 return mv88e6xxx_reg_read(ds, addr, regnum); 224 return mv88e6xxx_reg_read(ds, addr, regnum);
221 return 0xffff; 225 return 0xffff;
222} 226}
223 227
224int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val) 228/* Must be called with phy mutex held */
229static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
230 u16 val)
225{ 231{
226 if (addr >= 0) 232 if (addr >= 0)
227 return mv88e6xxx_reg_write(ds, addr, regnum, val); 233 return mv88e6xxx_reg_write(ds, addr, regnum, val);
@@ -234,14 +240,16 @@ static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
234 int ret; 240 int ret;
235 unsigned long timeout; 241 unsigned long timeout;
236 242
237 ret = REG_READ(REG_GLOBAL, 0x04); 243 ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
238 REG_WRITE(REG_GLOBAL, 0x04, ret & ~0x4000); 244 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
245 ret & ~GLOBAL_CONTROL_PPU_ENABLE);
239 246
240 timeout = jiffies + 1 * HZ; 247 timeout = jiffies + 1 * HZ;
241 while (time_before(jiffies, timeout)) { 248 while (time_before(jiffies, timeout)) {
242 ret = REG_READ(REG_GLOBAL, 0x00); 249 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
243 usleep_range(1000, 2000); 250 usleep_range(1000, 2000);
244 if ((ret & 0xc000) != 0xc000) 251 if ((ret & GLOBAL_STATUS_PPU_MASK) !=
252 GLOBAL_STATUS_PPU_POLLING)
245 return 0; 253 return 0;
246 } 254 }
247 255
@@ -253,14 +261,15 @@ static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
253 int ret; 261 int ret;
254 unsigned long timeout; 262 unsigned long timeout;
255 263
256 ret = REG_READ(REG_GLOBAL, 0x04); 264 ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
257 REG_WRITE(REG_GLOBAL, 0x04, ret | 0x4000); 265 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);
258 266
259 timeout = jiffies + 1 * HZ; 267 timeout = jiffies + 1 * HZ;
260 while (time_before(jiffies, timeout)) { 268 while (time_before(jiffies, timeout)) {
261 ret = REG_READ(REG_GLOBAL, 0x00); 269 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
262 usleep_range(1000, 2000); 270 usleep_range(1000, 2000);
263 if ((ret & 0xc000) == 0xc000) 271 if ((ret & GLOBAL_STATUS_PPU_MASK) ==
272 GLOBAL_STATUS_PPU_POLLING)
264 return 0; 273 return 0;
265 } 274 }
266 275
@@ -381,11 +390,12 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
381 390
382 link = 0; 391 link = 0;
383 if (dev->flags & IFF_UP) { 392 if (dev->flags & IFF_UP) {
384 port_status = mv88e6xxx_reg_read(ds, REG_PORT(i), 0x00); 393 port_status = mv88e6xxx_reg_read(ds, REG_PORT(i),
394 PORT_STATUS);
385 if (port_status < 0) 395 if (port_status < 0)
386 continue; 396 continue;
387 397
388 link = !!(port_status & 0x0800); 398 link = !!(port_status & PORT_STATUS_LINK);
389 } 399 }
390 400
391 if (!link) { 401 if (!link) {
@@ -396,22 +406,22 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
396 continue; 406 continue;
397 } 407 }
398 408
399 switch (port_status & 0x0300) { 409 switch (port_status & PORT_STATUS_SPEED_MASK) {
400 case 0x0000: 410 case PORT_STATUS_SPEED_10:
401 speed = 10; 411 speed = 10;
402 break; 412 break;
403 case 0x0100: 413 case PORT_STATUS_SPEED_100:
404 speed = 100; 414 speed = 100;
405 break; 415 break;
406 case 0x0200: 416 case PORT_STATUS_SPEED_1000:
407 speed = 1000; 417 speed = 1000;
408 break; 418 break;
409 default: 419 default:
410 speed = -1; 420 speed = -1;
411 break; 421 break;
412 } 422 }
413 duplex = (port_status & 0x0400) ? 1 : 0; 423 duplex = (port_status & PORT_STATUS_DUPLEX) ? 1 : 0;
414 fc = (port_status & 0x8000) ? 1 : 0; 424 fc = (port_status & PORT_STATUS_PAUSE_EN) ? 1 : 0;
415 425
416 if (!netif_carrier_ok(dev)) { 426 if (!netif_carrier_ok(dev)) {
417 netdev_info(dev, 427 netdev_info(dev,
@@ -424,14 +434,27 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
424 } 434 }
425} 435}
426 436
437static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
438{
439 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
440
441 switch (ps->id) {
442 case PORT_SWITCH_ID_6352:
443 case PORT_SWITCH_ID_6172:
444 case PORT_SWITCH_ID_6176:
445 return true;
446 }
447 return false;
448}
449
427static int mv88e6xxx_stats_wait(struct dsa_switch *ds) 450static int mv88e6xxx_stats_wait(struct dsa_switch *ds)
428{ 451{
429 int ret; 452 int ret;
430 int i; 453 int i;
431 454
432 for (i = 0; i < 10; i++) { 455 for (i = 0; i < 10; i++) {
433 ret = REG_READ(REG_GLOBAL, 0x1d); 456 ret = REG_READ(REG_GLOBAL, GLOBAL_STATS_OP);
434 if ((ret & 0x8000) == 0) 457 if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
435 return 0; 458 return 0;
436 } 459 }
437 460
@@ -442,8 +465,13 @@ static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
442{ 465{
443 int ret; 466 int ret;
444 467
468 if (mv88e6xxx_6352_family(ds))
469 port = (port + 1) << 5;
470
445 /* Snapshot the hardware statistics counters for this port. */ 471 /* Snapshot the hardware statistics counters for this port. */
446 REG_WRITE(REG_GLOBAL, 0x1d, 0xdc00 | port); 472 REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP,
473 GLOBAL_STATS_OP_CAPTURE_PORT |
474 GLOBAL_STATS_OP_HIST_RX_TX | port);
447 475
448 /* Wait for the snapshotting to complete. */ 476 /* Wait for the snapshotting to complete. */
449 ret = mv88e6xxx_stats_wait(ds); 477 ret = mv88e6xxx_stats_wait(ds);
@@ -460,7 +488,9 @@ static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
460 488
461 *val = 0; 489 *val = 0;
462 490
463 ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x1d, 0xcc00 | stat); 491 ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
492 GLOBAL_STATS_OP_READ_CAPTURED |
493 GLOBAL_STATS_OP_HIST_RX_TX | stat);
464 if (ret < 0) 494 if (ret < 0)
465 return; 495 return;
466 496
@@ -468,22 +498,77 @@ static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
468 if (ret < 0) 498 if (ret < 0)
469 return; 499 return;
470 500
471 ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1e); 501 ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
472 if (ret < 0) 502 if (ret < 0)
473 return; 503 return;
474 504
475 _val = ret << 16; 505 _val = ret << 16;
476 506
477 ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1f); 507 ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
478 if (ret < 0) 508 if (ret < 0)
479 return; 509 return;
480 510
481 *val = _val | ret; 511 *val = _val | ret;
482} 512}
483 513
484void mv88e6xxx_get_strings(struct dsa_switch *ds, 514static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
485 int nr_stats, struct mv88e6xxx_hw_stat *stats, 515 { "in_good_octets", 8, 0x00, },
486 int port, uint8_t *data) 516 { "in_bad_octets", 4, 0x02, },
517 { "in_unicast", 4, 0x04, },
518 { "in_broadcasts", 4, 0x06, },
519 { "in_multicasts", 4, 0x07, },
520 { "in_pause", 4, 0x16, },
521 { "in_undersize", 4, 0x18, },
522 { "in_fragments", 4, 0x19, },
523 { "in_oversize", 4, 0x1a, },
524 { "in_jabber", 4, 0x1b, },
525 { "in_rx_error", 4, 0x1c, },
526 { "in_fcs_error", 4, 0x1d, },
527 { "out_octets", 8, 0x0e, },
528 { "out_unicast", 4, 0x10, },
529 { "out_broadcasts", 4, 0x13, },
530 { "out_multicasts", 4, 0x12, },
531 { "out_pause", 4, 0x15, },
532 { "excessive", 4, 0x11, },
533 { "collisions", 4, 0x1e, },
534 { "deferred", 4, 0x05, },
535 { "single", 4, 0x14, },
536 { "multiple", 4, 0x17, },
537 { "out_fcs_error", 4, 0x03, },
538 { "late", 4, 0x1f, },
539 { "hist_64bytes", 4, 0x08, },
540 { "hist_65_127bytes", 4, 0x09, },
541 { "hist_128_255bytes", 4, 0x0a, },
542 { "hist_256_511bytes", 4, 0x0b, },
543 { "hist_512_1023bytes", 4, 0x0c, },
544 { "hist_1024_max_bytes", 4, 0x0d, },
545 /* Not all devices have the following counters */
546 { "sw_in_discards", 4, 0x110, },
547 { "sw_in_filtered", 2, 0x112, },
548 { "sw_out_filtered", 2, 0x113, },
549
550};
551
552static bool have_sw_in_discards(struct dsa_switch *ds)
553{
554 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
555
556 switch (ps->id) {
557 case PORT_SWITCH_ID_6095: case PORT_SWITCH_ID_6161:
558 case PORT_SWITCH_ID_6165: case PORT_SWITCH_ID_6171:
559 case PORT_SWITCH_ID_6172: case PORT_SWITCH_ID_6176:
560 case PORT_SWITCH_ID_6182: case PORT_SWITCH_ID_6185:
561 case PORT_SWITCH_ID_6352:
562 return true;
563 default:
564 return false;
565 }
566}
567
568static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
569 int nr_stats,
570 struct mv88e6xxx_hw_stat *stats,
571 int port, uint8_t *data)
487{ 572{
488 int i; 573 int i;
489 574
@@ -493,9 +578,10 @@ void mv88e6xxx_get_strings(struct dsa_switch *ds,
493 } 578 }
494} 579}
495 580
496void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, 581static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
497 int nr_stats, struct mv88e6xxx_hw_stat *stats, 582 int nr_stats,
498 int port, uint64_t *data) 583 struct mv88e6xxx_hw_stat *stats,
584 int port, uint64_t *data)
499{ 585{
500 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 586 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
501 int ret; 587 int ret;
@@ -543,6 +629,39 @@ error:
543 mutex_unlock(&ps->stats_mutex); 629 mutex_unlock(&ps->stats_mutex);
544} 630}
545 631
632/* All the statistics in the table */
633void
634mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
635{
636 if (have_sw_in_discards(ds))
637 _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
638 mv88e6xxx_hw_stats, port, data);
639 else
640 _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
641 mv88e6xxx_hw_stats, port, data);
642}
643
644int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
645{
646 if (have_sw_in_discards(ds))
647 return ARRAY_SIZE(mv88e6xxx_hw_stats);
648 return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
649}
650
651void
652mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
653 int port, uint64_t *data)
654{
655 if (have_sw_in_discards(ds))
656 _mv88e6xxx_get_ethtool_stats(
657 ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
658 mv88e6xxx_hw_stats, port, data);
659 else
660 _mv88e6xxx_get_ethtool_stats(
661 ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
662 mv88e6xxx_hw_stats, port, data);
663}
664
546int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port) 665int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
547{ 666{
548 return 32 * sizeof(u16); 667 return 32 * sizeof(u16);
@@ -579,37 +698,37 @@ int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
579 698
580 mutex_lock(&ps->phy_mutex); 699 mutex_lock(&ps->phy_mutex);
581 700
582 ret = mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6); 701 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
583 if (ret < 0) 702 if (ret < 0)
584 goto error; 703 goto error;
585 704
586 /* Enable temperature sensor */ 705 /* Enable temperature sensor */
587 ret = mv88e6xxx_phy_read(ds, 0x0, 0x1a); 706 ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
588 if (ret < 0) 707 if (ret < 0)
589 goto error; 708 goto error;
590 709
591 ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5)); 710 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
592 if (ret < 0) 711 if (ret < 0)
593 goto error; 712 goto error;
594 713
595 /* Wait for temperature to stabilize */ 714 /* Wait for temperature to stabilize */
596 usleep_range(10000, 12000); 715 usleep_range(10000, 12000);
597 716
598 val = mv88e6xxx_phy_read(ds, 0x0, 0x1a); 717 val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
599 if (val < 0) { 718 if (val < 0) {
600 ret = val; 719 ret = val;
601 goto error; 720 goto error;
602 } 721 }
603 722
604 /* Disable temperature sensor */ 723 /* Disable temperature sensor */
605 ret = mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5)); 724 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
606 if (ret < 0) 725 if (ret < 0)
607 goto error; 726 goto error;
608 727
609 *temp = ((val & 0x1f) - 5) * 5; 728 *temp = ((val & 0x1f) - 5) * 5;
610 729
611error: 730error:
612 mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0); 731 _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
613 mutex_unlock(&ps->phy_mutex); 732 mutex_unlock(&ps->phy_mutex);
614 return ret; 733 return ret;
615} 734}
@@ -633,17 +752,20 @@ static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
633 752
634int mv88e6xxx_phy_wait(struct dsa_switch *ds) 753int mv88e6xxx_phy_wait(struct dsa_switch *ds)
635{ 754{
636 return mv88e6xxx_wait(ds, REG_GLOBAL2, 0x18, 0x8000); 755 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
756 GLOBAL2_SMI_OP_BUSY);
637} 757}
638 758
639int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds) 759int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
640{ 760{
641 return mv88e6xxx_wait(ds, REG_GLOBAL2, 0x14, 0x0800); 761 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
762 GLOBAL2_EEPROM_OP_LOAD);
642} 763}
643 764
644int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds) 765int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
645{ 766{
646 return mv88e6xxx_wait(ds, REG_GLOBAL2, 0x14, 0x8000); 767 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
768 GLOBAL2_EEPROM_OP_BUSY);
647} 769}
648 770
649/* Must be called with SMI lock held */ 771/* Must be called with SMI lock held */
@@ -668,80 +790,87 @@ static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
668/* Must be called with SMI lock held */ 790/* Must be called with SMI lock held */
669static int _mv88e6xxx_atu_wait(struct dsa_switch *ds) 791static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
670{ 792{
671 return _mv88e6xxx_wait(ds, REG_GLOBAL, 0x0b, ATU_BUSY); 793 return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
794 GLOBAL_ATU_OP_BUSY);
672} 795}
673 796
674int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum) 797/* Must be called with phy mutex held */
798static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
799 int regnum)
675{ 800{
676 int ret; 801 int ret;
677 802
678 REG_WRITE(REG_GLOBAL2, 0x18, 0x9800 | (addr << 5) | regnum); 803 REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_OP,
804 GLOBAL2_SMI_OP_22_READ | (addr << 5) | regnum);
679 805
680 ret = mv88e6xxx_phy_wait(ds); 806 ret = mv88e6xxx_phy_wait(ds);
681 if (ret < 0) 807 if (ret < 0)
682 return ret; 808 return ret;
683 809
684 return REG_READ(REG_GLOBAL2, 0x19); 810 return REG_READ(REG_GLOBAL2, GLOBAL2_SMI_DATA);
685} 811}
686 812
687int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum, 813/* Must be called with phy mutex held */
688 u16 val) 814static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
815 int regnum, u16 val)
689{ 816{
690 REG_WRITE(REG_GLOBAL2, 0x19, val); 817 REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
691 REG_WRITE(REG_GLOBAL2, 0x18, 0x9400 | (addr << 5) | regnum); 818 REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_OP,
819 GLOBAL2_SMI_OP_22_WRITE | (addr << 5) | regnum);
692 820
693 return mv88e6xxx_phy_wait(ds); 821 return mv88e6xxx_phy_wait(ds);
694} 822}
695 823
696int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) 824int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
697{ 825{
826 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
698 int reg; 827 int reg;
699 828
700 reg = mv88e6xxx_phy_read_indirect(ds, port, 16); 829 mutex_lock(&ps->phy_mutex);
830
831 reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
701 if (reg < 0) 832 if (reg < 0)
702 return -EOPNOTSUPP; 833 goto out;
703 834
704 e->eee_enabled = !!(reg & 0x0200); 835 e->eee_enabled = !!(reg & 0x0200);
705 e->tx_lpi_enabled = !!(reg & 0x0100); 836 e->tx_lpi_enabled = !!(reg & 0x0100);
706 837
707 reg = REG_READ(REG_PORT(port), 0); 838 reg = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
708 e->eee_active = !!(reg & 0x0040);
709
710 return 0;
711}
712
713static int mv88e6xxx_eee_enable_set(struct dsa_switch *ds, int port,
714 bool eee_enabled, bool tx_lpi_enabled)
715{
716 int reg, nreg;
717
718 reg = mv88e6xxx_phy_read_indirect(ds, port, 16);
719 if (reg < 0) 839 if (reg < 0)
720 return reg; 840 goto out;
721
722 nreg = reg & ~0x0300;
723 if (eee_enabled)
724 nreg |= 0x0200;
725 if (tx_lpi_enabled)
726 nreg |= 0x0100;
727 841
728 if (nreg != reg) 842 e->eee_active = !!(reg & PORT_STATUS_EEE);
729 return mv88e6xxx_phy_write_indirect(ds, port, 16, nreg); 843 reg = 0;
730 844
731 return 0; 845out:
846 mutex_unlock(&ps->phy_mutex);
847 return reg;
732} 848}
733 849
734int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, 850int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
735 struct phy_device *phydev, struct ethtool_eee *e) 851 struct phy_device *phydev, struct ethtool_eee *e)
736{ 852{
853 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
854 int reg;
737 int ret; 855 int ret;
738 856
739 ret = mv88e6xxx_eee_enable_set(ds, port, e->eee_enabled, 857 mutex_lock(&ps->phy_mutex);
740 e->tx_lpi_enabled);
741 if (ret)
742 return -EOPNOTSUPP;
743 858
744 return 0; 859 ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
860 if (ret < 0)
861 goto out;
862
863 reg = ret & ~0x0300;
864 if (e->eee_enabled)
865 reg |= 0x0200;
866 if (e->tx_lpi_enabled)
867 reg |= 0x0100;
868
869 ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
870out:
871 mutex_unlock(&ps->phy_mutex);
872
873 return ret;
745} 874}
746 875
747static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd) 876static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd)
@@ -752,7 +881,7 @@ static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd)
752 if (ret < 0) 881 if (ret < 0)
753 return ret; 882 return ret;
754 883
755 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x0b, cmd); 884 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
756 if (ret < 0) 885 if (ret < 0)
757 return ret; 886 return ret;
758 887
@@ -767,7 +896,7 @@ static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
767 if (ret < 0) 896 if (ret < 0)
768 return ret; 897 return ret;
769 898
770 return _mv88e6xxx_atu_cmd(ds, fid, ATU_CMD_FLUSH_NONSTATIC_FID); 899 return _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_FLUSH_NON_STATIC_DB);
771} 900}
772 901
773static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state) 902static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
@@ -778,23 +907,25 @@ static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
778 907
779 mutex_lock(&ps->smi_mutex); 908 mutex_lock(&ps->smi_mutex);
780 909
781 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), 0x04); 910 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
782 if (reg < 0) 911 if (reg < 0)
783 goto abort; 912 goto abort;
784 913
785 oldstate = reg & PSTATE_MASK; 914 oldstate = reg & PORT_CONTROL_STATE_MASK;
786 if (oldstate != state) { 915 if (oldstate != state) {
787 /* Flush forwarding database if we're moving a port 916 /* Flush forwarding database if we're moving a port
788 * from Learning or Forwarding state to Disabled or 917 * from Learning or Forwarding state to Disabled or
789 * Blocking or Listening state. 918 * Blocking or Listening state.
790 */ 919 */
791 if (oldstate >= PSTATE_LEARNING && state <= PSTATE_BLOCKING) { 920 if (oldstate >= PORT_CONTROL_STATE_LEARNING &&
921 state <= PORT_CONTROL_STATE_BLOCKING) {
792 ret = _mv88e6xxx_flush_fid(ds, ps->fid[port]); 922 ret = _mv88e6xxx_flush_fid(ds, ps->fid[port]);
793 if (ret) 923 if (ret)
794 goto abort; 924 goto abort;
795 } 925 }
796 reg = (reg & ~PSTATE_MASK) | state; 926 reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
797 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), 0x04, reg); 927 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
928 reg);
798 } 929 }
799 930
800abort: 931abort:
@@ -815,7 +946,7 @@ static int _mv88e6xxx_update_port_config(struct dsa_switch *ds, int port)
815 reg |= (ps->bridge_mask[fid] | 946 reg |= (ps->bridge_mask[fid] |
816 (1 << dsa_upstream_port(ds))) & ~(1 << port); 947 (1 << dsa_upstream_port(ds))) & ~(1 << port);
817 948
818 return _mv88e6xxx_reg_write(ds, REG_PORT(port), 0x06, reg); 949 return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
819} 950}
820 951
821/* Must be called with smi lock held */ 952/* Must be called with smi lock held */
@@ -927,18 +1058,18 @@ int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
927 1058
928 switch (state) { 1059 switch (state) {
929 case BR_STATE_DISABLED: 1060 case BR_STATE_DISABLED:
930 stp_state = PSTATE_DISABLED; 1061 stp_state = PORT_CONTROL_STATE_DISABLED;
931 break; 1062 break;
932 case BR_STATE_BLOCKING: 1063 case BR_STATE_BLOCKING:
933 case BR_STATE_LISTENING: 1064 case BR_STATE_LISTENING:
934 stp_state = PSTATE_BLOCKING; 1065 stp_state = PORT_CONTROL_STATE_BLOCKING;
935 break; 1066 break;
936 case BR_STATE_LEARNING: 1067 case BR_STATE_LEARNING:
937 stp_state = PSTATE_LEARNING; 1068 stp_state = PORT_CONTROL_STATE_LEARNING;
938 break; 1069 break;
939 case BR_STATE_FORWARDING: 1070 case BR_STATE_FORWARDING:
940 default: 1071 default:
941 stp_state = PSTATE_FORWARDING; 1072 stp_state = PORT_CONTROL_STATE_FORWARDING;
942 break; 1073 break;
943 } 1074 }
944 1075
@@ -960,8 +1091,9 @@ static int __mv88e6xxx_write_addr(struct dsa_switch *ds,
960 int i, ret; 1091 int i, ret;
961 1092
962 for (i = 0; i < 3; i++) { 1093 for (i = 0; i < 3; i++) {
963 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x0d + i, 1094 ret = _mv88e6xxx_reg_write(
964 (addr[i * 2] << 8) | addr[i * 2 + 1]); 1095 ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
1096 (addr[i * 2] << 8) | addr[i * 2 + 1]);
965 if (ret < 0) 1097 if (ret < 0)
966 return ret; 1098 return ret;
967 } 1099 }
@@ -974,7 +1106,8 @@ static int __mv88e6xxx_read_addr(struct dsa_switch *ds, unsigned char *addr)
974 int i, ret; 1106 int i, ret;
975 1107
976 for (i = 0; i < 3; i++) { 1108 for (i = 0; i < 3; i++) {
977 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x0d + i); 1109 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1110 GLOBAL_ATU_MAC_01 + i);
978 if (ret < 0) 1111 if (ret < 0)
979 return ret; 1112 return ret;
980 addr[i * 2] = ret >> 8; 1113 addr[i * 2] = ret >> 8;
@@ -999,12 +1132,12 @@ static int __mv88e6xxx_port_fdb_cmd(struct dsa_switch *ds, int port,
999 if (ret < 0) 1132 if (ret < 0)
1000 return ret; 1133 return ret;
1001 1134
1002 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x0c, 1135 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA,
1003 (0x10 << port) | state); 1136 (0x10 << port) | state);
1004 if (ret) 1137 if (ret)
1005 return ret; 1138 return ret;
1006 1139
1007 ret = _mv88e6xxx_atu_cmd(ds, fid, ATU_CMD_LOAD_FID); 1140 ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_LOAD_DB);
1008 1141
1009 return ret; 1142 return ret;
1010} 1143}
@@ -1013,7 +1146,8 @@ int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
1013 const unsigned char *addr, u16 vid) 1146 const unsigned char *addr, u16 vid)
1014{ 1147{
1015 int state = is_multicast_ether_addr(addr) ? 1148 int state = is_multicast_ether_addr(addr) ?
1016 FDB_STATE_MC_STATIC : FDB_STATE_STATIC; 1149 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1150 GLOBAL_ATU_DATA_STATE_UC_STATIC;
1017 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1151 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1018 int ret; 1152 int ret;
1019 1153
@@ -1031,7 +1165,8 @@ int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
1031 int ret; 1165 int ret;
1032 1166
1033 mutex_lock(&ps->smi_mutex); 1167 mutex_lock(&ps->smi_mutex);
1034 ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr, FDB_STATE_UNUSED); 1168 ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr,
1169 GLOBAL_ATU_DATA_STATE_UNUSED);
1035 mutex_unlock(&ps->smi_mutex); 1170 mutex_unlock(&ps->smi_mutex);
1036 1171
1037 return ret; 1172 return ret;
@@ -1053,15 +1188,15 @@ static int __mv88e6xxx_port_getnext(struct dsa_switch *ds, int port,
1053 return ret; 1188 return ret;
1054 1189
1055 do { 1190 do {
1056 ret = _mv88e6xxx_atu_cmd(ds, fid, ATU_CMD_GETNEXT_FID); 1191 ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
1057 if (ret < 0) 1192 if (ret < 0)
1058 return ret; 1193 return ret;
1059 1194
1060 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x0c); 1195 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
1061 if (ret < 0) 1196 if (ret < 0)
1062 return ret; 1197 return ret;
1063 state = ret & FDB_STATE_MASK; 1198 state = ret & GLOBAL_ATU_DATA_STATE_MASK;
1064 if (state == FDB_STATE_UNUSED) 1199 if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
1065 return -ENOENT; 1200 return -ENOENT;
1066 } while (!(((ret >> 4) & 0xff) & (1 << port))); 1201 } while (!(((ret >> 4) & 0xff) & (1 << port)));
1067 1202
@@ -1070,7 +1205,8 @@ static int __mv88e6xxx_port_getnext(struct dsa_switch *ds, int port,
1070 return ret; 1205 return ret;
1071 1206
1072 *is_static = state == (is_multicast_ether_addr(addr) ? 1207 *is_static = state == (is_multicast_ether_addr(addr) ?
1073 FDB_STATE_MC_STATIC : FDB_STATE_STATIC); 1208 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1209 GLOBAL_ATU_DATA_STATE_UC_STATIC);
1074 1210
1075 return 0; 1211 return 0;
1076} 1212}
@@ -1115,7 +1251,8 @@ int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port)
1115 /* Port Control 1: disable trunking, disable sending 1251 /* Port Control 1: disable trunking, disable sending
1116 * learning messages to this port. 1252 * learning messages to this port.
1117 */ 1253 */
1118 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), 0x05, 0x0000); 1254 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
1255 0x0000);
1119 if (ret) 1256 if (ret)
1120 goto abort; 1257 goto abort;
1121 1258
@@ -1152,7 +1289,7 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds)
1152 mutex_init(&ps->stats_mutex); 1289 mutex_init(&ps->stats_mutex);
1153 mutex_init(&ps->phy_mutex); 1290 mutex_init(&ps->phy_mutex);
1154 1291
1155 ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0; 1292 ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
1156 1293
1157 ps->fid_mask = (1 << DSA_MAX_PORTS) - 1; 1294 ps->fid_mask = (1 << DSA_MAX_PORTS) - 1;
1158 1295
@@ -1161,6 +1298,154 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds)
1161 return 0; 1298 return 0;
1162} 1299}
1163 1300
1301int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
1302{
1303 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1304 u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
1305 unsigned long timeout;
1306 int ret;
1307 int i;
1308
1309 /* Set all ports to the disabled state. */
1310 for (i = 0; i < ps->num_ports; i++) {
1311 ret = REG_READ(REG_PORT(i), PORT_CONTROL);
1312 REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
1313 }
1314
1315 /* Wait for transmit queues to drain. */
1316 usleep_range(2000, 4000);
1317
1318 /* Reset the switch. Keep the PPU active if requested. The PPU
1319 * needs to be active to support indirect phy register access
1320 * through global registers 0x18 and 0x19.
1321 */
1322 if (ppu_active)
1323 REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
1324 else
1325 REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
1326
1327 /* Wait up to one second for reset to complete. */
1328 timeout = jiffies + 1 * HZ;
1329 while (time_before(jiffies, timeout)) {
1330 ret = REG_READ(REG_GLOBAL, 0x00);
1331 if ((ret & is_reset) == is_reset)
1332 break;
1333 usleep_range(1000, 2000);
1334 }
1335 if (time_after(jiffies, timeout))
1336 return -ETIMEDOUT;
1337
1338 return 0;
1339}
1340
1341int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
1342{
1343 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1344 int ret;
1345
1346 mutex_lock(&ps->phy_mutex);
1347 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
1348 if (ret < 0)
1349 goto error;
1350 ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
1351error:
1352 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
1353 mutex_unlock(&ps->phy_mutex);
1354 return ret;
1355}
1356
1357int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
1358 int reg, int val)
1359{
1360 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1361 int ret;
1362
1363 mutex_lock(&ps->phy_mutex);
1364 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
1365 if (ret < 0)
1366 goto error;
1367
1368 ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
1369error:
1370 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
1371 mutex_unlock(&ps->phy_mutex);
1372 return ret;
1373}
1374
1375static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
1376{
1377 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1378
1379 if (port >= 0 && port < ps->num_ports)
1380 return port;
1381 return -EINVAL;
1382}
1383
1384int
1385mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
1386{
1387 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1388 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
1389 int ret;
1390
1391 if (addr < 0)
1392 return addr;
1393
1394 mutex_lock(&ps->phy_mutex);
1395 ret = _mv88e6xxx_phy_read(ds, addr, regnum);
1396 mutex_unlock(&ps->phy_mutex);
1397 return ret;
1398}
1399
1400int
1401mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
1402{
1403 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1404 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
1405 int ret;
1406
1407 if (addr < 0)
1408 return addr;
1409
1410 mutex_lock(&ps->phy_mutex);
1411 ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
1412 mutex_unlock(&ps->phy_mutex);
1413 return ret;
1414}
1415
1416int
1417mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
1418{
1419 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1420 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
1421 int ret;
1422
1423 if (addr < 0)
1424 return addr;
1425
1426 mutex_lock(&ps->phy_mutex);
1427 ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
1428 mutex_unlock(&ps->phy_mutex);
1429 return ret;
1430}
1431
1432int
1433mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
1434 u16 val)
1435{
1436 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1437 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
1438 int ret;
1439
1440 if (addr < 0)
1441 return addr;
1442
1443 mutex_lock(&ps->phy_mutex);
1444 ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
1445 mutex_unlock(&ps->phy_mutex);
1446 return ret;
1447}
1448
1164static int __init mv88e6xxx_init(void) 1449static int __init mv88e6xxx_init(void)
1165{ 1450{
1166#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) 1451#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index aaf239aba726..e045154f3364 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -11,33 +11,199 @@
11#ifndef __MV88E6XXX_H 11#ifndef __MV88E6XXX_H
12#define __MV88E6XXX_H 12#define __MV88E6XXX_H
13 13
14#define REG_PORT(p) (0x10 + (p)) 14#define SMI_CMD 0x00
15#define REG_GLOBAL 0x1b 15#define SMI_CMD_BUSY BIT(15)
16#define REG_GLOBAL2 0x1c 16#define SMI_CMD_CLAUSE_22 BIT(12)
17 17#define SMI_CMD_OP_22_WRITE ((1 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
18/* ATU commands */ 18#define SMI_CMD_OP_22_READ ((2 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
19 19#define SMI_CMD_OP_45_WRITE_ADDR ((0 << 10) | SMI_CMD_BUSY)
20#define ATU_BUSY 0x8000 20#define SMI_CMD_OP_45_WRITE_DATA ((1 << 10) | SMI_CMD_BUSY)
21 21#define SMI_CMD_OP_45_READ_DATA ((2 << 10) | SMI_CMD_BUSY)
22#define ATU_CMD_LOAD_FID (ATU_BUSY | 0x3000) 22#define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY)
23#define ATU_CMD_GETNEXT_FID (ATU_BUSY | 0x4000) 23#define SMI_DATA 0x01
24#define ATU_CMD_FLUSH_NONSTATIC_FID (ATU_BUSY | 0x6000)
25
26/* port states */
27 24
28#define PSTATE_MASK 0x03 25#define REG_PORT(p) (0x10 + (p))
29#define PSTATE_DISABLED 0x00 26#define PORT_STATUS 0x00
30#define PSTATE_BLOCKING 0x01 27#define PORT_STATUS_PAUSE_EN BIT(15)
31#define PSTATE_LEARNING 0x02 28#define PORT_STATUS_MY_PAUSE BIT(14)
32#define PSTATE_FORWARDING 0x03 29#define PORT_STATUS_HD_FLOW BIT(13)
33 30#define PORT_STATUS_PHY_DETECT BIT(12)
34/* FDB states */ 31#define PORT_STATUS_LINK BIT(11)
32#define PORT_STATUS_DUPLEX BIT(10)
33#define PORT_STATUS_SPEED_MASK 0x0300
34#define PORT_STATUS_SPEED_10 0x0000
35#define PORT_STATUS_SPEED_100 0x0100
36#define PORT_STATUS_SPEED_1000 0x0200
37#define PORT_STATUS_EEE BIT(6) /* 6352 */
38#define PORT_STATUS_AM_DIS BIT(6) /* 6165 */
39#define PORT_STATUS_MGMII BIT(6) /* 6185 */
40#define PORT_STATUS_TX_PAUSED BIT(5)
41#define PORT_STATUS_FLOW_CTRL BIT(4)
42#define PORT_PCS_CTRL 0x01
43#define PORT_SWITCH_ID 0x03
44#define PORT_SWITCH_ID_6085 0x04a0
45#define PORT_SWITCH_ID_6095 0x0950
46#define PORT_SWITCH_ID_6123 0x1210
47#define PORT_SWITCH_ID_6123_A1 0x1212
48#define PORT_SWITCH_ID_6123_A2 0x1213
49#define PORT_SWITCH_ID_6131 0x1060
50#define PORT_SWITCH_ID_6131_B2 0x1066
51#define PORT_SWITCH_ID_6152 0x1a40
52#define PORT_SWITCH_ID_6155 0x1a50
53#define PORT_SWITCH_ID_6161 0x1610
54#define PORT_SWITCH_ID_6161_A1 0x1612
55#define PORT_SWITCH_ID_6161_A2 0x1613
56#define PORT_SWITCH_ID_6165 0x1650
57#define PORT_SWITCH_ID_6165_A1 0x1652
58#define PORT_SWITCH_ID_6165_A2 0x1653
59#define PORT_SWITCH_ID_6171 0x1710
60#define PORT_SWITCH_ID_6172 0x1720
61#define PORT_SWITCH_ID_6176 0x1760
62#define PORT_SWITCH_ID_6182 0x1a60
63#define PORT_SWITCH_ID_6185 0x1a70
64#define PORT_SWITCH_ID_6352 0x3520
65#define PORT_SWITCH_ID_6352_A0 0x3521
66#define PORT_SWITCH_ID_6352_A1 0x3522
67#define PORT_CONTROL 0x04
68#define PORT_CONTROL_STATE_MASK 0x03
69#define PORT_CONTROL_STATE_DISABLED 0x00
70#define PORT_CONTROL_STATE_BLOCKING 0x01
71#define PORT_CONTROL_STATE_LEARNING 0x02
72#define PORT_CONTROL_STATE_FORWARDING 0x03
73#define PORT_CONTROL_1 0x05
74#define PORT_BASE_VLAN 0x06
75#define PORT_DEFAULT_VLAN 0x07
76#define PORT_CONTROL_2 0x08
77#define PORT_RATE_CONTROL 0x09
78#define PORT_RATE_CONTROL_2 0x0a
79#define PORT_ASSOC_VECTOR 0x0b
80#define PORT_IN_DISCARD_LO 0x10
81#define PORT_IN_DISCARD_HI 0x11
82#define PORT_IN_FILTERED 0x12
83#define PORT_OUT_FILTERED 0x13
84#define PORT_TAG_REGMAP_0123 0x19
85#define PORT_TAG_REGMAP_4567 0x1a
35 86
36#define FDB_STATE_MASK 0x0f 87#define REG_GLOBAL 0x1b
88#define GLOBAL_STATUS 0x00
89#define GLOBAL_STATUS_PPU_STATE BIT(15) /* 6351 and 6171 */
90/* Two bits for 6165, 6185 etc */
91#define GLOBAL_STATUS_PPU_MASK (0x3 << 14)
92#define GLOBAL_STATUS_PPU_DISABLED_RST (0x0 << 14)
93#define GLOBAL_STATUS_PPU_INITIALIZING (0x1 << 14)
94#define GLOBAL_STATUS_PPU_DISABLED (0x2 << 14)
95#define GLOBAL_STATUS_PPU_POLLING (0x3 << 14)
96#define GLOBAL_MAC_01 0x01
97#define GLOBAL_MAC_23 0x02
98#define GLOBAL_MAC_45 0x03
99#define GLOBAL_CONTROL 0x04
100#define GLOBAL_CONTROL_SW_RESET BIT(15)
101#define GLOBAL_CONTROL_PPU_ENABLE BIT(14)
102#define GLOBAL_CONTROL_DISCARD_EXCESS BIT(13) /* 6352 */
103#define GLOBAL_CONTROL_SCHED_PRIO BIT(11) /* 6152 */
104#define GLOBAL_CONTROL_MAX_FRAME_1632 BIT(10) /* 6152 */
105#define GLOBAL_CONTROL_RELOAD_EEPROM BIT(9) /* 6152 */
106#define GLOBAL_CONTROL_DEVICE_EN BIT(7)
107#define GLOBAL_CONTROL_STATS_DONE_EN BIT(6)
108#define GLOBAL_CONTROL_VTU_PROBLEM_EN BIT(5)
109#define GLOBAL_CONTROL_VTU_DONE_EN BIT(4)
110#define GLOBAL_CONTROL_ATU_PROBLEM_EN BIT(3)
111#define GLOBAL_CONTROL_ATU_DONE_EN BIT(2)
112#define GLOBAL_CONTROL_TCAM_EN BIT(1)
113#define GLOBAL_CONTROL_EEPROM_DONE_EN BIT(0)
114#define GLOBAL_VTU_OP 0x05
115#define GLOBAL_VTU_VID 0x06
116#define GLOBAL_VTU_DATA_0_3 0x07
117#define GLOBAL_VTU_DATA_4_7 0x08
118#define GLOBAL_VTU_DATA_8_11 0x09
119#define GLOBAL_ATU_CONTROL 0x0a
120#define GLOBAL_ATU_OP 0x0b
121#define GLOBAL_ATU_OP_BUSY BIT(15)
122#define GLOBAL_ATU_OP_NOP (0 << 12)
123#define GLOBAL_ATU_OP_FLUSH_ALL ((1 << 12) | GLOBAL_ATU_OP_BUSY)
124#define GLOBAL_ATU_OP_FLUSH_NON_STATIC ((2 << 12) | GLOBAL_ATU_OP_BUSY)
125#define GLOBAL_ATU_OP_LOAD_DB ((3 << 12) | GLOBAL_ATU_OP_BUSY)
126#define GLOBAL_ATU_OP_GET_NEXT_DB ((4 << 12) | GLOBAL_ATU_OP_BUSY)
127#define GLOBAL_ATU_OP_FLUSH_DB ((5 << 12) | GLOBAL_ATU_OP_BUSY)
128#define GLOBAL_ATU_OP_FLUSH_NON_STATIC_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY)
129#define GLOBAL_ATU_OP_GET_CLR_VIOLATION ((7 << 12) | GLOBAL_ATU_OP_BUSY)
130#define GLOBAL_ATU_DATA 0x0c
131#define GLOBAL_ATU_DATA_STATE_MASK 0x0f
132#define GLOBAL_ATU_DATA_STATE_UNUSED 0x00
133#define GLOBAL_ATU_DATA_STATE_UC_MGMT 0x0d
134#define GLOBAL_ATU_DATA_STATE_UC_STATIC 0x0e
135#define GLOBAL_ATU_DATA_STATE_UC_PRIO_OVER 0x0f
136#define GLOBAL_ATU_DATA_STATE_MC_NONE_RATE 0x05
137#define GLOBAL_ATU_DATA_STATE_MC_STATIC 0x07
138#define GLOBAL_ATU_DATA_STATE_MC_MGMT 0x0e
139#define GLOBAL_ATU_DATA_STATE_MC_PRIO_OVER 0x0f
140#define GLOBAL_ATU_MAC_01 0x0d
141#define GLOBAL_ATU_MAC_23 0x0e
142#define GLOBAL_ATU_MAC_45 0x0f
143#define GLOBAL_IP_PRI_0 0x10
144#define GLOBAL_IP_PRI_1 0x11
145#define GLOBAL_IP_PRI_2 0x12
146#define GLOBAL_IP_PRI_3 0x13
147#define GLOBAL_IP_PRI_4 0x14
148#define GLOBAL_IP_PRI_5 0x15
149#define GLOBAL_IP_PRI_6 0x16
150#define GLOBAL_IP_PRI_7 0x17
151#define GLOBAL_IEEE_PRI 0x18
152#define GLOBAL_CORE_TAG_TYPE 0x19
153#define GLOBAL_MONITOR_CONTROL 0x1a
154#define GLOBAL_CONTROL_2 0x1c
155#define GLOBAL_STATS_OP 0x1d
156#define GLOBAL_STATS_OP_BUSY BIT(15)
157#define GLOBAL_STATS_OP_NOP (0 << 12)
158#define GLOBAL_STATS_OP_FLUSH_ALL ((1 << 12) | GLOBAL_STATS_OP_BUSY)
159#define GLOBAL_STATS_OP_FLUSH_PORT ((2 << 12) | GLOBAL_STATS_OP_BUSY)
160#define GLOBAL_STATS_OP_READ_CAPTURED ((4 << 12) | GLOBAL_STATS_OP_BUSY)
161#define GLOBAL_STATS_OP_CAPTURE_PORT ((5 << 12) | GLOBAL_STATS_OP_BUSY)
162#define GLOBAL_STATS_OP_HIST_RX ((1 << 10) | GLOBAL_STATS_OP_BUSY)
163#define GLOBAL_STATS_OP_HIST_TX ((2 << 10) | GLOBAL_STATS_OP_BUSY)
164#define GLOBAL_STATS_OP_HIST_RX_TX ((3 << 10) | GLOBAL_STATS_OP_BUSY)
165#define GLOBAL_STATS_COUNTER_32 0x1e
166#define GLOBAL_STATS_COUNTER_01 0x1f
37 167
38#define FDB_STATE_UNUSED 0x00 168#define REG_GLOBAL2 0x1c
39#define FDB_STATE_MC_STATIC 0x07 /* static multicast */ 169#define GLOBAL2_INT_SOURCE 0x00
40#define FDB_STATE_STATIC 0x0e /* static unicast */ 170#define GLOBAL2_INT_MASK 0x01
171#define GLOBAL2_MGMT_EN_2X 0x02
172#define GLOBAL2_MGMT_EN_0X 0x03
173#define GLOBAL2_FLOW_CONTROL 0x04
174#define GLOBAL2_SWITCH_MGMT 0x05
175#define GLOBAL2_DEVICE_MAPPING 0x06
176#define GLOBAL2_TRUNK_MASK 0x07
177#define GLOBAL2_TRUNK_MAPPING 0x08
178#define GLOBAL2_INGRESS_OP 0x09
179#define GLOBAL2_INGRESS_DATA 0x0a
180#define GLOBAL2_PVT_ADDR 0x0b
181#define GLOBAL2_PVT_DATA 0x0c
182#define GLOBAL2_SWITCH_MAC 0x0d
183#define GLOBAL2_SWITCH_MAC_BUSY BIT(15)
184#define GLOBAL2_ATU_STATS 0x0e
185#define GLOBAL2_PRIO_OVERRIDE 0x0f
186#define GLOBAL2_EEPROM_OP 0x14
187#define GLOBAL2_EEPROM_OP_BUSY BIT(15)
188#define GLOBAL2_EEPROM_OP_LOAD BIT(11)
189#define GLOBAL2_EEPROM_DATA 0x15
190#define GLOBAL2_PTP_AVB_OP 0x16
191#define GLOBAL2_PTP_AVB_DATA 0x17
192#define GLOBAL2_SMI_OP 0x18
193#define GLOBAL2_SMI_OP_BUSY BIT(15)
194#define GLOBAL2_SMI_OP_CLAUSE_22 BIT(12)
195#define GLOBAL2_SMI_OP_22_WRITE ((1 << 10) | GLOBAL2_SMI_OP_BUSY | \
196 GLOBAL2_SMI_OP_CLAUSE_22)
197#define GLOBAL2_SMI_OP_22_READ ((2 << 10) | GLOBAL2_SMI_OP_BUSY | \
198 GLOBAL2_SMI_OP_CLAUSE_22)
199#define GLOBAL2_SMI_OP_45_WRITE_ADDR ((0 << 10) | GLOBAL2_SMI_OP_BUSY)
200#define GLOBAL2_SMI_OP_45_WRITE_DATA ((1 << 10) | GLOBAL2_SMI_OP_BUSY)
201#define GLOBAL2_SMI_OP_45_READ_DATA ((2 << 10) | GLOBAL2_SMI_OP_BUSY)
202#define GLOBAL2_SMI_DATA 0x19
203#define GLOBAL2_SCRATCH_MISC 0x1a
204#define GLOBAL2_WDOG_CONTROL 0x1b
205#define GLOBAL2_QOS_WEIGHT 0x1c
206#define GLOBAL2_MISC 0x1d
41 207
42struct mv88e6xxx_priv_state { 208struct mv88e6xxx_priv_state {
43 /* When using multi-chip addressing, this mutex protects 209 /* When using multi-chip addressing, this mutex protects
@@ -73,6 +239,7 @@ struct mv88e6xxx_priv_state {
73 struct mutex eeprom_mutex; 239 struct mutex eeprom_mutex;
74 240
75 int id; /* switch product id */ 241 int id; /* switch product id */
242 int num_ports; /* number of switch ports */
76 243
77 /* hw bridging */ 244 /* hw bridging */
78 245
@@ -92,6 +259,7 @@ struct mv88e6xxx_hw_stat {
92 int reg; 259 int reg;
93}; 260};
94 261
262int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active);
95int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port); 263int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port);
96int mv88e6xxx_setup_common(struct dsa_switch *ds); 264int mv88e6xxx_setup_common(struct dsa_switch *ds);
97int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg); 265int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg);
@@ -102,19 +270,21 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val);
102int mv88e6xxx_config_prio(struct dsa_switch *ds); 270int mv88e6xxx_config_prio(struct dsa_switch *ds);
103int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr); 271int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr);
104int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr); 272int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr);
105int mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum); 273int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum);
106int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val); 274int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val);
275int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum);
276int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
277 u16 val);
107void mv88e6xxx_ppu_state_init(struct dsa_switch *ds); 278void mv88e6xxx_ppu_state_init(struct dsa_switch *ds);
108int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum); 279int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum);
109int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr, 280int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
110 int regnum, u16 val); 281 int regnum, u16 val);
111void mv88e6xxx_poll_link(struct dsa_switch *ds); 282void mv88e6xxx_poll_link(struct dsa_switch *ds);
112void mv88e6xxx_get_strings(struct dsa_switch *ds, 283void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data);
113 int nr_stats, struct mv88e6xxx_hw_stat *stats, 284void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
114 int port, uint8_t *data); 285 uint64_t *data);
115void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, 286int mv88e6xxx_get_sset_count(struct dsa_switch *ds);
116 int nr_stats, struct mv88e6xxx_hw_stat *stats, 287int mv88e6xxx_get_sset_count_basic(struct dsa_switch *ds);
117 int port, uint64_t *data);
118int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port); 288int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
119void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, 289void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
120 struct ethtool_regs *regs, void *_p); 290 struct ethtool_regs *regs, void *_p);
@@ -137,7 +307,9 @@ int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
137 const unsigned char *addr, u16 vid); 307 const unsigned char *addr, u16 vid);
138int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port, 308int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
139 unsigned char *addr, bool *is_static); 309 unsigned char *addr, bool *is_static);
140 310int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
311int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
312 int reg, int val);
141extern struct dsa_switch_driver mv88e6131_switch_driver; 313extern struct dsa_switch_driver mv88e6131_switch_driver;
142extern struct dsa_switch_driver mv88e6123_61_65_switch_driver; 314extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
143extern struct dsa_switch_driver mv88e6352_switch_driver; 315extern struct dsa_switch_driver mv88e6352_switch_driver;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 756053c028be..4085c4b31047 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1811,7 +1811,7 @@ struct bnx2x {
1811 int stats_state; 1811 int stats_state;
1812 1812
1813 /* used for synchronization of concurrent threads statistics handling */ 1813 /* used for synchronization of concurrent threads statistics handling */
1814 spinlock_t stats_lock; 1814 struct mutex stats_lock;
1815 1815
1816 /* used by dmae command loader */ 1816 /* used by dmae command loader */
1817 struct dmae_command stats_dmae; 1817 struct dmae_command stats_dmae;
@@ -1935,8 +1935,6 @@ struct bnx2x {
1935 1935
1936 int fp_array_size; 1936 int fp_array_size;
1937 u32 dump_preset_idx; 1937 u32 dump_preset_idx;
1938 bool stats_started;
1939 struct semaphore stats_sema;
1940 1938
1941 u8 phys_port_id[ETH_ALEN]; 1939 u8 phys_port_id[ETH_ALEN];
1942 1940
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 177cb0e722e7..b9f85fccb419 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -129,8 +129,8 @@ struct bnx2x_mac_vals {
129 u32 xmac_val; 129 u32 xmac_val;
130 u32 emac_addr; 130 u32 emac_addr;
131 u32 emac_val; 131 u32 emac_val;
132 u32 umac_addr; 132 u32 umac_addr[2];
133 u32 umac_val; 133 u32 umac_val[2];
134 u32 bmac_addr; 134 u32 bmac_addr;
135 u32 bmac_val[2]; 135 u32 bmac_val[2];
136}; 136};
@@ -7866,6 +7866,20 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7866 return 0; 7866 return 0;
7867} 7867}
7868 7868
7869/* previous driver DMAE transaction may have occurred when pre-boot stage ended
7870 * and boot began, or when kdump kernel was loaded. Either case would invalidate
7871 * the addresses of the transaction, resulting in was-error bit set in the pci
7872 * causing all hw-to-host pcie transactions to timeout. If this happened we want
7873 * to clear the interrupt which detected this from the pglueb and the was done
7874 * bit
7875 */
7876static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
7877{
7878 if (!CHIP_IS_E1x(bp))
7879 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
7880 1 << BP_ABS_FUNC(bp));
7881}
7882
7869static int bnx2x_init_hw_func(struct bnx2x *bp) 7883static int bnx2x_init_hw_func(struct bnx2x *bp)
7870{ 7884{
7871 int port = BP_PORT(bp); 7885 int port = BP_PORT(bp);
@@ -7958,8 +7972,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
7958 7972
7959 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); 7973 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7960 7974
7961 if (!CHIP_IS_E1x(bp)) 7975 bnx2x_clean_pglue_errors(bp);
7962 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
7963 7976
7964 bnx2x_init_block(bp, BLOCK_ATC, init_phase); 7977 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7965 bnx2x_init_block(bp, BLOCK_DMAE, init_phase); 7978 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
@@ -10141,6 +10154,25 @@ static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
10141 return base + (BP_ABS_FUNC(bp)) * stride; 10154 return base + (BP_ABS_FUNC(bp)) * stride;
10142} 10155}
10143 10156
10157static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
10158 u8 port, u32 reset_reg,
10159 struct bnx2x_mac_vals *vals)
10160{
10161 u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
10162 u32 base_addr;
10163
10164 if (!(mask & reset_reg))
10165 return false;
10166
10167 BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
10168 base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
10169 vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
10170 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
10171 REG_WR(bp, vals->umac_addr[port], 0);
10172
10173 return true;
10174}
10175
10144static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, 10176static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10145 struct bnx2x_mac_vals *vals) 10177 struct bnx2x_mac_vals *vals)
10146{ 10178{
@@ -10149,10 +10181,7 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10149 u8 port = BP_PORT(bp); 10181 u8 port = BP_PORT(bp);
10150 10182
10151 /* reset addresses as they also mark which values were changed */ 10183 /* reset addresses as they also mark which values were changed */
10152 vals->bmac_addr = 0; 10184 memset(vals, 0, sizeof(*vals));
10153 vals->umac_addr = 0;
10154 vals->xmac_addr = 0;
10155 vals->emac_addr = 0;
10156 10185
10157 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2); 10186 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
10158 10187
@@ -10201,15 +10230,11 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10201 REG_WR(bp, vals->xmac_addr, 0); 10230 REG_WR(bp, vals->xmac_addr, 0);
10202 mac_stopped = true; 10231 mac_stopped = true;
10203 } 10232 }
10204 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 10233
10205 if (mask & reset_reg) { 10234 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
10206 BNX2X_DEV_INFO("Disable umac Rx\n"); 10235 reset_reg, vals);
10207 base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0; 10236 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
10208 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; 10237 reset_reg, vals);
10209 vals->umac_val = REG_RD(bp, vals->umac_addr);
10210 REG_WR(bp, vals->umac_addr, 0);
10211 mac_stopped = true;
10212 }
10213 } 10238 }
10214 10239
10215 if (mac_stopped) 10240 if (mac_stopped)
@@ -10505,8 +10530,11 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
10505 /* Close the MAC Rx to prevent BRB from filling up */ 10530 /* Close the MAC Rx to prevent BRB from filling up */
10506 bnx2x_prev_unload_close_mac(bp, &mac_vals); 10531 bnx2x_prev_unload_close_mac(bp, &mac_vals);
10507 10532
10508 /* close LLH filters towards the BRB */ 10533 /* close LLH filters for both ports towards the BRB */
10509 bnx2x_set_rx_filter(&bp->link_params, 0); 10534 bnx2x_set_rx_filter(&bp->link_params, 0);
10535 bp->link_params.port ^= 1;
10536 bnx2x_set_rx_filter(&bp->link_params, 0);
10537 bp->link_params.port ^= 1;
10510 10538
10511 /* Check if the UNDI driver was previously loaded */ 10539 /* Check if the UNDI driver was previously loaded */
10512 if (bnx2x_prev_is_after_undi(bp)) { 10540 if (bnx2x_prev_is_after_undi(bp)) {
@@ -10553,8 +10581,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
10553 10581
10554 if (mac_vals.xmac_addr) 10582 if (mac_vals.xmac_addr)
10555 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val); 10583 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10556 if (mac_vals.umac_addr) 10584 if (mac_vals.umac_addr[0])
10557 REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val); 10585 REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
10586 if (mac_vals.umac_addr[1])
10587 REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
10558 if (mac_vals.emac_addr) 10588 if (mac_vals.emac_addr)
10559 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val); 10589 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10560 if (mac_vals.bmac_addr) { 10590 if (mac_vals.bmac_addr) {
@@ -10571,26 +10601,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
10571 return bnx2x_prev_mcp_done(bp); 10601 return bnx2x_prev_mcp_done(bp);
10572} 10602}
10573 10603
10574/* previous driver DMAE transaction may have occurred when pre-boot stage ended
10575 * and boot began, or when kdump kernel was loaded. Either case would invalidate
10576 * the addresses of the transaction, resulting in was-error bit set in the pci
10577 * causing all hw-to-host pcie transactions to timeout. If this happened we want
10578 * to clear the interrupt which detected this from the pglueb and the was done
10579 * bit
10580 */
10581static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
10582{
10583 if (!CHIP_IS_E1x(bp)) {
10584 u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
10585 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
10586 DP(BNX2X_MSG_SP,
10587 "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
10588 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
10589 1 << BP_FUNC(bp));
10590 }
10591 }
10592}
10593
10594static int bnx2x_prev_unload(struct bnx2x *bp) 10604static int bnx2x_prev_unload(struct bnx2x *bp)
10595{ 10605{
10596 int time_counter = 10; 10606 int time_counter = 10;
@@ -10600,7 +10610,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
10600 /* clear hw from errors which may have resulted from an interrupted 10610 /* clear hw from errors which may have resulted from an interrupted
10601 * dmae transaction. 10611 * dmae transaction.
10602 */ 10612 */
10603 bnx2x_prev_interrupted_dmae(bp); 10613 bnx2x_clean_pglue_errors(bp);
10604 10614
10605 /* Release previously held locks */ 10615 /* Release previously held locks */
10606 hw_lock_reg = (BP_FUNC(bp) <= 5) ? 10616 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
@@ -12044,9 +12054,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
12044 mutex_init(&bp->port.phy_mutex); 12054 mutex_init(&bp->port.phy_mutex);
12045 mutex_init(&bp->fw_mb_mutex); 12055 mutex_init(&bp->fw_mb_mutex);
12046 mutex_init(&bp->drv_info_mutex); 12056 mutex_init(&bp->drv_info_mutex);
12057 mutex_init(&bp->stats_lock);
12047 bp->drv_info_mng_owner = false; 12058 bp->drv_info_mng_owner = false;
12048 spin_lock_init(&bp->stats_lock);
12049 sema_init(&bp->stats_sema, 1);
12050 12059
12051 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 12060 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
12052 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); 12061 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -13673,9 +13682,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13673 cancel_delayed_work_sync(&bp->sp_task); 13682 cancel_delayed_work_sync(&bp->sp_task);
13674 cancel_delayed_work_sync(&bp->period_task); 13683 cancel_delayed_work_sync(&bp->period_task);
13675 13684
13676 spin_lock_bh(&bp->stats_lock); 13685 mutex_lock(&bp->stats_lock);
13677 bp->stats_state = STATS_STATE_DISABLED; 13686 bp->stats_state = STATS_STATE_DISABLED;
13678 spin_unlock_bh(&bp->stats_lock); 13687 mutex_unlock(&bp->stats_lock);
13679 13688
13680 bnx2x_save_statistics(bp); 13689 bnx2x_save_statistics(bp);
13681 13690
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 8638d6c97caa..d95f7b4e19e1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2238,7 +2238,9 @@ int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2238 2238
2239 cookie.vf = vf; 2239 cookie.vf = vf;
2240 cookie.state = VF_ACQUIRED; 2240 cookie.state = VF_ACQUIRED;
2241 bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); 2241 rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
2242 if (rc)
2243 goto op_err;
2242 } 2244 }
2243 2245
2244 DP(BNX2X_MSG_IOV, "set state to acquired\n"); 2246 DP(BNX2X_MSG_IOV, "set state to acquired\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 612cafb5df53..266b055c2360 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -123,36 +123,28 @@ static void bnx2x_dp_stats(struct bnx2x *bp)
123 */ 123 */
124static void bnx2x_storm_stats_post(struct bnx2x *bp) 124static void bnx2x_storm_stats_post(struct bnx2x *bp)
125{ 125{
126 if (!bp->stats_pending) { 126 int rc;
127 int rc;
128 127
129 spin_lock_bh(&bp->stats_lock); 128 if (bp->stats_pending)
130 129 return;
131 if (bp->stats_pending) {
132 spin_unlock_bh(&bp->stats_lock);
133 return;
134 }
135
136 bp->fw_stats_req->hdr.drv_stats_counter =
137 cpu_to_le16(bp->stats_counter++);
138 130
139 DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", 131 bp->fw_stats_req->hdr.drv_stats_counter =
140 le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter)); 132 cpu_to_le16(bp->stats_counter++);
141 133
142 /* adjust the ramrod to include VF queues statistics */ 134 DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
143 bnx2x_iov_adjust_stats_req(bp); 135 le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
144 bnx2x_dp_stats(bp);
145 136
146 /* send FW stats ramrod */ 137 /* adjust the ramrod to include VF queues statistics */
147 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, 138 bnx2x_iov_adjust_stats_req(bp);
148 U64_HI(bp->fw_stats_req_mapping), 139 bnx2x_dp_stats(bp);
149 U64_LO(bp->fw_stats_req_mapping),
150 NONE_CONNECTION_TYPE);
151 if (rc == 0)
152 bp->stats_pending = 1;
153 140
154 spin_unlock_bh(&bp->stats_lock); 141 /* send FW stats ramrod */
155 } 142 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
143 U64_HI(bp->fw_stats_req_mapping),
144 U64_LO(bp->fw_stats_req_mapping),
145 NONE_CONNECTION_TYPE);
146 if (rc == 0)
147 bp->stats_pending = 1;
156} 148}
157 149
158static void bnx2x_hw_stats_post(struct bnx2x *bp) 150static void bnx2x_hw_stats_post(struct bnx2x *bp)
@@ -221,7 +213,7 @@ static void bnx2x_stats_comp(struct bnx2x *bp)
221 */ 213 */
222 214
223/* should be called under stats_sema */ 215/* should be called under stats_sema */
224static void __bnx2x_stats_pmf_update(struct bnx2x *bp) 216static void bnx2x_stats_pmf_update(struct bnx2x *bp)
225{ 217{
226 struct dmae_command *dmae; 218 struct dmae_command *dmae;
227 u32 opcode; 219 u32 opcode;
@@ -519,7 +511,7 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
519} 511}
520 512
521/* should be called under stats_sema */ 513/* should be called under stats_sema */
522static void __bnx2x_stats_start(struct bnx2x *bp) 514static void bnx2x_stats_start(struct bnx2x *bp)
523{ 515{
524 if (IS_PF(bp)) { 516 if (IS_PF(bp)) {
525 if (bp->port.pmf) 517 if (bp->port.pmf)
@@ -531,34 +523,13 @@ static void __bnx2x_stats_start(struct bnx2x *bp)
531 bnx2x_hw_stats_post(bp); 523 bnx2x_hw_stats_post(bp);
532 bnx2x_storm_stats_post(bp); 524 bnx2x_storm_stats_post(bp);
533 } 525 }
534
535 bp->stats_started = true;
536}
537
538static void bnx2x_stats_start(struct bnx2x *bp)
539{
540 if (down_timeout(&bp->stats_sema, HZ/10))
541 BNX2X_ERR("Unable to acquire stats lock\n");
542 __bnx2x_stats_start(bp);
543 up(&bp->stats_sema);
544} 526}
545 527
546static void bnx2x_stats_pmf_start(struct bnx2x *bp) 528static void bnx2x_stats_pmf_start(struct bnx2x *bp)
547{ 529{
548 if (down_timeout(&bp->stats_sema, HZ/10))
549 BNX2X_ERR("Unable to acquire stats lock\n");
550 bnx2x_stats_comp(bp); 530 bnx2x_stats_comp(bp);
551 __bnx2x_stats_pmf_update(bp); 531 bnx2x_stats_pmf_update(bp);
552 __bnx2x_stats_start(bp); 532 bnx2x_stats_start(bp);
553 up(&bp->stats_sema);
554}
555
556static void bnx2x_stats_pmf_update(struct bnx2x *bp)
557{
558 if (down_timeout(&bp->stats_sema, HZ/10))
559 BNX2X_ERR("Unable to acquire stats lock\n");
560 __bnx2x_stats_pmf_update(bp);
561 up(&bp->stats_sema);
562} 533}
563 534
564static void bnx2x_stats_restart(struct bnx2x *bp) 535static void bnx2x_stats_restart(struct bnx2x *bp)
@@ -568,11 +539,9 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
568 */ 539 */
569 if (IS_VF(bp)) 540 if (IS_VF(bp))
570 return; 541 return;
571 if (down_timeout(&bp->stats_sema, HZ/10)) 542
572 BNX2X_ERR("Unable to acquire stats lock\n");
573 bnx2x_stats_comp(bp); 543 bnx2x_stats_comp(bp);
574 __bnx2x_stats_start(bp); 544 bnx2x_stats_start(bp);
575 up(&bp->stats_sema);
576} 545}
577 546
578static void bnx2x_bmac_stats_update(struct bnx2x *bp) 547static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@@ -1246,18 +1215,12 @@ static void bnx2x_stats_update(struct bnx2x *bp)
1246{ 1215{
1247 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 1216 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
1248 1217
1249 /* we run update from timer context, so give up 1218 if (bnx2x_edebug_stats_stopped(bp))
1250 * if somebody is in the middle of transition
1251 */
1252 if (down_trylock(&bp->stats_sema))
1253 return; 1219 return;
1254 1220
1255 if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
1256 goto out;
1257
1258 if (IS_PF(bp)) { 1221 if (IS_PF(bp)) {
1259 if (*stats_comp != DMAE_COMP_VAL) 1222 if (*stats_comp != DMAE_COMP_VAL)
1260 goto out; 1223 return;
1261 1224
1262 if (bp->port.pmf) 1225 if (bp->port.pmf)
1263 bnx2x_hw_stats_update(bp); 1226 bnx2x_hw_stats_update(bp);
@@ -1267,7 +1230,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
1267 BNX2X_ERR("storm stats were not updated for 3 times\n"); 1230 BNX2X_ERR("storm stats were not updated for 3 times\n");
1268 bnx2x_panic(); 1231 bnx2x_panic();
1269 } 1232 }
1270 goto out; 1233 return;
1271 } 1234 }
1272 } else { 1235 } else {
1273 /* vf doesn't collect HW statistics, and doesn't get completions 1236 /* vf doesn't collect HW statistics, and doesn't get completions
@@ -1281,7 +1244,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
1281 1244
1282 /* vf is done */ 1245 /* vf is done */
1283 if (IS_VF(bp)) 1246 if (IS_VF(bp))
1284 goto out; 1247 return;
1285 1248
1286 if (netif_msg_timer(bp)) { 1249 if (netif_msg_timer(bp)) {
1287 struct bnx2x_eth_stats *estats = &bp->eth_stats; 1250 struct bnx2x_eth_stats *estats = &bp->eth_stats;
@@ -1292,9 +1255,6 @@ static void bnx2x_stats_update(struct bnx2x *bp)
1292 1255
1293 bnx2x_hw_stats_post(bp); 1256 bnx2x_hw_stats_post(bp);
1294 bnx2x_storm_stats_post(bp); 1257 bnx2x_storm_stats_post(bp);
1295
1296out:
1297 up(&bp->stats_sema);
1298} 1258}
1299 1259
1300static void bnx2x_port_stats_stop(struct bnx2x *bp) 1260static void bnx2x_port_stats_stop(struct bnx2x *bp)
@@ -1358,12 +1318,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
1358 1318
1359static void bnx2x_stats_stop(struct bnx2x *bp) 1319static void bnx2x_stats_stop(struct bnx2x *bp)
1360{ 1320{
1361 int update = 0; 1321 bool update = false;
1362
1363 if (down_timeout(&bp->stats_sema, HZ/10))
1364 BNX2X_ERR("Unable to acquire stats lock\n");
1365
1366 bp->stats_started = false;
1367 1322
1368 bnx2x_stats_comp(bp); 1323 bnx2x_stats_comp(bp);
1369 1324
@@ -1381,8 +1336,6 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
1381 bnx2x_hw_stats_post(bp); 1336 bnx2x_hw_stats_post(bp);
1382 bnx2x_stats_comp(bp); 1337 bnx2x_stats_comp(bp);
1383 } 1338 }
1384
1385 up(&bp->stats_sema);
1386} 1339}
1387 1340
1388static void bnx2x_stats_do_nothing(struct bnx2x *bp) 1341static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@@ -1410,18 +1363,28 @@ static const struct {
1410 1363
1411void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) 1364void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
1412{ 1365{
1413 enum bnx2x_stats_state state; 1366 enum bnx2x_stats_state state = bp->stats_state;
1414 void (*action)(struct bnx2x *bp); 1367
1415 if (unlikely(bp->panic)) 1368 if (unlikely(bp->panic))
1416 return; 1369 return;
1417 1370
1418 spin_lock_bh(&bp->stats_lock); 1371 /* Statistics update run from timer context, and we don't want to stop
1419 state = bp->stats_state; 1372 * that context in case someone is in the middle of a transition.
1373 * For other events, wait a bit until lock is taken.
1374 */
1375 if (!mutex_trylock(&bp->stats_lock)) {
1376 if (event == STATS_EVENT_UPDATE)
1377 return;
1378
1379 DP(BNX2X_MSG_STATS,
1380 "Unlikely stats' lock contention [event %d]\n", event);
1381 mutex_lock(&bp->stats_lock);
1382 }
1383
1384 bnx2x_stats_stm[state][event].action(bp);
1420 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 1385 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
1421 action = bnx2x_stats_stm[state][event].action;
1422 spin_unlock_bh(&bp->stats_lock);
1423 1386
1424 action(bp); 1387 mutex_unlock(&bp->stats_lock);
1425 1388
1426 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) 1389 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
1427 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", 1390 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@@ -1998,13 +1961,34 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
1998 } 1961 }
1999} 1962}
2000 1963
2001void bnx2x_stats_safe_exec(struct bnx2x *bp, 1964int bnx2x_stats_safe_exec(struct bnx2x *bp,
2002 void (func_to_exec)(void *cookie), 1965 void (func_to_exec)(void *cookie),
2003 void *cookie){ 1966 void *cookie)
2004 if (down_timeout(&bp->stats_sema, HZ/10)) 1967{
2005 BNX2X_ERR("Unable to acquire stats lock\n"); 1968 int cnt = 10, rc = 0;
1969
1970 /* Wait for statistics to end [while blocking further requests],
1971 * then run supplied function 'safely'.
1972 */
1973 mutex_lock(&bp->stats_lock);
1974
2006 bnx2x_stats_comp(bp); 1975 bnx2x_stats_comp(bp);
1976 while (bp->stats_pending && cnt--)
1977 if (bnx2x_storm_stats_update(bp))
1978 usleep_range(1000, 2000);
1979 if (bp->stats_pending) {
1980 BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n");
1981 rc = -EBUSY;
1982 goto out;
1983 }
1984
2007 func_to_exec(cookie); 1985 func_to_exec(cookie);
2008 __bnx2x_stats_start(bp); 1986
2009 up(&bp->stats_sema); 1987out:
1988 /* No need to restart statistics - if they're enabled, the timer
1989 * will restart the statistics.
1990 */
1991 mutex_unlock(&bp->stats_lock);
1992
1993 return rc;
2010} 1994}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 2beceaefdeea..965539a9dabe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -539,9 +539,9 @@ struct bnx2x;
539void bnx2x_memset_stats(struct bnx2x *bp); 539void bnx2x_memset_stats(struct bnx2x *bp);
540void bnx2x_stats_init(struct bnx2x *bp); 540void bnx2x_stats_init(struct bnx2x *bp);
541void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); 541void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
542void bnx2x_stats_safe_exec(struct bnx2x *bp, 542int bnx2x_stats_safe_exec(struct bnx2x *bp,
543 void (func_to_exec)(void *cookie), 543 void (func_to_exec)(void *cookie),
544 void *cookie); 544 void *cookie);
545 545
546/** 546/**
547 * bnx2x_save_statistics - save statistics when unloading. 547 * bnx2x_save_statistics - save statistics when unloading.
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index f7855a61e7ad..6043734ea613 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1734,6 +1734,9 @@ static int init_umac(struct bcmgenet_priv *priv)
1734 } else if (priv->ext_phy) { 1734 } else if (priv->ext_phy) {
1735 int0_enable |= UMAC_IRQ_LINK_EVENT; 1735 int0_enable |= UMAC_IRQ_LINK_EVENT;
1736 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { 1736 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
1737 if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
1738 int0_enable |= UMAC_IRQ_LINK_EVENT;
1739
1737 reg = bcmgenet_bp_mc_get(priv); 1740 reg = bcmgenet_bp_mc_get(priv);
1738 reg |= BIT(priv->hw_params->bp_in_en_shift); 1741 reg |= BIT(priv->hw_params->bp_in_en_shift);
1739 1742
@@ -2926,7 +2929,8 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
2926 .rdma_offset = 0x10000, 2929 .rdma_offset = 0x10000,
2927 .tdma_offset = 0x11000, 2930 .tdma_offset = 0x11000,
2928 .words_per_bd = 2, 2931 .words_per_bd = 2,
2929 .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR, 2932 .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
2933 GENET_HAS_MOCA_LINK_DET,
2930 }, 2934 },
2931 [GENET_V4] = { 2935 [GENET_V4] = {
2932 .tx_queues = 4, 2936 .tx_queues = 4,
@@ -2944,7 +2948,8 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
2944 .rdma_offset = 0x2000, 2948 .rdma_offset = 0x2000,
2945 .tdma_offset = 0x4000, 2949 .tdma_offset = 0x4000,
2946 .words_per_bd = 3, 2950 .words_per_bd = 3,
2947 .flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR, 2951 .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
2952 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
2948 }, 2953 },
2949}; 2954};
2950 2955
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index ddaa40cb0f21..6f2887a5e0be 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -508,6 +508,7 @@ enum bcmgenet_version {
508#define GENET_HAS_40BITS (1 << 0) 508#define GENET_HAS_40BITS (1 << 0)
509#define GENET_HAS_EXT (1 << 1) 509#define GENET_HAS_EXT (1 << 1)
510#define GENET_HAS_MDIO_INTR (1 << 2) 510#define GENET_HAS_MDIO_INTR (1 << 2)
511#define GENET_HAS_MOCA_LINK_DET (1 << 3)
511 512
512/* BCMGENET hardware parameters, keep this structure nicely aligned 513/* BCMGENET hardware parameters, keep this structure nicely aligned
513 * since it is going to be used in hot paths 514 * since it is going to be used in hot paths
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 6d3b66a103cc..e7651b3c6c57 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -462,6 +462,15 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
462 return 0; 462 return 0;
463} 463}
464 464
465static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
466 struct fixed_phy_status *status)
467{
468 if (dev && dev->phydev && status)
469 status->link = dev->phydev->link;
470
471 return 0;
472}
473
465static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) 474static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
466{ 475{
467 struct device *kdev = &priv->pdev->dev; 476 struct device *kdev = &priv->pdev->dev;
@@ -513,6 +522,13 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
513 dev_err(kdev, "failed to register fixed PHY device\n"); 522 dev_err(kdev, "failed to register fixed PHY device\n");
514 return -ENODEV; 523 return -ENODEV;
515 } 524 }
525
526 if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) {
527 ret = fixed_phy_set_link_update(
528 phydev, bcmgenet_fixed_phy_link_update);
529 if (!ret)
530 phydev->link = 0;
531 }
516 } 532 }
517 533
518 priv->phydev = phydev; 534 priv->phydev = phydev;
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 186566bfdbc8..f5f1b0b51ebd 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -354,7 +354,7 @@ static void set_msglevel(struct net_device *dev, u32 val)
354 adapter->msg_enable = val; 354 adapter->msg_enable = val;
355} 355}
356 356
357static char stats_strings[][ETH_GSTRING_LEN] = { 357static const char stats_strings[][ETH_GSTRING_LEN] = {
358 "TxOctetsOK", 358 "TxOctetsOK",
359 "TxOctetsBad", 359 "TxOctetsBad",
360 "TxUnicastFramesOK", 360 "TxUnicastFramesOK",
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index db76f7040455..b96e4bfcac41 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1537,7 +1537,7 @@ static void set_msglevel(struct net_device *dev, u32 val)
1537 adapter->msg_enable = val; 1537 adapter->msg_enable = val;
1538} 1538}
1539 1539
1540static char stats_strings[][ETH_GSTRING_LEN] = { 1540static const char stats_strings[][ETH_GSTRING_LEN] = {
1541 "TxOctetsOK ", 1541 "TxOctetsOK ",
1542 "TxFramesOK ", 1542 "TxFramesOK ",
1543 "TxMulticastFramesOK", 1543 "TxMulticastFramesOK",
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 07d9b68a4da2..ace0ab98d0f1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-$(CONFIG_CHELSIO_T4) += cxgb4.o 5obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
6 6
7cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o 7cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
8cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o 8cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
9cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o 9cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
10cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o 10cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index bf46ca935e2a..524d11098c56 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -60,6 +60,11 @@ enum {
60}; 60};
61 61
62enum { 62enum {
63 T4_REGMAP_SIZE = (160 * 1024),
64 T5_REGMAP_SIZE = (332 * 1024),
65};
66
67enum {
63 MEM_EDC0, 68 MEM_EDC0,
64 MEM_EDC1, 69 MEM_EDC1,
65 MEM_MC, 70 MEM_MC,
@@ -374,10 +379,19 @@ enum {
374}; 379};
375 380
376enum { 381enum {
382 MAX_TXQ_ENTRIES = 16384,
383 MAX_CTRL_TXQ_ENTRIES = 1024,
384 MAX_RSPQ_ENTRIES = 16384,
385 MAX_RX_BUFFERS = 16384,
386 MIN_TXQ_ENTRIES = 32,
387 MIN_CTRL_TXQ_ENTRIES = 32,
388 MIN_RSPQ_ENTRIES = 128,
389 MIN_FL_ENTRIES = 16
390};
391
392enum {
377 INGQ_EXTRAS = 2, /* firmware event queue and */ 393 INGQ_EXTRAS = 2, /* firmware event queue and */
378 /* forwarded interrupts */ 394 /* forwarded interrupts */
379 MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
380 + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
381 MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES 395 MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
382 + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS, 396 + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
383}; 397};
@@ -623,11 +637,13 @@ struct sge {
623 unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */ 637 unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */
624 638
625 unsigned int egr_start; 639 unsigned int egr_start;
640 unsigned int egr_sz;
626 unsigned int ingr_start; 641 unsigned int ingr_start;
627 void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */ 642 unsigned int ingr_sz;
628 struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */ 643 void **egr_map; /* qid->queue egress queue map */
629 DECLARE_BITMAP(starving_fl, MAX_EGRQ); 644 struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
630 DECLARE_BITMAP(txq_maperr, MAX_EGRQ); 645 unsigned long *starving_fl;
646 unsigned long *txq_maperr;
631 struct timer_list rx_timer; /* refills starving FLs */ 647 struct timer_list rx_timer; /* refills starving FLs */
632 struct timer_list tx_timer; /* checks Tx queues */ 648 struct timer_list tx_timer; /* checks Tx queues */
633}; 649};
@@ -1000,6 +1016,30 @@ static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
1000} 1016}
1001#endif /* CONFIG_NET_RX_BUSY_POLL */ 1017#endif /* CONFIG_NET_RX_BUSY_POLL */
1002 1018
1019/* Return a version number to identify the type of adapter. The scheme is:
1020 * - bits 0..9: chip version
1021 * - bits 10..15: chip revision
1022 * - bits 16..23: register dump version
1023 */
1024static inline unsigned int mk_adap_vers(struct adapter *ap)
1025{
1026 return CHELSIO_CHIP_VERSION(ap->params.chip) |
1027 (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
1028}
1029
1030/* Return a queue's interrupt hold-off time in us. 0 means no timer. */
1031static inline unsigned int qtimer_val(const struct adapter *adap,
1032 const struct sge_rspq *q)
1033{
1034 unsigned int idx = q->intr_params >> 1;
1035
1036 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1037}
1038
1039/* driver version & name used for ethtool_drvinfo */
1040extern char cxgb4_driver_name[];
1041extern const char cxgb4_driver_version[];
1042
1003void t4_os_portmod_changed(const struct adapter *adap, int port_id); 1043void t4_os_portmod_changed(const struct adapter *adap, int port_id);
1004void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); 1044void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
1005 1045
@@ -1029,6 +1069,10 @@ int t4_sge_init(struct adapter *adap);
1029void t4_sge_start(struct adapter *adap); 1069void t4_sge_start(struct adapter *adap);
1030void t4_sge_stop(struct adapter *adap); 1070void t4_sge_stop(struct adapter *adap);
1031int cxgb_busy_poll(struct napi_struct *napi); 1071int cxgb_busy_poll(struct napi_struct *napi);
1072int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
1073 unsigned int cnt);
1074void cxgb4_set_ethtool_ops(struct net_device *netdev);
1075int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
1032extern int dbfifo_int_thresh; 1076extern int dbfifo_int_thresh;
1033 1077
1034#define for_each_port(adapter, iter) \ 1078#define for_each_port(adapter, iter) \
@@ -1117,6 +1161,9 @@ static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
1117 return t4_memory_rw(adap, 0, mtype, addr, len, buf, 0); 1161 return t4_memory_rw(adap, 0, mtype, addr, len, buf, 0);
1118} 1162}
1119 1163
1164unsigned int t4_get_regs_len(struct adapter *adapter);
1165void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
1166
1120int t4_seeprom_wp(struct adapter *adapter, bool enable); 1167int t4_seeprom_wp(struct adapter *adapter, bool enable);
1121int get_vpd_params(struct adapter *adapter, struct vpd_params *p); 1168int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
1122int t4_read_flash(struct adapter *adapter, unsigned int addr, 1169int t4_read_flash(struct adapter *adapter, unsigned int addr,
@@ -1143,6 +1190,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
1143 1190
1144unsigned int qtimer_val(const struct adapter *adap, 1191unsigned int qtimer_val(const struct adapter *adap,
1145 const struct sge_rspq *q); 1192 const struct sge_rspq *q);
1193
1194int t4_init_devlog_params(struct adapter *adapter);
1146int t4_init_sge_params(struct adapter *adapter); 1195int t4_init_sge_params(struct adapter *adapter);
1147int t4_init_tp_params(struct adapter *adap); 1196int t4_init_tp_params(struct adapter *adap);
1148int t4_filter_field_shift(const struct adapter *adap, int filter_sel); 1197int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 0918c16bb154..f0285bcbe598 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -670,9 +670,13 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
670 "0.9375" }; 670 "0.9375" };
671 671
672 int i; 672 int i;
673 u16 incr[NMTUS][NCCTRL_WIN]; 673 u16 (*incr)[NCCTRL_WIN];
674 struct adapter *adap = seq->private; 674 struct adapter *adap = seq->private;
675 675
676 incr = kmalloc(sizeof(*incr) * NMTUS, GFP_KERNEL);
677 if (!incr)
678 return -ENOMEM;
679
676 t4_read_cong_tbl(adap, incr); 680 t4_read_cong_tbl(adap, incr);
677 681
678 for (i = 0; i < NCCTRL_WIN; ++i) { 682 for (i = 0; i < NCCTRL_WIN; ++i) {
@@ -685,6 +689,8 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
685 adap->params.a_wnd[i], 689 adap->params.a_wnd[i],
686 dec_fac[adap->params.b_wnd[i]]); 690 dec_fac[adap->params.b_wnd[i]]);
687 } 691 }
692
693 kfree(incr);
688 return 0; 694 return 0;
689} 695}
690 696
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
new file mode 100644
index 000000000000..10d82b51d7ef
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -0,0 +1,915 @@
1/*
2 * Copyright (C) 2013-2015 Chelsio Communications. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * The full GNU General Public License is included in this distribution in
14 * the file called "COPYING".
15 *
16 */
17
18#include <linux/firmware.h>
19#include <linux/mdio.h>
20
21#include "cxgb4.h"
22#include "t4_regs.h"
23#include "t4fw_api.h"
24
25#define EEPROM_MAGIC 0x38E2F10C
26
27static u32 get_msglevel(struct net_device *dev)
28{
29 return netdev2adap(dev)->msg_enable;
30}
31
32static void set_msglevel(struct net_device *dev, u32 val)
33{
34 netdev2adap(dev)->msg_enable = val;
35}
36
37static const char stats_strings[][ETH_GSTRING_LEN] = {
38 "TxOctetsOK ",
39 "TxFramesOK ",
40 "TxBroadcastFrames ",
41 "TxMulticastFrames ",
42 "TxUnicastFrames ",
43 "TxErrorFrames ",
44
45 "TxFrames64 ",
46 "TxFrames65To127 ",
47 "TxFrames128To255 ",
48 "TxFrames256To511 ",
49 "TxFrames512To1023 ",
50 "TxFrames1024To1518 ",
51 "TxFrames1519ToMax ",
52
53 "TxFramesDropped ",
54 "TxPauseFrames ",
55 "TxPPP0Frames ",
56 "TxPPP1Frames ",
57 "TxPPP2Frames ",
58 "TxPPP3Frames ",
59 "TxPPP4Frames ",
60 "TxPPP5Frames ",
61 "TxPPP6Frames ",
62 "TxPPP7Frames ",
63
64 "RxOctetsOK ",
65 "RxFramesOK ",
66 "RxBroadcastFrames ",
67 "RxMulticastFrames ",
68 "RxUnicastFrames ",
69
70 "RxFramesTooLong ",
71 "RxJabberErrors ",
72 "RxFCSErrors ",
73 "RxLengthErrors ",
74 "RxSymbolErrors ",
75 "RxRuntFrames ",
76
77 "RxFrames64 ",
78 "RxFrames65To127 ",
79 "RxFrames128To255 ",
80 "RxFrames256To511 ",
81 "RxFrames512To1023 ",
82 "RxFrames1024To1518 ",
83 "RxFrames1519ToMax ",
84
85 "RxPauseFrames ",
86 "RxPPP0Frames ",
87 "RxPPP1Frames ",
88 "RxPPP2Frames ",
89 "RxPPP3Frames ",
90 "RxPPP4Frames ",
91 "RxPPP5Frames ",
92 "RxPPP6Frames ",
93 "RxPPP7Frames ",
94
95 "RxBG0FramesDropped ",
96 "RxBG1FramesDropped ",
97 "RxBG2FramesDropped ",
98 "RxBG3FramesDropped ",
99 "RxBG0FramesTrunc ",
100 "RxBG1FramesTrunc ",
101 "RxBG2FramesTrunc ",
102 "RxBG3FramesTrunc ",
103
104 "TSO ",
105 "TxCsumOffload ",
106 "RxCsumGood ",
107 "VLANextractions ",
108 "VLANinsertions ",
109 "GROpackets ",
110 "GROmerged ",
111 "WriteCoalSuccess ",
112 "WriteCoalFail ",
113};
114
115static int get_sset_count(struct net_device *dev, int sset)
116{
117 switch (sset) {
118 case ETH_SS_STATS:
119 return ARRAY_SIZE(stats_strings);
120 default:
121 return -EOPNOTSUPP;
122 }
123}
124
125static int get_regs_len(struct net_device *dev)
126{
127 struct adapter *adap = netdev2adap(dev);
128
129 return t4_get_regs_len(adap);
130}
131
132static int get_eeprom_len(struct net_device *dev)
133{
134 return EEPROMSIZE;
135}
136
137static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
138{
139 struct adapter *adapter = netdev2adap(dev);
140 u32 exprom_vers;
141
142 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
143 strlcpy(info->version, cxgb4_driver_version,
144 sizeof(info->version));
145 strlcpy(info->bus_info, pci_name(adapter->pdev),
146 sizeof(info->bus_info));
147
148 if (adapter->params.fw_vers)
149 snprintf(info->fw_version, sizeof(info->fw_version),
150 "%u.%u.%u.%u, TP %u.%u.%u.%u",
151 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
152 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
153 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
154 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
155 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
156 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
157 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
158 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
159
160 if (!t4_get_exprom_version(adapter, &exprom_vers))
161 snprintf(info->erom_version, sizeof(info->erom_version),
162 "%u.%u.%u.%u",
163 FW_HDR_FW_VER_MAJOR_G(exprom_vers),
164 FW_HDR_FW_VER_MINOR_G(exprom_vers),
165 FW_HDR_FW_VER_MICRO_G(exprom_vers),
166 FW_HDR_FW_VER_BUILD_G(exprom_vers));
167}
168
169static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
170{
171 if (stringset == ETH_SS_STATS)
172 memcpy(data, stats_strings, sizeof(stats_strings));
173}
174
175/* port stats maintained per queue of the port. They should be in the same
176 * order as in stats_strings above.
177 */
178struct queue_port_stats {
179 u64 tso;
180 u64 tx_csum;
181 u64 rx_csum;
182 u64 vlan_ex;
183 u64 vlan_ins;
184 u64 gro_pkts;
185 u64 gro_merged;
186};
187
188static void collect_sge_port_stats(const struct adapter *adap,
189 const struct port_info *p,
190 struct queue_port_stats *s)
191{
192 int i;
193 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
194 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
195
196 memset(s, 0, sizeof(*s));
197 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
198 s->tso += tx->tso;
199 s->tx_csum += tx->tx_cso;
200 s->rx_csum += rx->stats.rx_cso;
201 s->vlan_ex += rx->stats.vlan_ex;
202 s->vlan_ins += tx->vlan_ins;
203 s->gro_pkts += rx->stats.lro_pkts;
204 s->gro_merged += rx->stats.lro_merged;
205 }
206}
207
208static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
209 u64 *data)
210{
211 struct port_info *pi = netdev_priv(dev);
212 struct adapter *adapter = pi->adapter;
213 u32 val1, val2;
214
215 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
216
217 data += sizeof(struct port_stats) / sizeof(u64);
218 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
219 data += sizeof(struct queue_port_stats) / sizeof(u64);
220 if (!is_t4(adapter->params.chip)) {
221 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
222 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
223 val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
224 *data = val1 - val2;
225 data++;
226 *data = val2;
227 data++;
228 } else {
229 memset(data, 0, 2 * sizeof(u64));
230 *data += 2;
231 }
232}
233
234static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
235 void *buf)
236{
237 struct adapter *adap = netdev2adap(dev);
238 size_t buf_size;
239
240 buf_size = t4_get_regs_len(adap);
241 regs->version = mk_adap_vers(adap);
242 t4_get_regs(adap, buf, buf_size);
243}
244
245static int restart_autoneg(struct net_device *dev)
246{
247 struct port_info *p = netdev_priv(dev);
248
249 if (!netif_running(dev))
250 return -EAGAIN;
251 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
252 return -EINVAL;
253 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
254 return 0;
255}
256
257static int identify_port(struct net_device *dev,
258 enum ethtool_phys_id_state state)
259{
260 unsigned int val;
261 struct adapter *adap = netdev2adap(dev);
262
263 if (state == ETHTOOL_ID_ACTIVE)
264 val = 0xffff;
265 else if (state == ETHTOOL_ID_INACTIVE)
266 val = 0;
267 else
268 return -EINVAL;
269
270 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
271}
272
273static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
274{
275 unsigned int v = 0;
276
277 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
278 type == FW_PORT_TYPE_BT_XAUI) {
279 v |= SUPPORTED_TP;
280 if (caps & FW_PORT_CAP_SPEED_100M)
281 v |= SUPPORTED_100baseT_Full;
282 if (caps & FW_PORT_CAP_SPEED_1G)
283 v |= SUPPORTED_1000baseT_Full;
284 if (caps & FW_PORT_CAP_SPEED_10G)
285 v |= SUPPORTED_10000baseT_Full;
286 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
287 v |= SUPPORTED_Backplane;
288 if (caps & FW_PORT_CAP_SPEED_1G)
289 v |= SUPPORTED_1000baseKX_Full;
290 if (caps & FW_PORT_CAP_SPEED_10G)
291 v |= SUPPORTED_10000baseKX4_Full;
292 } else if (type == FW_PORT_TYPE_KR) {
293 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
294 } else if (type == FW_PORT_TYPE_BP_AP) {
295 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
296 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
297 } else if (type == FW_PORT_TYPE_BP4_AP) {
298 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
299 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
300 SUPPORTED_10000baseKX4_Full;
301 } else if (type == FW_PORT_TYPE_FIBER_XFI ||
302 type == FW_PORT_TYPE_FIBER_XAUI ||
303 type == FW_PORT_TYPE_SFP ||
304 type == FW_PORT_TYPE_QSFP_10G ||
305 type == FW_PORT_TYPE_QSA) {
306 v |= SUPPORTED_FIBRE;
307 if (caps & FW_PORT_CAP_SPEED_1G)
308 v |= SUPPORTED_1000baseT_Full;
309 if (caps & FW_PORT_CAP_SPEED_10G)
310 v |= SUPPORTED_10000baseT_Full;
311 } else if (type == FW_PORT_TYPE_BP40_BA ||
312 type == FW_PORT_TYPE_QSFP) {
313 v |= SUPPORTED_40000baseSR4_Full;
314 v |= SUPPORTED_FIBRE;
315 }
316
317 if (caps & FW_PORT_CAP_ANEG)
318 v |= SUPPORTED_Autoneg;
319 return v;
320}
321
322static unsigned int to_fw_linkcaps(unsigned int caps)
323{
324 unsigned int v = 0;
325
326 if (caps & ADVERTISED_100baseT_Full)
327 v |= FW_PORT_CAP_SPEED_100M;
328 if (caps & ADVERTISED_1000baseT_Full)
329 v |= FW_PORT_CAP_SPEED_1G;
330 if (caps & ADVERTISED_10000baseT_Full)
331 v |= FW_PORT_CAP_SPEED_10G;
332 if (caps & ADVERTISED_40000baseSR4_Full)
333 v |= FW_PORT_CAP_SPEED_40G;
334 return v;
335}
336
337static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
338{
339 const struct port_info *p = netdev_priv(dev);
340
341 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
342 p->port_type == FW_PORT_TYPE_BT_XFI ||
343 p->port_type == FW_PORT_TYPE_BT_XAUI) {
344 cmd->port = PORT_TP;
345 } else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
346 p->port_type == FW_PORT_TYPE_FIBER_XAUI) {
347 cmd->port = PORT_FIBRE;
348 } else if (p->port_type == FW_PORT_TYPE_SFP ||
349 p->port_type == FW_PORT_TYPE_QSFP_10G ||
350 p->port_type == FW_PORT_TYPE_QSA ||
351 p->port_type == FW_PORT_TYPE_QSFP) {
352 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
353 p->mod_type == FW_PORT_MOD_TYPE_SR ||
354 p->mod_type == FW_PORT_MOD_TYPE_ER ||
355 p->mod_type == FW_PORT_MOD_TYPE_LRM)
356 cmd->port = PORT_FIBRE;
357 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
358 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
359 cmd->port = PORT_DA;
360 else
361 cmd->port = PORT_OTHER;
362 } else {
363 cmd->port = PORT_OTHER;
364 }
365
366 if (p->mdio_addr >= 0) {
367 cmd->phy_address = p->mdio_addr;
368 cmd->transceiver = XCVR_EXTERNAL;
369 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
370 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
371 } else {
372 cmd->phy_address = 0; /* not really, but no better option */
373 cmd->transceiver = XCVR_INTERNAL;
374 cmd->mdio_support = 0;
375 }
376
377 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
378 cmd->advertising = from_fw_linkcaps(p->port_type,
379 p->link_cfg.advertising);
380 ethtool_cmd_speed_set(cmd,
381 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
382 cmd->duplex = DUPLEX_FULL;
383 cmd->autoneg = p->link_cfg.autoneg;
384 cmd->maxtxpkt = 0;
385 cmd->maxrxpkt = 0;
386 return 0;
387}
388
389static unsigned int speed_to_caps(int speed)
390{
391 if (speed == 100)
392 return FW_PORT_CAP_SPEED_100M;
393 if (speed == 1000)
394 return FW_PORT_CAP_SPEED_1G;
395 if (speed == 10000)
396 return FW_PORT_CAP_SPEED_10G;
397 if (speed == 40000)
398 return FW_PORT_CAP_SPEED_40G;
399 return 0;
400}
401
402static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
403{
404 unsigned int cap;
405 struct port_info *p = netdev_priv(dev);
406 struct link_config *lc = &p->link_cfg;
407 u32 speed = ethtool_cmd_speed(cmd);
408
409 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
410 return -EINVAL;
411
412 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
413 /* PHY offers a single speed. See if that's what's
414 * being requested.
415 */
416 if (cmd->autoneg == AUTONEG_DISABLE &&
417 (lc->supported & speed_to_caps(speed)))
418 return 0;
419 return -EINVAL;
420 }
421
422 if (cmd->autoneg == AUTONEG_DISABLE) {
423 cap = speed_to_caps(speed);
424
425 if (!(lc->supported & cap) ||
426 (speed == 1000) ||
427 (speed == 10000) ||
428 (speed == 40000))
429 return -EINVAL;
430 lc->requested_speed = cap;
431 lc->advertising = 0;
432 } else {
433 cap = to_fw_linkcaps(cmd->advertising);
434 if (!(lc->supported & cap))
435 return -EINVAL;
436 lc->requested_speed = 0;
437 lc->advertising = cap | FW_PORT_CAP_ANEG;
438 }
439 lc->autoneg = cmd->autoneg;
440
441 if (netif_running(dev))
442 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
443 lc);
444 return 0;
445}
446
447static void get_pauseparam(struct net_device *dev,
448 struct ethtool_pauseparam *epause)
449{
450 struct port_info *p = netdev_priv(dev);
451
452 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
453 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
454 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
455}
456
457static int set_pauseparam(struct net_device *dev,
458 struct ethtool_pauseparam *epause)
459{
460 struct port_info *p = netdev_priv(dev);
461 struct link_config *lc = &p->link_cfg;
462
463 if (epause->autoneg == AUTONEG_DISABLE)
464 lc->requested_fc = 0;
465 else if (lc->supported & FW_PORT_CAP_ANEG)
466 lc->requested_fc = PAUSE_AUTONEG;
467 else
468 return -EINVAL;
469
470 if (epause->rx_pause)
471 lc->requested_fc |= PAUSE_RX;
472 if (epause->tx_pause)
473 lc->requested_fc |= PAUSE_TX;
474 if (netif_running(dev))
475 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
476 lc);
477 return 0;
478}
479
480static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
481{
482 const struct port_info *pi = netdev_priv(dev);
483 const struct sge *s = &pi->adapter->sge;
484
485 e->rx_max_pending = MAX_RX_BUFFERS;
486 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
487 e->rx_jumbo_max_pending = 0;
488 e->tx_max_pending = MAX_TXQ_ENTRIES;
489
490 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
491 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
492 e->rx_jumbo_pending = 0;
493 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
494}
495
496static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
497{
498 int i;
499 const struct port_info *pi = netdev_priv(dev);
500 struct adapter *adapter = pi->adapter;
501 struct sge *s = &adapter->sge;
502
503 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
504 e->tx_pending > MAX_TXQ_ENTRIES ||
505 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
506 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
507 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
508 return -EINVAL;
509
510 if (adapter->flags & FULL_INIT_DONE)
511 return -EBUSY;
512
513 for (i = 0; i < pi->nqsets; ++i) {
514 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
515 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
516 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
517 }
518 return 0;
519}
520
521/**
522 * set_rx_intr_params - set a net devices's RX interrupt holdoff paramete!
523 * @dev: the network device
524 * @us: the hold-off time in us, or 0 to disable timer
525 * @cnt: the hold-off packet count, or 0 to disable counter
526 *
527 * Set the RX interrupt hold-off parameters for a network device.
528 */
529static int set_rx_intr_params(struct net_device *dev,
530 unsigned int us, unsigned int cnt)
531{
532 int i, err;
533 struct port_info *pi = netdev_priv(dev);
534 struct adapter *adap = pi->adapter;
535 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
536
537 for (i = 0; i < pi->nqsets; i++, q++) {
538 err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt);
539 if (err)
540 return err;
541 }
542 return 0;
543}
544
545static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
546{
547 int i;
548 struct port_info *pi = netdev_priv(dev);
549 struct adapter *adap = pi->adapter;
550 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
551
552 for (i = 0; i < pi->nqsets; i++, q++)
553 q->rspq.adaptive_rx = adaptive_rx;
554
555 return 0;
556}
557
558static int get_adaptive_rx_setting(struct net_device *dev)
559{
560 struct port_info *pi = netdev_priv(dev);
561 struct adapter *adap = pi->adapter;
562 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
563
564 return q->rspq.adaptive_rx;
565}
566
567static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
568{
569 set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
570 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
571 c->rx_max_coalesced_frames);
572}
573
574static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
575{
576 const struct port_info *pi = netdev_priv(dev);
577 const struct adapter *adap = pi->adapter;
578 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
579
580 c->rx_coalesce_usecs = qtimer_val(adap, rq);
581 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
582 adap->sge.counter_val[rq->pktcnt_idx] : 0;
583 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
584 return 0;
585}
586
587/**
588 * eeprom_ptov - translate a physical EEPROM address to virtual
589 * @phys_addr: the physical EEPROM address
590 * @fn: the PCI function number
591 * @sz: size of function-specific area
592 *
593 * Translate a physical EEPROM address to virtual. The first 1K is
594 * accessed through virtual addresses starting at 31K, the rest is
595 * accessed through virtual addresses starting at 0.
596 *
597 * The mapping is as follows:
598 * [0..1K) -> [31K..32K)
599 * [1K..1K+A) -> [31K-A..31K)
600 * [1K+A..ES) -> [0..ES-A-1K)
601 *
602 * where A = @fn * @sz, and ES = EEPROM size.
603 */
604static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
605{
606 fn *= sz;
607 if (phys_addr < 1024)
608 return phys_addr + (31 << 10);
609 if (phys_addr < 1024 + fn)
610 return 31744 - fn + phys_addr - 1024;
611 if (phys_addr < EEPROMSIZE)
612 return phys_addr - 1024 - fn;
613 return -EINVAL;
614}
615
616/* The next two routines implement eeprom read/write from physical addresses.
617 */
618static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
619{
620 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
621
622 if (vaddr >= 0)
623 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
624 return vaddr < 0 ? vaddr : 0;
625}
626
627static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
628{
629 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
630
631 if (vaddr >= 0)
632 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
633 return vaddr < 0 ? vaddr : 0;
634}
635
636#define EEPROM_MAGIC 0x38E2F10C
637
638static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
639 u8 *data)
640{
641 int i, err = 0;
642 struct adapter *adapter = netdev2adap(dev);
643 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
644
645 if (!buf)
646 return -ENOMEM;
647
648 e->magic = EEPROM_MAGIC;
649 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
650 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
651
652 if (!err)
653 memcpy(data, buf + e->offset, e->len);
654 kfree(buf);
655 return err;
656}
657
/* ethtool set_eeprom handler: write @eeprom->len bytes from @data to the
 * serial EEPROM at @eeprom->offset.  VPD writes are 32-bit, so unaligned
 * requests are widened with a read-modify-write of the edge words.  The
 * EEPROM write protect is lifted only for the duration of the write.
 */
658static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
659 u8 *data)
660{
661 u8 *buf;
662 int err = 0;
663 u32 aligned_offset, aligned_len, *p;
664 struct adapter *adapter = netdev2adap(dev);
665
666 if (eeprom->magic != EEPROM_MAGIC)
667 return -EINVAL;
668
/* Widen the request to a 4-byte-aligned range */
669 aligned_offset = eeprom->offset & ~3;
670 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
671
/* Non-primary PCI functions may only write their own EEPROM area */
672 if (adapter->fn > 0) {
673 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
674
675 if (aligned_offset < start ||
676 aligned_offset + aligned_len > start + EEPROMPFSIZE)
677 return -EPERM;
678 }
679
680 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
681 /* RMW possibly needed for first or last words.
682 */
683 buf = kmalloc(aligned_len, GFP_KERNEL);
684 if (!buf)
685 return -ENOMEM;
686 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
687 if (!err && aligned_len > 4)
688 err = eeprom_rd_phys(adapter,
689 aligned_offset + aligned_len - 4,
690 (u32 *)&buf[aligned_len - 4]);
691 if (err)
692 goto out;
693 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
694 } else {
/* Already aligned: write straight from the caller's buffer */
695 buf = data;
696 }
697
/* Disable write protect, write all words, then re-enable it */
698 err = t4_seeprom_wp(adapter, false);
699 if (err)
700 goto out;
701
702 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
703 err = eeprom_wr_phys(adapter, aligned_offset, *p);
704 aligned_offset += 4;
705 }
706
707 if (!err)
708 err = t4_seeprom_wp(adapter, true);
709out:
710 if (buf != data)
711 kfree(buf);
712 return err;
713}
714
715static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
716{
717 int ret;
718 const struct firmware *fw;
719 struct adapter *adap = netdev2adap(netdev);
720 unsigned int mbox = PCIE_FW_MASTER_M + 1;
721
722 ef->data[sizeof(ef->data) - 1] = '\0';
723 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
724 if (ret < 0)
725 return ret;
726
727 /* If the adapter has been fully initialized then we'll go ahead and
728 * try to get the firmware's cooperation in upgrading to the new
729 * firmware image otherwise we'll try to do the entire job from the
730 * host ... and we always "force" the operation in this path.
731 */
732 if (adap->flags & FULL_INIT_DONE)
733 mbox = adap->mbox;
734
735 ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
736 release_firmware(fw);
737 if (!ret)
738 dev_info(adap->pdev_dev,
739 "loaded firmware %s, reload cxgb4 driver\n", ef->data);
740 return ret;
741}
742
743#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
744#define BCAST_CRC 0xa0ccc1a6
745
746static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
747{
748 wol->supported = WAKE_BCAST | WAKE_MAGIC;
749 wol->wolopts = netdev2adap(dev)->wol;
750 memset(&wol->sopass, 0, sizeof(wol->sopass));
751}
752
753static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
754{
755 int err = 0;
756 struct port_info *pi = netdev_priv(dev);
757
758 if (wol->wolopts & ~WOL_SUPPORTED)
759 return -EINVAL;
760 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
761 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
762 if (wol->wolopts & WAKE_BCAST) {
763 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
764 ~0ULL, 0, false);
765 if (!err)
766 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
767 ~6ULL, ~0ULL, BCAST_CRC, true);
768 } else {
769 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
770 }
771 return err;
772}
773
774static u32 get_rss_table_size(struct net_device *dev)
775{
776 const struct port_info *pi = netdev_priv(dev);
777
778 return pi->rss_size;
779}
780
781static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
782{
783 const struct port_info *pi = netdev_priv(dev);
784 unsigned int n = pi->rss_size;
785
786 if (hfunc)
787 *hfunc = ETH_RSS_HASH_TOP;
788 if (!p)
789 return 0;
790 while (n--)
791 p[n] = pi->rss[n];
792 return 0;
793}
794
795static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
796 const u8 hfunc)
797{
798 unsigned int i;
799 struct port_info *pi = netdev_priv(dev);
800
801 /* We require at least one supported parameter to be changed and no
802 * change in any of the unsupported parameters
803 */
804 if (key ||
805 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
806 return -EOPNOTSUPP;
807 if (!p)
808 return 0;
809
810 for (i = 0; i < pi->rss_size; i++)
811 pi->rss[i] = p[i];
812 if (pi->adapter->flags & FULL_INIT_DONE)
813 return cxgb4_write_rss(pi, pi->rss);
814 return 0;
815}
816
817static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
818 u32 *rules)
819{
820 const struct port_info *pi = netdev_priv(dev);
821
822 switch (info->cmd) {
823 case ETHTOOL_GRXFH: {
824 unsigned int v = pi->rss_mode;
825
826 info->data = 0;
827 switch (info->flow_type) {
828 case TCP_V4_FLOW:
829 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
830 info->data = RXH_IP_SRC | RXH_IP_DST |
831 RXH_L4_B_0_1 | RXH_L4_B_2_3;
832 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
833 info->data = RXH_IP_SRC | RXH_IP_DST;
834 break;
835 case UDP_V4_FLOW:
836 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
837 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
838 info->data = RXH_IP_SRC | RXH_IP_DST |
839 RXH_L4_B_0_1 | RXH_L4_B_2_3;
840 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
841 info->data = RXH_IP_SRC | RXH_IP_DST;
842 break;
843 case SCTP_V4_FLOW:
844 case AH_ESP_V4_FLOW:
845 case IPV4_FLOW:
846 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
847 info->data = RXH_IP_SRC | RXH_IP_DST;
848 break;
849 case TCP_V6_FLOW:
850 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
851 info->data = RXH_IP_SRC | RXH_IP_DST |
852 RXH_L4_B_0_1 | RXH_L4_B_2_3;
853 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
854 info->data = RXH_IP_SRC | RXH_IP_DST;
855 break;
856 case UDP_V6_FLOW:
857 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
858 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
859 info->data = RXH_IP_SRC | RXH_IP_DST |
860 RXH_L4_B_0_1 | RXH_L4_B_2_3;
861 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
862 info->data = RXH_IP_SRC | RXH_IP_DST;
863 break;
864 case SCTP_V6_FLOW:
865 case AH_ESP_V6_FLOW:
866 case IPV6_FLOW:
867 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
868 info->data = RXH_IP_SRC | RXH_IP_DST;
869 break;
870 }
871 return 0;
872 }
873 case ETHTOOL_GRXRINGS:
874 info->data = pi->nqsets;
875 return 0;
876 }
877 return -EOPNOTSUPP;
878}
879
/* ethtool method table for cxgb4 Ethernet devices; installed on each
 * net_device by cxgb4_set_ethtool_ops() below.
 */
880static const struct ethtool_ops cxgb_ethtool_ops = {
881 .get_settings = get_settings,
882 .set_settings = set_settings,
883 .get_drvinfo = get_drvinfo,
884 .get_msglevel = get_msglevel,
885 .set_msglevel = set_msglevel,
886 .get_ringparam = get_sge_param,
887 .set_ringparam = set_sge_param,
888 .get_coalesce = get_coalesce,
889 .set_coalesce = set_coalesce,
890 .get_eeprom_len = get_eeprom_len,
891 .get_eeprom = get_eeprom,
892 .set_eeprom = set_eeprom,
893 .get_pauseparam = get_pauseparam,
894 .set_pauseparam = set_pauseparam,
895 .get_link = ethtool_op_get_link,
896 .get_strings = get_strings,
897 .set_phys_id = identify_port,
898 .nway_reset = restart_autoneg,
899 .get_sset_count = get_sset_count,
900 .get_ethtool_stats = get_stats,
901 .get_regs_len = get_regs_len,
902 .get_regs = get_regs,
903 .get_wol = get_wol,
904 .set_wol = set_wol,
905 .get_rxnfc = get_rxnfc,
906 .get_rxfh_indir_size = get_rss_table_size,
907 .get_rxfh = get_rss_table,
908 .set_rxfh = set_rss_table,
909 .flash_device = set_flash,
910};
911
/* Install the cxgb4 ethtool operation table on @netdev. */
912void cxgb4_set_ethtool_ops(struct net_device *netdev)
913{
914 netdev->ethtool_ops = &cxgb_ethtool_ops;
915}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index e40e283ff36c..24e10ea3d5ef 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -76,23 +76,15 @@
76#include "clip_tbl.h" 76#include "clip_tbl.h"
77#include "l2t.h" 77#include "l2t.h"
78 78
79char cxgb4_driver_name[] = KBUILD_MODNAME;
80
79#ifdef DRV_VERSION 81#ifdef DRV_VERSION
80#undef DRV_VERSION 82#undef DRV_VERSION
81#endif 83#endif
82#define DRV_VERSION "2.0.0-ko" 84#define DRV_VERSION "2.0.0-ko"
85const char cxgb4_driver_version[] = DRV_VERSION;
83#define DRV_DESC "Chelsio T4/T5 Network Driver" 86#define DRV_DESC "Chelsio T4/T5 Network Driver"
84 87
85enum {
86 MAX_TXQ_ENTRIES = 16384,
87 MAX_CTRL_TXQ_ENTRIES = 1024,
88 MAX_RSPQ_ENTRIES = 16384,
89 MAX_RX_BUFFERS = 16384,
90 MIN_TXQ_ENTRIES = 32,
91 MIN_CTRL_TXQ_ENTRIES = 32,
92 MIN_RSPQ_ENTRIES = 128,
93 MIN_FL_ENTRIES = 16
94};
95
96/* Host shadow copy of ingress filter entry. This is in host native format 88/* Host shadow copy of ingress filter entry. This is in host native format
97 * and doesn't match the ordering or bit order, etc. of the hardware of the 89 * and doesn't match the ordering or bit order, etc. of the hardware of the
98 * firmware command. The use of bit-field structure elements is purely to 90 * firmware command. The use of bit-field structure elements is purely to
@@ -857,14 +849,14 @@ static void free_msix_queue_irqs(struct adapter *adap)
857} 849}
858 850
859/** 851/**
860 * write_rss - write the RSS table for a given port 852 * cxgb4_write_rss - write the RSS table for a given port
861 * @pi: the port 853 * @pi: the port
862 * @queues: array of queue indices for RSS 854 * @queues: array of queue indices for RSS
863 * 855 *
864 * Sets up the portion of the HW RSS table for the port's VI to distribute 856 * Sets up the portion of the HW RSS table for the port's VI to distribute
865 * packets to the Rx queues in @queues. 857 * packets to the Rx queues in @queues.
866 */ 858 */
867static int write_rss(const struct port_info *pi, const u16 *queues) 859int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
868{ 860{
869 u16 *rss; 861 u16 *rss;
870 int i, err; 862 int i, err;
@@ -897,7 +889,7 @@ static int setup_rss(struct adapter *adap)
897 for_each_port(adap, i) { 889 for_each_port(adap, i) {
898 const struct port_info *pi = adap2pinfo(adap, i); 890 const struct port_info *pi = adap2pinfo(adap, i);
899 891
900 err = write_rss(pi, pi->rss); 892 err = cxgb4_write_rss(pi, pi->rss);
901 if (err) 893 if (err)
902 return err; 894 return err;
903 } 895 }
@@ -920,7 +912,7 @@ static void quiesce_rx(struct adapter *adap)
920{ 912{
921 int i; 913 int i;
922 914
923 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { 915 for (i = 0; i < adap->sge.ingr_sz; i++) {
924 struct sge_rspq *q = adap->sge.ingr_map[i]; 916 struct sge_rspq *q = adap->sge.ingr_map[i];
925 917
926 if (q && q->handler) { 918 if (q && q->handler) {
@@ -934,6 +926,21 @@ static void quiesce_rx(struct adapter *adap)
934 } 926 }
935} 927}
936 928
929/* Disable interrupt and napi handler */
930static void disable_interrupts(struct adapter *adap)
931{
932 if (adap->flags & FULL_INIT_DONE) {
933 t4_intr_disable(adap);
934 if (adap->flags & USING_MSIX) {
935 free_msix_queue_irqs(adap);
936 free_irq(adap->msix_info[0].vec, adap);
937 } else {
938 free_irq(adap->pdev->irq, adap);
939 }
940 quiesce_rx(adap);
941 }
942}
943
937/* 944/*
938 * Enable NAPI scheduling and interrupt generation for all Rx queues. 945 * Enable NAPI scheduling and interrupt generation for all Rx queues.
939 */ 946 */
@@ -941,7 +948,7 @@ static void enable_rx(struct adapter *adap)
941{ 948{
942 int i; 949 int i;
943 950
944 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { 951 for (i = 0; i < adap->sge.ingr_sz; i++) {
945 struct sge_rspq *q = adap->sge.ingr_map[i]; 952 struct sge_rspq *q = adap->sge.ingr_map[i];
946 953
947 if (!q) 954 if (!q)
@@ -992,8 +999,8 @@ static int setup_sge_queues(struct adapter *adap)
992 int err, msi_idx, i, j; 999 int err, msi_idx, i, j;
993 struct sge *s = &adap->sge; 1000 struct sge *s = &adap->sge;
994 1001
995 bitmap_zero(s->starving_fl, MAX_EGRQ); 1002 bitmap_zero(s->starving_fl, s->egr_sz);
996 bitmap_zero(s->txq_maperr, MAX_EGRQ); 1003 bitmap_zero(s->txq_maperr, s->egr_sz);
997 1004
998 if (adap->flags & USING_MSIX) 1005 if (adap->flags & USING_MSIX)
999 msi_idx = 1; /* vector 0 is for non-queue interrupts */ 1006 msi_idx = 1; /* vector 0 is for non-queue interrupts */
@@ -1005,6 +1012,19 @@ static int setup_sge_queues(struct adapter *adap)
1005 msi_idx = -((int)s->intrq.abs_id + 1); 1012 msi_idx = -((int)s->intrq.abs_id + 1);
1006 } 1013 }
1007 1014
1015 /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
1016 * don't forget to update the following which need to be
1017 * synchronized to and changes here.
1018 *
1019 * 1. The calculations of MAX_INGQ in cxgb4.h.
1020 *
1021 * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
1022 * to accommodate any new/deleted Ingress Queues
1023 * which need MSI-X Vectors.
1024 *
1025 * 3. Update sge_qinfo_show() to include information on the
1026 * new/deleted queues.
1027 */
1008 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], 1028 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
1009 msi_idx, NULL, fwevtq_handler); 1029 msi_idx, NULL, fwevtq_handler);
1010 if (err) { 1030 if (err) {
@@ -1299,1192 +1319,6 @@ static inline int is_offload(const struct adapter *adap)
1299 return adap->params.offload; 1319 return adap->params.offload;
1300} 1320}
1301 1321
1302/*
1303 * Implementation of ethtool operations.
1304 */
1305
1306static u32 get_msglevel(struct net_device *dev)
1307{
1308 return netdev2adap(dev)->msg_enable;
1309}
1310
1311static void set_msglevel(struct net_device *dev, u32 val)
1312{
1313 netdev2adap(dev)->msg_enable = val;
1314}
1315
1316static char stats_strings[][ETH_GSTRING_LEN] = {
1317 "TxOctetsOK ",
1318 "TxFramesOK ",
1319 "TxBroadcastFrames ",
1320 "TxMulticastFrames ",
1321 "TxUnicastFrames ",
1322 "TxErrorFrames ",
1323
1324 "TxFrames64 ",
1325 "TxFrames65To127 ",
1326 "TxFrames128To255 ",
1327 "TxFrames256To511 ",
1328 "TxFrames512To1023 ",
1329 "TxFrames1024To1518 ",
1330 "TxFrames1519ToMax ",
1331
1332 "TxFramesDropped ",
1333 "TxPauseFrames ",
1334 "TxPPP0Frames ",
1335 "TxPPP1Frames ",
1336 "TxPPP2Frames ",
1337 "TxPPP3Frames ",
1338 "TxPPP4Frames ",
1339 "TxPPP5Frames ",
1340 "TxPPP6Frames ",
1341 "TxPPP7Frames ",
1342
1343 "RxOctetsOK ",
1344 "RxFramesOK ",
1345 "RxBroadcastFrames ",
1346 "RxMulticastFrames ",
1347 "RxUnicastFrames ",
1348
1349 "RxFramesTooLong ",
1350 "RxJabberErrors ",
1351 "RxFCSErrors ",
1352 "RxLengthErrors ",
1353 "RxSymbolErrors ",
1354 "RxRuntFrames ",
1355
1356 "RxFrames64 ",
1357 "RxFrames65To127 ",
1358 "RxFrames128To255 ",
1359 "RxFrames256To511 ",
1360 "RxFrames512To1023 ",
1361 "RxFrames1024To1518 ",
1362 "RxFrames1519ToMax ",
1363
1364 "RxPauseFrames ",
1365 "RxPPP0Frames ",
1366 "RxPPP1Frames ",
1367 "RxPPP2Frames ",
1368 "RxPPP3Frames ",
1369 "RxPPP4Frames ",
1370 "RxPPP5Frames ",
1371 "RxPPP6Frames ",
1372 "RxPPP7Frames ",
1373
1374 "RxBG0FramesDropped ",
1375 "RxBG1FramesDropped ",
1376 "RxBG2FramesDropped ",
1377 "RxBG3FramesDropped ",
1378 "RxBG0FramesTrunc ",
1379 "RxBG1FramesTrunc ",
1380 "RxBG2FramesTrunc ",
1381 "RxBG3FramesTrunc ",
1382
1383 "TSO ",
1384 "TxCsumOffload ",
1385 "RxCsumGood ",
1386 "VLANextractions ",
1387 "VLANinsertions ",
1388 "GROpackets ",
1389 "GROmerged ",
1390 "WriteCoalSuccess ",
1391 "WriteCoalFail ",
1392};
1393
1394static int get_sset_count(struct net_device *dev, int sset)
1395{
1396 switch (sset) {
1397 case ETH_SS_STATS:
1398 return ARRAY_SIZE(stats_strings);
1399 default:
1400 return -EOPNOTSUPP;
1401 }
1402}
1403
1404#define T4_REGMAP_SIZE (160 * 1024)
1405#define T5_REGMAP_SIZE (332 * 1024)
1406
1407static int get_regs_len(struct net_device *dev)
1408{
1409 struct adapter *adap = netdev2adap(dev);
1410 if (is_t4(adap->params.chip))
1411 return T4_REGMAP_SIZE;
1412 else
1413 return T5_REGMAP_SIZE;
1414}
1415
1416static int get_eeprom_len(struct net_device *dev)
1417{
1418 return EEPROMSIZE;
1419}
1420
1421static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1422{
1423 struct adapter *adapter = netdev2adap(dev);
1424 u32 exprom_vers;
1425
1426 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1427 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1428 strlcpy(info->bus_info, pci_name(adapter->pdev),
1429 sizeof(info->bus_info));
1430
1431 if (adapter->params.fw_vers)
1432 snprintf(info->fw_version, sizeof(info->fw_version),
1433 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1434 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
1435 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
1436 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
1437 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
1438 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
1439 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
1440 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
1441 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
1442
1443 if (!t4_get_exprom_version(adapter, &exprom_vers))
1444 snprintf(info->erom_version, sizeof(info->erom_version),
1445 "%u.%u.%u.%u",
1446 FW_HDR_FW_VER_MAJOR_G(exprom_vers),
1447 FW_HDR_FW_VER_MINOR_G(exprom_vers),
1448 FW_HDR_FW_VER_MICRO_G(exprom_vers),
1449 FW_HDR_FW_VER_BUILD_G(exprom_vers));
1450}
1451
1452static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1453{
1454 if (stringset == ETH_SS_STATS)
1455 memcpy(data, stats_strings, sizeof(stats_strings));
1456}
1457
1458/*
1459 * port stats maintained per queue of the port. They should be in the same
1460 * order as in stats_strings above.
1461 */
1462struct queue_port_stats {
1463 u64 tso;
1464 u64 tx_csum;
1465 u64 rx_csum;
1466 u64 vlan_ex;
1467 u64 vlan_ins;
1468 u64 gro_pkts;
1469 u64 gro_merged;
1470};
1471
1472static void collect_sge_port_stats(const struct adapter *adap,
1473 const struct port_info *p, struct queue_port_stats *s)
1474{
1475 int i;
1476 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1477 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1478
1479 memset(s, 0, sizeof(*s));
1480 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1481 s->tso += tx->tso;
1482 s->tx_csum += tx->tx_cso;
1483 s->rx_csum += rx->stats.rx_cso;
1484 s->vlan_ex += rx->stats.vlan_ex;
1485 s->vlan_ins += tx->vlan_ins;
1486 s->gro_pkts += rx->stats.lro_pkts;
1487 s->gro_merged += rx->stats.lro_merged;
1488 }
1489}
1490
1491static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1492 u64 *data)
1493{
1494 struct port_info *pi = netdev_priv(dev);
1495 struct adapter *adapter = pi->adapter;
1496 u32 val1, val2;
1497
1498 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1499
1500 data += sizeof(struct port_stats) / sizeof(u64);
1501 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1502 data += sizeof(struct queue_port_stats) / sizeof(u64);
1503 if (!is_t4(adapter->params.chip)) {
1504 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
1505 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
1506 val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
1507 *data = val1 - val2;
1508 data++;
1509 *data = val2;
1510 data++;
1511 } else {
1512 memset(data, 0, 2 * sizeof(u64));
1513 *data += 2;
1514 }
1515}
1516
1517/*
1518 * Return a version number to identify the type of adapter. The scheme is:
1519 * - bits 0..9: chip version
1520 * - bits 10..15: chip revision
1521 * - bits 16..23: register dump version
1522 */
1523static inline unsigned int mk_adap_vers(const struct adapter *ap)
1524{
1525 return CHELSIO_CHIP_VERSION(ap->params.chip) |
1526 (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
1527}
1528
1529static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1530 unsigned int end)
1531{
1532 u32 *p = buf + start;
1533
1534 for ( ; start <= end; start += sizeof(u32))
1535 *p++ = t4_read_reg(ap, start);
1536}
1537
1538static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1539 void *buf)
1540{
1541 static const unsigned int t4_reg_ranges[] = {
1542 0x1008, 0x1108,
1543 0x1180, 0x11b4,
1544 0x11fc, 0x123c,
1545 0x1300, 0x173c,
1546 0x1800, 0x18fc,
1547 0x3000, 0x30d8,
1548 0x30e0, 0x5924,
1549 0x5960, 0x59d4,
1550 0x5a00, 0x5af8,
1551 0x6000, 0x6098,
1552 0x6100, 0x6150,
1553 0x6200, 0x6208,
1554 0x6240, 0x6248,
1555 0x6280, 0x6338,
1556 0x6370, 0x638c,
1557 0x6400, 0x643c,
1558 0x6500, 0x6524,
1559 0x6a00, 0x6a38,
1560 0x6a60, 0x6a78,
1561 0x6b00, 0x6b84,
1562 0x6bf0, 0x6c84,
1563 0x6cf0, 0x6d84,
1564 0x6df0, 0x6e84,
1565 0x6ef0, 0x6f84,
1566 0x6ff0, 0x7084,
1567 0x70f0, 0x7184,
1568 0x71f0, 0x7284,
1569 0x72f0, 0x7384,
1570 0x73f0, 0x7450,
1571 0x7500, 0x7530,
1572 0x7600, 0x761c,
1573 0x7680, 0x76cc,
1574 0x7700, 0x7798,
1575 0x77c0, 0x77fc,
1576 0x7900, 0x79fc,
1577 0x7b00, 0x7c38,
1578 0x7d00, 0x7efc,
1579 0x8dc0, 0x8e1c,
1580 0x8e30, 0x8e78,
1581 0x8ea0, 0x8f6c,
1582 0x8fc0, 0x9074,
1583 0x90fc, 0x90fc,
1584 0x9400, 0x9458,
1585 0x9600, 0x96bc,
1586 0x9800, 0x9808,
1587 0x9820, 0x983c,
1588 0x9850, 0x9864,
1589 0x9c00, 0x9c6c,
1590 0x9c80, 0x9cec,
1591 0x9d00, 0x9d6c,
1592 0x9d80, 0x9dec,
1593 0x9e00, 0x9e6c,
1594 0x9e80, 0x9eec,
1595 0x9f00, 0x9f6c,
1596 0x9f80, 0x9fec,
1597 0xd004, 0xd03c,
1598 0xdfc0, 0xdfe0,
1599 0xe000, 0xea7c,
1600 0xf000, 0x11110,
1601 0x11118, 0x11190,
1602 0x19040, 0x1906c,
1603 0x19078, 0x19080,
1604 0x1908c, 0x19124,
1605 0x19150, 0x191b0,
1606 0x191d0, 0x191e8,
1607 0x19238, 0x1924c,
1608 0x193f8, 0x19474,
1609 0x19490, 0x194f8,
1610 0x19800, 0x19f30,
1611 0x1a000, 0x1a06c,
1612 0x1a0b0, 0x1a120,
1613 0x1a128, 0x1a138,
1614 0x1a190, 0x1a1c4,
1615 0x1a1fc, 0x1a1fc,
1616 0x1e040, 0x1e04c,
1617 0x1e284, 0x1e28c,
1618 0x1e2c0, 0x1e2c0,
1619 0x1e2e0, 0x1e2e0,
1620 0x1e300, 0x1e384,
1621 0x1e3c0, 0x1e3c8,
1622 0x1e440, 0x1e44c,
1623 0x1e684, 0x1e68c,
1624 0x1e6c0, 0x1e6c0,
1625 0x1e6e0, 0x1e6e0,
1626 0x1e700, 0x1e784,
1627 0x1e7c0, 0x1e7c8,
1628 0x1e840, 0x1e84c,
1629 0x1ea84, 0x1ea8c,
1630 0x1eac0, 0x1eac0,
1631 0x1eae0, 0x1eae0,
1632 0x1eb00, 0x1eb84,
1633 0x1ebc0, 0x1ebc8,
1634 0x1ec40, 0x1ec4c,
1635 0x1ee84, 0x1ee8c,
1636 0x1eec0, 0x1eec0,
1637 0x1eee0, 0x1eee0,
1638 0x1ef00, 0x1ef84,
1639 0x1efc0, 0x1efc8,
1640 0x1f040, 0x1f04c,
1641 0x1f284, 0x1f28c,
1642 0x1f2c0, 0x1f2c0,
1643 0x1f2e0, 0x1f2e0,
1644 0x1f300, 0x1f384,
1645 0x1f3c0, 0x1f3c8,
1646 0x1f440, 0x1f44c,
1647 0x1f684, 0x1f68c,
1648 0x1f6c0, 0x1f6c0,
1649 0x1f6e0, 0x1f6e0,
1650 0x1f700, 0x1f784,
1651 0x1f7c0, 0x1f7c8,
1652 0x1f840, 0x1f84c,
1653 0x1fa84, 0x1fa8c,
1654 0x1fac0, 0x1fac0,
1655 0x1fae0, 0x1fae0,
1656 0x1fb00, 0x1fb84,
1657 0x1fbc0, 0x1fbc8,
1658 0x1fc40, 0x1fc4c,
1659 0x1fe84, 0x1fe8c,
1660 0x1fec0, 0x1fec0,
1661 0x1fee0, 0x1fee0,
1662 0x1ff00, 0x1ff84,
1663 0x1ffc0, 0x1ffc8,
1664 0x20000, 0x2002c,
1665 0x20100, 0x2013c,
1666 0x20190, 0x201c8,
1667 0x20200, 0x20318,
1668 0x20400, 0x20528,
1669 0x20540, 0x20614,
1670 0x21000, 0x21040,
1671 0x2104c, 0x21060,
1672 0x210c0, 0x210ec,
1673 0x21200, 0x21268,
1674 0x21270, 0x21284,
1675 0x212fc, 0x21388,
1676 0x21400, 0x21404,
1677 0x21500, 0x21518,
1678 0x2152c, 0x2153c,
1679 0x21550, 0x21554,
1680 0x21600, 0x21600,
1681 0x21608, 0x21628,
1682 0x21630, 0x2163c,
1683 0x21700, 0x2171c,
1684 0x21780, 0x2178c,
1685 0x21800, 0x21c38,
1686 0x21c80, 0x21d7c,
1687 0x21e00, 0x21e04,
1688 0x22000, 0x2202c,
1689 0x22100, 0x2213c,
1690 0x22190, 0x221c8,
1691 0x22200, 0x22318,
1692 0x22400, 0x22528,
1693 0x22540, 0x22614,
1694 0x23000, 0x23040,
1695 0x2304c, 0x23060,
1696 0x230c0, 0x230ec,
1697 0x23200, 0x23268,
1698 0x23270, 0x23284,
1699 0x232fc, 0x23388,
1700 0x23400, 0x23404,
1701 0x23500, 0x23518,
1702 0x2352c, 0x2353c,
1703 0x23550, 0x23554,
1704 0x23600, 0x23600,
1705 0x23608, 0x23628,
1706 0x23630, 0x2363c,
1707 0x23700, 0x2371c,
1708 0x23780, 0x2378c,
1709 0x23800, 0x23c38,
1710 0x23c80, 0x23d7c,
1711 0x23e00, 0x23e04,
1712 0x24000, 0x2402c,
1713 0x24100, 0x2413c,
1714 0x24190, 0x241c8,
1715 0x24200, 0x24318,
1716 0x24400, 0x24528,
1717 0x24540, 0x24614,
1718 0x25000, 0x25040,
1719 0x2504c, 0x25060,
1720 0x250c0, 0x250ec,
1721 0x25200, 0x25268,
1722 0x25270, 0x25284,
1723 0x252fc, 0x25388,
1724 0x25400, 0x25404,
1725 0x25500, 0x25518,
1726 0x2552c, 0x2553c,
1727 0x25550, 0x25554,
1728 0x25600, 0x25600,
1729 0x25608, 0x25628,
1730 0x25630, 0x2563c,
1731 0x25700, 0x2571c,
1732 0x25780, 0x2578c,
1733 0x25800, 0x25c38,
1734 0x25c80, 0x25d7c,
1735 0x25e00, 0x25e04,
1736 0x26000, 0x2602c,
1737 0x26100, 0x2613c,
1738 0x26190, 0x261c8,
1739 0x26200, 0x26318,
1740 0x26400, 0x26528,
1741 0x26540, 0x26614,
1742 0x27000, 0x27040,
1743 0x2704c, 0x27060,
1744 0x270c0, 0x270ec,
1745 0x27200, 0x27268,
1746 0x27270, 0x27284,
1747 0x272fc, 0x27388,
1748 0x27400, 0x27404,
1749 0x27500, 0x27518,
1750 0x2752c, 0x2753c,
1751 0x27550, 0x27554,
1752 0x27600, 0x27600,
1753 0x27608, 0x27628,
1754 0x27630, 0x2763c,
1755 0x27700, 0x2771c,
1756 0x27780, 0x2778c,
1757 0x27800, 0x27c38,
1758 0x27c80, 0x27d7c,
1759 0x27e00, 0x27e04
1760 };
1761
1762 static const unsigned int t5_reg_ranges[] = {
1763 0x1008, 0x1148,
1764 0x1180, 0x11b4,
1765 0x11fc, 0x123c,
1766 0x1280, 0x173c,
1767 0x1800, 0x18fc,
1768 0x3000, 0x3028,
1769 0x3060, 0x30d8,
1770 0x30e0, 0x30fc,
1771 0x3140, 0x357c,
1772 0x35a8, 0x35cc,
1773 0x35ec, 0x35ec,
1774 0x3600, 0x5624,
1775 0x56cc, 0x575c,
1776 0x580c, 0x5814,
1777 0x5890, 0x58bc,
1778 0x5940, 0x59dc,
1779 0x59fc, 0x5a18,
1780 0x5a60, 0x5a9c,
1781 0x5b9c, 0x5bfc,
1782 0x6000, 0x6040,
1783 0x6058, 0x614c,
1784 0x7700, 0x7798,
1785 0x77c0, 0x78fc,
1786 0x7b00, 0x7c54,
1787 0x7d00, 0x7efc,
1788 0x8dc0, 0x8de0,
1789 0x8df8, 0x8e84,
1790 0x8ea0, 0x8f84,
1791 0x8fc0, 0x90f8,
1792 0x9400, 0x9470,
1793 0x9600, 0x96f4,
1794 0x9800, 0x9808,
1795 0x9820, 0x983c,
1796 0x9850, 0x9864,
1797 0x9c00, 0x9c6c,
1798 0x9c80, 0x9cec,
1799 0x9d00, 0x9d6c,
1800 0x9d80, 0x9dec,
1801 0x9e00, 0x9e6c,
1802 0x9e80, 0x9eec,
1803 0x9f00, 0x9f6c,
1804 0x9f80, 0xa020,
1805 0xd004, 0xd03c,
1806 0xdfc0, 0xdfe0,
1807 0xe000, 0x11088,
1808 0x1109c, 0x11110,
1809 0x11118, 0x1117c,
1810 0x11190, 0x11204,
1811 0x19040, 0x1906c,
1812 0x19078, 0x19080,
1813 0x1908c, 0x19124,
1814 0x19150, 0x191b0,
1815 0x191d0, 0x191e8,
1816 0x19238, 0x19290,
1817 0x193f8, 0x19474,
1818 0x19490, 0x194cc,
1819 0x194f0, 0x194f8,
1820 0x19c00, 0x19c60,
1821 0x19c94, 0x19e10,
1822 0x19e50, 0x19f34,
1823 0x19f40, 0x19f50,
1824 0x19f90, 0x19fe4,
1825 0x1a000, 0x1a06c,
1826 0x1a0b0, 0x1a120,
1827 0x1a128, 0x1a138,
1828 0x1a190, 0x1a1c4,
1829 0x1a1fc, 0x1a1fc,
1830 0x1e008, 0x1e00c,
1831 0x1e040, 0x1e04c,
1832 0x1e284, 0x1e290,
1833 0x1e2c0, 0x1e2c0,
1834 0x1e2e0, 0x1e2e0,
1835 0x1e300, 0x1e384,
1836 0x1e3c0, 0x1e3c8,
1837 0x1e408, 0x1e40c,
1838 0x1e440, 0x1e44c,
1839 0x1e684, 0x1e690,
1840 0x1e6c0, 0x1e6c0,
1841 0x1e6e0, 0x1e6e0,
1842 0x1e700, 0x1e784,
1843 0x1e7c0, 0x1e7c8,
1844 0x1e808, 0x1e80c,
1845 0x1e840, 0x1e84c,
1846 0x1ea84, 0x1ea90,
1847 0x1eac0, 0x1eac0,
1848 0x1eae0, 0x1eae0,
1849 0x1eb00, 0x1eb84,
1850 0x1ebc0, 0x1ebc8,
1851 0x1ec08, 0x1ec0c,
1852 0x1ec40, 0x1ec4c,
1853 0x1ee84, 0x1ee90,
1854 0x1eec0, 0x1eec0,
1855 0x1eee0, 0x1eee0,
1856 0x1ef00, 0x1ef84,
1857 0x1efc0, 0x1efc8,
1858 0x1f008, 0x1f00c,
1859 0x1f040, 0x1f04c,
1860 0x1f284, 0x1f290,
1861 0x1f2c0, 0x1f2c0,
1862 0x1f2e0, 0x1f2e0,
1863 0x1f300, 0x1f384,
1864 0x1f3c0, 0x1f3c8,
1865 0x1f408, 0x1f40c,
1866 0x1f440, 0x1f44c,
1867 0x1f684, 0x1f690,
1868 0x1f6c0, 0x1f6c0,
1869 0x1f6e0, 0x1f6e0,
1870 0x1f700, 0x1f784,
1871 0x1f7c0, 0x1f7c8,
1872 0x1f808, 0x1f80c,
1873 0x1f840, 0x1f84c,
1874 0x1fa84, 0x1fa90,
1875 0x1fac0, 0x1fac0,
1876 0x1fae0, 0x1fae0,
1877 0x1fb00, 0x1fb84,
1878 0x1fbc0, 0x1fbc8,
1879 0x1fc08, 0x1fc0c,
1880 0x1fc40, 0x1fc4c,
1881 0x1fe84, 0x1fe90,
1882 0x1fec0, 0x1fec0,
1883 0x1fee0, 0x1fee0,
1884 0x1ff00, 0x1ff84,
1885 0x1ffc0, 0x1ffc8,
1886 0x30000, 0x30030,
1887 0x30100, 0x30144,
1888 0x30190, 0x301d0,
1889 0x30200, 0x30318,
1890 0x30400, 0x3052c,
1891 0x30540, 0x3061c,
1892 0x30800, 0x30834,
1893 0x308c0, 0x30908,
1894 0x30910, 0x309ac,
1895 0x30a00, 0x30a04,
1896 0x30a0c, 0x30a2c,
1897 0x30a44, 0x30a50,
1898 0x30a74, 0x30c24,
1899 0x30d08, 0x30d14,
1900 0x30d1c, 0x30d20,
1901 0x30d3c, 0x30d50,
1902 0x31200, 0x3120c,
1903 0x31220, 0x31220,
1904 0x31240, 0x31240,
1905 0x31600, 0x31600,
1906 0x31608, 0x3160c,
1907 0x31a00, 0x31a1c,
1908 0x31e04, 0x31e20,
1909 0x31e38, 0x31e3c,
1910 0x31e80, 0x31e80,
1911 0x31e88, 0x31ea8,
1912 0x31eb0, 0x31eb4,
1913 0x31ec8, 0x31ed4,
1914 0x31fb8, 0x32004,
1915 0x32208, 0x3223c,
1916 0x32600, 0x32630,
1917 0x32a00, 0x32abc,
1918 0x32b00, 0x32b70,
1919 0x33000, 0x33048,
1920 0x33060, 0x3309c,
1921 0x330f0, 0x33148,
1922 0x33160, 0x3319c,
1923 0x331f0, 0x332e4,
1924 0x332f8, 0x333e4,
1925 0x333f8, 0x33448,
1926 0x33460, 0x3349c,
1927 0x334f0, 0x33548,
1928 0x33560, 0x3359c,
1929 0x335f0, 0x336e4,
1930 0x336f8, 0x337e4,
1931 0x337f8, 0x337fc,
1932 0x33814, 0x33814,
1933 0x3382c, 0x3382c,
1934 0x33880, 0x3388c,
1935 0x338e8, 0x338ec,
1936 0x33900, 0x33948,
1937 0x33960, 0x3399c,
1938 0x339f0, 0x33ae4,
1939 0x33af8, 0x33b10,
1940 0x33b28, 0x33b28,
1941 0x33b3c, 0x33b50,
1942 0x33bf0, 0x33c10,
1943 0x33c28, 0x33c28,
1944 0x33c3c, 0x33c50,
1945 0x33cf0, 0x33cfc,
1946 0x34000, 0x34030,
1947 0x34100, 0x34144,
1948 0x34190, 0x341d0,
1949 0x34200, 0x34318,
1950 0x34400, 0x3452c,
1951 0x34540, 0x3461c,
1952 0x34800, 0x34834,
1953 0x348c0, 0x34908,
1954 0x34910, 0x349ac,
1955 0x34a00, 0x34a04,
1956 0x34a0c, 0x34a2c,
1957 0x34a44, 0x34a50,
1958 0x34a74, 0x34c24,
1959 0x34d08, 0x34d14,
1960 0x34d1c, 0x34d20,
1961 0x34d3c, 0x34d50,
1962 0x35200, 0x3520c,
1963 0x35220, 0x35220,
1964 0x35240, 0x35240,
1965 0x35600, 0x35600,
1966 0x35608, 0x3560c,
1967 0x35a00, 0x35a1c,
1968 0x35e04, 0x35e20,
1969 0x35e38, 0x35e3c,
1970 0x35e80, 0x35e80,
1971 0x35e88, 0x35ea8,
1972 0x35eb0, 0x35eb4,
1973 0x35ec8, 0x35ed4,
1974 0x35fb8, 0x36004,
1975 0x36208, 0x3623c,
1976 0x36600, 0x36630,
1977 0x36a00, 0x36abc,
1978 0x36b00, 0x36b70,
1979 0x37000, 0x37048,
1980 0x37060, 0x3709c,
1981 0x370f0, 0x37148,
1982 0x37160, 0x3719c,
1983 0x371f0, 0x372e4,
1984 0x372f8, 0x373e4,
1985 0x373f8, 0x37448,
1986 0x37460, 0x3749c,
1987 0x374f0, 0x37548,
1988 0x37560, 0x3759c,
1989 0x375f0, 0x376e4,
1990 0x376f8, 0x377e4,
1991 0x377f8, 0x377fc,
1992 0x37814, 0x37814,
1993 0x3782c, 0x3782c,
1994 0x37880, 0x3788c,
1995 0x378e8, 0x378ec,
1996 0x37900, 0x37948,
1997 0x37960, 0x3799c,
1998 0x379f0, 0x37ae4,
1999 0x37af8, 0x37b10,
2000 0x37b28, 0x37b28,
2001 0x37b3c, 0x37b50,
2002 0x37bf0, 0x37c10,
2003 0x37c28, 0x37c28,
2004 0x37c3c, 0x37c50,
2005 0x37cf0, 0x37cfc,
2006 0x38000, 0x38030,
2007 0x38100, 0x38144,
2008 0x38190, 0x381d0,
2009 0x38200, 0x38318,
2010 0x38400, 0x3852c,
2011 0x38540, 0x3861c,
2012 0x38800, 0x38834,
2013 0x388c0, 0x38908,
2014 0x38910, 0x389ac,
2015 0x38a00, 0x38a04,
2016 0x38a0c, 0x38a2c,
2017 0x38a44, 0x38a50,
2018 0x38a74, 0x38c24,
2019 0x38d08, 0x38d14,
2020 0x38d1c, 0x38d20,
2021 0x38d3c, 0x38d50,
2022 0x39200, 0x3920c,
2023 0x39220, 0x39220,
2024 0x39240, 0x39240,
2025 0x39600, 0x39600,
2026 0x39608, 0x3960c,
2027 0x39a00, 0x39a1c,
2028 0x39e04, 0x39e20,
2029 0x39e38, 0x39e3c,
2030 0x39e80, 0x39e80,
2031 0x39e88, 0x39ea8,
2032 0x39eb0, 0x39eb4,
2033 0x39ec8, 0x39ed4,
2034 0x39fb8, 0x3a004,
2035 0x3a208, 0x3a23c,
2036 0x3a600, 0x3a630,
2037 0x3aa00, 0x3aabc,
2038 0x3ab00, 0x3ab70,
2039 0x3b000, 0x3b048,
2040 0x3b060, 0x3b09c,
2041 0x3b0f0, 0x3b148,
2042 0x3b160, 0x3b19c,
2043 0x3b1f0, 0x3b2e4,
2044 0x3b2f8, 0x3b3e4,
2045 0x3b3f8, 0x3b448,
2046 0x3b460, 0x3b49c,
2047 0x3b4f0, 0x3b548,
2048 0x3b560, 0x3b59c,
2049 0x3b5f0, 0x3b6e4,
2050 0x3b6f8, 0x3b7e4,
2051 0x3b7f8, 0x3b7fc,
2052 0x3b814, 0x3b814,
2053 0x3b82c, 0x3b82c,
2054 0x3b880, 0x3b88c,
2055 0x3b8e8, 0x3b8ec,
2056 0x3b900, 0x3b948,
2057 0x3b960, 0x3b99c,
2058 0x3b9f0, 0x3bae4,
2059 0x3baf8, 0x3bb10,
2060 0x3bb28, 0x3bb28,
2061 0x3bb3c, 0x3bb50,
2062 0x3bbf0, 0x3bc10,
2063 0x3bc28, 0x3bc28,
2064 0x3bc3c, 0x3bc50,
2065 0x3bcf0, 0x3bcfc,
2066 0x3c000, 0x3c030,
2067 0x3c100, 0x3c144,
2068 0x3c190, 0x3c1d0,
2069 0x3c200, 0x3c318,
2070 0x3c400, 0x3c52c,
2071 0x3c540, 0x3c61c,
2072 0x3c800, 0x3c834,
2073 0x3c8c0, 0x3c908,
2074 0x3c910, 0x3c9ac,
2075 0x3ca00, 0x3ca04,
2076 0x3ca0c, 0x3ca2c,
2077 0x3ca44, 0x3ca50,
2078 0x3ca74, 0x3cc24,
2079 0x3cd08, 0x3cd14,
2080 0x3cd1c, 0x3cd20,
2081 0x3cd3c, 0x3cd50,
2082 0x3d200, 0x3d20c,
2083 0x3d220, 0x3d220,
2084 0x3d240, 0x3d240,
2085 0x3d600, 0x3d600,
2086 0x3d608, 0x3d60c,
2087 0x3da00, 0x3da1c,
2088 0x3de04, 0x3de20,
2089 0x3de38, 0x3de3c,
2090 0x3de80, 0x3de80,
2091 0x3de88, 0x3dea8,
2092 0x3deb0, 0x3deb4,
2093 0x3dec8, 0x3ded4,
2094 0x3dfb8, 0x3e004,
2095 0x3e208, 0x3e23c,
2096 0x3e600, 0x3e630,
2097 0x3ea00, 0x3eabc,
2098 0x3eb00, 0x3eb70,
2099 0x3f000, 0x3f048,
2100 0x3f060, 0x3f09c,
2101 0x3f0f0, 0x3f148,
2102 0x3f160, 0x3f19c,
2103 0x3f1f0, 0x3f2e4,
2104 0x3f2f8, 0x3f3e4,
2105 0x3f3f8, 0x3f448,
2106 0x3f460, 0x3f49c,
2107 0x3f4f0, 0x3f548,
2108 0x3f560, 0x3f59c,
2109 0x3f5f0, 0x3f6e4,
2110 0x3f6f8, 0x3f7e4,
2111 0x3f7f8, 0x3f7fc,
2112 0x3f814, 0x3f814,
2113 0x3f82c, 0x3f82c,
2114 0x3f880, 0x3f88c,
2115 0x3f8e8, 0x3f8ec,
2116 0x3f900, 0x3f948,
2117 0x3f960, 0x3f99c,
2118 0x3f9f0, 0x3fae4,
2119 0x3faf8, 0x3fb10,
2120 0x3fb28, 0x3fb28,
2121 0x3fb3c, 0x3fb50,
2122 0x3fbf0, 0x3fc10,
2123 0x3fc28, 0x3fc28,
2124 0x3fc3c, 0x3fc50,
2125 0x3fcf0, 0x3fcfc,
2126 0x40000, 0x4000c,
2127 0x40040, 0x40068,
2128 0x40080, 0x40144,
2129 0x40180, 0x4018c,
2130 0x40200, 0x40298,
2131 0x402ac, 0x4033c,
2132 0x403f8, 0x403fc,
2133 0x41304, 0x413c4,
2134 0x41400, 0x4141c,
2135 0x41480, 0x414d0,
2136 0x44000, 0x44078,
2137 0x440c0, 0x44278,
2138 0x442c0, 0x44478,
2139 0x444c0, 0x44678,
2140 0x446c0, 0x44878,
2141 0x448c0, 0x449fc,
2142 0x45000, 0x45068,
2143 0x45080, 0x45084,
2144 0x450a0, 0x450b0,
2145 0x45200, 0x45268,
2146 0x45280, 0x45284,
2147 0x452a0, 0x452b0,
2148 0x460c0, 0x460e4,
2149 0x47000, 0x4708c,
2150 0x47200, 0x47250,
2151 0x47400, 0x47420,
2152 0x47600, 0x47618,
2153 0x47800, 0x47814,
2154 0x48000, 0x4800c,
2155 0x48040, 0x48068,
2156 0x48080, 0x48144,
2157 0x48180, 0x4818c,
2158 0x48200, 0x48298,
2159 0x482ac, 0x4833c,
2160 0x483f8, 0x483fc,
2161 0x49304, 0x493c4,
2162 0x49400, 0x4941c,
2163 0x49480, 0x494d0,
2164 0x4c000, 0x4c078,
2165 0x4c0c0, 0x4c278,
2166 0x4c2c0, 0x4c478,
2167 0x4c4c0, 0x4c678,
2168 0x4c6c0, 0x4c878,
2169 0x4c8c0, 0x4c9fc,
2170 0x4d000, 0x4d068,
2171 0x4d080, 0x4d084,
2172 0x4d0a0, 0x4d0b0,
2173 0x4d200, 0x4d268,
2174 0x4d280, 0x4d284,
2175 0x4d2a0, 0x4d2b0,
2176 0x4e0c0, 0x4e0e4,
2177 0x4f000, 0x4f08c,
2178 0x4f200, 0x4f250,
2179 0x4f400, 0x4f420,
2180 0x4f600, 0x4f618,
2181 0x4f800, 0x4f814,
2182 0x50000, 0x500cc,
2183 0x50400, 0x50400,
2184 0x50800, 0x508cc,
2185 0x50c00, 0x50c00,
2186 0x51000, 0x5101c,
2187 0x51300, 0x51308,
2188 };
2189
2190 int i;
2191 struct adapter *ap = netdev2adap(dev);
2192 static const unsigned int *reg_ranges;
2193 int arr_size = 0, buf_size = 0;
2194
2195 if (is_t4(ap->params.chip)) {
2196 reg_ranges = &t4_reg_ranges[0];
2197 arr_size = ARRAY_SIZE(t4_reg_ranges);
2198 buf_size = T4_REGMAP_SIZE;
2199 } else {
2200 reg_ranges = &t5_reg_ranges[0];
2201 arr_size = ARRAY_SIZE(t5_reg_ranges);
2202 buf_size = T5_REGMAP_SIZE;
2203 }
2204
2205 regs->version = mk_adap_vers(ap);
2206
2207 memset(buf, 0, buf_size);
2208 for (i = 0; i < arr_size; i += 2)
2209 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2210}
2211
2212static int restart_autoneg(struct net_device *dev)
2213{
2214 struct port_info *p = netdev_priv(dev);
2215
2216 if (!netif_running(dev))
2217 return -EAGAIN;
2218 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2219 return -EINVAL;
2220 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
2221 return 0;
2222}
2223
2224static int identify_port(struct net_device *dev,
2225 enum ethtool_phys_id_state state)
2226{
2227 unsigned int val;
2228 struct adapter *adap = netdev2adap(dev);
2229
2230 if (state == ETHTOOL_ID_ACTIVE)
2231 val = 0xffff;
2232 else if (state == ETHTOOL_ID_INACTIVE)
2233 val = 0;
2234 else
2235 return -EINVAL;
2236
2237 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
2238}
2239
2240static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
2241{
2242 unsigned int v = 0;
2243
2244 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2245 type == FW_PORT_TYPE_BT_XAUI) {
2246 v |= SUPPORTED_TP;
2247 if (caps & FW_PORT_CAP_SPEED_100M)
2248 v |= SUPPORTED_100baseT_Full;
2249 if (caps & FW_PORT_CAP_SPEED_1G)
2250 v |= SUPPORTED_1000baseT_Full;
2251 if (caps & FW_PORT_CAP_SPEED_10G)
2252 v |= SUPPORTED_10000baseT_Full;
2253 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2254 v |= SUPPORTED_Backplane;
2255 if (caps & FW_PORT_CAP_SPEED_1G)
2256 v |= SUPPORTED_1000baseKX_Full;
2257 if (caps & FW_PORT_CAP_SPEED_10G)
2258 v |= SUPPORTED_10000baseKX4_Full;
2259 } else if (type == FW_PORT_TYPE_KR)
2260 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
2261 else if (type == FW_PORT_TYPE_BP_AP)
2262 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2263 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2264 else if (type == FW_PORT_TYPE_BP4_AP)
2265 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2266 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2267 SUPPORTED_10000baseKX4_Full;
2268 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2269 type == FW_PORT_TYPE_FIBER_XAUI ||
2270 type == FW_PORT_TYPE_SFP ||
2271 type == FW_PORT_TYPE_QSFP_10G ||
2272 type == FW_PORT_TYPE_QSA) {
2273 v |= SUPPORTED_FIBRE;
2274 if (caps & FW_PORT_CAP_SPEED_1G)
2275 v |= SUPPORTED_1000baseT_Full;
2276 if (caps & FW_PORT_CAP_SPEED_10G)
2277 v |= SUPPORTED_10000baseT_Full;
2278 } else if (type == FW_PORT_TYPE_BP40_BA ||
2279 type == FW_PORT_TYPE_QSFP) {
2280 v |= SUPPORTED_40000baseSR4_Full;
2281 v |= SUPPORTED_FIBRE;
2282 }
2283
2284 if (caps & FW_PORT_CAP_ANEG)
2285 v |= SUPPORTED_Autoneg;
2286 return v;
2287}
2288
2289static unsigned int to_fw_linkcaps(unsigned int caps)
2290{
2291 unsigned int v = 0;
2292
2293 if (caps & ADVERTISED_100baseT_Full)
2294 v |= FW_PORT_CAP_SPEED_100M;
2295 if (caps & ADVERTISED_1000baseT_Full)
2296 v |= FW_PORT_CAP_SPEED_1G;
2297 if (caps & ADVERTISED_10000baseT_Full)
2298 v |= FW_PORT_CAP_SPEED_10G;
2299 if (caps & ADVERTISED_40000baseSR4_Full)
2300 v |= FW_PORT_CAP_SPEED_40G;
2301 return v;
2302}
2303
/* ethtool get_settings: report connector type, PHY/MDIO info, link
 * capabilities and current speed for a port.
 */
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	const struct port_info *p = netdev_priv(dev);

	/* Map firmware port type (and, for pluggable modules, the module
	 * type) to the ethtool PORT_* connector code. */
	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
	    p->port_type == FW_PORT_TYPE_BT_XFI ||
	    p->port_type == FW_PORT_TYPE_BT_XAUI)
		cmd->port = PORT_TP;
	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
		cmd->port = PORT_FIBRE;
	else if (p->port_type == FW_PORT_TYPE_SFP ||
		 p->port_type == FW_PORT_TYPE_QSFP_10G ||
		 p->port_type == FW_PORT_TYPE_QSA ||
		 p->port_type == FW_PORT_TYPE_QSFP) {
		/* Pluggable cage: the connector depends on what module
		 * is plugged in (optics vs. twinax DAC). */
		if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
		    p->mod_type == FW_PORT_MOD_TYPE_SR ||
		    p->mod_type == FW_PORT_MOD_TYPE_ER ||
		    p->mod_type == FW_PORT_MOD_TYPE_LRM)
			cmd->port = PORT_FIBRE;
		else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
			 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			cmd->port = PORT_DA;
		else
			cmd->port = PORT_OTHER;
	} else
		cmd->port = PORT_OTHER;

	if (p->mdio_addr >= 0) {
		/* External PHY reachable over MDIO; only BT_SGMII uses
		 * clause-22 addressing. */
		cmd->phy_address = p->mdio_addr;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
	} else {
		cmd->phy_address = 0;  /* not really, but no better option */
		cmd->transceiver = XCVR_INTERNAL;
		cmd->mdio_support = 0;
	}

	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
	cmd->advertising = from_fw_linkcaps(p->port_type,
					    p->link_cfg.advertising);
	/* Report speed 0 while the carrier is down. */
	ethtool_cmd_speed_set(cmd,
			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
	cmd->duplex = DUPLEX_FULL;	/* hardware is always full duplex */
	cmd->autoneg = p->link_cfg.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
2354
2355static unsigned int speed_to_caps(int speed)
2356{
2357 if (speed == 100)
2358 return FW_PORT_CAP_SPEED_100M;
2359 if (speed == 1000)
2360 return FW_PORT_CAP_SPEED_1G;
2361 if (speed == 10000)
2362 return FW_PORT_CAP_SPEED_10G;
2363 if (speed == 40000)
2364 return FW_PORT_CAP_SPEED_40G;
2365 return 0;
2366}
2367
/* ethtool set_settings: configure speed/autoneg for a port.  Only full
 * duplex is supported.  The new configuration is pushed to the hardware
 * immediately if the interface is running, otherwise it takes effect at
 * the next link bring-up.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	unsigned int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
		return -EINVAL;

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/*
		 * PHY offers a single speed.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE &&
		    (lc->supported & speed_to_caps(speed)))
			return 0;
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		cap = speed_to_caps(speed);

		/* NOTE(review): forcing 1G/10G/40G is rejected here, so in
		 * practice only 100M can be forced — presumably because
		 * higher speeds require autonegotiation; confirm against
		 * firmware documentation before changing. */
		if (!(lc->supported & cap) ||
		    (speed == 1000) ||
		    (speed == 10000) ||
		    (speed == 40000))
			return -EINVAL;
		lc->requested_speed = cap;
		lc->advertising = 0;
	} else {
		cap = to_fw_linkcaps(cmd->advertising);
		if (!(lc->supported & cap))
			return -EINVAL;
		lc->requested_speed = 0;
		lc->advertising = cap | FW_PORT_CAP_ANEG;
	}
	lc->autoneg = cmd->autoneg;

	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}
2413
2414static void get_pauseparam(struct net_device *dev,
2415 struct ethtool_pauseparam *epause)
2416{
2417 struct port_info *p = netdev_priv(dev);
2418
2419 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2420 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2421 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2422}
2423
2424static int set_pauseparam(struct net_device *dev,
2425 struct ethtool_pauseparam *epause)
2426{
2427 struct port_info *p = netdev_priv(dev);
2428 struct link_config *lc = &p->link_cfg;
2429
2430 if (epause->autoneg == AUTONEG_DISABLE)
2431 lc->requested_fc = 0;
2432 else if (lc->supported & FW_PORT_CAP_ANEG)
2433 lc->requested_fc = PAUSE_AUTONEG;
2434 else
2435 return -EINVAL;
2436
2437 if (epause->rx_pause)
2438 lc->requested_fc |= PAUSE_RX;
2439 if (epause->tx_pause)
2440 lc->requested_fc |= PAUSE_TX;
2441 if (netif_running(dev))
2442 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2443 lc);
2444 return 0;
2445}
2446
/* ethtool get_ringparam: report ring-size limits and current sizes for
 * the port's first queue set.
 */
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	e->rx_jumbo_max_pending = 0;	/* no separate jumbo ring */
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	/* NOTE(review): the -8 mirrors the +8 in set_sge_param — the free
	 * list apparently reserves 8 entries (status page?); confirm
	 * against the SGE free-list setup code. */
	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	e->rx_jumbo_pending = 0;
	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
2462
2463static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2464{
2465 int i;
2466 const struct port_info *pi = netdev_priv(dev);
2467 struct adapter *adapter = pi->adapter;
2468 struct sge *s = &adapter->sge;
2469
2470 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2471 e->tx_pending > MAX_TXQ_ENTRIES ||
2472 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2473 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2474 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2475 return -EINVAL;
2476
2477 if (adapter->flags & FULL_INIT_DONE)
2478 return -EBUSY;
2479
2480 for (i = 0; i < pi->nqsets; ++i) {
2481 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2482 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2483 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2484 }
2485 return 0;
2486}
2487
2488static int closest_timer(const struct sge *s, int time) 1322static int closest_timer(const struct sge *s, int time)
2489{ 1323{
2490 int i, delta, match = 0, min_delta = INT_MAX; 1324 int i, delta, match = 0, min_delta = INT_MAX;
@@ -2517,19 +1351,8 @@ static int closest_thres(const struct sge *s, int thres)
2517 return match; 1351 return match;
2518} 1352}
2519 1353
2520/*
2521 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2522 */
2523unsigned int qtimer_val(const struct adapter *adap,
2524 const struct sge_rspq *q)
2525{
2526 unsigned int idx = q->intr_params >> 1;
2527
2528 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2529}
2530
2531/** 1354/**
2532 * set_rspq_intr_params - set a queue's interrupt holdoff parameters 1355 * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
2533 * @q: the Rx queue 1356 * @q: the Rx queue
2534 * @us: the hold-off time in us, or 0 to disable timer 1357 * @us: the hold-off time in us, or 0 to disable timer
2535 * @cnt: the hold-off packet count, or 0 to disable counter 1358 * @cnt: the hold-off packet count, or 0 to disable counter
@@ -2537,8 +1360,8 @@ unsigned int qtimer_val(const struct adapter *adap,
2537 * Sets an Rx queue's interrupt hold-off time and packet count. At least 1360 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2538 * one of the two needs to be enabled for the queue to generate interrupts. 1361 * one of the two needs to be enabled for the queue to generate interrupts.
2539 */ 1362 */
2540static int set_rspq_intr_params(struct sge_rspq *q, 1363int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
2541 unsigned int us, unsigned int cnt) 1364 unsigned int us, unsigned int cnt)
2542{ 1365{
2543 struct adapter *adap = q->adap; 1366 struct adapter *adap = q->adap;
2544 1367
@@ -2569,259 +1392,6 @@ static int set_rspq_intr_params(struct sge_rspq *q,
2569 return 0; 1392 return 0;
2570} 1393}
2571 1394
2572/**
2573 * set_rx_intr_params - set a net devices's RX interrupt holdoff paramete!
2574 * @dev: the network device
2575 * @us: the hold-off time in us, or 0 to disable timer
2576 * @cnt: the hold-off packet count, or 0 to disable counter
2577 *
2578 * Set the RX interrupt hold-off parameters for a network device.
2579 */
2580static int set_rx_intr_params(struct net_device *dev,
2581 unsigned int us, unsigned int cnt)
2582{
2583 int i, err;
2584 struct port_info *pi = netdev_priv(dev);
2585 struct adapter *adap = pi->adapter;
2586 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2587
2588 for (i = 0; i < pi->nqsets; i++, q++) {
2589 err = set_rspq_intr_params(&q->rspq, us, cnt);
2590 if (err)
2591 return err;
2592 }
2593 return 0;
2594}
2595
2596static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
2597{
2598 int i;
2599 struct port_info *pi = netdev_priv(dev);
2600 struct adapter *adap = pi->adapter;
2601 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2602
2603 for (i = 0; i < pi->nqsets; i++, q++)
2604 q->rspq.adaptive_rx = adaptive_rx;
2605
2606 return 0;
2607}
2608
2609static int get_adaptive_rx_setting(struct net_device *dev)
2610{
2611 struct port_info *pi = netdev_priv(dev);
2612 struct adapter *adap = pi->adapter;
2613 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2614
2615 return q->rspq.adaptive_rx;
2616}
2617
/* ethtool set_coalesce: apply adaptive-RX mode and the RX interrupt
 * hold-off timer/packet-count to all of the device's queue sets.
 */
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
	return set_rx_intr_params(dev, c->rx_coalesce_usecs,
				  c->rx_max_coalesced_frames);
}
2624
2625static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2626{
2627 const struct port_info *pi = netdev_priv(dev);
2628 const struct adapter *adap = pi->adapter;
2629 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2630
2631 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2632 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2633 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2634 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
2635 return 0;
2636}
2637
2638/**
2639 * eeprom_ptov - translate a physical EEPROM address to virtual
2640 * @phys_addr: the physical EEPROM address
2641 * @fn: the PCI function number
2642 * @sz: size of function-specific area
2643 *
2644 * Translate a physical EEPROM address to virtual. The first 1K is
2645 * accessed through virtual addresses starting at 31K, the rest is
2646 * accessed through virtual addresses starting at 0.
2647 *
2648 * The mapping is as follows:
2649 * [0..1K) -> [31K..32K)
2650 * [1K..1K+A) -> [31K-A..31K)
2651 * [1K+A..ES) -> [0..ES-A-1K)
2652 *
2653 * where A = @fn * @sz, and ES = EEPROM size.
2654 */
2655static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2656{
2657 fn *= sz;
2658 if (phys_addr < 1024)
2659 return phys_addr + (31 << 10);
2660 if (phys_addr < 1024 + fn)
2661 return 31744 - fn + phys_addr - 1024;
2662 if (phys_addr < EEPROMSIZE)
2663 return phys_addr - 1024 - fn;
2664 return -EINVAL;
2665}
2666
2667/*
2668 * The next two routines implement eeprom read/write from physical addresses.
2669 */
2670static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2671{
2672 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2673
2674 if (vaddr >= 0)
2675 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2676 return vaddr < 0 ? vaddr : 0;
2677}
2678
2679static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2680{
2681 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2682
2683 if (vaddr >= 0)
2684 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2685 return vaddr < 0 ? vaddr : 0;
2686}
2687
2688#define EEPROM_MAGIC 0x38E2F10C
2689
2690static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2691 u8 *data)
2692{
2693 int i, err = 0;
2694 struct adapter *adapter = netdev2adap(dev);
2695
2696 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2697 if (!buf)
2698 return -ENOMEM;
2699
2700 e->magic = EEPROM_MAGIC;
2701 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2702 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2703
2704 if (!err)
2705 memcpy(data, buf + e->offset, e->len);
2706 kfree(buf);
2707 return err;
2708}
2709
/* ethtool set_eeprom: write eeprom->len bytes at eeprom->offset.  Writes
 * are word-granular, so unaligned edges are handled by read-modify-write.
 * Non-primary PCI functions may only touch their own 1K VPD area.
 */
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	/* Round the span out to whole 32-bit words. */
	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->fn > 0) {
		/* Non-primary functions are confined to their own area. */
		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/*
		 * RMW possibly needed for first or last words.
		 */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		/* Pre-read the first word, and the last word if the span
		 * covers more than one, then overlay the user data. */
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;	/* already word-aligned; write in place */

	/* Drop write protection for the duration of the update. */
	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);	/* re-protect */
out:
	if (buf != data)
		kfree(buf);
	return err;
}
2766
2767static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2768{
2769 int ret;
2770 const struct firmware *fw;
2771 struct adapter *adap = netdev2adap(netdev);
2772 unsigned int mbox = PCIE_FW_MASTER_M + 1;
2773
2774 ef->data[sizeof(ef->data) - 1] = '\0';
2775 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2776 if (ret < 0)
2777 return ret;
2778
2779 /* If the adapter has been fully initialized then we'll go ahead and
2780 * try to get the firmware's cooperation in upgrading to the new
2781 * firmware image otherwise we'll try to do the entire job from the
2782 * host ... and we always "force" the operation in this path.
2783 */
2784 if (adap->flags & FULL_INIT_DONE)
2785 mbox = adap->mbox;
2786
2787 ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
2788 release_firmware(fw);
2789 if (!ret)
2790 dev_info(adap->pdev_dev, "loaded firmware %s,"
2791 " reload cxgb4 driver\n", ef->data);
2792 return ret;
2793}
2794
2795#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2796#define BCAST_CRC 0xa0ccc1a6
2797
2798static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2799{
2800 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2801 wol->wolopts = netdev2adap(dev)->wol;
2802 memset(&wol->sopass, 0, sizeof(wol->sopass));
2803}
2804
/* ethtool set_wol: enable/disable magic-packet and broadcast wake-up.
 * Unsupported wake modes are rejected with -EINVAL.
 */
static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	int err = 0;
	struct port_info *pi = netdev_priv(dev);

	if (wol->wolopts & ~WOL_SUPPORTED)
		return -EINVAL;
	/* Passing NULL disables magic-packet matching. */
	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
	if (wol->wolopts & WAKE_BCAST) {
		/* NOTE(review): the map/mask/CRC values (0xfe, ~6ULL,
		 * BCAST_CRC) describe a broadcast-frame match pattern —
		 * confirm their encoding against t4_wol_pat_enable. */
		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
					~0ULL, 0, false);
		if (!err)
			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
						~6ULL, ~0ULL, BCAST_CRC, true);
	} else
		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
	return err;
}
2824
2825static int cxgb_set_features(struct net_device *dev, netdev_features_t features) 1395static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2826{ 1396{
2827 const struct port_info *pi = netdev_priv(dev); 1397 const struct port_info *pi = netdev_priv(dev);
@@ -2839,144 +1409,6 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2839 return err; 1409 return err;
2840} 1410}
2841 1411
2842static u32 get_rss_table_size(struct net_device *dev)
2843{
2844 const struct port_info *pi = netdev_priv(dev);
2845
2846 return pi->rss_size;
2847}
2848
2849static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
2850{
2851 const struct port_info *pi = netdev_priv(dev);
2852 unsigned int n = pi->rss_size;
2853
2854 if (hfunc)
2855 *hfunc = ETH_RSS_HASH_TOP;
2856 if (!p)
2857 return 0;
2858 while (n--)
2859 p[n] = pi->rss[n];
2860 return 0;
2861}
2862
2863static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
2864 const u8 hfunc)
2865{
2866 unsigned int i;
2867 struct port_info *pi = netdev_priv(dev);
2868
2869 /* We require at least one supported parameter to be changed and no
2870 * change in any of the unsupported parameters
2871 */
2872 if (key ||
2873 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
2874 return -EOPNOTSUPP;
2875 if (!p)
2876 return 0;
2877
2878 for (i = 0; i < pi->rss_size; i++)
2879 pi->rss[i] = p[i];
2880 if (pi->adapter->flags & FULL_INIT_DONE)
2881 return write_rss(pi, pi->rss);
2882 return 0;
2883}
2884
2885static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2886 u32 *rules)
2887{
2888 const struct port_info *pi = netdev_priv(dev);
2889
2890 switch (info->cmd) {
2891 case ETHTOOL_GRXFH: {
2892 unsigned int v = pi->rss_mode;
2893
2894 info->data = 0;
2895 switch (info->flow_type) {
2896 case TCP_V4_FLOW:
2897 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
2898 info->data = RXH_IP_SRC | RXH_IP_DST |
2899 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2900 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
2901 info->data = RXH_IP_SRC | RXH_IP_DST;
2902 break;
2903 case UDP_V4_FLOW:
2904 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
2905 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
2906 info->data = RXH_IP_SRC | RXH_IP_DST |
2907 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2908 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
2909 info->data = RXH_IP_SRC | RXH_IP_DST;
2910 break;
2911 case SCTP_V4_FLOW:
2912 case AH_ESP_V4_FLOW:
2913 case IPV4_FLOW:
2914 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
2915 info->data = RXH_IP_SRC | RXH_IP_DST;
2916 break;
2917 case TCP_V6_FLOW:
2918 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
2919 info->data = RXH_IP_SRC | RXH_IP_DST |
2920 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2921 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
2922 info->data = RXH_IP_SRC | RXH_IP_DST;
2923 break;
2924 case UDP_V6_FLOW:
2925 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
2926 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
2927 info->data = RXH_IP_SRC | RXH_IP_DST |
2928 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2929 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
2930 info->data = RXH_IP_SRC | RXH_IP_DST;
2931 break;
2932 case SCTP_V6_FLOW:
2933 case AH_ESP_V6_FLOW:
2934 case IPV6_FLOW:
2935 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
2936 info->data = RXH_IP_SRC | RXH_IP_DST;
2937 break;
2938 }
2939 return 0;
2940 }
2941 case ETHTOOL_GRXRINGS:
2942 info->data = pi->nqsets;
2943 return 0;
2944 }
2945 return -EOPNOTSUPP;
2946}
2947
2948static const struct ethtool_ops cxgb_ethtool_ops = {
2949 .get_settings = get_settings,
2950 .set_settings = set_settings,
2951 .get_drvinfo = get_drvinfo,
2952 .get_msglevel = get_msglevel,
2953 .set_msglevel = set_msglevel,
2954 .get_ringparam = get_sge_param,
2955 .set_ringparam = set_sge_param,
2956 .get_coalesce = get_coalesce,
2957 .set_coalesce = set_coalesce,
2958 .get_eeprom_len = get_eeprom_len,
2959 .get_eeprom = get_eeprom,
2960 .set_eeprom = set_eeprom,
2961 .get_pauseparam = get_pauseparam,
2962 .set_pauseparam = set_pauseparam,
2963 .get_link = ethtool_op_get_link,
2964 .get_strings = get_strings,
2965 .set_phys_id = identify_port,
2966 .nway_reset = restart_autoneg,
2967 .get_sset_count = get_sset_count,
2968 .get_ethtool_stats = get_stats,
2969 .get_regs_len = get_regs_len,
2970 .get_regs = get_regs,
2971 .get_wol = get_wol,
2972 .set_wol = set_wol,
2973 .get_rxnfc = get_rxnfc,
2974 .get_rxfh_indir_size = get_rss_table_size,
2975 .get_rxfh = get_rss_table,
2976 .set_rxfh = set_rss_table,
2977 .flash_device = set_flash,
2978};
2979
2980static int setup_debugfs(struct adapter *adap) 1412static int setup_debugfs(struct adapter *adap)
2981{ 1413{
2982 if (IS_ERR_OR_NULL(adap->debugfs_root)) 1414 if (IS_ERR_OR_NULL(adap->debugfs_root))
@@ -4246,19 +2678,12 @@ static int cxgb_up(struct adapter *adap)
4246 2678
4247static void cxgb_down(struct adapter *adapter) 2679static void cxgb_down(struct adapter *adapter)
4248{ 2680{
4249 t4_intr_disable(adapter);
4250 cancel_work_sync(&adapter->tid_release_task); 2681 cancel_work_sync(&adapter->tid_release_task);
4251 cancel_work_sync(&adapter->db_full_task); 2682 cancel_work_sync(&adapter->db_full_task);
4252 cancel_work_sync(&adapter->db_drop_task); 2683 cancel_work_sync(&adapter->db_drop_task);
4253 adapter->tid_release_task_busy = false; 2684 adapter->tid_release_task_busy = false;
4254 adapter->tid_release_head = NULL; 2685 adapter->tid_release_head = NULL;
4255 2686
4256 if (adapter->flags & USING_MSIX) {
4257 free_msix_queue_irqs(adapter);
4258 free_irq(adapter->msix_info[0].vec, adapter);
4259 } else
4260 free_irq(adapter->pdev->irq, adapter);
4261 quiesce_rx(adapter);
4262 t4_sge_stop(adapter); 2687 t4_sge_stop(adapter);
4263 t4_free_sge_resources(adapter); 2688 t4_free_sge_resources(adapter);
4264 adapter->flags &= ~FULL_INIT_DONE; 2689 adapter->flags &= ~FULL_INIT_DONE;
@@ -4739,8 +3164,9 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4739 if (ret < 0) 3164 if (ret < 0)
4740 return ret; 3165 return ret;
4741 3166
4742 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ, 3167 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64,
4743 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF); 3168 MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
3169 FW_CMD_CAP_PF);
4744 if (ret < 0) 3170 if (ret < 0)
4745 return ret; 3171 return ret;
4746 3172
@@ -5094,10 +3520,15 @@ static int adap_init0(struct adapter *adap)
5094 enum dev_state state; 3520 enum dev_state state;
5095 u32 params[7], val[7]; 3521 u32 params[7], val[7];
5096 struct fw_caps_config_cmd caps_cmd; 3522 struct fw_caps_config_cmd caps_cmd;
5097 struct fw_devlog_cmd devlog_cmd;
5098 u32 devlog_meminfo;
5099 int reset = 1; 3523 int reset = 1;
5100 3524
3525 /* Grab Firmware Device Log parameters as early as possible so we have
3526 * access to it for debugging, etc.
3527 */
3528 ret = t4_init_devlog_params(adap);
3529 if (ret < 0)
3530 return ret;
3531
5101 /* Contact FW, advertising Master capability */ 3532 /* Contact FW, advertising Master capability */
5102 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); 3533 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
5103 if (ret < 0) { 3534 if (ret < 0) {
@@ -5175,30 +3606,6 @@ static int adap_init0(struct adapter *adap)
5175 if (ret < 0) 3606 if (ret < 0)
5176 goto bye; 3607 goto bye;
5177 3608
5178 /* Read firmware device log parameters. We really need to find a way
5179 * to get these parameters initialized with some default values (which
5180 * are likely to be correct) for the case where we either don't
5181 * attache to the firmware or it's crashed when we probe the adapter.
5182 * That way we'll still be able to perform early firmware startup
5183 * debugging ... If the request to get the Firmware's Device Log
5184 * parameters fails, we'll live so we don't make that a fatal error.
5185 */
5186 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
5187 devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
5188 FW_CMD_REQUEST_F | FW_CMD_READ_F);
5189 devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
5190 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
5191 &devlog_cmd);
5192 if (ret == 0) {
5193 devlog_meminfo =
5194 ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
5195 adap->params.devlog.memtype =
5196 FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
5197 adap->params.devlog.start =
5198 FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
5199 adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
5200 }
5201
5202 /* 3609 /*
5203 * Find out what ports are available to us. Note that we need to do 3610 * Find out what ports are available to us. Note that we need to do
5204 * this before calling adap_init0_no_config() since it needs nports 3611 * this before calling adap_init0_no_config() since it needs nports
@@ -5299,6 +3706,51 @@ static int adap_init0(struct adapter *adap)
5299 adap->tids.nftids = val[4] - val[3] + 1; 3706 adap->tids.nftids = val[4] - val[3] + 1;
5300 adap->sge.ingr_start = val[5]; 3707 adap->sge.ingr_start = val[5];
5301 3708
3709 /* qids (ingress/egress) returned from firmware can be anywhere
3710 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
3711 * Hence driver needs to allocate memory for this range to
3712 * store the queue info. Get the highest IQFLINT/EQ index returned
3713 * in FW_EQ_*_CMD.alloc command.
3714 */
3715 params[0] = FW_PARAM_PFVF(EQ_END);
3716 params[1] = FW_PARAM_PFVF(IQFLINT_END);
3717 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
3718 if (ret < 0)
3719 goto bye;
3720 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
3721 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
3722
3723 adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
3724 sizeof(*adap->sge.egr_map), GFP_KERNEL);
3725 if (!adap->sge.egr_map) {
3726 ret = -ENOMEM;
3727 goto bye;
3728 }
3729
3730 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
3731 sizeof(*adap->sge.ingr_map), GFP_KERNEL);
3732 if (!adap->sge.ingr_map) {
3733 ret = -ENOMEM;
3734 goto bye;
3735 }
3736
3737 /* Allocate the memory for the vaious egress queue bitmaps
3738 * ie starving_fl and txq_maperr.
3739 */
3740 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3741 sizeof(long), GFP_KERNEL);
3742 if (!adap->sge.starving_fl) {
3743 ret = -ENOMEM;
3744 goto bye;
3745 }
3746
3747 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3748 sizeof(long), GFP_KERNEL);
3749 if (!adap->sge.txq_maperr) {
3750 ret = -ENOMEM;
3751 goto bye;
3752 }
3753
5302 params[0] = FW_PARAM_PFVF(CLIP_START); 3754 params[0] = FW_PARAM_PFVF(CLIP_START);
5303 params[1] = FW_PARAM_PFVF(CLIP_END); 3755 params[1] = FW_PARAM_PFVF(CLIP_END);
5304 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); 3756 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
@@ -5507,6 +3959,10 @@ static int adap_init0(struct adapter *adap)
5507 * happened to HW/FW, stop issuing commands. 3959 * happened to HW/FW, stop issuing commands.
5508 */ 3960 */
5509bye: 3961bye:
3962 kfree(adap->sge.egr_map);
3963 kfree(adap->sge.ingr_map);
3964 kfree(adap->sge.starving_fl);
3965 kfree(adap->sge.txq_maperr);
5510 if (ret != -ETIMEDOUT && ret != -EIO) 3966 if (ret != -ETIMEDOUT && ret != -EIO)
5511 t4_fw_bye(adap, adap->mbox); 3967 t4_fw_bye(adap, adap->mbox);
5512 return ret; 3968 return ret;
@@ -5534,6 +3990,7 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5534 netif_carrier_off(dev); 3990 netif_carrier_off(dev);
5535 } 3991 }
5536 spin_unlock(&adap->stats_lock); 3992 spin_unlock(&adap->stats_lock);
3993 disable_interrupts(adap);
5537 if (adap->flags & FULL_INIT_DONE) 3994 if (adap->flags & FULL_INIT_DONE)
5538 cxgb_down(adap); 3995 cxgb_down(adap);
5539 rtnl_unlock(); 3996 rtnl_unlock();
@@ -5636,7 +4093,7 @@ static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
5636 unsigned int size, unsigned int iqe_size) 4093 unsigned int size, unsigned int iqe_size)
5637{ 4094{
5638 q->adap = adap; 4095 q->adap = adap;
5639 set_rspq_intr_params(q, us, cnt); 4096 cxgb4_set_rspq_intr_params(q, us, cnt);
5640 q->iqe_len = iqe_size; 4097 q->iqe_len = iqe_size;
5641 q->size = size; 4098 q->size = size;
5642} 4099}
@@ -5942,6 +4399,10 @@ static void free_some_resources(struct adapter *adapter)
5942 4399
5943 t4_free_mem(adapter->l2t); 4400 t4_free_mem(adapter->l2t);
5944 t4_free_mem(adapter->tids.tid_tab); 4401 t4_free_mem(adapter->tids.tid_tab);
4402 kfree(adapter->sge.egr_map);
4403 kfree(adapter->sge.ingr_map);
4404 kfree(adapter->sge.starving_fl);
4405 kfree(adapter->sge.txq_maperr);
5945 disable_msi(adapter); 4406 disable_msi(adapter);
5946 4407
5947 for_each_port(adapter, i) 4408 for_each_port(adapter, i)
@@ -6127,7 +4588,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6127 netdev->dcbnl_ops = &cxgb4_dcb_ops; 4588 netdev->dcbnl_ops = &cxgb4_dcb_ops;
6128 cxgb4_dcb_state_init(netdev); 4589 cxgb4_dcb_state_init(netdev);
6129#endif 4590#endif
6130 netdev->ethtool_ops = &cxgb_ethtool_ops; 4591 cxgb4_set_ethtool_ops(netdev);
6131 } 4592 }
6132 4593
6133 pci_set_drvdata(pdev, adapter); 4594 pci_set_drvdata(pdev, adapter);
@@ -6267,6 +4728,8 @@ static void remove_one(struct pci_dev *pdev)
6267 if (is_offload(adapter)) 4728 if (is_offload(adapter))
6268 detach_ulds(adapter); 4729 detach_ulds(adapter);
6269 4730
4731 disable_interrupts(adapter);
4732
6270 for_each_port(adapter, i) 4733 for_each_port(adapter, i)
6271 if (adapter->port[i]->reg_state == NETREG_REGISTERED) 4734 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
6272 unregister_netdev(adapter->port[i]); 4735 unregister_netdev(adapter->port[i]);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index c46e7a938317..c438f3895c40 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2239,7 +2239,7 @@ static void sge_rx_timer_cb(unsigned long data)
2239 struct adapter *adap = (struct adapter *)data; 2239 struct adapter *adap = (struct adapter *)data;
2240 struct sge *s = &adap->sge; 2240 struct sge *s = &adap->sge;
2241 2241
2242 for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) 2242 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
2243 for (m = s->starving_fl[i]; m; m &= m - 1) { 2243 for (m = s->starving_fl[i]; m; m &= m - 1) {
2244 struct sge_eth_rxq *rxq; 2244 struct sge_eth_rxq *rxq;
2245 unsigned int id = __ffs(m) + i * BITS_PER_LONG; 2245 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
@@ -2327,7 +2327,7 @@ static void sge_tx_timer_cb(unsigned long data)
2327 struct adapter *adap = (struct adapter *)data; 2327 struct adapter *adap = (struct adapter *)data;
2328 struct sge *s = &adap->sge; 2328 struct sge *s = &adap->sge;
2329 2329
2330 for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++) 2330 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
2331 for (m = s->txq_maperr[i]; m; m &= m - 1) { 2331 for (m = s->txq_maperr[i]; m; m &= m - 1) {
2332 unsigned long id = __ffs(m) + i * BITS_PER_LONG; 2332 unsigned long id = __ffs(m) + i * BITS_PER_LONG;
2333 struct sge_ofld_txq *txq = s->egr_map[id]; 2333 struct sge_ofld_txq *txq = s->egr_map[id];
@@ -2809,7 +2809,8 @@ void t4_free_sge_resources(struct adapter *adap)
2809 free_rspq_fl(adap, &adap->sge.intrq, NULL); 2809 free_rspq_fl(adap, &adap->sge.intrq, NULL);
2810 2810
2811 /* clear the reverse egress queue map */ 2811 /* clear the reverse egress queue map */
2812 memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map)); 2812 memset(adap->sge.egr_map, 0,
2813 adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
2813} 2814}
2814 2815
2815void t4_sge_start(struct adapter *adap) 2816void t4_sge_start(struct adapter *adap)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index afbe1682ff48..5959e3ae72da 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -625,6 +625,734 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
625 return 0; 625 return 0;
626} 626}
627 627
628/**
629 * t4_get_regs_len - return the size of the chips register set
630 * @adapter: the adapter
631 *
632 * Returns the size of the chip's BAR0 register space.
633 */
634unsigned int t4_get_regs_len(struct adapter *adapter)
635{
636 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
637
638 switch (chip_version) {
639 case CHELSIO_T4:
640 return T4_REGMAP_SIZE;
641
642 case CHELSIO_T5:
643 return T5_REGMAP_SIZE;
644 }
645
646 dev_err(adapter->pdev_dev,
647 "Unsupported chip version %d\n", chip_version);
648 return 0;
649}
650
651/**
652 * t4_get_regs - read chip registers into provided buffer
653 * @adap: the adapter
654 * @buf: register buffer
655 * @buf_size: size (in bytes) of register buffer
656 *
657 * If the provided register buffer isn't large enough for the chip's
658 * full register range, the register dump will be truncated to the
659 * register buffer's size.
660 */
661void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
662{
663 static const unsigned int t4_reg_ranges[] = {
664 0x1008, 0x1108,
665 0x1180, 0x11b4,
666 0x11fc, 0x123c,
667 0x1300, 0x173c,
668 0x1800, 0x18fc,
669 0x3000, 0x30d8,
670 0x30e0, 0x5924,
671 0x5960, 0x59d4,
672 0x5a00, 0x5af8,
673 0x6000, 0x6098,
674 0x6100, 0x6150,
675 0x6200, 0x6208,
676 0x6240, 0x6248,
677 0x6280, 0x6338,
678 0x6370, 0x638c,
679 0x6400, 0x643c,
680 0x6500, 0x6524,
681 0x6a00, 0x6a38,
682 0x6a60, 0x6a78,
683 0x6b00, 0x6b84,
684 0x6bf0, 0x6c84,
685 0x6cf0, 0x6d84,
686 0x6df0, 0x6e84,
687 0x6ef0, 0x6f84,
688 0x6ff0, 0x7084,
689 0x70f0, 0x7184,
690 0x71f0, 0x7284,
691 0x72f0, 0x7384,
692 0x73f0, 0x7450,
693 0x7500, 0x7530,
694 0x7600, 0x761c,
695 0x7680, 0x76cc,
696 0x7700, 0x7798,
697 0x77c0, 0x77fc,
698 0x7900, 0x79fc,
699 0x7b00, 0x7c38,
700 0x7d00, 0x7efc,
701 0x8dc0, 0x8e1c,
702 0x8e30, 0x8e78,
703 0x8ea0, 0x8f6c,
704 0x8fc0, 0x9074,
705 0x90fc, 0x90fc,
706 0x9400, 0x9458,
707 0x9600, 0x96bc,
708 0x9800, 0x9808,
709 0x9820, 0x983c,
710 0x9850, 0x9864,
711 0x9c00, 0x9c6c,
712 0x9c80, 0x9cec,
713 0x9d00, 0x9d6c,
714 0x9d80, 0x9dec,
715 0x9e00, 0x9e6c,
716 0x9e80, 0x9eec,
717 0x9f00, 0x9f6c,
718 0x9f80, 0x9fec,
719 0xd004, 0xd03c,
720 0xdfc0, 0xdfe0,
721 0xe000, 0xea7c,
722 0xf000, 0x11110,
723 0x11118, 0x11190,
724 0x19040, 0x1906c,
725 0x19078, 0x19080,
726 0x1908c, 0x19124,
727 0x19150, 0x191b0,
728 0x191d0, 0x191e8,
729 0x19238, 0x1924c,
730 0x193f8, 0x19474,
731 0x19490, 0x194f8,
732 0x19800, 0x19f30,
733 0x1a000, 0x1a06c,
734 0x1a0b0, 0x1a120,
735 0x1a128, 0x1a138,
736 0x1a190, 0x1a1c4,
737 0x1a1fc, 0x1a1fc,
738 0x1e040, 0x1e04c,
739 0x1e284, 0x1e28c,
740 0x1e2c0, 0x1e2c0,
741 0x1e2e0, 0x1e2e0,
742 0x1e300, 0x1e384,
743 0x1e3c0, 0x1e3c8,
744 0x1e440, 0x1e44c,
745 0x1e684, 0x1e68c,
746 0x1e6c0, 0x1e6c0,
747 0x1e6e0, 0x1e6e0,
748 0x1e700, 0x1e784,
749 0x1e7c0, 0x1e7c8,
750 0x1e840, 0x1e84c,
751 0x1ea84, 0x1ea8c,
752 0x1eac0, 0x1eac0,
753 0x1eae0, 0x1eae0,
754 0x1eb00, 0x1eb84,
755 0x1ebc0, 0x1ebc8,
756 0x1ec40, 0x1ec4c,
757 0x1ee84, 0x1ee8c,
758 0x1eec0, 0x1eec0,
759 0x1eee0, 0x1eee0,
760 0x1ef00, 0x1ef84,
761 0x1efc0, 0x1efc8,
762 0x1f040, 0x1f04c,
763 0x1f284, 0x1f28c,
764 0x1f2c0, 0x1f2c0,
765 0x1f2e0, 0x1f2e0,
766 0x1f300, 0x1f384,
767 0x1f3c0, 0x1f3c8,
768 0x1f440, 0x1f44c,
769 0x1f684, 0x1f68c,
770 0x1f6c0, 0x1f6c0,
771 0x1f6e0, 0x1f6e0,
772 0x1f700, 0x1f784,
773 0x1f7c0, 0x1f7c8,
774 0x1f840, 0x1f84c,
775 0x1fa84, 0x1fa8c,
776 0x1fac0, 0x1fac0,
777 0x1fae0, 0x1fae0,
778 0x1fb00, 0x1fb84,
779 0x1fbc0, 0x1fbc8,
780 0x1fc40, 0x1fc4c,
781 0x1fe84, 0x1fe8c,
782 0x1fec0, 0x1fec0,
783 0x1fee0, 0x1fee0,
784 0x1ff00, 0x1ff84,
785 0x1ffc0, 0x1ffc8,
786 0x20000, 0x2002c,
787 0x20100, 0x2013c,
788 0x20190, 0x201c8,
789 0x20200, 0x20318,
790 0x20400, 0x20528,
791 0x20540, 0x20614,
792 0x21000, 0x21040,
793 0x2104c, 0x21060,
794 0x210c0, 0x210ec,
795 0x21200, 0x21268,
796 0x21270, 0x21284,
797 0x212fc, 0x21388,
798 0x21400, 0x21404,
799 0x21500, 0x21518,
800 0x2152c, 0x2153c,
801 0x21550, 0x21554,
802 0x21600, 0x21600,
803 0x21608, 0x21628,
804 0x21630, 0x2163c,
805 0x21700, 0x2171c,
806 0x21780, 0x2178c,
807 0x21800, 0x21c38,
808 0x21c80, 0x21d7c,
809 0x21e00, 0x21e04,
810 0x22000, 0x2202c,
811 0x22100, 0x2213c,
812 0x22190, 0x221c8,
813 0x22200, 0x22318,
814 0x22400, 0x22528,
815 0x22540, 0x22614,
816 0x23000, 0x23040,
817 0x2304c, 0x23060,
818 0x230c0, 0x230ec,
819 0x23200, 0x23268,
820 0x23270, 0x23284,
821 0x232fc, 0x23388,
822 0x23400, 0x23404,
823 0x23500, 0x23518,
824 0x2352c, 0x2353c,
825 0x23550, 0x23554,
826 0x23600, 0x23600,
827 0x23608, 0x23628,
828 0x23630, 0x2363c,
829 0x23700, 0x2371c,
830 0x23780, 0x2378c,
831 0x23800, 0x23c38,
832 0x23c80, 0x23d7c,
833 0x23e00, 0x23e04,
834 0x24000, 0x2402c,
835 0x24100, 0x2413c,
836 0x24190, 0x241c8,
837 0x24200, 0x24318,
838 0x24400, 0x24528,
839 0x24540, 0x24614,
840 0x25000, 0x25040,
841 0x2504c, 0x25060,
842 0x250c0, 0x250ec,
843 0x25200, 0x25268,
844 0x25270, 0x25284,
845 0x252fc, 0x25388,
846 0x25400, 0x25404,
847 0x25500, 0x25518,
848 0x2552c, 0x2553c,
849 0x25550, 0x25554,
850 0x25600, 0x25600,
851 0x25608, 0x25628,
852 0x25630, 0x2563c,
853 0x25700, 0x2571c,
854 0x25780, 0x2578c,
855 0x25800, 0x25c38,
856 0x25c80, 0x25d7c,
857 0x25e00, 0x25e04,
858 0x26000, 0x2602c,
859 0x26100, 0x2613c,
860 0x26190, 0x261c8,
861 0x26200, 0x26318,
862 0x26400, 0x26528,
863 0x26540, 0x26614,
864 0x27000, 0x27040,
865 0x2704c, 0x27060,
866 0x270c0, 0x270ec,
867 0x27200, 0x27268,
868 0x27270, 0x27284,
869 0x272fc, 0x27388,
870 0x27400, 0x27404,
871 0x27500, 0x27518,
872 0x2752c, 0x2753c,
873 0x27550, 0x27554,
874 0x27600, 0x27600,
875 0x27608, 0x27628,
876 0x27630, 0x2763c,
877 0x27700, 0x2771c,
878 0x27780, 0x2778c,
879 0x27800, 0x27c38,
880 0x27c80, 0x27d7c,
881 0x27e00, 0x27e04
882 };
883
884 static const unsigned int t5_reg_ranges[] = {
885 0x1008, 0x1148,
886 0x1180, 0x11b4,
887 0x11fc, 0x123c,
888 0x1280, 0x173c,
889 0x1800, 0x18fc,
890 0x3000, 0x3028,
891 0x3060, 0x30d8,
892 0x30e0, 0x30fc,
893 0x3140, 0x357c,
894 0x35a8, 0x35cc,
895 0x35ec, 0x35ec,
896 0x3600, 0x5624,
897 0x56cc, 0x575c,
898 0x580c, 0x5814,
899 0x5890, 0x58bc,
900 0x5940, 0x59dc,
901 0x59fc, 0x5a18,
902 0x5a60, 0x5a9c,
903 0x5b9c, 0x5bfc,
904 0x6000, 0x6040,
905 0x6058, 0x614c,
906 0x7700, 0x7798,
907 0x77c0, 0x78fc,
908 0x7b00, 0x7c54,
909 0x7d00, 0x7efc,
910 0x8dc0, 0x8de0,
911 0x8df8, 0x8e84,
912 0x8ea0, 0x8f84,
913 0x8fc0, 0x90f8,
914 0x9400, 0x9470,
915 0x9600, 0x96f4,
916 0x9800, 0x9808,
917 0x9820, 0x983c,
918 0x9850, 0x9864,
919 0x9c00, 0x9c6c,
920 0x9c80, 0x9cec,
921 0x9d00, 0x9d6c,
922 0x9d80, 0x9dec,
923 0x9e00, 0x9e6c,
924 0x9e80, 0x9eec,
925 0x9f00, 0x9f6c,
926 0x9f80, 0xa020,
927 0xd004, 0xd03c,
928 0xdfc0, 0xdfe0,
929 0xe000, 0x11088,
930 0x1109c, 0x11110,
931 0x11118, 0x1117c,
932 0x11190, 0x11204,
933 0x19040, 0x1906c,
934 0x19078, 0x19080,
935 0x1908c, 0x19124,
936 0x19150, 0x191b0,
937 0x191d0, 0x191e8,
938 0x19238, 0x19290,
939 0x193f8, 0x19474,
940 0x19490, 0x194cc,
941 0x194f0, 0x194f8,
942 0x19c00, 0x19c60,
943 0x19c94, 0x19e10,
944 0x19e50, 0x19f34,
945 0x19f40, 0x19f50,
946 0x19f90, 0x19fe4,
947 0x1a000, 0x1a06c,
948 0x1a0b0, 0x1a120,
949 0x1a128, 0x1a138,
950 0x1a190, 0x1a1c4,
951 0x1a1fc, 0x1a1fc,
952 0x1e008, 0x1e00c,
953 0x1e040, 0x1e04c,
954 0x1e284, 0x1e290,
955 0x1e2c0, 0x1e2c0,
956 0x1e2e0, 0x1e2e0,
957 0x1e300, 0x1e384,
958 0x1e3c0, 0x1e3c8,
959 0x1e408, 0x1e40c,
960 0x1e440, 0x1e44c,
961 0x1e684, 0x1e690,
962 0x1e6c0, 0x1e6c0,
963 0x1e6e0, 0x1e6e0,
964 0x1e700, 0x1e784,
965 0x1e7c0, 0x1e7c8,
966 0x1e808, 0x1e80c,
967 0x1e840, 0x1e84c,
968 0x1ea84, 0x1ea90,
969 0x1eac0, 0x1eac0,
970 0x1eae0, 0x1eae0,
971 0x1eb00, 0x1eb84,
972 0x1ebc0, 0x1ebc8,
973 0x1ec08, 0x1ec0c,
974 0x1ec40, 0x1ec4c,
975 0x1ee84, 0x1ee90,
976 0x1eec0, 0x1eec0,
977 0x1eee0, 0x1eee0,
978 0x1ef00, 0x1ef84,
979 0x1efc0, 0x1efc8,
980 0x1f008, 0x1f00c,
981 0x1f040, 0x1f04c,
982 0x1f284, 0x1f290,
983 0x1f2c0, 0x1f2c0,
984 0x1f2e0, 0x1f2e0,
985 0x1f300, 0x1f384,
986 0x1f3c0, 0x1f3c8,
987 0x1f408, 0x1f40c,
988 0x1f440, 0x1f44c,
989 0x1f684, 0x1f690,
990 0x1f6c0, 0x1f6c0,
991 0x1f6e0, 0x1f6e0,
992 0x1f700, 0x1f784,
993 0x1f7c0, 0x1f7c8,
994 0x1f808, 0x1f80c,
995 0x1f840, 0x1f84c,
996 0x1fa84, 0x1fa90,
997 0x1fac0, 0x1fac0,
998 0x1fae0, 0x1fae0,
999 0x1fb00, 0x1fb84,
1000 0x1fbc0, 0x1fbc8,
1001 0x1fc08, 0x1fc0c,
1002 0x1fc40, 0x1fc4c,
1003 0x1fe84, 0x1fe90,
1004 0x1fec0, 0x1fec0,
1005 0x1fee0, 0x1fee0,
1006 0x1ff00, 0x1ff84,
1007 0x1ffc0, 0x1ffc8,
1008 0x30000, 0x30030,
1009 0x30100, 0x30144,
1010 0x30190, 0x301d0,
1011 0x30200, 0x30318,
1012 0x30400, 0x3052c,
1013 0x30540, 0x3061c,
1014 0x30800, 0x30834,
1015 0x308c0, 0x30908,
1016 0x30910, 0x309ac,
1017 0x30a00, 0x30a04,
1018 0x30a0c, 0x30a2c,
1019 0x30a44, 0x30a50,
1020 0x30a74, 0x30c24,
1021 0x30d08, 0x30d14,
1022 0x30d1c, 0x30d20,
1023 0x30d3c, 0x30d50,
1024 0x31200, 0x3120c,
1025 0x31220, 0x31220,
1026 0x31240, 0x31240,
1027 0x31600, 0x31600,
1028 0x31608, 0x3160c,
1029 0x31a00, 0x31a1c,
1030 0x31e04, 0x31e20,
1031 0x31e38, 0x31e3c,
1032 0x31e80, 0x31e80,
1033 0x31e88, 0x31ea8,
1034 0x31eb0, 0x31eb4,
1035 0x31ec8, 0x31ed4,
1036 0x31fb8, 0x32004,
1037 0x32208, 0x3223c,
1038 0x32600, 0x32630,
1039 0x32a00, 0x32abc,
1040 0x32b00, 0x32b70,
1041 0x33000, 0x33048,
1042 0x33060, 0x3309c,
1043 0x330f0, 0x33148,
1044 0x33160, 0x3319c,
1045 0x331f0, 0x332e4,
1046 0x332f8, 0x333e4,
1047 0x333f8, 0x33448,
1048 0x33460, 0x3349c,
1049 0x334f0, 0x33548,
1050 0x33560, 0x3359c,
1051 0x335f0, 0x336e4,
1052 0x336f8, 0x337e4,
1053 0x337f8, 0x337fc,
1054 0x33814, 0x33814,
1055 0x3382c, 0x3382c,
1056 0x33880, 0x3388c,
1057 0x338e8, 0x338ec,
1058 0x33900, 0x33948,
1059 0x33960, 0x3399c,
1060 0x339f0, 0x33ae4,
1061 0x33af8, 0x33b10,
1062 0x33b28, 0x33b28,
1063 0x33b3c, 0x33b50,
1064 0x33bf0, 0x33c10,
1065 0x33c28, 0x33c28,
1066 0x33c3c, 0x33c50,
1067 0x33cf0, 0x33cfc,
1068 0x34000, 0x34030,
1069 0x34100, 0x34144,
1070 0x34190, 0x341d0,
1071 0x34200, 0x34318,
1072 0x34400, 0x3452c,
1073 0x34540, 0x3461c,
1074 0x34800, 0x34834,
1075 0x348c0, 0x34908,
1076 0x34910, 0x349ac,
1077 0x34a00, 0x34a04,
1078 0x34a0c, 0x34a2c,
1079 0x34a44, 0x34a50,
1080 0x34a74, 0x34c24,
1081 0x34d08, 0x34d14,
1082 0x34d1c, 0x34d20,
1083 0x34d3c, 0x34d50,
1084 0x35200, 0x3520c,
1085 0x35220, 0x35220,
1086 0x35240, 0x35240,
1087 0x35600, 0x35600,
1088 0x35608, 0x3560c,
1089 0x35a00, 0x35a1c,
1090 0x35e04, 0x35e20,
1091 0x35e38, 0x35e3c,
1092 0x35e80, 0x35e80,
1093 0x35e88, 0x35ea8,
1094 0x35eb0, 0x35eb4,
1095 0x35ec8, 0x35ed4,
1096 0x35fb8, 0x36004,
1097 0x36208, 0x3623c,
1098 0x36600, 0x36630,
1099 0x36a00, 0x36abc,
1100 0x36b00, 0x36b70,
1101 0x37000, 0x37048,
1102 0x37060, 0x3709c,
1103 0x370f0, 0x37148,
1104 0x37160, 0x3719c,
1105 0x371f0, 0x372e4,
1106 0x372f8, 0x373e4,
1107 0x373f8, 0x37448,
1108 0x37460, 0x3749c,
1109 0x374f0, 0x37548,
1110 0x37560, 0x3759c,
1111 0x375f0, 0x376e4,
1112 0x376f8, 0x377e4,
1113 0x377f8, 0x377fc,
1114 0x37814, 0x37814,
1115 0x3782c, 0x3782c,
1116 0x37880, 0x3788c,
1117 0x378e8, 0x378ec,
1118 0x37900, 0x37948,
1119 0x37960, 0x3799c,
1120 0x379f0, 0x37ae4,
1121 0x37af8, 0x37b10,
1122 0x37b28, 0x37b28,
1123 0x37b3c, 0x37b50,
1124 0x37bf0, 0x37c10,
1125 0x37c28, 0x37c28,
1126 0x37c3c, 0x37c50,
1127 0x37cf0, 0x37cfc,
1128 0x38000, 0x38030,
1129 0x38100, 0x38144,
1130 0x38190, 0x381d0,
1131 0x38200, 0x38318,
1132 0x38400, 0x3852c,
1133 0x38540, 0x3861c,
1134 0x38800, 0x38834,
1135 0x388c0, 0x38908,
1136 0x38910, 0x389ac,
1137 0x38a00, 0x38a04,
1138 0x38a0c, 0x38a2c,
1139 0x38a44, 0x38a50,
1140 0x38a74, 0x38c24,
1141 0x38d08, 0x38d14,
1142 0x38d1c, 0x38d20,
1143 0x38d3c, 0x38d50,
1144 0x39200, 0x3920c,
1145 0x39220, 0x39220,
1146 0x39240, 0x39240,
1147 0x39600, 0x39600,
1148 0x39608, 0x3960c,
1149 0x39a00, 0x39a1c,
1150 0x39e04, 0x39e20,
1151 0x39e38, 0x39e3c,
1152 0x39e80, 0x39e80,
1153 0x39e88, 0x39ea8,
1154 0x39eb0, 0x39eb4,
1155 0x39ec8, 0x39ed4,
1156 0x39fb8, 0x3a004,
1157 0x3a208, 0x3a23c,
1158 0x3a600, 0x3a630,
1159 0x3aa00, 0x3aabc,
1160 0x3ab00, 0x3ab70,
1161 0x3b000, 0x3b048,
1162 0x3b060, 0x3b09c,
1163 0x3b0f0, 0x3b148,
1164 0x3b160, 0x3b19c,
1165 0x3b1f0, 0x3b2e4,
1166 0x3b2f8, 0x3b3e4,
1167 0x3b3f8, 0x3b448,
1168 0x3b460, 0x3b49c,
1169 0x3b4f0, 0x3b548,
1170 0x3b560, 0x3b59c,
1171 0x3b5f0, 0x3b6e4,
1172 0x3b6f8, 0x3b7e4,
1173 0x3b7f8, 0x3b7fc,
1174 0x3b814, 0x3b814,
1175 0x3b82c, 0x3b82c,
1176 0x3b880, 0x3b88c,
1177 0x3b8e8, 0x3b8ec,
1178 0x3b900, 0x3b948,
1179 0x3b960, 0x3b99c,
1180 0x3b9f0, 0x3bae4,
1181 0x3baf8, 0x3bb10,
1182 0x3bb28, 0x3bb28,
1183 0x3bb3c, 0x3bb50,
1184 0x3bbf0, 0x3bc10,
1185 0x3bc28, 0x3bc28,
1186 0x3bc3c, 0x3bc50,
1187 0x3bcf0, 0x3bcfc,
1188 0x3c000, 0x3c030,
1189 0x3c100, 0x3c144,
1190 0x3c190, 0x3c1d0,
1191 0x3c200, 0x3c318,
1192 0x3c400, 0x3c52c,
1193 0x3c540, 0x3c61c,
1194 0x3c800, 0x3c834,
1195 0x3c8c0, 0x3c908,
1196 0x3c910, 0x3c9ac,
1197 0x3ca00, 0x3ca04,
1198 0x3ca0c, 0x3ca2c,
1199 0x3ca44, 0x3ca50,
1200 0x3ca74, 0x3cc24,
1201 0x3cd08, 0x3cd14,
1202 0x3cd1c, 0x3cd20,
1203 0x3cd3c, 0x3cd50,
1204 0x3d200, 0x3d20c,
1205 0x3d220, 0x3d220,
1206 0x3d240, 0x3d240,
1207 0x3d600, 0x3d600,
1208 0x3d608, 0x3d60c,
1209 0x3da00, 0x3da1c,
1210 0x3de04, 0x3de20,
1211 0x3de38, 0x3de3c,
1212 0x3de80, 0x3de80,
1213 0x3de88, 0x3dea8,
1214 0x3deb0, 0x3deb4,
1215 0x3dec8, 0x3ded4,
1216 0x3dfb8, 0x3e004,
1217 0x3e208, 0x3e23c,
1218 0x3e600, 0x3e630,
1219 0x3ea00, 0x3eabc,
1220 0x3eb00, 0x3eb70,
1221 0x3f000, 0x3f048,
1222 0x3f060, 0x3f09c,
1223 0x3f0f0, 0x3f148,
1224 0x3f160, 0x3f19c,
1225 0x3f1f0, 0x3f2e4,
1226 0x3f2f8, 0x3f3e4,
1227 0x3f3f8, 0x3f448,
1228 0x3f460, 0x3f49c,
1229 0x3f4f0, 0x3f548,
1230 0x3f560, 0x3f59c,
1231 0x3f5f0, 0x3f6e4,
1232 0x3f6f8, 0x3f7e4,
1233 0x3f7f8, 0x3f7fc,
1234 0x3f814, 0x3f814,
1235 0x3f82c, 0x3f82c,
1236 0x3f880, 0x3f88c,
1237 0x3f8e8, 0x3f8ec,
1238 0x3f900, 0x3f948,
1239 0x3f960, 0x3f99c,
1240 0x3f9f0, 0x3fae4,
1241 0x3faf8, 0x3fb10,
1242 0x3fb28, 0x3fb28,
1243 0x3fb3c, 0x3fb50,
1244 0x3fbf0, 0x3fc10,
1245 0x3fc28, 0x3fc28,
1246 0x3fc3c, 0x3fc50,
1247 0x3fcf0, 0x3fcfc,
1248 0x40000, 0x4000c,
1249 0x40040, 0x40068,
1250 0x40080, 0x40144,
1251 0x40180, 0x4018c,
1252 0x40200, 0x40298,
1253 0x402ac, 0x4033c,
1254 0x403f8, 0x403fc,
1255 0x41304, 0x413c4,
1256 0x41400, 0x4141c,
1257 0x41480, 0x414d0,
1258 0x44000, 0x44078,
1259 0x440c0, 0x44278,
1260 0x442c0, 0x44478,
1261 0x444c0, 0x44678,
1262 0x446c0, 0x44878,
1263 0x448c0, 0x449fc,
1264 0x45000, 0x45068,
1265 0x45080, 0x45084,
1266 0x450a0, 0x450b0,
1267 0x45200, 0x45268,
1268 0x45280, 0x45284,
1269 0x452a0, 0x452b0,
1270 0x460c0, 0x460e4,
1271 0x47000, 0x4708c,
1272 0x47200, 0x47250,
1273 0x47400, 0x47420,
1274 0x47600, 0x47618,
1275 0x47800, 0x47814,
1276 0x48000, 0x4800c,
1277 0x48040, 0x48068,
1278 0x48080, 0x48144,
1279 0x48180, 0x4818c,
1280 0x48200, 0x48298,
1281 0x482ac, 0x4833c,
1282 0x483f8, 0x483fc,
1283 0x49304, 0x493c4,
1284 0x49400, 0x4941c,
1285 0x49480, 0x494d0,
1286 0x4c000, 0x4c078,
1287 0x4c0c0, 0x4c278,
1288 0x4c2c0, 0x4c478,
1289 0x4c4c0, 0x4c678,
1290 0x4c6c0, 0x4c878,
1291 0x4c8c0, 0x4c9fc,
1292 0x4d000, 0x4d068,
1293 0x4d080, 0x4d084,
1294 0x4d0a0, 0x4d0b0,
1295 0x4d200, 0x4d268,
1296 0x4d280, 0x4d284,
1297 0x4d2a0, 0x4d2b0,
1298 0x4e0c0, 0x4e0e4,
1299 0x4f000, 0x4f08c,
1300 0x4f200, 0x4f250,
1301 0x4f400, 0x4f420,
1302 0x4f600, 0x4f618,
1303 0x4f800, 0x4f814,
1304 0x50000, 0x500cc,
1305 0x50400, 0x50400,
1306 0x50800, 0x508cc,
1307 0x50c00, 0x50c00,
1308 0x51000, 0x5101c,
1309 0x51300, 0x51308,
1310 };
1311
1312 u32 *buf_end = (u32 *)((char *)buf + buf_size);
1313 const unsigned int *reg_ranges;
1314 int reg_ranges_size, range;
1315 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
1316
1317 /* Select the right set of register ranges to dump depending on the
1318 * adapter chip type.
1319 */
1320 switch (chip_version) {
1321 case CHELSIO_T4:
1322 reg_ranges = t4_reg_ranges;
1323 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
1324 break;
1325
1326 case CHELSIO_T5:
1327 reg_ranges = t5_reg_ranges;
1328 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
1329 break;
1330
1331 default:
1332 dev_err(adap->pdev_dev,
1333 "Unsupported chip version %d\n", chip_version);
1334 return;
1335 }
1336
1337 /* Clear the register buffer and insert the appropriate register
1338 * values selected by the above register ranges.
1339 */
1340 memset(buf, 0, buf_size);
1341 for (range = 0; range < reg_ranges_size; range += 2) {
1342 unsigned int reg = reg_ranges[range];
1343 unsigned int last_reg = reg_ranges[range + 1];
1344 u32 *bufp = (u32 *)((char *)buf + reg);
1345
1346 /* Iterate across the register range filling in the register
1347 * buffer but don't write past the end of the register buffer.
1348 */
1349 while (reg <= last_reg && bufp < buf_end) {
1350 *bufp++ = t4_read_reg(adap, reg);
1351 reg += sizeof(u32);
1352 }
1353 }
1354}
1355
628#define EEPROM_STAT_ADDR 0x7bfc 1356#define EEPROM_STAT_ADDR 0x7bfc
629#define VPD_BASE 0x400 1357#define VPD_BASE 0x400
630#define VPD_BASE_OLD 0 1358#define VPD_BASE_OLD 0
@@ -4459,6 +5187,59 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
4459} 5187}
4460 5188
4461/** 5189/**
5190 * t4_init_devlog_params - initialize adapter->params.devlog
5191 * @adap: the adapter
5192 *
5193 * Initialize various fields of the adapter's Firmware Device Log
5194 * Parameters structure.
5195 */
5196int t4_init_devlog_params(struct adapter *adap)
5197{
5198 struct devlog_params *dparams = &adap->params.devlog;
5199 u32 pf_dparams;
5200 unsigned int devlog_meminfo;
5201 struct fw_devlog_cmd devlog_cmd;
5202 int ret;
5203
5204 /* If we're dealing with newer firmware, the Device Log Paramerters
5205 * are stored in a designated register which allows us to access the
5206 * Device Log even if we can't talk to the firmware.
5207 */
5208 pf_dparams =
5209 t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
5210 if (pf_dparams) {
5211 unsigned int nentries, nentries128;
5212
5213 dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
5214 dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
5215
5216 nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
5217 nentries = (nentries128 + 1) * 128;
5218 dparams->size = nentries * sizeof(struct fw_devlog_e);
5219
5220 return 0;
5221 }
5222
5223 /* Otherwise, ask the firmware for it's Device Log Parameters.
5224 */
5225 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
5226 devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
5227 FW_CMD_REQUEST_F | FW_CMD_READ_F);
5228 devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
5229 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
5230 &devlog_cmd);
5231 if (ret)
5232 return ret;
5233
5234 devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
5235 dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
5236 dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
5237 dparams->size = ntohl(devlog_cmd.memsize_devlog);
5238
5239 return 0;
5240}
5241
5242/**
4462 * t4_init_sge_params - initialize adap->params.sge 5243 * t4_init_sge_params - initialize adap->params.sge
4463 * @adapter: the adapter 5244 * @adapter: the adapter
4464 * 5245 *
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 231a725f6d5d..326674b19983 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -63,6 +63,8 @@
63#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) 63#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
64#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) 64#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
65 65
66#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
67
66#define SGE_PF_KDOORBELL_A 0x0 68#define SGE_PF_KDOORBELL_A 0x0
67 69
68#define QID_S 15 70#define QID_S 15
@@ -707,6 +709,7 @@
707#define PFNUM_V(x) ((x) << PFNUM_S) 709#define PFNUM_V(x) ((x) << PFNUM_S)
708 710
709#define PCIE_FW_A 0x30b8 711#define PCIE_FW_A 0x30b8
712#define PCIE_FW_PF_A 0x30bc
710 713
711#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908 714#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908
712 715
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index d136ca6a0c8a..03fbfd1fb3df 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -101,7 +101,7 @@ enum fw_wr_opcodes {
101 FW_RI_BIND_MW_WR = 0x18, 101 FW_RI_BIND_MW_WR = 0x18,
102 FW_RI_FR_NSMR_WR = 0x19, 102 FW_RI_FR_NSMR_WR = 0x19,
103 FW_RI_INV_LSTAG_WR = 0x1a, 103 FW_RI_INV_LSTAG_WR = 0x1a,
104 FW_LASTC2E_WR = 0x40 104 FW_LASTC2E_WR = 0x70
105}; 105};
106 106
107struct fw_wr_hdr { 107struct fw_wr_hdr {
@@ -993,6 +993,7 @@ enum fw_memtype_cf {
993 FW_MEMTYPE_CF_EXTMEM = 0x2, 993 FW_MEMTYPE_CF_EXTMEM = 0x2,
994 FW_MEMTYPE_CF_FLASH = 0x4, 994 FW_MEMTYPE_CF_FLASH = 0x4,
995 FW_MEMTYPE_CF_INTERNAL = 0x5, 995 FW_MEMTYPE_CF_INTERNAL = 0x5,
996 FW_MEMTYPE_CF_EXTMEM1 = 0x6,
996}; 997};
997 998
998struct fw_caps_config_cmd { 999struct fw_caps_config_cmd {
@@ -1035,6 +1036,7 @@ enum fw_params_mnem {
1035 FW_PARAMS_MNEM_PFVF = 2, /* function params */ 1036 FW_PARAMS_MNEM_PFVF = 2, /* function params */
1036 FW_PARAMS_MNEM_REG = 3, /* limited register access */ 1037 FW_PARAMS_MNEM_REG = 3, /* limited register access */
1037 FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ 1038 FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */
1039 FW_PARAMS_MNEM_CHNET = 5, /* chnet params */
1038 FW_PARAMS_MNEM_LAST 1040 FW_PARAMS_MNEM_LAST
1039}; 1041};
1040 1042
@@ -3102,7 +3104,8 @@ enum fw_devlog_facility {
3102 FW_DEVLOG_FACILITY_FCOE = 0x2E, 3104 FW_DEVLOG_FACILITY_FCOE = 0x2E,
3103 FW_DEVLOG_FACILITY_FOISCSI = 0x30, 3105 FW_DEVLOG_FACILITY_FOISCSI = 0x30,
3104 FW_DEVLOG_FACILITY_FOFCOE = 0x32, 3106 FW_DEVLOG_FACILITY_FOFCOE = 0x32,
3105 FW_DEVLOG_FACILITY_MAX = 0x32, 3107 FW_DEVLOG_FACILITY_CHNET = 0x34,
3108 FW_DEVLOG_FACILITY_MAX = 0x34,
3106}; 3109};
3107 3110
3108/* log message format */ 3111/* log message format */
@@ -3139,4 +3142,36 @@ struct fw_devlog_cmd {
3139 (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \ 3142 (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \
3140 FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M) 3143 FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M)
3141 3144
3145/* P C I E F W P F 7 R E G I S T E R */
3146
3147/* PF7 stores the Firmware Device Log parameters which allows Host Drivers to
3148 * access the "devlog" which needing to contact firmware. The encoding is
3149 * mostly the same as that returned by the DEVLOG command except for the size
3150 * which is encoded as the number of entries in multiples-1 of 128 here rather
3151 * than the memory size as is done in the DEVLOG command. Thus, 0 means 128
3152 * and 15 means 2048. This of course in turn constrains the allowed values
3153 * for the devlog size ...
3154 */
3155#define PCIE_FW_PF_DEVLOG 7
3156
3157#define PCIE_FW_PF_DEVLOG_NENTRIES128_S 28
3158#define PCIE_FW_PF_DEVLOG_NENTRIES128_M 0xf
3159#define PCIE_FW_PF_DEVLOG_NENTRIES128_V(x) \
3160 ((x) << PCIE_FW_PF_DEVLOG_NENTRIES128_S)
3161#define PCIE_FW_PF_DEVLOG_NENTRIES128_G(x) \
3162 (((x) >> PCIE_FW_PF_DEVLOG_NENTRIES128_S) & \
3163 PCIE_FW_PF_DEVLOG_NENTRIES128_M)
3164
3165#define PCIE_FW_PF_DEVLOG_ADDR16_S 4
3166#define PCIE_FW_PF_DEVLOG_ADDR16_M 0xffffff
3167#define PCIE_FW_PF_DEVLOG_ADDR16_V(x) ((x) << PCIE_FW_PF_DEVLOG_ADDR16_S)
3168#define PCIE_FW_PF_DEVLOG_ADDR16_G(x) \
3169 (((x) >> PCIE_FW_PF_DEVLOG_ADDR16_S) & PCIE_FW_PF_DEVLOG_ADDR16_M)
3170
3171#define PCIE_FW_PF_DEVLOG_MEMTYPE_S 0
3172#define PCIE_FW_PF_DEVLOG_MEMTYPE_M 0xf
3173#define PCIE_FW_PF_DEVLOG_MEMTYPE_V(x) ((x) << PCIE_FW_PF_DEVLOG_MEMTYPE_S)
3174#define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \
3175 (((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M)
3176
3142#endif /* _T4FW_INTERFACE_H_ */ 3177#endif /* _T4FW_INTERFACE_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index e2bd3f747858..b9d1cbac0eee 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,13 +36,13 @@
36#define __T4FW_VERSION_H__ 36#define __T4FW_VERSION_H__
37 37
38#define T4FW_VERSION_MAJOR 0x01 38#define T4FW_VERSION_MAJOR 0x01
39#define T4FW_VERSION_MINOR 0x0C 39#define T4FW_VERSION_MINOR 0x0D
40#define T4FW_VERSION_MICRO 0x19 40#define T4FW_VERSION_MICRO 0x20
41#define T4FW_VERSION_BUILD 0x00 41#define T4FW_VERSION_BUILD 0x00
42 42
43#define T5FW_VERSION_MAJOR 0x01 43#define T5FW_VERSION_MAJOR 0x01
44#define T5FW_VERSION_MINOR 0x0C 44#define T5FW_VERSION_MINOR 0x0D
45#define T5FW_VERSION_MICRO 0x19 45#define T5FW_VERSION_MICRO 0x20
46#define T5FW_VERSION_BUILD 0x00 46#define T5FW_VERSION_BUILD 0x00
47 47
48#endif 48#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 5ba14b32c370..7715982230e5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1004,7 +1004,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
1004 ? (tq->pidx - 1) 1004 ? (tq->pidx - 1)
1005 : (tq->size - 1)); 1005 : (tq->size - 1));
1006 __be64 *src = (__be64 *)&tq->desc[index]; 1006 __be64 *src = (__be64 *)&tq->desc[index];
1007 __be64 __iomem *dst = (__be64 *)(tq->bar2_addr + 1007 __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
1008 SGE_UDB_WCDOORBELL); 1008 SGE_UDB_WCDOORBELL);
1009 unsigned int count = EQ_UNIT / sizeof(__be64); 1009 unsigned int count = EQ_UNIT / sizeof(__be64);
1010 1010
@@ -1018,7 +1018,11 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
1018 * DMA. 1018 * DMA.
1019 */ 1019 */
1020 while (count) { 1020 while (count) {
1021 writeq(*src, dst); 1021 /* the (__force u64) is because the compiler
1022 * doesn't understand the endian swizzling
1023 * going on
1024 */
1025 writeq((__force u64)*src, dst);
1022 src++; 1026 src++;
1023 dst++; 1027 dst++;
1024 count--; 1028 count--;
@@ -1252,8 +1256,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1252 BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); 1256 BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
1253 wr = (void *)&txq->q.desc[txq->q.pidx]; 1257 wr = (void *)&txq->q.desc[txq->q.pidx];
1254 wr->equiq_to_len16 = cpu_to_be32(wr_mid); 1258 wr->equiq_to_len16 = cpu_to_be32(wr_mid);
1255 wr->r3[0] = cpu_to_be64(0); 1259 wr->r3[0] = cpu_to_be32(0);
1256 wr->r3[1] = cpu_to_be64(0); 1260 wr->r3[1] = cpu_to_be32(0);
1257 skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); 1261 skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
1258 end = (u64 *)wr + flits; 1262 end = (u64 *)wr + flits;
1259 1263
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index c21e2e954ad8..966ee900ed00 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -210,10 +210,10 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
210 210
211 if (rpl) { 211 if (rpl) {
212 /* request bit in high-order BE word */ 212 /* request bit in high-order BE word */
213 WARN_ON((be32_to_cpu(*(const u32 *)cmd) 213 WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
214 & FW_CMD_REQUEST_F) == 0); 214 & FW_CMD_REQUEST_F) == 0);
215 get_mbox_rpl(adapter, rpl, size, mbox_data); 215 get_mbox_rpl(adapter, rpl, size, mbox_data);
216 WARN_ON((be32_to_cpu(*(u32 *)rpl) 216 WARN_ON((be32_to_cpu(*(__be32 *)rpl)
217 & FW_CMD_REQUEST_F) != 0); 217 & FW_CMD_REQUEST_F) != 0);
218 } 218 }
219 t4_write_reg(adapter, mbox_ctl, 219 t4_write_reg(adapter, mbox_ctl,
@@ -484,7 +484,7 @@ int t4_bar2_sge_qregs(struct adapter *adapter,
484 * o The BAR2 Queue ID. 484 * o The BAR2 Queue ID.
485 * o The BAR2 Queue ID Offset into the BAR2 page. 485 * o The BAR2 Queue ID Offset into the BAR2 page.
486 */ 486 */
487 bar2_page_offset = ((qid >> qpp_shift) << page_shift); 487 bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
488 bar2_qid = qid & qpp_mask; 488 bar2_qid = qid & qpp_mask;
489 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; 489 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
490 490
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 78e1ce09b1ab..f6a3a7abd468 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1954,6 +1954,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1954 struct fec_enet_private *fep = netdev_priv(ndev); 1954 struct fec_enet_private *fep = netdev_priv(ndev);
1955 struct device_node *node; 1955 struct device_node *node;
1956 int err = -ENXIO, i; 1956 int err = -ENXIO, i;
1957 u32 mii_speed, holdtime;
1957 1958
1958 /* 1959 /*
1959 * The i.MX28 dual fec interfaces are not equal. 1960 * The i.MX28 dual fec interfaces are not equal.
@@ -1991,10 +1992,33 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1991 * Reference Manual has an error on this, and gets fixed on i.MX6Q 1992 * Reference Manual has an error on this, and gets fixed on i.MX6Q
1992 * document. 1993 * document.
1993 */ 1994 */
1994 fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); 1995 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
1995 if (fep->quirks & FEC_QUIRK_ENET_MAC) 1996 if (fep->quirks & FEC_QUIRK_ENET_MAC)
1996 fep->phy_speed--; 1997 mii_speed--;
1997 fep->phy_speed <<= 1; 1998 if (mii_speed > 63) {
1999 dev_err(&pdev->dev,
2000 "fec clock (%lu) to fast to get right mii speed\n",
2001 clk_get_rate(fep->clk_ipg));
2002 err = -EINVAL;
2003 goto err_out;
2004 }
2005
2006 /*
2007 * The i.MX28 and i.MX6 types have another filed in the MSCR (aka
2008 * MII_SPEED) register that defines the MDIO output hold time. Earlier
2009 * versions are RAZ there, so just ignore the difference and write the
2010 * register always.
2011 * The minimal hold time according to IEE802.3 (clause 22) is 10 ns.
2012 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
2013 * output.
2014 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
2015 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
2016 * holdtime cannot result in a value greater than 3.
2017 */
2018 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
2019
2020 fep->phy_speed = mii_speed << 1 | holdtime << 8;
2021
1998 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 2022 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1999 2023
2000 fep->mii_bus = mdiobus_alloc(); 2024 fep->mii_bus = mdiobus_alloc();
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index bfdccbd58be0..4dd40e057f40 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3893,6 +3893,9 @@ static int ucc_geth_probe(struct platform_device* ofdev)
3893 ugeth->phy_interface = phy_interface; 3893 ugeth->phy_interface = phy_interface;
3894 ugeth->max_speed = max_speed; 3894 ugeth->max_speed = max_speed;
3895 3895
3896 /* Carrier starts down, phylib will bring it up */
3897 netif_carrier_off(dev);
3898
3896 err = register_netdev(dev); 3899 err = register_netdev(dev);
3897 if (err) { 3900 if (err) {
3898 if (netif_msg_probe(ugeth)) 3901 if (netif_msg_probe(ugeth))
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 1c8bd7c152c2..33c35d3b7420 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -628,6 +628,7 @@ extern const char i40e_driver_name[];
628extern const char i40e_driver_version_str[]; 628extern const char i40e_driver_version_str[];
629void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags); 629void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
630void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags); 630void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
631struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
631void i40e_update_stats(struct i40e_vsi *vsi); 632void i40e_update_stats(struct i40e_vsi *vsi);
632void i40e_update_eth_stats(struct i40e_vsi *vsi); 633void i40e_update_eth_stats(struct i40e_vsi *vsi);
633struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi); 634struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 400fb28db576..bd5079d5c1b6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -178,6 +178,10 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
178 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) 178 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
179 return; 179 return;
180 180
181 /* MFP mode but not an iSCSI PF so return */
182 if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
183 return;
184
181 dcbxcfg = &hw->local_dcbx_config; 185 dcbxcfg = &hw->local_dcbx_config;
182 186
183 /* Set up all the App TLVs if DCBx is negotiated */ 187 /* Set up all the App TLVs if DCBx is negotiated */
@@ -282,6 +286,10 @@ void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
282 struct i40e_dcb_app_priority_table app; 286 struct i40e_dcb_app_priority_table app;
283 int i; 287 int i;
284 288
289 /* MFP mode but not an iSCSI PF so return */
290 if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
291 return;
292
285 for (i = 0; i < old_cfg->numapps; i++) { 293 for (i = 0; i < old_cfg->numapps; i++) {
286 app = old_cfg->app[i]; 294 app = old_cfg->app[i];
287 /* The APP is not available anymore delete it */ 295 /* The APP is not available anymore delete it */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 1ca48458e668..1803afeef23e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -1306,8 +1306,7 @@ static void i40e_fcoe_tx_map(struct i40e_ring *tx_ring,
1306 /* MACLEN is ether header length in words not bytes */ 1306 /* MACLEN is ether header length in words not bytes */
1307 td_offset |= (maclen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; 1307 td_offset |= (maclen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
1308 1308
1309 return i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, 1309 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset);
1310 td_cmd, td_offset);
1311} 1310}
1312 1311
1313/** 1312/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 845bceeda645..63de3f4b7a94 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -38,8 +38,8 @@ static const char i40e_driver_string[] =
38#define DRV_KERN "-k" 38#define DRV_KERN "-k"
39 39
40#define DRV_VERSION_MAJOR 1 40#define DRV_VERSION_MAJOR 1
41#define DRV_VERSION_MINOR 2 41#define DRV_VERSION_MINOR 3
42#define DRV_VERSION_BUILD 43 42#define DRV_VERSION_BUILD 1
43#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 43#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
44 __stringify(DRV_VERSION_MINOR) "." \ 44 __stringify(DRV_VERSION_MINOR) "." \
45 __stringify(DRV_VERSION_BUILD) DRV_KERN 45 __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -250,6 +250,22 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
250} 250}
251 251
252/** 252/**
253 * i40e_find_vsi_from_id - searches for the vsi with the given id
254 * @pf - the pf structure to search for the vsi
255 * @id - id of the vsi it is searching for
256 **/
257struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
258{
259 int i;
260
261 for (i = 0; i < pf->num_alloc_vsi; i++)
262 if (pf->vsi[i] && (pf->vsi[i]->id == id))
263 return pf->vsi[i];
264
265 return NULL;
266}
267
268/**
253 * i40e_service_event_schedule - Schedule the service task to wake up 269 * i40e_service_event_schedule - Schedule the service task to wake up
254 * @pf: board private structure 270 * @pf: board private structure
255 * 271 *
@@ -1969,7 +1985,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
1969 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; 1985 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1970 1986
1971 ctxt.seid = vsi->seid; 1987 ctxt.seid = vsi->seid;
1972 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 1988 ctxt.info = vsi->info;
1973 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 1989 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1974 if (ret) { 1990 if (ret) {
1975 dev_info(&vsi->back->pdev->dev, 1991 dev_info(&vsi->back->pdev->dev,
@@ -1998,7 +2014,7 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
1998 I40E_AQ_VSI_PVLAN_EMOD_NOTHING; 2014 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1999 2015
2000 ctxt.seid = vsi->seid; 2016 ctxt.seid = vsi->seid;
2001 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 2017 ctxt.info = vsi->info;
2002 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2018 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2003 if (ret) { 2019 if (ret) {
2004 dev_info(&vsi->back->pdev->dev, 2020 dev_info(&vsi->back->pdev->dev,
@@ -2282,7 +2298,7 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2282 I40E_AQ_VSI_PVLAN_EMOD_STR; 2298 I40E_AQ_VSI_PVLAN_EMOD_STR;
2283 2299
2284 ctxt.seid = vsi->seid; 2300 ctxt.seid = vsi->seid;
2285 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 2301 ctxt.info = vsi->info;
2286 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); 2302 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2287 if (aq_ret) { 2303 if (aq_ret) {
2288 dev_info(&vsi->back->pdev->dev, 2304 dev_info(&vsi->back->pdev->dev,
@@ -3197,6 +3213,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
3197 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { 3213 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3198 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK; 3214 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3199 dev_info(&pf->pdev->dev, "HMC error interrupt\n"); 3215 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3216 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3217 rd32(hw, I40E_PFHMC_ERRORINFO),
3218 rd32(hw, I40E_PFHMC_ERRORDATA));
3200 } 3219 }
3201 3220
3202 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { 3221 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
@@ -4392,7 +4411,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4392 ctxt.pf_num = vsi->back->hw.pf_id; 4411 ctxt.pf_num = vsi->back->hw.pf_id;
4393 ctxt.vf_num = 0; 4412 ctxt.vf_num = 0;
4394 ctxt.uplink_seid = vsi->uplink_seid; 4413 ctxt.uplink_seid = vsi->uplink_seid;
4395 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); 4414 ctxt.info = vsi->info;
4396 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); 4415 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4397 4416
4398 /* Update the VSI after updating the VSI queue-mapping information */ 4417 /* Update the VSI after updating the VSI queue-mapping information */
@@ -5220,9 +5239,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
5220 goto exit; 5239 goto exit;
5221 } 5240 }
5222 5241
5223 memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
5224 /* Store the old configuration */ 5242 /* Store the old configuration */
5225 memcpy(&tmp_dcbx_cfg, &hw->local_dcbx_config, sizeof(tmp_dcbx_cfg)); 5243 tmp_dcbx_cfg = hw->local_dcbx_config;
5226 5244
5227 /* Reset the old DCBx configuration data */ 5245 /* Reset the old DCBx configuration data */
5228 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config)); 5246 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
@@ -5782,11 +5800,9 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
5782 struct i40e_hw *hw = &pf->hw; 5800 struct i40e_hw *hw = &pf->hw;
5783 struct i40e_aqc_get_link_status *status = 5801 struct i40e_aqc_get_link_status *status =
5784 (struct i40e_aqc_get_link_status *)&e->desc.params.raw; 5802 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
5785 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
5786 5803
5787 /* save off old link status information */ 5804 /* save off old link status information */
5788 memcpy(&pf->hw.phy.link_info_old, hw_link_info, 5805 hw->phy.link_info_old = hw->phy.link_info;
5789 sizeof(pf->hw.phy.link_info_old));
5790 5806
5791 /* Do a new status request to re-enable LSE reporting 5807 /* Do a new status request to re-enable LSE reporting
5792 * and load new status information into the hw struct 5808 * and load new status information into the hw struct
@@ -6608,7 +6624,6 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
6608{ 6624{
6609 struct i40e_hw *hw = &pf->hw; 6625 struct i40e_hw *hw = &pf->hw;
6610 i40e_status ret; 6626 i40e_status ret;
6611 u8 filter_index;
6612 __be16 port; 6627 __be16 port;
6613 int i; 6628 int i;
6614 6629
@@ -6621,22 +6636,20 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
6621 if (pf->pending_vxlan_bitmap & (1 << i)) { 6636 if (pf->pending_vxlan_bitmap & (1 << i)) {
6622 pf->pending_vxlan_bitmap &= ~(1 << i); 6637 pf->pending_vxlan_bitmap &= ~(1 << i);
6623 port = pf->vxlan_ports[i]; 6638 port = pf->vxlan_ports[i];
6624 ret = port ? 6639 if (port)
6625 i40e_aq_add_udp_tunnel(hw, ntohs(port), 6640 ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
6626 I40E_AQC_TUNNEL_TYPE_VXLAN, 6641 I40E_AQC_TUNNEL_TYPE_VXLAN,
6627 &filter_index, NULL) 6642 NULL, NULL);
6628 : i40e_aq_del_udp_tunnel(hw, i, NULL); 6643 else
6644 ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
6629 6645
6630 if (ret) { 6646 if (ret) {
6631 dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n", 6647 dev_info(&pf->pdev->dev,
6632 port ? "adding" : "deleting", 6648 "%s vxlan port %d, index %d failed, err %d, aq_err %d\n",
6633 ntohs(port), port ? i : i); 6649 port ? "add" : "delete",
6634 6650 ntohs(port), i, ret,
6651 pf->hw.aq.asq_last_status);
6635 pf->vxlan_ports[i] = 0; 6652 pf->vxlan_ports[i] = 0;
6636 } else {
6637 dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
6638 port ? "Added" : "Deleted",
6639 ntohs(port), port ? i : filter_index);
6640 } 6653 }
6641 } 6654 }
6642 } 6655 }
@@ -7829,7 +7842,8 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
7829 7842
7830 /* Check if port already exists */ 7843 /* Check if port already exists */
7831 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 7844 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
7832 netdev_info(netdev, "Port %d already offloaded\n", ntohs(port)); 7845 netdev_info(netdev, "vxlan port %d already offloaded\n",
7846 ntohs(port));
7833 return; 7847 return;
7834 } 7848 }
7835 7849
@@ -7837,7 +7851,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
7837 next_idx = i40e_get_vxlan_port_idx(pf, 0); 7851 next_idx = i40e_get_vxlan_port_idx(pf, 0);
7838 7852
7839 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { 7853 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
7840 netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n", 7854 netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
7841 ntohs(port)); 7855 ntohs(port));
7842 return; 7856 return;
7843 } 7857 }
@@ -7845,8 +7859,9 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
7845 /* New port: add it and mark its index in the bitmap */ 7859 /* New port: add it and mark its index in the bitmap */
7846 pf->vxlan_ports[next_idx] = port; 7860 pf->vxlan_ports[next_idx] = port;
7847 pf->pending_vxlan_bitmap |= (1 << next_idx); 7861 pf->pending_vxlan_bitmap |= (1 << next_idx);
7848
7849 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; 7862 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
7863
7864 dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port));
7850} 7865}
7851 7866
7852/** 7867/**
@@ -7874,12 +7889,13 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
7874 * and make it pending 7889 * and make it pending
7875 */ 7890 */
7876 pf->vxlan_ports[idx] = 0; 7891 pf->vxlan_ports[idx] = 0;
7877
7878 pf->pending_vxlan_bitmap |= (1 << idx); 7892 pf->pending_vxlan_bitmap |= (1 << idx);
7879
7880 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; 7893 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
7894
7895 dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
7896 ntohs(port));
7881 } else { 7897 } else {
7882 netdev_warn(netdev, "Port %d was not found, not deleting\n", 7898 netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
7883 ntohs(port)); 7899 ntohs(port));
7884 } 7900 }
7885} 7901}
@@ -8269,7 +8285,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
8269 ret, pf->hw.aq.asq_last_status); 8285 ret, pf->hw.aq.asq_last_status);
8270 return -ENOENT; 8286 return -ENOENT;
8271 } 8287 }
8272 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); 8288 vsi->info = ctxt.info;
8273 vsi->info.valid_sections = 0; 8289 vsi->info.valid_sections = 0;
8274 8290
8275 vsi->seid = ctxt.seid; 8291 vsi->seid = ctxt.seid;
@@ -8403,7 +8419,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
8403 ret = -ENOENT; 8419 ret = -ENOENT;
8404 goto err; 8420 goto err;
8405 } 8421 }
8406 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); 8422 vsi->info = ctxt.info;
8407 vsi->info.valid_sections = 0; 8423 vsi->info.valid_sections = 0;
8408 vsi->seid = ctxt.seid; 8424 vsi->seid = ctxt.seid;
8409 vsi->id = ctxt.vsi_number; 8425 vsi->id = ctxt.vsi_number;
@@ -10210,6 +10226,8 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
10210 set_bit(__I40E_DOWN, &pf->state); 10226 set_bit(__I40E_DOWN, &pf->state);
10211 del_timer_sync(&pf->service_timer); 10227 del_timer_sync(&pf->service_timer);
10212 cancel_work_sync(&pf->service_task); 10228 cancel_work_sync(&pf->service_task);
10229 i40e_fdir_teardown(pf);
10230
10213 rtnl_lock(); 10231 rtnl_lock();
10214 i40e_prep_for_reset(pf); 10232 i40e_prep_for_reset(pf);
10215 rtnl_unlock(); 10233 rtnl_unlock();
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9b11f2e7e361..d8989f9d1798 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1565,8 +1565,11 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
1565 if (likely(!skb)) { 1565 if (likely(!skb)) {
1566 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 1566 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1567 rx_ring->rx_hdr_len); 1567 rx_ring->rx_hdr_len);
1568 if (!skb) 1568 if (!skb) {
1569 rx_ring->rx_stats.alloc_buff_failed++; 1569 rx_ring->rx_stats.alloc_buff_failed++;
1570 break;
1571 }
1572
1570 /* initialize queue mapping */ 1573 /* initialize queue mapping */
1571 skb_record_rx_queue(skb, rx_ring->queue_index); 1574 skb_record_rx_queue(skb, rx_ring->queue_index);
1572 /* we are reusing so sync this buffer for CPU use */ 1575 /* we are reusing so sync this buffer for CPU use */
@@ -2054,6 +2057,19 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2054 __be16 protocol = skb->protocol; 2057 __be16 protocol = skb->protocol;
2055 u32 tx_flags = 0; 2058 u32 tx_flags = 0;
2056 2059
2060 if (protocol == htons(ETH_P_8021Q) &&
2061 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2062 /* When HW VLAN acceleration is turned off by the user the
2063 * stack sets the protocol to 8021q so that the driver
2064 * can take any steps required to support the SW only
2065 * VLAN handling. In our case the driver doesn't need
2066 * to take any further steps so just set the protocol
2067 * to the encapsulated ethertype.
2068 */
2069 skb->protocol = vlan_get_protocol(skb);
2070 goto out;
2071 }
2072
2057 /* if we have a HW VLAN tag being added, default to the HW one */ 2073 /* if we have a HW VLAN tag being added, default to the HW one */
2058 if (skb_vlan_tag_present(skb)) { 2074 if (skb_vlan_tag_present(skb)) {
2059 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; 2075 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 0a93684130b9..4d69e1f04901 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -53,11 +53,12 @@ static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
53 * 53 *
54 * check for the valid VSI id 54 * check for the valid VSI id
55 **/ 55 **/
56static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id) 56static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
57{ 57{
58 struct i40e_pf *pf = vf->pf; 58 struct i40e_pf *pf = vf->pf;
59 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
59 60
60 return pf->vsi[vsi_id]->vf_id == vf->vf_id; 61 return (vsi && (vsi->vf_id == vf->vf_id));
61} 62}
62 63
63/** 64/**
@@ -68,12 +69,13 @@ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
68 * 69 *
69 * check for the valid queue id 70 * check for the valid queue id
70 **/ 71 **/
71static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id, 72static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
72 u8 qid) 73 u8 qid)
73{ 74{
74 struct i40e_pf *pf = vf->pf; 75 struct i40e_pf *pf = vf->pf;
76 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
75 77
76 return qid < pf->vsi[vsi_id]->alloc_queue_pairs; 78 return (vsi && (qid < vsi->alloc_queue_pairs));
77} 79}
78 80
79/** 81/**
@@ -95,18 +97,21 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
95/** 97/**
96 * i40e_vc_get_pf_queue_id 98 * i40e_vc_get_pf_queue_id
97 * @vf: pointer to the VF info 99 * @vf: pointer to the VF info
98 * @vsi_idx: index of VSI in PF struct 100 * @vsi_id: id of VSI as provided by the FW
99 * @vsi_queue_id: vsi relative queue id 101 * @vsi_queue_id: vsi relative queue id
100 * 102 *
101 * return PF relative queue id 103 * return PF relative queue id
102 **/ 104 **/
103static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx, 105static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
104 u8 vsi_queue_id) 106 u8 vsi_queue_id)
105{ 107{
106 struct i40e_pf *pf = vf->pf; 108 struct i40e_pf *pf = vf->pf;
107 struct i40e_vsi *vsi = pf->vsi[vsi_idx]; 109 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
108 u16 pf_queue_id = I40E_QUEUE_END_OF_LIST; 110 u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
109 111
112 if (!vsi)
113 return pf_queue_id;
114
110 if (le16_to_cpu(vsi->info.mapping_flags) & 115 if (le16_to_cpu(vsi->info.mapping_flags) &
111 I40E_AQ_VSI_QUE_MAP_NONCONTIG) 116 I40E_AQ_VSI_QUE_MAP_NONCONTIG)
112 pf_queue_id = 117 pf_queue_id =
@@ -121,12 +126,12 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
121/** 126/**
122 * i40e_config_irq_link_list 127 * i40e_config_irq_link_list
123 * @vf: pointer to the VF info 128 * @vf: pointer to the VF info
124 * @vsi_idx: index of VSI in PF struct 129 * @vsi_id: id of VSI as given by the FW
125 * @vecmap: irq map info 130 * @vecmap: irq map info
126 * 131 *
127 * configure irq link list from the map 132 * configure irq link list from the map
128 **/ 133 **/
129static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx, 134static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
130 struct i40e_virtchnl_vector_map *vecmap) 135 struct i40e_virtchnl_vector_map *vecmap)
131{ 136{
132 unsigned long linklistmap = 0, tempmap; 137 unsigned long linklistmap = 0, tempmap;
@@ -171,7 +176,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
171 I40E_VIRTCHNL_SUPPORTED_QTYPES)); 176 I40E_VIRTCHNL_SUPPORTED_QTYPES));
172 vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES; 177 vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES;
173 qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES; 178 qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES;
174 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); 179 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
175 reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id); 180 reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
176 181
177 wr32(hw, reg_idx, reg); 182 wr32(hw, reg_idx, reg);
@@ -198,7 +203,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
198 (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) { 203 (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
199 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; 204 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
200 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; 205 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
201 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, 206 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
202 vsi_queue_id); 207 vsi_queue_id);
203 } else { 208 } else {
204 pf_queue_id = I40E_QUEUE_END_OF_LIST; 209 pf_queue_id = I40E_QUEUE_END_OF_LIST;
@@ -221,24 +226,26 @@ irq_list_done:
221/** 226/**
222 * i40e_config_vsi_tx_queue 227 * i40e_config_vsi_tx_queue
223 * @vf: pointer to the VF info 228 * @vf: pointer to the VF info
224 * @vsi_idx: index of VSI in PF struct 229 * @vsi_id: id of VSI as provided by the FW
225 * @vsi_queue_id: vsi relative queue index 230 * @vsi_queue_id: vsi relative queue index
226 * @info: config. info 231 * @info: config. info
227 * 232 *
228 * configure tx queue 233 * configure tx queue
229 **/ 234 **/
230static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx, 235static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
231 u16 vsi_queue_id, 236 u16 vsi_queue_id,
232 struct i40e_virtchnl_txq_info *info) 237 struct i40e_virtchnl_txq_info *info)
233{ 238{
234 struct i40e_pf *pf = vf->pf; 239 struct i40e_pf *pf = vf->pf;
235 struct i40e_hw *hw = &pf->hw; 240 struct i40e_hw *hw = &pf->hw;
236 struct i40e_hmc_obj_txq tx_ctx; 241 struct i40e_hmc_obj_txq tx_ctx;
242 struct i40e_vsi *vsi;
237 u16 pf_queue_id; 243 u16 pf_queue_id;
238 u32 qtx_ctl; 244 u32 qtx_ctl;
239 int ret = 0; 245 int ret = 0;
240 246
241 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); 247 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
248 vsi = i40e_find_vsi_from_id(pf, vsi_id);
242 249
243 /* clear the context structure first */ 250 /* clear the context structure first */
244 memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq)); 251 memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
@@ -246,7 +253,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
246 /* only set the required fields */ 253 /* only set the required fields */
247 tx_ctx.base = info->dma_ring_addr / 128; 254 tx_ctx.base = info->dma_ring_addr / 128;
248 tx_ctx.qlen = info->ring_len; 255 tx_ctx.qlen = info->ring_len;
249 tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]); 256 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
250 tx_ctx.rdylist_act = 0; 257 tx_ctx.rdylist_act = 0;
251 tx_ctx.head_wb_ena = info->headwb_enabled; 258 tx_ctx.head_wb_ena = info->headwb_enabled;
252 tx_ctx.head_wb_addr = info->dma_headwb_addr; 259 tx_ctx.head_wb_addr = info->dma_headwb_addr;
@@ -288,13 +295,13 @@ error_context:
288/** 295/**
289 * i40e_config_vsi_rx_queue 296 * i40e_config_vsi_rx_queue
290 * @vf: pointer to the VF info 297 * @vf: pointer to the VF info
291 * @vsi_idx: index of VSI in PF struct 298 * @vsi_id: id of VSI as provided by the FW
292 * @vsi_queue_id: vsi relative queue index 299 * @vsi_queue_id: vsi relative queue index
293 * @info: config. info 300 * @info: config. info
294 * 301 *
295 * configure rx queue 302 * configure rx queue
296 **/ 303 **/
297static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx, 304static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
298 u16 vsi_queue_id, 305 u16 vsi_queue_id,
299 struct i40e_virtchnl_rxq_info *info) 306 struct i40e_virtchnl_rxq_info *info)
300{ 307{
@@ -304,7 +311,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
304 u16 pf_queue_id; 311 u16 pf_queue_id;
305 int ret = 0; 312 int ret = 0;
306 313
307 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); 314 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
308 315
309 /* clear the context structure first */ 316 /* clear the context structure first */
310 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); 317 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
@@ -401,7 +408,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
401 } 408 }
402 if (type == I40E_VSI_SRIOV) { 409 if (type == I40E_VSI_SRIOV) {
403 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 410 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
404 vf->lan_vsi_index = vsi->idx; 411 vf->lan_vsi_idx = vsi->idx;
405 vf->lan_vsi_id = vsi->id; 412 vf->lan_vsi_id = vsi->id;
406 /* If the port VLAN has been configured and then the 413 /* If the port VLAN has been configured and then the
407 * VF driver was removed then the VSI port VLAN 414 * VF driver was removed then the VSI port VLAN
@@ -466,8 +473,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
466 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg); 473 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
467 474
468 /* map PF queues to VF queues */ 475 /* map PF queues to VF queues */
469 for (j = 0; j < pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; j++) { 476 for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
470 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j); 477 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
471 reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); 478 reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
472 wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg); 479 wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
473 total_queue_pairs++; 480 total_queue_pairs++;
@@ -475,13 +482,13 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
475 482
476 /* map PF queues to VSI */ 483 /* map PF queues to VSI */
477 for (j = 0; j < 7; j++) { 484 for (j = 0; j < 7; j++) {
478 if (j * 2 >= pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs) { 485 if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
479 reg = 0x07FF07FF; /* unused */ 486 reg = 0x07FF07FF; /* unused */
480 } else { 487 } else {
481 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, 488 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
482 j * 2); 489 j * 2);
483 reg = qid; 490 reg = qid;
484 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, 491 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
485 (j * 2) + 1); 492 (j * 2) + 1);
486 reg |= qid << 16; 493 reg |= qid << 16;
487 } 494 }
@@ -525,9 +532,9 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
525 int i, msix_vf; 532 int i, msix_vf;
526 533
527 /* free vsi & disconnect it from the parent uplink */ 534 /* free vsi & disconnect it from the parent uplink */
528 if (vf->lan_vsi_index) { 535 if (vf->lan_vsi_idx) {
529 i40e_vsi_release(pf->vsi[vf->lan_vsi_index]); 536 i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
530 vf->lan_vsi_index = 0; 537 vf->lan_vsi_idx = 0;
531 vf->lan_vsi_id = 0; 538 vf->lan_vsi_id = 0;
532 } 539 }
533 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; 540 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
@@ -582,7 +589,7 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
582 ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV); 589 ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
583 if (ret) 590 if (ret)
584 goto error_alloc; 591 goto error_alloc;
585 total_queue_pairs += pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; 592 total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
586 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 593 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
587 594
588 /* store the total qps number for the runtime 595 /* store the total qps number for the runtime
@@ -692,10 +699,10 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
692 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); 699 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
693 700
694 /* On initial reset, we won't have any queues */ 701 /* On initial reset, we won't have any queues */
695 if (vf->lan_vsi_index == 0) 702 if (vf->lan_vsi_idx == 0)
696 goto complete_reset; 703 goto complete_reset;
697 704
698 i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false); 705 i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false);
699complete_reset: 706complete_reset:
700 /* reallocate VF resources to reset the VSI state */ 707 /* reallocate VF resources to reset the VSI state */
701 i40e_free_vf_res(vf); 708 i40e_free_vf_res(vf);
@@ -732,6 +739,8 @@ void i40e_free_vfs(struct i40e_pf *pf)
732 */ 739 */
733 if (!pci_vfs_assigned(pf->pdev)) 740 if (!pci_vfs_assigned(pf->pdev))
734 pci_disable_sriov(pf->pdev); 741 pci_disable_sriov(pf->pdev);
742 else
743 dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
735 744
736 msleep(20); /* let any messages in transit get finished up */ 745 msleep(20); /* let any messages in transit get finished up */
737 746
@@ -761,9 +770,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
761 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; 770 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
762 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); 771 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
763 } 772 }
764 } else {
765 dev_warn(&pf->pdev->dev,
766 "unable to disable SR-IOV because VFs are assigned.\n");
767 } 773 }
768 clear_bit(__I40E_VF_DISABLE, &pf->state); 774 clear_bit(__I40E_VF_DISABLE, &pf->state);
769} 775}
@@ -1017,18 +1023,18 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
1017 } 1023 }
1018 1024
1019 vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2; 1025 vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
1020 vsi = pf->vsi[vf->lan_vsi_index]; 1026 vsi = pf->vsi[vf->lan_vsi_idx];
1021 if (!vsi->info.pvid) 1027 if (!vsi->info.pvid)
1022 vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN; 1028 vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
1023 1029
1024 vfres->num_vsis = num_vsis; 1030 vfres->num_vsis = num_vsis;
1025 vfres->num_queue_pairs = vf->num_queue_pairs; 1031 vfres->num_queue_pairs = vf->num_queue_pairs;
1026 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; 1032 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
1027 if (vf->lan_vsi_index) { 1033 if (vf->lan_vsi_idx) {
1028 vfres->vsi_res[i].vsi_id = vf->lan_vsi_index; 1034 vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
1029 vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV; 1035 vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
1030 vfres->vsi_res[i].num_queue_pairs = 1036 vfres->vsi_res[i].num_queue_pairs =
1031 pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; 1037 pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
1032 memcpy(vfres->vsi_res[i].default_mac_addr, 1038 memcpy(vfres->vsi_res[i].default_mac_addr,
1033 vf->default_lan_addr.addr, ETH_ALEN); 1039 vf->default_lan_addr.addr, ETH_ALEN);
1034 i++; 1040 i++;
@@ -1080,14 +1086,14 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
1080 bool allmulti = false; 1086 bool allmulti = false;
1081 i40e_status aq_ret; 1087 i40e_status aq_ret;
1082 1088
1089 vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
1083 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || 1090 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1084 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || 1091 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1085 !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) || 1092 !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
1086 (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) { 1093 (vsi->type != I40E_VSI_FCOE)) {
1087 aq_ret = I40E_ERR_PARAM; 1094 aq_ret = I40E_ERR_PARAM;
1088 goto error_param; 1095 goto error_param;
1089 } 1096 }
1090 vsi = pf->vsi[info->vsi_id];
1091 if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC) 1097 if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
1092 allmulti = true; 1098 allmulti = true;
1093 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, 1099 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
@@ -1149,7 +1155,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1149 } 1155 }
1150 } 1156 }
1151 /* set vsi num_queue_pairs in use to num configured by VF */ 1157 /* set vsi num_queue_pairs in use to num configured by VF */
1152 pf->vsi[vf->lan_vsi_index]->num_queue_pairs = qci->num_queue_pairs; 1158 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;
1153 1159
1154error_param: 1160error_param:
1155 /* send the response to the VF */ 1161 /* send the response to the VF */
@@ -1250,7 +1256,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1250 aq_ret = I40E_ERR_PARAM; 1256 aq_ret = I40E_ERR_PARAM;
1251 goto error_param; 1257 goto error_param;
1252 } 1258 }
1253 if (i40e_vsi_control_rings(pf->vsi[vsi_id], true)) 1259
1260 if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true))
1254 aq_ret = I40E_ERR_TIMEOUT; 1261 aq_ret = I40E_ERR_TIMEOUT;
1255error_param: 1262error_param:
1256 /* send the response to the VF */ 1263 /* send the response to the VF */
@@ -1272,7 +1279,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1272 struct i40e_virtchnl_queue_select *vqs = 1279 struct i40e_virtchnl_queue_select *vqs =
1273 (struct i40e_virtchnl_queue_select *)msg; 1280 (struct i40e_virtchnl_queue_select *)msg;
1274 struct i40e_pf *pf = vf->pf; 1281 struct i40e_pf *pf = vf->pf;
1275 u16 vsi_id = vqs->vsi_id;
1276 i40e_status aq_ret = 0; 1282 i40e_status aq_ret = 0;
1277 1283
1278 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { 1284 if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
@@ -1289,7 +1295,8 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1289 aq_ret = I40E_ERR_PARAM; 1295 aq_ret = I40E_ERR_PARAM;
1290 goto error_param; 1296 goto error_param;
1291 } 1297 }
1292 if (i40e_vsi_control_rings(pf->vsi[vsi_id], false)) 1298
1299 if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false))
1293 aq_ret = I40E_ERR_TIMEOUT; 1300 aq_ret = I40E_ERR_TIMEOUT;
1294 1301
1295error_param: 1302error_param:
@@ -1327,7 +1334,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1327 goto error_param; 1334 goto error_param;
1328 } 1335 }
1329 1336
1330 vsi = pf->vsi[vqs->vsi_id]; 1337 vsi = pf->vsi[vf->lan_vsi_idx];
1331 if (!vsi) { 1338 if (!vsi) {
1332 aq_ret = I40E_ERR_PARAM; 1339 aq_ret = I40E_ERR_PARAM;
1333 goto error_param; 1340 goto error_param;
@@ -1405,7 +1412,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1405 if (ret) 1412 if (ret)
1406 goto error_param; 1413 goto error_param;
1407 } 1414 }
1408 vsi = pf->vsi[vsi_id]; 1415 vsi = pf->vsi[vf->lan_vsi_idx];
1409 1416
1410 /* add new addresses to the list */ 1417 /* add new addresses to the list */
1411 for (i = 0; i < al->num_elements; i++) { 1418 for (i = 0; i < al->num_elements; i++) {
@@ -1473,7 +1480,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1473 goto error_param; 1480 goto error_param;
1474 } 1481 }
1475 } 1482 }
1476 vsi = pf->vsi[vsi_id]; 1483 vsi = pf->vsi[vf->lan_vsi_idx];
1477 1484
1478 /* delete addresses from the list */ 1485 /* delete addresses from the list */
1479 for (i = 0; i < al->num_elements; i++) 1486 for (i = 0; i < al->num_elements; i++)
@@ -1523,7 +1530,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1523 goto error_param; 1530 goto error_param;
1524 } 1531 }
1525 } 1532 }
1526 vsi = pf->vsi[vsi_id]; 1533 vsi = pf->vsi[vf->lan_vsi_idx];
1527 if (vsi->info.pvid) { 1534 if (vsi->info.pvid) {
1528 aq_ret = I40E_ERR_PARAM; 1535 aq_ret = I40E_ERR_PARAM;
1529 goto error_param; 1536 goto error_param;
@@ -1576,7 +1583,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1576 } 1583 }
1577 } 1584 }
1578 1585
1579 vsi = pf->vsi[vsi_id]; 1586 vsi = pf->vsi[vf->lan_vsi_idx];
1580 if (vsi->info.pvid) { 1587 if (vsi->info.pvid) {
1581 aq_ret = I40E_ERR_PARAM; 1588 aq_ret = I40E_ERR_PARAM;
1582 goto error_param; 1589 goto error_param;
@@ -1965,7 +1972,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
1965 } 1972 }
1966 1973
1967 vf = &(pf->vf[vf_id]); 1974 vf = &(pf->vf[vf_id]);
1968 vsi = pf->vsi[vf->lan_vsi_index]; 1975 vsi = pf->vsi[vf->lan_vsi_idx];
1969 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 1976 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
1970 dev_err(&pf->pdev->dev, 1977 dev_err(&pf->pdev->dev,
1971 "Uninitialized VF %d\n", vf_id); 1978 "Uninitialized VF %d\n", vf_id);
@@ -2039,7 +2046,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
2039 } 2046 }
2040 2047
2041 vf = &(pf->vf[vf_id]); 2048 vf = &(pf->vf[vf_id]);
2042 vsi = pf->vsi[vf->lan_vsi_index]; 2049 vsi = pf->vsi[vf->lan_vsi_idx];
2043 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 2050 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2044 dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); 2051 dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
2045 ret = -EINVAL; 2052 ret = -EINVAL;
@@ -2152,7 +2159,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
2152 } 2159 }
2153 2160
2154 vf = &(pf->vf[vf_id]); 2161 vf = &(pf->vf[vf_id]);
2155 vsi = pf->vsi[vf->lan_vsi_index]; 2162 vsi = pf->vsi[vf->lan_vsi_idx];
2156 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 2163 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2157 dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id); 2164 dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
2158 ret = -EINVAL; 2165 ret = -EINVAL;
@@ -2226,7 +2233,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
2226 2233
2227 vf = &(pf->vf[vf_id]); 2234 vf = &(pf->vf[vf_id]);
2228 /* first vsi is always the LAN vsi */ 2235 /* first vsi is always the LAN vsi */
2229 vsi = pf->vsi[vf->lan_vsi_index]; 2236 vsi = pf->vsi[vf->lan_vsi_idx];
2230 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { 2237 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2231 dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); 2238 dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
2232 ret = -EINVAL; 2239 ret = -EINVAL;
@@ -2350,7 +2357,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
2350 2357
2351 vf->spoofchk = enable; 2358 vf->spoofchk = enable;
2352 memset(&ctxt, 0, sizeof(ctxt)); 2359 memset(&ctxt, 0, sizeof(ctxt));
2353 ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid; 2360 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
2354 ctxt.pf_num = pf->hw.pf_id; 2361 ctxt.pf_num = pf->hw.pf_id;
2355 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); 2362 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
2356 if (enable) 2363 if (enable)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 9c3a41040835..09043c1aae54 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -88,7 +88,7 @@ struct i40e_vf {
88 * When assigned, these will be non-zero, because VSI 0 is always 88 * When assigned, these will be non-zero, because VSI 0 is always
89 * the main LAN VSI for the PF. 89 * the main LAN VSI for the PF.
90 */ 90 */
91 u8 lan_vsi_index; /* index into PF struct */ 91 u8 lan_vsi_idx; /* index into PF struct */
92 u8 lan_vsi_id; /* ID as used by firmware */ 92 u8 lan_vsi_id; /* ID as used by firmware */
93 93
94 u8 num_queue_pairs; /* num of qps assigned to VF vsis */ 94 u8 num_queue_pairs; /* num of qps assigned to VF vsis */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index f41da5d8047b..e2ddb30e96f5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -915,9 +915,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
915 * so the total length of IPv4 header is IHL*4 bytes 915 * so the total length of IPv4 header is IHL*4 bytes
916 * The UDP_0 bit *may* bet set if the *inner* header is UDP 916 * The UDP_0 bit *may* bet set if the *inner* header is UDP
917 */ 917 */
918 if (ipv4_tunnel && 918 if (ipv4_tunnel) {
919 (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
920 !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
921 skb->transport_header = skb->mac_header + 919 skb->transport_header = skb->mac_header +
922 sizeof(struct ethhdr) + 920 sizeof(struct ethhdr) +
923 (ip_hdr(skb)->ihl * 4); 921 (ip_hdr(skb)->ihl * 4);
@@ -927,15 +925,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
927 skb->protocol == htons(ETH_P_8021AD)) 925 skb->protocol == htons(ETH_P_8021AD))
928 ? VLAN_HLEN : 0; 926 ? VLAN_HLEN : 0;
929 927
930 rx_udp_csum = udp_csum(skb); 928 if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
931 iph = ip_hdr(skb); 929 (udp_hdr(skb)->check != 0)) {
932 csum = csum_tcpudp_magic( 930 rx_udp_csum = udp_csum(skb);
933 iph->saddr, iph->daddr, 931 iph = ip_hdr(skb);
934 (skb->len - skb_transport_offset(skb)), 932 csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
935 IPPROTO_UDP, rx_udp_csum); 933 (skb->len -
934 skb_transport_offset(skb)),
935 IPPROTO_UDP, rx_udp_csum);
936 936
937 if (udp_hdr(skb)->check != csum) 937 if (udp_hdr(skb)->check != csum)
938 goto checksum_fail; 938 goto checksum_fail;
939
940 } /* else its GRE and so no outer UDP header */
939 } 941 }
940 942
941 skb->ip_summed = CHECKSUM_UNNECESSARY; 943 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1038,8 +1040,11 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
1038 if (likely(!skb)) { 1040 if (likely(!skb)) {
1039 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 1041 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1040 rx_ring->rx_hdr_len); 1042 rx_ring->rx_hdr_len);
1041 if (!skb) 1043 if (!skb) {
1042 rx_ring->rx_stats.alloc_buff_failed++; 1044 rx_ring->rx_stats.alloc_buff_failed++;
1045 break;
1046 }
1047
1043 /* initialize queue mapping */ 1048 /* initialize queue mapping */
1044 skb_record_rx_queue(skb, rx_ring->queue_index); 1049 skb_record_rx_queue(skb, rx_ring->queue_index);
1045 /* we are reusing so sync this buffer for CPU use */ 1050 /* we are reusing so sync this buffer for CPU use */
@@ -1365,6 +1370,19 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
1365 __be16 protocol = skb->protocol; 1370 __be16 protocol = skb->protocol;
1366 u32 tx_flags = 0; 1371 u32 tx_flags = 0;
1367 1372
1373 if (protocol == htons(ETH_P_8021Q) &&
1374 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1375 /* When HW VLAN acceleration is turned off by the user the
1376 * stack sets the protocol to 8021q so that the driver
1377 * can take any steps required to support the SW only
1378 * VLAN handling. In our case the driver doesn't need
1379 * to take any further steps so just set the protocol
1380 * to the encapsulated ethertype.
1381 */
1382 skb->protocol = vlan_get_protocol(skb);
1383 goto out;
1384 }
1385
1368 /* if we have a HW VLAN tag being added, default to the HW one */ 1386 /* if we have a HW VLAN tag being added, default to the HW one */
1369 if (skb_vlan_tag_present(skb)) { 1387 if (skb_vlan_tag_present(skb)) {
1370 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; 1388 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
@@ -1381,6 +1399,7 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
1381 tx_flags |= I40E_TX_FLAGS_SW_VLAN; 1399 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
1382 } 1400 }
1383 1401
1402out:
1384 *flags = tx_flags; 1403 *flags = tx_flags;
1385 return 0; 1404 return 0;
1386} 1405}
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index b08efafee1ae..6d5f3b21c68a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -664,13 +664,21 @@ i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
664static struct 664static struct
665i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) 665i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
666{ 666{
667 struct i40evf_vlan_filter *f; 667 struct i40evf_vlan_filter *f = NULL;
668 int count = 50;
669
670 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
671 &adapter->crit_section)) {
672 udelay(1);
673 if (--count == 0)
674 goto out;
675 }
668 676
669 f = i40evf_find_vlan(adapter, vlan); 677 f = i40evf_find_vlan(adapter, vlan);
670 if (!f) { 678 if (!f) {
671 f = kzalloc(sizeof(*f), GFP_ATOMIC); 679 f = kzalloc(sizeof(*f), GFP_ATOMIC);
672 if (!f) 680 if (!f)
673 return NULL; 681 goto clearout;
674 682
675 f->vlan = vlan; 683 f->vlan = vlan;
676 684
@@ -680,6 +688,9 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
680 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; 688 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
681 } 689 }
682 690
691clearout:
692 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
693out:
683 return f; 694 return f;
684} 695}
685 696
@@ -691,12 +702,21 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
691static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan) 702static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
692{ 703{
693 struct i40evf_vlan_filter *f; 704 struct i40evf_vlan_filter *f;
705 int count = 50;
706
707 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
708 &adapter->crit_section)) {
709 udelay(1);
710 if (--count == 0)
711 return;
712 }
694 713
695 f = i40evf_find_vlan(adapter, vlan); 714 f = i40evf_find_vlan(adapter, vlan);
696 if (f) { 715 if (f) {
697 f->remove = true; 716 f->remove = true;
698 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; 717 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
699 } 718 }
719 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
700} 720}
701 721
702/** 722/**
@@ -1415,41 +1435,22 @@ restart_watchdog:
1415} 1435}
1416 1436
1417/** 1437/**
1418 * next_queue - increment to next available tx queue 1438 * i40evf_configure_rss - Prepare for RSS
1419 * @adapter: board private structure
1420 * @j: queue counter
1421 *
1422 * Helper function for RSS programming to increment through available
1423 * queus. Returns the next queue value.
1424 **/
1425static int next_queue(struct i40evf_adapter *adapter, int j)
1426{
1427 j += 1;
1428
1429 return j >= adapter->num_active_queues ? 0 : j;
1430}
1431
1432/**
1433 * i40evf_configure_rss - Prepare for RSS if used
1434 * @adapter: board private structure 1439 * @adapter: board private structure
1435 **/ 1440 **/
1436static void i40evf_configure_rss(struct i40evf_adapter *adapter) 1441static void i40evf_configure_rss(struct i40evf_adapter *adapter)
1437{ 1442{
1438 u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1]; 1443 u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1];
1439 struct i40e_hw *hw = &adapter->hw; 1444 struct i40e_hw *hw = &adapter->hw;
1445 u32 cqueue = 0;
1440 u32 lut = 0; 1446 u32 lut = 0;
1441 int i, j; 1447 int i, j;
1442 u64 hena; 1448 u64 hena;
1443 1449
1444 /* No RSS for single queue. */
1445 if (adapter->num_active_queues == 1) {
1446 wr32(hw, I40E_VFQF_HENA(0), 0);
1447 wr32(hw, I40E_VFQF_HENA(1), 0);
1448 return;
1449 }
1450
1451 /* Hash type is configured by the PF - we just supply the key */ 1450 /* Hash type is configured by the PF - we just supply the key */
1452 netdev_rss_key_fill(rss_key, sizeof(rss_key)); 1451 netdev_rss_key_fill(rss_key, sizeof(rss_key));
1452
1453 /* Fill out hash function seed */
1453 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) 1454 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
1454 wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]); 1455 wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]);
1455 1456
@@ -1459,16 +1460,14 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
1459 wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); 1460 wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
1460 1461
1461 /* Populate the LUT with max no. of queues in round robin fashion */ 1462 /* Populate the LUT with max no. of queues in round robin fashion */
1462 j = adapter->num_active_queues;
1463 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { 1463 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
1464 j = next_queue(adapter, j); 1464 lut = 0;
1465 lut = j; 1465 for (j = 0; j < 4; j++) {
1466 j = next_queue(adapter, j); 1466 if (cqueue == adapter->vsi_res->num_queue_pairs)
1467 lut |= j << 8; 1467 cqueue = 0;
1468 j = next_queue(adapter, j); 1468 lut |= ((cqueue) << (8 * j));
1469 lut |= j << 16; 1469 cqueue++;
1470 j = next_queue(adapter, j); 1470 }
1471 lut |= j << 24;
1472 wr32(hw, I40E_VFQF_HLUT(i), lut); 1471 wr32(hw, I40E_VFQF_HLUT(i), lut);
1473 } 1472 }
1474 i40e_flush(hw); 1473 i40e_flush(hw);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 96208f17bb53..ce5f7f9cff06 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -100,6 +100,8 @@
100#define MVNETA_TXQ_CMD 0x2448 100#define MVNETA_TXQ_CMD 0x2448
101#define MVNETA_TXQ_DISABLE_SHIFT 8 101#define MVNETA_TXQ_DISABLE_SHIFT 8
102#define MVNETA_TXQ_ENABLE_MASK 0x000000ff 102#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
103#define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
104#define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
103#define MVNETA_ACC_MODE 0x2500 105#define MVNETA_ACC_MODE 0x2500
104#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2)) 106#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
105#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff 107#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
@@ -122,6 +124,7 @@
122#define MVNETA_TX_INTR_MASK_ALL (0xff << 0) 124#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
123#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8) 125#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
124#define MVNETA_RX_INTR_MASK_ALL (0xff << 8) 126#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
127#define MVNETA_MISCINTR_INTR_MASK BIT(31)
125 128
126#define MVNETA_INTR_OLD_CAUSE 0x25a8 129#define MVNETA_INTR_OLD_CAUSE 0x25a8
127#define MVNETA_INTR_OLD_MASK 0x25ac 130#define MVNETA_INTR_OLD_MASK 0x25ac
@@ -165,6 +168,7 @@
165#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc 168#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
166#define MVNETA_GMAC0_PORT_ENABLE BIT(0) 169#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
167#define MVNETA_GMAC_CTRL_2 0x2c08 170#define MVNETA_GMAC_CTRL_2 0x2c08
171#define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
168#define MVNETA_GMAC2_PCS_ENABLE BIT(3) 172#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
169#define MVNETA_GMAC2_PORT_RGMII BIT(4) 173#define MVNETA_GMAC2_PORT_RGMII BIT(4)
170#define MVNETA_GMAC2_PORT_RESET BIT(6) 174#define MVNETA_GMAC2_PORT_RESET BIT(6)
@@ -180,9 +184,11 @@
180#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c 184#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
181#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0) 185#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
182#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) 186#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
187#define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
183#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) 188#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
184#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) 189#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
185#define MVNETA_GMAC_AN_SPEED_EN BIT(7) 190#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
191#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
186#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) 192#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
187#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) 193#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
188#define MVNETA_MIB_COUNTERS_BASE 0x3080 194#define MVNETA_MIB_COUNTERS_BASE 0x3080
@@ -304,6 +310,7 @@ struct mvneta_port {
304 unsigned int link; 310 unsigned int link;
305 unsigned int duplex; 311 unsigned int duplex;
306 unsigned int speed; 312 unsigned int speed;
313 int use_inband_status:1;
307}; 314};
308 315
309/* The mvneta_tx_desc and mvneta_rx_desc structures describe the 316/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -994,6 +1001,20 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
994 val &= ~MVNETA_PHY_POLLING_ENABLE; 1001 val &= ~MVNETA_PHY_POLLING_ENABLE;
995 mvreg_write(pp, MVNETA_UNIT_CONTROL, val); 1002 mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
996 1003
1004 if (pp->use_inband_status) {
1005 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
1006 val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
1007 MVNETA_GMAC_FORCE_LINK_DOWN |
1008 MVNETA_GMAC_AN_FLOW_CTRL_EN);
1009 val |= MVNETA_GMAC_INBAND_AN_ENABLE |
1010 MVNETA_GMAC_AN_SPEED_EN |
1011 MVNETA_GMAC_AN_DUPLEX_EN;
1012 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
1013 val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
1014 val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
1015 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
1016 }
1017
997 mvneta_set_ucast_table(pp, -1); 1018 mvneta_set_ucast_table(pp, -1);
998 mvneta_set_special_mcast_table(pp, -1); 1019 mvneta_set_special_mcast_table(pp, -1);
999 mvneta_set_other_mcast_table(pp, -1); 1020 mvneta_set_other_mcast_table(pp, -1);
@@ -2043,6 +2064,28 @@ static irqreturn_t mvneta_isr(int irq, void *dev_id)
2043 return IRQ_HANDLED; 2064 return IRQ_HANDLED;
2044} 2065}
2045 2066
2067static int mvneta_fixed_link_update(struct mvneta_port *pp,
2068 struct phy_device *phy)
2069{
2070 struct fixed_phy_status status;
2071 struct fixed_phy_status changed = {};
2072 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
2073
2074 status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
2075 if (gmac_stat & MVNETA_GMAC_SPEED_1000)
2076 status.speed = SPEED_1000;
2077 else if (gmac_stat & MVNETA_GMAC_SPEED_100)
2078 status.speed = SPEED_100;
2079 else
2080 status.speed = SPEED_10;
2081 status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
2082 changed.link = 1;
2083 changed.speed = 1;
2084 changed.duplex = 1;
2085 fixed_phy_update_state(phy, &status, &changed);
2086 return 0;
2087}
2088
2046/* NAPI handler 2089/* NAPI handler
2047 * Bits 0 - 7 of the causeRxTx register indicate that are transmitted 2090 * Bits 0 - 7 of the causeRxTx register indicate that are transmitted
2048 * packets on the corresponding TXQ (Bit 0 is for TX queue 1). 2091 * packets on the corresponding TXQ (Bit 0 is for TX queue 1).
@@ -2063,8 +2106,18 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
2063 } 2106 }
2064 2107
2065 /* Read cause register */ 2108 /* Read cause register */
2066 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) & 2109 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
2067 (MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number)); 2110 if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
2111 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
2112
2113 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2114 if (pp->use_inband_status && (cause_misc &
2115 (MVNETA_CAUSE_PHY_STATUS_CHANGE |
2116 MVNETA_CAUSE_LINK_CHANGE |
2117 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
2118 mvneta_fixed_link_update(pp, pp->phy_dev);
2119 }
2120 }
2068 2121
2069 /* Release Tx descriptors */ 2122 /* Release Tx descriptors */
2070 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) { 2123 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
@@ -2109,7 +2162,9 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
2109 napi_complete(napi); 2162 napi_complete(napi);
2110 local_irq_save(flags); 2163 local_irq_save(flags);
2111 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 2164 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2112 MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number)); 2165 MVNETA_RX_INTR_MASK(rxq_number) |
2166 MVNETA_TX_INTR_MASK(txq_number) |
2167 MVNETA_MISCINTR_INTR_MASK);
2113 local_irq_restore(flags); 2168 local_irq_restore(flags);
2114 } 2169 }
2115 2170
@@ -2373,7 +2428,13 @@ static void mvneta_start_dev(struct mvneta_port *pp)
2373 2428
2374 /* Unmask interrupts */ 2429 /* Unmask interrupts */
2375 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 2430 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2376 MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number)); 2431 MVNETA_RX_INTR_MASK(rxq_number) |
2432 MVNETA_TX_INTR_MASK(txq_number) |
2433 MVNETA_MISCINTR_INTR_MASK);
2434 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2435 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2436 MVNETA_CAUSE_LINK_CHANGE |
2437 MVNETA_CAUSE_PSC_SYNC_CHANGE);
2377 2438
2378 phy_start(pp->phy_dev); 2439 phy_start(pp->phy_dev);
2379 netif_tx_start_all_queues(pp->dev); 2440 netif_tx_start_all_queues(pp->dev);
@@ -2523,9 +2584,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
2523 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 2584 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2524 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | 2585 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2525 MVNETA_GMAC_CONFIG_GMII_SPEED | 2586 MVNETA_GMAC_CONFIG_GMII_SPEED |
2526 MVNETA_GMAC_CONFIG_FULL_DUPLEX | 2587 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
2527 MVNETA_GMAC_AN_SPEED_EN |
2528 MVNETA_GMAC_AN_DUPLEX_EN);
2529 2588
2530 if (phydev->duplex) 2589 if (phydev->duplex)
2531 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; 2590 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
@@ -2554,12 +2613,24 @@ static void mvneta_adjust_link(struct net_device *ndev)
2554 2613
2555 if (status_change) { 2614 if (status_change) {
2556 if (phydev->link) { 2615 if (phydev->link) {
2557 u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 2616 if (!pp->use_inband_status) {
2558 val |= (MVNETA_GMAC_FORCE_LINK_PASS | 2617 u32 val = mvreg_read(pp,
2559 MVNETA_GMAC_FORCE_LINK_DOWN); 2618 MVNETA_GMAC_AUTONEG_CONFIG);
2560 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 2619 val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
2620 val |= MVNETA_GMAC_FORCE_LINK_PASS;
2621 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
2622 val);
2623 }
2561 mvneta_port_up(pp); 2624 mvneta_port_up(pp);
2562 } else { 2625 } else {
2626 if (!pp->use_inband_status) {
2627 u32 val = mvreg_read(pp,
2628 MVNETA_GMAC_AUTONEG_CONFIG);
2629 val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
2630 val |= MVNETA_GMAC_FORCE_LINK_DOWN;
2631 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
2632 val);
2633 }
2563 mvneta_port_down(pp); 2634 mvneta_port_down(pp);
2564 } 2635 }
2565 phy_print_status(phydev); 2636 phy_print_status(phydev);
@@ -2658,16 +2729,11 @@ static int mvneta_stop(struct net_device *dev)
2658static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2729static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2659{ 2730{
2660 struct mvneta_port *pp = netdev_priv(dev); 2731 struct mvneta_port *pp = netdev_priv(dev);
2661 int ret;
2662 2732
2663 if (!pp->phy_dev) 2733 if (!pp->phy_dev)
2664 return -ENOTSUPP; 2734 return -ENOTSUPP;
2665 2735
2666 ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd); 2736 return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
2667 if (!ret)
2668 mvneta_adjust_link(dev);
2669
2670 return ret;
2671} 2737}
2672 2738
2673/* Ethtool methods */ 2739/* Ethtool methods */
@@ -2910,6 +2976,9 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
2910 return -EINVAL; 2976 return -EINVAL;
2911 } 2977 }
2912 2978
2979 if (pp->use_inband_status)
2980 ctrl |= MVNETA_GMAC2_INBAND_AN_ENABLE;
2981
2913 /* Cancel Port Reset */ 2982 /* Cancel Port Reset */
2914 ctrl &= ~MVNETA_GMAC2_PORT_RESET; 2983 ctrl &= ~MVNETA_GMAC2_PORT_RESET;
2915 mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl); 2984 mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
@@ -2934,6 +3003,7 @@ static int mvneta_probe(struct platform_device *pdev)
2934 char hw_mac_addr[ETH_ALEN]; 3003 char hw_mac_addr[ETH_ALEN];
2935 const char *mac_from; 3004 const char *mac_from;
2936 int phy_mode; 3005 int phy_mode;
3006 int fixed_phy = 0;
2937 int err; 3007 int err;
2938 3008
2939 /* Our multiqueue support is not complete, so for now, only 3009 /* Our multiqueue support is not complete, so for now, only
@@ -2967,6 +3037,7 @@ static int mvneta_probe(struct platform_device *pdev)
2967 dev_err(&pdev->dev, "cannot register fixed PHY\n"); 3037 dev_err(&pdev->dev, "cannot register fixed PHY\n");
2968 goto err_free_irq; 3038 goto err_free_irq;
2969 } 3039 }
3040 fixed_phy = 1;
2970 3041
2971 /* In the case of a fixed PHY, the DT node associated 3042 /* In the case of a fixed PHY, the DT node associated
2972 * to the PHY is the Ethernet MAC DT node. 3043 * to the PHY is the Ethernet MAC DT node.
@@ -2990,6 +3061,8 @@ static int mvneta_probe(struct platform_device *pdev)
2990 pp = netdev_priv(dev); 3061 pp = netdev_priv(dev);
2991 pp->phy_node = phy_node; 3062 pp->phy_node = phy_node;
2992 pp->phy_interface = phy_mode; 3063 pp->phy_interface = phy_mode;
3064 pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) &&
3065 fixed_phy;
2993 3066
2994 pp->clk = devm_clk_get(&pdev->dev, NULL); 3067 pp->clk = devm_clk_get(&pdev->dev, NULL);
2995 if (IS_ERR(pp->clk)) { 3068 if (IS_ERR(pp->clk)) {
@@ -3067,6 +3140,12 @@ static int mvneta_probe(struct platform_device *pdev)
3067 3140
3068 platform_set_drvdata(pdev, pp->dev); 3141 platform_set_drvdata(pdev, pp->dev);
3069 3142
3143 if (pp->use_inband_status) {
3144 struct phy_device *phy = of_phy_find_device(dn);
3145
3146 mvneta_fixed_link_update(pp, phy);
3147 }
3148
3070 return 0; 3149 return 0;
3071 3150
3072err_free_stats: 3151err_free_stats:
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index 3e9c70f15b42..c82217e0d22d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -1,7 +1,8 @@
1obj-$(CONFIG_MLX4_CORE) += mlx4_core.o 1obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
2 2
3mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \ 3mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o fw_qos.o icm.o intf.o \
4 mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o resource_tracker.o 4 main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \
5 srq.o resource_tracker.o
5 6
6obj-$(CONFIG_MLX4_EN) += mlx4_en.o 7obj-$(CONFIG_MLX4_EN) += mlx4_en.o
7 8
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 20b3c7b21e63..f0fbb4ade85d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -48,6 +48,7 @@
48 48
49#include "mlx4.h" 49#include "mlx4.h"
50#include "fw.h" 50#include "fw.h"
51#include "fw_qos.h"
51 52
52#define CMD_POLL_TOKEN 0xffff 53#define CMD_POLL_TOKEN 0xffff
53#define INBOX_MASK 0xffffffffffffff00ULL 54#define INBOX_MASK 0xffffffffffffff00ULL
@@ -724,8 +725,10 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
724 * on the host, we deprecate the error message for this 725 * on the host, we deprecate the error message for this
725 * specific command/input_mod/opcode_mod/fw-status to be debug. 726 * specific command/input_mod/opcode_mod/fw-status to be debug.
726 */ 727 */
727 if (op == MLX4_CMD_SET_PORT && in_modifier == 1 && 728 if (op == MLX4_CMD_SET_PORT &&
728 op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE) 729 (in_modifier == 1 || in_modifier == 2) &&
730 op_modifier == MLX4_SET_PORT_IB_OPCODE &&
731 context->fw_status == CMD_STAT_BAD_SIZE)
729 mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n", 732 mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
730 op, context->fw_status); 733 op, context->fw_status);
731 else 734 else
@@ -1455,6 +1458,24 @@ static struct mlx4_cmd_info cmd_info[] = {
1455 .wrapper = mlx4_CMD_EPERM_wrapper, 1458 .wrapper = mlx4_CMD_EPERM_wrapper,
1456 }, 1459 },
1457 { 1460 {
1461 .opcode = MLX4_CMD_ALLOCATE_VPP,
1462 .has_inbox = false,
1463 .has_outbox = true,
1464 .out_is_imm = false,
1465 .encode_slave_id = false,
1466 .verify = NULL,
1467 .wrapper = mlx4_CMD_EPERM_wrapper,
1468 },
1469 {
1470 .opcode = MLX4_CMD_SET_VPORT_QOS,
1471 .has_inbox = false,
1472 .has_outbox = true,
1473 .out_is_imm = false,
1474 .encode_slave_id = false,
1475 .verify = NULL,
1476 .wrapper = mlx4_CMD_EPERM_wrapper,
1477 },
1478 {
1458 .opcode = MLX4_CMD_CONF_SPECIAL_QP, 1479 .opcode = MLX4_CMD_CONF_SPECIAL_QP,
1459 .has_inbox = false, 1480 .has_inbox = false,
1460 .has_outbox = false, 1481 .has_outbox = false,
@@ -1790,7 +1811,8 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1790 1811
1791 if (vp_oper->state.default_vlan == vp_admin->default_vlan && 1812 if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
1792 vp_oper->state.default_qos == vp_admin->default_qos && 1813 vp_oper->state.default_qos == vp_admin->default_qos &&
1793 vp_oper->state.link_state == vp_admin->link_state) 1814 vp_oper->state.link_state == vp_admin->link_state &&
1815 vp_oper->state.qos_vport == vp_admin->qos_vport)
1794 return 0; 1816 return 0;
1795 1817
1796 if (!(priv->mfunc.master.slave_state[slave].active && 1818 if (!(priv->mfunc.master.slave_state[slave].active &&
@@ -1848,6 +1870,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1848 vp_oper->state.default_vlan = vp_admin->default_vlan; 1870 vp_oper->state.default_vlan = vp_admin->default_vlan;
1849 vp_oper->state.default_qos = vp_admin->default_qos; 1871 vp_oper->state.default_qos = vp_admin->default_qos;
1850 vp_oper->state.link_state = vp_admin->link_state; 1872 vp_oper->state.link_state = vp_admin->link_state;
1873 vp_oper->state.qos_vport = vp_admin->qos_vport;
1851 1874
1852 if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE) 1875 if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
1853 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE; 1876 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
@@ -1856,6 +1879,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1856 work->port = port; 1879 work->port = port;
1857 work->slave = slave; 1880 work->slave = slave;
1858 work->qos = vp_oper->state.default_qos; 1881 work->qos = vp_oper->state.default_qos;
1882 work->qos_vport = vp_oper->state.qos_vport;
1859 work->vlan_id = vp_oper->state.default_vlan; 1883 work->vlan_id = vp_oper->state.default_vlan;
1860 work->vlan_ix = vp_oper->vlan_idx; 1884 work->vlan_ix = vp_oper->vlan_idx;
1861 work->priv = priv; 1885 work->priv = priv;
@@ -1865,6 +1889,63 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1865 return 0; 1889 return 0;
1866} 1890}
1867 1891
1892static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port)
1893{
1894 struct mlx4_qos_manager *port_qos_ctl;
1895 struct mlx4_priv *priv = mlx4_priv(dev);
1896
1897 port_qos_ctl = &priv->mfunc.master.qos_ctl[port];
1898 bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP);
1899
1900 /* Enable only default prio at PF init routine */
1901 set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm);
1902}
1903
1904static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
1905{
1906 int i;
1907 int err;
1908 int num_vfs;
1909 u16 availible_vpp;
1910 u8 vpp_param[MLX4_NUM_UP];
1911 struct mlx4_qos_manager *port_qos;
1912 struct mlx4_priv *priv = mlx4_priv(dev);
1913
1914 err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param);
1915 if (err) {
1916 mlx4_info(dev, "Failed query availible VPPs\n");
1917 return;
1918 }
1919
1920 port_qos = &priv->mfunc.master.qos_ctl[port];
1921 num_vfs = (availible_vpp /
1922 bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP));
1923
1924 for (i = 0; i < MLX4_NUM_UP; i++) {
1925 if (test_bit(i, port_qos->priority_bm))
1926 vpp_param[i] = num_vfs;
1927 }
1928
1929 err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param);
1930 if (err) {
1931 mlx4_info(dev, "Failed allocating VPPs\n");
1932 return;
1933 }
1934
1935 /* Query actual allocated VPP, just to make sure */
1936 err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param);
1937 if (err) {
1938 mlx4_info(dev, "Failed query availible VPPs\n");
1939 return;
1940 }
1941
1942 port_qos->num_of_qos_vfs = num_vfs;
1943 mlx4_dbg(dev, "Port %d Availible VPPs %d\n", port, availible_vpp);
1944
1945 for (i = 0; i < MLX4_NUM_UP; i++)
1946 mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i,
1947 vpp_param[i]);
1948}
1868 1949
1869static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave) 1950static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1870{ 1951{
@@ -2002,7 +2083,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
2002 goto reset_slave; 2083 goto reset_slave;
2003 slave_state[slave].vhcr_dma = ((u64) param) << 48; 2084 slave_state[slave].vhcr_dma = ((u64) param) << 48;
2004 priv->mfunc.master.slave_state[slave].cookie = 0; 2085 priv->mfunc.master.slave_state[slave].cookie = 0;
2005 mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
2006 break; 2086 break;
2007 case MLX4_COMM_CMD_VHCR1: 2087 case MLX4_COMM_CMD_VHCR1:
2008 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0) 2088 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
@@ -2213,6 +2293,9 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
2213 } 2293 }
2214 2294
2215 if (mlx4_is_master(dev)) { 2295 if (mlx4_is_master(dev)) {
2296 struct mlx4_vf_oper_state *vf_oper;
2297 struct mlx4_vf_admin_state *vf_admin;
2298
2216 priv->mfunc.master.slave_state = 2299 priv->mfunc.master.slave_state =
2217 kzalloc(dev->num_slaves * 2300 kzalloc(dev->num_slaves *
2218 sizeof(struct mlx4_slave_state), GFP_KERNEL); 2301 sizeof(struct mlx4_slave_state), GFP_KERNEL);
@@ -2232,8 +2315,11 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
2232 goto err_comm_oper; 2315 goto err_comm_oper;
2233 2316
2234 for (i = 0; i < dev->num_slaves; ++i) { 2317 for (i = 0; i < dev->num_slaves; ++i) {
2318 vf_admin = &priv->mfunc.master.vf_admin[i];
2319 vf_oper = &priv->mfunc.master.vf_oper[i];
2235 s_state = &priv->mfunc.master.slave_state[i]; 2320 s_state = &priv->mfunc.master.slave_state[i];
2236 s_state->last_cmd = MLX4_COMM_CMD_RESET; 2321 s_state->last_cmd = MLX4_COMM_CMD_RESET;
2322 mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
2237 for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j) 2323 for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
2238 s_state->event_eq[j].eqn = -1; 2324 s_state->event_eq[j].eqn = -1;
2239 __raw_writel((__force u32) 0, 2325 __raw_writel((__force u32) 0,
@@ -2242,6 +2328,9 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
2242 &priv->mfunc.comm[i].slave_read); 2328 &priv->mfunc.comm[i].slave_read);
2243 mmiowb(); 2329 mmiowb();
2244 for (port = 1; port <= MLX4_MAX_PORTS; port++) { 2330 for (port = 1; port <= MLX4_MAX_PORTS; port++) {
2331 struct mlx4_vport_state *admin_vport;
2332 struct mlx4_vport_state *oper_vport;
2333
2245 s_state->vlan_filter[port] = 2334 s_state->vlan_filter[port] =
2246 kzalloc(sizeof(struct mlx4_vlan_fltr), 2335 kzalloc(sizeof(struct mlx4_vlan_fltr),
2247 GFP_KERNEL); 2336 GFP_KERNEL);
@@ -2250,15 +2339,30 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
2250 kfree(s_state->vlan_filter[port]); 2339 kfree(s_state->vlan_filter[port]);
2251 goto err_slaves; 2340 goto err_slaves;
2252 } 2341 }
2342
2343 admin_vport = &vf_admin->vport[port];
2344 oper_vport = &vf_oper->vport[port].state;
2253 INIT_LIST_HEAD(&s_state->mcast_filters[port]); 2345 INIT_LIST_HEAD(&s_state->mcast_filters[port]);
2254 priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT; 2346 admin_vport->default_vlan = MLX4_VGT;
2255 priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT; 2347 oper_vport->default_vlan = MLX4_VGT;
2256 priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX; 2348 admin_vport->qos_vport =
2257 priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX; 2349 MLX4_VPP_DEFAULT_VPORT;
2350 oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
2351 vf_oper->vport[port].vlan_idx = NO_INDX;
2352 vf_oper->vport[port].mac_idx = NO_INDX;
2258 } 2353 }
2259 spin_lock_init(&s_state->lock); 2354 spin_lock_init(&s_state->lock);
2260 } 2355 }
2261 2356
2357 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) {
2358 for (port = 1; port <= dev->caps.num_ports; port++) {
2359 if (mlx4_is_eth(dev, port)) {
2360 mlx4_set_default_port_qos(dev, port);
2361 mlx4_allocate_port_vpps(dev, port);
2362 }
2363 }
2364 }
2365
2262 memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size); 2366 memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
2263 priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD; 2367 priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
2264 INIT_WORK(&priv->mfunc.master.comm_work, 2368 INIT_WORK(&priv->mfunc.master.comm_work,
@@ -2679,6 +2783,103 @@ static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
2679 return port; 2783 return port;
2680} 2784}
2681 2785
2786static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
2787 int max_tx_rate)
2788{
2789 int i;
2790 int err;
2791 struct mlx4_qos_manager *port_qos;
2792 struct mlx4_dev *dev = &priv->dev;
2793 struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];
2794
2795 port_qos = &priv->mfunc.master.qos_ctl[port];
2796 memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);
2797
2798 if (slave > port_qos->num_of_qos_vfs) {
2799 mlx4_info(dev, "No availible VPP resources for this VF\n");
2800 return -EINVAL;
2801 }
2802
2803 /* Query for default QoS values from Vport 0 is needed */
2804 err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
2805 if (err) {
2806 mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
2807 return err;
2808 }
2809
2810 for (i = 0; i < MLX4_NUM_UP; i++) {
2811 if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
2812 vpp_qos[i].max_avg_bw = max_tx_rate;
2813 vpp_qos[i].enable = 1;
2814 } else {
2815 /* if user supplied tx_rate == 0, meaning no rate limit
2816 * configuration is required. so we are leaving the
2817 * value of max_avg_bw as queried from Vport 0.
2818 */
2819 vpp_qos[i].enable = 0;
2820 }
2821 }
2822
2823 err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
2824 if (err) {
2825 mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
2826 return err;
2827 }
2828
2829 return 0;
2830}
2831
2832static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
2833 struct mlx4_vport_state *vf_admin)
2834{
2835 struct mlx4_qos_manager *info;
2836 struct mlx4_priv *priv = mlx4_priv(dev);
2837
2838 if (!mlx4_is_master(dev) ||
2839 !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
2840 return false;
2841
2842 info = &priv->mfunc.master.qos_ctl[port];
2843
2844 if (vf_admin->default_vlan != MLX4_VGT &&
2845 test_bit(vf_admin->default_qos, info->priority_bm))
2846 return true;
2847
2848 return false;
2849}
2850
2851static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
2852 struct mlx4_vport_state *vf_admin,
2853 int vlan, int qos)
2854{
2855 struct mlx4_vport_state dummy_admin = {0};
2856
2857 if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
2858 !vf_admin->tx_rate)
2859 return true;
2860
2861 dummy_admin.default_qos = qos;
2862 dummy_admin.default_vlan = vlan;
2863
2864 /* VF wants to move to other VST state which is valid with current
2865 * rate limit. Either differnt default vlan in VST or other
2866 * supported QoS priority. Otherwise we don't allow this change when
2867 * the TX rate is still configured.
2868 */
2869 if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
2870 return true;
2871
2872 mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
2873 (vlan == MLX4_VGT) ? "VGT" : "VST");
2874
2875 if (vlan != MLX4_VGT)
2876 mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);
2877
2878 mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");
2879
2880 return false;
2881}
2882
2682int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) 2883int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
2683{ 2884{
2684 struct mlx4_priv *priv = mlx4_priv(dev); 2885 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2722,12 +2923,22 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
2722 port = mlx4_slaves_closest_port(dev, slave, port); 2923 port = mlx4_slaves_closest_port(dev, slave, port);
2723 vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; 2924 vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
2724 2925
2926 if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
2927 return -EPERM;
2928
2725 if ((0 == vlan) && (0 == qos)) 2929 if ((0 == vlan) && (0 == qos))
2726 vf_admin->default_vlan = MLX4_VGT; 2930 vf_admin->default_vlan = MLX4_VGT;
2727 else 2931 else
2728 vf_admin->default_vlan = vlan; 2932 vf_admin->default_vlan = vlan;
2729 vf_admin->default_qos = qos; 2933 vf_admin->default_qos = qos;
2730 2934
2935 /* If rate was configured prior to VST, we saved the configured rate
2936 * in vf_admin->rate and now, if priority supported we enforce the QoS
2937 */
2938 if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
2939 vf_admin->tx_rate)
2940 vf_admin->qos_vport = slave;
2941
2731 if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port)) 2942 if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
2732 mlx4_info(dev, 2943 mlx4_info(dev,
2733 "updating vf %d port %d config will take effect on next VF restart\n", 2944 "updating vf %d port %d config will take effect on next VF restart\n",
@@ -2736,6 +2947,69 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
2736} 2947}
2737EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan); 2948EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
2738 2949
2950int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
2951 int max_tx_rate)
2952{
2953 int err;
2954 int slave;
2955 struct mlx4_vport_state *vf_admin;
2956 struct mlx4_priv *priv = mlx4_priv(dev);
2957
2958 if (!mlx4_is_master(dev) ||
2959 !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
2960 return -EPROTONOSUPPORT;
2961
2962 if (min_tx_rate) {
2963 mlx4_info(dev, "Minimum BW share not supported\n");
2964 return -EPROTONOSUPPORT;
2965 }
2966
2967 slave = mlx4_get_slave_indx(dev, vf);
2968 if (slave < 0)
2969 return -EINVAL;
2970
2971 port = mlx4_slaves_closest_port(dev, slave, port);
2972 vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
2973
2974 err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
2975 if (err) {
2976 mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
2977 max_tx_rate);
2978 return err;
2979 }
2980
2981 vf_admin->tx_rate = max_tx_rate;
2982 /* if VF is not in supported mode (VST with supported prio),
2983 * we do not change vport configuration for its QPs, but save
2984 * the rate, so it will be enforced when it moves to supported
2985 * mode next time.
2986 */
2987 if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
2988 mlx4_info(dev,
2989 "rate set for VF %d when not in valid state\n", vf);
2990
2991 if (vf_admin->default_vlan != MLX4_VGT)
2992 mlx4_info(dev, "VST priority not supported by QoS\n");
2993 else
2994 mlx4_info(dev, "VF in VGT mode (needed VST)\n");
2995
2996 mlx4_info(dev,
2997 "rate %d take affect when VF moves to valid state\n",
2998 max_tx_rate);
2999 return 0;
3000 }
3001
3002 /* If user sets rate 0 assigning default vport for its QPs */
3003 vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT;
3004
3005 if (priv->mfunc.master.slave_state[slave].active &&
3006 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
3007 mlx4_master_immediate_activate_vlan_qos(priv, slave, port);
3008
3009 return 0;
3010}
3011EXPORT_SYMBOL_GPL(mlx4_set_vf_rate);
3012
2739 /* mlx4_get_slave_default_vlan - 3013 /* mlx4_get_slave_default_vlan -
2740 * return true if VST ( default vlan) 3014 * return true if VST ( default vlan)
2741 * if VST, will return vlan & qos (if not NULL) 3015 * if VST, will return vlan & qos (if not NULL)
@@ -2809,7 +3083,12 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
2809 3083
2810 ivf->vlan = s_info->default_vlan; 3084 ivf->vlan = s_info->default_vlan;
2811 ivf->qos = s_info->default_qos; 3085 ivf->qos = s_info->default_qos;
2812 ivf->max_tx_rate = s_info->tx_rate; 3086
3087 if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
3088 ivf->max_tx_rate = s_info->tx_rate;
3089 else
3090 ivf->max_tx_rate = 0;
3091
2813 ivf->min_tx_rate = 0; 3092 ivf->min_tx_rate = 0;
2814 ivf->spoofchk = s_info->spoofchk; 3093 ivf->spoofchk = s_info->spoofchk;
2815 ivf->linkstate = s_info->link_state; 3094 ivf->linkstate = s_info->link_state;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 8e3260c0eaa5..f01918c63f28 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -35,6 +35,7 @@
35#include <linux/math64.h> 35#include <linux/math64.h>
36 36
37#include "mlx4_en.h" 37#include "mlx4_en.h"
38#include "fw_qos.h"
38 39
39/* Definitions for QCN 40/* Definitions for QCN
40 */ 41 */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index eba969b08dd1..3f44e2bbb982 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1939,6 +1939,32 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev,
1939 return 0; 1939 return 0;
1940} 1940}
1941 1941
1942static int mlx4_en_set_phys_id(struct net_device *dev,
1943 enum ethtool_phys_id_state state)
1944{
1945 int err;
1946 u16 beacon_duration;
1947 struct mlx4_en_priv *priv = netdev_priv(dev);
1948 struct mlx4_en_dev *mdev = priv->mdev;
1949
1950 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_BEACON))
1951 return -EOPNOTSUPP;
1952
1953 switch (state) {
1954 case ETHTOOL_ID_ACTIVE:
1955 beacon_duration = PORT_BEACON_MAX_LIMIT;
1956 break;
1957 case ETHTOOL_ID_INACTIVE:
1958 beacon_duration = 0;
1959 break;
1960 default:
1961 return -EOPNOTSUPP;
1962 }
1963
1964 err = mlx4_SET_PORT_BEACON(mdev->dev, priv->port, beacon_duration);
1965 return err;
1966}
1967
1942const struct ethtool_ops mlx4_en_ethtool_ops = { 1968const struct ethtool_ops mlx4_en_ethtool_ops = {
1943 .get_drvinfo = mlx4_en_get_drvinfo, 1969 .get_drvinfo = mlx4_en_get_drvinfo,
1944 .get_settings = mlx4_en_get_settings, 1970 .get_settings = mlx4_en_get_settings,
@@ -1948,6 +1974,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
1948 .get_sset_count = mlx4_en_get_sset_count, 1974 .get_sset_count = mlx4_en_get_sset_count,
1949 .get_ethtool_stats = mlx4_en_get_ethtool_stats, 1975 .get_ethtool_stats = mlx4_en_get_ethtool_stats,
1950 .self_test = mlx4_en_self_test, 1976 .self_test = mlx4_en_self_test,
1977 .set_phys_id = mlx4_en_set_phys_id,
1951 .get_wol = mlx4_en_get_wol, 1978 .get_wol = mlx4_en_get_wol,
1952 .set_wol = mlx4_en_set_wol, 1979 .set_wol = mlx4_en_set_wol,
1953 .get_msglevel = mlx4_en_get_msglevel, 1980 .get_msglevel = mlx4_en_get_msglevel,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 58d5a07d0ff4..913b716ed2e1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -103,6 +103,11 @@ void mlx4_en_update_loopback_state(struct net_device *dev,
103{ 103{
104 struct mlx4_en_priv *priv = netdev_priv(dev); 104 struct mlx4_en_priv *priv = netdev_priv(dev);
105 105
106 if (features & NETIF_F_LOOPBACK)
107 priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
108 else
109 priv->ctrl_flags &= cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
110
106 priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED| 111 priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED|
107 MLX4_EN_FLAG_ENABLE_HW_LOOPBACK); 112 MLX4_EN_FLAG_ENABLE_HW_LOOPBACK);
108 113
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 354e254b53cf..0f1afc085d58 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2195,31 +2195,50 @@ static int mlx4_en_set_features(struct net_device *netdev,
2195 netdev_features_t features) 2195 netdev_features_t features)
2196{ 2196{
2197 struct mlx4_en_priv *priv = netdev_priv(netdev); 2197 struct mlx4_en_priv *priv = netdev_priv(netdev);
2198 bool reset = false;
2198 int ret = 0; 2199 int ret = 0;
2199 2200
2201 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
2202 en_info(priv, "Turn %s RX-FCS\n",
2203 (features & NETIF_F_RXFCS) ? "ON" : "OFF");
2204 reset = true;
2205 }
2206
2207 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
2208 u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;
2209
2210 en_info(priv, "Turn %s RX-ALL\n",
2211 ignore_fcs_value ? "ON" : "OFF");
2212 ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
2213 priv->port, ignore_fcs_value);
2214 if (ret)
2215 return ret;
2216 }
2217
2200 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) { 2218 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
2201 en_info(priv, "Turn %s RX vlan strip offload\n", 2219 en_info(priv, "Turn %s RX vlan strip offload\n",
2202 (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF"); 2220 (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
2203 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config, 2221 reset = true;
2204 features);
2205 if (ret)
2206 return ret;
2207 } 2222 }
2208 2223
2209 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX)) 2224 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
2210 en_info(priv, "Turn %s TX vlan strip offload\n", 2225 en_info(priv, "Turn %s TX vlan strip offload\n",
2211 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF"); 2226 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
2212 2227
2213 if (features & NETIF_F_LOOPBACK) 2228 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
2214 priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); 2229 en_info(priv, "Turn %s loopback\n",
2215 else 2230 (features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
2216 priv->ctrl_flags &= 2231 mlx4_en_update_loopback_state(netdev, features);
2217 cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK); 2232 }
2218 2233
2219 mlx4_en_update_loopback_state(netdev, features); 2234 if (reset) {
2235 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
2236 features);
2237 if (ret)
2238 return ret;
2239 }
2220 2240
2221 return 0; 2241 return 0;
2222
2223} 2242}
2224 2243
2225static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) 2244static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
@@ -2242,6 +2261,16 @@ static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
2242 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos); 2261 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
2243} 2262}
2244 2263
2264static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2265 int max_tx_rate)
2266{
2267 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2268 struct mlx4_en_dev *mdev = en_priv->mdev;
2269
2270 return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
2271 max_tx_rate);
2272}
2273
2245static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) 2274static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
2246{ 2275{
2247 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2276 struct mlx4_en_priv *en_priv = netdev_priv(dev);
@@ -2460,6 +2489,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
2460 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, 2489 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2461 .ndo_set_vf_mac = mlx4_en_set_vf_mac, 2490 .ndo_set_vf_mac = mlx4_en_set_vf_mac,
2462 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, 2491 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
2492 .ndo_set_vf_rate = mlx4_en_set_vf_rate,
2463 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, 2493 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
2464 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, 2494 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
2465 .ndo_get_vf_config = mlx4_en_get_vf_config, 2495 .ndo_get_vf_config = mlx4_en_get_vf_config,
@@ -2805,7 +2835,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2805 priv->msg_enable = MLX4_EN_MSG_LEVEL; 2835 priv->msg_enable = MLX4_EN_MSG_LEVEL;
2806#ifdef CONFIG_MLX4_EN_DCB 2836#ifdef CONFIG_MLX4_EN_DCB
2807 if (!mlx4_is_slave(priv->mdev->dev)) { 2837 if (!mlx4_is_slave(priv->mdev->dev)) {
2808 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { 2838 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
2809 dev->dcbnl_ops = &mlx4_en_dcbnl_ops; 2839 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
2810 } else { 2840 } else {
2811 en_info(priv, "enabling only PFC DCB ops\n"); 2841 en_info(priv, "enabling only PFC DCB ops\n");
@@ -2892,6 +2922,12 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2892 dev->hw_features |= NETIF_F_LOOPBACK | 2922 dev->hw_features |= NETIF_F_LOOPBACK |
2893 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; 2923 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2894 2924
2925 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
2926 dev->hw_features |= NETIF_F_RXFCS;
2927
2928 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
2929 dev->hw_features |= NETIF_F_RXALL;
2930
2895 if (mdev->dev->caps.steering_mode == 2931 if (mdev->dev->caps.steering_mode ==
2896 MLX4_STEERING_MODE_DEVICE_MANAGED && 2932 MLX4_STEERING_MODE_DEVICE_MANAGED &&
2897 mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC) 2933 mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
@@ -2917,13 +2953,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2917 netif_carrier_off(dev); 2953 netif_carrier_off(dev);
2918 mlx4_en_set_default_moderation(priv); 2954 mlx4_en_set_default_moderation(priv);
2919 2955
2920 err = register_netdev(dev);
2921 if (err) {
2922 en_err(priv, "Netdev registration failed for port %d\n", port);
2923 goto out;
2924 }
2925 priv->registered = 1;
2926
2927 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); 2956 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
2928 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 2957 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
2929 2958
@@ -2969,6 +2998,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2969 mdev->profile.prof[priv->port].tx_ppp, 2998 mdev->profile.prof[priv->port].tx_ppp,
2970 mdev->profile.prof[priv->port].tx_pause); 2999 mdev->profile.prof[priv->port].tx_pause);
2971 3000
3001 err = register_netdev(dev);
3002 if (err) {
3003 en_err(priv, "Netdev registration failed for port %d\n", port);
3004 goto out;
3005 }
3006
3007 priv->registered = 1;
3008
2972 return 0; 3009 return 0;
2973 3010
2974out: 3011out:
@@ -2987,7 +3024,8 @@ int mlx4_en_reset_config(struct net_device *dev,
2987 3024
2988 if (priv->hwtstamp_config.tx_type == ts_config.tx_type && 3025 if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
2989 priv->hwtstamp_config.rx_filter == ts_config.rx_filter && 3026 priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
2990 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) 3027 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3028 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
2991 return 0; /* Nothing to change */ 3029 return 0; /* Nothing to change */
2992 3030
2993 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && 3031 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
@@ -3026,6 +3064,13 @@ int mlx4_en_reset_config(struct net_device *dev,
3026 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 3064 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3027 } 3065 }
3028 3066
3067 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
3068 if (features & NETIF_F_RXFCS)
3069 dev->features |= NETIF_F_RXFCS;
3070 else
3071 dev->features &= ~NETIF_F_RXFCS;
3072 }
3073
3029 /* RX vlan offload and RX time-stamping can't co-exist ! 3074 /* RX vlan offload and RX time-stamping can't co-exist !
3030 * Regardless of the caller's choice, 3075 * Regardless of the caller's choice,
3031 * Turn Off RX vlan offload in case of time-stamping is ON 3076 * Turn Off RX vlan offload in case of time-stamping is ON
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 698d60de1255..79b1501e7951 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1116,7 +1116,10 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
1116 /* Cancel FCS removal if FW allows */ 1116 /* Cancel FCS removal if FW allows */
1117 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) { 1117 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
1118 context->param3 |= cpu_to_be32(1 << 29); 1118 context->param3 |= cpu_to_be32(1 << 29);
1119 ring->fcs_del = ETH_FCS_LEN; 1119 if (priv->dev->features & NETIF_F_RXFCS)
1120 ring->fcs_del = 0;
1121 else
1122 ring->fcs_del = ETH_FCS_LEN;
1120 } else 1123 } else
1121 ring->fcs_del = 0; 1124 ring->fcs_del = 0;
1122 1125
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 264bc15c1ff2..6e70ffee8e87 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -153,12 +153,10 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
153 153
154 /* All active slaves need to receive the event */ 154 /* All active slaves need to receive the event */
155 if (slave == ALL_SLAVES) { 155 if (slave == ALL_SLAVES) {
156 for (i = 0; i < dev->num_slaves; i++) { 156 for (i = 0; i <= dev->persist->num_vfs; i++) {
157 if (i != dev->caps.function && 157 if (mlx4_GEN_EQE(dev, i, eqe))
158 master->slave_state[i].active) 158 mlx4_warn(dev, "Failed to generate event for slave %d\n",
159 if (mlx4_GEN_EQE(dev, i, eqe)) 159 i);
160 mlx4_warn(dev, "Failed to generate event for slave %d\n",
161 i);
162 } 160 }
163 } else { 161 } else {
164 if (mlx4_GEN_EQE(dev, slave, eqe)) 162 if (mlx4_GEN_EQE(dev, slave, eqe))
@@ -203,13 +201,11 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
203 struct mlx4_eqe *eqe) 201 struct mlx4_eqe *eqe)
204{ 202{
205 struct mlx4_priv *priv = mlx4_priv(dev); 203 struct mlx4_priv *priv = mlx4_priv(dev);
206 struct mlx4_slave_state *s_slave =
207 &priv->mfunc.master.slave_state[slave];
208 204
209 if (!s_slave->active) { 205 if (slave < 0 || slave > dev->persist->num_vfs ||
210 /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/ 206 slave == dev->caps.function ||
207 !priv->mfunc.master.slave_state[slave].active)
211 return; 208 return;
212 }
213 209
214 slave_event(dev, slave, eqe); 210 slave_event(dev, slave, eqe);
215} 211}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 209a6171e59b..b9881fc1252f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -49,9 +49,9 @@ enum {
49extern void __buggy_use_of_MLX4_GET(void); 49extern void __buggy_use_of_MLX4_GET(void);
50extern void __buggy_use_of_MLX4_PUT(void); 50extern void __buggy_use_of_MLX4_PUT(void);
51 51
52static bool enable_qos; 52static bool enable_qos = true;
53module_param(enable_qos, bool, 0444); 53module_param(enable_qos, bool, 0444);
54MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)"); 54MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)");
55 55
56#define MLX4_GET(dest, source, offset) \ 56#define MLX4_GET(dest, source, offset) \
57 do { \ 57 do { \
@@ -105,6 +105,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
105 [41] = "Unicast VEP steering support", 105 [41] = "Unicast VEP steering support",
106 [42] = "Multicast VEP steering support", 106 [42] = "Multicast VEP steering support",
107 [48] = "Counters support", 107 [48] = "Counters support",
108 [52] = "RSS IP fragments support",
108 [53] = "Port ETS Scheduler support", 109 [53] = "Port ETS Scheduler support",
109 [55] = "Port link type sensing support", 110 [55] = "Port link type sensing support",
110 [59] = "Port management change event support", 111 [59] = "Port management change event support",
@@ -146,7 +147,11 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
146 [21] = "Port Remap support", 147 [21] = "Port Remap support",
147 [22] = "QCN support", 148 [22] = "QCN support",
148 [23] = "QP rate limiting support", 149 [23] = "QP rate limiting support",
149 [24] = "Ethernet Flow control statistics support" 150 [24] = "Ethernet Flow control statistics support",
151 [25] = "Granular QoS per VF support",
152 [26] = "Port ETS Scheduler support",
153 [27] = "Port beacon support",
154 [28] = "RX-ALL support",
150 }; 155 };
151 int i; 156 int i;
152 157
@@ -644,6 +649,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
644#define QUERY_DEV_CAP_RSS_OFFSET 0x2e 649#define QUERY_DEV_CAP_RSS_OFFSET 0x2e
645#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f 650#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f
646#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33 651#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33
652#define QUERY_DEV_CAP_PORT_BEACON_OFFSET 0x34
647#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35 653#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
648#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36 654#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36
649#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37 655#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37
@@ -783,6 +789,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
783 if (field & 0x80) 789 if (field & 0x80)
784 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN; 790 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
785 dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f; 791 dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
792 MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
793 if (field & 0x80)
794 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON;
786 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); 795 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
787 if (field & 0x80) 796 if (field & 0x80)
788 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB; 797 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
@@ -870,6 +879,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
870 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); 879 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
871 dev_cap->max_rq_desc_sz = size; 880 dev_cap->max_rq_desc_sz = size;
872 MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); 881 MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
882 if (field & (1 << 4))
883 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QOS_VPP;
873 if (field & (1 << 5)) 884 if (field & (1 << 5))
874 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL; 885 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
875 if (field & (1 << 6)) 886 if (field & (1 << 6))
@@ -883,6 +894,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
883 MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); 894 MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
884 if (field & 0x20) 895 if (field & 0x20)
885 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV; 896 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
897 if (field & (1 << 2))
898 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
886 MLX4_GET(dev_cap->reserved_lkey, outbox, 899 MLX4_GET(dev_cap->reserved_lkey, outbox,
887 QUERY_DEV_CAP_RSVD_LKEY_OFFSET); 900 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
888 MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET); 901 MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
@@ -896,6 +909,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
896 MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN); 909 MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
897 if (field & 1<<3) 910 if (field & 1<<3)
898 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS; 911 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
912 if (field & (1 << 5))
913 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
899 MLX4_GET(dev_cap->max_icm_sz, outbox, 914 MLX4_GET(dev_cap->max_icm_sz, outbox,
900 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET); 915 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
901 if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS) 916 if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -1138,6 +1153,9 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
1138 } 1153 }
1139 for (; slave_port < dev->caps.num_ports; ++slave_port) 1154 for (; slave_port < dev->caps.num_ports; ++slave_port)
1140 flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port); 1155 flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
1156
1157 /* Not exposing RSS IP fragments to guests */
1158 flags &= ~MLX4_DEV_CAP_FLAG_RSS_IP_FRAG;
1141 MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 1159 MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
1142 1160
1143 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET); 1161 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
@@ -1150,11 +1168,16 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
1150 field &= 0x7f; 1168 field &= 0x7f;
1151 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); 1169 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
1152 1170
1153 /* For guests, disable vxlan tunneling */ 1171 /* For guests, disable vxlan tunneling and QoS support */
1154 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN); 1172 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
1155 field &= 0xf7; 1173 field &= 0xd7;
1156 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN); 1174 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
1157 1175
1176 /* For guests, disable port BEACON */
1177 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
1178 field &= 0x7f;
1179 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
1180
1158 /* For guests, report Blueflame disabled */ 1181 /* For guests, report Blueflame disabled */
1159 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET); 1182 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
1160 field &= 0x7f; 1183 field &= 0x7f;
@@ -1195,6 +1218,16 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
1195 field16 = 0; 1218 field16 = 0;
1196 MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET); 1219 MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
1197 1220
1221 /* turn off QoS per VF support for guests */
1222 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
1223 field &= 0xef;
1224 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
1225
1226 /* turn off ignore FCS feature for guests */
1227 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
1228 field &= 0xfb;
1229 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
1230
1198 return 0; 1231 return 0;
1199} 1232}
1200 1233
@@ -1694,13 +1727,17 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1694 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3); 1727 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
1695 1728
1696 /* Enable QoS support if module parameter set */ 1729 /* Enable QoS support if module parameter set */
1697 if (enable_qos) 1730 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos)
1698 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2); 1731 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
1699 1732
1700 /* enable counters */ 1733 /* enable counters */
1701 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS) 1734 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
1702 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4); 1735 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
1703 1736
1737 /* Enable RSS spread to fragmented IP packets when supported */
1738 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG)
1739 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13);
1740
1704 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ 1741 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
1705 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) { 1742 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
1706 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29); 1743 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
@@ -1889,6 +1926,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
1889 else 1926 else
1890 param->steering_mode = MLX4_STEERING_MODE_A0; 1927 param->steering_mode = MLX4_STEERING_MODE_A0;
1891 } 1928 }
1929
1930 if (dword_field & (1 << 13))
1931 param->rss_ip_frags = 1;
1932
1892 /* steering attributes */ 1933 /* steering attributes */
1893 if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 1934 if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
1894 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); 1935 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 863655bd3947..07cb7c2461ad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -203,6 +203,7 @@ struct mlx4_init_hca_param {
203 u64 dev_cap_enabled; 203 u64 dev_cap_enabled;
204 u16 cqe_size; /* For use only when CQE stride feature enabled */ 204 u16 cqe_size; /* For use only when CQE stride feature enabled */
205 u16 eqe_size; /* For use only when EQE stride feature enabled */ 205 u16 eqe_size; /* For use only when EQE stride feature enabled */
206 u8 rss_ip_frags;
206}; 207};
207 208
208struct mlx4_init_ib_param { 209struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.c b/drivers/net/ethernet/mellanox/mlx4/fw_qos.c
new file mode 100644
index 000000000000..8f2fde0487c4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.c
@@ -0,0 +1,289 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4 * All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/export.h>
36#include "fw_qos.h"
37#include "fw.h"
38
39enum {
40 /* allocate vpp opcode modifiers */
41 MLX4_ALLOCATE_VPP_ALLOCATE = 0x0,
42 MLX4_ALLOCATE_VPP_QUERY = 0x1
43};
44
45enum {
46 /* set vport qos opcode modifiers */
47 MLX4_SET_VPORT_QOS_SET = 0x0,
48 MLX4_SET_VPORT_QOS_QUERY = 0x1
49};
50
51struct mlx4_set_port_prio2tc_context {
52 u8 prio2tc[4];
53};
54
55struct mlx4_port_scheduler_tc_cfg_be {
56 __be16 pg;
57 __be16 bw_precentage;
58 __be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
59 __be16 max_bw_value;
60};
61
62struct mlx4_set_port_scheduler_context {
63 struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
64};
65
66/* Granular Qos (per VF) section */
67struct mlx4_alloc_vpp_param {
68 __be32 availible_vpp;
69 __be32 vpp_p_up[MLX4_NUM_UP];
70};
71
72struct mlx4_prio_qos_param {
73 __be32 bw_share;
74 __be32 max_avg_bw;
75 __be32 reserved;
76 __be32 enable;
77 __be32 reserved1[4];
78};
79
80struct mlx4_set_vport_context {
81 __be32 reserved[8];
82 struct mlx4_prio_qos_param qos_p_up[MLX4_NUM_UP];
83};
84
85int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
86{
87 struct mlx4_cmd_mailbox *mailbox;
88 struct mlx4_set_port_prio2tc_context *context;
89 int err;
90 u32 in_mod;
91 int i;
92
93 mailbox = mlx4_alloc_cmd_mailbox(dev);
94 if (IS_ERR(mailbox))
95 return PTR_ERR(mailbox);
96
97 context = mailbox->buf;
98
99 for (i = 0; i < MLX4_NUM_UP; i += 2)
100 context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
101
102 in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
103 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
104 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
105
106 mlx4_free_cmd_mailbox(dev, mailbox);
107 return err;
108}
109EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
110
111int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
112 u8 *pg, u16 *ratelimit)
113{
114 struct mlx4_cmd_mailbox *mailbox;
115 struct mlx4_set_port_scheduler_context *context;
116 int err;
117 u32 in_mod;
118 int i;
119
120 mailbox = mlx4_alloc_cmd_mailbox(dev);
121 if (IS_ERR(mailbox))
122 return PTR_ERR(mailbox);
123
124 context = mailbox->buf;
125
126 for (i = 0; i < MLX4_NUM_TC; i++) {
127 struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
128 u16 r;
129
130 if (ratelimit && ratelimit[i]) {
131 if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) {
132 r = ratelimit[i];
133 tc->max_bw_units =
134 htons(MLX4_RATELIMIT_100M_UNITS);
135 } else {
136 r = ratelimit[i] / 10;
137 tc->max_bw_units =
138 htons(MLX4_RATELIMIT_1G_UNITS);
139 }
140 tc->max_bw_value = htons(r);
141 } else {
142 tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT);
143 tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS);
144 }
145
146 tc->pg = htons(pg[i]);
147 tc->bw_precentage = htons(tc_tx_bw[i]);
148 }
149
150 in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
151 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
152 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
153
154 mlx4_free_cmd_mailbox(dev, mailbox);
155 return err;
156}
157EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
158
159int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
160 u16 *availible_vpp, u8 *vpp_p_up)
161{
162 int i;
163 int err;
164 struct mlx4_cmd_mailbox *mailbox;
165 struct mlx4_alloc_vpp_param *out_param;
166
167 mailbox = mlx4_alloc_cmd_mailbox(dev);
168 if (IS_ERR(mailbox))
169 return PTR_ERR(mailbox);
170
171 out_param = mailbox->buf;
172
173 err = mlx4_cmd_box(dev, 0, mailbox->dma, port,
174 MLX4_ALLOCATE_VPP_QUERY,
175 MLX4_CMD_ALLOCATE_VPP,
176 MLX4_CMD_TIME_CLASS_A,
177 MLX4_CMD_NATIVE);
178 if (err)
179 goto out;
180
181 /* Total number of supported VPPs */
182 *availible_vpp = (u16)be32_to_cpu(out_param->availible_vpp);
183
184 for (i = 0; i < MLX4_NUM_UP; i++)
185 vpp_p_up[i] = (u8)be32_to_cpu(out_param->vpp_p_up[i]);
186
187out:
188 mlx4_free_cmd_mailbox(dev, mailbox);
189
190 return err;
191}
192EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_get);
193
194int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up)
195{
196 int i;
197 int err;
198 struct mlx4_cmd_mailbox *mailbox;
199 struct mlx4_alloc_vpp_param *in_param;
200
201 mailbox = mlx4_alloc_cmd_mailbox(dev);
202 if (IS_ERR(mailbox))
203 return PTR_ERR(mailbox);
204
205 in_param = mailbox->buf;
206
207 for (i = 0; i < MLX4_NUM_UP; i++)
208 in_param->vpp_p_up[i] = cpu_to_be32(vpp_p_up[i]);
209
210 err = mlx4_cmd(dev, mailbox->dma, port,
211 MLX4_ALLOCATE_VPP_ALLOCATE,
212 MLX4_CMD_ALLOCATE_VPP,
213 MLX4_CMD_TIME_CLASS_A,
214 MLX4_CMD_NATIVE);
215
216 mlx4_free_cmd_mailbox(dev, mailbox);
217 return err;
218}
219EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_set);
220
221int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
222 struct mlx4_vport_qos_param *out_param)
223{
224 int i;
225 int err;
226 struct mlx4_cmd_mailbox *mailbox;
227 struct mlx4_set_vport_context *ctx;
228
229 mailbox = mlx4_alloc_cmd_mailbox(dev);
230 if (IS_ERR(mailbox))
231 return PTR_ERR(mailbox);
232
233 ctx = mailbox->buf;
234
235 err = mlx4_cmd_box(dev, 0, mailbox->dma, (vport << 8) | port,
236 MLX4_SET_VPORT_QOS_QUERY,
237 MLX4_CMD_SET_VPORT_QOS,
238 MLX4_CMD_TIME_CLASS_A,
239 MLX4_CMD_NATIVE);
240 if (err)
241 goto out;
242
243 for (i = 0; i < MLX4_NUM_UP; i++) {
244 out_param[i].bw_share = be32_to_cpu(ctx->qos_p_up[i].bw_share);
245 out_param[i].max_avg_bw =
246 be32_to_cpu(ctx->qos_p_up[i].max_avg_bw);
247 out_param[i].enable =
248 !!(be32_to_cpu(ctx->qos_p_up[i].enable) & 31);
249 }
250
251out:
252 mlx4_free_cmd_mailbox(dev, mailbox);
253
254 return err;
255}
256EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_get);
257
258int mlx4_SET_VPORT_QOS_set(struct mlx4_dev *dev, u8 port, u8 vport,
259 struct mlx4_vport_qos_param *in_param)
260{
261 int i;
262 int err;
263 struct mlx4_cmd_mailbox *mailbox;
264 struct mlx4_set_vport_context *ctx;
265
266 mailbox = mlx4_alloc_cmd_mailbox(dev);
267 if (IS_ERR(mailbox))
268 return PTR_ERR(mailbox);
269
270 ctx = mailbox->buf;
271
272 for (i = 0; i < MLX4_NUM_UP; i++) {
273 ctx->qos_p_up[i].bw_share = cpu_to_be32(in_param[i].bw_share);
274 ctx->qos_p_up[i].max_avg_bw =
275 cpu_to_be32(in_param[i].max_avg_bw);
276 ctx->qos_p_up[i].enable =
277 cpu_to_be32(in_param[i].enable << 31);
278 }
279
280 err = mlx4_cmd(dev, mailbox->dma, (vport << 8) | port,
281 MLX4_SET_VPORT_QOS_SET,
282 MLX4_CMD_SET_VPORT_QOS,
283 MLX4_CMD_TIME_CLASS_A,
284 MLX4_CMD_NATIVE);
285
286 mlx4_free_cmd_mailbox(dev, mailbox);
287 return err;
288}
289EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_set);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
new file mode 100644
index 000000000000..ac1f331878e6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
@@ -0,0 +1,145 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4 * All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef MLX4_FW_QOS_H
36#define MLX4_FW_QOS_H
37
38#include <linux/mlx4/cmd.h>
39#include <linux/mlx4/device.h>
40
41#define MLX4_NUM_UP 8
42#define MLX4_NUM_TC 8
43
44/* Default supported priorities for VPP allocation */
45#define MLX4_DEFAULT_QOS_PRIO (0)
46
47/* Derived from FW feature definition, 0 is the default vport fo all QPs */
48#define MLX4_VPP_DEFAULT_VPORT (0)
49
50struct mlx4_vport_qos_param {
51 u32 bw_share;
52 u32 max_avg_bw;
53 u8 enable;
54};
55
56/**
57 * mlx4_SET_PORT_PRIO2TC - This routine maps user priorities to traffic
58 * classes of a given port and device.
59 *
60 * @dev: mlx4_dev.
61 * @port: Physical port number.
62 * @prio2tc: Array of TC associated with each priorities.
63 *
64 * Returns 0 on success or a negative mlx4_core errno code.
65 **/
66int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
67
68/**
69 * mlx4_SET_PORT_SCHEDULER - This routine configures the arbitration between
70 * traffic classes (ETS) and configured rate limit for traffic classes.
71 * tc_tx_bw, pg and ratelimit are arrays where each index represents a TC.
72 * The description for those parameters below refers to a single TC.
73 *
74 * @dev: mlx4_dev.
75 * @port: Physical port number.
76 * @tc_tx_bw: The percentage of the bandwidth allocated for traffic class
77 * within a TC group. The sum of the bw_percentage of all the traffic
78 * classes within a TC group must equal 100% for correct operation.
79 * @pg: The TC group the traffic class is associated with.
80 * @ratelimit: The maximal bandwidth allowed for the use by this traffic class.
81 *
82 * Returns 0 on success or a negative mlx4_core errno code.
83 **/
84int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
85 u8 *pg, u16 *ratelimit);
86/**
87 * mlx4_ALLOCATE_VPP_get - Query port VPP availible resources and allocation.
88 * Before distribution of VPPs to priorities, only availible_vpp is returned.
89 * After initialization it returns the distribution of VPPs among priorities.
90 *
91 * @dev: mlx4_dev.
92 * @port: Physical port number.
93 * @availible_vpp: Pointer to variable where number of availible VPPs is stored
94 * @vpp_p_up: Distribution of VPPs to priorities is stored in this array
95 *
96 * Returns 0 on success or a negative mlx4_core errno code.
97 **/
98int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
99 u16 *availible_vpp, u8 *vpp_p_up);
100/**
101 * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among differnt priorities.
102 * The total number of VPPs assigned to all for a port must not exceed
103 * the value reported by availible_vpp in mlx4_ALLOCATE_VPP_get.
104 * VPP allocation is allowed only after the port type has been set,
105 * and while no QPs are open for this port.
106 *
107 * @dev: mlx4_dev.
108 * @port: Physical port number.
109 * @vpp_p_up: Allocation of VPPs to different priorities.
110 *
111 * Returns 0 on success or a negative mlx4_core errno code.
112 **/
113int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up);
114
115/**
116 * mlx4_SET_VPORT_QOS_get - Query QoS proporties of a Vport.
117 * Each priority allowed for the Vport is assigned with a share of the BW,
118 * and a BW limitation. This commands query the current QoS values.
119 *
120 * @dev: mlx4_dev.
121 * @port: Physical port number.
122 * @vport: Vport id.
123 * @out_param: Array of mlx4_vport_qos_param that will contain the values.
124 *
125 * Returns 0 on success or a negative mlx4_core errno code.
126 **/
127int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
128 struct mlx4_vport_qos_param *out_param);
129
130/**
131 * mlx4_SET_VPORT_QOS_set - Set QoS proporties of a Vport.
132 * QoS parameters can be modified at any time, but must be initialized
133 * before any QP is associated with the VPort.
134 *
135 * @dev: mlx4_dev.
136 * @port: Physical port number.
137 * @vport: Vport id.
138 * @out_param: Array of mlx4_vport_qos_param which holds the requested values.
139 *
140 * Returns 0 on success or a negative mlx4_core errno code.
141 **/
142int mlx4_SET_VPORT_QOS_set(struct mlx4_dev *dev, u8 port, u8 vport,
143 struct mlx4_vport_qos_param *in_param);
144
145#endif /* MLX4_FW_QOS_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 43aa76775b5f..acceb75e8c44 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -297,6 +297,25 @@ static int mlx4_dev_port(struct mlx4_dev *dev, int port,
297 return err; 297 return err;
298} 298}
299 299
300static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
301{
302 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
303 return;
304
305 if (mlx4_is_mfunc(dev)) {
306 mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
307 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
308 return;
309 }
310
311 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
312 mlx4_dbg(dev,
313 "Keep FCS is not supported - Disabling Ignore FCS");
314 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
315 return;
316 }
317}
318
300#define MLX4_A0_STEERING_TABLE_SIZE 256 319#define MLX4_A0_STEERING_TABLE_SIZE 256
301static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 320static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
302{ 321{
@@ -528,10 +547,20 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
528 dev->caps.alloc_res_qp_mask = 547 dev->caps.alloc_res_qp_mask =
529 (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) | 548 (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
530 MLX4_RESERVE_A0_QP; 549 MLX4_RESERVE_A0_QP;
550
551 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
552 dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
553 mlx4_warn(dev, "Old device ETS support detected\n");
554 mlx4_warn(dev, "Consider upgrading device FW.\n");
555 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
556 }
557
531 } else { 558 } else {
532 dev->caps.alloc_res_qp_mask = 0; 559 dev->caps.alloc_res_qp_mask = 0;
533 } 560 }
534 561
562 mlx4_enable_ignore_fcs(dev);
563
535 return 0; 564 return 0;
536} 565}
537 566
@@ -885,6 +914,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
885 mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); 914 mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
886 915
887 slave_adjust_steering_mode(dev, &dev_cap, &hca_param); 916 slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
917 mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
918 hca_param.rss_ip_frags ? "on" : "off");
888 919
889 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && 920 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
890 dev->caps.bf_reg_size) 921 dev->caps.bf_reg_size)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 0b16db015745..f30eeb730a86 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -50,6 +50,7 @@
50#include <linux/mlx4/driver.h> 50#include <linux/mlx4/driver.h>
51#include <linux/mlx4/doorbell.h> 51#include <linux/mlx4/doorbell.h>
52#include <linux/mlx4/cmd.h> 52#include <linux/mlx4/cmd.h>
53#include "fw_qos.h"
53 54
54#define DRV_NAME "mlx4_core" 55#define DRV_NAME "mlx4_core"
55#define PFX DRV_NAME ": " 56#define PFX DRV_NAME ": "
@@ -64,21 +65,6 @@
64 65
65#define INIT_HCA_TPT_MW_ENABLE (1 << 7) 66#define INIT_HCA_TPT_MW_ENABLE (1 << 7)
66 67
67struct mlx4_set_port_prio2tc_context {
68 u8 prio2tc[4];
69};
70
71struct mlx4_port_scheduler_tc_cfg_be {
72 __be16 pg;
73 __be16 bw_precentage;
74 __be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
75 __be16 max_bw_value;
76};
77
78struct mlx4_set_port_scheduler_context {
79 struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
80};
81
82enum { 68enum {
83 MLX4_HCR_BASE = 0x80680, 69 MLX4_HCR_BASE = 0x80680,
84 MLX4_HCR_SIZE = 0x0001c, 70 MLX4_HCR_SIZE = 0x0001c,
@@ -512,6 +498,7 @@ struct mlx4_vport_state {
512 u32 tx_rate; 498 u32 tx_rate;
513 bool spoofchk; 499 bool spoofchk;
514 u32 link_state; 500 u32 link_state;
501 u8 qos_vport;
515}; 502};
516 503
517struct mlx4_vf_admin_state { 504struct mlx4_vf_admin_state {
@@ -568,6 +555,11 @@ struct mlx4_slave_event_eq {
568 struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE]; 555 struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
569}; 556};
570 557
558struct mlx4_qos_manager {
559 int num_of_qos_vfs;
560 DECLARE_BITMAP(priority_bm, MLX4_NUM_UP);
561};
562
571struct mlx4_master_qp0_state { 563struct mlx4_master_qp0_state {
572 int proxy_qp0_active; 564 int proxy_qp0_active;
573 int qp0_active; 565 int qp0_active;
@@ -592,6 +584,7 @@ struct mlx4_mfunc_master_ctx {
592 struct mlx4_eqe cmd_eqe; 584 struct mlx4_eqe cmd_eqe;
593 struct mlx4_slave_event_eq slave_eq; 585 struct mlx4_slave_event_eq slave_eq;
594 struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX]; 586 struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX];
587 struct mlx4_qos_manager qos_ctl[MLX4_MAX_PORTS + 1];
595}; 588};
596 589
597struct mlx4_mfunc { 590struct mlx4_mfunc {
@@ -644,6 +637,7 @@ struct mlx4_vf_immed_vlan_work {
644 int orig_vlan_ix; 637 int orig_vlan_ix;
645 u8 port; 638 u8 port;
646 u8 qos; 639 u8 qos;
640 u8 qos_vport;
647 u16 vlan_id; 641 u16 vlan_id;
648 u16 orig_vlan_id; 642 u16 orig_vlan_id;
649}; 643};
@@ -769,9 +763,11 @@ enum {
769 763
770 764
771struct mlx4_set_port_general_context { 765struct mlx4_set_port_general_context {
772 u8 reserved[3]; 766 u16 reserved1;
767 u8 v_ignore_fcs;
773 u8 flags; 768 u8 flags;
774 u16 reserved2; 769 u8 ignore_fcs;
770 u8 reserved2;
775 __be16 mtu; 771 __be16 mtu;
776 u8 pptx; 772 u8 pptx;
777 u8 pfctx; 773 u8 pfctx;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 67eeea244eff..9de30216b146 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -482,6 +482,7 @@ enum {
482 MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5), 482 MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5),
483}; 483};
484 484
485#define PORT_BEACON_MAX_LIMIT (65535)
485#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE) 486#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
486#define MLX4_EN_MAC_HASH_IDX 5 487#define MLX4_EN_MAC_HASH_IDX 5
487 488
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index b97f173ab062..c2b21313dba7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -45,6 +45,14 @@
45#define MLX4_VLAN_VALID (1u << 31) 45#define MLX4_VLAN_VALID (1u << 31)
46#define MLX4_VLAN_MASK 0xfff 46#define MLX4_VLAN_MASK 0xfff
47 47
48#define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL
49#define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL
50#define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
51#define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL
52
53#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2
54#define MLX4_IGNORE_FCS_MASK 0x1
55
48void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) 56void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
49{ 57{
50 int i; 58 int i;
@@ -123,8 +131,9 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
123 131
124 in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port; 132 in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
125 133
126 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, 134 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
127 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 135 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
136 MLX4_CMD_NATIVE);
128 137
129 mlx4_free_cmd_mailbox(dev, mailbox); 138 mlx4_free_cmd_mailbox(dev, mailbox);
130 return err; 139 return err;
@@ -337,8 +346,9 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
337 346
338 memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); 347 memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
339 in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port; 348 in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
340 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, 349 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
341 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 350 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
351 MLX4_CMD_NATIVE);
342 352
343 mlx4_free_cmd_mailbox(dev, mailbox); 353 mlx4_free_cmd_mailbox(dev, mailbox);
344 354
@@ -625,9 +635,9 @@ static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
625 MLX4_ROCE_GID_ENTRY_SIZE); 635 MLX4_ROCE_GID_ENTRY_SIZE);
626 636
627 err = mlx4_cmd(dev, mailbox->dma, 637 err = mlx4_cmd(dev, mailbox->dma,
628 ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 1, 638 ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8),
629 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 639 MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
630 MLX4_CMD_NATIVE); 640 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
631 mutex_unlock(&(priv->port[port].gid_table.mutex)); 641 mutex_unlock(&(priv->port[port].gid_table.mutex));
632 return err; 642 return err;
633} 643}
@@ -833,6 +843,12 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
833 MLX4_CMD_NATIVE); 843 MLX4_CMD_NATIVE);
834 } 844 }
835 845
846 /* Slaves are not allowed to SET_PORT beacon (LED) blink */
847 if (op_mod == MLX4_SET_PORT_BEACON_OPCODE) {
848 mlx4_warn(dev, "denying SET_PORT Beacon slave:%d\n", slave);
849 return -EPERM;
850 }
851
836 /* For IB, we only consider: 852 /* For IB, we only consider:
837 * - The capability mask, which is set to the aggregate of all 853 * - The capability mask, which is set to the aggregate of all
838 * slave function capabilities 854 * slave function capabilities
@@ -941,8 +957,9 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
941 (pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) | 957 (pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
942 (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) | 958 (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
943 (vl_cap << MLX4_SET_PORT_VL_CAP)); 959 (vl_cap << MLX4_SET_PORT_VL_CAP));
944 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, 960 err = mlx4_cmd(dev, mailbox->dma, port,
945 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); 961 MLX4_SET_PORT_IB_OPCODE, MLX4_CMD_SET_PORT,
962 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
946 if (err != -ENOMEM) 963 if (err != -ENOMEM)
947 break; 964 break;
948 } 965 }
@@ -971,8 +988,9 @@ int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
971 context->pfcrx = pfcrx; 988 context->pfcrx = pfcrx;
972 989
973 in_mod = MLX4_SET_PORT_GENERAL << 8 | port; 990 in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
974 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, 991 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
975 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); 992 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
993 MLX4_CMD_WRAPPED);
976 994
977 mlx4_free_cmd_mailbox(dev, mailbox); 995 mlx4_free_cmd_mailbox(dev, mailbox);
978 return err; 996 return err;
@@ -1008,84 +1026,40 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
1008 context->vlan_miss = MLX4_VLAN_MISS_IDX; 1026 context->vlan_miss = MLX4_VLAN_MISS_IDX;
1009 1027
1010 in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port; 1028 in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
1011 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, 1029 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
1012 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); 1030 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1031 MLX4_CMD_WRAPPED);
1013 1032
1014 mlx4_free_cmd_mailbox(dev, mailbox); 1033 mlx4_free_cmd_mailbox(dev, mailbox);
1015 return err; 1034 return err;
1016} 1035}
1017EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc); 1036EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
1018 1037
1019int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc) 1038int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
1020{ 1039{
1021 struct mlx4_cmd_mailbox *mailbox; 1040 struct mlx4_cmd_mailbox *mailbox;
1022 struct mlx4_set_port_prio2tc_context *context; 1041 struct mlx4_set_port_general_context *context;
1023 int err;
1024 u32 in_mod; 1042 u32 in_mod;
1025 int i;
1026
1027 mailbox = mlx4_alloc_cmd_mailbox(dev);
1028 if (IS_ERR(mailbox))
1029 return PTR_ERR(mailbox);
1030 context = mailbox->buf;
1031 for (i = 0; i < MLX4_NUM_UP; i += 2)
1032 context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
1033
1034 in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
1035 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
1036 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1037
1038 mlx4_free_cmd_mailbox(dev, mailbox);
1039 return err;
1040}
1041EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
1042
1043int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
1044 u8 *pg, u16 *ratelimit)
1045{
1046 struct mlx4_cmd_mailbox *mailbox;
1047 struct mlx4_set_port_scheduler_context *context;
1048 int err; 1043 int err;
1049 u32 in_mod;
1050 int i;
1051 1044
1052 mailbox = mlx4_alloc_cmd_mailbox(dev); 1045 mailbox = mlx4_alloc_cmd_mailbox(dev);
1053 if (IS_ERR(mailbox)) 1046 if (IS_ERR(mailbox))
1054 return PTR_ERR(mailbox); 1047 return PTR_ERR(mailbox);
1055 context = mailbox->buf; 1048 context = mailbox->buf;
1049 context->v_ignore_fcs |= MLX4_FLAG_V_IGNORE_FCS_MASK;
1050 if (ignore_fcs_value)
1051 context->ignore_fcs |= MLX4_IGNORE_FCS_MASK;
1052 else
1053 context->ignore_fcs &= ~MLX4_IGNORE_FCS_MASK;
1056 1054
1057 for (i = 0; i < MLX4_NUM_TC; i++) { 1055 in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
1058 struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
1059 u16 r;
1060
1061 if (ratelimit && ratelimit[i]) {
1062 if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) {
1063 r = ratelimit[i];
1064 tc->max_bw_units =
1065 htons(MLX4_RATELIMIT_100M_UNITS);
1066 } else {
1067 r = ratelimit[i]/10;
1068 tc->max_bw_units =
1069 htons(MLX4_RATELIMIT_1G_UNITS);
1070 }
1071 tc->max_bw_value = htons(r);
1072 } else {
1073 tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT);
1074 tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS);
1075 }
1076
1077 tc->pg = htons(pg[i]);
1078 tc->bw_precentage = htons(tc_tx_bw[i]);
1079 }
1080
1081 in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
1082 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, 1056 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
1083 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 1057 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1084 1058
1085 mlx4_free_cmd_mailbox(dev, mailbox); 1059 mlx4_free_cmd_mailbox(dev, mailbox);
1086 return err; 1060 return err;
1087} 1061}
1088EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER); 1062EXPORT_SYMBOL(mlx4_SET_PORT_fcs_check);
1089 1063
1090enum { 1064enum {
1091 VXLAN_ENABLE_MODIFY = 1 << 7, 1065 VXLAN_ENABLE_MODIFY = 1 << 7,
@@ -1121,14 +1095,35 @@ int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
1121 context->steering = steering; 1095 context->steering = steering;
1122 1096
1123 in_mod = MLX4_SET_PORT_VXLAN << 8 | port; 1097 in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
1124 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, 1098 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
1125 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 1099 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1100 MLX4_CMD_NATIVE);
1126 1101
1127 mlx4_free_cmd_mailbox(dev, mailbox); 1102 mlx4_free_cmd_mailbox(dev, mailbox);
1128 return err; 1103 return err;
1129} 1104}
1130EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN); 1105EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
1131 1106
1107int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time)
1108{
1109 int err;
1110 struct mlx4_cmd_mailbox *mailbox;
1111
1112 mailbox = mlx4_alloc_cmd_mailbox(dev);
1113 if (IS_ERR(mailbox))
1114 return PTR_ERR(mailbox);
1115
1116 *((__be32 *)mailbox->buf) = cpu_to_be32(time);
1117
1118 err = mlx4_cmd(dev, mailbox->dma, port, MLX4_SET_PORT_BEACON_OPCODE,
1119 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1120 MLX4_CMD_NATIVE);
1121
1122 mlx4_free_cmd_mailbox(dev, mailbox);
1123 return err;
1124}
1125EXPORT_SYMBOL(mlx4_SET_PORT_BEACON);
1126
1132int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, 1127int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
1133 struct mlx4_vhcr *vhcr, 1128 struct mlx4_vhcr *vhcr,
1134 struct mlx4_cmd_mailbox *inbox, 1129 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 69e4462e4ee4..b75214a80d0e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -447,6 +447,11 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
447 cmd->qp_context.rate_limit_params = cpu_to_be16((params->rate_unit << 14) | params->rate_val); 447 cmd->qp_context.rate_limit_params = cpu_to_be16((params->rate_unit << 14) | params->rate_val);
448 } 448 }
449 449
450 if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
451 qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
452 cmd->qp_context.qos_vport = params->qos_vport;
453 }
454
450 cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask); 455 cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
451 cmd->qp_mask = cpu_to_be64(qp_mask); 456 cmd->qp_mask = cpu_to_be64(qp_mask);
452 457
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index c258f8625aac..c7f28bf4b8e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -221,11 +221,6 @@ struct res_fs_rule {
221 int qpn; 221 int qpn;
222}; 222};
223 223
224static int mlx4_is_eth(struct mlx4_dev *dev, int port)
225{
226 return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
227}
228
229static void *res_tracker_lookup(struct rb_root *root, u64 res_id) 224static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
230{ 225{
231 struct rb_node *node = root->rb_node; 226 struct rb_node *node = root->rb_node;
@@ -770,6 +765,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
770 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN; 765 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
771 qpc->pri_path.sched_queue &= 0xC7; 766 qpc->pri_path.sched_queue &= 0xC7;
772 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3; 767 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
768 qpc->qos_vport = vp_oper->state.qos_vport;
773 } 769 }
774 if (vp_oper->state.spoofchk) { 770 if (vp_oper->state.spoofchk) {
775 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; 771 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
@@ -3099,6 +3095,12 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3099 if (!priv->mfunc.master.slave_state) 3095 if (!priv->mfunc.master.slave_state)
3100 return -EINVAL; 3096 return -EINVAL;
3101 3097
3098 /* check for slave valid, slave not PF, and slave active */
3099 if (slave < 0 || slave > dev->persist->num_vfs ||
3100 slave == dev->caps.function ||
3101 !priv->mfunc.master.slave_state[slave].active)
3102 return 0;
3103
3102 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; 3104 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3103 3105
3104 /* Create the event only if the slave is registered */ 3106 /* Create the event only if the slave is registered */
@@ -4916,6 +4918,11 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4916 qp->sched_queue & 0xC7; 4918 qp->sched_queue & 0xC7;
4917 upd_context->qp_context.pri_path.sched_queue |= 4919 upd_context->qp_context.pri_path.sched_queue |=
4918 ((work->qos & 0x7) << 3); 4920 ((work->qos & 0x7) << 3);
4921 upd_context->qp_mask |=
4922 cpu_to_be64(1ULL <<
4923 MLX4_UPD_QP_MASK_QOS_VPP);
4924 upd_context->qp_context.qos_vport =
4925 work->qos_vport;
4919 } 4926 }
4920 4927
4921 err = mlx4_cmd(dev, mailbox->dma, 4928 err = mlx4_cmd(dev, mailbox->dma,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 201ca6d76ce5..ac0f7bf4be95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -171,6 +171,9 @@ static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
171 db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page); 171 db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
172 db->dma = pgdir->db_dma + offset; 172 db->dma = pgdir->db_dma + offset;
173 173
174 db->db[0] = 0;
175 db->db[1] = 0;
176
174 return 0; 177 return 0;
175} 178}
176 179
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index a2853057c779..e3273faf4568 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -125,7 +125,10 @@ static u8 alloc_token(struct mlx5_cmd *cmd)
125 u8 token; 125 u8 token;
126 126
127 spin_lock(&cmd->token_lock); 127 spin_lock(&cmd->token_lock);
128 token = cmd->token++ % 255 + 1; 128 cmd->token++;
129 if (cmd->token == 0)
130 cmd->token++;
131 token = cmd->token;
129 spin_unlock(&cmd->token_lock); 132 spin_unlock(&cmd->token_lock);
130 133
131 return token; 134 return token;
@@ -515,10 +518,11 @@ static void cmd_work_handler(struct work_struct *work)
515 ent->ts1 = ktime_get_ns(); 518 ent->ts1 = ktime_get_ns();
516 519
517 /* ring doorbell after the descriptor is valid */ 520 /* ring doorbell after the descriptor is valid */
521 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
518 wmb(); 522 wmb();
519 iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); 523 iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
520 mlx5_core_dbg(dev, "write 0x%x to command doorbell\n", 1 << ent->idx);
521 mmiowb(); 524 mmiowb();
525 /* if not in polling don't use ent after this point */
522 if (cmd->mode == CMD_MODE_POLLING) { 526 if (cmd->mode == CMD_MODE_POLLING) {
523 poll_timeout(ent); 527 poll_timeout(ent);
524 /* make sure we read the descriptor after ownership is SW */ 528 /* make sure we read the descriptor after ownership is SW */
@@ -1236,7 +1240,8 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
1236 goto out_out; 1240 goto out_out;
1237 } 1241 }
1238 1242
1239 err = mlx5_copy_from_msg(out, outb, out_size); 1243 if (!callback)
1244 err = mlx5_copy_from_msg(out, outb, out_size);
1240 1245
1241out_out: 1246out_out:
1242 if (!callback) 1247 if (!callback)
@@ -1319,6 +1324,45 @@ ex_err:
1319 return err; 1324 return err;
1320} 1325}
1321 1326
1327static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
1328{
1329 struct device *ddev = &dev->pdev->dev;
1330
1331 cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
1332 &cmd->alloc_dma, GFP_KERNEL);
1333 if (!cmd->cmd_alloc_buf)
1334 return -ENOMEM;
1335
1336 /* make sure it is aligned to 4K */
1337 if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
1338 cmd->cmd_buf = cmd->cmd_alloc_buf;
1339 cmd->dma = cmd->alloc_dma;
1340 cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
1341 return 0;
1342 }
1343
1344 dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
1345 cmd->alloc_dma);
1346 cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
1347 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
1348 &cmd->alloc_dma, GFP_KERNEL);
1349 if (!cmd->cmd_alloc_buf)
1350 return -ENOMEM;
1351
1352 cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
1353 cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
1354 cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
1355 return 0;
1356}
1357
1358static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
1359{
1360 struct device *ddev = &dev->pdev->dev;
1361
1362 dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
1363 cmd->alloc_dma);
1364}
1365
1322int mlx5_cmd_init(struct mlx5_core_dev *dev) 1366int mlx5_cmd_init(struct mlx5_core_dev *dev)
1323{ 1367{
1324 int size = sizeof(struct mlx5_cmd_prot_block); 1368 int size = sizeof(struct mlx5_cmd_prot_block);
@@ -1341,17 +1385,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
1341 if (!cmd->pool) 1385 if (!cmd->pool)
1342 return -ENOMEM; 1386 return -ENOMEM;
1343 1387
1344 cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0); 1388 err = alloc_cmd_page(dev, cmd);
1345 if (!cmd->cmd_buf) { 1389 if (err)
1346 err = -ENOMEM;
1347 goto err_free_pool; 1390 goto err_free_pool;
1348 }
1349 cmd->dma = dma_map_single(&dev->pdev->dev, cmd->cmd_buf, PAGE_SIZE,
1350 DMA_BIDIRECTIONAL);
1351 if (dma_mapping_error(&dev->pdev->dev, cmd->dma)) {
1352 err = -ENOMEM;
1353 goto err_free;
1354 }
1355 1391
1356 cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; 1392 cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
1357 cmd->log_sz = cmd_l >> 4 & 0xf; 1393 cmd->log_sz = cmd_l >> 4 & 0xf;
@@ -1360,13 +1396,13 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
1360 dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n", 1396 dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
1361 1 << cmd->log_sz); 1397 1 << cmd->log_sz);
1362 err = -EINVAL; 1398 err = -EINVAL;
1363 goto err_map; 1399 goto err_free_page;
1364 } 1400 }
1365 1401
1366 if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) { 1402 if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
1367 dev_err(&dev->pdev->dev, "command queue size overflow\n"); 1403 dev_err(&dev->pdev->dev, "command queue size overflow\n");
1368 err = -EINVAL; 1404 err = -EINVAL;
1369 goto err_map; 1405 goto err_free_page;
1370 } 1406 }
1371 1407
1372 cmd->checksum_disabled = 1; 1408 cmd->checksum_disabled = 1;
@@ -1378,7 +1414,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
1378 dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n", 1414 dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
1379 CMD_IF_REV, cmd->cmdif_rev); 1415 CMD_IF_REV, cmd->cmdif_rev);
1380 err = -ENOTSUPP; 1416 err = -ENOTSUPP;
1381 goto err_map; 1417 goto err_free_page;
1382 } 1418 }
1383 1419
1384 spin_lock_init(&cmd->alloc_lock); 1420 spin_lock_init(&cmd->alloc_lock);
@@ -1394,7 +1430,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
1394 if (cmd_l & 0xfff) { 1430 if (cmd_l & 0xfff) {
1395 dev_err(&dev->pdev->dev, "invalid command queue address\n"); 1431 dev_err(&dev->pdev->dev, "invalid command queue address\n");
1396 err = -ENOMEM; 1432 err = -ENOMEM;
1397 goto err_map; 1433 goto err_free_page;
1398 } 1434 }
1399 1435
1400 iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h); 1436 iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
@@ -1410,7 +1446,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
1410 err = create_msg_cache(dev); 1446 err = create_msg_cache(dev);
1411 if (err) { 1447 if (err) {
1412 dev_err(&dev->pdev->dev, "failed to create command cache\n"); 1448 dev_err(&dev->pdev->dev, "failed to create command cache\n");
1413 goto err_map; 1449 goto err_free_page;
1414 } 1450 }
1415 1451
1416 set_wqname(dev); 1452 set_wqname(dev);
@@ -1435,11 +1471,8 @@ err_wq:
1435err_cache: 1471err_cache:
1436 destroy_msg_cache(dev); 1472 destroy_msg_cache(dev);
1437 1473
1438err_map: 1474err_free_page:
1439 dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE, 1475 free_cmd_page(dev, cmd);
1440 DMA_BIDIRECTIONAL);
1441err_free:
1442 free_pages((unsigned long)cmd->cmd_buf, 0);
1443 1476
1444err_free_pool: 1477err_free_pool:
1445 pci_pool_destroy(cmd->pool); 1478 pci_pool_destroy(cmd->pool);
@@ -1455,9 +1488,7 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
1455 clean_debug_files(dev); 1488 clean_debug_files(dev);
1456 destroy_workqueue(cmd->wq); 1489 destroy_workqueue(cmd->wq);
1457 destroy_msg_cache(dev); 1490 destroy_msg_cache(dev);
1458 dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE, 1491 free_cmd_page(dev, cmd);
1459 DMA_BIDIRECTIONAL);
1460 free_pages((unsigned long)cmd->cmd_buf, 0);
1461 pci_pool_destroy(cmd->pool); 1492 pci_pool_destroy(cmd->pool);
1462} 1493}
1463EXPORT_SYMBOL(mlx5_cmd_cleanup); 1494EXPORT_SYMBOL(mlx5_cmd_cleanup);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 43c5f4809526..eb0cf81f5f45 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 4878025e231c..5210d92e6bc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index da82991239a8..dbf190d9b9ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 06f9036acd83..4b4cda3bcc5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 3e6670c4a7cd..292d76f2a904 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
index fd80ecfa7195..ee1b0b965f34 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mad.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 350c6297fe5d..28425e5ea91f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -48,11 +48,11 @@
48#include "mlx5_core.h" 48#include "mlx5_core.h"
49 49
50#define DRIVER_NAME "mlx5_core" 50#define DRIVER_NAME "mlx5_core"
51#define DRIVER_VERSION "2.2-1" 51#define DRIVER_VERSION "3.0"
52#define DRIVER_RELDATE "Feb 2014" 52#define DRIVER_RELDATE "January 2015"
53 53
54MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); 54MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
55MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library"); 55MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
56MODULE_LICENSE("Dual BSD/GPL"); 56MODULE_LICENSE("Dual BSD/GPL");
57MODULE_VERSION(DRIVER_VERSION); 57MODULE_VERSION(DRIVER_VERSION);
58 58
@@ -288,8 +288,6 @@ static void copy_rw_fields(void *to, struct mlx5_caps *from)
288 MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp); 288 MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
289 MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp); 289 MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
290 MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size); 290 MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size);
291 MLX5_SET(cmd_hca_cap, to, log_max_ra_req_dc, from->gen.log_max_ra_req_dc);
292 MLX5_SET(cmd_hca_cap, to, log_max_ra_res_dc, from->gen.log_max_ra_res_dc);
293 MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size)); 291 MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
294 MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12); 292 MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12);
295 v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK; 293 v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
@@ -509,6 +507,87 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
509 return 0; 507 return 0;
510} 508}
511 509
510int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
511{
512 struct mlx5_eq_table *table = &dev->priv.eq_table;
513 struct mlx5_eq *eq, *n;
514 int err = -ENOENT;
515
516 spin_lock(&table->lock);
517 list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
518 if (eq->index == vector) {
519 *eqn = eq->eqn;
520 *irqn = eq->irqn;
521 err = 0;
522 break;
523 }
524 }
525 spin_unlock(&table->lock);
526
527 return err;
528}
529EXPORT_SYMBOL(mlx5_vector2eqn);
530
531static void free_comp_eqs(struct mlx5_core_dev *dev)
532{
533 struct mlx5_eq_table *table = &dev->priv.eq_table;
534 struct mlx5_eq *eq, *n;
535
536 spin_lock(&table->lock);
537 list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
538 list_del(&eq->list);
539 spin_unlock(&table->lock);
540 if (mlx5_destroy_unmap_eq(dev, eq))
541 mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
542 eq->eqn);
543 kfree(eq);
544 spin_lock(&table->lock);
545 }
546 spin_unlock(&table->lock);
547}
548
549static int alloc_comp_eqs(struct mlx5_core_dev *dev)
550{
551 struct mlx5_eq_table *table = &dev->priv.eq_table;
552 char name[MLX5_MAX_EQ_NAME];
553 struct mlx5_eq *eq;
554 int ncomp_vec;
555 int nent;
556 int err;
557 int i;
558
559 INIT_LIST_HEAD(&table->comp_eqs_list);
560 ncomp_vec = table->num_comp_vectors;
561 nent = MLX5_COMP_EQ_SIZE;
562 for (i = 0; i < ncomp_vec; i++) {
563 eq = kzalloc(sizeof(*eq), GFP_KERNEL);
564 if (!eq) {
565 err = -ENOMEM;
566 goto clean;
567 }
568
569 snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
570 err = mlx5_create_map_eq(dev, eq,
571 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
572 name, &dev->priv.uuari.uars[0]);
573 if (err) {
574 kfree(eq);
575 goto clean;
576 }
577 mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
578 eq->index = i;
579 spin_lock(&table->lock);
580 list_add_tail(&eq->list, &table->comp_eqs_list);
581 spin_unlock(&table->lock);
582 }
583
584 return 0;
585
586clean:
587 free_comp_eqs(dev);
588 return err;
589}
590
512static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) 591static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
513{ 592{
514 struct mlx5_priv *priv = &dev->priv; 593 struct mlx5_priv *priv = &dev->priv;
@@ -645,6 +724,12 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
645 goto err_free_uar; 724 goto err_free_uar;
646 } 725 }
647 726
727 err = alloc_comp_eqs(dev);
728 if (err) {
729 dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
730 goto err_stop_eqs;
731 }
732
648 MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); 733 MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
649 734
650 mlx5_init_cq_table(dev); 735 mlx5_init_cq_table(dev);
@@ -654,6 +739,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
654 739
655 return 0; 740 return 0;
656 741
742err_stop_eqs:
743 mlx5_stop_eqs(dev);
744
657err_free_uar: 745err_free_uar:
658 mlx5_free_uuars(dev, &priv->uuari); 746 mlx5_free_uuars(dev, &priv->uuari);
659 747
@@ -705,6 +793,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
705 mlx5_cleanup_srq_table(dev); 793 mlx5_cleanup_srq_table(dev);
706 mlx5_cleanup_qp_table(dev); 794 mlx5_cleanup_qp_table(dev);
707 mlx5_cleanup_cq_table(dev); 795 mlx5_cleanup_cq_table(dev);
796 free_comp_eqs(dev);
708 mlx5_stop_eqs(dev); 797 mlx5_stop_eqs(dev);
709 mlx5_free_uuars(dev, &priv->uuari); 798 mlx5_free_uuars(dev, &priv->uuari);
710 mlx5_eq_cleanup(dev); 799 mlx5_eq_cleanup(dev);
@@ -819,6 +908,28 @@ void mlx5_unregister_interface(struct mlx5_interface *intf)
819} 908}
820EXPORT_SYMBOL(mlx5_unregister_interface); 909EXPORT_SYMBOL(mlx5_unregister_interface);
821 910
911void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
912{
913 struct mlx5_priv *priv = &mdev->priv;
914 struct mlx5_device_context *dev_ctx;
915 unsigned long flags;
916 void *result = NULL;
917
918 spin_lock_irqsave(&priv->ctx_lock, flags);
919
920 list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
921 if ((dev_ctx->intf->protocol == protocol) &&
922 dev_ctx->intf->get_dev) {
923 result = dev_ctx->intf->get_dev(dev_ctx->context);
924 break;
925 }
926
927 spin_unlock_irqrestore(&priv->ctx_lock, flags);
928
929 return result;
930}
931EXPORT_SYMBOL(mlx5_get_protocol_dev);
932
822static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, 933static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
823 unsigned long param) 934 unsigned long param)
824{ 935{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
index 44837640bd7c..d79fd85d1dd5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index f0c9f9a7a361..a051b906afdf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 184c3615f479..1adb300dd850 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(mlx5_core_destroy_mkey);
141int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, 141int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
142 struct mlx5_query_mkey_mbox_out *out, int outlen) 142 struct mlx5_query_mkey_mbox_out *out, int outlen)
143{ 143{
144 struct mlx5_destroy_mkey_mbox_in in; 144 struct mlx5_query_mkey_mbox_in in;
145 int err; 145 int err;
146 146
147 memset(&in, 0, sizeof(in)); 147 memset(&in, 0, sizeof(in));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 4fdaae9b54d9..df2238372ea7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -243,8 +243,9 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
243 struct page *page; 243 struct page *page;
244 u64 addr; 244 u64 addr;
245 int err; 245 int err;
246 int nid = dev_to_node(&dev->pdev->dev);
246 247
247 page = alloc_page(GFP_HIGHUSER); 248 page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
248 if (!page) { 249 if (!page) {
249 mlx5_core_warn(dev, "failed to allocate page\n"); 250 mlx5_core_warn(dev, "failed to allocate page\n");
250 return -ENOMEM; 251 return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
index 790da5c4ca4f..f2d3aee909e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 72c2d002c3b8..49e90f2612d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 575d853dbe05..dc7dbf7e9d98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index 38bce93f8314..f9d25dcd03c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 06801d6f595e..5a89bb1d678a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index b07d552a27d4..be916eb2f2e7 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -18,6 +18,25 @@
18 18
19#include "vxge-ethtool.h" 19#include "vxge-ethtool.h"
20 20
21static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
22 {"\n DRIVER STATISTICS"},
23 {"vpaths_opened"},
24 {"vpath_open_fail_cnt"},
25 {"link_up_cnt"},
26 {"link_down_cnt"},
27 {"tx_frms"},
28 {"tx_errors"},
29 {"tx_bytes"},
30 {"txd_not_free"},
31 {"txd_out_of_desc"},
32 {"rx_frms"},
33 {"rx_errors"},
34 {"rx_bytes"},
35 {"rx_mcast"},
36 {"pci_map_fail_cnt"},
37 {"skb_alloc_fail_cnt"}
38};
39
21/** 40/**
22 * vxge_ethtool_sset - Sets different link parameters. 41 * vxge_ethtool_sset - Sets different link parameters.
23 * @dev: device pointer. 42 * @dev: device pointer.
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
index 6cf3044d7f43..065a2c0429a4 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h
@@ -19,25 +19,6 @@
19/* Ethtool related variables and Macros. */ 19/* Ethtool related variables and Macros. */
20static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset); 20static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset);
21 21
22static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
23 {"\n DRIVER STATISTICS"},
24 {"vpaths_opened"},
25 {"vpath_open_fail_cnt"},
26 {"link_up_cnt"},
27 {"link_down_cnt"},
28 {"tx_frms"},
29 {"tx_errors"},
30 {"tx_bytes"},
31 {"txd_not_free"},
32 {"txd_out_of_desc"},
33 {"rx_frms"},
34 {"rx_errors"},
35 {"rx_bytes"},
36 {"rx_mcast"},
37 {"pci_map_fail_cnt"},
38 {"skb_alloc_fail_cnt"}
39};
40
41#define VXGE_TITLE_LEN 5 22#define VXGE_TITLE_LEN 5
42#define VXGE_HW_VPATH_STATS_LEN 27 23#define VXGE_HW_VPATH_STATS_LEN 27
43#define VXGE_HW_AGGR_STATS_LEN 13 24#define VXGE_HW_AGGR_STATS_LEN 13
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index c9558e6d57ad..a87b177bd723 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -4937,10 +4937,16 @@ static int rocker_port_master_changed(struct net_device *dev)
4937 struct net_device *master = netdev_master_upper_dev_get(dev); 4937 struct net_device *master = netdev_master_upper_dev_get(dev);
4938 int err = 0; 4938 int err = 0;
4939 4939
4940 /* There are currently three cases handled here:
4941 * 1. Joining a bridge
4942 * 2. Leaving a previously joined bridge
4943 * 3. Other, e.g. being added to or removed from a bond or openvswitch,
4944 * in which case nothing is done
4945 */
4940 if (master && master->rtnl_link_ops && 4946 if (master && master->rtnl_link_ops &&
4941 !strcmp(master->rtnl_link_ops->kind, "bridge")) 4947 !strcmp(master->rtnl_link_ops->kind, "bridge"))
4942 err = rocker_port_bridge_join(rocker_port, master); 4948 err = rocker_port_bridge_join(rocker_port, master);
4943 else 4949 else if (rocker_port_is_bridged(rocker_port))
4944 err = rocker_port_bridge_leave(rocker_port); 4950 err = rocker_port_bridge_leave(rocker_port);
4945 4951
4946 return err; 4952 return err;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 309adee6e791..f0b8b3e0ed7c 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -130,7 +130,6 @@ struct hv_netvsc_packet {
130 u32 status; 130 u32 status;
131 bool part_of_skb; 131 bool part_of_skb;
132 132
133 struct hv_device *device;
134 bool is_data_pkt; 133 bool is_data_pkt;
135 bool xmit_more; /* from skb */ 134 bool xmit_more; /* from skb */
136 u16 vlan_tci; 135 u16 vlan_tci;
@@ -189,6 +188,7 @@ int netvsc_send(struct hv_device *device,
189 struct hv_netvsc_packet *packet); 188 struct hv_netvsc_packet *packet);
190void netvsc_linkstatus_callback(struct hv_device *device_obj, 189void netvsc_linkstatus_callback(struct hv_device *device_obj,
191 struct rndis_message *resp); 190 struct rndis_message *resp);
191void netvsc_xmit_completion(void *context);
192int netvsc_recv_callback(struct hv_device *device_obj, 192int netvsc_recv_callback(struct hv_device *device_obj,
193 struct hv_netvsc_packet *packet, 193 struct hv_netvsc_packet *packet,
194 struct ndis_tcp_ip_checksum_info *csum_info); 194 struct ndis_tcp_ip_checksum_info *csum_info);
@@ -959,6 +959,10 @@ struct ndis_tcp_lso_info {
959#define NDIS_HASH_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \ 959#define NDIS_HASH_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
960 sizeof(u32)) 960 sizeof(u32))
961 961
962/* Total size of all PPI data */
963#define NDIS_ALL_PPI_SIZE (NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE + \
964 NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE)
965
962/* Format of Information buffer passed in a SetRequest for the OID */ 966/* Format of Information buffer passed in a SetRequest for the OID */
963/* OID_GEN_RNDIS_CONFIG_PARAMETER. */ 967/* OID_GEN_RNDIS_CONFIG_PARAMETER. */
964struct rndis_config_parameter_info { 968struct rndis_config_parameter_info {
@@ -1171,6 +1175,8 @@ struct rndis_message {
1171#define RNDIS_HEADER_SIZE (sizeof(struct rndis_message) - \ 1175#define RNDIS_HEADER_SIZE (sizeof(struct rndis_message) - \
1172 sizeof(union rndis_message_container)) 1176 sizeof(union rndis_message_container))
1173 1177
1178#define RNDIS_AND_PPI_SIZE (sizeof(struct rndis_message) + NDIS_ALL_PPI_SIZE)
1179
1174#define NDIS_PACKET_TYPE_DIRECTED 0x00000001 1180#define NDIS_PACKET_TYPE_DIRECTED 0x00000001
1175#define NDIS_PACKET_TYPE_MULTICAST 0x00000002 1181#define NDIS_PACKET_TYPE_MULTICAST 0x00000002
1176#define NDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004 1182#define NDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index f69923695b5b..4d4d497d5762 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -878,7 +878,9 @@ int netvsc_send(struct hv_device *device,
878 packet->send_buf_index = section_index; 878 packet->send_buf_index = section_index;
879 packet->total_data_buflen += msd_len; 879 packet->total_data_buflen += msd_len;
880 880
881 kfree(msdp->pkt); 881 if (msdp->pkt)
882 netvsc_xmit_completion(msdp->pkt);
883
882 if (packet->xmit_more) { 884 if (packet->xmit_more) {
883 msdp->pkt = packet; 885 msdp->pkt = packet;
884 msdp->count++; 886 msdp->count++;
@@ -902,7 +904,7 @@ int netvsc_send(struct hv_device *device,
902 if (m_ret != 0) { 904 if (m_ret != 0) {
903 netvsc_free_send_slot(net_device, 905 netvsc_free_send_slot(net_device,
904 msd_send->send_buf_index); 906 msd_send->send_buf_index);
905 kfree(msd_send); 907 netvsc_xmit_completion(msd_send);
906 } 908 }
907 } 909 }
908 910
@@ -1011,7 +1013,6 @@ static void netvsc_receive(struct netvsc_device *net_device,
1011 } 1013 }
1012 1014
1013 count = vmxferpage_packet->range_cnt; 1015 count = vmxferpage_packet->range_cnt;
1014 netvsc_packet->device = device;
1015 netvsc_packet->channel = channel; 1016 netvsc_packet->channel = channel;
1016 1017
1017 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */ 1018 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f9db6bc513e9..e5fa094e6fe2 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -229,7 +229,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
229 return q_idx; 229 return q_idx;
230} 230}
231 231
232static void netvsc_xmit_completion(void *context) 232void netvsc_xmit_completion(void *context)
233{ 233{
234 struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context; 234 struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
235 struct sk_buff *skb = (struct sk_buff *) 235 struct sk_buff *skb = (struct sk_buff *)
@@ -401,10 +401,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
401 return NETDEV_TX_OK; 401 return NETDEV_TX_OK;
402 } 402 }
403 403
404 pkt_sz = sizeof(struct hv_netvsc_packet) + 404 pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE;
405 sizeof(struct rndis_message) +
406 NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE +
407 NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE;
408 405
409 if (head_room < pkt_sz) { 406 if (head_room < pkt_sz) {
410 packet = kmalloc(pkt_sz, GFP_ATOMIC); 407 packet = kmalloc(pkt_sz, GFP_ATOMIC);
@@ -436,11 +433,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
436 packet->rndis_msg = (struct rndis_message *)((unsigned long)packet + 433 packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
437 sizeof(struct hv_netvsc_packet)); 434 sizeof(struct hv_netvsc_packet));
438 435
439 memset(packet->rndis_msg, 0, sizeof(struct rndis_message) + 436 memset(packet->rndis_msg, 0, RNDIS_AND_PPI_SIZE);
440 NDIS_VLAN_PPI_SIZE +
441 NDIS_CSUM_PPI_SIZE +
442 NDIS_LSO_PPI_SIZE +
443 NDIS_HASH_PPI_SIZE);
444 437
445 /* Set the completion routine */ 438 /* Set the completion routine */
446 packet->send_completion = netvsc_xmit_completion; 439 packet->send_completion = netvsc_xmit_completion;
@@ -872,9 +865,7 @@ static int netvsc_probe(struct hv_device *dev,
872 return -ENOMEM; 865 return -ENOMEM;
873 866
874 max_needed_headroom = sizeof(struct hv_netvsc_packet) + 867 max_needed_headroom = sizeof(struct hv_netvsc_packet) +
875 sizeof(struct rndis_message) + 868 RNDIS_AND_PPI_SIZE;
876 NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE +
877 NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE;
878 869
879 netif_carrier_off(net); 870 netif_carrier_off(net);
880 871
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index a1604376aee1..0d92efefd796 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -47,8 +47,6 @@ struct rndis_request {
47 47
48 /* Simplify allocation by having a netvsc packet inline */ 48 /* Simplify allocation by having a netvsc packet inline */
49 struct hv_netvsc_packet pkt; 49 struct hv_netvsc_packet pkt;
50 /* Set 2 pages for rndis requests crossing page boundary */
51 struct hv_page_buffer buf[2];
52 50
53 struct rndis_message request_msg; 51 struct rndis_message request_msg;
54 /* 52 /*
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 924ea98bd531..54549a6223dd 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -114,7 +114,9 @@ unsigned int ipvlan_mac_hash(const unsigned char *addr);
114rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb); 114rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb);
115int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev); 115int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev);
116void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr); 116void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr);
117bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6); 117struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
118 const void *iaddr, bool is_v6);
119bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
118struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, 120struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
119 const void *iaddr, bool is_v6); 121 const void *iaddr, bool is_v6);
120void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); 122void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync);
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 2a175006028b..c30b5c300c05 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -81,19 +81,20 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
81 hash = (addr->atype == IPVL_IPV6) ? 81 hash = (addr->atype == IPVL_IPV6) ?
82 ipvlan_get_v6_hash(&addr->ip6addr) : 82 ipvlan_get_v6_hash(&addr->ip6addr) :
83 ipvlan_get_v4_hash(&addr->ip4addr); 83 ipvlan_get_v4_hash(&addr->ip4addr);
84 hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); 84 if (hlist_unhashed(&addr->hlnode))
85 hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
85} 86}
86 87
87void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) 88void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync)
88{ 89{
89 hlist_del_rcu(&addr->hlnode); 90 hlist_del_init_rcu(&addr->hlnode);
90 if (sync) 91 if (sync)
91 synchronize_rcu(); 92 synchronize_rcu();
92} 93}
93 94
94bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6) 95struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
96 const void *iaddr, bool is_v6)
95{ 97{
96 struct ipvl_port *port = ipvlan->port;
97 struct ipvl_addr *addr; 98 struct ipvl_addr *addr;
98 99
99 list_for_each_entry(addr, &ipvlan->addrs, anode) { 100 list_for_each_entry(addr, &ipvlan->addrs, anode) {
@@ -101,12 +102,21 @@ bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
101 ipv6_addr_equal(&addr->ip6addr, iaddr)) || 102 ipv6_addr_equal(&addr->ip6addr, iaddr)) ||
102 (!is_v6 && addr->atype == IPVL_IPV4 && 103 (!is_v6 && addr->atype == IPVL_IPV4 &&
103 addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr)) 104 addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr))
104 return true; 105 return addr;
105 } 106 }
107 return NULL;
108}
109
110bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
111{
112 struct ipvl_dev *ipvlan;
106 113
107 if (ipvlan_ht_addr_lookup(port, iaddr, is_v6)) 114 ASSERT_RTNL();
108 return true;
109 115
116 list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
117 if (ipvlan_find_addr(ipvlan, iaddr, is_v6))
118 return true;
119 }
110 return false; 120 return false;
111} 121}
112 122
@@ -192,7 +202,8 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
192 if (skb->protocol == htons(ETH_P_PAUSE)) 202 if (skb->protocol == htons(ETH_P_PAUSE))
193 return; 203 return;
194 204
195 list_for_each_entry(ipvlan, &port->ipvlans, pnode) { 205 rcu_read_lock();
206 list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
196 if (local && (ipvlan == in_dev)) 207 if (local && (ipvlan == in_dev))
197 continue; 208 continue;
198 209
@@ -219,6 +230,7 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
219mcast_acct: 230mcast_acct:
220 ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true); 231 ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
221 } 232 }
233 rcu_read_unlock();
222 234
223 /* Locally generated? ...Forward a copy to the main-device as 235 /* Locally generated? ...Forward a copy to the main-device as
224 * well. On the RX side we'll ignore it (wont give it to any 236 * well. On the RX side we'll ignore it (wont give it to any
@@ -330,7 +342,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
330 struct rtable *rt; 342 struct rtable *rt;
331 int err, ret = NET_XMIT_DROP; 343 int err, ret = NET_XMIT_DROP;
332 struct flowi4 fl4 = { 344 struct flowi4 fl4 = {
333 .flowi4_oif = dev->iflink, 345 .flowi4_oif = dev_get_iflink(dev),
334 .flowi4_tos = RT_TOS(ip4h->tos), 346 .flowi4_tos = RT_TOS(ip4h->tos),
335 .flowi4_flags = FLOWI_FLAG_ANYSRC, 347 .flowi4_flags = FLOWI_FLAG_ANYSRC,
336 .daddr = ip4h->daddr, 348 .daddr = ip4h->daddr,
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 2950c3780230..77b92a0fe557 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -114,7 +114,6 @@ static int ipvlan_init(struct net_device *dev)
114 dev->features = phy_dev->features & IPVLAN_FEATURES; 114 dev->features = phy_dev->features & IPVLAN_FEATURES;
115 dev->features |= NETIF_F_LLTX; 115 dev->features |= NETIF_F_LLTX;
116 dev->gso_max_size = phy_dev->gso_max_size; 116 dev->gso_max_size = phy_dev->gso_max_size;
117 dev->iflink = phy_dev->ifindex;
118 dev->hard_header_len = phy_dev->hard_header_len; 117 dev->hard_header_len = phy_dev->hard_header_len;
119 118
120 ipvlan_set_lockdep_class(dev); 119 ipvlan_set_lockdep_class(dev);
@@ -305,6 +304,13 @@ static int ipvlan_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
305 return 0; 304 return 0;
306} 305}
307 306
307static int ipvlan_get_iflink(const struct net_device *dev)
308{
309 struct ipvl_dev *ipvlan = netdev_priv(dev);
310
311 return ipvlan->phy_dev->ifindex;
312}
313
308static const struct net_device_ops ipvlan_netdev_ops = { 314static const struct net_device_ops ipvlan_netdev_ops = {
309 .ndo_init = ipvlan_init, 315 .ndo_init = ipvlan_init,
310 .ndo_uninit = ipvlan_uninit, 316 .ndo_uninit = ipvlan_uninit,
@@ -317,6 +323,7 @@ static const struct net_device_ops ipvlan_netdev_ops = {
317 .ndo_get_stats64 = ipvlan_get_stats64, 323 .ndo_get_stats64 = ipvlan_get_stats64,
318 .ndo_vlan_rx_add_vid = ipvlan_vlan_rx_add_vid, 324 .ndo_vlan_rx_add_vid = ipvlan_vlan_rx_add_vid,
319 .ndo_vlan_rx_kill_vid = ipvlan_vlan_rx_kill_vid, 325 .ndo_vlan_rx_kill_vid = ipvlan_vlan_rx_kill_vid,
326 .ndo_get_iflink = ipvlan_get_iflink,
320}; 327};
321 328
322static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev, 329static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -504,7 +511,7 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
504 if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 511 if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
505 list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { 512 list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
506 ipvlan_ht_addr_del(addr, !dev->dismantle); 513 ipvlan_ht_addr_del(addr, !dev->dismantle);
507 list_del_rcu(&addr->anode); 514 list_del(&addr->anode);
508 } 515 }
509 } 516 }
510 list_del_rcu(&ipvlan->pnode); 517 list_del_rcu(&ipvlan->pnode);
@@ -606,7 +613,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
606{ 613{
607 struct ipvl_addr *addr; 614 struct ipvl_addr *addr;
608 615
609 if (ipvlan_addr_busy(ipvlan, ip6_addr, true)) { 616 if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) {
610 netif_err(ipvlan, ifup, ipvlan->dev, 617 netif_err(ipvlan, ifup, ipvlan->dev,
611 "Failed to add IPv6=%pI6c addr for %s intf\n", 618 "Failed to add IPv6=%pI6c addr for %s intf\n",
612 ip6_addr, ipvlan->dev->name); 619 ip6_addr, ipvlan->dev->name);
@@ -619,9 +626,13 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
619 addr->master = ipvlan; 626 addr->master = ipvlan;
620 memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); 627 memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
621 addr->atype = IPVL_IPV6; 628 addr->atype = IPVL_IPV6;
622 list_add_tail_rcu(&addr->anode, &ipvlan->addrs); 629 list_add_tail(&addr->anode, &ipvlan->addrs);
623 ipvlan->ipv6cnt++; 630 ipvlan->ipv6cnt++;
624 ipvlan_ht_addr_add(ipvlan, addr); 631 /* If the interface is not up, the address will be added to the hash
632 * list by ipvlan_open.
633 */
634 if (netif_running(ipvlan->dev))
635 ipvlan_ht_addr_add(ipvlan, addr);
625 636
626 return 0; 637 return 0;
627} 638}
@@ -630,12 +641,12 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
630{ 641{
631 struct ipvl_addr *addr; 642 struct ipvl_addr *addr;
632 643
633 addr = ipvlan_ht_addr_lookup(ipvlan->port, ip6_addr, true); 644 addr = ipvlan_find_addr(ipvlan, ip6_addr, true);
634 if (!addr) 645 if (!addr)
635 return; 646 return;
636 647
637 ipvlan_ht_addr_del(addr, true); 648 ipvlan_ht_addr_del(addr, true);
638 list_del_rcu(&addr->anode); 649 list_del(&addr->anode);
639 ipvlan->ipv6cnt--; 650 ipvlan->ipv6cnt--;
640 WARN_ON(ipvlan->ipv6cnt < 0); 651 WARN_ON(ipvlan->ipv6cnt < 0);
641 kfree_rcu(addr, rcu); 652 kfree_rcu(addr, rcu);
@@ -674,7 +685,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
674{ 685{
675 struct ipvl_addr *addr; 686 struct ipvl_addr *addr;
676 687
677 if (ipvlan_addr_busy(ipvlan, ip4_addr, false)) { 688 if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) {
678 netif_err(ipvlan, ifup, ipvlan->dev, 689 netif_err(ipvlan, ifup, ipvlan->dev,
679 "Failed to add IPv4=%pI4 on %s intf.\n", 690 "Failed to add IPv4=%pI4 on %s intf.\n",
680 ip4_addr, ipvlan->dev->name); 691 ip4_addr, ipvlan->dev->name);
@@ -687,9 +698,13 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
687 addr->master = ipvlan; 698 addr->master = ipvlan;
688 memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); 699 memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
689 addr->atype = IPVL_IPV4; 700 addr->atype = IPVL_IPV4;
690 list_add_tail_rcu(&addr->anode, &ipvlan->addrs); 701 list_add_tail(&addr->anode, &ipvlan->addrs);
691 ipvlan->ipv4cnt++; 702 ipvlan->ipv4cnt++;
692 ipvlan_ht_addr_add(ipvlan, addr); 703 /* If the interface is not up, the address will be added to the hash
704 * list by ipvlan_open.
705 */
706 if (netif_running(ipvlan->dev))
707 ipvlan_ht_addr_add(ipvlan, addr);
693 ipvlan_set_broadcast_mac_filter(ipvlan, true); 708 ipvlan_set_broadcast_mac_filter(ipvlan, true);
694 709
695 return 0; 710 return 0;
@@ -699,12 +714,12 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
699{ 714{
700 struct ipvl_addr *addr; 715 struct ipvl_addr *addr;
701 716
702 addr = ipvlan_ht_addr_lookup(ipvlan->port, ip4_addr, false); 717 addr = ipvlan_find_addr(ipvlan, ip4_addr, false);
703 if (!addr) 718 if (!addr)
704 return; 719 return;
705 720
706 ipvlan_ht_addr_del(addr, true); 721 ipvlan_ht_addr_del(addr, true);
707 list_del_rcu(&addr->anode); 722 list_del(&addr->anode);
708 ipvlan->ipv4cnt--; 723 ipvlan->ipv4cnt--;
709 WARN_ON(ipvlan->ipv4cnt < 0); 724 WARN_ON(ipvlan->ipv4cnt < 0);
710 if (!ipvlan->ipv4cnt) 725 if (!ipvlan->ipv4cnt)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index b5e3320ca506..b227a13f6473 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -786,7 +786,6 @@ static int macvlan_init(struct net_device *dev)
786 dev->hw_features |= NETIF_F_LRO; 786 dev->hw_features |= NETIF_F_LRO;
787 dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; 787 dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;
788 dev->gso_max_size = lowerdev->gso_max_size; 788 dev->gso_max_size = lowerdev->gso_max_size;
789 dev->iflink = lowerdev->ifindex;
790 dev->hard_header_len = lowerdev->hard_header_len; 789 dev->hard_header_len = lowerdev->hard_header_len;
791 790
792 macvlan_set_lockdep_class(dev); 791 macvlan_set_lockdep_class(dev);
@@ -995,6 +994,13 @@ static void macvlan_dev_netpoll_cleanup(struct net_device *dev)
995} 994}
996#endif /* CONFIG_NET_POLL_CONTROLLER */ 995#endif /* CONFIG_NET_POLL_CONTROLLER */
997 996
997static int macvlan_dev_get_iflink(const struct net_device *dev)
998{
999 struct macvlan_dev *vlan = netdev_priv(dev);
1000
1001 return vlan->lowerdev->ifindex;
1002}
1003
998static const struct ethtool_ops macvlan_ethtool_ops = { 1004static const struct ethtool_ops macvlan_ethtool_ops = {
999 .get_link = ethtool_op_get_link, 1005 .get_link = ethtool_op_get_link,
1000 .get_settings = macvlan_ethtool_get_settings, 1006 .get_settings = macvlan_ethtool_get_settings,
@@ -1025,6 +1031,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
1025 .ndo_netpoll_setup = macvlan_dev_netpoll_setup, 1031 .ndo_netpoll_setup = macvlan_dev_netpoll_setup,
1026 .ndo_netpoll_cleanup = macvlan_dev_netpoll_cleanup, 1032 .ndo_netpoll_cleanup = macvlan_dev_netpoll_cleanup,
1027#endif 1033#endif
1034 .ndo_get_iflink = macvlan_dev_get_iflink,
1028}; 1035};
1029 1036
1030void macvlan_common_setup(struct net_device *dev) 1037void macvlan_common_setup(struct net_device *dev)
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index f80e19ac6704..fabf11d32d27 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -192,16 +192,17 @@ static int at803x_probe(struct phy_device *phydev)
192{ 192{
193 struct device *dev = &phydev->dev; 193 struct device *dev = &phydev->dev;
194 struct at803x_priv *priv; 194 struct at803x_priv *priv;
195 struct gpio_desc *gpiod_reset;
195 196
196 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 197 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
197 if (!priv) 198 if (!priv)
198 return -ENOMEM; 199 return -ENOMEM;
199 200
200 priv->gpiod_reset = devm_gpiod_get(dev, "reset"); 201 gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
201 if (IS_ERR(priv->gpiod_reset)) 202 if (IS_ERR(gpiod_reset))
202 priv->gpiod_reset = NULL; 203 return PTR_ERR(gpiod_reset);
203 else 204
204 gpiod_direction_output(priv->gpiod_reset, 1); 205 priv->gpiod_reset = gpiod_reset;
205 206
206 phydev->priv = priv; 207 phydev->priv = priv;
207 208
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index a08a3c78ba97..1960b46add65 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -183,6 +183,35 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
183} 183}
184EXPORT_SYMBOL_GPL(fixed_phy_set_link_update); 184EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
185 185
186int fixed_phy_update_state(struct phy_device *phydev,
187 const struct fixed_phy_status *status,
188 const struct fixed_phy_status *changed)
189{
190 struct fixed_mdio_bus *fmb = &platform_fmb;
191 struct fixed_phy *fp;
192
193 if (!phydev || !phydev->bus)
194 return -EINVAL;
195
196 list_for_each_entry(fp, &fmb->phys, node) {
197 if (fp->addr == phydev->addr) {
198#define _UPD(x) if (changed->x) \
199 fp->status.x = status->x
200 _UPD(link);
201 _UPD(speed);
202 _UPD(duplex);
203 _UPD(pause);
204 _UPD(asym_pause);
205#undef _UPD
206 fixed_phy_update_regs(fp);
207 return 0;
208 }
209 }
210
211 return -ENOENT;
212}
213EXPORT_SYMBOL(fixed_phy_update_state);
214
186int fixed_phy_add(unsigned int irq, int phy_addr, 215int fixed_phy_add(unsigned int irq, int phy_addr,
187 struct fixed_phy_status *status) 216 struct fixed_phy_status *status)
188{ 217{
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 724a9b50df7a..75d6f26729a3 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -189,7 +189,7 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
189 skb_put(skb, sizeof(padbytes)); 189 skb_put(skb, sizeof(padbytes));
190 } 190 }
191 191
192 usbnet_set_skb_tx_stats(skb, 1); 192 usbnet_set_skb_tx_stats(skb, 1, 0);
193 return skb; 193 return skb;
194} 194}
195 195
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 9311a08565be..4545e78840b0 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -522,6 +522,7 @@ static const struct driver_info wwan_info = {
522#define DELL_VENDOR_ID 0x413C 522#define DELL_VENDOR_ID 0x413C
523#define REALTEK_VENDOR_ID 0x0bda 523#define REALTEK_VENDOR_ID 0x0bda
524#define SAMSUNG_VENDOR_ID 0x04e8 524#define SAMSUNG_VENDOR_ID 0x04e8
525#define LENOVO_VENDOR_ID 0x17ef
525 526
526static const struct usb_device_id products[] = { 527static const struct usb_device_id products[] = {
527/* BLACKLIST !! 528/* BLACKLIST !!
@@ -702,6 +703,13 @@ static const struct usb_device_id products[] = {
702 .driver_info = 0, 703 .driver_info = 0,
703}, 704},
704 705
706/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
707{
708 USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
709 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
710 .driver_info = 0,
711},
712
705/* WHITELIST!!! 713/* WHITELIST!!!
706 * 714 *
707 * CDC Ether uses two interfaces, not necessarily consecutive. 715 * CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 70cbea551139..c3e4da9e79ca 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1177,13 +1177,12 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
1177 ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload; 1177 ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload;
1178 ctx->tx_ntbs++; 1178 ctx->tx_ntbs++;
1179 1179
1180 /* usbnet has already counted all the framing overhead. 1180 /* usbnet will count all the framing overhead by default.
1181 * Adjust the stats so that the tx_bytes counter show real 1181 * Adjust the stats so that the tx_bytes counter show real
1182 * payload data instead. 1182 * payload data instead.
1183 */ 1183 */
1184 dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload; 1184 usbnet_set_skb_tx_stats(skb_out, n,
1185 1185 ctx->tx_curr_frame_payload - skb_out->len);
1186 usbnet_set_skb_tx_stats(skb_out, n);
1187 1186
1188 return skb_out; 1187 return skb_out;
1189 1188
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 5065538dd03b..ac4d03b328b1 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -493,6 +493,7 @@ enum rtl8152_flags {
493/* Define these values to match your device */ 493/* Define these values to match your device */
494#define VENDOR_ID_REALTEK 0x0bda 494#define VENDOR_ID_REALTEK 0x0bda
495#define VENDOR_ID_SAMSUNG 0x04e8 495#define VENDOR_ID_SAMSUNG 0x04e8
496#define VENDOR_ID_LENOVO 0x17ef
496 497
497#define MCU_TYPE_PLA 0x0100 498#define MCU_TYPE_PLA 0x0100
498#define MCU_TYPE_USB 0x0000 499#define MCU_TYPE_USB 0x0000
@@ -4114,6 +4115,7 @@ static struct usb_device_id rtl8152_table[] = {
4114 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, 4115 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
4115 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, 4116 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
4116 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, 4117 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
4118 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
4117 {} 4119 {}
4118}; 4120};
4119 4121
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 7650cdc8fe6b..953de13267df 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -144,7 +144,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
144 skb_put(skb, sizeof(padbytes)); 144 skb_put(skb, sizeof(padbytes));
145 } 145 }
146 146
147 usbnet_set_skb_tx_stats(skb, 1); 147 usbnet_set_skb_tx_stats(skb, 1, 0);
148 return skb; 148 return skb;
149} 149}
150 150
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 0f3ff285f6a1..777757ae1973 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1346,9 +1346,19 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1346 } else 1346 } else
1347 urb->transfer_flags |= URB_ZERO_PACKET; 1347 urb->transfer_flags |= URB_ZERO_PACKET;
1348 } 1348 }
1349 entry->length = urb->transfer_buffer_length = length; 1349 urb->transfer_buffer_length = length;
1350 if (!(info->flags & FLAG_MULTI_PACKET)) 1350
1351 usbnet_set_skb_tx_stats(skb, 1); 1351 if (info->flags & FLAG_MULTI_PACKET) {
1352 /* Driver has set number of packets and a length delta.
1353 * Calculate the complete length and ensure that it's
1354 * positive.
1355 */
1356 entry->length += length;
1357 if (WARN_ON_ONCE(entry->length <= 0))
1358 entry->length = length;
1359 } else {
1360 usbnet_set_skb_tx_stats(skb, 1, length);
1361 }
1352 1362
1353 spin_lock_irqsave(&dev->txq.lock, flags); 1363 spin_lock_irqsave(&dev->txq.lock, flags);
1354 retval = usb_autopm_get_interface_async(dev->intf); 1364 retval = usb_autopm_get_interface_async(dev->intf);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 4cca36ebc4fb..c8186ffda1a3 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -263,6 +263,20 @@ static void veth_poll_controller(struct net_device *dev)
263} 263}
264#endif /* CONFIG_NET_POLL_CONTROLLER */ 264#endif /* CONFIG_NET_POLL_CONTROLLER */
265 265
266static int veth_get_iflink(const struct net_device *dev)
267{
268 struct veth_priv *priv = netdev_priv(dev);
269 struct net_device *peer;
270 int iflink;
271
272 rcu_read_lock();
273 peer = rcu_dereference(priv->peer);
274 iflink = peer ? peer->ifindex : 0;
275 rcu_read_unlock();
276
277 return iflink;
278}
279
266static const struct net_device_ops veth_netdev_ops = { 280static const struct net_device_ops veth_netdev_ops = {
267 .ndo_init = veth_dev_init, 281 .ndo_init = veth_dev_init,
268 .ndo_open = veth_open, 282 .ndo_open = veth_open,
@@ -275,6 +289,7 @@ static const struct net_device_ops veth_netdev_ops = {
275#ifdef CONFIG_NET_POLL_CONTROLLER 289#ifdef CONFIG_NET_POLL_CONTROLLER
276 .ndo_poll_controller = veth_poll_controller, 290 .ndo_poll_controller = veth_poll_controller,
277#endif 291#endif
292 .ndo_get_iflink = veth_get_iflink,
278}; 293};
279 294
280#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ 295#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a829930dac15..63c7810e1545 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -939,11 +939,15 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
939 skb_orphan(skb); 939 skb_orphan(skb);
940 nf_reset(skb); 940 nf_reset(skb);
941 941
942 /* It is better to stop queue if running out of space 942 /* If running out of space, stop queue to avoid getting packets that we
943 * instead of forcing queuing layer to requeue the skb 943 * are then unable to transmit.
944 * by returning TX_BUSY (and cause a BUG message). 944 * An alternative would be to force queuing layer to requeue the skb by
945 * Since most packets only take 1 or 2 ring slots 945 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
946 * this means 16 slots are typically wasted. 946 * returned in a normal path of operation: it means that driver is not
947 * maintaining the TX queue stop/start state properly, and causes
948 * the stack to do a non-trivial amount of useless work.
949 * Since most packets only take 1 or 2 ring slots, stopping the queue
950 * early means 16 slots are typically wasted.
947 */ 951 */
948 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { 952 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
949 netif_stop_subqueue(dev, qnum); 953 netif_stop_subqueue(dev, qnum);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 4c8a944d58b4..c1d0e7a9da04 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -104,7 +104,7 @@ vmxnet3_rq_driver_stats[] = {
104 rx_buf_alloc_failure) }, 104 rx_buf_alloc_failure) },
105}; 105};
106 106
107/* gloabl stats maintained by the driver */ 107/* global stats maintained by the driver */
108static const struct vmxnet3_stat_desc 108static const struct vmxnet3_stat_desc
109vmxnet3_global_stats[] = { 109vmxnet3_global_stats[] = {
110 /* description, offset */ 110 /* description, offset */
@@ -272,7 +272,7 @@ int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
272 adapter->shared->devRead.misc.uptFeatures &= 272 adapter->shared->devRead.misc.uptFeatures &=
273 ~UPT1_F_RXCSUM; 273 ~UPT1_F_RXCSUM;
274 274
275 /* update harware LRO capability accordingly */ 275 /* update hardware LRO capability accordingly */
276 if (features & NETIF_F_LRO) 276 if (features & NETIF_F_LRO)
277 adapter->shared->devRead.misc.uptFeatures |= 277 adapter->shared->devRead.misc.uptFeatures |=
278 UPT1_F_LRO; 278 UPT1_F_LRO;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index a8d345054d23..51baac725a48 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -989,7 +989,7 @@ out:
989 989
990/* Watch incoming packets to learn mapping between Ethernet address 990/* Watch incoming packets to learn mapping between Ethernet address
991 * and Tunnel endpoint. 991 * and Tunnel endpoint.
992 * Return true if packet is bogus and should be droppped. 992 * Return true if packet is bogus and should be dropped.
993 */ 993 */
994static bool vxlan_snoop(struct net_device *dev, 994static bool vxlan_snoop(struct net_device *dev,
995 union vxlan_addr *src_ip, const u8 *src_mac) 995 union vxlan_addr *src_ip, const u8 *src_mac)
@@ -1085,7 +1085,7 @@ void vxlan_sock_release(struct vxlan_sock *vs)
1085EXPORT_SYMBOL_GPL(vxlan_sock_release); 1085EXPORT_SYMBOL_GPL(vxlan_sock_release);
1086 1086
1087/* Update multicast group membership when first VNI on 1087/* Update multicast group membership when first VNI on
1088 * multicast asddress is brought up 1088 * multicast address is brought up
1089 */ 1089 */
1090static int vxlan_igmp_join(struct vxlan_dev *vxlan) 1090static int vxlan_igmp_join(struct vxlan_dev *vxlan)
1091{ 1091{
@@ -1229,7 +1229,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1229 * this as a malformed packet. This behavior diverges from 1229 * this as a malformed packet. This behavior diverges from
1230 * VXLAN RFC (RFC7348) which stipulates that bits in reserved 1230 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
1231 * in reserved fields are to be ignored. The approach here 1231 * in reserved fields are to be ignored. The approach here
1232 * maintains compatbility with previous stack code, and also 1232 * maintains compatibility with previous stack code, and also
1233 * is more robust and provides a little more security in 1233 * is more robust and provides a little more security in
1234 * adding extensions to VXLAN. 1234 * adding extensions to VXLAN.
1235 */ 1235 */
@@ -1672,7 +1672,8 @@ static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
1672} 1672}
1673 1673
1674#if IS_ENABLED(CONFIG_IPV6) 1674#if IS_ENABLED(CONFIG_IPV6)
1675static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb, 1675static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
1676 struct sk_buff *skb,
1676 struct net_device *dev, struct in6_addr *saddr, 1677 struct net_device *dev, struct in6_addr *saddr,
1677 struct in6_addr *daddr, __u8 prio, __u8 ttl, 1678 struct in6_addr *daddr, __u8 prio, __u8 ttl,
1678 __be16 src_port, __be16 dst_port, 1679 __be16 src_port, __be16 dst_port,
@@ -1748,7 +1749,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
1748 1749
1749 skb_set_inner_protocol(skb, htons(ETH_P_TEB)); 1750 skb_set_inner_protocol(skb, htons(ETH_P_TEB));
1750 1751
1751 udp_tunnel6_xmit_skb(dst, skb, dev, saddr, daddr, prio, 1752 udp_tunnel6_xmit_skb(dst, sk, skb, dev, saddr, daddr, prio,
1752 ttl, src_port, dst_port, 1753 ttl, src_port, dst_port,
1753 !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX)); 1754 !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX));
1754 return 0; 1755 return 0;
@@ -1758,7 +1759,7 @@ err:
1758} 1759}
1759#endif 1760#endif
1760 1761
1761int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb, 1762int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
1762 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, 1763 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
1763 __be16 src_port, __be16 dst_port, 1764 __be16 src_port, __be16 dst_port,
1764 struct vxlan_metadata *md, bool xnet, u32 vxflags) 1765 struct vxlan_metadata *md, bool xnet, u32 vxflags)
@@ -1827,7 +1828,7 @@ int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
1827 1828
1828 skb_set_inner_protocol(skb, htons(ETH_P_TEB)); 1829 skb_set_inner_protocol(skb, htons(ETH_P_TEB));
1829 1830
1830 return udp_tunnel_xmit_skb(rt, skb, src, dst, tos, 1831 return udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos,
1831 ttl, df, src_port, dst_port, xnet, 1832 ttl, df, src_port, dst_port, xnet,
1832 !(vxflags & VXLAN_F_UDP_CSUM)); 1833 !(vxflags & VXLAN_F_UDP_CSUM));
1833} 1834}
@@ -1882,6 +1883,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1882 struct vxlan_rdst *rdst, bool did_rsc) 1883 struct vxlan_rdst *rdst, bool did_rsc)
1883{ 1884{
1884 struct vxlan_dev *vxlan = netdev_priv(dev); 1885 struct vxlan_dev *vxlan = netdev_priv(dev);
1886 struct sock *sk = vxlan->vn_sock->sock->sk;
1885 struct rtable *rt = NULL; 1887 struct rtable *rt = NULL;
1886 const struct iphdr *old_iph; 1888 const struct iphdr *old_iph;
1887 struct flowi4 fl4; 1889 struct flowi4 fl4;
@@ -1961,7 +1963,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1961 md.vni = htonl(vni << 8); 1963 md.vni = htonl(vni << 8);
1962 md.gbp = skb->mark; 1964 md.gbp = skb->mark;
1963 1965
1964 err = vxlan_xmit_skb(rt, skb, fl4.saddr, 1966 err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr,
1965 dst->sin.sin_addr.s_addr, tos, ttl, df, 1967 dst->sin.sin_addr.s_addr, tos, ttl, df,
1966 src_port, dst_port, &md, 1968 src_port, dst_port, &md,
1967 !net_eq(vxlan->net, dev_net(vxlan->dev)), 1969 !net_eq(vxlan->net, dev_net(vxlan->dev)),
@@ -2021,7 +2023,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2021 md.vni = htonl(vni << 8); 2023 md.vni = htonl(vni << 8);
2022 md.gbp = skb->mark; 2024 md.gbp = skb->mark;
2023 2025
2024 err = vxlan6_xmit_skb(ndst, skb, dev, &fl6.saddr, &fl6.daddr, 2026 err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
2025 0, ttl, src_port, dst_port, &md, 2027 0, ttl, src_port, dst_port, &md,
2026 !net_eq(vxlan->net, dev_net(vxlan->dev)), 2028 !net_eq(vxlan->net, dev_net(vxlan->dev)),
2027 vxlan->flags); 2029 vxlan->flags);
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 88d121d43c08..bcfa01add7cc 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -579,6 +579,7 @@ static int cosa_probe(int base, int irq, int dma)
579 /* Register the network interface */ 579 /* Register the network interface */
580 if (!(chan->netdev = alloc_hdlcdev(chan))) { 580 if (!(chan->netdev = alloc_hdlcdev(chan))) {
581 pr_warn("%s: alloc_hdlcdev failed\n", chan->name); 581 pr_warn("%s: alloc_hdlcdev failed\n", chan->name);
582 err = -ENOMEM;
582 goto err_hdlcdev; 583 goto err_hdlcdev;
583 } 584 }
584 dev_to_hdlc(chan->netdev)->attach = cosa_net_attach; 585 dev_to_hdlc(chan->netdev)->attach = cosa_net_attach;
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index bea0f313a7a8..317bc79cc8b9 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -850,6 +850,7 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
850 dev = alloc_hdlcdev(sc); 850 dev = alloc_hdlcdev(sc);
851 if (!dev) { 851 if (!dev) {
852 printk(KERN_ERR "lmc:alloc_netdev for device failed\n"); 852 printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
853 err = -ENOMEM;
853 goto err_hdlcdev; 854 goto err_hdlcdev;
854 } 855 }
855 856
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index f92050617ae6..5147ebe4cd05 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -779,8 +779,6 @@ static void ar5523_tx(struct ieee80211_hw *hw,
779 ieee80211_stop_queues(hw); 779 ieee80211_stop_queues(hw);
780 } 780 }
781 781
782 data->skb = skb;
783
784 spin_lock_irqsave(&ar->tx_data_list_lock, flags); 782 spin_lock_irqsave(&ar->tx_data_list_lock, flags);
785 list_add_tail(&data->list, &ar->tx_queue_pending); 783 list_add_tail(&data->list, &ar->tx_queue_pending);
786 spin_unlock_irqrestore(&ar->tx_data_list_lock, flags); 784 spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
@@ -817,10 +815,13 @@ static void ar5523_tx_work_locked(struct ar5523 *ar)
817 if (!data) 815 if (!data)
818 break; 816 break;
819 817
820 skb = data->skb; 818 txi = container_of((void *)data, struct ieee80211_tx_info,
819 driver_data);
821 txqid = 0; 820 txqid = 0;
822 txi = IEEE80211_SKB_CB(skb); 821
822 skb = container_of((void *)txi, struct sk_buff, cb);
823 paylen = skb->len; 823 paylen = skb->len;
824
824 urb = usb_alloc_urb(0, GFP_KERNEL); 825 urb = usb_alloc_urb(0, GFP_KERNEL);
825 if (!urb) { 826 if (!urb) {
826 ar5523_err(ar, "Failed to allocate TX urb\n"); 827 ar5523_err(ar, "Failed to allocate TX urb\n");
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.h b/drivers/net/wireless/ath/ar5523/ar5523.h
index 00c6fd346d48..9a322a65cdb5 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.h
+++ b/drivers/net/wireless/ath/ar5523/ar5523.h
@@ -74,7 +74,6 @@ struct ar5523_tx_cmd {
74struct ar5523_tx_data { 74struct ar5523_tx_data {
75 struct list_head list; 75 struct list_head list;
76 struct ar5523 *ar; 76 struct ar5523 *ar;
77 struct sk_buff *skb;
78 struct urb *urb; 77 struct urb *urb;
79}; 78};
80 79
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 1eebe2ea3dfb..7e9481099a8e 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -131,6 +131,9 @@ struct ath_ops {
131 void (*enable_write_buffer)(void *); 131 void (*enable_write_buffer)(void *);
132 void (*write_flush) (void *); 132 void (*write_flush) (void *);
133 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr); 133 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
134 void (*enable_rmw_buffer)(void *);
135 void (*rmw_flush) (void *);
136
134}; 137};
135 138
136struct ath_common; 139struct ath_common;
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 1ed7a88aeea9..7ca0d6f930fd 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1283,6 +1283,7 @@ struct ath5k_hw {
1283#define ATH_STAT_PROMISC 1 1283#define ATH_STAT_PROMISC 1
1284#define ATH_STAT_LEDSOFT 2 /* enable LED gpio status */ 1284#define ATH_STAT_LEDSOFT 2 /* enable LED gpio status */
1285#define ATH_STAT_STARTED 3 /* opened & irqs enabled */ 1285#define ATH_STAT_STARTED 3 /* opened & irqs enabled */
1286#define ATH_STAT_RESET 4 /* hw reset */
1286 1287
1287 unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */ 1288 unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
1288 unsigned int fif_filter_flags; /* Current FIF_* filter flags */ 1289 unsigned int fif_filter_flags; /* Current FIF_* filter flags */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 57a80e89822d..a6131825c9f6 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1523,6 +1523,9 @@ ath5k_set_current_imask(struct ath5k_hw *ah)
1523 enum ath5k_int imask; 1523 enum ath5k_int imask;
1524 unsigned long flags; 1524 unsigned long flags;
1525 1525
1526 if (test_bit(ATH_STAT_RESET, ah->status))
1527 return;
1528
1526 spin_lock_irqsave(&ah->irqlock, flags); 1529 spin_lock_irqsave(&ah->irqlock, flags);
1527 imask = ah->imask; 1530 imask = ah->imask;
1528 if (ah->rx_pending) 1531 if (ah->rx_pending)
@@ -2858,10 +2861,12 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
2858{ 2861{
2859 struct ath_common *common = ath5k_hw_common(ah); 2862 struct ath_common *common = ath5k_hw_common(ah);
2860 int ret, ani_mode; 2863 int ret, ani_mode;
2861 bool fast; 2864 bool fast = chan && modparam_fastchanswitch ? 1 : 0;
2862 2865
2863 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n"); 2866 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n");
2864 2867
2868 __set_bit(ATH_STAT_RESET, ah->status);
2869
2865 ath5k_hw_set_imr(ah, 0); 2870 ath5k_hw_set_imr(ah, 0);
2866 synchronize_irq(ah->irq); 2871 synchronize_irq(ah->irq);
2867 ath5k_stop_tasklets(ah); 2872 ath5k_stop_tasklets(ah);
@@ -2876,11 +2881,29 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
2876 * so we should also free any remaining 2881 * so we should also free any remaining
2877 * tx buffers */ 2882 * tx buffers */
2878 ath5k_drain_tx_buffs(ah); 2883 ath5k_drain_tx_buffs(ah);
2884
2885 /* Stop PCU */
2886 ath5k_hw_stop_rx_pcu(ah);
2887
2888 /* Stop DMA
2889 *
2890 * Note: If DMA didn't stop continue
2891 * since only a reset will fix it.
2892 */
2893 ret = ath5k_hw_dma_stop(ah);
2894
2895 /* RF Bus grant won't work if we have pending
2896 * frames
2897 */
2898 if (ret && fast) {
2899 ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
2900 "DMA didn't stop, falling back to normal reset\n");
2901 fast = false;
2902 }
2903
2879 if (chan) 2904 if (chan)
2880 ah->curchan = chan; 2905 ah->curchan = chan;
2881 2906
2882 fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;
2883
2884 ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu); 2907 ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu);
2885 if (ret) { 2908 if (ret) {
2886 ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret); 2909 ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret);
@@ -2934,6 +2957,8 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
2934 */ 2957 */
2935/* ath5k_chan_change(ah, c); */ 2958/* ath5k_chan_change(ah, c); */
2936 2959
2960 __clear_bit(ATH_STAT_RESET, ah->status);
2961
2937 ath5k_beacon_config(ah); 2962 ath5k_beacon_config(ah);
2938 /* intrs are enabled by ath5k_beacon_config */ 2963 /* intrs are enabled by ath5k_beacon_config */
2939 2964
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index b9b651ea9851..99e62f99a182 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -1169,30 +1169,6 @@ ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1169 if (ah->ah_version == AR5K_AR5212) 1169 if (ah->ah_version == AR5K_AR5212)
1170 ath5k_hw_set_sleep_clock(ah, false); 1170 ath5k_hw_set_sleep_clock(ah, false);
1171 1171
1172 /*
1173 * Stop PCU
1174 */
1175 ath5k_hw_stop_rx_pcu(ah);
1176
1177 /*
1178 * Stop DMA
1179 *
1180 * Note: If DMA didn't stop continue
1181 * since only a reset will fix it.
1182 */
1183 ret = ath5k_hw_dma_stop(ah);
1184
1185 /* RF Bus grant won't work if we have pending
1186 * frames */
1187 if (ret && fast) {
1188 ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
1189 "DMA didn't stop, falling back to normal reset\n");
1190 fast = false;
1191 /* Non fatal, just continue with
1192 * normal reset */
1193 ret = 0;
1194 }
1195
1196 mode = channel->hw_value; 1172 mode = channel->hw_value;
1197 switch (mode) { 1173 switch (mode) {
1198 case AR5K_MODE_11A: 1174 case AR5K_MODE_11A:
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 473972288a84..ecda613c2d54 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -46,7 +46,8 @@ ath9k_hw-y:= \
46ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o 46ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o
47 47
48ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \ 48ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
49 ar9003_mci.o 49 ar9003_mci.o \
50 ar9003_aic.o
50 51
51ath9k_hw-$(CONFIG_ATH9K_PCOEM) += ar9003_rtt.o 52ath9k_hw-$(CONFIG_ATH9K_PCOEM) += ar9003_rtt.o
52 53
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index ca01d17d130f..25e45e4d1a60 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -107,11 +107,21 @@ static const struct ani_cck_level_entry cck_level_table[] = {
107static void ath9k_hw_update_mibstats(struct ath_hw *ah, 107static void ath9k_hw_update_mibstats(struct ath_hw *ah,
108 struct ath9k_mib_stats *stats) 108 struct ath9k_mib_stats *stats)
109{ 109{
110 stats->ackrcv_bad += REG_READ(ah, AR_ACK_FAIL); 110 u32 addr[5] = {AR_RTS_OK, AR_RTS_FAIL, AR_ACK_FAIL,
111 stats->rts_bad += REG_READ(ah, AR_RTS_FAIL); 111 AR_FCS_FAIL, AR_BEACON_CNT};
112 stats->fcs_bad += REG_READ(ah, AR_FCS_FAIL); 112 u32 data[5];
113 stats->rts_good += REG_READ(ah, AR_RTS_OK); 113
114 stats->beacons += REG_READ(ah, AR_BEACON_CNT); 114 REG_READ_MULTI(ah, &addr[0], &data[0], 5);
115 /* AR_RTS_OK */
116 stats->rts_good += data[0];
117 /* AR_RTS_FAIL */
118 stats->rts_bad += data[1];
119 /* AR_ACK_FAIL */
120 stats->ackrcv_bad += data[2];
121 /* AR_FCS_FAIL */
122 stats->fcs_bad += data[3];
123 /* AR_BEACON_CNT */
124 stats->beacons += data[4];
115} 125}
116 126
117static void ath9k_ani_restart(struct ath_hw *ah) 127static void ath9k_ani_restart(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index f273427fdd29..6c23d279525f 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -681,12 +681,13 @@ static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
681 phymode |= AR_PHY_FC_DYN2040_PRI_CH; 681 phymode |= AR_PHY_FC_DYN2040_PRI_CH;
682 682
683 } 683 }
684 ENABLE_REGWRITE_BUFFER(ah);
684 REG_WRITE(ah, AR_PHY_TURBO, phymode); 685 REG_WRITE(ah, AR_PHY_TURBO, phymode);
685 686
687 /* This function do only REG_WRITE, so
688 * we can include it to REGWRITE_BUFFER. */
686 ath9k_hw_set11nmac2040(ah, chan); 689 ath9k_hw_set11nmac2040(ah, chan);
687 690
688 ENABLE_REGWRITE_BUFFER(ah);
689
690 REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S); 691 REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
691 REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S); 692 REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
692 693
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 42190b67c671..50fcd343c41a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -430,46 +430,43 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
430 u32 regVal; 430 u32 regVal;
431 unsigned int i; 431 unsigned int i;
432 u32 regList[][2] = { 432 u32 regList[][2] = {
433 { 0x786c, 0 }, 433 { AR9285_AN_TOP3, 0 },
434 { 0x7854, 0 }, 434 { AR9285_AN_RXTXBB1, 0 },
435 { 0x7820, 0 }, 435 { AR9285_AN_RF2G1, 0 },
436 { 0x7824, 0 }, 436 { AR9285_AN_RF2G2, 0 },
437 { 0x7868, 0 }, 437 { AR9285_AN_TOP2, 0 },
438 { 0x783c, 0 }, 438 { AR9285_AN_RF2G8, 0 },
439 { 0x7838, 0 } , 439 { AR9285_AN_RF2G7, 0 },
440 { 0x7828, 0 } , 440 { AR9285_AN_RF2G3, 0 },
441 }; 441 };
442 442
443 for (i = 0; i < ARRAY_SIZE(regList); i++) 443 REG_READ_ARRAY(ah, regList, ARRAY_SIZE(regList));
444 regList[i][1] = REG_READ(ah, regList[i][0]);
445
446 regVal = REG_READ(ah, 0x7834);
447 regVal &= (~(0x1));
448 REG_WRITE(ah, 0x7834, regVal);
449 regVal = REG_READ(ah, 0x9808);
450 regVal |= (0x1 << 27);
451 REG_WRITE(ah, 0x9808, regVal);
452 444
445 ENABLE_REG_RMW_BUFFER(ah);
446 /* 7834, b1=0 */
447 REG_CLR_BIT(ah, AR9285_AN_RF2G6, 1 << 0);
448 /* 9808, b27=1 */
449 REG_SET_BIT(ah, 0x9808, 1 << 27);
453 /* 786c,b23,1, pwddac=1 */ 450 /* 786c,b23,1, pwddac=1 */
454 REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1); 451 REG_SET_BIT(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC);
455 /* 7854, b5,1, pdrxtxbb=1 */ 452 /* 7854, b5,1, pdrxtxbb=1 */
456 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1); 453 REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1);
457 /* 7854, b7,1, pdv2i=1 */ 454 /* 7854, b7,1, pdv2i=1 */
458 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1); 455 REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I);
459 /* 7854, b8,1, pddacinterface=1 */ 456 /* 7854, b8,1, pddacinterface=1 */
460 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1); 457 REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF);
461 /* 7824,b12,0, offcal=0 */ 458 /* 7824,b12,0, offcal=0 */
462 REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0); 459 REG_CLR_BIT(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL);
463 /* 7838, b1,0, pwddb=0 */ 460 /* 7838, b1,0, pwddb=0 */
464 REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0); 461 REG_CLR_BIT(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB);
465 /* 7820,b11,0, enpacal=0 */ 462 /* 7820,b11,0, enpacal=0 */
466 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0); 463 REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL);
467 /* 7820,b25,1, pdpadrv1=0 */ 464 /* 7820,b25,1, pdpadrv1=0 */
468 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0); 465 REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1);
469 /* 7820,b24,0, pdpadrv2=0 */ 466 /* 7820,b24,0, pdpadrv2=0 */
470 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0); 467 REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2);
471 /* 7820,b23,0, pdpaout=0 */ 468 /* 7820,b23,0, pdpaout=0 */
472 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0); 469 REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT);
473 /* 783c,b14-16,7, padrvgn2tab_0=7 */ 470 /* 783c,b14-16,7, padrvgn2tab_0=7 */
474 REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7); 471 REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
475 /* 472 /*
@@ -477,8 +474,9 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
477 * does not matter since we turn it off 474 * does not matter since we turn it off
478 */ 475 */
479 REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0); 476 REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
480 477 /* 7828, b0-11, ccom=fff */
481 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff); 478 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff);
479 REG_RMW_BUFFER_FLUSH(ah);
482 480
483 /* Set: 481 /* Set:
484 * localmode=1,bmode=1,bmoderxtx=1,synthon=1, 482 * localmode=1,bmode=1,bmoderxtx=1,synthon=1,
@@ -490,15 +488,16 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
490 488
491 /* find off_6_1; */ 489 /* find off_6_1; */
492 for (i = 6; i > 0; i--) { 490 for (i = 6; i > 0; i--) {
493 regVal = REG_READ(ah, 0x7834); 491 regVal = REG_READ(ah, AR9285_AN_RF2G6);
494 regVal |= (1 << (20 + i)); 492 regVal |= (1 << (20 + i));
495 REG_WRITE(ah, 0x7834, regVal); 493 REG_WRITE(ah, AR9285_AN_RF2G6, regVal);
496 udelay(1); 494 udelay(1);
497 /* regVal = REG_READ(ah, 0x7834); */ 495 /* regVal = REG_READ(ah, 0x7834); */
498 regVal &= (~(0x1 << (20 + i))); 496 regVal &= (~(0x1 << (20 + i)));
499 regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9) 497 regVal |= (MS(REG_READ(ah, AR9285_AN_RF2G9),
498 AR9285_AN_RXTXBB1_SPARE9)
500 << (20 + i)); 499 << (20 + i));
501 REG_WRITE(ah, 0x7834, regVal); 500 REG_WRITE(ah, AR9285_AN_RF2G6, regVal);
502 } 501 }
503 502
504 regVal = (regVal >> 20) & 0x7f; 503 regVal = (regVal >> 20) & 0x7f;
@@ -515,15 +514,15 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
515 ah->pacal_info.prev_offset = regVal; 514 ah->pacal_info.prev_offset = regVal;
516 } 515 }
517 516
518 ENABLE_REGWRITE_BUFFER(ah);
519 517
520 regVal = REG_READ(ah, 0x7834); 518 ENABLE_REG_RMW_BUFFER(ah);
521 regVal |= 0x1; 519 /* 7834, b1=1 */
522 REG_WRITE(ah, 0x7834, regVal); 520 REG_SET_BIT(ah, AR9285_AN_RF2G6, 1 << 0);
523 regVal = REG_READ(ah, 0x9808); 521 /* 9808, b27=0 */
524 regVal &= (~(0x1 << 27)); 522 REG_CLR_BIT(ah, 0x9808, 1 << 27);
525 REG_WRITE(ah, 0x9808, regVal); 523 REG_RMW_BUFFER_FLUSH(ah);
526 524
525 ENABLE_REGWRITE_BUFFER(ah);
527 for (i = 0; i < ARRAY_SIZE(regList); i++) 526 for (i = 0; i < ARRAY_SIZE(regList); i++)
528 REG_WRITE(ah, regList[i][0], regList[i][1]); 527 REG_WRITE(ah, regList[i][0], regList[i][1]);
529 528
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.c b/drivers/net/wireless/ath/ath9k/ar9003_aic.c
new file mode 100644
index 000000000000..1db119d77783
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_aic.c
@@ -0,0 +1,599 @@
1/*
2 * Copyright (c) 2015 Qualcomm Atheros Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "hw.h"
18#include "hw-ops.h"
19#include "ar9003_mci.h"
20#include "ar9003_aic.h"
21#include "ar9003_phy.h"
22#include "reg_aic.h"
23
24static const u8 com_att_db_table[ATH_AIC_MAX_COM_ATT_DB_TABLE] = {
25 0, 3, 9, 15, 21, 27
26};
27
28static const u16 aic_lin_table[ATH_AIC_MAX_AIC_LIN_TABLE] = {
29 8191, 7300, 6506, 5799, 5168, 4606, 4105, 3659,
30 3261, 2906, 2590, 2309, 2057, 1834, 1634, 1457,
31 1298, 1157, 1031, 919, 819, 730, 651, 580,
32 517, 461, 411, 366, 326, 291, 259, 231,
33 206, 183, 163, 146, 130, 116, 103, 92,
34 82, 73, 65, 58, 52, 46, 41, 37,
35 33, 29, 26, 23, 21, 18, 16, 15,
36 13, 12, 10, 9, 8, 7, 7, 6,
37 5, 5, 4, 4, 3
38};
39
40static bool ar9003_hw_is_aic_enabled(struct ath_hw *ah)
41{
42 struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
43
44 /*
45 * Disable AIC for now, until we have all the
46 * HW code and the driver-layer support ready.
47 */
48 return false;
49
50 if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_AIC)
51 return false;
52
53 return true;
54}
55
56static int16_t ar9003_aic_find_valid(struct ath_aic_sram_info *cal_sram,
57 bool dir, u8 index)
58{
59 int16_t i;
60
61 if (dir) {
62 for (i = index + 1; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
63 if (cal_sram[i].valid)
64 break;
65 }
66 } else {
67 for (i = index - 1; i >= 0; i--) {
68 if (cal_sram[i].valid)
69 break;
70 }
71 }
72
73 if ((i >= ATH_AIC_MAX_BT_CHANNEL) || (i < 0))
74 i = -1;
75
76 return i;
77}
78
79/*
80 * type 0: aic_lin_table, 1: com_att_db_table
81 */
82static int16_t ar9003_aic_find_index(u8 type, int16_t value)
83{
84 int16_t i = -1;
85
86 if (type == 0) {
87 for (i = ATH_AIC_MAX_AIC_LIN_TABLE - 1; i >= 0; i--) {
88 if (aic_lin_table[i] >= value)
89 break;
90 }
91 } else if (type == 1) {
92 for (i = 0; i < ATH_AIC_MAX_COM_ATT_DB_TABLE; i++) {
93 if (com_att_db_table[i] > value) {
94 i--;
95 break;
96 }
97 }
98
99 if (i >= ATH_AIC_MAX_COM_ATT_DB_TABLE)
100 i = -1;
101 }
102
103 return i;
104}
105
106static void ar9003_aic_gain_table(struct ath_hw *ah)
107{
108 u32 aic_atten_word[19], i;
109
110 /* Config LNA gain difference */
111 REG_WRITE(ah, AR_PHY_BT_COEX_4, 0x2c200a00);
112 REG_WRITE(ah, AR_PHY_BT_COEX_5, 0x5c4e4438);
113
114 /* Program gain table */
115 aic_atten_word[0] = (0x1 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x0 & 0xf) << 5 |
116 (0x1f & 0x1f); /* -01 dB: 4'd1, 5'd31, 00 dB: 4'd0, 5'd31 */
117 aic_atten_word[1] = (0x3 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x2 & 0xf) << 5 |
118 (0x1f & 0x1f); /* -03 dB: 4'd3, 5'd31, -02 dB: 4'd2, 5'd31 */
119 aic_atten_word[2] = (0x5 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x4 & 0xf) << 5 |
120 (0x1f & 0x1f); /* -05 dB: 4'd5, 5'd31, -04 dB: 4'd4, 5'd31 */
121 aic_atten_word[3] = (0x1 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x0 & 0xf) << 5 |
122 (0x1e & 0x1f); /* -07 dB: 4'd1, 5'd30, -06 dB: 4'd0, 5'd30 */
123 aic_atten_word[4] = (0x3 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x2 & 0xf) << 5 |
124 (0x1e & 0x1f); /* -09 dB: 4'd3, 5'd30, -08 dB: 4'd2, 5'd30 */
125 aic_atten_word[5] = (0x5 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x4 & 0xf) << 5 |
126 (0x1e & 0x1f); /* -11 dB: 4'd5, 5'd30, -10 dB: 4'd4, 5'd30 */
127 aic_atten_word[6] = (0x1 & 0xf) << 14 | (0xf & 0x1f) << 9 | (0x0 & 0xf) << 5 |
128 (0xf & 0x1f); /* -13 dB: 4'd1, 5'd15, -12 dB: 4'd0, 5'd15 */
129 aic_atten_word[7] = (0x3 & 0xf) << 14 | (0xf & 0x1f) << 9 | (0x2 & 0xf) << 5 |
130 (0xf & 0x1f); /* -15 dB: 4'd3, 5'd15, -14 dB: 4'd2, 5'd15 */
131 aic_atten_word[8] = (0x5 & 0xf) << 14 | (0xf & 0x1f) << 9 | (0x4 & 0xf) << 5 |
132 (0xf & 0x1f); /* -17 dB: 4'd5, 5'd15, -16 dB: 4'd4, 5'd15 */
133 aic_atten_word[9] = (0x1 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x0 & 0xf) << 5 |
134 (0x7 & 0x1f); /* -19 dB: 4'd1, 5'd07, -18 dB: 4'd0, 5'd07 */
135 aic_atten_word[10] = (0x3 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x2 & 0xf) << 5 |
136 (0x7 & 0x1f); /* -21 dB: 4'd3, 5'd07, -20 dB: 4'd2, 5'd07 */
137 aic_atten_word[11] = (0x5 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x4 & 0xf) << 5 |
138 (0x7 & 0x1f); /* -23 dB: 4'd5, 5'd07, -22 dB: 4'd4, 5'd07 */
139 aic_atten_word[12] = (0x7 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x6 & 0xf) << 5 |
140 (0x7 & 0x1f); /* -25 dB: 4'd7, 5'd07, -24 dB: 4'd6, 5'd07 */
141 aic_atten_word[13] = (0x3 & 0xf) << 14 | (0x3 & 0x1f) << 9 | (0x2 & 0xf) << 5 |
142 (0x3 & 0x1f); /* -27 dB: 4'd3, 5'd03, -26 dB: 4'd2, 5'd03 */
143 aic_atten_word[14] = (0x5 & 0xf) << 14 | (0x3 & 0x1f) << 9 | (0x4 & 0xf) << 5 |
144 (0x3 & 0x1f); /* -29 dB: 4'd5, 5'd03, -28 dB: 4'd4, 5'd03 */
145 aic_atten_word[15] = (0x1 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x0 & 0xf) << 5 |
146 (0x1 & 0x1f); /* -31 dB: 4'd1, 5'd01, -30 dB: 4'd0, 5'd01 */
147 aic_atten_word[16] = (0x3 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x2 & 0xf) << 5 |
148 (0x1 & 0x1f); /* -33 dB: 4'd3, 5'd01, -32 dB: 4'd2, 5'd01 */
149 aic_atten_word[17] = (0x5 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x4 & 0xf) << 5 |
150 (0x1 & 0x1f); /* -35 dB: 4'd5, 5'd01, -34 dB: 4'd4, 5'd01 */
151 aic_atten_word[18] = (0x7 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x6 & 0xf) << 5 |
152 (0x1 & 0x1f); /* -37 dB: 4'd7, 5'd01, -36 dB: 4'd6, 5'd01 */
153
154 /* Write to Gain table with auto increment enabled. */
155 REG_WRITE(ah, (AR_PHY_AIC_SRAM_ADDR_B0 + 0x3000),
156 (ATH_AIC_SRAM_AUTO_INCREMENT |
157 ATH_AIC_SRAM_GAIN_TABLE_OFFSET));
158
159 for (i = 0; i < 19; i++) {
160 REG_WRITE(ah, (AR_PHY_AIC_SRAM_DATA_B0 + 0x3000),
161 aic_atten_word[i]);
162 }
163}
164
165static u8 ar9003_aic_cal_start(struct ath_hw *ah, u8 min_valid_count)
166{
167 struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
168 int i;
169
170 /* Write to Gain table with auto increment enabled. */
171 REG_WRITE(ah, (AR_PHY_AIC_SRAM_ADDR_B0 + 0x3000),
172 (ATH_AIC_SRAM_AUTO_INCREMENT |
173 ATH_AIC_SRAM_CAL_OFFSET));
174
175 for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
176 REG_WRITE(ah, (AR_PHY_AIC_SRAM_DATA_B0 + 0x3000), 0);
177 aic->aic_sram[i] = 0;
178 }
179
180 REG_WRITE(ah, AR_PHY_AIC_CTRL_0_B0,
181 (SM(0, AR_PHY_AIC_MON_ENABLE) |
182 SM(127, AR_PHY_AIC_CAL_MAX_HOP_COUNT) |
183 SM(min_valid_count, AR_PHY_AIC_CAL_MIN_VALID_COUNT) |
184 SM(37, AR_PHY_AIC_F_WLAN) |
185 SM(1, AR_PHY_AIC_CAL_CH_VALID_RESET) |
186 SM(0, AR_PHY_AIC_CAL_ENABLE) |
187 SM(0x40, AR_PHY_AIC_BTTX_PWR_THR) |
188 SM(0, AR_PHY_AIC_ENABLE)));
189
190 REG_WRITE(ah, AR_PHY_AIC_CTRL_0_B1,
191 (SM(0, AR_PHY_AIC_MON_ENABLE) |
192 SM(1, AR_PHY_AIC_CAL_CH_VALID_RESET) |
193 SM(0, AR_PHY_AIC_CAL_ENABLE) |
194 SM(0x40, AR_PHY_AIC_BTTX_PWR_THR) |
195 SM(0, AR_PHY_AIC_ENABLE)));
196
197 REG_WRITE(ah, AR_PHY_AIC_CTRL_1_B0,
198 (SM(8, AR_PHY_AIC_CAL_BT_REF_DELAY) |
199 SM(0, AR_PHY_AIC_BT_IDLE_CFG) |
200 SM(1, AR_PHY_AIC_STDBY_COND) |
201 SM(37, AR_PHY_AIC_STDBY_ROT_ATT_DB) |
202 SM(5, AR_PHY_AIC_STDBY_COM_ATT_DB) |
203 SM(15, AR_PHY_AIC_RSSI_MAX) |
204 SM(0, AR_PHY_AIC_RSSI_MIN)));
205
206 REG_WRITE(ah, AR_PHY_AIC_CTRL_1_B1,
207 (SM(15, AR_PHY_AIC_RSSI_MAX) |
208 SM(0, AR_PHY_AIC_RSSI_MIN)));
209
210 REG_WRITE(ah, AR_PHY_AIC_CTRL_2_B0,
211 (SM(44, AR_PHY_AIC_RADIO_DELAY) |
212 SM(8, AR_PHY_AIC_CAL_STEP_SIZE_CORR) |
213 SM(12, AR_PHY_AIC_CAL_ROT_IDX_CORR) |
214 SM(2, AR_PHY_AIC_CAL_CONV_CHECK_FACTOR) |
215 SM(5, AR_PHY_AIC_ROT_IDX_COUNT_MAX) |
216 SM(0, AR_PHY_AIC_CAL_SYNTH_TOGGLE) |
217 SM(0, AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX) |
218 SM(200, AR_PHY_AIC_CAL_SYNTH_SETTLING)));
219
220 REG_WRITE(ah, AR_PHY_AIC_CTRL_3_B0,
221 (SM(2, AR_PHY_AIC_MON_MAX_HOP_COUNT) |
222 SM(1, AR_PHY_AIC_MON_MIN_STALE_COUNT) |
223 SM(1, AR_PHY_AIC_MON_PWR_EST_LONG) |
224 SM(2, AR_PHY_AIC_MON_PD_TALLY_SCALING) |
225 SM(10, AR_PHY_AIC_MON_PERF_THR) |
226 SM(2, AR_PHY_AIC_CAL_TARGET_MAG_SETTING) |
227 SM(1, AR_PHY_AIC_CAL_PERF_CHECK_FACTOR) |
228 SM(1, AR_PHY_AIC_CAL_PWR_EST_LONG)));
229
230 REG_WRITE(ah, AR_PHY_AIC_CTRL_4_B0,
231 (SM(2, AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO) |
232 SM(3, AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO) |
233 SM(0, AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING) |
234 SM(2, AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF) |
235 SM(1, AR_PHY_AIC_CAL_COM_ATT_DB_FIXED)));
236
237 REG_WRITE(ah, AR_PHY_AIC_CTRL_4_B1,
238 (SM(2, AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO) |
239 SM(3, AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO) |
240 SM(0, AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING) |
241 SM(2, AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF) |
242 SM(1, AR_PHY_AIC_CAL_COM_ATT_DB_FIXED)));
243
244 ar9003_aic_gain_table(ah);
245
246 /* Need to enable AIC reference signal in BT modem. */
247 REG_WRITE(ah, ATH_AIC_BT_JUPITER_CTRL,
248 (REG_READ(ah, ATH_AIC_BT_JUPITER_CTRL) |
249 ATH_AIC_BT_AIC_ENABLE));
250
251 aic->aic_cal_start_time = REG_READ(ah, AR_TSF_L32);
252
253 /* Start calibration */
254 REG_CLR_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
255 REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_CH_VALID_RESET);
256 REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
257
258 aic->aic_caled_chan = 0;
259 aic->aic_cal_state = AIC_CAL_STATE_STARTED;
260
261 return aic->aic_cal_state;
262}
263
264static bool ar9003_aic_cal_post_process(struct ath_hw *ah)
265{
266 struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
267 struct ath_aic_sram_info cal_sram[ATH_AIC_MAX_BT_CHANNEL];
268 struct ath_aic_out_info aic_sram[ATH_AIC_MAX_BT_CHANNEL];
269 u32 dir_path_gain_idx, quad_path_gain_idx, value;
270 u32 fixed_com_att_db;
271 int8_t dir_path_sign, quad_path_sign;
272 int16_t i;
273 bool ret = true;
274
275 memset(&cal_sram, 0, sizeof(cal_sram));
276 memset(&aic_sram, 0, sizeof(aic_sram));
277
278 for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
279 value = aic->aic_sram[i];
280
281 cal_sram[i].valid =
282 MS(value, AR_PHY_AIC_SRAM_VALID);
283 cal_sram[i].rot_quad_att_db =
284 MS(value, AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB);
285 cal_sram[i].vga_quad_sign =
286 MS(value, AR_PHY_AIC_SRAM_VGA_QUAD_SIGN);
287 cal_sram[i].rot_dir_att_db =
288 MS(value, AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB);
289 cal_sram[i].vga_dir_sign =
290 MS(value, AR_PHY_AIC_SRAM_VGA_DIR_SIGN);
291 cal_sram[i].com_att_6db =
292 MS(value, AR_PHY_AIC_SRAM_COM_ATT_6DB);
293
294 if (cal_sram[i].valid) {
295 dir_path_gain_idx = cal_sram[i].rot_dir_att_db +
296 com_att_db_table[cal_sram[i].com_att_6db];
297 quad_path_gain_idx = cal_sram[i].rot_quad_att_db +
298 com_att_db_table[cal_sram[i].com_att_6db];
299
300 dir_path_sign = (cal_sram[i].vga_dir_sign) ? 1 : -1;
301 quad_path_sign = (cal_sram[i].vga_quad_sign) ? 1 : -1;
302
303 aic_sram[i].dir_path_gain_lin = dir_path_sign *
304 aic_lin_table[dir_path_gain_idx];
305 aic_sram[i].quad_path_gain_lin = quad_path_sign *
306 aic_lin_table[quad_path_gain_idx];
307 }
308 }
309
310 for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
311 int16_t start_idx, end_idx;
312
313 if (cal_sram[i].valid)
314 continue;
315
316 start_idx = ar9003_aic_find_valid(cal_sram, 0, i);
317 end_idx = ar9003_aic_find_valid(cal_sram, 1, i);
318
319 if (start_idx < 0) {
320 /* extrapolation */
321 start_idx = end_idx;
322 end_idx = ar9003_aic_find_valid(cal_sram, 1, start_idx);
323
324 if (end_idx < 0) {
325 ret = false;
326 break;
327 }
328
329 aic_sram[i].dir_path_gain_lin =
330 ((aic_sram[start_idx].dir_path_gain_lin -
331 aic_sram[end_idx].dir_path_gain_lin) *
332 (start_idx - i) + ((end_idx - i) >> 1)) /
333 (end_idx - i) +
334 aic_sram[start_idx].dir_path_gain_lin;
335 aic_sram[i].quad_path_gain_lin =
336 ((aic_sram[start_idx].quad_path_gain_lin -
337 aic_sram[end_idx].quad_path_gain_lin) *
338 (start_idx - i) + ((end_idx - i) >> 1)) /
339 (end_idx - i) +
340 aic_sram[start_idx].quad_path_gain_lin;
341 }
342
343 if (end_idx < 0) {
344 /* extrapolation */
345 end_idx = ar9003_aic_find_valid(cal_sram, 0, start_idx);
346
347 if (end_idx < 0) {
348 ret = false;
349 break;
350 }
351
352 aic_sram[i].dir_path_gain_lin =
353 ((aic_sram[start_idx].dir_path_gain_lin -
354 aic_sram[end_idx].dir_path_gain_lin) *
355 (i - start_idx) + ((start_idx - end_idx) >> 1)) /
356 (start_idx - end_idx) +
357 aic_sram[start_idx].dir_path_gain_lin;
358 aic_sram[i].quad_path_gain_lin =
359 ((aic_sram[start_idx].quad_path_gain_lin -
360 aic_sram[end_idx].quad_path_gain_lin) *
361 (i - start_idx) + ((start_idx - end_idx) >> 1)) /
362 (start_idx - end_idx) +
363 aic_sram[start_idx].quad_path_gain_lin;
364
365 } else if (start_idx >= 0){
366 /* interpolation */
367 aic_sram[i].dir_path_gain_lin =
368 (((end_idx - i) * aic_sram[start_idx].dir_path_gain_lin) +
369 ((i - start_idx) * aic_sram[end_idx].dir_path_gain_lin) +
370 ((end_idx - start_idx) >> 1)) /
371 (end_idx - start_idx);
372 aic_sram[i].quad_path_gain_lin =
373 (((end_idx - i) * aic_sram[start_idx].quad_path_gain_lin) +
374 ((i - start_idx) * aic_sram[end_idx].quad_path_gain_lin) +
375 ((end_idx - start_idx) >> 1))/
376 (end_idx - start_idx);
377 }
378 }
379
380 /* From dir/quad_path_gain_lin to sram. */
381 i = ar9003_aic_find_valid(cal_sram, 1, 0);
382 if (i < 0) {
383 i = 0;
384 ret = false;
385 }
386 fixed_com_att_db = com_att_db_table[cal_sram[i].com_att_6db];
387
388 for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
389 int16_t rot_dir_path_att_db, rot_quad_path_att_db;
390
391 aic_sram[i].sram.vga_dir_sign =
392 (aic_sram[i].dir_path_gain_lin >= 0) ? 1 : 0;
393 aic_sram[i].sram.vga_quad_sign=
394 (aic_sram[i].quad_path_gain_lin >= 0) ? 1 : 0;
395
396 rot_dir_path_att_db =
397 ar9003_aic_find_index(0, abs(aic_sram[i].dir_path_gain_lin)) -
398 fixed_com_att_db;
399 rot_quad_path_att_db =
400 ar9003_aic_find_index(0, abs(aic_sram[i].quad_path_gain_lin)) -
401 fixed_com_att_db;
402
403 aic_sram[i].sram.com_att_6db =
404 ar9003_aic_find_index(1, fixed_com_att_db);
405
406 aic_sram[i].sram.valid = 1;
407
408 aic_sram[i].sram.rot_dir_att_db =
409 min(max(rot_dir_path_att_db,
410 (int16_t)ATH_AIC_MIN_ROT_DIR_ATT_DB),
411 ATH_AIC_MAX_ROT_DIR_ATT_DB);
412 aic_sram[i].sram.rot_quad_att_db =
413 min(max(rot_quad_path_att_db,
414 (int16_t)ATH_AIC_MIN_ROT_QUAD_ATT_DB),
415 ATH_AIC_MAX_ROT_QUAD_ATT_DB);
416 }
417
418 for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
419 aic->aic_sram[i] = (SM(aic_sram[i].sram.vga_dir_sign,
420 AR_PHY_AIC_SRAM_VGA_DIR_SIGN) |
421 SM(aic_sram[i].sram.vga_quad_sign,
422 AR_PHY_AIC_SRAM_VGA_QUAD_SIGN) |
423 SM(aic_sram[i].sram.com_att_6db,
424 AR_PHY_AIC_SRAM_COM_ATT_6DB) |
425 SM(aic_sram[i].sram.valid,
426 AR_PHY_AIC_SRAM_VALID) |
427 SM(aic_sram[i].sram.rot_dir_att_db,
428 AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB) |
429 SM(aic_sram[i].sram.rot_quad_att_db,
430 AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB));
431 }
432
433 return ret;
434}
435
/*
 * Finish an AIC calibration run: turn off the calibration reference
 * signal in the BT modem, then post-process the gathered SRAM data and
 * record the final calibration state (DONE on success, ERROR otherwise).
 */
436static void ar9003_aic_cal_done(struct ath_hw *ah)
437{
438	struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
439
440	/* Disable AIC reference signal in BT modem. */
441	REG_WRITE(ah, ATH_AIC_BT_JUPITER_CTRL,
442		  (REG_READ(ah, ATH_AIC_BT_JUPITER_CTRL) &
443		   ~ATH_AIC_BT_AIC_ENABLE));
444
445	if (ar9003_aic_cal_post_process(ah))
446		aic->aic_cal_state = AIC_CAL_STATE_DONE;
447	else
448		aic->aic_cal_state = AIC_CAL_STATE_ERROR;
449}
450
/*
 * Advance one step of AIC calibration and collect per-BT-channel results.
 *
 * @cal_once: when true, block (poll up to 10000 x 100us) for the current
 *            hardware calibration to finish and then complete in a single
 *            pass; when false, harvest at most one new channel result and
 *            re-arm the hardware for the next channel.
 *
 * Returns the resulting aic_cal_state.
 */
451static u8 ar9003_aic_cal_continue(struct ath_hw *ah, bool cal_once)
452{
453	struct ath_common *common = ath9k_hw_common(ah);
454	struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
455	struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
456	int i, num_chan;
457
458	num_chan = MS(mci_hw->config, ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN);
459
460	if (!num_chan) {
461		aic->aic_cal_state = AIC_CAL_STATE_ERROR;
462		return aic->aic_cal_state;
463	}
464
465	if (cal_once) {
466		for (i = 0; i < 10000; i++) {
467			if ((REG_READ(ah, AR_PHY_AIC_CTRL_0_B1) &
468			     AR_PHY_AIC_CAL_ENABLE) == 0)
469				break;
470
471			udelay(100);
472		}
473	}
474
475	/*
476	 * Use AR_PHY_AIC_CAL_ENABLE bit instead of AR_PHY_AIC_CAL_DONE.
477	 * Sometimes CAL_DONE bit is not asserted.
478	 */
	/*
	 * NOTE(review): the poll loop above allows up to 10000 * 100us = 1s,
	 * while the message below says 40ms -- confirm intended timeout.
	 */
479	if ((REG_READ(ah, AR_PHY_AIC_CTRL_0_B1) &
480	     AR_PHY_AIC_CAL_ENABLE) != 0) {
481		ath_dbg(common, MCI, "AIC cal is not done after 40ms");
482		goto exit;
483	}
484
485	REG_WRITE(ah, AR_PHY_AIC_SRAM_ADDR_B1,
486		  (ATH_AIC_SRAM_CAL_OFFSET | ATH_AIC_SRAM_AUTO_INCREMENT));
487
	/* Read back one auto-incremented SRAM word per BT channel; bit 0
	 * marks a valid measurement.  First-time results bump the count of
	 * calibrated channels.
	 */
488	for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
489		u32 value;
490
491		value = REG_READ(ah, AR_PHY_AIC_SRAM_DATA_B1);
492
493		if (value & 0x01) {
494			if (aic->aic_sram[i] == 0)
495				aic->aic_caled_chan++;
496
497			aic->aic_sram[i] = value;
498
499			if (!cal_once)
500				break;
501		}
502	}
503
504	if ((aic->aic_caled_chan >= num_chan) || cal_once) {
505		ar9003_aic_cal_done(ah);
506	} else {
507		/* Start calibration */
508		REG_CLR_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
509		REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1,
510			    AR_PHY_AIC_CAL_CH_VALID_RESET);
511		REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE);
512	}
513exit:
514	return aic->aic_cal_state;
515
516}
517
/*
 * AIC calibration state machine entry point: start a new calibration
 * from IDLE, continue a running one from STARTED, and report DONE as-is.
 * Any other state (including ERROR) falls through and returns
 * AIC_CAL_STATE_ERROR via the initial value of cal_ret.
 */
518u8 ar9003_aic_calibration(struct ath_hw *ah)
519{
520	struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
521	u8 cal_ret = AIC_CAL_STATE_ERROR;
522
523	switch (aic->aic_cal_state) {
524	case AIC_CAL_STATE_IDLE:
525		cal_ret = ar9003_aic_cal_start(ah, 1);
526		break;
527	case AIC_CAL_STATE_STARTED:
528		cal_ret = ar9003_aic_cal_continue(ah, false);
529		break;
530	case AIC_CAL_STATE_DONE:
531		cal_ret = AIC_CAL_STATE_DONE;
532		break;
533	default:
534		break;
535	}
536
537	return cal_ret;
538}
539
/*
 * Enable AIC for normal operation: program the gain table, upload the
 * calibrated per-channel words into the AIC SRAM (auto-increment mode),
 * and flip the hardware enable registers.
 *
 * Returns 0 on success, 1 if calibration has not completed yet.
 */
540u8 ar9003_aic_start_normal(struct ath_hw *ah)
541{
542	struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
543	int16_t i;
544
545	if (aic->aic_cal_state != AIC_CAL_STATE_DONE)
546		return 1;
547
548	ar9003_aic_gain_table(ah);
549
550	REG_WRITE(ah, AR_PHY_AIC_SRAM_ADDR_B1, ATH_AIC_SRAM_AUTO_INCREMENT);
551
552	for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
553		REG_WRITE(ah, AR_PHY_AIC_SRAM_DATA_B1, aic->aic_sram[i]);
554	}
555
556	/* FIXME: Replace these with proper register names */
557	REG_WRITE(ah, 0xa6b0, 0x80);
558	REG_WRITE(ah, 0xa6b4, 0x5b2df0);
559	REG_WRITE(ah, 0xa6b8, 0x10762cc8);
560	REG_WRITE(ah, 0xa6bc, 0x1219a4b);
561	REG_WRITE(ah, 0xa6c0, 0x1e01);
562	REG_WRITE(ah, 0xb6b4, 0xf0);
563	REG_WRITE(ah, 0xb6c0, 0x1e01);
564	REG_WRITE(ah, 0xb6b0, 0x81);
565	REG_WRITE(ah, AR_PHY_65NM_CH1_RXTX4, 0x40000000);
566
567	aic->aic_enabled = true;
568
569	return 0;
570}
571
/* Reset the AIC calibration state machine to IDLE and return the new state. */
572u8 ar9003_aic_cal_reset(struct ath_hw *ah)
573{
574	struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
575
576	aic->aic_cal_state = AIC_CAL_STATE_IDLE;
577	return aic->aic_cal_state;
578}
579
/*
 * Run a complete single-shot AIC calibration: kick off calibration for
 * the MCI-configured number of BT channels, then block in
 * ar9003_aic_cal_continue(cal_once=true) until it finishes.
 * The cal_start return value is deliberately discarded; the final state
 * comes from the blocking continue call.
 */
580u8 ar9003_aic_calibration_single(struct ath_hw *ah)
581{
582	struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
583	u8 cal_ret;
584	int num_chan;
585
586	num_chan = MS(mci_hw->config, ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN);
587
588	(void) ar9003_aic_cal_start(ah, num_chan);
589	cal_ret = ar9003_aic_cal_continue(ah, true);
590
591	return cal_ret;
592}
593
/* Hook the AIC query op into the hardware private-ops table. */
594void ar9003_hw_attach_aic_ops(struct ath_hw *ah)
595{
596	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
597
598	priv_ops->is_aic_enabled = ar9003_hw_is_aic_enabled;
599}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.h b/drivers/net/wireless/ath/ath9k/ar9003_aic.h
new file mode 100644
index 000000000000..86f40644be43
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_aic.h
@@ -0,0 +1,61 @@
1/*
2 * Copyright (c) 2015 Qualcomm Atheros Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef AR9003_AIC_H
18#define AR9003_AIC_H
19
/* AIC table sizes and attenuation limits (dB). */
20#define ATH_AIC_MAX_COM_ATT_DB_TABLE    6
21#define ATH_AIC_MAX_AIC_LIN_TABLE       69
22#define ATH_AIC_MIN_ROT_DIR_ATT_DB      0
23#define ATH_AIC_MIN_ROT_QUAD_ATT_DB     0
24#define ATH_AIC_MAX_ROT_DIR_ATT_DB      37
25#define ATH_AIC_MAX_ROT_QUAD_ATT_DB     37
/* AIC SRAM addressing: auto-increment flag and table base offsets. */
26#define ATH_AIC_SRAM_AUTO_INCREMENT     0x80000000
27#define ATH_AIC_SRAM_GAIN_TABLE_OFFSET  0x280
28#define ATH_AIC_SRAM_CAL_OFFSET         0x140
29#define ATH_AIC_SRAM_OFFSET             0x00
30#define ATH_AIC_MEAS_MAG_THRESH         20
/* BT modem control register and its AIC-enable bit. */
31#define ATH_AIC_BT_JUPITER_CTRL         0x66820
32#define ATH_AIC_BT_AIC_ENABLE           0x02
33
/* Lifecycle of the AIC calibration state machine. */
34enum aic_cal_state {
35	AIC_CAL_STATE_IDLE = 0,
36	AIC_CAL_STATE_STARTED,
37	AIC_CAL_STATE_DONE,
38	AIC_CAL_STATE_ERROR
39};
40
/*
 * Unpacked fields of one per-channel AIC SRAM word: validity flag, VGA
 * direct/quadrature path sign bits, rotation attenuations (dB) and the
 * common 6dB attenuation index.
 */
41struct ath_aic_sram_info {
42	bool valid:1;
43	bool vga_quad_sign:1;
44	bool vga_dir_sign:1;
45	u8 rot_quad_att_db;
46	u8 rot_dir_att_db;
47	u8 com_att_6db;
48};
49
/*
 * Per-channel calibration output: linear direct/quadrature path gains
 * plus the decoded SRAM fields derived from them.
 */
50struct ath_aic_out_info {
51	int16_t dir_path_gain_lin;
52	int16_t quad_path_gain_lin;
53	struct ath_aic_sram_info sram;
54};
55
/* Public AIC entry points; all return a u8 state/status code. */
56u8 ar9003_aic_calibration(struct ath_hw *ah);
57u8 ar9003_aic_start_normal(struct ath_hw *ah);
58u8 ar9003_aic_cal_reset(struct ath_hw *ah);
59u8 ar9003_aic_calibration_single(struct ath_hw *ah);
60
61#endif /* AR9003_AIC_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 4335ccbe7d7e..79fd3b2dcbde 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -195,16 +195,16 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
195 INIT_INI_ARRAY(&ah->iniCckfirJapan2484, 195 INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
196 ar9485_1_1_baseband_core_txfir_coeff_japan_2484); 196 ar9485_1_1_baseband_core_txfir_coeff_japan_2484);
197 197
198 if (ah->config.no_pll_pwrsave) { 198 if (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) {
199 INIT_INI_ARRAY(&ah->iniPcieSerdes, 199 INIT_INI_ARRAY(&ah->iniPcieSerdes,
200 ar9485_1_1_pcie_phy_clkreq_disable_L1); 200 ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
201 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 201 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
202 ar9485_1_1_pcie_phy_clkreq_disable_L1); 202 ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1);
203 } else { 203 } else {
204 INIT_INI_ARRAY(&ah->iniPcieSerdes, 204 INIT_INI_ARRAY(&ah->iniPcieSerdes,
205 ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1); 205 ar9485_1_1_pcie_phy_clkreq_disable_L1);
206 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 206 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
207 ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1); 207 ar9485_1_1_pcie_phy_clkreq_disable_L1);
208 } 208 }
209 } else if (AR_SREV_9462_21(ah)) { 209 } else if (AR_SREV_9462_21(ah)) {
210 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], 210 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -231,10 +231,20 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
231 ar9462_2p1_modes_fast_clock); 231 ar9462_2p1_modes_fast_clock);
232 INIT_INI_ARRAY(&ah->iniCckfirJapan2484, 232 INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
233 ar9462_2p1_baseband_core_txfir_coeff_japan_2484); 233 ar9462_2p1_baseband_core_txfir_coeff_japan_2484);
234 INIT_INI_ARRAY(&ah->iniPcieSerdes, 234
235 ar9462_2p1_pciephy_clkreq_disable_L1); 235 /* Awake -> Sleep Setting */
236 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 236 if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
237 ar9462_2p1_pciephy_clkreq_disable_L1); 237 (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
238 INIT_INI_ARRAY(&ah->iniPcieSerdes,
239 ar9462_2p1_pciephy_clkreq_disable_L1);
240 }
241
242 /* Sleep -> Awake Setting */
243 if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
244 (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
245 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
246 ar9462_2p1_pciephy_clkreq_disable_L1);
247 }
238 } else if (AR_SREV_9462_20(ah)) { 248 } else if (AR_SREV_9462_20(ah)) {
239 249
240 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core); 250 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core);
@@ -262,11 +272,18 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
262 ar9462_2p0_common_rx_gain); 272 ar9462_2p0_common_rx_gain);
263 273
264 /* Awake -> Sleep Setting */ 274 /* Awake -> Sleep Setting */
265 INIT_INI_ARRAY(&ah->iniPcieSerdes, 275 if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
266 ar9462_2p0_pciephy_clkreq_disable_L1); 276 (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
277 INIT_INI_ARRAY(&ah->iniPcieSerdes,
278 ar9462_2p0_pciephy_clkreq_disable_L1);
279 }
280
267 /* Sleep -> Awake Setting */ 281 /* Sleep -> Awake Setting */
268 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 282 if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
269 ar9462_2p0_pciephy_clkreq_disable_L1); 283 (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
284 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
285 ar9462_2p0_pciephy_clkreq_disable_L1);
286 }
270 287
271 /* Fast clock modal settings */ 288 /* Fast clock modal settings */
272 INIT_INI_ARRAY(&ah->iniModesFastClock, 289 INIT_INI_ARRAY(&ah->iniModesFastClock,
@@ -456,10 +473,19 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
456 INIT_INI_ARRAY(&ah->iniModesTxGain, 473 INIT_INI_ARRAY(&ah->iniModesTxGain,
457 ar9565_1p1_Modes_lowest_ob_db_tx_gain_table); 474 ar9565_1p1_Modes_lowest_ob_db_tx_gain_table);
458 475
459 INIT_INI_ARRAY(&ah->iniPcieSerdes, 476 /* Awake -> Sleep Setting */
460 ar9565_1p1_pciephy_clkreq_disable_L1); 477 if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
461 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 478 (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
462 ar9565_1p1_pciephy_clkreq_disable_L1); 479 INIT_INI_ARRAY(&ah->iniPcieSerdes,
480 ar9565_1p1_pciephy_clkreq_disable_L1);
481 }
482
483 /* Sleep -> Awake Setting */
484 if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
485 (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
486 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
487 ar9565_1p1_pciephy_clkreq_disable_L1);
488 }
463 489
464 INIT_INI_ARRAY(&ah->iniModesFastClock, 490 INIT_INI_ARRAY(&ah->iniModesFastClock,
465 ar9565_1p1_modes_fast_clock); 491 ar9565_1p1_modes_fast_clock);
@@ -491,10 +517,19 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
491 INIT_INI_ARRAY(&ah->iniModesTxGain, 517 INIT_INI_ARRAY(&ah->iniModesTxGain,
492 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table); 518 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table);
493 519
494 INIT_INI_ARRAY(&ah->iniPcieSerdes, 520 /* Awake -> Sleep Setting */
495 ar9565_1p0_pciephy_clkreq_disable_L1); 521 if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
496 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 522 (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) {
497 ar9565_1p0_pciephy_clkreq_disable_L1); 523 INIT_INI_ARRAY(&ah->iniPcieSerdes,
524 ar9565_1p0_pciephy_clkreq_disable_L1);
525 }
526
527 /* Sleep -> Awake Setting */
528 if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) &&
529 (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) {
530 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
531 ar9565_1p0_pciephy_clkreq_disable_L1);
532 }
498 533
499 INIT_INI_ARRAY(&ah->iniModesFastClock, 534 INIT_INI_ARRAY(&ah->iniModesFastClock,
500 ar9565_1p0_modes_fast_clock); 535 ar9565_1p0_modes_fast_clock);
@@ -1130,6 +1165,12 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
1130 struct ath_hw_ops *ops = ath9k_hw_ops(ah); 1165 struct ath_hw_ops *ops = ath9k_hw_ops(ah);
1131 1166
1132 ar9003_hw_init_mode_regs(ah); 1167 ar9003_hw_init_mode_regs(ah);
1168
1169 if (AR_SREV_9003_PCOEM(ah)) {
1170 WARN_ON(!ah->iniPcieSerdes.ia_array);
1171 WARN_ON(!ah->iniPcieSerdesLowPower.ia_array);
1172 }
1173
1133 priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs; 1174 priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
1134 priv_ops->init_hang_checks = ar9003_hw_init_hang_checks; 1175 priv_ops->init_hang_checks = ar9003_hw_init_hang_checks;
1135 priv_ops->detect_mac_hang = ar9003_hw_detect_mac_hang; 1176 priv_ops->detect_mac_hang = ar9003_hw_detect_mac_hang;
@@ -1139,4 +1180,5 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
1139 ar9003_hw_attach_phy_ops(ah); 1180 ar9003_hw_attach_phy_ops(ah);
1140 ar9003_hw_attach_calib_ops(ah); 1181 ar9003_hw_attach_calib_ops(ah);
1141 ar9003_hw_attach_mac_ops(ah); 1182 ar9003_hw_attach_mac_ops(ah);
1183 ar9003_hw_attach_aic_ops(ah);
1142} 1184}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index bd169fae32a1..af5ee416a560 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -19,6 +19,7 @@
19#include "hw-ops.h" 19#include "hw-ops.h"
20#include "ar9003_phy.h" 20#include "ar9003_phy.h"
21#include "ar9003_mci.h" 21#include "ar9003_mci.h"
22#include "ar9003_aic.h"
22 23
23static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah) 24static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah)
24{ 25{
@@ -1016,6 +1017,9 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
1016 if (en_int) 1017 if (en_int)
1017 ar9003_mci_enable_interrupt(ah); 1018 ar9003_mci_enable_interrupt(ah);
1018 1019
1020 if (ath9k_hw_is_aic_enabled(ah))
1021 ar9003_aic_start_normal(ah);
1022
1019 return 0; 1023 return 0;
1020} 1024}
1021 1025
@@ -1362,6 +1366,22 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
1362 value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ? 1 : 0; 1366 value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ? 1 : 0;
1363 mci->need_flush_btinfo = false; 1367 mci->need_flush_btinfo = false;
1364 break; 1368 break;
1369 case MCI_STATE_AIC_CAL:
1370 if (ath9k_hw_is_aic_enabled(ah))
1371 value = ar9003_aic_calibration(ah);
1372 break;
1373 case MCI_STATE_AIC_START:
1374 if (ath9k_hw_is_aic_enabled(ah))
1375 ar9003_aic_start_normal(ah);
1376 break;
1377 case MCI_STATE_AIC_CAL_RESET:
1378 if (ath9k_hw_is_aic_enabled(ah))
1379 value = ar9003_aic_cal_reset(ah);
1380 break;
1381 case MCI_STATE_AIC_CAL_SINGLE:
1382 if (ath9k_hw_is_aic_enabled(ah))
1383 value = ar9003_aic_calibration_single(ah);
1384 break;
1365 default: 1385 default:
1366 break; 1386 break;
1367 } 1387 }
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index c311b2bfdb00..fc595b92ac56 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -640,16 +640,6 @@
640#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE 0x0000ff00 640#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE 0x0000ff00
641#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE_S 8 641#define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE_S 8
642 642
643/* AIC Registers */
644#define AR_PHY_AIC_CTRL_0_B0 (AR_SM_BASE + 0x4b0)
645#define AR_PHY_AIC_CTRL_1_B0 (AR_SM_BASE + 0x4b4)
646#define AR_PHY_AIC_CTRL_2_B0 (AR_SM_BASE + 0x4b8)
647#define AR_PHY_AIC_CTRL_3_B0 (AR_SM_BASE + 0x4bc)
648#define AR_PHY_AIC_STAT_0_B0 (AR_SM_BASE + 0x4c4))
649#define AR_PHY_AIC_STAT_1_B0 (AR_SM_BASE + 0x4c8))
650#define AR_PHY_AIC_CTRL_4_B0 (AR_SM_BASE + 0x4c0)
651#define AR_PHY_AIC_STAT_2_B0 (AR_SM_BASE + 0x4cc)
652
653#define AR_PHY_65NM_CH0_TXRF3 0x16048 643#define AR_PHY_65NM_CH0_TXRF3 0x16048
654#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G 0x0000001e 644#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G 0x0000001e
655#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G_S 1 645#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G_S 1
@@ -989,21 +979,6 @@
989#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c) 979#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c)
990#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM1_BASE + 0x450 + ((_i) << 2)) 980#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM1_BASE + 0x450 + ((_i) << 2))
991 981
992/* SM 1 AIC Registers */
993
994#define AR_PHY_AIC_CTRL_0_B1 (AR_SM1_BASE + 0x4b0)
995#define AR_PHY_AIC_CTRL_1_B1 (AR_SM1_BASE + 0x4b4)
996#define AR_PHY_AIC_CTRL_2_B1 (AR_SM1_BASE + 0x4b8)
997#define AR_PHY_AIC_STAT_0_B1 (AR_SM1_BASE + (AR_SREV_9462_10(ah) ? \
998 0x4c0 : 0x4c4))
999#define AR_PHY_AIC_STAT_1_B1 (AR_SM1_BASE + (AR_SREV_9462_10(ah) ? \
1000 0x4c4 : 0x4c8))
1001#define AR_PHY_AIC_CTRL_4_B1 (AR_SM1_BASE + 0x4c0)
1002#define AR_PHY_AIC_STAT_2_B1 (AR_SM1_BASE + 0x4cc)
1003
1004#define AR_PHY_AIC_SRAM_ADDR_B1 (AR_SM1_BASE + 0x5f0)
1005#define AR_PHY_AIC_SRAM_DATA_B1 (AR_SM1_BASE + 0x5f4)
1006
1007#define AR_PHY_RTT_TABLE_SW_INTF_B(i) (0x384 + ((i) ? \ 982#define AR_PHY_RTT_TABLE_SW_INTF_B(i) (0x384 + ((i) ? \
1008 AR_SM1_BASE : AR_SM_BASE)) 983 AR_SM1_BASE : AR_SM_BASE))
1009#define AR_PHY_RTT_TABLE_SW_INTF_1_B(i) (0x388 + ((i) ? \ 984#define AR_PHY_RTT_TABLE_SW_INTF_1_B(i) (0x388 + ((i) ? \
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
index 934418872e8e..e4d11fa7fe8c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
@@ -106,7 +106,7 @@ void ar9003_hw_rtt_load_hist(struct ath_hw *ah)
106 int chain, i; 106 int chain, i;
107 107
108 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { 108 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
109 if (!(ah->rxchainmask & (1 << chain))) 109 if (!(ah->caps.rx_chainmask & (1 << chain)))
110 continue; 110 continue;
111 for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) { 111 for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
112 ar9003_hw_rtt_load_hist_entry(ah, chain, i, 112 ar9003_hw_rtt_load_hist_entry(ah, chain, i,
@@ -171,7 +171,7 @@ void ar9003_hw_rtt_fill_hist(struct ath_hw *ah)
171 int chain, i; 171 int chain, i;
172 172
173 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { 173 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
174 if (!(ah->rxchainmask & (1 << chain))) 174 if (!(ah->caps.rx_chainmask & (1 << chain)))
175 continue; 175 continue;
176 for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) { 176 for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
177 ah->caldata->rtt_table[chain][i] = 177 ah->caldata->rtt_table[chain][i] =
@@ -193,7 +193,7 @@ void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
193 int chain, i; 193 int chain, i;
194 194
195 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { 195 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
196 if (!(ah->rxchainmask & (1 << chain))) 196 if (!(ah->caps.rx_chainmask & (1 << chain)))
197 continue; 197 continue;
198 for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) 198 for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++)
199 ar9003_hw_rtt_load_hist_entry(ah, chain, i, 0); 199 ar9003_hw_rtt_load_hist_entry(ah, chain, i, 0);
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 7e89236c0e13..a7a81b3969ce 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -184,12 +184,12 @@ struct ath_frame_info {
184 struct ath_buf *bf; 184 struct ath_buf *bf;
185 u16 framelen; 185 u16 framelen;
186 s8 txq; 186 s8 txq;
187 enum ath9k_key_type keytype;
188 u8 keyix; 187 u8 keyix;
189 u8 rtscts_rate; 188 u8 rtscts_rate;
190 u8 retries : 7; 189 u8 retries : 7;
191 u8 baw_tracked : 1; 190 u8 baw_tracked : 1;
192 u8 tx_power; 191 u8 tx_power;
192 enum ath9k_key_type keytype:2;
193}; 193};
194 194
195struct ath_rxbuf { 195struct ath_rxbuf {
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index cb366adc820b..f50a6bc5d06e 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -219,12 +219,15 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
219 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 219 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
220 struct ath_vif *avp = (void *)vif->drv_priv; 220 struct ath_vif *avp = (void *)vif->drv_priv;
221 struct ath_buf *bf = avp->av_bcbuf; 221 struct ath_buf *bf = avp->av_bcbuf;
222 struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
222 223
223 ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n", 224 ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n",
224 avp->av_bslot); 225 avp->av_bslot);
225 226
226 tasklet_disable(&sc->bcon_tasklet); 227 tasklet_disable(&sc->bcon_tasklet);
227 228
229 cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
230
228 if (bf && bf->bf_mpdu) { 231 if (bf && bf->bf_mpdu) {
229 struct sk_buff *skb = bf->bf_mpdu; 232 struct sk_buff *skb = bf->bf_mpdu;
230 dma_unmap_single(sc->dev, bf->bf_buf_addr, 233 dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -521,8 +524,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
521 } 524 }
522 525
523 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) { 526 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
524 if ((vif->type != NL80211_IFTYPE_AP) || 527 if (vif->type != NL80211_IFTYPE_AP) {
525 (sc->nbcnvifs > 1)) {
526 ath_dbg(common, CONFIG, 528 ath_dbg(common, CONFIG,
527 "An AP interface is already present !\n"); 529 "An AP interface is already present !\n");
528 return false; 530 return false;
@@ -616,12 +618,14 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
616 * enabling/disabling SWBA. 618 * enabling/disabling SWBA.
617 */ 619 */
618 if (changed & BSS_CHANGED_BEACON_ENABLED) { 620 if (changed & BSS_CHANGED_BEACON_ENABLED) {
619 if (!bss_conf->enable_beacon && 621 bool enabled = cur_conf->enable_beacon;
620 (sc->nbcnvifs <= 1)) { 622
621 cur_conf->enable_beacon = false; 623 if (!bss_conf->enable_beacon) {
622 } else if (bss_conf->enable_beacon) { 624 cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
623 cur_conf->enable_beacon = true; 625 } else {
624 ath9k_cache_beacon_config(sc, ctx, bss_conf); 626 cur_conf->enable_beacon |= BIT(avp->av_bslot);
627 if (!enabled)
628 ath9k_cache_beacon_config(sc, ctx, bss_conf);
625 } 629 }
626 } 630 }
627 631
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 5fe62ff2223b..cd2f0a2373cb 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -44,6 +44,9 @@
44 44
45#define AR9300_NUM_BT_WEIGHTS 4 45#define AR9300_NUM_BT_WEIGHTS 4
46#define AR9300_NUM_WLAN_WEIGHTS 4 46#define AR9300_NUM_WLAN_WEIGHTS 4
47
48#define ATH_AIC_MAX_BT_CHANNEL 79
49
47/* Defines the BT AR_BT_COEX_WGHT used */ 50/* Defines the BT AR_BT_COEX_WGHT used */
48enum ath_stomp_type { 51enum ath_stomp_type {
49 ATH_BTCOEX_STOMP_ALL, 52 ATH_BTCOEX_STOMP_ALL,
@@ -93,9 +96,18 @@ struct ath9k_hw_mci {
93 u32 last_recovery; 96 u32 last_recovery;
94}; 97};
95 98
99struct ath9k_hw_aic {
100 bool aic_enabled;
101 u8 aic_cal_state;
102 u8 aic_caled_chan;
103 u32 aic_sram[ATH_AIC_MAX_BT_CHANNEL];
104 u32 aic_cal_start_time;
105};
106
96struct ath_btcoex_hw { 107struct ath_btcoex_hw {
97 enum ath_btcoex_scheme scheme; 108 enum ath_btcoex_scheme scheme;
98 struct ath9k_hw_mci mci; 109 struct ath9k_hw_mci mci;
110 struct ath9k_hw_aic aic;
99 bool enabled; 111 bool enabled;
100 u8 wlanactive_gpio; 112 u8 wlanactive_gpio;
101 u8 btactive_gpio; 113 u8 btactive_gpio;
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index e200a6e3aca5..3e2e24e4843f 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -238,7 +238,6 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
238{ 238{
239 struct ath9k_nfcal_hist *h = NULL; 239 struct ath9k_nfcal_hist *h = NULL;
240 unsigned i, j; 240 unsigned i, j;
241 int32_t val;
242 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; 241 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
243 struct ath_common *common = ath9k_hw_common(ah); 242 struct ath_common *common = ath9k_hw_common(ah);
244 s16 default_nf = ath9k_hw_get_default_nf(ah, chan); 243 s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
@@ -246,6 +245,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
246 if (ah->caldata) 245 if (ah->caldata)
247 h = ah->caldata->nfCalHist; 246 h = ah->caldata->nfCalHist;
248 247
248 ENABLE_REG_RMW_BUFFER(ah);
249 for (i = 0; i < NUM_NF_READINGS; i++) { 249 for (i = 0; i < NUM_NF_READINGS; i++) {
250 if (chainmask & (1 << i)) { 250 if (chainmask & (1 << i)) {
251 s16 nfval; 251 s16 nfval;
@@ -258,10 +258,8 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
258 else 258 else
259 nfval = default_nf; 259 nfval = default_nf;
260 260
261 val = REG_READ(ah, ah->nf_regs[i]); 261 REG_RMW(ah, ah->nf_regs[i],
262 val &= 0xFFFFFE00; 262 (((u32) nfval << 1) & 0x1ff), 0x1ff);
263 val |= (((u32) nfval << 1) & 0x1ff);
264 REG_WRITE(ah, ah->nf_regs[i], val);
265 } 263 }
266 } 264 }
267 265
@@ -274,6 +272,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
274 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, 272 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
275 AR_PHY_AGC_CONTROL_NO_UPDATE_NF); 273 AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
276 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); 274 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
275 REG_RMW_BUFFER_FLUSH(ah);
277 276
278 /* 277 /*
279 * Wait for load to complete, should be fast, a few 10s of us. 278 * Wait for load to complete, should be fast, a few 10s of us.
@@ -309,19 +308,17 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
309 * by the median we just loaded. This will be initial (and max) value 308 * by the median we just loaded. This will be initial (and max) value
310 * of next noise floor calibration the baseband does. 309 * of next noise floor calibration the baseband does.
311 */ 310 */
312 ENABLE_REGWRITE_BUFFER(ah); 311 ENABLE_REG_RMW_BUFFER(ah);
313 for (i = 0; i < NUM_NF_READINGS; i++) { 312 for (i = 0; i < NUM_NF_READINGS; i++) {
314 if (chainmask & (1 << i)) { 313 if (chainmask & (1 << i)) {
315 if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan)) 314 if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
316 continue; 315 continue;
317 316
318 val = REG_READ(ah, ah->nf_regs[i]); 317 REG_RMW(ah, ah->nf_regs[i],
319 val &= 0xFFFFFE00; 318 (((u32) (-50) << 1) & 0x1ff), 0x1ff);
320 val |= (((u32) (-50) << 1) & 0x1ff);
321 REG_WRITE(ah, ah->nf_regs[i], val);
322 } 319 }
323 } 320 }
324 REGWRITE_BUFFER_FLUSH(ah); 321 REG_RMW_BUFFER_FLUSH(ah);
325 322
326 return 0; 323 return 0;
327} 324}
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 2b79a568e803..d23737342f4f 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -54,7 +54,7 @@ struct ath_beacon_config {
54 u16 dtim_period; 54 u16 dtim_period;
55 u16 bmiss_timeout; 55 u16 bmiss_timeout;
56 u8 dtim_count; 56 u8 dtim_count;
57 bool enable_beacon; 57 u8 enable_beacon;
58 bool ibss_creator; 58 bool ibss_creator;
59 u32 nexttbtt; 59 u32 nexttbtt;
60 u32 intval; 60 u32 intval;
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 726271c7c330..e98a9eaba7ff 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -126,8 +126,19 @@ ath9k_postprocess_radar_event(struct ath_softc *sc,
126 DFS_STAT_INC(sc, pulses_detected); 126 DFS_STAT_INC(sc, pulses_detected);
127 return true; 127 return true;
128} 128}
129#undef PRI_CH_RADAR_FOUND 129
130#undef EXT_CH_RADAR_FOUND 130static void
131ath9k_dfs_process_radar_pulse(struct ath_softc *sc, struct pulse_event *pe)
132{
133 struct dfs_pattern_detector *pd = sc->dfs_detector;
134 DFS_STAT_INC(sc, pulses_processed);
135 if (pd == NULL)
136 return;
137 if (!pd->add_pulse(pd, pe))
138 return;
139 DFS_STAT_INC(sc, radar_detected);
140 ieee80211_radar_detected(sc->hw);
141}
131 142
132/* 143/*
133 * DFS: check PHY-error for radar pulse and feed the detector 144 * DFS: check PHY-error for radar pulse and feed the detector
@@ -176,18 +187,21 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
176 ard.pulse_length_pri = vdata_end[-3]; 187 ard.pulse_length_pri = vdata_end[-3];
177 pe.freq = ah->curchan->channel; 188 pe.freq = ah->curchan->channel;
178 pe.ts = mactime; 189 pe.ts = mactime;
179 if (ath9k_postprocess_radar_event(sc, &ard, &pe)) { 190 if (!ath9k_postprocess_radar_event(sc, &ard, &pe))
180 struct dfs_pattern_detector *pd = sc->dfs_detector; 191 return;
181 ath_dbg(common, DFS, 192
182 "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, " 193 ath_dbg(common, DFS,
183 "width=%d, rssi=%d, delta_ts=%llu\n", 194 "ath9k_dfs_process_phyerr: type=%d, freq=%d, ts=%llu, "
184 pe.freq, pe.ts, pe.width, pe.rssi, 195 "width=%d, rssi=%d, delta_ts=%llu\n",
185 pe.ts - sc->dfs_prev_pulse_ts); 196 ard.pulse_bw_info, pe.freq, pe.ts, pe.width, pe.rssi,
186 sc->dfs_prev_pulse_ts = pe.ts; 197 pe.ts - sc->dfs_prev_pulse_ts);
187 DFS_STAT_INC(sc, pulses_processed); 198 sc->dfs_prev_pulse_ts = pe.ts;
188 if (pd != NULL && pd->add_pulse(pd, &pe)) { 199 if (ard.pulse_bw_info & PRI_CH_RADAR_FOUND)
189 DFS_STAT_INC(sc, radar_detected); 200 ath9k_dfs_process_radar_pulse(sc, &pe);
190 ieee80211_radar_detected(sc->hw); 201 if (ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
191 } 202 pe.freq += IS_CHAN_HT40PLUS(ah->curchan) ? 20 : -20;
203 ath9k_dfs_process_radar_pulse(sc, &pe);
192 } 204 }
193} 205}
206#undef PRI_CH_RADAR_FOUND
207#undef EXT_CH_RADAR_FOUND
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index 971d770722cf..cc81482c934d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -27,12 +27,7 @@ void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val)
27void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask, 27void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
28 u32 shift, u32 val) 28 u32 shift, u32 val)
29{ 29{
30 u32 regVal; 30 REG_RMW(ah, reg, ((val << shift) & mask), mask);
31
32 regVal = REG_READ(ah, reg) & ~mask;
33 regVal |= (val << shift) & mask;
34
35 REG_WRITE(ah, reg, regVal);
36 31
37 if (ah->config.analog_shiftreg) 32 if (ah->config.analog_shiftreg)
38 udelay(100); 33 udelay(100);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index e5a78d4fd66e..4773da6dc6f2 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -389,6 +389,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
389 } 389 }
390 } 390 }
391 391
392 ENABLE_REG_RMW_BUFFER(ah);
392 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN, 393 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN,
393 (numXpdGain - 1) & 0x3); 394 (numXpdGain - 1) & 0x3);
394 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1, 395 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1,
@@ -396,6 +397,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
396 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2, 397 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2,
397 xpdGainValues[1]); 398 xpdGainValues[1]);
398 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3, 0); 399 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3, 0);
400 REG_RMW_BUFFER_FLUSH(ah);
399 401
400 for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) { 402 for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) {
401 regChainOffset = i * 0x1000; 403 regChainOffset = i * 0x1000;
@@ -770,15 +772,14 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
770 struct ar5416_eeprom_4k *eep, 772 struct ar5416_eeprom_4k *eep,
771 u8 txRxAttenLocal) 773 u8 txRxAttenLocal)
772{ 774{
773 REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0, 775 ENABLE_REG_RMW_BUFFER(ah);
774 pModal->antCtrlChain[0]); 776 REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0,
777 pModal->antCtrlChain[0], 0);
775 778
776 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), 779 REG_RMW(ah, AR_PHY_TIMING_CTRL4(0),
777 (REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) & 780 SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
778 ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | 781 SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF),
779 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) | 782 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF);
780 SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
781 SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
782 783
783 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= 784 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
784 AR5416_EEP_MINOR_VER_3) { 785 AR5416_EEP_MINOR_VER_3) {
@@ -817,6 +818,7 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
817 AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); 818 AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
818 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000, 819 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000,
819 AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]); 820 AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]);
821 REG_RMW_BUFFER_FLUSH(ah);
820} 822}
821 823
822/* 824/*
@@ -928,6 +930,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
928 } 930 }
929 } 931 }
930 932
933 ENABLE_REG_RMW_BUFFER(ah);
931 if (AR_SREV_9271(ah)) { 934 if (AR_SREV_9271(ah)) {
932 ath9k_hw_analog_shift_rmw(ah, 935 ath9k_hw_analog_shift_rmw(ah,
933 AR9285_AN_RF2G3, 936 AR9285_AN_RF2G3,
@@ -1032,18 +1035,19 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
1032 AR9285_AN_RF2G4_DB2_4_S, 1035 AR9285_AN_RF2G4_DB2_4_S,
1033 db2[4]); 1036 db2[4]);
1034 } 1037 }
1038 REG_RMW_BUFFER_FLUSH(ah);
1035 1039
1036 1040 ENABLE_REG_RMW_BUFFER(ah);
1037 REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, 1041 REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
1038 pModal->switchSettling); 1042 pModal->switchSettling);
1039 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC, 1043 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
1040 pModal->adcDesiredSize); 1044 pModal->adcDesiredSize);
1041 1045
1042 REG_WRITE(ah, AR_PHY_RF_CTL4, 1046 REG_RMW(ah, AR_PHY_RF_CTL4,
1043 SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) | 1047 SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) |
1044 SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) | 1048 SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) |
1045 SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON) | 1049 SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON) |
1046 SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON)); 1050 SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON), 0);
1047 1051
1048 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON, 1052 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
1049 pModal->txEndToRxOn); 1053 pModal->txEndToRxOn);
@@ -1072,6 +1076,8 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
1072 pModal->swSettleHt40); 1076 pModal->swSettleHt40);
1073 } 1077 }
1074 1078
1079 REG_RMW_BUFFER_FLUSH(ah);
1080
1075 bb_desired_scale = (pModal->bb_scale_smrt_antenna & 1081 bb_desired_scale = (pModal->bb_scale_smrt_antenna &
1076 EEP_4K_BB_DESIRED_SCALE_MASK); 1082 EEP_4K_BB_DESIRED_SCALE_MASK);
1077 if ((pBase->txGainType == 0) && (bb_desired_scale != 0)) { 1083 if ((pBase->txGainType == 0) && (bb_desired_scale != 0)) {
@@ -1080,6 +1086,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
1080 mask = BIT(0)|BIT(5)|BIT(10)|BIT(15)|BIT(20)|BIT(25); 1086 mask = BIT(0)|BIT(5)|BIT(10)|BIT(15)|BIT(20)|BIT(25);
1081 pwrctrl = mask * bb_desired_scale; 1087 pwrctrl = mask * bb_desired_scale;
1082 clr = mask * 0x1f; 1088 clr = mask * 0x1f;
1089 ENABLE_REG_RMW_BUFFER(ah);
1083 REG_RMW(ah, AR_PHY_TX_PWRCTRL8, pwrctrl, clr); 1090 REG_RMW(ah, AR_PHY_TX_PWRCTRL8, pwrctrl, clr);
1084 REG_RMW(ah, AR_PHY_TX_PWRCTRL10, pwrctrl, clr); 1091 REG_RMW(ah, AR_PHY_TX_PWRCTRL10, pwrctrl, clr);
1085 REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL12, pwrctrl, clr); 1092 REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL12, pwrctrl, clr);
@@ -1094,6 +1101,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
1094 clr = mask * 0x1f; 1101 clr = mask * 0x1f;
1095 REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL11, pwrctrl, clr); 1102 REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL11, pwrctrl, clr);
1096 REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL13, pwrctrl, clr); 1103 REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL13, pwrctrl, clr);
1104 REG_RMW_BUFFER_FLUSH(ah);
1097 } 1105 }
1098} 1106}
1099 1107
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 098059039351..056f516bf017 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -466,6 +466,7 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
466 struct ar5416_eeprom_def *eep, 466 struct ar5416_eeprom_def *eep,
467 u8 txRxAttenLocal, int regChainOffset, int i) 467 u8 txRxAttenLocal, int regChainOffset, int i)
468{ 468{
469 ENABLE_REG_RMW_BUFFER(ah);
469 if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) { 470 if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) {
470 txRxAttenLocal = pModal->txRxAttenCh[i]; 471 txRxAttenLocal = pModal->txRxAttenCh[i];
471 472
@@ -483,16 +484,12 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
483 AR_PHY_GAIN_2GHZ_XATTEN2_DB, 484 AR_PHY_GAIN_2GHZ_XATTEN2_DB,
484 pModal->xatten2Db[i]); 485 pModal->xatten2Db[i]);
485 } else { 486 } else {
486 REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset, 487 REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
487 (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) & 488 SM(pModal-> bswMargin[i], AR_PHY_GAIN_2GHZ_BSW_MARGIN),
488 ~AR_PHY_GAIN_2GHZ_BSW_MARGIN) 489 AR_PHY_GAIN_2GHZ_BSW_MARGIN);
489 | SM(pModal-> bswMargin[i], 490 REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
490 AR_PHY_GAIN_2GHZ_BSW_MARGIN)); 491 SM(pModal->bswAtten[i], AR_PHY_GAIN_2GHZ_BSW_ATTEN),
491 REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset, 492 AR_PHY_GAIN_2GHZ_BSW_ATTEN);
492 (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
493 ~AR_PHY_GAIN_2GHZ_BSW_ATTEN)
494 | SM(pModal->bswAtten[i],
495 AR_PHY_GAIN_2GHZ_BSW_ATTEN));
496 } 493 }
497 } 494 }
498 495
@@ -504,17 +501,14 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
504 AR_PHY_RXGAIN + regChainOffset, 501 AR_PHY_RXGAIN + regChainOffset,
505 AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[i]); 502 AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[i]);
506 } else { 503 } else {
507 REG_WRITE(ah, 504 REG_RMW(ah, AR_PHY_RXGAIN + regChainOffset,
508 AR_PHY_RXGAIN + regChainOffset, 505 SM(txRxAttenLocal, AR_PHY_RXGAIN_TXRX_ATTEN),
509 (REG_READ(ah, AR_PHY_RXGAIN + regChainOffset) & 506 AR_PHY_RXGAIN_TXRX_ATTEN);
510 ~AR_PHY_RXGAIN_TXRX_ATTEN) 507 REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
511 | SM(txRxAttenLocal, AR_PHY_RXGAIN_TXRX_ATTEN)); 508 SM(pModal->rxTxMarginCh[i], AR_PHY_GAIN_2GHZ_RXTX_MARGIN),
512 REG_WRITE(ah, 509 AR_PHY_GAIN_2GHZ_RXTX_MARGIN);
513 AR_PHY_GAIN_2GHZ + regChainOffset,
514 (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) &
515 ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) |
516 SM(pModal->rxTxMarginCh[i], AR_PHY_GAIN_2GHZ_RXTX_MARGIN));
517 } 510 }
511 REG_RMW_BUFFER_FLUSH(ah);
518} 512}
519 513
520static void ath9k_hw_def_set_board_values(struct ath_hw *ah, 514static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 300d3671d0ef..e82a0d4ce23f 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -444,6 +444,10 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
444#define OP_BT_SCAN BIT(4) 444#define OP_BT_SCAN BIT(4)
445#define OP_TSF_RESET BIT(6) 445#define OP_TSF_RESET BIT(6)
446 446
447enum htc_op_flags {
448 HTC_FWFLAG_NO_RMW,
449};
450
447struct ath9k_htc_priv { 451struct ath9k_htc_priv {
448 struct device *dev; 452 struct device *dev;
449 struct ieee80211_hw *hw; 453 struct ieee80211_hw *hw;
@@ -482,6 +486,7 @@ struct ath9k_htc_priv {
482 bool reconfig_beacon; 486 bool reconfig_beacon;
483 unsigned int rxfilter; 487 unsigned int rxfilter;
484 unsigned long op_flags; 488 unsigned long op_flags;
489 unsigned long fw_flags;
485 490
486 struct ath9k_hw_cal_data caldata; 491 struct ath9k_hw_cal_data caldata;
487 struct ath_spec_scan_priv spec_priv; 492 struct ath_spec_scan_priv spec_priv;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index fd229409f676..d7beefe60683 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -376,17 +376,139 @@ static void ath9k_regwrite_flush(void *hw_priv)
376 mutex_unlock(&priv->wmi->multi_write_mutex); 376 mutex_unlock(&priv->wmi->multi_write_mutex);
377} 377}
378 378
379static u32 ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr) 379static void ath9k_reg_rmw_buffer(void *hw_priv,
380 u32 reg_offset, u32 set, u32 clr)
381{
382 struct ath_hw *ah = (struct ath_hw *) hw_priv;
383 struct ath_common *common = ath9k_hw_common(ah);
384 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
385 u32 rsp_status;
386 int r;
387
388 mutex_lock(&priv->wmi->multi_rmw_mutex);
389
390 /* Store the register/value */
391 priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].reg =
392 cpu_to_be32(reg_offset);
393 priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].set =
394 cpu_to_be32(set);
395 priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].clr =
396 cpu_to_be32(clr);
397
398 priv->wmi->multi_rmw_idx++;
399
400 /* If the buffer is full, send it out. */
401 if (priv->wmi->multi_rmw_idx == MAX_RMW_CMD_NUMBER) {
402 r = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID,
403 (u8 *) &priv->wmi->multi_rmw,
404 sizeof(struct register_write) * priv->wmi->multi_rmw_idx,
405 (u8 *) &rsp_status, sizeof(rsp_status),
406 100);
407 if (unlikely(r)) {
408 ath_dbg(common, WMI,
409 "REGISTER RMW FAILED, multi len: %d\n",
410 priv->wmi->multi_rmw_idx);
411 }
412 priv->wmi->multi_rmw_idx = 0;
413 }
414
415 mutex_unlock(&priv->wmi->multi_rmw_mutex);
416}
417
418static void ath9k_reg_rmw_flush(void *hw_priv)
380{ 419{
381 u32 val; 420 struct ath_hw *ah = (struct ath_hw *) hw_priv;
421 struct ath_common *common = ath9k_hw_common(ah);
422 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
423 u32 rsp_status;
424 int r;
425
426 if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags))
427 return;
428
429 atomic_dec(&priv->wmi->m_rmw_cnt);
382 430
383 val = ath9k_regread(hw_priv, reg_offset); 431 mutex_lock(&priv->wmi->multi_rmw_mutex);
384 val &= ~clr; 432
385 val |= set; 433 if (priv->wmi->multi_rmw_idx) {
386 ath9k_regwrite(hw_priv, val, reg_offset); 434 r = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID,
435 (u8 *) &priv->wmi->multi_rmw,
436 sizeof(struct register_rmw) * priv->wmi->multi_rmw_idx,
437 (u8 *) &rsp_status, sizeof(rsp_status),
438 100);
439 if (unlikely(r)) {
440 ath_dbg(common, WMI,
441 "REGISTER RMW FAILED, multi len: %d\n",
442 priv->wmi->multi_rmw_idx);
443 }
444 priv->wmi->multi_rmw_idx = 0;
445 }
446
447 mutex_unlock(&priv->wmi->multi_rmw_mutex);
448}
449
450static void ath9k_enable_rmw_buffer(void *hw_priv)
451{
452 struct ath_hw *ah = (struct ath_hw *) hw_priv;
453 struct ath_common *common = ath9k_hw_common(ah);
454 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
455
456 if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags))
457 return;
458
459 atomic_inc(&priv->wmi->m_rmw_cnt);
460}
461
462static u32 ath9k_reg_rmw_single(void *hw_priv,
463 u32 reg_offset, u32 set, u32 clr)
464{
465 struct ath_hw *ah = (struct ath_hw *) hw_priv;
466 struct ath_common *common = ath9k_hw_common(ah);
467 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
468 struct register_rmw buf, buf_ret;
469 int ret;
470 u32 val = 0;
471
472 buf.reg = cpu_to_be32(reg_offset);
473 buf.set = cpu_to_be32(set);
474 buf.clr = cpu_to_be32(clr);
475
476 ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID,
477 (u8 *) &buf, sizeof(buf),
478 (u8 *) &buf_ret, sizeof(buf_ret),
479 100);
480 if (unlikely(ret)) {
481 ath_dbg(common, WMI, "REGISTER RMW FAILED:(0x%04x, %d)\n",
482 reg_offset, ret);
483 }
387 return val; 484 return val;
388} 485}
389 486
487static u32 ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
488{
489 struct ath_hw *ah = (struct ath_hw *) hw_priv;
490 struct ath_common *common = ath9k_hw_common(ah);
491 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
492
493 if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags)) {
494 u32 val;
495
496 val = REG_READ(ah, reg_offset);
497 val &= ~clr;
498 val |= set;
499 REG_WRITE(ah, reg_offset, val);
500
501 return 0;
502 }
503
504 if (atomic_read(&priv->wmi->m_rmw_cnt))
505 ath9k_reg_rmw_buffer(hw_priv, reg_offset, set, clr);
506 else
507 ath9k_reg_rmw_single(hw_priv, reg_offset, set, clr);
508
509 return 0;
510}
511
390static void ath_usb_read_cachesize(struct ath_common *common, int *csz) 512static void ath_usb_read_cachesize(struct ath_common *common, int *csz)
391{ 513{
392 *csz = L1_CACHE_BYTES >> 2; 514 *csz = L1_CACHE_BYTES >> 2;
@@ -501,6 +623,8 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
501 ah->reg_ops.write = ath9k_regwrite; 623 ah->reg_ops.write = ath9k_regwrite;
502 ah->reg_ops.enable_write_buffer = ath9k_enable_regwrite_buffer; 624 ah->reg_ops.enable_write_buffer = ath9k_enable_regwrite_buffer;
503 ah->reg_ops.write_flush = ath9k_regwrite_flush; 625 ah->reg_ops.write_flush = ath9k_regwrite_flush;
626 ah->reg_ops.enable_rmw_buffer = ath9k_enable_rmw_buffer;
627 ah->reg_ops.rmw_flush = ath9k_reg_rmw_flush;
504 ah->reg_ops.rmw = ath9k_reg_rmw; 628 ah->reg_ops.rmw = ath9k_reg_rmw;
505 priv->ah = ah; 629 priv->ah = ah;
506 630
@@ -686,6 +810,12 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
686 return -EINVAL; 810 return -EINVAL;
687 } 811 }
688 812
813 if (priv->fw_version_major == 1 && priv->fw_version_minor < 4)
814 set_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags);
815
816 dev_info(priv->dev, "FW RMW support: %s\n",
817 test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags) ? "Off" : "On");
818
689 return 0; 819 return 0;
690} 820}
691 821
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 88769b64b20b..232339b05540 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -108,6 +108,14 @@ static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
108 ath9k_hw_ops(ah)->set_bt_ant_diversity(ah, enable); 108 ath9k_hw_ops(ah)->set_bt_ant_diversity(ah, enable);
109} 109}
110 110
111static inline bool ath9k_hw_is_aic_enabled(struct ath_hw *ah)
112{
113 if (ath9k_hw_private_ops(ah)->is_aic_enabled)
114 return ath9k_hw_private_ops(ah)->is_aic_enabled(ah);
115
116 return false;
117}
118
111#endif 119#endif
112 120
113/* Private hardware call ops */ 121/* Private hardware call ops */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 60aa8d71e753..5cdbdb038371 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -121,6 +121,36 @@ void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array,
121 REGWRITE_BUFFER_FLUSH(ah); 121 REGWRITE_BUFFER_FLUSH(ah);
122} 122}
123 123
124void ath9k_hw_read_array(struct ath_hw *ah, u32 array[][2], int size)
125{
126 u32 *tmp_reg_list, *tmp_data;
127 int i;
128
129 tmp_reg_list = kmalloc(size * sizeof(u32), GFP_KERNEL);
130 if (!tmp_reg_list) {
131 dev_err(ah->dev, "%s: tmp_reg_list: alloc filed\n", __func__);
132 return;
133 }
134
135 tmp_data = kmalloc(size * sizeof(u32), GFP_KERNEL);
136 if (!tmp_data) {
137 dev_err(ah->dev, "%s tmp_data: alloc filed\n", __func__);
138 goto error_tmp_data;
139 }
140
141 for (i = 0; i < size; i++)
142 tmp_reg_list[i] = array[i][0];
143
144 REG_READ_MULTI(ah, tmp_reg_list, tmp_data, size);
145
146 for (i = 0; i < size; i++)
147 array[i][1] = tmp_data[i];
148
149 kfree(tmp_data);
150error_tmp_data:
151 kfree(tmp_reg_list);
152}
153
124u32 ath9k_hw_reverse_bits(u32 val, u32 n) 154u32 ath9k_hw_reverse_bits(u32 val, u32 n)
125{ 155{
126 u32 retval; 156 u32 retval;
@@ -366,6 +396,9 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
366 ah->config.rimt_first = 700; 396 ah->config.rimt_first = 700;
367 } 397 }
368 398
399 if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
400 ah->config.pll_pwrsave = 7;
401
369 /* 402 /*
370 * We need this for PCI devices only (Cardbus, PCI, miniPCI) 403 * We need this for PCI devices only (Cardbus, PCI, miniPCI)
371 * _and_ if on non-uniprocessor systems (Multiprocessor/HT). 404 * _and_ if on non-uniprocessor systems (Multiprocessor/HT).
@@ -424,7 +457,7 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
424 ah->power_mode = ATH9K_PM_UNDEFINED; 457 ah->power_mode = ATH9K_PM_UNDEFINED;
425 ah->htc_reset_init = true; 458 ah->htc_reset_init = true;
426 459
427 ah->tpc_enabled = true; 460 ah->tpc_enabled = false;
428 461
429 ah->ani_function = ATH9K_ANI_ALL; 462 ah->ani_function = ATH9K_ANI_ALL;
430 if (!AR_SREV_9300_20_OR_LATER(ah)) 463 if (!AR_SREV_9300_20_OR_LATER(ah))
@@ -1197,6 +1230,7 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
1197 u32 mask = AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC; 1230 u32 mask = AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC;
1198 u32 set = AR_STA_ID1_KSRCH_MODE; 1231 u32 set = AR_STA_ID1_KSRCH_MODE;
1199 1232
1233 ENABLE_REG_RMW_BUFFER(ah);
1200 switch (opmode) { 1234 switch (opmode) {
1201 case NL80211_IFTYPE_ADHOC: 1235 case NL80211_IFTYPE_ADHOC:
1202 if (!AR_SREV_9340_13(ah)) { 1236 if (!AR_SREV_9340_13(ah)) {
@@ -1218,6 +1252,7 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
1218 break; 1252 break;
1219 } 1253 }
1220 REG_RMW(ah, AR_STA_ID1, set, mask); 1254 REG_RMW(ah, AR_STA_ID1, set, mask);
1255 REG_RMW_BUFFER_FLUSH(ah);
1221} 1256}
1222 1257
1223void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled, 1258void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
@@ -1930,6 +1965,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1930 if (!ath9k_hw_mci_is_enabled(ah)) 1965 if (!ath9k_hw_mci_is_enabled(ah))
1931 REG_WRITE(ah, AR_OBS, 8); 1966 REG_WRITE(ah, AR_OBS, 8);
1932 1967
1968 ENABLE_REG_RMW_BUFFER(ah);
1933 if (ah->config.rx_intr_mitigation) { 1969 if (ah->config.rx_intr_mitigation) {
1934 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, ah->config.rimt_last); 1970 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, ah->config.rimt_last);
1935 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, ah->config.rimt_first); 1971 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, ah->config.rimt_first);
@@ -1939,6 +1975,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1939 REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300); 1975 REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300);
1940 REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750); 1976 REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750);
1941 } 1977 }
1978 REG_RMW_BUFFER_FLUSH(ah);
1942 1979
1943 ath9k_hw_init_bb(ah, chan); 1980 ath9k_hw_init_bb(ah, chan);
1944 1981
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 29a25d92add7..92fab1a54697 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -100,6 +100,18 @@
100 (_ah)->reg_ops.write_flush((_ah)); \ 100 (_ah)->reg_ops.write_flush((_ah)); \
101 } while (0) 101 } while (0)
102 102
103#define ENABLE_REG_RMW_BUFFER(_ah) \
104 do { \
105 if ((_ah)->reg_ops.enable_rmw_buffer) \
106 (_ah)->reg_ops.enable_rmw_buffer((_ah)); \
107 } while (0)
108
109#define REG_RMW_BUFFER_FLUSH(_ah) \
110 do { \
111 if ((_ah)->reg_ops.rmw_flush) \
112 (_ah)->reg_ops.rmw_flush((_ah)); \
113 } while (0)
114
103#define PR_EEP(_s, _val) \ 115#define PR_EEP(_s, _val) \
104 do { \ 116 do { \
105 len += scnprintf(buf + len, size - len, "%20s : %10d\n",\ 117 len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
@@ -126,6 +138,8 @@
126 138
127#define REG_WRITE_ARRAY(iniarray, column, regWr) \ 139#define REG_WRITE_ARRAY(iniarray, column, regWr) \
128 ath9k_hw_write_array(ah, iniarray, column, &(regWr)) 140 ath9k_hw_write_array(ah, iniarray, column, &(regWr))
141#define REG_READ_ARRAY(ah, array, size) \
142 ath9k_hw_read_array(ah, array, size)
129 143
130#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0 144#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0
131#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1 145#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
@@ -309,6 +323,12 @@ enum ath9k_hw_hang_checks {
309 HW_MAC_HANG = BIT(5), 323 HW_MAC_HANG = BIT(5),
310}; 324};
311 325
326#define AR_PCIE_PLL_PWRSAVE_CONTROL BIT(0)
327#define AR_PCIE_PLL_PWRSAVE_ON_D3 BIT(1)
328#define AR_PCIE_PLL_PWRSAVE_ON_D0 BIT(2)
329#define AR_PCIE_CDR_PWRSAVE_ON_D3 BIT(3)
330#define AR_PCIE_CDR_PWRSAVE_ON_D0 BIT(4)
331
312struct ath9k_ops_config { 332struct ath9k_ops_config {
313 int dma_beacon_response_time; 333 int dma_beacon_response_time;
314 int sw_beacon_response_time; 334 int sw_beacon_response_time;
@@ -335,7 +355,7 @@ struct ath9k_ops_config {
335 u32 ant_ctrl_comm2g_switch_enable; 355 u32 ant_ctrl_comm2g_switch_enable;
336 bool xatten_margin_cfg; 356 bool xatten_margin_cfg;
337 bool alt_mingainidx; 357 bool alt_mingainidx;
338 bool no_pll_pwrsave; 358 u8 pll_pwrsave;
339 bool tx_gain_buffalo; 359 bool tx_gain_buffalo;
340 bool led_active_high; 360 bool led_active_high;
341}; 361};
@@ -647,6 +667,10 @@ struct ath_hw_private_ops {
647 667
648 /* ANI */ 668 /* ANI */
649 void (*ani_cache_ini_regs)(struct ath_hw *ah); 669 void (*ani_cache_ini_regs)(struct ath_hw *ah);
670
671#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
672 bool (*is_aic_enabled)(struct ath_hw *ah);
673#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
650}; 674};
651 675
652/** 676/**
@@ -1008,6 +1032,7 @@ void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
1008bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout); 1032bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout);
1009void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array, 1033void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array,
1010 int column, unsigned int *writecnt); 1034 int column, unsigned int *writecnt);
1035void ath9k_hw_read_array(struct ath_hw *ah, u32 array[][2], int size);
1011u32 ath9k_hw_reverse_bits(u32 val, u32 n); 1036u32 ath9k_hw_reverse_bits(u32 val, u32 n);
1012u16 ath9k_hw_computetxtime(struct ath_hw *ah, 1037u16 ath9k_hw_computetxtime(struct ath_hw *ah,
1013 u8 phy, int kbps, 1038 u8 phy, int kbps,
@@ -1117,6 +1142,7 @@ void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us);
1117void ath9k_hw_setslottime(struct ath_hw *ah, u32 us); 1142void ath9k_hw_setslottime(struct ath_hw *ah, u32 us);
1118 1143
1119#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 1144#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
1145void ar9003_hw_attach_aic_ops(struct ath_hw *ah);
1120static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah) 1146static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
1121{ 1147{
1122 return ah->btcoex_hw.enabled; 1148 return ah->btcoex_hw.enabled;
@@ -1134,6 +1160,9 @@ ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
1134 return ah->btcoex_hw.scheme; 1160 return ah->btcoex_hw.scheme;
1135} 1161}
1136#else 1162#else
1163static inline void ar9003_hw_attach_aic_ops(struct ath_hw *ah)
1164{
1165}
1137static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah) 1166static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
1138{ 1167{
1139 return false; 1168 return false;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 6c6e88495394..f8d11efa7b0f 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -141,6 +141,16 @@ static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
141 return val; 141 return val;
142} 142}
143 143
144static void ath9k_multi_ioread32(void *hw_priv, u32 *addr,
145 u32 *val, u16 count)
146{
147 int i;
148
149 for (i = 0; i < count; i++)
150 val[i] = ath9k_ioread32(hw_priv, addr[i]);
151}
152
153
144static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset, 154static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
145 u32 set, u32 clr) 155 u32 set, u32 clr)
146{ 156{
@@ -437,8 +447,15 @@ static void ath9k_init_pcoem_platform(struct ath_softc *sc)
437 ath_info(common, "Enable WAR for ASPM D3/L1\n"); 447 ath_info(common, "Enable WAR for ASPM D3/L1\n");
438 } 448 }
439 449
450 /*
451 * The default value of pll_pwrsave is 1.
452 * For certain AR9485 cards, it is set to 0.
453 * For AR9462, AR9565 it's set to 7.
454 */
455 ah->config.pll_pwrsave = 1;
456
440 if (sc->driver_data & ATH9K_PCI_NO_PLL_PWRSAVE) { 457 if (sc->driver_data & ATH9K_PCI_NO_PLL_PWRSAVE) {
441 ah->config.no_pll_pwrsave = true; 458 ah->config.pll_pwrsave = 0;
442 ath_info(common, "Disable PLL PowerSave\n"); 459 ath_info(common, "Disable PLL PowerSave\n");
443 } 460 }
444 461
@@ -530,6 +547,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
530 ah->hw = sc->hw; 547 ah->hw = sc->hw;
531 ah->hw_version.devid = devid; 548 ah->hw_version.devid = devid;
532 ah->reg_ops.read = ath9k_ioread32; 549 ah->reg_ops.read = ath9k_ioread32;
550 ah->reg_ops.multi_read = ath9k_multi_ioread32;
533 ah->reg_ops.write = ath9k_iowrite32; 551 ah->reg_ops.write = ath9k_iowrite32;
534 ah->reg_ops.rmw = ath9k_reg_rmw; 552 ah->reg_ops.rmw = ath9k_reg_rmw;
535 pCap = &ah->caps; 553 pCap = &ah->caps;
@@ -763,7 +781,8 @@ static const struct ieee80211_iface_combination if_comb[] = {
763 .num_different_channels = 1, 781 .num_different_channels = 1,
764 .beacon_int_infra_match = true, 782 .beacon_int_infra_match = true,
765 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 783 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
766 BIT(NL80211_CHAN_WIDTH_20), 784 BIT(NL80211_CHAN_WIDTH_20) |
785 BIT(NL80211_CHAN_WIDTH_40),
767 } 786 }
768#endif 787#endif
769}; 788};
diff --git a/drivers/net/wireless/ath/ath9k/reg_aic.h b/drivers/net/wireless/ath/ath9k/reg_aic.h
new file mode 100644
index 000000000000..955147ab48a2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/reg_aic.h
@@ -0,0 +1,168 @@
1/*
2 * Copyright (c) 2015 Qualcomm Atheros Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef REG_AIC_H
18#define REG_AIC_H
19
20#define AR_SM_BASE 0xa200
21#define AR_SM1_BASE 0xb200
22#define AR_AGC_BASE 0x9e00
23
24#define AR_PHY_AIC_CTRL_0_B0 (AR_SM_BASE + 0x4b0)
25#define AR_PHY_AIC_CTRL_1_B0 (AR_SM_BASE + 0x4b4)
26#define AR_PHY_AIC_CTRL_2_B0 (AR_SM_BASE + 0x4b8)
27#define AR_PHY_AIC_CTRL_3_B0 (AR_SM_BASE + 0x4bc)
28#define AR_PHY_AIC_CTRL_4_B0 (AR_SM_BASE + 0x4c0)
29
30#define AR_PHY_AIC_STAT_0_B0 (AR_SM_BASE + 0x4c4)
31#define AR_PHY_AIC_STAT_1_B0 (AR_SM_BASE + 0x4c8)
32#define AR_PHY_AIC_STAT_2_B0 (AR_SM_BASE + 0x4cc)
33
34#define AR_PHY_AIC_CTRL_0_B1 (AR_SM1_BASE + 0x4b0)
35#define AR_PHY_AIC_CTRL_1_B1 (AR_SM1_BASE + 0x4b4)
36#define AR_PHY_AIC_CTRL_4_B1 (AR_SM1_BASE + 0x4c0)
37
38#define AR_PHY_AIC_STAT_0_B1 (AR_SM1_BASE + 0x4c4)
39#define AR_PHY_AIC_STAT_1_B1 (AR_SM1_BASE + 0x4c8)
40#define AR_PHY_AIC_STAT_2_B1 (AR_SM1_BASE + 0x4cc)
41
42#define AR_PHY_AIC_SRAM_ADDR_B0 (AR_SM_BASE + 0x5f0)
43#define AR_PHY_AIC_SRAM_DATA_B0 (AR_SM_BASE + 0x5f4)
44
45#define AR_PHY_AIC_SRAM_ADDR_B1 (AR_SM1_BASE + 0x5f0)
46#define AR_PHY_AIC_SRAM_DATA_B1 (AR_SM1_BASE + 0x5f4)
47
48#define AR_PHY_BT_COEX_4 (AR_AGC_BASE + 0x60)
49#define AR_PHY_BT_COEX_5 (AR_AGC_BASE + 0x64)
50
51/* AIC fields */
52#define AR_PHY_AIC_MON_ENABLE 0x80000000
53#define AR_PHY_AIC_MON_ENABLE_S 31
54#define AR_PHY_AIC_CAL_MAX_HOP_COUNT 0x7F000000
55#define AR_PHY_AIC_CAL_MAX_HOP_COUNT_S 24
56#define AR_PHY_AIC_CAL_MIN_VALID_COUNT 0x00FE0000
57#define AR_PHY_AIC_CAL_MIN_VALID_COUNT_S 17
58#define AR_PHY_AIC_F_WLAN 0x0001FC00
59#define AR_PHY_AIC_F_WLAN_S 10
60#define AR_PHY_AIC_CAL_CH_VALID_RESET 0x00000200
61#define AR_PHY_AIC_CAL_CH_VALID_RESET_S 9
62#define AR_PHY_AIC_CAL_ENABLE 0x00000100
63#define AR_PHY_AIC_CAL_ENABLE_S 8
64#define AR_PHY_AIC_BTTX_PWR_THR 0x000000FE
65#define AR_PHY_AIC_BTTX_PWR_THR_S 1
66#define AR_PHY_AIC_ENABLE 0x00000001
67#define AR_PHY_AIC_ENABLE_S 0
68#define AR_PHY_AIC_CAL_BT_REF_DELAY 0x00F00000
69#define AR_PHY_AIC_CAL_BT_REF_DELAY_S 20
70#define AR_PHY_AIC_BT_IDLE_CFG 0x00080000
71#define AR_PHY_AIC_BT_IDLE_CFG_S 19
72#define AR_PHY_AIC_STDBY_COND 0x00060000
73#define AR_PHY_AIC_STDBY_COND_S 17
74#define AR_PHY_AIC_STDBY_ROT_ATT_DB 0x0001F800
75#define AR_PHY_AIC_STDBY_ROT_ATT_DB_S 11
76#define AR_PHY_AIC_STDBY_COM_ATT_DB 0x00000700
77#define AR_PHY_AIC_STDBY_COM_ATT_DB_S 8
78#define AR_PHY_AIC_RSSI_MAX 0x000000F0
79#define AR_PHY_AIC_RSSI_MAX_S 4
80#define AR_PHY_AIC_RSSI_MIN 0x0000000F
81#define AR_PHY_AIC_RSSI_MIN_S 0
82#define AR_PHY_AIC_RADIO_DELAY 0x7F000000
83#define AR_PHY_AIC_RADIO_DELAY_S 24
84#define AR_PHY_AIC_CAL_STEP_SIZE_CORR 0x00F00000
85#define AR_PHY_AIC_CAL_STEP_SIZE_CORR_S 20
86#define AR_PHY_AIC_CAL_ROT_IDX_CORR 0x000F8000
87#define AR_PHY_AIC_CAL_ROT_IDX_CORR_S 15
88#define AR_PHY_AIC_CAL_CONV_CHECK_FACTOR 0x00006000
89#define AR_PHY_AIC_CAL_CONV_CHECK_FACTOR_S 13
90#define AR_PHY_AIC_ROT_IDX_COUNT_MAX 0x00001C00
91#define AR_PHY_AIC_ROT_IDX_COUNT_MAX_S 10
92#define AR_PHY_AIC_CAL_SYNTH_TOGGLE 0x00000200
93#define AR_PHY_AIC_CAL_SYNTH_TOGGLE_S 9
94#define AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX 0x00000100
95#define AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX_S 8
96#define AR_PHY_AIC_CAL_SYNTH_SETTLING 0x000000FF
97#define AR_PHY_AIC_CAL_SYNTH_SETTLING_S 0
98#define AR_PHY_AIC_MON_MAX_HOP_COUNT 0x07F00000
99#define AR_PHY_AIC_MON_MAX_HOP_COUNT_S 20
100#define AR_PHY_AIC_MON_MIN_STALE_COUNT 0x000FE000
101#define AR_PHY_AIC_MON_MIN_STALE_COUNT_S 13
102#define AR_PHY_AIC_MON_PWR_EST_LONG 0x00001000
103#define AR_PHY_AIC_MON_PWR_EST_LONG_S 12
104#define AR_PHY_AIC_MON_PD_TALLY_SCALING 0x00000C00
105#define AR_PHY_AIC_MON_PD_TALLY_SCALING_S 10
106#define AR_PHY_AIC_MON_PERF_THR 0x000003E0
107#define AR_PHY_AIC_MON_PERF_THR_S 5
108#define AR_PHY_AIC_CAL_TARGET_MAG_SETTING 0x00000018
109#define AR_PHY_AIC_CAL_TARGET_MAG_SETTING_S 3
110#define AR_PHY_AIC_CAL_PERF_CHECK_FACTOR 0x00000006
111#define AR_PHY_AIC_CAL_PERF_CHECK_FACTOR_S 1
112#define AR_PHY_AIC_CAL_PWR_EST_LONG 0x00000001
113#define AR_PHY_AIC_CAL_PWR_EST_LONG_S 0
114#define AR_PHY_AIC_MON_DONE 0x80000000
115#define AR_PHY_AIC_MON_DONE_S 31
116#define AR_PHY_AIC_MON_ACTIVE 0x40000000
117#define AR_PHY_AIC_MON_ACTIVE_S 30
118#define AR_PHY_AIC_MEAS_COUNT 0x3F000000
119#define AR_PHY_AIC_MEAS_COUNT_S 24
120#define AR_PHY_AIC_CAL_ANT_ISO_EST 0x00FC0000
121#define AR_PHY_AIC_CAL_ANT_ISO_EST_S 18
122#define AR_PHY_AIC_CAL_HOP_COUNT 0x0003F800
123#define AR_PHY_AIC_CAL_HOP_COUNT_S 11
124#define AR_PHY_AIC_CAL_VALID_COUNT 0x000007F0
125#define AR_PHY_AIC_CAL_VALID_COUNT_S 4
126#define AR_PHY_AIC_CAL_BT_TOO_WEAK_ERR 0x00000008
127#define AR_PHY_AIC_CAL_BT_TOO_WEAK_ERR_S 3
128#define AR_PHY_AIC_CAL_BT_TOO_STRONG_ERR 0x00000004
129#define AR_PHY_AIC_CAL_BT_TOO_STRONG_ERR_S 2
130#define AR_PHY_AIC_CAL_DONE 0x00000002
131#define AR_PHY_AIC_CAL_DONE_S 1
132#define AR_PHY_AIC_CAL_ACTIVE 0x00000001
133#define AR_PHY_AIC_CAL_ACTIVE_S 0
134
135#define AR_PHY_AIC_MEAS_MAG_MIN 0xFFC00000
136#define AR_PHY_AIC_MEAS_MAG_MIN_S 22
137#define AR_PHY_AIC_MON_STALE_COUNT 0x003F8000
138#define AR_PHY_AIC_MON_STALE_COUNT_S 15
139#define AR_PHY_AIC_MON_HOP_COUNT 0x00007F00
140#define AR_PHY_AIC_MON_HOP_COUNT_S 8
141#define AR_PHY_AIC_CAL_AIC_SM 0x000000F8
142#define AR_PHY_AIC_CAL_AIC_SM_S 3
143#define AR_PHY_AIC_SM 0x00000007
144#define AR_PHY_AIC_SM_S 0
145#define AR_PHY_AIC_SRAM_VALID 0x00000001
146#define AR_PHY_AIC_SRAM_VALID_S 0
147#define AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB 0x0000007E
148#define AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB_S 1
149#define AR_PHY_AIC_SRAM_VGA_QUAD_SIGN 0x00000080
150#define AR_PHY_AIC_SRAM_VGA_QUAD_SIGN_S 7
151#define AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB 0x00003F00
152#define AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB_S 8
153#define AR_PHY_AIC_SRAM_VGA_DIR_SIGN 0x00004000
154#define AR_PHY_AIC_SRAM_VGA_DIR_SIGN_S 14
155#define AR_PHY_AIC_SRAM_COM_ATT_6DB 0x00038000
156#define AR_PHY_AIC_SRAM_COM_ATT_6DB_S 15
157#define AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO 0x0000E000
158#define AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO_S 13
159#define AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO 0x00001E00
160#define AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO_S 9
161#define AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING 0x000001F8
162#define AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING_S 3
163#define AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF 0x00000006
164#define AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF_S 1
165#define AR_PHY_AIC_CAL_COM_ATT_DB_FIXED 0x00000001
166#define AR_PHY_AIC_CAL_COM_ATT_DB_FIXED_S 0
167
168#endif /* REG_AIC_H */
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 65c8894c5f81..67a2f8c88829 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -61,6 +61,8 @@ static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
61 return "WMI_REG_READ_CMDID"; 61 return "WMI_REG_READ_CMDID";
62 case WMI_REG_WRITE_CMDID: 62 case WMI_REG_WRITE_CMDID:
63 return "WMI_REG_WRITE_CMDID"; 63 return "WMI_REG_WRITE_CMDID";
64 case WMI_REG_RMW_CMDID:
65 return "WMI_REG_RMW_CMDID";
64 case WMI_RC_STATE_CHANGE_CMDID: 66 case WMI_RC_STATE_CHANGE_CMDID:
65 return "WMI_RC_STATE_CHANGE_CMDID"; 67 return "WMI_RC_STATE_CHANGE_CMDID";
66 case WMI_RC_RATE_UPDATE_CMDID: 68 case WMI_RC_RATE_UPDATE_CMDID:
@@ -101,6 +103,7 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
101 spin_lock_init(&wmi->event_lock); 103 spin_lock_init(&wmi->event_lock);
102 mutex_init(&wmi->op_mutex); 104 mutex_init(&wmi->op_mutex);
103 mutex_init(&wmi->multi_write_mutex); 105 mutex_init(&wmi->multi_write_mutex);
106 mutex_init(&wmi->multi_rmw_mutex);
104 init_completion(&wmi->cmd_wait); 107 init_completion(&wmi->cmd_wait);
105 INIT_LIST_HEAD(&wmi->pending_tx_events); 108 INIT_LIST_HEAD(&wmi->pending_tx_events);
106 tasklet_init(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet, 109 tasklet_init(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet,
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index 0db37f230018..aa84a335289a 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -112,6 +112,7 @@ enum wmi_cmd_id {
112 WMI_TX_STATS_CMDID, 112 WMI_TX_STATS_CMDID,
113 WMI_RX_STATS_CMDID, 113 WMI_RX_STATS_CMDID,
114 WMI_BITRATE_MASK_CMDID, 114 WMI_BITRATE_MASK_CMDID,
115 WMI_REG_RMW_CMDID,
115}; 116};
116 117
117enum wmi_event_id { 118enum wmi_event_id {
@@ -125,12 +126,19 @@ enum wmi_event_id {
125}; 126};
126 127
127#define MAX_CMD_NUMBER 62 128#define MAX_CMD_NUMBER 62
129#define MAX_RMW_CMD_NUMBER 15
128 130
129struct register_write { 131struct register_write {
130 __be32 reg; 132 __be32 reg;
131 __be32 val; 133 __be32 val;
132}; 134};
133 135
136struct register_rmw {
137 __be32 reg;
138 __be32 set;
139 __be32 clr;
140} __packed;
141
134struct ath9k_htc_tx_event { 142struct ath9k_htc_tx_event {
135 int count; 143 int count;
136 struct __wmi_event_txstatus txs; 144 struct __wmi_event_txstatus txs;
@@ -156,10 +164,18 @@ struct wmi {
156 164
157 spinlock_t wmi_lock; 165 spinlock_t wmi_lock;
158 166
167 /* multi write section */
159 atomic_t mwrite_cnt; 168 atomic_t mwrite_cnt;
160 struct register_write multi_write[MAX_CMD_NUMBER]; 169 struct register_write multi_write[MAX_CMD_NUMBER];
161 u32 multi_write_idx; 170 u32 multi_write_idx;
162 struct mutex multi_write_mutex; 171 struct mutex multi_write_mutex;
172
173 /* multi rmw section */
174 atomic_t m_rmw_cnt;
175 struct register_rmw multi_rmw[MAX_RMW_CMD_NUMBER];
176 u32 multi_rmw_idx;
177 struct mutex multi_rmw_mutex;
178
163}; 179};
164 180
165struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv); 181struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 3d57f8772389..c657ca26a71a 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -289,7 +289,7 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
289 "count=%d, count_false=%d\n", 289 "count=%d, count_false=%d\n",
290 event->freq, pd->rs->type_id, 290 event->freq, pd->rs->type_id,
291 ps->pri, ps->count, ps->count_falses); 291 ps->pri, ps->count, ps->count_falses);
292 channel_detector_reset(dpd, cd); 292 pd->reset(pd, dpd->last_pulse_ts);
293 return true; 293 return true;
294 } 294 }
295 } 295 }
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 47d14db59b93..b97172667bc7 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -14,6 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/etherdevice.h>
17#include "wil6210.h" 18#include "wil6210.h"
18#include "wmi.h" 19#include "wmi.h"
19 20
@@ -217,7 +218,7 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy,
217 if (cid < 0) 218 if (cid < 0)
218 return -ENOENT; 219 return -ENOENT;
219 220
220 memcpy(mac, wil->sta[cid].addr, ETH_ALEN); 221 ether_addr_copy(mac, wil->sta[cid].addr);
221 wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid); 222 wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
222 223
223 rc = wil_cid_fill_sinfo(wil, cid, sinfo); 224 rc = wil_cid_fill_sinfo(wil, cid, sinfo);
@@ -478,8 +479,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
478 } 479 }
479 conn.channel = ch - 1; 480 conn.channel = ch - 1;
480 481
481 memcpy(conn.bssid, bss->bssid, ETH_ALEN); 482 ether_addr_copy(conn.bssid, bss->bssid);
482 memcpy(conn.dst_mac, bss->bssid, ETH_ALEN); 483 ether_addr_copy(conn.dst_mac, bss->bssid);
483 484
484 set_bit(wil_status_fwconnecting, wil->status); 485 set_bit(wil_status_fwconnecting, wil->status);
485 486
@@ -782,8 +783,17 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
782 rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype, 783 rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
783 channel->hw_value); 784 channel->hw_value);
784 if (rc) 785 if (rc)
785 netif_carrier_off(ndev); 786 goto err_pcp_start;
786 787
788 rc = wil_bcast_init(wil);
789 if (rc)
790 goto err_bcast;
791
792 goto out; /* success */
793err_bcast:
794 wmi_pcp_stop(wil);
795err_pcp_start:
796 netif_carrier_off(ndev);
787out: 797out:
788 mutex_unlock(&wil->mutex); 798 mutex_unlock(&wil->mutex);
789 return rc; 799 return rc;
@@ -917,6 +927,21 @@ static int wil_cfg80211_probe_client(struct wiphy *wiphy,
917 return 0; 927 return 0;
918} 928}
919 929
930static int wil_cfg80211_change_bss(struct wiphy *wiphy,
931 struct net_device *dev,
932 struct bss_parameters *params)
933{
934 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
935
936 if (params->ap_isolate >= 0) {
937 wil_dbg_misc(wil, "%s(ap_isolate %d => %d)\n", __func__,
938 wil->ap_isolate, params->ap_isolate);
939 wil->ap_isolate = params->ap_isolate;
940 }
941
942 return 0;
943}
944
920static struct cfg80211_ops wil_cfg80211_ops = { 945static struct cfg80211_ops wil_cfg80211_ops = {
921 .scan = wil_cfg80211_scan, 946 .scan = wil_cfg80211_scan,
922 .connect = wil_cfg80211_connect, 947 .connect = wil_cfg80211_connect,
@@ -937,6 +962,7 @@ static struct cfg80211_ops wil_cfg80211_ops = {
937 .stop_ap = wil_cfg80211_stop_ap, 962 .stop_ap = wil_cfg80211_stop_ap,
938 .del_station = wil_cfg80211_del_station, 963 .del_station = wil_cfg80211_del_station,
939 .probe_client = wil_cfg80211_probe_client, 964 .probe_client = wil_cfg80211_probe_client,
965 .change_bss = wil_cfg80211_change_bss,
940}; 966};
941 967
942static void wil_wiphy_init(struct wiphy *wiphy) 968static void wil_wiphy_init(struct wiphy *wiphy)
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 3830cc20d4fa..bbc22d88f78f 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -121,12 +121,18 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
121 121
122 snprintf(name, sizeof(name), "tx_%2d", i); 122 snprintf(name, sizeof(name), "tx_%2d", i);
123 123
124 seq_printf(s, 124 if (cid < WIL6210_MAX_CID)
125 "\n%pM CID %d TID %d BACK([%d] %d TU A%s) [%3d|%3d] idle %s\n", 125 seq_printf(s,
126 wil->sta[cid].addr, cid, tid, 126 "\n%pM CID %d TID %d BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
127 txdata->agg_wsize, txdata->agg_timeout, 127 wil->sta[cid].addr, cid, tid,
128 txdata->agg_amsdu ? "+" : "-", 128 txdata->agg_wsize,
129 used, avail, sidle); 129 txdata->agg_timeout,
130 txdata->agg_amsdu ? "+" : "-",
131 used, avail, sidle);
132 else
133 seq_printf(s,
134 "\nBroadcast [%3d|%3d] idle %s\n",
135 used, avail, sidle);
130 136
131 wil_print_vring(s, wil, name, vring, '_', 'H'); 137 wil_print_vring(s, wil, name, vring, '_', 'H');
132 } 138 }
@@ -1405,6 +1411,7 @@ static const struct dbg_off dbg_wil_off[] = {
1405 WIL_FIELD(fw_version, S_IRUGO, doff_u32), 1411 WIL_FIELD(fw_version, S_IRUGO, doff_u32),
1406 WIL_FIELD(hw_version, S_IRUGO, doff_x32), 1412 WIL_FIELD(hw_version, S_IRUGO, doff_x32),
1407 WIL_FIELD(recovery_count, S_IRUGO, doff_u32), 1413 WIL_FIELD(recovery_count, S_IRUGO, doff_u32),
1414 WIL_FIELD(ap_isolate, S_IRUGO, doff_u32),
1408 {}, 1415 {},
1409}; 1416};
1410 1417
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index db74e811f5c4..c2a238426425 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -68,6 +68,7 @@ MODULE_PARM_DESC(mtu_max, " Max MTU value.");
68 68
69static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT; 69static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT;
70static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT; 70static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT;
71static uint bcast_ring_order = WIL_BCAST_RING_SIZE_ORDER_DEFAULT;
71 72
72static int ring_order_set(const char *val, const struct kernel_param *kp) 73static int ring_order_set(const char *val, const struct kernel_param *kp)
73{ 74{
@@ -216,6 +217,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
216 switch (wdev->iftype) { 217 switch (wdev->iftype) {
217 case NL80211_IFTYPE_STATION: 218 case NL80211_IFTYPE_STATION:
218 case NL80211_IFTYPE_P2P_CLIENT: 219 case NL80211_IFTYPE_P2P_CLIENT:
220 wil_bcast_fini(wil);
219 netif_tx_stop_all_queues(ndev); 221 netif_tx_stop_all_queues(ndev);
220 netif_carrier_off(ndev); 222 netif_carrier_off(ndev);
221 223
@@ -360,6 +362,35 @@ static int wil_find_free_vring(struct wil6210_priv *wil)
360 return -EINVAL; 362 return -EINVAL;
361} 363}
362 364
365int wil_bcast_init(struct wil6210_priv *wil)
366{
367 int ri = wil->bcast_vring, rc;
368
369 if ((ri >= 0) && wil->vring_tx[ri].va)
370 return 0;
371
372 ri = wil_find_free_vring(wil);
373 if (ri < 0)
374 return ri;
375
376 rc = wil_vring_init_bcast(wil, ri, 1 << bcast_ring_order);
377 if (rc == 0)
378 wil->bcast_vring = ri;
379
380 return rc;
381}
382
383void wil_bcast_fini(struct wil6210_priv *wil)
384{
385 int ri = wil->bcast_vring;
386
387 if (ri < 0)
388 return;
389
390 wil->bcast_vring = -1;
391 wil_vring_fini_tx(wil, ri);
392}
393
363static void wil_connect_worker(struct work_struct *work) 394static void wil_connect_worker(struct work_struct *work)
364{ 395{
365 int rc; 396 int rc;
@@ -407,6 +438,7 @@ int wil_priv_init(struct wil6210_priv *wil)
407 init_completion(&wil->wmi_call); 438 init_completion(&wil->wmi_call);
408 439
409 wil->pending_connect_cid = -1; 440 wil->pending_connect_cid = -1;
441 wil->bcast_vring = -1;
410 setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil); 442 setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
411 setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil); 443 setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil);
412 444
@@ -656,6 +688,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
656 688
657 cancel_work_sync(&wil->disconnect_worker); 689 cancel_work_sync(&wil->disconnect_worker);
658 wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); 690 wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
691 wil_bcast_fini(wil);
659 692
660 /* prevent NAPI from being scheduled */ 693 /* prevent NAPI from being scheduled */
661 bitmap_zero(wil->status, wil_status_last); 694 bitmap_zero(wil->status, wil_status_last);
@@ -714,6 +747,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
714 747
715 /* init after reset */ 748 /* init after reset */
716 wil->pending_connect_cid = -1; 749 wil->pending_connect_cid = -1;
750 wil->ap_isolate = 0;
717 reinit_completion(&wil->wmi_ready); 751 reinit_completion(&wil->wmi_ready);
718 reinit_completion(&wil->wmi_call); 752 reinit_completion(&wil->wmi_call);
719 753
@@ -723,6 +757,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
723 757
724 /* we just started MAC, wait for FW ready */ 758 /* we just started MAC, wait for FW ready */
725 rc = wil_wait_for_fw_ready(wil); 759 rc = wil_wait_for_fw_ready(wil);
760 if (rc == 0) /* check FW is responsive */
761 rc = wmi_echo(wil);
726 } 762 }
727 763
728 return rc; 764 return rc;
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index ace30c1b5c64..f2f7ea29558e 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -82,7 +82,7 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
82 wil_rx_handle(wil, &quota); 82 wil_rx_handle(wil, &quota);
83 done = budget - quota; 83 done = budget - quota;
84 84
85 if (done <= 1) { /* burst ends - only one packet processed */ 85 if (done < budget) {
86 napi_complete(napi); 86 napi_complete(napi);
87 wil6210_unmask_irq_rx(wil); 87 wil6210_unmask_irq_rx(wil);
88 wil_dbg_txrx(wil, "NAPI RX complete\n"); 88 wil_dbg_txrx(wil, "NAPI RX complete\n");
@@ -110,7 +110,7 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
110 tx_done += wil_tx_complete(wil, i); 110 tx_done += wil_tx_complete(wil, i);
111 } 111 }
112 112
113 if (tx_done <= 1) { /* burst ends - only one packet processed */ 113 if (tx_done < budget) {
114 napi_complete(napi); 114 napi_complete(napi);
115 wil6210_unmask_irq_tx(wil); 115 wil6210_unmask_irq_tx(wil);
116 wil_dbg_txrx(wil, "NAPI TX complete\n"); 116 wil_dbg_txrx(wil, "NAPI TX complete\n");
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 25343cffe229..109986114abf 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -246,8 +246,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
246 246
247 wil6210_debugfs_init(wil); 247 wil6210_debugfs_init(wil);
248 248
249 /* check FW is alive */
250 wmi_echo(wil);
251 249
252 return 0; 250 return 0;
253 251
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 7f2f560b8638..e8bd512d81a9 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -33,6 +33,15 @@ module_param(rtap_include_phy_info, bool, S_IRUGO);
33MODULE_PARM_DESC(rtap_include_phy_info, 33MODULE_PARM_DESC(rtap_include_phy_info,
34 " Include PHY info in the radiotap header, default - no"); 34 " Include PHY info in the radiotap header, default - no");
35 35
36bool rx_align_2;
37module_param(rx_align_2, bool, S_IRUGO);
38MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
39
40static inline uint wil_rx_snaplen(void)
41{
42 return rx_align_2 ? 6 : 0;
43}
44
36static inline int wil_vring_is_empty(struct vring *vring) 45static inline int wil_vring_is_empty(struct vring *vring)
37{ 46{
38 return vring->swhead == vring->swtail; 47 return vring->swhead == vring->swtail;
@@ -209,7 +218,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
209 u32 i, int headroom) 218 u32 i, int headroom)
210{ 219{
211 struct device *dev = wil_to_dev(wil); 220 struct device *dev = wil_to_dev(wil);
212 unsigned int sz = mtu_max + ETH_HLEN; 221 unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen();
213 struct vring_rx_desc dd, *d = &dd; 222 struct vring_rx_desc dd, *d = &dd;
214 volatile struct vring_rx_desc *_d = &vring->va[i].rx; 223 volatile struct vring_rx_desc *_d = &vring->va[i].rx;
215 dma_addr_t pa; 224 dma_addr_t pa;
@@ -365,10 +374,12 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
365 struct vring_rx_desc *d; 374 struct vring_rx_desc *d;
366 struct sk_buff *skb; 375 struct sk_buff *skb;
367 dma_addr_t pa; 376 dma_addr_t pa;
368 unsigned int sz = mtu_max + ETH_HLEN; 377 unsigned int snaplen = wil_rx_snaplen();
378 unsigned int sz = mtu_max + ETH_HLEN + snaplen;
369 u16 dmalen; 379 u16 dmalen;
370 u8 ftype; 380 u8 ftype;
371 int cid; 381 int cid;
382 int i = (int)vring->swhead;
372 struct wil_net_stats *stats; 383 struct wil_net_stats *stats;
373 384
374 BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb)); 385 BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
@@ -376,24 +387,28 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
376 if (unlikely(wil_vring_is_empty(vring))) 387 if (unlikely(wil_vring_is_empty(vring)))
377 return NULL; 388 return NULL;
378 389
379 _d = &vring->va[vring->swhead].rx; 390 _d = &vring->va[i].rx;
380 if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) { 391 if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
381 /* it is not error, we just reached end of Rx done area */ 392 /* it is not error, we just reached end of Rx done area */
382 return NULL; 393 return NULL;
383 } 394 }
384 395
385 skb = vring->ctx[vring->swhead].skb; 396 skb = vring->ctx[i].skb;
397 vring->ctx[i].skb = NULL;
398 wil_vring_advance_head(vring, 1);
399 if (!skb) {
400 wil_err(wil, "No Rx skb at [%d]\n", i);
401 return NULL;
402 }
386 d = wil_skb_rxdesc(skb); 403 d = wil_skb_rxdesc(skb);
387 *d = *_d; 404 *d = *_d;
388 pa = wil_desc_addr(&d->dma.addr); 405 pa = wil_desc_addr(&d->dma.addr);
389 vring->ctx[vring->swhead].skb = NULL;
390 wil_vring_advance_head(vring, 1);
391 406
392 dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE); 407 dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
393 dmalen = le16_to_cpu(d->dma.length); 408 dmalen = le16_to_cpu(d->dma.length);
394 409
395 trace_wil6210_rx(vring->swhead, d); 410 trace_wil6210_rx(i, d);
396 wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, dmalen); 411 wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
397 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4, 412 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
398 (const void *)d, sizeof(*d), false); 413 (const void *)d, sizeof(*d), false);
399 414
@@ -433,7 +448,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
433 return NULL; 448 return NULL;
434 } 449 }
435 450
436 if (unlikely(skb->len < ETH_HLEN)) { 451 if (unlikely(skb->len < ETH_HLEN + snaplen)) {
437 wil_err(wil, "Short frame, len = %d\n", skb->len); 452 wil_err(wil, "Short frame, len = %d\n", skb->len);
438 /* TODO: process it (i.e. BAR) */ 453 /* TODO: process it (i.e. BAR) */
439 kfree_skb(skb); 454 kfree_skb(skb);
@@ -455,6 +470,17 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
455 */ 470 */
456 } 471 }
457 472
473 if (snaplen) {
474 /* Packet layout
475 * +-------+-------+---------+------------+------+
476 * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
477 * +-------+-------+---------+------------+------+
478 * Need to remove SNAP, shifting SA and DA forward
479 */
480 memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
481 skb_pull(skb, snaplen);
482 }
483
458 return skb; 484 return skb;
459} 485}
460 486
@@ -492,17 +518,71 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
492 */ 518 */
493void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) 519void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
494{ 520{
495 gro_result_t rc; 521 gro_result_t rc = GRO_NORMAL;
496 struct wil6210_priv *wil = ndev_to_wil(ndev); 522 struct wil6210_priv *wil = ndev_to_wil(ndev);
523 struct wireless_dev *wdev = wil_to_wdev(wil);
497 unsigned int len = skb->len; 524 unsigned int len = skb->len;
498 struct vring_rx_desc *d = wil_skb_rxdesc(skb); 525 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
499 int cid = wil_rxdesc_cid(d); 526 int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
527 struct ethhdr *eth = (void *)skb->data;
528 /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
529 * is not suitable, need to look at data
530 */
531 int mcast = is_multicast_ether_addr(eth->h_dest);
500 struct wil_net_stats *stats = &wil->sta[cid].stats; 532 struct wil_net_stats *stats = &wil->sta[cid].stats;
533 struct sk_buff *xmit_skb = NULL;
534 static const char * const gro_res_str[] = {
535 [GRO_MERGED] = "GRO_MERGED",
536 [GRO_MERGED_FREE] = "GRO_MERGED_FREE",
537 [GRO_HELD] = "GRO_HELD",
538 [GRO_NORMAL] = "GRO_NORMAL",
539 [GRO_DROP] = "GRO_DROP",
540 };
501 541
502 skb_orphan(skb); 542 skb_orphan(skb);
503 543
504 rc = napi_gro_receive(&wil->napi_rx, skb); 544 if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
545 if (mcast) {
546 /* send multicast frames both to higher layers in
547 * local net stack and back to the wireless medium
548 */
549 xmit_skb = skb_copy(skb, GFP_ATOMIC);
550 } else {
551 int xmit_cid = wil_find_cid(wil, eth->h_dest);
552
553 if (xmit_cid >= 0) {
554 /* The destination station is associated to
555 * this AP (in this VLAN), so send the frame
556 * directly to it and do not pass it to local
557 * net stack.
558 */
559 xmit_skb = skb;
560 skb = NULL;
561 }
562 }
563 }
564 if (xmit_skb) {
565 /* Send to wireless media and increase priority by 256 to
566 * keep the received priority instead of reclassifying
567 * the frame (see cfg80211_classify8021d).
568 */
569 xmit_skb->dev = ndev;
570 xmit_skb->priority += 256;
571 xmit_skb->protocol = htons(ETH_P_802_3);
572 skb_reset_network_header(xmit_skb);
573 skb_reset_mac_header(xmit_skb);
574 wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
575 dev_queue_xmit(xmit_skb);
576 }
505 577
578 if (skb) { /* deliver to local stack */
579
580 skb->protocol = eth_type_trans(skb, ndev);
581 rc = napi_gro_receive(&wil->napi_rx, skb);
582 wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
583 len, gro_res_str[rc]);
584 }
585 /* statistics. rc set to GRO_NORMAL for AP bridging */
506 if (unlikely(rc == GRO_DROP)) { 586 if (unlikely(rc == GRO_DROP)) {
507 ndev->stats.rx_dropped++; 587 ndev->stats.rx_dropped++;
508 stats->rx_dropped++; 588 stats->rx_dropped++;
@@ -512,17 +592,8 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
512 stats->rx_packets++; 592 stats->rx_packets++;
513 ndev->stats.rx_bytes += len; 593 ndev->stats.rx_bytes += len;
514 stats->rx_bytes += len; 594 stats->rx_bytes += len;
515 } 595 if (mcast)
516 { 596 ndev->stats.multicast++;
517 static const char * const gro_res_str[] = {
518 [GRO_MERGED] = "GRO_MERGED",
519 [GRO_MERGED_FREE] = "GRO_MERGED_FREE",
520 [GRO_HELD] = "GRO_HELD",
521 [GRO_NORMAL] = "GRO_NORMAL",
522 [GRO_DROP] = "GRO_DROP",
523 };
524 wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
525 len, gro_res_str[rc]);
526 } 597 }
527} 598}
528 599
@@ -553,7 +624,6 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
553 skb->protocol = htons(ETH_P_802_2); 624 skb->protocol = htons(ETH_P_802_2);
554 wil_netif_rx_any(skb, ndev); 625 wil_netif_rx_any(skb, ndev);
555 } else { 626 } else {
556 skb->protocol = eth_type_trans(skb, ndev);
557 wil_rx_reorder(wil, skb); 627 wil_rx_reorder(wil, skb);
558 } 628 }
559 } 629 }
@@ -679,6 +749,72 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
679 return rc; 749 return rc;
680} 750}
681 751
752int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
753{
754 int rc;
755 struct wmi_bcast_vring_cfg_cmd cmd = {
756 .action = cpu_to_le32(WMI_VRING_CMD_ADD),
757 .vring_cfg = {
758 .tx_sw_ring = {
759 .max_mpdu_size =
760 cpu_to_le16(wil_mtu2macbuf(mtu_max)),
761 .ring_size = cpu_to_le16(size),
762 },
763 .ringid = id,
764 .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
765 },
766 };
767 struct {
768 struct wil6210_mbox_hdr_wmi wmi;
769 struct wmi_vring_cfg_done_event cmd;
770 } __packed reply;
771 struct vring *vring = &wil->vring_tx[id];
772 struct vring_tx_data *txdata = &wil->vring_tx_data[id];
773
774 wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
775 cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
776
777 if (vring->va) {
778 wil_err(wil, "Tx ring [%d] already allocated\n", id);
779 rc = -EINVAL;
780 goto out;
781 }
782
783 memset(txdata, 0, sizeof(*txdata));
784 spin_lock_init(&txdata->lock);
785 vring->size = size;
786 rc = wil_vring_alloc(wil, vring);
787 if (rc)
788 goto out;
789
790 wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
791 wil->vring2cid_tid[id][1] = 0; /* TID */
792
793 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
794
795 rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
796 WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
797 if (rc)
798 goto out_free;
799
800 if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
801 wil_err(wil, "Tx config failed, status 0x%02x\n",
802 reply.cmd.status);
803 rc = -EINVAL;
804 goto out_free;
805 }
806 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
807
808 txdata->enabled = 1;
809
810 return 0;
811 out_free:
812 wil_vring_free(wil, vring, 1);
813 out:
814
815 return rc;
816}
817
682void wil_vring_fini_tx(struct wil6210_priv *wil, int id) 818void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
683{ 819{
684 struct vring *vring = &wil->vring_tx[id]; 820 struct vring *vring = &wil->vring_tx[id];
@@ -702,7 +838,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
702 memset(txdata, 0, sizeof(*txdata)); 838 memset(txdata, 0, sizeof(*txdata));
703} 839}
704 840
705static struct vring *wil_find_tx_vring(struct wil6210_priv *wil, 841static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
706 struct sk_buff *skb) 842 struct sk_buff *skb)
707{ 843{
708 int i; 844 int i;
@@ -735,15 +871,6 @@ static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
735 return NULL; 871 return NULL;
736} 872}
737 873
738static void wil_set_da_for_vring(struct wil6210_priv *wil,
739 struct sk_buff *skb, int vring_index)
740{
741 struct ethhdr *eth = (void *)skb->data;
742 int cid = wil->vring2cid_tid[vring_index][0];
743
744 memcpy(eth->h_dest, wil->sta[cid].addr, ETH_ALEN);
745}
746
747static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, 874static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
748 struct sk_buff *skb); 875 struct sk_buff *skb);
749 876
@@ -764,6 +891,9 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
764 continue; 891 continue;
765 892
766 cid = wil->vring2cid_tid[i][0]; 893 cid = wil->vring2cid_tid[i][0];
894 if (cid >= WIL6210_MAX_CID) /* skip BCAST */
895 continue;
896
767 if (!wil->sta[cid].data_port_open && 897 if (!wil->sta[cid].data_port_open &&
768 (skb->protocol != cpu_to_be16(ETH_P_PAE))) 898 (skb->protocol != cpu_to_be16(ETH_P_PAE)))
769 break; 899 break;
@@ -778,17 +908,51 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
778 return NULL; 908 return NULL;
779} 909}
780 910
781/* 911/* Use one of 2 strategies:
782 * Find 1-st vring and return it; set dest address for this vring in skb 912 *
783 * duplicate skb and send it to other active vrings 913 * 1. New (real broadcast):
914 * use dedicated broadcast vring
915 * 2. Old (pseudo-DMS):
916 * Find 1-st vring and return it;
917 * duplicate skb and send it to other active vrings;
918 * in all cases override dest address to unicast peer's address
919 * Use old strategy when new is not supported yet:
920 * - for PBSS
921 * - for secure link
784 */ 922 */
785static struct vring *wil_tx_bcast(struct wil6210_priv *wil, 923static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
786 struct sk_buff *skb) 924 struct sk_buff *skb)
925{
926 struct vring *v;
927 int i = wil->bcast_vring;
928
929 if (i < 0)
930 return NULL;
931 v = &wil->vring_tx[i];
932 if (!v->va)
933 return NULL;
934
935 return v;
936}
937
938static void wil_set_da_for_vring(struct wil6210_priv *wil,
939 struct sk_buff *skb, int vring_index)
940{
941 struct ethhdr *eth = (void *)skb->data;
942 int cid = wil->vring2cid_tid[vring_index][0];
943
944 ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
945}
946
947static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
948 struct sk_buff *skb)
787{ 949{
788 struct vring *v, *v2; 950 struct vring *v, *v2;
789 struct sk_buff *skb2; 951 struct sk_buff *skb2;
790 int i; 952 int i;
791 u8 cid; 953 u8 cid;
954 struct ethhdr *eth = (void *)skb->data;
955 char *src = eth->h_source;
792 956
793 /* find 1-st vring eligible for data */ 957 /* find 1-st vring eligible for data */
794 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { 958 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
@@ -797,9 +961,15 @@ static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
797 continue; 961 continue;
798 962
799 cid = wil->vring2cid_tid[i][0]; 963 cid = wil->vring2cid_tid[i][0];
964 if (cid >= WIL6210_MAX_CID) /* skip BCAST */
965 continue;
800 if (!wil->sta[cid].data_port_open) 966 if (!wil->sta[cid].data_port_open)
801 continue; 967 continue;
802 968
969 /* don't Tx back to source when re-routing Rx->Tx at the AP */
970 if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
971 continue;
972
803 goto found; 973 goto found;
804 } 974 }
805 975
@@ -817,9 +987,14 @@ found:
817 if (!v2->va) 987 if (!v2->va)
818 continue; 988 continue;
819 cid = wil->vring2cid_tid[i][0]; 989 cid = wil->vring2cid_tid[i][0];
990 if (cid >= WIL6210_MAX_CID) /* skip BCAST */
991 continue;
820 if (!wil->sta[cid].data_port_open) 992 if (!wil->sta[cid].data_port_open)
821 continue; 993 continue;
822 994
995 if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
996 continue;
997
823 skb2 = skb_copy(skb, GFP_ATOMIC); 998 skb2 = skb_copy(skb, GFP_ATOMIC);
824 if (skb2) { 999 if (skb2) {
825 wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i); 1000 wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
@@ -833,6 +1008,20 @@ found:
833 return v; 1008 return v;
834} 1009}
835 1010
1011static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
1012 struct sk_buff *skb)
1013{
1014 struct wireless_dev *wdev = wil->wdev;
1015
1016 if (wdev->iftype != NL80211_IFTYPE_AP)
1017 return wil_find_tx_bcast_2(wil, skb);
1018
1019 if (wil->privacy)
1020 return wil_find_tx_bcast_2(wil, skb);
1021
1022 return wil_find_tx_bcast_1(wil, skb);
1023}
1024
836static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len, 1025static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
837 int vring_index) 1026 int vring_index)
838{ 1027{
@@ -925,6 +1114,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
925 uint i = swhead; 1114 uint i = swhead;
926 dma_addr_t pa; 1115 dma_addr_t pa;
927 int used; 1116 int used;
1117 bool mcast = (vring_index == wil->bcast_vring);
1118 uint len = skb_headlen(skb);
928 1119
929 wil_dbg_txrx(wil, "%s()\n", __func__); 1120 wil_dbg_txrx(wil, "%s()\n", __func__);
930 1121
@@ -950,7 +1141,17 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
950 return -EINVAL; 1141 return -EINVAL;
951 vring->ctx[i].mapped_as = wil_mapped_as_single; 1142 vring->ctx[i].mapped_as = wil_mapped_as_single;
952 /* 1-st segment */ 1143 /* 1-st segment */
953 wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index); 1144 wil_tx_desc_map(d, pa, len, vring_index);
1145 if (unlikely(mcast)) {
1146 d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
1147 if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) {
1148 /* set MCS 1 */
1149 d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
1150 /* packet mode 2 */
1151 d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS) |
1152 (2 << MAC_CFG_DESC_TX_1_PKT_MODE_POS);
1153 }
1154 }
954 /* Process TCP/UDP checksum offloading */ 1155 /* Process TCP/UDP checksum offloading */
955 if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) { 1156 if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
956 wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n", 1157 wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
@@ -1056,6 +1257,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1056{ 1257{
1057 struct wil6210_priv *wil = ndev_to_wil(ndev); 1258 struct wil6210_priv *wil = ndev_to_wil(ndev);
1058 struct ethhdr *eth = (void *)skb->data; 1259 struct ethhdr *eth = (void *)skb->data;
1260 bool bcast = is_multicast_ether_addr(eth->h_dest);
1059 struct vring *vring; 1261 struct vring *vring;
1060 static bool pr_once_fw; 1262 static bool pr_once_fw;
1061 int rc; 1263 int rc;
@@ -1083,10 +1285,8 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1083 /* in STA mode (ESS), all to same VRING */ 1285 /* in STA mode (ESS), all to same VRING */
1084 vring = wil_find_tx_vring_sta(wil, skb); 1286 vring = wil_find_tx_vring_sta(wil, skb);
1085 } else { /* direct communication, find matching VRING */ 1287 } else { /* direct communication, find matching VRING */
1086 if (is_unicast_ether_addr(eth->h_dest)) 1288 vring = bcast ? wil_find_tx_bcast(wil, skb) :
1087 vring = wil_find_tx_vring(wil, skb); 1289 wil_find_tx_ucast(wil, skb);
1088 else
1089 vring = wil_tx_bcast(wil, skb);
1090 } 1290 }
1091 if (unlikely(!vring)) { 1291 if (unlikely(!vring)) {
1092 wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest); 1292 wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
@@ -1149,7 +1349,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
1149 struct vring_tx_data *txdata = &wil->vring_tx_data[ringid]; 1349 struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
1150 int done = 0; 1350 int done = 0;
1151 int cid = wil->vring2cid_tid[ringid][0]; 1351 int cid = wil->vring2cid_tid[ringid][0];
1152 struct wil_net_stats *stats = &wil->sta[cid].stats; 1352 struct wil_net_stats *stats = NULL;
1153 volatile struct vring_tx_desc *_d; 1353 volatile struct vring_tx_desc *_d;
1154 int used_before_complete; 1354 int used_before_complete;
1155 int used_new; 1355 int used_new;
@@ -1168,6 +1368,9 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
1168 1368
1169 used_before_complete = wil_vring_used_tx(vring); 1369 used_before_complete = wil_vring_used_tx(vring);
1170 1370
1371 if (cid < WIL6210_MAX_CID)
1372 stats = &wil->sta[cid].stats;
1373
1171 while (!wil_vring_is_empty(vring)) { 1374 while (!wil_vring_is_empty(vring)) {
1172 int new_swtail; 1375 int new_swtail;
1173 struct wil_ctx *ctx = &vring->ctx[vring->swtail]; 1376 struct wil_ctx *ctx = &vring->ctx[vring->swtail];
@@ -1209,12 +1412,15 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
1209 if (skb) { 1412 if (skb) {
1210 if (likely(d->dma.error == 0)) { 1413 if (likely(d->dma.error == 0)) {
1211 ndev->stats.tx_packets++; 1414 ndev->stats.tx_packets++;
1212 stats->tx_packets++;
1213 ndev->stats.tx_bytes += skb->len; 1415 ndev->stats.tx_bytes += skb->len;
1214 stats->tx_bytes += skb->len; 1416 if (stats) {
1417 stats->tx_packets++;
1418 stats->tx_bytes += skb->len;
1419 }
1215 } else { 1420 } else {
1216 ndev->stats.tx_errors++; 1421 ndev->stats.tx_errors++;
1217 stats->tx_errors++; 1422 if (stats)
1423 stats->tx_errors++;
1218 } 1424 }
1219 wil_consume_skb(skb, d->dma.error == 0); 1425 wil_consume_skb(skb, d->dma.error == 0);
1220 } 1426 }
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index b6e65c37d410..4310972c9e16 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -28,6 +28,7 @@ extern unsigned int mtu_max;
28extern unsigned short rx_ring_overflow_thrsh; 28extern unsigned short rx_ring_overflow_thrsh;
29extern int agg_wsize; 29extern int agg_wsize;
30extern u32 vring_idle_trsh; 30extern u32 vring_idle_trsh;
31extern bool rx_align_2;
31 32
32#define WIL_NAME "wil6210" 33#define WIL_NAME "wil6210"
33#define WIL_FW_NAME "wil6210.fw" /* code */ 34#define WIL_FW_NAME "wil6210.fw" /* code */
@@ -49,6 +50,8 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
49#define WIL_TX_Q_LEN_DEFAULT (4000) 50#define WIL_TX_Q_LEN_DEFAULT (4000)
50#define WIL_RX_RING_SIZE_ORDER_DEFAULT (10) 51#define WIL_RX_RING_SIZE_ORDER_DEFAULT (10)
51#define WIL_TX_RING_SIZE_ORDER_DEFAULT (10) 52#define WIL_TX_RING_SIZE_ORDER_DEFAULT (10)
53#define WIL_BCAST_RING_SIZE_ORDER_DEFAULT (7)
54#define WIL_BCAST_MCS0_LIMIT (1024) /* limit for MCS0 frame size */
52/* limit ring size in range [32..32k] */ 55/* limit ring size in range [32..32k] */
53#define WIL_RING_SIZE_ORDER_MIN (5) 56#define WIL_RING_SIZE_ORDER_MIN (5)
54#define WIL_RING_SIZE_ORDER_MAX (15) 57#define WIL_RING_SIZE_ORDER_MAX (15)
@@ -542,6 +545,7 @@ struct wil6210_priv {
542 u32 monitor_flags; 545 u32 monitor_flags;
543 u32 privacy; /* secure connection? */ 546 u32 privacy; /* secure connection? */
544 int sinfo_gen; 547 int sinfo_gen;
548 u32 ap_isolate; /* no intra-BSS communication */
545 /* interrupt moderation */ 549 /* interrupt moderation */
546 u32 tx_max_burst_duration; 550 u32 tx_max_burst_duration;
547 u32 tx_interframe_timeout; 551 u32 tx_interframe_timeout;
@@ -593,6 +597,7 @@ struct wil6210_priv {
593 struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS]; 597 struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
594 u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */ 598 u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
595 struct wil_sta_info sta[WIL6210_MAX_CID]; 599 struct wil_sta_info sta[WIL6210_MAX_CID];
600 int bcast_vring;
596 /* scan */ 601 /* scan */
597 struct cfg80211_scan_request *scan_request; 602 struct cfg80211_scan_request *scan_request;
598 603
@@ -755,6 +760,9 @@ void wil_rx_fini(struct wil6210_priv *wil);
755int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, 760int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
756 int cid, int tid); 761 int cid, int tid);
757void wil_vring_fini_tx(struct wil6210_priv *wil, int id); 762void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
763int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size);
764int wil_bcast_init(struct wil6210_priv *wil);
765void wil_bcast_fini(struct wil6210_priv *wil);
758 766
759netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev); 767netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
760int wil_tx_complete(struct wil6210_priv *wil, int ringid); 768int wil_tx_complete(struct wil6210_priv *wil, int ringid);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 021313524913..9fe2085be2c5 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -466,7 +466,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
466 466
467 /* FIXME FW can transmit only ucast frames to peer */ 467 /* FIXME FW can transmit only ucast frames to peer */
468 /* FIXME real ring_id instead of hard coded 0 */ 468 /* FIXME real ring_id instead of hard coded 0 */
469 memcpy(wil->sta[evt->cid].addr, evt->bssid, ETH_ALEN); 469 ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
470 wil->sta[evt->cid].status = wil_sta_conn_pending; 470 wil->sta[evt->cid].status = wil_sta_conn_pending;
471 471
472 wil->pending_connect_cid = evt->cid; 472 wil->pending_connect_cid = evt->cid;
@@ -524,8 +524,8 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
524 } 524 }
525 525
526 eth = (struct ethhdr *)skb_put(skb, ETH_HLEN); 526 eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
527 memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN); 527 ether_addr_copy(eth->h_dest, ndev->dev_addr);
528 memcpy(eth->h_source, evt->src_mac, ETH_ALEN); 528 ether_addr_copy(eth->h_source, evt->src_mac);
529 eth->h_proto = cpu_to_be16(ETH_P_PAE); 529 eth->h_proto = cpu_to_be16(ETH_P_PAE);
530 memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len); 530 memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len);
531 skb->protocol = eth_type_trans(skb, ndev); 531 skb->protocol = eth_type_trans(skb, ndev);
@@ -851,7 +851,7 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
851{ 851{
852 struct wmi_set_mac_address_cmd cmd; 852 struct wmi_set_mac_address_cmd cmd;
853 853
854 memcpy(cmd.mac, addr, ETH_ALEN); 854 ether_addr_copy(cmd.mac, addr);
855 855
856 wil_dbg_wmi(wil, "Set MAC %pM\n", addr); 856 wil_dbg_wmi(wil, "Set MAC %pM\n", addr);
857 857
@@ -1109,6 +1109,11 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
1109 */ 1109 */
1110 cmd.l3_l4_ctrl |= (1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS); 1110 cmd.l3_l4_ctrl |= (1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS);
1111 } 1111 }
1112
1113 if (rx_align_2)
1114 cmd.l2_802_3_offload_ctrl |=
1115 L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK;
1116
1112 /* typical time for secure PCP is 840ms */ 1117 /* typical time for secure PCP is 840ms */
1113 rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd), 1118 rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
1114 WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000); 1119 WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
@@ -1157,7 +1162,8 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
1157 struct wmi_disconnect_sta_cmd cmd = { 1162 struct wmi_disconnect_sta_cmd cmd = {
1158 .disconnect_reason = cpu_to_le16(reason), 1163 .disconnect_reason = cpu_to_le16(reason),
1159 }; 1164 };
1160 memcpy(cmd.dst_mac, mac, ETH_ALEN); 1165
1166 ether_addr_copy(cmd.dst_mac, mac);
1161 1167
1162 wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason); 1168 wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
1163 1169
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 8a4af613e191..b29055315350 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -70,7 +70,6 @@ enum wmi_command_id {
70 WMI_SET_UCODE_IDLE_CMDID = 0x0813, 70 WMI_SET_UCODE_IDLE_CMDID = 0x0813,
71 WMI_SET_WORK_MODE_CMDID = 0x0815, 71 WMI_SET_WORK_MODE_CMDID = 0x0815,
72 WMI_LO_LEAKAGE_CALIB_CMDID = 0x0816, 72 WMI_LO_LEAKAGE_CALIB_CMDID = 0x0816,
73 WMI_MARLON_R_ACTIVATE_CMDID = 0x0817,
74 WMI_MARLON_R_READ_CMDID = 0x0818, 73 WMI_MARLON_R_READ_CMDID = 0x0818,
75 WMI_MARLON_R_WRITE_CMDID = 0x0819, 74 WMI_MARLON_R_WRITE_CMDID = 0x0819,
76 WMI_MARLON_R_TXRX_SEL_CMDID = 0x081a, 75 WMI_MARLON_R_TXRX_SEL_CMDID = 0x081a,
@@ -80,6 +79,7 @@ enum wmi_command_id {
80 WMI_RF_RX_TEST_CMDID = 0x081e, 79 WMI_RF_RX_TEST_CMDID = 0x081e,
81 WMI_CFG_RX_CHAIN_CMDID = 0x0820, 80 WMI_CFG_RX_CHAIN_CMDID = 0x0820,
82 WMI_VRING_CFG_CMDID = 0x0821, 81 WMI_VRING_CFG_CMDID = 0x0821,
82 WMI_BCAST_VRING_CFG_CMDID = 0x0822,
83 WMI_VRING_BA_EN_CMDID = 0x0823, 83 WMI_VRING_BA_EN_CMDID = 0x0823,
84 WMI_VRING_BA_DIS_CMDID = 0x0824, 84 WMI_VRING_BA_DIS_CMDID = 0x0824,
85 WMI_RCP_ADDBA_RESP_CMDID = 0x0825, 85 WMI_RCP_ADDBA_RESP_CMDID = 0x0825,
@@ -99,6 +99,7 @@ enum wmi_command_id {
99 WMI_BF_TXSS_MGMT_CMDID = 0x0837, 99 WMI_BF_TXSS_MGMT_CMDID = 0x0837,
100 WMI_BF_SM_MGMT_CMDID = 0x0838, 100 WMI_BF_SM_MGMT_CMDID = 0x0838,
101 WMI_BF_RXSS_MGMT_CMDID = 0x0839, 101 WMI_BF_RXSS_MGMT_CMDID = 0x0839,
102 WMI_BF_TRIG_CMDID = 0x083A,
102 WMI_SET_SECTORS_CMDID = 0x0849, 103 WMI_SET_SECTORS_CMDID = 0x0849,
103 WMI_MAINTAIN_PAUSE_CMDID = 0x0850, 104 WMI_MAINTAIN_PAUSE_CMDID = 0x0850,
104 WMI_MAINTAIN_RESUME_CMDID = 0x0851, 105 WMI_MAINTAIN_RESUME_CMDID = 0x0851,
@@ -596,6 +597,22 @@ struct wmi_vring_cfg_cmd {
596} __packed; 597} __packed;
597 598
598/* 599/*
600 * WMI_BCAST_VRING_CFG_CMDID
601 */
602struct wmi_bcast_vring_cfg {
603 struct wmi_sw_ring_cfg tx_sw_ring;
604 u8 ringid; /* 0-23 vrings */
605 u8 encap_trans_type;
606 u8 ds_cfg; /* 802.3 DS cfg */
607 u8 nwifi_ds_trans_type;
608} __packed;
609
610struct wmi_bcast_vring_cfg_cmd {
611 __le32 action;
612 struct wmi_bcast_vring_cfg vring_cfg;
613} __packed;
614
615/*
599 * WMI_VRING_BA_EN_CMDID 616 * WMI_VRING_BA_EN_CMDID
600 */ 617 */
601struct wmi_vring_ba_en_cmd { 618struct wmi_vring_ba_en_cmd {
@@ -687,6 +704,9 @@ struct wmi_cfg_rx_chain_cmd {
687 #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0) 704 #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
688 #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1) 705 #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
689 #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1) 706 #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
707 #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1)
708 #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1)
709 #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2)
690 u8 l2_802_3_offload_ctrl; 710 u8 l2_802_3_offload_ctrl;
691 711
692 #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0) 712 #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
@@ -841,7 +861,6 @@ enum wmi_event_id {
841 WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812, 861 WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
842 WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815, 862 WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
843 WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816, 863 WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
844 WMI_MARLON_R_ACTIVATE_DONE_EVENTID = 0x1817,
845 WMI_MARLON_R_READ_DONE_EVENTID = 0x1818, 864 WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
846 WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819, 865 WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
847 WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a, 866 WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a,
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index ea4843be773c..b2f9521fe551 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4866,7 +4866,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4866 switch (dev->dev->bus_type) { 4866 switch (dev->dev->bus_type) {
4867#ifdef CONFIG_B43_BCMA 4867#ifdef CONFIG_B43_BCMA
4868 case B43_BUS_BCMA: 4868 case B43_BUS_BCMA:
4869 bcma_core_pci_irq_ctl(dev->dev->bdev->bus, 4869 bcma_host_pci_irq_ctl(dev->dev->bdev->bus,
4870 dev->dev->bdev, true); 4870 dev->dev->bdev, true);
4871 bcma_host_pci_up(dev->dev->bdev->bus); 4871 bcma_host_pci_up(dev->dev->bdev->bus);
4872 break; 4872 break;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index c438ccdb6ed8..9b508bd3b839 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -29,6 +29,7 @@
29#include <linux/mmc/host.h> 29#include <linux/mmc/host.h>
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/platform_data/brcmfmac-sdio.h> 31#include <linux/platform_data/brcmfmac-sdio.h>
32#include <linux/pm_runtime.h>
32#include <linux/suspend.h> 33#include <linux/suspend.h>
33#include <linux/errno.h> 34#include <linux/errno.h>
34#include <linux/module.h> 35#include <linux/module.h>
@@ -1006,6 +1007,7 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
1006 sg_free_table(&sdiodev->sgtable); 1007 sg_free_table(&sdiodev->sgtable);
1007 sdiodev->sbwad = 0; 1008 sdiodev->sbwad = 0;
1008 1009
1010 pm_runtime_allow(sdiodev->func[1]->card->host->parent);
1009 return 0; 1011 return 0;
1010} 1012}
1011 1013
@@ -1074,7 +1076,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
1074 ret = -ENODEV; 1076 ret = -ENODEV;
1075 goto out; 1077 goto out;
1076 } 1078 }
1077 1079 pm_runtime_forbid(host->parent);
1078out: 1080out:
1079 if (ret) 1081 if (ret)
1080 brcmf_sdiod_remove(sdiodev); 1082 brcmf_sdiod_remove(sdiodev);
@@ -1096,6 +1098,8 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
1096 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341), 1098 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
1097 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362), 1099 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
1098 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339), 1100 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
1101 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
1102 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
1099 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354), 1103 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
1100 { /* end: all zeroes */ } 1104 { /* end: all zeroes */ }
1101}; 1105};
@@ -1194,7 +1198,7 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
1194 brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device); 1198 brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
1195 brcmf_dbg(SDIO, "Function: %d\n", func->num); 1199 brcmf_dbg(SDIO, "Function: %d\n", func->num);
1196 1200
1197 if (func->num != 1 && func->num != 2) 1201 if (func->num != 1)
1198 return; 1202 return;
1199 1203
1200 bus_if = dev_get_drvdata(&func->dev); 1204 bus_if = dev_get_drvdata(&func->dev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
index 04d2ca0d87d6..ab2fac8b2760 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -100,9 +100,6 @@
100#define BCM4329_CORE_SOCRAM_BASE 0x18003000 100#define BCM4329_CORE_SOCRAM_BASE 0x18003000
101/* ARM Cortex M3 core, ID 0x82a */ 101/* ARM Cortex M3 core, ID 0x82a */
102#define BCM4329_CORE_ARM_BASE 0x18002000 102#define BCM4329_CORE_ARM_BASE 0x18002000
103#define BCM4329_RAMSIZE 0x48000
104/* bcm43143 */
105#define BCM43143_RAMSIZE 0x70000
106 103
107#define CORE_SB(base, field) \ 104#define CORE_SB(base, field) \
108 (base + SBCONFIGOFF + offsetof(struct sbconfig, field)) 105 (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
@@ -150,6 +147,78 @@ struct sbconfig {
150 u32 sbidhigh; /* identification */ 147 u32 sbidhigh; /* identification */
151}; 148};
152 149
150/* bankidx and bankinfo reg defines corerev >= 8 */
151#define SOCRAM_BANKINFO_RETNTRAM_MASK 0x00010000
152#define SOCRAM_BANKINFO_SZMASK 0x0000007f
153#define SOCRAM_BANKIDX_ROM_MASK 0x00000100
154
155#define SOCRAM_BANKIDX_MEMTYPE_SHIFT 8
156/* socram bankinfo memtype */
157#define SOCRAM_MEMTYPE_RAM 0
158#define SOCRAM_MEMTYPE_R0M 1
159#define SOCRAM_MEMTYPE_DEVRAM 2
160
161#define SOCRAM_BANKINFO_SZBASE 8192
162#define SRCI_LSS_MASK 0x00f00000
163#define SRCI_LSS_SHIFT 20
164#define SRCI_SRNB_MASK 0xf0
165#define SRCI_SRNB_SHIFT 4
166#define SRCI_SRBSZ_MASK 0xf
167#define SRCI_SRBSZ_SHIFT 0
168#define SR_BSZ_BASE 14
169
170struct sbsocramregs {
171 u32 coreinfo;
172 u32 bwalloc;
173 u32 extracoreinfo;
174 u32 biststat;
175 u32 bankidx;
176 u32 standbyctrl;
177
178 u32 errlogstatus; /* rev 6 */
179 u32 errlogaddr; /* rev 6 */
180 /* used for patching rev 3 & 5 */
181 u32 cambankidx;
182 u32 cambankstandbyctrl;
183 u32 cambankpatchctrl;
184 u32 cambankpatchtblbaseaddr;
185 u32 cambankcmdreg;
186 u32 cambankdatareg;
187 u32 cambankmaskreg;
188 u32 PAD[1];
189 u32 bankinfo; /* corev 8 */
190 u32 bankpda;
191 u32 PAD[14];
192 u32 extmemconfig;
193 u32 extmemparitycsr;
194 u32 extmemparityerrdata;
195 u32 extmemparityerrcnt;
196 u32 extmemwrctrlandsize;
197 u32 PAD[84];
198 u32 workaround;
199 u32 pwrctl; /* corerev >= 2 */
200 u32 PAD[133];
201 u32 sr_control; /* corerev >= 15 */
202 u32 sr_status; /* corerev >= 15 */
203 u32 sr_address; /* corerev >= 15 */
204 u32 sr_data; /* corerev >= 15 */
205};
206
207#define SOCRAMREGOFFS(_f) offsetof(struct sbsocramregs, _f)
208
209#define ARMCR4_CAP (0x04)
210#define ARMCR4_BANKIDX (0x40)
211#define ARMCR4_BANKINFO (0x44)
212#define ARMCR4_BANKPDA (0x4C)
213
214#define ARMCR4_TCBBNB_MASK 0xf0
215#define ARMCR4_TCBBNB_SHIFT 4
216#define ARMCR4_TCBANB_MASK 0xf
217#define ARMCR4_TCBANB_SHIFT 0
218
219#define ARMCR4_BSZ_MASK 0x3f
220#define ARMCR4_BSZ_MULT 8192
221
153struct brcmf_core_priv { 222struct brcmf_core_priv {
154 struct brcmf_core pub; 223 struct brcmf_core pub;
155 u32 wrapbase; 224 u32 wrapbase;
@@ -419,13 +488,13 @@ static struct brcmf_core *brcmf_chip_add_core(struct brcmf_chip_priv *ci,
419 return &core->pub; 488 return &core->pub;
420} 489}
421 490
422#ifdef DEBUG
423/* safety check for chipinfo */ 491/* safety check for chipinfo */
424static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci) 492static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
425{ 493{
426 struct brcmf_core_priv *core; 494 struct brcmf_core_priv *core;
427 bool need_socram = false; 495 bool need_socram = false;
428 bool has_socram = false; 496 bool has_socram = false;
497 bool cpu_found = false;
429 int idx = 1; 498 int idx = 1;
430 499
431 list_for_each_entry(core, &ci->cores, list) { 500 list_for_each_entry(core, &ci->cores, list) {
@@ -435,22 +504,24 @@ static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
435 504
436 switch (core->pub.id) { 505 switch (core->pub.id) {
437 case BCMA_CORE_ARM_CM3: 506 case BCMA_CORE_ARM_CM3:
507 cpu_found = true;
438 need_socram = true; 508 need_socram = true;
439 break; 509 break;
440 case BCMA_CORE_INTERNAL_MEM: 510 case BCMA_CORE_INTERNAL_MEM:
441 has_socram = true; 511 has_socram = true;
442 break; 512 break;
443 case BCMA_CORE_ARM_CR4: 513 case BCMA_CORE_ARM_CR4:
444 if (ci->pub.rambase == 0) { 514 cpu_found = true;
445 brcmf_err("RAM base not provided with ARM CR4 core\n");
446 return -ENOMEM;
447 }
448 break; 515 break;
449 default: 516 default:
450 break; 517 break;
451 } 518 }
452 } 519 }
453 520
521 if (!cpu_found) {
522 brcmf_err("CPU core not detected\n");
523 return -ENXIO;
524 }
454 /* check RAM core presence for ARM CM3 core */ 525 /* check RAM core presence for ARM CM3 core */
455 if (need_socram && !has_socram) { 526 if (need_socram && !has_socram) {
456 brcmf_err("RAM core not provided with ARM CM3 core\n"); 527 brcmf_err("RAM core not provided with ARM CM3 core\n");
@@ -458,56 +529,164 @@ static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
458 } 529 }
459 return 0; 530 return 0;
460} 531}
461#else /* DEBUG */ 532
462static inline int brcmf_chip_cores_check(struct brcmf_chip_priv *ci) 533static u32 brcmf_chip_core_read32(struct brcmf_core_priv *core, u16 reg)
463{ 534{
464 return 0; 535 return core->chip->ops->read32(core->chip->ctx, core->pub.base + reg);
465} 536}
466#endif
467 537
468static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci) 538static void brcmf_chip_core_write32(struct brcmf_core_priv *core,
539 u16 reg, u32 val)
469{ 540{
470 switch (ci->pub.chip) { 541 core->chip->ops->write32(core->chip->ctx, core->pub.base + reg, val);
471 case BRCM_CC_4329_CHIP_ID: 542}
472 ci->pub.ramsize = BCM4329_RAMSIZE; 543
473 break; 544static bool brcmf_chip_socram_banksize(struct brcmf_core_priv *core, u8 idx,
474 case BRCM_CC_43143_CHIP_ID: 545 u32 *banksize)
475 ci->pub.ramsize = BCM43143_RAMSIZE; 546{
476 break; 547 u32 bankinfo;
477 case BRCM_CC_43241_CHIP_ID: 548 u32 bankidx = (SOCRAM_MEMTYPE_RAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
478 ci->pub.ramsize = 0x90000; 549
479 break; 550 bankidx |= idx;
480 case BRCM_CC_4330_CHIP_ID: 551 brcmf_chip_core_write32(core, SOCRAMREGOFFS(bankidx), bankidx);
481 ci->pub.ramsize = 0x48000; 552 bankinfo = brcmf_chip_core_read32(core, SOCRAMREGOFFS(bankinfo));
482 break; 553 *banksize = (bankinfo & SOCRAM_BANKINFO_SZMASK) + 1;
554 *banksize *= SOCRAM_BANKINFO_SZBASE;
555 return !!(bankinfo & SOCRAM_BANKINFO_RETNTRAM_MASK);
556}
557
558static void brcmf_chip_socram_ramsize(struct brcmf_core_priv *sr, u32 *ramsize,
559 u32 *srsize)
560{
561 u32 coreinfo;
562 uint nb, banksize, lss;
563 bool retent;
564 int i;
565
566 *ramsize = 0;
567 *srsize = 0;
568
569 if (WARN_ON(sr->pub.rev < 4))
570 return;
571
572 if (!brcmf_chip_iscoreup(&sr->pub))
573 brcmf_chip_resetcore(&sr->pub, 0, 0, 0);
574
575 /* Get info for determining size */
576 coreinfo = brcmf_chip_core_read32(sr, SOCRAMREGOFFS(coreinfo));
577 nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
578
579 if ((sr->pub.rev <= 7) || (sr->pub.rev == 12)) {
580 banksize = (coreinfo & SRCI_SRBSZ_MASK);
581 lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
582 if (lss != 0)
583 nb--;
584 *ramsize = nb * (1 << (banksize + SR_BSZ_BASE));
585 if (lss != 0)
586 *ramsize += (1 << ((lss - 1) + SR_BSZ_BASE));
587 } else {
588 nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
589 for (i = 0; i < nb; i++) {
590 retent = brcmf_chip_socram_banksize(sr, i, &banksize);
591 *ramsize += banksize;
592 if (retent)
593 *srsize += banksize;
594 }
595 }
596
597 /* hardcoded save&restore memory sizes */
598 switch (sr->chip->pub.chip) {
483 case BRCM_CC_4334_CHIP_ID: 599 case BRCM_CC_4334_CHIP_ID:
484 case BRCM_CC_43340_CHIP_ID: 600 if (sr->chip->pub.chiprev < 2)
485 ci->pub.ramsize = 0x80000; 601 *srsize = (32 * 1024);
486 break; 602 break;
487 case BRCM_CC_4335_CHIP_ID: 603 case BRCM_CC_43430_CHIP_ID:
488 ci->pub.ramsize = 0xc0000; 604 /* assume sr for now as we can not check
489 ci->pub.rambase = 0x180000; 605 * firmware sr capability at this point.
606 */
607 *srsize = (64 * 1024);
490 break; 608 break;
491 case BRCM_CC_43362_CHIP_ID: 609 default:
492 ci->pub.ramsize = 0x3c000;
493 break; 610 break;
611 }
612}
613
614/** Return the TCM-RAM size of the ARMCR4 core. */
615static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4)
616{
617 u32 corecap;
618 u32 memsize = 0;
619 u32 nab;
620 u32 nbb;
621 u32 totb;
622 u32 bxinfo;
623 u32 idx;
624
625 corecap = brcmf_chip_core_read32(cr4, ARMCR4_CAP);
626
627 nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT;
628 nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT;
629 totb = nab + nbb;
630
631 for (idx = 0; idx < totb; idx++) {
632 brcmf_chip_core_write32(cr4, ARMCR4_BANKIDX, idx);
633 bxinfo = brcmf_chip_core_read32(cr4, ARMCR4_BANKINFO);
634 memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
635 }
636
637 return memsize;
638}
639
640static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
641{
642 switch (ci->pub.chip) {
643 case BRCM_CC_4345_CHIP_ID:
644 return 0x198000;
645 case BRCM_CC_4335_CHIP_ID:
494 case BRCM_CC_4339_CHIP_ID: 646 case BRCM_CC_4339_CHIP_ID:
495 case BRCM_CC_4354_CHIP_ID: 647 case BRCM_CC_4354_CHIP_ID:
496 case BRCM_CC_4356_CHIP_ID: 648 case BRCM_CC_4356_CHIP_ID:
497 case BRCM_CC_43567_CHIP_ID: 649 case BRCM_CC_43567_CHIP_ID:
498 case BRCM_CC_43569_CHIP_ID: 650 case BRCM_CC_43569_CHIP_ID:
499 case BRCM_CC_43570_CHIP_ID: 651 case BRCM_CC_43570_CHIP_ID:
500 ci->pub.ramsize = 0xc0000;
501 ci->pub.rambase = 0x180000;
502 break;
503 case BRCM_CC_43602_CHIP_ID: 652 case BRCM_CC_43602_CHIP_ID:
504 ci->pub.ramsize = 0xf0000; 653 return 0x180000;
505 ci->pub.rambase = 0x180000;
506 break;
507 default: 654 default:
508 brcmf_err("unknown chip: %s\n", ci->pub.name); 655 brcmf_err("unknown chip: %s\n", ci->pub.name);
509 break; 656 break;
510 } 657 }
658 return 0;
659}
660
661static int brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
662{
663 struct brcmf_core_priv *mem_core;
664 struct brcmf_core *mem;
665
666 mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_ARM_CR4);
667 if (mem) {
668 mem_core = container_of(mem, struct brcmf_core_priv, pub);
669 ci->pub.ramsize = brcmf_chip_tcm_ramsize(mem_core);
670 ci->pub.rambase = brcmf_chip_tcm_rambase(ci);
671 if (!ci->pub.rambase) {
672 brcmf_err("RAM base not provided with ARM CR4 core\n");
673 return -EINVAL;
674 }
675 } else {
676 mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_INTERNAL_MEM);
677 mem_core = container_of(mem, struct brcmf_core_priv, pub);
678 brcmf_chip_socram_ramsize(mem_core, &ci->pub.ramsize,
679 &ci->pub.srsize);
680 }
681 brcmf_dbg(INFO, "RAM: base=0x%x size=%d (0x%x) sr=%d (0x%x)\n",
682 ci->pub.rambase, ci->pub.ramsize, ci->pub.ramsize,
683 ci->pub.srsize, ci->pub.srsize);
684
685 if (!ci->pub.ramsize) {
686 brcmf_err("RAM size is undetermined\n");
687 return -ENOMEM;
688 }
689 return 0;
511} 690}
512 691
513static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr, 692static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr,
@@ -660,6 +839,7 @@ static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
660 struct brcmf_core *core; 839 struct brcmf_core *core;
661 u32 regdata; 840 u32 regdata;
662 u32 socitype; 841 u32 socitype;
842 int ret;
663 843
664 /* Get CC core rev 844 /* Get CC core rev
665 * Chipid is assume to be at offset 0 from SI_ENUM_BASE 845 * Chipid is assume to be at offset 0 from SI_ENUM_BASE
@@ -712,9 +892,13 @@ static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
712 return -ENODEV; 892 return -ENODEV;
713 } 893 }
714 894
715 brcmf_chip_get_raminfo(ci); 895 ret = brcmf_chip_cores_check(ci);
896 if (ret)
897 return ret;
716 898
717 return brcmf_chip_cores_check(ci); 899 /* assure chip is passive for core access */
900 brcmf_chip_set_passive(&ci->pub);
901 return brcmf_chip_get_raminfo(ci);
718} 902}
719 903
720static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id) 904static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
@@ -778,12 +962,6 @@ static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
778 if (chip->ops->setup) 962 if (chip->ops->setup)
779 ret = chip->ops->setup(chip->ctx, pub); 963 ret = chip->ops->setup(chip->ctx, pub);
780 964
781 /*
782 * Make sure any on-chip ARM is off (in case strapping is wrong),
783 * or downloaded code was already running.
784 */
785 brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
786 brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
787 return ret; 965 return ret;
788} 966}
789 967
@@ -799,7 +977,7 @@ struct brcmf_chip *brcmf_chip_attach(void *ctx,
799 err = -EINVAL; 977 err = -EINVAL;
800 if (WARN_ON(!ops->prepare)) 978 if (WARN_ON(!ops->prepare))
801 err = -EINVAL; 979 err = -EINVAL;
802 if (WARN_ON(!ops->exit_dl)) 980 if (WARN_ON(!ops->activate))
803 err = -EINVAL; 981 err = -EINVAL;
804 if (err < 0) 982 if (err < 0)
805 return ERR_PTR(-EINVAL); 983 return ERR_PTR(-EINVAL);
@@ -897,9 +1075,10 @@ void brcmf_chip_resetcore(struct brcmf_core *pub, u32 prereset, u32 reset,
897} 1075}
898 1076
899static void 1077static void
900brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip) 1078brcmf_chip_cm3_set_passive(struct brcmf_chip_priv *chip)
901{ 1079{
902 struct brcmf_core *core; 1080 struct brcmf_core *core;
1081 struct brcmf_core_priv *sr;
903 1082
904 brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3); 1083 brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
905 core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211); 1084 core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
@@ -909,9 +1088,16 @@ brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip)
909 D11_BCMA_IOCTL_PHYCLOCKEN); 1088 D11_BCMA_IOCTL_PHYCLOCKEN);
910 core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM); 1089 core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
911 brcmf_chip_resetcore(core, 0, 0, 0); 1090 brcmf_chip_resetcore(core, 0, 0, 0);
1091
1092 /* disable bank #3 remap for this device */
1093 if (chip->pub.chip == BRCM_CC_43430_CHIP_ID) {
1094 sr = container_of(core, struct brcmf_core_priv, pub);
1095 brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankidx), 3);
1096 brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankpda), 0);
1097 }
912} 1098}
913 1099
914static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip) 1100static bool brcmf_chip_cm3_set_active(struct brcmf_chip_priv *chip)
915{ 1101{
916 struct brcmf_core *core; 1102 struct brcmf_core *core;
917 1103
@@ -921,7 +1107,7 @@ static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
921 return false; 1107 return false;
922 } 1108 }
923 1109
924 chip->ops->exit_dl(chip->ctx, &chip->pub, 0); 1110 chip->ops->activate(chip->ctx, &chip->pub, 0);
925 1111
926 core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3); 1112 core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3);
927 brcmf_chip_resetcore(core, 0, 0, 0); 1113 brcmf_chip_resetcore(core, 0, 0, 0);
@@ -930,7 +1116,7 @@ static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
930} 1116}
931 1117
932static inline void 1118static inline void
933brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip) 1119brcmf_chip_cr4_set_passive(struct brcmf_chip_priv *chip)
934{ 1120{
935 struct brcmf_core *core; 1121 struct brcmf_core *core;
936 1122
@@ -943,11 +1129,11 @@ brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip)
943 D11_BCMA_IOCTL_PHYCLOCKEN); 1129 D11_BCMA_IOCTL_PHYCLOCKEN);
944} 1130}
945 1131
946static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec) 1132static bool brcmf_chip_cr4_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
947{ 1133{
948 struct brcmf_core *core; 1134 struct brcmf_core *core;
949 1135
950 chip->ops->exit_dl(chip->ctx, &chip->pub, rstvec); 1136 chip->ops->activate(chip->ctx, &chip->pub, rstvec);
951 1137
952 /* restore ARM */ 1138 /* restore ARM */
953 core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4); 1139 core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4);
@@ -956,7 +1142,7 @@ static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec)
956 return true; 1142 return true;
957} 1143}
958 1144
959void brcmf_chip_enter_download(struct brcmf_chip *pub) 1145void brcmf_chip_set_passive(struct brcmf_chip *pub)
960{ 1146{
961 struct brcmf_chip_priv *chip; 1147 struct brcmf_chip_priv *chip;
962 struct brcmf_core *arm; 1148 struct brcmf_core *arm;
@@ -966,14 +1152,14 @@ void brcmf_chip_enter_download(struct brcmf_chip *pub)
966 chip = container_of(pub, struct brcmf_chip_priv, pub); 1152 chip = container_of(pub, struct brcmf_chip_priv, pub);
967 arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4); 1153 arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
968 if (arm) { 1154 if (arm) {
969 brcmf_chip_cr4_enterdl(chip); 1155 brcmf_chip_cr4_set_passive(chip);
970 return; 1156 return;
971 } 1157 }
972 1158
973 brcmf_chip_cm3_enterdl(chip); 1159 brcmf_chip_cm3_set_passive(chip);
974} 1160}
975 1161
976bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec) 1162bool brcmf_chip_set_active(struct brcmf_chip *pub, u32 rstvec)
977{ 1163{
978 struct brcmf_chip_priv *chip; 1164 struct brcmf_chip_priv *chip;
979 struct brcmf_core *arm; 1165 struct brcmf_core *arm;
@@ -983,9 +1169,9 @@ bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec)
983 chip = container_of(pub, struct brcmf_chip_priv, pub); 1169 chip = container_of(pub, struct brcmf_chip_priv, pub);
984 arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4); 1170 arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
985 if (arm) 1171 if (arm)
986 return brcmf_chip_cr4_exitdl(chip, rstvec); 1172 return brcmf_chip_cr4_set_active(chip, rstvec);
987 1173
988 return brcmf_chip_cm3_exitdl(chip); 1174 return brcmf_chip_cm3_set_active(chip);
989} 1175}
990 1176
991bool brcmf_chip_sr_capable(struct brcmf_chip *pub) 1177bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
@@ -1016,6 +1202,10 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
1016 addr = CORE_CC_REG(base, chipcontrol_data); 1202 addr = CORE_CC_REG(base, chipcontrol_data);
1017 reg = chip->ops->read32(chip->ctx, addr); 1203 reg = chip->ops->read32(chip->ctx, addr);
1018 return (reg & pmu_cc3_mask) != 0; 1204 return (reg & pmu_cc3_mask) != 0;
1205 case BRCM_CC_43430_CHIP_ID:
1206 addr = CORE_CC_REG(base, sr_control1);
1207 reg = chip->ops->read32(chip->ctx, addr);
1208 return reg != 0;
1019 default: 1209 default:
1020 addr = CORE_CC_REG(base, pmucapabilities_ext); 1210 addr = CORE_CC_REG(base, pmucapabilities_ext);
1021 reg = chip->ops->read32(chip->ctx, addr); 1211 reg = chip->ops->read32(chip->ctx, addr);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
index c32908da90c8..60dcb38fc77a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
@@ -30,7 +30,8 @@
30 * @pmucaps: PMU capabilities. 30 * @pmucaps: PMU capabilities.
31 * @pmurev: PMU revision. 31 * @pmurev: PMU revision.
32 * @rambase: RAM base address (only applicable for ARM CR4 chips). 32 * @rambase: RAM base address (only applicable for ARM CR4 chips).
33 * @ramsize: amount of RAM on chip. 33 * @ramsize: amount of RAM on chip including retention.
34 * @srsize: amount of retention RAM on chip.
34 * @name: string representation of the chip identifier. 35 * @name: string representation of the chip identifier.
35 */ 36 */
36struct brcmf_chip { 37struct brcmf_chip {
@@ -41,6 +42,7 @@ struct brcmf_chip {
41 u32 pmurev; 42 u32 pmurev;
42 u32 rambase; 43 u32 rambase;
43 u32 ramsize; 44 u32 ramsize;
45 u32 srsize;
44 char name[8]; 46 char name[8];
45}; 47};
46 48
@@ -64,7 +66,7 @@ struct brcmf_core {
64 * @write32: write 32-bit value over bus. 66 * @write32: write 32-bit value over bus.
65 * @prepare: prepare bus for core configuration. 67 * @prepare: prepare bus for core configuration.
66 * @setup: bus-specific core setup. 68 * @setup: bus-specific core setup.
67 * @exit_dl: exit download state. 69 * @active: chip becomes active.
68 * The callback should use the provided @rstvec when non-zero. 70 * The callback should use the provided @rstvec when non-zero.
69 */ 71 */
70struct brcmf_buscore_ops { 72struct brcmf_buscore_ops {
@@ -72,7 +74,7 @@ struct brcmf_buscore_ops {
72 void (*write32)(void *ctx, u32 addr, u32 value); 74 void (*write32)(void *ctx, u32 addr, u32 value);
73 int (*prepare)(void *ctx); 75 int (*prepare)(void *ctx);
74 int (*setup)(void *ctx, struct brcmf_chip *chip); 76 int (*setup)(void *ctx, struct brcmf_chip *chip);
75 void (*exit_dl)(void *ctx, struct brcmf_chip *chip, u32 rstvec); 77 void (*activate)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
76}; 78};
77 79
78struct brcmf_chip *brcmf_chip_attach(void *ctx, 80struct brcmf_chip *brcmf_chip_attach(void *ctx,
@@ -84,8 +86,8 @@ bool brcmf_chip_iscoreup(struct brcmf_core *core);
84void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset); 86void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset);
85void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset, 87void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
86 u32 postreset); 88 u32 postreset);
87void brcmf_chip_enter_download(struct brcmf_chip *ci); 89void brcmf_chip_set_passive(struct brcmf_chip *ci);
88bool brcmf_chip_exit_download(struct brcmf_chip *ci, u32 rstvec); 90bool brcmf_chip_set_active(struct brcmf_chip *ci, u32 rstvec);
89bool brcmf_chip_sr_capable(struct brcmf_chip *pub); 91bool brcmf_chip_sr_capable(struct brcmf_chip *pub);
90 92
91#endif /* BRCMF_AXIDMP_H */ 93#endif /* BRCMF_AXIDMP_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
index defb7a44e0bc..7748a1ccf14f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
@@ -126,7 +126,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
126 brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan"); 126 brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
127 if (drvr->bus_if->wowl_supported) 127 if (drvr->bus_if->wowl_supported)
128 brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl"); 128 brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
129 brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0); 129 if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID)
130 brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
130 131
131 /* set chip related quirks */ 132 /* set chip related quirks */
132 switch (drvr->bus_if->chip) { 133 switch (drvr->bus_if->chip) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 6262612dec45..4ec9811f49c8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -481,10 +481,9 @@ static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
481 481
482static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf) 482static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
483{ 483{
484 if (waitqueue_active(&msgbuf->ioctl_resp_wait)) { 484 msgbuf->ctl_completed = true;
485 msgbuf->ctl_completed = true; 485 if (waitqueue_active(&msgbuf->ioctl_resp_wait))
486 wake_up(&msgbuf->ioctl_resp_wait); 486 wake_up(&msgbuf->ioctl_resp_wait);
487 }
488} 487}
489 488
490 489
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
index 77a51b8c1e12..3d513e407e3d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
@@ -17,11 +17,11 @@
17 17
18#ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF 18#ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF
19 19
20#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 20 20#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 64
21#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 256 21#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 512
22#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM 20 22#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM 64
23#define BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM 1024 23#define BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM 1024
24#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 256 24#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 512
25#define BRCMF_H2D_TXFLOWRING_MAX_ITEM 512 25#define BRCMF_H2D_TXFLOWRING_MAX_ITEM 512
26 26
27#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE 40 27#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE 40
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
index 61c053a729be..1831ecd0813e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
@@ -47,8 +47,6 @@ enum brcmf_pcie_state {
47 47
48#define BRCMF_PCIE_43602_FW_NAME "brcm/brcmfmac43602-pcie.bin" 48#define BRCMF_PCIE_43602_FW_NAME "brcm/brcmfmac43602-pcie.bin"
49#define BRCMF_PCIE_43602_NVRAM_NAME "brcm/brcmfmac43602-pcie.txt" 49#define BRCMF_PCIE_43602_NVRAM_NAME "brcm/brcmfmac43602-pcie.txt"
50#define BRCMF_PCIE_4354_FW_NAME "brcm/brcmfmac4354-pcie.bin"
51#define BRCMF_PCIE_4354_NVRAM_NAME "brcm/brcmfmac4354-pcie.txt"
52#define BRCMF_PCIE_4356_FW_NAME "brcm/brcmfmac4356-pcie.bin" 50#define BRCMF_PCIE_4356_FW_NAME "brcm/brcmfmac4356-pcie.bin"
53#define BRCMF_PCIE_4356_NVRAM_NAME "brcm/brcmfmac4356-pcie.txt" 51#define BRCMF_PCIE_4356_NVRAM_NAME "brcm/brcmfmac4356-pcie.txt"
54#define BRCMF_PCIE_43570_FW_NAME "brcm/brcmfmac43570-pcie.bin" 52#define BRCMF_PCIE_43570_FW_NAME "brcm/brcmfmac43570-pcie.bin"
@@ -187,8 +185,8 @@ enum brcmf_pcie_state {
187 185
188MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME); 186MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME);
189MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME); 187MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME);
190MODULE_FIRMWARE(BRCMF_PCIE_4354_FW_NAME); 188MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
191MODULE_FIRMWARE(BRCMF_PCIE_4354_NVRAM_NAME); 189MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
192MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME); 190MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
193MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME); 191MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
194 192
@@ -509,8 +507,6 @@ static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
509 507
510static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo) 508static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
511{ 509{
512 brcmf_chip_enter_download(devinfo->ci);
513
514 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) { 510 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
515 brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4); 511 brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
516 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX, 512 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
@@ -536,7 +532,7 @@ static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
536 brcmf_chip_resetcore(core, 0, 0, 0); 532 brcmf_chip_resetcore(core, 0, 0, 0);
537 } 533 }
538 534
539 return !brcmf_chip_exit_download(devinfo->ci, resetintr); 535 return !brcmf_chip_set_active(devinfo->ci, resetintr);
540} 536}
541 537
542 538
@@ -653,10 +649,9 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
653 console->log_str[console->log_idx] = ch; 649 console->log_str[console->log_idx] = ch;
654 console->log_idx++; 650 console->log_idx++;
655 } 651 }
656
657 if (ch == '\n') { 652 if (ch == '\n') {
658 console->log_str[console->log_idx] = 0; 653 console->log_str[console->log_idx] = 0;
659 brcmf_dbg(PCIE, "CONSOLE: %s\n", console->log_str); 654 brcmf_dbg(PCIE, "CONSOLE: %s", console->log_str);
660 console->log_idx = 0; 655 console->log_idx = 0;
661 } 656 }
662 } 657 }
@@ -1328,10 +1323,6 @@ static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
1328 fw_name = BRCMF_PCIE_43602_FW_NAME; 1323 fw_name = BRCMF_PCIE_43602_FW_NAME;
1329 nvram_name = BRCMF_PCIE_43602_NVRAM_NAME; 1324 nvram_name = BRCMF_PCIE_43602_NVRAM_NAME;
1330 break; 1325 break;
1331 case BRCM_CC_4354_CHIP_ID:
1332 fw_name = BRCMF_PCIE_4354_FW_NAME;
1333 nvram_name = BRCMF_PCIE_4354_NVRAM_NAME;
1334 break;
1335 case BRCM_CC_4356_CHIP_ID: 1326 case BRCM_CC_4356_CHIP_ID:
1336 fw_name = BRCMF_PCIE_4356_FW_NAME; 1327 fw_name = BRCMF_PCIE_4356_FW_NAME;
1337 nvram_name = BRCMF_PCIE_4356_NVRAM_NAME; 1328 nvram_name = BRCMF_PCIE_4356_NVRAM_NAME;
@@ -1566,8 +1557,8 @@ static int brcmf_pcie_buscoreprep(void *ctx)
1566} 1557}
1567 1558
1568 1559
1569static void brcmf_pcie_buscore_exitdl(void *ctx, struct brcmf_chip *chip, 1560static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
1570 u32 rstvec) 1561 u32 rstvec)
1571{ 1562{
1572 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx; 1563 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1573 1564
@@ -1577,7 +1568,7 @@ static void brcmf_pcie_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
1577 1568
1578static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = { 1569static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
1579 .prepare = brcmf_pcie_buscoreprep, 1570 .prepare = brcmf_pcie_buscoreprep,
1580 .exit_dl = brcmf_pcie_buscore_exitdl, 1571 .activate = brcmf_pcie_buscore_activate,
1581 .read32 = brcmf_pcie_buscore_read32, 1572 .read32 = brcmf_pcie_buscore_read32,
1582 .write32 = brcmf_pcie_buscore_write32, 1573 .write32 = brcmf_pcie_buscore_write32,
1583}; 1574};
@@ -1856,7 +1847,6 @@ cleanup:
1856 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 } 1847 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
1857 1848
1858static struct pci_device_id brcmf_pcie_devid_table[] = { 1849static struct pci_device_id brcmf_pcie_devid_table[] = {
1859 BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_DEVICE_ID),
1860 BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID), 1850 BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
1861 BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID), 1851 BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
1862 BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID), 1852 BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index 257ee70feb5b..ab0c89833013 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -432,8 +432,6 @@ struct brcmf_sdio {
432 struct brcmf_sdio_dev *sdiodev; /* sdio device handler */ 432 struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
433 struct brcmf_chip *ci; /* Chip info struct */ 433 struct brcmf_chip *ci; /* Chip info struct */
434 434
435 u32 ramsize; /* Size of RAM in SOCRAM (bytes) */
436
437 u32 hostintmask; /* Copy of Host Interrupt Mask */ 435 u32 hostintmask; /* Copy of Host Interrupt Mask */
438 atomic_t intstatus; /* Intstatus bits (events) pending */ 436 atomic_t intstatus; /* Intstatus bits (events) pending */
439 atomic_t fcstate; /* State of dongle flow-control */ 437 atomic_t fcstate; /* State of dongle flow-control */
@@ -485,10 +483,9 @@ struct brcmf_sdio {
485#endif /* DEBUG */ 483#endif /* DEBUG */
486 484
487 uint clkstate; /* State of sd and backplane clock(s) */ 485 uint clkstate; /* State of sd and backplane clock(s) */
488 bool activity; /* Activity flag for clock down */
489 s32 idletime; /* Control for activity timeout */ 486 s32 idletime; /* Control for activity timeout */
490 s32 idlecount; /* Activity timeout counter */ 487 s32 idlecount; /* Activity timeout counter */
491 s32 idleclock; /* How to set bus driver when idle */ 488 s32 idleclock; /* How to set bus driver when idle */
492 bool rxflow_mode; /* Rx flow control mode */ 489 bool rxflow_mode; /* Rx flow control mode */
493 bool rxflow; /* Is rx flow control on */ 490 bool rxflow; /* Is rx flow control on */
494 bool alp_only; /* Don't use HT clock (ALP only) */ 491 bool alp_only; /* Don't use HT clock (ALP only) */
@@ -510,7 +507,8 @@ struct brcmf_sdio {
510 507
511 struct workqueue_struct *brcmf_wq; 508 struct workqueue_struct *brcmf_wq;
512 struct work_struct datawork; 509 struct work_struct datawork;
513 atomic_t dpc_tskcnt; 510 bool dpc_triggered;
511 bool dpc_running;
514 512
515 bool txoff; /* Transmit flow-controlled */ 513 bool txoff; /* Transmit flow-controlled */
516 struct brcmf_sdio_count sdcnt; 514 struct brcmf_sdio_count sdcnt;
@@ -617,6 +615,10 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
617#define BCM43362_NVRAM_NAME "brcm/brcmfmac43362-sdio.txt" 615#define BCM43362_NVRAM_NAME "brcm/brcmfmac43362-sdio.txt"
618#define BCM4339_FIRMWARE_NAME "brcm/brcmfmac4339-sdio.bin" 616#define BCM4339_FIRMWARE_NAME "brcm/brcmfmac4339-sdio.bin"
619#define BCM4339_NVRAM_NAME "brcm/brcmfmac4339-sdio.txt" 617#define BCM4339_NVRAM_NAME "brcm/brcmfmac4339-sdio.txt"
618#define BCM43430_FIRMWARE_NAME "brcm/brcmfmac43430-sdio.bin"
619#define BCM43430_NVRAM_NAME "brcm/brcmfmac43430-sdio.txt"
620#define BCM43455_FIRMWARE_NAME "brcm/brcmfmac43455-sdio.bin"
621#define BCM43455_NVRAM_NAME "brcm/brcmfmac43455-sdio.txt"
620#define BCM4354_FIRMWARE_NAME "brcm/brcmfmac4354-sdio.bin" 622#define BCM4354_FIRMWARE_NAME "brcm/brcmfmac4354-sdio.bin"
621#define BCM4354_NVRAM_NAME "brcm/brcmfmac4354-sdio.txt" 623#define BCM4354_NVRAM_NAME "brcm/brcmfmac4354-sdio.txt"
622 624
@@ -640,6 +642,10 @@ MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
640MODULE_FIRMWARE(BCM43362_NVRAM_NAME); 642MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
641MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME); 643MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
642MODULE_FIRMWARE(BCM4339_NVRAM_NAME); 644MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
645MODULE_FIRMWARE(BCM43430_FIRMWARE_NAME);
646MODULE_FIRMWARE(BCM43430_NVRAM_NAME);
647MODULE_FIRMWARE(BCM43455_FIRMWARE_NAME);
648MODULE_FIRMWARE(BCM43455_NVRAM_NAME);
643MODULE_FIRMWARE(BCM4354_FIRMWARE_NAME); 649MODULE_FIRMWARE(BCM4354_FIRMWARE_NAME);
644MODULE_FIRMWARE(BCM4354_NVRAM_NAME); 650MODULE_FIRMWARE(BCM4354_NVRAM_NAME);
645 651
@@ -669,6 +675,8 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
669 { BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) }, 675 { BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
670 { BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) }, 676 { BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
671 { BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) }, 677 { BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
678 { BRCM_CC_43430_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43430) },
679 { BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43455) },
672 { BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) } 680 { BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
673}; 681};
674 682
@@ -959,13 +967,8 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
959 brcmf_dbg(SDIO, "Enter\n"); 967 brcmf_dbg(SDIO, "Enter\n");
960 968
961 /* Early exit if we're already there */ 969 /* Early exit if we're already there */
962 if (bus->clkstate == target) { 970 if (bus->clkstate == target)
963 if (target == CLK_AVAIL) {
964 brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
965 bus->activity = true;
966 }
967 return 0; 971 return 0;
968 }
969 972
970 switch (target) { 973 switch (target) {
971 case CLK_AVAIL: 974 case CLK_AVAIL:
@@ -974,8 +977,6 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
974 brcmf_sdio_sdclk(bus, true); 977 brcmf_sdio_sdclk(bus, true);
975 /* Now request HT Avail on the backplane */ 978 /* Now request HT Avail on the backplane */
976 brcmf_sdio_htclk(bus, true, pendok); 979 brcmf_sdio_htclk(bus, true, pendok);
977 brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
978 bus->activity = true;
979 break; 980 break;
980 981
981 case CLK_SDONLY: 982 case CLK_SDONLY:
@@ -987,7 +988,6 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
987 else 988 else
988 brcmf_err("request for %d -> %d\n", 989 brcmf_err("request for %d -> %d\n",
989 bus->clkstate, target); 990 bus->clkstate, target);
990 brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
991 break; 991 break;
992 992
993 case CLK_NONE: 993 case CLK_NONE:
@@ -996,7 +996,6 @@ static int brcmf_sdio_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
996 brcmf_sdio_htclk(bus, false, false); 996 brcmf_sdio_htclk(bus, false, false);
997 /* Now remove the SD clock */ 997 /* Now remove the SD clock */
998 brcmf_sdio_sdclk(bus, false); 998 brcmf_sdio_sdclk(bus, false);
999 brcmf_sdio_wd_timer(bus, 0);
1000 break; 999 break;
1001 } 1000 }
1002#ifdef DEBUG 1001#ifdef DEBUG
@@ -1024,17 +1023,6 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
1024 1023
1025 /* Going to sleep */ 1024 /* Going to sleep */
1026 if (sleep) { 1025 if (sleep) {
1027 /* Don't sleep if something is pending */
1028 if (atomic_read(&bus->intstatus) ||
1029 atomic_read(&bus->ipend) > 0 ||
1030 bus->ctrl_frame_stat ||
1031 (!atomic_read(&bus->fcstate) &&
1032 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
1033 data_ok(bus))) {
1034 err = -EBUSY;
1035 goto done;
1036 }
1037
1038 clkcsr = brcmf_sdiod_regrb(bus->sdiodev, 1026 clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
1039 SBSDIO_FUNC1_CHIPCLKCSR, 1027 SBSDIO_FUNC1_CHIPCLKCSR,
1040 &err); 1028 &err);
@@ -1045,11 +1033,7 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
1045 SBSDIO_ALP_AVAIL_REQ, &err); 1033 SBSDIO_ALP_AVAIL_REQ, &err);
1046 } 1034 }
1047 err = brcmf_sdio_kso_control(bus, false); 1035 err = brcmf_sdio_kso_control(bus, false);
1048 /* disable watchdog */
1049 if (!err)
1050 brcmf_sdio_wd_timer(bus, 0);
1051 } else { 1036 } else {
1052 bus->idlecount = 0;
1053 err = brcmf_sdio_kso_control(bus, true); 1037 err = brcmf_sdio_kso_control(bus, true);
1054 } 1038 }
1055 if (err) { 1039 if (err) {
@@ -1066,6 +1050,7 @@ end:
1066 brcmf_sdio_clkctl(bus, CLK_NONE, pendok); 1050 brcmf_sdio_clkctl(bus, CLK_NONE, pendok);
1067 } else { 1051 } else {
1068 brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok); 1052 brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
1053 brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
1069 } 1054 }
1070 bus->sleeping = sleep; 1055 bus->sleeping = sleep;
1071 brcmf_dbg(SDIO, "new state %s\n", 1056 brcmf_dbg(SDIO, "new state %s\n",
@@ -1085,44 +1070,47 @@ static inline bool brcmf_sdio_valid_shared_address(u32 addr)
1085static int brcmf_sdio_readshared(struct brcmf_sdio *bus, 1070static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
1086 struct sdpcm_shared *sh) 1071 struct sdpcm_shared *sh)
1087{ 1072{
1088 u32 addr; 1073 u32 addr = 0;
1089 int rv; 1074 int rv;
1090 u32 shaddr = 0; 1075 u32 shaddr = 0;
1091 struct sdpcm_shared_le sh_le; 1076 struct sdpcm_shared_le sh_le;
1092 __le32 addr_le; 1077 __le32 addr_le;
1093 1078
1094 shaddr = bus->ci->rambase + bus->ramsize - 4; 1079 sdio_claim_host(bus->sdiodev->func[1]);
1080 brcmf_sdio_bus_sleep(bus, false, false);
1095 1081
1096 /* 1082 /*
1097 * Read last word in socram to determine 1083 * Read last word in socram to determine
1098 * address of sdpcm_shared structure 1084 * address of sdpcm_shared structure
1099 */ 1085 */
1100 sdio_claim_host(bus->sdiodev->func[1]); 1086 shaddr = bus->ci->rambase + bus->ci->ramsize - 4;
1101 brcmf_sdio_bus_sleep(bus, false, false); 1087 if (!bus->ci->rambase && brcmf_chip_sr_capable(bus->ci))
1102 rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4); 1088 shaddr -= bus->ci->srsize;
1103 sdio_release_host(bus->sdiodev->func[1]); 1089 rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr,
1090 (u8 *)&addr_le, 4);
1104 if (rv < 0) 1091 if (rv < 0)
1105 return rv; 1092 goto fail;
1106
1107 addr = le32_to_cpu(addr_le);
1108
1109 brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
1110 1093
1111 /* 1094 /*
1112 * Check if addr is valid. 1095 * Check if addr is valid.
1113 * NVRAM length at the end of memory should have been overwritten. 1096 * NVRAM length at the end of memory should have been overwritten.
1114 */ 1097 */
1098 addr = le32_to_cpu(addr_le);
1115 if (!brcmf_sdio_valid_shared_address(addr)) { 1099 if (!brcmf_sdio_valid_shared_address(addr)) {
1116 brcmf_err("invalid sdpcm_shared address 0x%08X\n", 1100 brcmf_err("invalid sdpcm_shared address 0x%08X\n", addr);
1117 addr); 1101 rv = -EINVAL;
1118 return -EINVAL; 1102 goto fail;
1119 } 1103 }
1120 1104
1105 brcmf_dbg(INFO, "sdpcm_shared address 0x%08X\n", addr);
1106
1121 /* Read hndrte_shared structure */ 1107 /* Read hndrte_shared structure */
1122 rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le, 1108 rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
1123 sizeof(struct sdpcm_shared_le)); 1109 sizeof(struct sdpcm_shared_le));
1124 if (rv < 0) 1110 if (rv < 0)
1125 return rv; 1111 goto fail;
1112
1113 sdio_release_host(bus->sdiodev->func[1]);
1126 1114
1127 /* Endianness */ 1115 /* Endianness */
1128 sh->flags = le32_to_cpu(sh_le.flags); 1116 sh->flags = le32_to_cpu(sh_le.flags);
@@ -1139,8 +1127,13 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
1139 sh->flags & SDPCM_SHARED_VERSION_MASK); 1127 sh->flags & SDPCM_SHARED_VERSION_MASK);
1140 return -EPROTO; 1128 return -EPROTO;
1141 } 1129 }
1142
1143 return 0; 1130 return 0;
1131
1132fail:
1133 brcmf_err("unable to obtain sdpcm_shared info: rv=%d (addr=0x%x)\n",
1134 rv, addr);
1135 sdio_release_host(bus->sdiodev->func[1]);
1136 return rv;
1144} 1137}
1145 1138
1146static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus) 1139static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
@@ -2721,11 +2714,14 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
2721 if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) && 2714 if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
2722 data_ok(bus)) { 2715 data_ok(bus)) {
2723 sdio_claim_host(bus->sdiodev->func[1]); 2716 sdio_claim_host(bus->sdiodev->func[1]);
2724 err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf, 2717 if (bus->ctrl_frame_stat) {
2725 bus->ctrl_frame_len); 2718 err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf,
2719 bus->ctrl_frame_len);
2720 bus->ctrl_frame_err = err;
2721 wmb();
2722 bus->ctrl_frame_stat = false;
2723 }
2726 sdio_release_host(bus->sdiodev->func[1]); 2724 sdio_release_host(bus->sdiodev->func[1]);
2727 bus->ctrl_frame_err = err;
2728 bus->ctrl_frame_stat = false;
2729 brcmf_sdio_wait_event_wakeup(bus); 2725 brcmf_sdio_wait_event_wakeup(bus);
2730 } 2726 }
2731 /* Send queued frames (limit 1 if rx may still be pending) */ 2727 /* Send queued frames (limit 1 if rx may still be pending) */
@@ -2740,12 +2736,22 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
2740 if ((bus->sdiodev->state != BRCMF_SDIOD_DATA) || (err != 0)) { 2736 if ((bus->sdiodev->state != BRCMF_SDIOD_DATA) || (err != 0)) {
2741 brcmf_err("failed backplane access over SDIO, halting operation\n"); 2737 brcmf_err("failed backplane access over SDIO, halting operation\n");
2742 atomic_set(&bus->intstatus, 0); 2738 atomic_set(&bus->intstatus, 0);
2739 if (bus->ctrl_frame_stat) {
2740 sdio_claim_host(bus->sdiodev->func[1]);
2741 if (bus->ctrl_frame_stat) {
2742 bus->ctrl_frame_err = -ENODEV;
2743 wmb();
2744 bus->ctrl_frame_stat = false;
2745 brcmf_sdio_wait_event_wakeup(bus);
2746 }
2747 sdio_release_host(bus->sdiodev->func[1]);
2748 }
2743 } else if (atomic_read(&bus->intstatus) || 2749 } else if (atomic_read(&bus->intstatus) ||
2744 atomic_read(&bus->ipend) > 0 || 2750 atomic_read(&bus->ipend) > 0 ||
2745 (!atomic_read(&bus->fcstate) && 2751 (!atomic_read(&bus->fcstate) &&
2746 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && 2752 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
2747 data_ok(bus))) { 2753 data_ok(bus))) {
2748 atomic_inc(&bus->dpc_tskcnt); 2754 bus->dpc_triggered = true;
2749 } 2755 }
2750} 2756}
2751 2757
@@ -2941,20 +2947,27 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2941 /* Send from dpc */ 2947 /* Send from dpc */
2942 bus->ctrl_frame_buf = msg; 2948 bus->ctrl_frame_buf = msg;
2943 bus->ctrl_frame_len = msglen; 2949 bus->ctrl_frame_len = msglen;
2950 wmb();
2944 bus->ctrl_frame_stat = true; 2951 bus->ctrl_frame_stat = true;
2945 2952
2946 brcmf_sdio_trigger_dpc(bus); 2953 brcmf_sdio_trigger_dpc(bus);
2947 wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat, 2954 wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat,
2948 msecs_to_jiffies(CTL_DONE_TIMEOUT)); 2955 msecs_to_jiffies(CTL_DONE_TIMEOUT));
2949 2956 ret = 0;
2950 if (!bus->ctrl_frame_stat) { 2957 if (bus->ctrl_frame_stat) {
2958 sdio_claim_host(bus->sdiodev->func[1]);
2959 if (bus->ctrl_frame_stat) {
2960 brcmf_dbg(SDIO, "ctrl_frame timeout\n");
2961 bus->ctrl_frame_stat = false;
2962 ret = -ETIMEDOUT;
2963 }
2964 sdio_release_host(bus->sdiodev->func[1]);
2965 }
2966 if (!ret) {
2951 brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n", 2967 brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n",
2952 bus->ctrl_frame_err); 2968 bus->ctrl_frame_err);
2969 rmb();
2953 ret = bus->ctrl_frame_err; 2970 ret = bus->ctrl_frame_err;
2954 } else {
2955 brcmf_dbg(SDIO, "ctrl_frame timeout\n");
2956 bus->ctrl_frame_stat = false;
2957 ret = -ETIMEDOUT;
2958 } 2971 }
2959 2972
2960 if (ret) 2973 if (ret)
@@ -3358,9 +3371,6 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
3358 sdio_claim_host(bus->sdiodev->func[1]); 3371 sdio_claim_host(bus->sdiodev->func[1]);
3359 brcmf_sdio_clkctl(bus, CLK_AVAIL, false); 3372 brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
3360 3373
3361 /* Keep arm in reset */
3362 brcmf_chip_enter_download(bus->ci);
3363
3364 rstvec = get_unaligned_le32(fw->data); 3374 rstvec = get_unaligned_le32(fw->data);
3365 brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec); 3375 brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
3366 3376
@@ -3380,7 +3390,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
3380 } 3390 }
3381 3391
3382 /* Take arm out of reset */ 3392 /* Take arm out of reset */
3383 if (!brcmf_chip_exit_download(bus->ci, rstvec)) { 3393 if (!brcmf_chip_set_active(bus->ci, rstvec)) {
3384 brcmf_err("error getting out of ARM core reset\n"); 3394 brcmf_err("error getting out of ARM core reset\n");
3385 goto err; 3395 goto err;
3386 } 3396 }
@@ -3525,8 +3535,8 @@ done:
3525 3535
3526void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus) 3536void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus)
3527{ 3537{
3528 if (atomic_read(&bus->dpc_tskcnt) == 0) { 3538 if (!bus->dpc_triggered) {
3529 atomic_inc(&bus->dpc_tskcnt); 3539 bus->dpc_triggered = true;
3530 queue_work(bus->brcmf_wq, &bus->datawork); 3540 queue_work(bus->brcmf_wq, &bus->datawork);
3531 } 3541 }
3532} 3542}
@@ -3557,11 +3567,11 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
3557 if (!bus->intr) 3567 if (!bus->intr)
3558 brcmf_err("isr w/o interrupt configured!\n"); 3568 brcmf_err("isr w/o interrupt configured!\n");
3559 3569
3560 atomic_inc(&bus->dpc_tskcnt); 3570 bus->dpc_triggered = true;
3561 queue_work(bus->brcmf_wq, &bus->datawork); 3571 queue_work(bus->brcmf_wq, &bus->datawork);
3562} 3572}
3563 3573
3564static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus) 3574static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
3565{ 3575{
3566 brcmf_dbg(TIMER, "Enter\n"); 3576 brcmf_dbg(TIMER, "Enter\n");
3567 3577
@@ -3577,7 +3587,7 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
3577 if (!bus->intr || 3587 if (!bus->intr ||
3578 (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) { 3588 (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
3579 3589
3580 if (atomic_read(&bus->dpc_tskcnt) == 0) { 3590 if (!bus->dpc_triggered) {
3581 u8 devpend; 3591 u8 devpend;
3582 3592
3583 sdio_claim_host(bus->sdiodev->func[1]); 3593 sdio_claim_host(bus->sdiodev->func[1]);
@@ -3595,7 +3605,7 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
3595 bus->sdcnt.pollcnt++; 3605 bus->sdcnt.pollcnt++;
3596 atomic_set(&bus->ipend, 1); 3606 atomic_set(&bus->ipend, 1);
3597 3607
3598 atomic_inc(&bus->dpc_tskcnt); 3608 bus->dpc_triggered = true;
3599 queue_work(bus->brcmf_wq, &bus->datawork); 3609 queue_work(bus->brcmf_wq, &bus->datawork);
3600 } 3610 }
3601 } 3611 }
@@ -3622,22 +3632,25 @@ static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
3622#endif /* DEBUG */ 3632#endif /* DEBUG */
3623 3633
3624 /* On idle timeout clear activity flag and/or turn off clock */ 3634 /* On idle timeout clear activity flag and/or turn off clock */
3625 if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) { 3635 if (!bus->dpc_triggered) {
3626 if (++bus->idlecount >= bus->idletime) { 3636 rmb();
3627 bus->idlecount = 0; 3637 if ((!bus->dpc_running) && (bus->idletime > 0) &&
3628 if (bus->activity) { 3638 (bus->clkstate == CLK_AVAIL)) {
3629 bus->activity = false; 3639 bus->idlecount++;
3630 brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS); 3640 if (bus->idlecount > bus->idletime) {
3631 } else {
3632 brcmf_dbg(SDIO, "idle\n"); 3641 brcmf_dbg(SDIO, "idle\n");
3633 sdio_claim_host(bus->sdiodev->func[1]); 3642 sdio_claim_host(bus->sdiodev->func[1]);
3643 brcmf_sdio_wd_timer(bus, 0);
3644 bus->idlecount = 0;
3634 brcmf_sdio_bus_sleep(bus, true, false); 3645 brcmf_sdio_bus_sleep(bus, true, false);
3635 sdio_release_host(bus->sdiodev->func[1]); 3646 sdio_release_host(bus->sdiodev->func[1]);
3636 } 3647 }
3648 } else {
3649 bus->idlecount = 0;
3637 } 3650 }
3651 } else {
3652 bus->idlecount = 0;
3638 } 3653 }
3639
3640 return (atomic_read(&bus->ipend) > 0);
3641} 3654}
3642 3655
3643static void brcmf_sdio_dataworker(struct work_struct *work) 3656static void brcmf_sdio_dataworker(struct work_struct *work)
@@ -3645,10 +3658,14 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
3645 struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio, 3658 struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
3646 datawork); 3659 datawork);
3647 3660
3648 while (atomic_read(&bus->dpc_tskcnt)) { 3661 bus->dpc_running = true;
3649 atomic_set(&bus->dpc_tskcnt, 0); 3662 wmb();
3663 while (ACCESS_ONCE(bus->dpc_triggered)) {
3664 bus->dpc_triggered = false;
3650 brcmf_sdio_dpc(bus); 3665 brcmf_sdio_dpc(bus);
3666 bus->idlecount = 0;
3651 } 3667 }
3668 bus->dpc_running = false;
3652 if (brcmf_sdiod_freezing(bus->sdiodev)) { 3669 if (brcmf_sdiod_freezing(bus->sdiodev)) {
3653 brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DOWN); 3670 brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DOWN);
3654 brcmf_sdiod_try_freeze(bus->sdiodev); 3671 brcmf_sdiod_try_freeze(bus->sdiodev);
@@ -3771,8 +3788,8 @@ static int brcmf_sdio_buscoreprep(void *ctx)
3771 return 0; 3788 return 0;
3772} 3789}
3773 3790
3774static void brcmf_sdio_buscore_exitdl(void *ctx, struct brcmf_chip *chip, 3791static void brcmf_sdio_buscore_activate(void *ctx, struct brcmf_chip *chip,
3775 u32 rstvec) 3792 u32 rstvec)
3776{ 3793{
3777 struct brcmf_sdio_dev *sdiodev = ctx; 3794 struct brcmf_sdio_dev *sdiodev = ctx;
3778 struct brcmf_core *core; 3795 struct brcmf_core *core;
@@ -3815,7 +3832,7 @@ static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
3815 3832
3816static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = { 3833static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
3817 .prepare = brcmf_sdio_buscoreprep, 3834 .prepare = brcmf_sdio_buscoreprep,
3818 .exit_dl = brcmf_sdio_buscore_exitdl, 3835 .activate = brcmf_sdio_buscore_activate,
3819 .read32 = brcmf_sdio_buscore_read32, 3836 .read32 = brcmf_sdio_buscore_read32,
3820 .write32 = brcmf_sdio_buscore_write32, 3837 .write32 = brcmf_sdio_buscore_write32,
3821}; 3838};
@@ -3869,13 +3886,6 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
3869 drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH; 3886 drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
3870 brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength); 3887 brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
3871 3888
3872 /* Get info on the SOCRAM cores... */
3873 bus->ramsize = bus->ci->ramsize;
3874 if (!(bus->ramsize)) {
3875 brcmf_err("failed to find SOCRAM memory!\n");
3876 goto fail;
3877 }
3878
3879 /* Set card control so an SDIO card reset does a WLAN backplane reset */ 3889 /* Set card control so an SDIO card reset does a WLAN backplane reset */
3880 reg_val = brcmf_sdiod_regrb(bus->sdiodev, 3890 reg_val = brcmf_sdiod_regrb(bus->sdiodev,
3881 SDIO_CCCR_BRCM_CARDCTRL, &err); 3891 SDIO_CCCR_BRCM_CARDCTRL, &err);
@@ -4148,7 +4158,8 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
4148 bus->watchdog_tsk = NULL; 4158 bus->watchdog_tsk = NULL;
4149 } 4159 }
4150 /* Initialize DPC thread */ 4160 /* Initialize DPC thread */
4151 atomic_set(&bus->dpc_tskcnt, 0); 4161 bus->dpc_triggered = false;
4162 bus->dpc_running = false;
4152 4163
4153 /* Assign bus interface call back */ 4164 /* Assign bus interface call back */
4154 bus->sdiodev->bus_if->dev = bus->sdiodev->dev; 4165 bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
@@ -4243,14 +4254,14 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
4243 if (bus->ci) { 4254 if (bus->ci) {
4244 if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) { 4255 if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
4245 sdio_claim_host(bus->sdiodev->func[1]); 4256 sdio_claim_host(bus->sdiodev->func[1]);
4257 brcmf_sdio_wd_timer(bus, 0);
4246 brcmf_sdio_clkctl(bus, CLK_AVAIL, false); 4258 brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
4247 /* Leave the device in state where it is 4259 /* Leave the device in state where it is
4248 * 'quiet'. This is done by putting it in 4260 * 'passive'. This is done by resetting all
4249 * download_state which essentially resets 4261 * necessary cores.
4250 * all necessary cores.
4251 */ 4262 */
4252 msleep(20); 4263 msleep(20);
4253 brcmf_chip_enter_download(bus->ci); 4264 brcmf_chip_set_passive(bus->ci);
4254 brcmf_sdio_clkctl(bus, CLK_NONE, false); 4265 brcmf_sdio_clkctl(bus, CLK_NONE, false);
4255 sdio_release_host(bus->sdiodev->func[1]); 4266 sdio_release_host(bus->sdiodev->func[1]);
4256 } 4267 }
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index c84af1dfc88f..369527e27689 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -4959,7 +4959,7 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
4959 * Configure pci/pcmcia here instead of in brcms_c_attach() 4959 * Configure pci/pcmcia here instead of in brcms_c_attach()
4960 * to allow mfg hotswap: down, hotswap (chip power cycle), up. 4960 * to allow mfg hotswap: down, hotswap (chip power cycle), up.
4961 */ 4961 */
4962 bcma_core_pci_irq_ctl(wlc_hw->d11core->bus, wlc_hw->d11core, 4962 bcma_host_pci_irq_ctl(wlc_hw->d11core->bus, wlc_hw->d11core,
4963 true); 4963 true);
4964 4964
4965 /* 4965 /*
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 2124a17d0bfd..4efdd51af9c8 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -37,6 +37,8 @@
37#define BRCM_CC_43362_CHIP_ID 43362 37#define BRCM_CC_43362_CHIP_ID 43362
38#define BRCM_CC_4335_CHIP_ID 0x4335 38#define BRCM_CC_4335_CHIP_ID 0x4335
39#define BRCM_CC_4339_CHIP_ID 0x4339 39#define BRCM_CC_4339_CHIP_ID 0x4339
40#define BRCM_CC_43430_CHIP_ID 43430
41#define BRCM_CC_4345_CHIP_ID 0x4345
40#define BRCM_CC_4354_CHIP_ID 0x4354 42#define BRCM_CC_4354_CHIP_ID 0x4354
41#define BRCM_CC_4356_CHIP_ID 0x4356 43#define BRCM_CC_4356_CHIP_ID 0x4356
42#define BRCM_CC_43566_CHIP_ID 43566 44#define BRCM_CC_43566_CHIP_ID 43566
diff --git a/drivers/net/wireless/brcm80211/include/chipcommon.h b/drivers/net/wireless/brcm80211/include/chipcommon.h
index d242333b7559..e1fd499930a0 100644
--- a/drivers/net/wireless/brcm80211/include/chipcommon.h
+++ b/drivers/net/wireless/brcm80211/include/chipcommon.h
@@ -183,7 +183,14 @@ struct chipcregs {
183 u8 uart1lsr; 183 u8 uart1lsr;
184 u8 uart1msr; 184 u8 uart1msr;
185 u8 uart1scratch; 185 u8 uart1scratch;
186 u32 PAD[126]; 186 u32 PAD[62];
187
188 /* save/restore, corerev >= 48 */
189 u32 sr_capability; /* 0x500 */
190 u32 sr_control0; /* 0x504 */
191 u32 sr_control1; /* 0x508 */
192 u32 gpio_control; /* 0x50C */
193 u32 PAD[60];
187 194
188 /* PMU registers (corerev >= 20) */ 195 /* PMU registers (corerev >= 20) */
189 u32 pmucontrol; /* 0x600 */ 196 u32 pmucontrol; /* 0x600 */
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index 964b64ab7fe3..7603546d2de3 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -447,7 +447,7 @@ static int cw1200_spi_disconnect(struct spi_device *func)
447} 447}
448 448
449#ifdef CONFIG_PM 449#ifdef CONFIG_PM
450static int cw1200_spi_suspend(struct device *dev, pm_message_t state) 450static int cw1200_spi_suspend(struct device *dev)
451{ 451{
452 struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev)); 452 struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev));
453 453
@@ -458,10 +458,8 @@ static int cw1200_spi_suspend(struct device *dev, pm_message_t state)
458 return 0; 458 return 0;
459} 459}
460 460
461static int cw1200_spi_resume(struct device *dev) 461static SIMPLE_DEV_PM_OPS(cw1200_pm_ops, cw1200_spi_suspend, NULL);
462{ 462
463 return 0;
464}
465#endif 463#endif
466 464
467static struct spi_driver spi_driver = { 465static struct spi_driver spi_driver = {
@@ -472,8 +470,7 @@ static struct spi_driver spi_driver = {
472 .bus = &spi_bus_type, 470 .bus = &spi_bus_type,
473 .owner = THIS_MODULE, 471 .owner = THIS_MODULE,
474#ifdef CONFIG_PM 472#ifdef CONFIG_PM
475 .suspend = cw1200_spi_suspend, 473 .pm = &cw1200_pm_ops,
476 .resume = cw1200_spi_resume,
477#endif 474#endif
478 }, 475 },
479}; 476};
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index a6f22c32a279..3811878ab9cd 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -708,7 +708,6 @@ struct iwl_priv {
708 unsigned long reload_jiffies; 708 unsigned long reload_jiffies;
709 int reload_count; 709 int reload_count;
710 bool ucode_loaded; 710 bool ucode_loaded;
711 bool init_ucode_run; /* Don't run init uCode again */
712 711
713 u8 plcp_delta_threshold; 712 u8 plcp_delta_threshold;
714 713
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 5707ba5ce23f..5abd62ed8cb4 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1114,16 +1114,17 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1114 scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) | 1114 scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
1115 BIT(IWL_DEFAULT_CMD_QUEUE_NUM)); 1115 BIT(IWL_DEFAULT_CMD_QUEUE_NUM));
1116 1116
1117 if (vif) 1117 if (drop) {
1118 scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]); 1118 IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n",
1119 1119 scd_queues);
1120 IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues); 1120 if (iwlagn_txfifo_flush(priv, scd_queues)) {
1121 if (iwlagn_txfifo_flush(priv, scd_queues)) { 1121 IWL_ERR(priv, "flush request fail\n");
1122 IWL_ERR(priv, "flush request fail\n"); 1122 goto done;
1123 goto done; 1123 }
1124 } 1124 }
1125
1125 IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n"); 1126 IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
1126 iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff); 1127 iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues);
1127done: 1128done:
1128 mutex_unlock(&priv->mutex); 1129 mutex_unlock(&priv->mutex);
1129 IWL_DEBUG_MAC80211(priv, "leave\n"); 1130 IWL_DEBUG_MAC80211(priv, "leave\n");
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 32b78a66536d..3bd7c86e90d9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -3153,12 +3153,13 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
3153 desc += sprintf(buff+desc, "lq type %s\n", 3153 desc += sprintf(buff+desc, "lq type %s\n",
3154 (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); 3154 (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
3155 if (is_Ht(tbl->lq_type)) { 3155 if (is_Ht(tbl->lq_type)) {
3156 desc += sprintf(buff+desc, " %s", 3156 desc += sprintf(buff + desc, " %s",
3157 (is_siso(tbl->lq_type)) ? "SISO" : 3157 (is_siso(tbl->lq_type)) ? "SISO" :
3158 ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3")); 3158 ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
3159 desc += sprintf(buff+desc, " %s", 3159 desc += sprintf(buff + desc, " %s",
3160 (tbl->is_ht40) ? "40MHz" : "20MHz"); 3160 (tbl->is_ht40) ? "40MHz" : "20MHz");
3161 desc += sprintf(buff+desc, " %s %s %s\n", (tbl->is_SGI) ? "SGI" : "", 3161 desc += sprintf(buff + desc, " %s %s %s\n",
3162 (tbl->is_SGI) ? "SGI" : "",
3162 (lq_sta->is_green) ? "GF enabled" : "", 3163 (lq_sta->is_green) ? "GF enabled" : "",
3163 (lq_sta->is_agg) ? "AGG on" : ""); 3164 (lq_sta->is_agg) ? "AGG on" : "");
3164 } 3165 }
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 1e40a12de077..275df12a6045 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -189,9 +189,9 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
189 rate_flags |= RATE_MCS_CCK_MSK; 189 rate_flags |= RATE_MCS_CCK_MSK;
190 190
191 /* Set up antennas */ 191 /* Set up antennas */
192 if (priv->lib->bt_params && 192 if (priv->lib->bt_params &&
193 priv->lib->bt_params->advanced_bt_coexist && 193 priv->lib->bt_params->advanced_bt_coexist &&
194 priv->bt_full_concurrent) { 194 priv->bt_full_concurrent) {
195 /* operated as 1x1 in full concurrency mode */ 195 /* operated as 1x1 in full concurrency mode */
196 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 196 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
197 first_antenna(priv->nvm_data->valid_tx_ant)); 197 first_antenna(priv->nvm_data->valid_tx_ant));
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 4dbef7e58c2e..5244e43bfafb 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -418,9 +418,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
418 if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len) 418 if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
419 return 0; 419 return 0;
420 420
421 if (priv->init_ucode_run)
422 return 0;
423
424 iwl_init_notification_wait(&priv->notif_wait, &calib_wait, 421 iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
425 calib_complete, ARRAY_SIZE(calib_complete), 422 calib_complete, ARRAY_SIZE(calib_complete),
426 iwlagn_wait_calib, priv); 423 iwlagn_wait_calib, priv);
@@ -440,8 +437,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
440 */ 437 */
441 ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, 438 ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
442 UCODE_CALIB_TIMEOUT); 439 UCODE_CALIB_TIMEOUT);
443 if (!ret)
444 priv->init_ucode_run = true;
445 440
446 goto out; 441 goto out;
447 442
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 0597a9cfd2f6..36e786f0387b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -69,12 +69,12 @@
69#include "iwl-agn-hw.h" 69#include "iwl-agn-hw.h"
70 70
71/* Highest firmware API version supported */ 71/* Highest firmware API version supported */
72#define IWL7260_UCODE_API_MAX 12 72#define IWL7260_UCODE_API_MAX 13
73#define IWL3160_UCODE_API_MAX 12 73#define IWL3160_UCODE_API_MAX 13
74 74
75/* Oldest version we won't warn about */ 75/* Oldest version we won't warn about */
76#define IWL7260_UCODE_API_OK 10 76#define IWL7260_UCODE_API_OK 12
77#define IWL3160_UCODE_API_OK 10 77#define IWL3160_UCODE_API_OK 12
78 78
79/* Lowest firmware API version supported */ 79/* Lowest firmware API version supported */
80#define IWL7260_UCODE_API_MIN 10 80#define IWL7260_UCODE_API_MIN 10
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index d8dfa6da6307..9c396a42aec8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -69,10 +69,10 @@
69#include "iwl-agn-hw.h" 69#include "iwl-agn-hw.h"
70 70
71/* Highest firmware API version supported */ 71/* Highest firmware API version supported */
72#define IWL8000_UCODE_API_MAX 12 72#define IWL8000_UCODE_API_MAX 13
73 73
74/* Oldest version we won't warn about */ 74/* Oldest version we won't warn about */
75#define IWL8000_UCODE_API_OK 10 75#define IWL8000_UCODE_API_OK 12
76 76
77/* Lowest firmware API version supported */ 77/* Lowest firmware API version supported */
78#define IWL8000_UCODE_API_MIN 10 78#define IWL8000_UCODE_API_MIN 10
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 684254553558..9bb36d79c2bd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -157,6 +157,7 @@ do { \
157/* 0x0000F000 - 0x00001000 */ 157/* 0x0000F000 - 0x00001000 */
158#define IWL_DL_ASSOC 0x00001000 158#define IWL_DL_ASSOC 0x00001000
159#define IWL_DL_DROP 0x00002000 159#define IWL_DL_DROP 0x00002000
160#define IWL_DL_LAR 0x00004000
160#define IWL_DL_COEX 0x00008000 161#define IWL_DL_COEX 0x00008000
161/* 0x000F0000 - 0x00010000 */ 162/* 0x000F0000 - 0x00010000 */
162#define IWL_DL_FW 0x00010000 163#define IWL_DL_FW 0x00010000
@@ -219,5 +220,6 @@ do { \
219#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a) 220#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
220#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a) 221#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
221#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a) 222#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
223#define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a)
222 224
223#endif 225#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 141331d41abf..aefdd9b7c105 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1014,34 +1014,34 @@ static int validate_sec_sizes(struct iwl_drv *drv,
1014 1014
1015 /* Verify that uCode images will fit in card's SRAM. */ 1015 /* Verify that uCode images will fit in card's SRAM. */
1016 if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) > 1016 if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) >
1017 cfg->max_inst_size) { 1017 cfg->max_inst_size) {
1018 IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n", 1018 IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n",
1019 get_sec_size(pieces, IWL_UCODE_REGULAR, 1019 get_sec_size(pieces, IWL_UCODE_REGULAR,
1020 IWL_UCODE_SECTION_INST)); 1020 IWL_UCODE_SECTION_INST));
1021 return -1; 1021 return -1;
1022 } 1022 }
1023 1023
1024 if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) > 1024 if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) >
1025 cfg->max_data_size) { 1025 cfg->max_data_size) {
1026 IWL_ERR(drv, "uCode data len %Zd too large to fit in\n", 1026 IWL_ERR(drv, "uCode data len %Zd too large to fit in\n",
1027 get_sec_size(pieces, IWL_UCODE_REGULAR, 1027 get_sec_size(pieces, IWL_UCODE_REGULAR,
1028 IWL_UCODE_SECTION_DATA)); 1028 IWL_UCODE_SECTION_DATA));
1029 return -1; 1029 return -1;
1030 } 1030 }
1031 1031
1032 if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) > 1032 if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) >
1033 cfg->max_inst_size) { 1033 cfg->max_inst_size) {
1034 IWL_ERR(drv, "uCode init instr len %Zd too large to fit in\n", 1034 IWL_ERR(drv, "uCode init instr len %Zd too large to fit in\n",
1035 get_sec_size(pieces, IWL_UCODE_INIT, 1035 get_sec_size(pieces, IWL_UCODE_INIT,
1036 IWL_UCODE_SECTION_INST)); 1036 IWL_UCODE_SECTION_INST));
1037 return -1; 1037 return -1;
1038 } 1038 }
1039 1039
1040 if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) > 1040 if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) >
1041 cfg->max_data_size) { 1041 cfg->max_data_size) {
1042 IWL_ERR(drv, "uCode init data len %Zd too large to fit in\n", 1042 IWL_ERR(drv, "uCode init data len %Zd too large to fit in\n",
1043 get_sec_size(pieces, IWL_UCODE_REGULAR, 1043 get_sec_size(pieces, IWL_UCODE_REGULAR,
1044 IWL_UCODE_SECTION_DATA)); 1044 IWL_UCODE_SECTION_DATA));
1045 return -1; 1045 return -1;
1046 } 1046 }
1047 return 0; 1047 return 0;
@@ -1319,6 +1319,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
1319 op->name, err); 1319 op->name, err);
1320#endif 1320#endif
1321 } 1321 }
1322 kfree(pieces);
1322 return; 1323 return;
1323 1324
1324 try_again: 1325 try_again:
@@ -1546,6 +1547,10 @@ module_param_named(d0i3_disable, iwlwifi_mod_params.d0i3_disable,
1546 bool, S_IRUGO); 1547 bool, S_IRUGO);
1547MODULE_PARM_DESC(d0i3_disable, "disable d0i3 functionality (default: Y)"); 1548MODULE_PARM_DESC(d0i3_disable, "disable d0i3 functionality (default: Y)");
1548 1549
1550module_param_named(lar_disable, iwlwifi_mod_params.lar_disable,
1551 bool, S_IRUGO);
1552MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
1553
1549module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, 1554module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
1550 bool, S_IRUGO | S_IWUSR); 1555 bool, S_IRUGO | S_IWUSR);
1551#ifdef CONFIG_IWLWIFI_UAPSD 1556#ifdef CONFIG_IWLWIFI_UAPSD
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index adf522c756e6..67a3a241b331 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -68,7 +68,7 @@
68 68
69/* for all modules */ 69/* for all modules */
70#define DRV_NAME "iwlwifi" 70#define DRV_NAME "iwlwifi"
71#define DRV_COPYRIGHT "Copyright(c) 2003- 2014 Intel Corporation" 71#define DRV_COPYRIGHT "Copyright(c) 2003- 2015 Intel Corporation"
72#define DRV_AUTHOR "<ilw@linux.intel.com>" 72#define DRV_AUTHOR "<ilw@linux.intel.com>"
73 73
74/* radio config bits (actual values from NVM definition) */ 74/* radio config bits (actual values from NVM definition) */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index f0548b8a64b0..5234a0bf11e4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -94,6 +94,7 @@ struct iwl_nvm_data {
94 u32 nvm_version; 94 u32 nvm_version;
95 s8 max_tx_pwr_half_dbm; 95 s8 max_tx_pwr_half_dbm;
96 96
97 bool lar_enabled;
97 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 98 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
98 struct ieee80211_channel channels[]; 99 struct ieee80211_channel channels[];
99}; 100};
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 5ea381861d5d..291a3382aa3f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -240,10 +240,9 @@ enum iwl_ucode_tlv_flag {
240/** 240/**
241 * enum iwl_ucode_tlv_api - ucode api 241 * enum iwl_ucode_tlv_api - ucode api
242 * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex 242 * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
243 * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit.
244 * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
245 * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time 243 * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
246 * longer than the passive one, which is essential for fragmented scan. 244 * longer than the passive one, which is essential for fragmented scan.
245 * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
247 * IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR 246 * IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
248 * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, 247 * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
249 * regardless of the band or the number of the probes. FW will calculate 248 * regardless of the band or the number of the probes. FW will calculate
@@ -258,9 +257,8 @@ enum iwl_ucode_tlv_flag {
258 */ 257 */
259enum iwl_ucode_tlv_api { 258enum iwl_ucode_tlv_api {
260 IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3), 259 IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3),
261 IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5),
262 IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
263 IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), 260 IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
261 IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = BIT(9),
264 IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10), 262 IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10),
265 IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), 263 IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
266 IWL_UCODE_TLV_API_SCD_CFG = BIT(15), 264 IWL_UCODE_TLV_API_SCD_CFG = BIT(15),
@@ -292,6 +290,7 @@ enum iwl_ucode_tlv_api {
292 * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command 290 * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
293 * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics 291 * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
294 * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running 292 * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
293 * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
295 */ 294 */
296enum iwl_ucode_tlv_capa { 295enum iwl_ucode_tlv_capa {
297 IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0), 296 IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
@@ -308,6 +307,7 @@ enum iwl_ucode_tlv_capa {
308 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18), 307 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18),
309 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = BIT(22), 308 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = BIT(22),
310 IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = BIT(28), 309 IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = BIT(28),
310 IWL_UCODE_TLV_CAPA_BT_COEX_RRC = BIT(30),
311}; 311};
312 312
313/* The default calibrate table size if not specified by firmware file */ 313/* The default calibrate table size if not specified by firmware file */
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 03250a45272e..78cac43e2bcd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -201,6 +201,8 @@ void iwl_force_nmi(struct iwl_trans *trans)
201 } else { 201 } else {
202 iwl_write_prph(trans, DEVICE_SET_NMI_8000B_REG, 202 iwl_write_prph(trans, DEVICE_SET_NMI_8000B_REG,
203 DEVICE_SET_NMI_8000B_VAL); 203 DEVICE_SET_NMI_8000B_VAL);
204 iwl_write_prph(trans, DEVICE_SET_NMI_REG,
205 DEVICE_SET_NMI_VAL_DRV);
204 } 206 }
205} 207}
206IWL_EXPORT_SYMBOL(iwl_force_nmi); 208IWL_EXPORT_SYMBOL(iwl_force_nmi);
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index e8eabd21ccfe..ac2b90df8413 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -103,6 +103,7 @@ enum iwl_disable_11n {
103 * @debug_level: levels are IWL_DL_* 103 * @debug_level: levels are IWL_DL_*
104 * @ant_coupling: antenna coupling in dB, default = 0 104 * @ant_coupling: antenna coupling in dB, default = 0
105 * @d0i3_disable: disable d0i3, default = 1, 105 * @d0i3_disable: disable d0i3, default = 1,
106 * @lar_disable: disable LAR (regulatory), default = 0
106 * @fw_monitor: allow to use firmware monitor 107 * @fw_monitor: allow to use firmware monitor
107 */ 108 */
108struct iwl_mod_params { 109struct iwl_mod_params {
@@ -121,6 +122,7 @@ struct iwl_mod_params {
121 char *nvm_file; 122 char *nvm_file;
122 bool uapsd_disable; 123 bool uapsd_disable;
123 bool d0i3_disable; 124 bool d0i3_disable;
125 bool lar_disable;
124 bool fw_monitor; 126 bool fw_monitor;
125}; 127};
126 128
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index c74f1a4edf23..774637746427 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -103,8 +103,16 @@ enum family_8000_nvm_offsets {
103 SKU_FAMILY_8000 = 4, 103 SKU_FAMILY_8000 = 4,
104 N_HW_ADDRS_FAMILY_8000 = 5, 104 N_HW_ADDRS_FAMILY_8000 = 5,
105 105
106 /* NVM PHY-SKU-Section offset (in words) for B0 */
107 RADIO_CFG_FAMILY_8000_B0 = 0,
108 SKU_FAMILY_8000_B0 = 2,
109 N_HW_ADDRS_FAMILY_8000_B0 = 3,
110
106 /* NVM REGULATORY -Section offset (in words) definitions */ 111 /* NVM REGULATORY -Section offset (in words) definitions */
107 NVM_CHANNELS_FAMILY_8000 = 0, 112 NVM_CHANNELS_FAMILY_8000 = 0,
113 NVM_LAR_OFFSET_FAMILY_8000_OLD = 0x4C7,
114 NVM_LAR_OFFSET_FAMILY_8000 = 0x507,
115 NVM_LAR_ENABLED_FAMILY_8000 = 0x7,
108 116
109 /* NVM calibration section offset (in words) definitions */ 117 /* NVM calibration section offset (in words) definitions */
110 NVM_CALIB_SECTION_FAMILY_8000 = 0x2B8, 118 NVM_CALIB_SECTION_FAMILY_8000 = 0x2B8,
@@ -146,7 +154,9 @@ static const u8 iwl_nvm_channels_family_8000[] = {
146#define NUM_2GHZ_CHANNELS_FAMILY_8000 14 154#define NUM_2GHZ_CHANNELS_FAMILY_8000 14
147#define FIRST_2GHZ_HT_MINUS 5 155#define FIRST_2GHZ_HT_MINUS 5
148#define LAST_2GHZ_HT_PLUS 9 156#define LAST_2GHZ_HT_PLUS 9
149#define LAST_5GHZ_HT 161 157#define LAST_5GHZ_HT 165
158#define LAST_5GHZ_HT_FAMILY_8000 181
159#define N_HW_ADDR_MASK 0xF
150 160
151/* rate data (static) */ 161/* rate data (static) */
152static struct ieee80211_rate iwl_cfg80211_rates[] = { 162static struct ieee80211_rate iwl_cfg80211_rates[] = {
@@ -201,9 +211,57 @@ enum iwl_nvm_channel_flags {
201#define CHECK_AND_PRINT_I(x) \ 211#define CHECK_AND_PRINT_I(x) \
202 ((ch_flags & NVM_CHANNEL_##x) ? # x " " : "") 212 ((ch_flags & NVM_CHANNEL_##x) ? # x " " : "")
203 213
214static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
215 u16 nvm_flags, const struct iwl_cfg *cfg)
216{
217 u32 flags = IEEE80211_CHAN_NO_HT40;
218 u32 last_5ghz_ht = LAST_5GHZ_HT;
219
220 if (cfg->device_family == IWL_DEVICE_FAMILY_8000)
221 last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
222
223 if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
224 if (ch_num <= LAST_2GHZ_HT_PLUS)
225 flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
226 if (ch_num >= FIRST_2GHZ_HT_MINUS)
227 flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
228 } else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) {
229 if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
230 flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
231 else
232 flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
233 }
234 if (!(nvm_flags & NVM_CHANNEL_80MHZ))
235 flags |= IEEE80211_CHAN_NO_80MHZ;
236 if (!(nvm_flags & NVM_CHANNEL_160MHZ))
237 flags |= IEEE80211_CHAN_NO_160MHZ;
238
239 if (!(nvm_flags & NVM_CHANNEL_IBSS))
240 flags |= IEEE80211_CHAN_NO_IR;
241
242 if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
243 flags |= IEEE80211_CHAN_NO_IR;
244
245 if (nvm_flags & NVM_CHANNEL_RADAR)
246 flags |= IEEE80211_CHAN_RADAR;
247
248 if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
249 flags |= IEEE80211_CHAN_INDOOR_ONLY;
250
251 /* Set the GO concurrent flag only in case that NO_IR is set.
252 * Otherwise it is meaningless
253 */
254 if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
255 (flags & IEEE80211_CHAN_NO_IR))
256 flags |= IEEE80211_CHAN_GO_CONCURRENT;
257
258 return flags;
259}
260
204static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, 261static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
205 struct iwl_nvm_data *data, 262 struct iwl_nvm_data *data,
206 const __le16 * const nvm_ch_flags) 263 const __le16 * const nvm_ch_flags,
264 bool lar_supported)
207{ 265{
208 int ch_idx; 266 int ch_idx;
209 int n_channels = 0; 267 int n_channels = 0;
@@ -228,9 +286,14 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
228 286
229 if (ch_idx >= num_2ghz_channels && 287 if (ch_idx >= num_2ghz_channels &&
230 !data->sku_cap_band_52GHz_enable) 288 !data->sku_cap_band_52GHz_enable)
231 ch_flags &= ~NVM_CHANNEL_VALID; 289 continue;
232 290
233 if (!(ch_flags & NVM_CHANNEL_VALID)) { 291 if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) {
292 /*
293 * Channels might become valid later if lar is
294 * supported, hence we still want to add them to
295 * the list of supported channels to cfg80211.
296 */
234 IWL_DEBUG_EEPROM(dev, 297 IWL_DEBUG_EEPROM(dev,
235 "Ch. %d Flags %x [%sGHz] - No traffic\n", 298 "Ch. %d Flags %x [%sGHz] - No traffic\n",
236 nvm_chan[ch_idx], 299 nvm_chan[ch_idx],
@@ -250,45 +313,6 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
250 ieee80211_channel_to_frequency( 313 ieee80211_channel_to_frequency(
251 channel->hw_value, channel->band); 314 channel->hw_value, channel->band);
252 315
253 /* TODO: Need to be dependent to the NVM */
254 channel->flags = IEEE80211_CHAN_NO_HT40;
255 if (ch_idx < num_2ghz_channels &&
256 (ch_flags & NVM_CHANNEL_40MHZ)) {
257 if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
258 channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
259 if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
260 channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
261 } else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT &&
262 (ch_flags & NVM_CHANNEL_40MHZ)) {
263 if ((ch_idx - num_2ghz_channels) % 2 == 0)
264 channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
265 else
266 channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
267 }
268 if (!(ch_flags & NVM_CHANNEL_80MHZ))
269 channel->flags |= IEEE80211_CHAN_NO_80MHZ;
270 if (!(ch_flags & NVM_CHANNEL_160MHZ))
271 channel->flags |= IEEE80211_CHAN_NO_160MHZ;
272
273 if (!(ch_flags & NVM_CHANNEL_IBSS))
274 channel->flags |= IEEE80211_CHAN_NO_IR;
275
276 if (!(ch_flags & NVM_CHANNEL_ACTIVE))
277 channel->flags |= IEEE80211_CHAN_NO_IR;
278
279 if (ch_flags & NVM_CHANNEL_RADAR)
280 channel->flags |= IEEE80211_CHAN_RADAR;
281
282 if (ch_flags & NVM_CHANNEL_INDOOR_ONLY)
283 channel->flags |= IEEE80211_CHAN_INDOOR_ONLY;
284
285 /* Set the GO concurrent flag only in case that NO_IR is set.
286 * Otherwise it is meaningless
287 */
288 if ((ch_flags & NVM_CHANNEL_GO_CONCURRENT) &&
289 (channel->flags & IEEE80211_CHAN_NO_IR))
290 channel->flags |= IEEE80211_CHAN_GO_CONCURRENT;
291
292 /* Initialize regulatory-based run-time data */ 316 /* Initialize regulatory-based run-time data */
293 317
294 /* 318 /*
@@ -297,6 +321,15 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
297 */ 321 */
298 channel->max_power = IWL_DEFAULT_MAX_TX_POWER; 322 channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
299 is_5ghz = channel->band == IEEE80211_BAND_5GHZ; 323 is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
324
325 /* don't put limitations in case we're using LAR */
326 if (!lar_supported)
327 channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx],
328 ch_idx, is_5ghz,
329 ch_flags, cfg);
330 else
331 channel->flags = 0;
332
300 IWL_DEBUG_EEPROM(dev, 333 IWL_DEBUG_EEPROM(dev,
301 "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", 334 "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
302 channel->hw_value, 335 channel->hw_value,
@@ -370,8 +403,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
370 403
371static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, 404static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
372 struct iwl_nvm_data *data, 405 struct iwl_nvm_data *data,
373 const __le16 *ch_section, bool enable_vht, 406 const __le16 *ch_section,
374 u8 tx_chains, u8 rx_chains) 407 u8 tx_chains, u8 rx_chains, bool lar_supported)
375{ 408{
376 int n_channels; 409 int n_channels;
377 int n_used = 0; 410 int n_used = 0;
@@ -380,11 +413,12 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
380 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) 413 if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
381 n_channels = iwl_init_channel_map( 414 n_channels = iwl_init_channel_map(
382 dev, cfg, data, 415 dev, cfg, data,
383 &ch_section[NVM_CHANNELS]); 416 &ch_section[NVM_CHANNELS], lar_supported);
384 else 417 else
385 n_channels = iwl_init_channel_map( 418 n_channels = iwl_init_channel_map(
386 dev, cfg, data, 419 dev, cfg, data,
387 &ch_section[NVM_CHANNELS_FAMILY_8000]); 420 &ch_section[NVM_CHANNELS_FAMILY_8000],
421 lar_supported);
388 422
389 sband = &data->bands[IEEE80211_BAND_2GHZ]; 423 sband = &data->bands[IEEE80211_BAND_2GHZ];
390 sband->band = IEEE80211_BAND_2GHZ; 424 sband->band = IEEE80211_BAND_2GHZ;
@@ -403,7 +437,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
403 IEEE80211_BAND_5GHZ); 437 IEEE80211_BAND_5GHZ);
404 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, 438 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
405 tx_chains, rx_chains); 439 tx_chains, rx_chains);
406 if (enable_vht) 440 if (data->sku_cap_11ac_enable)
407 iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap, 441 iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
408 tx_chains, rx_chains); 442 tx_chains, rx_chains);
409 443
@@ -413,10 +447,15 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
413} 447}
414 448
415static int iwl_get_sku(const struct iwl_cfg *cfg, 449static int iwl_get_sku(const struct iwl_cfg *cfg,
416 const __le16 *nvm_sw) 450 const __le16 *nvm_sw, const __le16 *phy_sku,
451 bool is_family_8000_a_step)
417{ 452{
418 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) 453 if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
419 return le16_to_cpup(nvm_sw + SKU); 454 return le16_to_cpup(nvm_sw + SKU);
455
456 if (!is_family_8000_a_step)
457 return le32_to_cpup((__le32 *)(phy_sku +
458 SKU_FAMILY_8000_B0));
420 else 459 else
421 return le32_to_cpup((__le32 *)(nvm_sw + SKU_FAMILY_8000)); 460 return le32_to_cpup((__le32 *)(nvm_sw + SKU_FAMILY_8000));
422} 461}
@@ -432,23 +471,36 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg,
432} 471}
433 472
434static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, 473static int iwl_get_radio_cfg(const struct iwl_cfg *cfg,
435 const __le16 *nvm_sw) 474 const __le16 *nvm_sw, const __le16 *phy_sku,
475 bool is_family_8000_a_step)
436{ 476{
437 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) 477 if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
438 return le16_to_cpup(nvm_sw + RADIO_CFG); 478 return le16_to_cpup(nvm_sw + RADIO_CFG);
479
480 if (!is_family_8000_a_step)
481 return le32_to_cpup((__le32 *)(phy_sku +
482 RADIO_CFG_FAMILY_8000_B0));
439 else 483 else
440 return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000)); 484 return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000));
485
441} 486}
442 487
443#define N_HW_ADDRS_MASK_FAMILY_8000 0xF
444static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, 488static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg,
445 const __le16 *nvm_sw) 489 const __le16 *nvm_sw, bool is_family_8000_a_step)
446{ 490{
491 int n_hw_addr;
492
447 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) 493 if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
448 return le16_to_cpup(nvm_sw + N_HW_ADDRS); 494 return le16_to_cpup(nvm_sw + N_HW_ADDRS);
495
496 if (!is_family_8000_a_step)
497 n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw +
498 N_HW_ADDRS_FAMILY_8000_B0));
449 else 499 else
450 return le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000)) 500 n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw +
451 & N_HW_ADDRS_MASK_FAMILY_8000; 501 N_HW_ADDRS_FAMILY_8000));
502
503 return n_hw_addr & N_HW_ADDR_MASK;
452} 504}
453 505
454static void iwl_set_radio_cfg(const struct iwl_cfg *cfg, 506static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
@@ -491,7 +543,8 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
491 const struct iwl_cfg *cfg, 543 const struct iwl_cfg *cfg,
492 struct iwl_nvm_data *data, 544 struct iwl_nvm_data *data,
493 const __le16 *mac_override, 545 const __le16 *mac_override,
494 const __le16 *nvm_hw) 546 const __le16 *nvm_hw,
547 u32 mac_addr0, u32 mac_addr1)
495{ 548{
496 const u8 *hw_addr; 549 const u8 *hw_addr;
497 550
@@ -515,48 +568,17 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
515 } 568 }
516 569
517 if (nvm_hw) { 570 if (nvm_hw) {
518 /* read the MAC address from OTP */ 571 /* read the MAC address from HW resisters */
519 if (!dev_is_pci(dev) || (data->nvm_version < 0xE08)) { 572 hw_addr = (const u8 *)&mac_addr0;
520 /* read the mac address from the WFPM location */ 573 data->hw_addr[0] = hw_addr[3];
521 hw_addr = (const u8 *)(nvm_hw + 574 data->hw_addr[1] = hw_addr[2];
522 HW_ADDR0_WFPM_FAMILY_8000); 575 data->hw_addr[2] = hw_addr[1];
523 data->hw_addr[0] = hw_addr[3]; 576 data->hw_addr[3] = hw_addr[0];
524 data->hw_addr[1] = hw_addr[2]; 577
525 data->hw_addr[2] = hw_addr[1]; 578 hw_addr = (const u8 *)&mac_addr1;
526 data->hw_addr[3] = hw_addr[0]; 579 data->hw_addr[4] = hw_addr[1];
527 580 data->hw_addr[5] = hw_addr[0];
528 hw_addr = (const u8 *)(nvm_hw + 581
529 HW_ADDR1_WFPM_FAMILY_8000);
530 data->hw_addr[4] = hw_addr[1];
531 data->hw_addr[5] = hw_addr[0];
532 } else if ((data->nvm_version >= 0xE08) &&
533 (data->nvm_version < 0xE0B)) {
534 /* read "reverse order" from the PCIe location */
535 hw_addr = (const u8 *)(nvm_hw +
536 HW_ADDR0_PCIE_FAMILY_8000);
537 data->hw_addr[5] = hw_addr[2];
538 data->hw_addr[4] = hw_addr[1];
539 data->hw_addr[3] = hw_addr[0];
540
541 hw_addr = (const u8 *)(nvm_hw +
542 HW_ADDR1_PCIE_FAMILY_8000);
543 data->hw_addr[2] = hw_addr[3];
544 data->hw_addr[1] = hw_addr[2];
545 data->hw_addr[0] = hw_addr[1];
546 } else {
547 /* read from the PCIe location */
548 hw_addr = (const u8 *)(nvm_hw +
549 HW_ADDR0_PCIE_FAMILY_8000);
550 data->hw_addr[5] = hw_addr[0];
551 data->hw_addr[4] = hw_addr[1];
552 data->hw_addr[3] = hw_addr[2];
553
554 hw_addr = (const u8 *)(nvm_hw +
555 HW_ADDR1_PCIE_FAMILY_8000);
556 data->hw_addr[2] = hw_addr[1];
557 data->hw_addr[1] = hw_addr[2];
558 data->hw_addr[0] = hw_addr[3];
559 }
560 if (!is_valid_ether_addr(data->hw_addr)) 582 if (!is_valid_ether_addr(data->hw_addr))
561 IWL_ERR_DEV(dev, 583 IWL_ERR_DEV(dev,
562 "mac address from hw section is not valid\n"); 584 "mac address from hw section is not valid\n");
@@ -571,11 +593,15 @@ struct iwl_nvm_data *
571iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, 593iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
572 const __le16 *nvm_hw, const __le16 *nvm_sw, 594 const __le16 *nvm_hw, const __le16 *nvm_sw,
573 const __le16 *nvm_calib, const __le16 *regulatory, 595 const __le16 *nvm_calib, const __le16 *regulatory,
574 const __le16 *mac_override, u8 tx_chains, u8 rx_chains) 596 const __le16 *mac_override, const __le16 *phy_sku,
597 u8 tx_chains, u8 rx_chains,
598 bool lar_fw_supported, bool is_family_8000_a_step,
599 u32 mac_addr0, u32 mac_addr1)
575{ 600{
576 struct iwl_nvm_data *data; 601 struct iwl_nvm_data *data;
577 u32 sku; 602 u32 sku;
578 u32 radio_cfg; 603 u32 radio_cfg;
604 u16 lar_config;
579 605
580 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) 606 if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
581 data = kzalloc(sizeof(*data) + 607 data = kzalloc(sizeof(*data) +
@@ -592,22 +618,25 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
592 618
593 data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw); 619 data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw);
594 620
595 radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw); 621 radio_cfg =
622 iwl_get_radio_cfg(cfg, nvm_sw, phy_sku, is_family_8000_a_step);
596 iwl_set_radio_cfg(cfg, data, radio_cfg); 623 iwl_set_radio_cfg(cfg, data, radio_cfg);
597 if (data->valid_tx_ant) 624 if (data->valid_tx_ant)
598 tx_chains &= data->valid_tx_ant; 625 tx_chains &= data->valid_tx_ant;
599 if (data->valid_rx_ant) 626 if (data->valid_rx_ant)
600 rx_chains &= data->valid_rx_ant; 627 rx_chains &= data->valid_rx_ant;
601 628
602 sku = iwl_get_sku(cfg, nvm_sw); 629 sku = iwl_get_sku(cfg, nvm_sw, phy_sku, is_family_8000_a_step);
603 data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ; 630 data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
604 data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ; 631 data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
605 data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE; 632 data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
606 data->sku_cap_11ac_enable = sku & NVM_SKU_CAP_11AC_ENABLE;
607 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) 633 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
608 data->sku_cap_11n_enable = false; 634 data->sku_cap_11n_enable = false;
635 data->sku_cap_11ac_enable = data->sku_cap_11n_enable &&
636 (sku & NVM_SKU_CAP_11AC_ENABLE);
609 637
610 data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); 638 data->n_hw_addrs =
639 iwl_get_n_hw_addrs(cfg, nvm_sw, is_family_8000_a_step);
611 640
612 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { 641 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
613 /* Checking for required sections */ 642 /* Checking for required sections */
@@ -626,16 +655,23 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
626 iwl_set_hw_address(cfg, data, nvm_hw); 655 iwl_set_hw_address(cfg, data, nvm_hw);
627 656
628 iwl_init_sbands(dev, cfg, data, nvm_sw, 657 iwl_init_sbands(dev, cfg, data, nvm_sw,
629 sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains, 658 tx_chains, rx_chains, lar_fw_supported);
630 rx_chains);
631 } else { 659 } else {
660 u16 lar_offset = data->nvm_version < 0xE39 ?
661 NVM_LAR_OFFSET_FAMILY_8000_OLD :
662 NVM_LAR_OFFSET_FAMILY_8000;
663
664 lar_config = le16_to_cpup(regulatory + lar_offset);
665 data->lar_enabled = !!(lar_config &
666 NVM_LAR_ENABLED_FAMILY_8000);
667
632 /* MAC address in family 8000 */ 668 /* MAC address in family 8000 */
633 iwl_set_hw_address_family_8000(dev, cfg, data, mac_override, 669 iwl_set_hw_address_family_8000(dev, cfg, data, mac_override,
634 nvm_hw); 670 nvm_hw, mac_addr0, mac_addr1);
635 671
636 iwl_init_sbands(dev, cfg, data, regulatory, 672 iwl_init_sbands(dev, cfg, data, regulatory,
637 sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains, 673 tx_chains, rx_chains,
638 rx_chains); 674 lar_fw_supported && data->lar_enabled);
639 } 675 }
640 676
641 data->calib_version = 255; 677 data->calib_version = 255;
@@ -643,3 +679,164 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
643 return data; 679 return data;
644} 680}
645IWL_EXPORT_SYMBOL(iwl_parse_nvm_data); 681IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
682
683static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
684 int ch_idx, u16 nvm_flags,
685 const struct iwl_cfg *cfg)
686{
687 u32 flags = NL80211_RRF_NO_HT40;
688 u32 last_5ghz_ht = LAST_5GHZ_HT;
689
690 if (cfg->device_family == IWL_DEVICE_FAMILY_8000)
691 last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
692
693 if (ch_idx < NUM_2GHZ_CHANNELS &&
694 (nvm_flags & NVM_CHANNEL_40MHZ)) {
695 if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
696 flags &= ~NL80211_RRF_NO_HT40PLUS;
697 if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
698 flags &= ~NL80211_RRF_NO_HT40MINUS;
699 } else if (nvm_chan[ch_idx] <= last_5ghz_ht &&
700 (nvm_flags & NVM_CHANNEL_40MHZ)) {
701 if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
702 flags &= ~NL80211_RRF_NO_HT40PLUS;
703 else
704 flags &= ~NL80211_RRF_NO_HT40MINUS;
705 }
706
707 if (!(nvm_flags & NVM_CHANNEL_80MHZ))
708 flags |= NL80211_RRF_NO_80MHZ;
709 if (!(nvm_flags & NVM_CHANNEL_160MHZ))
710 flags |= NL80211_RRF_NO_160MHZ;
711
712 if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
713 flags |= NL80211_RRF_NO_IR;
714
715 if (nvm_flags & NVM_CHANNEL_RADAR)
716 flags |= NL80211_RRF_DFS;
717
718 if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
719 flags |= NL80211_RRF_NO_OUTDOOR;
720
721 /* Set the GO concurrent flag only in case that NO_IR is set.
722 * Otherwise it is meaningless
723 */
724 if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
725 (flags & NL80211_RRF_NO_IR))
726 flags |= NL80211_RRF_GO_CONCURRENT;
727
728 return flags;
729}
730
731struct ieee80211_regdomain *
732iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
733 int num_of_ch, __le32 *channels, u16 fw_mcc)
734{
735 int ch_idx;
736 u16 ch_flags, prev_ch_flags = 0;
737 const u8 *nvm_chan = cfg->device_family == IWL_DEVICE_FAMILY_8000 ?
738 iwl_nvm_channels_family_8000 : iwl_nvm_channels;
739 struct ieee80211_regdomain *regd;
740 int size_of_regd;
741 struct ieee80211_reg_rule *rule;
742 enum ieee80211_band band;
743 int center_freq, prev_center_freq = 0;
744 int valid_rules = 0;
745 bool new_rule;
746 int max_num_ch = cfg->device_family == IWL_DEVICE_FAMILY_8000 ?
747 IWL_NUM_CHANNELS_FAMILY_8000 : IWL_NUM_CHANNELS;
748
749 if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
750 return ERR_PTR(-EINVAL);
751
752 if (WARN_ON(num_of_ch > max_num_ch))
753 num_of_ch = max_num_ch;
754
755 IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n",
756 num_of_ch);
757
758 /* build a regdomain rule for every valid channel */
759 size_of_regd =
760 sizeof(struct ieee80211_regdomain) +
761 num_of_ch * sizeof(struct ieee80211_reg_rule);
762
763 regd = kzalloc(size_of_regd, GFP_KERNEL);
764 if (!regd)
765 return ERR_PTR(-ENOMEM);
766
767 for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
768 ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
769 band = (ch_idx < NUM_2GHZ_CHANNELS) ?
770 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
771 center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
772 band);
773 new_rule = false;
774
775 if (!(ch_flags & NVM_CHANNEL_VALID)) {
776 IWL_DEBUG_DEV(dev, IWL_DL_LAR,
777 "Ch. %d Flags %x [%sGHz] - No traffic\n",
778 nvm_chan[ch_idx],
779 ch_flags,
780 (ch_idx >= NUM_2GHZ_CHANNELS) ?
781 "5.2" : "2.4");
782 continue;
783 }
784
785 /* we can't continue the same rule */
786 if (ch_idx == 0 || prev_ch_flags != ch_flags ||
787 center_freq - prev_center_freq > 20) {
788 valid_rules++;
789 new_rule = true;
790 }
791
792 rule = &regd->reg_rules[valid_rules - 1];
793
794 if (new_rule)
795 rule->freq_range.start_freq_khz =
796 MHZ_TO_KHZ(center_freq - 10);
797
798 rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10);
799
800 /* this doesn't matter - not used by FW */
801 rule->power_rule.max_antenna_gain = DBI_TO_MBI(6);
802 rule->power_rule.max_eirp =
803 DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);
804
805 rule->flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
806 ch_flags, cfg);
807
808 /* rely on auto-calculation to merge BW of contiguous chans */
809 rule->flags |= NL80211_RRF_AUTO_BW;
810 rule->freq_range.max_bandwidth_khz = 0;
811
812 prev_ch_flags = ch_flags;
813 prev_center_freq = center_freq;
814
815 IWL_DEBUG_DEV(dev, IWL_DL_LAR,
816 "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n",
817 center_freq,
818 band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
819 CHECK_AND_PRINT_I(VALID),
820 CHECK_AND_PRINT_I(ACTIVE),
821 CHECK_AND_PRINT_I(RADAR),
822 CHECK_AND_PRINT_I(WIDE),
823 CHECK_AND_PRINT_I(40MHZ),
824 CHECK_AND_PRINT_I(80MHZ),
825 CHECK_AND_PRINT_I(160MHZ),
826 CHECK_AND_PRINT_I(INDOOR_ONLY),
827 CHECK_AND_PRINT_I(GO_CONCURRENT),
828 ch_flags,
829 ((ch_flags & NVM_CHANNEL_ACTIVE) &&
830 !(ch_flags & NVM_CHANNEL_RADAR))
831 ? "" : "not ");
832 }
833
834 regd->n_reg_rules = valid_rules;
835
836 /* set alpha2 from FW. */
837 regd->alpha2[0] = fw_mcc >> 8;
838 regd->alpha2[1] = fw_mcc & 0xff;
839
840 return regd;
841}
842IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
index c9c45a39d212..c995d2cee3f6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
@@ -62,6 +62,7 @@
62#ifndef __iwl_nvm_parse_h__ 62#ifndef __iwl_nvm_parse_h__
63#define __iwl_nvm_parse_h__ 63#define __iwl_nvm_parse_h__
64 64
65#include <net/cfg80211.h>
65#include "iwl-eeprom-parse.h" 66#include "iwl-eeprom-parse.h"
66 67
67/** 68/**
@@ -76,6 +77,22 @@ struct iwl_nvm_data *
76iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, 77iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
77 const __le16 *nvm_hw, const __le16 *nvm_sw, 78 const __le16 *nvm_hw, const __le16 *nvm_sw,
78 const __le16 *nvm_calib, const __le16 *regulatory, 79 const __le16 *nvm_calib, const __le16 *regulatory,
79 const __le16 *mac_override, u8 tx_chains, u8 rx_chains); 80 const __le16 *mac_override, const __le16 *phy_sku,
81 u8 tx_chains, u8 rx_chains,
82 bool lar_fw_supported, bool is_family_8000_a_step,
83 u32 mac_addr0, u32 mac_addr1);
84
85/**
86 * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
87 *
88 * This function parses the regulatory channel data received as a
89 * MCC_UPDATE_CMD command. It returns a newly allocation regulatory domain,
90 * to be fed into the regulatory core. An ERR_PTR is returned on error.
91 * If not given to the regulatory core, the user is responsible for freeing
92 * the regdomain returned here with kfree.
93 */
94struct ieee80211_regdomain *
95iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
96 int num_of_ch, __le32 *channels, u16 fw_mcc);
80 97
81#endif /* __iwl_nvm_parse_h__ */ 98#endif /* __iwl_nvm_parse_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 6095088b88d9..bc962888c583 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -371,6 +371,33 @@ enum secure_load_status_reg {
371 371
372#define DBGC_IN_SAMPLE (0xa03c00) 372#define DBGC_IN_SAMPLE (0xa03c00)
373 373
374/* enable the ID buf for read */
375#define WFPM_PS_CTL_CLR 0xA0300C
376#define WFMP_MAC_ADDR_0 0xA03080
377#define WFMP_MAC_ADDR_1 0xA03084
378#define LMPM_PMG_EN 0xA01CEC
379#define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078
380#define RFIC_REG_RD 0xAD0470
381#define WFPM_CTRL_REG 0xA03030
382enum {
383 ENABLE_WFPM = BIT(31),
384 WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000,
385};
386
387#define AUX_MISC_REG 0xA200B0
388enum {
389 HW_STEP_LOCATION_BITS = 24,
390};
391
392#define AUX_MISC_MASTER1_EN 0xA20818
393enum aux_misc_master1_en {
394 AUX_MISC_MASTER1_EN_SBE_MSK = 0x1,
395};
396
397#define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800
398#define RSA_ENABLE 0xA24B08
399#define PREG_AUX_BUS_WPROT_0 0xA04CC0
400
374/* FW chicken bits */ 401/* FW chicken bits */
375#define LMPM_CHICK 0xA01FF8 402#define LMPM_CHICK 0xA01FF8
376enum { 403enum {
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 542a6810c81c..11ac5c58527f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -458,6 +458,8 @@ struct iwl_trans_txq_scd_cfg {
458 * @txq_disable: de-configure a Tx queue to send AMPDUs 458 * @txq_disable: de-configure a Tx queue to send AMPDUs
459 * Must be atomic 459 * Must be atomic
460 * @wait_tx_queue_empty: wait until tx queues are empty. May sleep. 460 * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
461 * @freeze_txq_timer: prevents the timer of the queue from firing until the
462 * queue is set to awake. Must be atomic.
461 * @dbgfs_register: add the dbgfs files under this directory. Files will be 463 * @dbgfs_register: add the dbgfs files under this directory. Files will be
462 * automatically deleted. 464 * automatically deleted.
463 * @write8: write a u8 to a register at offset ofs from the BAR 465 * @write8: write a u8 to a register at offset ofs from the BAR
@@ -517,6 +519,8 @@ struct iwl_trans_ops {
517 519
518 int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir); 520 int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
519 int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm); 521 int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
522 void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
523 bool freeze);
520 524
521 void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val); 525 void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
522 void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val); 526 void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
@@ -873,6 +877,17 @@ void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
873 iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout); 877 iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
874} 878}
875 879
880static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
881 unsigned long txqs,
882 bool freeze)
883{
884 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
885 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
886
887 if (trans->ops->freeze_txq_timer)
888 trans->ops->freeze_txq_timer(trans, txqs, freeze);
889}
890
876static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans, 891static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
877 u32 txqs) 892 u32 txqs)
878{ 893{
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 877f19bbae7e..13a0a03158de 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -72,158 +72,6 @@
72#include "mvm.h" 72#include "mvm.h"
73#include "iwl-debug.h" 73#include "iwl-debug.h"
74 74
75const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
76 [BT_KILL_MSK_DEFAULT] = 0xfffffc00,
77 [BT_KILL_MSK_NEVER] = 0xffffffff,
78 [BT_KILL_MSK_ALWAYS] = 0,
79};
80
81const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
82 {
83 BT_KILL_MSK_ALWAYS,
84 BT_KILL_MSK_ALWAYS,
85 BT_KILL_MSK_ALWAYS,
86 },
87 {
88 BT_KILL_MSK_NEVER,
89 BT_KILL_MSK_NEVER,
90 BT_KILL_MSK_NEVER,
91 },
92 {
93 BT_KILL_MSK_NEVER,
94 BT_KILL_MSK_NEVER,
95 BT_KILL_MSK_NEVER,
96 },
97 {
98 BT_KILL_MSK_DEFAULT,
99 BT_KILL_MSK_NEVER,
100 BT_KILL_MSK_DEFAULT,
101 },
102};
103
104const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
105 {
106 BT_KILL_MSK_ALWAYS,
107 BT_KILL_MSK_ALWAYS,
108 BT_KILL_MSK_ALWAYS,
109 },
110 {
111 BT_KILL_MSK_ALWAYS,
112 BT_KILL_MSK_ALWAYS,
113 BT_KILL_MSK_ALWAYS,
114 },
115 {
116 BT_KILL_MSK_ALWAYS,
117 BT_KILL_MSK_ALWAYS,
118 BT_KILL_MSK_ALWAYS,
119 },
120 {
121 BT_KILL_MSK_DEFAULT,
122 BT_KILL_MSK_ALWAYS,
123 BT_KILL_MSK_DEFAULT,
124 },
125};
126
127static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
128 cpu_to_le32(0xf0f0f0f0), /* 50% */
129 cpu_to_le32(0xc0c0c0c0), /* 25% */
130 cpu_to_le32(0xfcfcfcfc), /* 75% */
131 cpu_to_le32(0xfefefefe), /* 87.5% */
132};
133
134static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
135 {
136 cpu_to_le32(0x40000000),
137 cpu_to_le32(0x00000000),
138 cpu_to_le32(0x44000000),
139 cpu_to_le32(0x00000000),
140 cpu_to_le32(0x40000000),
141 cpu_to_le32(0x00000000),
142 cpu_to_le32(0x44000000),
143 cpu_to_le32(0x00000000),
144 cpu_to_le32(0xc0004000),
145 cpu_to_le32(0xf0005000),
146 cpu_to_le32(0xc0004000),
147 cpu_to_le32(0xf0005000),
148 },
149 {
150 cpu_to_le32(0x40000000),
151 cpu_to_le32(0x00000000),
152 cpu_to_le32(0x44000000),
153 cpu_to_le32(0x00000000),
154 cpu_to_le32(0x40000000),
155 cpu_to_le32(0x00000000),
156 cpu_to_le32(0x44000000),
157 cpu_to_le32(0x00000000),
158 cpu_to_le32(0xc0004000),
159 cpu_to_le32(0xf0005000),
160 cpu_to_le32(0xc0004000),
161 cpu_to_le32(0xf0005000),
162 },
163 {
164 cpu_to_le32(0x40000000),
165 cpu_to_le32(0x00000000),
166 cpu_to_le32(0x44000000),
167 cpu_to_le32(0x00000000),
168 cpu_to_le32(0x40000000),
169 cpu_to_le32(0x00000000),
170 cpu_to_le32(0x44000000),
171 cpu_to_le32(0x00000000),
172 cpu_to_le32(0xc0004000),
173 cpu_to_le32(0xf0005000),
174 cpu_to_le32(0xc0004000),
175 cpu_to_le32(0xf0005000),
176 },
177};
178
179static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
180 {
181 /* Tight */
182 cpu_to_le32(0xaaaaaaaa),
183 cpu_to_le32(0xaaaaaaaa),
184 cpu_to_le32(0xaeaaaaaa),
185 cpu_to_le32(0xaaaaaaaa),
186 cpu_to_le32(0xcc00ff28),
187 cpu_to_le32(0x0000aaaa),
188 cpu_to_le32(0xcc00aaaa),
189 cpu_to_le32(0x0000aaaa),
190 cpu_to_le32(0xc0004000),
191 cpu_to_le32(0x00004000),
192 cpu_to_le32(0xf0005000),
193 cpu_to_le32(0xf0005000),
194 },
195 {
196 /* Loose */
197 cpu_to_le32(0xaaaaaaaa),
198 cpu_to_le32(0xaaaaaaaa),
199 cpu_to_le32(0xaaaaaaaa),
200 cpu_to_le32(0xaaaaaaaa),
201 cpu_to_le32(0xcc00ff28),
202 cpu_to_le32(0x0000aaaa),
203 cpu_to_le32(0xcc00aaaa),
204 cpu_to_le32(0x0000aaaa),
205 cpu_to_le32(0x00000000),
206 cpu_to_le32(0x00000000),
207 cpu_to_le32(0xf0005000),
208 cpu_to_le32(0xf0005000),
209 },
210 {
211 /* Tx Tx disabled */
212 cpu_to_le32(0xaaaaaaaa),
213 cpu_to_le32(0xaaaaaaaa),
214 cpu_to_le32(0xeeaaaaaa),
215 cpu_to_le32(0xaaaaaaaa),
216 cpu_to_le32(0xcc00ff28),
217 cpu_to_le32(0x0000aaaa),
218 cpu_to_le32(0xcc00aaaa),
219 cpu_to_le32(0x0000aaaa),
220 cpu_to_le32(0xc0004000),
221 cpu_to_le32(0xc0004000),
222 cpu_to_le32(0xf0005000),
223 cpu_to_le32(0xf0005000),
224 },
225};
226
227/* 20MHz / 40MHz below / 40Mhz above*/ 75/* 20MHz / 40MHz below / 40Mhz above*/
228static const __le64 iwl_ci_mask[][3] = { 76static const __le64 iwl_ci_mask[][3] = {
229 /* dummy entry for channel 0 */ 77 /* dummy entry for channel 0 */
@@ -596,14 +444,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
596 goto send_cmd; 444 goto send_cmd;
597 } 445 }
598 446
599 bt_cmd->max_kill = cpu_to_le32(5);
600 bt_cmd->bt4_antenna_isolation_thr =
601 cpu_to_le32(IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS);
602 bt_cmd->bt4_tx_tx_delta_freq_thr = cpu_to_le32(15);
603 bt_cmd->bt4_tx_rx_max_freq0 = cpu_to_le32(15);
604 bt_cmd->override_primary_lut = cpu_to_le32(BT_COEX_INVALID_LUT);
605 bt_cmd->override_secondary_lut = cpu_to_le32(BT_COEX_INVALID_LUT);
606
607 mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE; 447 mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
608 bt_cmd->mode = cpu_to_le32(mode); 448 bt_cmd->mode = cpu_to_le32(mode);
609 449
@@ -622,18 +462,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
622 462
623 bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET); 463 bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
624 464
625 if (mvm->cfg->bt_shared_single_ant)
626 memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
627 sizeof(iwl_single_shared_ant));
628 else
629 memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
630 sizeof(iwl_combined_lookup));
631
632 memcpy(&bt_cmd->mplut_prio_boost, iwl_bt_prio_boost,
633 sizeof(iwl_bt_prio_boost));
634 bt_cmd->multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0);
635 bt_cmd->multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1);
636
637send_cmd: 465send_cmd:
638 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); 466 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
639 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); 467 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
@@ -644,48 +472,6 @@ send_cmd:
644 return ret; 472 return ret;
645} 473}
646 474
647static int iwl_mvm_bt_udpate_sw_boost(struct iwl_mvm *mvm)
648{
649 struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
650 u32 primary_lut = le32_to_cpu(notif->primary_ch_lut);
651 u32 secondary_lut = le32_to_cpu(notif->secondary_ch_lut);
652 u32 ag = le32_to_cpu(notif->bt_activity_grading);
653 struct iwl_bt_coex_sw_boost_update_cmd cmd = {};
654 u8 ack_kill_msk[NUM_PHY_CTX] = {};
655 u8 cts_kill_msk[NUM_PHY_CTX] = {};
656 int i;
657
658 lockdep_assert_held(&mvm->mutex);
659
660 ack_kill_msk[0] = iwl_bt_ack_kill_msk[ag][primary_lut];
661 cts_kill_msk[0] = iwl_bt_cts_kill_msk[ag][primary_lut];
662
663 ack_kill_msk[1] = iwl_bt_ack_kill_msk[ag][secondary_lut];
664 cts_kill_msk[1] = iwl_bt_cts_kill_msk[ag][secondary_lut];
665
666 /* Don't send HCMD if there is no update */
667 if (!memcmp(ack_kill_msk, mvm->bt_ack_kill_msk, sizeof(ack_kill_msk)) ||
668 !memcmp(cts_kill_msk, mvm->bt_cts_kill_msk, sizeof(cts_kill_msk)))
669 return 0;
670
671 memcpy(mvm->bt_ack_kill_msk, ack_kill_msk,
672 sizeof(mvm->bt_ack_kill_msk));
673 memcpy(mvm->bt_cts_kill_msk, cts_kill_msk,
674 sizeof(mvm->bt_cts_kill_msk));
675
676 BUILD_BUG_ON(ARRAY_SIZE(ack_kill_msk) < ARRAY_SIZE(cmd.boost_values));
677
678 for (i = 0; i < ARRAY_SIZE(cmd.boost_values); i++) {
679 cmd.boost_values[i].kill_ack_msk =
680 cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk[i]]);
681 cmd.boost_values[i].kill_cts_msk =
682 cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk[i]]);
683 }
684
685 return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_SW_BOOST, 0,
686 sizeof(cmd), &cmd);
687}
688
689static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, 475static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
690 bool enable) 476 bool enable)
691{ 477{
@@ -951,9 +737,6 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
951 IWL_ERR(mvm, "Failed to send BT_CI cmd\n"); 737 IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
952 memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd)); 738 memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
953 } 739 }
954
955 if (iwl_mvm_bt_udpate_sw_boost(mvm))
956 IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
957} 740}
958 741
959int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, 742int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
@@ -1074,9 +857,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1074 ieee80211_iterate_active_interfaces_atomic( 857 ieee80211_iterate_active_interfaces_atomic(
1075 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 858 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1076 iwl_mvm_bt_rssi_iterator, &data); 859 iwl_mvm_bt_rssi_iterator, &data);
1077
1078 if (iwl_mvm_bt_udpate_sw_boost(mvm))
1079 IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
1080} 860}
1081 861
1082#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) 862#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index 5535ec9766cb..d954591e0be5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -288,6 +288,65 @@ static const __le64 iwl_ci_mask[][3] = {
288 }, 288 },
289}; 289};
290 290
291enum iwl_bt_kill_msk {
292 BT_KILL_MSK_DEFAULT,
293 BT_KILL_MSK_NEVER,
294 BT_KILL_MSK_ALWAYS,
295 BT_KILL_MSK_MAX,
296};
297
298static const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
299 [BT_KILL_MSK_DEFAULT] = 0xfffffc00,
300 [BT_KILL_MSK_NEVER] = 0xffffffff,
301 [BT_KILL_MSK_ALWAYS] = 0,
302};
303
304static const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
305 {
306 BT_KILL_MSK_ALWAYS,
307 BT_KILL_MSK_ALWAYS,
308 BT_KILL_MSK_ALWAYS,
309 },
310 {
311 BT_KILL_MSK_NEVER,
312 BT_KILL_MSK_NEVER,
313 BT_KILL_MSK_NEVER,
314 },
315 {
316 BT_KILL_MSK_NEVER,
317 BT_KILL_MSK_NEVER,
318 BT_KILL_MSK_NEVER,
319 },
320 {
321 BT_KILL_MSK_DEFAULT,
322 BT_KILL_MSK_NEVER,
323 BT_KILL_MSK_DEFAULT,
324 },
325};
326
327static const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
328 {
329 BT_KILL_MSK_ALWAYS,
330 BT_KILL_MSK_ALWAYS,
331 BT_KILL_MSK_ALWAYS,
332 },
333 {
334 BT_KILL_MSK_ALWAYS,
335 BT_KILL_MSK_ALWAYS,
336 BT_KILL_MSK_ALWAYS,
337 },
338 {
339 BT_KILL_MSK_ALWAYS,
340 BT_KILL_MSK_ALWAYS,
341 BT_KILL_MSK_ALWAYS,
342 },
343 {
344 BT_KILL_MSK_DEFAULT,
345 BT_KILL_MSK_ALWAYS,
346 BT_KILL_MSK_DEFAULT,
347 },
348};
349
291struct corunning_block_luts { 350struct corunning_block_luts {
292 u8 range; 351 u8 range;
293 __le32 lut20[BT_COEX_CORUN_LUT_SIZE]; 352 __le32 lut20[BT_COEX_CORUN_LUT_SIZE];
@@ -633,7 +692,7 @@ int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
633 if (IWL_MVM_BT_COEX_TTC) 692 if (IWL_MVM_BT_COEX_TTC)
634 bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC); 693 bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC);
635 694
636 if (IWL_MVM_BT_COEX_RRC) 695 if (iwl_mvm_bt_is_rrc_supported(mvm))
637 bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC); 696 bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC);
638 697
639 if (mvm->cfg->bt_shared_single_ant) 698 if (mvm->cfg->bt_shared_single_ant)
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 9bdfa95d6ce7..5f8afa5f11a3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -694,6 +694,9 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
694 if (ret) 694 if (ret)
695 IWL_ERR(mvm, "Failed to send quota: %d\n", ret); 695 IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
696 696
697 if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
698 IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");
699
697 return 0; 700 return 0;
698} 701}
699 702
@@ -1596,7 +1599,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1596 1599
1597 /* RF-kill already asserted again... */ 1600 /* RF-kill already asserted again... */
1598 if (!cmd.resp_pkt) { 1601 if (!cmd.resp_pkt) {
1599 ret = -ERFKILL; 1602 fw_status = ERR_PTR(-ERFKILL);
1600 goto out_free_resp; 1603 goto out_free_resp;
1601 } 1604 }
1602 1605
@@ -1605,7 +1608,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1605 len = iwl_rx_packet_payload_len(cmd.resp_pkt); 1608 len = iwl_rx_packet_payload_len(cmd.resp_pkt);
1606 if (len < status_size) { 1609 if (len < status_size) {
1607 IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); 1610 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1608 ret = -EIO; 1611 fw_status = ERR_PTR(-EIO);
1609 goto out_free_resp; 1612 goto out_free_resp;
1610 } 1613 }
1611 1614
@@ -1613,7 +1616,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1613 if (len != (status_size + 1616 if (len != (status_size +
1614 ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) { 1617 ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
1615 IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); 1618 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1616 ret = -EIO; 1619 fw_status = ERR_PTR(-EIO);
1617 goto out_free_resp; 1620 goto out_free_resp;
1618 } 1621 }
1619 1622
@@ -1621,7 +1624,7 @@ iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1621 1624
1622out_free_resp: 1625out_free_resp:
1623 iwl_free_resp(&cmd); 1626 iwl_free_resp(&cmd);
1624 return ret ? ERR_PTR(ret) : fw_status; 1627 return fw_status;
1625} 1628}
1626 1629
1627/* releases the MVM mutex */ 1630/* releases the MVM mutex */
@@ -1874,6 +1877,12 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
1874 /* query SRAM first in case we want event logging */ 1877 /* query SRAM first in case we want event logging */
1875 iwl_mvm_read_d3_sram(mvm); 1878 iwl_mvm_read_d3_sram(mvm);
1876 1879
1880 /*
1881 * Query the current location and source from the D3 firmware so we
1882 * can play it back when we re-intiailize the D0 firmware
1883 */
1884 iwl_mvm_update_changed_regdom(mvm);
1885
1877 if (mvm->net_detect) { 1886 if (mvm->net_detect) {
1878 iwl_mvm_query_netdetect_reasons(mvm, vif); 1887 iwl_mvm_query_netdetect_reasons(mvm, vif);
1879 /* has unlocked the mutex, so skip that */ 1888 /* has unlocked the mutex, so skip that */
@@ -1883,9 +1892,9 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
1883#ifdef CONFIG_IWLWIFI_DEBUGFS 1892#ifdef CONFIG_IWLWIFI_DEBUGFS
1884 if (keep) 1893 if (keep)
1885 mvm->keep_vif = vif; 1894 mvm->keep_vif = vif;
1895#endif
1886 /* has unlocked the mutex, so skip that */ 1896 /* has unlocked the mutex, so skip that */
1887 goto out_iterate; 1897 goto out_iterate;
1888#endif
1889 } 1898 }
1890 1899
1891 out_unlock: 1900 out_unlock:
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 8cbe77dc1dbb..8c5229892e57 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -562,11 +562,12 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
562 "\tSecondary Channel Bitmap 0x%016llx\n", 562 "\tSecondary Channel Bitmap 0x%016llx\n",
563 le64_to_cpu(cmd->bt_secondary_ci)); 563 le64_to_cpu(cmd->bt_secondary_ci));
564 564
565 pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n"); 565 pos += scnprintf(buf+pos, bufsz-pos,
566 pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill Mask 0x%08x\n", 566 "BT Configuration CMD - 0=default, 1=never, 2=always\n");
567 iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[0]]); 567 pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n",
568 pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill Mask 0x%08x\n", 568 mvm->bt_ack_kill_msk[0]);
569 iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[0]]); 569 pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n",
570 mvm->bt_cts_kill_msk[0]);
570 571
571 } else { 572 } else {
572 struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd; 573 struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
@@ -579,21 +580,6 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
579 pos += scnprintf(buf+pos, bufsz-pos, 580 pos += scnprintf(buf+pos, bufsz-pos,
580 "\tSecondary Channel Bitmap 0x%016llx\n", 581 "\tSecondary Channel Bitmap 0x%016llx\n",
581 le64_to_cpu(cmd->bt_secondary_ci)); 582 le64_to_cpu(cmd->bt_secondary_ci));
582
583 pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
584 pos += scnprintf(buf+pos, bufsz-pos,
585 "\tPrimary: ACK Kill Mask 0x%08x\n",
586 iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[0]]);
587 pos += scnprintf(buf+pos, bufsz-pos,
588 "\tPrimary: CTS Kill Mask 0x%08x\n",
589 iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[0]]);
590 pos += scnprintf(buf+pos, bufsz-pos,
591 "\tSecondary: ACK Kill Mask 0x%08x\n",
592 iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[1]]);
593 pos += scnprintf(buf+pos, bufsz-pos,
594 "\tSecondary: CTS Kill Mask 0x%08x\n",
595 iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[1]]);
596
597 } 583 }
598 584
599 mutex_unlock(&mvm->mutex); 585 mutex_unlock(&mvm->mutex);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
index f3b11897991e..d398a6102805 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
@@ -235,36 +235,12 @@ enum iwl_bt_coex_enabled_modules {
235 * struct iwl_bt_coex_cmd - bt coex configuration command 235 * struct iwl_bt_coex_cmd - bt coex configuration command
236 * @mode: enum %iwl_bt_coex_mode 236 * @mode: enum %iwl_bt_coex_mode
237 * @enabled_modules: enum %iwl_bt_coex_enabled_modules 237 * @enabled_modules: enum %iwl_bt_coex_enabled_modules
238 * @max_kill: max count of Tx retries due to kill from PTA
239 * @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
240 * should be set by default
241 * @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
242 * should be set by default
243 * @bt4_antenna_isolation_thr: antenna threshold value
244 * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
245 * @bt4_tx_rx_max_freq0: TxRx max frequency
246 * @multiprio_lut: multi priority LUT configuration
247 * @mplut_prio_boost: BT priority boost registers
248 * @decision_lut: PTA decision LUT, per Prio-Ch
249 * 238 *
250 * The structure is used for the BT_COEX command. 239 * The structure is used for the BT_COEX command.
251 */ 240 */
252struct iwl_bt_coex_cmd { 241struct iwl_bt_coex_cmd {
253 __le32 mode; 242 __le32 mode;
254 __le32 enabled_modules; 243 __le32 enabled_modules;
255
256 __le32 max_kill;
257 __le32 override_primary_lut;
258 __le32 override_secondary_lut;
259 __le32 bt4_antenna_isolation_thr;
260
261 __le32 bt4_tx_tx_delta_freq_thr;
262 __le32 bt4_tx_rx_max_freq0;
263
264 __le32 multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE];
265 __le32 mplut_prio_boost[BT_COEX_BOOST_SIZE];
266
267 __le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE];
268} __packed; /* BT_COEX_CMD_API_S_VER_6 */ 244} __packed; /* BT_COEX_CMD_API_S_VER_6 */
269 245
270/** 246/**
@@ -280,29 +256,6 @@ struct iwl_bt_coex_corun_lut_update_cmd {
280} __packed; /* BT_COEX_UPDATE_CORUN_LUT_API_S_VER_1 */ 256} __packed; /* BT_COEX_UPDATE_CORUN_LUT_API_S_VER_1 */
281 257
282/** 258/**
283 * struct iwl_bt_coex_sw_boost - SW boost values
284 * @wifi_tx_prio_boost: SW boost of wifi tx priority
285 * @wifi_rx_prio_boost: SW boost of wifi rx priority
286 * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
287 * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
288 */
289struct iwl_bt_coex_sw_boost {
290 __le32 wifi_tx_prio_boost;
291 __le32 wifi_rx_prio_boost;
292 __le32 kill_ack_msk;
293 __le32 kill_cts_msk;
294};
295
296/**
297 * struct iwl_bt_coex_sw_boost_update_cmd - command to update the SW boost
298 * @boost_values: check struct %iwl_bt_coex_sw_boost - one for each channel
299 * primary / secondary / low priority
300 */
301struct iwl_bt_coex_sw_boost_update_cmd {
302 struct iwl_bt_coex_sw_boost boost_values[3];
303} __packed; /* BT_COEX_UPDATE_SW_BOOST_S_VER_1 */
304
305/**
306 * struct iwl_bt_coex_reduced_txp_update_cmd 259 * struct iwl_bt_coex_reduced_txp_update_cmd
307 * @reduced_txp: bit BT_REDUCED_TX_POWER_BIT to enable / disable, rest of the 260 * @reduced_txp: bit BT_REDUCED_TX_POWER_BIT to enable / disable, rest of the
308 * bits are the sta_id (value) 261 * bits are the sta_id (value)
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index d95b47213731..aab68cbae754 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -212,6 +212,10 @@ enum {
212 REPLY_RX_MPDU_CMD = 0xc1, 212 REPLY_RX_MPDU_CMD = 0xc1,
213 BA_NOTIF = 0xc5, 213 BA_NOTIF = 0xc5,
214 214
215 /* Location Aware Regulatory */
216 MCC_UPDATE_CMD = 0xc8,
217 MCC_CHUB_UPDATE_CMD = 0xc9,
218
215 MARKER_CMD = 0xcb, 219 MARKER_CMD = 0xcb,
216 220
217 /* BT Coex */ 221 /* BT Coex */
@@ -362,7 +366,8 @@ enum {
362 NVM_SECTION_TYPE_CALIBRATION = 4, 366 NVM_SECTION_TYPE_CALIBRATION = 4,
363 NVM_SECTION_TYPE_PRODUCTION = 5, 367 NVM_SECTION_TYPE_PRODUCTION = 5,
364 NVM_SECTION_TYPE_MAC_OVERRIDE = 11, 368 NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
365 NVM_MAX_NUM_SECTIONS = 12, 369 NVM_SECTION_TYPE_PHY_SKU = 12,
370 NVM_MAX_NUM_SECTIONS = 13,
366}; 371};
367 372
368/** 373/**
@@ -1442,7 +1447,19 @@ enum iwl_sf_scenario {
1442#define SF_W_MARK_LEGACY 4096 1447#define SF_W_MARK_LEGACY 4096
1443#define SF_W_MARK_SCAN 4096 1448#define SF_W_MARK_SCAN 4096
1444 1449
1445/* SF Scenarios timers for FULL_ON state (aligned to 32 uSec) */ 1450/* SF Scenarios timers for default configuration (aligned to 32 uSec) */
1451#define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
1452#define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
1453#define SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
1454#define SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
1455#define SF_MCAST_IDLE_TIMER_DEF 160 /* 150 mSec */
1456#define SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
1457#define SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */
1458#define SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */
1459#define SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */
1460#define SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */
1461
1462/* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */
1446#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */ 1463#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */
1447#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */ 1464#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */
1448#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */ 1465#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */
@@ -1473,6 +1490,92 @@ struct iwl_sf_cfg_cmd {
1473 __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; 1490 __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
1474} __packed; /* SF_CFG_API_S_VER_2 */ 1491} __packed; /* SF_CFG_API_S_VER_2 */
1475 1492
1493/***********************************
1494 * Location Aware Regulatory (LAR) API - MCC updates
1495 ***********************************/
1496
1497/**
1498 * struct iwl_mcc_update_cmd - Request the device to update geographic
1499 * regulatory profile according to the given MCC (Mobile Country Code).
1500 * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
1501 * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
1502 * MCC in the cmd response will be the relevant MCC in the NVM.
1503 * @mcc: given mobile country code
1504 * @source_id: the source from where we got the MCC, see iwl_mcc_source
1505 * @reserved: reserved for alignment
1506 */
1507struct iwl_mcc_update_cmd {
1508 __le16 mcc;
1509 u8 source_id;
1510 u8 reserved;
1511} __packed; /* LAR_UPDATE_MCC_CMD_API_S */
1512
1513/**
1514 * iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
1515 * Contains the new channel control profile map, if changed, and the new MCC
1516 * (mobile country code).
1517 * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
1518 * @status: see &enum iwl_mcc_update_status
1519 * @mcc: the new applied MCC
1520 * @cap: capabilities for all channels which matches the MCC
1521 * @source_id: the MCC source, see iwl_mcc_source
1522 * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
1523 * channels, depending on platform)
1524 * @channels: channel control data map, DWORD for each channel. Only the first
1525 * 16bits are used.
1526 */
1527struct iwl_mcc_update_resp {
1528 __le32 status;
1529 __le16 mcc;
1530 u8 cap;
1531 u8 source_id;
1532 __le32 n_channels;
1533 __le32 channels[0];
1534} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S */
1535
1536/**
1537 * struct iwl_mcc_chub_notif - chub notifies of mcc change
1538 * (MCC_CHUB_UPDATE_CMD = 0xc9)
1539 * The Chub (Communication Hub, CommsHUB) is a HW component that connects to
1540 * the cellular and connectivity cores that gets updates of the mcc, and
1541 * notifies the ucode directly of any mcc change.
1542 * The ucode requests the driver to request the device to update geographic
1543 * regulatory profile according to the given MCC (Mobile Country Code).
1544 * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
1545 * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
1546 * MCC in the cmd response will be the relevant MCC in the NVM.
1547 * @mcc: given mobile country code
1548 * @source_id: identity of the change originator, see iwl_mcc_source
1549 * @reserved1: reserved for alignment
1550 */
1551struct iwl_mcc_chub_notif {
1552 u16 mcc;
1553 u8 source_id;
1554 u8 reserved1;
1555} __packed; /* LAR_MCC_NOTIFY_S */
1556
1557enum iwl_mcc_update_status {
1558 MCC_RESP_NEW_CHAN_PROFILE,
1559 MCC_RESP_SAME_CHAN_PROFILE,
1560 MCC_RESP_INVALID,
1561 MCC_RESP_NVM_DISABLED,
1562 MCC_RESP_ILLEGAL,
1563 MCC_RESP_LOW_PRIORITY,
1564};
1565
1566enum iwl_mcc_source {
1567 MCC_SOURCE_OLD_FW = 0,
1568 MCC_SOURCE_ME = 1,
1569 MCC_SOURCE_BIOS = 2,
1570 MCC_SOURCE_3G_LTE_HOST = 3,
1571 MCC_SOURCE_3G_LTE_DEVICE = 4,
1572 MCC_SOURCE_WIFI = 5,
1573 MCC_SOURCE_RESERVED = 6,
1574 MCC_SOURCE_DEFAULT = 7,
1575 MCC_SOURCE_UNINITIALIZED = 8,
1576 MCC_SOURCE_GET_CURRENT = 0x10
1577};
1578
1476/* DTS measurements */ 1579/* DTS measurements */
1477 1580
1478enum iwl_dts_measurement_flags { 1581enum iwl_dts_measurement_flags {
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index a81da4cde643..6cf7d9837ca5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -739,6 +739,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
739 if (ret) 739 if (ret)
740 goto error; 740 goto error;
741 741
742 /*
743 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
744 * anyway, so don't init MCC.
745 */
746 if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
747 ret = iwl_mvm_init_mcc(mvm);
748 if (ret)
749 goto error;
750 }
751
742 if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { 752 if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
743 ret = iwl_mvm_config_scan(mvm); 753 ret = iwl_mvm_config_scan(mvm);
744 if (ret) 754 if (ret)
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 7396b52262b5..302c8cc50f25 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -86,6 +86,7 @@
86#include "iwl-fw-error-dump.h" 86#include "iwl-fw-error-dump.h"
87#include "iwl-prph.h" 87#include "iwl-prph.h"
88#include "iwl-csr.h" 88#include "iwl-csr.h"
89#include "iwl-nvm-parse.h"
89 90
90static const struct ieee80211_iface_limit iwl_mvm_limits[] = { 91static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
91 { 92 {
@@ -301,6 +302,109 @@ static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
301 } 302 }
302} 303}
303 304
305struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
306 const char *alpha2,
307 enum iwl_mcc_source src_id,
308 bool *changed)
309{
310 struct ieee80211_regdomain *regd = NULL;
311 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
312 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
313 struct iwl_mcc_update_resp *resp;
314
315 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
316
317 lockdep_assert_held(&mvm->mutex);
318
319 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
320 if (IS_ERR_OR_NULL(resp)) {
321 IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
322 PTR_RET(resp));
323 goto out;
324 }
325
326 if (changed)
327 *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
328
329 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
330 __le32_to_cpu(resp->n_channels),
331 resp->channels,
332 __le16_to_cpu(resp->mcc));
333 /* Store the return source id */
334 src_id = resp->source_id;
335 kfree(resp);
336 if (IS_ERR_OR_NULL(regd)) {
337 IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
338 PTR_RET(regd));
339 goto out;
340 }
341
342 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
343 regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
344 mvm->lar_regdom_set = true;
345 mvm->mcc_src = src_id;
346
347out:
348 return regd;
349}
350
351void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
352{
353 bool changed;
354 struct ieee80211_regdomain *regd;
355
356 if (!iwl_mvm_is_lar_supported(mvm))
357 return;
358
359 regd = iwl_mvm_get_current_regdomain(mvm, &changed);
360 if (!IS_ERR_OR_NULL(regd)) {
361 /* only update the regulatory core if changed */
362 if (changed)
363 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
364
365 kfree(regd);
366 }
367}
368
369struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
370 bool *changed)
371{
372 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
373 iwl_mvm_is_wifi_mcc_supported(mvm) ?
374 MCC_SOURCE_GET_CURRENT :
375 MCC_SOURCE_OLD_FW, changed);
376}
377
378int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
379{
380 enum iwl_mcc_source used_src;
381 struct ieee80211_regdomain *regd;
382 const struct ieee80211_regdomain *r =
383 rtnl_dereference(mvm->hw->wiphy->regd);
384
385 if (!r)
386 return 0;
387
388 /* save the last source in case we overwrite it below */
389 used_src = mvm->mcc_src;
390 if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
391 /* Notify the firmware we support wifi location updates */
392 regd = iwl_mvm_get_current_regdomain(mvm, NULL);
393 if (!IS_ERR_OR_NULL(regd))
394 kfree(regd);
395 }
396
397 /* Now set our last stored MCC and source */
398 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src, NULL);
399 if (IS_ERR_OR_NULL(regd))
400 return -EIO;
401
402 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
403 kfree(regd);
404
405 return 0;
406}
407
304int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) 408int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
305{ 409{
306 struct ieee80211_hw *hw = mvm->hw; 410 struct ieee80211_hw *hw = mvm->hw;
@@ -356,8 +460,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
356 BIT(NL80211_IFTYPE_ADHOC); 460 BIT(NL80211_IFTYPE_ADHOC);
357 461
358 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 462 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
359 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | 463 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
360 REGULATORY_DISABLE_BEACON_HINTS; 464 if (iwl_mvm_is_lar_supported(mvm))
465 hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
466 else
467 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
468 REGULATORY_DISABLE_BEACON_HINTS;
361 469
362 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD) 470 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
363 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; 471 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
@@ -1193,7 +1301,7 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
1193 1301
1194 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1302 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1195 iwl_mvm_d0i3_enable_tx(mvm, NULL); 1303 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1196 ret = iwl_mvm_update_quotas(mvm, NULL); 1304 ret = iwl_mvm_update_quotas(mvm, false, NULL);
1197 if (ret) 1305 if (ret)
1198 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", 1306 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1199 ret); 1307 ret);
@@ -1872,7 +1980,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1872 sizeof(mvmvif->beacon_stats)); 1980 sizeof(mvmvif->beacon_stats));
1873 1981
1874 /* add quota for this interface */ 1982 /* add quota for this interface */
1875 ret = iwl_mvm_update_quotas(mvm, NULL); 1983 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1876 if (ret) { 1984 if (ret) {
1877 IWL_ERR(mvm, "failed to update quotas\n"); 1985 IWL_ERR(mvm, "failed to update quotas\n");
1878 return; 1986 return;
@@ -1924,7 +2032,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1924 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; 2032 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1925 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; 2033 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
1926 /* remove quota for this interface */ 2034 /* remove quota for this interface */
1927 ret = iwl_mvm_update_quotas(mvm, NULL); 2035 ret = iwl_mvm_update_quotas(mvm, false, NULL);
1928 if (ret) 2036 if (ret)
1929 IWL_ERR(mvm, "failed to update quotas\n"); 2037 IWL_ERR(mvm, "failed to update quotas\n");
1930 2038
@@ -2043,7 +2151,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2043 /* power updated needs to be done before quotas */ 2151 /* power updated needs to be done before quotas */
2044 iwl_mvm_power_update_mac(mvm); 2152 iwl_mvm_power_update_mac(mvm);
2045 2153
2046 ret = iwl_mvm_update_quotas(mvm, NULL); 2154 ret = iwl_mvm_update_quotas(mvm, false, NULL);
2047 if (ret) 2155 if (ret)
2048 goto out_quota_failed; 2156 goto out_quota_failed;
2049 2157
@@ -2109,7 +2217,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2109 if (vif->p2p && mvm->p2p_device_vif) 2217 if (vif->p2p && mvm->p2p_device_vif)
2110 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); 2218 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2111 2219
2112 iwl_mvm_update_quotas(mvm, NULL); 2220 iwl_mvm_update_quotas(mvm, false, NULL);
2113 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2221 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2114 iwl_mvm_binding_remove_vif(mvm, vif); 2222 iwl_mvm_binding_remove_vif(mvm, vif);
2115 2223
@@ -2248,6 +2356,12 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
2248 2356
2249 mutex_lock(&mvm->mutex); 2357 mutex_lock(&mvm->mutex);
2250 2358
2359 if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
2360 IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
2361 ret = -EBUSY;
2362 goto out;
2363 }
2364
2251 if (mvm->scan_status != IWL_MVM_SCAN_NONE) { 2365 if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
2252 ret = -EBUSY; 2366 ret = -EBUSY;
2253 goto out; 2367 goto out;
@@ -2328,25 +2442,35 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2328{ 2442{
2329 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2443 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2330 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2444 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2445 unsigned long txqs = 0, tids = 0;
2331 int tid; 2446 int tid;
2332 2447
2448 spin_lock_bh(&mvmsta->lock);
2449 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2450 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2451
2452 if (tid_data->state != IWL_AGG_ON &&
2453 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
2454 continue;
2455
2456 __set_bit(tid_data->txq_id, &txqs);
2457
2458 if (iwl_mvm_tid_queued(tid_data) == 0)
2459 continue;
2460
2461 __set_bit(tid, &tids);
2462 }
2463
2333 switch (cmd) { 2464 switch (cmd) {
2334 case STA_NOTIFY_SLEEP: 2465 case STA_NOTIFY_SLEEP:
2335 if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0) 2466 if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
2336 ieee80211_sta_block_awake(hw, sta, true); 2467 ieee80211_sta_block_awake(hw, sta, true);
2337 spin_lock_bh(&mvmsta->lock);
2338 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2339 struct iwl_mvm_tid_data *tid_data;
2340 2468
2341 tid_data = &mvmsta->tid_data[tid]; 2469 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
2342 if (tid_data->state != IWL_AGG_ON &&
2343 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
2344 continue;
2345 if (iwl_mvm_tid_queued(tid_data) == 0)
2346 continue;
2347 ieee80211_sta_set_buffered(sta, tid, true); 2470 ieee80211_sta_set_buffered(sta, tid, true);
2348 } 2471
2349 spin_unlock_bh(&mvmsta->lock); 2472 if (txqs)
2473 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
2350 /* 2474 /*
2351 * The fw updates the STA to be asleep. Tx packets on the Tx 2475 * The fw updates the STA to be asleep. Tx packets on the Tx
2352 * queues to this station will not be transmitted. The fw will 2476 * queues to this station will not be transmitted. The fw will
@@ -2356,11 +2480,15 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2356 case STA_NOTIFY_AWAKE: 2480 case STA_NOTIFY_AWAKE:
2357 if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) 2481 if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
2358 break; 2482 break;
2483
2484 if (txqs)
2485 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
2359 iwl_mvm_sta_modify_ps_wake(mvm, sta); 2486 iwl_mvm_sta_modify_ps_wake(mvm, sta);
2360 break; 2487 break;
2361 default: 2488 default:
2362 break; 2489 break;
2363 } 2490 }
2491 spin_unlock_bh(&mvmsta->lock);
2364} 2492}
2365 2493
2366static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, 2494static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
@@ -2598,6 +2726,12 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
2598 2726
2599 mutex_lock(&mvm->mutex); 2727 mutex_lock(&mvm->mutex);
2600 2728
2729 if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
2730 IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
2731 ret = -EBUSY;
2732 goto out;
2733 }
2734
2601 if (!vif->bss_conf.idle) { 2735 if (!vif->bss_conf.idle) {
2602 ret = -EBUSY; 2736 ret = -EBUSY;
2603 goto out; 2737 goto out;
@@ -3159,14 +3293,14 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
3159 */ 3293 */
3160 if (vif->type == NL80211_IFTYPE_MONITOR) { 3294 if (vif->type == NL80211_IFTYPE_MONITOR) {
3161 mvmvif->monitor_active = true; 3295 mvmvif->monitor_active = true;
3162 ret = iwl_mvm_update_quotas(mvm, NULL); 3296 ret = iwl_mvm_update_quotas(mvm, false, NULL);
3163 if (ret) 3297 if (ret)
3164 goto out_remove_binding; 3298 goto out_remove_binding;
3165 } 3299 }
3166 3300
3167 /* Handle binding during CSA */ 3301 /* Handle binding during CSA */
3168 if (vif->type == NL80211_IFTYPE_AP) { 3302 if (vif->type == NL80211_IFTYPE_AP) {
3169 iwl_mvm_update_quotas(mvm, NULL); 3303 iwl_mvm_update_quotas(mvm, false, NULL);
3170 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3304 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3171 } 3305 }
3172 3306
@@ -3190,7 +3324,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
3190 3324
3191 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA); 3325 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
3192 3326
3193 iwl_mvm_update_quotas(mvm, NULL); 3327 iwl_mvm_update_quotas(mvm, false, NULL);
3194 } 3328 }
3195 3329
3196 goto out; 3330 goto out;
@@ -3263,7 +3397,7 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
3263 break; 3397 break;
3264 } 3398 }
3265 3399
3266 iwl_mvm_update_quotas(mvm, disabled_vif); 3400 iwl_mvm_update_quotas(mvm, false, disabled_vif);
3267 iwl_mvm_binding_remove_vif(mvm, vif); 3401 iwl_mvm_binding_remove_vif(mvm, vif);
3268 3402
3269out: 3403out:
@@ -3455,7 +3589,7 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
3455 mvm->noa_duration = noa_duration; 3589 mvm->noa_duration = noa_duration;
3456 mvm->noa_vif = vif; 3590 mvm->noa_vif = vif;
3457 3591
3458 return iwl_mvm_update_quotas(mvm, NULL); 3592 return iwl_mvm_update_quotas(mvm, false, NULL);
3459 case IWL_MVM_TM_CMD_SET_BEACON_FILTER: 3593 case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
3460 /* must be associated client vif - ignore authorized */ 3594 /* must be associated client vif - ignore authorized */
3461 if (!vif || vif->type != NL80211_IFTYPE_STATION || 3595 if (!vif || vif->type != NL80211_IFTYPE_STATION ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 95cad68ab069..4b5c8f66df8b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -810,6 +810,9 @@ struct iwl_mvm {
810 /* system time of last beacon (for AP/GO interface) */ 810 /* system time of last beacon (for AP/GO interface) */
811 u32 ap_last_beacon_gp2; 811 u32 ap_last_beacon_gp2;
812 812
813 bool lar_regdom_set;
814 enum iwl_mcc_source mcc_src;
815
813 u8 low_latency_agg_frame_limit; 816 u8 low_latency_agg_frame_limit;
814 817
815 /* TDLS channel switch data */ 818 /* TDLS channel switch data */
@@ -910,6 +913,30 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
910 (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); 913 (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
911} 914}
912 915
916static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
917{
918 bool nvm_lar = mvm->nvm_data->lar_enabled;
919 bool tlv_lar = mvm->fw->ucode_capa.capa[0] &
920 IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
921
922 if (iwlwifi_mod_params.lar_disable)
923 return false;
924
925 /*
926 * Enable LAR only if it is supported by the FW (TLV) &&
927 * enabled in the NVM
928 */
929 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000)
930 return nvm_lar && tlv_lar;
931 else
932 return tlv_lar;
933}
934
935static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm)
936{
937 return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WIFI_MCC_UPDATE;
938}
939
913static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm) 940static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
914{ 941{
915 return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG; 942 return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG;
@@ -921,6 +948,12 @@ static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm)
921 IWL_MVM_BT_COEX_CORUNNING; 948 IWL_MVM_BT_COEX_CORUNNING;
922} 949}
923 950
951static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
952{
953 return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
954 IWL_MVM_BT_COEX_RRC;
955}
956
924extern const u8 iwl_mvm_ac_to_tx_fifo[]; 957extern const u8 iwl_mvm_ac_to_tx_fifo[];
925 958
926struct iwl_rate_info { 959struct iwl_rate_info {
@@ -1106,7 +1139,7 @@ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1106int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 1139int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
1107 1140
1108/* Quota management */ 1141/* Quota management */
1109int iwl_mvm_update_quotas(struct iwl_mvm *mvm, 1142int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload,
1110 struct ieee80211_vif *disabled_vif); 1143 struct ieee80211_vif *disabled_vif);
1111 1144
1112/* Scanning */ 1145/* Scanning */
@@ -1282,17 +1315,6 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
1282 struct iwl_rx_cmd_buffer *rxb, 1315 struct iwl_rx_cmd_buffer *rxb,
1283 struct iwl_device_cmd *cmd); 1316 struct iwl_device_cmd *cmd);
1284 1317
1285enum iwl_bt_kill_msk {
1286 BT_KILL_MSK_DEFAULT,
1287 BT_KILL_MSK_NEVER,
1288 BT_KILL_MSK_ALWAYS,
1289 BT_KILL_MSK_MAX,
1290};
1291
1292extern const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT];
1293extern const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT];
1294extern const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX];
1295
1296/* beacon filtering */ 1318/* beacon filtering */
1297#ifdef CONFIG_IWLWIFI_DEBUGFS 1319#ifdef CONFIG_IWLWIFI_DEBUGFS
1298void 1320void
@@ -1389,6 +1411,23 @@ void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
1389void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); 1411void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
1390int iwl_mvm_get_temp(struct iwl_mvm *mvm); 1412int iwl_mvm_get_temp(struct iwl_mvm *mvm);
1391 1413
1414/* Location Aware Regulatory */
1415struct iwl_mcc_update_resp *
1416iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
1417 enum iwl_mcc_source src_id);
1418int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
1419int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
1420 struct iwl_rx_cmd_buffer *rxb,
1421 struct iwl_device_cmd *cmd);
1422struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
1423 const char *alpha2,
1424 enum iwl_mcc_source src_id,
1425 bool *changed);
1426struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
1427 bool *changed);
1428int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm);
1429void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm);
1430
1392/* smart fifo */ 1431/* smart fifo */
1393int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1432int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1394 bool added_vif); 1433 bool added_vif);
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 5383429d96c1..123e0a16aea8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -63,12 +63,16 @@
63 * 63 *
64 *****************************************************************************/ 64 *****************************************************************************/
65#include <linux/firmware.h> 65#include <linux/firmware.h>
66#include <linux/rtnetlink.h>
67#include <linux/pci.h>
68#include <linux/acpi.h>
66#include "iwl-trans.h" 69#include "iwl-trans.h"
67#include "iwl-csr.h" 70#include "iwl-csr.h"
68#include "mvm.h" 71#include "mvm.h"
69#include "iwl-eeprom-parse.h" 72#include "iwl-eeprom-parse.h"
70#include "iwl-eeprom-read.h" 73#include "iwl-eeprom-read.h"
71#include "iwl-nvm-parse.h" 74#include "iwl-nvm-parse.h"
75#include "iwl-prph.h"
72 76
73/* Default NVM size to read */ 77/* Default NVM size to read */
74#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024) 78#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
@@ -262,7 +266,9 @@ static struct iwl_nvm_data *
262iwl_parse_nvm_sections(struct iwl_mvm *mvm) 266iwl_parse_nvm_sections(struct iwl_mvm *mvm)
263{ 267{
264 struct iwl_nvm_section *sections = mvm->nvm_sections; 268 struct iwl_nvm_section *sections = mvm->nvm_sections;
265 const __le16 *hw, *sw, *calib, *regulatory, *mac_override; 269 const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
270 bool is_family_8000_a_step = false, lar_enabled;
271 u32 mac_addr0, mac_addr1;
266 272
267 /* Checking for required sections */ 273 /* Checking for required sections */
268 if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { 274 if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
@@ -286,22 +292,43 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
286 "Can't parse mac_address, empty sections\n"); 292 "Can't parse mac_address, empty sections\n");
287 return NULL; 293 return NULL;
288 } 294 }
295
296 if (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP)
297 is_family_8000_a_step = true;
298
299 /* PHY_SKU section is mandatory in B0 */
300 if (!is_family_8000_a_step &&
301 !mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
302 IWL_ERR(mvm,
303 "Can't parse phy_sku in B0, empty sections\n");
304 return NULL;
305 }
289 } 306 }
290 307
291 if (WARN_ON(!mvm->cfg)) 308 if (WARN_ON(!mvm->cfg))
292 return NULL; 309 return NULL;
293 310
311 /* read the mac address from WFMP registers */
312 mac_addr0 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_0);
313 mac_addr1 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_1);
314
294 hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data; 315 hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
295 sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; 316 sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
296 calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; 317 calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
297 regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data; 318 regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
298 mac_override = 319 mac_override =
299 (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data; 320 (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
321 phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
322
323 lar_enabled = !iwlwifi_mod_params.lar_disable &&
324 (mvm->fw->ucode_capa.capa[0] &
325 IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
300 326
301 return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib, 327 return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
302 regulatory, mac_override, 328 regulatory, mac_override, phy_sku,
303 mvm->fw->valid_tx_ant, 329 mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
304 mvm->fw->valid_rx_ant); 330 lar_enabled, is_family_8000_a_step,
331 mac_addr0, mac_addr1);
305} 332}
306 333
307#define MAX_NVM_FILE_LEN 16384 334#define MAX_NVM_FILE_LEN 16384
@@ -570,3 +597,258 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
570 597
571 return 0; 598 return 0;
572} 599}
600
601struct iwl_mcc_update_resp *
602iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
603 enum iwl_mcc_source src_id)
604{
605 struct iwl_mcc_update_cmd mcc_update_cmd = {
606 .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
607 .source_id = (u8)src_id,
608 };
609 struct iwl_mcc_update_resp *mcc_resp, *resp_cp = NULL;
610 struct iwl_rx_packet *pkt;
611 struct iwl_host_cmd cmd = {
612 .id = MCC_UPDATE_CMD,
613 .flags = CMD_WANT_SKB,
614 .data = { &mcc_update_cmd },
615 };
616
617 int ret;
618 u32 status;
619 int resp_len, n_channels;
620 u16 mcc;
621
622 if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
623 return ERR_PTR(-EOPNOTSUPP);
624
625 cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
626
627 IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
628 alpha2[0], alpha2[1], src_id);
629
630 ret = iwl_mvm_send_cmd(mvm, &cmd);
631 if (ret)
632 return ERR_PTR(ret);
633
634 pkt = cmd.resp_pkt;
635 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
636 IWL_ERR(mvm, "Bad return from MCC_UPDATE_COMMAND (0x%08X)\n",
637 pkt->hdr.flags);
638 ret = -EIO;
639 goto exit;
640 }
641
642 /* Extract MCC response */
643 mcc_resp = (void *)pkt->data;
644 status = le32_to_cpu(mcc_resp->status);
645
646 mcc = le16_to_cpu(mcc_resp->mcc);
647
648 /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
649 if (mcc == 0) {
650 mcc = 0x3030; /* "00" - world */
651 mcc_resp->mcc = cpu_to_le16(mcc);
652 }
653
654 n_channels = __le32_to_cpu(mcc_resp->n_channels);
655 IWL_DEBUG_LAR(mvm,
656 "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n",
657 status, mcc, mcc >> 8, mcc & 0xff,
658 !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels);
659
660 resp_len = sizeof(*mcc_resp) + n_channels * sizeof(__le32);
661 resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
662 if (!resp_cp) {
663 ret = -ENOMEM;
664 goto exit;
665 }
666
667 ret = 0;
668exit:
669 iwl_free_resp(&cmd);
670 if (ret)
671 return ERR_PTR(ret);
672 return resp_cp;
673}
674
675#ifdef CONFIG_ACPI
676#define WRD_METHOD "WRDD"
677#define WRDD_WIFI (0x07)
678#define WRDD_WIGIG (0x10)
679
680static u32 iwl_mvm_wrdd_get_mcc(struct iwl_mvm *mvm, union acpi_object *wrdd)
681{
682 union acpi_object *mcc_pkg, *domain_type, *mcc_value;
683 u32 i;
684
685 if (wrdd->type != ACPI_TYPE_PACKAGE ||
686 wrdd->package.count < 2 ||
687 wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
688 wrdd->package.elements[0].integer.value != 0) {
689 IWL_DEBUG_LAR(mvm, "Unsupported wrdd structure\n");
690 return 0;
691 }
692
693 for (i = 1 ; i < wrdd->package.count ; ++i) {
694 mcc_pkg = &wrdd->package.elements[i];
695
696 if (mcc_pkg->type != ACPI_TYPE_PACKAGE ||
697 mcc_pkg->package.count < 2 ||
698 mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
699 mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
700 mcc_pkg = NULL;
701 continue;
702 }
703
704 domain_type = &mcc_pkg->package.elements[0];
705 if (domain_type->integer.value == WRDD_WIFI)
706 break;
707
708 mcc_pkg = NULL;
709 }
710
711 if (mcc_pkg) {
712 mcc_value = &mcc_pkg->package.elements[1];
713 return mcc_value->integer.value;
714 }
715
716 return 0;
717}
718
719static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
720{
721 acpi_handle root_handle;
722 acpi_handle handle;
723 struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
724 acpi_status status;
725 u32 mcc_val;
726 struct pci_dev *pdev = to_pci_dev(mvm->dev);
727
728 root_handle = ACPI_HANDLE(&pdev->dev);
729 if (!root_handle) {
730 IWL_DEBUG_LAR(mvm,
731 "Could not retrieve root port ACPI handle\n");
732 return -ENOENT;
733 }
734
735 /* Get the method's handle */
736 status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
737 if (ACPI_FAILURE(status)) {
738 IWL_DEBUG_LAR(mvm, "WRD method not found\n");
739 return -ENOENT;
740 }
741
742 /* Call WRDD with no arguments */
743 status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
744 if (ACPI_FAILURE(status)) {
745 IWL_DEBUG_LAR(mvm, "WRDC invocation failed (0x%x)\n", status);
746 return -ENOENT;
747 }
748
749 mcc_val = iwl_mvm_wrdd_get_mcc(mvm, wrdd.pointer);
750 kfree(wrdd.pointer);
751 if (!mcc_val)
752 return -ENOENT;
753
754 mcc[0] = (mcc_val >> 8) & 0xff;
755 mcc[1] = mcc_val & 0xff;
756 mcc[2] = '\0';
757 return 0;
758}
759#else /* CONFIG_ACPI */
760static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
761{
762 return -ENOENT;
763}
764#endif
765
766int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
767{
768 bool tlv_lar;
769 bool nvm_lar;
770 int retval;
771 struct ieee80211_regdomain *regd;
772 char mcc[3];
773
774 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
775 tlv_lar = mvm->fw->ucode_capa.capa[0] &
776 IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
777 nvm_lar = mvm->nvm_data->lar_enabled;
778 if (tlv_lar != nvm_lar)
779 IWL_INFO(mvm,
780 "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n",
781 tlv_lar ? "enabled" : "disabled",
782 nvm_lar ? "enabled" : "disabled");
783 }
784
785 if (!iwl_mvm_is_lar_supported(mvm))
786 return 0;
787
788 /*
789 * During HW restart, only replay the last set MCC to FW. Otherwise,
790 * queue an update to cfg80211 to retrieve the default alpha2 from FW.
791 */
792 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
793 /* This should only be called during vif up and hold RTNL */
794 return iwl_mvm_init_fw_regd(mvm);
795 }
796
797 /*
798 * Driver regulatory hint for initial update, this also informs the
799 * firmware we support wifi location updates.
800 * Disallow scans that might crash the FW while the LAR regdomain
801 * is not set.
802 */
803 mvm->lar_regdom_set = false;
804
805 regd = iwl_mvm_get_current_regdomain(mvm, NULL);
806 if (IS_ERR_OR_NULL(regd))
807 return -EIO;
808
809 if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
810 !iwl_mvm_get_bios_mcc(mvm, mcc)) {
811 kfree(regd);
812 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
813 MCC_SOURCE_BIOS, NULL);
814 if (IS_ERR_OR_NULL(regd))
815 return -EIO;
816 }
817
818 retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
819 kfree(regd);
820 return retval;
821}
822
823int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
824 struct iwl_rx_cmd_buffer *rxb,
825 struct iwl_device_cmd *cmd)
826{
827 struct iwl_rx_packet *pkt = rxb_addr(rxb);
828 struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
829 enum iwl_mcc_source src;
830 char mcc[3];
831 struct ieee80211_regdomain *regd;
832
833 lockdep_assert_held(&mvm->mutex);
834
835 if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
836 return 0;
837
838 mcc[0] = notif->mcc >> 8;
839 mcc[1] = notif->mcc & 0xff;
840 mcc[2] = '\0';
841 src = notif->source_id;
842
843 IWL_DEBUG_LAR(mvm,
844 "RX: received chub update mcc cmd (mcc '%s' src %d)\n",
845 mcc, src);
846 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
847 if (IS_ERR_OR_NULL(regd))
848 return 0;
849
850 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
851 kfree(regd);
852
853 return 0;
854}
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index fe40922a6b0d..80121e41ca22 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -82,7 +82,6 @@
82#include "rs.h" 82#include "rs.h"
83#include "fw-api-scan.h" 83#include "fw-api-scan.h"
84#include "time-event.h" 84#include "time-event.h"
85#include "iwl-fw-error-dump.h"
86 85
87#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" 86#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
88MODULE_DESCRIPTION(DRV_DESCRIPTION); 87MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -234,6 +233,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
234 iwl_mvm_rx_ant_coupling_notif, true), 233 iwl_mvm_rx_ant_coupling_notif, true),
235 234
236 RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false), 235 RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
236 RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true),
237 237
238 RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false), 238 RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
239 239
@@ -358,6 +358,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
358 CMD(TDLS_CHANNEL_SWITCH_CMD), 358 CMD(TDLS_CHANNEL_SWITCH_CMD),
359 CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION), 359 CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION),
360 CMD(TDLS_CONFIG_CMD), 360 CMD(TDLS_CONFIG_CMD),
361 CMD(MCC_UPDATE_CMD),
361}; 362};
362#undef CMD 363#undef CMD
363 364
@@ -871,8 +872,8 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
871 872
872 /* start recording again if the firmware is not crashed */ 873 /* start recording again if the firmware is not crashed */
873 WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) && 874 WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
874 mvm->fw->dbg_dest_tlv && 875 mvm->fw->dbg_dest_tlv &&
875 iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf)); 876 iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
876 877
877 mutex_unlock(&mvm->mutex); 878 mutex_unlock(&mvm->mutex);
878 879
@@ -1270,6 +1271,10 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
1270 iwl_free_resp(&get_status_cmd); 1271 iwl_free_resp(&get_status_cmd);
1271out: 1272out:
1272 iwl_mvm_d0i3_enable_tx(mvm, qos_seq); 1273 iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
1274
1275 /* the FW might have updated the regdomain */
1276 iwl_mvm_update_changed_regdom(mvm);
1277
1273 iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK); 1278 iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
1274 mutex_unlock(&mvm->mutex); 1279 mutex_unlock(&mvm->mutex);
1275} 1280}
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 33bbdde0046f..d2c6ba9d326b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -358,7 +358,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
358 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); 358 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
359 359
360 if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) || 360 if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
361 !mvmvif->pm_enabled || iwl_mvm_tdls_sta_count(mvm, vif)) 361 !mvmvif->pm_enabled)
362 return; 362 return;
363 363
364 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); 364 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -639,6 +639,10 @@ static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
639 if (vifs->ap_vif) 639 if (vifs->ap_vif)
640 ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif); 640 ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
641 641
642 /* don't allow PM if any TDLS stations exist */
643 if (iwl_mvm_tdls_sta_count(mvm, NULL))
644 return;
645
642 /* enable PM on bss if bss stand alone */ 646 /* enable PM on bss if bss stand alone */
643 if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) { 647 if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
644 bss_mvmvif->pm_enabled = true; 648 bss_mvmvif->pm_enabled = true;
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index dbb2594390e9..509a66d05245 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -172,6 +172,7 @@ static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
172} 172}
173 173
174int iwl_mvm_update_quotas(struct iwl_mvm *mvm, 174int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
175 bool force_update,
175 struct ieee80211_vif *disabled_vif) 176 struct ieee80211_vif *disabled_vif)
176{ 177{
177 struct iwl_time_quota_cmd cmd = {}; 178 struct iwl_time_quota_cmd cmd = {};
@@ -309,7 +310,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
309 "zero quota on binding %d\n", i); 310 "zero quota on binding %d\n", i);
310 } 311 }
311 312
312 if (!send) { 313 if (!send && !force_update) {
313 /* don't send a practically unchanged command, the firmware has 314 /* don't send a practically unchanged command, the firmware has
314 * to re-initialize a lot of state and that can have an adverse 315 * to re-initialize a lot of state and that can have an adverse
315 * impact on it 316 * impact on it
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 6578498dd5af..9140b0b701c7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -1065,6 +1065,37 @@ static inline bool rs_rate_column_match(struct rs_rate *a,
1065 && ant_match; 1065 && ant_match;
1066} 1066}
1067 1067
1068static inline enum rs_column rs_get_column_from_rate(struct rs_rate *rate)
1069{
1070 if (is_legacy(rate)) {
1071 if (rate->ant == ANT_A)
1072 return RS_COLUMN_LEGACY_ANT_A;
1073
1074 if (rate->ant == ANT_B)
1075 return RS_COLUMN_LEGACY_ANT_B;
1076
1077 goto err;
1078 }
1079
1080 if (is_siso(rate)) {
1081 if (rate->ant == ANT_A || rate->stbc || rate->bfer)
1082 return rate->sgi ? RS_COLUMN_SISO_ANT_A_SGI :
1083 RS_COLUMN_SISO_ANT_A;
1084
1085 if (rate->ant == ANT_B)
1086 return rate->sgi ? RS_COLUMN_SISO_ANT_B_SGI :
1087 RS_COLUMN_SISO_ANT_B;
1088
1089 goto err;
1090 }
1091
1092 if (is_mimo(rate))
1093 return rate->sgi ? RS_COLUMN_MIMO2_SGI : RS_COLUMN_MIMO2;
1094
1095err:
1096 return RS_COLUMN_INVALID;
1097}
1098
1068static u8 rs_get_tid(struct ieee80211_hdr *hdr) 1099static u8 rs_get_tid(struct ieee80211_hdr *hdr)
1069{ 1100{
1070 u8 tid = IWL_MAX_TID_COUNT; 1101 u8 tid = IWL_MAX_TID_COUNT;
@@ -1106,17 +1137,43 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1106 return; 1137 return;
1107 } 1138 }
1108 1139
1140 /* This packet was aggregated but doesn't carry status info */
1141 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
1142 !(info->flags & IEEE80211_TX_STAT_AMPDU))
1143 return;
1144
1145 rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);
1146
1109#ifdef CONFIG_MAC80211_DEBUGFS 1147#ifdef CONFIG_MAC80211_DEBUGFS
1110 /* Disable last tx check if we are debugging with fixed rate */ 1148 /* Disable last tx check if we are debugging with fixed rate but
1149 * update tx stats */
1111 if (lq_sta->pers.dbg_fixed_rate) { 1150 if (lq_sta->pers.dbg_fixed_rate) {
1112 IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n"); 1151 int index = tx_resp_rate.index;
1152 enum rs_column column;
1153 int attempts, success;
1154
1155 column = rs_get_column_from_rate(&tx_resp_rate);
1156 if (WARN_ONCE(column == RS_COLUMN_INVALID,
1157 "Can't map rate 0x%x to column",
1158 tx_resp_hwrate))
1159 return;
1160
1161 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
1162 attempts = info->status.ampdu_len;
1163 success = info->status.ampdu_ack_len;
1164 } else {
1165 attempts = info->status.rates[0].count;
1166 success = !!(info->flags & IEEE80211_TX_STAT_ACK);
1167 }
1168
1169 lq_sta->pers.tx_stats[column][index].total += attempts;
1170 lq_sta->pers.tx_stats[column][index].success += success;
1171
1172 IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n",
1173 tx_resp_hwrate, success, attempts);
1113 return; 1174 return;
1114 } 1175 }
1115#endif 1176#endif
1116 /* This packet was aggregated but doesn't carry status info */
1117 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
1118 !(info->flags & IEEE80211_TX_STAT_AMPDU))
1119 return;
1120 1177
1121 if (time_after(jiffies, 1178 if (time_after(jiffies,
1122 (unsigned long)(lq_sta->last_tx + 1179 (unsigned long)(lq_sta->last_tx +
@@ -1142,7 +1199,6 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1142 table = &lq_sta->lq; 1199 table = &lq_sta->lq;
1143 lq_hwrate = le32_to_cpu(table->rs_table[0]); 1200 lq_hwrate = le32_to_cpu(table->rs_table[0]);
1144 rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate); 1201 rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
1145 rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);
1146 1202
1147 /* Here we actually compare this rate to the latest LQ command */ 1203 /* Here we actually compare this rate to the latest LQ command */
1148 if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) { 1204 if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) {
@@ -1280,6 +1336,9 @@ static void rs_mac80211_tx_status(void *mvm_r,
1280 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); 1336 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1281 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1337 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1282 1338
1339 if (!iwl_mvm_sta_from_mac80211(sta)->vif)
1340 return;
1341
1283 if (!ieee80211_is_data(hdr->frame_control) || 1342 if (!ieee80211_is_data(hdr->frame_control) ||
1284 info->flags & IEEE80211_TX_CTL_NO_ACK) 1343 info->flags & IEEE80211_TX_CTL_NO_ACK)
1285 return; 1344 return;
@@ -2513,6 +2572,14 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
2513 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2572 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2514 struct iwl_lq_sta *lq_sta = mvm_sta; 2573 struct iwl_lq_sta *lq_sta = mvm_sta;
2515 2574
2575 if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
2576 /* if vif isn't initialized mvm doesn't know about
2577 * this station, so don't do anything with the it
2578 */
2579 sta = NULL;
2580 mvm_sta = NULL;
2581 }
2582
2516 /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ 2583 /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
2517 2584
2518 /* Treat uninitialized rate scaling data same as non-existing. */ 2585 /* Treat uninitialized rate scaling data same as non-existing. */
@@ -2830,6 +2897,9 @@ static void rs_rate_update(void *mvm_r,
2830 (struct iwl_op_mode *)mvm_r; 2897 (struct iwl_op_mode *)mvm_r;
2831 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); 2898 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
2832 2899
2900 if (!iwl_mvm_sta_from_mac80211(sta)->vif)
2901 return;
2902
2833 /* Stop any ongoing aggregations as rs starts off assuming no agg */ 2903 /* Stop any ongoing aggregations as rs starts off assuming no agg */
2834 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) 2904 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
2835 ieee80211_stop_tx_ba_session(sta, tid); 2905 ieee80211_stop_tx_ba_session(sta, tid);
@@ -3343,16 +3413,16 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
3343 (is_legacy(rate)) ? "legacy" : 3413 (is_legacy(rate)) ? "legacy" :
3344 is_vht(rate) ? "VHT" : "HT"); 3414 is_vht(rate) ? "VHT" : "HT");
3345 if (!is_legacy(rate)) { 3415 if (!is_legacy(rate)) {
3346 desc += sprintf(buff+desc, " %s", 3416 desc += sprintf(buff + desc, " %s",
3347 (is_siso(rate)) ? "SISO" : "MIMO2"); 3417 (is_siso(rate)) ? "SISO" : "MIMO2");
3348 desc += sprintf(buff+desc, " %s", 3418 desc += sprintf(buff + desc, " %s",
3349 (is_ht20(rate)) ? "20MHz" : 3419 (is_ht20(rate)) ? "20MHz" :
3350 (is_ht40(rate)) ? "40MHz" : 3420 (is_ht40(rate)) ? "40MHz" :
3351 (is_ht80(rate)) ? "80Mhz" : "BAD BW"); 3421 (is_ht80(rate)) ? "80Mhz" : "BAD BW");
3352 desc += sprintf(buff+desc, " %s %s %s\n", 3422 desc += sprintf(buff + desc, " %s %s %s\n",
3353 (rate->sgi) ? "SGI" : "NGI", 3423 (rate->sgi) ? "SGI" : "NGI",
3354 (rate->ldpc) ? "LDPC" : "BCC", 3424 (rate->ldpc) ? "LDPC" : "BCC",
3355 (lq_sta->is_agg) ? "AGG on" : ""); 3425 (lq_sta->is_agg) ? "AGG on" : "");
3356 } 3426 }
3357 desc += sprintf(buff+desc, "last tx rate=0x%X\n", 3427 desc += sprintf(buff+desc, "last tx rate=0x%X\n",
3358 lq_sta->last_rate_n_flags); 3428 lq_sta->last_rate_n_flags);
@@ -3373,13 +3443,13 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
3373 ss_params = le32_to_cpu(lq_sta->lq.ss_params); 3443 ss_params = le32_to_cpu(lq_sta->lq.ss_params);
3374 desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n", 3444 desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n",
3375 (ss_params & LQ_SS_PARAMS_VALID) ? 3445 (ss_params & LQ_SS_PARAMS_VALID) ?
3376 "VALID," : "INVALID", 3446 "VALID" : "INVALID",
3377 (ss_params & LQ_SS_BFER_ALLOWED) ? 3447 (ss_params & LQ_SS_BFER_ALLOWED) ?
3378 "BFER," : "", 3448 ", BFER" : "",
3379 (ss_params & LQ_SS_STBC_1SS_ALLOWED) ? 3449 (ss_params & LQ_SS_STBC_1SS_ALLOWED) ?
3380 "STBC," : "", 3450 ", STBC" : "",
3381 (ss_params & LQ_SS_FORCE) ? 3451 (ss_params & LQ_SS_FORCE) ?
3382 "FORCE" : ""); 3452 ", FORCE" : "");
3383 desc += sprintf(buff+desc, 3453 desc += sprintf(buff+desc,
3384 "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", 3454 "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
3385 lq_sta->lq.initial_rate_index[0], 3455 lq_sta->lq.initial_rate_index[0],
@@ -3603,9 +3673,15 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
3603 3673
3604MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32); 3674MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
3605 3675
3606static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir) 3676static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir)
3607{ 3677{
3608 struct iwl_lq_sta *lq_sta = mvm_sta; 3678 struct iwl_lq_sta *lq_sta = priv_sta;
3679 struct iwl_mvm_sta *mvmsta;
3680
3681 mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
3682
3683 if (!mvmsta->vif)
3684 return;
3609 3685
3610 debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, 3686 debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
3611 lq_sta, &rs_sta_dbgfs_scale_table_ops); 3687 lq_sta, &rs_sta_dbgfs_scale_table_ops);
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
index 7eb78e2c240a..b0f59fdd287c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sf.c
@@ -99,7 +99,35 @@ static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac,
99 99
100/* 100/*
101 * Aging and idle timeouts for the different possible scenarios 101 * Aging and idle timeouts for the different possible scenarios
102 * in SF_FULL_ON state. 102 * in default configuration
103 */
104static const
105__le32 sf_full_timeout_def[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
106 {
107 cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER_DEF),
108 cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
109 },
110 {
111 cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER_DEF),
112 cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER_DEF)
113 },
114 {
115 cpu_to_le32(SF_MCAST_AGING_TIMER_DEF),
116 cpu_to_le32(SF_MCAST_IDLE_TIMER_DEF)
117 },
118 {
119 cpu_to_le32(SF_BA_AGING_TIMER_DEF),
120 cpu_to_le32(SF_BA_IDLE_TIMER_DEF)
121 },
122 {
123 cpu_to_le32(SF_TX_RE_AGING_TIMER_DEF),
124 cpu_to_le32(SF_TX_RE_IDLE_TIMER_DEF)
125 },
126};
127
128/*
129 * Aging and idle timeouts for the different possible scenarios
130 * in single BSS MAC configuration.
103 */ 131 */
104static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = { 132static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
105 { 133 {
@@ -124,7 +152,8 @@ static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
124 }, 152 },
125}; 153};
126 154
127static void iwl_mvm_fill_sf_command(struct iwl_sf_cfg_cmd *sf_cmd, 155static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
156 struct iwl_sf_cfg_cmd *sf_cmd,
128 struct ieee80211_sta *sta) 157 struct ieee80211_sta *sta)
129{ 158{
130 int i, j, watermark; 159 int i, j, watermark;
@@ -163,24 +192,38 @@ static void iwl_mvm_fill_sf_command(struct iwl_sf_cfg_cmd *sf_cmd,
163 cpu_to_le32(SF_LONG_DELAY_AGING_TIMER); 192 cpu_to_le32(SF_LONG_DELAY_AGING_TIMER);
164 } 193 }
165 } 194 }
166 BUILD_BUG_ON(sizeof(sf_full_timeout) !=
167 sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES);
168 195
169 memcpy(sf_cmd->full_on_timeouts, sf_full_timeout, 196 if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) {
170 sizeof(sf_full_timeout)); 197 BUILD_BUG_ON(sizeof(sf_full_timeout) !=
198 sizeof(__le32) * SF_NUM_SCENARIO *
199 SF_NUM_TIMEOUT_TYPES);
200
201 memcpy(sf_cmd->full_on_timeouts, sf_full_timeout,
202 sizeof(sf_full_timeout));
203 } else {
204 BUILD_BUG_ON(sizeof(sf_full_timeout_def) !=
205 sizeof(__le32) * SF_NUM_SCENARIO *
206 SF_NUM_TIMEOUT_TYPES);
207
208 memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def,
209 sizeof(sf_full_timeout_def));
210 }
211
171} 212}
172 213
173static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, 214static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
174 enum iwl_sf_state new_state) 215 enum iwl_sf_state new_state)
175{ 216{
176 struct iwl_sf_cfg_cmd sf_cmd = { 217 struct iwl_sf_cfg_cmd sf_cmd = {
177 .state = cpu_to_le32(new_state), 218 .state = cpu_to_le32(SF_FULL_ON),
178 }; 219 };
179 struct ieee80211_sta *sta; 220 struct ieee80211_sta *sta;
180 int ret = 0; 221 int ret = 0;
181 222
182 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF && 223 if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13)
183 mvm->cfg->disable_dummy_notification) 224 sf_cmd.state = cpu_to_le32(new_state);
225
226 if (mvm->cfg->disable_dummy_notification)
184 sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF); 227 sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF);
185 228
186 /* 229 /*
@@ -192,6 +235,8 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
192 235
193 switch (new_state) { 236 switch (new_state) {
194 case SF_UNINIT: 237 case SF_UNINIT:
238 if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13)
239 iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
195 break; 240 break;
196 case SF_FULL_ON: 241 case SF_FULL_ON:
197 if (sta_id == IWL_MVM_STATION_COUNT) { 242 if (sta_id == IWL_MVM_STATION_COUNT) {
@@ -206,11 +251,11 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
206 rcu_read_unlock(); 251 rcu_read_unlock();
207 return -EINVAL; 252 return -EINVAL;
208 } 253 }
209 iwl_mvm_fill_sf_command(&sf_cmd, sta); 254 iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta);
210 rcu_read_unlock(); 255 rcu_read_unlock();
211 break; 256 break;
212 case SF_INIT_OFF: 257 case SF_INIT_OFF:
213 iwl_mvm_fill_sf_command(&sf_cmd, NULL); 258 iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
214 break; 259 break;
215 default: 260 default:
216 WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n", 261 WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n",
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 5c23cddaaae3..50f9288368af 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -273,7 +273,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
273 else 273 else
274 sta_id = mvm_sta->sta_id; 274 sta_id = mvm_sta->sta_id;
275 275
276 if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT)) 276 if (sta_id == IWL_MVM_STATION_COUNT)
277 return -ENOSPC; 277 return -ENOSPC;
278 278
279 spin_lock_init(&mvm_sta->lock); 279 spin_lock_init(&mvm_sta->lock);
@@ -1681,9 +1681,6 @@ void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
1681 }; 1681 };
1682 int ret; 1682 int ret;
1683 1683
1684 if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_DISABLE_STA_TX))
1685 return;
1686
1687 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); 1684 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
1688 if (ret) 1685 if (ret)
1689 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 1686 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index f8d6f306dd76..8d179ab67cc2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -197,6 +197,8 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
197 struct iwl_time_event_notif *notif) 197 struct iwl_time_event_notif *notif)
198{ 198{
199 if (!le32_to_cpu(notif->status)) { 199 if (!le32_to_cpu(notif->status)) {
200 if (te_data->vif->type == NL80211_IFTYPE_STATION)
201 ieee80211_connection_loss(te_data->vif);
200 IWL_DEBUG_TE(mvm, "CSA time event failed to start\n"); 202 IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
201 iwl_mvm_te_clear_data(mvm, te_data); 203 iwl_mvm_te_clear_data(mvm, te_data);
202 return; 204 return;
@@ -261,17 +263,23 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
261 "TE ended - current time %lu, estimated end %lu\n", 263 "TE ended - current time %lu, estimated end %lu\n",
262 jiffies, te_data->end_jiffies); 264 jiffies, te_data->end_jiffies);
263 265
264 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { 266 switch (te_data->vif->type) {
267 case NL80211_IFTYPE_P2P_DEVICE:
265 ieee80211_remain_on_channel_expired(mvm->hw); 268 ieee80211_remain_on_channel_expired(mvm->hw);
266 iwl_mvm_roc_finished(mvm); 269 iwl_mvm_roc_finished(mvm);
270 break;
271 case NL80211_IFTYPE_STATION:
272 /*
273 * By now, we should have finished association
274 * and know the dtim period.
275 */
276 iwl_mvm_te_check_disconnect(mvm, te_data->vif,
277 "No association and the time event is over already...");
278 break;
279 default:
280 break;
267 } 281 }
268 282
269 /*
270 * By now, we should have finished association
271 * and know the dtim period.
272 */
273 iwl_mvm_te_check_disconnect(mvm, te_data->vif,
274 "No association and the time event is over already...");
275 iwl_mvm_te_clear_data(mvm, te_data); 283 iwl_mvm_te_clear_data(mvm, te_data);
276 } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) { 284 } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
277 te_data->running = true; 285 te_data->running = true;
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 7906b97c81b9..ba34dda1ae36 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -953,8 +953,10 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
953 mvmsta = iwl_mvm_sta_from_mac80211(sta); 953 mvmsta = iwl_mvm_sta_from_mac80211(sta);
954 tid_data = &mvmsta->tid_data[tid]; 954 tid_data = &mvmsta->tid_data[tid];
955 955
956 if (WARN_ONCE(tid_data->txq_id != scd_flow, "Q %d, tid %d, flow %d", 956 if (tid_data->txq_id != scd_flow) {
957 tid_data->txq_id, tid, scd_flow)) { 957 IWL_ERR(mvm,
958 "invalid BA notification: Q %d, tid %d, flow %d\n",
959 tid_data->txq_id, tid, scd_flow);
958 rcu_read_unlock(); 960 rcu_read_unlock();
959 return 0; 961 return 0;
960 } 962 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 2b9de63951e6..435faee0a28e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -857,7 +857,7 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
857 857
858 mvmvif->low_latency = value; 858 mvmvif->low_latency = value;
859 859
860 res = iwl_mvm_update_quotas(mvm, NULL); 860 res = iwl_mvm_update_quotas(mvm, false, NULL);
861 if (res) 861 if (res)
862 return res; 862 return res;
863 863
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index dbd6bcf52205..b18569734922 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -368,10 +368,12 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
368/* 3165 Series */ 368/* 3165 Series */
369 {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, 369 {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, 370 {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
372 {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
373 {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, 371 {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
374 {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, 372 {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
373 {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
374 {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
375 {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
376 {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
375 377
376/* 7265 Series */ 378/* 7265 Series */
377 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, 379 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -413,10 +415,35 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
413 415
414/* 8000 Series */ 416/* 8000 Series */
415 {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, 417 {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
416 {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, 418 {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
419 {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)},
420 {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)},
421 {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)},
422 {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)},
423 {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)},
424 {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)},
417 {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, 425 {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
426 {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
427 {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
428 {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
429 {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
430 {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
431 {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
432 {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
433 {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
434 {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
435 {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
436 {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
437 {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
438 {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)},
439 {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)},
440 {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
418 {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)}, 441 {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
419 {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)}, 442 {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
443 {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)},
444 {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)},
445 {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)},
446 {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
420#endif /* CONFIG_IWLMVM */ 447#endif /* CONFIG_IWLMVM */
421 448
422 {0} 449 {0}
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index cae0eb8835ce..01996c9d98a7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -217,6 +217,8 @@ struct iwl_pcie_txq_scratch_buf {
217 * @active: stores if queue is active 217 * @active: stores if queue is active
218 * @ampdu: true if this queue is an ampdu queue for an specific RA/TID 218 * @ampdu: true if this queue is an ampdu queue for an specific RA/TID
219 * @wd_timeout: queue watchdog timeout (jiffies) - per queue 219 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
220 * @frozen: tx stuck queue timer is frozen
221 * @frozen_expiry_remainder: remember how long until the timer fires
220 * 222 *
221 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame 223 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
222 * descriptors) and required locking structures. 224 * descriptors) and required locking structures.
@@ -228,9 +230,11 @@ struct iwl_txq {
228 dma_addr_t scratchbufs_dma; 230 dma_addr_t scratchbufs_dma;
229 struct iwl_pcie_txq_entry *entries; 231 struct iwl_pcie_txq_entry *entries;
230 spinlock_t lock; 232 spinlock_t lock;
233 unsigned long frozen_expiry_remainder;
231 struct timer_list stuck_timer; 234 struct timer_list stuck_timer;
232 struct iwl_trans_pcie *trans_pcie; 235 struct iwl_trans_pcie *trans_pcie;
233 bool need_update; 236 bool need_update;
237 bool frozen;
234 u8 active; 238 u8 active;
235 bool ampdu; 239 bool ampdu;
236 unsigned long wd_timeout; 240 unsigned long wd_timeout;
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index f31a94160771..dc247325d8d7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -682,6 +682,43 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
682 return ret; 682 return ret;
683} 683}
684 684
685/*
686 * Driver Takes the ownership on secure machine before FW load
687 * and prevent race with the BT load.
688 * W/A for ROM bug. (should be remove in the next Si step)
689 */
690static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
691{
692 u32 val, loop = 1000;
693
694 /* Check the RSA semaphore is accessible - if not, we are in trouble */
695 val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
696 if (val & (BIT(1) | BIT(17))) {
697 IWL_ERR(trans,
698 "can't access the RSA semaphore it is write protected\n");
699 return 0;
700 }
701
702 /* take ownership on the AUX IF */
703 iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
704 iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);
705
706 do {
707 iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
708 val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
709 if (val == 0x1) {
710 iwl_write_prph(trans, RSA_ENABLE, 0);
711 return 0;
712 }
713
714 udelay(10);
715 loop--;
716 } while (loop > 0);
717
718 IWL_ERR(trans, "Failed to take ownership on secure machine\n");
719 return -EIO;
720}
721
685static int iwl_pcie_load_cpu_sections_8000b(struct iwl_trans *trans, 722static int iwl_pcie_load_cpu_sections_8000b(struct iwl_trans *trans,
686 const struct fw_img *image, 723 const struct fw_img *image,
687 int cpu, 724 int cpu,
@@ -901,6 +938,11 @@ static int iwl_pcie_load_given_ucode_8000b(struct iwl_trans *trans,
901 if (trans->dbg_dest_tlv) 938 if (trans->dbg_dest_tlv)
902 iwl_pcie_apply_destination(trans); 939 iwl_pcie_apply_destination(trans);
903 940
941 /* TODO: remove in the next Si step */
942 ret = iwl_pcie_rsa_race_bug_wa(trans);
943 if (ret)
944 return ret;
945
904 /* configure the ucode to be ready to get the secured image */ 946 /* configure the ucode to be ready to get the secured image */
905 /* release CPU reset */ 947 /* release CPU reset */
906 iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT); 948 iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
@@ -1462,6 +1504,60 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
1462 return ret; 1504 return ret;
1463} 1505}
1464 1506
1507static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
1508 unsigned long txqs,
1509 bool freeze)
1510{
1511 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1512 int queue;
1513
1514 for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
1515 struct iwl_txq *txq = &trans_pcie->txq[queue];
1516 unsigned long now;
1517
1518 spin_lock_bh(&txq->lock);
1519
1520 now = jiffies;
1521
1522 if (txq->frozen == freeze)
1523 goto next_queue;
1524
1525 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
1526 freeze ? "Freezing" : "Waking", queue);
1527
1528 txq->frozen = freeze;
1529
1530 if (txq->q.read_ptr == txq->q.write_ptr)
1531 goto next_queue;
1532
1533 if (freeze) {
1534 if (unlikely(time_after(now,
1535 txq->stuck_timer.expires))) {
1536 /*
1537 * The timer should have fired, maybe it is
1538 * spinning right now on the lock.
1539 */
1540 goto next_queue;
1541 }
1542 /* remember how long until the timer fires */
1543 txq->frozen_expiry_remainder =
1544 txq->stuck_timer.expires - now;
1545 del_timer(&txq->stuck_timer);
1546 goto next_queue;
1547 }
1548
1549 /*
1550 * Wake a non-empty queue -> arm timer with the
1551 * remainder before it froze
1552 */
1553 mod_timer(&txq->stuck_timer,
1554 now + txq->frozen_expiry_remainder);
1555
1556next_queue:
1557 spin_unlock_bh(&txq->lock);
1558 }
1559}
1560
1465#define IWL_FLUSH_WAIT_MS 2000 1561#define IWL_FLUSH_WAIT_MS 2000
1466 1562
1467static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm) 1563static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
@@ -1713,7 +1809,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1713 int ret; 1809 int ret;
1714 size_t bufsz; 1810 size_t bufsz;
1715 1811
1716 bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues; 1812 bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
1717 1813
1718 if (!trans_pcie->txq) 1814 if (!trans_pcie->txq)
1719 return -EAGAIN; 1815 return -EAGAIN;
@@ -1726,11 +1822,11 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1726 txq = &trans_pcie->txq[cnt]; 1822 txq = &trans_pcie->txq[cnt];
1727 q = &txq->q; 1823 q = &txq->q;
1728 pos += scnprintf(buf + pos, bufsz - pos, 1824 pos += scnprintf(buf + pos, bufsz - pos,
1729 "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d%s\n", 1825 "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
1730 cnt, q->read_ptr, q->write_ptr, 1826 cnt, q->read_ptr, q->write_ptr,
1731 !!test_bit(cnt, trans_pcie->queue_used), 1827 !!test_bit(cnt, trans_pcie->queue_used),
1732 !!test_bit(cnt, trans_pcie->queue_stopped), 1828 !!test_bit(cnt, trans_pcie->queue_stopped),
1733 txq->need_update, 1829 txq->need_update, txq->frozen,
1734 (cnt == trans_pcie->cmd_queue ? " HCMD" : "")); 1830 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
1735 } 1831 }
1736 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1832 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
@@ -1961,24 +2057,25 @@ static const struct {
1961 { .start = 0x00a01c7c, .end = 0x00a01c7c }, 2057 { .start = 0x00a01c7c, .end = 0x00a01c7c },
1962 { .start = 0x00a01c28, .end = 0x00a01c54 }, 2058 { .start = 0x00a01c28, .end = 0x00a01c54 },
1963 { .start = 0x00a01c5c, .end = 0x00a01c5c }, 2059 { .start = 0x00a01c5c, .end = 0x00a01c5c },
1964 { .start = 0x00a01c84, .end = 0x00a01c84 }, 2060 { .start = 0x00a01c60, .end = 0x00a01cdc },
1965 { .start = 0x00a01ce0, .end = 0x00a01d0c }, 2061 { .start = 0x00a01ce0, .end = 0x00a01d0c },
1966 { .start = 0x00a01d18, .end = 0x00a01d20 }, 2062 { .start = 0x00a01d18, .end = 0x00a01d20 },
1967 { .start = 0x00a01d2c, .end = 0x00a01d30 }, 2063 { .start = 0x00a01d2c, .end = 0x00a01d30 },
1968 { .start = 0x00a01d40, .end = 0x00a01d5c }, 2064 { .start = 0x00a01d40, .end = 0x00a01d5c },
1969 { .start = 0x00a01d80, .end = 0x00a01d80 }, 2065 { .start = 0x00a01d80, .end = 0x00a01d80 },
1970 { .start = 0x00a01d98, .end = 0x00a01d98 }, 2066 { .start = 0x00a01d98, .end = 0x00a01d9c },
2067 { .start = 0x00a01da8, .end = 0x00a01da8 },
2068 { .start = 0x00a01db8, .end = 0x00a01df4 },
1971 { .start = 0x00a01dc0, .end = 0x00a01dfc }, 2069 { .start = 0x00a01dc0, .end = 0x00a01dfc },
1972 { .start = 0x00a01e00, .end = 0x00a01e2c }, 2070 { .start = 0x00a01e00, .end = 0x00a01e2c },
1973 { .start = 0x00a01e40, .end = 0x00a01e60 }, 2071 { .start = 0x00a01e40, .end = 0x00a01e60 },
2072 { .start = 0x00a01e68, .end = 0x00a01e6c },
2073 { .start = 0x00a01e74, .end = 0x00a01e74 },
1974 { .start = 0x00a01e84, .end = 0x00a01e90 }, 2074 { .start = 0x00a01e84, .end = 0x00a01e90 },
1975 { .start = 0x00a01e9c, .end = 0x00a01ec4 }, 2075 { .start = 0x00a01e9c, .end = 0x00a01ec4 },
1976 { .start = 0x00a01ed0, .end = 0x00a01ed0 }, 2076 { .start = 0x00a01ed0, .end = 0x00a01ee0 },
1977 { .start = 0x00a01f00, .end = 0x00a01f14 }, 2077 { .start = 0x00a01f00, .end = 0x00a01f1c },
1978 { .start = 0x00a01f44, .end = 0x00a01f58 }, 2078 { .start = 0x00a01f44, .end = 0x00a01ffc },
1979 { .start = 0x00a01f80, .end = 0x00a01fa8 },
1980 { .start = 0x00a01fb0, .end = 0x00a01fbc },
1981 { .start = 0x00a01ff8, .end = 0x00a01ffc },
1982 { .start = 0x00a02000, .end = 0x00a02048 }, 2079 { .start = 0x00a02000, .end = 0x00a02048 },
1983 { .start = 0x00a02068, .end = 0x00a020f0 }, 2080 { .start = 0x00a02068, .end = 0x00a020f0 },
1984 { .start = 0x00a02100, .end = 0x00a02118 }, 2081 { .start = 0x00a02100, .end = 0x00a02118 },
@@ -2305,6 +2402,7 @@ static const struct iwl_trans_ops trans_ops_pcie = {
2305 .dbgfs_register = iwl_trans_pcie_dbgfs_register, 2402 .dbgfs_register = iwl_trans_pcie_dbgfs_register,
2306 2403
2307 .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty, 2404 .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
2405 .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
2308 2406
2309 .write8 = iwl_trans_pcie_write8, 2407 .write8 = iwl_trans_pcie_write8,
2310 .write32 = iwl_trans_pcie_write32, 2408 .write32 = iwl_trans_pcie_write32,
@@ -2423,10 +2521,45 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2423 * "dash" value). To keep hw_rev backwards compatible - we'll store it 2521 * "dash" value). To keep hw_rev backwards compatible - we'll store it
2424 * in the old format. 2522 * in the old format.
2425 */ 2523 */
2426 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) 2524 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
2525 unsigned long flags;
2526 int ret;
2527
2427 trans->hw_rev = (trans->hw_rev & 0xfff0) | 2528 trans->hw_rev = (trans->hw_rev & 0xfff0) |
2428 (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); 2529 (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
2429 2530
2531 /*
2532 * in-order to recognize C step driver should read chip version
2533 * id located at the AUX bus MISC address space.
2534 */
2535 iwl_set_bit(trans, CSR_GP_CNTRL,
2536 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2537 udelay(2);
2538
2539 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
2540 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2541 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2542 25000);
2543 if (ret < 0) {
2544 IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
2545 goto out_pci_disable_msi;
2546 }
2547
2548 if (iwl_trans_grab_nic_access(trans, false, &flags)) {
2549 u32 hw_step;
2550
2551 hw_step = __iwl_read_prph(trans, WFPM_CTRL_REG);
2552 hw_step |= ENABLE_WFPM;
2553 __iwl_write_prph(trans, WFPM_CTRL_REG, hw_step);
2554 hw_step = __iwl_read_prph(trans, AUX_MISC_REG);
2555 hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
2556 if (hw_step == 0x3)
2557 trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
2558 (SILICON_C_STEP << 2);
2559 iwl_trans_release_nic_access(trans, &flags);
2560 }
2561 }
2562
2430 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; 2563 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
2431 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), 2564 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
2432 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); 2565 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index af0bce736358..06952aadfd7b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -725,33 +725,50 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
725 iwl_pcie_tx_start(trans, 0); 725 iwl_pcie_tx_start(trans, 0);
726} 726}
727 727
728static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
729{
730 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
731 unsigned long flags;
732 int ch, ret;
733 u32 mask = 0;
734
735 spin_lock(&trans_pcie->irq_lock);
736
737 if (!iwl_trans_grab_nic_access(trans, false, &flags))
738 goto out;
739
740 /* Stop each Tx DMA channel */
741 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
742 iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
743 mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
744 }
745
746 /* Wait for DMA channels to be idle */
747 ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
748 if (ret < 0)
749 IWL_ERR(trans,
750 "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
751 ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));
752
753 iwl_trans_release_nic_access(trans, &flags);
754
755out:
756 spin_unlock(&trans_pcie->irq_lock);
757}
758
728/* 759/*
729 * iwl_pcie_tx_stop - Stop all Tx DMA channels 760 * iwl_pcie_tx_stop - Stop all Tx DMA channels
730 */ 761 */
731int iwl_pcie_tx_stop(struct iwl_trans *trans) 762int iwl_pcie_tx_stop(struct iwl_trans *trans)
732{ 763{
733 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 764 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
734 int ch, txq_id, ret; 765 int txq_id;
735 766
736 /* Turn off all Tx DMA fifos */ 767 /* Turn off all Tx DMA fifos */
737 spin_lock(&trans_pcie->irq_lock);
738
739 iwl_scd_deactivate_fifos(trans); 768 iwl_scd_deactivate_fifos(trans);
740 769
741 /* Stop each Tx DMA channel, and wait for it to be idle */ 770 /* Turn off all Tx DMA channels */
742 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { 771 iwl_pcie_tx_stop_fh(trans);
743 iwl_write_direct32(trans,
744 FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
745 ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
746 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
747 if (ret < 0)
748 IWL_ERR(trans,
749 "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
750 ch,
751 iwl_read_direct32(trans,
752 FH_TSSR_TX_STATUS_REG));
753 }
754 spin_unlock(&trans_pcie->irq_lock);
755 772
756 /* 773 /*
757 * This function can be called before the op_mode disabled the 774 * This function can be called before the op_mode disabled the
@@ -912,10 +929,19 @@ error:
912 929
913static inline void iwl_pcie_txq_progress(struct iwl_txq *txq) 930static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
914{ 931{
932 lockdep_assert_held(&txq->lock);
933
915 if (!txq->wd_timeout) 934 if (!txq->wd_timeout)
916 return; 935 return;
917 936
918 /* 937 /*
938 * station is asleep and we send data - that must
939 * be uAPSD or PS-Poll. Don't rearm the timer.
940 */
941 if (txq->frozen)
942 return;
943
944 /*
919 * if empty delete timer, otherwise move timer forward 945 * if empty delete timer, otherwise move timer forward
920 * since we're making progress on this queue 946 * since we're making progress on this queue
921 */ 947 */
@@ -1248,6 +1274,9 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
1248 SCD_TX_STTS_QUEUE_OFFSET(txq_id); 1274 SCD_TX_STTS_QUEUE_OFFSET(txq_id);
1249 static const u32 zero_val[4] = {}; 1275 static const u32 zero_val[4] = {};
1250 1276
1277 trans_pcie->txq[txq_id].frozen_expiry_remainder = 0;
1278 trans_pcie->txq[txq_id].frozen = false;
1279
1251 /* 1280 /*
1252 * Upon HW Rfkill - we stop the device, and then stop the queues 1281 * Upon HW Rfkill - we stop the device, and then stop the queues
1253 * in the op_mode. Just for the sake of the simplicity of the op_mode, 1282 * in the op_mode. Just for the sake of the simplicity of the op_mode,
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index d576dd6665d3..1a20cee5febe 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -365,7 +365,6 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
365 365
366 return ret; 366 return ret;
367} 367}
368EXPORT_SYMBOL_GPL(if_usb_reset_device);
369 368
370/** 369/**
371 * usb_tx_block - transfer data to the device 370 * usb_tx_block - transfer data to the device
@@ -907,7 +906,6 @@ restart:
907 lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret); 906 lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret);
908 return ret; 907 return ret;
909} 908}
910EXPORT_SYMBOL_GPL(if_usb_prog_firmware);
911 909
912 910
913#define if_usb_suspend NULL 911#define if_usb_suspend NULL
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 543148d27b01..433bd6837c79 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -159,6 +159,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
159 int tid; 159 int tid;
160 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp; 160 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
161 struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl; 161 struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
162 struct mwifiex_ra_list_tbl *ra_list;
162 u16 block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set); 163 u16 block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
163 164
164 add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn)) 165 add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn))
@@ -166,7 +167,13 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
166 167
167 tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK) 168 tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
168 >> BLOCKACKPARAM_TID_POS; 169 >> BLOCKACKPARAM_TID_POS;
170 ra_list = mwifiex_wmm_get_ralist_node(priv, tid, add_ba_rsp->
171 peer_mac_addr);
169 if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) { 172 if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
173 if (ra_list) {
174 ra_list->ba_status = BA_SETUP_NONE;
175 ra_list->amsdu_in_ampdu = false;
176 }
170 mwifiex_del_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr, 177 mwifiex_del_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr,
171 TYPE_DELBA_SENT, true); 178 TYPE_DELBA_SENT, true);
172 if (add_ba_rsp->add_rsp_result != BA_RESULT_TIMEOUT) 179 if (add_ba_rsp->add_rsp_result != BA_RESULT_TIMEOUT)
@@ -185,6 +192,10 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
185 tx_ba_tbl->amsdu = true; 192 tx_ba_tbl->amsdu = true;
186 else 193 else
187 tx_ba_tbl->amsdu = false; 194 tx_ba_tbl->amsdu = false;
195 if (ra_list) {
196 ra_list->amsdu_in_ampdu = tx_ba_tbl->amsdu;
197 ra_list->ba_status = BA_SETUP_COMPLETE;
198 }
188 } else { 199 } else {
189 dev_err(priv->adapter->dev, "BA stream not created\n"); 200 dev_err(priv->adapter->dev, "BA stream not created\n");
190 } 201 }
@@ -515,6 +526,7 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
515 enum mwifiex_ba_status ba_status) 526 enum mwifiex_ba_status ba_status)
516{ 527{
517 struct mwifiex_tx_ba_stream_tbl *new_node; 528 struct mwifiex_tx_ba_stream_tbl *new_node;
529 struct mwifiex_ra_list_tbl *ra_list;
518 unsigned long flags; 530 unsigned long flags;
519 531
520 if (!mwifiex_get_ba_tbl(priv, tid, ra)) { 532 if (!mwifiex_get_ba_tbl(priv, tid, ra)) {
@@ -522,7 +534,11 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
522 GFP_ATOMIC); 534 GFP_ATOMIC);
523 if (!new_node) 535 if (!new_node)
524 return; 536 return;
525 537 ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra);
538 if (ra_list) {
539 ra_list->ba_status = ba_status;
540 ra_list->amsdu_in_ampdu = false;
541 }
526 INIT_LIST_HEAD(&new_node->list); 542 INIT_LIST_HEAD(&new_node->list);
527 543
528 new_node->tid = tid; 544 new_node->tid = tid;
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 8e2e39422ad8..afdd58aa90de 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -77,22 +77,6 @@ mwifiex_is_station_ampdu_allowed(struct mwifiex_private *priv,
77 return (node->ampdu_sta[tid] != BA_STREAM_NOT_ALLOWED) ? true : false; 77 return (node->ampdu_sta[tid] != BA_STREAM_NOT_ALLOWED) ? true : false;
78} 78}
79 79
80/* This function checks whether AMSDU is allowed for BA stream. */
81static inline u8
82mwifiex_is_amsdu_in_ampdu_allowed(struct mwifiex_private *priv,
83 struct mwifiex_ra_list_tbl *ptr, int tid)
84{
85 struct mwifiex_tx_ba_stream_tbl *tx_tbl;
86
87 if (is_broadcast_ether_addr(ptr->ra))
88 return false;
89 tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra);
90 if (tx_tbl)
91 return tx_tbl->amsdu;
92
93 return false;
94}
95
96/* This function checks whether AMPDU is allowed or not for a particular TID. */ 80/* This function checks whether AMPDU is allowed or not for a particular TID. */
97static inline u8 81static inline u8
98mwifiex_is_ampdu_allowed(struct mwifiex_private *priv, 82mwifiex_is_ampdu_allowed(struct mwifiex_private *priv,
@@ -182,22 +166,6 @@ mwifiex_find_stream_to_delete(struct mwifiex_private *priv, int ptr_tid,
182} 166}
183 167
184/* 168/*
185 * This function checks whether BA stream is set up or not.
186 */
187static inline int
188mwifiex_is_ba_stream_setup(struct mwifiex_private *priv,
189 struct mwifiex_ra_list_tbl *ptr, int tid)
190{
191 struct mwifiex_tx_ba_stream_tbl *tx_tbl;
192
193 tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra);
194 if (tx_tbl && IS_BASTREAM_SETUP(tx_tbl))
195 return true;
196
197 return false;
198}
199
200/*
201 * This function checks whether associated station is 11n enabled 169 * This function checks whether associated station is 11n enabled
202 */ 170 */
203static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv, 171static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 9b983b5cebbd..6183e255e62a 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -170,7 +170,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
170 struct mwifiex_adapter *adapter = priv->adapter; 170 struct mwifiex_adapter *adapter = priv->adapter;
171 struct sk_buff *skb_aggr, *skb_src; 171 struct sk_buff *skb_aggr, *skb_src;
172 struct mwifiex_txinfo *tx_info_aggr, *tx_info_src; 172 struct mwifiex_txinfo *tx_info_aggr, *tx_info_src;
173 int pad = 0, ret; 173 int pad = 0, aggr_num = 0, ret;
174 struct mwifiex_tx_param tx_param; 174 struct mwifiex_tx_param tx_param;
175 struct txpd *ptx_pd = NULL; 175 struct txpd *ptx_pd = NULL;
176 struct timeval tv; 176 struct timeval tv;
@@ -184,7 +184,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
184 } 184 }
185 185
186 tx_info_src = MWIFIEX_SKB_TXCB(skb_src); 186 tx_info_src = MWIFIEX_SKB_TXCB(skb_src);
187 skb_aggr = dev_alloc_skb(adapter->tx_buf_size); 187 skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size,
188 GFP_ATOMIC | GFP_DMA);
188 if (!skb_aggr) { 189 if (!skb_aggr) {
189 dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__); 190 dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__);
190 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, 191 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
@@ -200,6 +201,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
200 201
201 if (tx_info_src->flags & MWIFIEX_BUF_FLAG_TDLS_PKT) 202 if (tx_info_src->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
202 tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT; 203 tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
204 tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_AGGR_PKT;
203 skb_aggr->priority = skb_src->priority; 205 skb_aggr->priority = skb_src->priority;
204 206
205 do_gettimeofday(&tv); 207 do_gettimeofday(&tv);
@@ -211,11 +213,9 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
211 break; 213 break;
212 214
213 skb_src = skb_dequeue(&pra_list->skb_head); 215 skb_src = skb_dequeue(&pra_list->skb_head);
214
215 pra_list->total_pkt_count--; 216 pra_list->total_pkt_count--;
216
217 atomic_dec(&priv->wmm.tx_pkts_queued); 217 atomic_dec(&priv->wmm.tx_pkts_queued);
218 218 aggr_num++;
219 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, 219 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
220 ra_list_flags); 220 ra_list_flags);
221 mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad); 221 mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad);
@@ -251,6 +251,12 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
251 ptx_pd = (struct txpd *)skb_aggr->data; 251 ptx_pd = (struct txpd *)skb_aggr->data;
252 252
253 skb_push(skb_aggr, headroom); 253 skb_push(skb_aggr, headroom);
254 tx_info_aggr->aggr_num = aggr_num * 2;
255 if (adapter->data_sent || adapter->tx_lock_flag) {
256 atomic_add(aggr_num * 2, &adapter->tx_queued);
257 skb_queue_tail(&adapter->tx_data_q, skb_aggr);
258 return 0;
259 }
254 260
255 if (adapter->iface_type == MWIFIEX_USB) { 261 if (adapter->iface_type == MWIFIEX_USB) {
256 adapter->data_sent = true; 262 adapter->data_sent = true;
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index a2e8817b56d8..f75f8acfaca0 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -659,6 +659,7 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
659{ 659{
660 struct mwifiex_rx_reorder_tbl *tbl; 660 struct mwifiex_rx_reorder_tbl *tbl;
661 struct mwifiex_tx_ba_stream_tbl *ptx_tbl; 661 struct mwifiex_tx_ba_stream_tbl *ptx_tbl;
662 struct mwifiex_ra_list_tbl *ra_list;
662 u8 cleanup_rx_reorder_tbl; 663 u8 cleanup_rx_reorder_tbl;
663 unsigned long flags; 664 unsigned long flags;
664 665
@@ -686,7 +687,11 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
686 "event: TID, RA not found in table\n"); 687 "event: TID, RA not found in table\n");
687 return; 688 return;
688 } 689 }
689 690 ra_list = mwifiex_wmm_get_ralist_node(priv, tid, peer_mac);
691 if (ra_list) {
692 ra_list->amsdu_in_ampdu = false;
693 ra_list->ba_status = BA_SETUP_NONE;
694 }
690 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags); 695 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
691 mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl); 696 mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl);
692 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags); 697 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 6f8993c12373..bf9020ff2d33 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -717,6 +717,9 @@ mwifiex_cfg80211_init_p2p_go(struct mwifiex_private *priv)
717 717
718static int mwifiex_deinit_priv_params(struct mwifiex_private *priv) 718static int mwifiex_deinit_priv_params(struct mwifiex_private *priv)
719{ 719{
720 struct mwifiex_adapter *adapter = priv->adapter;
721 unsigned long flags;
722
720 priv->mgmt_frame_mask = 0; 723 priv->mgmt_frame_mask = 0;
721 if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG, 724 if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
722 HostCmd_ACT_GEN_SET, 0, 725 HostCmd_ACT_GEN_SET, 0,
@@ -727,6 +730,25 @@ static int mwifiex_deinit_priv_params(struct mwifiex_private *priv)
727 } 730 }
728 731
729 mwifiex_deauthenticate(priv, NULL); 732 mwifiex_deauthenticate(priv, NULL);
733
734 spin_lock_irqsave(&adapter->main_proc_lock, flags);
735 adapter->main_locked = true;
736 if (adapter->mwifiex_processing) {
737 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
738 flush_workqueue(adapter->workqueue);
739 } else {
740 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
741 }
742
743 spin_lock_irqsave(&adapter->rx_proc_lock, flags);
744 adapter->rx_locked = true;
745 if (adapter->rx_processing) {
746 spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
747 flush_workqueue(adapter->rx_workqueue);
748 } else {
749 spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
750 }
751
730 mwifiex_free_priv(priv); 752 mwifiex_free_priv(priv);
731 priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; 753 priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
732 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; 754 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -740,6 +762,9 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
740 struct net_device *dev, 762 struct net_device *dev,
741 enum nl80211_iftype type) 763 enum nl80211_iftype type)
742{ 764{
765 struct mwifiex_adapter *adapter = priv->adapter;
766 unsigned long flags;
767
743 mwifiex_init_priv(priv); 768 mwifiex_init_priv(priv);
744 769
745 priv->bss_mode = type; 770 priv->bss_mode = type;
@@ -770,6 +795,14 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
770 return -EOPNOTSUPP; 795 return -EOPNOTSUPP;
771 } 796 }
772 797
798 spin_lock_irqsave(&adapter->main_proc_lock, flags);
799 adapter->main_locked = false;
800 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
801
802 spin_lock_irqsave(&adapter->rx_proc_lock, flags);
803 adapter->rx_locked = false;
804 spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
805
773 return 0; 806 return 0;
774} 807}
775 808
@@ -2733,24 +2766,71 @@ mwifiex_is_pattern_supported(struct cfg80211_pkt_pattern *pat, s8 *byte_seq,
2733} 2766}
2734 2767
2735#ifdef CONFIG_PM 2768#ifdef CONFIG_PM
2736static int mwifiex_set_mef_filter(struct mwifiex_private *priv, 2769static void mwifiex_set_auto_arp_mef_entry(struct mwifiex_private *priv,
2737 struct cfg80211_wowlan *wowlan) 2770 struct mwifiex_mef_entry *mef_entry)
2771{
2772 int i, filt_num = 0, num_ipv4 = 0;
2773 struct in_device *in_dev;
2774 struct in_ifaddr *ifa;
2775 __be32 ips[MWIFIEX_MAX_SUPPORTED_IPADDR];
2776 struct mwifiex_adapter *adapter = priv->adapter;
2777
2778 mef_entry->mode = MEF_MODE_HOST_SLEEP;
2779 mef_entry->action = MEF_ACTION_AUTO_ARP;
2780
2781 /* Enable ARP offload feature */
2782 memset(ips, 0, sizeof(ips));
2783 for (i = 0; i < MWIFIEX_MAX_BSS_NUM; i++) {
2784 if (adapter->priv[i]->netdev) {
2785 in_dev = __in_dev_get_rtnl(adapter->priv[i]->netdev);
2786 if (!in_dev)
2787 continue;
2788 ifa = in_dev->ifa_list;
2789 if (!ifa || !ifa->ifa_local)
2790 continue;
2791 ips[i] = ifa->ifa_local;
2792 num_ipv4++;
2793 }
2794 }
2795
2796 for (i = 0; i < num_ipv4; i++) {
2797 if (!ips[i])
2798 continue;
2799 mef_entry->filter[filt_num].repeat = 1;
2800 memcpy(mef_entry->filter[filt_num].byte_seq,
2801 (u8 *)&ips[i], sizeof(ips[i]));
2802 mef_entry->filter[filt_num].
2803 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] =
2804 sizeof(ips[i]);
2805 mef_entry->filter[filt_num].offset = 46;
2806 mef_entry->filter[filt_num].filt_type = TYPE_EQ;
2807 if (filt_num) {
2808 mef_entry->filter[filt_num].filt_action =
2809 TYPE_OR;
2810 }
2811 filt_num++;
2812 }
2813
2814 mef_entry->filter[filt_num].repeat = 1;
2815 mef_entry->filter[filt_num].byte_seq[0] = 0x08;
2816 mef_entry->filter[filt_num].byte_seq[1] = 0x06;
2817 mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] = 2;
2818 mef_entry->filter[filt_num].offset = 20;
2819 mef_entry->filter[filt_num].filt_type = TYPE_EQ;
2820 mef_entry->filter[filt_num].filt_action = TYPE_AND;
2821}
2822
2823static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
2824 struct mwifiex_ds_mef_cfg *mef_cfg,
2825 struct mwifiex_mef_entry *mef_entry,
2826 struct cfg80211_wowlan *wowlan)
2738{ 2827{
2739 int i, filt_num = 0, ret = 0; 2828 int i, filt_num = 0, ret = 0;
2740 bool first_pat = true; 2829 bool first_pat = true;
2741 u8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1]; 2830 u8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1];
2742 const u8 ipv4_mc_mac[] = {0x33, 0x33}; 2831 const u8 ipv4_mc_mac[] = {0x33, 0x33};
2743 const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e}; 2832 const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
2744 struct mwifiex_ds_mef_cfg mef_cfg;
2745 struct mwifiex_mef_entry *mef_entry;
2746 2833
2747 mef_entry = kzalloc(sizeof(*mef_entry), GFP_KERNEL);
2748 if (!mef_entry)
2749 return -ENOMEM;
2750
2751 memset(&mef_cfg, 0, sizeof(mef_cfg));
2752 mef_cfg.num_entries = 1;
2753 mef_cfg.mef_entry = mef_entry;
2754 mef_entry->mode = MEF_MODE_HOST_SLEEP; 2834 mef_entry->mode = MEF_MODE_HOST_SLEEP;
2755 mef_entry->action = MEF_ACTION_ALLOW_AND_WAKEUP_HOST; 2835 mef_entry->action = MEF_ACTION_ALLOW_AND_WAKEUP_HOST;
2756 2836
@@ -2767,20 +2847,19 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
2767 if (!wowlan->patterns[i].pkt_offset) { 2847 if (!wowlan->patterns[i].pkt_offset) {
2768 if (!(byte_seq[0] & 0x01) && 2848 if (!(byte_seq[0] & 0x01) &&
2769 (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 1)) { 2849 (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 1)) {
2770 mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST; 2850 mef_cfg->criteria |= MWIFIEX_CRITERIA_UNICAST;
2771 continue; 2851 continue;
2772 } else if (is_broadcast_ether_addr(byte_seq)) { 2852 } else if (is_broadcast_ether_addr(byte_seq)) {
2773 mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST; 2853 mef_cfg->criteria |= MWIFIEX_CRITERIA_BROADCAST;
2774 continue; 2854 continue;
2775 } else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) && 2855 } else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) &&
2776 (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 2)) || 2856 (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 2)) ||
2777 (!memcmp(byte_seq, ipv6_mc_mac, 3) && 2857 (!memcmp(byte_seq, ipv6_mc_mac, 3) &&
2778 (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 3))) { 2858 (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 3))) {
2779 mef_cfg.criteria |= MWIFIEX_CRITERIA_MULTICAST; 2859 mef_cfg->criteria |= MWIFIEX_CRITERIA_MULTICAST;
2780 continue; 2860 continue;
2781 } 2861 }
2782 } 2862 }
2783
2784 mef_entry->filter[filt_num].repeat = 1; 2863 mef_entry->filter[filt_num].repeat = 1;
2785 mef_entry->filter[filt_num].offset = 2864 mef_entry->filter[filt_num].offset =
2786 wowlan->patterns[i].pkt_offset; 2865 wowlan->patterns[i].pkt_offset;
@@ -2797,7 +2876,7 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
2797 } 2876 }
2798 2877
2799 if (wowlan->magic_pkt) { 2878 if (wowlan->magic_pkt) {
2800 mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST; 2879 mef_cfg->criteria |= MWIFIEX_CRITERIA_UNICAST;
2801 mef_entry->filter[filt_num].repeat = 16; 2880 mef_entry->filter[filt_num].repeat = 16;
2802 memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr, 2881 memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr,
2803 ETH_ALEN); 2882 ETH_ALEN);
@@ -2818,6 +2897,34 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
2818 mef_entry->filter[filt_num].filt_type = TYPE_EQ; 2897 mef_entry->filter[filt_num].filt_type = TYPE_EQ;
2819 mef_entry->filter[filt_num].filt_action = TYPE_OR; 2898 mef_entry->filter[filt_num].filt_action = TYPE_OR;
2820 } 2899 }
2900 return ret;
2901}
2902
2903static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
2904 struct cfg80211_wowlan *wowlan)
2905{
2906 int ret = 0, num_entries = 1;
2907 struct mwifiex_ds_mef_cfg mef_cfg;
2908 struct mwifiex_mef_entry *mef_entry;
2909
2910 if (wowlan->n_patterns || wowlan->magic_pkt)
2911 num_entries++;
2912
2913 mef_entry = kcalloc(num_entries, sizeof(*mef_entry), GFP_KERNEL);
2914 if (!mef_entry)
2915 return -ENOMEM;
2916
2917 memset(&mef_cfg, 0, sizeof(mef_cfg));
2918 mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST |
2919 MWIFIEX_CRITERIA_UNICAST;
2920 mef_cfg.num_entries = num_entries;
2921 mef_cfg.mef_entry = mef_entry;
2922
2923 mwifiex_set_auto_arp_mef_entry(priv, &mef_entry[0]);
2924
2925 if (wowlan->n_patterns || wowlan->magic_pkt)
2926 ret = mwifiex_set_wowlan_mef_entry(priv, &mef_cfg,
2927 &mef_entry[1], wowlan);
2821 2928
2822 if (!mef_cfg.criteria) 2929 if (!mef_cfg.criteria)
2823 mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST | 2930 mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST |
@@ -2825,8 +2932,8 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
2825 MWIFIEX_CRITERIA_MULTICAST; 2932 MWIFIEX_CRITERIA_MULTICAST;
2826 2933
2827 ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG, 2934 ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG,
2828 HostCmd_ACT_GEN_SET, 0, &mef_cfg, true); 2935 HostCmd_ACT_GEN_SET, 0,
2829 2936 &mef_cfg, true);
2830 kfree(mef_entry); 2937 kfree(mef_entry);
2831 return ret; 2938 return ret;
2832} 2939}
@@ -2836,27 +2943,33 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
2836{ 2943{
2837 struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); 2944 struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
2838 struct mwifiex_ds_hs_cfg hs_cfg; 2945 struct mwifiex_ds_hs_cfg hs_cfg;
2839 int ret = 0; 2946 int i, ret = 0;
2840 struct mwifiex_private *priv = 2947 struct mwifiex_private *priv;
2841 mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); 2948
2949 for (i = 0; i < adapter->priv_num; i++) {
2950 priv = adapter->priv[i];
2951 mwifiex_abort_cac(priv);
2952 }
2953
2954 mwifiex_cancel_all_pending_cmd(adapter);
2842 2955
2843 if (!wowlan) { 2956 if (!wowlan) {
2844 dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n"); 2957 dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n");
2845 return 0; 2958 return 0;
2846 } 2959 }
2847 2960
2961 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
2962
2848 if (!priv->media_connected) { 2963 if (!priv->media_connected) {
2849 dev_warn(adapter->dev, 2964 dev_warn(adapter->dev,
2850 "Can not configure WOWLAN in disconnected state\n"); 2965 "Can not configure WOWLAN in disconnected state\n");
2851 return 0; 2966 return 0;
2852 } 2967 }
2853 2968
2854 if (wowlan->n_patterns || wowlan->magic_pkt) { 2969 ret = mwifiex_set_mef_filter(priv, wowlan);
2855 ret = mwifiex_set_mef_filter(priv, wowlan); 2970 if (ret) {
2856 if (ret) { 2971 dev_err(adapter->dev, "Failed to set MEF filter\n");
2857 dev_err(adapter->dev, "Failed to set MEF filter\n"); 2972 return ret;
2858 return ret;
2859 }
2860 } 2973 }
2861 2974
2862 if (wowlan->disconnect) { 2975 if (wowlan->disconnect) {
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index cf2fa110e251..38f24e0427d2 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -83,6 +83,7 @@
83#define MWIFIEX_BUF_FLAG_TDLS_PKT BIT(2) 83#define MWIFIEX_BUF_FLAG_TDLS_PKT BIT(2)
84#define MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS BIT(3) 84#define MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS BIT(3)
85#define MWIFIEX_BUF_FLAG_ACTION_TX_STATUS BIT(4) 85#define MWIFIEX_BUF_FLAG_ACTION_TX_STATUS BIT(4)
86#define MWIFIEX_BUF_FLAG_AGGR_PKT BIT(5)
86 87
87#define MWIFIEX_BRIDGED_PKTS_THR_HIGH 1024 88#define MWIFIEX_BRIDGED_PKTS_THR_HIGH 1024
88#define MWIFIEX_BRIDGED_PKTS_THR_LOW 128 89#define MWIFIEX_BRIDGED_PKTS_THR_LOW 128
@@ -111,6 +112,11 @@
111 112
112#define MWIFIEX_A_BAND_START_FREQ 5000 113#define MWIFIEX_A_BAND_START_FREQ 5000
113 114
115/* SDIO Aggr data packet special info */
116#define SDIO_MAX_AGGR_BUF_SIZE (256 * 255)
117#define BLOCK_NUMBER_OFFSET 15
118#define SDIO_HEADER_OFFSET 28
119
114enum mwifiex_bss_type { 120enum mwifiex_bss_type {
115 MWIFIEX_BSS_TYPE_STA = 0, 121 MWIFIEX_BSS_TYPE_STA = 0,
116 MWIFIEX_BSS_TYPE_UAP = 1, 122 MWIFIEX_BSS_TYPE_UAP = 1,
@@ -168,10 +174,11 @@ struct mwifiex_wait_queue {
168}; 174};
169 175
170struct mwifiex_rxinfo { 176struct mwifiex_rxinfo {
177 struct sk_buff *parent;
171 u8 bss_num; 178 u8 bss_num;
172 u8 bss_type; 179 u8 bss_type;
173 struct sk_buff *parent;
174 u8 use_count; 180 u8 use_count;
181 u8 buf_type;
175}; 182};
176 183
177struct mwifiex_txinfo { 184struct mwifiex_txinfo {
@@ -179,6 +186,7 @@ struct mwifiex_txinfo {
179 u8 flags; 186 u8 flags;
180 u8 bss_num; 187 u8 bss_num;
181 u8 bss_type; 188 u8 bss_type;
189 u8 aggr_num;
182 u32 pkt_len; 190 u32 pkt_len;
183 u8 ack_frame_id; 191 u8 ack_frame_id;
184 u64 cookie; 192 u64 cookie;
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index df553e86a0ad..59d8964dd0dc 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -197,6 +197,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
197 197
198#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11)) 198#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
199#define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14)) 199#define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
200#define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16))
200 201
201#define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \ 202#define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \
202 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \ 203 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \
@@ -353,6 +354,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
353#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d 354#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
354#define HostCmd_CMD_11AC_CFG 0x0112 355#define HostCmd_CMD_11AC_CFG 0x0112
355#define HostCmd_CMD_TDLS_OPER 0x0122 356#define HostCmd_CMD_TDLS_OPER 0x0122
357#define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG 0x0223
356 358
357#define PROTOCOL_NO_SECURITY 0x01 359#define PROTOCOL_NO_SECURITY 0x01
358#define PROTOCOL_STATIC_WEP 0x02 360#define PROTOCOL_STATIC_WEP 0x02
@@ -523,9 +525,11 @@ enum P2P_MODES {
523#define TYPE_OR (MAX_OPERAND+5) 525#define TYPE_OR (MAX_OPERAND+5)
524#define MEF_MODE_HOST_SLEEP 1 526#define MEF_MODE_HOST_SLEEP 1
525#define MEF_ACTION_ALLOW_AND_WAKEUP_HOST 3 527#define MEF_ACTION_ALLOW_AND_WAKEUP_HOST 3
528#define MEF_ACTION_AUTO_ARP 0x10
526#define MWIFIEX_CRITERIA_BROADCAST BIT(0) 529#define MWIFIEX_CRITERIA_BROADCAST BIT(0)
527#define MWIFIEX_CRITERIA_UNICAST BIT(1) 530#define MWIFIEX_CRITERIA_UNICAST BIT(1)
528#define MWIFIEX_CRITERIA_MULTICAST BIT(3) 531#define MWIFIEX_CRITERIA_MULTICAST BIT(3)
532#define MWIFIEX_MAX_SUPPORTED_IPADDR 4
529 533
530#define ACT_TDLS_DELETE 0x00 534#define ACT_TDLS_DELETE 0x00
531#define ACT_TDLS_CREATE 0x01 535#define ACT_TDLS_CREATE 0x01
@@ -1240,6 +1244,12 @@ struct host_cmd_ds_chan_rpt_event {
1240 u8 tlvbuf[0]; 1244 u8 tlvbuf[0];
1241} __packed; 1245} __packed;
1242 1246
1247struct host_cmd_sdio_sp_rx_aggr_cfg {
1248 u8 action;
1249 u8 enable;
1250 __le16 block_size;
1251} __packed;
1252
1243struct mwifiex_fixed_bcn_param { 1253struct mwifiex_fixed_bcn_param {
1244 __le64 timestamp; 1254 __le64 timestamp;
1245 __le16 beacon_period; 1255 __le16 beacon_period;
@@ -1962,6 +1972,7 @@ struct host_cmd_ds_command {
1962 struct host_cmd_ds_coalesce_cfg coalesce_cfg; 1972 struct host_cmd_ds_coalesce_cfg coalesce_cfg;
1963 struct host_cmd_ds_tdls_oper tdls_oper; 1973 struct host_cmd_ds_tdls_oper tdls_oper;
1964 struct host_cmd_ds_chan_rpt_req chan_rpt_req; 1974 struct host_cmd_ds_chan_rpt_req chan_rpt_req;
1975 struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg;
1965 } params; 1976 } params;
1966} __packed; 1977} __packed;
1967 1978
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 0153ce6d5879..e12192f5cfad 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -266,18 +266,15 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
266 266
267 mwifiex_wmm_init(adapter); 267 mwifiex_wmm_init(adapter);
268 268
269 if (adapter->sleep_cfm) { 269 sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
270 sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *) 270 adapter->sleep_cfm->data;
271 adapter->sleep_cfm->data; 271 memset(sleep_cfm_buf, 0, adapter->sleep_cfm->len);
272 memset(sleep_cfm_buf, 0, adapter->sleep_cfm->len); 272 sleep_cfm_buf->command = cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
273 sleep_cfm_buf->command = 273 sleep_cfm_buf->size = cpu_to_le16(adapter->sleep_cfm->len);
274 cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH); 274 sleep_cfm_buf->result = 0;
275 sleep_cfm_buf->size = 275 sleep_cfm_buf->action = cpu_to_le16(SLEEP_CONFIRM);
276 cpu_to_le16(adapter->sleep_cfm->len); 276 sleep_cfm_buf->resp_ctrl = cpu_to_le16(RESP_NEEDED);
277 sleep_cfm_buf->result = 0; 277
278 sleep_cfm_buf->action = cpu_to_le16(SLEEP_CONFIRM);
279 sleep_cfm_buf->resp_ctrl = cpu_to_le16(RESP_NEEDED);
280 }
281 memset(&adapter->sleep_params, 0, sizeof(adapter->sleep_params)); 278 memset(&adapter->sleep_params, 0, sizeof(adapter->sleep_params));
282 memset(&adapter->sleep_period, 0, sizeof(adapter->sleep_period)); 279 memset(&adapter->sleep_period, 0, sizeof(adapter->sleep_period));
283 adapter->tx_lock_flag = false; 280 adapter->tx_lock_flag = false;
@@ -481,6 +478,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
481 spin_lock_init(&adapter->rx_proc_lock); 478 spin_lock_init(&adapter->rx_proc_lock);
482 479
483 skb_queue_head_init(&adapter->rx_data_q); 480 skb_queue_head_init(&adapter->rx_data_q);
481 skb_queue_head_init(&adapter->tx_data_q);
484 482
485 for (i = 0; i < adapter->priv_num; ++i) { 483 for (i = 0; i < adapter->priv_num; ++i) {
486 INIT_LIST_HEAD(&adapter->bss_prio_tbl[i].bss_prio_head); 484 INIT_LIST_HEAD(&adapter->bss_prio_tbl[i].bss_prio_head);
@@ -688,6 +686,10 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
688 } 686 }
689 } 687 }
690 688
689 atomic_set(&adapter->tx_queued, 0);
690 while ((skb = skb_dequeue(&adapter->tx_data_q)))
691 mwifiex_write_data_complete(adapter, skb, 0, 0);
692
691 spin_lock_irqsave(&adapter->rx_proc_lock, flags); 693 spin_lock_irqsave(&adapter->rx_proc_lock, flags);
692 694
693 while ((skb = skb_dequeue(&adapter->rx_data_q))) { 695 while ((skb = skb_dequeue(&adapter->rx_data_q))) {
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index d73a9217b9da..03a95c7d34bf 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -131,10 +131,39 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
131 return 0; 131 return 0;
132} 132}
133 133
134void mwifiex_queue_main_work(struct mwifiex_adapter *adapter)
135{
136 unsigned long flags;
137
138 spin_lock_irqsave(&adapter->main_proc_lock, flags);
139 if (adapter->mwifiex_processing) {
140 adapter->more_task_flag = true;
141 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
142 } else {
143 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
144 queue_work(adapter->workqueue, &adapter->main_work);
145 }
146}
147EXPORT_SYMBOL_GPL(mwifiex_queue_main_work);
148
149static void mwifiex_queue_rx_work(struct mwifiex_adapter *adapter)
150{
151 unsigned long flags;
152
153 spin_lock_irqsave(&adapter->rx_proc_lock, flags);
154 if (adapter->rx_processing) {
155 spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
156 } else {
157 spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
158 queue_work(adapter->rx_workqueue, &adapter->rx_work);
159 }
160}
161
134static int mwifiex_process_rx(struct mwifiex_adapter *adapter) 162static int mwifiex_process_rx(struct mwifiex_adapter *adapter)
135{ 163{
136 unsigned long flags; 164 unsigned long flags;
137 struct sk_buff *skb; 165 struct sk_buff *skb;
166 struct mwifiex_rxinfo *rx_info;
138 167
139 spin_lock_irqsave(&adapter->rx_proc_lock, flags); 168 spin_lock_irqsave(&adapter->rx_proc_lock, flags);
140 if (adapter->rx_processing || adapter->rx_locked) { 169 if (adapter->rx_processing || adapter->rx_locked) {
@@ -154,9 +183,16 @@ static int mwifiex_process_rx(struct mwifiex_adapter *adapter)
154 if (adapter->if_ops.submit_rem_rx_urbs) 183 if (adapter->if_ops.submit_rem_rx_urbs)
155 adapter->if_ops.submit_rem_rx_urbs(adapter); 184 adapter->if_ops.submit_rem_rx_urbs(adapter);
156 adapter->delay_main_work = false; 185 adapter->delay_main_work = false;
157 queue_work(adapter->workqueue, &adapter->main_work); 186 mwifiex_queue_main_work(adapter);
187 }
188 rx_info = MWIFIEX_SKB_RXCB(skb);
189 if (rx_info->buf_type == MWIFIEX_TYPE_AGGR_DATA) {
190 if (adapter->if_ops.deaggr_pkt)
191 adapter->if_ops.deaggr_pkt(adapter, skb);
192 dev_kfree_skb_any(skb);
193 } else {
194 mwifiex_handle_rx_packet(adapter, skb);
158 } 195 }
159 mwifiex_handle_rx_packet(adapter, skb);
160 } 196 }
161 spin_lock_irqsave(&adapter->rx_proc_lock, flags); 197 spin_lock_irqsave(&adapter->rx_proc_lock, flags);
162 adapter->rx_processing = false; 198 adapter->rx_processing = false;
@@ -189,7 +225,7 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
189 spin_lock_irqsave(&adapter->main_proc_lock, flags); 225 spin_lock_irqsave(&adapter->main_proc_lock, flags);
190 226
191 /* Check if already processing */ 227 /* Check if already processing */
192 if (adapter->mwifiex_processing) { 228 if (adapter->mwifiex_processing || adapter->main_locked) {
193 adapter->more_task_flag = true; 229 adapter->more_task_flag = true;
194 spin_unlock_irqrestore(&adapter->main_proc_lock, flags); 230 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
195 goto exit_main_proc; 231 goto exit_main_proc;
@@ -214,9 +250,7 @@ process_start:
214 if (atomic_read(&adapter->rx_pending) >= HIGH_RX_PENDING && 250 if (atomic_read(&adapter->rx_pending) >= HIGH_RX_PENDING &&
215 adapter->iface_type != MWIFIEX_USB) { 251 adapter->iface_type != MWIFIEX_USB) {
216 adapter->delay_main_work = true; 252 adapter->delay_main_work = true;
217 if (!adapter->rx_processing) 253 mwifiex_queue_rx_work(adapter);
218 queue_work(adapter->rx_workqueue,
219 &adapter->rx_work);
220 break; 254 break;
221 } 255 }
222 256
@@ -229,13 +263,14 @@ process_start:
229 } 263 }
230 264
231 if (adapter->rx_work_enabled && adapter->data_received) 265 if (adapter->rx_work_enabled && adapter->data_received)
232 queue_work(adapter->rx_workqueue, &adapter->rx_work); 266 mwifiex_queue_rx_work(adapter);
233 267
234 /* Need to wake up the card ? */ 268 /* Need to wake up the card ? */
235 if ((adapter->ps_state == PS_STATE_SLEEP) && 269 if ((adapter->ps_state == PS_STATE_SLEEP) &&
236 (adapter->pm_wakeup_card_req && 270 (adapter->pm_wakeup_card_req &&
237 !adapter->pm_wakeup_fw_try) && 271 !adapter->pm_wakeup_fw_try) &&
238 (is_command_pending(adapter) || 272 (is_command_pending(adapter) ||
273 !skb_queue_empty(&adapter->tx_data_q) ||
239 !mwifiex_wmm_lists_empty(adapter))) { 274 !mwifiex_wmm_lists_empty(adapter))) {
240 adapter->pm_wakeup_fw_try = true; 275 adapter->pm_wakeup_fw_try = true;
241 mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3)); 276 mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
@@ -247,7 +282,7 @@ process_start:
247 if (IS_CARD_RX_RCVD(adapter)) { 282 if (IS_CARD_RX_RCVD(adapter)) {
248 adapter->data_received = false; 283 adapter->data_received = false;
249 adapter->pm_wakeup_fw_try = false; 284 adapter->pm_wakeup_fw_try = false;
250 del_timer_sync(&adapter->wakeup_timer); 285 del_timer(&adapter->wakeup_timer);
251 if (adapter->ps_state == PS_STATE_SLEEP) 286 if (adapter->ps_state == PS_STATE_SLEEP)
252 adapter->ps_state = PS_STATE_AWAKE; 287 adapter->ps_state = PS_STATE_AWAKE;
253 } else { 288 } else {
@@ -260,7 +295,8 @@ process_start:
260 295
261 if ((!adapter->scan_chan_gap_enabled && 296 if ((!adapter->scan_chan_gap_enabled &&
262 adapter->scan_processing) || adapter->data_sent || 297 adapter->scan_processing) || adapter->data_sent ||
263 mwifiex_wmm_lists_empty(adapter)) { 298 (mwifiex_wmm_lists_empty(adapter) &&
299 skb_queue_empty(&adapter->tx_data_q))) {
264 if (adapter->cmd_sent || adapter->curr_cmd || 300 if (adapter->cmd_sent || adapter->curr_cmd ||
265 (!is_command_pending(adapter))) 301 (!is_command_pending(adapter)))
266 break; 302 break;
@@ -312,6 +348,20 @@ process_start:
312 348
313 if ((adapter->scan_chan_gap_enabled || 349 if ((adapter->scan_chan_gap_enabled ||
314 !adapter->scan_processing) && 350 !adapter->scan_processing) &&
351 !adapter->data_sent &&
352 !skb_queue_empty(&adapter->tx_data_q)) {
353 mwifiex_process_tx_queue(adapter);
354 if (adapter->hs_activated) {
355 adapter->is_hs_configured = false;
356 mwifiex_hs_activated_event
357 (mwifiex_get_priv
358 (adapter, MWIFIEX_BSS_ROLE_ANY),
359 false);
360 }
361 }
362
363 if ((adapter->scan_chan_gap_enabled ||
364 !adapter->scan_processing) &&
315 !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) { 365 !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) {
316 mwifiex_wmm_process_tx(adapter); 366 mwifiex_wmm_process_tx(adapter);
317 if (adapter->hs_activated) { 367 if (adapter->hs_activated) {
@@ -325,7 +375,8 @@ process_start:
325 375
326 if (adapter->delay_null_pkt && !adapter->cmd_sent && 376 if (adapter->delay_null_pkt && !adapter->cmd_sent &&
327 !adapter->curr_cmd && !is_command_pending(adapter) && 377 !adapter->curr_cmd && !is_command_pending(adapter) &&
328 mwifiex_wmm_lists_empty(adapter)) { 378 (mwifiex_wmm_lists_empty(adapter) &&
379 skb_queue_empty(&adapter->tx_data_q))) {
329 if (!mwifiex_send_null_packet 380 if (!mwifiex_send_null_packet
330 (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), 381 (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
331 MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET | 382 MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET |
@@ -606,7 +657,7 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
606 atomic_inc(&priv->adapter->tx_pending); 657 atomic_inc(&priv->adapter->tx_pending);
607 mwifiex_wmm_add_buf_txqueue(priv, skb); 658 mwifiex_wmm_add_buf_txqueue(priv, skb);
608 659
609 queue_work(priv->adapter->workqueue, &priv->adapter->main_work); 660 mwifiex_queue_main_work(priv->adapter);
610 661
611 return 0; 662 return 0;
612} 663}
@@ -1098,9 +1149,6 @@ mwifiex_add_card(void *card, struct semaphore *sem,
1098 INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue); 1149 INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue);
1099 } 1150 }
1100 1151
1101 if (adapter->if_ops.iface_work)
1102 INIT_WORK(&adapter->iface_work, adapter->if_ops.iface_work);
1103
1104 /* Register the device. Fill up the private data structure with relevant 1152 /* Register the device. Fill up the private data structure with relevant
1105 information from the card. */ 1153 information from the card. */
1106 if (adapter->if_ops.register_dev(adapter)) { 1154 if (adapter->if_ops.register_dev(adapter)) {
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index ad8db61aeeef..fe1256044a6c 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -35,6 +35,7 @@
35#include <linux/ctype.h> 35#include <linux/ctype.h>
36#include <linux/of.h> 36#include <linux/of.h>
37#include <linux/idr.h> 37#include <linux/idr.h>
38#include <linux/inetdevice.h>
38 39
39#include "decl.h" 40#include "decl.h"
40#include "ioctl.h" 41#include "ioctl.h"
@@ -58,6 +59,8 @@ enum {
58 59
59#define MWIFIEX_MAX_AP 64 60#define MWIFIEX_MAX_AP 64
60 61
62#define MWIFIEX_MAX_PKTS_TXQ 16
63
61#define MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT (5 * HZ) 64#define MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT (5 * HZ)
62 65
63#define MWIFIEX_TIMER_10S 10000 66#define MWIFIEX_TIMER_10S 10000
@@ -118,6 +121,7 @@ enum {
118 121
119#define MWIFIEX_TYPE_CMD 1 122#define MWIFIEX_TYPE_CMD 1
120#define MWIFIEX_TYPE_DATA 0 123#define MWIFIEX_TYPE_DATA 0
124#define MWIFIEX_TYPE_AGGR_DATA 10
121#define MWIFIEX_TYPE_EVENT 3 125#define MWIFIEX_TYPE_EVENT 3
122 126
123#define MAX_BITMAP_RATES_SIZE 18 127#define MAX_BITMAP_RATES_SIZE 18
@@ -210,6 +214,12 @@ struct mwifiex_tx_aggr {
210 u8 amsdu; 214 u8 amsdu;
211}; 215};
212 216
217enum mwifiex_ba_status {
218 BA_SETUP_NONE = 0,
219 BA_SETUP_INPROGRESS,
220 BA_SETUP_COMPLETE
221};
222
213struct mwifiex_ra_list_tbl { 223struct mwifiex_ra_list_tbl {
214 struct list_head list; 224 struct list_head list;
215 struct sk_buff_head skb_head; 225 struct sk_buff_head skb_head;
@@ -218,6 +228,8 @@ struct mwifiex_ra_list_tbl {
218 u16 max_amsdu; 228 u16 max_amsdu;
219 u16 ba_pkt_count; 229 u16 ba_pkt_count;
220 u8 ba_packet_thr; 230 u8 ba_packet_thr;
231 enum mwifiex_ba_status ba_status;
232 u8 amsdu_in_ampdu;
221 u16 total_pkt_count; 233 u16 total_pkt_count;
222 bool tdls_link; 234 bool tdls_link;
223}; 235};
@@ -601,11 +613,6 @@ struct mwifiex_private {
601 struct mwifiex_11h_intf_state state_11h; 613 struct mwifiex_11h_intf_state state_11h;
602}; 614};
603 615
604enum mwifiex_ba_status {
605 BA_SETUP_NONE = 0,
606 BA_SETUP_INPROGRESS,
607 BA_SETUP_COMPLETE
608};
609 616
610struct mwifiex_tx_ba_stream_tbl { 617struct mwifiex_tx_ba_stream_tbl {
611 struct list_head list; 618 struct list_head list;
@@ -738,6 +745,7 @@ struct mwifiex_if_ops {
738 int (*clean_pcie_ring) (struct mwifiex_adapter *adapter); 745 int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
739 void (*iface_work)(struct work_struct *work); 746 void (*iface_work)(struct work_struct *work);
740 void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter); 747 void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter);
748 void (*deaggr_pkt)(struct mwifiex_adapter *, struct sk_buff *);
741}; 749};
742 750
743struct mwifiex_adapter { 751struct mwifiex_adapter {
@@ -771,6 +779,7 @@ struct mwifiex_adapter {
771 bool rx_processing; 779 bool rx_processing;
772 bool delay_main_work; 780 bool delay_main_work;
773 bool rx_locked; 781 bool rx_locked;
782 bool main_locked;
774 struct mwifiex_bss_prio_tbl bss_prio_tbl[MWIFIEX_MAX_BSS_NUM]; 783 struct mwifiex_bss_prio_tbl bss_prio_tbl[MWIFIEX_MAX_BSS_NUM];
775 /* spin lock for init/shutdown */ 784 /* spin lock for init/shutdown */
776 spinlock_t mwifiex_lock; 785 spinlock_t mwifiex_lock;
@@ -780,6 +789,8 @@ struct mwifiex_adapter {
780 u8 more_task_flag; 789 u8 more_task_flag;
781 u16 tx_buf_size; 790 u16 tx_buf_size;
782 u16 curr_tx_buf_size; 791 u16 curr_tx_buf_size;
792 bool sdio_rx_aggr_enable;
793 u16 sdio_rx_block_size;
783 u32 ioport; 794 u32 ioport;
784 enum MWIFIEX_HARDWARE_STATUS hw_status; 795 enum MWIFIEX_HARDWARE_STATUS hw_status;
785 u16 number_of_antenna; 796 u16 number_of_antenna;
@@ -814,6 +825,8 @@ struct mwifiex_adapter {
814 spinlock_t scan_pending_q_lock; 825 spinlock_t scan_pending_q_lock;
815 /* spin lock for RX processing routine */ 826 /* spin lock for RX processing routine */
816 spinlock_t rx_proc_lock; 827 spinlock_t rx_proc_lock;
828 struct sk_buff_head tx_data_q;
829 atomic_t tx_queued;
817 u32 scan_processing; 830 u32 scan_processing;
818 u16 region_code; 831 u16 region_code;
819 struct mwifiex_802_11d_domain_reg domain_reg; 832 struct mwifiex_802_11d_domain_reg domain_reg;
@@ -885,8 +898,6 @@ struct mwifiex_adapter {
885 bool ext_scan; 898 bool ext_scan;
886 u8 fw_api_ver; 899 u8 fw_api_ver;
887 u8 key_api_major_ver, key_api_minor_ver; 900 u8 key_api_major_ver, key_api_minor_ver;
888 struct work_struct iface_work;
889 unsigned long iface_work_flags;
890 struct memory_type_mapping *mem_type_mapping_tbl; 901 struct memory_type_mapping *mem_type_mapping_tbl;
891 u8 num_mem_types; 902 u8 num_mem_types;
892 u8 curr_mem_idx; 903 u8 curr_mem_idx;
@@ -900,6 +911,8 @@ struct mwifiex_adapter {
900 bool auto_tdls; 911 bool auto_tdls;
901}; 912};
902 913
914void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
915
903int mwifiex_init_lock_list(struct mwifiex_adapter *adapter); 916int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
904 917
905void mwifiex_set_trans_start(struct net_device *dev); 918void mwifiex_set_trans_start(struct net_device *dev);
@@ -1422,7 +1435,8 @@ u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
1422 u8 rx_rate, u8 ht_info); 1435 u8 rx_rate, u8 ht_info);
1423 1436
1424void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter); 1437void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter);
1425void *mwifiex_alloc_rx_buf(int rx_len, gfp_t flags); 1438void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
1439void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
1426 1440
1427#ifdef CONFIG_DEBUG_FS 1441#ifdef CONFIG_DEBUG_FS
1428void mwifiex_debugfs_init(void); 1442void mwifiex_debugfs_init(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 4b463c3b9906..bcc7751d883c 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -234,8 +234,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
234 if (!adapter || !adapter->priv_num) 234 if (!adapter || !adapter->priv_num)
235 return; 235 return;
236 236
237 cancel_work_sync(&adapter->iface_work);
238
239 if (user_rmmod) { 237 if (user_rmmod) {
240#ifdef CONFIG_PM_SLEEP 238#ifdef CONFIG_PM_SLEEP
241 if (adapter->is_suspended) 239 if (adapter->is_suspended)
@@ -498,8 +496,8 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
498 496
499 for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) { 497 for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
500 /* Allocate skb here so that firmware can DMA data from it */ 498 /* Allocate skb here so that firmware can DMA data from it */
501 skb = mwifiex_alloc_rx_buf(MWIFIEX_RX_DATA_BUF_SIZE, 499 skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
502 GFP_KERNEL | GFP_DMA); 500 GFP_KERNEL | GFP_DMA);
503 if (!skb) { 501 if (!skb) {
504 dev_err(adapter->dev, 502 dev_err(adapter->dev,
505 "Unable to allocate skb for RX ring.\n"); 503 "Unable to allocate skb for RX ring.\n");
@@ -1298,8 +1296,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
1298 } 1296 }
1299 } 1297 }
1300 1298
1301 skb_tmp = mwifiex_alloc_rx_buf(MWIFIEX_RX_DATA_BUF_SIZE, 1299 skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
1302 GFP_KERNEL | GFP_DMA); 1300 GFP_KERNEL | GFP_DMA);
1303 if (!skb_tmp) { 1301 if (!skb_tmp) {
1304 dev_err(adapter->dev, 1302 dev_err(adapter->dev,
1305 "Unable to allocate skb.\n"); 1303 "Unable to allocate skb.\n");
@@ -2101,7 +2099,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
2101 goto exit; 2099 goto exit;
2102 2100
2103 mwifiex_interrupt_status(adapter); 2101 mwifiex_interrupt_status(adapter);
2104 queue_work(adapter->workqueue, &adapter->main_work); 2102 mwifiex_queue_main_work(adapter);
2105 2103
2106exit: 2104exit:
2107 return IRQ_HANDLED; 2105 return IRQ_HANDLED;
@@ -2373,25 +2371,26 @@ done:
2373 adapter->curr_mem_idx = 0; 2371 adapter->curr_mem_idx = 0;
2374} 2372}
2375 2373
2374static unsigned long iface_work_flags;
2375static struct mwifiex_adapter *save_adapter;
2376static void mwifiex_pcie_work(struct work_struct *work) 2376static void mwifiex_pcie_work(struct work_struct *work)
2377{ 2377{
2378 struct mwifiex_adapter *adapter =
2379 container_of(work, struct mwifiex_adapter, iface_work);
2380
2381 if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP, 2378 if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
2382 &adapter->iface_work_flags)) 2379 &iface_work_flags))
2383 mwifiex_pcie_fw_dump_work(adapter); 2380 mwifiex_pcie_fw_dump_work(save_adapter);
2384} 2381}
2385 2382
2383static DECLARE_WORK(pcie_work, mwifiex_pcie_work);
2386/* This function dumps FW information */ 2384/* This function dumps FW information */
2387static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) 2385static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
2388{ 2386{
2389 if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags)) 2387 save_adapter = adapter;
2388 if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags))
2390 return; 2389 return;
2391 2390
2392 set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags); 2391 set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags);
2393 2392
2394 schedule_work(&adapter->iface_work); 2393 schedule_work(&pcie_work);
2395} 2394}
2396 2395
2397/* 2396/*
@@ -2619,7 +2618,6 @@ static struct mwifiex_if_ops pcie_ops = {
2619 .init_fw_port = mwifiex_pcie_init_fw_port, 2618 .init_fw_port = mwifiex_pcie_init_fw_port,
2620 .clean_pcie_ring = mwifiex_clean_pcie_ring_buf, 2619 .clean_pcie_ring = mwifiex_clean_pcie_ring_buf,
2621 .fw_dump = mwifiex_pcie_fw_dump, 2620 .fw_dump = mwifiex_pcie_fw_dump,
2622 .iface_work = mwifiex_pcie_work,
2623}; 2621};
2624 2622
2625/* 2623/*
@@ -2665,6 +2663,7 @@ static void mwifiex_pcie_cleanup_module(void)
2665 /* Set the flag as user is removing this module. */ 2663 /* Set the flag as user is removing this module. */
2666 user_rmmod = 1; 2664 user_rmmod = 1;
2667 2665
2666 cancel_work_sync(&pcie_work);
2668 pci_unregister_driver(&mwifiex_pcie); 2667 pci_unregister_driver(&mwifiex_pcie);
2669} 2668}
2670 2669
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 57d85ab442bf..d10320f89bc1 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -47,6 +47,7 @@
47static u8 user_rmmod; 47static u8 user_rmmod;
48 48
49static struct mwifiex_if_ops sdio_ops; 49static struct mwifiex_if_ops sdio_ops;
50static unsigned long iface_work_flags;
50 51
51static struct semaphore add_remove_card_sem; 52static struct semaphore add_remove_card_sem;
52 53
@@ -200,8 +201,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
200 if (!adapter || !adapter->priv_num) 201 if (!adapter || !adapter->priv_num)
201 return; 202 return;
202 203
203 cancel_work_sync(&adapter->iface_work);
204
205 if (user_rmmod) { 204 if (user_rmmod) {
206 if (adapter->is_suspended) 205 if (adapter->is_suspended)
207 mwifiex_sdio_resume(adapter->dev); 206 mwifiex_sdio_resume(adapter->dev);
@@ -1043,6 +1042,59 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
1043} 1042}
1044 1043
1045/* 1044/*
1045 * This function decode sdio aggreation pkt.
1046 *
1047 * Based on the the data block size and pkt_len,
1048 * skb data will be decoded to few packets.
1049 */
1050static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
1051 struct sk_buff *skb)
1052{
1053 u32 total_pkt_len, pkt_len;
1054 struct sk_buff *skb_deaggr;
1055 u32 pkt_type;
1056 u16 blk_size;
1057 u8 blk_num;
1058 u8 *data;
1059
1060 data = skb->data;
1061 total_pkt_len = skb->len;
1062
1063 while (total_pkt_len >= (SDIO_HEADER_OFFSET + INTF_HEADER_LEN)) {
1064 if (total_pkt_len < adapter->sdio_rx_block_size)
1065 break;
1066 blk_num = *(data + BLOCK_NUMBER_OFFSET);
1067 blk_size = adapter->sdio_rx_block_size * blk_num;
1068 if (blk_size > total_pkt_len) {
1069 dev_err(adapter->dev, "%s: error in pkt,\t"
1070 "blk_num=%d, blk_size=%d, total_pkt_len=%d\n",
1071 __func__, blk_num, blk_size, total_pkt_len);
1072 break;
1073 }
1074 pkt_len = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET));
1075 pkt_type = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET +
1076 2));
1077 if ((pkt_len + SDIO_HEADER_OFFSET) > blk_size) {
1078 dev_err(adapter->dev, "%s: error in pkt,\t"
1079 "pkt_len=%d, blk_size=%d\n",
1080 __func__, pkt_len, blk_size);
1081 break;
1082 }
1083 skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len,
1084 GFP_KERNEL | GFP_DMA);
1085 if (!skb_deaggr)
1086 break;
1087 skb_put(skb_deaggr, pkt_len);
1088 memcpy(skb_deaggr->data, data + SDIO_HEADER_OFFSET, pkt_len);
1089 skb_pull(skb_deaggr, INTF_HEADER_LEN);
1090
1091 mwifiex_handle_rx_packet(adapter, skb_deaggr);
1092 data += blk_size;
1093 total_pkt_len -= blk_size;
1094 }
1095}
1096
1097/*
1046 * This function decodes a received packet. 1098 * This function decodes a received packet.
1047 * 1099 *
1048 * Based on the type, the packet is treated as either a data, or 1100 * Based on the type, the packet is treated as either a data, or
@@ -1055,11 +1107,28 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
1055 u8 *cmd_buf; 1107 u8 *cmd_buf;
1056 __le16 *curr_ptr = (__le16 *)skb->data; 1108 __le16 *curr_ptr = (__le16 *)skb->data;
1057 u16 pkt_len = le16_to_cpu(*curr_ptr); 1109 u16 pkt_len = le16_to_cpu(*curr_ptr);
1110 struct mwifiex_rxinfo *rx_info;
1058 1111
1059 skb_trim(skb, pkt_len); 1112 if (upld_typ != MWIFIEX_TYPE_AGGR_DATA) {
1060 skb_pull(skb, INTF_HEADER_LEN); 1113 skb_trim(skb, pkt_len);
1114 skb_pull(skb, INTF_HEADER_LEN);
1115 }
1061 1116
1062 switch (upld_typ) { 1117 switch (upld_typ) {
1118 case MWIFIEX_TYPE_AGGR_DATA:
1119 dev_dbg(adapter->dev, "info: --- Rx: Aggr Data packet ---\n");
1120 rx_info = MWIFIEX_SKB_RXCB(skb);
1121 rx_info->buf_type = MWIFIEX_TYPE_AGGR_DATA;
1122 if (adapter->rx_work_enabled) {
1123 skb_queue_tail(&adapter->rx_data_q, skb);
1124 atomic_inc(&adapter->rx_pending);
1125 adapter->data_received = true;
1126 } else {
1127 mwifiex_deaggr_sdio_pkt(adapter, skb);
1128 dev_kfree_skb_any(skb);
1129 }
1130 break;
1131
1063 case MWIFIEX_TYPE_DATA: 1132 case MWIFIEX_TYPE_DATA:
1064 dev_dbg(adapter->dev, "info: --- Rx: Data packet ---\n"); 1133 dev_dbg(adapter->dev, "info: --- Rx: Data packet ---\n");
1065 if (adapter->rx_work_enabled) { 1134 if (adapter->rx_work_enabled) {
@@ -1127,17 +1196,17 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
1127 * provided there is space left, processed and finally uploaded. 1196 * provided there is space left, processed and finally uploaded.
1128 */ 1197 */
1129static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, 1198static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
1130 struct sk_buff *skb, u8 port) 1199 u16 rx_len, u8 port)
1131{ 1200{
1132 struct sdio_mmc_card *card = adapter->card; 1201 struct sdio_mmc_card *card = adapter->card;
1133 s32 f_do_rx_aggr = 0; 1202 s32 f_do_rx_aggr = 0;
1134 s32 f_do_rx_cur = 0; 1203 s32 f_do_rx_cur = 0;
1135 s32 f_aggr_cur = 0; 1204 s32 f_aggr_cur = 0;
1205 s32 f_post_aggr_cur = 0;
1136 struct sk_buff *skb_deaggr; 1206 struct sk_buff *skb_deaggr;
1137 u32 pind; 1207 struct sk_buff *skb = NULL;
1138 u32 pkt_len, pkt_type, mport; 1208 u32 pkt_len, pkt_type, mport, pind;
1139 u8 *curr_ptr; 1209 u8 *curr_ptr;
1140 u32 rx_len = skb->len;
1141 1210
1142 if ((card->has_control_mask) && (port == CTRL_PORT)) { 1211 if ((card->has_control_mask) && (port == CTRL_PORT)) {
1143 /* Read the command Resp without aggr */ 1212 /* Read the command Resp without aggr */
@@ -1164,12 +1233,12 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
1164 dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__); 1233 dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__);
1165 1234
1166 if (MP_RX_AGGR_IN_PROGRESS(card)) { 1235 if (MP_RX_AGGR_IN_PROGRESS(card)) {
1167 if (MP_RX_AGGR_BUF_HAS_ROOM(card, skb->len)) { 1236 if (MP_RX_AGGR_BUF_HAS_ROOM(card, rx_len)) {
1168 f_aggr_cur = 1; 1237 f_aggr_cur = 1;
1169 } else { 1238 } else {
1170 /* No room in Aggr buf, do rx aggr now */ 1239 /* No room in Aggr buf, do rx aggr now */
1171 f_do_rx_aggr = 1; 1240 f_do_rx_aggr = 1;
1172 f_do_rx_cur = 1; 1241 f_post_aggr_cur = 1;
1173 } 1242 }
1174 } else { 1243 } else {
1175 /* Rx aggr not in progress */ 1244 /* Rx aggr not in progress */
@@ -1182,7 +1251,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
1182 1251
1183 if (MP_RX_AGGR_IN_PROGRESS(card)) { 1252 if (MP_RX_AGGR_IN_PROGRESS(card)) {
1184 f_do_rx_aggr = 1; 1253 f_do_rx_aggr = 1;
1185 if (MP_RX_AGGR_BUF_HAS_ROOM(card, skb->len)) 1254 if (MP_RX_AGGR_BUF_HAS_ROOM(card, rx_len))
1186 f_aggr_cur = 1; 1255 f_aggr_cur = 1;
1187 else 1256 else
1188 /* No room in Aggr buf, do rx aggr now */ 1257 /* No room in Aggr buf, do rx aggr now */
@@ -1195,7 +1264,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
1195 if (f_aggr_cur) { 1264 if (f_aggr_cur) {
1196 dev_dbg(adapter->dev, "info: current packet aggregation\n"); 1265 dev_dbg(adapter->dev, "info: current packet aggregation\n");
1197 /* Curr pkt can be aggregated */ 1266 /* Curr pkt can be aggregated */
1198 mp_rx_aggr_setup(card, skb, port); 1267 mp_rx_aggr_setup(card, rx_len, port);
1199 1268
1200 if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) || 1269 if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) ||
1201 mp_rx_aggr_port_limit_reached(card)) { 1270 mp_rx_aggr_port_limit_reached(card)) {
@@ -1238,16 +1307,29 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
1238 curr_ptr = card->mpa_rx.buf; 1307 curr_ptr = card->mpa_rx.buf;
1239 1308
1240 for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) { 1309 for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
1310 u32 *len_arr = card->mpa_rx.len_arr;
1241 1311
1242 /* get curr PKT len & type */ 1312 /* get curr PKT len & type */
1243 pkt_len = le16_to_cpu(*(__le16 *) &curr_ptr[0]); 1313 pkt_len = le16_to_cpu(*(__le16 *) &curr_ptr[0]);
1244 pkt_type = le16_to_cpu(*(__le16 *) &curr_ptr[2]); 1314 pkt_type = le16_to_cpu(*(__le16 *) &curr_ptr[2]);
1245 1315
1246 /* copy pkt to deaggr buf */ 1316 /* copy pkt to deaggr buf */
1247 skb_deaggr = card->mpa_rx.skb_arr[pind]; 1317 skb_deaggr = mwifiex_alloc_dma_align_buf(len_arr[pind],
1318 GFP_KERNEL |
1319 GFP_DMA);
1320 if (!skb_deaggr) {
1321 dev_err(adapter->dev, "skb allocation failure drop pkt len=%d type=%d\n",
1322 pkt_len, pkt_type);
1323 curr_ptr += len_arr[pind];
1324 continue;
1325 }
1248 1326
1249 if ((pkt_type == MWIFIEX_TYPE_DATA) && (pkt_len <= 1327 skb_put(skb_deaggr, len_arr[pind]);
1250 card->mpa_rx.len_arr[pind])) { 1328
1329 if ((pkt_type == MWIFIEX_TYPE_DATA ||
1330 (pkt_type == MWIFIEX_TYPE_AGGR_DATA &&
1331 adapter->sdio_rx_aggr_enable)) &&
1332 (pkt_len <= len_arr[pind])) {
1251 1333
1252 memcpy(skb_deaggr->data, curr_ptr, pkt_len); 1334 memcpy(skb_deaggr->data, curr_ptr, pkt_len);
1253 1335
@@ -1257,13 +1339,15 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
1257 mwifiex_decode_rx_packet(adapter, skb_deaggr, 1339 mwifiex_decode_rx_packet(adapter, skb_deaggr,
1258 pkt_type); 1340 pkt_type);
1259 } else { 1341 } else {
1260 dev_err(adapter->dev, "wrong aggr pkt:" 1342 dev_err(adapter->dev, " drop wrong aggr pkt:\t"
1261 " type=%d len=%d max_len=%d\n", 1343 "sdio_single_port_rx_aggr=%d\t"
1344 "type=%d len=%d max_len=%d\n",
1345 adapter->sdio_rx_aggr_enable,
1262 pkt_type, pkt_len, 1346 pkt_type, pkt_len,
1263 card->mpa_rx.len_arr[pind]); 1347 len_arr[pind]);
1264 dev_kfree_skb_any(skb_deaggr); 1348 dev_kfree_skb_any(skb_deaggr);
1265 } 1349 }
1266 curr_ptr += card->mpa_rx.len_arr[pind]; 1350 curr_ptr += len_arr[pind];
1267 } 1351 }
1268 MP_RX_AGGR_BUF_RESET(card); 1352 MP_RX_AGGR_BUF_RESET(card);
1269 } 1353 }
@@ -1273,28 +1357,46 @@ rx_curr_single:
1273 dev_dbg(adapter->dev, "info: RX: port: %d, rx_len: %d\n", 1357 dev_dbg(adapter->dev, "info: RX: port: %d, rx_len: %d\n",
1274 port, rx_len); 1358 port, rx_len);
1275 1359
1360 skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
1361 if (!skb) {
1362 dev_err(adapter->dev, "single skb allocated fail,\t"
1363 "drop pkt port=%d len=%d\n", port, rx_len);
1364 if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
1365 card->mpa_rx.buf, rx_len,
1366 adapter->ioport + port))
1367 goto error;
1368 return 0;
1369 }
1370
1371 skb_put(skb, rx_len);
1372
1276 if (mwifiex_sdio_card_to_host(adapter, &pkt_type, 1373 if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
1277 skb->data, skb->len, 1374 skb->data, skb->len,
1278 adapter->ioport + port)) 1375 adapter->ioport + port))
1279 goto error; 1376 goto error;
1377 if (!adapter->sdio_rx_aggr_enable &&
1378 pkt_type == MWIFIEX_TYPE_AGGR_DATA) {
1379 dev_err(adapter->dev, "drop wrong pkt type %d\t"
1380 "current SDIO RX Aggr not enabled\n",
1381 pkt_type);
1382 dev_kfree_skb_any(skb);
1383 return 0;
1384 }
1280 1385
1281 mwifiex_decode_rx_packet(adapter, skb, pkt_type); 1386 mwifiex_decode_rx_packet(adapter, skb, pkt_type);
1282 } 1387 }
1388 if (f_post_aggr_cur) {
1389 dev_dbg(adapter->dev, "info: current packet aggregation\n");
1390 /* Curr pkt can be aggregated */
1391 mp_rx_aggr_setup(card, rx_len, port);
1392 }
1283 1393
1284 return 0; 1394 return 0;
1285
1286error: 1395error:
1287 if (MP_RX_AGGR_IN_PROGRESS(card)) { 1396 if (MP_RX_AGGR_IN_PROGRESS(card))
1288 /* Multiport-aggregation transfer failed - cleanup */
1289 for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
1290 /* copy pkt to deaggr buf */
1291 skb_deaggr = card->mpa_rx.skb_arr[pind];
1292 dev_kfree_skb_any(skb_deaggr);
1293 }
1294 MP_RX_AGGR_BUF_RESET(card); 1397 MP_RX_AGGR_BUF_RESET(card);
1295 }
1296 1398
1297 if (f_do_rx_cur) 1399 if (f_do_rx_cur && skb)
1298 /* Single transfer pending. Free curr buff also */ 1400 /* Single transfer pending. Free curr buff also */
1299 dev_kfree_skb_any(skb); 1401 dev_kfree_skb_any(skb);
1300 1402
@@ -1356,8 +1458,9 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
1356 MWIFIEX_RX_DATA_BUF_SIZE) 1458 MWIFIEX_RX_DATA_BUF_SIZE)
1357 return -1; 1459 return -1;
1358 rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE); 1460 rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
1461 dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len);
1359 1462
1360 skb = mwifiex_alloc_rx_buf(rx_len, GFP_KERNEL | GFP_DMA); 1463 skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
1361 if (!skb) 1464 if (!skb)
1362 return -1; 1465 return -1;
1363 1466
@@ -1447,28 +1550,16 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
1447 1) / MWIFIEX_SDIO_BLOCK_SIZE; 1550 1) / MWIFIEX_SDIO_BLOCK_SIZE;
1448 if (rx_len <= INTF_HEADER_LEN || 1551 if (rx_len <= INTF_HEADER_LEN ||
1449 (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) > 1552 (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
1450 MWIFIEX_RX_DATA_BUF_SIZE) { 1553 card->mpa_rx.buf_size) {
1451 dev_err(adapter->dev, "invalid rx_len=%d\n", 1554 dev_err(adapter->dev, "invalid rx_len=%d\n",
1452 rx_len); 1555 rx_len);
1453 return -1; 1556 return -1;
1454 } 1557 }
1455 rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
1456
1457 skb = mwifiex_alloc_rx_buf(rx_len,
1458 GFP_KERNEL | GFP_DMA);
1459
1460 if (!skb) {
1461 dev_err(adapter->dev, "%s: failed to alloc skb",
1462 __func__);
1463 return -1;
1464 }
1465 1558
1466 skb_put(skb, rx_len); 1559 rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
1467 1560 dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len);
1468 dev_dbg(adapter->dev, "info: rx_len = %d skb->len = %d\n",
1469 rx_len, skb->len);
1470 1561
1471 if (mwifiex_sdio_card_to_host_mp_aggr(adapter, skb, 1562 if (mwifiex_sdio_card_to_host_mp_aggr(adapter, rx_len,
1472 port)) { 1563 port)) {
1473 dev_err(adapter->dev, "card_to_host_mpa failed:" 1564 dev_err(adapter->dev, "card_to_host_mpa failed:"
1474 " int status=%#x\n", sdio_ireg); 1565 " int status=%#x\n", sdio_ireg);
@@ -1736,6 +1827,7 @@ static int mwifiex_alloc_sdio_mpa_buffers(struct mwifiex_adapter *adapter,
1736 u32 mpa_tx_buf_size, u32 mpa_rx_buf_size) 1827 u32 mpa_tx_buf_size, u32 mpa_rx_buf_size)
1737{ 1828{
1738 struct sdio_mmc_card *card = adapter->card; 1829 struct sdio_mmc_card *card = adapter->card;
1830 u32 rx_buf_size;
1739 int ret = 0; 1831 int ret = 0;
1740 1832
1741 card->mpa_tx.buf = kzalloc(mpa_tx_buf_size, GFP_KERNEL); 1833 card->mpa_tx.buf = kzalloc(mpa_tx_buf_size, GFP_KERNEL);
@@ -1746,13 +1838,15 @@ static int mwifiex_alloc_sdio_mpa_buffers(struct mwifiex_adapter *adapter,
1746 1838
1747 card->mpa_tx.buf_size = mpa_tx_buf_size; 1839 card->mpa_tx.buf_size = mpa_tx_buf_size;
1748 1840
1749 card->mpa_rx.buf = kzalloc(mpa_rx_buf_size, GFP_KERNEL); 1841 rx_buf_size = max_t(u32, mpa_rx_buf_size,
1842 (u32)SDIO_MAX_AGGR_BUF_SIZE);
1843 card->mpa_rx.buf = kzalloc(rx_buf_size, GFP_KERNEL);
1750 if (!card->mpa_rx.buf) { 1844 if (!card->mpa_rx.buf) {
1751 ret = -1; 1845 ret = -1;
1752 goto error; 1846 goto error;
1753 } 1847 }
1754 1848
1755 card->mpa_rx.buf_size = mpa_rx_buf_size; 1849 card->mpa_rx.buf_size = rx_buf_size;
1756 1850
1757error: 1851error:
1758 if (ret) { 1852 if (ret) {
@@ -1951,6 +2045,7 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
1951 port, card->mp_data_port_mask); 2045 port, card->mp_data_port_mask);
1952} 2046}
1953 2047
2048static struct mwifiex_adapter *save_adapter;
1954static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter) 2049static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
1955{ 2050{
1956 struct sdio_mmc_card *card = adapter->card; 2051 struct sdio_mmc_card *card = adapter->card;
@@ -2019,10 +2114,8 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
2019} 2114}
2020 2115
2021/* This function dump firmware memory to file */ 2116/* This function dump firmware memory to file */
2022static void mwifiex_sdio_fw_dump_work(struct work_struct *work) 2117static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
2023{ 2118{
2024 struct mwifiex_adapter *adapter =
2025 container_of(work, struct mwifiex_adapter, iface_work);
2026 struct sdio_mmc_card *card = adapter->card; 2119 struct sdio_mmc_card *card = adapter->card;
2027 int ret = 0; 2120 int ret = 0;
2028 unsigned int reg, reg_start, reg_end; 2121 unsigned int reg, reg_start, reg_end;
@@ -2144,36 +2237,36 @@ done:
2144 2237
2145static void mwifiex_sdio_work(struct work_struct *work) 2238static void mwifiex_sdio_work(struct work_struct *work)
2146{ 2239{
2147 struct mwifiex_adapter *adapter =
2148 container_of(work, struct mwifiex_adapter, iface_work);
2149
2150 if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET,
2151 &adapter->iface_work_flags))
2152 mwifiex_sdio_card_reset_work(adapter);
2153 if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP, 2240 if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
2154 &adapter->iface_work_flags)) 2241 &iface_work_flags))
2155 mwifiex_sdio_fw_dump_work(work); 2242 mwifiex_sdio_fw_dump_work(save_adapter);
2243 if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET,
2244 &iface_work_flags))
2245 mwifiex_sdio_card_reset_work(save_adapter);
2156} 2246}
2157 2247
2248static DECLARE_WORK(sdio_work, mwifiex_sdio_work);
2158/* This function resets the card */ 2249/* This function resets the card */
2159static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter) 2250static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter)
2160{ 2251{
2161 if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &adapter->iface_work_flags)) 2252 save_adapter = adapter;
2253 if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags))
2162 return; 2254 return;
2163 2255
2164 set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &adapter->iface_work_flags); 2256 set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags);
2165 2257
2166 schedule_work(&adapter->iface_work); 2258 schedule_work(&sdio_work);
2167} 2259}
2168 2260
2169/* This function dumps FW information */ 2261/* This function dumps FW information */
2170static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter) 2262static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter)
2171{ 2263{
2172 if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags)) 2264 save_adapter = adapter;
2265 if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags))
2173 return; 2266 return;
2174 2267
2175 set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags); 2268 set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags);
2176 schedule_work(&adapter->iface_work); 2269 schedule_work(&sdio_work);
2177} 2270}
2178 2271
2179/* Function to dump SDIO function registers and SDIO scratch registers in case 2272/* Function to dump SDIO function registers and SDIO scratch registers in case
@@ -2289,9 +2382,9 @@ static struct mwifiex_if_ops sdio_ops = {
2289 .cmdrsp_complete = mwifiex_sdio_cmdrsp_complete, 2382 .cmdrsp_complete = mwifiex_sdio_cmdrsp_complete,
2290 .event_complete = mwifiex_sdio_event_complete, 2383 .event_complete = mwifiex_sdio_event_complete,
2291 .card_reset = mwifiex_sdio_card_reset, 2384 .card_reset = mwifiex_sdio_card_reset,
2292 .iface_work = mwifiex_sdio_work,
2293 .fw_dump = mwifiex_sdio_fw_dump, 2385 .fw_dump = mwifiex_sdio_fw_dump,
2294 .reg_dump = mwifiex_sdio_reg_dump, 2386 .reg_dump = mwifiex_sdio_reg_dump,
2387 .deaggr_pkt = mwifiex_deaggr_sdio_pkt,
2295}; 2388};
2296 2389
2297/* 2390/*
@@ -2328,6 +2421,7 @@ mwifiex_sdio_cleanup_module(void)
2328 2421
2329 /* Set the flag as user is removing this module. */ 2422 /* Set the flag as user is removing this module. */
2330 user_rmmod = 1; 2423 user_rmmod = 1;
2424 cancel_work_sync(&sdio_work);
2331 2425
2332 sdio_unregister_driver(&mwifiex_sdio); 2426 sdio_unregister_driver(&mwifiex_sdio);
2333} 2427}
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index c636944c77bc..6f645cf47369 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -67,6 +67,8 @@
67 67
68#define MWIFIEX_MP_AGGR_BUF_SIZE_16K (16384) 68#define MWIFIEX_MP_AGGR_BUF_SIZE_16K (16384)
69#define MWIFIEX_MP_AGGR_BUF_SIZE_32K (32768) 69#define MWIFIEX_MP_AGGR_BUF_SIZE_32K (32768)
70/* we leave one block of 256 bytes for DMA alignment*/
71#define MWIFIEX_MP_AGGR_BUF_SIZE_MAX (65280)
70 72
71/* Misc. Config Register : Auto Re-enable interrupts */ 73/* Misc. Config Register : Auto Re-enable interrupts */
72#define AUTO_RE_ENABLE_INT BIT(4) 74#define AUTO_RE_ENABLE_INT BIT(4)
@@ -458,8 +460,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
458 .max_ports = 32, 460 .max_ports = 32,
459 .mp_agg_pkt_limit = 16, 461 .mp_agg_pkt_limit = 16,
460 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, 462 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
461 .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K, 463 .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
462 .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K, 464 .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
463 .supports_sdio_new_mode = true, 465 .supports_sdio_new_mode = true,
464 .has_control_mask = false, 466 .has_control_mask = false,
465 .can_dump_fw = true, 467 .can_dump_fw = true,
@@ -571,9 +573,9 @@ mp_tx_aggr_port_limit_reached(struct sdio_mmc_card *card)
571 573
572/* Prepare to copy current packet from card to SDIO Rx aggregation buffer */ 574/* Prepare to copy current packet from card to SDIO Rx aggregation buffer */
573static inline void mp_rx_aggr_setup(struct sdio_mmc_card *card, 575static inline void mp_rx_aggr_setup(struct sdio_mmc_card *card,
574 struct sk_buff *skb, u8 port) 576 u16 rx_len, u8 port)
575{ 577{
576 card->mpa_rx.buf_len += skb->len; 578 card->mpa_rx.buf_len += rx_len;
577 579
578 if (!card->mpa_rx.pkt_cnt) 580 if (!card->mpa_rx.pkt_cnt)
579 card->mpa_rx.start_port = port; 581 card->mpa_rx.start_port = port;
@@ -586,8 +588,8 @@ static inline void mp_rx_aggr_setup(struct sdio_mmc_card *card,
586 else 588 else
587 card->mpa_rx.ports |= 1 << (card->mpa_rx.pkt_cnt + 1); 589 card->mpa_rx.ports |= 1 << (card->mpa_rx.pkt_cnt + 1);
588 } 590 }
589 card->mpa_rx.skb_arr[card->mpa_rx.pkt_cnt] = skb; 591 card->mpa_rx.skb_arr[card->mpa_rx.pkt_cnt] = NULL;
590 card->mpa_rx.len_arr[card->mpa_rx.pkt_cnt] = skb->len; 592 card->mpa_rx.len_arr[card->mpa_rx.pkt_cnt] = rx_len;
591 card->mpa_rx.pkt_cnt++; 593 card->mpa_rx.pkt_cnt++;
592} 594}
593#endif /* _MWIFIEX_SDIO_H */ 595#endif /* _MWIFIEX_SDIO_H */
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index f7d204ffd6e9..49422f2a5380 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -1370,22 +1370,29 @@ mwifiex_cmd_mef_cfg(struct mwifiex_private *priv,
1370 struct mwifiex_ds_mef_cfg *mef) 1370 struct mwifiex_ds_mef_cfg *mef)
1371{ 1371{
1372 struct host_cmd_ds_mef_cfg *mef_cfg = &cmd->params.mef_cfg; 1372 struct host_cmd_ds_mef_cfg *mef_cfg = &cmd->params.mef_cfg;
1373 struct mwifiex_fw_mef_entry *mef_entry = NULL;
1373 u8 *pos = (u8 *)mef_cfg; 1374 u8 *pos = (u8 *)mef_cfg;
1375 u16 i;
1374 1376
1375 cmd->command = cpu_to_le16(HostCmd_CMD_MEF_CFG); 1377 cmd->command = cpu_to_le16(HostCmd_CMD_MEF_CFG);
1376 1378
1377 mef_cfg->criteria = cpu_to_le32(mef->criteria); 1379 mef_cfg->criteria = cpu_to_le32(mef->criteria);
1378 mef_cfg->num_entries = cpu_to_le16(mef->num_entries); 1380 mef_cfg->num_entries = cpu_to_le16(mef->num_entries);
1379 pos += sizeof(*mef_cfg); 1381 pos += sizeof(*mef_cfg);
1380 mef_cfg->mef_entry->mode = mef->mef_entry->mode;
1381 mef_cfg->mef_entry->action = mef->mef_entry->action;
1382 pos += sizeof(*(mef_cfg->mef_entry));
1383 1382
1384 if (mwifiex_cmd_append_rpn_expression(priv, mef->mef_entry, &pos)) 1383 for (i = 0; i < mef->num_entries; i++) {
1385 return -1; 1384 mef_entry = (struct mwifiex_fw_mef_entry *)pos;
1385 mef_entry->mode = mef->mef_entry[i].mode;
1386 mef_entry->action = mef->mef_entry[i].action;
1387 pos += sizeof(*mef_cfg->mef_entry);
1388
1389 if (mwifiex_cmd_append_rpn_expression(priv,
1390 &mef->mef_entry[i], &pos))
1391 return -1;
1386 1392
1387 mef_cfg->mef_entry->exprsize = 1393 mef_entry->exprsize =
1388 cpu_to_le16(pos - mef_cfg->mef_entry->expr); 1394 cpu_to_le16(pos - mef_entry->expr);
1395 }
1389 cmd->size = cpu_to_le16((u16) (pos - (u8 *)mef_cfg) + S_DS_GEN); 1396 cmd->size = cpu_to_le16((u16) (pos - (u8 *)mef_cfg) + S_DS_GEN);
1390 1397
1391 return 0; 1398 return 0;
@@ -1664,6 +1671,25 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
1664 1671
1665 return 0; 1672 return 0;
1666} 1673}
1674
1675/* This function prepares command of sdio rx aggr info. */
1676static int mwifiex_cmd_sdio_rx_aggr_cfg(struct host_cmd_ds_command *cmd,
1677 u16 cmd_action, void *data_buf)
1678{
1679 struct host_cmd_sdio_sp_rx_aggr_cfg *cfg =
1680 &cmd->params.sdio_rx_aggr_cfg;
1681
1682 cmd->command = cpu_to_le16(HostCmd_CMD_SDIO_SP_RX_AGGR_CFG);
1683 cmd->size =
1684 cpu_to_le16(sizeof(struct host_cmd_sdio_sp_rx_aggr_cfg) +
1685 S_DS_GEN);
1686 cfg->action = cmd_action;
1687 if (cmd_action == HostCmd_ACT_GEN_SET)
1688 cfg->enable = *(u8 *)data_buf;
1689
1690 return 0;
1691}
1692
1667/* 1693/*
1668 * This function prepares the commands before sending them to the firmware. 1694 * This function prepares the commands before sending them to the firmware.
1669 * 1695 *
@@ -1901,6 +1927,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1901 ret = mwifiex_cmd_issue_chan_report_request(priv, cmd_ptr, 1927 ret = mwifiex_cmd_issue_chan_report_request(priv, cmd_ptr,
1902 data_buf); 1928 data_buf);
1903 break; 1929 break;
1930 case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
1931 ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action,
1932 data_buf);
1933 break;
1904 default: 1934 default:
1905 dev_err(priv->adapter->dev, 1935 dev_err(priv->adapter->dev,
1906 "PREP_CMD: unknown cmd- %#x\n", cmd_no); 1936 "PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -1940,6 +1970,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
1940 struct mwifiex_ds_auto_ds auto_ds; 1970 struct mwifiex_ds_auto_ds auto_ds;
1941 enum state_11d_t state_11d; 1971 enum state_11d_t state_11d;
1942 struct mwifiex_ds_11n_tx_cfg tx_cfg; 1972 struct mwifiex_ds_11n_tx_cfg tx_cfg;
1973 u8 sdio_sp_rx_aggr_enable;
1943 1974
1944 if (first_sta) { 1975 if (first_sta) {
1945 if (priv->adapter->iface_type == MWIFIEX_PCIE) { 1976 if (priv->adapter->iface_type == MWIFIEX_PCIE) {
@@ -1983,6 +2014,22 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
1983 if (ret) 2014 if (ret)
1984 return -1; 2015 return -1;
1985 2016
2017 /** Set SDIO Single Port RX Aggr Info */
2018 if (priv->adapter->iface_type == MWIFIEX_SDIO &&
2019 ISSUPP_SDIO_SPA_ENABLED(priv->adapter->fw_cap_info)) {
2020 sdio_sp_rx_aggr_enable = true;
2021 ret = mwifiex_send_cmd(priv,
2022 HostCmd_CMD_SDIO_SP_RX_AGGR_CFG,
2023 HostCmd_ACT_GEN_SET, 0,
2024 &sdio_sp_rx_aggr_enable,
2025 true);
2026 if (ret) {
2027 dev_err(priv->adapter->dev,
2028 "error while enabling SP aggregation..disable it");
2029 adapter->sdio_rx_aggr_enable = false;
2030 }
2031 }
2032
1986 /* Reconfigure tx buf size */ 2033 /* Reconfigure tx buf size */
1987 ret = mwifiex_send_cmd(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF, 2034 ret = mwifiex_send_cmd(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF,
1988 HostCmd_ACT_GEN_SET, 0, 2035 HostCmd_ACT_GEN_SET, 0,
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 5f8da5924666..88dc6b672ef4 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -90,6 +90,10 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
90 case HostCmd_CMD_MAC_CONTROL: 90 case HostCmd_CMD_MAC_CONTROL:
91 break; 91 break;
92 92
93 case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
94 dev_err(priv->adapter->dev, "SDIO RX single-port aggregation Not support\n");
95 break;
96
93 default: 97 default:
94 break; 98 break;
95 } 99 }
@@ -943,6 +947,20 @@ static int mwifiex_ret_cfg_data(struct mwifiex_private *priv,
943 return 0; 947 return 0;
944} 948}
945 949
950/** This Function handles the command response of sdio rx aggr */
951static int mwifiex_ret_sdio_rx_aggr_cfg(struct mwifiex_private *priv,
952 struct host_cmd_ds_command *resp)
953{
954 struct mwifiex_adapter *adapter = priv->adapter;
955 struct host_cmd_sdio_sp_rx_aggr_cfg *cfg =
956 &resp->params.sdio_rx_aggr_cfg;
957
958 adapter->sdio_rx_aggr_enable = cfg->enable;
959 adapter->sdio_rx_block_size = le16_to_cpu(cfg->block_size);
960
961 return 0;
962}
963
946/* 964/*
947 * This function handles the command responses. 965 * This function handles the command responses.
948 * 966 *
@@ -1124,6 +1142,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
1124 break; 1142 break;
1125 case HostCmd_CMD_CHAN_REPORT_REQUEST: 1143 case HostCmd_CMD_CHAN_REPORT_REQUEST:
1126 break; 1144 break;
1145 case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
1146 ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp);
1147 break;
1127 default: 1148 default:
1128 dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n", 1149 dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
1129 resp->command); 1150 resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 64c4223a1e1e..0dc7a1d3993d 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -312,7 +312,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
312 adapter->ps_state = PS_STATE_AWAKE; 312 adapter->ps_state = PS_STATE_AWAKE;
313 adapter->pm_wakeup_card_req = false; 313 adapter->pm_wakeup_card_req = false;
314 adapter->pm_wakeup_fw_try = false; 314 adapter->pm_wakeup_fw_try = false;
315 del_timer_sync(&adapter->wakeup_timer); 315 del_timer(&adapter->wakeup_timer);
316 break; 316 break;
317 } 317 }
318 if (!mwifiex_send_null_packet 318 if (!mwifiex_send_null_packet
@@ -327,7 +327,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
327 adapter->ps_state = PS_STATE_AWAKE; 327 adapter->ps_state = PS_STATE_AWAKE;
328 adapter->pm_wakeup_card_req = false; 328 adapter->pm_wakeup_card_req = false;
329 adapter->pm_wakeup_fw_try = false; 329 adapter->pm_wakeup_fw_try = false;
330 del_timer_sync(&adapter->wakeup_timer); 330 del_timer(&adapter->wakeup_timer);
331 331
332 break; 332 break;
333 333
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index ea4549f0e0b9..a245f444aeec 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -92,6 +92,12 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
92 else 92 else
93 head_ptr = mwifiex_process_sta_txpd(priv, skb); 93 head_ptr = mwifiex_process_sta_txpd(priv, skb);
94 94
95 if ((adapter->data_sent || adapter->tx_lock_flag) && head_ptr) {
96 skb_queue_tail(&adapter->tx_data_q, skb);
97 atomic_inc(&adapter->tx_queued);
98 return 0;
99 }
100
95 if (head_ptr) { 101 if (head_ptr) {
96 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) 102 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
97 local_tx_pd = (struct txpd *)(head_ptr + hroom); 103 local_tx_pd = (struct txpd *)(head_ptr + hroom);
@@ -142,6 +148,123 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
142 return ret; 148 return ret;
143} 149}
144 150
151static int mwifiex_host_to_card(struct mwifiex_adapter *adapter,
152 struct sk_buff *skb,
153 struct mwifiex_tx_param *tx_param)
154{
155 struct txpd *local_tx_pd = NULL;
156 u8 *head_ptr = skb->data;
157 int ret = 0;
158 struct mwifiex_private *priv;
159 struct mwifiex_txinfo *tx_info;
160
161 tx_info = MWIFIEX_SKB_TXCB(skb);
162 priv = mwifiex_get_priv_by_id(adapter, tx_info->bss_num,
163 tx_info->bss_type);
164 if (!priv) {
165 dev_err(adapter->dev, "data: priv not found. Drop TX packet\n");
166 adapter->dbg.num_tx_host_to_card_failure++;
167 mwifiex_write_data_complete(adapter, skb, 0, 0);
168 return ret;
169 }
170 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
171 if (adapter->iface_type == MWIFIEX_USB)
172 local_tx_pd = (struct txpd *)head_ptr;
173 else
174 local_tx_pd = (struct txpd *) (head_ptr +
175 INTF_HEADER_LEN);
176 }
177
178 if (adapter->iface_type == MWIFIEX_USB) {
179 adapter->data_sent = true;
180 ret = adapter->if_ops.host_to_card(adapter,
181 MWIFIEX_USB_EP_DATA,
182 skb, NULL);
183 } else {
184 ret = adapter->if_ops.host_to_card(adapter,
185 MWIFIEX_TYPE_DATA,
186 skb, tx_param);
187 }
188 switch (ret) {
189 case -ENOSR:
190 dev_err(adapter->dev, "data: -ENOSR is returned\n");
191 break;
192 case -EBUSY:
193 if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
194 (adapter->pps_uapsd_mode) &&
195 (adapter->tx_lock_flag)) {
196 priv->adapter->tx_lock_flag = false;
197 if (local_tx_pd)
198 local_tx_pd->flags = 0;
199 }
200 skb_queue_head(&adapter->tx_data_q, skb);
201 if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
202 atomic_add(tx_info->aggr_num, &adapter->tx_queued);
203 else
204 atomic_inc(&adapter->tx_queued);
205 dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
206 break;
207 case -1:
208 if (adapter->iface_type != MWIFIEX_PCIE)
209 adapter->data_sent = false;
210 dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
211 ret);
212 adapter->dbg.num_tx_host_to_card_failure++;
213 mwifiex_write_data_complete(adapter, skb, 0, ret);
214 break;
215 case -EINPROGRESS:
216 if (adapter->iface_type != MWIFIEX_PCIE)
217 adapter->data_sent = false;
218 break;
219 case 0:
220 mwifiex_write_data_complete(adapter, skb, 0, ret);
221 break;
222 default:
223 break;
224 }
225 return ret;
226}
227
228static int
229mwifiex_dequeue_tx_queue(struct mwifiex_adapter *adapter)
230{
231 struct sk_buff *skb, *skb_next;
232 struct mwifiex_txinfo *tx_info;
233 struct mwifiex_tx_param tx_param;
234
235 skb = skb_dequeue(&adapter->tx_data_q);
236 if (!skb)
237 return -1;
238
239 tx_info = MWIFIEX_SKB_TXCB(skb);
240 if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
241 atomic_sub(tx_info->aggr_num, &adapter->tx_queued);
242 else
243 atomic_dec(&adapter->tx_queued);
244
245 if (!skb_queue_empty(&adapter->tx_data_q))
246 skb_next = skb_peek(&adapter->tx_data_q);
247 else
248 skb_next = NULL;
249 tx_param.next_pkt_len = ((skb_next) ? skb_next->len : 0);
250 if (!tx_param.next_pkt_len) {
251 if (!mwifiex_wmm_lists_empty(adapter))
252 tx_param.next_pkt_len = 1;
253 }
254 return mwifiex_host_to_card(adapter, skb, &tx_param);
255}
256
257void
258mwifiex_process_tx_queue(struct mwifiex_adapter *adapter)
259{
260 do {
261 if (adapter->data_sent || adapter->tx_lock_flag)
262 break;
263 if (mwifiex_dequeue_tx_queue(adapter))
264 break;
265 } while (!skb_queue_empty(&adapter->tx_data_q));
266}
267
145/* 268/*
146 * Packet send completion callback handler. 269 * Packet send completion callback handler.
147 * 270 *
@@ -179,8 +302,11 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
179 priv->stats.tx_errors++; 302 priv->stats.tx_errors++;
180 } 303 }
181 304
182 if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) 305 if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) {
183 atomic_dec_return(&adapter->pending_bridged_pkts); 306 atomic_dec_return(&adapter->pending_bridged_pkts);
307 if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
308 goto done;
309 }
184 310
185 if (aggr) 311 if (aggr)
186 /* For skb_aggr, do not wake up tx queue */ 312 /* For skb_aggr, do not wake up tx queue */
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 223873022ffe..fd8027f200a0 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -193,7 +193,7 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
193 dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n", 193 dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n",
194 recv_length, status); 194 recv_length, status);
195 if (status == -EINPROGRESS) { 195 if (status == -EINPROGRESS) {
196 queue_work(adapter->workqueue, &adapter->main_work); 196 mwifiex_queue_main_work(adapter);
197 197
198 /* urb for data_ep is re-submitted now; 198 /* urb for data_ep is re-submitted now;
199 * urb for cmd_ep will be re-submitted in callback 199 * urb for cmd_ep will be re-submitted in callback
@@ -262,7 +262,7 @@ static void mwifiex_usb_tx_complete(struct urb *urb)
262 urb->status ? -1 : 0); 262 urb->status ? -1 : 0);
263 } 263 }
264 264
265 queue_work(adapter->workqueue, &adapter->main_work); 265 mwifiex_queue_main_work(adapter);
266 266
267 return; 267 return;
268} 268}
@@ -1006,7 +1006,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
1006{ 1006{
1007 /* Simulation of HS_AWAKE event */ 1007 /* Simulation of HS_AWAKE event */
1008 adapter->pm_wakeup_fw_try = false; 1008 adapter->pm_wakeup_fw_try = false;
1009 del_timer_sync(&adapter->wakeup_timer); 1009 del_timer(&adapter->wakeup_timer);
1010 adapter->pm_wakeup_card_req = false; 1010 adapter->pm_wakeup_card_req = false;
1011 adapter->ps_state = PS_STATE_AWAKE; 1011 adapter->ps_state = PS_STATE_AWAKE;
1012 1012
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 2148a573396b..b8a45872354d 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -632,7 +632,7 @@ void mwifiex_hist_data_reset(struct mwifiex_private *priv)
632 atomic_set(&phist_data->sig_str[ix], 0); 632 atomic_set(&phist_data->sig_str[ix], 0);
633} 633}
634 634
635void *mwifiex_alloc_rx_buf(int rx_len, gfp_t flags) 635void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags)
636{ 636{
637 struct sk_buff *skb; 637 struct sk_buff *skb;
638 int buf_len, pad; 638 int buf_len, pad;
@@ -653,4 +653,4 @@ void *mwifiex_alloc_rx_buf(int rx_len, gfp_t flags)
653 653
654 return skb; 654 return skb;
655} 655}
656EXPORT_SYMBOL_GPL(mwifiex_alloc_rx_buf); 656EXPORT_SYMBOL_GPL(mwifiex_alloc_dma_align_buf);
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 0cd4f6bed9fc..b2e99569a0f8 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -157,6 +157,8 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
157 157
158 ra_list->is_11n_enabled = 0; 158 ra_list->is_11n_enabled = 0;
159 ra_list->tdls_link = false; 159 ra_list->tdls_link = false;
160 ra_list->ba_status = BA_SETUP_NONE;
161 ra_list->amsdu_in_ampdu = false;
160 if (!mwifiex_queuing_ra_based(priv)) { 162 if (!mwifiex_queuing_ra_based(priv)) {
161 if (mwifiex_get_tdls_link_status(priv, ra) == 163 if (mwifiex_get_tdls_link_status(priv, ra) ==
162 TDLS_SETUP_COMPLETE) { 164 TDLS_SETUP_COMPLETE) {
@@ -574,7 +576,7 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
574 * This function retrieves a particular RA list node, matching with the 576 * This function retrieves a particular RA list node, matching with the
575 * given TID and RA address. 577 * given TID and RA address.
576 */ 578 */
577static struct mwifiex_ra_list_tbl * 579struct mwifiex_ra_list_tbl *
578mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid, 580mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
579 const u8 *ra_addr) 581 const u8 *ra_addr)
580{ 582{
@@ -942,14 +944,11 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
942 struct mwifiex_ra_list_tbl *ptr; 944 struct mwifiex_ra_list_tbl *ptr;
943 struct mwifiex_tid_tbl *tid_ptr; 945 struct mwifiex_tid_tbl *tid_ptr;
944 atomic_t *hqp; 946 atomic_t *hqp;
945 unsigned long flags_bss, flags_ra; 947 unsigned long flags_ra;
946 int i, j; 948 int i, j;
947 949
948 /* check the BSS with highest priority first */ 950 /* check the BSS with highest priority first */
949 for (j = adapter->priv_num - 1; j >= 0; --j) { 951 for (j = adapter->priv_num - 1; j >= 0; --j) {
950 spin_lock_irqsave(&adapter->bss_prio_tbl[j].bss_prio_lock,
951 flags_bss);
952
953 /* iterate over BSS with the equal priority */ 952 /* iterate over BSS with the equal priority */
954 list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur, 953 list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
955 &adapter->bss_prio_tbl[j].bss_prio_head, 954 &adapter->bss_prio_tbl[j].bss_prio_head,
@@ -985,19 +984,15 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
985 } 984 }
986 } 985 }
987 986
988 spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
989 flags_bss);
990 } 987 }
991 988
992 return NULL; 989 return NULL;
993 990
994found: 991found:
995 /* holds bss_prio_lock / ra_list_spinlock */ 992 /* holds ra_list_spinlock */
996 if (atomic_read(hqp) > i) 993 if (atomic_read(hqp) > i)
997 atomic_set(hqp, i); 994 atomic_set(hqp, i);
998 spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra); 995 spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra);
999 spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
1000 flags_bss);
1001 996
1002 *priv = priv_tmp; 997 *priv = priv_tmp;
1003 *tid = tos_to_tid[i]; 998 *tid = tos_to_tid[i];
@@ -1179,6 +1174,14 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
1179 1174
1180 skb = skb_dequeue(&ptr->skb_head); 1175 skb = skb_dequeue(&ptr->skb_head);
1181 1176
1177 if (adapter->data_sent || adapter->tx_lock_flag) {
1178 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1179 ra_list_flags);
1180 skb_queue_tail(&adapter->tx_data_q, skb);
1181 atomic_inc(&adapter->tx_queued);
1182 return;
1183 }
1184
1182 if (!skb_queue_empty(&ptr->skb_head)) 1185 if (!skb_queue_empty(&ptr->skb_head))
1183 skb_next = skb_peek(&ptr->skb_head); 1186 skb_next = skb_peek(&ptr->skb_head);
1184 else 1187 else
@@ -1276,13 +1279,13 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1276 } 1279 }
1277 1280
1278 if (!ptr->is_11n_enabled || 1281 if (!ptr->is_11n_enabled ||
1279 mwifiex_is_ba_stream_setup(priv, ptr, tid) || 1282 ptr->ba_status ||
1280 priv->wps.session_enable) { 1283 priv->wps.session_enable) {
1281 if (ptr->is_11n_enabled && 1284 if (ptr->is_11n_enabled &&
1282 mwifiex_is_ba_stream_setup(priv, ptr, tid) && 1285 ptr->ba_status &&
1283 mwifiex_is_amsdu_in_ampdu_allowed(priv, ptr, tid) && 1286 ptr->amsdu_in_ampdu &&
1284 mwifiex_is_amsdu_allowed(priv, tid) && 1287 mwifiex_is_amsdu_allowed(priv, tid) &&
1285 mwifiex_is_11n_aggragation_possible(priv, ptr, 1288 mwifiex_is_11n_aggragation_possible(priv, ptr,
1286 adapter->tx_buf_size)) 1289 adapter->tx_buf_size))
1287 mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags); 1290 mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
1288 /* ra_list_spinlock has been freed in 1291 /* ra_list_spinlock has been freed in
@@ -1329,11 +1332,16 @@ void
1329mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter) 1332mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
1330{ 1333{
1331 do { 1334 do {
1332 /* Check if busy */
1333 if (adapter->data_sent || adapter->tx_lock_flag)
1334 break;
1335
1336 if (mwifiex_dequeue_tx_packet(adapter)) 1335 if (mwifiex_dequeue_tx_packet(adapter))
1337 break; 1336 break;
1337 if (adapter->iface_type != MWIFIEX_SDIO) {
1338 if (adapter->data_sent ||
1339 adapter->tx_lock_flag)
1340 break;
1341 } else {
1342 if (atomic_read(&adapter->tx_queued) >=
1343 MWIFIEX_MAX_PKTS_TXQ)
1344 break;
1345 }
1338 } while (!mwifiex_wmm_lists_empty(adapter)); 1346 } while (!mwifiex_wmm_lists_empty(adapter));
1339} 1347}
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 569bd73f33c5..48ece0b35591 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -127,4 +127,6 @@ mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
127 const u8 *ra_addr); 127 const u8 *ra_addr);
128u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid); 128u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
129 129
130struct mwifiex_ra_list_tbl *mwifiex_wmm_get_ralist_node(struct mwifiex_private
131 *priv, u8 tid, const u8 *ra_addr);
130#endif /* !_MWIFIEX_WMM_H_ */ 132#endif /* !_MWIFIEX_WMM_H_ */
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 8444313eabe2..6ec2466b52b6 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -233,6 +233,7 @@ static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
233{ 233{
234 __le32 *reg; 234 __le32 *reg;
235 u32 fw_mode; 235 u32 fw_mode;
236 int ret;
236 237
237 reg = kmalloc(sizeof(*reg), GFP_KERNEL); 238 reg = kmalloc(sizeof(*reg), GFP_KERNEL);
238 if (reg == NULL) 239 if (reg == NULL)
@@ -242,11 +243,14 @@ static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
242 * magic value USB_MODE_AUTORUN (0x11) to the device, thus the 243 * magic value USB_MODE_AUTORUN (0x11) to the device, thus the
243 * returned value would be invalid. 244 * returned value would be invalid.
244 */ 245 */
245 rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE, 246 ret = rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
246 USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN, 247 USB_VENDOR_REQUEST_IN, 0,
247 reg, sizeof(*reg), REGISTER_TIMEOUT_FIRMWARE); 248 USB_MODE_AUTORUN, reg, sizeof(*reg),
249 REGISTER_TIMEOUT_FIRMWARE);
248 fw_mode = le32_to_cpu(*reg); 250 fw_mode = le32_to_cpu(*reg);
249 kfree(reg); 251 kfree(reg);
252 if (ret < 0)
253 return ret;
250 254
251 if ((fw_mode & 0x00000003) == 2) 255 if ((fw_mode & 0x00000003) == 2)
252 return 1; 256 return 1;
@@ -289,6 +293,7 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
289 if (retval) { 293 if (retval) {
290 rt2x00_info(rt2x00dev, 294 rt2x00_info(rt2x00dev,
291 "Firmware loading not required - NIC in AutoRun mode\n"); 295 "Firmware loading not required - NIC in AutoRun mode\n");
296 __clear_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags);
292 } else { 297 } else {
293 rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, 298 rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
294 data + offset, length); 299 data + offset, length);
@@ -374,7 +379,6 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
374static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev) 379static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
375{ 380{
376 rt2800_disable_radio(rt2x00dev); 381 rt2800_disable_radio(rt2x00dev);
377 rt2x00usb_disable_radio(rt2x00dev);
378} 382}
379 383
380static int rt2800usb_set_state(struct rt2x00_dev *rt2x00dev, 384static int rt2800usb_set_state(struct rt2x00_dev *rt2x00dev,
@@ -1040,6 +1044,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
1040 { USB_DEVICE(0x07d1, 0x3c17) }, 1044 { USB_DEVICE(0x07d1, 0x3c17) },
1041 { USB_DEVICE(0x2001, 0x3317) }, 1045 { USB_DEVICE(0x2001, 0x3317) },
1042 { USB_DEVICE(0x2001, 0x3c1b) }, 1046 { USB_DEVICE(0x2001, 0x3c1b) },
1047 { USB_DEVICE(0x2001, 0x3c25) },
1043 /* Draytek */ 1048 /* Draytek */
1044 { USB_DEVICE(0x07fa, 0x7712) }, 1049 { USB_DEVICE(0x07fa, 0x7712) },
1045 /* DVICO */ 1050 /* DVICO */
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 8f85fbd5f237..569363da00a2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -199,7 +199,7 @@ static inline void rt2x00usb_register_read(struct rt2x00_dev *rt2x00dev,
199 const unsigned int offset, 199 const unsigned int offset,
200 u32 *value) 200 u32 *value)
201{ 201{
202 __le32 reg; 202 __le32 reg = 0;
203 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, 203 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
204 USB_VENDOR_REQUEST_IN, offset, 204 USB_VENDOR_REQUEST_IN, offset,
205 &reg, sizeof(reg)); 205 &reg, sizeof(reg));
@@ -219,7 +219,7 @@ static inline void rt2x00usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
219 const unsigned int offset, 219 const unsigned int offset,
220 u32 *value) 220 u32 *value)
221{ 221{
222 __le32 reg; 222 __le32 reg = 0;
223 rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ, 223 rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ,
224 USB_VENDOR_REQUEST_IN, offset, 224 USB_VENDOR_REQUEST_IN, offset,
225 &reg, sizeof(reg), REGISTER_TIMEOUT); 225 &reg, sizeof(reg), REGISTER_TIMEOUT);
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index dee4ac2f27e2..ff9a4bfd4515 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -123,7 +123,6 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
123u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx); 123u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
124 124
125void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb); 125void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
126void rtl_watch_dog_timer_callback(unsigned long data);
127int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 126int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
128 struct ieee80211_sta *sta, u16 tid, u16 *ssn); 127 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
129int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 128int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index a62170ea0481..8c45cf44ce24 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1124,12 +1124,22 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
1124 /*This is for new trx flow*/ 1124 /*This is for new trx flow*/
1125 struct rtl_tx_buffer_desc *pbuffer_desc = NULL; 1125 struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
1126 u8 temp_one = 1; 1126 u8 temp_one = 1;
1127 u8 *entry;
1127 1128
1128 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); 1129 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
1129 ring = &rtlpci->tx_ring[BEACON_QUEUE]; 1130 ring = &rtlpci->tx_ring[BEACON_QUEUE];
1130 pskb = __skb_dequeue(&ring->queue); 1131 pskb = __skb_dequeue(&ring->queue);
1131 if (pskb) 1132 if (rtlpriv->use_new_trx_flow)
1133 entry = (u8 *)(&ring->buffer_desc[ring->idx]);
1134 else
1135 entry = (u8 *)(&ring->desc[ring->idx]);
1136 if (pskb) {
1137 pci_unmap_single(rtlpci->pdev,
1138 rtlpriv->cfg->ops->get_desc(
1139 (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
1140 pskb->len, PCI_DMA_TODEVICE);
1132 kfree_skb(pskb); 1141 kfree_skb(pskb);
1142 }
1133 1143
1134 /*NB: the beacon data buffer must be 32-bit aligned. */ 1144 /*NB: the beacon data buffer must be 32-bit aligned. */
1135 pskb = ieee80211_beacon_get(hw, mac->vif); 1145 pskb = ieee80211_beacon_get(hw, mac->vif);
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
index edc2cbb6253c..86ce5b1930e6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -30,6 +30,7 @@
30#include "../cam.h" 30#include "../cam.h"
31#include "../ps.h" 31#include "../ps.h"
32#include "../pci.h" 32#include "../pci.h"
33#include "../pwrseqcmd.h"
33#include "reg.h" 34#include "reg.h"
34#include "def.h" 35#include "def.h"
35#include "phy.h" 36#include "phy.h"
@@ -885,7 +886,7 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
885 886
886 rtl_write_word(rtlpriv, REG_CR, 0x2ff); 887 rtl_write_word(rtlpriv, REG_CR, 0x2ff);
887 rtl_write_byte(rtlpriv, REG_CR+1, 0x06); 888 rtl_write_byte(rtlpriv, REG_CR+1, 0x06);
888 rtl_write_byte(rtlpriv, REG_CR+2, 0x00); 889 rtl_write_byte(rtlpriv, MSR, 0x00);
889 890
890 if (!rtlhal->mac_func_enable) { 891 if (!rtlhal->mac_func_enable) {
891 if (_rtl88ee_llt_table_init(hw) == false) { 892 if (_rtl88ee_llt_table_init(hw) == false) {
@@ -1277,7 +1278,7 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw,
1277 mode); 1278 mode);
1278 } 1279 }
1279 1280
1280 rtl_write_byte(rtlpriv, (MSR), bt_msr | mode); 1281 rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
1281 rtlpriv->cfg->ops->led_control(hw, ledaction); 1282 rtlpriv->cfg->ops->led_control(hw, ledaction);
1282 if (mode == MSR_AP) 1283 if (mode == MSR_AP)
1283 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); 1284 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 0c20dd74d6ec..d310d55d800e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1364,7 +1364,7 @@ static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
1364 "Network type %d not supported!\n", type); 1364 "Network type %d not supported!\n", type);
1365 goto error_out; 1365 goto error_out;
1366 } 1366 }
1367 rtl_write_byte(rtlpriv, (MSR), bt_msr); 1367 rtl_write_byte(rtlpriv, MSR, bt_msr);
1368 rtlpriv->cfg->ops->led_control(hw, ledaction); 1368 rtlpriv->cfg->ops->led_control(hw, ledaction);
1369 if ((bt_msr & MSR_MASK) == MSR_AP) 1369 if ((bt_msr & MSR_MASK) == MSR_AP)
1370 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); 1370 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
@@ -1471,8 +1471,7 @@ static void _InitBeaconParameters(struct ieee80211_hw *hw)
1471 rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF); 1471 rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
1472} 1472}
1473 1473
1474static void _beacon_function_enable(struct ieee80211_hw *hw, bool Enable, 1474static void _beacon_function_enable(struct ieee80211_hw *hw)
1475 bool Linked)
1476{ 1475{
1477 struct rtl_priv *rtlpriv = rtl_priv(hw); 1476 struct rtl_priv *rtlpriv = rtl_priv(hw);
1478 1477
@@ -1517,7 +1516,7 @@ void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw)
1517 rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x50); 1516 rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x50);
1518 rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x50); 1517 rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x50);
1519 } 1518 }
1520 _beacon_function_enable(hw, true, true); 1519 _beacon_function_enable(hw);
1521} 1520}
1522 1521
1523void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw) 1522void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 133e395b7401..adb810794eef 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -497,7 +497,7 @@ int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
497 "Network type %d not supported!\n", type); 497 "Network type %d not supported!\n", type);
498 return -EOPNOTSUPP; 498 return -EOPNOTSUPP;
499 } 499 }
500 rtl_write_byte(rtlpriv, (REG_CR + 2), value); 500 rtl_write_byte(rtlpriv, MSR, value);
501 return 0; 501 return 0;
502} 502}
503 503
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 90a714c189a8..23806c243a53 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
321 {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ 321 {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
322 {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ 322 {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
323 {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/ 323 {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
324 {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
324 {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/ 325 {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
325 {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ 326 {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
326 {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ 327 {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
@@ -377,6 +378,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
377 {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/ 378 {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
378 {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ 379 {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
379 {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ 380 {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
381 {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */
380 {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/ 382 {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
381 {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/ 383 {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
382 {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/ 384 {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index 01bcc2d218dc..f49b60d31450 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -1126,7 +1126,7 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
1126 break; 1126 break;
1127 1127
1128 } 1128 }
1129 rtl_write_byte(rtlpriv, REG_CR + 2, bt_msr); 1129 rtl_write_byte(rtlpriv, MSR, bt_msr);
1130 rtlpriv->cfg->ops->led_control(hw, ledaction); 1130 rtlpriv->cfg->ops->led_control(hw, ledaction);
1131 if ((bt_msr & MSR_MASK) == MSR_AP) 1131 if ((bt_msr & MSR_MASK) == MSR_AP)
1132 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); 1132 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
index db230a3f0137..da0a6125f314 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
@@ -1510,7 +1510,7 @@ static int _rtl92ee_set_media_status(struct ieee80211_hw *hw,
1510 mode); 1510 mode);
1511 } 1511 }
1512 1512
1513 rtl_write_byte(rtlpriv, (MSR), bt_msr | mode); 1513 rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
1514 rtlpriv->cfg->ops->led_control(hw, ledaction); 1514 rtlpriv->cfg->ops->led_control(hw, ledaction);
1515 if (mode == MSR_AP) 1515 if (mode == MSR_AP)
1516 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); 1516 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index dee88a80bee1..12b0978ba4fa 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -1204,7 +1204,7 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw,
1204 if (type != NL80211_IFTYPE_AP && 1204 if (type != NL80211_IFTYPE_AP &&
1205 rtlpriv->mac80211.link_state < MAC80211_LINKED) 1205 rtlpriv->mac80211.link_state < MAC80211_LINKED)
1206 bt_msr = rtl_read_byte(rtlpriv, MSR) & ~MSR_LINK_MASK; 1206 bt_msr = rtl_read_byte(rtlpriv, MSR) & ~MSR_LINK_MASK;
1207 rtl_write_byte(rtlpriv, (MSR), bt_msr); 1207 rtl_write_byte(rtlpriv, MSR, bt_msr);
1208 1208
1209 temp = rtl_read_dword(rtlpriv, TCR); 1209 temp = rtl_read_dword(rtlpriv, TCR);
1210 rtl_write_dword(rtlpriv, TCR, temp & (~BIT(8))); 1210 rtl_write_dword(rtlpriv, TCR, temp & (~BIT(8)));
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
index b3b094759f6d..67bb47d77b68 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
@@ -1183,7 +1183,7 @@ static int _rtl8723e_set_media_status(struct ieee80211_hw *hw,
1183 mode); 1183 mode);
1184 } 1184 }
1185 1185
1186 rtl_write_byte(rtlpriv, (MSR), bt_msr | mode); 1186 rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
1187 rtlpriv->cfg->ops->led_control(hw, ledaction); 1187 rtlpriv->cfg->ops->led_control(hw, ledaction);
1188 if (mode == MSR_AP) 1188 if (mode == MSR_AP)
1189 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); 1189 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
index b46998341c40..b681af3c7a35 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
@@ -1558,7 +1558,7 @@ static int _rtl8723be_set_media_status(struct ieee80211_hw *hw,
1558 mode); 1558 mode);
1559 } 1559 }
1560 1560
1561 rtl_write_byte(rtlpriv, (MSR), bt_msr | mode); 1561 rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
1562 rtlpriv->cfg->ops->led_control(hw, ledaction); 1562 rtlpriv->cfg->ops->led_control(hw, ledaction);
1563 if (mode == MSR_AP) 1563 if (mode == MSR_AP)
1564 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); 1564 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
index 2a0a71bac00c..8704eee9f3a4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
@@ -423,7 +423,7 @@ void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
423 *((u16 *)(val+4)) = rtl_read_word(rtlpriv, REG_BSSID+4); 423 *((u16 *)(val+4)) = rtl_read_word(rtlpriv, REG_BSSID+4);
424 break; 424 break;
425 case HW_VAR_MEDIA_STATUS: 425 case HW_VAR_MEDIA_STATUS:
426 val[0] = rtl_read_byte(rtlpriv, REG_CR+2) & 0x3; 426 val[0] = rtl_read_byte(rtlpriv, MSR) & 0x3;
427 break; 427 break;
428 case HW_VAR_SLOT_TIME: 428 case HW_VAR_SLOT_TIME:
429 *((u8 *)(val)) = mac->slot_time; 429 *((u8 *)(val)) = mac->slot_time;
@@ -2178,7 +2178,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
2178 return 1; 2178 return 1;
2179 } 2179 }
2180 2180
2181 rtl_write_byte(rtlpriv, (MSR), bt_msr); 2181 rtl_write_byte(rtlpriv, MSR, bt_msr);
2182 rtlpriv->cfg->ops->led_control(hw, ledaction); 2182 rtlpriv->cfg->ops->led_control(hw, ledaction);
2183 if ((bt_msr & 0xfc) == MSR_AP) 2183 if ((bt_msr & 0xfc) == MSR_AP)
2184 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); 2184 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
index 72af4b9ee32b..174743aef943 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
@@ -64,6 +64,20 @@ static u16 odm_cfo(char value)
64 return ret_val; 64 return ret_val;
65} 65}
66 66
67static u8 _rtl8821ae_evm_dbm_jaguar(char value)
68{
69 char ret_val = value;
70
71 /* -33dB~0dB to 33dB ~ 0dB*/
72 if (ret_val == -128)
73 ret_val = 127;
74 else if (ret_val < 0)
75 ret_val = 0 - ret_val;
76
77 ret_val = ret_val >> 1;
78 return ret_val;
79}
80
67static void query_rxphystatus(struct ieee80211_hw *hw, 81static void query_rxphystatus(struct ieee80211_hw *hw,
68 struct rtl_stats *pstatus, u8 *pdesc, 82 struct rtl_stats *pstatus, u8 *pdesc,
69 struct rx_fwinfo_8821ae *p_drvinfo, 83 struct rx_fwinfo_8821ae *p_drvinfo,
@@ -246,7 +260,7 @@ static void query_rxphystatus(struct ieee80211_hw *hw,
246 260
247 for (i = 0; i < max_spatial_stream; i++) { 261 for (i = 0; i < max_spatial_stream; i++) {
248 evm = rtl_evm_db_to_percentage(p_phystrpt->rxevm[i]); 262 evm = rtl_evm_db_to_percentage(p_phystrpt->rxevm[i]);
249 evmdbm = rtl_evm_dbm_jaguar(p_phystrpt->rxevm[i]); 263 evmdbm = _rtl8821ae_evm_dbm_jaguar(p_phystrpt->rxevm[i]);
250 264
251 if (bpacket_match_bssid) { 265 if (bpacket_match_bssid) {
252 /* Fill value in RFD, Get the first 266 /* Fill value in RFD, Get the first
diff --git a/drivers/net/wireless/rtlwifi/stats.c b/drivers/net/wireless/rtlwifi/stats.c
index 2d0736a09fc0..d8b30690b00d 100644
--- a/drivers/net/wireless/rtlwifi/stats.c
+++ b/drivers/net/wireless/rtlwifi/stats.c
@@ -39,15 +39,8 @@ EXPORT_SYMBOL(rtl_query_rxpwrpercentage);
39 39
40u8 rtl_evm_db_to_percentage(char value) 40u8 rtl_evm_db_to_percentage(char value)
41{ 41{
42 char ret_val; 42 char ret_val = clamp(-value, 0, 33) * 3;
43 ret_val = value;
44 43
45 if (ret_val >= 0)
46 ret_val = 0;
47 if (ret_val <= -33)
48 ret_val = -33;
49 ret_val = 0 - ret_val;
50 ret_val *= 3;
51 if (ret_val == 99) 44 if (ret_val == 99)
52 ret_val = 100; 45 ret_val = 100;
53 46
@@ -55,21 +48,6 @@ u8 rtl_evm_db_to_percentage(char value)
55} 48}
56EXPORT_SYMBOL(rtl_evm_db_to_percentage); 49EXPORT_SYMBOL(rtl_evm_db_to_percentage);
57 50
58u8 rtl_evm_dbm_jaguar(char value)
59{
60 char ret_val = value;
61
62 /* -33dB~0dB to 33dB ~ 0dB*/
63 if (ret_val == -128)
64 ret_val = 127;
65 else if (ret_val < 0)
66 ret_val = 0 - ret_val;
67
68 ret_val = ret_val >> 1;
69 return ret_val;
70}
71EXPORT_SYMBOL(rtl_evm_dbm_jaguar);
72
73static long rtl_translate_todbm(struct ieee80211_hw *hw, 51static long rtl_translate_todbm(struct ieee80211_hw *hw,
74 u8 signal_strength_index) 52 u8 signal_strength_index)
75{ 53{
diff --git a/drivers/net/wireless/rtlwifi/stats.h b/drivers/net/wireless/rtlwifi/stats.h
index aa4eec80ccf7..2b57dffef572 100644
--- a/drivers/net/wireless/rtlwifi/stats.h
+++ b/drivers/net/wireless/rtlwifi/stats.h
@@ -35,7 +35,6 @@
35 35
36u8 rtl_query_rxpwrpercentage(char antpower); 36u8 rtl_query_rxpwrpercentage(char antpower);
37u8 rtl_evm_db_to_percentage(char value); 37u8 rtl_evm_db_to_percentage(char value);
38u8 rtl_evm_dbm_jaguar(char value);
39long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig); 38long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig);
40void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer, 39void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer,
41 struct rtl_stats *pstatus); 40 struct rtl_stats *pstatus);
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index c93fae95baac..5fbd2230f372 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -139,7 +139,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
139WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u"); 139WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
140WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u"); 140WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
141 141
142WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u"); 142WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
143 143
144WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate, 144WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
145 AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE); 145 AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index 0f2cfb0d2a9e..bf14676e6515 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -26,8 +26,8 @@
26 26
27#include "wlcore.h" 27#include "wlcore.h"
28 28
29int wl1271_format_buffer(char __user *userbuf, size_t count, 29__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count,
30 loff_t *ppos, char *fmt, ...); 30 loff_t *ppos, char *fmt, ...);
31 31
32int wl1271_debugfs_init(struct wl1271 *wl); 32int wl1271_debugfs_init(struct wl1271 *wl);
33void wl1271_debugfs_exit(struct wl1271 *wl); 33void wl1271_debugfs_exit(struct wl1271 *wl);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e9b960f0ff32..720aaf6313d2 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1008,8 +1008,7 @@ err:
1008 1008
1009static int xennet_change_mtu(struct net_device *dev, int mtu) 1009static int xennet_change_mtu(struct net_device *dev, int mtu)
1010{ 1010{
1011 int max = xennet_can_sg(dev) ? 1011 int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1012 XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
1013 1012
1014 if (mtu > max) 1013 if (mtu > max)
1015 return -EINVAL; 1014 return -EINVAL;
@@ -1279,8 +1278,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1279 netdev->ethtool_ops = &xennet_ethtool_ops; 1278 netdev->ethtool_ops = &xennet_ethtool_ops;
1280 SET_NETDEV_DEV(netdev, &dev->dev); 1279 SET_NETDEV_DEV(netdev, &dev->dev);
1281 1280
1282 netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
1283
1284 np->netdev = netdev; 1281 np->netdev = netdev;
1285 1282
1286 netif_carrier_off(netdev); 1283 netif_carrier_off(netdev);
diff --git a/drivers/of/address.c b/drivers/of/address.c
index ad2906919d45..78a7dcbec7d8 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -450,12 +450,17 @@ static struct of_bus *of_match_bus(struct device_node *np)
450 return NULL; 450 return NULL;
451} 451}
452 452
453static int of_empty_ranges_quirk(void) 453static int of_empty_ranges_quirk(struct device_node *np)
454{ 454{
455 if (IS_ENABLED(CONFIG_PPC)) { 455 if (IS_ENABLED(CONFIG_PPC)) {
456 /* To save cycles, we cache the result */ 456 /* To save cycles, we cache the result for global "Mac" setting */
457 static int quirk_state = -1; 457 static int quirk_state = -1;
458 458
459 /* PA-SEMI sdc DT bug */
460 if (of_device_is_compatible(np, "1682m-sdc"))
461 return true;
462
463 /* Make quirk cached */
459 if (quirk_state < 0) 464 if (quirk_state < 0)
460 quirk_state = 465 quirk_state =
461 of_machine_is_compatible("Power Macintosh") || 466 of_machine_is_compatible("Power Macintosh") ||
@@ -490,7 +495,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
490 * This code is only enabled on powerpc. --gcl 495 * This code is only enabled on powerpc. --gcl
491 */ 496 */
492 ranges = of_get_property(parent, rprop, &rlen); 497 ranges = of_get_property(parent, rprop, &rlen);
493 if (ranges == NULL && !of_empty_ranges_quirk()) { 498 if (ranges == NULL && !of_empty_ranges_quirk(parent)) {
494 pr_debug("OF: no ranges; cannot translate\n"); 499 pr_debug("OF: no ranges; cannot translate\n");
495 return 1; 500 return 1;
496 } 501 }
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 9205f433573c..18198316b6cf 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -1572,6 +1572,10 @@ static int palmas_regulators_probe(struct platform_device *pdev)
1572 if (!pmic) 1572 if (!pmic)
1573 return -ENOMEM; 1573 return -ENOMEM;
1574 1574
1575 if (of_device_is_compatible(node, "ti,tps659038-pmic"))
1576 palmas_generic_regs_info[PALMAS_REG_REGEN2].ctrl_addr =
1577 TPS659038_REGEN2_CTRL;
1578
1575 pmic->dev = &pdev->dev; 1579 pmic->dev = &pdev->dev;
1576 pmic->palmas = palmas; 1580 pmic->palmas = palmas;
1577 palmas->pmic = pmic; 1581 palmas->pmic = pmic;
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index e2436d140175..3a6fd3a8a2ec 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -413,8 +413,8 @@ static void rtc_mrst_do_remove(struct device *dev)
413 mrst->dev = NULL; 413 mrst->dev = NULL;
414} 414}
415 415
416#ifdef CONFIG_PM 416#ifdef CONFIG_PM_SLEEP
417static int mrst_suspend(struct device *dev, pm_message_t mesg) 417static int mrst_suspend(struct device *dev)
418{ 418{
419 struct mrst_rtc *mrst = dev_get_drvdata(dev); 419 struct mrst_rtc *mrst = dev_get_drvdata(dev);
420 unsigned char tmp; 420 unsigned char tmp;
@@ -453,7 +453,7 @@ static int mrst_suspend(struct device *dev, pm_message_t mesg)
453 */ 453 */
454static inline int mrst_poweroff(struct device *dev) 454static inline int mrst_poweroff(struct device *dev)
455{ 455{
456 return mrst_suspend(dev, PMSG_HIBERNATE); 456 return mrst_suspend(dev);
457} 457}
458 458
459static int mrst_resume(struct device *dev) 459static int mrst_resume(struct device *dev)
@@ -490,9 +490,11 @@ static int mrst_resume(struct device *dev)
490 return 0; 490 return 0;
491} 491}
492 492
493static SIMPLE_DEV_PM_OPS(mrst_pm_ops, mrst_suspend, mrst_resume);
494#define MRST_PM_OPS (&mrst_pm_ops)
495
493#else 496#else
494#define mrst_suspend NULL 497#define MRST_PM_OPS NULL
495#define mrst_resume NULL
496 498
497static inline int mrst_poweroff(struct device *dev) 499static inline int mrst_poweroff(struct device *dev)
498{ 500{
@@ -529,9 +531,8 @@ static struct platform_driver vrtc_mrst_platform_driver = {
529 .remove = vrtc_mrst_platform_remove, 531 .remove = vrtc_mrst_platform_remove,
530 .shutdown = vrtc_mrst_platform_shutdown, 532 .shutdown = vrtc_mrst_platform_shutdown,
531 .driver = { 533 .driver = {
532 .name = (char *) driver_name, 534 .name = driver_name,
533 .suspend = mrst_suspend, 535 .pm = MRST_PM_OPS,
534 .resume = mrst_resume,
535 } 536 }
536}; 537};
537 538
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 9219953ee949..d9afc51af7d3 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6815,7 +6815,8 @@ static struct ata_port_operations ipr_sata_ops = {
6815}; 6815};
6816 6816
6817static struct ata_port_info sata_port_info = { 6817static struct ata_port_info sata_port_info = {
6818 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA, 6818 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
6819 ATA_FLAG_SAS_HOST,
6819 .pio_mask = ATA_PIO4_ONLY, 6820 .pio_mask = ATA_PIO4_ONLY,
6820 .mwdma_mask = ATA_MWDMA2, 6821 .mwdma_mask = ATA_MWDMA2,
6821 .udma_mask = ATA_UDMA6, 6822 .udma_mask = ATA_UDMA6,
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 932d9cc98d2f..9c706d8c1441 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -547,7 +547,8 @@ static struct ata_port_operations sas_sata_ops = {
547}; 547};
548 548
549static struct ata_port_info sata_port_info = { 549static struct ata_port_info sata_port_info = {
550 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ, 550 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
551 ATA_FLAG_SAS_HOST,
551 .pio_mask = ATA_PIO4, 552 .pio_mask = ATA_PIO4,
552 .mwdma_mask = ATA_MWDMA2, 553 .mwdma_mask = ATA_MWDMA2,
553 .udma_mask = ATA_UDMA6, 554 .udma_mask = ATA_UDMA6,
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 3ce39d10fafb..4f8c798e0633 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -108,7 +108,8 @@ static void dw_spi_dma_tx_done(void *arg)
108{ 108{
109 struct dw_spi *dws = arg; 109 struct dw_spi *dws = arg;
110 110
111 if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY)) 111 clear_bit(TX_BUSY, &dws->dma_chan_busy);
112 if (test_bit(RX_BUSY, &dws->dma_chan_busy))
112 return; 113 return;
113 dw_spi_xfer_done(dws); 114 dw_spi_xfer_done(dws);
114} 115}
@@ -156,7 +157,8 @@ static void dw_spi_dma_rx_done(void *arg)
156{ 157{
157 struct dw_spi *dws = arg; 158 struct dw_spi *dws = arg;
158 159
159 if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY)) 160 clear_bit(RX_BUSY, &dws->dma_chan_busy);
161 if (test_bit(TX_BUSY, &dws->dma_chan_busy))
160 return; 162 return;
161 dw_spi_xfer_done(dws); 163 dw_spi_xfer_done(dws);
162} 164}
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index ff9cdbdb6672..2b2c359f5a50 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -498,7 +498,7 @@ static int spi_qup_probe(struct platform_device *pdev)
498 struct resource *res; 498 struct resource *res;
499 struct device *dev; 499 struct device *dev;
500 void __iomem *base; 500 void __iomem *base;
501 u32 max_freq, iomode; 501 u32 max_freq, iomode, num_cs;
502 int ret, irq, size; 502 int ret, irq, size;
503 503
504 dev = &pdev->dev; 504 dev = &pdev->dev;
@@ -550,10 +550,11 @@ static int spi_qup_probe(struct platform_device *pdev)
550 } 550 }
551 551
552 /* use num-cs unless not present or out of range */ 552 /* use num-cs unless not present or out of range */
553 if (of_property_read_u16(dev->of_node, "num-cs", 553 if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
554 &master->num_chipselect) || 554 num_cs > SPI_NUM_CHIPSELECTS)
555 (master->num_chipselect > SPI_NUM_CHIPSELECTS))
556 master->num_chipselect = SPI_NUM_CHIPSELECTS; 555 master->num_chipselect = SPI_NUM_CHIPSELECTS;
556 else
557 master->num_chipselect = num_cs;
557 558
558 master->bus_num = pdev->id; 559 master->bus_num = pdev->id;
559 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; 560 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index c64a3e59fce3..57a195041dc7 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1105,13 +1105,14 @@ void spi_finalize_current_message(struct spi_master *master)
1105 "failed to unprepare message: %d\n", ret); 1105 "failed to unprepare message: %d\n", ret);
1106 } 1106 }
1107 } 1107 }
1108
1109 trace_spi_message_done(mesg);
1110
1108 master->cur_msg_prepared = false; 1111 master->cur_msg_prepared = false;
1109 1112
1110 mesg->state = NULL; 1113 mesg->state = NULL;
1111 if (mesg->complete) 1114 if (mesg->complete)
1112 mesg->complete(mesg->context); 1115 mesg->complete(mesg->context);
1113
1114 trace_spi_message_done(mesg);
1115} 1116}
1116EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1117EXPORT_SYMBOL_GPL(spi_finalize_current_message);
1117 1118
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index 24183028bd71..6d5b38d69578 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -38,6 +38,7 @@ config IIO_SIMPLE_DUMMY_EVENTS
38config IIO_SIMPLE_DUMMY_BUFFER 38config IIO_SIMPLE_DUMMY_BUFFER
39 bool "Buffered capture support" 39 bool "Buffered capture support"
40 select IIO_BUFFER 40 select IIO_BUFFER
41 select IIO_TRIGGER
41 select IIO_KFIFO_BUF 42 select IIO_KFIFO_BUF
42 help 43 help
43 Add buffered data capture to the simple dummy driver. 44 Add buffered data capture to the simple dummy driver.
diff --git a/drivers/staging/iio/magnetometer/hmc5843_core.c b/drivers/staging/iio/magnetometer/hmc5843_core.c
index fd171d8b38fb..90cc18b703cf 100644
--- a/drivers/staging/iio/magnetometer/hmc5843_core.c
+++ b/drivers/staging/iio/magnetometer/hmc5843_core.c
@@ -592,6 +592,7 @@ int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
592 mutex_init(&data->lock); 592 mutex_init(&data->lock);
593 593
594 indio_dev->dev.parent = dev; 594 indio_dev->dev.parent = dev;
595 indio_dev->name = dev->driver->name;
595 indio_dev->info = &hmc5843_info; 596 indio_dev->info = &hmc5843_info;
596 indio_dev->modes = INDIO_DIRECT_MODE; 597 indio_dev->modes = INDIO_DIRECT_MODE;
597 indio_dev->channels = data->variant->channels; 598 indio_dev->channels = data->variant->channels;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index b1893f3f88f1..3ad1458bfeb0 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -921,6 +921,9 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
921 writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE, 921 writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
922 sport->port.membase + UARTPFIFO); 922 sport->port.membase + UARTPFIFO);
923 923
924 /* explicitly clear RDRF */
925 readb(sport->port.membase + UARTSR1);
926
924 /* flush Tx and Rx FIFO */ 927 /* flush Tx and Rx FIFO */
925 writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH, 928 writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
926 sport->port.membase + UARTCFIFO); 929 sport->port.membase + UARTCFIFO);
@@ -1076,6 +1079,8 @@ static int lpuart_startup(struct uart_port *port)
1076 sport->txfifo_size = 0x1 << (((temp >> UARTPFIFO_TXSIZE_OFF) & 1079 sport->txfifo_size = 0x1 << (((temp >> UARTPFIFO_TXSIZE_OFF) &
1077 UARTPFIFO_FIFOSIZE_MASK) + 1); 1080 UARTPFIFO_FIFOSIZE_MASK) + 1);
1078 1081
1082 sport->port.fifosize = sport->txfifo_size;
1083
1079 sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) & 1084 sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) &
1080 UARTPFIFO_FIFOSIZE_MASK) + 1); 1085 UARTPFIFO_FIFOSIZE_MASK) + 1);
1081 1086
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index af821a908720..cf08876922f1 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -963,6 +963,7 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
963 free_irq(ourport->tx_irq, ourport); 963 free_irq(ourport->tx_irq, ourport);
964 tx_enabled(port) = 0; 964 tx_enabled(port) = 0;
965 ourport->tx_claimed = 0; 965 ourport->tx_claimed = 0;
966 ourport->tx_mode = 0;
966 } 967 }
967 968
968 if (ourport->rx_claimed) { 969 if (ourport->rx_claimed) {
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index a7865c4b0498..0827d7c96527 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -387,6 +387,10 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
387 status = PORT_PLC; 387 status = PORT_PLC;
388 port_change_bit = "link state"; 388 port_change_bit = "link state";
389 break; 389 break;
390 case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
391 status = PORT_CEC;
392 port_change_bit = "config error";
393 break;
390 default: 394 default:
391 /* Should never happen */ 395 /* Should never happen */
392 return; 396 return;
@@ -588,6 +592,8 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
588 status |= USB_PORT_STAT_C_LINK_STATE << 16; 592 status |= USB_PORT_STAT_C_LINK_STATE << 16;
589 if ((raw_port_status & PORT_WRC)) 593 if ((raw_port_status & PORT_WRC))
590 status |= USB_PORT_STAT_C_BH_RESET << 16; 594 status |= USB_PORT_STAT_C_BH_RESET << 16;
595 if ((raw_port_status & PORT_CEC))
596 status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
591 } 597 }
592 598
593 if (hcd->speed != HCD_USB3) { 599 if (hcd->speed != HCD_USB3) {
@@ -1005,6 +1011,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1005 case USB_PORT_FEAT_C_OVER_CURRENT: 1011 case USB_PORT_FEAT_C_OVER_CURRENT:
1006 case USB_PORT_FEAT_C_ENABLE: 1012 case USB_PORT_FEAT_C_ENABLE:
1007 case USB_PORT_FEAT_C_PORT_LINK_STATE: 1013 case USB_PORT_FEAT_C_PORT_LINK_STATE:
1014 case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
1008 xhci_clear_port_change_bit(xhci, wValue, wIndex, 1015 xhci_clear_port_change_bit(xhci, wValue, wIndex,
1009 port_array[wIndex], temp); 1016 port_array[wIndex], temp);
1010 break; 1017 break;
@@ -1069,7 +1076,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
1069 */ 1076 */
1070 status = bus_state->resuming_ports; 1077 status = bus_state->resuming_ports;
1071 1078
1072 mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC; 1079 mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
1073 1080
1074 spin_lock_irqsave(&xhci->lock, flags); 1081 spin_lock_irqsave(&xhci->lock, flags);
1075 /* For each port, did anything change? If so, set that bit in buf. */ 1082 /* For each port, did anything change? If so, set that bit in buf. */
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index fd53c9ebd662..2af32e26fafc 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -115,6 +115,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
115 if (pdev->vendor == PCI_VENDOR_ID_INTEL) { 115 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
116 xhci->quirks |= XHCI_LPM_SUPPORT; 116 xhci->quirks |= XHCI_LPM_SUPPORT;
117 xhci->quirks |= XHCI_INTEL_HOST; 117 xhci->quirks |= XHCI_INTEL_HOST;
118 xhci->quirks |= XHCI_AVOID_BEI;
118 } 119 }
119 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 120 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
120 pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { 121 pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
@@ -130,7 +131,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
130 * PPT chipsets. 131 * PPT chipsets.
131 */ 132 */
132 xhci->quirks |= XHCI_SPURIOUS_REBOOT; 133 xhci->quirks |= XHCI_SPURIOUS_REBOOT;
133 xhci->quirks |= XHCI_AVOID_BEI;
134 } 134 }
135 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 135 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
136 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { 136 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c
index f32c292cc868..3fc4fe770253 100644
--- a/drivers/usb/isp1760/isp1760-udc.c
+++ b/drivers/usb/isp1760/isp1760-udc.c
@@ -1203,7 +1203,7 @@ static int isp1760_udc_start(struct usb_gadget *gadget,
1203 1203
1204 if (udc->driver) { 1204 if (udc->driver) {
1205 dev_err(udc->isp->dev, "UDC already has a gadget driver\n"); 1205 dev_err(udc->isp->dev, "UDC already has a gadget driver\n");
1206 spin_unlock(&udc->lock); 1206 spin_unlock_irqrestore(&udc->lock, flags);
1207 return -EBUSY; 1207 return -EBUSY;
1208 } 1208 }
1209 1209
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 3086dec0ef53..8eb68a31cab6 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -604,6 +604,7 @@ static const struct usb_device_id id_table_combined[] = {
604 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 604 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
605 { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID), 605 { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
606 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 606 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
607 { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
607 /* 608 /*
608 * ELV devices: 609 * ELV devices:
609 */ 610 */
@@ -1883,8 +1884,12 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
1883{ 1884{
1884 struct usb_device *udev = serial->dev; 1885 struct usb_device *udev = serial->dev;
1885 1886
1886 if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) || 1887 if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems"))
1887 (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2"))) 1888 return ftdi_jtag_probe(serial);
1889
1890 if (udev->product &&
1891 (!strcmp(udev->product, "BeagleBone/XDS100V2") ||
1892 !strcmp(udev->product, "SNAP Connect E10")))
1888 return ftdi_jtag_probe(serial); 1893 return ftdi_jtag_probe(serial);
1889 1894
1890 return 0; 1895 return 0;
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 56b1b55c4751..4e4f46f3c89c 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -561,6 +561,12 @@
561 */ 561 */
562#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ 562#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
563 563
564/*
565 * Synapse Wireless product ids (FTDI_VID)
566 * http://www.synapse-wireless.com
567 */
568#define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
569
564 570
565/********************************/ 571/********************************/
566/** third-party VID/PID combos **/ 572/** third-party VID/PID combos **/
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index dd97d8b572c3..4f7e072e4e00 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -61,6 +61,7 @@ struct keyspan_pda_private {
61/* For Xircom PGSDB9 and older Entrega version of the same device */ 61/* For Xircom PGSDB9 and older Entrega version of the same device */
62#define XIRCOM_VENDOR_ID 0x085a 62#define XIRCOM_VENDOR_ID 0x085a
63#define XIRCOM_FAKE_ID 0x8027 63#define XIRCOM_FAKE_ID 0x8027
64#define XIRCOM_FAKE_ID_2 0x8025 /* "PGMFHUB" serial */
64#define ENTREGA_VENDOR_ID 0x1645 65#define ENTREGA_VENDOR_ID 0x1645
65#define ENTREGA_FAKE_ID 0x8093 66#define ENTREGA_FAKE_ID 0x8093
66 67
@@ -70,6 +71,7 @@ static const struct usb_device_id id_table_combined[] = {
70#endif 71#endif
71#ifdef XIRCOM 72#ifdef XIRCOM
72 { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) }, 73 { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
74 { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) },
73 { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) }, 75 { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) },
74#endif 76#endif
75 { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) }, 77 { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) },
@@ -93,6 +95,7 @@ static const struct usb_device_id id_table_fake[] = {
93#ifdef XIRCOM 95#ifdef XIRCOM
94static const struct usb_device_id id_table_fake_xircom[] = { 96static const struct usb_device_id id_table_fake_xircom[] = {
95 { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) }, 97 { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
98 { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) },
96 { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) }, 99 { USB_DEVICE(ENTREGA_VENDOR_ID, ENTREGA_FAKE_ID) },
97 { } 100 { }
98}; 101};
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
index c8def68d9e4c..0deaa4f971f5 100644
--- a/drivers/watchdog/imgpdc_wdt.c
+++ b/drivers/watchdog/imgpdc_wdt.c
@@ -42,10 +42,10 @@
42#define PDC_WDT_MIN_TIMEOUT 1 42#define PDC_WDT_MIN_TIMEOUT 1
43#define PDC_WDT_DEF_TIMEOUT 64 43#define PDC_WDT_DEF_TIMEOUT 64
44 44
45static int heartbeat; 45static int heartbeat = PDC_WDT_DEF_TIMEOUT;
46module_param(heartbeat, int, 0); 46module_param(heartbeat, int, 0);
47MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. " 47MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds "
48 "(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")"); 48 "(default=" __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
49 49
50static bool nowayout = WATCHDOG_NOWAYOUT; 50static bool nowayout = WATCHDOG_NOWAYOUT;
51module_param(nowayout, bool, 0); 51module_param(nowayout, bool, 0);
@@ -191,6 +191,7 @@ static int pdc_wdt_probe(struct platform_device *pdev)
191 pdc_wdt->wdt_dev.ops = &pdc_wdt_ops; 191 pdc_wdt->wdt_dev.ops = &pdc_wdt_ops;
192 pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK; 192 pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK;
193 pdc_wdt->wdt_dev.parent = &pdev->dev; 193 pdc_wdt->wdt_dev.parent = &pdev->dev;
194 watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
194 195
195 ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev); 196 ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev);
196 if (ret < 0) { 197 if (ret < 0) {
@@ -232,7 +233,6 @@ static int pdc_wdt_probe(struct platform_device *pdev)
232 watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout); 233 watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout);
233 234
234 platform_set_drvdata(pdev, pdc_wdt); 235 platform_set_drvdata(pdev, pdc_wdt);
235 watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);
236 236
237 ret = watchdog_register_device(&pdc_wdt->wdt_dev); 237 ret = watchdog_register_device(&pdc_wdt->wdt_dev);
238 if (ret) 238 if (ret)
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index a87f6df6e85f..938b987de551 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -133,7 +133,7 @@ static int mtk_wdt_start(struct watchdog_device *wdt_dev)
133 u32 reg; 133 u32 reg;
134 struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev); 134 struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
135 void __iomem *wdt_base = mtk_wdt->wdt_base; 135 void __iomem *wdt_base = mtk_wdt->wdt_base;
136 u32 ret; 136 int ret;
137 137
138 ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout); 138 ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
139 if (ret < 0) 139 if (ret < 0)
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index b812462083fc..94d96809e686 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -55,6 +55,23 @@ config XEN_BALLOON_MEMORY_HOTPLUG
55 55
56 In that case step 3 should be omitted. 56 In that case step 3 should be omitted.
57 57
58config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
59 int "Hotplugged memory limit (in GiB) for a PV guest"
60 default 512 if X86_64
61 default 4 if X86_32
62 range 0 64 if X86_32
63 depends on XEN_HAVE_PVMMU
64 depends on XEN_BALLOON_MEMORY_HOTPLUG
65 help
  66	   Maximum amount of memory (in GiB) that a PV guest can be	
67 expanded to when using memory hotplug.
68
  69	   A PV guest can have more memory than this limit if it is	
70 started with a larger maximum.
71
72 This value is used to allocate enough space in internal
73 tables needed for physical memory administration.
74
58config XEN_SCRUB_PAGES 75config XEN_SCRUB_PAGES
59 bool "Scrub pages before returning them to system" 76 bool "Scrub pages before returning them to system"
60 depends on XEN_BALLOON 77 depends on XEN_BALLOON
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 0b52d92cb2e5..fd933695f232 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -229,6 +229,29 @@ static enum bp_state reserve_additional_memory(long credit)
229 balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION); 229 balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION);
230 nid = memory_add_physaddr_to_nid(hotplug_start_paddr); 230 nid = memory_add_physaddr_to_nid(hotplug_start_paddr);
231 231
232#ifdef CONFIG_XEN_HAVE_PVMMU
233 /*
234 * add_memory() will build page tables for the new memory so
235 * the p2m must contain invalid entries so the correct
236 * non-present PTEs will be written.
237 *
238 * If a failure occurs, the original (identity) p2m entries
239 * are not restored since this region is now known not to
240 * conflict with any devices.
241 */
242 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
243 unsigned long pfn, i;
244
245 pfn = PFN_DOWN(hotplug_start_paddr);
246 for (i = 0; i < balloon_hotplug; i++) {
247 if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
248 pr_warn("set_phys_to_machine() failed, no memory added\n");
249 return BP_ECANCELED;
250 }
251 }
252 }
253#endif
254
232 rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT); 255 rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);
233 256
234 if (rc) { 257 if (rc) {
diff --git a/fs/affs/file.c b/fs/affs/file.c
index d2468bf95669..a91795e01a7f 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -699,8 +699,10 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
699 boff = tmp % bsize; 699 boff = tmp % bsize;
700 if (boff) { 700 if (boff) {
701 bh = affs_bread_ino(inode, bidx, 0); 701 bh = affs_bread_ino(inode, bidx, 0);
702 if (IS_ERR(bh)) 702 if (IS_ERR(bh)) {
703 return PTR_ERR(bh); 703 written = PTR_ERR(bh);
704 goto err_first_bh;
705 }
704 tmp = min(bsize - boff, to - from); 706 tmp = min(bsize - boff, to - from);
705 BUG_ON(boff + tmp > bsize || tmp > bsize); 707 BUG_ON(boff + tmp > bsize || tmp > bsize);
706 memcpy(AFFS_DATA(bh) + boff, data + from, tmp); 708 memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
@@ -712,14 +714,16 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
712 bidx++; 714 bidx++;
713 } else if (bidx) { 715 } else if (bidx) {
714 bh = affs_bread_ino(inode, bidx - 1, 0); 716 bh = affs_bread_ino(inode, bidx - 1, 0);
715 if (IS_ERR(bh)) 717 if (IS_ERR(bh)) {
716 return PTR_ERR(bh); 718 written = PTR_ERR(bh);
719 goto err_first_bh;
720 }
717 } 721 }
718 while (from + bsize <= to) { 722 while (from + bsize <= to) {
719 prev_bh = bh; 723 prev_bh = bh;
720 bh = affs_getemptyblk_ino(inode, bidx); 724 bh = affs_getemptyblk_ino(inode, bidx);
721 if (IS_ERR(bh)) 725 if (IS_ERR(bh))
722 goto out; 726 goto err_bh;
723 memcpy(AFFS_DATA(bh), data + from, bsize); 727 memcpy(AFFS_DATA(bh), data + from, bsize);
724 if (buffer_new(bh)) { 728 if (buffer_new(bh)) {
725 AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); 729 AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
@@ -751,7 +755,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
751 prev_bh = bh; 755 prev_bh = bh;
752 bh = affs_bread_ino(inode, bidx, 1); 756 bh = affs_bread_ino(inode, bidx, 1);
753 if (IS_ERR(bh)) 757 if (IS_ERR(bh))
754 goto out; 758 goto err_bh;
755 tmp = min(bsize, to - from); 759 tmp = min(bsize, to - from);
756 BUG_ON(tmp > bsize); 760 BUG_ON(tmp > bsize);
757 memcpy(AFFS_DATA(bh), data + from, tmp); 761 memcpy(AFFS_DATA(bh), data + from, tmp);
@@ -790,12 +794,13 @@ done:
790 if (tmp > inode->i_size) 794 if (tmp > inode->i_size)
791 inode->i_size = AFFS_I(inode)->mmu_private = tmp; 795 inode->i_size = AFFS_I(inode)->mmu_private = tmp;
792 796
797err_first_bh:
793 unlock_page(page); 798 unlock_page(page);
794 page_cache_release(page); 799 page_cache_release(page);
795 800
796 return written; 801 return written;
797 802
798out: 803err_bh:
799 bh = prev_bh; 804 bh = prev_bh;
800 if (!written) 805 if (!written)
801 written = PTR_ERR(bh); 806 written = PTR_ERR(bh);
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 4ac7445e6ec7..aa0dc2573374 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -1,6 +1,9 @@
1/* 1/*
2 * fs/cifs/cifsencrypt.c 2 * fs/cifs/cifsencrypt.c
3 * 3 *
4 * Encryption and hashing operations relating to NTLM, NTLMv2. See MS-NLMP
5 * for more detailed information
6 *
4 * Copyright (C) International Business Machines Corp., 2005,2013 7 * Copyright (C) International Business Machines Corp., 2005,2013
5 * Author(s): Steve French (sfrench@us.ibm.com) 8 * Author(s): Steve French (sfrench@us.ibm.com)
6 * 9 *
@@ -515,7 +518,8 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
515 __func__); 518 __func__);
516 return rc; 519 return rc;
517 } 520 }
518 } else if (ses->serverName) { 521 } else {
522 /* We use ses->serverName if no domain name available */
519 len = strlen(ses->serverName); 523 len = strlen(ses->serverName);
520 524
521 server = kmalloc(2 + (len * 2), GFP_KERNEL); 525 server = kmalloc(2 + (len * 2), GFP_KERNEL);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index d3aa999ab785..480cf9c81d50 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1599,6 +1599,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1599 pr_warn("CIFS: username too long\n"); 1599 pr_warn("CIFS: username too long\n");
1600 goto cifs_parse_mount_err; 1600 goto cifs_parse_mount_err;
1601 } 1601 }
1602
1603 kfree(vol->username);
1602 vol->username = kstrdup(string, GFP_KERNEL); 1604 vol->username = kstrdup(string, GFP_KERNEL);
1603 if (!vol->username) 1605 if (!vol->username)
1604 goto cifs_parse_mount_err; 1606 goto cifs_parse_mount_err;
@@ -1700,6 +1702,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1700 goto cifs_parse_mount_err; 1702 goto cifs_parse_mount_err;
1701 } 1703 }
1702 1704
1705 kfree(vol->domainname);
1703 vol->domainname = kstrdup(string, GFP_KERNEL); 1706 vol->domainname = kstrdup(string, GFP_KERNEL);
1704 if (!vol->domainname) { 1707 if (!vol->domainname) {
1705 pr_warn("CIFS: no memory for domainname\n"); 1708 pr_warn("CIFS: no memory for domainname\n");
@@ -1731,6 +1734,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1731 } 1734 }
1732 1735
1733 if (strncasecmp(string, "default", 7) != 0) { 1736 if (strncasecmp(string, "default", 7) != 0) {
1737 kfree(vol->iocharset);
1734 vol->iocharset = kstrdup(string, 1738 vol->iocharset = kstrdup(string,
1735 GFP_KERNEL); 1739 GFP_KERNEL);
1736 if (!vol->iocharset) { 1740 if (!vol->iocharset) {
@@ -2913,8 +2917,7 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
2913 * calling name ends in null (byte 16) from old smb 2917 * calling name ends in null (byte 16) from old smb
2914 * convention. 2918 * convention.
2915 */ 2919 */
2916 if (server->workstation_RFC1001_name && 2920 if (server->workstation_RFC1001_name[0] != 0)
2917 server->workstation_RFC1001_name[0] != 0)
2918 rfc1002mangle(ses_init_buf->trailer. 2921 rfc1002mangle(ses_init_buf->trailer.
2919 session_req.calling_name, 2922 session_req.calling_name,
2920 server->workstation_RFC1001_name, 2923 server->workstation_RFC1001_name,
@@ -3692,6 +3695,12 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
3692#endif /* CIFS_WEAK_PW_HASH */ 3695#endif /* CIFS_WEAK_PW_HASH */
3693 rc = SMBNTencrypt(tcon->password, ses->server->cryptkey, 3696 rc = SMBNTencrypt(tcon->password, ses->server->cryptkey,
3694 bcc_ptr, nls_codepage); 3697 bcc_ptr, nls_codepage);
3698 if (rc) {
3699 cifs_dbg(FYI, "%s Can't generate NTLM rsp. Error: %d\n",
3700 __func__, rc);
3701 cifs_buf_release(smb_buffer);
3702 return rc;
3703 }
3695 3704
3696 bcc_ptr += CIFS_AUTH_RESP_SIZE; 3705 bcc_ptr += CIFS_AUTH_RESP_SIZE;
3697 if (ses->capabilities & CAP_UNICODE) { 3706 if (ses->capabilities & CAP_UNICODE) {
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index a94b3e673182..ca30c391a894 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1823,6 +1823,7 @@ refind_writable:
1823 cifsFileInfo_put(inv_file); 1823 cifsFileInfo_put(inv_file);
1824 spin_lock(&cifs_file_list_lock); 1824 spin_lock(&cifs_file_list_lock);
1825 ++refind; 1825 ++refind;
1826 inv_file = NULL;
1826 goto refind_writable; 1827 goto refind_writable;
1827 } 1828 }
1828 } 1829 }
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 2d4f37235ed0..3e126d7bb2ea 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -771,6 +771,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
771 cifs_buf_release(srchinf->ntwrk_buf_start); 771 cifs_buf_release(srchinf->ntwrk_buf_start);
772 } 772 }
773 kfree(srchinf); 773 kfree(srchinf);
774 if (rc)
775 goto cgii_exit;
774 } else 776 } else
775 goto cgii_exit; 777 goto cgii_exit;
776 778
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 689f035915cf..22dfdf17d065 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -322,7 +322,7 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
322 322
323 /* return pointer to beginning of data area, ie offset from SMB start */ 323 /* return pointer to beginning of data area, ie offset from SMB start */
324 if ((*off != 0) && (*len != 0)) 324 if ((*off != 0) && (*len != 0))
325 return hdr->ProtocolId + *off; 325 return (char *)(&hdr->ProtocolId[0]) + *off;
326 else 326 else
327 return NULL; 327 return NULL;
328} 328}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 96b5d40a2ece..eab05e1aa587 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -684,7 +684,8 @@ smb2_clone_range(const unsigned int xid,
684 684
685 /* No need to change MaxChunks since already set to 1 */ 685 /* No need to change MaxChunks since already set to 1 */
686 chunk_sizes_updated = true; 686 chunk_sizes_updated = true;
687 } 687 } else
688 goto cchunk_out;
688 } 689 }
689 690
690cchunk_out: 691cchunk_out:
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 3417340bf89e..65cd7a84c8bc 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1218,7 +1218,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1218 struct smb2_ioctl_req *req; 1218 struct smb2_ioctl_req *req;
1219 struct smb2_ioctl_rsp *rsp; 1219 struct smb2_ioctl_rsp *rsp;
1220 struct TCP_Server_Info *server; 1220 struct TCP_Server_Info *server;
1221 struct cifs_ses *ses = tcon->ses; 1221 struct cifs_ses *ses;
1222 struct kvec iov[2]; 1222 struct kvec iov[2];
1223 int resp_buftype; 1223 int resp_buftype;
1224 int num_iovecs; 1224 int num_iovecs;
@@ -1233,6 +1233,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1233 if (plen) 1233 if (plen)
1234 *plen = 0; 1234 *plen = 0;
1235 1235
1236 if (tcon)
1237 ses = tcon->ses;
1238 else
1239 return -EIO;
1240
1236 if (ses && (ses->server)) 1241 if (ses && (ses->server))
1237 server = ses->server; 1242 server = ses->server;
1238 else 1243 else
@@ -1296,14 +1301,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1296 rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base; 1301 rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
1297 1302
1298 if ((rc != 0) && (rc != -EINVAL)) { 1303 if ((rc != 0) && (rc != -EINVAL)) {
1299 if (tcon) 1304 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
1300 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
1301 goto ioctl_exit; 1305 goto ioctl_exit;
1302 } else if (rc == -EINVAL) { 1306 } else if (rc == -EINVAL) {
1303 if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) && 1307 if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
1304 (opcode != FSCTL_SRV_COPYCHUNK)) { 1308 (opcode != FSCTL_SRV_COPYCHUNK)) {
1305 if (tcon) 1309 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
1306 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
1307 goto ioctl_exit; 1310 goto ioctl_exit;
1308 } 1311 }
1309 } 1312 }
@@ -1629,7 +1632,7 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1629 1632
1630 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0); 1633 rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
1631 1634
1632 if ((rc != 0) && tcon) 1635 if (rc != 0)
1633 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE); 1636 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
1634 1637
1635 free_rsp_buf(resp_buftype, iov[0].iov_base); 1638 free_rsp_buf(resp_buftype, iov[0].iov_base);
@@ -2114,7 +2117,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
2114 struct kvec iov[2]; 2117 struct kvec iov[2];
2115 int rc = 0; 2118 int rc = 0;
2116 int len; 2119 int len;
2117 int resp_buftype; 2120 int resp_buftype = CIFS_NO_BUFFER;
2118 unsigned char *bufptr; 2121 unsigned char *bufptr;
2119 struct TCP_Server_Info *server; 2122 struct TCP_Server_Info *server;
2120 struct cifs_ses *ses = tcon->ses; 2123 struct cifs_ses *ses = tcon->ses;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index afec6450450f..6b8e2f091f5b 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -570,6 +570,7 @@ static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, void __user *argp)
570#define BNEPCONNDEL _IOW('B', 201, int) 570#define BNEPCONNDEL _IOW('B', 201, int)
571#define BNEPGETCONNLIST _IOR('B', 210, int) 571#define BNEPGETCONNLIST _IOR('B', 210, int)
572#define BNEPGETCONNINFO _IOR('B', 211, int) 572#define BNEPGETCONNINFO _IOR('B', 211, int)
573#define BNEPGETSUPPFEAT _IOR('B', 212, int)
573 574
574#define CMTPCONNADD _IOW('C', 200, int) 575#define CMTPCONNADD _IOW('C', 200, int)
575#define CMTPCONNDEL _IOW('C', 201, int) 576#define CMTPCONNDEL _IOW('C', 201, int)
@@ -1247,6 +1248,7 @@ COMPATIBLE_IOCTL(BNEPCONNADD)
1247COMPATIBLE_IOCTL(BNEPCONNDEL) 1248COMPATIBLE_IOCTL(BNEPCONNDEL)
1248COMPATIBLE_IOCTL(BNEPGETCONNLIST) 1249COMPATIBLE_IOCTL(BNEPGETCONNLIST)
1249COMPATIBLE_IOCTL(BNEPGETCONNINFO) 1250COMPATIBLE_IOCTL(BNEPGETCONNINFO)
1251COMPATIBLE_IOCTL(BNEPGETSUPPFEAT)
1250COMPATIBLE_IOCTL(CMTPCONNADD) 1252COMPATIBLE_IOCTL(CMTPCONNADD)
1251COMPATIBLE_IOCTL(CMTPCONNDEL) 1253COMPATIBLE_IOCTL(CMTPCONNDEL)
1252COMPATIBLE_IOCTL(CMTPGETCONNLIST) 1254COMPATIBLE_IOCTL(CMTPGETCONNLIST)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index e907052eeadb..32a8bbd7a9ad 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -53,6 +53,18 @@ struct wb_writeback_work {
53 struct completion *done; /* set if the caller waits */ 53 struct completion *done; /* set if the caller waits */
54}; 54};
55 55
56/*
57 * If an inode is constantly having its pages dirtied, but then the
58 * updates stop dirtytime_expire_interval seconds in the past, it's
59 * possible for the worst case time between when an inode has its
60 * timestamps updated and when they finally get written out to be two
61 * dirtytime_expire_intervals. We set the default to 12 hours (in
62 * seconds), which means most of the time inodes will have their
63 * timestamps written to disk after 12 hours, but in the worst case a
  64 * few inodes might not have their timestamps updated for 24 hours.	
65 */
66unsigned int dirtytime_expire_interval = 12 * 60 * 60;
67
56/** 68/**
57 * writeback_in_progress - determine whether there is writeback in progress 69 * writeback_in_progress - determine whether there is writeback in progress
58 * @bdi: the device's backing_dev_info structure. 70 * @bdi: the device's backing_dev_info structure.
@@ -275,8 +287,8 @@ static int move_expired_inodes(struct list_head *delaying_queue,
275 287
276 if ((flags & EXPIRE_DIRTY_ATIME) == 0) 288 if ((flags & EXPIRE_DIRTY_ATIME) == 0)
277 older_than_this = work->older_than_this; 289 older_than_this = work->older_than_this;
278 else if ((work->reason == WB_REASON_SYNC) == 0) { 290 else if (!work->for_sync) {
279 expire_time = jiffies - (HZ * 86400); 291 expire_time = jiffies - (dirtytime_expire_interval * HZ);
280 older_than_this = &expire_time; 292 older_than_this = &expire_time;
281 } 293 }
282 while (!list_empty(delaying_queue)) { 294 while (!list_empty(delaying_queue)) {
@@ -458,6 +470,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
458 */ 470 */
459 redirty_tail(inode, wb); 471 redirty_tail(inode, wb);
460 } else if (inode->i_state & I_DIRTY_TIME) { 472 } else if (inode->i_state & I_DIRTY_TIME) {
473 inode->dirtied_when = jiffies;
461 list_move(&inode->i_wb_list, &wb->b_dirty_time); 474 list_move(&inode->i_wb_list, &wb->b_dirty_time);
462 } else { 475 } else {
463 /* The inode is clean. Remove from writeback lists. */ 476 /* The inode is clean. Remove from writeback lists. */
@@ -505,12 +518,17 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
505 spin_lock(&inode->i_lock); 518 spin_lock(&inode->i_lock);
506 519
507 dirty = inode->i_state & I_DIRTY; 520 dirty = inode->i_state & I_DIRTY;
508 if (((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) && 521 if (inode->i_state & I_DIRTY_TIME) {
509 (inode->i_state & I_DIRTY_TIME)) || 522 if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
510 (inode->i_state & I_DIRTY_TIME_EXPIRED)) { 523 unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
511 dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED; 524 unlikely(time_after(jiffies,
512 trace_writeback_lazytime(inode); 525 (inode->dirtied_time_when +
513 } 526 dirtytime_expire_interval * HZ)))) {
527 dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
528 trace_writeback_lazytime(inode);
529 }
530 } else
531 inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
514 inode->i_state &= ~dirty; 532 inode->i_state &= ~dirty;
515 533
516 /* 534 /*
@@ -1131,6 +1149,56 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
1131 rcu_read_unlock(); 1149 rcu_read_unlock();
1132} 1150}
1133 1151
1152/*
1153 * Wake up bdi's periodically to make sure dirtytime inodes get	
1154 * written back periodically. We deliberately do *not* check the
1155 * b_dirtytime list in wb_has_dirty_io(), since this would cause the
1156 * kernel to be constantly waking up once there are any dirtytime
1157 * inodes on the system. So instead we define a separate delayed work
1158 * function which gets called much more rarely. (By default, only
1159 * once every 12 hours.)
1160 *
1161 * If there is any other write activity going on in the file system,
1162 * this function won't be necessary. But if the only thing that has
1163 * happened on the file system is a dirtytime inode caused by an atime
1164 * update, we need this infrastructure below to make sure that inode
1165 * eventually gets pushed out to disk.
1166 */
1167static void wakeup_dirtytime_writeback(struct work_struct *w);
1168static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);
1169
1170static void wakeup_dirtytime_writeback(struct work_struct *w)
1171{
1172 struct backing_dev_info *bdi;
1173
1174 rcu_read_lock();
1175 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
1176 if (list_empty(&bdi->wb.b_dirty_time))
1177 continue;
1178 bdi_wakeup_thread(bdi);
1179 }
1180 rcu_read_unlock();
1181 schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
1182}
1183
1184static int __init start_dirtytime_writeback(void)
1185{
1186 schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
1187 return 0;
1188}
1189__initcall(start_dirtytime_writeback);
1190
1191int dirtytime_interval_handler(struct ctl_table *table, int write,
1192 void __user *buffer, size_t *lenp, loff_t *ppos)
1193{
1194 int ret;
1195
1196 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
1197 if (ret == 0 && write)
1198 mod_delayed_work(system_wq, &dirtytime_work, 0);
1199 return ret;
1200}
1201
1134static noinline void block_dump___mark_inode_dirty(struct inode *inode) 1202static noinline void block_dump___mark_inode_dirty(struct inode *inode)
1135{ 1203{
1136 if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) { 1204 if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
@@ -1269,8 +1337,13 @@ void __mark_inode_dirty(struct inode *inode, int flags)
1269 } 1337 }
1270 1338
1271 inode->dirtied_when = jiffies; 1339 inode->dirtied_when = jiffies;
1272 list_move(&inode->i_wb_list, dirtytime ? 1340 if (dirtytime)
1273 &bdi->wb.b_dirty_time : &bdi->wb.b_dirty); 1341 inode->dirtied_time_when = jiffies;
1342 if (inode->i_state & (I_DIRTY_INODE | I_DIRTY_PAGES))
1343 list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
1344 else
1345 list_move(&inode->i_wb_list,
1346 &bdi->wb.b_dirty_time);
1274 spin_unlock(&bdi->wb.list_lock); 1347 spin_unlock(&bdi->wb.list_lock);
1275 trace_writeback_dirty_inode_enqueue(inode); 1348 trace_writeback_dirty_inode_enqueue(inode);
1276 1349
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index 6e560d56094b..754fdf8c6356 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -131,13 +131,16 @@ skip:
131 hfs_bnode_write(node, entry, data_off + key_len, entry_len); 131 hfs_bnode_write(node, entry, data_off + key_len, entry_len);
132 hfs_bnode_dump(node); 132 hfs_bnode_dump(node);
133 133
134 if (new_node) { 134 /*
135 /* update parent key if we inserted a key 135 * update parent key if we inserted a key
136 * at the start of the first node 136 * at the start of the node and it is not the new node
137 */ 137 */
138 if (!rec && new_node != node) 138 if (!rec && new_node != node) {
139 hfs_brec_update_parent(fd); 139 hfs_bnode_read_key(node, fd->search_key, data_off + size);
140 hfs_brec_update_parent(fd);
141 }
140 142
143 if (new_node) {
141 hfs_bnode_put(fd->bnode); 144 hfs_bnode_put(fd->bnode);
142 if (!new_node->parent) { 145 if (!new_node->parent) {
143 hfs_btree_inc_height(tree); 146 hfs_btree_inc_height(tree);
@@ -168,9 +171,6 @@ skip:
168 goto again; 171 goto again;
169 } 172 }
170 173
171 if (!rec)
172 hfs_brec_update_parent(fd);
173
174 return 0; 174 return 0;
175} 175}
176 176
@@ -370,6 +370,8 @@ again:
370 if (IS_ERR(parent)) 370 if (IS_ERR(parent))
371 return PTR_ERR(parent); 371 return PTR_ERR(parent);
372 __hfs_brec_find(parent, fd, hfs_find_rec_by_key); 372 __hfs_brec_find(parent, fd, hfs_find_rec_by_key);
373 if (fd->record < 0)
374 return -ENOENT;
373 hfs_bnode_dump(parent); 375 hfs_bnode_dump(parent);
374 rec = fd->record; 376 rec = fd->record;
375 377
diff --git a/fs/locks.c b/fs/locks.c
index 528fedfda15e..40bc384728c0 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1388,9 +1388,8 @@ any_leases_conflict(struct inode *inode, struct file_lock *breaker)
1388int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) 1388int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1389{ 1389{
1390 int error = 0; 1390 int error = 0;
1391 struct file_lock *new_fl;
1392 struct file_lock_context *ctx = inode->i_flctx; 1391 struct file_lock_context *ctx = inode->i_flctx;
1393 struct file_lock *fl; 1392 struct file_lock *new_fl, *fl, *tmp;
1394 unsigned long break_time; 1393 unsigned long break_time;
1395 int want_write = (mode & O_ACCMODE) != O_RDONLY; 1394 int want_write = (mode & O_ACCMODE) != O_RDONLY;
1396 LIST_HEAD(dispose); 1395 LIST_HEAD(dispose);
@@ -1420,7 +1419,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1420 break_time++; /* so that 0 means no break time */ 1419 break_time++; /* so that 0 means no break time */
1421 } 1420 }
1422 1421
1423 list_for_each_entry(fl, &ctx->flc_lease, fl_list) { 1422 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1424 if (!leases_conflict(fl, new_fl)) 1423 if (!leases_conflict(fl, new_fl))
1425 continue; 1424 continue;
1426 if (want_write) { 1425 if (want_write) {
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index cdbc78c72542..03d647bf195d 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -137,7 +137,7 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
137 seg->offset = iomap.offset; 137 seg->offset = iomap.offset;
138 seg->length = iomap.length; 138 seg->length = iomap.length;
139 139
140 dprintk("GET: %lld:%lld %d\n", bex->foff, bex->len, bex->es); 140 dprintk("GET: 0x%llx:0x%llx %d\n", bex->foff, bex->len, bex->es);
141 return 0; 141 return 0;
142 142
143out_error: 143out_error:
diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c
index 9da89fddab33..9aa2796da90d 100644
--- a/fs/nfsd/blocklayoutxdr.c
+++ b/fs/nfsd/blocklayoutxdr.c
@@ -122,19 +122,19 @@ nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
122 122
123 p = xdr_decode_hyper(p, &bex.foff); 123 p = xdr_decode_hyper(p, &bex.foff);
124 if (bex.foff & (block_size - 1)) { 124 if (bex.foff & (block_size - 1)) {
125 dprintk("%s: unaligned offset %lld\n", 125 dprintk("%s: unaligned offset 0x%llx\n",
126 __func__, bex.foff); 126 __func__, bex.foff);
127 goto fail; 127 goto fail;
128 } 128 }
129 p = xdr_decode_hyper(p, &bex.len); 129 p = xdr_decode_hyper(p, &bex.len);
130 if (bex.len & (block_size - 1)) { 130 if (bex.len & (block_size - 1)) {
131 dprintk("%s: unaligned length %lld\n", 131 dprintk("%s: unaligned length 0x%llx\n",
132 __func__, bex.foff); 132 __func__, bex.foff);
133 goto fail; 133 goto fail;
134 } 134 }
135 p = xdr_decode_hyper(p, &bex.soff); 135 p = xdr_decode_hyper(p, &bex.soff);
136 if (bex.soff & (block_size - 1)) { 136 if (bex.soff & (block_size - 1)) {
137 dprintk("%s: unaligned disk offset %lld\n", 137 dprintk("%s: unaligned disk offset 0x%llx\n",
138 __func__, bex.soff); 138 __func__, bex.soff);
139 goto fail; 139 goto fail;
140 } 140 }
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 1028a0629543..6904213a4363 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -118,7 +118,7 @@ void nfsd4_setup_layout_type(struct svc_export *exp)
118{ 118{
119 struct super_block *sb = exp->ex_path.mnt->mnt_sb; 119 struct super_block *sb = exp->ex_path.mnt->mnt_sb;
120 120
121 if (exp->ex_flags & NFSEXP_NOPNFS) 121 if (!(exp->ex_flags & NFSEXP_PNFS))
122 return; 122 return;
123 123
124 if (sb->s_export_op->get_uuid && 124 if (sb->s_export_op->get_uuid &&
@@ -440,15 +440,14 @@ nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
440 list_move_tail(&lp->lo_perstate, reaplist); 440 list_move_tail(&lp->lo_perstate, reaplist);
441 return; 441 return;
442 } 442 }
443 end = seg->offset; 443 lo->offset = layout_end(seg);
444 } else { 444 } else {
445 /* retain the whole layout segment on a split. */ 445 /* retain the whole layout segment on a split. */
446 if (layout_end(seg) < end) { 446 if (layout_end(seg) < end) {
447 dprintk("%s: split not supported\n", __func__); 447 dprintk("%s: split not supported\n", __func__);
448 return; 448 return;
449 } 449 }
450 450 end = seg->offset;
451 lo->offset = layout_end(seg);
452 } 451 }
453 452
454 layout_update_len(lo, end); 453 layout_update_len(lo, end);
@@ -513,6 +512,9 @@ nfsd4_return_client_layouts(struct svc_rqst *rqstp,
513 512
514 spin_lock(&clp->cl_lock); 513 spin_lock(&clp->cl_lock);
515 list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) { 514 list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
515 if (ls->ls_layout_type != lrp->lr_layout_type)
516 continue;
517
516 if (lrp->lr_return_type == RETURN_FSID && 518 if (lrp->lr_return_type == RETURN_FSID &&
517 !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle, 519 !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
518 &cstate->current_fh.fh_handle)) 520 &cstate->current_fh.fh_handle))
@@ -587,6 +589,8 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
587 589
588 rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str)); 590 rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));
589 591
592 trace_layout_recall_fail(&ls->ls_stid.sc_stateid);
593
590 printk(KERN_WARNING 594 printk(KERN_WARNING
591 "nfsd: client %s failed to respond to layout recall. " 595 "nfsd: client %s failed to respond to layout recall. "
592 " Fencing..\n", addr_str); 596 " Fencing..\n", addr_str);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index d30bea8d0277..92b9d97aff4f 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1237,8 +1237,8 @@ nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
1237 nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb, gdp); 1237 nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb, gdp);
1238 1238
1239 gdp->gd_notify_types &= ops->notify_types; 1239 gdp->gd_notify_types &= ops->notify_types;
1240 exp_put(exp);
1241out: 1240out:
1241 exp_put(exp);
1242 return nfserr; 1242 return nfserr;
1243} 1243}
1244 1244
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index d2f2c37dc2db..8ba1d888f1e6 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3221,7 +3221,7 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
3221 } else 3221 } else
3222 nfs4_free_openowner(&oo->oo_owner); 3222 nfs4_free_openowner(&oo->oo_owner);
3223 spin_unlock(&clp->cl_lock); 3223 spin_unlock(&clp->cl_lock);
3224 return oo; 3224 return ret;
3225} 3225}
3226 3226
3227static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) { 3227static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
@@ -5062,7 +5062,7 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
5062 } else 5062 } else
5063 nfs4_free_lockowner(&lo->lo_owner); 5063 nfs4_free_lockowner(&lo->lo_owner);
5064 spin_unlock(&clp->cl_lock); 5064 spin_unlock(&clp->cl_lock);
5065 return lo; 5065 return ret;
5066} 5066}
5067 5067
5068static void 5068static void
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index df5e66caf100..5fb7e78169a6 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1562,7 +1562,11 @@ nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp,
1562 p = xdr_decode_hyper(p, &lgp->lg_seg.offset); 1562 p = xdr_decode_hyper(p, &lgp->lg_seg.offset);
1563 p = xdr_decode_hyper(p, &lgp->lg_seg.length); 1563 p = xdr_decode_hyper(p, &lgp->lg_seg.length);
1564 p = xdr_decode_hyper(p, &lgp->lg_minlength); 1564 p = xdr_decode_hyper(p, &lgp->lg_minlength);
1565 nfsd4_decode_stateid(argp, &lgp->lg_sid); 1565
1566 status = nfsd4_decode_stateid(argp, &lgp->lg_sid);
1567 if (status)
1568 return status;
1569
1566 READ_BUF(4); 1570 READ_BUF(4);
1567 lgp->lg_maxcount = be32_to_cpup(p++); 1571 lgp->lg_maxcount = be32_to_cpup(p++);
1568 1572
@@ -1580,7 +1584,11 @@ nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
1580 p = xdr_decode_hyper(p, &lcp->lc_seg.offset); 1584 p = xdr_decode_hyper(p, &lcp->lc_seg.offset);
1581 p = xdr_decode_hyper(p, &lcp->lc_seg.length); 1585 p = xdr_decode_hyper(p, &lcp->lc_seg.length);
1582 lcp->lc_reclaim = be32_to_cpup(p++); 1586 lcp->lc_reclaim = be32_to_cpup(p++);
1583 nfsd4_decode_stateid(argp, &lcp->lc_sid); 1587
1588 status = nfsd4_decode_stateid(argp, &lcp->lc_sid);
1589 if (status)
1590 return status;
1591
1584 READ_BUF(4); 1592 READ_BUF(4);
1585 lcp->lc_newoffset = be32_to_cpup(p++); 1593 lcp->lc_newoffset = be32_to_cpup(p++);
1586 if (lcp->lc_newoffset) { 1594 if (lcp->lc_newoffset) {
@@ -1628,7 +1636,11 @@ nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp,
1628 READ_BUF(16); 1636 READ_BUF(16);
1629 p = xdr_decode_hyper(p, &lrp->lr_seg.offset); 1637 p = xdr_decode_hyper(p, &lrp->lr_seg.offset);
1630 p = xdr_decode_hyper(p, &lrp->lr_seg.length); 1638 p = xdr_decode_hyper(p, &lrp->lr_seg.length);
1631 nfsd4_decode_stateid(argp, &lrp->lr_sid); 1639
1640 status = nfsd4_decode_stateid(argp, &lrp->lr_sid);
1641 if (status)
1642 return status;
1643
1632 READ_BUF(4); 1644 READ_BUF(4);
1633 lrp->lrf_body_len = be32_to_cpup(p++); 1645 lrp->lrf_body_len = be32_to_cpup(p++);
1634 if (lrp->lrf_body_len > 0) { 1646 if (lrp->lrf_body_len > 0) {
@@ -4123,7 +4135,7 @@ nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
4123 return nfserr_resource; 4135 return nfserr_resource;
4124 *p++ = cpu_to_be32(lrp->lrs_present); 4136 *p++ = cpu_to_be32(lrp->lrs_present);
4125 if (lrp->lrs_present) 4137 if (lrp->lrs_present)
4126 nfsd4_encode_stateid(xdr, &lrp->lr_sid); 4138 return nfsd4_encode_stateid(xdr, &lrp->lr_sid);
4127 return nfs_ok; 4139 return nfs_ok;
4128} 4140}
4129#endif /* CONFIG_NFSD_PNFS */ 4141#endif /* CONFIG_NFSD_PNFS */
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 83a9694ec485..46ec934f5dee 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -165,13 +165,17 @@ int nfsd_reply_cache_init(void)
165{ 165{
166 unsigned int hashsize; 166 unsigned int hashsize;
167 unsigned int i; 167 unsigned int i;
168 int status = 0;
168 169
169 max_drc_entries = nfsd_cache_size_limit(); 170 max_drc_entries = nfsd_cache_size_limit();
170 atomic_set(&num_drc_entries, 0); 171 atomic_set(&num_drc_entries, 0);
171 hashsize = nfsd_hashsize(max_drc_entries); 172 hashsize = nfsd_hashsize(max_drc_entries);
172 maskbits = ilog2(hashsize); 173 maskbits = ilog2(hashsize);
173 174
174 register_shrinker(&nfsd_reply_cache_shrinker); 175 status = register_shrinker(&nfsd_reply_cache_shrinker);
176 if (status)
177 return status;
178
175 drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep), 179 drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
176 0, 0, NULL); 180 0, 0, NULL);
177 if (!drc_slab) 181 if (!drc_slab)
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 44057b45ed32..e34f906647d3 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -437,6 +437,8 @@ static inline struct bcma_device *bcma_find_core(struct bcma_bus *bus,
437#ifdef CONFIG_BCMA_HOST_PCI 437#ifdef CONFIG_BCMA_HOST_PCI
438extern void bcma_host_pci_up(struct bcma_bus *bus); 438extern void bcma_host_pci_up(struct bcma_bus *bus);
439extern void bcma_host_pci_down(struct bcma_bus *bus); 439extern void bcma_host_pci_down(struct bcma_bus *bus);
440extern int bcma_host_pci_irq_ctl(struct bcma_bus *bus,
441 struct bcma_device *core, bool enable);
440#else 442#else
441static inline void bcma_host_pci_up(struct bcma_bus *bus) 443static inline void bcma_host_pci_up(struct bcma_bus *bus)
442{ 444{
@@ -444,6 +446,13 @@ static inline void bcma_host_pci_up(struct bcma_bus *bus)
444static inline void bcma_host_pci_down(struct bcma_bus *bus) 446static inline void bcma_host_pci_down(struct bcma_bus *bus)
445{ 447{
446} 448}
449static inline int bcma_host_pci_irq_ctl(struct bcma_bus *bus,
450 struct bcma_device *core, bool enable)
451{
452 if (bus->hosttype == BCMA_HOSTTYPE_PCI)
453 return -ENOTSUPP;
454 return 0;
455}
447#endif 456#endif
448 457
449extern bool bcma_core_is_enabled(struct bcma_device *core); 458extern bool bcma_core_is_enabled(struct bcma_device *core);
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 8e90004fdfd7..5ba6918ca20b 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -238,9 +238,13 @@ struct bcma_drv_pci {
238#define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val) 238#define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val)
239#define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val) 239#define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val)
240 240
241extern int bcma_core_pci_irq_ctl(struct bcma_bus *bus, 241#ifdef CONFIG_BCMA_DRIVER_PCI
242 struct bcma_device *core, bool enable);
243extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up); 242extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up);
243#else
244static inline void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
245{
246}
247#endif
244 248
245extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev); 249extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
246extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev); 250extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b4d71b5e1ff2..f4131e8ead74 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -604,6 +604,7 @@ struct inode {
604 struct mutex i_mutex; 604 struct mutex i_mutex;
605 605
606 unsigned long dirtied_when; /* jiffies of first dirtying */ 606 unsigned long dirtied_when; /* jiffies of first dirtying */
607 unsigned long dirtied_time_when;
607 608
608 struct hlist_node i_hash; 609 struct hlist_node i_hash;
609 struct list_head i_wb_list; /* backing dev IO list */ 610 struct list_head i_wb_list; /* backing dev IO list */
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 781974afff9f..ffbc034c8810 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -126,8 +126,23 @@
126#define GICR_PROPBASER_WaWb (5U << 7) 126#define GICR_PROPBASER_WaWb (5U << 7)
127#define GICR_PROPBASER_RaWaWt (6U << 7) 127#define GICR_PROPBASER_RaWaWt (6U << 7)
128#define GICR_PROPBASER_RaWaWb (7U << 7) 128#define GICR_PROPBASER_RaWaWb (7U << 7)
129#define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7)
129#define GICR_PROPBASER_IDBITS_MASK (0x1f) 130#define GICR_PROPBASER_IDBITS_MASK (0x1f)
130 131
132#define GICR_PENDBASER_NonShareable (0U << 10)
133#define GICR_PENDBASER_InnerShareable (1U << 10)
134#define GICR_PENDBASER_OuterShareable (2U << 10)
135#define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10)
136#define GICR_PENDBASER_nCnB (0U << 7)
137#define GICR_PENDBASER_nC (1U << 7)
138#define GICR_PENDBASER_RaWt (2U << 7)
139#define GICR_PENDBASER_RaWb (3U << 7)
140#define GICR_PENDBASER_WaWt (4U << 7)
141#define GICR_PENDBASER_WaWb (5U << 7)
142#define GICR_PENDBASER_RaWaWt (6U << 7)
143#define GICR_PENDBASER_RaWaWb (7U << 7)
144#define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7)
145
131/* 146/*
132 * Re-Distributor registers, offsets from SGI_base 147 * Re-Distributor registers, offsets from SGI_base
133 */ 148 */
@@ -182,6 +197,7 @@
182#define GITS_CBASER_WaWb (5UL << 59) 197#define GITS_CBASER_WaWb (5UL << 59)
183#define GITS_CBASER_RaWaWt (6UL << 59) 198#define GITS_CBASER_RaWaWt (6UL << 59)
184#define GITS_CBASER_RaWaWb (7UL << 59) 199#define GITS_CBASER_RaWaWb (7UL << 59)
200#define GITS_CBASER_CACHEABILITY_MASK (7UL << 59)
185#define GITS_CBASER_NonShareable (0UL << 10) 201#define GITS_CBASER_NonShareable (0UL << 10)
186#define GITS_CBASER_InnerShareable (1UL << 10) 202#define GITS_CBASER_InnerShareable (1UL << 10)
187#define GITS_CBASER_OuterShareable (2UL << 10) 203#define GITS_CBASER_OuterShareable (2UL << 10)
@@ -198,6 +214,7 @@
198#define GITS_BASER_WaWb (5UL << 59) 214#define GITS_BASER_WaWb (5UL << 59)
199#define GITS_BASER_RaWaWt (6UL << 59) 215#define GITS_BASER_RaWaWt (6UL << 59)
200#define GITS_BASER_RaWaWb (7UL << 59) 216#define GITS_BASER_RaWaWb (7UL << 59)
217#define GITS_BASER_CACHEABILITY_MASK (7UL << 59)
201#define GITS_BASER_TYPE_SHIFT (56) 218#define GITS_BASER_TYPE_SHIFT (56)
202#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) 219#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
203#define GITS_BASER_ENTRY_SIZE_SHIFT (48) 220#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
index 47cb09edec1a..348c6f47e4cc 100644
--- a/include/linux/jhash.h
+++ b/include/linux/jhash.h
@@ -145,11 +145,11 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
145} 145}
146 146
147 147
148/* jhash_3words - hash exactly 3, 2 or 1 word(s) */ 148/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */
149static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) 149static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
150{ 150{
151 a += JHASH_INITVAL; 151 a += initval;
152 b += JHASH_INITVAL; 152 b += initval;
153 c += initval; 153 c += initval;
154 154
155 __jhash_final(a, b, c); 155 __jhash_final(a, b, c);
@@ -157,14 +157,19 @@ static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
157 return c; 157 return c;
158} 158}
159 159
160static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
161{
162 return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2));
163}
164
160static inline u32 jhash_2words(u32 a, u32 b, u32 initval) 165static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
161{ 166{
162 return jhash_3words(a, b, 0, initval); 167 return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
163} 168}
164 169
165static inline u32 jhash_1word(u32 a, u32 initval) 170static inline u32 jhash_1word(u32 a, u32 initval)
166{ 171{
167 return jhash_3words(a, 0, 0, initval); 172 return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2));
168} 173}
169 174
170#endif /* _LINUX_JHASH_H */ 175#endif /* _LINUX_JHASH_H */
diff --git a/include/linux/lcm.h b/include/linux/lcm.h
index 7bf01d779b45..1ce79a7f1daa 100644
--- a/include/linux/lcm.h
+++ b/include/linux/lcm.h
@@ -4,5 +4,6 @@
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5 5
6unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__; 6unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__;
7unsigned long lcm_not_zero(unsigned long a, unsigned long b) __attribute_const__;
7 8
8#endif /* _LCM_H */ 9#endif /* _LCM_H */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index fc03efa64ffe..6b08cc106c21 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -232,6 +232,7 @@ enum {
232 * led */ 232 * led */
233 ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ 233 ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */
234 ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */ 234 ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */
235 ATA_FLAG_SAS_HOST = (1 << 25), /* SAS host */
235 236
236 /* bits 24:31 of ap->flags are reserved for LLD specific flags */ 237 /* bits 24:31 of ap->flags are reserved for LLD specific flags */
237 238
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index fb0390a1a498..ee7b1ce7a6f8 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -2999,6 +2999,9 @@ enum usb_irq_events {
2999#define PALMAS_GPADC_TRIM15 0x0E 2999#define PALMAS_GPADC_TRIM15 0x0E
3000#define PALMAS_GPADC_TRIM16 0x0F 3000#define PALMAS_GPADC_TRIM16 0x0F
3001 3001
3002/* TPS659038 regen2_ctrl offset iss different from palmas */
3003#define TPS659038_REGEN2_CTRL 0x12
3004
3002/* TPS65917 Interrupt registers */ 3005/* TPS65917 Interrupt registers */
3003 3006
3004/* Registers for function INTERRUPT */ 3007/* Registers for function INTERRUPT */
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 7299e9548906..f62e7cf227c6 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -68,6 +68,8 @@ enum {
68 MLX4_CMD_UNMAP_ICM_AUX = 0xffb, 68 MLX4_CMD_UNMAP_ICM_AUX = 0xffb,
69 MLX4_CMD_SET_ICM_SIZE = 0xffd, 69 MLX4_CMD_SET_ICM_SIZE = 0xffd,
70 MLX4_CMD_ACCESS_REG = 0x3b, 70 MLX4_CMD_ACCESS_REG = 0x3b,
71 MLX4_CMD_ALLOCATE_VPP = 0x80,
72 MLX4_CMD_SET_VPORT_QOS = 0x81,
71 73
72 /*master notify fw on finish for slave's flr*/ 74 /*master notify fw on finish for slave's flr*/
73 MLX4_CMD_INFORM_FLR_DONE = 0x5b, 75 MLX4_CMD_INFORM_FLR_DONE = 0x5b,
@@ -186,7 +188,14 @@ enum {
186}; 188};
187 189
188enum { 190enum {
189 /* set port opcode modifiers */ 191 /* Set port opcode modifiers */
192 MLX4_SET_PORT_IB_OPCODE = 0x0,
193 MLX4_SET_PORT_ETH_OPCODE = 0x1,
194 MLX4_SET_PORT_BEACON_OPCODE = 0x4,
195};
196
197enum {
198 /* Set port Ethernet input modifiers */
190 MLX4_SET_PORT_GENERAL = 0x0, 199 MLX4_SET_PORT_GENERAL = 0x0,
191 MLX4_SET_PORT_RQP_CALC = 0x1, 200 MLX4_SET_PORT_RQP_CALC = 0x1,
192 MLX4_SET_PORT_MAC_TABLE = 0x2, 201 MLX4_SET_PORT_MAC_TABLE = 0x2,
@@ -294,6 +303,8 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbo
294u32 mlx4_comm_get_version(void); 303u32 mlx4_comm_get_version(void);
295int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); 304int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
296int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); 305int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
306int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
307 int max_tx_rate);
297int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); 308int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
298int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf); 309int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf);
299int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); 310int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index ab7ebec943b8..f9ce34bec45b 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -49,8 +49,6 @@
49#define MSIX_LEGACY_SZ 4 49#define MSIX_LEGACY_SZ 4
50#define MIN_MSIX_P_PORT 5 50#define MIN_MSIX_P_PORT 5
51 51
52#define MLX4_NUM_UP 8
53#define MLX4_NUM_TC 8
54#define MLX4_MAX_100M_UNITS_VAL 255 /* 52#define MLX4_MAX_100M_UNITS_VAL 255 /*
55 * work around: can't set values 53 * work around: can't set values
56 * greater then this value when 54 * greater then this value when
@@ -174,6 +172,7 @@ enum {
174 MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, 172 MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41,
175 MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, 173 MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42,
176 MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48, 174 MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48,
175 MLX4_DEV_CAP_FLAG_RSS_IP_FRAG = 1LL << 52,
177 MLX4_DEV_CAP_FLAG_SET_ETH_SCHED = 1LL << 53, 176 MLX4_DEV_CAP_FLAG_SET_ETH_SCHED = 1LL << 53,
178 MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55, 177 MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55,
179 MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59, 178 MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
@@ -206,7 +205,11 @@ enum {
206 MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21, 205 MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21,
207 MLX4_DEV_CAP_FLAG2_QCN = 1LL << 22, 206 MLX4_DEV_CAP_FLAG2_QCN = 1LL << 22,
208 MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT = 1LL << 23, 207 MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT = 1LL << 23,
209 MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN = 1LL << 24 208 MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN = 1LL << 24,
209 MLX4_DEV_CAP_FLAG2_QOS_VPP = 1LL << 25,
210 MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 26,
211 MLX4_DEV_CAP_FLAG2_PORT_BEACON = 1LL << 27,
212 MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28,
210}; 213};
211 214
212enum { 215enum {
@@ -1001,6 +1004,11 @@ static inline int mlx4_is_slave(struct mlx4_dev *dev)
1001 return dev->flags & MLX4_FLAG_SLAVE; 1004 return dev->flags & MLX4_FLAG_SLAVE;
1002} 1005}
1003 1006
1007static inline int mlx4_is_eth(struct mlx4_dev *dev, int port)
1008{
1009 return dev->caps.port_type[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
1010}
1011
1004int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, 1012int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
1005 struct mlx4_buf *buf, gfp_t gfp); 1013 struct mlx4_buf *buf, gfp_t gfp);
1006void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); 1014void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
@@ -1305,9 +1313,9 @@ int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
1305 u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx); 1313 u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
1306int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, 1314int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
1307 u8 promisc); 1315 u8 promisc);
1308int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc); 1316int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
1309int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, 1317int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
1310 u8 *pg, u16 *ratelimit); 1318 u8 ignore_fcs_value);
1311int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable); 1319int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
1312int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx); 1320int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
1313int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); 1321int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 1023ebe035b7..6fed539e5456 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -209,7 +209,8 @@ struct mlx4_qp_context {
209 __be16 sq_wqe_counter; 209 __be16 sq_wqe_counter;
210 u32 reserved3; 210 u32 reserved3;
211 __be16 rate_limit_params; 211 __be16 rate_limit_params;
212 __be16 reserved4; 212 u8 reserved4;
213 u8 qos_vport;
213 __be32 param3; 214 __be32 param3;
214 __be32 nummmcpeers_basemkey; 215 __be32 nummmcpeers_basemkey;
215 u8 log_page_size; 216 u8 log_page_size;
@@ -231,6 +232,7 @@ struct mlx4_update_qp_context {
231enum { 232enum {
232 MLX4_UPD_QP_MASK_PM_STATE = 32, 233 MLX4_UPD_QP_MASK_PM_STATE = 32,
233 MLX4_UPD_QP_MASK_VSD = 33, 234 MLX4_UPD_QP_MASK_VSD = 33,
235 MLX4_UPD_QP_MASK_QOS_VPP = 34,
234 MLX4_UPD_QP_MASK_RATE_LIMIT = 35, 236 MLX4_UPD_QP_MASK_RATE_LIMIT = 35,
235}; 237};
236 238
@@ -432,7 +434,8 @@ enum mlx4_update_qp_attr {
432 MLX4_UPDATE_QP_SMAC = 1 << 0, 434 MLX4_UPDATE_QP_SMAC = 1 << 0,
433 MLX4_UPDATE_QP_VSD = 1 << 1, 435 MLX4_UPDATE_QP_VSD = 1 << 1,
434 MLX4_UPDATE_QP_RATE_LIMIT = 1 << 2, 436 MLX4_UPDATE_QP_RATE_LIMIT = 1 << 2,
435 MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 3) - 1 437 MLX4_UPDATE_QP_QOS_VPORT = 1 << 3,
438 MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 4) - 1
436}; 439};
437 440
438enum mlx4_update_qp_params_flags { 441enum mlx4_update_qp_params_flags {
@@ -441,6 +444,7 @@ enum mlx4_update_qp_params_flags {
441 444
442struct mlx4_update_qp_params { 445struct mlx4_update_qp_params {
443 u8 smac_index; 446 u8 smac_index;
447 u8 qos_vport;
444 u32 flags; 448 u32 flags;
445 u16 rate_unit; 449 u16 rate_unit;
446 u16 rate_val; 450 u16 rate_val;
diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h
index 2826a4b6071e..68cd08f02c2f 100644
--- a/include/linux/mlx5/cmd.h
+++ b/include/linux/mlx5/cmd.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index f6b17ac601bd..2695ced222df 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -137,14 +137,15 @@ enum {
137 137
138static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, 138static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
139 void __iomem *uar_page, 139 void __iomem *uar_page,
140 spinlock_t *doorbell_lock) 140 spinlock_t *doorbell_lock,
141 u32 cons_index)
141{ 142{
142 __be32 doorbell[2]; 143 __be32 doorbell[2];
143 u32 sn; 144 u32 sn;
144 u32 ci; 145 u32 ci;
145 146
146 sn = cq->arm_sn & 3; 147 sn = cq->arm_sn & 3;
147 ci = cq->cons_index & 0xffffff; 148 ci = cons_index & 0xffffff;
148 149
149 *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci); 150 *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
150 151
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 4e5bd813bb9a..abf65c790421 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h
index 163a818411e7..afc78a3f4462 100644
--- a/include/linux/mlx5/doorbell.h
+++ b/include/linux/mlx5/doorbell.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 166d9315fe4b..9a90e7523dc2 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -232,6 +232,9 @@ struct mlx5_cmd_stats {
232}; 232};
233 233
234struct mlx5_cmd { 234struct mlx5_cmd {
235 void *cmd_alloc_buf;
236 dma_addr_t alloc_dma;
237 int alloc_size;
235 void *cmd_buf; 238 void *cmd_buf;
236 dma_addr_t dma; 239 dma_addr_t dma;
237 u16 cmdif_rev; 240 u16 cmdif_rev;
@@ -407,7 +410,7 @@ struct mlx5_core_srq {
407struct mlx5_eq_table { 410struct mlx5_eq_table {
408 void __iomem *update_ci; 411 void __iomem *update_ci;
409 void __iomem *update_arm_ci; 412 void __iomem *update_arm_ci;
410 struct list_head *comp_eq_head; 413 struct list_head comp_eqs_list;
411 struct mlx5_eq pages_eq; 414 struct mlx5_eq pages_eq;
412 struct mlx5_eq async_eq; 415 struct mlx5_eq async_eq;
413 struct mlx5_eq cmd_eq; 416 struct mlx5_eq cmd_eq;
@@ -722,6 +725,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
722int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); 725int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
723int mlx5_start_eqs(struct mlx5_core_dev *dev); 726int mlx5_start_eqs(struct mlx5_core_dev *dev);
724int mlx5_stop_eqs(struct mlx5_core_dev *dev); 727int mlx5_stop_eqs(struct mlx5_core_dev *dev);
728int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);
725int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); 729int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
726int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); 730int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
727 731
@@ -777,14 +781,22 @@ enum {
777 MAX_MR_CACHE_ENTRIES = 16, 781 MAX_MR_CACHE_ENTRIES = 16,
778}; 782};
779 783
784enum {
785 MLX5_INTERFACE_PROTOCOL_IB = 0,
786 MLX5_INTERFACE_PROTOCOL_ETH = 1,
787};
788
780struct mlx5_interface { 789struct mlx5_interface {
781 void * (*add)(struct mlx5_core_dev *dev); 790 void * (*add)(struct mlx5_core_dev *dev);
782 void (*remove)(struct mlx5_core_dev *dev, void *context); 791 void (*remove)(struct mlx5_core_dev *dev, void *context);
783 void (*event)(struct mlx5_core_dev *dev, void *context, 792 void (*event)(struct mlx5_core_dev *dev, void *context,
784 enum mlx5_dev_event event, unsigned long param); 793 enum mlx5_dev_event event, unsigned long param);
794 void * (*get_dev)(void *context);
795 int protocol;
785 struct list_head list; 796 struct list_head list;
786}; 797};
787 798
799void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
788int mlx5_register_interface(struct mlx5_interface *intf); 800int mlx5_register_interface(struct mlx5_interface *intf);
789void mlx5_unregister_interface(struct mlx5_interface *intf); 801void mlx5_unregister_interface(struct mlx5_interface *intf);
790 802
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 5f48b8f592c5..cb3ad17edd1f 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 61f7a342d1bf..310b5f7fd6ae 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
index e1a363a33663..f43ed054a3e0 100644
--- a/include/linux/mlx5/srq.h
+++ b/include/linux/mlx5/srq.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 996807963716..83430f2ea757 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -33,6 +33,8 @@
33#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d 33#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
34#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 34#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
35#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 35#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
36#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
37#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
36#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 38#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
37 39
38#define SDIO_VENDOR_ID_INTEL 0x0089 40#define SDIO_VENDOR_ID_INTEL 0x0089
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 967bb4c8caf1..bf6d9df34d7b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -795,7 +795,10 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
795 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, 795 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
796 * struct net_device *dev); 796 * struct net_device *dev);
797 * Called when a packet needs to be transmitted. 797 * Called when a packet needs to be transmitted.
798 * Must return NETDEV_TX_OK , NETDEV_TX_BUSY. 798 * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
799 * the queue before that can happen; it's for obsolete devices and weird
800 * corner cases, but the stack really does a non-trivial amount
801 * of useless work if you return NETDEV_TX_BUSY.
799 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) 802 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
800 * Required can not be NULL. 803 * Required can not be NULL.
801 * 804 *
@@ -1030,6 +1033,8 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
1030 * int queue_index, u32 maxrate); 1033 * int queue_index, u32 maxrate);
1031 * Called when a user wants to set a max-rate limitation of specific 1034 * Called when a user wants to set a max-rate limitation of specific
1032 * TX queue. 1035 * TX queue.
1036 * int (*ndo_get_iflink)(const struct net_device *dev);
1037 * Called to get the iflink value of this device.
1033 */ 1038 */
1034struct net_device_ops { 1039struct net_device_ops {
1035 int (*ndo_init)(struct net_device *dev); 1040 int (*ndo_init)(struct net_device *dev);
@@ -1191,6 +1196,7 @@ struct net_device_ops {
1191 int (*ndo_set_tx_maxrate)(struct net_device *dev, 1196 int (*ndo_set_tx_maxrate)(struct net_device *dev,
1192 int queue_index, 1197 int queue_index,
1193 u32 maxrate); 1198 u32 maxrate);
1199 int (*ndo_get_iflink)(const struct net_device *dev);
1194}; 1200};
1195 1201
1196/** 1202/**
@@ -1322,7 +1328,7 @@ enum netdev_priv_flags {
1322 * @mpls_features: Mask of features inheritable by MPLS 1328 * @mpls_features: Mask of features inheritable by MPLS
1323 * 1329 *
1324 * @ifindex: interface index 1330 * @ifindex: interface index
1325 * @iflink: unique device identifier 1331 * @group: The group, that the device belongs to
1326 * 1332 *
1327 * @stats: Statistics struct, which was left as a legacy, use 1333 * @stats: Statistics struct, which was left as a legacy, use
1328 * rtnl_link_stats64 instead 1334 * rtnl_link_stats64 instead
@@ -1482,7 +1488,6 @@ enum netdev_priv_flags {
1482 * 1488 *
1483 * @qdisc_tx_busylock: XXX: need comments on this one 1489 * @qdisc_tx_busylock: XXX: need comments on this one
1484 * 1490 *
1485 * @group: The group, that the device belongs to
1486 * @pm_qos_req: Power Management QoS object 1491 * @pm_qos_req: Power Management QoS object
1487 * 1492 *
1488 * FIXME: cleanup struct net_device such that network protocol info 1493 * FIXME: cleanup struct net_device such that network protocol info
@@ -1535,7 +1540,7 @@ struct net_device {
1535 netdev_features_t mpls_features; 1540 netdev_features_t mpls_features;
1536 1541
1537 int ifindex; 1542 int ifindex;
1538 int iflink; 1543 int group;
1539 1544
1540 struct net_device_stats stats; 1545 struct net_device_stats stats;
1541 1546
@@ -1738,7 +1743,6 @@ struct net_device {
1738#endif 1743#endif
1739 struct phy_device *phydev; 1744 struct phy_device *phydev;
1740 struct lock_class_key *qdisc_tx_busylock; 1745 struct lock_class_key *qdisc_tx_busylock;
1741 int group;
1742 struct pm_qos_request pm_qos_req; 1746 struct pm_qos_request pm_qos_req;
1743}; 1747};
1744#define to_net_dev(d) container_of(d, struct net_device, dev) 1748#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2149,6 +2153,7 @@ void __dev_remove_pack(struct packet_type *pt);
2149void dev_add_offload(struct packet_offload *po); 2153void dev_add_offload(struct packet_offload *po);
2150void dev_remove_offload(struct packet_offload *po); 2154void dev_remove_offload(struct packet_offload *po);
2151 2155
2156int dev_get_iflink(const struct net_device *dev);
2152struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, 2157struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2153 unsigned short mask); 2158 unsigned short mask);
2154struct net_device *dev_get_by_name(struct net *net, const char *name); 2159struct net_device *dev_get_by_name(struct net *net, const char *name);
@@ -2159,8 +2164,12 @@ int dev_open(struct net_device *dev);
2159int dev_close(struct net_device *dev); 2164int dev_close(struct net_device *dev);
2160int dev_close_many(struct list_head *head, bool unlink); 2165int dev_close_many(struct list_head *head, bool unlink);
2161void dev_disable_lro(struct net_device *dev); 2166void dev_disable_lro(struct net_device *dev);
2162int dev_loopback_xmit(struct sk_buff *newskb); 2167int dev_loopback_xmit(struct sock *sk, struct sk_buff *newskb);
2163int dev_queue_xmit(struct sk_buff *skb); 2168int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb);
2169static inline int dev_queue_xmit(struct sk_buff *skb)
2170{
2171 return dev_queue_xmit_sk(skb->sk, skb);
2172}
2164int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); 2173int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
2165int register_netdevice(struct net_device *dev); 2174int register_netdevice(struct net_device *dev);
2166void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); 2175void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
@@ -2176,6 +2185,12 @@ void netdev_freemem(struct net_device *dev);
2176void synchronize_net(void); 2185void synchronize_net(void);
2177int init_dummy_netdev(struct net_device *dev); 2186int init_dummy_netdev(struct net_device *dev);
2178 2187
2188DECLARE_PER_CPU(int, xmit_recursion);
2189static inline int dev_recursion_level(void)
2190{
2191 return this_cpu_read(xmit_recursion);
2192}
2193
2179struct net_device *dev_get_by_index(struct net *net, int ifindex); 2194struct net_device *dev_get_by_index(struct net *net, int ifindex);
2180struct net_device *__dev_get_by_index(struct net *net, int ifindex); 2195struct net_device *__dev_get_by_index(struct net *net, int ifindex);
2181struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); 2196struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
@@ -2915,7 +2930,11 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
2915 2930
2916int netif_rx(struct sk_buff *skb); 2931int netif_rx(struct sk_buff *skb);
2917int netif_rx_ni(struct sk_buff *skb); 2932int netif_rx_ni(struct sk_buff *skb);
2918int netif_receive_skb(struct sk_buff *skb); 2933int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb);
2934static inline int netif_receive_skb(struct sk_buff *skb)
2935{
2936 return netif_receive_skb_sk(skb->sk, skb);
2937}
2919gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); 2938gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
2920void napi_gro_flush(struct napi_struct *napi, bool flush_old); 2939void napi_gro_flush(struct napi_struct *napi, bool flush_old);
2921struct sk_buff *napi_get_frags(struct napi_struct *napi); 2940struct sk_buff *napi_get_frags(struct napi_struct *napi);
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 2517ece98820..63560d0a8dfe 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -44,11 +44,39 @@ int netfilter_init(void);
44struct sk_buff; 44struct sk_buff;
45 45
46struct nf_hook_ops; 46struct nf_hook_ops;
47
48struct sock;
49
50struct nf_hook_state {
51 unsigned int hook;
52 int thresh;
53 u_int8_t pf;
54 struct net_device *in;
55 struct net_device *out;
56 struct sock *sk;
57 int (*okfn)(struct sock *, struct sk_buff *);
58};
59
60static inline void nf_hook_state_init(struct nf_hook_state *p,
61 unsigned int hook,
62 int thresh, u_int8_t pf,
63 struct net_device *indev,
64 struct net_device *outdev,
65 struct sock *sk,
66 int (*okfn)(struct sock *, struct sk_buff *))
67{
68 p->hook = hook;
69 p->thresh = thresh;
70 p->pf = pf;
71 p->in = indev;
72 p->out = outdev;
73 p->sk = sk;
74 p->okfn = okfn;
75}
76
47typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops, 77typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
48 struct sk_buff *skb, 78 struct sk_buff *skb,
49 const struct net_device *in, 79 const struct nf_hook_state *state);
50 const struct net_device *out,
51 int (*okfn)(struct sk_buff *));
52 80
53struct nf_hook_ops { 81struct nf_hook_ops {
54 struct list_head list; 82 struct list_head list;
@@ -118,9 +146,7 @@ static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
118} 146}
119#endif 147#endif
120 148
121int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, 149int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
122 struct net_device *indev, struct net_device *outdev,
123 int (*okfn)(struct sk_buff *), int thresh);
124 150
125/** 151/**
126 * nf_hook_thresh - call a netfilter hook 152 * nf_hook_thresh - call a netfilter hook
@@ -130,21 +156,29 @@ int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
130 * value indicates the packet has been consumed by the hook. 156 * value indicates the packet has been consumed by the hook.
131 */ 157 */
132static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, 158static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
159 struct sock *sk,
133 struct sk_buff *skb, 160 struct sk_buff *skb,
134 struct net_device *indev, 161 struct net_device *indev,
135 struct net_device *outdev, 162 struct net_device *outdev,
136 int (*okfn)(struct sk_buff *), int thresh) 163 int (*okfn)(struct sock *, struct sk_buff *),
164 int thresh)
137{ 165{
138 if (nf_hooks_active(pf, hook)) 166 if (nf_hooks_active(pf, hook)) {
139 return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh); 167 struct nf_hook_state state;
168
169 nf_hook_state_init(&state, hook, thresh, pf,
170 indev, outdev, sk, okfn);
171 return nf_hook_slow(skb, &state);
172 }
140 return 1; 173 return 1;
141} 174}
142 175
143static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, 176static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
144 struct net_device *indev, struct net_device *outdev, 177 struct sk_buff *skb, struct net_device *indev,
145 int (*okfn)(struct sk_buff *)) 178 struct net_device *outdev,
179 int (*okfn)(struct sock *, struct sk_buff *))
146{ 180{
147 return nf_hook_thresh(pf, hook, skb, indev, outdev, okfn, INT_MIN); 181 return nf_hook_thresh(pf, hook, sk, skb, indev, outdev, okfn, INT_MIN);
148} 182}
149 183
150/* Activate hook; either okfn or kfree_skb called, unless a hook 184/* Activate hook; either okfn or kfree_skb called, unless a hook
@@ -165,35 +199,36 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
165*/ 199*/
166 200
167static inline int 201static inline int
168NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sk_buff *skb, 202NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sock *sk,
169 struct net_device *in, struct net_device *out, 203 struct sk_buff *skb, struct net_device *in,
170 int (*okfn)(struct sk_buff *), int thresh) 204 struct net_device *out,
205 int (*okfn)(struct sock *, struct sk_buff *), int thresh)
171{ 206{
172 int ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, thresh); 207 int ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, thresh);
173 if (ret == 1) 208 if (ret == 1)
174 ret = okfn(skb); 209 ret = okfn(sk, skb);
175 return ret; 210 return ret;
176} 211}
177 212
178static inline int 213static inline int
179NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sk_buff *skb, 214NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sock *sk,
180 struct net_device *in, struct net_device *out, 215 struct sk_buff *skb, struct net_device *in, struct net_device *out,
181 int (*okfn)(struct sk_buff *), bool cond) 216 int (*okfn)(struct sock *, struct sk_buff *), bool cond)
182{ 217{
183 int ret; 218 int ret;
184 219
185 if (!cond || 220 if (!cond ||
186 ((ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN)) == 1)) 221 ((ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, INT_MIN)) == 1))
187 ret = okfn(skb); 222 ret = okfn(sk, skb);
188 return ret; 223 return ret;
189} 224}
190 225
191static inline int 226static inline int
192NF_HOOK(uint8_t pf, unsigned int hook, struct sk_buff *skb, 227NF_HOOK(uint8_t pf, unsigned int hook, struct sock *sk, struct sk_buff *skb,
193 struct net_device *in, struct net_device *out, 228 struct net_device *in, struct net_device *out,
194 int (*okfn)(struct sk_buff *)) 229 int (*okfn)(struct sock *, struct sk_buff *))
195{ 230{
196 return NF_HOOK_THRESH(pf, hook, skb, in, out, okfn, INT_MIN); 231 return NF_HOOK_THRESH(pf, hook, sk, skb, in, out, okfn, INT_MIN);
197} 232}
198 233
199/* Call setsockopt() */ 234/* Call setsockopt() */
@@ -293,19 +328,21 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
293} 328}
294 329
295#else /* !CONFIG_NETFILTER */ 330#else /* !CONFIG_NETFILTER */
296#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb) 331#define NF_HOOK(pf, hook, sk, skb, indev, outdev, okfn) (okfn)(sk, skb)
297#define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) (okfn)(skb) 332#define NF_HOOK_COND(pf, hook, sk, skb, indev, outdev, okfn, cond) (okfn)(sk, skb)
298static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, 333static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
334 struct sock *sk,
299 struct sk_buff *skb, 335 struct sk_buff *skb,
300 struct net_device *indev, 336 struct net_device *indev,
301 struct net_device *outdev, 337 struct net_device *outdev,
302 int (*okfn)(struct sk_buff *), int thresh) 338 int (*okfn)(struct sock *sk, struct sk_buff *), int thresh)
303{ 339{
304 return okfn(skb); 340 return okfn(sk, skb);
305} 341}
306static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, 342static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
307 struct net_device *indev, struct net_device *outdev, 343 struct sk_buff *skb, struct net_device *indev,
308 int (*okfn)(struct sk_buff *)) 344 struct net_device *outdev,
345 int (*okfn)(struct sock *, struct sk_buff *))
309{ 346{
310 return 1; 347 return 1;
311} 348}
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index cfb7191e6efa..c22a7fb8d0df 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -54,8 +54,7 @@ extern struct xt_table *arpt_register_table(struct net *net,
54extern void arpt_unregister_table(struct xt_table *table); 54extern void arpt_unregister_table(struct xt_table *table);
55extern unsigned int arpt_do_table(struct sk_buff *skb, 55extern unsigned int arpt_do_table(struct sk_buff *skb,
56 unsigned int hook, 56 unsigned int hook,
57 const struct net_device *in, 57 const struct nf_hook_state *state,
58 const struct net_device *out,
59 struct xt_table *table); 58 struct xt_table *table);
60 59
61#ifdef CONFIG_COMPAT 60#ifdef CONFIG_COMPAT
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 8912e8c355fd..ab8f76dba668 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -27,7 +27,7 @@ static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
27 return 0; 27 return 0;
28} 28}
29 29
30int br_handle_frame_finish(struct sk_buff *skb); 30int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
31 31
32static inline void br_drop_fake_rtable(struct sk_buff *skb) 32static inline void br_drop_fake_rtable(struct sk_buff *skb)
33{ 33{
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index 901e84db847d..4073510da485 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -65,8 +65,7 @@ struct ipt_error {
65extern void *ipt_alloc_initial_table(const struct xt_table *); 65extern void *ipt_alloc_initial_table(const struct xt_table *);
66extern unsigned int ipt_do_table(struct sk_buff *skb, 66extern unsigned int ipt_do_table(struct sk_buff *skb,
67 unsigned int hook, 67 unsigned int hook,
68 const struct net_device *in, 68 const struct nf_hook_state *state,
69 const struct net_device *out,
70 struct xt_table *table); 69 struct xt_table *table);
71 70
72#ifdef CONFIG_COMPAT 71#ifdef CONFIG_COMPAT
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 610208b18c05..b40d2b635778 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -31,8 +31,7 @@ extern struct xt_table *ip6t_register_table(struct net *net,
31extern void ip6t_unregister_table(struct net *net, struct xt_table *table); 31extern void ip6t_unregister_table(struct net *net, struct xt_table *table);
32extern unsigned int ip6t_do_table(struct sk_buff *skb, 32extern unsigned int ip6t_do_table(struct sk_buff *skb,
33 unsigned int hook, 33 unsigned int hook,
34 const struct net_device *in, 34 const struct nf_hook_state *state,
35 const struct net_device *out,
36 struct xt_table *table); 35 struct xt_table *table);
37 36
38/* Check for an extension */ 37/* Check for an extension */
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 7e75bfe37cc7..fe5732d53eda 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -21,6 +21,9 @@ extern void fixed_phy_del(int phy_addr);
21extern int fixed_phy_set_link_update(struct phy_device *phydev, 21extern int fixed_phy_set_link_update(struct phy_device *phydev,
22 int (*link_update)(struct net_device *, 22 int (*link_update)(struct net_device *,
23 struct fixed_phy_status *)); 23 struct fixed_phy_status *));
24extern int fixed_phy_update_state(struct phy_device *phydev,
25 const struct fixed_phy_status *status,
26 const struct fixed_phy_status *changed);
24#else 27#else
25static inline int fixed_phy_add(unsigned int irq, int phy_id, 28static inline int fixed_phy_add(unsigned int irq, int phy_id,
26 struct fixed_phy_status *status) 29 struct fixed_phy_status *status)
@@ -43,6 +46,12 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev,
43{ 46{
44 return -ENODEV; 47 return -ENODEV;
45} 48}
49static inline int fixed_phy_update_state(struct phy_device *phydev,
50 const struct fixed_phy_status *status,
51 const struct fixed_phy_status *changed)
52{
53 return -ENODEV;
54}
46#endif /* CONFIG_FIXED_PHY */ 55#endif /* CONFIG_FIXED_PHY */
47 56
48#endif /* __PHY_FIXED_H */ 57#endif /* __PHY_FIXED_H */
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index d4ad5b5a02bb..045f709cb89b 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -316,7 +316,7 @@ struct regulator_desc {
316 * @driver_data: private regulator data 316 * @driver_data: private regulator data
317 * @of_node: OpenFirmware node to parse for device tree bindings (may be 317 * @of_node: OpenFirmware node to parse for device tree bindings (may be
318 * NULL). 318 * NULL).
319 * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is 319 * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is
320 * insufficient. 320 * insufficient.
321 * @ena_gpio_initialized: GPIO controlling regulator enable was properly 321 * @ena_gpio_initialized: GPIO controlling regulator enable was properly
322 * initialized, meaning that >= 0 is a valid gpio 322 * initialized, meaning that >= 0 is a valid gpio
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6d77432e14ff..a419b65770d6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1625,11 +1625,11 @@ struct task_struct {
1625 1625
1626 /* 1626 /*
1627 * numa_faults_locality tracks if faults recorded during the last 1627 * numa_faults_locality tracks if faults recorded during the last
1628 * scan window were remote/local. The task scan period is adapted 1628 * scan window were remote/local or failed to migrate. The task scan
1629 * based on the locality of the faults with different weights 1629 * period is adapted based on the locality of the faults with different
1630 * depending on whether they were shared or private faults 1630 * weights depending on whether they were shared or private faults
1631 */ 1631 */
1632 unsigned long numa_faults_locality[2]; 1632 unsigned long numa_faults_locality[3];
1633 1633
1634 unsigned long numa_pages_migrated; 1634 unsigned long numa_pages_migrated;
1635#endif /* CONFIG_NUMA_BALANCING */ 1635#endif /* CONFIG_NUMA_BALANCING */
@@ -1719,6 +1719,7 @@ struct task_struct {
1719#define TNF_NO_GROUP 0x02 1719#define TNF_NO_GROUP 0x02
1720#define TNF_SHARED 0x04 1720#define TNF_SHARED 0x04
1721#define TNF_FAULT_LOCAL 0x08 1721#define TNF_FAULT_LOCAL 0x08
1722#define TNF_MIGRATE_FAIL 0x10
1722 1723
1723#ifdef CONFIG_NUMA_BALANCING 1724#ifdef CONFIG_NUMA_BALANCING
1724extern void task_numa_fault(int last_node, int node, int pages, int flags); 1725extern void task_numa_fault(int last_node, int node, int pages, int flags);
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
index c57d8ea0716c..59a7889e15db 100644
--- a/include/linux/sunrpc/debug.h
+++ b/include/linux/sunrpc/debug.h
@@ -60,17 +60,17 @@ struct rpc_xprt;
60#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 60#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
61void rpc_register_sysctl(void); 61void rpc_register_sysctl(void);
62void rpc_unregister_sysctl(void); 62void rpc_unregister_sysctl(void);
63int sunrpc_debugfs_init(void); 63void sunrpc_debugfs_init(void);
64void sunrpc_debugfs_exit(void); 64void sunrpc_debugfs_exit(void);
65int rpc_clnt_debugfs_register(struct rpc_clnt *); 65void rpc_clnt_debugfs_register(struct rpc_clnt *);
66void rpc_clnt_debugfs_unregister(struct rpc_clnt *); 66void rpc_clnt_debugfs_unregister(struct rpc_clnt *);
67int rpc_xprt_debugfs_register(struct rpc_xprt *); 67void rpc_xprt_debugfs_register(struct rpc_xprt *);
68void rpc_xprt_debugfs_unregister(struct rpc_xprt *); 68void rpc_xprt_debugfs_unregister(struct rpc_xprt *);
69#else 69#else
70static inline int 70static inline void
71sunrpc_debugfs_init(void) 71sunrpc_debugfs_init(void)
72{ 72{
73 return 0; 73 return;
74} 74}
75 75
76static inline void 76static inline void
@@ -79,10 +79,10 @@ sunrpc_debugfs_exit(void)
79 return; 79 return;
80} 80}
81 81
82static inline int 82static inline void
83rpc_clnt_debugfs_register(struct rpc_clnt *clnt) 83rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
84{ 84{
85 return 0; 85 return;
86} 86}
87 87
88static inline void 88static inline void
@@ -91,10 +91,10 @@ rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt)
91 return; 91 return;
92} 92}
93 93
94static inline int 94static inline void
95rpc_xprt_debugfs_register(struct rpc_xprt *xprt) 95rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
96{ 96{
97 return 0; 97 return;
98} 98}
99 99
100static inline void 100static inline void
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index f869ae8afbaf..0caa3a2d4106 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -58,6 +58,7 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
58struct tcp_fastopen_cookie { 58struct tcp_fastopen_cookie {
59 s8 len; 59 s8 len;
60 u8 val[TCP_FASTOPEN_COOKIE_MAX]; 60 u8 val[TCP_FASTOPEN_COOKIE_MAX];
61 bool exp; /* In RFC6994 experimental option format */
61}; 62};
62 63
63/* This defines a selective acknowledgement block. */ 64/* This defines a selective acknowledgement block. */
@@ -188,6 +189,7 @@ struct tcp_sock {
188 u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */ 189 u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */
189 syn_data:1, /* SYN includes data */ 190 syn_data:1, /* SYN includes data */
190 syn_fastopen:1, /* SYN includes Fast Open option */ 191 syn_fastopen:1, /* SYN includes Fast Open option */
192 syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
191 syn_data_acked:1,/* data in SYN is acked by SYN-ACK */ 193 syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
192 is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */ 194 is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
193 u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ 195 u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index ff3fb2bd0e90..6e0ce8c7b8cb 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -227,7 +227,7 @@ struct skb_data { /* skb->cb is one of these */
227 struct urb *urb; 227 struct urb *urb;
228 struct usbnet *dev; 228 struct usbnet *dev;
229 enum skb_state state; 229 enum skb_state state;
230 size_t length; 230 long length;
231 unsigned long packets; 231 unsigned long packets;
232}; 232};
233 233
@@ -235,11 +235,13 @@ struct skb_data { /* skb->cb is one of these */
235 * tx_fixup method before returning an skb. 235 * tx_fixup method before returning an skb.
236 */ 236 */
237static inline void 237static inline void
238usbnet_set_skb_tx_stats(struct sk_buff *skb, unsigned long packets) 238usbnet_set_skb_tx_stats(struct sk_buff *skb,
239 unsigned long packets, long bytes_delta)
239{ 240{
240 struct skb_data *entry = (struct skb_data *) skb->cb; 241 struct skb_data *entry = (struct skb_data *) skb->cb;
241 242
242 entry->packets = packets; 243 entry->packets = packets;
244 entry->length = bytes_delta;
243} 245}
244 246
245extern int usbnet_open(struct net_device *net); 247extern int usbnet_open(struct net_device *net);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 00048339c23e..b2dd371ec0ca 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -130,6 +130,7 @@ extern int vm_dirty_ratio;
130extern unsigned long vm_dirty_bytes; 130extern unsigned long vm_dirty_bytes;
131extern unsigned int dirty_writeback_interval; 131extern unsigned int dirty_writeback_interval;
132extern unsigned int dirty_expire_interval; 132extern unsigned int dirty_expire_interval;
133extern unsigned int dirtytime_expire_interval;
133extern int vm_highmem_is_dirtyable; 134extern int vm_highmem_is_dirtyable;
134extern int block_dump; 135extern int block_dump;
135extern int laptop_mode; 136extern int laptop_mode;
@@ -146,6 +147,8 @@ extern int dirty_ratio_handler(struct ctl_table *table, int write,
146extern int dirty_bytes_handler(struct ctl_table *table, int write, 147extern int dirty_bytes_handler(struct ctl_table *table, int write,
147 void __user *buffer, size_t *lenp, 148 void __user *buffer, size_t *lenp,
148 loff_t *ppos); 149 loff_t *ppos);
150int dirtytime_interval_handler(struct ctl_table *table, int write,
151 void __user *buffer, size_t *lenp, loff_t *ppos);
149 152
150struct ctl_table; 153struct ctl_table;
151int dirty_writeback_centisecs_handler(struct ctl_table *, int, 154int dirty_writeback_centisecs_handler(struct ctl_table *, int,
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 33a5e00025aa..7dba80546f16 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -269,11 +269,23 @@ struct l2cap_ctrl {
269 __u16 reqseq; 269 __u16 reqseq;
270 __u16 txseq; 270 __u16 txseq;
271 __u8 retries; 271 __u8 retries;
272 __le16 psm;
273 bdaddr_t bdaddr;
274 struct l2cap_chan *chan;
272}; 275};
273 276
274struct hci_dev; 277struct hci_dev;
275 278
276typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode); 279typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
280typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
281 u16 opcode, struct sk_buff *skb);
282
283struct req_ctrl {
284 bool start;
285 u8 event;
286 hci_req_complete_t complete;
287 hci_req_complete_skb_t complete_skb;
288};
277 289
278struct bt_skb_cb { 290struct bt_skb_cb {
279 __u8 pkt_type; 291 __u8 pkt_type;
@@ -281,13 +293,10 @@ struct bt_skb_cb {
281 __u16 opcode; 293 __u16 opcode;
282 __u16 expect; 294 __u16 expect;
283 __u8 incoming:1; 295 __u8 incoming:1;
284 __u8 req_start:1; 296 union {
285 u8 req_event; 297 struct l2cap_ctrl l2cap;
286 hci_req_complete_t req_complete; 298 struct req_ctrl req;
287 struct l2cap_chan *chan; 299 };
288 struct l2cap_ctrl control;
289 bdaddr_t bdaddr;
290 __le16 psm;
291}; 300};
292#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb)) 301#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
293 302
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 540c07feece7..93fd3e756b8a 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -326,7 +326,6 @@ struct hci_dev {
326 struct sk_buff_head raw_q; 326 struct sk_buff_head raw_q;
327 struct sk_buff_head cmd_q; 327 struct sk_buff_head cmd_q;
328 328
329 struct sk_buff *recv_evt;
330 struct sk_buff *sent_cmd; 329 struct sk_buff *sent_cmd;
331 struct sk_buff *reassembly[NUM_REASSEMBLY]; 330 struct sk_buff *reassembly[NUM_REASSEMBLY];
332 331
@@ -334,6 +333,7 @@ struct hci_dev {
334 wait_queue_head_t req_wait_q; 333 wait_queue_head_t req_wait_q;
335 __u32 req_status; 334 __u32 req_status;
336 __u32 req_result; 335 __u32 req_result;
336 struct sk_buff *req_skb;
337 337
338 void *smp_data; 338 void *smp_data;
339 void *smp_bredr_data; 339 void *smp_bredr_data;
@@ -1284,8 +1284,6 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
1284int hci_register_cb(struct hci_cb *hcb); 1284int hci_register_cb(struct hci_cb *hcb);
1285int hci_unregister_cb(struct hci_cb *hcb); 1285int hci_unregister_cb(struct hci_cb *hcb);
1286 1286
1287bool hci_req_pending(struct hci_dev *hdev);
1288
1289struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, 1287struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1290 const void *param, u32 timeout); 1288 const void *param, u32 timeout);
1291struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, 1289struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
@@ -1393,9 +1391,6 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
1393void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, 1391void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
1394 u8 status); 1392 u8 status);
1395void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status); 1393void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
1396void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
1397 u8 *rand192, u8 *hash256, u8 *rand256,
1398 u8 status);
1399void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, 1394void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
1400 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, 1395 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
1401 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len); 1396 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len);
diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h
index 0f26aa707e62..d0424269313f 100644
--- a/include/net/dn_neigh.h
+++ b/include/net/dn_neigh.h
@@ -18,11 +18,11 @@ struct dn_neigh {
18 18
19void dn_neigh_init(void); 19void dn_neigh_init(void);
20void dn_neigh_cleanup(void); 20void dn_neigh_cleanup(void);
21int dn_neigh_router_hello(struct sk_buff *skb); 21int dn_neigh_router_hello(struct sock *sk, struct sk_buff *skb);
22int dn_neigh_endnode_hello(struct sk_buff *skb); 22int dn_neigh_endnode_hello(struct sock *sk, struct sk_buff *skb);
23void dn_neigh_pointopoint_hello(struct sk_buff *skb); 23void dn_neigh_pointopoint_hello(struct sk_buff *skb);
24int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n); 24int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
25int dn_to_neigh_output(struct sk_buff *skb); 25int dn_to_neigh_output(struct sock *sk, struct sk_buff *skb);
26 26
27extern struct neigh_table dn_neigh_table; 27extern struct neigh_table dn_neigh_table;
28 28
diff --git a/include/net/ip.h b/include/net/ip.h
index d0808a323763..d14af7edd197 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -108,7 +108,8 @@ int ip_local_deliver(struct sk_buff *skb);
108int ip_mr_input(struct sk_buff *skb); 108int ip_mr_input(struct sk_buff *skb);
109int ip_output(struct sock *sk, struct sk_buff *skb); 109int ip_output(struct sock *sk, struct sk_buff *skb);
110int ip_mc_output(struct sock *sk, struct sk_buff *skb); 110int ip_mc_output(struct sock *sk, struct sk_buff *skb);
111int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); 111int ip_fragment(struct sock *sk, struct sk_buff *skb,
112 int (*output)(struct sock *, struct sk_buff *));
112int ip_do_nat(struct sk_buff *skb); 113int ip_do_nat(struct sk_buff *skb);
113void ip_send_check(struct iphdr *ip); 114void ip_send_check(struct iphdr *ip);
114int __ip_local_out(struct sk_buff *skb); 115int __ip_local_out(struct sk_buff *skb);
@@ -455,22 +456,6 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
455 456
456#endif 457#endif
457 458
458static inline int sk_mc_loop(struct sock *sk)
459{
460 if (!sk)
461 return 1;
462 switch (sk->sk_family) {
463 case AF_INET:
464 return inet_sk(sk)->mc_loop;
465#if IS_ENABLED(CONFIG_IPV6)
466 case AF_INET6:
467 return inet6_sk(sk)->mc_loop;
468#endif
469 }
470 WARN_ON(1);
471 return 1;
472}
473
474bool ip_call_ra_chain(struct sk_buff *skb); 459bool ip_call_ra_chain(struct sk_buff *skb);
475 460
476/* 461/*
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 1d09b46c1e48..5e192068e6cb 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -170,11 +170,13 @@ static inline bool ipv6_anycast_destination(const struct sk_buff *skb)
170 return rt->rt6i_flags & RTF_ANYCAST; 170 return rt->rt6i_flags & RTF_ANYCAST;
171} 171}
172 172
173int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); 173int ip6_fragment(struct sock *sk, struct sk_buff *skb,
174 int (*output)(struct sock *, struct sk_buff *));
174 175
175static inline int ip6_skb_dst_mtu(struct sk_buff *skb) 176static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
176{ 177{
177 struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; 178 struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
179 inet6_sk(skb->sk) : NULL;
178 180
179 return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ? 181 return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
180 skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); 182 skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 76c091b53dae..b8529aa1dae7 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -71,14 +71,16 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
71__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr, 71__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
72 const struct in6_addr *raddr); 72 const struct in6_addr *raddr);
73struct net *ip6_tnl_get_link_net(const struct net_device *dev); 73struct net *ip6_tnl_get_link_net(const struct net_device *dev);
74int ip6_tnl_get_iflink(const struct net_device *dev);
74 75
75static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 76static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
77 struct net_device *dev)
76{ 78{
77 struct net_device_stats *stats = &dev->stats; 79 struct net_device_stats *stats = &dev->stats;
78 int pkt_len, err; 80 int pkt_len, err;
79 81
80 pkt_len = skb->len; 82 pkt_len = skb->len;
81 err = ip6_local_out(skb); 83 err = ip6_local_out_sk(sk, skb);
82 84
83 if (net_xmit_eval(err) == 0) { 85 if (net_xmit_eval(err) == 0) {
84 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 86 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 2c47061a6954..d8214cb88bbc 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -142,6 +142,7 @@ int ip_tunnel_init(struct net_device *dev);
142void ip_tunnel_uninit(struct net_device *dev); 142void ip_tunnel_uninit(struct net_device *dev);
143void ip_tunnel_dellink(struct net_device *dev, struct list_head *head); 143void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
144struct net *ip_tunnel_get_link_net(const struct net_device *dev); 144struct net *ip_tunnel_get_link_net(const struct net_device *dev);
145int ip_tunnel_get_iflink(const struct net_device *dev);
145int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, 146int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
146 struct rtnl_link_ops *ops, char *devname); 147 struct rtnl_link_ops *ops, char *devname);
147 148
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 65142e6af440..27470cd1d5f8 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -769,7 +769,7 @@ static inline u8 ip6_tclass(__be32 flowinfo)
769int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, 769int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
770 struct packet_type *pt, struct net_device *orig_dev); 770 struct packet_type *pt, struct net_device *orig_dev);
771 771
772int ip6_rcv_finish(struct sk_buff *skb); 772int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb);
773 773
774/* 774/*
775 * upper-layer output functions 775 * upper-layer output functions
@@ -827,6 +827,7 @@ int ip6_input(struct sk_buff *skb);
827int ip6_mc_input(struct sk_buff *skb); 827int ip6_mc_input(struct sk_buff *skb);
828 828
829int __ip6_local_out(struct sk_buff *skb); 829int __ip6_local_out(struct sk_buff *skb);
830int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb);
830int ip6_local_out(struct sk_buff *skb); 831int ip6_local_out(struct sk_buff *skb);
831 832
832/* 833/*
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
index 340c013795a4..a3127325f624 100644
--- a/include/net/netfilter/nf_nat_l3proto.h
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -44,40 +44,32 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
44 unsigned int hooknum); 44 unsigned int hooknum);
45 45
46unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb, 46unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
47 const struct net_device *in, 47 const struct nf_hook_state *state,
48 const struct net_device *out,
49 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 48 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
50 struct sk_buff *skb, 49 struct sk_buff *skb,
51 const struct net_device *in, 50 const struct nf_hook_state *state,
52 const struct net_device *out,
53 struct nf_conn *ct)); 51 struct nf_conn *ct));
54 52
55unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb, 53unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
56 const struct net_device *in, 54 const struct nf_hook_state *state,
57 const struct net_device *out,
58 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 55 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
59 struct sk_buff *skb, 56 struct sk_buff *skb,
60 const struct net_device *in, 57 const struct nf_hook_state *state,
61 const struct net_device *out,
62 struct nf_conn *ct)); 58 struct nf_conn *ct));
63 59
64unsigned int nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, 60unsigned int nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
65 struct sk_buff *skb, 61 struct sk_buff *skb,
66 const struct net_device *in, 62 const struct nf_hook_state *state,
67 const struct net_device *out,
68 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 63 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
69 struct sk_buff *skb, 64 struct sk_buff *skb,
70 const struct net_device *in, 65 const struct nf_hook_state *state,
71 const struct net_device *out,
72 struct nf_conn *ct)); 66 struct nf_conn *ct));
73 67
74unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, 68unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
75 const struct net_device *in, 69 const struct nf_hook_state *state,
76 const struct net_device *out,
77 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 70 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
78 struct sk_buff *skb, 71 struct sk_buff *skb,
79 const struct net_device *in, 72 const struct nf_hook_state *state,
80 const struct net_device *out,
81 struct nf_conn *ct)); 73 struct nf_conn *ct));
82 74
83int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct, 75int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
@@ -85,40 +77,32 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
85 unsigned int hooknum, unsigned int hdrlen); 77 unsigned int hooknum, unsigned int hdrlen);
86 78
87unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb, 79unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
88 const struct net_device *in, 80 const struct nf_hook_state *state,
89 const struct net_device *out,
90 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 81 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
91 struct sk_buff *skb, 82 struct sk_buff *skb,
92 const struct net_device *in, 83 const struct nf_hook_state *state,
93 const struct net_device *out,
94 struct nf_conn *ct)); 84 struct nf_conn *ct));
95 85
96unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb, 86unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
97 const struct net_device *in, 87 const struct nf_hook_state *state,
98 const struct net_device *out,
99 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 88 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
100 struct sk_buff *skb, 89 struct sk_buff *skb,
101 const struct net_device *in, 90 const struct nf_hook_state *state,
102 const struct net_device *out,
103 struct nf_conn *ct)); 91 struct nf_conn *ct));
104 92
105unsigned int nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, 93unsigned int nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
106 struct sk_buff *skb, 94 struct sk_buff *skb,
107 const struct net_device *in, 95 const struct nf_hook_state *state,
108 const struct net_device *out,
109 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 96 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
110 struct sk_buff *skb, 97 struct sk_buff *skb,
111 const struct net_device *in, 98 const struct nf_hook_state *state,
112 const struct net_device *out,
113 struct nf_conn *ct)); 99 struct nf_conn *ct));
114 100
115unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, 101unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
116 const struct net_device *in, 102 const struct nf_hook_state *state,
117 const struct net_device *out,
118 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 103 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
119 struct sk_buff *skb, 104 struct sk_buff *skb,
120 const struct net_device *in, 105 const struct nf_hook_state *state,
121 const struct net_device *out,
122 struct nf_conn *ct)); 106 struct nf_conn *ct));
123 107
124#endif /* _NF_NAT_L3PROTO_H */ 108#endif /* _NF_NAT_L3PROTO_H */
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 84a53d780306..d81d584157e1 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -12,12 +12,8 @@ struct nf_queue_entry {
12 unsigned int id; 12 unsigned int id;
13 13
14 struct nf_hook_ops *elem; 14 struct nf_hook_ops *elem;
15 u_int8_t pf; 15 struct nf_hook_state state;
16 u16 size; /* sizeof(entry) + saved route keys */ 16 u16 size; /* sizeof(entry) + saved route keys */
17 unsigned int hook;
18 struct net_device *indev;
19 struct net_device *outdev;
20 int (*okfn)(struct sk_buff *);
21 17
22 /* extra space to store route keys */ 18 /* extra space to store route keys */
23}; 19};
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 63c44bdfdd3b..d6a2f0ed5130 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -26,12 +26,11 @@ struct nft_pktinfo {
26static inline void nft_set_pktinfo(struct nft_pktinfo *pkt, 26static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
27 const struct nf_hook_ops *ops, 27 const struct nf_hook_ops *ops,
28 struct sk_buff *skb, 28 struct sk_buff *skb,
29 const struct net_device *in, 29 const struct nf_hook_state *state)
30 const struct net_device *out)
31{ 30{
32 pkt->skb = skb; 31 pkt->skb = skb;
33 pkt->in = pkt->xt.in = in; 32 pkt->in = pkt->xt.in = state->in;
34 pkt->out = pkt->xt.out = out; 33 pkt->out = pkt->xt.out = state->out;
35 pkt->ops = ops; 34 pkt->ops = ops;
36 pkt->xt.hooknum = ops->hooknum; 35 pkt->xt.hooknum = ops->hooknum;
37 pkt->xt.family = ops->pf; 36 pkt->xt.family = ops->pf;
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index cba143fbd2e4..2df7f96902ee 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -8,12 +8,11 @@ static inline void
8nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt, 8nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
9 const struct nf_hook_ops *ops, 9 const struct nf_hook_ops *ops,
10 struct sk_buff *skb, 10 struct sk_buff *skb,
11 const struct net_device *in, 11 const struct nf_hook_state *state)
12 const struct net_device *out)
13{ 12{
14 struct iphdr *ip; 13 struct iphdr *ip;
15 14
16 nft_set_pktinfo(pkt, ops, skb, in, out); 15 nft_set_pktinfo(pkt, ops, skb, state);
17 16
18 ip = ip_hdr(pkt->skb); 17 ip = ip_hdr(pkt->skb);
19 pkt->tprot = ip->protocol; 18 pkt->tprot = ip->protocol;
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index 74d976137658..97db2e3a5e65 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -8,13 +8,12 @@ static inline int
8nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, 8nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
9 const struct nf_hook_ops *ops, 9 const struct nf_hook_ops *ops,
10 struct sk_buff *skb, 10 struct sk_buff *skb,
11 const struct net_device *in, 11 const struct nf_hook_state *state)
12 const struct net_device *out)
13{ 12{
14 int protohdr, thoff = 0; 13 int protohdr, thoff = 0;
15 unsigned short frag_off; 14 unsigned short frag_off;
16 15
17 nft_set_pktinfo(pkt, ops, skb, in, out); 16 nft_set_pktinfo(pkt, ops, skb, state);
18 17
19 protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL); 18 protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
20 /* If malformed, drop it */ 19 /* If malformed, drop it */
diff --git a/include/net/sock.h b/include/net/sock.h
index 3f9b8ce56948..bd6f523f2251 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1762,6 +1762,8 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
1762 1762
1763struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); 1763struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
1764 1764
1765bool sk_mc_loop(struct sock *sk);
1766
1765static inline bool sk_can_gso(const struct sock *sk) 1767static inline bool sk_can_gso(const struct sock *sk)
1766{ 1768{
1767 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); 1769 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 963303fb96ae..9598871485ce 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -179,6 +179,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
179#define TCPOPT_SACK 5 /* SACK Block */ 179#define TCPOPT_SACK 5 /* SACK Block */
180#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */ 180#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
181#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */ 181#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
182#define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */
182#define TCPOPT_EXP 254 /* Experimental */ 183#define TCPOPT_EXP 254 /* Experimental */
183/* Magic number to be after the option value for sharing TCP 184/* Magic number to be after the option value for sharing TCP
184 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt 185 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
@@ -194,6 +195,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
194#define TCPOLEN_SACK_PERM 2 195#define TCPOLEN_SACK_PERM 2
195#define TCPOLEN_TIMESTAMP 10 196#define TCPOLEN_TIMESTAMP 10
196#define TCPOLEN_MD5SIG 18 197#define TCPOLEN_MD5SIG 18
198#define TCPOLEN_FASTOPEN_BASE 2
197#define TCPOLEN_EXP_FASTOPEN_BASE 4 199#define TCPOLEN_EXP_FASTOPEN_BASE 4
198 200
199/* But this is what stacks really send out. */ 201/* But this is what stacks really send out. */
@@ -1337,7 +1339,8 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1337 struct tcp_fastopen_cookie *cookie, int *syn_loss, 1339 struct tcp_fastopen_cookie *cookie, int *syn_loss,
1338 unsigned long *last_syn_loss); 1340 unsigned long *last_syn_loss);
1339void tcp_fastopen_cache_set(struct sock *sk, u16 mss, 1341void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1340 struct tcp_fastopen_cookie *cookie, bool syn_lost); 1342 struct tcp_fastopen_cookie *cookie, bool syn_lost,
1343 u16 try_exp);
1341struct tcp_fastopen_request { 1344struct tcp_fastopen_request {
1342 /* Fast Open cookie. Size 0 means a cookie request */ 1345 /* Fast Open cookie. Size 0 means a cookie request */
1343 struct tcp_fastopen_cookie cookie; 1346 struct tcp_fastopen_cookie cookie;
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index 1a20d33d56bc..c491c1221606 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -77,13 +77,14 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
77 struct udp_tunnel_sock_cfg *sock_cfg); 77 struct udp_tunnel_sock_cfg *sock_cfg);
78 78
79/* Transmit the skb using UDP encapsulation. */ 79/* Transmit the skb using UDP encapsulation. */
80int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb, 80int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
81 __be32 src, __be32 dst, __u8 tos, __u8 ttl, 81 __be32 src, __be32 dst, __u8 tos, __u8 ttl,
82 __be16 df, __be16 src_port, __be16 dst_port, 82 __be16 df, __be16 src_port, __be16 dst_port,
83 bool xnet, bool nocheck); 83 bool xnet, bool nocheck);
84 84
85#if IS_ENABLED(CONFIG_IPV6) 85#if IS_ENABLED(CONFIG_IPV6)
86int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb, 86int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
87 struct sk_buff *skb,
87 struct net_device *dev, struct in6_addr *saddr, 88 struct net_device *dev, struct in6_addr *saddr,
88 struct in6_addr *daddr, 89 struct in6_addr *daddr,
89 __u8 prio, __u8 ttl, __be16 src_port, 90 __u8 prio, __u8 ttl, __be16 src_port,
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 756e4636bad8..0082b5d33d7d 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -145,7 +145,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
145 145
146void vxlan_sock_release(struct vxlan_sock *vs); 146void vxlan_sock_release(struct vxlan_sock *vs);
147 147
148int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb, 148int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
149 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, 149 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
150 __be16 src_port, __be16 dst_port, struct vxlan_metadata *md, 150 __be16 src_port, __be16 dst_port, struct vxlan_metadata *md,
151 bool xnet, u32 vxflags); 151 bool xnet, u32 vxflags);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 461f83539493..36ac102c97c7 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -332,7 +332,7 @@ struct xfrm_state_afinfo {
332 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); 332 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
333 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); 333 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
334 int (*output)(struct sock *sk, struct sk_buff *skb); 334 int (*output)(struct sock *sk, struct sk_buff *skb);
335 int (*output_finish)(struct sk_buff *skb); 335 int (*output_finish)(struct sock *sk, struct sk_buff *skb);
336 int (*extract_input)(struct xfrm_state *x, 336 int (*extract_input)(struct xfrm_state *x,
337 struct sk_buff *skb); 337 struct sk_buff *skb);
338 int (*extract_output)(struct xfrm_state *x, 338 int (*extract_output)(struct xfrm_state *x,
@@ -1503,7 +1503,7 @@ int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
1503int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); 1503int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
1504int xfrm_input_resume(struct sk_buff *skb, int nexthdr); 1504int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1505int xfrm_output_resume(struct sk_buff *skb, int err); 1505int xfrm_output_resume(struct sk_buff *skb, int err);
1506int xfrm_output(struct sk_buff *skb); 1506int xfrm_output(struct sock *sk, struct sk_buff *skb);
1507int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb); 1507int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1508void xfrm_local_error(struct sk_buff *skb, int mtu); 1508void xfrm_local_error(struct sk_buff *skb, int mtu);
1509int xfrm4_extract_header(struct sk_buff *skb); 1509int xfrm4_extract_header(struct sk_buff *skb);
@@ -1524,7 +1524,7 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1524int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); 1524int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1525int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); 1525int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1526int xfrm4_output(struct sock *sk, struct sk_buff *skb); 1526int xfrm4_output(struct sock *sk, struct sk_buff *skb);
1527int xfrm4_output_finish(struct sk_buff *skb); 1527int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb);
1528int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err); 1528int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
1529int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol); 1529int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
1530int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol); 1530int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
@@ -1549,7 +1549,7 @@ __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
1549int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); 1549int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1550int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); 1550int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1551int xfrm6_output(struct sock *sk, struct sk_buff *skb); 1551int xfrm6_output(struct sock *sk, struct sk_buff *skb);
1552int xfrm6_output_finish(struct sk_buff *skb); 1552int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb);
1553int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, 1553int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
1554 u8 **prevhdr); 1554 u8 **prevhdr);
1555 1555
diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h
index 23d561512f64..22317d2b52ab 100644
--- a/include/trace/events/regmap.h
+++ b/include/trace/events/regmap.h
@@ -7,27 +7,26 @@
7#include <linux/ktime.h> 7#include <linux/ktime.h>
8#include <linux/tracepoint.h> 8#include <linux/tracepoint.h>
9 9
10struct device; 10#include "../../../drivers/base/regmap/internal.h"
11struct regmap;
12 11
13/* 12/*
14 * Log register events 13 * Log register events
15 */ 14 */
16DECLARE_EVENT_CLASS(regmap_reg, 15DECLARE_EVENT_CLASS(regmap_reg,
17 16
18 TP_PROTO(struct device *dev, unsigned int reg, 17 TP_PROTO(struct regmap *map, unsigned int reg,
19 unsigned int val), 18 unsigned int val),
20 19
21 TP_ARGS(dev, reg, val), 20 TP_ARGS(map, reg, val),
22 21
23 TP_STRUCT__entry( 22 TP_STRUCT__entry(
24 __string( name, dev_name(dev) ) 23 __string( name, regmap_name(map) )
25 __field( unsigned int, reg ) 24 __field( unsigned int, reg )
26 __field( unsigned int, val ) 25 __field( unsigned int, val )
27 ), 26 ),
28 27
29 TP_fast_assign( 28 TP_fast_assign(
30 __assign_str(name, dev_name(dev)); 29 __assign_str(name, regmap_name(map));
31 __entry->reg = reg; 30 __entry->reg = reg;
32 __entry->val = val; 31 __entry->val = val;
33 ), 32 ),
@@ -39,45 +38,45 @@ DECLARE_EVENT_CLASS(regmap_reg,
39 38
40DEFINE_EVENT(regmap_reg, regmap_reg_write, 39DEFINE_EVENT(regmap_reg, regmap_reg_write,
41 40
42 TP_PROTO(struct device *dev, unsigned int reg, 41 TP_PROTO(struct regmap *map, unsigned int reg,
43 unsigned int val), 42 unsigned int val),
44 43
45 TP_ARGS(dev, reg, val) 44 TP_ARGS(map, reg, val)
46 45
47); 46);
48 47
49DEFINE_EVENT(regmap_reg, regmap_reg_read, 48DEFINE_EVENT(regmap_reg, regmap_reg_read,
50 49
51 TP_PROTO(struct device *dev, unsigned int reg, 50 TP_PROTO(struct regmap *map, unsigned int reg,
52 unsigned int val), 51 unsigned int val),
53 52
54 TP_ARGS(dev, reg, val) 53 TP_ARGS(map, reg, val)
55 54
56); 55);
57 56
58DEFINE_EVENT(regmap_reg, regmap_reg_read_cache, 57DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
59 58
60 TP_PROTO(struct device *dev, unsigned int reg, 59 TP_PROTO(struct regmap *map, unsigned int reg,
61 unsigned int val), 60 unsigned int val),
62 61
63 TP_ARGS(dev, reg, val) 62 TP_ARGS(map, reg, val)
64 63
65); 64);
66 65
67DECLARE_EVENT_CLASS(regmap_block, 66DECLARE_EVENT_CLASS(regmap_block,
68 67
69 TP_PROTO(struct device *dev, unsigned int reg, int count), 68 TP_PROTO(struct regmap *map, unsigned int reg, int count),
70 69
71 TP_ARGS(dev, reg, count), 70 TP_ARGS(map, reg, count),
72 71
73 TP_STRUCT__entry( 72 TP_STRUCT__entry(
74 __string( name, dev_name(dev) ) 73 __string( name, regmap_name(map) )
75 __field( unsigned int, reg ) 74 __field( unsigned int, reg )
76 __field( int, count ) 75 __field( int, count )
77 ), 76 ),
78 77
79 TP_fast_assign( 78 TP_fast_assign(
80 __assign_str(name, dev_name(dev)); 79 __assign_str(name, regmap_name(map));
81 __entry->reg = reg; 80 __entry->reg = reg;
82 __entry->count = count; 81 __entry->count = count;
83 ), 82 ),
@@ -89,48 +88,48 @@ DECLARE_EVENT_CLASS(regmap_block,
89 88
90DEFINE_EVENT(regmap_block, regmap_hw_read_start, 89DEFINE_EVENT(regmap_block, regmap_hw_read_start,
91 90
92 TP_PROTO(struct device *dev, unsigned int reg, int count), 91 TP_PROTO(struct regmap *map, unsigned int reg, int count),
93 92
94 TP_ARGS(dev, reg, count) 93 TP_ARGS(map, reg, count)
95); 94);
96 95
97DEFINE_EVENT(regmap_block, regmap_hw_read_done, 96DEFINE_EVENT(regmap_block, regmap_hw_read_done,
98 97
99 TP_PROTO(struct device *dev, unsigned int reg, int count), 98 TP_PROTO(struct regmap *map, unsigned int reg, int count),
100 99
101 TP_ARGS(dev, reg, count) 100 TP_ARGS(map, reg, count)
102); 101);
103 102
104DEFINE_EVENT(regmap_block, regmap_hw_write_start, 103DEFINE_EVENT(regmap_block, regmap_hw_write_start,
105 104
106 TP_PROTO(struct device *dev, unsigned int reg, int count), 105 TP_PROTO(struct regmap *map, unsigned int reg, int count),
107 106
108 TP_ARGS(dev, reg, count) 107 TP_ARGS(map, reg, count)
109); 108);
110 109
111DEFINE_EVENT(regmap_block, regmap_hw_write_done, 110DEFINE_EVENT(regmap_block, regmap_hw_write_done,
112 111
113 TP_PROTO(struct device *dev, unsigned int reg, int count), 112 TP_PROTO(struct regmap *map, unsigned int reg, int count),
114 113
115 TP_ARGS(dev, reg, count) 114 TP_ARGS(map, reg, count)
116); 115);
117 116
118TRACE_EVENT(regcache_sync, 117TRACE_EVENT(regcache_sync,
119 118
120 TP_PROTO(struct device *dev, const char *type, 119 TP_PROTO(struct regmap *map, const char *type,
121 const char *status), 120 const char *status),
122 121
123 TP_ARGS(dev, type, status), 122 TP_ARGS(map, type, status),
124 123
125 TP_STRUCT__entry( 124 TP_STRUCT__entry(
126 __string( name, dev_name(dev) ) 125 __string( name, regmap_name(map) )
127 __string( status, status ) 126 __string( status, status )
128 __string( type, type ) 127 __string( type, type )
129 __field( int, type ) 128 __field( int, type )
130 ), 129 ),
131 130
132 TP_fast_assign( 131 TP_fast_assign(
133 __assign_str(name, dev_name(dev)); 132 __assign_str(name, regmap_name(map));
134 __assign_str(status, status); 133 __assign_str(status, status);
135 __assign_str(type, type); 134 __assign_str(type, type);
136 ), 135 ),
@@ -141,17 +140,17 @@ TRACE_EVENT(regcache_sync,
141 140
142DECLARE_EVENT_CLASS(regmap_bool, 141DECLARE_EVENT_CLASS(regmap_bool,
143 142
144 TP_PROTO(struct device *dev, bool flag), 143 TP_PROTO(struct regmap *map, bool flag),
145 144
146 TP_ARGS(dev, flag), 145 TP_ARGS(map, flag),
147 146
148 TP_STRUCT__entry( 147 TP_STRUCT__entry(
149 __string( name, dev_name(dev) ) 148 __string( name, regmap_name(map) )
150 __field( int, flag ) 149 __field( int, flag )
151 ), 150 ),
152 151
153 TP_fast_assign( 152 TP_fast_assign(
154 __assign_str(name, dev_name(dev)); 153 __assign_str(name, regmap_name(map));
155 __entry->flag = flag; 154 __entry->flag = flag;
156 ), 155 ),
157 156
@@ -161,32 +160,32 @@ DECLARE_EVENT_CLASS(regmap_bool,
161 160
162DEFINE_EVENT(regmap_bool, regmap_cache_only, 161DEFINE_EVENT(regmap_bool, regmap_cache_only,
163 162
164 TP_PROTO(struct device *dev, bool flag), 163 TP_PROTO(struct regmap *map, bool flag),
165 164
166 TP_ARGS(dev, flag) 165 TP_ARGS(map, flag)
167 166
168); 167);
169 168
170DEFINE_EVENT(regmap_bool, regmap_cache_bypass, 169DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
171 170
172 TP_PROTO(struct device *dev, bool flag), 171 TP_PROTO(struct regmap *map, bool flag),
173 172
174 TP_ARGS(dev, flag) 173 TP_ARGS(map, flag)
175 174
176); 175);
177 176
178DECLARE_EVENT_CLASS(regmap_async, 177DECLARE_EVENT_CLASS(regmap_async,
179 178
180 TP_PROTO(struct device *dev), 179 TP_PROTO(struct regmap *map),
181 180
182 TP_ARGS(dev), 181 TP_ARGS(map),
183 182
184 TP_STRUCT__entry( 183 TP_STRUCT__entry(
185 __string( name, dev_name(dev) ) 184 __string( name, regmap_name(map) )
186 ), 185 ),
187 186
188 TP_fast_assign( 187 TP_fast_assign(
189 __assign_str(name, dev_name(dev)); 188 __assign_str(name, regmap_name(map));
190 ), 189 ),
191 190
192 TP_printk("%s", __get_str(name)) 191 TP_printk("%s", __get_str(name))
@@ -194,50 +193,50 @@ DECLARE_EVENT_CLASS(regmap_async,
194 193
195DEFINE_EVENT(regmap_block, regmap_async_write_start, 194DEFINE_EVENT(regmap_block, regmap_async_write_start,
196 195
197 TP_PROTO(struct device *dev, unsigned int reg, int count), 196 TP_PROTO(struct regmap *map, unsigned int reg, int count),
198 197
199 TP_ARGS(dev, reg, count) 198 TP_ARGS(map, reg, count)
200); 199);
201 200
202DEFINE_EVENT(regmap_async, regmap_async_io_complete, 201DEFINE_EVENT(regmap_async, regmap_async_io_complete,
203 202
204 TP_PROTO(struct device *dev), 203 TP_PROTO(struct regmap *map),
205 204
206 TP_ARGS(dev) 205 TP_ARGS(map)
207 206
208); 207);
209 208
210DEFINE_EVENT(regmap_async, regmap_async_complete_start, 209DEFINE_EVENT(regmap_async, regmap_async_complete_start,
211 210
212 TP_PROTO(struct device *dev), 211 TP_PROTO(struct regmap *map),
213 212
214 TP_ARGS(dev) 213 TP_ARGS(map)
215 214
216); 215);
217 216
218DEFINE_EVENT(regmap_async, regmap_async_complete_done, 217DEFINE_EVENT(regmap_async, regmap_async_complete_done,
219 218
220 TP_PROTO(struct device *dev), 219 TP_PROTO(struct regmap *map),
221 220
222 TP_ARGS(dev) 221 TP_ARGS(map)
223 222
224); 223);
225 224
226TRACE_EVENT(regcache_drop_region, 225TRACE_EVENT(regcache_drop_region,
227 226
228 TP_PROTO(struct device *dev, unsigned int from, 227 TP_PROTO(struct regmap *map, unsigned int from,
229 unsigned int to), 228 unsigned int to),
230 229
231 TP_ARGS(dev, from, to), 230 TP_ARGS(map, from, to),
232 231
233 TP_STRUCT__entry( 232 TP_STRUCT__entry(
234 __string( name, dev_name(dev) ) 233 __string( name, regmap_name(map) )
235 __field( unsigned int, from ) 234 __field( unsigned int, from )
236 __field( unsigned int, to ) 235 __field( unsigned int, to )
237 ), 236 ),
238 237
239 TP_fast_assign( 238 TP_fast_assign(
240 __assign_str(name, dev_name(dev)); 239 __assign_str(name, regmap_name(map));
241 __entry->from = from; 240 __entry->from = from;
242 __entry->to = to; 241 __entry->to = to;
243 ), 242 ),
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 74aab6e0d964..23df3e7f8e7d 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -168,7 +168,43 @@ enum bpf_func_id {
168 BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */ 168 BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */
169 BPF_FUNC_get_prandom_u32, /* u32 prandom_u32(void) */ 169 BPF_FUNC_get_prandom_u32, /* u32 prandom_u32(void) */
170 BPF_FUNC_get_smp_processor_id, /* u32 raw_smp_processor_id(void) */ 170 BPF_FUNC_get_smp_processor_id, /* u32 raw_smp_processor_id(void) */
171 BPF_FUNC_skb_store_bytes, /* int skb_store_bytes(skb, offset, from, len) */ 171
172 /**
173 * skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet
174 * @skb: pointer to skb
175 * @offset: offset within packet from skb->data
176 * @from: pointer where to copy bytes from
177 * @len: number of bytes to store into packet
178 * @flags: bit 0 - if true, recompute skb->csum
179 * other bits - reserved
180 * Return: 0 on success
181 */
182 BPF_FUNC_skb_store_bytes,
183
184 /**
185 * l3_csum_replace(skb, offset, from, to, flags) - recompute IP checksum
186 * @skb: pointer to skb
187 * @offset: offset within packet where IP checksum is located
188 * @from: old value of header field
189 * @to: new value of header field
190 * @flags: bits 0-3 - size of header field
191 * other bits - reserved
192 * Return: 0 on success
193 */
194 BPF_FUNC_l3_csum_replace,
195
196 /**
197 * l4_csum_replace(skb, offset, from, to, flags) - recompute TCP/UDP checksum
198 * @skb: pointer to skb
199 * @offset: offset within packet where TCP/UDP checksum is located
200 * @from: old value of header field
201 * @to: new value of header field
202 * @flags: bits 0-3 - size of header field
203 * bit 4 - is pseudo header
204 * other bits - reserved
205 * Return: 0 on success
206 */
207 BPF_FUNC_l4_csum_replace,
172 __BPF_FUNC_MAX_ID, 208 __BPF_FUNC_MAX_ID,
173}; 209};
174 210
@@ -184,6 +220,7 @@ struct __sk_buff {
184 __u32 vlan_present; 220 __u32 vlan_present;
185 __u32 vlan_tci; 221 __u32 vlan_tci;
186 __u32 vlan_proto; 222 __u32 vlan_proto;
223 __u32 priority;
187}; 224};
188 225
189#endif /* _UAPI__LINUX_BPF_H__ */ 226#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h
index 78ec76fd89a6..8735f1080385 100644
--- a/include/uapi/linux/can/raw.h
+++ b/include/uapi/linux/can/raw.h
@@ -57,6 +57,7 @@ enum {
57 CAN_RAW_LOOPBACK, /* local loopback (default:on) */ 57 CAN_RAW_LOOPBACK, /* local loopback (default:on) */
58 CAN_RAW_RECV_OWN_MSGS, /* receive my own msgs (default:off) */ 58 CAN_RAW_RECV_OWN_MSGS, /* receive my own msgs (default:off) */
59 CAN_RAW_FD_FRAMES, /* allow CAN FD frames (default:off) */ 59 CAN_RAW_FD_FRAMES, /* allow CAN FD frames (default:off) */
60 CAN_RAW_JOIN_FILTERS, /* all filters must match to trigger */
60}; 61};
61 62
62#endif /* !_UAPI_CAN_RAW_H */ 63#endif /* !_UAPI_CAN_RAW_H */
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index b0a813079852..2f62ab2d7bf9 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -973,7 +973,8 @@ struct input_keymap_entry {
973 */ 973 */
974#define MT_TOOL_FINGER 0 974#define MT_TOOL_FINGER 0
975#define MT_TOOL_PEN 1 975#define MT_TOOL_PEN 1
976#define MT_TOOL_MAX 1 976#define MT_TOOL_PALM 2
977#define MT_TOOL_MAX 2
977 978
978/* 979/*
979 * Values describing the status of a force-feedback effect 980 * Values describing the status of a force-feedback effect
diff --git a/include/uapi/linux/nfsd/export.h b/include/uapi/linux/nfsd/export.h
index 4742f2cb42f2..d3bd6ffec041 100644
--- a/include/uapi/linux/nfsd/export.h
+++ b/include/uapi/linux/nfsd/export.h
@@ -47,7 +47,7 @@
47 * exported filesystem. 47 * exported filesystem.
48 */ 48 */
49#define NFSEXP_V4ROOT 0x10000 49#define NFSEXP_V4ROOT 0x10000
50#define NFSEXP_NOPNFS 0x20000 50#define NFSEXP_PNFS 0x20000
51 51
52/* All flags that we claim to support. (Note we don't support NOACL.) */ 52/* All flags that we claim to support. (Note we don't support NOACL.) */
53#define NFSEXP_ALLFLAGS 0x3FE7F 53#define NFSEXP_ALLFLAGS 0x3FE7F
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index bea910f924dd..974db03f7b1a 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -134,6 +134,8 @@ enum {
134 134
135 RTM_NEWNSID = 88, 135 RTM_NEWNSID = 88,
136#define RTM_NEWNSID RTM_NEWNSID 136#define RTM_NEWNSID RTM_NEWNSID
137 RTM_DELNSID = 89,
138#define RTM_DELNSID RTM_DELNSID
137 RTM_GETNSID = 90, 139 RTM_GETNSID = 90,
138#define RTM_GETNSID RTM_GETNSID 140#define RTM_GETNSID RTM_GETNSID
139 141
@@ -635,6 +637,8 @@ enum rtnetlink_groups {
635#define RTNLGRP_MDB RTNLGRP_MDB 637#define RTNLGRP_MDB RTNLGRP_MDB
636 RTNLGRP_MPLS_ROUTE, 638 RTNLGRP_MPLS_ROUTE,
637#define RTNLGRP_MPLS_ROUTE RTNLGRP_MPLS_ROUTE 639#define RTNLGRP_MPLS_ROUTE RTNLGRP_MPLS_ROUTE
640 RTNLGRP_NSID,
641#define RTNLGRP_NSID RTNLGRP_NSID
638 __RTNLGRP_MAX 642 __RTNLGRP_MAX
639}; 643};
640#define RTNLGRP_MAX (__RTNLGRP_MAX - 1) 644#define RTNLGRP_MAX (__RTNLGRP_MAX - 1)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 453ef61311d4..2fabc0627165 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4574,6 +4574,13 @@ static void perf_pending_event(struct irq_work *entry)
4574{ 4574{
4575 struct perf_event *event = container_of(entry, 4575 struct perf_event *event = container_of(entry,
4576 struct perf_event, pending); 4576 struct perf_event, pending);
4577 int rctx;
4578
4579 rctx = perf_swevent_get_recursion_context();
4580 /*
4581 * If we 'fail' here, that's OK, it means recursion is already disabled
4582 * and we won't recurse 'further'.
4583 */
4577 4584
4578 if (event->pending_disable) { 4585 if (event->pending_disable) {
4579 event->pending_disable = 0; 4586 event->pending_disable = 0;
@@ -4584,6 +4591,9 @@ static void perf_pending_event(struct irq_work *entry)
4584 event->pending_wakeup = 0; 4591 event->pending_wakeup = 0;
4585 perf_event_wakeup(event); 4592 perf_event_wakeup(event);
4586 } 4593 }
4594
4595 if (rctx >= 0)
4596 perf_swevent_put_recursion_context(rctx);
4587} 4597}
4588 4598
4589/* 4599/*
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 88d0d4420ad2..ba77ab5f64dd 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -633,7 +633,7 @@ static int count_matching_names(struct lock_class *new_class)
633 if (!new_class->name) 633 if (!new_class->name)
634 return 0; 634 return 0;
635 635
636 list_for_each_entry(class, &all_lock_classes, lock_entry) { 636 list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
637 if (new_class->key - new_class->subclass == class->key) 637 if (new_class->key - new_class->subclass == class->key)
638 return class->name_version; 638 return class->name_version;
639 if (class->name && !strcmp(class->name, new_class->name)) 639 if (class->name && !strcmp(class->name, new_class->name))
@@ -700,10 +700,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
700 hash_head = classhashentry(key); 700 hash_head = classhashentry(key);
701 701
702 /* 702 /*
703 * We can walk the hash lockfree, because the hash only 703 * We do an RCU walk of the hash, see lockdep_free_key_range().
704 * grows, and we are careful when adding entries to the end:
705 */ 704 */
706 list_for_each_entry(class, hash_head, hash_entry) { 705 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
706 return NULL;
707
708 list_for_each_entry_rcu(class, hash_head, hash_entry) {
707 if (class->key == key) { 709 if (class->key == key) {
708 /* 710 /*
709 * Huh! same key, different name? Did someone trample 711 * Huh! same key, different name? Did someone trample
@@ -728,7 +730,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
728 struct lockdep_subclass_key *key; 730 struct lockdep_subclass_key *key;
729 struct list_head *hash_head; 731 struct list_head *hash_head;
730 struct lock_class *class; 732 struct lock_class *class;
731 unsigned long flags; 733
734 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
732 735
733 class = look_up_lock_class(lock, subclass); 736 class = look_up_lock_class(lock, subclass);
734 if (likely(class)) 737 if (likely(class))
@@ -750,28 +753,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
750 key = lock->key->subkeys + subclass; 753 key = lock->key->subkeys + subclass;
751 hash_head = classhashentry(key); 754 hash_head = classhashentry(key);
752 755
753 raw_local_irq_save(flags);
754 if (!graph_lock()) { 756 if (!graph_lock()) {
755 raw_local_irq_restore(flags);
756 return NULL; 757 return NULL;
757 } 758 }
758 /* 759 /*
759 * We have to do the hash-walk again, to avoid races 760 * We have to do the hash-walk again, to avoid races
760 * with another CPU: 761 * with another CPU:
761 */ 762 */
762 list_for_each_entry(class, hash_head, hash_entry) 763 list_for_each_entry_rcu(class, hash_head, hash_entry) {
763 if (class->key == key) 764 if (class->key == key)
764 goto out_unlock_set; 765 goto out_unlock_set;
766 }
767
765 /* 768 /*
766 * Allocate a new key from the static array, and add it to 769 * Allocate a new key from the static array, and add it to
767 * the hash: 770 * the hash:
768 */ 771 */
769 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { 772 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
770 if (!debug_locks_off_graph_unlock()) { 773 if (!debug_locks_off_graph_unlock()) {
771 raw_local_irq_restore(flags);
772 return NULL; 774 return NULL;
773 } 775 }
774 raw_local_irq_restore(flags);
775 776
776 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!"); 777 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
777 dump_stack(); 778 dump_stack();
@@ -798,7 +799,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
798 799
799 if (verbose(class)) { 800 if (verbose(class)) {
800 graph_unlock(); 801 graph_unlock();
801 raw_local_irq_restore(flags);
802 802
803 printk("\nnew class %p: %s", class->key, class->name); 803 printk("\nnew class %p: %s", class->key, class->name);
804 if (class->name_version > 1) 804 if (class->name_version > 1)
@@ -806,15 +806,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
806 printk("\n"); 806 printk("\n");
807 dump_stack(); 807 dump_stack();
808 808
809 raw_local_irq_save(flags);
810 if (!graph_lock()) { 809 if (!graph_lock()) {
811 raw_local_irq_restore(flags);
812 return NULL; 810 return NULL;
813 } 811 }
814 } 812 }
815out_unlock_set: 813out_unlock_set:
816 graph_unlock(); 814 graph_unlock();
817 raw_local_irq_restore(flags);
818 815
819out_set_class_cache: 816out_set_class_cache:
820 if (!subclass || force) 817 if (!subclass || force)
@@ -870,11 +867,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
870 entry->distance = distance; 867 entry->distance = distance;
871 entry->trace = *trace; 868 entry->trace = *trace;
872 /* 869 /*
873 * Since we never remove from the dependency list, the list can 870 * Both allocation and removal are done under the graph lock; but
874 * be walked lockless by other CPUs, it's only allocation 871 * iteration is under RCU-sched; see look_up_lock_class() and
875 * that must be protected by the spinlock. But this also means 872 * lockdep_free_key_range().
876 * we must make new entries visible only once writes to the
877 * entry become visible - hence the RCU op:
878 */ 873 */
879 list_add_tail_rcu(&entry->entry, head); 874 list_add_tail_rcu(&entry->entry, head);
880 875
@@ -1025,7 +1020,9 @@ static int __bfs(struct lock_list *source_entry,
1025 else 1020 else
1026 head = &lock->class->locks_before; 1021 head = &lock->class->locks_before;
1027 1022
1028 list_for_each_entry(entry, head, entry) { 1023 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1024
1025 list_for_each_entry_rcu(entry, head, entry) {
1029 if (!lock_accessed(entry)) { 1026 if (!lock_accessed(entry)) {
1030 unsigned int cq_depth; 1027 unsigned int cq_depth;
1031 mark_lock_accessed(entry, lock); 1028 mark_lock_accessed(entry, lock);
@@ -2022,7 +2019,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
2022 * We can walk it lock-free, because entries only get added 2019 * We can walk it lock-free, because entries only get added
2023 * to the hash: 2020 * to the hash:
2024 */ 2021 */
2025 list_for_each_entry(chain, hash_head, entry) { 2022 list_for_each_entry_rcu(chain, hash_head, entry) {
2026 if (chain->chain_key == chain_key) { 2023 if (chain->chain_key == chain_key) {
2027cache_hit: 2024cache_hit:
2028 debug_atomic_inc(chain_lookup_hits); 2025 debug_atomic_inc(chain_lookup_hits);
@@ -2996,8 +2993,18 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
2996 if (unlikely(!debug_locks)) 2993 if (unlikely(!debug_locks))
2997 return; 2994 return;
2998 2995
2999 if (subclass) 2996 if (subclass) {
2997 unsigned long flags;
2998
2999 if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
3000 return;
3001
3002 raw_local_irq_save(flags);
3003 current->lockdep_recursion = 1;
3000 register_lock_class(lock, subclass, 1); 3004 register_lock_class(lock, subclass, 1);
3005 current->lockdep_recursion = 0;
3006 raw_local_irq_restore(flags);
3007 }
3001} 3008}
3002EXPORT_SYMBOL_GPL(lockdep_init_map); 3009EXPORT_SYMBOL_GPL(lockdep_init_map);
3003 3010
@@ -3887,9 +3894,17 @@ static inline int within(const void *addr, void *start, unsigned long size)
3887 return addr >= start && addr < start + size; 3894 return addr >= start && addr < start + size;
3888} 3895}
3889 3896
3897/*
3898 * Used in module.c to remove lock classes from memory that is going to be
3899 * freed; and possibly re-used by other modules.
3900 *
3901 * We will have had one sync_sched() before getting here, so we're guaranteed
3902 * nobody will look up these exact classes -- they're properly dead but still
3903 * allocated.
3904 */
3890void lockdep_free_key_range(void *start, unsigned long size) 3905void lockdep_free_key_range(void *start, unsigned long size)
3891{ 3906{
3892 struct lock_class *class, *next; 3907 struct lock_class *class;
3893 struct list_head *head; 3908 struct list_head *head;
3894 unsigned long flags; 3909 unsigned long flags;
3895 int i; 3910 int i;
@@ -3905,7 +3920,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
3905 head = classhash_table + i; 3920 head = classhash_table + i;
3906 if (list_empty(head)) 3921 if (list_empty(head))
3907 continue; 3922 continue;
3908 list_for_each_entry_safe(class, next, head, hash_entry) { 3923 list_for_each_entry_rcu(class, head, hash_entry) {
3909 if (within(class->key, start, size)) 3924 if (within(class->key, start, size))
3910 zap_class(class); 3925 zap_class(class);
3911 else if (within(class->name, start, size)) 3926 else if (within(class->name, start, size))
@@ -3916,11 +3931,25 @@ void lockdep_free_key_range(void *start, unsigned long size)
3916 if (locked) 3931 if (locked)
3917 graph_unlock(); 3932 graph_unlock();
3918 raw_local_irq_restore(flags); 3933 raw_local_irq_restore(flags);
3934
3935 /*
3936 * Wait for any possible iterators from look_up_lock_class() to pass
3937 * before continuing to free the memory they refer to.
3938 *
3939 * sync_sched() is sufficient because the read-side is IRQ disable.
3940 */
3941 synchronize_sched();
3942
3943 /*
3944 * XXX at this point we could return the resources to the pool;
3945 * instead we leak them. We would need to change to bitmap allocators
3946 * instead of the linear allocators we have now.
3947 */
3919} 3948}
3920 3949
3921void lockdep_reset_lock(struct lockdep_map *lock) 3950void lockdep_reset_lock(struct lockdep_map *lock)
3922{ 3951{
3923 struct lock_class *class, *next; 3952 struct lock_class *class;
3924 struct list_head *head; 3953 struct list_head *head;
3925 unsigned long flags; 3954 unsigned long flags;
3926 int i, j; 3955 int i, j;
@@ -3948,7 +3977,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
3948 head = classhash_table + i; 3977 head = classhash_table + i;
3949 if (list_empty(head)) 3978 if (list_empty(head))
3950 continue; 3979 continue;
3951 list_for_each_entry_safe(class, next, head, hash_entry) { 3980 list_for_each_entry_rcu(class, head, hash_entry) {
3952 int match = 0; 3981 int match = 0;
3953 3982
3954 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) 3983 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
diff --git a/kernel/module.c b/kernel/module.c
index b3d634ed06c9..99fdf94efce8 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1865,7 +1865,7 @@ static void free_module(struct module *mod)
1865 kfree(mod->args); 1865 kfree(mod->args);
1866 percpu_modfree(mod); 1866 percpu_modfree(mod);
1867 1867
1868 /* Free lock-classes: */ 1868 /* Free lock-classes; relies on the preceding sync_rcu(). */
1869 lockdep_free_key_range(mod->module_core, mod->core_size); 1869 lockdep_free_key_range(mod->module_core, mod->core_size);
1870 1870
1871 /* Finally, free the core (containing the module structure) */ 1871 /* Finally, free the core (containing the module structure) */
@@ -3349,9 +3349,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
3349 module_bug_cleanup(mod); 3349 module_bug_cleanup(mod);
3350 mutex_unlock(&module_mutex); 3350 mutex_unlock(&module_mutex);
3351 3351
3352 /* Free lock-classes: */
3353 lockdep_free_key_range(mod->module_core, mod->core_size);
3354
3355 /* we can't deallocate the module until we clear memory protection */ 3352 /* we can't deallocate the module until we clear memory protection */
3356 unset_module_init_ro_nx(mod); 3353 unset_module_init_ro_nx(mod);
3357 unset_module_core_ro_nx(mod); 3354 unset_module_core_ro_nx(mod);
@@ -3375,6 +3372,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
3375 synchronize_rcu(); 3372 synchronize_rcu();
3376 mutex_unlock(&module_mutex); 3373 mutex_unlock(&module_mutex);
3377 free_module: 3374 free_module:
3375 /* Free lock-classes; relies on the preceding sync_rcu() */
3376 lockdep_free_key_range(mod->module_core, mod->core_size);
3377
3378 module_deallocate(mod, info); 3378 module_deallocate(mod, info);
3379 free_copy: 3379 free_copy:
3380 free_copy(info); 3380 free_copy(info);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f0f831e8a345..62671f53202a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3034,6 +3034,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
3034 } else { 3034 } else {
3035 if (dl_prio(oldprio)) 3035 if (dl_prio(oldprio))
3036 p->dl.dl_boosted = 0; 3036 p->dl.dl_boosted = 0;
3037 if (rt_prio(oldprio))
3038 p->rt.timeout = 0;
3037 p->sched_class = &fair_sched_class; 3039 p->sched_class = &fair_sched_class;
3038 } 3040 }
3039 3041
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7ce18f3c097a..bcfe32088b37 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1609,9 +1609,11 @@ static void update_task_scan_period(struct task_struct *p,
1609 /* 1609 /*
1610 * If there were no record hinting faults then either the task is 1610 * If there were no record hinting faults then either the task is
1611 * completely idle or all activity is areas that are not of interest 1611 * completely idle or all activity is areas that are not of interest
1612 * to automatic numa balancing. Scan slower 1612 * to automatic numa balancing. Related to that, if there were failed
1613 * migration then it implies we are migrating too quickly or the local
1614 * node is overloaded. In either case, scan slower
1613 */ 1615 */
1614 if (local + shared == 0) { 1616 if (local + shared == 0 || p->numa_faults_locality[2]) {
1615 p->numa_scan_period = min(p->numa_scan_period_max, 1617 p->numa_scan_period = min(p->numa_scan_period_max,
1616 p->numa_scan_period << 1); 1618 p->numa_scan_period << 1);
1617 1619
@@ -2080,6 +2082,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2080 2082
2081 if (migrated) 2083 if (migrated)
2082 p->numa_pages_migrated += pages; 2084 p->numa_pages_migrated += pages;
2085 if (flags & TNF_MIGRATE_FAIL)
2086 p->numa_faults_locality[2] += pages;
2083 2087
2084 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; 2088 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2085 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; 2089 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 88ea2d6e0031..ce410bb9f2e1 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1228,6 +1228,14 @@ static struct ctl_table vm_table[] = {
1228 .extra1 = &zero, 1228 .extra1 = &zero,
1229 }, 1229 },
1230 { 1230 {
1231 .procname = "dirtytime_expire_seconds",
1232 .data = &dirtytime_expire_interval,
1233 .maxlen = sizeof(dirty_expire_interval),
1234 .mode = 0644,
1235 .proc_handler = dirtytime_interval_handler,
1236 .extra1 = &zero,
1237 },
1238 {
1231 .procname = "nr_pdflush_threads", 1239 .procname = "nr_pdflush_threads",
1232 .mode = 0444 /* read-only */, 1240 .mode = 0444 /* read-only */,
1233 .proc_handler = pdflush_proc_obsolete, 1241 .proc_handler = pdflush_proc_obsolete,
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index eb682d5c697c..6aac4beedbbe 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -49,6 +49,7 @@ static void bc_set_mode(enum clock_event_mode mode,
49 */ 49 */
50static int bc_set_next(ktime_t expires, struct clock_event_device *bc) 50static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
51{ 51{
52 int bc_moved;
52 /* 53 /*
53 * We try to cancel the timer first. If the callback is on 54 * We try to cancel the timer first. If the callback is on
54 * flight on some other cpu then we let it handle it. If we 55 * flight on some other cpu then we let it handle it. If we
@@ -60,9 +61,15 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
60 * restart the timer because we are in the callback, but we 61 * restart the timer because we are in the callback, but we
61 * can set the expiry time and let the callback return 62 * can set the expiry time and let the callback return
62 * HRTIMER_RESTART. 63 * HRTIMER_RESTART.
64 *
65 * Since we are in the idle loop at this point and because
66 * hrtimer_{start/cancel} functions call into tracing,
67 * calls to these functions must be bound within RCU_NONIDLE.
63 */ 68 */
64 if (hrtimer_try_to_cancel(&bctimer) >= 0) { 69 RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ?
65 hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED); 70 !hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) :
71 0);
72 if (bc_moved) {
66 /* Bind the "device" to the cpu */ 73 /* Bind the "device" to the cpu */
67 bc->bound_on = smp_processor_id(); 74 bc->bound_on = smp_processor_id();
68 } else if (bc->bound_on == smp_processor_id()) { 75 } else if (bc->bound_on == smp_processor_id()) {
diff --git a/lib/lcm.c b/lib/lcm.c
index e97dbd51e756..03d7fcb420b5 100644
--- a/lib/lcm.c
+++ b/lib/lcm.c
@@ -12,3 +12,14 @@ unsigned long lcm(unsigned long a, unsigned long b)
12 return 0; 12 return 0;
13} 13}
14EXPORT_SYMBOL_GPL(lcm); 14EXPORT_SYMBOL_GPL(lcm);
15
16unsigned long lcm_not_zero(unsigned long a, unsigned long b)
17{
18 unsigned long l = lcm(a, b);
19
20 if (l)
21 return l;
22
23 return (b ? : a);
24}
25EXPORT_SYMBOL_GPL(lcm_not_zero);
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 76a1b59523ab..f5907d23272d 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -279,6 +279,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
279 int minlen = min_t(int, count, nla_len(src)); 279 int minlen = min_t(int, count, nla_len(src));
280 280
281 memcpy(dest, nla_data(src), minlen); 281 memcpy(dest, nla_data(src), minlen);
282 if (count > minlen)
283 memset(dest + minlen, 0, count - minlen);
282 284
283 return minlen; 285 return minlen;
284} 286}
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index a42a0d44e818..b2957540d3c7 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -44,7 +44,6 @@ static const struct rhashtable_params test_rht_params = {
44 .key_offset = offsetof(struct test_obj, value), 44 .key_offset = offsetof(struct test_obj, value),
45 .key_len = sizeof(int), 45 .key_len = sizeof(int),
46 .hashfn = jhash, 46 .hashfn = jhash,
47 .max_size = 2, /* we expand/shrink manually here */
48 .nulls_base = (3U << RHT_BASE_SHIFT), 47 .nulls_base = (3U << RHT_BASE_SHIFT),
49}; 48};
50 49
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 626e93db28ba..6817b0350c71 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1260,6 +1260,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1260 int target_nid, last_cpupid = -1; 1260 int target_nid, last_cpupid = -1;
1261 bool page_locked; 1261 bool page_locked;
1262 bool migrated = false; 1262 bool migrated = false;
1263 bool was_writable;
1263 int flags = 0; 1264 int flags = 0;
1264 1265
1265 /* A PROT_NONE fault should not end up here */ 1266 /* A PROT_NONE fault should not end up here */
@@ -1291,17 +1292,8 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1291 flags |= TNF_FAULT_LOCAL; 1292 flags |= TNF_FAULT_LOCAL;
1292 } 1293 }
1293 1294
1294 /* 1295 /* See similar comment in do_numa_page for explanation */
1295 * Avoid grouping on DSO/COW pages in specific and RO pages 1296 if (!(vma->vm_flags & VM_WRITE))
1296 * in general, RO pages shouldn't hurt as much anyway since
1297 * they can be in shared cache state.
1298 *
1299 * FIXME! This checks "pmd_dirty()" as an approximation of
1300 * "is this a read-only page", since checking "pmd_write()"
1301 * is even more broken. We haven't actually turned this into
1302 * a writable page, so pmd_write() will always be false.
1303 */
1304 if (!pmd_dirty(pmd))
1305 flags |= TNF_NO_GROUP; 1297 flags |= TNF_NO_GROUP;
1306 1298
1307 /* 1299 /*
@@ -1358,12 +1350,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1358 if (migrated) { 1350 if (migrated) {
1359 flags |= TNF_MIGRATED; 1351 flags |= TNF_MIGRATED;
1360 page_nid = target_nid; 1352 page_nid = target_nid;
1361 } 1353 } else
1354 flags |= TNF_MIGRATE_FAIL;
1362 1355
1363 goto out; 1356 goto out;
1364clear_pmdnuma: 1357clear_pmdnuma:
1365 BUG_ON(!PageLocked(page)); 1358 BUG_ON(!PageLocked(page));
1359 was_writable = pmd_write(pmd);
1366 pmd = pmd_modify(pmd, vma->vm_page_prot); 1360 pmd = pmd_modify(pmd, vma->vm_page_prot);
1361 pmd = pmd_mkyoung(pmd);
1362 if (was_writable)
1363 pmd = pmd_mkwrite(pmd);
1367 set_pmd_at(mm, haddr, pmdp, pmd); 1364 set_pmd_at(mm, haddr, pmdp, pmd);
1368 update_mmu_cache_pmd(vma, addr, pmdp); 1365 update_mmu_cache_pmd(vma, addr, pmdp);
1369 unlock_page(page); 1366 unlock_page(page);
@@ -1487,6 +1484,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1487 1484
1488 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { 1485 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
1489 pmd_t entry; 1486 pmd_t entry;
1487 bool preserve_write = prot_numa && pmd_write(*pmd);
1490 ret = 1; 1488 ret = 1;
1491 1489
1492 /* 1490 /*
@@ -1502,9 +1500,11 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1502 if (!prot_numa || !pmd_protnone(*pmd)) { 1500 if (!prot_numa || !pmd_protnone(*pmd)) {
1503 entry = pmdp_get_and_clear_notify(mm, addr, pmd); 1501 entry = pmdp_get_and_clear_notify(mm, addr, pmd);
1504 entry = pmd_modify(entry, newprot); 1502 entry = pmd_modify(entry, newprot);
1503 if (preserve_write)
1504 entry = pmd_mkwrite(entry);
1505 ret = HPAGE_PMD_NR; 1505 ret = HPAGE_PMD_NR;
1506 set_pmd_at(mm, addr, pmd, entry); 1506 set_pmd_at(mm, addr, pmd, entry);
1507 BUG_ON(pmd_write(entry)); 1507 BUG_ON(!preserve_write && pmd_write(entry));
1508 } 1508 }
1509 spin_unlock(ptl); 1509 spin_unlock(ptl);
1510 } 1510 }
diff --git a/mm/memory.c b/mm/memory.c
index 411144f977b1..97839f5c8c30 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3035,6 +3035,7 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3035 int last_cpupid; 3035 int last_cpupid;
3036 int target_nid; 3036 int target_nid;
3037 bool migrated = false; 3037 bool migrated = false;
3038 bool was_writable = pte_write(pte);
3038 int flags = 0; 3039 int flags = 0;
3039 3040
3040 /* A PROT_NONE fault should not end up here */ 3041 /* A PROT_NONE fault should not end up here */
@@ -3059,6 +3060,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3059 /* Make it present again */ 3060 /* Make it present again */
3060 pte = pte_modify(pte, vma->vm_page_prot); 3061 pte = pte_modify(pte, vma->vm_page_prot);
3061 pte = pte_mkyoung(pte); 3062 pte = pte_mkyoung(pte);
3063 if (was_writable)
3064 pte = pte_mkwrite(pte);
3062 set_pte_at(mm, addr, ptep, pte); 3065 set_pte_at(mm, addr, ptep, pte);
3063 update_mmu_cache(vma, addr, ptep); 3066 update_mmu_cache(vma, addr, ptep);
3064 3067
@@ -3069,16 +3072,14 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3069 } 3072 }
3070 3073
3071 /* 3074 /*
3072 * Avoid grouping on DSO/COW pages in specific and RO pages 3075 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
3073 * in general, RO pages shouldn't hurt as much anyway since 3076 * much anyway since they can be in shared cache state. This misses
3074 * they can be in shared cache state. 3077 * the case where a mapping is writable but the process never writes
3075 * 3078 * to it but pte_write gets cleared during protection updates and
3076 * FIXME! This checks "pmd_dirty()" as an approximation of 3079 * pte_dirty has unpredictable behaviour between PTE scan updates,
3077 * "is this a read-only page", since checking "pmd_write()" 3080 * background writeback, dirty balancing and application behaviour.
3078 * is even more broken. We haven't actually turned this into
3079 * a writable page, so pmd_write() will always be false.
3080 */ 3081 */
3081 if (!pte_dirty(pte)) 3082 if (!(vma->vm_flags & VM_WRITE))
3082 flags |= TNF_NO_GROUP; 3083 flags |= TNF_NO_GROUP;
3083 3084
3084 /* 3085 /*
@@ -3102,7 +3103,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3102 if (migrated) { 3103 if (migrated) {
3103 page_nid = target_nid; 3104 page_nid = target_nid;
3104 flags |= TNF_MIGRATED; 3105 flags |= TNF_MIGRATED;
3105 } 3106 } else
3107 flags |= TNF_MIGRATE_FAIL;
3106 3108
3107out: 3109out:
3108 if (page_nid != -1) 3110 if (page_nid != -1)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9fab10795bea..65842d688b7c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1092,6 +1092,10 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
1092 return NULL; 1092 return NULL;
1093 1093
1094 arch_refresh_nodedata(nid, pgdat); 1094 arch_refresh_nodedata(nid, pgdat);
1095 } else {
1096 /* Reset the nr_zones and classzone_idx to 0 before reuse */
1097 pgdat->nr_zones = 0;
1098 pgdat->classzone_idx = 0;
1095 } 1099 }
1096 1100
1097 /* we can use NODE_DATA(nid) from here */ 1101 /* we can use NODE_DATA(nid) from here */
@@ -1977,15 +1981,6 @@ void try_offline_node(int nid)
1977 if (is_vmalloc_addr(zone->wait_table)) 1981 if (is_vmalloc_addr(zone->wait_table))
1978 vfree(zone->wait_table); 1982 vfree(zone->wait_table);
1979 } 1983 }
1980
1981 /*
1982 * Since there is no way to guarentee the address of pgdat/zone is not
1983 * on stack of any kernel threads or used by other kernel objects
1984 * without reference counting or other symchronizing method, do not
1985 * reset node_data and free pgdat here. Just reset it to 0 and reuse
1986 * the memory when the node is online again.
1987 */
1988 memset(pgdat, 0, sizeof(*pgdat));
1989} 1984}
1990EXPORT_SYMBOL(try_offline_node); 1985EXPORT_SYMBOL(try_offline_node);
1991 1986
diff --git a/mm/mmap.c b/mm/mmap.c
index da9990acc08b..9ec50a368634 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -774,10 +774,8 @@ again: remove_next = 1 + (end > next->vm_end);
774 774
775 importer->anon_vma = exporter->anon_vma; 775 importer->anon_vma = exporter->anon_vma;
776 error = anon_vma_clone(importer, exporter); 776 error = anon_vma_clone(importer, exporter);
777 if (error) { 777 if (error)
778 importer->anon_vma = NULL;
779 return error; 778 return error;
780 }
781 } 779 }
782 } 780 }
783 781
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 44727811bf4c..88584838e704 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -75,6 +75,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
75 oldpte = *pte; 75 oldpte = *pte;
76 if (pte_present(oldpte)) { 76 if (pte_present(oldpte)) {
77 pte_t ptent; 77 pte_t ptent;
78 bool preserve_write = prot_numa && pte_write(oldpte);
78 79
79 /* 80 /*
80 * Avoid trapping faults against the zero or KSM 81 * Avoid trapping faults against the zero or KSM
@@ -94,6 +95,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
94 95
95 ptent = ptep_modify_prot_start(mm, addr, pte); 96 ptent = ptep_modify_prot_start(mm, addr, pte);
96 ptent = pte_modify(ptent, newprot); 97 ptent = pte_modify(ptent, newprot);
98 if (preserve_write)
99 ptent = pte_mkwrite(ptent);
97 100
98 /* Avoid taking write faults for known dirty pages */ 101 /* Avoid taking write faults for known dirty pages */
99 if (dirty_accountable && pte_dirty(ptent) && 102 if (dirty_accountable && pte_dirty(ptent) &&
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 45e187b2d971..644bcb665773 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -857,8 +857,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
857 * bw * elapsed + write_bandwidth * (period - elapsed) 857 * bw * elapsed + write_bandwidth * (period - elapsed)
858 * write_bandwidth = --------------------------------------------------- 858 * write_bandwidth = ---------------------------------------------------
859 * period 859 * period
860 *
861 * @written may have decreased due to account_page_redirty().
862 * Avoid underflowing @bw calculation.
860 */ 863 */
861 bw = written - bdi->written_stamp; 864 bw = written - min(written, bdi->written_stamp);
862 bw *= HZ; 865 bw *= HZ;
863 if (unlikely(elapsed > period)) { 866 if (unlikely(elapsed > period)) {
864 do_div(bw, elapsed); 867 do_div(bw, elapsed);
@@ -922,7 +925,7 @@ static void global_update_bandwidth(unsigned long thresh,
922 unsigned long now) 925 unsigned long now)
923{ 926{
924 static DEFINE_SPINLOCK(dirty_lock); 927 static DEFINE_SPINLOCK(dirty_lock);
925 static unsigned long update_time; 928 static unsigned long update_time = INITIAL_JIFFIES;
926 929
927 /* 930 /*
928 * check locklessly first to optimize away locking for the most time 931 * check locklessly first to optimize away locking for the most time
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 72f5ac381ab3..755a42c76eb4 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -103,6 +103,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
103 103
104 if (!is_migrate_isolate_page(buddy)) { 104 if (!is_migrate_isolate_page(buddy)) {
105 __isolate_free_page(page, order); 105 __isolate_free_page(page, order);
106 kernel_map_pages(page, (1 << order), 1);
106 set_page_refcounted(page); 107 set_page_refcounted(page);
107 isolated_page = page; 108 isolated_page = page;
108 } 109 }
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 75c1f2878519..29f2f8b853ae 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -265,8 +265,15 @@ int walk_page_range(unsigned long start, unsigned long end,
265 vma = vma->vm_next; 265 vma = vma->vm_next;
266 266
267 err = walk_page_test(start, next, walk); 267 err = walk_page_test(start, next, walk);
268 if (err > 0) 268 if (err > 0) {
269 /*
270 * positive return values are purely for
271 * controlling the pagewalk, so should never
272 * be passed to the callers.
273 */
274 err = 0;
269 continue; 275 continue;
276 }
270 if (err < 0) 277 if (err < 0)
271 break; 278 break;
272 } 279 }
diff --git a/mm/rmap.c b/mm/rmap.c
index 5e3e09081164..c161a14b6a8f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -287,6 +287,13 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
287 return 0; 287 return 0;
288 288
289 enomem_failure: 289 enomem_failure:
290 /*
291 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
292 * decremented in unlink_anon_vmas().
293 * We can safely do this because callers of anon_vma_clone() don't care
294 * about dst->anon_vma if anon_vma_clone() failed.
295 */
296 dst->anon_vma = NULL;
290 unlink_anon_vmas(dst); 297 unlink_anon_vmas(dst);
291 return -ENOMEM; 298 return -ENOMEM;
292} 299}
diff --git a/mm/slub.c b/mm/slub.c
index 6832c4eab104..82c473780c91 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2449,7 +2449,8 @@ redo:
2449 do { 2449 do {
2450 tid = this_cpu_read(s->cpu_slab->tid); 2450 tid = this_cpu_read(s->cpu_slab->tid);
2451 c = raw_cpu_ptr(s->cpu_slab); 2451 c = raw_cpu_ptr(s->cpu_slab);
2452 } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid)); 2452 } while (IS_ENABLED(CONFIG_PREEMPT) &&
2453 unlikely(tid != READ_ONCE(c->tid)));
2453 2454
2454 /* 2455 /*
2455 * Irqless object alloc/free algorithm used here depends on sequence 2456 * Irqless object alloc/free algorithm used here depends on sequence
@@ -2718,7 +2719,8 @@ redo:
2718 do { 2719 do {
2719 tid = this_cpu_read(s->cpu_slab->tid); 2720 tid = this_cpu_read(s->cpu_slab->tid);
2720 c = raw_cpu_ptr(s->cpu_slab); 2721 c = raw_cpu_ptr(s->cpu_slab);
2721 } while (IS_ENABLED(CONFIG_PREEMPT) && unlikely(tid != c->tid)); 2722 } while (IS_ENABLED(CONFIG_PREEMPT) &&
2723 unlikely(tid != READ_ONCE(c->tid)));
2722 2724
2723 /* Same with comment on barrier() in slab_alloc_node() */ 2725 /* Same with comment on barrier() in slab_alloc_node() */
2724 barrier(); 2726 barrier();
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 8b5ab9033b41..01d7ba840df8 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -538,7 +538,6 @@ static int vlan_dev_init(struct net_device *dev)
538 /* IFF_BROADCAST|IFF_MULTICAST; ??? */ 538 /* IFF_BROADCAST|IFF_MULTICAST; ??? */
539 dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | 539 dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
540 IFF_MASTER | IFF_SLAVE); 540 IFF_MASTER | IFF_SLAVE);
541 dev->iflink = real_dev->ifindex;
542 dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) | 541 dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
543 (1<<__LINK_STATE_DORMANT))) | 542 (1<<__LINK_STATE_DORMANT))) |
544 (1<<__LINK_STATE_PRESENT); 543 (1<<__LINK_STATE_PRESENT);
@@ -733,6 +732,13 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
733} 732}
734#endif /* CONFIG_NET_POLL_CONTROLLER */ 733#endif /* CONFIG_NET_POLL_CONTROLLER */
735 734
735static int vlan_dev_get_iflink(const struct net_device *dev)
736{
737 struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
738
739 return real_dev->ifindex;
740}
741
736static const struct ethtool_ops vlan_ethtool_ops = { 742static const struct ethtool_ops vlan_ethtool_ops = {
737 .get_settings = vlan_ethtool_get_settings, 743 .get_settings = vlan_ethtool_get_settings,
738 .get_drvinfo = vlan_ethtool_get_drvinfo, 744 .get_drvinfo = vlan_ethtool_get_drvinfo,
@@ -769,6 +775,7 @@ static const struct net_device_ops vlan_netdev_ops = {
769#endif 775#endif
770 .ndo_fix_features = vlan_dev_fix_features, 776 .ndo_fix_features = vlan_dev_fix_features,
771 .ndo_get_lock_subclass = vlan_dev_get_lock_subclass, 777 .ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
778 .ndo_get_iflink = vlan_dev_get_iflink,
772}; 779};
773 780
774static void vlan_dev_free(struct net_device *dev) 781static void vlan_dev_free(struct net_device *dev)
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index fbda6b54baff..baf1f9843f2c 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -83,11 +83,12 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
83 return true; 83 return true;
84 84
85 /* no more parents..stop recursion */ 85 /* no more parents..stop recursion */
86 if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex) 86 if (dev_get_iflink(net_dev) == 0 ||
87 dev_get_iflink(net_dev) == net_dev->ifindex)
87 return false; 88 return false;
88 89
89 /* recurse over the parent device */ 90 /* recurse over the parent device */
90 parent_dev = __dev_get_by_index(&init_net, net_dev->iflink); 91 parent_dev = __dev_get_by_index(&init_net, dev_get_iflink(net_dev));
91 /* if we got a NULL parent_dev there is something broken.. */ 92 /* if we got a NULL parent_dev there is something broken.. */
92 if (WARN(!parent_dev, "Cannot find parent device")) 93 if (WARN(!parent_dev, "Cannot find parent device"))
93 return false; 94 return false;
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 5a5b16f365e9..40854c99bc1e 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -111,6 +111,10 @@ struct bnep_ext_hdr {
111#define BNEPCONNDEL _IOW('B', 201, int) 111#define BNEPCONNDEL _IOW('B', 201, int)
112#define BNEPGETCONNLIST _IOR('B', 210, int) 112#define BNEPGETCONNLIST _IOR('B', 210, int)
113#define BNEPGETCONNINFO _IOR('B', 211, int) 113#define BNEPGETCONNINFO _IOR('B', 211, int)
114#define BNEPGETSUPPFEAT _IOR('B', 212, int)
115
116#define BNEP_SETUP_RESPONSE 0
117#define BNEP_SETUP_RSP_SENT 10
114 118
115struct bnep_connadd_req { 119struct bnep_connadd_req {
116 int sock; /* Connected socket */ 120 int sock; /* Connected socket */
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 05f57e491ccb..1641367e54ca 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -231,7 +231,14 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len)
231 break; 231 break;
232 232
233 case BNEP_SETUP_CONN_REQ: 233 case BNEP_SETUP_CONN_REQ:
234 err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP, BNEP_CONN_NOT_ALLOWED); 234 /* Successful response should be sent only once */
235 if (test_bit(BNEP_SETUP_RESPONSE, &s->flags) &&
236 !test_and_set_bit(BNEP_SETUP_RSP_SENT, &s->flags))
237 err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP,
238 BNEP_SUCCESS);
239 else
240 err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP,
241 BNEP_CONN_NOT_ALLOWED);
235 break; 242 break;
236 243
237 default: { 244 default: {
@@ -239,7 +246,7 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len)
239 pkt[0] = BNEP_CONTROL; 246 pkt[0] = BNEP_CONTROL;
240 pkt[1] = BNEP_CMD_NOT_UNDERSTOOD; 247 pkt[1] = BNEP_CMD_NOT_UNDERSTOOD;
241 pkt[2] = cmd; 248 pkt[2] = cmd;
242 bnep_send(s, pkt, sizeof(pkt)); 249 err = bnep_send(s, pkt, sizeof(pkt));
243 } 250 }
244 break; 251 break;
245 } 252 }
@@ -292,29 +299,55 @@ static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
292{ 299{
293 struct net_device *dev = s->dev; 300 struct net_device *dev = s->dev;
294 struct sk_buff *nskb; 301 struct sk_buff *nskb;
295 u8 type; 302 u8 type, ctrl_type;
296 303
297 dev->stats.rx_bytes += skb->len; 304 dev->stats.rx_bytes += skb->len;
298 305
299 type = *(u8 *) skb->data; 306 type = *(u8 *) skb->data;
300 skb_pull(skb, 1); 307 skb_pull(skb, 1);
308 ctrl_type = *(u8 *)skb->data;
301 309
302 if ((type & BNEP_TYPE_MASK) >= sizeof(__bnep_rx_hlen)) 310 if ((type & BNEP_TYPE_MASK) >= sizeof(__bnep_rx_hlen))
303 goto badframe; 311 goto badframe;
304 312
305 if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) { 313 if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) {
306 bnep_rx_control(s, skb->data, skb->len); 314 if (bnep_rx_control(s, skb->data, skb->len) < 0) {
307 kfree_skb(skb); 315 dev->stats.tx_errors++;
308 return 0; 316 kfree_skb(skb);
309 } 317 return 0;
318 }
310 319
311 skb_reset_mac_header(skb); 320 if (!(type & BNEP_EXT_HEADER)) {
321 kfree_skb(skb);
322 return 0;
323 }
312 324
313 /* Verify and pull out header */ 325 /* Verify and pull ctrl message since it's already processed */
314 if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK])) 326 switch (ctrl_type) {
315 goto badframe; 327 case BNEP_SETUP_CONN_REQ:
328 /* Pull: ctrl type (1 b), len (1 b), data (len bytes) */
329 if (!skb_pull(skb, 2 + *(u8 *)(skb->data + 1) * 2))
330 goto badframe;
331 break;
332 case BNEP_FILTER_MULTI_ADDR_SET:
333 case BNEP_FILTER_NET_TYPE_SET:
334 /* Pull: ctrl type (1 b), len (2 b), data (len bytes) */
335 if (!skb_pull(skb, 3 + *(u16 *)(skb->data + 1) * 2))
336 goto badframe;
337 break;
338 default:
339 kfree_skb(skb);
340 return 0;
341 }
342 } else {
343 skb_reset_mac_header(skb);
316 344
317 s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2)); 345 /* Verify and pull out header */
346 if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK]))
347 goto badframe;
348
349 s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2));
350 }
318 351
319 if (type & BNEP_EXT_HEADER) { 352 if (type & BNEP_EXT_HEADER) {
320 if (bnep_rx_extension(s, skb) < 0) 353 if (bnep_rx_extension(s, skb) < 0)
@@ -525,6 +558,7 @@ static struct device_type bnep_type = {
525 558
526int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock) 559int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
527{ 560{
561 u32 valid_flags = BIT(BNEP_SETUP_RESPONSE);
528 struct net_device *dev; 562 struct net_device *dev;
529 struct bnep_session *s, *ss; 563 struct bnep_session *s, *ss;
530 u8 dst[ETH_ALEN], src[ETH_ALEN]; 564 u8 dst[ETH_ALEN], src[ETH_ALEN];
@@ -535,6 +569,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
535 if (!l2cap_is_socket(sock)) 569 if (!l2cap_is_socket(sock))
536 return -EBADFD; 570 return -EBADFD;
537 571
572 if (req->flags & ~valid_flags)
573 return -EINVAL;
574
538 baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst); 575 baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
539 baswap((void *) src, &l2cap_pi(sock->sk)->chan->src); 576 baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);
540 577
@@ -566,6 +603,7 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
566 s->sock = sock; 603 s->sock = sock;
567 s->role = req->role; 604 s->role = req->role;
568 s->state = BT_CONNECTED; 605 s->state = BT_CONNECTED;
606 s->flags = req->flags;
569 607
570 s->msg.msg_flags = MSG_NOSIGNAL; 608 s->msg.msg_flags = MSG_NOSIGNAL;
571 609
@@ -611,11 +649,15 @@ failed:
611 649
612int bnep_del_connection(struct bnep_conndel_req *req) 650int bnep_del_connection(struct bnep_conndel_req *req)
613{ 651{
652 u32 valid_flags = 0;
614 struct bnep_session *s; 653 struct bnep_session *s;
615 int err = 0; 654 int err = 0;
616 655
617 BT_DBG(""); 656 BT_DBG("");
618 657
658 if (req->flags & ~valid_flags)
659 return -EINVAL;
660
619 down_read(&bnep_session_sem); 661 down_read(&bnep_session_sem);
620 662
621 s = __bnep_get_session(req->dst); 663 s = __bnep_get_session(req->dst);
@@ -631,10 +673,12 @@ int bnep_del_connection(struct bnep_conndel_req *req)
631 673
632static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s) 674static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s)
633{ 675{
676 u32 valid_flags = BIT(BNEP_SETUP_RESPONSE);
677
634 memset(ci, 0, sizeof(*ci)); 678 memset(ci, 0, sizeof(*ci));
635 memcpy(ci->dst, s->eh.h_source, ETH_ALEN); 679 memcpy(ci->dst, s->eh.h_source, ETH_ALEN);
636 strcpy(ci->device, s->dev->name); 680 strcpy(ci->device, s->dev->name);
637 ci->flags = s->flags; 681 ci->flags = s->flags & valid_flags;
638 ci->state = s->state; 682 ci->state = s->state;
639 ci->role = s->role; 683 ci->role = s->role;
640} 684}
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 5f051290daba..bde2bdd9e929 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -57,6 +57,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
57 struct bnep_conninfo ci; 57 struct bnep_conninfo ci;
58 struct socket *nsock; 58 struct socket *nsock;
59 void __user *argp = (void __user *)arg; 59 void __user *argp = (void __user *)arg;
60 __u32 supp_feat = BIT(BNEP_SETUP_RESPONSE);
60 int err; 61 int err;
61 62
62 BT_DBG("cmd %x arg %lx", cmd, arg); 63 BT_DBG("cmd %x arg %lx", cmd, arg);
@@ -120,6 +121,12 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
120 121
121 return err; 122 return err;
122 123
124 case BNEPGETSUPPFEAT:
125 if (copy_to_user(argp, &supp_feat, sizeof(supp_feat)))
126 return -EFAULT;
127
128 return 0;
129
123 default: 130 default:
124 return -EINVAL; 131 return -EINVAL;
125 } 132 }
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 75bd2c42e3e7..b0c6c6af76ef 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -333,7 +333,7 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
333 return; 333 return;
334 } 334 }
335 335
336 if (session->flags & (1 << CMTP_LOOPBACK)) { 336 if (session->flags & BIT(CMTP_LOOPBACK)) {
337 kfree_skb(skb); 337 kfree_skb(skb);
338 return; 338 return;
339 } 339 }
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 278a194e6af4..298ed37010e6 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -75,10 +75,11 @@ static void __cmtp_unlink_session(struct cmtp_session *session)
75 75
76static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci) 76static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
77{ 77{
78 u32 valid_flags = BIT(CMTP_LOOPBACK);
78 memset(ci, 0, sizeof(*ci)); 79 memset(ci, 0, sizeof(*ci));
79 bacpy(&ci->bdaddr, &session->bdaddr); 80 bacpy(&ci->bdaddr, &session->bdaddr);
80 81
81 ci->flags = session->flags; 82 ci->flags = session->flags & valid_flags;
82 ci->state = session->state; 83 ci->state = session->state;
83 84
84 ci->num = session->num; 85 ci->num = session->num;
@@ -313,7 +314,7 @@ static int cmtp_session(void *arg)
313 314
314 down_write(&cmtp_session_sem); 315 down_write(&cmtp_session_sem);
315 316
316 if (!(session->flags & (1 << CMTP_LOOPBACK))) 317 if (!(session->flags & BIT(CMTP_LOOPBACK)))
317 cmtp_detach_device(session); 318 cmtp_detach_device(session);
318 319
319 fput(session->sock->file); 320 fput(session->sock->file);
@@ -329,6 +330,7 @@ static int cmtp_session(void *arg)
329 330
330int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) 331int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
331{ 332{
333 u32 valid_flags = BIT(CMTP_LOOPBACK);
332 struct cmtp_session *session, *s; 334 struct cmtp_session *session, *s;
333 int i, err; 335 int i, err;
334 336
@@ -337,6 +339,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
337 if (!l2cap_is_socket(sock)) 339 if (!l2cap_is_socket(sock))
338 return -EBADFD; 340 return -EBADFD;
339 341
342 if (req->flags & ~valid_flags)
343 return -EINVAL;
344
340 session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL); 345 session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
341 if (!session) 346 if (!session)
342 return -ENOMEM; 347 return -ENOMEM;
@@ -385,7 +390,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
385 goto unlink; 390 goto unlink;
386 } 391 }
387 392
388 if (!(session->flags & (1 << CMTP_LOOPBACK))) { 393 if (!(session->flags & BIT(CMTP_LOOPBACK))) {
389 err = cmtp_attach_device(session); 394 err = cmtp_attach_device(session);
390 if (err < 0) { 395 if (err < 0) {
391 atomic_inc(&session->terminate); 396 atomic_inc(&session->terminate);
@@ -409,11 +414,15 @@ failed:
409 414
410int cmtp_del_connection(struct cmtp_conndel_req *req) 415int cmtp_del_connection(struct cmtp_conndel_req *req)
411{ 416{
417 u32 valid_flags = 0;
412 struct cmtp_session *session; 418 struct cmtp_session *session;
413 int err = 0; 419 int err = 0;
414 420
415 BT_DBG(""); 421 BT_DBG("");
416 422
423 if (req->flags & ~valid_flags)
424 return -EINVAL;
425
417 down_read(&cmtp_session_sem); 426 down_read(&cmtp_session_sem);
418 427
419 session = __cmtp_get_session(&req->bdaddr); 428 session = __cmtp_get_session(&req->bdaddr);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index e6bfeb7b4415..46b114c0140b 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -141,13 +141,16 @@ static const struct file_operations dut_mode_fops = {
141 141
142/* ---- HCI requests ---- */ 142/* ---- HCI requests ---- */
143 143
144static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode) 144static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
145 struct sk_buff *skb)
145{ 146{
146 BT_DBG("%s result 0x%2.2x", hdev->name, result); 147 BT_DBG("%s result 0x%2.2x", hdev->name, result);
147 148
148 if (hdev->req_status == HCI_REQ_PEND) { 149 if (hdev->req_status == HCI_REQ_PEND) {
149 hdev->req_result = result; 150 hdev->req_result = result;
150 hdev->req_status = HCI_REQ_DONE; 151 hdev->req_status = HCI_REQ_DONE;
152 if (skb)
153 hdev->req_skb = skb_get(skb);
151 wake_up_interruptible(&hdev->req_wait_q); 154 wake_up_interruptible(&hdev->req_wait_q);
152 } 155 }
153} 156}
@@ -163,66 +166,12 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
163 } 166 }
164} 167}
165 168
166static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
167 u8 event)
168{
169 struct hci_ev_cmd_complete *ev;
170 struct hci_event_hdr *hdr;
171 struct sk_buff *skb;
172
173 hci_dev_lock(hdev);
174
175 skb = hdev->recv_evt;
176 hdev->recv_evt = NULL;
177
178 hci_dev_unlock(hdev);
179
180 if (!skb)
181 return ERR_PTR(-ENODATA);
182
183 if (skb->len < sizeof(*hdr)) {
184 BT_ERR("Too short HCI event");
185 goto failed;
186 }
187
188 hdr = (void *) skb->data;
189 skb_pull(skb, HCI_EVENT_HDR_SIZE);
190
191 if (event) {
192 if (hdr->evt != event)
193 goto failed;
194 return skb;
195 }
196
197 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
198 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
199 goto failed;
200 }
201
202 if (skb->len < sizeof(*ev)) {
203 BT_ERR("Too short cmd_complete event");
204 goto failed;
205 }
206
207 ev = (void *) skb->data;
208 skb_pull(skb, sizeof(*ev));
209
210 if (opcode == __le16_to_cpu(ev->opcode))
211 return skb;
212
213 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
214 __le16_to_cpu(ev->opcode));
215
216failed:
217 kfree_skb(skb);
218 return ERR_PTR(-ENODATA);
219}
220
221struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, 169struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
222 const void *param, u8 event, u32 timeout) 170 const void *param, u8 event, u32 timeout)
223{ 171{
224 DECLARE_WAITQUEUE(wait, current); 172 DECLARE_WAITQUEUE(wait, current);
225 struct hci_request req; 173 struct hci_request req;
174 struct sk_buff *skb;
226 int err = 0; 175 int err = 0;
227 176
228 BT_DBG("%s", hdev->name); 177 BT_DBG("%s", hdev->name);
@@ -236,7 +185,7 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
236 add_wait_queue(&hdev->req_wait_q, &wait); 185 add_wait_queue(&hdev->req_wait_q, &wait);
237 set_current_state(TASK_INTERRUPTIBLE); 186 set_current_state(TASK_INTERRUPTIBLE);
238 187
239 err = hci_req_run(&req, hci_req_sync_complete); 188 err = hci_req_run_skb(&req, hci_req_sync_complete);
240 if (err < 0) { 189 if (err < 0) {
241 remove_wait_queue(&hdev->req_wait_q, &wait); 190 remove_wait_queue(&hdev->req_wait_q, &wait);
242 set_current_state(TASK_RUNNING); 191 set_current_state(TASK_RUNNING);
@@ -265,13 +214,20 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
265 } 214 }
266 215
267 hdev->req_status = hdev->req_result = 0; 216 hdev->req_status = hdev->req_result = 0;
217 skb = hdev->req_skb;
218 hdev->req_skb = NULL;
268 219
269 BT_DBG("%s end: err %d", hdev->name, err); 220 BT_DBG("%s end: err %d", hdev->name, err);
270 221
271 if (err < 0) 222 if (err < 0) {
223 kfree_skb(skb);
272 return ERR_PTR(err); 224 return ERR_PTR(err);
225 }
226
227 if (!skb)
228 return ERR_PTR(-ENODATA);
273 229
274 return hci_get_cmd_complete(hdev, opcode, event); 230 return skb;
275} 231}
276EXPORT_SYMBOL(__hci_cmd_sync_ev); 232EXPORT_SYMBOL(__hci_cmd_sync_ev);
277 233
@@ -303,7 +259,7 @@ static int __hci_req_sync(struct hci_dev *hdev,
303 add_wait_queue(&hdev->req_wait_q, &wait); 259 add_wait_queue(&hdev->req_wait_q, &wait);
304 set_current_state(TASK_INTERRUPTIBLE); 260 set_current_state(TASK_INTERRUPTIBLE);
305 261
306 err = hci_req_run(&req, hci_req_sync_complete); 262 err = hci_req_run_skb(&req, hci_req_sync_complete);
307 if (err < 0) { 263 if (err < 0) {
308 hdev->req_status = 0; 264 hdev->req_status = 0;
309 265
@@ -1690,9 +1646,6 @@ static int hci_dev_do_close(struct hci_dev *hdev)
1690 hdev->sent_cmd = NULL; 1646 hdev->sent_cmd = NULL;
1691 } 1647 }
1692 1648
1693 kfree_skb(hdev->recv_evt);
1694 hdev->recv_evt = NULL;
1695
1696 /* After this point our queues are empty 1649 /* After this point our queues are empty
1697 * and no tasks are scheduled. */ 1650 * and no tasks are scheduled. */
1698 hdev->close(hdev); 1651 hdev->close(hdev);
@@ -3563,11 +3516,6 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3563 } 3516 }
3564} 3517}
3565 3518
3566bool hci_req_pending(struct hci_dev *hdev)
3567{
3568 return (hdev->req_status == HCI_REQ_PEND);
3569}
3570
3571/* Send HCI command */ 3519/* Send HCI command */
3572int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, 3520int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3573 const void *param) 3521 const void *param)
@@ -3585,7 +3533,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3585 /* Stand-alone HCI commands must be flagged as 3533 /* Stand-alone HCI commands must be flagged as
3586 * single-command requests. 3534 * single-command requests.
3587 */ 3535 */
3588 bt_cb(skb)->req_start = 1; 3536 bt_cb(skb)->req.start = true;
3589 3537
3590 skb_queue_tail(&hdev->cmd_q, skb); 3538 skb_queue_tail(&hdev->cmd_q, skb);
3591 queue_work(hdev->workqueue, &hdev->cmd_work); 3539 queue_work(hdev->workqueue, &hdev->cmd_work);
@@ -4263,7 +4211,7 @@ static bool hci_req_is_complete(struct hci_dev *hdev)
4263 if (!skb) 4211 if (!skb)
4264 return true; 4212 return true;
4265 4213
4266 return bt_cb(skb)->req_start; 4214 return bt_cb(skb)->req.start;
4267} 4215}
4268 4216
4269static void hci_resend_last(struct hci_dev *hdev) 4217static void hci_resend_last(struct hci_dev *hdev)
@@ -4288,9 +4236,10 @@ static void hci_resend_last(struct hci_dev *hdev)
4288 queue_work(hdev->workqueue, &hdev->cmd_work); 4236 queue_work(hdev->workqueue, &hdev->cmd_work);
4289} 4237}
4290 4238
4291void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status) 4239void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4240 hci_req_complete_t *req_complete,
4241 hci_req_complete_skb_t *req_complete_skb)
4292{ 4242{
4293 hci_req_complete_t req_complete = NULL;
4294 struct sk_buff *skb; 4243 struct sk_buff *skb;
4295 unsigned long flags; 4244 unsigned long flags;
4296 4245
@@ -4322,36 +4271,29 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4322 * callback would be found in hdev->sent_cmd instead of the 4271 * callback would be found in hdev->sent_cmd instead of the
4323 * command queue (hdev->cmd_q). 4272 * command queue (hdev->cmd_q).
4324 */ 4273 */
4325 if (hdev->sent_cmd) { 4274 if (bt_cb(hdev->sent_cmd)->req.complete) {
4326 req_complete = bt_cb(hdev->sent_cmd)->req_complete; 4275 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4327 4276 return;
4328 if (req_complete) { 4277 }
4329 /* We must set the complete callback to NULL to
4330 * avoid calling the callback more than once if
4331 * this function gets called again.
4332 */
4333 bt_cb(hdev->sent_cmd)->req_complete = NULL;
4334 4278
4335 goto call_complete; 4279 if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4336 } 4280 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4281 return;
4337 } 4282 }
4338 4283
4339 /* Remove all pending commands belonging to this request */ 4284 /* Remove all pending commands belonging to this request */
4340 spin_lock_irqsave(&hdev->cmd_q.lock, flags); 4285 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4341 while ((skb = __skb_dequeue(&hdev->cmd_q))) { 4286 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4342 if (bt_cb(skb)->req_start) { 4287 if (bt_cb(skb)->req.start) {
4343 __skb_queue_head(&hdev->cmd_q, skb); 4288 __skb_queue_head(&hdev->cmd_q, skb);
4344 break; 4289 break;
4345 } 4290 }
4346 4291
4347 req_complete = bt_cb(skb)->req_complete; 4292 *req_complete = bt_cb(skb)->req.complete;
4293 *req_complete_skb = bt_cb(skb)->req.complete_skb;
4348 kfree_skb(skb); 4294 kfree_skb(skb);
4349 } 4295 }
4350 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); 4296 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4351
4352call_complete:
4353 if (req_complete)
4354 req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
4355} 4297}
4356 4298
4357static void hci_rx_work(struct work_struct *work) 4299static void hci_rx_work(struct work_struct *work)
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index e6255833a258..7db4220941cc 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -114,6 +114,30 @@ static const struct file_operations features_fops = {
114 .release = single_release, 114 .release = single_release,
115}; 115};
116 116
117static int device_id_show(struct seq_file *f, void *ptr)
118{
119 struct hci_dev *hdev = f->private;
120
121 hci_dev_lock(hdev);
122 seq_printf(f, "%4.4x:%4.4x:%4.4x:%4.4x\n", hdev->devid_source,
123 hdev->devid_vendor, hdev->devid_product, hdev->devid_version);
124 hci_dev_unlock(hdev);
125
126 return 0;
127}
128
129static int device_id_open(struct inode *inode, struct file *file)
130{
131 return single_open(file, device_id_show, inode->i_private);
132}
133
134static const struct file_operations device_id_fops = {
135 .open = device_id_open,
136 .read = seq_read,
137 .llseek = seq_lseek,
138 .release = single_release,
139};
140
117static int device_list_show(struct seq_file *f, void *ptr) 141static int device_list_show(struct seq_file *f, void *ptr)
118{ 142{
119 struct hci_dev *hdev = f->private; 143 struct hci_dev *hdev = f->private;
@@ -335,6 +359,8 @@ void hci_debugfs_create_common(struct hci_dev *hdev)
335 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev); 359 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
336 debugfs_create_u8("hardware_error", 0444, hdev->debugfs, 360 debugfs_create_u8("hardware_error", 0444, hdev->debugfs,
337 &hdev->hw_error_code); 361 &hdev->hw_error_code);
362 debugfs_create_file("device_id", 0444, hdev->debugfs, hdev,
363 &device_id_fops);
338 364
339 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev, 365 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
340 &device_list_fops); 366 &device_list_fops);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 62f92a508961..01031038eb0e 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1045,11 +1045,6 @@ static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1045 struct hci_rp_read_local_oob_data *rp = (void *) skb->data; 1045 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1046 1046
1047 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1047 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1048
1049 hci_dev_lock(hdev);
1050 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
1051 rp->status);
1052 hci_dev_unlock(hdev);
1053} 1048}
1054 1049
1055static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, 1050static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
@@ -1058,15 +1053,8 @@ static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1058 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data; 1053 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1059 1054
1060 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1055 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1061
1062 hci_dev_lock(hdev);
1063 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
1064 rp->hash256, rp->rand256,
1065 rp->status);
1066 hci_dev_unlock(hdev);
1067} 1056}
1068 1057
1069
1070static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb) 1058static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1071{ 1059{
1072 __u8 status = *((__u8 *) skb->data); 1060 __u8 status = *((__u8 *) skb->data);
@@ -2732,17 +2720,19 @@ unlock:
2732 hci_dev_unlock(hdev); 2720 hci_dev_unlock(hdev);
2733} 2721}
2734 2722
2735static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2723static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
2724 u16 *opcode, u8 *status,
2725 hci_req_complete_t *req_complete,
2726 hci_req_complete_skb_t *req_complete_skb)
2736{ 2727{
2737 struct hci_ev_cmd_complete *ev = (void *) skb->data; 2728 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2738 u8 status = skb->data[sizeof(*ev)];
2739 __u16 opcode;
2740 2729
2741 skb_pull(skb, sizeof(*ev)); 2730 *opcode = __le16_to_cpu(ev->opcode);
2731 *status = skb->data[sizeof(*ev)];
2742 2732
2743 opcode = __le16_to_cpu(ev->opcode); 2733 skb_pull(skb, sizeof(*ev));
2744 2734
2745 switch (opcode) { 2735 switch (*opcode) {
2746 case HCI_OP_INQUIRY_CANCEL: 2736 case HCI_OP_INQUIRY_CANCEL:
2747 hci_cc_inquiry_cancel(hdev, skb); 2737 hci_cc_inquiry_cancel(hdev, skb);
2748 break; 2738 break;
@@ -3020,32 +3010,36 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3020 break; 3010 break;
3021 3011
3022 default: 3012 default:
3023 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); 3013 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3024 break; 3014 break;
3025 } 3015 }
3026 3016
3027 if (opcode != HCI_OP_NOP) 3017 if (*opcode != HCI_OP_NOP)
3028 cancel_delayed_work(&hdev->cmd_timer); 3018 cancel_delayed_work(&hdev->cmd_timer);
3029 3019
3030 hci_req_cmd_complete(hdev, opcode, status); 3020 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3031
3032 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
3033 atomic_set(&hdev->cmd_cnt, 1); 3021 atomic_set(&hdev->cmd_cnt, 1);
3034 if (!skb_queue_empty(&hdev->cmd_q)) 3022
3035 queue_work(hdev->workqueue, &hdev->cmd_work); 3023 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3036 } 3024 req_complete_skb);
3025
3026 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3027 queue_work(hdev->workqueue, &hdev->cmd_work);
3037} 3028}
3038 3029
3039static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) 3030static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3031 u16 *opcode, u8 *status,
3032 hci_req_complete_t *req_complete,
3033 hci_req_complete_skb_t *req_complete_skb)
3040{ 3034{
3041 struct hci_ev_cmd_status *ev = (void *) skb->data; 3035 struct hci_ev_cmd_status *ev = (void *) skb->data;
3042 __u16 opcode;
3043 3036
3044 skb_pull(skb, sizeof(*ev)); 3037 skb_pull(skb, sizeof(*ev));
3045 3038
3046 opcode = __le16_to_cpu(ev->opcode); 3039 *opcode = __le16_to_cpu(ev->opcode);
3040 *status = ev->status;
3047 3041
3048 switch (opcode) { 3042 switch (*opcode) {
3049 case HCI_OP_INQUIRY: 3043 case HCI_OP_INQUIRY:
3050 hci_cs_inquiry(hdev, ev->status); 3044 hci_cs_inquiry(hdev, ev->status);
3051 break; 3045 break;
@@ -3115,22 +3109,29 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
3115 break; 3109 break;
3116 3110
3117 default: 3111 default:
3118 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); 3112 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3119 break; 3113 break;
3120 } 3114 }
3121 3115
3122 if (opcode != HCI_OP_NOP) 3116 if (*opcode != HCI_OP_NOP)
3123 cancel_delayed_work(&hdev->cmd_timer); 3117 cancel_delayed_work(&hdev->cmd_timer);
3124 3118
3119 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3120 atomic_set(&hdev->cmd_cnt, 1);
3121
3122 /* Indicate request completion if the command failed. Also, if
3123 * we're not waiting for a special event and we get a success
3124 * command status we should try to flag the request as completed
3125 * (since for this kind of commands there will not be a command
3126 * complete event).
3127 */
3125 if (ev->status || 3128 if (ev->status ||
3126 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req_event)) 3129 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
3127 hci_req_cmd_complete(hdev, opcode, ev->status); 3130 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3131 req_complete_skb);
3128 3132
3129 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { 3133 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3130 atomic_set(&hdev->cmd_cnt, 1); 3134 queue_work(hdev->workqueue, &hdev->cmd_work);
3131 if (!skb_queue_empty(&hdev->cmd_q))
3132 queue_work(hdev->workqueue, &hdev->cmd_work);
3133 }
3134} 3135}
3135 3136
3136static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb) 3137static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -5031,32 +5032,79 @@ static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5031 amp_read_loc_assoc_final_data(hdev, hcon); 5032 amp_read_loc_assoc_final_data(hdev, hcon);
5032} 5033}
5033 5034
5034void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) 5035static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5036 u8 event, struct sk_buff *skb)
5035{ 5037{
5036 struct hci_event_hdr *hdr = (void *) skb->data; 5038 struct hci_ev_cmd_complete *ev;
5037 __u8 event = hdr->evt; 5039 struct hci_event_hdr *hdr;
5038 5040
5039 hci_dev_lock(hdev); 5041 if (!skb)
5042 return false;
5040 5043
5041 /* Received events are (currently) only needed when a request is 5044 if (skb->len < sizeof(*hdr)) {
5042 * ongoing so avoid unnecessary memory allocation. 5045 BT_ERR("Too short HCI event");
5043 */ 5046 return false;
5044 if (hci_req_pending(hdev)) {
5045 kfree_skb(hdev->recv_evt);
5046 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
5047 } 5047 }
5048 5048
5049 hci_dev_unlock(hdev); 5049 hdr = (void *) skb->data;
5050
5051 skb_pull(skb, HCI_EVENT_HDR_SIZE); 5050 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5052 5051
5053 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req_event == event) { 5052 if (event) {
5054 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data; 5053 if (hdr->evt != event)
5055 u16 opcode = __le16_to_cpu(cmd_hdr->opcode); 5054 return false;
5055 return true;
5056 }
5057
5058 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5059 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
5060 return false;
5061 }
5062
5063 if (skb->len < sizeof(*ev)) {
5064 BT_ERR("Too short cmd_complete event");
5065 return false;
5066 }
5067
5068 ev = (void *) skb->data;
5069 skb_pull(skb, sizeof(*ev));
5056 5070
5057 hci_req_cmd_complete(hdev, opcode, 0); 5071 if (opcode != __le16_to_cpu(ev->opcode)) {
5072 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5073 __le16_to_cpu(ev->opcode));
5074 return false;
5058 } 5075 }
5059 5076
5077 return true;
5078}
5079
5080void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5081{
5082 struct hci_event_hdr *hdr = (void *) skb->data;
5083 hci_req_complete_t req_complete = NULL;
5084 hci_req_complete_skb_t req_complete_skb = NULL;
5085 struct sk_buff *orig_skb = NULL;
5086 u8 status = 0, event = hdr->evt, req_evt = 0;
5087 u16 opcode = HCI_OP_NOP;
5088
5089 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
5090 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5091 opcode = __le16_to_cpu(cmd_hdr->opcode);
5092 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
5093 &req_complete_skb);
5094 req_evt = event;
5095 }
5096
5097 /* If it looks like we might end up having to call
5098 * req_complete_skb, store a pristine copy of the skb since the
5099 * various handlers may modify the original one through
5100 * skb_pull() calls, etc.
5101 */
5102 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
5103 event == HCI_EV_CMD_COMPLETE)
5104 orig_skb = skb_clone(skb, GFP_KERNEL);
5105
5106 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5107
5060 switch (event) { 5108 switch (event) {
5061 case HCI_EV_INQUIRY_COMPLETE: 5109 case HCI_EV_INQUIRY_COMPLETE:
5062 hci_inquiry_complete_evt(hdev, skb); 5110 hci_inquiry_complete_evt(hdev, skb);
@@ -5099,11 +5147,13 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5099 break; 5147 break;
5100 5148
5101 case HCI_EV_CMD_COMPLETE: 5149 case HCI_EV_CMD_COMPLETE:
5102 hci_cmd_complete_evt(hdev, skb); 5150 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
5151 &req_complete, &req_complete_skb);
5103 break; 5152 break;
5104 5153
5105 case HCI_EV_CMD_STATUS: 5154 case HCI_EV_CMD_STATUS:
5106 hci_cmd_status_evt(hdev, skb); 5155 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
5156 &req_complete_skb);
5107 break; 5157 break;
5108 5158
5109 case HCI_EV_HARDWARE_ERROR: 5159 case HCI_EV_HARDWARE_ERROR:
@@ -5235,6 +5285,17 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5235 break; 5285 break;
5236 } 5286 }
5237 5287
5288 if (req_complete) {
5289 req_complete(hdev, status, opcode);
5290 } else if (req_complete_skb) {
5291 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
5292 kfree_skb(orig_skb);
5293 orig_skb = NULL;
5294 }
5295 req_complete_skb(hdev, status, opcode, orig_skb);
5296 }
5297
5298 kfree_skb(orig_skb);
5238 kfree_skb(skb); 5299 kfree_skb(skb);
5239 hdev->stat.evt_rx++; 5300 hdev->stat.evt_rx++;
5240} 5301}
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 55e096d20a0f..d6025d6e6d59 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -34,7 +34,8 @@ void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
34 req->err = 0; 34 req->err = 0;
35} 35}
36 36
37int hci_req_run(struct hci_request *req, hci_req_complete_t complete) 37static int req_run(struct hci_request *req, hci_req_complete_t complete,
38 hci_req_complete_skb_t complete_skb)
38{ 39{
39 struct hci_dev *hdev = req->hdev; 40 struct hci_dev *hdev = req->hdev;
40 struct sk_buff *skb; 41 struct sk_buff *skb;
@@ -55,7 +56,8 @@ int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
55 return -ENODATA; 56 return -ENODATA;
56 57
57 skb = skb_peek_tail(&req->cmd_q); 58 skb = skb_peek_tail(&req->cmd_q);
58 bt_cb(skb)->req_complete = complete; 59 bt_cb(skb)->req.complete = complete;
60 bt_cb(skb)->req.complete_skb = complete_skb;
59 61
60 spin_lock_irqsave(&hdev->cmd_q.lock, flags); 62 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
61 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); 63 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
@@ -66,6 +68,16 @@ int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
66 return 0; 68 return 0;
67} 69}
68 70
71int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
72{
73 return req_run(req, complete, NULL);
74}
75
76int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
77{
78 return req_run(req, NULL, complete);
79}
80
69struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, 81struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
70 const void *param) 82 const void *param)
71{ 83{
@@ -116,9 +128,9 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
116 } 128 }
117 129
118 if (skb_queue_empty(&req->cmd_q)) 130 if (skb_queue_empty(&req->cmd_q))
119 bt_cb(skb)->req_start = 1; 131 bt_cb(skb)->req.start = true;
120 132
121 bt_cb(skb)->req_event = event; 133 bt_cb(skb)->req.event = event;
122 134
123 skb_queue_tail(&req->cmd_q, skb); 135 skb_queue_tail(&req->cmd_q, skb);
124} 136}
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index adf074d33544..bf6df92f42db 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -32,11 +32,14 @@ struct hci_request {
32 32
33void hci_req_init(struct hci_request *req, struct hci_dev *hdev); 33void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
34int hci_req_run(struct hci_request *req, hci_req_complete_t complete); 34int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
35int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
35void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, 36void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
36 const void *param); 37 const void *param);
37void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, 38void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
38 const void *param, u8 event); 39 const void *param, u8 event);
39void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status); 40void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
41 hci_req_complete_t *req_complete,
42 hci_req_complete_skb_t *req_complete_skb);
40 43
41struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, 44struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
42 const void *param); 45 const void *param);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 85a44a7dc150..56f9edbf3d05 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1164,7 +1164,7 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1164 /* Stand-alone HCI commands must be flagged as 1164 /* Stand-alone HCI commands must be flagged as
1165 * single-command requests. 1165 * single-command requests.
1166 */ 1166 */
1167 bt_cb(skb)->req_start = 1; 1167 bt_cb(skb)->req.start = true;
1168 1168
1169 skb_queue_tail(&hdev->cmd_q, skb); 1169 skb_queue_tail(&hdev->cmd_q, skb);
1170 queue_work(hdev->workqueue, &hdev->cmd_work); 1170 queue_work(hdev->workqueue, &hdev->cmd_work);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 07348e142f16..a05b9dbf14c9 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -70,10 +70,11 @@ static void hidp_session_terminate(struct hidp_session *s);
70 70
71static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci) 71static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
72{ 72{
73 u32 valid_flags = 0;
73 memset(ci, 0, sizeof(*ci)); 74 memset(ci, 0, sizeof(*ci));
74 bacpy(&ci->bdaddr, &session->bdaddr); 75 bacpy(&ci->bdaddr, &session->bdaddr);
75 76
76 ci->flags = session->flags; 77 ci->flags = session->flags & valid_flags;
77 ci->state = BT_CONNECTED; 78 ci->state = BT_CONNECTED;
78 79
79 if (session->input) { 80 if (session->input) {
@@ -907,7 +908,7 @@ static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr,
907 kref_init(&session->ref); 908 kref_init(&session->ref);
908 atomic_set(&session->state, HIDP_SESSION_IDLING); 909 atomic_set(&session->state, HIDP_SESSION_IDLING);
909 init_waitqueue_head(&session->state_queue); 910 init_waitqueue_head(&session->state_queue);
910 session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID); 911 session->flags = req->flags & BIT(HIDP_BLUETOOTH_VENDOR_ID);
911 912
912 /* connection management */ 913 /* connection management */
913 bacpy(&session->bdaddr, bdaddr); 914 bacpy(&session->bdaddr, bdaddr);
@@ -1312,6 +1313,7 @@ int hidp_connection_add(struct hidp_connadd_req *req,
1312 struct socket *ctrl_sock, 1313 struct socket *ctrl_sock,
1313 struct socket *intr_sock) 1314 struct socket *intr_sock)
1314{ 1315{
1316 u32 valid_flags = 0;
1315 struct hidp_session *session; 1317 struct hidp_session *session;
1316 struct l2cap_conn *conn; 1318 struct l2cap_conn *conn;
1317 struct l2cap_chan *chan; 1319 struct l2cap_chan *chan;
@@ -1321,6 +1323,9 @@ int hidp_connection_add(struct hidp_connadd_req *req,
1321 if (ret) 1323 if (ret)
1322 return ret; 1324 return ret;
1323 1325
1326 if (req->flags & ~valid_flags)
1327 return -EINVAL;
1328
1324 chan = l2cap_pi(ctrl_sock->sk)->chan; 1329 chan = l2cap_pi(ctrl_sock->sk)->chan;
1325 conn = NULL; 1330 conn = NULL;
1326 l2cap_chan_lock(chan); 1331 l2cap_chan_lock(chan);
@@ -1351,13 +1356,17 @@ out_conn:
1351 1356
1352int hidp_connection_del(struct hidp_conndel_req *req) 1357int hidp_connection_del(struct hidp_conndel_req *req)
1353{ 1358{
1359 u32 valid_flags = BIT(HIDP_VIRTUAL_CABLE_UNPLUG);
1354 struct hidp_session *session; 1360 struct hidp_session *session;
1355 1361
1362 if (req->flags & ~valid_flags)
1363 return -EINVAL;
1364
1356 session = hidp_session_find(&req->bdaddr); 1365 session = hidp_session_find(&req->bdaddr);
1357 if (!session) 1366 if (!session)
1358 return -ENOENT; 1367 return -ENOENT;
1359 1368
1360 if (req->flags & (1 << HIDP_VIRTUAL_CABLE_UNPLUG)) 1369 if (req->flags & BIT(HIDP_VIRTUAL_CABLE_UNPLUG))
1361 hidp_send_ctrl_message(session, 1370 hidp_send_ctrl_message(session,
1362 HIDP_TRANS_HID_CONTROL | 1371 HIDP_TRANS_HID_CONTROL |
1363 HIDP_CTRL_VIRTUAL_CABLE_UNPLUG, 1372 HIDP_CTRL_VIRTUAL_CABLE_UNPLUG,
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index d69861c89bb5..dad419782a12 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -292,7 +292,7 @@ static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
292 struct sk_buff *skb; 292 struct sk_buff *skb;
293 293
294 skb_queue_walk(head, skb) { 294 skb_queue_walk(head, skb) {
295 if (bt_cb(skb)->control.txseq == seq) 295 if (bt_cb(skb)->l2cap.txseq == seq)
296 return skb; 296 return skb;
297 } 297 }
298 298
@@ -954,11 +954,11 @@ static inline void __unpack_control(struct l2cap_chan *chan,
954{ 954{
955 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { 955 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
956 __unpack_extended_control(get_unaligned_le32(skb->data), 956 __unpack_extended_control(get_unaligned_le32(skb->data),
957 &bt_cb(skb)->control); 957 &bt_cb(skb)->l2cap);
958 skb_pull(skb, L2CAP_EXT_CTRL_SIZE); 958 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
959 } else { 959 } else {
960 __unpack_enhanced_control(get_unaligned_le16(skb->data), 960 __unpack_enhanced_control(get_unaligned_le16(skb->data),
961 &bt_cb(skb)->control); 961 &bt_cb(skb)->l2cap);
962 skb_pull(skb, L2CAP_ENH_CTRL_SIZE); 962 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
963 } 963 }
964} 964}
@@ -1200,8 +1200,8 @@ static void l2cap_move_setup(struct l2cap_chan *chan)
1200 1200
1201 chan->retry_count = 0; 1201 chan->retry_count = 0;
1202 skb_queue_walk(&chan->tx_q, skb) { 1202 skb_queue_walk(&chan->tx_q, skb) {
1203 if (bt_cb(skb)->control.retries) 1203 if (bt_cb(skb)->l2cap.retries)
1204 bt_cb(skb)->control.retries = 1; 1204 bt_cb(skb)->l2cap.retries = 1;
1205 else 1205 else
1206 break; 1206 break;
1207 } 1207 }
@@ -1846,8 +1846,8 @@ static void l2cap_streaming_send(struct l2cap_chan *chan,
1846 1846
1847 skb = skb_dequeue(&chan->tx_q); 1847 skb = skb_dequeue(&chan->tx_q);
1848 1848
1849 bt_cb(skb)->control.retries = 1; 1849 bt_cb(skb)->l2cap.retries = 1;
1850 control = &bt_cb(skb)->control; 1850 control = &bt_cb(skb)->l2cap;
1851 1851
1852 control->reqseq = 0; 1852 control->reqseq = 0;
1853 control->txseq = chan->next_tx_seq; 1853 control->txseq = chan->next_tx_seq;
@@ -1891,8 +1891,8 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
1891 1891
1892 skb = chan->tx_send_head; 1892 skb = chan->tx_send_head;
1893 1893
1894 bt_cb(skb)->control.retries = 1; 1894 bt_cb(skb)->l2cap.retries = 1;
1895 control = &bt_cb(skb)->control; 1895 control = &bt_cb(skb)->l2cap;
1896 1896
1897 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) 1897 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1898 control->final = 1; 1898 control->final = 1;
@@ -1963,11 +1963,11 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan)
1963 continue; 1963 continue;
1964 } 1964 }
1965 1965
1966 bt_cb(skb)->control.retries++; 1966 bt_cb(skb)->l2cap.retries++;
1967 control = bt_cb(skb)->control; 1967 control = bt_cb(skb)->l2cap;
1968 1968
1969 if (chan->max_tx != 0 && 1969 if (chan->max_tx != 0 &&
1970 bt_cb(skb)->control.retries > chan->max_tx) { 1970 bt_cb(skb)->l2cap.retries > chan->max_tx) {
1971 BT_DBG("Retry limit exceeded (%d)", chan->max_tx); 1971 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1972 l2cap_send_disconn_req(chan, ECONNRESET); 1972 l2cap_send_disconn_req(chan, ECONNRESET);
1973 l2cap_seq_list_clear(&chan->retrans_list); 1973 l2cap_seq_list_clear(&chan->retrans_list);
@@ -2045,7 +2045,7 @@ static void l2cap_retransmit_all(struct l2cap_chan *chan,
2045 2045
2046 if (chan->unacked_frames) { 2046 if (chan->unacked_frames) {
2047 skb_queue_walk(&chan->tx_q, skb) { 2047 skb_queue_walk(&chan->tx_q, skb) {
2048 if (bt_cb(skb)->control.txseq == control->reqseq || 2048 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2049 skb == chan->tx_send_head) 2049 skb == chan->tx_send_head)
2050 break; 2050 break;
2051 } 2051 }
@@ -2055,7 +2055,7 @@ static void l2cap_retransmit_all(struct l2cap_chan *chan,
2055 break; 2055 break;
2056 2056
2057 l2cap_seq_list_append(&chan->retrans_list, 2057 l2cap_seq_list_append(&chan->retrans_list,
2058 bt_cb(skb)->control.txseq); 2058 bt_cb(skb)->l2cap.txseq);
2059 } 2059 }
2060 2060
2061 l2cap_ertm_resend(chan); 2061 l2cap_ertm_resend(chan);
@@ -2267,8 +2267,8 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2267 return ERR_PTR(err); 2267 return ERR_PTR(err);
2268 } 2268 }
2269 2269
2270 bt_cb(skb)->control.fcs = chan->fcs; 2270 bt_cb(skb)->l2cap.fcs = chan->fcs;
2271 bt_cb(skb)->control.retries = 0; 2271 bt_cb(skb)->l2cap.retries = 0;
2272 return skb; 2272 return skb;
2273} 2273}
2274 2274
@@ -2321,7 +2321,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
2321 return PTR_ERR(skb); 2321 return PTR_ERR(skb);
2322 } 2322 }
2323 2323
2324 bt_cb(skb)->control.sar = sar; 2324 bt_cb(skb)->l2cap.sar = sar;
2325 __skb_queue_tail(seg_queue, skb); 2325 __skb_queue_tail(seg_queue, skb);
2326 2326
2327 len -= pdu_len; 2327 len -= pdu_len;
@@ -2856,7 +2856,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2856 continue; 2856 continue;
2857 2857
2858 /* Don't send frame to the channel it came from */ 2858 /* Don't send frame to the channel it came from */
2859 if (bt_cb(skb)->chan == chan) 2859 if (bt_cb(skb)->l2cap.chan == chan)
2860 continue; 2860 continue;
2861 2861
2862 nskb = skb_clone(skb, GFP_KERNEL); 2862 nskb = skb_clone(skb, GFP_KERNEL);
@@ -5918,7 +5918,7 @@ static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5918 5918
5919 skb_unlink(skb, &chan->srej_q); 5919 skb_unlink(skb, &chan->srej_q);
5920 chan->buffer_seq = __next_seq(chan, chan->buffer_seq); 5920 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5921 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control); 5921 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
5922 if (err) 5922 if (err)
5923 break; 5923 break;
5924 } 5924 }
@@ -5952,7 +5952,7 @@ static void l2cap_handle_srej(struct l2cap_chan *chan,
5952 return; 5952 return;
5953 } 5953 }
5954 5954
5955 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) { 5955 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
5956 BT_DBG("Retry limit exceeded (%d)", chan->max_tx); 5956 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5957 l2cap_send_disconn_req(chan, ECONNRESET); 5957 l2cap_send_disconn_req(chan, ECONNRESET);
5958 return; 5958 return;
@@ -6005,7 +6005,7 @@ static void l2cap_handle_rej(struct l2cap_chan *chan,
6005 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq); 6005 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6006 6006
6007 if (chan->max_tx && skb && 6007 if (chan->max_tx && skb &&
6008 bt_cb(skb)->control.retries >= chan->max_tx) { 6008 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6009 BT_DBG("Retry limit exceeded (%d)", chan->max_tx); 6009 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6010 l2cap_send_disconn_req(chan, ECONNRESET); 6010 l2cap_send_disconn_req(chan, ECONNRESET);
6011 return; 6011 return;
@@ -6565,7 +6565,7 @@ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6565 6565
6566static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) 6566static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6567{ 6567{
6568 struct l2cap_ctrl *control = &bt_cb(skb)->control; 6568 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6569 u16 len; 6569 u16 len;
6570 u8 event; 6570 u8 event;
6571 6571
@@ -6864,8 +6864,8 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6864 goto drop; 6864 goto drop;
6865 6865
6866 /* Store remote BD_ADDR and PSM for msg_name */ 6866 /* Store remote BD_ADDR and PSM for msg_name */
6867 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst); 6867 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
6868 bt_cb(skb)->psm = psm; 6868 bt_cb(skb)->l2cap.psm = psm;
6869 6869
6870 if (!chan->ops->recv(chan, skb)) { 6870 if (!chan->ops->recv(chan, skb)) {
6871 l2cap_chan_put(chan); 6871 l2cap_chan_put(chan);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 9070720eedc8..a7278f05eafb 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1330,7 +1330,7 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
1330 1330
1331 skb->priority = sk->sk_priority; 1331 skb->priority = sk->sk_priority;
1332 1332
1333 bt_cb(skb)->chan = chan; 1333 bt_cb(skb)->l2cap.chan = chan;
1334 1334
1335 return skb; 1335 return skb;
1336} 1336}
@@ -1444,8 +1444,8 @@ static void l2cap_skb_msg_name(struct sk_buff *skb, void *msg_name,
1444 1444
1445 memset(la, 0, sizeof(struct sockaddr_l2)); 1445 memset(la, 0, sizeof(struct sockaddr_l2));
1446 la->l2_family = AF_BLUETOOTH; 1446 la->l2_family = AF_BLUETOOTH;
1447 la->l2_psm = bt_cb(skb)->psm; 1447 la->l2_psm = bt_cb(skb)->l2cap.psm;
1448 bacpy(&la->l2_bdaddr, &bt_cb(skb)->bdaddr); 1448 bacpy(&la->l2_bdaddr, &bt_cb(skb)->l2cap.bdaddr);
1449 1449
1450 *msg_namelen = sizeof(struct sockaddr_l2); 1450 *msg_namelen = sizeof(struct sockaddr_l2);
1451} 1451}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index fb2e764c6211..845dfcc43a20 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -985,14 +985,27 @@ static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
985 /* Instance 0 always manages the "Tx Power" and "Flags" fields */ 985 /* Instance 0 always manages the "Tx Power" and "Flags" fields */
986 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS; 986 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
987 987
988 /* For instance 0, assemble the flags from global settings */ 988 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting corresponds
989 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE) || 989 * to the "connectable" instance flag.
990 get_connectable(hdev)) 990 */
991 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
991 flags |= MGMT_ADV_FLAG_CONNECTABLE; 992 flags |= MGMT_ADV_FLAG_CONNECTABLE;
992 993
993 return flags; 994 return flags;
994} 995}
995 996
997static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
998{
999 /* Ignore instance 0 and other unsupported instances */
1000 if (instance != 0x01)
1001 return 0;
1002
1003 /* TODO: Take into account the "appearance" and "local-name" flags here.
1004 * These are currently being ignored as they are not supported.
1005 */
1006 return hdev->adv_instance.scan_rsp_len;
1007}
1008
996static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) 1009static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
997{ 1010{
998 u8 ad_len = 0, flags = 0; 1011 u8 ad_len = 0, flags = 0;
@@ -1030,6 +1043,14 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1030 } 1043 }
1031 } 1044 }
1032 1045
1046 if (instance) {
1047 memcpy(ptr, hdev->adv_instance.adv_data,
1048 hdev->adv_instance.adv_data_len);
1049
1050 ad_len += hdev->adv_instance.adv_data_len;
1051 ptr += hdev->adv_instance.adv_data_len;
1052 }
1053
1033 /* Provide Tx Power only if we can provide a valid value for it */ 1054 /* Provide Tx Power only if we can provide a valid value for it */
1034 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID && 1055 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1035 (instance_flags & MGMT_ADV_FLAG_TX_POWER)) { 1056 (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
@@ -1041,12 +1062,6 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1041 ptr += 3; 1062 ptr += 3;
1042 } 1063 }
1043 1064
1044 if (instance) {
1045 memcpy(ptr, hdev->adv_instance.adv_data,
1046 hdev->adv_instance.adv_data_len);
1047 ad_len += hdev->adv_instance.adv_data_len;
1048 }
1049
1050 return ad_len; 1065 return ad_len;
1051} 1066}
1052 1067
@@ -1242,7 +1257,12 @@ static void enable_advertising(struct hci_request *req)
1242 1257
1243 instance = get_current_adv_instance(hdev); 1258 instance = get_current_adv_instance(hdev);
1244 flags = get_adv_instance_flags(hdev, instance); 1259 flags = get_adv_instance_flags(hdev, instance);
1245 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE); 1260
1261 /* If the "connectable" instance flag was not set, then choose between
1262 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1263 */
1264 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1265 get_connectable(hdev);
1246 1266
1247 /* Set require_privacy to true only when non-connectable 1267 /* Set require_privacy to true only when non-connectable
1248 * advertising is used. In that case it is fine to use a 1268 * advertising is used. In that case it is fine to use a
@@ -1254,7 +1274,14 @@ static void enable_advertising(struct hci_request *req)
1254 memset(&cp, 0, sizeof(cp)); 1274 memset(&cp, 0, sizeof(cp));
1255 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval); 1275 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1256 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval); 1276 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1257 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND; 1277
1278 if (connectable)
1279 cp.type = LE_ADV_IND;
1280 else if (get_adv_instance_scan_rsp_len(hdev, instance))
1281 cp.type = LE_ADV_SCAN_IND;
1282 else
1283 cp.type = LE_ADV_NONCONN_IND;
1284
1258 cp.own_address_type = own_addr_type; 1285 cp.own_address_type = own_addr_type;
1259 cp.channel_map = hdev->le_adv_channel_map; 1286 cp.channel_map = hdev->le_adv_channel_map;
1260 1287
@@ -2088,7 +2115,8 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
2088 2115
2089no_scan_update: 2116no_scan_update:
2090 /* Update the advertising parameters if necessary */ 2117 /* Update the advertising parameters if necessary */
2091 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) 2118 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2119 hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
2092 enable_advertising(&req); 2120 enable_advertising(&req);
2093 2121
2094 err = hci_req_run(&req, set_connectable_complete); 2122 err = hci_req_run(&req, set_connectable_complete);
@@ -3757,10 +3785,70 @@ failed:
3757 return err; 3785 return err;
3758} 3786}
3759 3787
3788static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
3789 u16 opcode, struct sk_buff *skb)
3790{
3791 struct mgmt_rp_read_local_oob_data mgmt_rp;
3792 size_t rp_size = sizeof(mgmt_rp);
3793 struct mgmt_pending_cmd *cmd;
3794
3795 BT_DBG("%s status %u", hdev->name, status);
3796
3797 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3798 if (!cmd)
3799 return;
3800
3801 if (status || !skb) {
3802 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3803 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
3804 goto remove;
3805 }
3806
3807 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
3808
3809 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
3810 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
3811
3812 if (skb->len < sizeof(*rp)) {
3813 mgmt_cmd_status(cmd->sk, hdev->id,
3814 MGMT_OP_READ_LOCAL_OOB_DATA,
3815 MGMT_STATUS_FAILED);
3816 goto remove;
3817 }
3818
3819 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
3820 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
3821
3822 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
3823 } else {
3824 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
3825
3826 if (skb->len < sizeof(*rp)) {
3827 mgmt_cmd_status(cmd->sk, hdev->id,
3828 MGMT_OP_READ_LOCAL_OOB_DATA,
3829 MGMT_STATUS_FAILED);
3830 goto remove;
3831 }
3832
3833 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
3834 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
3835
3836 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
3837 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
3838 }
3839
3840 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3841 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
3842
3843remove:
3844 mgmt_pending_remove(cmd);
3845}
3846
3760static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev, 3847static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3761 void *data, u16 data_len) 3848 void *data, u16 data_len)
3762{ 3849{
3763 struct mgmt_pending_cmd *cmd; 3850 struct mgmt_pending_cmd *cmd;
3851 struct hci_request req;
3764 int err; 3852 int err;
3765 3853
3766 BT_DBG("%s", hdev->name); 3854 BT_DBG("%s", hdev->name);
@@ -3791,12 +3879,14 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3791 goto unlock; 3879 goto unlock;
3792 } 3880 }
3793 3881
3882 hci_req_init(&req, hdev);
3883
3794 if (bredr_sc_enabled(hdev)) 3884 if (bredr_sc_enabled(hdev))
3795 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 3885 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
3796 0, NULL);
3797 else 3886 else
3798 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL); 3887 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3799 3888
3889 err = hci_req_run_skb(&req, read_local_oob_data_complete);
3800 if (err < 0) 3890 if (err < 0)
3801 mgmt_pending_remove(cmd); 3891 mgmt_pending_remove(cmd);
3802 3892
@@ -6388,46 +6478,41 @@ static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
6388 6478
6389 BT_DBG("%s", hdev->name); 6479 BT_DBG("%s", hdev->name);
6390 6480
6391 if (!hdev_is_powered(hdev)) 6481 if (hdev_is_powered(hdev)) {
6392 return mgmt_cmd_complete(sk, hdev->id, 6482 switch (cp->type) {
6393 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, 6483 case BIT(BDADDR_BREDR):
6394 MGMT_STATUS_NOT_POWERED, 6484 status = mgmt_bredr_support(hdev);
6395 &cp->type, sizeof(cp->type)); 6485 if (status)
6396 6486 eir_len = 0;
6397 switch (cp->type) { 6487 else
6398 case BIT(BDADDR_BREDR): 6488 eir_len = 5;
6399 status = mgmt_bredr_support(hdev); 6489 break;
6400 if (status) 6490 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6401 return mgmt_cmd_complete(sk, hdev->id, 6491 status = mgmt_le_support(hdev);
6402 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, 6492 if (status)
6403 status, &cp->type, 6493 eir_len = 0;
6404 sizeof(cp->type)); 6494 else
6405 eir_len = 5; 6495 eir_len = 9 + 3 + 18 + 18 + 3;
6406 break; 6496 break;
6407 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)): 6497 default:
6408 status = mgmt_le_support(hdev); 6498 status = MGMT_STATUS_INVALID_PARAMS;
6409 if (status) 6499 eir_len = 0;
6410 return mgmt_cmd_complete(sk, hdev->id, 6500 break;
6411 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, 6501 }
6412 status, &cp->type, 6502 } else {
6413 sizeof(cp->type)); 6503 status = MGMT_STATUS_NOT_POWERED;
6414 eir_len = 9 + 3 + 18 + 18 + 3; 6504 eir_len = 0;
6415 break;
6416 default:
6417 return mgmt_cmd_complete(sk, hdev->id,
6418 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6419 MGMT_STATUS_INVALID_PARAMS,
6420 &cp->type, sizeof(cp->type));
6421 } 6505 }
6422 6506
6423 hci_dev_lock(hdev);
6424
6425 rp_len = sizeof(*rp) + eir_len; 6507 rp_len = sizeof(*rp) + eir_len;
6426 rp = kmalloc(rp_len, GFP_ATOMIC); 6508 rp = kmalloc(rp_len, GFP_ATOMIC);
6427 if (!rp) { 6509 if (!rp)
6428 hci_dev_unlock(hdev);
6429 return -ENOMEM; 6510 return -ENOMEM;
6430 } 6511
6512 if (status)
6513 goto complete;
6514
6515 hci_dev_lock(hdev);
6431 6516
6432 eir_len = 0; 6517 eir_len = 0;
6433 switch (cp->type) { 6518 switch (cp->type) {
@@ -6439,20 +6524,30 @@ static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
6439 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) && 6524 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6440 smp_generate_oob(hdev, hash, rand) < 0) { 6525 smp_generate_oob(hdev, hash, rand) < 0) {
6441 hci_dev_unlock(hdev); 6526 hci_dev_unlock(hdev);
6442 err = mgmt_cmd_complete(sk, hdev->id, 6527 status = MGMT_STATUS_FAILED;
6443 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, 6528 goto complete;
6444 MGMT_STATUS_FAILED,
6445 &cp->type, sizeof(cp->type));
6446 goto done;
6447 } 6529 }
6448 6530
6531 /* This should return the active RPA, but since the RPA
6532 * is only programmed on demand, it is really hard to fill
6533 * this in at the moment. For now disallow retrieving
6534 * local out-of-band data when privacy is in use.
6535 *
6536 * Returning the identity address will not help here since
6537 * pairing happens before the identity resolving key is
6538 * known and thus the connection establishment happens
6539 * based on the RPA and not the identity address.
6540 */
6449 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { 6541 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6450 memcpy(addr, &hdev->rpa, 6); 6542 hci_dev_unlock(hdev);
6451 addr[6] = 0x01; 6543 status = MGMT_STATUS_REJECTED;
6452 } else if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || 6544 goto complete;
6453 !bacmp(&hdev->bdaddr, BDADDR_ANY) || 6545 }
6454 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && 6546
6455 bacmp(&hdev->static_addr, BDADDR_ANY))) { 6547 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
6548 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
6549 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6550 bacmp(&hdev->static_addr, BDADDR_ANY))) {
6456 memcpy(addr, &hdev->static_addr, 6); 6551 memcpy(addr, &hdev->static_addr, 6);
6457 addr[6] = 0x01; 6552 addr[6] = 0x01;
6458 } else { 6553 } else {
@@ -6491,16 +6586,19 @@ static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
6491 break; 6586 break;
6492 } 6587 }
6493 6588
6494 rp->type = cp->type;
6495 rp->eir_len = cpu_to_le16(eir_len);
6496
6497 hci_dev_unlock(hdev); 6589 hci_dev_unlock(hdev);
6498 6590
6499 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS); 6591 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
6500 6592
6593 status = MGMT_STATUS_SUCCESS;
6594
6595complete:
6596 rp->type = cp->type;
6597 rp->eir_len = cpu_to_le16(eir_len);
6598
6501 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, 6599 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6502 MGMT_STATUS_SUCCESS, rp, sizeof(*rp) + eir_len); 6600 status, rp, sizeof(*rp) + eir_len);
6503 if (err < 0) 6601 if (err < 0 || status)
6504 goto done; 6602 goto done;
6505 6603
6506 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev, 6604 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
@@ -7899,43 +7997,6 @@ void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7899 cmd ? cmd->sk : NULL); 7997 cmd ? cmd->sk : NULL);
7900} 7998}
7901 7999
7902void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
7903 u8 *rand192, u8 *hash256, u8 *rand256,
7904 u8 status)
7905{
7906 struct mgmt_pending_cmd *cmd;
7907
7908 BT_DBG("%s status %u", hdev->name, status);
7909
7910 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
7911 if (!cmd)
7912 return;
7913
7914 if (status) {
7915 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
7916 mgmt_status(status));
7917 } else {
7918 struct mgmt_rp_read_local_oob_data rp;
7919 size_t rp_size = sizeof(rp);
7920
7921 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
7922 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
7923
7924 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
7925 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
7926 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
7927 } else {
7928 rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
7929 }
7930
7931 mgmt_cmd_complete(cmd->sk, hdev->id,
7932 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
7933 &rp, rp_size);
7934 }
7935
7936 mgmt_pending_remove(cmd);
7937}
7938
7939static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16]) 8000static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7940{ 8001{
7941 int i; 8002 int i;
diff --git a/net/bluetooth/selftest.c b/net/bluetooth/selftest.c
index 378f4064952c..dc688f13e496 100644
--- a/net/bluetooth/selftest.c
+++ b/net/bluetooth/selftest.c
@@ -21,6 +21,8 @@
21 SOFTWARE IS DISCLAIMED. 21 SOFTWARE IS DISCLAIMED.
22*/ 22*/
23 23
24#include <linux/debugfs.h>
25
24#include <net/bluetooth/bluetooth.h> 26#include <net/bluetooth/bluetooth.h>
25#include <net/bluetooth/hci_core.h> 27#include <net/bluetooth/hci_core.h>
26 28
@@ -154,6 +156,21 @@ static int __init test_ecdh_sample(const u8 priv_a[32], const u8 priv_b[32],
154 return 0; 156 return 0;
155} 157}
156 158
159static char test_ecdh_buffer[32];
160
161static ssize_t test_ecdh_read(struct file *file, char __user *user_buf,
162 size_t count, loff_t *ppos)
163{
164 return simple_read_from_buffer(user_buf, count, ppos, test_ecdh_buffer,
165 strlen(test_ecdh_buffer));
166}
167
168static const struct file_operations test_ecdh_fops = {
169 .open = simple_open,
170 .read = test_ecdh_read,
171 .llseek = default_llseek,
172};
173
157static int __init test_ecdh(void) 174static int __init test_ecdh(void)
158{ 175{
159 ktime_t calltime, delta, rettime; 176 ktime_t calltime, delta, rettime;
@@ -165,19 +182,19 @@ static int __init test_ecdh(void)
165 err = test_ecdh_sample(priv_a_1, priv_b_1, pub_a_1, pub_b_1, dhkey_1); 182 err = test_ecdh_sample(priv_a_1, priv_b_1, pub_a_1, pub_b_1, dhkey_1);
166 if (err) { 183 if (err) {
167 BT_ERR("ECDH sample 1 failed"); 184 BT_ERR("ECDH sample 1 failed");
168 return err; 185 goto done;
169 } 186 }
170 187
171 err = test_ecdh_sample(priv_a_2, priv_b_2, pub_a_2, pub_b_2, dhkey_2); 188 err = test_ecdh_sample(priv_a_2, priv_b_2, pub_a_2, pub_b_2, dhkey_2);
172 if (err) { 189 if (err) {
173 BT_ERR("ECDH sample 2 failed"); 190 BT_ERR("ECDH sample 2 failed");
174 return err; 191 goto done;
175 } 192 }
176 193
177 err = test_ecdh_sample(priv_a_3, priv_a_3, pub_a_3, pub_a_3, dhkey_3); 194 err = test_ecdh_sample(priv_a_3, priv_a_3, pub_a_3, pub_a_3, dhkey_3);
178 if (err) { 195 if (err) {
179 BT_ERR("ECDH sample 3 failed"); 196 BT_ERR("ECDH sample 3 failed");
180 return err; 197 goto done;
181 } 198 }
182 199
183 rettime = ktime_get(); 200 rettime = ktime_get();
@@ -186,7 +203,17 @@ static int __init test_ecdh(void)
186 203
187 BT_INFO("ECDH test passed in %llu usecs", duration); 204 BT_INFO("ECDH test passed in %llu usecs", duration);
188 205
189 return 0; 206done:
207 if (!err)
208 snprintf(test_ecdh_buffer, sizeof(test_ecdh_buffer),
209 "PASS (%llu usecs)\n", duration);
210 else
211 snprintf(test_ecdh_buffer, sizeof(test_ecdh_buffer), "FAIL\n");
212
213 debugfs_create_file("selftest_ecdh", 0444, bt_debugfs, NULL,
214 &test_ecdh_fops);
215
216 return err;
190} 217}
191 218
192#else 219#else
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 1ec3f66b5a74..1ab3dc9c8f99 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -3017,7 +3017,7 @@ static struct sk_buff *smp_alloc_skb_cb(struct l2cap_chan *chan,
3017 return ERR_PTR(-ENOMEM); 3017 return ERR_PTR(-ENOMEM);
3018 3018
3019 skb->priority = HCI_PRIO_MAX; 3019 skb->priority = HCI_PRIO_MAX;
3020 bt_cb(skb)->chan = chan; 3020 bt_cb(skb)->l2cap.chan = chan;
3021 3021
3022 return skb; 3022 return skb;
3023} 3023}
@@ -3549,6 +3549,21 @@ static int __init test_h6(struct crypto_hash *tfm_cmac)
3549 return 0; 3549 return 0;
3550} 3550}
3551 3551
3552static char test_smp_buffer[32];
3553
3554static ssize_t test_smp_read(struct file *file, char __user *user_buf,
3555 size_t count, loff_t *ppos)
3556{
3557 return simple_read_from_buffer(user_buf, count, ppos, test_smp_buffer,
3558 strlen(test_smp_buffer));
3559}
3560
3561static const struct file_operations test_smp_fops = {
3562 .open = simple_open,
3563 .read = test_smp_read,
3564 .llseek = default_llseek,
3565};
3566
3552static int __init run_selftests(struct crypto_blkcipher *tfm_aes, 3567static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
3553 struct crypto_hash *tfm_cmac) 3568 struct crypto_hash *tfm_cmac)
3554{ 3569{
@@ -3561,49 +3576,49 @@ static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
3561 err = test_ah(tfm_aes); 3576 err = test_ah(tfm_aes);
3562 if (err) { 3577 if (err) {
3563 BT_ERR("smp_ah test failed"); 3578 BT_ERR("smp_ah test failed");
3564 return err; 3579 goto done;
3565 } 3580 }
3566 3581
3567 err = test_c1(tfm_aes); 3582 err = test_c1(tfm_aes);
3568 if (err) { 3583 if (err) {
3569 BT_ERR("smp_c1 test failed"); 3584 BT_ERR("smp_c1 test failed");
3570 return err; 3585 goto done;
3571 } 3586 }
3572 3587
3573 err = test_s1(tfm_aes); 3588 err = test_s1(tfm_aes);
3574 if (err) { 3589 if (err) {
3575 BT_ERR("smp_s1 test failed"); 3590 BT_ERR("smp_s1 test failed");
3576 return err; 3591 goto done;
3577 } 3592 }
3578 3593
3579 err = test_f4(tfm_cmac); 3594 err = test_f4(tfm_cmac);
3580 if (err) { 3595 if (err) {
3581 BT_ERR("smp_f4 test failed"); 3596 BT_ERR("smp_f4 test failed");
3582 return err; 3597 goto done;
3583 } 3598 }
3584 3599
3585 err = test_f5(tfm_cmac); 3600 err = test_f5(tfm_cmac);
3586 if (err) { 3601 if (err) {
3587 BT_ERR("smp_f5 test failed"); 3602 BT_ERR("smp_f5 test failed");
3588 return err; 3603 goto done;
3589 } 3604 }
3590 3605
3591 err = test_f6(tfm_cmac); 3606 err = test_f6(tfm_cmac);
3592 if (err) { 3607 if (err) {
3593 BT_ERR("smp_f6 test failed"); 3608 BT_ERR("smp_f6 test failed");
3594 return err; 3609 goto done;
3595 } 3610 }
3596 3611
3597 err = test_g2(tfm_cmac); 3612 err = test_g2(tfm_cmac);
3598 if (err) { 3613 if (err) {
3599 BT_ERR("smp_g2 test failed"); 3614 BT_ERR("smp_g2 test failed");
3600 return err; 3615 goto done;
3601 } 3616 }
3602 3617
3603 err = test_h6(tfm_cmac); 3618 err = test_h6(tfm_cmac);
3604 if (err) { 3619 if (err) {
3605 BT_ERR("smp_h6 test failed"); 3620 BT_ERR("smp_h6 test failed");
3606 return err; 3621 goto done;
3607 } 3622 }
3608 3623
3609 rettime = ktime_get(); 3624 rettime = ktime_get();
@@ -3612,7 +3627,17 @@ static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
3612 3627
3613 BT_INFO("SMP test passed in %llu usecs", duration); 3628 BT_INFO("SMP test passed in %llu usecs", duration);
3614 3629
3615 return 0; 3630done:
3631 if (!err)
3632 snprintf(test_smp_buffer, sizeof(test_smp_buffer),
3633 "PASS (%llu usecs)\n", duration);
3634 else
3635 snprintf(test_smp_buffer, sizeof(test_smp_buffer), "FAIL\n");
3636
3637 debugfs_create_file("selftest_smp", 0444, bt_debugfs, NULL,
3638 &test_smp_fops);
3639
3640 return err;
3616} 3641}
3617 3642
3618int __init bt_selftest_smp(void) 3643int __init bt_selftest_smp(void)
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 3304a5442331..e97572b5d2cc 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -35,7 +35,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
35 p->state == BR_STATE_FORWARDING; 35 p->state == BR_STATE_FORWARDING;
36} 36}
37 37
38int br_dev_queue_push_xmit(struct sk_buff *skb) 38int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb)
39{ 39{
40 if (!is_skb_forwardable(skb->dev, skb)) { 40 if (!is_skb_forwardable(skb->dev, skb)) {
41 kfree_skb(skb); 41 kfree_skb(skb);
@@ -49,9 +49,10 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
49} 49}
50EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit); 50EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
51 51
52int br_forward_finish(struct sk_buff *skb) 52int br_forward_finish(struct sock *sk, struct sk_buff *skb)
53{ 53{
54 return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev, 54 return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, sk, skb,
55 NULL, skb->dev,
55 br_dev_queue_push_xmit); 56 br_dev_queue_push_xmit);
56 57
57} 58}
@@ -75,7 +76,8 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
75 return; 76 return;
76 } 77 }
77 78
78 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, 79 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb,
80 NULL, skb->dev,
79 br_forward_finish); 81 br_forward_finish);
80} 82}
81 83
@@ -96,7 +98,8 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
96 skb->dev = to->dev; 98 skb->dev = to->dev;
97 skb_forward_csum(skb); 99 skb_forward_csum(skb);
98 100
99 NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev, 101 NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, NULL, skb,
102 indev, skb->dev,
100 br_forward_finish); 103 br_forward_finish);
101} 104}
102 105
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 052c5ebbc947..f921a5dce22d 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -55,8 +55,9 @@ static int br_pass_frame_up(struct sk_buff *skb)
55 if (!skb) 55 if (!skb)
56 return NET_RX_DROP; 56 return NET_RX_DROP;
57 57
58 return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, 58 return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, NULL, skb,
59 netif_receive_skb); 59 indev, NULL,
60 netif_receive_skb_sk);
60} 61}
61 62
62static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br, 63static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
@@ -119,7 +120,7 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
119} 120}
120 121
121/* note: already called with rcu_read_lock */ 122/* note: already called with rcu_read_lock */
122int br_handle_frame_finish(struct sk_buff *skb) 123int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb)
123{ 124{
124 const unsigned char *dest = eth_hdr(skb)->h_dest; 125 const unsigned char *dest = eth_hdr(skb)->h_dest;
125 struct net_bridge_port *p = br_port_get_rcu(skb->dev); 126 struct net_bridge_port *p = br_port_get_rcu(skb->dev);
@@ -207,7 +208,7 @@ drop:
207EXPORT_SYMBOL_GPL(br_handle_frame_finish); 208EXPORT_SYMBOL_GPL(br_handle_frame_finish);
208 209
209/* note: already called with rcu_read_lock */ 210/* note: already called with rcu_read_lock */
210static int br_handle_local_finish(struct sk_buff *skb) 211static int br_handle_local_finish(struct sock *sk, struct sk_buff *skb)
211{ 212{
212 struct net_bridge_port *p = br_port_get_rcu(skb->dev); 213 struct net_bridge_port *p = br_port_get_rcu(skb->dev);
213 u16 vid = 0; 214 u16 vid = 0;
@@ -277,8 +278,8 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
277 } 278 }
278 279
279 /* Deliver packet to local host only */ 280 /* Deliver packet to local host only */
280 if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev, 281 if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, NULL, skb,
281 NULL, br_handle_local_finish)) { 282 skb->dev, NULL, br_handle_local_finish)) {
282 return RX_HANDLER_CONSUMED; /* consumed by filter */ 283 return RX_HANDLER_CONSUMED; /* consumed by filter */
283 } else { 284 } else {
284 *pskb = skb; 285 *pskb = skb;
@@ -302,7 +303,8 @@ forward:
302 if (ether_addr_equal(p->br->dev->dev_addr, dest)) 303 if (ether_addr_equal(p->br->dev->dev_addr, dest))
303 skb->pkt_type = PACKET_HOST; 304 skb->pkt_type = PACKET_HOST;
304 305
305 NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, 306 NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, NULL, skb,
307 skb->dev, NULL,
306 br_handle_frame_finish); 308 br_handle_frame_finish);
307 break; 309 break;
308 default: 310 default:
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index c465876c7861..4b6722f8f179 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -814,7 +814,8 @@ static void __br_multicast_send_query(struct net_bridge *br,
814 814
815 if (port) { 815 if (port) {
816 skb->dev = port->dev; 816 skb->dev = port->dev;
817 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, 817 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb,
818 NULL, skb->dev,
818 br_dev_queue_push_xmit); 819 br_dev_queue_push_xmit);
819 } else { 820 } else {
820 br_multicast_select_own_querier(br, ip, skb); 821 br_multicast_select_own_querier(br, ip, skb);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index e8ac7432acb6..ab55e2472beb 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -277,7 +277,7 @@ static void nf_bridge_update_protocol(struct sk_buff *skb)
277/* PF_BRIDGE/PRE_ROUTING *********************************************/ 277/* PF_BRIDGE/PRE_ROUTING *********************************************/
278/* Undo the changes made for ip6tables PREROUTING and continue the 278/* Undo the changes made for ip6tables PREROUTING and continue the
279 * bridge PRE_ROUTING hook. */ 279 * bridge PRE_ROUTING hook. */
280static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb) 280static int br_nf_pre_routing_finish_ipv6(struct sock *sk, struct sk_buff *skb)
281{ 281{
282 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); 282 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
283 struct rtable *rt; 283 struct rtable *rt;
@@ -298,7 +298,8 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
298 skb->dev = nf_bridge->physindev; 298 skb->dev = nf_bridge->physindev;
299 nf_bridge_update_protocol(skb); 299 nf_bridge_update_protocol(skb);
300 nf_bridge_push_encap_header(skb); 300 nf_bridge_push_encap_header(skb);
301 NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, 301 NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, sk, skb,
302 skb->dev, NULL,
302 br_handle_frame_finish, 1); 303 br_handle_frame_finish, 1);
303 304
304 return 0; 305 return 0;
@@ -309,7 +310,7 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
309 * don't, we use the neighbour framework to find out. In both cases, we make 310 * don't, we use the neighbour framework to find out. In both cases, we make
310 * sure that br_handle_frame_finish() is called afterwards. 311 * sure that br_handle_frame_finish() is called afterwards.
311 */ 312 */
312static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) 313static int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb)
313{ 314{
314 struct neighbour *neigh; 315 struct neighbour *neigh;
315 struct dst_entry *dst; 316 struct dst_entry *dst;
@@ -326,7 +327,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
326 if (neigh->hh.hh_len) { 327 if (neigh->hh.hh_len) {
327 neigh_hh_bridge(&neigh->hh, skb); 328 neigh_hh_bridge(&neigh->hh, skb);
328 skb->dev = nf_bridge->physindev; 329 skb->dev = nf_bridge->physindev;
329 ret = br_handle_frame_finish(skb); 330 ret = br_handle_frame_finish(sk, skb);
330 } else { 331 } else {
331 /* the neighbour function below overwrites the complete 332 /* the neighbour function below overwrites the complete
332 * MAC header, so we save the Ethernet source address and 333 * MAC header, so we save the Ethernet source address and
@@ -403,7 +404,7 @@ static bool dnat_took_place(const struct sk_buff *skb)
403 * device, we proceed as if ip_route_input() succeeded. If it differs from the 404 * device, we proceed as if ip_route_input() succeeded. If it differs from the
404 * logical bridge port or if ip_route_output_key() fails we drop the packet. 405 * logical bridge port or if ip_route_output_key() fails we drop the packet.
405 */ 406 */
406static int br_nf_pre_routing_finish(struct sk_buff *skb) 407static int br_nf_pre_routing_finish(struct sock *sk, struct sk_buff *skb)
407{ 408{
408 struct net_device *dev = skb->dev; 409 struct net_device *dev = skb->dev;
409 struct iphdr *iph = ip_hdr(skb); 410 struct iphdr *iph = ip_hdr(skb);
@@ -456,7 +457,7 @@ bridged_dnat:
456 nf_bridge_push_encap_header(skb); 457 nf_bridge_push_encap_header(skb);
457 NF_HOOK_THRESH(NFPROTO_BRIDGE, 458 NF_HOOK_THRESH(NFPROTO_BRIDGE,
458 NF_BR_PRE_ROUTING, 459 NF_BR_PRE_ROUTING,
459 skb, skb->dev, NULL, 460 sk, skb, skb->dev, NULL,
460 br_nf_pre_routing_finish_bridge, 461 br_nf_pre_routing_finish_bridge,
461 1); 462 1);
462 return 0; 463 return 0;
@@ -476,7 +477,8 @@ bridged_dnat:
476 skb->dev = nf_bridge->physindev; 477 skb->dev = nf_bridge->physindev;
477 nf_bridge_update_protocol(skb); 478 nf_bridge_update_protocol(skb);
478 nf_bridge_push_encap_header(skb); 479 nf_bridge_push_encap_header(skb);
479 NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL, 480 NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, sk, skb,
481 skb->dev, NULL,
480 br_handle_frame_finish, 1); 482 br_handle_frame_finish, 1);
481 483
482 return 0; 484 return 0;
@@ -579,9 +581,7 @@ bad:
579 * to ip6tables, which doesn't support NAT, so things are fairly simple. */ 581 * to ip6tables, which doesn't support NAT, so things are fairly simple. */
580static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, 582static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
581 struct sk_buff *skb, 583 struct sk_buff *skb,
582 const struct net_device *in, 584 const struct nf_hook_state *state)
583 const struct net_device *out,
584 int (*okfn)(struct sk_buff *))
585{ 585{
586 const struct ipv6hdr *hdr; 586 const struct ipv6hdr *hdr;
587 u32 pkt_len; 587 u32 pkt_len;
@@ -615,7 +615,8 @@ static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
615 return NF_DROP; 615 return NF_DROP;
616 616
617 skb->protocol = htons(ETH_P_IPV6); 617 skb->protocol = htons(ETH_P_IPV6);
618 NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, 618 NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->sk, skb,
619 skb->dev, NULL,
619 br_nf_pre_routing_finish_ipv6); 620 br_nf_pre_routing_finish_ipv6);
620 621
621 return NF_STOLEN; 622 return NF_STOLEN;
@@ -629,9 +630,7 @@ static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
629 * address to be able to detect DNAT afterwards. */ 630 * address to be able to detect DNAT afterwards. */
630static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops, 631static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
631 struct sk_buff *skb, 632 struct sk_buff *skb,
632 const struct net_device *in, 633 const struct nf_hook_state *state)
633 const struct net_device *out,
634 int (*okfn)(struct sk_buff *))
635{ 634{
636 struct net_bridge_port *p; 635 struct net_bridge_port *p;
637 struct net_bridge *br; 636 struct net_bridge *br;
@@ -640,7 +639,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
640 if (unlikely(!pskb_may_pull(skb, len))) 639 if (unlikely(!pskb_may_pull(skb, len)))
641 return NF_DROP; 640 return NF_DROP;
642 641
643 p = br_port_get_rcu(in); 642 p = br_port_get_rcu(state->in);
644 if (p == NULL) 643 if (p == NULL)
645 return NF_DROP; 644 return NF_DROP;
646 br = p->br; 645 br = p->br;
@@ -650,7 +649,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
650 return NF_ACCEPT; 649 return NF_ACCEPT;
651 650
652 nf_bridge_pull_encap_header_rcsum(skb); 651 nf_bridge_pull_encap_header_rcsum(skb);
653 return br_nf_pre_routing_ipv6(ops, skb, in, out, okfn); 652 return br_nf_pre_routing_ipv6(ops, skb, state);
654 } 653 }
655 654
656 if (!brnf_call_iptables && !br->nf_call_iptables) 655 if (!brnf_call_iptables && !br->nf_call_iptables)
@@ -672,7 +671,8 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
672 671
673 skb->protocol = htons(ETH_P_IP); 672 skb->protocol = htons(ETH_P_IP);
674 673
675 NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, 674 NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->sk, skb,
675 skb->dev, NULL,
676 br_nf_pre_routing_finish); 676 br_nf_pre_routing_finish);
677 677
678 return NF_STOLEN; 678 return NF_STOLEN;
@@ -688,16 +688,14 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
688 * prevent this from happening. */ 688 * prevent this from happening. */
689static unsigned int br_nf_local_in(const struct nf_hook_ops *ops, 689static unsigned int br_nf_local_in(const struct nf_hook_ops *ops,
690 struct sk_buff *skb, 690 struct sk_buff *skb,
691 const struct net_device *in, 691 const struct nf_hook_state *state)
692 const struct net_device *out,
693 int (*okfn)(struct sk_buff *))
694{ 692{
695 br_drop_fake_rtable(skb); 693 br_drop_fake_rtable(skb);
696 return NF_ACCEPT; 694 return NF_ACCEPT;
697} 695}
698 696
699/* PF_BRIDGE/FORWARD *************************************************/ 697/* PF_BRIDGE/FORWARD *************************************************/
700static int br_nf_forward_finish(struct sk_buff *skb) 698static int br_nf_forward_finish(struct sock *sk, struct sk_buff *skb)
701{ 699{
702 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); 700 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
703 struct net_device *in; 701 struct net_device *in;
@@ -721,8 +719,8 @@ static int br_nf_forward_finish(struct sk_buff *skb)
721 } 719 }
722 nf_bridge_push_encap_header(skb); 720 nf_bridge_push_encap_header(skb);
723 721
724 NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in, 722 NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, sk, skb,
725 skb->dev, br_forward_finish, 1); 723 in, skb->dev, br_forward_finish, 1);
726 return 0; 724 return 0;
727} 725}
728 726
@@ -734,9 +732,7 @@ static int br_nf_forward_finish(struct sk_buff *skb)
734 * bridge ports. */ 732 * bridge ports. */
735static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops, 733static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
736 struct sk_buff *skb, 734 struct sk_buff *skb,
737 const struct net_device *in, 735 const struct nf_hook_state *state)
738 const struct net_device *out,
739 int (*okfn)(struct sk_buff *))
740{ 736{
741 struct nf_bridge_info *nf_bridge; 737 struct nf_bridge_info *nf_bridge;
742 struct net_device *parent; 738 struct net_device *parent;
@@ -754,7 +750,7 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
754 if (!nf_bridge) 750 if (!nf_bridge)
755 return NF_DROP; 751 return NF_DROP;
756 752
757 parent = bridge_parent(out); 753 parent = bridge_parent(state->out);
758 if (!parent) 754 if (!parent)
759 return NF_DROP; 755 return NF_DROP;
760 756
@@ -787,23 +783,22 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
787 else 783 else
788 skb->protocol = htons(ETH_P_IPV6); 784 skb->protocol = htons(ETH_P_IPV6);
789 785
790 NF_HOOK(pf, NF_INET_FORWARD, skb, brnf_get_logical_dev(skb, in), parent, 786 NF_HOOK(pf, NF_INET_FORWARD, NULL, skb,
791 br_nf_forward_finish); 787 brnf_get_logical_dev(skb, state->in),
788 parent, br_nf_forward_finish);
792 789
793 return NF_STOLEN; 790 return NF_STOLEN;
794} 791}
795 792
796static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops, 793static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
797 struct sk_buff *skb, 794 struct sk_buff *skb,
798 const struct net_device *in, 795 const struct nf_hook_state *state)
799 const struct net_device *out,
800 int (*okfn)(struct sk_buff *))
801{ 796{
802 struct net_bridge_port *p; 797 struct net_bridge_port *p;
803 struct net_bridge *br; 798 struct net_bridge *br;
804 struct net_device **d = (struct net_device **)(skb->cb); 799 struct net_device **d = (struct net_device **)(skb->cb);
805 800
806 p = br_port_get_rcu(out); 801 p = br_port_get_rcu(state->out);
807 if (p == NULL) 802 if (p == NULL)
808 return NF_ACCEPT; 803 return NF_ACCEPT;
809 br = p->br; 804 br = p->br;
@@ -822,15 +817,15 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
822 nf_bridge_push_encap_header(skb); 817 nf_bridge_push_encap_header(skb);
823 return NF_ACCEPT; 818 return NF_ACCEPT;
824 } 819 }
825 *d = (struct net_device *)in; 820 *d = state->in;
826 NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in, 821 NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, state->sk, skb,
827 (struct net_device *)out, br_nf_forward_finish); 822 state->in, state->out, br_nf_forward_finish);
828 823
829 return NF_STOLEN; 824 return NF_STOLEN;
830} 825}
831 826
832#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) 827#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
833static int br_nf_push_frag_xmit(struct sk_buff *skb) 828static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb)
834{ 829{
835 struct brnf_frag_data *data; 830 struct brnf_frag_data *data;
836 int err; 831 int err;
@@ -846,17 +841,17 @@ static int br_nf_push_frag_xmit(struct sk_buff *skb)
846 skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size); 841 skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
847 __skb_push(skb, data->encap_size); 842 __skb_push(skb, data->encap_size);
848 843
849 return br_dev_queue_push_xmit(skb); 844 return br_dev_queue_push_xmit(sk, skb);
850} 845}
851 846
852static int br_nf_dev_queue_xmit(struct sk_buff *skb) 847static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
853{ 848{
854 int ret; 849 int ret;
855 int frag_max_size; 850 int frag_max_size;
856 unsigned int mtu_reserved; 851 unsigned int mtu_reserved;
857 852
858 if (skb_is_gso(skb) || skb->protocol != htons(ETH_P_IP)) 853 if (skb_is_gso(skb) || skb->protocol != htons(ETH_P_IP))
859 return br_dev_queue_push_xmit(skb); 854 return br_dev_queue_push_xmit(sk, skb);
860 855
861 mtu_reserved = nf_bridge_mtu_reduction(skb); 856 mtu_reserved = nf_bridge_mtu_reduction(skb);
862 /* This is wrong! We should preserve the original fragment 857 /* This is wrong! We should preserve the original fragment
@@ -880,26 +875,24 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
880 skb_copy_from_linear_data_offset(skb, -data->size, data->mac, 875 skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
881 data->size); 876 data->size);
882 877
883 ret = ip_fragment(skb, br_nf_push_frag_xmit); 878 ret = ip_fragment(sk, skb, br_nf_push_frag_xmit);
884 } else { 879 } else {
885 ret = br_dev_queue_push_xmit(skb); 880 ret = br_dev_queue_push_xmit(sk, skb);
886 } 881 }
887 882
888 return ret; 883 return ret;
889} 884}
890#else 885#else
891static int br_nf_dev_queue_xmit(struct sk_buff *skb) 886static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
892{ 887{
893 return br_dev_queue_push_xmit(skb); 888 return br_dev_queue_push_xmit(sk, skb);
894} 889}
895#endif 890#endif
896 891
897/* PF_BRIDGE/POST_ROUTING ********************************************/ 892/* PF_BRIDGE/POST_ROUTING ********************************************/
898static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops, 893static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
899 struct sk_buff *skb, 894 struct sk_buff *skb,
900 const struct net_device *in, 895 const struct nf_hook_state *state)
901 const struct net_device *out,
902 int (*okfn)(struct sk_buff *))
903{ 896{
904 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); 897 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
905 struct net_device *realoutdev = bridge_parent(skb->dev); 898 struct net_device *realoutdev = bridge_parent(skb->dev);
@@ -936,7 +929,8 @@ static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
936 else 929 else
937 skb->protocol = htons(ETH_P_IPV6); 930 skb->protocol = htons(ETH_P_IPV6);
938 931
939 NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev, 932 NF_HOOK(pf, NF_INET_POST_ROUTING, state->sk, skb,
933 NULL, realoutdev,
940 br_nf_dev_queue_xmit); 934 br_nf_dev_queue_xmit);
941 935
942 return NF_STOLEN; 936 return NF_STOLEN;
@@ -947,9 +941,7 @@ static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
947 * for the second time. */ 941 * for the second time. */
948static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops, 942static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
949 struct sk_buff *skb, 943 struct sk_buff *skb,
950 const struct net_device *in, 944 const struct nf_hook_state *state)
951 const struct net_device *out,
952 int (*okfn)(struct sk_buff *))
953{ 945{
954 if (skb->nf_bridge && 946 if (skb->nf_bridge &&
955 !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) { 947 !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
@@ -981,7 +973,7 @@ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
981 nf_bridge->neigh_header, 973 nf_bridge->neigh_header,
982 ETH_HLEN - ETH_ALEN); 974 ETH_HLEN - ETH_ALEN);
983 skb->dev = nf_bridge->physindev; 975 skb->dev = nf_bridge->physindev;
984 br_handle_frame_finish(skb); 976 br_handle_frame_finish(NULL, skb);
985} 977}
986 978
987static int br_nf_dev_xmit(struct sk_buff *skb) 979static int br_nf_dev_xmit(struct sk_buff *skb)
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index e1115a224a95..0e4ddb81610d 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -305,8 +305,8 @@ static int br_fill_ifinfo(struct sk_buff *skb,
305 nla_put_u8(skb, IFLA_OPERSTATE, operstate) || 305 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
306 (dev->addr_len && 306 (dev->addr_len &&
307 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || 307 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
308 (dev->ifindex != dev->iflink && 308 (dev->ifindex != dev_get_iflink(dev) &&
309 nla_put_u32(skb, IFLA_LINK, dev->iflink))) 309 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
310 goto nla_put_failure; 310 goto nla_put_failure;
311 311
312 if (event == RTM_NEWLINK && port) { 312 if (event == RTM_NEWLINK && port) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index b46fa0c5b8ec..6ca0251cb478 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -410,10 +410,10 @@ int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
410 410
411/* br_forward.c */ 411/* br_forward.c */
412void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb); 412void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb);
413int br_dev_queue_push_xmit(struct sk_buff *skb); 413int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb);
414void br_forward(const struct net_bridge_port *to, 414void br_forward(const struct net_bridge_port *to,
415 struct sk_buff *skb, struct sk_buff *skb0); 415 struct sk_buff *skb, struct sk_buff *skb0);
416int br_forward_finish(struct sk_buff *skb); 416int br_forward_finish(struct sock *sk, struct sk_buff *skb);
417void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast); 417void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast);
418void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, 418void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
419 struct sk_buff *skb2, bool unicast); 419 struct sk_buff *skb2, bool unicast);
@@ -431,7 +431,7 @@ void br_port_flags_change(struct net_bridge_port *port, unsigned long mask);
431void br_manage_promisc(struct net_bridge *br); 431void br_manage_promisc(struct net_bridge *br);
432 432
433/* br_input.c */ 433/* br_input.c */
434int br_handle_frame_finish(struct sk_buff *skb); 434int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
435rx_handler_result_t br_handle_frame(struct sk_buff **pskb); 435rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
436 436
437static inline bool br_rx_handler_check_rcu(const struct net_device *dev) 437static inline bool br_rx_handler_check_rcu(const struct net_device *dev)
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index bdb459d21ad8..534fc4cd263e 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -54,8 +54,9 @@ static void br_send_bpdu(struct net_bridge_port *p,
54 54
55 skb_reset_mac_header(skb); 55 skb_reset_mac_header(skb);
56 56
57 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, 57 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb,
58 dev_queue_xmit); 58 NULL, skb->dev,
59 dev_queue_xmit_sk);
59} 60}
60 61
61static inline void br_set_ticks(unsigned char *dest, int j) 62static inline void br_set_ticks(unsigned char *dest, int j)
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index ce205aabf9c5..8a3f63b2e807 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -58,20 +58,18 @@ static const struct ebt_table frame_filter = {
58 58
59static unsigned int 59static unsigned int
60ebt_in_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, 60ebt_in_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
61 const struct net_device *in, const struct net_device *out, 61 const struct nf_hook_state *state)
62 int (*okfn)(struct sk_buff *))
63{ 62{
64 return ebt_do_table(ops->hooknum, skb, in, out, 63 return ebt_do_table(ops->hooknum, skb, state->in, state->out,
65 dev_net(in)->xt.frame_filter); 64 dev_net(state->in)->xt.frame_filter);
66} 65}
67 66
68static unsigned int 67static unsigned int
69ebt_out_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, 68ebt_out_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
70 const struct net_device *in, const struct net_device *out, 69 const struct nf_hook_state *state)
71 int (*okfn)(struct sk_buff *))
72{ 70{
73 return ebt_do_table(ops->hooknum, skb, in, out, 71 return ebt_do_table(ops->hooknum, skb, state->in, state->out,
74 dev_net(out)->xt.frame_filter); 72 dev_net(state->out)->xt.frame_filter);
75} 73}
76 74
77static struct nf_hook_ops ebt_ops_filter[] __read_mostly = { 75static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index a0ac2984fb6c..c5ef5b1ab678 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -58,20 +58,18 @@ static struct ebt_table frame_nat = {
58 58
59static unsigned int 59static unsigned int
60ebt_nat_in(const struct nf_hook_ops *ops, struct sk_buff *skb, 60ebt_nat_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
61 const struct net_device *in, const struct net_device *out, 61 const struct nf_hook_state *state)
62 int (*okfn)(struct sk_buff *))
63{ 62{
64 return ebt_do_table(ops->hooknum, skb, in, out, 63 return ebt_do_table(ops->hooknum, skb, state->in, state->out,
65 dev_net(in)->xt.frame_nat); 64 dev_net(state->in)->xt.frame_nat);
66} 65}
67 66
68static unsigned int 67static unsigned int
69ebt_nat_out(const struct nf_hook_ops *ops, struct sk_buff *skb, 68ebt_nat_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
70 const struct net_device *in, const struct net_device *out, 69 const struct nf_hook_state *state)
71 int (*okfn)(struct sk_buff *))
72{ 70{
73 return ebt_do_table(ops->hooknum, skb, in, out, 71 return ebt_do_table(ops->hooknum, skb, state->in, state->out,
74 dev_net(out)->xt.frame_nat); 72 dev_net(state->out)->xt.frame_nat);
75} 73}
76 74
77static struct nf_hook_ops ebt_ops_nat[] __read_mostly = { 75static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
index 19473a9371b8..a343e62442b1 100644
--- a/net/bridge/netfilter/nf_tables_bridge.c
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -67,47 +67,43 @@ EXPORT_SYMBOL_GPL(nft_bridge_ip6hdr_validate);
67static inline void nft_bridge_set_pktinfo_ipv4(struct nft_pktinfo *pkt, 67static inline void nft_bridge_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
68 const struct nf_hook_ops *ops, 68 const struct nf_hook_ops *ops,
69 struct sk_buff *skb, 69 struct sk_buff *skb,
70 const struct net_device *in, 70 const struct nf_hook_state *state)
71 const struct net_device *out)
72{ 71{
73 if (nft_bridge_iphdr_validate(skb)) 72 if (nft_bridge_iphdr_validate(skb))
74 nft_set_pktinfo_ipv4(pkt, ops, skb, in, out); 73 nft_set_pktinfo_ipv4(pkt, ops, skb, state);
75 else 74 else
76 nft_set_pktinfo(pkt, ops, skb, in, out); 75 nft_set_pktinfo(pkt, ops, skb, state);
77} 76}
78 77
79static inline void nft_bridge_set_pktinfo_ipv6(struct nft_pktinfo *pkt, 78static inline void nft_bridge_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
80 const struct nf_hook_ops *ops, 79 const struct nf_hook_ops *ops,
81 struct sk_buff *skb, 80 struct sk_buff *skb,
82 const struct net_device *in, 81 const struct nf_hook_state *state)
83 const struct net_device *out)
84{ 82{
85#if IS_ENABLED(CONFIG_IPV6) 83#if IS_ENABLED(CONFIG_IPV6)
86 if (nft_bridge_ip6hdr_validate(skb) && 84 if (nft_bridge_ip6hdr_validate(skb) &&
87 nft_set_pktinfo_ipv6(pkt, ops, skb, in, out) == 0) 85 nft_set_pktinfo_ipv6(pkt, ops, skb, state) == 0)
88 return; 86 return;
89#endif 87#endif
90 nft_set_pktinfo(pkt, ops, skb, in, out); 88 nft_set_pktinfo(pkt, ops, skb, state);
91} 89}
92 90
93static unsigned int 91static unsigned int
94nft_do_chain_bridge(const struct nf_hook_ops *ops, 92nft_do_chain_bridge(const struct nf_hook_ops *ops,
95 struct sk_buff *skb, 93 struct sk_buff *skb,
96 const struct net_device *in, 94 const struct nf_hook_state *state)
97 const struct net_device *out,
98 int (*okfn)(struct sk_buff *))
99{ 95{
100 struct nft_pktinfo pkt; 96 struct nft_pktinfo pkt;
101 97
102 switch (eth_hdr(skb)->h_proto) { 98 switch (eth_hdr(skb)->h_proto) {
103 case htons(ETH_P_IP): 99 case htons(ETH_P_IP):
104 nft_bridge_set_pktinfo_ipv4(&pkt, ops, skb, in, out); 100 nft_bridge_set_pktinfo_ipv4(&pkt, ops, skb, state);
105 break; 101 break;
106 case htons(ETH_P_IPV6): 102 case htons(ETH_P_IPV6):
107 nft_bridge_set_pktinfo_ipv6(&pkt, ops, skb, in, out); 103 nft_bridge_set_pktinfo_ipv6(&pkt, ops, skb, state);
108 break; 104 break;
109 default: 105 default:
110 nft_set_pktinfo(&pkt, ops, skb, in, out); 106 nft_set_pktinfo(&pkt, ops, skb, state);
111 break; 107 break;
112 } 108 }
113 109
diff --git a/net/can/raw.c b/net/can/raw.c
index 63ffdb0f3a23..31b9748cbb4e 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -74,6 +74,12 @@ MODULE_ALIAS("can-proto-1");
74 * storing the single filter in dfilter, to avoid using dynamic memory. 74 * storing the single filter in dfilter, to avoid using dynamic memory.
75 */ 75 */
76 76
77struct uniqframe {
78 ktime_t tstamp;
79 const struct sk_buff *skb;
80 unsigned int join_rx_count;
81};
82
77struct raw_sock { 83struct raw_sock {
78 struct sock sk; 84 struct sock sk;
79 int bound; 85 int bound;
@@ -82,10 +88,12 @@ struct raw_sock {
82 int loopback; 88 int loopback;
83 int recv_own_msgs; 89 int recv_own_msgs;
84 int fd_frames; 90 int fd_frames;
91 int join_filters;
85 int count; /* number of active filters */ 92 int count; /* number of active filters */
86 struct can_filter dfilter; /* default/single filter */ 93 struct can_filter dfilter; /* default/single filter */
87 struct can_filter *filter; /* pointer to filter(s) */ 94 struct can_filter *filter; /* pointer to filter(s) */
88 can_err_mask_t err_mask; 95 can_err_mask_t err_mask;
96 struct uniqframe __percpu *uniq;
89}; 97};
90 98
91/* 99/*
@@ -123,6 +131,26 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
123 if (!ro->fd_frames && oskb->len != CAN_MTU) 131 if (!ro->fd_frames && oskb->len != CAN_MTU)
124 return; 132 return;
125 133
134 /* eliminate multiple filter matches for the same skb */
135 if (this_cpu_ptr(ro->uniq)->skb == oskb &&
136 ktime_equal(this_cpu_ptr(ro->uniq)->tstamp, oskb->tstamp)) {
137 if (ro->join_filters) {
138 this_cpu_inc(ro->uniq->join_rx_count);
139 /* drop frame until all enabled filters matched */
140 if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
141 return;
142 } else {
143 return;
144 }
145 } else {
146 this_cpu_ptr(ro->uniq)->skb = oskb;
147 this_cpu_ptr(ro->uniq)->tstamp = oskb->tstamp;
148 this_cpu_ptr(ro->uniq)->join_rx_count = 1;
149 /* drop first frame to check all enabled filters? */
150 if (ro->join_filters && ro->count > 1)
151 return;
152 }
153
126 /* clone the given skb to be able to enqueue it into the rcv queue */ 154 /* clone the given skb to be able to enqueue it into the rcv queue */
127 skb = skb_clone(oskb, GFP_ATOMIC); 155 skb = skb_clone(oskb, GFP_ATOMIC);
128 if (!skb) 156 if (!skb)
@@ -296,6 +324,12 @@ static int raw_init(struct sock *sk)
296 ro->loopback = 1; 324 ro->loopback = 1;
297 ro->recv_own_msgs = 0; 325 ro->recv_own_msgs = 0;
298 ro->fd_frames = 0; 326 ro->fd_frames = 0;
327 ro->join_filters = 0;
328
329 /* alloc_percpu provides zero'ed memory */
330 ro->uniq = alloc_percpu(struct uniqframe);
331 if (unlikely(!ro->uniq))
332 return -ENOMEM;
299 333
300 /* set notifier */ 334 /* set notifier */
301 ro->notifier.notifier_call = raw_notifier; 335 ro->notifier.notifier_call = raw_notifier;
@@ -339,6 +373,7 @@ static int raw_release(struct socket *sock)
339 ro->ifindex = 0; 373 ro->ifindex = 0;
340 ro->bound = 0; 374 ro->bound = 0;
341 ro->count = 0; 375 ro->count = 0;
376 free_percpu(ro->uniq);
342 377
343 sock_orphan(sk); 378 sock_orphan(sk);
344 sock->sk = NULL; 379 sock->sk = NULL;
@@ -583,6 +618,15 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
583 618
584 break; 619 break;
585 620
621 case CAN_RAW_JOIN_FILTERS:
622 if (optlen != sizeof(ro->join_filters))
623 return -EINVAL;
624
625 if (copy_from_user(&ro->join_filters, optval, optlen))
626 return -EFAULT;
627
628 break;
629
586 default: 630 default:
587 return -ENOPROTOOPT; 631 return -ENOPROTOOPT;
588 } 632 }
@@ -647,6 +691,12 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
647 val = &ro->fd_frames; 691 val = &ro->fd_frames;
648 break; 692 break;
649 693
694 case CAN_RAW_JOIN_FILTERS:
695 if (len > sizeof(int))
696 len = sizeof(int);
697 val = &ro->join_filters;
698 break;
699
650 default: 700 default:
651 return -ENOPROTOOPT; 701 return -ENOPROTOOPT;
652 } 702 }
diff --git a/net/core/dev.c b/net/core/dev.c
index 65492b0354c0..b2775f06c710 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -660,6 +660,27 @@ __setup("netdev=", netdev_boot_setup);
660*******************************************************************************/ 660*******************************************************************************/
661 661
662/** 662/**
663 * dev_get_iflink - get 'iflink' value of a interface
664 * @dev: targeted interface
665 *
666 * Indicates the ifindex the interface is linked to.
667 * Physical interfaces have the same 'ifindex' and 'iflink' values.
668 */
669
670int dev_get_iflink(const struct net_device *dev)
671{
672 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
673 return dev->netdev_ops->ndo_get_iflink(dev);
674
675 /* If dev->rtnl_link_ops is set, it's a virtual interface. */
676 if (dev->rtnl_link_ops)
677 return 0;
678
679 return dev->ifindex;
680}
681EXPORT_SYMBOL(dev_get_iflink);
682
683/**
663 * __dev_get_by_name - find a device by its name 684 * __dev_get_by_name - find a device by its name
664 * @net: the applicable net namespace 685 * @net: the applicable net namespace
665 * @name: name to find 686 * @name: name to find
@@ -2849,14 +2870,16 @@ static void skb_update_prio(struct sk_buff *skb)
2849#define skb_update_prio(skb) 2870#define skb_update_prio(skb)
2850#endif 2871#endif
2851 2872
2852static DEFINE_PER_CPU(int, xmit_recursion); 2873DEFINE_PER_CPU(int, xmit_recursion);
2874EXPORT_SYMBOL(xmit_recursion);
2875
2853#define RECURSION_LIMIT 10 2876#define RECURSION_LIMIT 10
2854 2877
2855/** 2878/**
2856 * dev_loopback_xmit - loop back @skb 2879 * dev_loopback_xmit - loop back @skb
2857 * @skb: buffer to transmit 2880 * @skb: buffer to transmit
2858 */ 2881 */
2859int dev_loopback_xmit(struct sk_buff *skb) 2882int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb)
2860{ 2883{
2861 skb_reset_mac_header(skb); 2884 skb_reset_mac_header(skb);
2862 __skb_pull(skb, skb_network_offset(skb)); 2885 __skb_pull(skb, skb_network_offset(skb));
@@ -2994,11 +3017,11 @@ out:
2994 return rc; 3017 return rc;
2995} 3018}
2996 3019
2997int dev_queue_xmit(struct sk_buff *skb) 3020int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb)
2998{ 3021{
2999 return __dev_queue_xmit(skb, NULL); 3022 return __dev_queue_xmit(skb, NULL);
3000} 3023}
3001EXPORT_SYMBOL(dev_queue_xmit); 3024EXPORT_SYMBOL(dev_queue_xmit_sk);
3002 3025
3003int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv) 3026int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3004{ 3027{
@@ -3830,13 +3853,13 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
3830 * NET_RX_SUCCESS: no congestion 3853 * NET_RX_SUCCESS: no congestion
3831 * NET_RX_DROP: packet was dropped 3854 * NET_RX_DROP: packet was dropped
3832 */ 3855 */
3833int netif_receive_skb(struct sk_buff *skb) 3856int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb)
3834{ 3857{
3835 trace_netif_receive_skb_entry(skb); 3858 trace_netif_receive_skb_entry(skb);
3836 3859
3837 return netif_receive_skb_internal(skb); 3860 return netif_receive_skb_internal(skb);
3838} 3861}
3839EXPORT_SYMBOL(netif_receive_skb); 3862EXPORT_SYMBOL(netif_receive_skb_sk);
3840 3863
3841/* Network device is going away, flush any packets still pending 3864/* Network device is going away, flush any packets still pending
3842 * Called with irqs disabled. 3865 * Called with irqs disabled.
@@ -6314,8 +6337,6 @@ int register_netdevice(struct net_device *dev)
6314 spin_lock_init(&dev->addr_list_lock); 6337 spin_lock_init(&dev->addr_list_lock);
6315 netdev_set_addr_lockdep_class(dev); 6338 netdev_set_addr_lockdep_class(dev);
6316 6339
6317 dev->iflink = -1;
6318
6319 ret = dev_get_valid_name(net, dev, dev->name); 6340 ret = dev_get_valid_name(net, dev, dev->name);
6320 if (ret < 0) 6341 if (ret < 0)
6321 goto out; 6342 goto out;
@@ -6345,9 +6366,6 @@ int register_netdevice(struct net_device *dev)
6345 else if (__dev_get_by_index(net, dev->ifindex)) 6366 else if (__dev_get_by_index(net, dev->ifindex))
6346 goto err_uninit; 6367 goto err_uninit;
6347 6368
6348 if (dev->iflink == -1)
6349 dev->iflink = dev->ifindex;
6350
6351 /* Transfer changeable features to wanted_features and enable 6369 /* Transfer changeable features to wanted_features and enable
6352 * software offloads (GSO and GRO). 6370 * software offloads (GSO and GRO).
6353 */ 6371 */
@@ -7060,12 +7078,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
7060 dev_net_set(dev, net); 7078 dev_net_set(dev, net);
7061 7079
7062 /* If there is an ifindex conflict assign a new one */ 7080 /* If there is an ifindex conflict assign a new one */
7063 if (__dev_get_by_index(net, dev->ifindex)) { 7081 if (__dev_get_by_index(net, dev->ifindex))
7064 int iflink = (dev->iflink == dev->ifindex);
7065 dev->ifindex = dev_new_index(net); 7082 dev->ifindex = dev_new_index(net);
7066 if (iflink)
7067 dev->iflink = dev->ifindex;
7068 }
7069 7083
7070 /* Send a netdev-add uevent to the new namespace */ 7084 /* Send a netdev-add uevent to the new namespace */
7071 kobject_uevent(&dev->dev.kobj, KOBJ_ADD); 7085 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 68ea6950cad1..9a12668f7d62 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -165,9 +165,9 @@ void fib_rules_unregister(struct fib_rules_ops *ops)
165 165
166 spin_lock(&net->rules_mod_lock); 166 spin_lock(&net->rules_mod_lock);
167 list_del_rcu(&ops->list); 167 list_del_rcu(&ops->list);
168 fib_rules_cleanup_ops(ops);
169 spin_unlock(&net->rules_mod_lock); 168 spin_unlock(&net->rules_mod_lock);
170 169
170 fib_rules_cleanup_ops(ops);
171 kfree_rcu(ops, rcu); 171 kfree_rcu(ops, rcu);
172} 172}
173EXPORT_SYMBOL_GPL(fib_rules_unregister); 173EXPORT_SYMBOL_GPL(fib_rules_unregister);
diff --git a/net/core/filter.c b/net/core/filter.c
index 444a07e4f68d..b669e75d2b36 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1175,7 +1175,9 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
1175 return 0; 1175 return 0;
1176} 1176}
1177 1177
1178static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) 1178#define BPF_RECOMPUTE_CSUM(flags) ((flags) & 1)
1179
1180static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
1179{ 1181{
1180 struct sk_buff *skb = (struct sk_buff *) (long) r1; 1182 struct sk_buff *skb = (struct sk_buff *) (long) r1;
1181 unsigned int offset = (unsigned int) r2; 1183 unsigned int offset = (unsigned int) r2;
@@ -1192,7 +1194,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1192 * 1194 *
1193 * so check for invalid 'offset' and too large 'len' 1195 * so check for invalid 'offset' and too large 'len'
1194 */ 1196 */
1195 if (offset > 0xffff || len > sizeof(buf)) 1197 if (unlikely(offset > 0xffff || len > sizeof(buf)))
1196 return -EFAULT; 1198 return -EFAULT;
1197 1199
1198 if (skb_cloned(skb) && !skb_clone_writable(skb, offset + len)) 1200 if (skb_cloned(skb) && !skb_clone_writable(skb, offset + len))
@@ -1202,7 +1204,8 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1202 if (unlikely(!ptr)) 1204 if (unlikely(!ptr))
1203 return -EFAULT; 1205 return -EFAULT;
1204 1206
1205 skb_postpull_rcsum(skb, ptr, len); 1207 if (BPF_RECOMPUTE_CSUM(flags))
1208 skb_postpull_rcsum(skb, ptr, len);
1206 1209
1207 memcpy(ptr, from, len); 1210 memcpy(ptr, from, len);
1208 1211
@@ -1210,7 +1213,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1210 /* skb_store_bits cannot return -EFAULT here */ 1213 /* skb_store_bits cannot return -EFAULT here */
1211 skb_store_bits(skb, offset, ptr, len); 1214 skb_store_bits(skb, offset, ptr, len);
1212 1215
1213 if (skb->ip_summed == CHECKSUM_COMPLETE) 1216 if (BPF_RECOMPUTE_CSUM(flags) && skb->ip_summed == CHECKSUM_COMPLETE)
1214 skb->csum = csum_add(skb->csum, csum_partial(ptr, len, 0)); 1217 skb->csum = csum_add(skb->csum, csum_partial(ptr, len, 0));
1215 return 0; 1218 return 0;
1216} 1219}
@@ -1223,6 +1226,99 @@ const struct bpf_func_proto bpf_skb_store_bytes_proto = {
1223 .arg2_type = ARG_ANYTHING, 1226 .arg2_type = ARG_ANYTHING,
1224 .arg3_type = ARG_PTR_TO_STACK, 1227 .arg3_type = ARG_PTR_TO_STACK,
1225 .arg4_type = ARG_CONST_STACK_SIZE, 1228 .arg4_type = ARG_CONST_STACK_SIZE,
1229 .arg5_type = ARG_ANYTHING,
1230};
1231
1232#define BPF_HEADER_FIELD_SIZE(flags) ((flags) & 0x0f)
1233#define BPF_IS_PSEUDO_HEADER(flags) ((flags) & 0x10)
1234
1235static u64 bpf_l3_csum_replace(u64 r1, u64 offset, u64 from, u64 to, u64 flags)
1236{
1237 struct sk_buff *skb = (struct sk_buff *) (long) r1;
1238 __sum16 sum, *ptr;
1239
1240 if (unlikely(offset > 0xffff))
1241 return -EFAULT;
1242
1243 if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(sum)))
1244 return -EFAULT;
1245
1246 ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
1247 if (unlikely(!ptr))
1248 return -EFAULT;
1249
1250 switch (BPF_HEADER_FIELD_SIZE(flags)) {
1251 case 2:
1252 csum_replace2(ptr, from, to);
1253 break;
1254 case 4:
1255 csum_replace4(ptr, from, to);
1256 break;
1257 default:
1258 return -EINVAL;
1259 }
1260
1261 if (ptr == &sum)
1262 /* skb_store_bits guaranteed to not return -EFAULT here */
1263 skb_store_bits(skb, offset, ptr, sizeof(sum));
1264
1265 return 0;
1266}
1267
1268const struct bpf_func_proto bpf_l3_csum_replace_proto = {
1269 .func = bpf_l3_csum_replace,
1270 .gpl_only = false,
1271 .ret_type = RET_INTEGER,
1272 .arg1_type = ARG_PTR_TO_CTX,
1273 .arg2_type = ARG_ANYTHING,
1274 .arg3_type = ARG_ANYTHING,
1275 .arg4_type = ARG_ANYTHING,
1276 .arg5_type = ARG_ANYTHING,
1277};
1278
1279static u64 bpf_l4_csum_replace(u64 r1, u64 offset, u64 from, u64 to, u64 flags)
1280{
1281 struct sk_buff *skb = (struct sk_buff *) (long) r1;
1282 u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags);
1283 __sum16 sum, *ptr;
1284
1285 if (unlikely(offset > 0xffff))
1286 return -EFAULT;
1287
1288 if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(sum)))
1289 return -EFAULT;
1290
1291 ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
1292 if (unlikely(!ptr))
1293 return -EFAULT;
1294
1295 switch (BPF_HEADER_FIELD_SIZE(flags)) {
1296 case 2:
1297 inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
1298 break;
1299 case 4:
1300 inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
1301 break;
1302 default:
1303 return -EINVAL;
1304 }
1305
1306 if (ptr == &sum)
1307 /* skb_store_bits guaranteed to not return -EFAULT here */
1308 skb_store_bits(skb, offset, ptr, sizeof(sum));
1309
1310 return 0;
1311}
1312
1313const struct bpf_func_proto bpf_l4_csum_replace_proto = {
1314 .func = bpf_l4_csum_replace,
1315 .gpl_only = false,
1316 .ret_type = RET_INTEGER,
1317 .arg1_type = ARG_PTR_TO_CTX,
1318 .arg2_type = ARG_ANYTHING,
1319 .arg3_type = ARG_ANYTHING,
1320 .arg4_type = ARG_ANYTHING,
1321 .arg5_type = ARG_ANYTHING,
1226}; 1322};
1227 1323
1228static const struct bpf_func_proto * 1324static const struct bpf_func_proto *
@@ -1250,6 +1346,10 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
1250 switch (func_id) { 1346 switch (func_id) {
1251 case BPF_FUNC_skb_store_bytes: 1347 case BPF_FUNC_skb_store_bytes:
1252 return &bpf_skb_store_bytes_proto; 1348 return &bpf_skb_store_bytes_proto;
1349 case BPF_FUNC_l3_csum_replace:
1350 return &bpf_l3_csum_replace_proto;
1351 case BPF_FUNC_l4_csum_replace:
1352 return &bpf_l4_csum_replace_proto;
1253 default: 1353 default:
1254 return sk_filter_func_proto(func_id); 1354 return sk_filter_func_proto(func_id);
1255 } 1355 }
@@ -1304,6 +1404,13 @@ static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off,
1304 offsetof(struct sk_buff, vlan_proto)); 1404 offsetof(struct sk_buff, vlan_proto));
1305 break; 1405 break;
1306 1406
1407 case offsetof(struct __sk_buff, priority):
1408 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4);
1409
1410 *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
1411 offsetof(struct sk_buff, priority));
1412 break;
1413
1307 case offsetof(struct __sk_buff, mark): 1414 case offsetof(struct __sk_buff, mark):
1308 return convert_skb_access(SKF_AD_MARK, dst_reg, src_reg, insn); 1415 return convert_skb_access(SKF_AD_MARK, dst_reg, src_reg, insn);
1309 1416
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 49a9e3e06c08..982861607f88 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -40,7 +40,7 @@ static DEFINE_SPINLOCK(lweventlist_lock);
40static unsigned char default_operstate(const struct net_device *dev) 40static unsigned char default_operstate(const struct net_device *dev)
41{ 41{
42 if (!netif_carrier_ok(dev)) 42 if (!netif_carrier_ok(dev))
43 return (dev->ifindex != dev->iflink ? 43 return (dev->ifindex != dev_get_iflink(dev) ?
44 IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN); 44 IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN);
45 45
46 if (netif_dormant(dev)) 46 if (netif_dormant(dev))
@@ -89,7 +89,7 @@ static bool linkwatch_urgent_event(struct net_device *dev)
89 if (!netif_running(dev)) 89 if (!netif_running(dev))
90 return false; 90 return false;
91 91
92 if (dev->ifindex != dev->iflink) 92 if (dev->ifindex != dev_get_iflink(dev))
93 return true; 93 return true;
94 94
95 if (dev->priv_flags & IFF_TEAM_PORT) 95 if (dev->priv_flags & IFF_TEAM_PORT)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index cc5cf689809c..4238d6da5c60 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -109,11 +109,19 @@ NETDEVICE_SHOW_RO(dev_id, fmt_hex);
109NETDEVICE_SHOW_RO(dev_port, fmt_dec); 109NETDEVICE_SHOW_RO(dev_port, fmt_dec);
110NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec); 110NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
111NETDEVICE_SHOW_RO(addr_len, fmt_dec); 111NETDEVICE_SHOW_RO(addr_len, fmt_dec);
112NETDEVICE_SHOW_RO(iflink, fmt_dec);
113NETDEVICE_SHOW_RO(ifindex, fmt_dec); 112NETDEVICE_SHOW_RO(ifindex, fmt_dec);
114NETDEVICE_SHOW_RO(type, fmt_dec); 113NETDEVICE_SHOW_RO(type, fmt_dec);
115NETDEVICE_SHOW_RO(link_mode, fmt_dec); 114NETDEVICE_SHOW_RO(link_mode, fmt_dec);
116 115
116static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
117 char *buf)
118{
119 struct net_device *ndev = to_net_dev(dev);
120
121 return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
122}
123static DEVICE_ATTR_RO(iflink);
124
117static ssize_t format_name_assign_type(const struct net_device *dev, char *buf) 125static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
118{ 126{
119 return sprintf(buf, fmt_dec, dev->name_assign_type); 127 return sprintf(buf, fmt_dec, dev->name_assign_type);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index e5e96b0f6717..a3abb719221f 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -148,9 +148,11 @@ static void ops_free_list(const struct pernet_operations *ops,
148 } 148 }
149} 149}
150 150
151static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
152 int id);
151static int alloc_netid(struct net *net, struct net *peer, int reqid) 153static int alloc_netid(struct net *net, struct net *peer, int reqid)
152{ 154{
153 int min = 0, max = 0; 155 int min = 0, max = 0, id;
154 156
155 ASSERT_RTNL(); 157 ASSERT_RTNL();
156 158
@@ -159,7 +161,11 @@ static int alloc_netid(struct net *net, struct net *peer, int reqid)
159 max = reqid + 1; 161 max = reqid + 1;
160 } 162 }
161 163
162 return idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL); 164 id = idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL);
165 if (id >= 0)
166 rtnl_net_notifyid(net, peer, RTM_NEWNSID, id);
167
168 return id;
163} 169}
164 170
165/* This function is used by idr_for_each(). If net is equal to peer, the 171/* This function is used by idr_for_each(). If net is equal to peer, the
@@ -198,8 +204,10 @@ static int __peernet2id(struct net *net, struct net *peer, bool alloc)
198 */ 204 */
199int peernet2id(struct net *net, struct net *peer) 205int peernet2id(struct net *net, struct net *peer)
200{ 206{
201 int id = __peernet2id(net, peer, true); 207 bool alloc = atomic_read(&peer->count) == 0 ? false : true;
208 int id;
202 209
210 id = __peernet2id(net, peer, alloc);
203 return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED; 211 return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
204} 212}
205EXPORT_SYMBOL(peernet2id); 213EXPORT_SYMBOL(peernet2id);
@@ -357,8 +365,10 @@ static void cleanup_net(struct work_struct *work)
357 for_each_net(tmp) { 365 for_each_net(tmp) {
358 int id = __peernet2id(tmp, net, false); 366 int id = __peernet2id(tmp, net, false);
359 367
360 if (id >= 0) 368 if (id >= 0) {
369 rtnl_net_notifyid(tmp, net, RTM_DELNSID, id);
361 idr_remove(&tmp->netns_ids, id); 370 idr_remove(&tmp->netns_ids, id);
371 }
362 } 372 }
363 idr_destroy(&net->netns_ids); 373 idr_destroy(&net->netns_ids);
364 374
@@ -529,7 +539,8 @@ static int rtnl_net_get_size(void)
529} 539}
530 540
531static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags, 541static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
532 int cmd, struct net *net, struct net *peer) 542 int cmd, struct net *net, struct net *peer,
543 int nsid)
533{ 544{
534 struct nlmsghdr *nlh; 545 struct nlmsghdr *nlh;
535 struct rtgenmsg *rth; 546 struct rtgenmsg *rth;
@@ -544,9 +555,13 @@ static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
544 rth = nlmsg_data(nlh); 555 rth = nlmsg_data(nlh);
545 rth->rtgen_family = AF_UNSPEC; 556 rth->rtgen_family = AF_UNSPEC;
546 557
547 id = __peernet2id(net, peer, false); 558 if (nsid >= 0) {
548 if (id < 0) 559 id = nsid;
549 id = NETNSA_NSID_NOT_ASSIGNED; 560 } else {
561 id = __peernet2id(net, peer, false);
562 if (id < 0)
563 id = NETNSA_NSID_NOT_ASSIGNED;
564 }
550 if (nla_put_s32(skb, NETNSA_NSID, id)) 565 if (nla_put_s32(skb, NETNSA_NSID, id))
551 goto nla_put_failure; 566 goto nla_put_failure;
552 567
@@ -563,8 +578,8 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
563 struct net *net = sock_net(skb->sk); 578 struct net *net = sock_net(skb->sk);
564 struct nlattr *tb[NETNSA_MAX + 1]; 579 struct nlattr *tb[NETNSA_MAX + 1];
565 struct sk_buff *msg; 580 struct sk_buff *msg;
566 int err = -ENOBUFS;
567 struct net *peer; 581 struct net *peer;
582 int err;
568 583
569 err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, 584 err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
570 rtnl_net_policy); 585 rtnl_net_policy);
@@ -587,7 +602,7 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
587 } 602 }
588 603
589 err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, 604 err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
590 RTM_GETNSID, net, peer); 605 RTM_GETNSID, net, peer, -1);
591 if (err < 0) 606 if (err < 0)
592 goto err_out; 607 goto err_out;
593 608
@@ -601,6 +616,75 @@ out:
601 return err; 616 return err;
602} 617}
603 618
619struct rtnl_net_dump_cb {
620 struct net *net;
621 struct sk_buff *skb;
622 struct netlink_callback *cb;
623 int idx;
624 int s_idx;
625};
626
627static int rtnl_net_dumpid_one(int id, void *peer, void *data)
628{
629 struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
630 int ret;
631
632 if (net_cb->idx < net_cb->s_idx)
633 goto cont;
634
635 ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
636 net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
637 RTM_NEWNSID, net_cb->net, peer, id);
638 if (ret < 0)
639 return ret;
640
641cont:
642 net_cb->idx++;
643 return 0;
644}
645
646static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
647{
648 struct net *net = sock_net(skb->sk);
649 struct rtnl_net_dump_cb net_cb = {
650 .net = net,
651 .skb = skb,
652 .cb = cb,
653 .idx = 0,
654 .s_idx = cb->args[0],
655 };
656
657 ASSERT_RTNL();
658
659 idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
660
661 cb->args[0] = net_cb.idx;
662 return skb->len;
663}
664
665static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
666 int id)
667{
668 struct sk_buff *msg;
669 int err = -ENOMEM;
670
671 msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
672 if (!msg)
673 goto out;
674
675 err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, peer, id);
676 if (err < 0)
677 goto err_out;
678
679 rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
680 return;
681
682err_out:
683 nlmsg_free(msg);
684out:
685 rtnl_set_sk_err(net, RTNLGRP_NSID, err);
686}
687
604static int __init net_ns_init(void) 688static int __init net_ns_init(void)
605{ 689{
606 struct net_generic *ng; 690 struct net_generic *ng;
@@ -635,7 +719,8 @@ static int __init net_ns_init(void)
635 register_pernet_subsys(&net_ns_ops); 719 register_pernet_subsys(&net_ns_ops);
636 720
637 rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL); 721 rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
638 rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, NULL, NULL); 722 rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
723 NULL);
639 724
640 return 0; 725 return 0;
641} 726}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b96ac2109c82..5e02260b087f 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1055,8 +1055,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1055#ifdef CONFIG_RPS 1055#ifdef CONFIG_RPS
1056 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) || 1056 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
1057#endif 1057#endif
1058 (dev->ifindex != dev->iflink && 1058 (dev->ifindex != dev_get_iflink(dev) &&
1059 nla_put_u32(skb, IFLA_LINK, dev->iflink)) || 1059 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
1060 (upper_dev && 1060 (upper_dev &&
1061 nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) || 1061 nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) ||
1062 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) || 1062 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
@@ -1991,10 +1991,10 @@ static int rtnl_group_changelink(const struct sk_buff *skb,
1991 struct ifinfomsg *ifm, 1991 struct ifinfomsg *ifm,
1992 struct nlattr **tb) 1992 struct nlattr **tb)
1993{ 1993{
1994 struct net_device *dev; 1994 struct net_device *dev, *aux;
1995 int err; 1995 int err;
1996 1996
1997 for_each_netdev(net, dev) { 1997 for_each_netdev_safe(net, dev, aux) {
1998 if (dev->group == group) { 1998 if (dev->group == group) {
1999 err = do_setlink(skb, dev, ifm, tb, NULL, 0); 1999 err = do_setlink(skb, dev, ifm, tb, NULL, 0);
2000 if (err < 0) 2000 if (err < 0)
@@ -2863,8 +2863,8 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
2863 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) || 2863 nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
2864 (dev->addr_len && 2864 (dev->addr_len &&
2865 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || 2865 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
2866 (dev->ifindex != dev->iflink && 2866 (dev->ifindex != dev_get_iflink(dev) &&
2867 nla_put_u32(skb, IFLA_LINK, dev->iflink))) 2867 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
2868 goto nla_put_failure; 2868 goto nla_put_failure;
2869 2869
2870 br_afspec = nla_nest_start(skb, IFLA_AF_SPEC); 2870 br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
diff --git a/net/core/sock.c b/net/core/sock.c
index 119ae464b44a..654e38a99759 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -653,6 +653,25 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
653 sock_reset_flag(sk, bit); 653 sock_reset_flag(sk, bit);
654} 654}
655 655
656bool sk_mc_loop(struct sock *sk)
657{
658 if (dev_recursion_level())
659 return false;
660 if (!sk)
661 return true;
662 switch (sk->sk_family) {
663 case AF_INET:
664 return inet_sk(sk)->mc_loop;
665#if IS_ENABLED(CONFIG_IPV6)
666 case AF_INET6:
667 return inet6_sk(sk)->mc_loop;
668#endif
669 }
670 WARN_ON(1);
671 return true;
672}
673EXPORT_SYMBOL(sk_mc_loop);
674
656/* 675/*
657 * This is meant for all protocols to use and covers goings on 676 * This is meant for all protocols to use and covers goings on
658 * at the socket level. Everything here is generic. 677 * at the socket level. Everything here is generic.
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index be1f08cdad29..4507b188fc51 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -194,7 +194,7 @@ static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb)
194 return err; 194 return err;
195} 195}
196 196
197static int dn_neigh_output_packet(struct sk_buff *skb) 197static int dn_neigh_output_packet(struct sock *sk, struct sk_buff *skb)
198{ 198{
199 struct dst_entry *dst = skb_dst(skb); 199 struct dst_entry *dst = skb_dst(skb);
200 struct dn_route *rt = (struct dn_route *)dst; 200 struct dn_route *rt = (struct dn_route *)dst;
@@ -206,7 +206,8 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
206/* 206/*
207 * For talking to broadcast devices: Ethernet & PPP 207 * For talking to broadcast devices: Ethernet & PPP
208 */ 208 */
209static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb) 209static int dn_long_output(struct neighbour *neigh, struct sock *sk,
210 struct sk_buff *skb)
210{ 211{
211 struct net_device *dev = neigh->dev; 212 struct net_device *dev = neigh->dev;
212 int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3; 213 int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3;
@@ -245,14 +246,15 @@ static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
245 246
246 skb_reset_network_header(skb); 247 skb_reset_network_header(skb);
247 248
248 return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL, 249 return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, sk, skb,
249 neigh->dev, dn_neigh_output_packet); 250 NULL, neigh->dev, dn_neigh_output_packet);
250} 251}
251 252
252/* 253/*
253 * For talking to pointopoint and multidrop devices: DDCMP and X.25 254 * For talking to pointopoint and multidrop devices: DDCMP and X.25
254 */ 255 */
255static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb) 256static int dn_short_output(struct neighbour *neigh, struct sock *sk,
257 struct sk_buff *skb)
256{ 258{
257 struct net_device *dev = neigh->dev; 259 struct net_device *dev = neigh->dev;
258 int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2; 260 int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
@@ -284,8 +286,8 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
284 286
285 skb_reset_network_header(skb); 287 skb_reset_network_header(skb);
286 288
287 return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL, 289 return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, sk, skb,
288 neigh->dev, dn_neigh_output_packet); 290 NULL, neigh->dev, dn_neigh_output_packet);
289} 291}
290 292
291/* 293/*
@@ -293,7 +295,8 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
293 * Phase 3 output is the same as short output, execpt that 295 * Phase 3 output is the same as short output, execpt that
294 * it clears the area bits before transmission. 296 * it clears the area bits before transmission.
295 */ 297 */
296static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb) 298static int dn_phase3_output(struct neighbour *neigh, struct sock *sk,
299 struct sk_buff *skb)
297{ 300{
298 struct net_device *dev = neigh->dev; 301 struct net_device *dev = neigh->dev;
299 int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2; 302 int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
@@ -324,11 +327,11 @@ static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb)
324 327
325 skb_reset_network_header(skb); 328 skb_reset_network_header(skb);
326 329
327 return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, skb, NULL, 330 return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, sk, skb,
328 neigh->dev, dn_neigh_output_packet); 331 NULL, neigh->dev, dn_neigh_output_packet);
329} 332}
330 333
331int dn_to_neigh_output(struct sk_buff *skb) 334int dn_to_neigh_output(struct sock *sk, struct sk_buff *skb)
332{ 335{
333 struct dst_entry *dst = skb_dst(skb); 336 struct dst_entry *dst = skb_dst(skb);
334 struct dn_route *rt = (struct dn_route *) dst; 337 struct dn_route *rt = (struct dn_route *) dst;
@@ -347,11 +350,11 @@ int dn_to_neigh_output(struct sk_buff *skb)
347 rcu_read_unlock(); 350 rcu_read_unlock();
348 351
349 if (dn->flags & DN_NDFLAG_P3) 352 if (dn->flags & DN_NDFLAG_P3)
350 return dn_phase3_output(neigh, skb); 353 return dn_phase3_output(neigh, sk, skb);
351 if (use_long) 354 if (use_long)
352 return dn_long_output(neigh, skb); 355 return dn_long_output(neigh, sk, skb);
353 else 356 else
354 return dn_short_output(neigh, skb); 357 return dn_short_output(neigh, sk, skb);
355} 358}
356 359
357/* 360/*
@@ -372,7 +375,7 @@ void dn_neigh_pointopoint_hello(struct sk_buff *skb)
372/* 375/*
373 * Ethernet router hello message received 376 * Ethernet router hello message received
374 */ 377 */
375int dn_neigh_router_hello(struct sk_buff *skb) 378int dn_neigh_router_hello(struct sock *sk, struct sk_buff *skb)
376{ 379{
377 struct rtnode_hello_message *msg = (struct rtnode_hello_message *)skb->data; 380 struct rtnode_hello_message *msg = (struct rtnode_hello_message *)skb->data;
378 381
@@ -434,7 +437,7 @@ int dn_neigh_router_hello(struct sk_buff *skb)
434/* 437/*
435 * Endnode hello message received 438 * Endnode hello message received
436 */ 439 */
437int dn_neigh_endnode_hello(struct sk_buff *skb) 440int dn_neigh_endnode_hello(struct sock *sk, struct sk_buff *skb)
438{ 441{
439 struct endnode_hello_message *msg = (struct endnode_hello_message *)skb->data; 442 struct endnode_hello_message *msg = (struct endnode_hello_message *)skb->data;
440 struct neighbour *neigh; 443 struct neighbour *neigh;
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index fe5f01485d33..a321eac9fd0c 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -714,7 +714,7 @@ out:
714 return ret; 714 return ret;
715} 715}
716 716
717static int dn_nsp_rx_packet(struct sk_buff *skb) 717static int dn_nsp_rx_packet(struct sock *sk2, struct sk_buff *skb)
718{ 718{
719 struct dn_skb_cb *cb = DN_SKB_CB(skb); 719 struct dn_skb_cb *cb = DN_SKB_CB(skb);
720 struct sock *sk = NULL; 720 struct sock *sk = NULL;
@@ -814,7 +814,8 @@ free_out:
814 814
815int dn_nsp_rx(struct sk_buff *skb) 815int dn_nsp_rx(struct sk_buff *skb)
816{ 816{
817 return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_IN, skb, skb->dev, NULL, 817 return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_IN, NULL, skb,
818 skb->dev, NULL,
818 dn_nsp_rx_packet); 819 dn_nsp_rx_packet);
819} 820}
820 821
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 9ab0c4ba297f..03227ffd19ce 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -512,7 +512,7 @@ static int dn_return_long(struct sk_buff *skb)
512 * 512 *
513 * Returns: result of input function if route is found, error code otherwise 513 * Returns: result of input function if route is found, error code otherwise
514 */ 514 */
515static int dn_route_rx_packet(struct sk_buff *skb) 515static int dn_route_rx_packet(struct sock *sk, struct sk_buff *skb)
516{ 516{
517 struct dn_skb_cb *cb; 517 struct dn_skb_cb *cb;
518 int err; 518 int err;
@@ -573,7 +573,8 @@ static int dn_route_rx_long(struct sk_buff *skb)
573 ptr++; 573 ptr++;
574 cb->hops = *ptr++; /* Visit Count */ 574 cb->hops = *ptr++; /* Visit Count */
575 575
576 return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, 576 return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, NULL, skb,
577 skb->dev, NULL,
577 dn_route_rx_packet); 578 dn_route_rx_packet);
578 579
579drop_it: 580drop_it:
@@ -600,7 +601,8 @@ static int dn_route_rx_short(struct sk_buff *skb)
600 ptr += 2; 601 ptr += 2;
601 cb->hops = *ptr & 0x3f; 602 cb->hops = *ptr & 0x3f;
602 603
603 return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, 604 return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, NULL, skb,
605 skb->dev, NULL,
604 dn_route_rx_packet); 606 dn_route_rx_packet);
605 607
606drop_it: 608drop_it:
@@ -608,7 +610,7 @@ drop_it:
608 return NET_RX_DROP; 610 return NET_RX_DROP;
609} 611}
610 612
611static int dn_route_discard(struct sk_buff *skb) 613static int dn_route_discard(struct sock *sk, struct sk_buff *skb)
612{ 614{
613 /* 615 /*
614 * I know we drop the packet here, but thats considered success in 616 * I know we drop the packet here, but thats considered success in
@@ -618,7 +620,7 @@ static int dn_route_discard(struct sk_buff *skb)
618 return NET_RX_SUCCESS; 620 return NET_RX_SUCCESS;
619} 621}
620 622
621static int dn_route_ptp_hello(struct sk_buff *skb) 623static int dn_route_ptp_hello(struct sock *sk, struct sk_buff *skb)
622{ 624{
623 dn_dev_hello(skb); 625 dn_dev_hello(skb);
624 dn_neigh_pointopoint_hello(skb); 626 dn_neigh_pointopoint_hello(skb);
@@ -704,22 +706,22 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
704 switch (flags & DN_RT_CNTL_MSK) { 706 switch (flags & DN_RT_CNTL_MSK) {
705 case DN_RT_PKT_HELO: 707 case DN_RT_PKT_HELO:
706 return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO, 708 return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
707 skb, skb->dev, NULL, 709 NULL, skb, skb->dev, NULL,
708 dn_route_ptp_hello); 710 dn_route_ptp_hello);
709 711
710 case DN_RT_PKT_L1RT: 712 case DN_RT_PKT_L1RT:
711 case DN_RT_PKT_L2RT: 713 case DN_RT_PKT_L2RT:
712 return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE, 714 return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
713 skb, skb->dev, NULL, 715 NULL, skb, skb->dev, NULL,
714 dn_route_discard); 716 dn_route_discard);
715 case DN_RT_PKT_ERTH: 717 case DN_RT_PKT_ERTH:
716 return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO, 718 return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
717 skb, skb->dev, NULL, 719 NULL, skb, skb->dev, NULL,
718 dn_neigh_router_hello); 720 dn_neigh_router_hello);
719 721
720 case DN_RT_PKT_EEDH: 722 case DN_RT_PKT_EEDH:
721 return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO, 723 return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
722 skb, skb->dev, NULL, 724 NULL, skb, skb->dev, NULL,
723 dn_neigh_endnode_hello); 725 dn_neigh_endnode_hello);
724 } 726 }
725 } else { 727 } else {
@@ -768,7 +770,8 @@ static int dn_output(struct sock *sk, struct sk_buff *skb)
768 cb->rt_flags |= DN_RT_F_IE; 770 cb->rt_flags |= DN_RT_F_IE;
769 cb->hops = 0; 771 cb->hops = 0;
770 772
771 return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, skb, NULL, dev, 773 return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, sk, skb,
774 NULL, dev,
772 dn_to_neigh_output); 775 dn_to_neigh_output);
773 776
774error: 777error:
@@ -816,7 +819,8 @@ static int dn_forward(struct sk_buff *skb)
816 if (rt->rt_flags & RTCF_DOREDIRECT) 819 if (rt->rt_flags & RTCF_DOREDIRECT)
817 cb->rt_flags |= DN_RT_F_IE; 820 cb->rt_flags |= DN_RT_F_IE;
818 821
819 return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, skb, dev, skb->dev, 822 return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, NULL, skb,
823 dev, skb->dev,
820 dn_to_neigh_output); 824 dn_to_neigh_output);
821 825
822drop: 826drop:
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index faf7cc3483fe..9d66a0f72f90 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -248,7 +248,9 @@ void __init dn_fib_rules_init(void)
248 248
249void __exit dn_fib_rules_cleanup(void) 249void __exit dn_fib_rules_cleanup(void)
250{ 250{
251 rtnl_lock();
251 fib_rules_unregister(dn_fib_rules_ops); 252 fib_rules_unregister(dn_fib_rules_ops);
253 rtnl_unlock();
252 rcu_barrier(); 254 rcu_barrier();
253} 255}
254 256
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index e4d9560a910b..af34fc9bdf69 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -89,9 +89,7 @@ static void dnrmg_send_peer(struct sk_buff *skb)
89 89
90static unsigned int dnrmg_hook(const struct nf_hook_ops *ops, 90static unsigned int dnrmg_hook(const struct nf_hook_ops *ops,
91 struct sk_buff *skb, 91 struct sk_buff *skb,
92 const struct net_device *in, 92 const struct nf_hook_state *state)
93 const struct net_device *out,
94 int (*okfn)(struct sk_buff *))
95{ 93{
96 dnrmg_send_peer(skb); 94 dnrmg_send_peer(skb);
97 return NF_ACCEPT; 95 return NF_ACCEPT;
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 899772108ee3..5eaadabe23a1 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -513,12 +513,10 @@ static struct net_device *dev_to_net_device(struct device *dev)
513#ifdef CONFIG_OF 513#ifdef CONFIG_OF
514static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, 514static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
515 struct dsa_chip_data *cd, 515 struct dsa_chip_data *cd,
516 int chip_index, 516 int chip_index, int port_index,
517 struct device_node *link) 517 struct device_node *link)
518{ 518{
519 int ret;
520 const __be32 *reg; 519 const __be32 *reg;
521 int link_port_addr;
522 int link_sw_addr; 520 int link_sw_addr;
523 struct device_node *parent_sw; 521 struct device_node *parent_sw;
524 int len; 522 int len;
@@ -531,6 +529,10 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
531 if (!reg || (len != sizeof(*reg) * 2)) 529 if (!reg || (len != sizeof(*reg) * 2))
532 return -EINVAL; 530 return -EINVAL;
533 531
532 /*
533 * Get the destination switch number from the second field of its 'reg'
534 * property, i.e. for "reg = <0x19 1>" sw_addr is '1'.
535 */
534 link_sw_addr = be32_to_cpup(reg + 1); 536 link_sw_addr = be32_to_cpup(reg + 1);
535 537
536 if (link_sw_addr >= pd->nr_chips) 538 if (link_sw_addr >= pd->nr_chips)
@@ -547,20 +549,9 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
547 memset(cd->rtable, -1, pd->nr_chips * sizeof(s8)); 549 memset(cd->rtable, -1, pd->nr_chips * sizeof(s8));
548 } 550 }
549 551
550 reg = of_get_property(link, "reg", NULL); 552 cd->rtable[link_sw_addr] = port_index;
551 if (!reg) {
552 ret = -EINVAL;
553 goto out;
554 }
555
556 link_port_addr = be32_to_cpup(reg);
557
558 cd->rtable[link_sw_addr] = link_port_addr;
559 553
560 return 0; 554 return 0;
561out:
562 kfree(cd->rtable);
563 return ret;
564} 555}
565 556
566static void dsa_of_free_platform_data(struct dsa_platform_data *pd) 557static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
@@ -670,7 +661,7 @@ static int dsa_of_probe(struct device *dev)
670 if (!strcmp(port_name, "dsa") && link && 661 if (!strcmp(port_name, "dsa") && link &&
671 pd->nr_chips > 1) { 662 pd->nr_chips > 1) {
672 ret = dsa_of_setup_routing_table(pd, cd, 663 ret = dsa_of_setup_routing_table(pd, cd,
673 chip_index, link); 664 chip_index, port_index, link);
674 if (ret) 665 if (ret)
675 goto out_free_chip; 666 goto out_free_chip;
676 } 667 }
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 3597724ec3d8..827cda560a55 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -55,13 +55,11 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
55 55
56 56
57/* slave device handling ****************************************************/ 57/* slave device handling ****************************************************/
58static int dsa_slave_init(struct net_device *dev) 58static int dsa_slave_get_iflink(const struct net_device *dev)
59{ 59{
60 struct dsa_slave_priv *p = netdev_priv(dev); 60 struct dsa_slave_priv *p = netdev_priv(dev);
61 61
62 dev->iflink = p->parent->dst->master_netdev->ifindex; 62 return p->parent->dst->master_netdev->ifindex;
63
64 return 0;
65} 63}
66 64
67static inline bool dsa_port_is_bridged(struct dsa_slave_priv *p) 65static inline bool dsa_port_is_bridged(struct dsa_slave_priv *p)
@@ -664,7 +662,6 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
664}; 662};
665 663
666static const struct net_device_ops dsa_slave_netdev_ops = { 664static const struct net_device_ops dsa_slave_netdev_ops = {
667 .ndo_init = dsa_slave_init,
668 .ndo_open = dsa_slave_open, 665 .ndo_open = dsa_slave_open,
669 .ndo_stop = dsa_slave_close, 666 .ndo_stop = dsa_slave_close,
670 .ndo_start_xmit = dsa_slave_xmit, 667 .ndo_start_xmit = dsa_slave_xmit,
@@ -675,6 +672,7 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
675 .ndo_fdb_del = dsa_slave_fdb_del, 672 .ndo_fdb_del = dsa_slave_fdb_del,
676 .ndo_fdb_dump = dsa_slave_fdb_dump, 673 .ndo_fdb_dump = dsa_slave_fdb_dump,
677 .ndo_do_ioctl = dsa_slave_ioctl, 674 .ndo_do_ioctl = dsa_slave_ioctl,
675 .ndo_get_iflink = dsa_slave_get_iflink,
678}; 676};
679 677
680static const struct swdev_ops dsa_slave_swdev_ops = { 678static const struct swdev_ops dsa_slave_swdev_ops = {
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 64a9c0fdc4aa..8b47a4d79d04 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -217,7 +217,7 @@ int inet_listen(struct socket *sock, int backlog)
217 * shutdown() (rather than close()). 217 * shutdown() (rather than close()).
218 */ 218 */
219 if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 && 219 if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
220 inet_csk(sk)->icsk_accept_queue.fastopenq == NULL) { 220 !inet_csk(sk)->icsk_accept_queue.fastopenq) {
221 if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0) 221 if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
222 err = fastopen_init_queue(sk, backlog); 222 err = fastopen_init_queue(sk, backlog);
223 else if ((sysctl_tcp_fastopen & 223 else if ((sysctl_tcp_fastopen &
@@ -314,11 +314,11 @@ lookup_protocol:
314 answer_flags = answer->flags; 314 answer_flags = answer->flags;
315 rcu_read_unlock(); 315 rcu_read_unlock();
316 316
317 WARN_ON(answer_prot->slab == NULL); 317 WARN_ON(!answer_prot->slab);
318 318
319 err = -ENOBUFS; 319 err = -ENOBUFS;
320 sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot); 320 sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
321 if (sk == NULL) 321 if (!sk)
322 goto out; 322 goto out;
323 323
324 err = 0; 324 err = 0;
@@ -1269,7 +1269,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1269 if (udpfrag) { 1269 if (udpfrag) {
1270 iph->id = htons(id); 1270 iph->id = htons(id);
1271 iph->frag_off = htons(offset >> 3); 1271 iph->frag_off = htons(offset >> 3);
1272 if (skb->next != NULL) 1272 if (skb->next)
1273 iph->frag_off |= htons(IP_MF); 1273 iph->frag_off |= htons(IP_MF);
1274 offset += skb->len - nhoff - ihl; 1274 offset += skb->len - nhoff - ihl;
1275 } else { 1275 } else {
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 5f5c674e130a..933a92820d26 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -228,7 +228,7 @@ static int arp_constructor(struct neighbour *neigh)
228 228
229 rcu_read_lock(); 229 rcu_read_lock();
230 in_dev = __in_dev_get_rcu(dev); 230 in_dev = __in_dev_get_rcu(dev);
231 if (in_dev == NULL) { 231 if (!in_dev) {
232 rcu_read_unlock(); 232 rcu_read_unlock();
233 return -EINVAL; 233 return -EINVAL;
234 } 234 }
@@ -475,7 +475,7 @@ static inline int arp_fwd_pvlan(struct in_device *in_dev,
475 */ 475 */
476 476
477/* 477/*
478 * Create an arp packet. If (dest_hw == NULL), we create a broadcast 478 * Create an arp packet. If dest_hw is not set, we create a broadcast
479 * message. 479 * message.
480 */ 480 */
481struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, 481struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
@@ -495,7 +495,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
495 */ 495 */
496 496
497 skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC); 497 skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC);
498 if (skb == NULL) 498 if (!skb)
499 return NULL; 499 return NULL;
500 500
501 skb_reserve(skb, hlen); 501 skb_reserve(skb, hlen);
@@ -503,9 +503,9 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
503 arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev)); 503 arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev));
504 skb->dev = dev; 504 skb->dev = dev;
505 skb->protocol = htons(ETH_P_ARP); 505 skb->protocol = htons(ETH_P_ARP);
506 if (src_hw == NULL) 506 if (!src_hw)
507 src_hw = dev->dev_addr; 507 src_hw = dev->dev_addr;
508 if (dest_hw == NULL) 508 if (!dest_hw)
509 dest_hw = dev->broadcast; 509 dest_hw = dev->broadcast;
510 510
511 /* 511 /*
@@ -569,7 +569,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
569 break; 569 break;
570#endif 570#endif
571 default: 571 default:
572 if (target_hw != NULL) 572 if (target_hw)
573 memcpy(arp_ptr, target_hw, dev->addr_len); 573 memcpy(arp_ptr, target_hw, dev->addr_len);
574 else 574 else
575 memset(arp_ptr, 0, dev->addr_len); 575 memset(arp_ptr, 0, dev->addr_len);
@@ -591,7 +591,8 @@ EXPORT_SYMBOL(arp_create);
591void arp_xmit(struct sk_buff *skb) 591void arp_xmit(struct sk_buff *skb)
592{ 592{
593 /* Send it off, maybe filter it using firewalling first. */ 593 /* Send it off, maybe filter it using firewalling first. */
594 NF_HOOK(NFPROTO_ARP, NF_ARP_OUT, skb, NULL, skb->dev, dev_queue_xmit); 594 NF_HOOK(NFPROTO_ARP, NF_ARP_OUT, NULL, skb,
595 NULL, skb->dev, dev_queue_xmit_sk);
595} 596}
596EXPORT_SYMBOL(arp_xmit); 597EXPORT_SYMBOL(arp_xmit);
597 598
@@ -614,7 +615,7 @@ void arp_send(int type, int ptype, __be32 dest_ip,
614 615
615 skb = arp_create(type, ptype, dest_ip, dev, src_ip, 616 skb = arp_create(type, ptype, dest_ip, dev, src_ip,
616 dest_hw, src_hw, target_hw); 617 dest_hw, src_hw, target_hw);
617 if (skb == NULL) 618 if (!skb)
618 return; 619 return;
619 620
620 arp_xmit(skb); 621 arp_xmit(skb);
@@ -625,7 +626,7 @@ EXPORT_SYMBOL(arp_send);
625 * Process an arp request. 626 * Process an arp request.
626 */ 627 */
627 628
628static int arp_process(struct sk_buff *skb) 629static int arp_process(struct sock *sk, struct sk_buff *skb)
629{ 630{
630 struct net_device *dev = skb->dev; 631 struct net_device *dev = skb->dev;
631 struct in_device *in_dev = __in_dev_get_rcu(dev); 632 struct in_device *in_dev = __in_dev_get_rcu(dev);
@@ -644,7 +645,7 @@ static int arp_process(struct sk_buff *skb)
644 * is ARP'able. 645 * is ARP'able.
645 */ 646 */
646 647
647 if (in_dev == NULL) 648 if (!in_dev)
648 goto out; 649 goto out;
649 650
650 arp = arp_hdr(skb); 651 arp = arp_hdr(skb);
@@ -808,7 +809,7 @@ static int arp_process(struct sk_buff *skb)
808 is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip && 809 is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip &&
809 inet_addr_type(net, sip) == RTN_UNICAST; 810 inet_addr_type(net, sip) == RTN_UNICAST;
810 811
811 if (n == NULL && 812 if (!n &&
812 ((arp->ar_op == htons(ARPOP_REPLY) && 813 ((arp->ar_op == htons(ARPOP_REPLY) &&
813 inet_addr_type(net, sip) == RTN_UNICAST) || is_garp)) 814 inet_addr_type(net, sip) == RTN_UNICAST) || is_garp))
814 n = __neigh_lookup(&arp_tbl, &sip, dev, 1); 815 n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
@@ -846,7 +847,7 @@ out:
846 847
847static void parp_redo(struct sk_buff *skb) 848static void parp_redo(struct sk_buff *skb)
848{ 849{
849 arp_process(skb); 850 arp_process(NULL, skb);
850} 851}
851 852
852 853
@@ -879,7 +880,8 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
879 880
880 memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb)); 881 memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
881 882
882 return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process); 883 return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, NULL, skb,
884 dev, NULL, arp_process);
883 885
884consumeskb: 886consumeskb:
885 consume_skb(skb); 887 consume_skb(skb);
@@ -900,7 +902,7 @@ out_of_mem:
900 902
901static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on) 903static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on)
902{ 904{
903 if (dev == NULL) { 905 if (!dev) {
904 IPV4_DEVCONF_ALL(net, PROXY_ARP) = on; 906 IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
905 return 0; 907 return 0;
906 } 908 }
@@ -926,7 +928,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
926 return -ENODEV; 928 return -ENODEV;
927 } 929 }
928 if (mask) { 930 if (mask) {
929 if (pneigh_lookup(&arp_tbl, net, &ip, dev, 1) == NULL) 931 if (!pneigh_lookup(&arp_tbl, net, &ip, dev, 1))
930 return -ENOBUFS; 932 return -ENOBUFS;
931 return 0; 933 return 0;
932 } 934 }
@@ -947,7 +949,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
947 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr; 949 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
948 if (r->arp_flags & ATF_PERM) 950 if (r->arp_flags & ATF_PERM)
949 r->arp_flags |= ATF_COM; 951 r->arp_flags |= ATF_COM;
950 if (dev == NULL) { 952 if (!dev) {
951 struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0); 953 struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);
952 954
953 if (IS_ERR(rt)) 955 if (IS_ERR(rt))
@@ -1067,7 +1069,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
1067 return arp_req_delete_public(net, r, dev); 1069 return arp_req_delete_public(net, r, dev);
1068 1070
1069 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr; 1071 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
1070 if (dev == NULL) { 1072 if (!dev) {
1071 struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0); 1073 struct rtable *rt = ip_route_output(net, ip, 0, RTO_ONLINK, 0);
1072 if (IS_ERR(rt)) 1074 if (IS_ERR(rt))
1073 return PTR_ERR(rt); 1075 return PTR_ERR(rt);
@@ -1116,7 +1118,7 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1116 if (r.arp_dev[0]) { 1118 if (r.arp_dev[0]) {
1117 err = -ENODEV; 1119 err = -ENODEV;
1118 dev = __dev_get_by_name(net, r.arp_dev); 1120 dev = __dev_get_by_name(net, r.arp_dev);
1119 if (dev == NULL) 1121 if (!dev)
1120 goto out; 1122 goto out;
1121 1123
1122 /* Mmmm... It is wrong... ARPHRD_NETROM==0 */ 1124 /* Mmmm... It is wrong... ARPHRD_NETROM==0 */
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index e361ea6f3fc8..bdb2a07ec363 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -255,7 +255,7 @@ static int __init cipso_v4_cache_init(void)
255 cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, 255 cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS,
256 sizeof(struct cipso_v4_map_cache_bkt), 256 sizeof(struct cipso_v4_map_cache_bkt),
257 GFP_KERNEL); 257 GFP_KERNEL);
258 if (cipso_v4_cache == NULL) 258 if (!cipso_v4_cache)
259 return -ENOMEM; 259 return -ENOMEM;
260 260
261 for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { 261 for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
@@ -339,7 +339,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
339 secattr->cache = entry->lsm_data; 339 secattr->cache = entry->lsm_data;
340 secattr->flags |= NETLBL_SECATTR_CACHE; 340 secattr->flags |= NETLBL_SECATTR_CACHE;
341 secattr->type = NETLBL_NLTYPE_CIPSOV4; 341 secattr->type = NETLBL_NLTYPE_CIPSOV4;
342 if (prev_entry == NULL) { 342 if (!prev_entry) {
343 spin_unlock_bh(&cipso_v4_cache[bkt].lock); 343 spin_unlock_bh(&cipso_v4_cache[bkt].lock);
344 return 0; 344 return 0;
345 } 345 }
@@ -393,10 +393,10 @@ int cipso_v4_cache_add(const unsigned char *cipso_ptr,
393 cipso_ptr_len = cipso_ptr[1]; 393 cipso_ptr_len = cipso_ptr[1];
394 394
395 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); 395 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
396 if (entry == NULL) 396 if (!entry)
397 return -ENOMEM; 397 return -ENOMEM;
398 entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); 398 entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC);
399 if (entry->key == NULL) { 399 if (!entry->key) {
400 ret_val = -ENOMEM; 400 ret_val = -ENOMEM;
401 goto cache_add_failure; 401 goto cache_add_failure;
402 } 402 }
@@ -502,7 +502,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
502 atomic_set(&doi_def->refcount, 1); 502 atomic_set(&doi_def->refcount, 1);
503 503
504 spin_lock(&cipso_v4_doi_list_lock); 504 spin_lock(&cipso_v4_doi_list_lock);
505 if (cipso_v4_doi_search(doi_def->doi) != NULL) { 505 if (cipso_v4_doi_search(doi_def->doi)) {
506 spin_unlock(&cipso_v4_doi_list_lock); 506 spin_unlock(&cipso_v4_doi_list_lock);
507 ret_val = -EEXIST; 507 ret_val = -EEXIST;
508 goto doi_add_return; 508 goto doi_add_return;
@@ -513,7 +513,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
513 513
514doi_add_return: 514doi_add_return:
515 audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); 515 audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info);
516 if (audit_buf != NULL) { 516 if (audit_buf) {
517 const char *type_str; 517 const char *type_str;
518 switch (doi_type) { 518 switch (doi_type) {
519 case CIPSO_V4_MAP_TRANS: 519 case CIPSO_V4_MAP_TRANS:
@@ -547,7 +547,7 @@ doi_add_return:
547 */ 547 */
548void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) 548void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
549{ 549{
550 if (doi_def == NULL) 550 if (!doi_def)
551 return; 551 return;
552 552
553 switch (doi_def->type) { 553 switch (doi_def->type) {
@@ -598,7 +598,7 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
598 598
599 spin_lock(&cipso_v4_doi_list_lock); 599 spin_lock(&cipso_v4_doi_list_lock);
600 doi_def = cipso_v4_doi_search(doi); 600 doi_def = cipso_v4_doi_search(doi);
601 if (doi_def == NULL) { 601 if (!doi_def) {
602 spin_unlock(&cipso_v4_doi_list_lock); 602 spin_unlock(&cipso_v4_doi_list_lock);
603 ret_val = -ENOENT; 603 ret_val = -ENOENT;
604 goto doi_remove_return; 604 goto doi_remove_return;
@@ -617,7 +617,7 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
617 617
618doi_remove_return: 618doi_remove_return:
619 audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); 619 audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info);
620 if (audit_buf != NULL) { 620 if (audit_buf) {
621 audit_log_format(audit_buf, 621 audit_log_format(audit_buf,
622 " cipso_doi=%u res=%u", 622 " cipso_doi=%u res=%u",
623 doi, ret_val == 0 ? 1 : 0); 623 doi, ret_val == 0 ? 1 : 0);
@@ -644,7 +644,7 @@ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi)
644 644
645 rcu_read_lock(); 645 rcu_read_lock();
646 doi_def = cipso_v4_doi_search(doi); 646 doi_def = cipso_v4_doi_search(doi);
647 if (doi_def == NULL) 647 if (!doi_def)
648 goto doi_getdef_return; 648 goto doi_getdef_return;
649 if (!atomic_inc_not_zero(&doi_def->refcount)) 649 if (!atomic_inc_not_zero(&doi_def->refcount))
650 doi_def = NULL; 650 doi_def = NULL;
@@ -664,7 +664,7 @@ doi_getdef_return:
664 */ 664 */
665void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) 665void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def)
666{ 666{
667 if (doi_def == NULL) 667 if (!doi_def)
668 return; 668 return;
669 669
670 if (!atomic_dec_and_test(&doi_def->refcount)) 670 if (!atomic_dec_and_test(&doi_def->refcount))
@@ -1642,7 +1642,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
1642 1642
1643 rcu_read_lock(); 1643 rcu_read_lock();
1644 doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); 1644 doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2]));
1645 if (doi_def == NULL) { 1645 if (!doi_def) {
1646 err_offset = 2; 1646 err_offset = 2;
1647 goto validate_return_locked; 1647 goto validate_return_locked;
1648 } 1648 }
@@ -1736,7 +1736,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
1736 * not the loopback device drop the packet. Further, 1736 * not the loopback device drop the packet. Further,
1737 * there is no legitimate reason for setting this from 1737 * there is no legitimate reason for setting this from
1738 * userspace so reject it if skb is NULL. */ 1738 * userspace so reject it if skb is NULL. */
1739 if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) { 1739 if (!skb || !(skb->dev->flags & IFF_LOOPBACK)) {
1740 err_offset = opt_iter; 1740 err_offset = opt_iter;
1741 goto validate_return_locked; 1741 goto validate_return_locked;
1742 } 1742 }
@@ -1897,7 +1897,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
1897 * defined yet but it is not a problem as the only users of these 1897 * defined yet but it is not a problem as the only users of these
1898 * "lite" PF_INET sockets are functions which do an accept() call 1898 * "lite" PF_INET sockets are functions which do an accept() call
1899 * afterwards so we will label the socket as part of the accept(). */ 1899 * afterwards so we will label the socket as part of the accept(). */
1900 if (sk == NULL) 1900 if (!sk)
1901 return 0; 1901 return 0;
1902 1902
1903 /* We allocate the maximum CIPSO option size here so we are probably 1903 /* We allocate the maximum CIPSO option size here so we are probably
@@ -1905,7 +1905,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
1905 * on and after all we are only talking about 40 bytes. */ 1905 * on and after all we are only talking about 40 bytes. */
1906 buf_len = CIPSO_V4_OPT_LEN_MAX; 1906 buf_len = CIPSO_V4_OPT_LEN_MAX;
1907 buf = kmalloc(buf_len, GFP_ATOMIC); 1907 buf = kmalloc(buf_len, GFP_ATOMIC);
1908 if (buf == NULL) { 1908 if (!buf) {
1909 ret_val = -ENOMEM; 1909 ret_val = -ENOMEM;
1910 goto socket_setattr_failure; 1910 goto socket_setattr_failure;
1911 } 1911 }
@@ -1921,7 +1921,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
1921 * set the IPOPT_CIPSO option. */ 1921 * set the IPOPT_CIPSO option. */
1922 opt_len = (buf_len + 3) & ~3; 1922 opt_len = (buf_len + 3) & ~3;
1923 opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); 1923 opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
1924 if (opt == NULL) { 1924 if (!opt) {
1925 ret_val = -ENOMEM; 1925 ret_val = -ENOMEM;
1926 goto socket_setattr_failure; 1926 goto socket_setattr_failure;
1927 } 1927 }
@@ -1981,7 +1981,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
1981 * on and after all we are only talking about 40 bytes. */ 1981 * on and after all we are only talking about 40 bytes. */
1982 buf_len = CIPSO_V4_OPT_LEN_MAX; 1982 buf_len = CIPSO_V4_OPT_LEN_MAX;
1983 buf = kmalloc(buf_len, GFP_ATOMIC); 1983 buf = kmalloc(buf_len, GFP_ATOMIC);
1984 if (buf == NULL) { 1984 if (!buf) {
1985 ret_val = -ENOMEM; 1985 ret_val = -ENOMEM;
1986 goto req_setattr_failure; 1986 goto req_setattr_failure;
1987 } 1987 }
@@ -1997,7 +1997,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
1997 * set the IPOPT_CIPSO option. */ 1997 * set the IPOPT_CIPSO option. */
1998 opt_len = (buf_len + 3) & ~3; 1998 opt_len = (buf_len + 3) & ~3;
1999 opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); 1999 opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
2000 if (opt == NULL) { 2000 if (!opt) {
2001 ret_val = -ENOMEM; 2001 ret_val = -ENOMEM;
2002 goto req_setattr_failure; 2002 goto req_setattr_failure;
2003 } 2003 }
@@ -2102,7 +2102,7 @@ void cipso_v4_sock_delattr(struct sock *sk)
2102 2102
2103 sk_inet = inet_sk(sk); 2103 sk_inet = inet_sk(sk);
2104 opt = rcu_dereference_protected(sk_inet->inet_opt, 1); 2104 opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
2105 if (opt == NULL || opt->opt.cipso == 0) 2105 if (!opt || opt->opt.cipso == 0)
2106 return; 2106 return;
2107 2107
2108 hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); 2108 hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
@@ -2128,7 +2128,7 @@ void cipso_v4_req_delattr(struct request_sock *req)
2128 2128
2129 req_inet = inet_rsk(req); 2129 req_inet = inet_rsk(req);
2130 opt = req_inet->opt; 2130 opt = req_inet->opt;
2131 if (opt == NULL || opt->opt.cipso == 0) 2131 if (!opt || opt->opt.cipso == 0)
2132 return; 2132 return;
2133 2133
2134 cipso_v4_delopt(&req_inet->opt); 2134 cipso_v4_delopt(&req_inet->opt);
@@ -2157,7 +2157,7 @@ int cipso_v4_getattr(const unsigned char *cipso,
2157 doi = get_unaligned_be32(&cipso[2]); 2157 doi = get_unaligned_be32(&cipso[2]);
2158 rcu_read_lock(); 2158 rcu_read_lock();
2159 doi_def = cipso_v4_doi_search(doi); 2159 doi_def = cipso_v4_doi_search(doi);
2160 if (doi_def == NULL) 2160 if (!doi_def)
2161 goto getattr_return; 2161 goto getattr_return;
2162 /* XXX - This code assumes only one tag per CIPSO option which isn't 2162 /* XXX - This code assumes only one tag per CIPSO option which isn't
2163 * really a good assumption to make but since we only support the MAC 2163 * really a good assumption to make but since we only support the MAC
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index c6473f365ad1..419d23c53ec7 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -585,7 +585,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
585 585
586 ifm = nlmsg_data(nlh); 586 ifm = nlmsg_data(nlh);
587 in_dev = inetdev_by_index(net, ifm->ifa_index); 587 in_dev = inetdev_by_index(net, ifm->ifa_index);
588 if (in_dev == NULL) { 588 if (!in_dev) {
589 err = -ENODEV; 589 err = -ENODEV;
590 goto errout; 590 goto errout;
591 } 591 }
@@ -755,21 +755,21 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
755 755
756 ifm = nlmsg_data(nlh); 756 ifm = nlmsg_data(nlh);
757 err = -EINVAL; 757 err = -EINVAL;
758 if (ifm->ifa_prefixlen > 32 || tb[IFA_LOCAL] == NULL) 758 if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
759 goto errout; 759 goto errout;
760 760
761 dev = __dev_get_by_index(net, ifm->ifa_index); 761 dev = __dev_get_by_index(net, ifm->ifa_index);
762 err = -ENODEV; 762 err = -ENODEV;
763 if (dev == NULL) 763 if (!dev)
764 goto errout; 764 goto errout;
765 765
766 in_dev = __in_dev_get_rtnl(dev); 766 in_dev = __in_dev_get_rtnl(dev);
767 err = -ENOBUFS; 767 err = -ENOBUFS;
768 if (in_dev == NULL) 768 if (!in_dev)
769 goto errout; 769 goto errout;
770 770
771 ifa = inet_alloc_ifa(); 771 ifa = inet_alloc_ifa();
772 if (ifa == NULL) 772 if (!ifa)
773 /* 773 /*
774 * A potential indev allocation can be left alive, it stays 774 * A potential indev allocation can be left alive, it stays
775 * assigned to its device and is destroy with it. 775 * assigned to its device and is destroy with it.
@@ -780,7 +780,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
780 neigh_parms_data_state_setall(in_dev->arp_parms); 780 neigh_parms_data_state_setall(in_dev->arp_parms);
781 in_dev_hold(in_dev); 781 in_dev_hold(in_dev);
782 782
783 if (tb[IFA_ADDRESS] == NULL) 783 if (!tb[IFA_ADDRESS])
784 tb[IFA_ADDRESS] = tb[IFA_LOCAL]; 784 tb[IFA_ADDRESS] = tb[IFA_LOCAL];
785 785
786 INIT_HLIST_NODE(&ifa->hash); 786 INIT_HLIST_NODE(&ifa->hash);
@@ -1290,7 +1290,7 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
1290 __be32 addr = 0; 1290 __be32 addr = 0;
1291 struct net_device *dev; 1291 struct net_device *dev;
1292 1292
1293 if (in_dev != NULL) 1293 if (in_dev)
1294 return confirm_addr_indev(in_dev, dst, local, scope); 1294 return confirm_addr_indev(in_dev, dst, local, scope);
1295 1295
1296 rcu_read_lock(); 1296 rcu_read_lock();
@@ -1340,7 +1340,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1340 if (named++ == 0) 1340 if (named++ == 0)
1341 goto skip; 1341 goto skip;
1342 dot = strchr(old, ':'); 1342 dot = strchr(old, ':');
1343 if (dot == NULL) { 1343 if (!dot) {
1344 sprintf(old, ":%d", named); 1344 sprintf(old, ":%d", named);
1345 dot = old; 1345 dot = old;
1346 } 1346 }
@@ -1509,7 +1509,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1509 u32 preferred, valid; 1509 u32 preferred, valid;
1510 1510
1511 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags); 1511 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
1512 if (nlh == NULL) 1512 if (!nlh)
1513 return -EMSGSIZE; 1513 return -EMSGSIZE;
1514 1514
1515 ifm = nlmsg_data(nlh); 1515 ifm = nlmsg_data(nlh);
@@ -1628,7 +1628,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
1628 1628
1629 net = dev_net(ifa->ifa_dev->dev); 1629 net = dev_net(ifa->ifa_dev->dev);
1630 skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL); 1630 skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
1631 if (skb == NULL) 1631 if (!skb)
1632 goto errout; 1632 goto errout;
1633 1633
1634 err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0); 1634 err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
@@ -1665,7 +1665,7 @@ static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
1665 return -ENODATA; 1665 return -ENODATA;
1666 1666
1667 nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4); 1667 nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
1668 if (nla == NULL) 1668 if (!nla)
1669 return -EMSGSIZE; 1669 return -EMSGSIZE;
1670 1670
1671 for (i = 0; i < IPV4_DEVCONF_MAX; i++) 1671 for (i = 0; i < IPV4_DEVCONF_MAX; i++)
@@ -1754,7 +1754,7 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
1754 1754
1755 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg), 1755 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
1756 flags); 1756 flags);
1757 if (nlh == NULL) 1757 if (!nlh)
1758 return -EMSGSIZE; 1758 return -EMSGSIZE;
1759 1759
1760 ncm = nlmsg_data(nlh); 1760 ncm = nlmsg_data(nlh);
@@ -1796,7 +1796,7 @@ void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
1796 int err = -ENOBUFS; 1796 int err = -ENOBUFS;
1797 1797
1798 skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC); 1798 skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC);
1799 if (skb == NULL) 1799 if (!skb)
1800 goto errout; 1800 goto errout;
1801 1801
1802 err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0, 1802 err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
@@ -1853,10 +1853,10 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
1853 break; 1853 break;
1854 default: 1854 default:
1855 dev = __dev_get_by_index(net, ifindex); 1855 dev = __dev_get_by_index(net, ifindex);
1856 if (dev == NULL) 1856 if (!dev)
1857 goto errout; 1857 goto errout;
1858 in_dev = __in_dev_get_rtnl(dev); 1858 in_dev = __in_dev_get_rtnl(dev);
1859 if (in_dev == NULL) 1859 if (!in_dev)
1860 goto errout; 1860 goto errout;
1861 devconf = &in_dev->cnf; 1861 devconf = &in_dev->cnf;
1862 break; 1862 break;
@@ -1864,7 +1864,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
1864 1864
1865 err = -ENOBUFS; 1865 err = -ENOBUFS;
1866 skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC); 1866 skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC);
1867 if (skb == NULL) 1867 if (!skb)
1868 goto errout; 1868 goto errout;
1869 1869
1870 err = inet_netconf_fill_devconf(skb, ifindex, devconf, 1870 err = inet_netconf_fill_devconf(skb, ifindex, devconf,
@@ -2215,7 +2215,7 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
2215{ 2215{
2216 struct devinet_sysctl_table *t = cnf->sysctl; 2216 struct devinet_sysctl_table *t = cnf->sysctl;
2217 2217
2218 if (t == NULL) 2218 if (!t)
2219 return; 2219 return;
2220 2220
2221 cnf->sysctl = NULL; 2221 cnf->sysctl = NULL;
@@ -2276,16 +2276,16 @@ static __net_init int devinet_init_net(struct net *net)
2276 2276
2277 if (!net_eq(net, &init_net)) { 2277 if (!net_eq(net, &init_net)) {
2278 all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL); 2278 all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
2279 if (all == NULL) 2279 if (!all)
2280 goto err_alloc_all; 2280 goto err_alloc_all;
2281 2281
2282 dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL); 2282 dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
2283 if (dflt == NULL) 2283 if (!dflt)
2284 goto err_alloc_dflt; 2284 goto err_alloc_dflt;
2285 2285
2286#ifdef CONFIG_SYSCTL 2286#ifdef CONFIG_SYSCTL
2287 tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL); 2287 tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
2288 if (tbl == NULL) 2288 if (!tbl)
2289 goto err_alloc_ctl; 2289 goto err_alloc_ctl;
2290 2290
2291 tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1]; 2291 tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
@@ -2305,7 +2305,7 @@ static __net_init int devinet_init_net(struct net *net)
2305 2305
2306 err = -ENOMEM; 2306 err = -ENOMEM;
2307 forw_hdr = register_net_sysctl(net, "net/ipv4", tbl); 2307 forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
2308 if (forw_hdr == NULL) 2308 if (!forw_hdr)
2309 goto err_reg_ctl; 2309 goto err_reg_ctl;
2310 net->ipv4.forw_hdr = forw_hdr; 2310 net->ipv4.forw_hdr = forw_hdr;
2311#endif 2311#endif
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 60173d4d3a0e..421a80b09b62 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -553,7 +553,7 @@ static int esp_init_authenc(struct xfrm_state *x)
553 int err; 553 int err;
554 554
555 err = -EINVAL; 555 err = -EINVAL;
556 if (x->ealg == NULL) 556 if (!x->ealg)
557 goto error; 557 goto error;
558 558
559 err = -ENAMETOOLONG; 559 err = -ENAMETOOLONG;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 718b0a16ea40..872494e6e6eb 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -53,11 +53,11 @@ static int __net_init fib4_rules_init(struct net *net)
53 struct fib_table *local_table, *main_table; 53 struct fib_table *local_table, *main_table;
54 54
55 main_table = fib_trie_table(RT_TABLE_MAIN, NULL); 55 main_table = fib_trie_table(RT_TABLE_MAIN, NULL);
56 if (main_table == NULL) 56 if (!main_table)
57 return -ENOMEM; 57 return -ENOMEM;
58 58
59 local_table = fib_trie_table(RT_TABLE_LOCAL, main_table); 59 local_table = fib_trie_table(RT_TABLE_LOCAL, main_table);
60 if (local_table == NULL) 60 if (!local_table)
61 goto fail; 61 goto fail;
62 62
63 hlist_add_head_rcu(&local_table->tb_hlist, 63 hlist_add_head_rcu(&local_table->tb_hlist,
@@ -486,7 +486,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
486 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) 486 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
487 if (strcmp(ifa->ifa_label, devname) == 0) 487 if (strcmp(ifa->ifa_label, devname) == 0)
488 break; 488 break;
489 if (ifa == NULL) 489 if (!ifa)
490 return -ENODEV; 490 return -ENODEV;
491 cfg->fc_prefsrc = ifa->ifa_local; 491 cfg->fc_prefsrc = ifa->ifa_local;
492 } 492 }
@@ -514,7 +514,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
514 int len = 0; 514 int len = 0;
515 515
516 mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL); 516 mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
517 if (mx == NULL) 517 if (!mx)
518 return -ENOMEM; 518 return -ENOMEM;
519 519
520 if (rt->rt_flags & RTF_MTU) 520 if (rt->rt_flags & RTF_MTU)
@@ -676,7 +676,7 @@ static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
676 goto errout; 676 goto errout;
677 677
678 tb = fib_get_table(net, cfg.fc_table); 678 tb = fib_get_table(net, cfg.fc_table);
679 if (tb == NULL) { 679 if (!tb) {
680 err = -ESRCH; 680 err = -ESRCH;
681 goto errout; 681 goto errout;
682 } 682 }
@@ -698,7 +698,7 @@ static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
698 goto errout; 698 goto errout;
699 699
700 tb = fib_new_table(net, cfg.fc_table); 700 tb = fib_new_table(net, cfg.fc_table);
701 if (tb == NULL) { 701 if (!tb) {
702 err = -ENOBUFS; 702 err = -ENOBUFS;
703 goto errout; 703 goto errout;
704 } 704 }
@@ -779,7 +779,7 @@ static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifad
779 else 779 else
780 tb = fib_new_table(net, RT_TABLE_LOCAL); 780 tb = fib_new_table(net, RT_TABLE_LOCAL);
781 781
782 if (tb == NULL) 782 if (!tb)
783 return; 783 return;
784 784
785 cfg.fc_table = tb->tb_id; 785 cfg.fc_table = tb->tb_id;
@@ -806,7 +806,7 @@ void fib_add_ifaddr(struct in_ifaddr *ifa)
806 806
807 if (ifa->ifa_flags & IFA_F_SECONDARY) { 807 if (ifa->ifa_flags & IFA_F_SECONDARY) {
808 prim = inet_ifa_byprefix(in_dev, prefix, mask); 808 prim = inet_ifa_byprefix(in_dev, prefix, mask);
809 if (prim == NULL) { 809 if (!prim) {
810 pr_warn("%s: bug: prim == NULL\n", __func__); 810 pr_warn("%s: bug: prim == NULL\n", __func__);
811 return; 811 return;
812 } 812 }
@@ -860,7 +860,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
860 860
861 if (ifa->ifa_flags & IFA_F_SECONDARY) { 861 if (ifa->ifa_flags & IFA_F_SECONDARY) {
862 prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask); 862 prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
863 if (prim == NULL) { 863 if (!prim) {
864 pr_warn("%s: bug: prim == NULL\n", __func__); 864 pr_warn("%s: bug: prim == NULL\n", __func__);
865 return; 865 return;
866 } 866 }
@@ -1030,7 +1030,7 @@ static void nl_fib_input(struct sk_buff *skb)
1030 return; 1030 return;
1031 1031
1032 skb = netlink_skb_clone(skb, GFP_KERNEL); 1032 skb = netlink_skb_clone(skb, GFP_KERNEL);
1033 if (skb == NULL) 1033 if (!skb)
1034 return; 1034 return;
1035 nlh = nlmsg_hdr(skb); 1035 nlh = nlmsg_hdr(skb);
1036 1036
@@ -1051,7 +1051,7 @@ static int __net_init nl_fib_lookup_init(struct net *net)
1051 }; 1051 };
1052 1052
1053 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg); 1053 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg);
1054 if (sk == NULL) 1054 if (!sk)
1055 return -EAFNOSUPPORT; 1055 return -EAFNOSUPPORT;
1056 net->ipv4.fibnl = sk; 1056 net->ipv4.fibnl = sk;
1057 return 0; 1057 return 0;
@@ -1089,7 +1089,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
1089 case NETDEV_DOWN: 1089 case NETDEV_DOWN:
1090 fib_del_ifaddr(ifa, NULL); 1090 fib_del_ifaddr(ifa, NULL);
1091 atomic_inc(&net->ipv4.dev_addr_genid); 1091 atomic_inc(&net->ipv4.dev_addr_genid);
1092 if (ifa->ifa_dev->ifa_list == NULL) { 1092 if (!ifa->ifa_dev->ifa_list) {
1093 /* Last address was deleted from this interface. 1093 /* Last address was deleted from this interface.
1094 * Disable IP. 1094 * Disable IP.
1095 */ 1095 */
@@ -1157,7 +1157,7 @@ static int __net_init ip_fib_net_init(struct net *net)
1157 size = max_t(size_t, size, L1_CACHE_BYTES); 1157 size = max_t(size_t, size, L1_CACHE_BYTES);
1158 1158
1159 net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL); 1159 net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL);
1160 if (net->ipv4.fib_table_hash == NULL) 1160 if (!net->ipv4.fib_table_hash)
1161 return -ENOMEM; 1161 return -ENOMEM;
1162 1162
1163 err = fib4_rules_init(net); 1163 err = fib4_rules_init(net);
@@ -1175,13 +1175,11 @@ static void ip_fib_net_exit(struct net *net)
1175 unsigned int i; 1175 unsigned int i;
1176 1176
1177 rtnl_lock(); 1177 rtnl_lock();
1178
1179#ifdef CONFIG_IP_MULTIPLE_TABLES 1178#ifdef CONFIG_IP_MULTIPLE_TABLES
1180 RCU_INIT_POINTER(net->ipv4.fib_local, NULL); 1179 RCU_INIT_POINTER(net->ipv4.fib_local, NULL);
1181 RCU_INIT_POINTER(net->ipv4.fib_main, NULL); 1180 RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
1182 RCU_INIT_POINTER(net->ipv4.fib_default, NULL); 1181 RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
1183#endif 1182#endif
1184
1185 for (i = 0; i < FIB_TABLE_HASHSZ; i++) { 1183 for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
1186 struct hlist_head *head = &net->ipv4.fib_table_hash[i]; 1184 struct hlist_head *head = &net->ipv4.fib_table_hash[i];
1187 struct hlist_node *tmp; 1185 struct hlist_node *tmp;
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 8162dd8e86d7..56151982f74e 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -153,7 +153,7 @@ static struct fib_table *fib_empty_table(struct net *net)
153 u32 id; 153 u32 id;
154 154
155 for (id = 1; id <= RT_TABLE_MAX; id++) 155 for (id = 1; id <= RT_TABLE_MAX; id++)
156 if (fib_get_table(net, id) == NULL) 156 if (!fib_get_table(net, id))
157 return fib_new_table(net, id); 157 return fib_new_table(net, id);
158 return NULL; 158 return NULL;
159} 159}
@@ -184,7 +184,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
184 struct fib_table *table; 184 struct fib_table *table;
185 185
186 table = fib_empty_table(net); 186 table = fib_empty_table(net);
187 if (table == NULL) { 187 if (!table) {
188 err = -ENOBUFS; 188 err = -ENOBUFS;
189 goto errout; 189 goto errout;
190 } 190 }
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index eac5aec7772a..8d695b6659c7 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -390,7 +390,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
390 int err = -ENOBUFS; 390 int err = -ENOBUFS;
391 391
392 skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL); 392 skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
393 if (skb == NULL) 393 if (!skb)
394 goto errout; 394 goto errout;
395 395
396 err = fib_dump_info(skb, info->portid, seq, event, tb_id, 396 err = fib_dump_info(skb, info->portid, seq, event, tb_id,
@@ -503,7 +503,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
503 } 503 }
504 504
505#ifdef CONFIG_IP_ROUTE_MULTIPATH 505#ifdef CONFIG_IP_ROUTE_MULTIPATH
506 if (cfg->fc_mp == NULL) 506 if (!cfg->fc_mp)
507 return 0; 507 return 0;
508 508
509 rtnh = cfg->fc_mp; 509 rtnh = cfg->fc_mp;
@@ -646,7 +646,7 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
646 rcu_read_lock(); 646 rcu_read_lock();
647 err = -ENODEV; 647 err = -ENODEV;
648 in_dev = inetdev_by_index(net, nh->nh_oif); 648 in_dev = inetdev_by_index(net, nh->nh_oif);
649 if (in_dev == NULL) 649 if (!in_dev)
650 goto out; 650 goto out;
651 err = -ENETDOWN; 651 err = -ENETDOWN;
652 if (!(in_dev->dev->flags & IFF_UP)) 652 if (!(in_dev->dev->flags & IFF_UP))
@@ -803,7 +803,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
803 } 803 }
804 804
805 fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); 805 fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
806 if (fi == NULL) 806 if (!fi)
807 goto failure; 807 goto failure;
808 fib_info_cnt++; 808 fib_info_cnt++;
809 if (cfg->fc_mx) { 809 if (cfg->fc_mx) {
@@ -921,7 +921,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
921 nh->nh_scope = RT_SCOPE_NOWHERE; 921 nh->nh_scope = RT_SCOPE_NOWHERE;
922 nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif); 922 nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
923 err = -ENODEV; 923 err = -ENODEV;
924 if (nh->nh_dev == NULL) 924 if (!nh->nh_dev)
925 goto failure; 925 goto failure;
926 } else { 926 } else {
927 change_nexthops(fi) { 927 change_nexthops(fi) {
@@ -995,7 +995,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
995 struct rtmsg *rtm; 995 struct rtmsg *rtm;
996 996
997 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags); 997 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
998 if (nlh == NULL) 998 if (!nlh)
999 return -EMSGSIZE; 999 return -EMSGSIZE;
1000 1000
1001 rtm = nlmsg_data(nlh); 1001 rtm = nlmsg_data(nlh);
@@ -1045,12 +1045,12 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
1045 struct nlattr *mp; 1045 struct nlattr *mp;
1046 1046
1047 mp = nla_nest_start(skb, RTA_MULTIPATH); 1047 mp = nla_nest_start(skb, RTA_MULTIPATH);
1048 if (mp == NULL) 1048 if (!mp)
1049 goto nla_put_failure; 1049 goto nla_put_failure;
1050 1050
1051 for_nexthops(fi) { 1051 for_nexthops(fi) {
1052 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh)); 1052 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
1053 if (rtnh == NULL) 1053 if (!rtnh)
1054 goto nla_put_failure; 1054 goto nla_put_failure;
1055 1055
1056 rtnh->rtnh_flags = nh->nh_flags & 0xFF; 1056 rtnh->rtnh_flags = nh->nh_flags & 0xFF;
@@ -1093,7 +1093,7 @@ int fib_sync_down_addr(struct net *net, __be32 local)
1093 struct hlist_head *head = &fib_info_laddrhash[hash]; 1093 struct hlist_head *head = &fib_info_laddrhash[hash];
1094 struct fib_info *fi; 1094 struct fib_info *fi;
1095 1095
1096 if (fib_info_laddrhash == NULL || local == 0) 1096 if (!fib_info_laddrhash || local == 0)
1097 return 0; 1097 return 0;
1098 1098
1099 hlist_for_each_entry(fi, head, fib_lhash) { 1099 hlist_for_each_entry(fi, head, fib_lhash) {
@@ -1182,7 +1182,7 @@ void fib_select_default(struct fib_result *res)
1182 1182
1183 fib_alias_accessed(fa); 1183 fib_alias_accessed(fa);
1184 1184
1185 if (fi == NULL) { 1185 if (!fi) {
1186 if (next_fi != res->fi) 1186 if (next_fi != res->fi)
1187 break; 1187 break;
1188 } else if (!fib_detect_death(fi, order, &last_resort, 1188 } else if (!fib_detect_death(fi, order, &last_resort,
@@ -1195,7 +1195,7 @@ void fib_select_default(struct fib_result *res)
1195 order++; 1195 order++;
1196 } 1196 }
1197 1197
1198 if (order <= 0 || fi == NULL) { 1198 if (order <= 0 || !fi) {
1199 tb->tb_default = -1; 1199 tb->tb_default = -1;
1200 goto out; 1200 goto out;
1201 } 1201 }
@@ -1251,7 +1251,7 @@ int fib_sync_up(struct net_device *dev)
1251 alive++; 1251 alive++;
1252 continue; 1252 continue;
1253 } 1253 }
1254 if (nexthop_nh->nh_dev == NULL || 1254 if (!nexthop_nh->nh_dev ||
1255 !(nexthop_nh->nh_dev->flags & IFF_UP)) 1255 !(nexthop_nh->nh_dev->flags & IFF_UP))
1256 continue; 1256 continue;
1257 if (nexthop_nh->nh_dev != dev || 1257 if (nexthop_nh->nh_dev != dev ||
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 2c7c299ee2b9..e13fcc602da2 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -391,9 +391,9 @@ static void put_child(struct key_vector *tn, unsigned long i,
391 BUG_ON(i >= child_length(tn)); 391 BUG_ON(i >= child_length(tn));
392 392
393 /* update emptyChildren, overflow into fullChildren */ 393 /* update emptyChildren, overflow into fullChildren */
394 if (n == NULL && chi != NULL) 394 if (!n && chi)
395 empty_child_inc(tn); 395 empty_child_inc(tn);
396 if (n != NULL && chi == NULL) 396 if (n && !chi)
397 empty_child_dec(tn); 397 empty_child_dec(tn);
398 398
399 /* update fullChildren */ 399 /* update fullChildren */
@@ -528,7 +528,7 @@ static struct key_vector *inflate(struct trie *t,
528 unsigned long j, k; 528 unsigned long j, k;
529 529
530 /* An empty child */ 530 /* An empty child */
531 if (inode == NULL) 531 if (!inode)
532 continue; 532 continue;
533 533
534 /* A leaf or an internal node with skipped bits */ 534 /* A leaf or an internal node with skipped bits */
@@ -1154,7 +1154,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
1154 } 1154 }
1155 err = -ENOBUFS; 1155 err = -ENOBUFS;
1156 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); 1156 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1157 if (new_fa == NULL) 1157 if (!new_fa)
1158 goto out; 1158 goto out;
1159 1159
1160 fi_drop = fa->fa_info; 1160 fi_drop = fa->fa_info;
@@ -1204,7 +1204,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
1204 1204
1205 err = -ENOBUFS; 1205 err = -ENOBUFS;
1206 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); 1206 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1207 if (new_fa == NULL) 1207 if (!new_fa)
1208 goto out; 1208 goto out;
1209 1209
1210 new_fa->fa_info = fi; 1210 new_fa->fa_info = fi;
@@ -1975,7 +1975,7 @@ struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
1975 sz += sizeof(struct trie); 1975 sz += sizeof(struct trie);
1976 1976
1977 tb = kzalloc(sz, GFP_KERNEL); 1977 tb = kzalloc(sz, GFP_KERNEL);
1978 if (tb == NULL) 1978 if (!tb)
1979 return NULL; 1979 return NULL;
1980 1980
1981 tb->tb_id = id; 1981 tb->tb_id = id;
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index 5a4828ba05ad..b77f5e84c623 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -136,7 +136,7 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
136 136
137 skb_set_inner_protocol(skb, htons(ETH_P_TEB)); 137 skb_set_inner_protocol(skb, htons(ETH_P_TEB));
138 138
139 return udp_tunnel_xmit_skb(rt, skb, src, dst, 139 return udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, src, dst,
140 tos, ttl, df, src_port, dst_port, xnet, 140 tos, ttl, df, src_port, dst_port, xnet,
141 !csum); 141 !csum);
142} 142}
@@ -196,7 +196,7 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
196 196
197 rcu_read_lock(); 197 rcu_read_lock();
198 ptype = gro_find_receive_by_type(type); 198 ptype = gro_find_receive_by_type(type);
199 if (ptype == NULL) { 199 if (!ptype) {
200 flush = 1; 200 flush = 1;
201 goto out_unlock; 201 goto out_unlock;
202 } 202 }
@@ -230,7 +230,7 @@ static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
230 230
231 rcu_read_lock(); 231 rcu_read_lock();
232 ptype = gro_find_complete_by_type(type); 232 ptype = gro_find_complete_by_type(type);
233 if (ptype != NULL) 233 if (ptype)
234 err = ptype->callbacks.gro_complete(skb, nhoff + gh_len); 234 err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
235 235
236 rcu_read_unlock(); 236 rcu_read_unlock();
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 51973ddc05a6..5aa46d4b44ef 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -149,7 +149,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
149 149
150 rcu_read_lock(); 150 rcu_read_lock();
151 ptype = gro_find_receive_by_type(type); 151 ptype = gro_find_receive_by_type(type);
152 if (ptype == NULL) 152 if (!ptype)
153 goto out_unlock; 153 goto out_unlock;
154 154
155 grehlen = GRE_HEADER_SECTION; 155 grehlen = GRE_HEADER_SECTION;
@@ -243,7 +243,7 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
243 243
244 rcu_read_lock(); 244 rcu_read_lock();
245 ptype = gro_find_complete_by_type(type); 245 ptype = gro_find_complete_by_type(type);
246 if (ptype != NULL) 246 if (ptype)
247 err = ptype->callbacks.gro_complete(skb, nhoff + grehlen); 247 err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
248 248
249 rcu_read_unlock(); 249 rcu_read_unlock();
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 5e564014a0b7..f5203fba6236 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -399,7 +399,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
399 return; 399 return;
400 400
401 sk = icmp_xmit_lock(net); 401 sk = icmp_xmit_lock(net);
402 if (sk == NULL) 402 if (!sk)
403 return; 403 return;
404 inet = inet_sk(sk); 404 inet = inet_sk(sk);
405 405
@@ -609,7 +609,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
609 skb_in->data, 609 skb_in->data,
610 sizeof(_inner_type), 610 sizeof(_inner_type),
611 &_inner_type); 611 &_inner_type);
612 if (itp == NULL) 612 if (!itp)
613 goto out; 613 goto out;
614 614
615 /* 615 /*
@@ -627,7 +627,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
627 return; 627 return;
628 628
629 sk = icmp_xmit_lock(net); 629 sk = icmp_xmit_lock(net);
630 if (sk == NULL) 630 if (!sk)
631 goto out_free; 631 goto out_free;
632 632
633 /* 633 /*
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index ad09213ac5b2..a3a697f5ffba 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -692,7 +692,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
692 hlen = LL_RESERVED_SPACE(dev); 692 hlen = LL_RESERVED_SPACE(dev);
693 tlen = dev->needed_tailroom; 693 tlen = dev->needed_tailroom;
694 skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC); 694 skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
695 if (skb == NULL) { 695 if (!skb) {
696 ip_rt_put(rt); 696 ip_rt_put(rt);
697 return -1; 697 return -1;
698 } 698 }
@@ -981,7 +981,7 @@ int igmp_rcv(struct sk_buff *skb)
981 int len = skb->len; 981 int len = skb->len;
982 bool dropped = true; 982 bool dropped = true;
983 983
984 if (in_dev == NULL) 984 if (!in_dev)
985 goto drop; 985 goto drop;
986 986
987 if (!pskb_may_pull(skb, sizeof(struct igmphdr))) 987 if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
@@ -1888,7 +1888,7 @@ int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
1888 if (count >= sysctl_igmp_max_memberships) 1888 if (count >= sysctl_igmp_max_memberships)
1889 goto done; 1889 goto done;
1890 iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL); 1890 iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
1891 if (iml == NULL) 1891 if (!iml)
1892 goto done; 1892 goto done;
1893 1893
1894 memcpy(&iml->multi, imr, sizeof(*imr)); 1894 memcpy(&iml->multi, imr, sizeof(*imr));
@@ -1909,7 +1909,7 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
1909 struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist); 1909 struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
1910 int err; 1910 int err;
1911 1911
1912 if (psf == NULL) { 1912 if (!psf) {
1913 /* any-source empty exclude case */ 1913 /* any-source empty exclude case */
1914 return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr, 1914 return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
1915 iml->sfmode, 0, NULL, 0); 1915 iml->sfmode, 0, NULL, 0);
@@ -2360,7 +2360,7 @@ void ip_mc_drop_socket(struct sock *sk)
2360 struct ip_mc_socklist *iml; 2360 struct ip_mc_socklist *iml;
2361 struct net *net = sock_net(sk); 2361 struct net *net = sock_net(sk);
2362 2362
2363 if (inet->mc_list == NULL) 2363 if (!inet->mc_list)
2364 return; 2364 return;
2365 2365
2366 rtnl_lock(); 2366 rtnl_lock();
@@ -2370,7 +2370,7 @@ void ip_mc_drop_socket(struct sock *sk)
2370 inet->mc_list = iml->next_rcu; 2370 inet->mc_list = iml->next_rcu;
2371 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); 2371 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
2372 (void) ip_mc_leave_src(sk, iml, in_dev); 2372 (void) ip_mc_leave_src(sk, iml, in_dev);
2373 if (in_dev != NULL) 2373 if (in_dev)
2374 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); 2374 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
2375 /* decrease mem now to avoid the memleak warning */ 2375 /* decrease mem now to avoid the memleak warning */
2376 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); 2376 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
@@ -2587,13 +2587,13 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
2587 for_each_netdev_rcu(net, state->dev) { 2587 for_each_netdev_rcu(net, state->dev) {
2588 struct in_device *idev; 2588 struct in_device *idev;
2589 idev = __in_dev_get_rcu(state->dev); 2589 idev = __in_dev_get_rcu(state->dev);
2590 if (unlikely(idev == NULL)) 2590 if (unlikely(!idev))
2591 continue; 2591 continue;
2592 im = rcu_dereference(idev->mc_list); 2592 im = rcu_dereference(idev->mc_list);
2593 if (likely(im != NULL)) { 2593 if (likely(im)) {
2594 spin_lock_bh(&im->lock); 2594 spin_lock_bh(&im->lock);
2595 psf = im->sources; 2595 psf = im->sources;
2596 if (likely(psf != NULL)) { 2596 if (likely(psf)) {
2597 state->im = im; 2597 state->im = im;
2598 state->idev = idev; 2598 state->idev = idev;
2599 break; 2599 break;
@@ -2663,7 +2663,7 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
2663 __releases(rcu) 2663 __releases(rcu)
2664{ 2664{
2665 struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); 2665 struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
2666 if (likely(state->im != NULL)) { 2666 if (likely(state->im)) {
2667 spin_unlock_bh(&state->im->lock); 2667 spin_unlock_bh(&state->im->lock);
2668 state->im = NULL; 2668 state->im = NULL;
2669 } 2669 }
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 79c0c9439fdc..5c3dd6267ed3 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -673,7 +673,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
673{ 673{
674 struct sock *newsk = sk_clone_lock(sk, priority); 674 struct sock *newsk = sk_clone_lock(sk, priority);
675 675
676 if (newsk != NULL) { 676 if (newsk) {
677 struct inet_connection_sock *newicsk = inet_csk(newsk); 677 struct inet_connection_sock *newicsk = inet_csk(newsk);
678 678
679 newsk->sk_state = TCP_SYN_RECV; 679 newsk->sk_state = TCP_SYN_RECV;
@@ -843,7 +843,7 @@ void inet_csk_listen_stop(struct sock *sk)
843 sk_acceptq_removed(sk); 843 sk_acceptq_removed(sk);
844 reqsk_put(req); 844 reqsk_put(req);
845 } 845 }
846 if (queue->fastopenq != NULL) { 846 if (queue->fastopenq) {
847 /* Free all the reqs queued in rskq_rst_head. */ 847 /* Free all the reqs queued in rskq_rst_head. */
848 spin_lock_bh(&queue->fastopenq->lock); 848 spin_lock_bh(&queue->fastopenq->lock);
849 acc_req = queue->fastopenq->rskq_rst_head; 849 acc_req = queue->fastopenq->rskq_rst_head;
@@ -875,7 +875,7 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
875{ 875{
876 const struct inet_connection_sock *icsk = inet_csk(sk); 876 const struct inet_connection_sock *icsk = inet_csk(sk);
877 877
878 if (icsk->icsk_af_ops->compat_getsockopt != NULL) 878 if (icsk->icsk_af_ops->compat_getsockopt)
879 return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname, 879 return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
880 optval, optlen); 880 optval, optlen);
881 return icsk->icsk_af_ops->getsockopt(sk, level, optname, 881 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
@@ -888,7 +888,7 @@ int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
888{ 888{
889 const struct inet_connection_sock *icsk = inet_csk(sk); 889 const struct inet_connection_sock *icsk = inet_csk(sk);
890 890
891 if (icsk->icsk_af_ops->compat_setsockopt != NULL) 891 if (icsk->icsk_af_ops->compat_setsockopt)
892 return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname, 892 return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
893 optval, optlen); 893 optval, optlen);
894 return icsk->icsk_af_ops->setsockopt(sk, level, optname, 894 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index e7920352646a..5e346a082e5f 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -385,7 +385,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
385 } 385 }
386 386
387 q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC); 387 q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
388 if (q == NULL) 388 if (!q)
389 return NULL; 389 return NULL;
390 390
391 q->net = nf; 391 q->net = nf;
@@ -406,7 +406,7 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
406 struct inet_frag_queue *q; 406 struct inet_frag_queue *q;
407 407
408 q = inet_frag_alloc(nf, f, arg); 408 q = inet_frag_alloc(nf, f, arg);
409 if (q == NULL) 409 if (!q)
410 return NULL; 410 return NULL;
411 411
412 return inet_frag_intern(nf, q, f, arg); 412 return inet_frag_intern(nf, q, f, arg);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 0fb841b9d834..d4630bf2d9aa 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -64,7 +64,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
64{ 64{
65 struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); 65 struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
66 66
67 if (tb != NULL) { 67 if (tb) {
68 write_pnet(&tb->ib_net, net); 68 write_pnet(&tb->ib_net, net);
69 tb->port = snum; 69 tb->port = snum;
70 tb->fastreuse = 0; 70 tb->fastreuse = 0;
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index f38e387448fb..118f0f195820 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -173,7 +173,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
173 struct inet_timewait_sock *tw = 173 struct inet_timewait_sock *tw =
174 kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab, 174 kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
175 GFP_ATOMIC); 175 GFP_ATOMIC);
176 if (tw != NULL) { 176 if (tw) {
177 const struct inet_sock *inet = inet_sk(sk); 177 const struct inet_sock *inet = inet_sk(sk);
178 178
179 kmemcheck_annotate_bitfield(tw, flags); 179 kmemcheck_annotate_bitfield(tw, flags);
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index d9bc28ac5d1b..939992c456f3 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -57,7 +57,7 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
57} 57}
58 58
59 59
60static int ip_forward_finish(struct sk_buff *skb) 60static int ip_forward_finish(struct sock *sk, struct sk_buff *skb)
61{ 61{
62 struct ip_options *opt = &(IPCB(skb)->opt); 62 struct ip_options *opt = &(IPCB(skb)->opt);
63 63
@@ -68,7 +68,7 @@ static int ip_forward_finish(struct sk_buff *skb)
68 ip_forward_options(skb); 68 ip_forward_options(skb);
69 69
70 skb_sender_cpu_clear(skb); 70 skb_sender_cpu_clear(skb);
71 return dst_output(skb); 71 return dst_output_sk(sk, skb);
72} 72}
73 73
74int ip_forward(struct sk_buff *skb) 74int ip_forward(struct sk_buff *skb)
@@ -136,8 +136,8 @@ int ip_forward(struct sk_buff *skb)
136 136
137 skb->priority = rt_tos2priority(iph->tos); 137 skb->priority = rt_tos2priority(iph->tos);
138 138
139 return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, 139 return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, NULL, skb,
140 rt->dst.dev, ip_forward_finish); 140 skb->dev, rt->dst.dev, ip_forward_finish);
141 141
142sr_failed: 142sr_failed:
143 /* 143 /*
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 145a50c4d566..cc1da6d9cb35 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -372,7 +372,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
372 goto err; 372 goto err;
373 373
374 err = -ENOMEM; 374 err = -ENOMEM;
375 if (pskb_pull(skb, ihl) == NULL) 375 if (!pskb_pull(skb, ihl))
376 goto err; 376 goto err;
377 377
378 err = pskb_trim_rcsum(skb, end - offset); 378 err = pskb_trim_rcsum(skb, end - offset);
@@ -537,7 +537,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
537 qp->q.fragments = head; 537 qp->q.fragments = head;
538 } 538 }
539 539
540 WARN_ON(head == NULL); 540 WARN_ON(!head);
541 WARN_ON(FRAG_CB(head)->offset != 0); 541 WARN_ON(FRAG_CB(head)->offset != 0);
542 542
543 /* Allocate a new buffer for the datagram. */ 543 /* Allocate a new buffer for the datagram. */
@@ -559,7 +559,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
559 struct sk_buff *clone; 559 struct sk_buff *clone;
560 int i, plen = 0; 560 int i, plen = 0;
561 561
562 if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) 562 clone = alloc_skb(0, GFP_ATOMIC);
563 if (!clone)
563 goto out_nomem; 564 goto out_nomem;
564 clone->next = head->next; 565 clone->next = head->next;
565 head->next = clone; 566 head->next = clone;
@@ -638,7 +639,8 @@ int ip_defrag(struct sk_buff *skb, u32 user)
638 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); 639 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
639 640
640 /* Lookup (or create) queue header */ 641 /* Lookup (or create) queue header */
641 if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) { 642 qp = ip_find(net, ip_hdr(skb), user);
643 if (qp) {
642 int ret; 644 int ret;
643 645
644 spin_lock(&qp->q.lock); 646 spin_lock(&qp->q.lock);
@@ -754,7 +756,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
754 table = ip4_frags_ns_ctl_table; 756 table = ip4_frags_ns_ctl_table;
755 if (!net_eq(net, &init_net)) { 757 if (!net_eq(net, &init_net)) {
756 table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL); 758 table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
757 if (table == NULL) 759 if (!table)
758 goto err_alloc; 760 goto err_alloc;
759 761
760 table[0].data = &net->ipv4.frags.high_thresh; 762 table[0].data = &net->ipv4.frags.high_thresh;
@@ -770,7 +772,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
770 } 772 }
771 773
772 hdr = register_net_sysctl(net, "net/ipv4", table); 774 hdr = register_net_sysctl(net, "net/ipv4", table);
773 if (hdr == NULL) 775 if (!hdr)
774 goto err_reg; 776 goto err_reg;
775 777
776 net->ipv4.frags_hdr = hdr; 778 net->ipv4.frags_hdr = hdr;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 0eb2a040a830..5fd706473c73 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -182,7 +182,7 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
182 t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags, 182 t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
183 iph->daddr, iph->saddr, tpi->key); 183 iph->daddr, iph->saddr, tpi->key);
184 184
185 if (t == NULL) 185 if (!t)
186 return PACKET_REJECT; 186 return PACKET_REJECT;
187 187
188 if (t->parms.iph.daddr == 0 || 188 if (t->parms.iph.daddr == 0 ||
@@ -423,7 +423,7 @@ static int ipgre_open(struct net_device *dev)
423 return -EADDRNOTAVAIL; 423 return -EADDRNOTAVAIL;
424 dev = rt->dst.dev; 424 dev = rt->dst.dev;
425 ip_rt_put(rt); 425 ip_rt_put(rt);
426 if (__in_dev_get_rtnl(dev) == NULL) 426 if (!__in_dev_get_rtnl(dev))
427 return -EADDRNOTAVAIL; 427 return -EADDRNOTAVAIL;
428 t->mlink = dev->ifindex; 428 t->mlink = dev->ifindex;
429 ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr); 429 ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
@@ -456,6 +456,7 @@ static const struct net_device_ops ipgre_netdev_ops = {
456 .ndo_do_ioctl = ipgre_tunnel_ioctl, 456 .ndo_do_ioctl = ipgre_tunnel_ioctl,
457 .ndo_change_mtu = ip_tunnel_change_mtu, 457 .ndo_change_mtu = ip_tunnel_change_mtu,
458 .ndo_get_stats64 = ip_tunnel_get_stats64, 458 .ndo_get_stats64 = ip_tunnel_get_stats64,
459 .ndo_get_iflink = ip_tunnel_get_iflink,
459}; 460};
460 461
461#define GRE_FEATURES (NETIF_F_SG | \ 462#define GRE_FEATURES (NETIF_F_SG | \
@@ -686,6 +687,7 @@ static const struct net_device_ops gre_tap_netdev_ops = {
686 .ndo_validate_addr = eth_validate_addr, 687 .ndo_validate_addr = eth_validate_addr,
687 .ndo_change_mtu = ip_tunnel_change_mtu, 688 .ndo_change_mtu = ip_tunnel_change_mtu,
688 .ndo_get_stats64 = ip_tunnel_get_stats64, 689 .ndo_get_stats64 = ip_tunnel_get_stats64,
690 .ndo_get_iflink = ip_tunnel_get_iflink,
689}; 691};
690 692
691static void ipgre_tap_setup(struct net_device *dev) 693static void ipgre_tap_setup(struct net_device *dev)
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 3d4da2c16b6a..2db4c8773c1b 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -187,7 +187,7 @@ bool ip_call_ra_chain(struct sk_buff *skb)
187 return false; 187 return false;
188} 188}
189 189
190static int ip_local_deliver_finish(struct sk_buff *skb) 190static int ip_local_deliver_finish(struct sock *sk, struct sk_buff *skb)
191{ 191{
192 struct net *net = dev_net(skb->dev); 192 struct net *net = dev_net(skb->dev);
193 193
@@ -203,7 +203,7 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
203 raw = raw_local_deliver(skb, protocol); 203 raw = raw_local_deliver(skb, protocol);
204 204
205 ipprot = rcu_dereference(inet_protos[protocol]); 205 ipprot = rcu_dereference(inet_protos[protocol]);
206 if (ipprot != NULL) { 206 if (ipprot) {
207 int ret; 207 int ret;
208 208
209 if (!ipprot->no_policy) { 209 if (!ipprot->no_policy) {
@@ -253,7 +253,8 @@ int ip_local_deliver(struct sk_buff *skb)
253 return 0; 253 return 0;
254 } 254 }
255 255
256 return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, skb, skb->dev, NULL, 256 return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, NULL, skb,
257 skb->dev, NULL,
257 ip_local_deliver_finish); 258 ip_local_deliver_finish);
258} 259}
259 260
@@ -309,12 +310,12 @@ drop:
309int sysctl_ip_early_demux __read_mostly = 1; 310int sysctl_ip_early_demux __read_mostly = 1;
310EXPORT_SYMBOL(sysctl_ip_early_demux); 311EXPORT_SYMBOL(sysctl_ip_early_demux);
311 312
312static int ip_rcv_finish(struct sk_buff *skb) 313static int ip_rcv_finish(struct sock *sk, struct sk_buff *skb)
313{ 314{
314 const struct iphdr *iph = ip_hdr(skb); 315 const struct iphdr *iph = ip_hdr(skb);
315 struct rtable *rt; 316 struct rtable *rt;
316 317
317 if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) { 318 if (sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk) {
318 const struct net_protocol *ipprot; 319 const struct net_protocol *ipprot;
319 int protocol = iph->protocol; 320 int protocol = iph->protocol;
320 321
@@ -387,7 +388,8 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
387 388
388 IP_UPD_PO_STATS_BH(dev_net(dev), IPSTATS_MIB_IN, skb->len); 389 IP_UPD_PO_STATS_BH(dev_net(dev), IPSTATS_MIB_IN, skb->len);
389 390
390 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { 391 skb = skb_share_check(skb, GFP_ATOMIC);
392 if (!skb) {
391 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS); 393 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
392 goto out; 394 goto out;
393 } 395 }
@@ -450,7 +452,8 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
450 /* Must drop socket now because of tproxy. */ 452 /* Must drop socket now because of tproxy. */
451 skb_orphan(skb); 453 skb_orphan(skb);
452 454
453 return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, dev, NULL, 455 return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, NULL, skb,
456 dev, NULL,
454 ip_rcv_finish); 457 ip_rcv_finish);
455 458
456csum_error: 459csum_error:
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 5b3d91be2db0..bd246792360b 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -264,7 +264,7 @@ int ip_options_compile(struct net *net,
264 unsigned char *iph; 264 unsigned char *iph;
265 int optlen, l; 265 int optlen, l;
266 266
267 if (skb != NULL) { 267 if (skb) {
268 rt = skb_rtable(skb); 268 rt = skb_rtable(skb);
269 optptr = (unsigned char *)&(ip_hdr(skb)[1]); 269 optptr = (unsigned char *)&(ip_hdr(skb)[1]);
270 } else 270 } else
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 8259e777b249..5da4d15262fd 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -91,14 +91,19 @@ void ip_send_check(struct iphdr *iph)
91} 91}
92EXPORT_SYMBOL(ip_send_check); 92EXPORT_SYMBOL(ip_send_check);
93 93
94int __ip_local_out(struct sk_buff *skb) 94int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
95{ 95{
96 struct iphdr *iph = ip_hdr(skb); 96 struct iphdr *iph = ip_hdr(skb);
97 97
98 iph->tot_len = htons(skb->len); 98 iph->tot_len = htons(skb->len);
99 ip_send_check(iph); 99 ip_send_check(iph);
100 return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL, 100 return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb, NULL,
101 skb_dst(skb)->dev, dst_output); 101 skb_dst(skb)->dev, dst_output_sk);
102}
103
104int __ip_local_out(struct sk_buff *skb)
105{
106 return __ip_local_out_sk(skb->sk, skb);
102} 107}
103 108
104int ip_local_out_sk(struct sock *sk, struct sk_buff *skb) 109int ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
@@ -163,7 +168,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
163} 168}
164EXPORT_SYMBOL_GPL(ip_build_and_send_pkt); 169EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
165 170
166static inline int ip_finish_output2(struct sk_buff *skb) 171static inline int ip_finish_output2(struct sock *sk, struct sk_buff *skb)
167{ 172{
168 struct dst_entry *dst = skb_dst(skb); 173 struct dst_entry *dst = skb_dst(skb);
169 struct rtable *rt = (struct rtable *)dst; 174 struct rtable *rt = (struct rtable *)dst;
@@ -182,7 +187,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
182 struct sk_buff *skb2; 187 struct sk_buff *skb2;
183 188
184 skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev)); 189 skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
185 if (skb2 == NULL) { 190 if (!skb2) {
186 kfree_skb(skb); 191 kfree_skb(skb);
187 return -ENOMEM; 192 return -ENOMEM;
188 } 193 }
@@ -211,7 +216,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
211 return -EINVAL; 216 return -EINVAL;
212} 217}
213 218
214static int ip_finish_output_gso(struct sk_buff *skb) 219static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb)
215{ 220{
216 netdev_features_t features; 221 netdev_features_t features;
217 struct sk_buff *segs; 222 struct sk_buff *segs;
@@ -220,7 +225,7 @@ static int ip_finish_output_gso(struct sk_buff *skb)
220 /* common case: locally created skb or seglen is <= mtu */ 225 /* common case: locally created skb or seglen is <= mtu */
221 if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) || 226 if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
222 skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb)) 227 skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb))
223 return ip_finish_output2(skb); 228 return ip_finish_output2(sk, skb);
224 229
225 /* Slowpath - GSO segment length is exceeding the dst MTU. 230 /* Slowpath - GSO segment length is exceeding the dst MTU.
226 * 231 *
@@ -243,7 +248,7 @@ static int ip_finish_output_gso(struct sk_buff *skb)
243 int err; 248 int err;
244 249
245 segs->next = NULL; 250 segs->next = NULL;
246 err = ip_fragment(segs, ip_finish_output2); 251 err = ip_fragment(sk, segs, ip_finish_output2);
247 252
248 if (err && ret == 0) 253 if (err && ret == 0)
249 ret = err; 254 ret = err;
@@ -253,22 +258,22 @@ static int ip_finish_output_gso(struct sk_buff *skb)
253 return ret; 258 return ret;
254} 259}
255 260
256static int ip_finish_output(struct sk_buff *skb) 261static int ip_finish_output(struct sock *sk, struct sk_buff *skb)
257{ 262{
258#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) 263#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
259 /* Policy lookup after SNAT yielded a new policy */ 264 /* Policy lookup after SNAT yielded a new policy */
260 if (skb_dst(skb)->xfrm != NULL) { 265 if (skb_dst(skb)->xfrm) {
261 IPCB(skb)->flags |= IPSKB_REROUTED; 266 IPCB(skb)->flags |= IPSKB_REROUTED;
262 return dst_output(skb); 267 return dst_output_sk(sk, skb);
263 } 268 }
264#endif 269#endif
265 if (skb_is_gso(skb)) 270 if (skb_is_gso(skb))
266 return ip_finish_output_gso(skb); 271 return ip_finish_output_gso(sk, skb);
267 272
268 if (skb->len > ip_skb_dst_mtu(skb)) 273 if (skb->len > ip_skb_dst_mtu(skb))
269 return ip_fragment(skb, ip_finish_output2); 274 return ip_fragment(sk, skb, ip_finish_output2);
270 275
271 return ip_finish_output2(skb); 276 return ip_finish_output2(sk, skb);
272} 277}
273 278
274int ip_mc_output(struct sock *sk, struct sk_buff *skb) 279int ip_mc_output(struct sock *sk, struct sk_buff *skb)
@@ -307,7 +312,7 @@ int ip_mc_output(struct sock *sk, struct sk_buff *skb)
307 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); 312 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
308 if (newskb) 313 if (newskb)
309 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, 314 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
310 newskb, NULL, newskb->dev, 315 sk, newskb, NULL, newskb->dev,
311 dev_loopback_xmit); 316 dev_loopback_xmit);
312 } 317 }
313 318
@@ -322,11 +327,11 @@ int ip_mc_output(struct sock *sk, struct sk_buff *skb)
322 if (rt->rt_flags&RTCF_BROADCAST) { 327 if (rt->rt_flags&RTCF_BROADCAST) {
323 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); 328 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
324 if (newskb) 329 if (newskb)
325 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb, 330 NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, newskb,
326 NULL, newskb->dev, dev_loopback_xmit); 331 NULL, newskb->dev, dev_loopback_xmit);
327 } 332 }
328 333
329 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, 334 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb, NULL,
330 skb->dev, ip_finish_output, 335 skb->dev, ip_finish_output,
331 !(IPCB(skb)->flags & IPSKB_REROUTED)); 336 !(IPCB(skb)->flags & IPSKB_REROUTED));
332} 337}
@@ -340,7 +345,8 @@ int ip_output(struct sock *sk, struct sk_buff *skb)
340 skb->dev = dev; 345 skb->dev = dev;
341 skb->protocol = htons(ETH_P_IP); 346 skb->protocol = htons(ETH_P_IP);
342 347
343 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev, 348 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
349 NULL, dev,
344 ip_finish_output, 350 ip_finish_output,
345 !(IPCB(skb)->flags & IPSKB_REROUTED)); 351 !(IPCB(skb)->flags & IPSKB_REROUTED));
346} 352}
@@ -376,12 +382,12 @@ int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
376 inet_opt = rcu_dereference(inet->inet_opt); 382 inet_opt = rcu_dereference(inet->inet_opt);
377 fl4 = &fl->u.ip4; 383 fl4 = &fl->u.ip4;
378 rt = skb_rtable(skb); 384 rt = skb_rtable(skb);
379 if (rt != NULL) 385 if (rt)
380 goto packet_routed; 386 goto packet_routed;
381 387
382 /* Make sure we can route this packet. */ 388 /* Make sure we can route this packet. */
383 rt = (struct rtable *)__sk_dst_check(sk, 0); 389 rt = (struct rtable *)__sk_dst_check(sk, 0);
384 if (rt == NULL) { 390 if (!rt) {
385 __be32 daddr; 391 __be32 daddr;
386 392
387 /* Use correct destination address if we have options. */ 393 /* Use correct destination address if we have options. */
@@ -480,7 +486,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
480 * single device frame, and queue such a frame for sending. 486 * single device frame, and queue such a frame for sending.
481 */ 487 */
482 488
483int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) 489int ip_fragment(struct sock *sk, struct sk_buff *skb,
490 int (*output)(struct sock *, struct sk_buff *))
484{ 491{
485 struct iphdr *iph; 492 struct iphdr *iph;
486 int ptr; 493 int ptr;
@@ -587,13 +594,13 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
587 ip_options_fragment(frag); 594 ip_options_fragment(frag);
588 offset += skb->len - hlen; 595 offset += skb->len - hlen;
589 iph->frag_off = htons(offset>>3); 596 iph->frag_off = htons(offset>>3);
590 if (frag->next != NULL) 597 if (frag->next)
591 iph->frag_off |= htons(IP_MF); 598 iph->frag_off |= htons(IP_MF);
592 /* Ready, complete checksum */ 599 /* Ready, complete checksum */
593 ip_send_check(iph); 600 ip_send_check(iph);
594 } 601 }
595 602
596 err = output(skb); 603 err = output(sk, skb);
597 604
598 if (!err) 605 if (!err)
599 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES); 606 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
@@ -730,7 +737,7 @@ slow_path:
730 737
731 ip_send_check(iph); 738 ip_send_check(iph);
732 739
733 err = output(skb2); 740 err = output(sk, skb2);
734 if (err) 741 if (err)
735 goto fail; 742 goto fail;
736 743
@@ -790,12 +797,13 @@ static inline int ip_ufo_append_data(struct sock *sk,
790 * device, so create one single skb packet containing complete 797 * device, so create one single skb packet containing complete
791 * udp datagram 798 * udp datagram
792 */ 799 */
793 if ((skb = skb_peek_tail(queue)) == NULL) { 800 skb = skb_peek_tail(queue);
801 if (!skb) {
794 skb = sock_alloc_send_skb(sk, 802 skb = sock_alloc_send_skb(sk,
795 hh_len + fragheaderlen + transhdrlen + 20, 803 hh_len + fragheaderlen + transhdrlen + 20,
796 (flags & MSG_DONTWAIT), &err); 804 (flags & MSG_DONTWAIT), &err);
797 805
798 if (skb == NULL) 806 if (!skb)
799 return err; 807 return err;
800 808
801 /* reserve space for Hardware header */ 809 /* reserve space for Hardware header */
@@ -961,10 +969,10 @@ alloc_new_skb:
961 skb = sock_wmalloc(sk, 969 skb = sock_wmalloc(sk,
962 alloclen + hh_len + 15, 1, 970 alloclen + hh_len + 15, 1,
963 sk->sk_allocation); 971 sk->sk_allocation);
964 if (unlikely(skb == NULL)) 972 if (unlikely(!skb))
965 err = -ENOBUFS; 973 err = -ENOBUFS;
966 } 974 }
967 if (skb == NULL) 975 if (!skb)
968 goto error; 976 goto error;
969 977
970 /* 978 /*
@@ -1088,10 +1096,10 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
1088 */ 1096 */
1089 opt = ipc->opt; 1097 opt = ipc->opt;
1090 if (opt) { 1098 if (opt) {
1091 if (cork->opt == NULL) { 1099 if (!cork->opt) {
1092 cork->opt = kmalloc(sizeof(struct ip_options) + 40, 1100 cork->opt = kmalloc(sizeof(struct ip_options) + 40,
1093 sk->sk_allocation); 1101 sk->sk_allocation);
1094 if (unlikely(cork->opt == NULL)) 1102 if (unlikely(!cork->opt))
1095 return -ENOBUFS; 1103 return -ENOBUFS;
1096 } 1104 }
1097 memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen); 1105 memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
@@ -1198,7 +1206,8 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
1198 return -EMSGSIZE; 1206 return -EMSGSIZE;
1199 } 1207 }
1200 1208
1201 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) 1209 skb = skb_peek_tail(&sk->sk_write_queue);
1210 if (!skb)
1202 return -EINVAL; 1211 return -EINVAL;
1203 1212
1204 cork->length += size; 1213 cork->length += size;
@@ -1329,7 +1338,8 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
1329 __be16 df = 0; 1338 __be16 df = 0;
1330 __u8 ttl; 1339 __u8 ttl;
1331 1340
1332 if ((skb = __skb_dequeue(queue)) == NULL) 1341 skb = __skb_dequeue(queue);
1342 if (!skb)
1333 goto out; 1343 goto out;
1334 tail_skb = &(skb_shinfo(skb)->frag_list); 1344 tail_skb = &(skb_shinfo(skb)->frag_list);
1335 1345
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index f6a0d54b308a..7cfb0893f263 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -351,7 +351,7 @@ int ip_ra_control(struct sock *sk, unsigned char on,
351 return 0; 351 return 0;
352 } 352 }
353 } 353 }
354 if (new_ra == NULL) { 354 if (!new_ra) {
355 spin_unlock_bh(&ip_ra_lock); 355 spin_unlock_bh(&ip_ra_lock);
356 return -ENOBUFS; 356 return -ENOBUFS;
357 } 357 }
@@ -387,7 +387,7 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
387 skb_network_header(skb); 387 skb_network_header(skb);
388 serr->port = port; 388 serr->port = port;
389 389
390 if (skb_pull(skb, payload - skb->data) != NULL) { 390 if (skb_pull(skb, payload - skb->data)) {
391 skb_reset_transport_header(skb); 391 skb_reset_transport_header(skb);
392 if (sock_queue_err_skb(sk, skb) == 0) 392 if (sock_queue_err_skb(sk, skb) == 0)
393 return; 393 return;
@@ -482,7 +482,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
482 482
483 err = -EAGAIN; 483 err = -EAGAIN;
484 skb = sock_dequeue_err_skb(sk); 484 skb = sock_dequeue_err_skb(sk);
485 if (skb == NULL) 485 if (!skb)
486 goto out; 486 goto out;
487 487
488 copied = skb->len; 488 copied = skb->len;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 2cd08280c77b..4c2c3ba4ba65 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -389,7 +389,6 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
389 hlen = tdev->hard_header_len + tdev->needed_headroom; 389 hlen = tdev->hard_header_len + tdev->needed_headroom;
390 mtu = tdev->mtu; 390 mtu = tdev->mtu;
391 } 391 }
392 dev->iflink = tunnel->parms.link;
393 392
394 dev->needed_headroom = t_hlen + hlen; 393 dev->needed_headroom = t_hlen + hlen;
395 mtu -= (dev->hard_header_len + t_hlen); 394 mtu -= (dev->hard_header_len + t_hlen);
@@ -655,7 +654,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
655 if (dst == 0) { 654 if (dst == 0) {
656 /* NBMA tunnel */ 655 /* NBMA tunnel */
657 656
658 if (skb_dst(skb) == NULL) { 657 if (!skb_dst(skb)) {
659 dev->stats.tx_fifo_errors++; 658 dev->stats.tx_fifo_errors++;
660 goto tx_error; 659 goto tx_error;
661 } 660 }
@@ -673,7 +672,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
673 672
674 neigh = dst_neigh_lookup(skb_dst(skb), 673 neigh = dst_neigh_lookup(skb_dst(skb),
675 &ipv6_hdr(skb)->daddr); 674 &ipv6_hdr(skb)->daddr);
676 if (neigh == NULL) 675 if (!neigh)
677 goto tx_error; 676 goto tx_error;
678 677
679 addr6 = (const struct in6_addr *)&neigh->primary_key; 678 addr6 = (const struct in6_addr *)&neigh->primary_key;
@@ -783,7 +782,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
783 return; 782 return;
784 } 783 }
785 784
786 err = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr, protocol, 785 err = iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol,
787 tos, ttl, df, !net_eq(tunnel->net, dev_net(dev))); 786 tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
788 iptunnel_xmit_stats(err, &dev->stats, dev->tstats); 787 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
789 788
@@ -844,7 +843,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
844 case SIOCGETTUNNEL: 843 case SIOCGETTUNNEL:
845 if (dev == itn->fb_tunnel_dev) { 844 if (dev == itn->fb_tunnel_dev) {
846 t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type); 845 t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
847 if (t == NULL) 846 if (!t)
848 t = netdev_priv(dev); 847 t = netdev_priv(dev);
849 } 848 }
850 memcpy(p, &t->parms, sizeof(*p)); 849 memcpy(p, &t->parms, sizeof(*p));
@@ -877,7 +876,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
877 break; 876 break;
878 } 877 }
879 if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) { 878 if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
880 if (t != NULL) { 879 if (t) {
881 if (t->dev != dev) { 880 if (t->dev != dev) {
882 err = -EEXIST; 881 err = -EEXIST;
883 break; 882 break;
@@ -915,7 +914,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
915 if (dev == itn->fb_tunnel_dev) { 914 if (dev == itn->fb_tunnel_dev) {
916 err = -ENOENT; 915 err = -ENOENT;
917 t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type); 916 t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
918 if (t == NULL) 917 if (!t)
919 goto done; 918 goto done;
920 err = -EPERM; 919 err = -EPERM;
921 if (t == netdev_priv(itn->fb_tunnel_dev)) 920 if (t == netdev_priv(itn->fb_tunnel_dev))
@@ -980,6 +979,14 @@ struct net *ip_tunnel_get_link_net(const struct net_device *dev)
980} 979}
981EXPORT_SYMBOL(ip_tunnel_get_link_net); 980EXPORT_SYMBOL(ip_tunnel_get_link_net);
982 981
982int ip_tunnel_get_iflink(const struct net_device *dev)
983{
984 struct ip_tunnel *tunnel = netdev_priv(dev);
985
986 return tunnel->parms.link;
987}
988EXPORT_SYMBOL(ip_tunnel_get_iflink);
989
983int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, 990int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
984 struct rtnl_link_ops *ops, char *devname) 991 struct rtnl_link_ops *ops, char *devname)
985{ 992{
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 5a6e27054f0a..9f7269f3c54a 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -60,7 +60,7 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
60 60
61 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, 61 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
62 iph->saddr, iph->daddr, 0); 62 iph->saddr, iph->daddr, 0);
63 if (tunnel != NULL) { 63 if (tunnel) {
64 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 64 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
65 goto drop; 65 goto drop;
66 66
@@ -341,6 +341,7 @@ static const struct net_device_ops vti_netdev_ops = {
341 .ndo_do_ioctl = vti_tunnel_ioctl, 341 .ndo_do_ioctl = vti_tunnel_ioctl,
342 .ndo_change_mtu = ip_tunnel_change_mtu, 342 .ndo_change_mtu = ip_tunnel_change_mtu,
343 .ndo_get_stats64 = ip_tunnel_get_stats64, 343 .ndo_get_stats64 = ip_tunnel_get_stats64,
344 .ndo_get_iflink = ip_tunnel_get_iflink,
344}; 345};
345 346
346static void vti_tunnel_setup(struct net_device *dev) 347static void vti_tunnel_setup(struct net_device *dev)
@@ -361,7 +362,6 @@ static int vti_tunnel_init(struct net_device *dev)
361 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); 362 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
362 dev->mtu = ETH_DATA_LEN; 363 dev->mtu = ETH_DATA_LEN;
363 dev->flags = IFF_NOARP; 364 dev->flags = IFF_NOARP;
364 dev->iflink = 0;
365 dev->addr_len = 4; 365 dev->addr_len = 4;
366 dev->features |= NETIF_F_LLTX; 366 dev->features |= NETIF_F_LLTX;
367 netif_keep_dst(dev); 367 netif_keep_dst(dev);
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index c0855d50a3fa..d97f4f2787f5 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -63,7 +63,7 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
63 struct xfrm_state *t; 63 struct xfrm_state *t;
64 64
65 t = xfrm_state_alloc(net); 65 t = xfrm_state_alloc(net);
66 if (t == NULL) 66 if (!t)
67 goto out; 67 goto out;
68 68
69 t->id.proto = IPPROTO_IPIP; 69 t->id.proto = IPPROTO_IPIP;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index b26376ef87f6..8e7328c6a390 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -504,7 +504,8 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
504 if (!net_eq(dev_net(dev), &init_net)) 504 if (!net_eq(dev_net(dev), &init_net))
505 goto drop; 505 goto drop;
506 506
507 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) 507 skb = skb_share_check(skb, GFP_ATOMIC);
508 if (!skb)
508 return NET_RX_DROP; 509 return NET_RX_DROP;
509 510
510 if (!pskb_may_pull(skb, sizeof(struct arphdr))) 511 if (!pskb_may_pull(skb, sizeof(struct arphdr)))
@@ -958,7 +959,8 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
958 if (skb->pkt_type == PACKET_OTHERHOST) 959 if (skb->pkt_type == PACKET_OTHERHOST)
959 goto drop; 960 goto drop;
960 961
961 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) 962 skb = skb_share_check(skb, GFP_ATOMIC);
963 if (!skb)
962 return NET_RX_DROP; 964 return NET_RX_DROP;
963 965
964 if (!pskb_may_pull(skb, 966 if (!pskb_may_pull(skb,
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index bfbcc85c02ee..ff96396ebec5 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -144,7 +144,7 @@ static int ipip_err(struct sk_buff *skb, u32 info)
144 err = -ENOENT; 144 err = -ENOENT;
145 t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, 145 t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
146 iph->daddr, iph->saddr, 0); 146 iph->daddr, iph->saddr, 0);
147 if (t == NULL) 147 if (!t)
148 goto out; 148 goto out;
149 149
150 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { 150 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
@@ -272,6 +272,7 @@ static const struct net_device_ops ipip_netdev_ops = {
272 .ndo_do_ioctl = ipip_tunnel_ioctl, 272 .ndo_do_ioctl = ipip_tunnel_ioctl,
273 .ndo_change_mtu = ip_tunnel_change_mtu, 273 .ndo_change_mtu = ip_tunnel_change_mtu,
274 .ndo_get_stats64 = ip_tunnel_get_stats64, 274 .ndo_get_stats64 = ip_tunnel_get_stats64,
275 .ndo_get_iflink = ip_tunnel_get_iflink,
275}; 276};
276 277
277#define IPIP_FEATURES (NETIF_F_SG | \ 278#define IPIP_FEATURES (NETIF_F_SG | \
@@ -286,7 +287,6 @@ static void ipip_tunnel_setup(struct net_device *dev)
286 287
287 dev->type = ARPHRD_TUNNEL; 288 dev->type = ARPHRD_TUNNEL;
288 dev->flags = IFF_NOARP; 289 dev->flags = IFF_NOARP;
289 dev->iflink = 0;
290 dev->addr_len = 4; 290 dev->addr_len = 4;
291 dev->features |= NETIF_F_LLTX; 291 dev->features |= NETIF_F_LLTX;
292 netif_keep_dst(dev); 292 netif_keep_dst(dev);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index b4a545d24adb..3a2c0162c3ba 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -189,7 +189,7 @@ static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
189 } 189 }
190 190
191 mrt = ipmr_get_table(rule->fr_net, rule->table); 191 mrt = ipmr_get_table(rule->fr_net, rule->table);
192 if (mrt == NULL) 192 if (!mrt)
193 return -EAGAIN; 193 return -EAGAIN;
194 res->mrt = mrt; 194 res->mrt = mrt;
195 return 0; 195 return 0;
@@ -253,7 +253,7 @@ static int __net_init ipmr_rules_init(struct net *net)
253 INIT_LIST_HEAD(&net->ipv4.mr_tables); 253 INIT_LIST_HEAD(&net->ipv4.mr_tables);
254 254
255 mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); 255 mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
256 if (mrt == NULL) { 256 if (!mrt) {
257 err = -ENOMEM; 257 err = -ENOMEM;
258 goto err1; 258 goto err1;
259 } 259 }
@@ -266,7 +266,7 @@ static int __net_init ipmr_rules_init(struct net *net)
266 return 0; 266 return 0;
267 267
268err2: 268err2:
269 kfree(mrt); 269 ipmr_free_table(mrt);
270err1: 270err1:
271 fib_rules_unregister(ops); 271 fib_rules_unregister(ops);
272 return err; 272 return err;
@@ -276,11 +276,13 @@ static void __net_exit ipmr_rules_exit(struct net *net)
276{ 276{
277 struct mr_table *mrt, *next; 277 struct mr_table *mrt, *next;
278 278
279 rtnl_lock();
279 list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { 280 list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
280 list_del(&mrt->list); 281 list_del(&mrt->list);
281 ipmr_free_table(mrt); 282 ipmr_free_table(mrt);
282 } 283 }
283 fib_rules_unregister(net->ipv4.mr_rules_ops); 284 fib_rules_unregister(net->ipv4.mr_rules_ops);
285 rtnl_unlock();
284} 286}
285#else 287#else
286#define ipmr_for_each_table(mrt, net) \ 288#define ipmr_for_each_table(mrt, net) \
@@ -306,7 +308,10 @@ static int __net_init ipmr_rules_init(struct net *net)
306 308
307static void __net_exit ipmr_rules_exit(struct net *net) 309static void __net_exit ipmr_rules_exit(struct net *net)
308{ 310{
311 rtnl_lock();
309 ipmr_free_table(net->ipv4.mrt); 312 ipmr_free_table(net->ipv4.mrt);
313 net->ipv4.mrt = NULL;
314 rtnl_unlock();
310} 315}
311#endif 316#endif
312 317
@@ -316,11 +321,11 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
316 unsigned int i; 321 unsigned int i;
317 322
318 mrt = ipmr_get_table(net, id); 323 mrt = ipmr_get_table(net, id);
319 if (mrt != NULL) 324 if (mrt)
320 return mrt; 325 return mrt;
321 326
322 mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); 327 mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
323 if (mrt == NULL) 328 if (!mrt)
324 return NULL; 329 return NULL;
325 write_pnet(&mrt->net, net); 330 write_pnet(&mrt->net, net);
326 mrt->id = id; 331 mrt->id = id;
@@ -422,7 +427,7 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
422 dev->flags |= IFF_MULTICAST; 427 dev->flags |= IFF_MULTICAST;
423 428
424 in_dev = __in_dev_get_rtnl(dev); 429 in_dev = __in_dev_get_rtnl(dev);
425 if (in_dev == NULL) 430 if (!in_dev)
426 goto failure; 431 goto failure;
427 432
428 ipv4_devconf_setall(in_dev); 433 ipv4_devconf_setall(in_dev);
@@ -473,8 +478,14 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
473 return NETDEV_TX_OK; 478 return NETDEV_TX_OK;
474} 479}
475 480
481static int reg_vif_get_iflink(const struct net_device *dev)
482{
483 return 0;
484}
485
476static const struct net_device_ops reg_vif_netdev_ops = { 486static const struct net_device_ops reg_vif_netdev_ops = {
477 .ndo_start_xmit = reg_vif_xmit, 487 .ndo_start_xmit = reg_vif_xmit,
488 .ndo_get_iflink = reg_vif_get_iflink,
478}; 489};
479 490
480static void reg_vif_setup(struct net_device *dev) 491static void reg_vif_setup(struct net_device *dev)
@@ -500,7 +511,7 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
500 511
501 dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup); 512 dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
502 513
503 if (dev == NULL) 514 if (!dev)
504 return NULL; 515 return NULL;
505 516
506 dev_net_set(dev, net); 517 dev_net_set(dev, net);
@@ -509,7 +520,6 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
509 free_netdev(dev); 520 free_netdev(dev);
510 return NULL; 521 return NULL;
511 } 522 }
512 dev->iflink = 0;
513 523
514 rcu_read_lock(); 524 rcu_read_lock();
515 in_dev = __in_dev_get_rcu(dev); 525 in_dev = __in_dev_get_rcu(dev);
@@ -757,7 +767,7 @@ static int vif_add(struct net *net, struct mr_table *mrt,
757 case 0: 767 case 0:
758 if (vifc->vifc_flags == VIFF_USE_IFINDEX) { 768 if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
759 dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex); 769 dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
760 if (dev && __in_dev_get_rtnl(dev) == NULL) { 770 if (dev && !__in_dev_get_rtnl(dev)) {
761 dev_put(dev); 771 dev_put(dev);
762 return -EADDRNOTAVAIL; 772 return -EADDRNOTAVAIL;
763 } 773 }
@@ -801,7 +811,7 @@ static int vif_add(struct net *net, struct mr_table *mrt,
801 v->pkt_out = 0; 811 v->pkt_out = 0;
802 v->link = dev->ifindex; 812 v->link = dev->ifindex;
803 if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER)) 813 if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
804 v->link = dev->iflink; 814 v->link = dev_get_iflink(dev);
805 815
806 /* And finish update writing critical data */ 816 /* And finish update writing critical data */
807 write_lock_bh(&mrt_lock); 817 write_lock_bh(&mrt_lock);
@@ -1003,7 +1013,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
1003 1013
1004 rcu_read_lock(); 1014 rcu_read_lock();
1005 mroute_sk = rcu_dereference(mrt->mroute_sk); 1015 mroute_sk = rcu_dereference(mrt->mroute_sk);
1006 if (mroute_sk == NULL) { 1016 if (!mroute_sk) {
1007 rcu_read_unlock(); 1017 rcu_read_unlock();
1008 kfree_skb(skb); 1018 kfree_skb(skb);
1009 return -EINVAL; 1019 return -EINVAL;
@@ -1156,7 +1166,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
1156 return -EINVAL; 1166 return -EINVAL;
1157 1167
1158 c = ipmr_cache_alloc(); 1168 c = ipmr_cache_alloc();
1159 if (c == NULL) 1169 if (!c)
1160 return -ENOMEM; 1170 return -ENOMEM;
1161 1171
1162 c->mfc_origin = mfc->mfcc_origin.s_addr; 1172 c->mfc_origin = mfc->mfcc_origin.s_addr;
@@ -1278,7 +1288,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
1278 return -EOPNOTSUPP; 1288 return -EOPNOTSUPP;
1279 1289
1280 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); 1290 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1281 if (mrt == NULL) 1291 if (!mrt)
1282 return -ENOENT; 1292 return -ENOENT;
1283 1293
1284 if (optname != MRT_INIT) { 1294 if (optname != MRT_INIT) {
@@ -1441,7 +1451,7 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
1441 return -EOPNOTSUPP; 1451 return -EOPNOTSUPP;
1442 1452
1443 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); 1453 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1444 if (mrt == NULL) 1454 if (!mrt)
1445 return -ENOENT; 1455 return -ENOENT;
1446 1456
1447 if (optname != MRT_VERSION && 1457 if (optname != MRT_VERSION &&
@@ -1487,7 +1497,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1487 struct mr_table *mrt; 1497 struct mr_table *mrt;
1488 1498
1489 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); 1499 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1490 if (mrt == NULL) 1500 if (!mrt)
1491 return -ENOENT; 1501 return -ENOENT;
1492 1502
1493 switch (cmd) { 1503 switch (cmd) {
@@ -1561,7 +1571,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1561 struct mr_table *mrt; 1571 struct mr_table *mrt;
1562 1572
1563 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); 1573 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1564 if (mrt == NULL) 1574 if (!mrt)
1565 return -ENOENT; 1575 return -ENOENT;
1566 1576
1567 switch (cmd) { 1577 switch (cmd) {
@@ -1669,7 +1679,7 @@ static void ip_encap(struct net *net, struct sk_buff *skb,
1669 nf_reset(skb); 1679 nf_reset(skb);
1670} 1680}
1671 1681
1672static inline int ipmr_forward_finish(struct sk_buff *skb) 1682static inline int ipmr_forward_finish(struct sock *sk, struct sk_buff *skb)
1673{ 1683{
1674 struct ip_options *opt = &(IPCB(skb)->opt); 1684 struct ip_options *opt = &(IPCB(skb)->opt);
1675 1685
@@ -1679,7 +1689,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
1679 if (unlikely(opt->optlen)) 1689 if (unlikely(opt->optlen))
1680 ip_forward_options(skb); 1690 ip_forward_options(skb);
1681 1691
1682 return dst_output(skb); 1692 return dst_output_sk(sk, skb);
1683} 1693}
1684 1694
1685/* 1695/*
@@ -1696,7 +1706,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1696 struct flowi4 fl4; 1706 struct flowi4 fl4;
1697 int encap = 0; 1707 int encap = 0;
1698 1708
1699 if (vif->dev == NULL) 1709 if (!vif->dev)
1700 goto out_free; 1710 goto out_free;
1701 1711
1702#ifdef CONFIG_IP_PIMSM 1712#ifdef CONFIG_IP_PIMSM
@@ -1778,7 +1788,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1778 * not mrouter) cannot join to more than one interface - it will 1788 * not mrouter) cannot join to more than one interface - it will
1779 * result in receiving multiple packets. 1789 * result in receiving multiple packets.
1780 */ 1790 */
1781 NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev, 1791 NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, NULL, skb,
1792 skb->dev, dev,
1782 ipmr_forward_finish); 1793 ipmr_forward_finish);
1783 return; 1794 return;
1784 1795
@@ -1987,7 +1998,7 @@ int ip_mr_input(struct sk_buff *skb)
1987 1998
1988 /* already under rcu_read_lock() */ 1999 /* already under rcu_read_lock() */
1989 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); 2000 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
1990 if (cache == NULL) { 2001 if (!cache) {
1991 int vif = ipmr_find_vif(mrt, skb->dev); 2002 int vif = ipmr_find_vif(mrt, skb->dev);
1992 2003
1993 if (vif >= 0) 2004 if (vif >= 0)
@@ -1998,13 +2009,13 @@ int ip_mr_input(struct sk_buff *skb)
1998 /* 2009 /*
1999 * No usable cache entry 2010 * No usable cache entry
2000 */ 2011 */
2001 if (cache == NULL) { 2012 if (!cache) {
2002 int vif; 2013 int vif;
2003 2014
2004 if (local) { 2015 if (local) {
2005 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 2016 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2006 ip_local_deliver(skb); 2017 ip_local_deliver(skb);
2007 if (skb2 == NULL) 2018 if (!skb2)
2008 return -ENOBUFS; 2019 return -ENOBUFS;
2009 skb = skb2; 2020 skb = skb2;
2010 } 2021 }
@@ -2063,7 +2074,7 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
2063 reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev; 2074 reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
2064 read_unlock(&mrt_lock); 2075 read_unlock(&mrt_lock);
2065 2076
2066 if (reg_dev == NULL) 2077 if (!reg_dev)
2067 return 1; 2078 return 1;
2068 2079
2069 skb->mac_header = skb->network_header; 2080 skb->mac_header = skb->network_header;
@@ -2193,18 +2204,18 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
2193 int err; 2204 int err;
2194 2205
2195 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); 2206 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2196 if (mrt == NULL) 2207 if (!mrt)
2197 return -ENOENT; 2208 return -ENOENT;
2198 2209
2199 rcu_read_lock(); 2210 rcu_read_lock();
2200 cache = ipmr_cache_find(mrt, saddr, daddr); 2211 cache = ipmr_cache_find(mrt, saddr, daddr);
2201 if (cache == NULL && skb->dev) { 2212 if (!cache && skb->dev) {
2202 int vif = ipmr_find_vif(mrt, skb->dev); 2213 int vif = ipmr_find_vif(mrt, skb->dev);
2203 2214
2204 if (vif >= 0) 2215 if (vif >= 0)
2205 cache = ipmr_cache_find_any(mrt, daddr, vif); 2216 cache = ipmr_cache_find_any(mrt, daddr, vif);
2206 } 2217 }
2207 if (cache == NULL) { 2218 if (!cache) {
2208 struct sk_buff *skb2; 2219 struct sk_buff *skb2;
2209 struct iphdr *iph; 2220 struct iphdr *iph;
2210 struct net_device *dev; 2221 struct net_device *dev;
@@ -2262,7 +2273,7 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2262 int err; 2273 int err;
2263 2274
2264 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags); 2275 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2265 if (nlh == NULL) 2276 if (!nlh)
2266 return -EMSGSIZE; 2277 return -EMSGSIZE;
2267 2278
2268 rtm = nlmsg_data(nlh); 2279 rtm = nlmsg_data(nlh);
@@ -2327,7 +2338,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
2327 2338
2328 skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif), 2339 skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
2329 GFP_ATOMIC); 2340 GFP_ATOMIC);
2330 if (skb == NULL) 2341 if (!skb)
2331 goto errout; 2342 goto errout;
2332 2343
2333 err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0); 2344 err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
@@ -2442,7 +2453,7 @@ static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
2442 struct mr_table *mrt; 2453 struct mr_table *mrt;
2443 2454
2444 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); 2455 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2445 if (mrt == NULL) 2456 if (!mrt)
2446 return ERR_PTR(-ENOENT); 2457 return ERR_PTR(-ENOENT);
2447 2458
2448 iter->mrt = mrt; 2459 iter->mrt = mrt;
@@ -2561,7 +2572,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
2561 struct mr_table *mrt; 2572 struct mr_table *mrt;
2562 2573
2563 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); 2574 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2564 if (mrt == NULL) 2575 if (!mrt)
2565 return ERR_PTR(-ENOENT); 2576 return ERR_PTR(-ENOENT);
2566 2577
2567 it->mrt = mrt; 2578 it->mrt = mrt;
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 7ebd6e37875c..65de0684e22a 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -94,7 +94,7 @@ static void nf_ip_saveroute(const struct sk_buff *skb,
94{ 94{
95 struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry); 95 struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
96 96
97 if (entry->hook == NF_INET_LOCAL_OUT) { 97 if (entry->state.hook == NF_INET_LOCAL_OUT) {
98 const struct iphdr *iph = ip_hdr(skb); 98 const struct iphdr *iph = ip_hdr(skb);
99 99
100 rt_info->tos = iph->tos; 100 rt_info->tos = iph->tos;
@@ -109,7 +109,7 @@ static int nf_ip_reroute(struct sk_buff *skb,
109{ 109{
110 const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry); 110 const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
111 111
112 if (entry->hook == NF_INET_LOCAL_OUT) { 112 if (entry->state.hook == NF_INET_LOCAL_OUT) {
113 const struct iphdr *iph = ip_hdr(skb); 113 const struct iphdr *iph = ip_hdr(skb);
114 114
115 if (!(iph->tos == rt_info->tos && 115 if (!(iph->tos == rt_info->tos &&
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index f95b6f93814b..13bfe84bf3ca 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -248,8 +248,7 @@ struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
248 248
249unsigned int arpt_do_table(struct sk_buff *skb, 249unsigned int arpt_do_table(struct sk_buff *skb,
250 unsigned int hook, 250 unsigned int hook,
251 const struct net_device *in, 251 const struct nf_hook_state *state,
252 const struct net_device *out,
253 struct xt_table *table) 252 struct xt_table *table)
254{ 253{
255 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); 254 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
@@ -265,8 +264,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
265 if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) 264 if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
266 return NF_DROP; 265 return NF_DROP;
267 266
268 indev = in ? in->name : nulldevname; 267 indev = state->in ? state->in->name : nulldevname;
269 outdev = out ? out->name : nulldevname; 268 outdev = state->out ? state->out->name : nulldevname;
270 269
271 local_bh_disable(); 270 local_bh_disable();
272 addend = xt_write_recseq_begin(); 271 addend = xt_write_recseq_begin();
@@ -281,8 +280,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
281 e = get_entry(table_base, private->hook_entry[hook]); 280 e = get_entry(table_base, private->hook_entry[hook]);
282 back = get_entry(table_base, private->underflow[hook]); 281 back = get_entry(table_base, private->underflow[hook]);
283 282
284 acpar.in = in; 283 acpar.in = state->in;
285 acpar.out = out; 284 acpar.out = state->out;
286 acpar.hooknum = hook; 285 acpar.hooknum = hook;
287 acpar.family = NFPROTO_ARP; 286 acpar.family = NFPROTO_ARP;
288 acpar.hotdrop = false; 287 acpar.hotdrop = false;
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index 802ddecb30b8..93876d03120c 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -28,12 +28,11 @@ static const struct xt_table packet_filter = {
28/* The work comes in here from netfilter.c */ 28/* The work comes in here from netfilter.c */
29static unsigned int 29static unsigned int
30arptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, 30arptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
31 const struct net_device *in, const struct net_device *out, 31 const struct nf_hook_state *state)
32 int (*okfn)(struct sk_buff *))
33{ 32{
34 const struct net *net = dev_net((in != NULL) ? in : out); 33 const struct net *net = dev_net(state->in ? state->in : state->out);
35 34
36 return arpt_do_table(skb, ops->hooknum, in, out, 35 return arpt_do_table(skb, ops->hooknum, state,
37 net->ipv4.arptable_filter); 36 net->ipv4.arptable_filter);
38} 37}
39 38
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index cf5e82f39d3b..c69db7fa25ee 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -288,8 +288,7 @@ struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
288unsigned int 288unsigned int
289ipt_do_table(struct sk_buff *skb, 289ipt_do_table(struct sk_buff *skb,
290 unsigned int hook, 290 unsigned int hook,
291 const struct net_device *in, 291 const struct nf_hook_state *state,
292 const struct net_device *out,
293 struct xt_table *table) 292 struct xt_table *table)
294{ 293{
295 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); 294 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
@@ -306,8 +305,8 @@ ipt_do_table(struct sk_buff *skb,
306 305
307 /* Initialization */ 306 /* Initialization */
308 ip = ip_hdr(skb); 307 ip = ip_hdr(skb);
309 indev = in ? in->name : nulldevname; 308 indev = state->in ? state->in->name : nulldevname;
310 outdev = out ? out->name : nulldevname; 309 outdev = state->out ? state->out->name : nulldevname;
311 /* We handle fragments by dealing with the first fragment as 310 /* We handle fragments by dealing with the first fragment as
312 * if it was a normal packet. All other fragments are treated 311 * if it was a normal packet. All other fragments are treated
313 * normally, except that they will NEVER match rules that ask 312 * normally, except that they will NEVER match rules that ask
@@ -317,8 +316,8 @@ ipt_do_table(struct sk_buff *skb,
317 acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET; 316 acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
318 acpar.thoff = ip_hdrlen(skb); 317 acpar.thoff = ip_hdrlen(skb);
319 acpar.hotdrop = false; 318 acpar.hotdrop = false;
320 acpar.in = in; 319 acpar.in = state->in;
321 acpar.out = out; 320 acpar.out = state->out;
322 acpar.family = NFPROTO_IPV4; 321 acpar.family = NFPROTO_IPV4;
323 acpar.hooknum = hook; 322 acpar.hooknum = hook;
324 323
@@ -370,7 +369,7 @@ ipt_do_table(struct sk_buff *skb,
370#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) 369#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
371 /* The packet is traced: log it */ 370 /* The packet is traced: log it */
372 if (unlikely(skb->nf_trace)) 371 if (unlikely(skb->nf_trace))
373 trace_packet(skb, hook, in, out, 372 trace_packet(skb, hook, state->in, state->out,
374 table->name, private, e); 373 table->name, private, e);
375#endif 374#endif
376 /* Standard target? */ 375 /* Standard target? */
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index f75e9df5e017..771ab3d01ad3 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -504,14 +504,12 @@ static void arp_print(struct arp_payload *payload)
504static unsigned int 504static unsigned int
505arp_mangle(const struct nf_hook_ops *ops, 505arp_mangle(const struct nf_hook_ops *ops,
506 struct sk_buff *skb, 506 struct sk_buff *skb,
507 const struct net_device *in, 507 const struct nf_hook_state *state)
508 const struct net_device *out,
509 int (*okfn)(struct sk_buff *))
510{ 508{
511 struct arphdr *arp = arp_hdr(skb); 509 struct arphdr *arp = arp_hdr(skb);
512 struct arp_payload *payload; 510 struct arp_payload *payload;
513 struct clusterip_config *c; 511 struct clusterip_config *c;
514 struct net *net = dev_net(in ? in : out); 512 struct net *net = dev_net(state->in ? state->in : state->out);
515 513
516 /* we don't care about non-ethernet and non-ipv4 ARP */ 514 /* we don't care about non-ethernet and non-ipv4 ARP */
517 if (arp->ar_hrd != htons(ARPHRD_ETHER) || 515 if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
@@ -536,10 +534,10 @@ arp_mangle(const struct nf_hook_ops *ops,
536 * addresses on different interfacs. However, in the CLUSTERIP case 534 * addresses on different interfacs. However, in the CLUSTERIP case
537 * this wouldn't work, since we didn't subscribe the mcast group on 535 * this wouldn't work, since we didn't subscribe the mcast group on
538 * other interfaces */ 536 * other interfaces */
539 if (c->dev != out) { 537 if (c->dev != state->out) {
540 pr_debug("not mangling arp reply on different " 538 pr_debug("not mangling arp reply on different "
541 "interface: cip'%s'-skb'%s'\n", 539 "interface: cip'%s'-skb'%s'\n",
542 c->dev->name, out->name); 540 c->dev->name, state->out->name);
543 clusterip_config_put(c); 541 clusterip_config_put(c);
544 return NF_ACCEPT; 542 return NF_ACCEPT;
545 } 543 }
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index a313c3fbeb46..e9e67793055f 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -300,11 +300,9 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
300 300
301static unsigned int ipv4_synproxy_hook(const struct nf_hook_ops *ops, 301static unsigned int ipv4_synproxy_hook(const struct nf_hook_ops *ops,
302 struct sk_buff *skb, 302 struct sk_buff *skb,
303 const struct net_device *in, 303 const struct nf_hook_state *nhs)
304 const struct net_device *out,
305 int (*okfn)(struct sk_buff *))
306{ 304{
307 struct synproxy_net *snet = synproxy_pernet(dev_net(in ? : out)); 305 struct synproxy_net *snet = synproxy_pernet(dev_net(nhs->in ? : nhs->out));
308 enum ip_conntrack_info ctinfo; 306 enum ip_conntrack_info ctinfo;
309 struct nf_conn *ct; 307 struct nf_conn *ct;
310 struct nf_conn_synproxy *synproxy; 308 struct nf_conn_synproxy *synproxy;
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index e08a74a243a8..a0f3beca52d2 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -34,8 +34,7 @@ static const struct xt_table packet_filter = {
34 34
35static unsigned int 35static unsigned int
36iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, 36iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
37 const struct net_device *in, const struct net_device *out, 37 const struct nf_hook_state *state)
38 int (*okfn)(struct sk_buff *))
39{ 38{
40 const struct net *net; 39 const struct net *net;
41 40
@@ -45,9 +44,8 @@ iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
45 /* root is playing with raw sockets. */ 44 /* root is playing with raw sockets. */
46 return NF_ACCEPT; 45 return NF_ACCEPT;
47 46
48 net = dev_net((in != NULL) ? in : out); 47 net = dev_net(state->in ? state->in : state->out);
49 return ipt_do_table(skb, ops->hooknum, in, out, 48 return ipt_do_table(skb, ops->hooknum, state, net->ipv4.iptable_filter);
50 net->ipv4.iptable_filter);
51} 49}
52 50
53static struct nf_hook_ops *filter_ops __read_mostly; 51static struct nf_hook_ops *filter_ops __read_mostly;
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 6a5079c34bb3..62cbb8c5f4a8 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -37,8 +37,9 @@ static const struct xt_table packet_mangler = {
37}; 37};
38 38
39static unsigned int 39static unsigned int
40ipt_mangle_out(struct sk_buff *skb, const struct net_device *out) 40ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
41{ 41{
42 struct net_device *out = state->out;
42 unsigned int ret; 43 unsigned int ret;
43 const struct iphdr *iph; 44 const struct iphdr *iph;
44 u_int8_t tos; 45 u_int8_t tos;
@@ -58,7 +59,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
58 daddr = iph->daddr; 59 daddr = iph->daddr;
59 tos = iph->tos; 60 tos = iph->tos;
60 61
61 ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, NULL, out, 62 ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, state,
62 dev_net(out)->ipv4.iptable_mangle); 63 dev_net(out)->ipv4.iptable_mangle);
63 /* Reroute for ANY change. */ 64 /* Reroute for ANY change. */
64 if (ret != NF_DROP && ret != NF_STOLEN) { 65 if (ret != NF_DROP && ret != NF_STOLEN) {
@@ -81,18 +82,16 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
81static unsigned int 82static unsigned int
82iptable_mangle_hook(const struct nf_hook_ops *ops, 83iptable_mangle_hook(const struct nf_hook_ops *ops,
83 struct sk_buff *skb, 84 struct sk_buff *skb,
84 const struct net_device *in, 85 const struct nf_hook_state *state)
85 const struct net_device *out,
86 int (*okfn)(struct sk_buff *))
87{ 86{
88 if (ops->hooknum == NF_INET_LOCAL_OUT) 87 if (ops->hooknum == NF_INET_LOCAL_OUT)
89 return ipt_mangle_out(skb, out); 88 return ipt_mangle_out(skb, state);
90 if (ops->hooknum == NF_INET_POST_ROUTING) 89 if (ops->hooknum == NF_INET_POST_ROUTING)
91 return ipt_do_table(skb, ops->hooknum, in, out, 90 return ipt_do_table(skb, ops->hooknum, state,
92 dev_net(out)->ipv4.iptable_mangle); 91 dev_net(state->out)->ipv4.iptable_mangle);
93 /* PREROUTING/INPUT/FORWARD: */ 92 /* PREROUTING/INPUT/FORWARD: */
94 return ipt_do_table(skb, ops->hooknum, in, out, 93 return ipt_do_table(skb, ops->hooknum, state,
95 dev_net(in)->ipv4.iptable_mangle); 94 dev_net(state->in)->ipv4.iptable_mangle);
96} 95}
97 96
98static struct nf_hook_ops *mangle_ops __read_mostly; 97static struct nf_hook_ops *mangle_ops __read_mostly;
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index 6b67d7e9a75d..0d4d9cdf98a4 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -30,49 +30,40 @@ static const struct xt_table nf_nat_ipv4_table = {
30 30
31static unsigned int iptable_nat_do_chain(const struct nf_hook_ops *ops, 31static unsigned int iptable_nat_do_chain(const struct nf_hook_ops *ops,
32 struct sk_buff *skb, 32 struct sk_buff *skb,
33 const struct net_device *in, 33 const struct nf_hook_state *state,
34 const struct net_device *out,
35 struct nf_conn *ct) 34 struct nf_conn *ct)
36{ 35{
37 struct net *net = nf_ct_net(ct); 36 struct net *net = nf_ct_net(ct);
38 37
39 return ipt_do_table(skb, ops->hooknum, in, out, net->ipv4.nat_table); 38 return ipt_do_table(skb, ops->hooknum, state, net->ipv4.nat_table);
40} 39}
41 40
42static unsigned int iptable_nat_ipv4_fn(const struct nf_hook_ops *ops, 41static unsigned int iptable_nat_ipv4_fn(const struct nf_hook_ops *ops,
43 struct sk_buff *skb, 42 struct sk_buff *skb,
44 const struct net_device *in, 43 const struct nf_hook_state *state)
45 const struct net_device *out,
46 int (*okfn)(struct sk_buff *))
47{ 44{
48 return nf_nat_ipv4_fn(ops, skb, in, out, iptable_nat_do_chain); 45 return nf_nat_ipv4_fn(ops, skb, state, iptable_nat_do_chain);
49} 46}
50 47
51static unsigned int iptable_nat_ipv4_in(const struct nf_hook_ops *ops, 48static unsigned int iptable_nat_ipv4_in(const struct nf_hook_ops *ops,
52 struct sk_buff *skb, 49 struct sk_buff *skb,
53 const struct net_device *in, 50 const struct nf_hook_state *state)
54 const struct net_device *out,
55 int (*okfn)(struct sk_buff *))
56{ 51{
57 return nf_nat_ipv4_in(ops, skb, in, out, iptable_nat_do_chain); 52 return nf_nat_ipv4_in(ops, skb, state, iptable_nat_do_chain);
58} 53}
59 54
60static unsigned int iptable_nat_ipv4_out(const struct nf_hook_ops *ops, 55static unsigned int iptable_nat_ipv4_out(const struct nf_hook_ops *ops,
61 struct sk_buff *skb, 56 struct sk_buff *skb,
62 const struct net_device *in, 57 const struct nf_hook_state *state)
63 const struct net_device *out,
64 int (*okfn)(struct sk_buff *))
65{ 58{
66 return nf_nat_ipv4_out(ops, skb, in, out, iptable_nat_do_chain); 59 return nf_nat_ipv4_out(ops, skb, state, iptable_nat_do_chain);
67} 60}
68 61
69static unsigned int iptable_nat_ipv4_local_fn(const struct nf_hook_ops *ops, 62static unsigned int iptable_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
70 struct sk_buff *skb, 63 struct sk_buff *skb,
71 const struct net_device *in, 64 const struct nf_hook_state *state)
72 const struct net_device *out,
73 int (*okfn)(struct sk_buff *))
74{ 65{
75 return nf_nat_ipv4_local_fn(ops, skb, in, out, iptable_nat_do_chain); 66 return nf_nat_ipv4_local_fn(ops, skb, state, iptable_nat_do_chain);
76} 67}
77 68
78static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = { 69static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = {
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index b2f7e8f98316..0356e6da4bb7 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -21,8 +21,7 @@ static const struct xt_table packet_raw = {
21/* The work comes in here from netfilter.c. */ 21/* The work comes in here from netfilter.c. */
22static unsigned int 22static unsigned int
23iptable_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, 23iptable_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
24 const struct net_device *in, const struct net_device *out, 24 const struct nf_hook_state *state)
25 int (*okfn)(struct sk_buff *))
26{ 25{
27 const struct net *net; 26 const struct net *net;
28 27
@@ -32,8 +31,8 @@ iptable_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
32 /* root is playing with raw sockets. */ 31 /* root is playing with raw sockets. */
33 return NF_ACCEPT; 32 return NF_ACCEPT;
34 33
35 net = dev_net((in != NULL) ? in : out); 34 net = dev_net(state->in ? state->in : state->out);
36 return ipt_do_table(skb, ops->hooknum, in, out, net->ipv4.iptable_raw); 35 return ipt_do_table(skb, ops->hooknum, state, net->ipv4.iptable_raw);
37} 36}
38 37
39static struct nf_hook_ops *rawtable_ops __read_mostly; 38static struct nf_hook_ops *rawtable_ops __read_mostly;
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index c86647ed2078..4bce3980ccd9 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -38,9 +38,7 @@ static const struct xt_table security_table = {
38 38
39static unsigned int 39static unsigned int
40iptable_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, 40iptable_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
41 const struct net_device *in, 41 const struct nf_hook_state *state)
42 const struct net_device *out,
43 int (*okfn)(struct sk_buff *))
44{ 42{
45 const struct net *net; 43 const struct net *net;
46 44
@@ -50,8 +48,8 @@ iptable_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
50 /* Somebody is playing with raw sockets. */ 48 /* Somebody is playing with raw sockets. */
51 return NF_ACCEPT; 49 return NF_ACCEPT;
52 50
53 net = dev_net((in != NULL) ? in : out); 51 net = dev_net(state->in ? state->in : state->out);
54 return ipt_do_table(skb, ops->hooknum, in, out, 52 return ipt_do_table(skb, ops->hooknum, state,
55 net->ipv4.iptable_security); 53 net->ipv4.iptable_security);
56} 54}
57 55
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 8c8d6642cbb0..30ad9554b5e9 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -94,9 +94,7 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
94 94
95static unsigned int ipv4_helper(const struct nf_hook_ops *ops, 95static unsigned int ipv4_helper(const struct nf_hook_ops *ops,
96 struct sk_buff *skb, 96 struct sk_buff *skb,
97 const struct net_device *in, 97 const struct nf_hook_state *state)
98 const struct net_device *out,
99 int (*okfn)(struct sk_buff *))
100{ 98{
101 struct nf_conn *ct; 99 struct nf_conn *ct;
102 enum ip_conntrack_info ctinfo; 100 enum ip_conntrack_info ctinfo;
@@ -123,9 +121,7 @@ static unsigned int ipv4_helper(const struct nf_hook_ops *ops,
123 121
124static unsigned int ipv4_confirm(const struct nf_hook_ops *ops, 122static unsigned int ipv4_confirm(const struct nf_hook_ops *ops,
125 struct sk_buff *skb, 123 struct sk_buff *skb,
126 const struct net_device *in, 124 const struct nf_hook_state *state)
127 const struct net_device *out,
128 int (*okfn)(struct sk_buff *))
129{ 125{
130 struct nf_conn *ct; 126 struct nf_conn *ct;
131 enum ip_conntrack_info ctinfo; 127 enum ip_conntrack_info ctinfo;
@@ -149,24 +145,20 @@ out:
149 145
150static unsigned int ipv4_conntrack_in(const struct nf_hook_ops *ops, 146static unsigned int ipv4_conntrack_in(const struct nf_hook_ops *ops,
151 struct sk_buff *skb, 147 struct sk_buff *skb,
152 const struct net_device *in, 148 const struct nf_hook_state *state)
153 const struct net_device *out,
154 int (*okfn)(struct sk_buff *))
155{ 149{
156 return nf_conntrack_in(dev_net(in), PF_INET, ops->hooknum, skb); 150 return nf_conntrack_in(dev_net(state->in), PF_INET, ops->hooknum, skb);
157} 151}
158 152
159static unsigned int ipv4_conntrack_local(const struct nf_hook_ops *ops, 153static unsigned int ipv4_conntrack_local(const struct nf_hook_ops *ops,
160 struct sk_buff *skb, 154 struct sk_buff *skb,
161 const struct net_device *in, 155 const struct nf_hook_state *state)
162 const struct net_device *out,
163 int (*okfn)(struct sk_buff *))
164{ 156{
165 /* root is playing with raw sockets. */ 157 /* root is playing with raw sockets. */
166 if (skb->len < sizeof(struct iphdr) || 158 if (skb->len < sizeof(struct iphdr) ||
167 ip_hdrlen(skb) < sizeof(struct iphdr)) 159 ip_hdrlen(skb) < sizeof(struct iphdr))
168 return NF_ACCEPT; 160 return NF_ACCEPT;
169 return nf_conntrack_in(dev_net(out), PF_INET, ops->hooknum, skb); 161 return nf_conntrack_in(dev_net(state->out), PF_INET, ops->hooknum, skb);
170} 162}
171 163
172/* Connection tracking may drop packets, but never alters them, so 164/* Connection tracking may drop packets, but never alters them, so
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 7e5ca6f2d0cd..c88b7d434718 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -63,9 +63,7 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
63 63
64static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops, 64static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
65 struct sk_buff *skb, 65 struct sk_buff *skb,
66 const struct net_device *in, 66 const struct nf_hook_state *state)
67 const struct net_device *out,
68 int (*okfn)(struct sk_buff *))
69{ 67{
70 struct sock *sk = skb->sk; 68 struct sock *sk = skb->sk;
71 struct inet_sock *inet = inet_sk(skb->sk); 69 struct inet_sock *inet = inet_sk(skb->sk);
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index fc37711e11f3..e59cc05c09e9 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -256,11 +256,10 @@ EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
256 256
257unsigned int 257unsigned int
258nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, 258nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
259 const struct net_device *in, const struct net_device *out, 259 const struct nf_hook_state *state,
260 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 260 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
261 struct sk_buff *skb, 261 struct sk_buff *skb,
262 const struct net_device *in, 262 const struct nf_hook_state *state,
263 const struct net_device *out,
264 struct nf_conn *ct)) 263 struct nf_conn *ct))
265{ 264{
266 struct nf_conn *ct; 265 struct nf_conn *ct;
@@ -309,7 +308,7 @@ nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
309 if (!nf_nat_initialized(ct, maniptype)) { 308 if (!nf_nat_initialized(ct, maniptype)) {
310 unsigned int ret; 309 unsigned int ret;
311 310
312 ret = do_chain(ops, skb, in, out, ct); 311 ret = do_chain(ops, skb, state, ct);
313 if (ret != NF_ACCEPT) 312 if (ret != NF_ACCEPT)
314 return ret; 313 return ret;
315 314
@@ -323,7 +322,8 @@ nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
323 pr_debug("Already setup manip %s for ct %p\n", 322 pr_debug("Already setup manip %s for ct %p\n",
324 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST", 323 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
325 ct); 324 ct);
326 if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) 325 if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat,
326 state->out))
327 goto oif_changed; 327 goto oif_changed;
328 } 328 }
329 break; 329 break;
@@ -332,7 +332,7 @@ nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
332 /* ESTABLISHED */ 332 /* ESTABLISHED */
333 NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || 333 NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
334 ctinfo == IP_CT_ESTABLISHED_REPLY); 334 ctinfo == IP_CT_ESTABLISHED_REPLY);
335 if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) 335 if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, state->out))
336 goto oif_changed; 336 goto oif_changed;
337 } 337 }
338 338
@@ -346,17 +346,16 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv4_fn);
346 346
347unsigned int 347unsigned int
348nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb, 348nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
349 const struct net_device *in, const struct net_device *out, 349 const struct nf_hook_state *state,
350 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 350 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
351 struct sk_buff *skb, 351 struct sk_buff *skb,
352 const struct net_device *in, 352 const struct nf_hook_state *state,
353 const struct net_device *out,
354 struct nf_conn *ct)) 353 struct nf_conn *ct))
355{ 354{
356 unsigned int ret; 355 unsigned int ret;
357 __be32 daddr = ip_hdr(skb)->daddr; 356 __be32 daddr = ip_hdr(skb)->daddr;
358 357
359 ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain); 358 ret = nf_nat_ipv4_fn(ops, skb, state, do_chain);
360 if (ret != NF_DROP && ret != NF_STOLEN && 359 if (ret != NF_DROP && ret != NF_STOLEN &&
361 daddr != ip_hdr(skb)->daddr) 360 daddr != ip_hdr(skb)->daddr)
362 skb_dst_drop(skb); 361 skb_dst_drop(skb);
@@ -367,11 +366,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv4_in);
367 366
368unsigned int 367unsigned int
369nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb, 368nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
370 const struct net_device *in, const struct net_device *out, 369 const struct nf_hook_state *state,
371 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 370 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
372 struct sk_buff *skb, 371 struct sk_buff *skb,
373 const struct net_device *in, 372 const struct nf_hook_state *state,
374 const struct net_device *out,
375 struct nf_conn *ct)) 373 struct nf_conn *ct))
376{ 374{
377#ifdef CONFIG_XFRM 375#ifdef CONFIG_XFRM
@@ -386,7 +384,7 @@ nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
386 ip_hdrlen(skb) < sizeof(struct iphdr)) 384 ip_hdrlen(skb) < sizeof(struct iphdr))
387 return NF_ACCEPT; 385 return NF_ACCEPT;
388 386
389 ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain); 387 ret = nf_nat_ipv4_fn(ops, skb, state, do_chain);
390#ifdef CONFIG_XFRM 388#ifdef CONFIG_XFRM
391 if (ret != NF_DROP && ret != NF_STOLEN && 389 if (ret != NF_DROP && ret != NF_STOLEN &&
392 !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && 390 !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
@@ -410,11 +408,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv4_out);
410 408
411unsigned int 409unsigned int
412nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, 410nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
413 const struct net_device *in, const struct net_device *out, 411 const struct nf_hook_state *state,
414 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 412 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
415 struct sk_buff *skb, 413 struct sk_buff *skb,
416 const struct net_device *in, 414 const struct nf_hook_state *state,
417 const struct net_device *out,
418 struct nf_conn *ct)) 415 struct nf_conn *ct))
419{ 416{
420 const struct nf_conn *ct; 417 const struct nf_conn *ct;
@@ -427,7 +424,7 @@ nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
427 ip_hdrlen(skb) < sizeof(struct iphdr)) 424 ip_hdrlen(skb) < sizeof(struct iphdr))
428 return NF_ACCEPT; 425 return NF_ACCEPT;
429 426
430 ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain); 427 ret = nf_nat_ipv4_fn(ops, skb, state, do_chain);
431 if (ret != NF_DROP && ret != NF_STOLEN && 428 if (ret != NF_DROP && ret != NF_STOLEN &&
432 (ct = nf_ct_get(skb, &ctinfo)) != NULL) { 429 (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
433 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 430 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
diff --git a/net/ipv4/netfilter/nf_tables_arp.c b/net/ipv4/netfilter/nf_tables_arp.c
index 19412a4063fb..8412268bbad1 100644
--- a/net/ipv4/netfilter/nf_tables_arp.c
+++ b/net/ipv4/netfilter/nf_tables_arp.c
@@ -17,13 +17,11 @@
17static unsigned int 17static unsigned int
18nft_do_chain_arp(const struct nf_hook_ops *ops, 18nft_do_chain_arp(const struct nf_hook_ops *ops,
19 struct sk_buff *skb, 19 struct sk_buff *skb,
20 const struct net_device *in, 20 const struct nf_hook_state *state)
21 const struct net_device *out,
22 int (*okfn)(struct sk_buff *))
23{ 21{
24 struct nft_pktinfo pkt; 22 struct nft_pktinfo pkt;
25 23
26 nft_set_pktinfo(&pkt, ops, skb, in, out); 24 nft_set_pktinfo(&pkt, ops, skb, state);
27 25
28 return nft_do_chain(&pkt, ops); 26 return nft_do_chain(&pkt, ops);
29} 27}
diff --git a/net/ipv4/netfilter/nf_tables_ipv4.c b/net/ipv4/netfilter/nf_tables_ipv4.c
index 6820c8c40842..aa180d3a69a5 100644
--- a/net/ipv4/netfilter/nf_tables_ipv4.c
+++ b/net/ipv4/netfilter/nf_tables_ipv4.c
@@ -20,22 +20,18 @@
20 20
21static unsigned int nft_do_chain_ipv4(const struct nf_hook_ops *ops, 21static unsigned int nft_do_chain_ipv4(const struct nf_hook_ops *ops,
22 struct sk_buff *skb, 22 struct sk_buff *skb,
23 const struct net_device *in, 23 const struct nf_hook_state *state)
24 const struct net_device *out,
25 int (*okfn)(struct sk_buff *))
26{ 24{
27 struct nft_pktinfo pkt; 25 struct nft_pktinfo pkt;
28 26
29 nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out); 27 nft_set_pktinfo_ipv4(&pkt, ops, skb, state);
30 28
31 return nft_do_chain(&pkt, ops); 29 return nft_do_chain(&pkt, ops);
32} 30}
33 31
34static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops, 32static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops,
35 struct sk_buff *skb, 33 struct sk_buff *skb,
36 const struct net_device *in, 34 const struct nf_hook_state *state)
37 const struct net_device *out,
38 int (*okfn)(struct sk_buff *))
39{ 35{
40 if (unlikely(skb->len < sizeof(struct iphdr) || 36 if (unlikely(skb->len < sizeof(struct iphdr) ||
41 ip_hdr(skb)->ihl < sizeof(struct iphdr) / 4)) { 37 ip_hdr(skb)->ihl < sizeof(struct iphdr) / 4)) {
@@ -45,7 +41,7 @@ static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops,
45 return NF_ACCEPT; 41 return NF_ACCEPT;
46 } 42 }
47 43
48 return nft_do_chain_ipv4(ops, skb, in, out, okfn); 44 return nft_do_chain_ipv4(ops, skb, state);
49} 45}
50 46
51struct nft_af_info nft_af_ipv4 __read_mostly = { 47struct nft_af_info nft_af_ipv4 __read_mostly = {
diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
index df547bf50078..bf5c30ae14e4 100644
--- a/net/ipv4/netfilter/nft_chain_nat_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
@@ -28,51 +28,42 @@
28 28
29static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops, 29static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops,
30 struct sk_buff *skb, 30 struct sk_buff *skb,
31 const struct net_device *in, 31 const struct nf_hook_state *state,
32 const struct net_device *out,
33 struct nf_conn *ct) 32 struct nf_conn *ct)
34{ 33{
35 struct nft_pktinfo pkt; 34 struct nft_pktinfo pkt;
36 35
37 nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out); 36 nft_set_pktinfo_ipv4(&pkt, ops, skb, state);
38 37
39 return nft_do_chain(&pkt, ops); 38 return nft_do_chain(&pkt, ops);
40} 39}
41 40
42static unsigned int nft_nat_ipv4_fn(const struct nf_hook_ops *ops, 41static unsigned int nft_nat_ipv4_fn(const struct nf_hook_ops *ops,
43 struct sk_buff *skb, 42 struct sk_buff *skb,
44 const struct net_device *in, 43 const struct nf_hook_state *state)
45 const struct net_device *out,
46 int (*okfn)(struct sk_buff *))
47{ 44{
48 return nf_nat_ipv4_fn(ops, skb, in, out, nft_nat_do_chain); 45 return nf_nat_ipv4_fn(ops, skb, state, nft_nat_do_chain);
49} 46}
50 47
51static unsigned int nft_nat_ipv4_in(const struct nf_hook_ops *ops, 48static unsigned int nft_nat_ipv4_in(const struct nf_hook_ops *ops,
52 struct sk_buff *skb, 49 struct sk_buff *skb,
53 const struct net_device *in, 50 const struct nf_hook_state *state)
54 const struct net_device *out,
55 int (*okfn)(struct sk_buff *))
56{ 51{
57 return nf_nat_ipv4_in(ops, skb, in, out, nft_nat_do_chain); 52 return nf_nat_ipv4_in(ops, skb, state, nft_nat_do_chain);
58} 53}
59 54
60static unsigned int nft_nat_ipv4_out(const struct nf_hook_ops *ops, 55static unsigned int nft_nat_ipv4_out(const struct nf_hook_ops *ops,
61 struct sk_buff *skb, 56 struct sk_buff *skb,
62 const struct net_device *in, 57 const struct nf_hook_state *state)
63 const struct net_device *out,
64 int (*okfn)(struct sk_buff *))
65{ 58{
66 return nf_nat_ipv4_out(ops, skb, in, out, nft_nat_do_chain); 59 return nf_nat_ipv4_out(ops, skb, state, nft_nat_do_chain);
67} 60}
68 61
69static unsigned int nft_nat_ipv4_local_fn(const struct nf_hook_ops *ops, 62static unsigned int nft_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
70 struct sk_buff *skb, 63 struct sk_buff *skb,
71 const struct net_device *in, 64 const struct nf_hook_state *state)
72 const struct net_device *out,
73 int (*okfn)(struct sk_buff *))
74{ 65{
75 return nf_nat_ipv4_local_fn(ops, skb, in, out, nft_nat_do_chain); 66 return nf_nat_ipv4_local_fn(ops, skb, state, nft_nat_do_chain);
76} 67}
77 68
78static const struct nf_chain_type nft_chain_nat_ipv4 = { 69static const struct nf_chain_type nft_chain_nat_ipv4 = {
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c
index 125b66766c0a..e335b0afdaf3 100644
--- a/net/ipv4/netfilter/nft_chain_route_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_route_ipv4.c
@@ -23,9 +23,7 @@
23 23
24static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops, 24static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
25 struct sk_buff *skb, 25 struct sk_buff *skb,
26 const struct net_device *in, 26 const struct nf_hook_state *state)
27 const struct net_device *out,
28 int (*okfn)(struct sk_buff *))
29{ 27{
30 unsigned int ret; 28 unsigned int ret;
31 struct nft_pktinfo pkt; 29 struct nft_pktinfo pkt;
@@ -39,7 +37,7 @@ static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
39 ip_hdrlen(skb) < sizeof(struct iphdr)) 37 ip_hdrlen(skb) < sizeof(struct iphdr))
40 return NF_ACCEPT; 38 return NF_ACCEPT;
41 39
42 nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out); 40 nft_set_pktinfo_ipv4(&pkt, ops, skb, state);
43 41
44 mark = skb->mark; 42 mark = skb->mark;
45 iph = ip_hdr(skb); 43 iph = ip_hdr(skb);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 344e7cdfb8d4..a93f260cf24c 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -516,7 +516,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
516 ntohs(icmph->un.echo.sequence)); 516 ntohs(icmph->un.echo.sequence));
517 517
518 sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); 518 sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
519 if (sk == NULL) { 519 if (!sk) {
520 pr_debug("no socket, dropping\n"); 520 pr_debug("no socket, dropping\n");
521 return; /* No socket for error */ 521 return; /* No socket for error */
522 } 522 }
@@ -971,7 +971,7 @@ bool ping_rcv(struct sk_buff *skb)
971 skb_push(skb, skb->data - (u8 *)icmph); 971 skb_push(skb, skb->data - (u8 *)icmph);
972 972
973 sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); 973 sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
974 if (sk != NULL) { 974 if (sk) {
975 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 975 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
976 976
977 pr_debug("rcv on socket %p\n", sk); 977 pr_debug("rcv on socket %p\n", sk);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 56946f47d446..c0bb648fb2f9 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -293,7 +293,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
293 293
294 read_lock(&raw_v4_hashinfo.lock); 294 read_lock(&raw_v4_hashinfo.lock);
295 raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]); 295 raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
296 if (raw_sk != NULL) { 296 if (raw_sk) {
297 iph = (const struct iphdr *)skb->data; 297 iph = (const struct iphdr *)skb->data;
298 net = dev_net(skb->dev); 298 net = dev_net(skb->dev);
299 299
@@ -363,7 +363,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
363 skb = sock_alloc_send_skb(sk, 363 skb = sock_alloc_send_skb(sk,
364 length + hlen + tlen + 15, 364 length + hlen + tlen + 15,
365 flags & MSG_DONTWAIT, &err); 365 flags & MSG_DONTWAIT, &err);
366 if (skb == NULL) 366 if (!skb)
367 goto error; 367 goto error;
368 skb_reserve(skb, hlen); 368 skb_reserve(skb, hlen);
369 369
@@ -412,8 +412,8 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
412 icmp_out_count(net, ((struct icmphdr *) 412 icmp_out_count(net, ((struct icmphdr *)
413 skb_transport_header(skb))->type); 413 skb_transport_header(skb))->type);
414 414
415 err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL, 415 err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb,
416 rt->dst.dev, dst_output); 416 NULL, rt->dst.dev, dst_output_sk);
417 if (err > 0) 417 if (err > 0)
418 err = net_xmit_errno(err); 418 err = net_xmit_errno(err);
419 if (err) 419 if (err)
@@ -872,7 +872,7 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
872 872
873 spin_lock_bh(&sk->sk_receive_queue.lock); 873 spin_lock_bh(&sk->sk_receive_queue.lock);
874 skb = skb_peek(&sk->sk_receive_queue); 874 skb = skb_peek(&sk->sk_receive_queue);
875 if (skb != NULL) 875 if (skb)
876 amount = skb->len; 876 amount = skb->len;
877 spin_unlock_bh(&sk->sk_receive_queue.lock); 877 spin_unlock_bh(&sk->sk_receive_queue.lock);
878 return put_user(amount, (int __user *)arg); 878 return put_user(amount, (int __user *)arg);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 652b92ebd7ba..a78540f28276 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1056,7 +1056,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1056 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0); 1056 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
1057 1057
1058 rt = (struct rtable *)odst; 1058 rt = (struct rtable *)odst;
1059 if (odst->obsolete && odst->ops->check(odst, 0) == NULL) { 1059 if (odst->obsolete && !odst->ops->check(odst, 0)) {
1060 rt = ip_route_output_flow(sock_net(sk), &fl4, sk); 1060 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1061 if (IS_ERR(rt)) 1061 if (IS_ERR(rt))
1062 goto out; 1062 goto out;
@@ -1450,7 +1450,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1450 1450
1451 /* Primary sanity checks. */ 1451 /* Primary sanity checks. */
1452 1452
1453 if (in_dev == NULL) 1453 if (!in_dev)
1454 return -EINVAL; 1454 return -EINVAL;
1455 1455
1456 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || 1456 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
@@ -1553,7 +1553,7 @@ static int __mkroute_input(struct sk_buff *skb,
1553 1553
1554 /* get a working reference to the output device */ 1554 /* get a working reference to the output device */
1555 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); 1555 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1556 if (out_dev == NULL) { 1556 if (!out_dev) {
1557 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n"); 1557 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1558 return -EINVAL; 1558 return -EINVAL;
1559 } 1559 }
@@ -1591,7 +1591,7 @@ static int __mkroute_input(struct sk_buff *skb,
1591 1591
1592 fnhe = find_exception(&FIB_RES_NH(*res), daddr); 1592 fnhe = find_exception(&FIB_RES_NH(*res), daddr);
1593 if (do_cache) { 1593 if (do_cache) {
1594 if (fnhe != NULL) 1594 if (fnhe)
1595 rth = rcu_dereference(fnhe->fnhe_rth_input); 1595 rth = rcu_dereference(fnhe->fnhe_rth_input);
1596 else 1596 else
1597 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); 1597 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
@@ -2054,7 +2054,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
2054 ipv4_is_lbcast(fl4->daddr))) { 2054 ipv4_is_lbcast(fl4->daddr))) {
2055 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ 2055 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2056 dev_out = __ip_dev_find(net, fl4->saddr, false); 2056 dev_out = __ip_dev_find(net, fl4->saddr, false);
2057 if (dev_out == NULL) 2057 if (!dev_out)
2058 goto out; 2058 goto out;
2059 2059
2060 /* Special hack: user can direct multicasts 2060 /* Special hack: user can direct multicasts
@@ -2087,7 +2087,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
2087 if (fl4->flowi4_oif) { 2087 if (fl4->flowi4_oif) {
2088 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif); 2088 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2089 rth = ERR_PTR(-ENODEV); 2089 rth = ERR_PTR(-ENODEV);
2090 if (dev_out == NULL) 2090 if (!dev_out)
2091 goto out; 2091 goto out;
2092 2092
2093 /* RACE: Check return value of inet_select_addr instead. */ 2093 /* RACE: Check return value of inet_select_addr instead. */
@@ -2299,7 +2299,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2299 u32 metrics[RTAX_MAX]; 2299 u32 metrics[RTAX_MAX];
2300 2300
2301 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags); 2301 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
2302 if (nlh == NULL) 2302 if (!nlh)
2303 return -EMSGSIZE; 2303 return -EMSGSIZE;
2304 2304
2305 r = nlmsg_data(nlh); 2305 r = nlmsg_data(nlh);
@@ -2421,7 +2421,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
2421 rtm = nlmsg_data(nlh); 2421 rtm = nlmsg_data(nlh);
2422 2422
2423 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2423 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2424 if (skb == NULL) { 2424 if (!skb) {
2425 err = -ENOBUFS; 2425 err = -ENOBUFS;
2426 goto errout; 2426 goto errout;
2427 } 2427 }
@@ -2452,7 +2452,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
2452 struct net_device *dev; 2452 struct net_device *dev;
2453 2453
2454 dev = __dev_get_by_index(net, iif); 2454 dev = __dev_get_by_index(net, iif);
2455 if (dev == NULL) { 2455 if (!dev) {
2456 err = -ENODEV; 2456 err = -ENODEV;
2457 goto errout_free; 2457 goto errout_free;
2458 } 2458 }
@@ -2651,7 +2651,7 @@ static __net_init int sysctl_route_net_init(struct net *net)
2651 tbl = ipv4_route_flush_table; 2651 tbl = ipv4_route_flush_table;
2652 if (!net_eq(net, &init_net)) { 2652 if (!net_eq(net, &init_net)) {
2653 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL); 2653 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
2654 if (tbl == NULL) 2654 if (!tbl)
2655 goto err_dup; 2655 goto err_dup;
2656 2656
2657 /* Don't export sysctls to unprivileged users */ 2657 /* Don't export sysctls to unprivileged users */
@@ -2661,7 +2661,7 @@ static __net_init int sysctl_route_net_init(struct net *net)
2661 tbl[0].extra1 = net; 2661 tbl[0].extra1 = net;
2662 2662
2663 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl); 2663 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
2664 if (net->ipv4.route_hdr == NULL) 2664 if (!net->ipv4.route_hdr)
2665 goto err_reg; 2665 goto err_reg;
2666 return 0; 2666 return 0;
2667 2667
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index fdf899163d44..c3852a7ff3c7 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -909,7 +909,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
909 int i; 909 int i;
910 910
911 table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL); 911 table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
912 if (table == NULL) 912 if (!table)
913 goto err_alloc; 913 goto err_alloc;
914 914
915 /* Update the variables to point into the current struct net */ 915 /* Update the variables to point into the current struct net */
@@ -918,7 +918,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
918 } 918 }
919 919
920 net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table); 920 net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
921 if (net->ipv4.ipv4_hdr == NULL) 921 if (!net->ipv4.ipv4_hdr)
922 goto err_reg; 922 goto err_reg;
923 923
924 net->ipv4.sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL); 924 net->ipv4.sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
@@ -956,7 +956,7 @@ static __init int sysctl_ipv4_init(void)
956 struct ctl_table_header *hdr; 956 struct ctl_table_header *hdr;
957 957
958 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table); 958 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
959 if (hdr == NULL) 959 if (!hdr)
960 return -ENOMEM; 960 return -ENOMEM;
961 961
962 if (register_pernet_subsys(&ipv4_sysctl_ops)) { 962 if (register_pernet_subsys(&ipv4_sysctl_ops)) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index dbd51cefaf02..094a6822c71d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -496,7 +496,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
496 496
497 /* Connected or passive Fast Open socket? */ 497 /* Connected or passive Fast Open socket? */
498 if (sk->sk_state != TCP_SYN_SENT && 498 if (sk->sk_state != TCP_SYN_SENT &&
499 (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) { 499 (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) {
500 int target = sock_rcvlowat(sk, 0, INT_MAX); 500 int target = sock_rcvlowat(sk, 0, INT_MAX);
501 501
502 if (tp->urg_seq == tp->copied_seq && 502 if (tp->urg_seq == tp->copied_seq &&
@@ -1028,7 +1028,7 @@ static inline int select_size(const struct sock *sk, bool sg)
1028 1028
1029void tcp_free_fastopen_req(struct tcp_sock *tp) 1029void tcp_free_fastopen_req(struct tcp_sock *tp)
1030{ 1030{
1031 if (tp->fastopen_req != NULL) { 1031 if (tp->fastopen_req) {
1032 kfree(tp->fastopen_req); 1032 kfree(tp->fastopen_req);
1033 tp->fastopen_req = NULL; 1033 tp->fastopen_req = NULL;
1034 } 1034 }
@@ -1042,12 +1042,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
1042 1042
1043 if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE)) 1043 if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
1044 return -EOPNOTSUPP; 1044 return -EOPNOTSUPP;
1045 if (tp->fastopen_req != NULL) 1045 if (tp->fastopen_req)
1046 return -EALREADY; /* Another Fast Open is in progress */ 1046 return -EALREADY; /* Another Fast Open is in progress */
1047 1047
1048 tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request), 1048 tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
1049 sk->sk_allocation); 1049 sk->sk_allocation);
1050 if (unlikely(tp->fastopen_req == NULL)) 1050 if (unlikely(!tp->fastopen_req))
1051 return -ENOBUFS; 1051 return -ENOBUFS;
1052 tp->fastopen_req->data = msg; 1052 tp->fastopen_req->data = msg;
1053 tp->fastopen_req->size = size; 1053 tp->fastopen_req->size = size;
@@ -2138,7 +2138,7 @@ adjudge_to_death:
2138 * aborted (e.g., closed with unread data) before 3WHS 2138 * aborted (e.g., closed with unread data) before 3WHS
2139 * finishes. 2139 * finishes.
2140 */ 2140 */
2141 if (req != NULL) 2141 if (req)
2142 reqsk_fastopen_remove(sk, req, false); 2142 reqsk_fastopen_remove(sk, req, false);
2143 inet_csk_destroy_sock(sk); 2143 inet_csk_destroy_sock(sk);
2144 } 2144 }
@@ -2776,7 +2776,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
2776 break; 2776 break;
2777 2777
2778 case TCP_FASTOPEN: 2778 case TCP_FASTOPEN:
2779 if (icsk->icsk_accept_queue.fastopenq != NULL) 2779 if (icsk->icsk_accept_queue.fastopenq)
2780 val = icsk->icsk_accept_queue.fastopenq->max_qlen; 2780 val = icsk->icsk_accept_queue.fastopenq->max_qlen;
2781 else 2781 else
2782 val = 0; 2782 val = 0;
@@ -2960,7 +2960,7 @@ void tcp_done(struct sock *sk)
2960 2960
2961 tcp_set_state(sk, TCP_CLOSE); 2961 tcp_set_state(sk, TCP_CLOSE);
2962 tcp_clear_xmit_timers(sk); 2962 tcp_clear_xmit_timers(sk);
2963 if (req != NULL) 2963 if (req)
2964 reqsk_fastopen_remove(sk, req, false); 2964 reqsk_fastopen_remove(sk, req, false);
2965 2965
2966 sk->sk_shutdown = SHUTDOWN_MASK; 2966 sk->sk_shutdown = SHUTDOWN_MASK;
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 86dc119a3815..79b34a0f4a4a 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -29,7 +29,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
29 r->idiag_rqueue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); 29 r->idiag_rqueue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
30 r->idiag_wqueue = tp->write_seq - tp->snd_una; 30 r->idiag_wqueue = tp->write_seq - tp->snd_una;
31 } 31 }
32 if (info != NULL) 32 if (info)
33 tcp_get_info(sk, info); 33 tcp_get_info(sk, info);
34} 34}
35 35
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 2eb887ec0ce3..e3d87aca6be8 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -141,7 +141,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
141 req->sk = NULL; 141 req->sk = NULL;
142 142
143 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); 143 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
144 if (child == NULL) 144 if (!child)
145 return false; 145 return false;
146 146
147 spin_lock(&queue->fastopenq->lock); 147 spin_lock(&queue->fastopenq->lock);
@@ -214,7 +214,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
214 sk->sk_data_ready(sk); 214 sk->sk_data_ready(sk);
215 bh_unlock_sock(child); 215 bh_unlock_sock(child);
216 sock_put(child); 216 sock_put(child);
217 WARN_ON(req->sk == NULL); 217 WARN_ON(!req->sk);
218 return true; 218 return true;
219} 219}
220 220
@@ -233,7 +233,7 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
233 * temporarily vs a server not supporting Fast Open at all. 233 * temporarily vs a server not supporting Fast Open at all.
234 */ 234 */
235 fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq; 235 fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
236 if (fastopenq == NULL || fastopenq->max_qlen == 0) 236 if (!fastopenq || fastopenq->max_qlen == 0)
237 return false; 237 return false;
238 238
239 if (fastopenq->qlen >= fastopenq->max_qlen) { 239 if (fastopenq->qlen >= fastopenq->max_qlen) {
@@ -303,6 +303,7 @@ fastopen:
303 } else if (foc->len > 0) /* Client presents an invalid cookie */ 303 } else if (foc->len > 0) /* Client presents an invalid cookie */
304 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL); 304 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
305 305
306 valid_foc.exp = foc->exp;
306 *foc = valid_foc; 307 *foc = valid_foc;
307 return false; 308 return false;
308} 309}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 18b80e8bc533..031cf72cd05c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -866,7 +866,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
866/* This must be called before lost_out is incremented */ 866/* This must be called before lost_out is incremented */
867static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) 867static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
868{ 868{
869 if ((tp->retransmit_skb_hint == NULL) || 869 if (!tp->retransmit_skb_hint ||
870 before(TCP_SKB_CB(skb)->seq, 870 before(TCP_SKB_CB(skb)->seq,
871 TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) 871 TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
872 tp->retransmit_skb_hint = skb; 872 tp->retransmit_skb_hint = skb;
@@ -1256,7 +1256,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
1256 fack_count += pcount; 1256 fack_count += pcount;
1257 1257
1258 /* Lost marker hint past SACKed? Tweak RFC3517 cnt */ 1258 /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
1259 if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) && 1259 if (!tcp_is_fack(tp) && tp->lost_skb_hint &&
1260 before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq)) 1260 before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
1261 tp->lost_cnt_hint += pcount; 1261 tp->lost_cnt_hint += pcount;
1262 1262
@@ -1535,7 +1535,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1535 if (!before(TCP_SKB_CB(skb)->seq, end_seq)) 1535 if (!before(TCP_SKB_CB(skb)->seq, end_seq))
1536 break; 1536 break;
1537 1537
1538 if ((next_dup != NULL) && 1538 if (next_dup &&
1539 before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) { 1539 before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
1540 in_sack = tcp_match_skb_to_sack(sk, skb, 1540 in_sack = tcp_match_skb_to_sack(sk, skb,
1541 next_dup->start_seq, 1541 next_dup->start_seq,
@@ -1551,7 +1551,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1551 if (in_sack <= 0) { 1551 if (in_sack <= 0) {
1552 tmp = tcp_shift_skb_data(sk, skb, state, 1552 tmp = tcp_shift_skb_data(sk, skb, state,
1553 start_seq, end_seq, dup_sack); 1553 start_seq, end_seq, dup_sack);
1554 if (tmp != NULL) { 1554 if (tmp) {
1555 if (tmp != skb) { 1555 if (tmp != skb) {
1556 skb = tmp; 1556 skb = tmp;
1557 continue; 1557 continue;
@@ -1614,7 +1614,7 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
1614 struct tcp_sacktag_state *state, 1614 struct tcp_sacktag_state *state,
1615 u32 skip_to_seq) 1615 u32 skip_to_seq)
1616{ 1616{
1617 if (next_dup == NULL) 1617 if (!next_dup)
1618 return skb; 1618 return skb;
1619 1619
1620 if (before(next_dup->start_seq, skip_to_seq)) { 1620 if (before(next_dup->start_seq, skip_to_seq)) {
@@ -1783,7 +1783,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
1783 if (tcp_highest_sack_seq(tp) == cache->end_seq) { 1783 if (tcp_highest_sack_seq(tp) == cache->end_seq) {
1784 /* ...but better entrypoint exists! */ 1784 /* ...but better entrypoint exists! */
1785 skb = tcp_highest_sack(sk); 1785 skb = tcp_highest_sack(sk);
1786 if (skb == NULL) 1786 if (!skb)
1787 break; 1787 break;
1788 state.fack_count = tp->fackets_out; 1788 state.fack_count = tp->fackets_out;
1789 cache++; 1789 cache++;
@@ -1798,7 +1798,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
1798 1798
1799 if (!before(start_seq, tcp_highest_sack_seq(tp))) { 1799 if (!before(start_seq, tcp_highest_sack_seq(tp))) {
1800 skb = tcp_highest_sack(sk); 1800 skb = tcp_highest_sack(sk);
1801 if (skb == NULL) 1801 if (!skb)
1802 break; 1802 break;
1803 state.fack_count = tp->fackets_out; 1803 state.fack_count = tp->fackets_out;
1804 } 1804 }
@@ -3105,10 +3105,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3105 if (!first_ackt.v64) 3105 if (!first_ackt.v64)
3106 first_ackt = last_ackt; 3106 first_ackt = last_ackt;
3107 3107
3108 if (!(sacked & TCPCB_SACKED_ACKED)) 3108 if (!(sacked & TCPCB_SACKED_ACKED)) {
3109 reord = min(pkts_acked, reord); 3109 reord = min(pkts_acked, reord);
3110 if (!after(scb->end_seq, tp->high_seq)) 3110 if (!after(scb->end_seq, tp->high_seq))
3111 flag |= FLAG_ORIG_SACK_ACKED; 3111 flag |= FLAG_ORIG_SACK_ACKED;
3112 }
3112 } 3113 }
3113 3114
3114 if (sacked & TCPCB_SACKED_ACKED) 3115 if (sacked & TCPCB_SACKED_ACKED)
@@ -3602,6 +3603,23 @@ old_ack:
3602 return 0; 3603 return 0;
3603} 3604}
3604 3605
3606static void tcp_parse_fastopen_option(int len, const unsigned char *cookie,
3607 bool syn, struct tcp_fastopen_cookie *foc,
3608 bool exp_opt)
3609{
3610 /* Valid only in SYN or SYN-ACK with an even length. */
3611 if (!foc || !syn || len < 0 || (len & 1))
3612 return;
3613
3614 if (len >= TCP_FASTOPEN_COOKIE_MIN &&
3615 len <= TCP_FASTOPEN_COOKIE_MAX)
3616 memcpy(foc->val, cookie, len);
3617 else if (len != 0)
3618 len = -1;
3619 foc->len = len;
3620 foc->exp = exp_opt;
3621}
3622
3605/* Look for tcp options. Normally only called on SYN and SYNACK packets. 3623/* Look for tcp options. Normally only called on SYN and SYNACK packets.
3606 * But, this can also be called on packets in the established flow when 3624 * But, this can also be called on packets in the established flow when
3607 * the fast version below fails. 3625 * the fast version below fails.
@@ -3691,21 +3709,22 @@ void tcp_parse_options(const struct sk_buff *skb,
3691 */ 3709 */
3692 break; 3710 break;
3693#endif 3711#endif
3712 case TCPOPT_FASTOPEN:
3713 tcp_parse_fastopen_option(
3714 opsize - TCPOLEN_FASTOPEN_BASE,
3715 ptr, th->syn, foc, false);
3716 break;
3717
3694 case TCPOPT_EXP: 3718 case TCPOPT_EXP:
3695 /* Fast Open option shares code 254 using a 3719 /* Fast Open option shares code 254 using a
3696 * 16 bits magic number. It's valid only in 3720 * 16 bits magic number.
3697 * SYN or SYN-ACK with an even size.
3698 */ 3721 */
3699 if (opsize < TCPOLEN_EXP_FASTOPEN_BASE || 3722 if (opsize >= TCPOLEN_EXP_FASTOPEN_BASE &&
3700 get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC || 3723 get_unaligned_be16(ptr) ==
3701 foc == NULL || !th->syn || (opsize & 1)) 3724 TCPOPT_FASTOPEN_MAGIC)
3702 break; 3725 tcp_parse_fastopen_option(opsize -
3703 foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE; 3726 TCPOLEN_EXP_FASTOPEN_BASE,
3704 if (foc->len >= TCP_FASTOPEN_COOKIE_MIN && 3727 ptr + 2, th->syn, foc, true);
3705 foc->len <= TCP_FASTOPEN_COOKIE_MAX)
3706 memcpy(foc->val, ptr + 2, foc->len);
3707 else if (foc->len != 0)
3708 foc->len = -1;
3709 break; 3728 break;
3710 3729
3711 } 3730 }
@@ -4669,7 +4688,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
4669 struct sk_buff *head; 4688 struct sk_buff *head;
4670 u32 start, end; 4689 u32 start, end;
4671 4690
4672 if (skb == NULL) 4691 if (!skb)
4673 return; 4692 return;
4674 4693
4675 start = TCP_SKB_CB(skb)->seq; 4694 start = TCP_SKB_CB(skb)->seq;
@@ -5124,7 +5143,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5124{ 5143{
5125 struct tcp_sock *tp = tcp_sk(sk); 5144 struct tcp_sock *tp = tcp_sk(sk);
5126 5145
5127 if (unlikely(sk->sk_rx_dst == NULL)) 5146 if (unlikely(!sk->sk_rx_dst))
5128 inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); 5147 inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
5129 /* 5148 /*
5130 * Header prediction. 5149 * Header prediction.
@@ -5321,7 +5340,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
5321 5340
5322 tcp_set_state(sk, TCP_ESTABLISHED); 5341 tcp_set_state(sk, TCP_ESTABLISHED);
5323 5342
5324 if (skb != NULL) { 5343 if (skb) {
5325 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); 5344 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
5326 security_inet_conn_established(sk, skb); 5345 security_inet_conn_established(sk, skb);
5327 } 5346 }
@@ -5359,8 +5378,8 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
5359{ 5378{
5360 struct tcp_sock *tp = tcp_sk(sk); 5379 struct tcp_sock *tp = tcp_sk(sk);
5361 struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL; 5380 struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL;
5362 u16 mss = tp->rx_opt.mss_clamp; 5381 u16 mss = tp->rx_opt.mss_clamp, try_exp = 0;
5363 bool syn_drop; 5382 bool syn_drop = false;
5364 5383
5365 if (mss == tp->rx_opt.user_mss) { 5384 if (mss == tp->rx_opt.user_mss) {
5366 struct tcp_options_received opt; 5385 struct tcp_options_received opt;
@@ -5372,16 +5391,25 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
5372 mss = opt.mss_clamp; 5391 mss = opt.mss_clamp;
5373 } 5392 }
5374 5393
5375 if (!tp->syn_fastopen) /* Ignore an unsolicited cookie */ 5394 if (!tp->syn_fastopen) {
5395 /* Ignore an unsolicited cookie */
5376 cookie->len = -1; 5396 cookie->len = -1;
5397 } else if (tp->total_retrans) {
5398 /* SYN timed out and the SYN-ACK neither has a cookie nor
5399 * acknowledges data. Presumably the remote received only
5400 * the retransmitted (regular) SYNs: either the original
5401 * SYN-data or the corresponding SYN-ACK was dropped.
5402 */
5403 syn_drop = (cookie->len < 0 && data);
5404 } else if (cookie->len < 0 && !tp->syn_data) {
5405 /* We requested a cookie but didn't get it. If we did not use
5406 * the (old) exp opt format then try so next time (try_exp=1).
5407 * Otherwise we go back to use the RFC7413 opt (try_exp=2).
5408 */
5409 try_exp = tp->syn_fastopen_exp ? 2 : 1;
5410 }
5377 5411
5378 /* The SYN-ACK neither has cookie nor acknowledges the data. Presumably 5412 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp);
5379 * the remote receives only the retransmitted (regular) SYNs: either
5380 * the original SYN-data or the corresponding SYN-ACK is lost.
5381 */
5382 syn_drop = (cookie->len <= 0 && data && tp->total_retrans);
5383
5384 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
5385 5413
5386 if (data) { /* Retransmit unacked data in SYN */ 5414 if (data) { /* Retransmit unacked data in SYN */
5387 tcp_for_write_queue_from(data, sk) { 5415 tcp_for_write_queue_from(data, sk) {
@@ -5690,11 +5718,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5690 } 5718 }
5691 5719
5692 req = tp->fastopen_rsk; 5720 req = tp->fastopen_rsk;
5693 if (req != NULL) { 5721 if (req) {
5694 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && 5722 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
5695 sk->sk_state != TCP_FIN_WAIT1); 5723 sk->sk_state != TCP_FIN_WAIT1);
5696 5724
5697 if (tcp_check_req(sk, skb, req, true) == NULL) 5725 if (!tcp_check_req(sk, skb, req, true))
5698 goto discard; 5726 goto discard;
5699 } 5727 }
5700 5728
@@ -5780,7 +5808,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5780 * ACK we have received, this would have acknowledged 5808 * ACK we have received, this would have acknowledged
5781 * our SYNACK so stop the SYNACK timer. 5809 * our SYNACK so stop the SYNACK timer.
5782 */ 5810 */
5783 if (req != NULL) { 5811 if (req) {
5784 /* Return RST if ack_seq is invalid. 5812 /* Return RST if ack_seq is invalid.
5785 * Note that RFC793 only says to generate a 5813 * Note that RFC793 only says to generate a
5786 * DUPACK for it but for TCP Fast Open it seems 5814 * DUPACK for it but for TCP Fast Open it seems
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5aababa20a21..560f9571f7c4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -122,7 +122,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
122 and use initial timestamp retrieved from peer table. 122 and use initial timestamp retrieved from peer table.
123 */ 123 */
124 if (tcptw->tw_ts_recent_stamp && 124 if (tcptw->tw_ts_recent_stamp &&
125 (twp == NULL || (sysctl_tcp_tw_reuse && 125 (!twp || (sysctl_tcp_tw_reuse &&
126 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) { 126 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
127 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; 127 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
128 if (tp->write_seq == 0) 128 if (tp->write_seq == 0)
@@ -494,7 +494,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
494 /* Only in fast or simultaneous open. If a fast open socket is 494 /* Only in fast or simultaneous open. If a fast open socket is
495 * is already accepted it is treated as a connected one below. 495 * is already accepted it is treated as a connected one below.
496 */ 496 */
497 if (fastopen && fastopen->sk == NULL) 497 if (fastopen && !fastopen->sk)
498 break; 498 break;
499 499
500 if (!sock_owned_by_user(sk)) { 500 if (!sock_owned_by_user(sk)) {
@@ -1305,7 +1305,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1305 /* Copy over the MD5 key from the original socket */ 1305 /* Copy over the MD5 key from the original socket */
1306 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr, 1306 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1307 AF_INET); 1307 AF_INET);
1308 if (key != NULL) { 1308 if (key) {
1309 /* 1309 /*
1310 * We're using one, so create a matching key 1310 * We're using one, so create a matching key
1311 * on the newsk structure. If we fail to get 1311 * on the newsk structure. If we fail to get
@@ -1390,7 +1390,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1390 sk_mark_napi_id(sk, skb); 1390 sk_mark_napi_id(sk, skb);
1391 if (dst) { 1391 if (dst) {
1392 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || 1392 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1393 dst->ops->check(dst, 0) == NULL) { 1393 !dst->ops->check(dst, 0)) {
1394 dst_release(dst); 1394 dst_release(dst);
1395 sk->sk_rx_dst = NULL; 1395 sk->sk_rx_dst = NULL;
1396 } 1396 }
@@ -1469,7 +1469,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
1469 skb->sk = sk; 1469 skb->sk = sk;
1470 skb->destructor = sock_edemux; 1470 skb->destructor = sock_edemux;
1471 if (sk_fullsock(sk)) { 1471 if (sk_fullsock(sk)) {
1472 struct dst_entry *dst = sk->sk_rx_dst; 1472 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1473 1473
1474 if (dst) 1474 if (dst)
1475 dst = dst_check(dst, 0); 1475 dst = dst_check(dst, 0);
@@ -1797,7 +1797,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
1797 if (inet_csk(sk)->icsk_bind_hash) 1797 if (inet_csk(sk)->icsk_bind_hash)
1798 inet_put_port(sk); 1798 inet_put_port(sk);
1799 1799
1800 BUG_ON(tp->fastopen_rsk != NULL); 1800 BUG_ON(tp->fastopen_rsk);
1801 1801
1802 /* If socket is aborted during connect operation */ 1802 /* If socket is aborted during connect operation */
1803 tcp_free_fastopen_req(tp); 1803 tcp_free_fastopen_req(tp);
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 71ec14c87579..a51d63a43e33 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -28,7 +28,8 @@ static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *s
28 28
29struct tcp_fastopen_metrics { 29struct tcp_fastopen_metrics {
30 u16 mss; 30 u16 mss;
31 u16 syn_loss:10; /* Recurring Fast Open SYN losses */ 31 u16 syn_loss:10, /* Recurring Fast Open SYN losses */
32 try_exp:2; /* Request w/ exp. option (once) */
32 unsigned long last_syn_loss; /* Last Fast Open SYN loss */ 33 unsigned long last_syn_loss; /* Last Fast Open SYN loss */
33 struct tcp_fastopen_cookie cookie; 34 struct tcp_fastopen_cookie cookie;
34}; 35};
@@ -131,6 +132,8 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm,
131 if (fastopen_clear) { 132 if (fastopen_clear) {
132 tm->tcpm_fastopen.mss = 0; 133 tm->tcpm_fastopen.mss = 0;
133 tm->tcpm_fastopen.syn_loss = 0; 134 tm->tcpm_fastopen.syn_loss = 0;
135 tm->tcpm_fastopen.try_exp = 0;
136 tm->tcpm_fastopen.cookie.exp = false;
134 tm->tcpm_fastopen.cookie.len = 0; 137 tm->tcpm_fastopen.cookie.len = 0;
135 } 138 }
136} 139}
@@ -505,7 +508,7 @@ void tcp_init_metrics(struct sock *sk)
505 struct tcp_metrics_block *tm; 508 struct tcp_metrics_block *tm;
506 u32 val, crtt = 0; /* cached RTT scaled by 8 */ 509 u32 val, crtt = 0; /* cached RTT scaled by 8 */
507 510
508 if (dst == NULL) 511 if (!dst)
509 goto reset; 512 goto reset;
510 513
511 dst_confirm(dst); 514 dst_confirm(dst);
@@ -713,6 +716,8 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
713 if (tfom->mss) 716 if (tfom->mss)
714 *mss = tfom->mss; 717 *mss = tfom->mss;
715 *cookie = tfom->cookie; 718 *cookie = tfom->cookie;
719 if (cookie->len <= 0 && tfom->try_exp == 1)
720 cookie->exp = true;
716 *syn_loss = tfom->syn_loss; 721 *syn_loss = tfom->syn_loss;
717 *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0; 722 *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
718 } while (read_seqretry(&fastopen_seqlock, seq)); 723 } while (read_seqretry(&fastopen_seqlock, seq));
@@ -721,7 +726,8 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
721} 726}
722 727
723void tcp_fastopen_cache_set(struct sock *sk, u16 mss, 728void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
724 struct tcp_fastopen_cookie *cookie, bool syn_lost) 729 struct tcp_fastopen_cookie *cookie, bool syn_lost,
730 u16 try_exp)
725{ 731{
726 struct dst_entry *dst = __sk_dst_get(sk); 732 struct dst_entry *dst = __sk_dst_get(sk);
727 struct tcp_metrics_block *tm; 733 struct tcp_metrics_block *tm;
@@ -738,6 +744,9 @@ void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
738 tfom->mss = mss; 744 tfom->mss = mss;
739 if (cookie && cookie->len > 0) 745 if (cookie && cookie->len > 0)
740 tfom->cookie = *cookie; 746 tfom->cookie = *cookie;
747 else if (try_exp > tfom->try_exp &&
748 tfom->cookie.len <= 0 && !tfom->cookie.exp)
749 tfom->try_exp = try_exp;
741 if (syn_lost) { 750 if (syn_lost) {
742 ++tfom->syn_loss; 751 ++tfom->syn_loss;
743 tfom->last_syn_loss = jiffies; 752 tfom->last_syn_loss = jiffies;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 274e96fb369b..d7003911c894 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -294,7 +294,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
294 if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets) 294 if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
295 tw = inet_twsk_alloc(sk, state); 295 tw = inet_twsk_alloc(sk, state);
296 296
297 if (tw != NULL) { 297 if (tw) {
298 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); 298 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
299 const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1); 299 const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
300 struct inet_sock *inet = inet_sk(sk); 300 struct inet_sock *inet = inet_sk(sk);
@@ -332,7 +332,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
332 struct tcp_md5sig_key *key; 332 struct tcp_md5sig_key *key;
333 tcptw->tw_md5_key = NULL; 333 tcptw->tw_md5_key = NULL;
334 key = tp->af_specific->md5_lookup(sk, sk); 334 key = tp->af_specific->md5_lookup(sk, sk);
335 if (key != NULL) { 335 if (key) {
336 tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC); 336 tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
337 if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool()) 337 if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
338 BUG(); 338 BUG();
@@ -454,7 +454,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
454{ 454{
455 struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC); 455 struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
456 456
457 if (newsk != NULL) { 457 if (newsk) {
458 const struct inet_request_sock *ireq = inet_rsk(req); 458 const struct inet_request_sock *ireq = inet_rsk(req);
459 struct tcp_request_sock *treq = tcp_rsk(req); 459 struct tcp_request_sock *treq = tcp_rsk(req);
460 struct inet_connection_sock *newicsk = inet_csk(newsk); 460 struct inet_connection_sock *newicsk = inet_csk(newsk);
@@ -763,7 +763,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
763 * socket is created, wait for troubles. 763 * socket is created, wait for troubles.
764 */ 764 */
765 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); 765 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
766 if (child == NULL) 766 if (!child)
767 goto listen_overflow; 767 goto listen_overflow;
768 768
769 inet_csk_reqsk_queue_unlink(sk, req); 769 inet_csk_reqsk_queue_unlink(sk, req);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2e69b8d16e68..e662d85d1635 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -518,17 +518,26 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
518 518
519 if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) { 519 if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
520 struct tcp_fastopen_cookie *foc = opts->fastopen_cookie; 520 struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
521 u8 *p = (u8 *)ptr;
522 u32 len; /* Fast Open option length */
523
524 if (foc->exp) {
525 len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
526 *ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
527 TCPOPT_FASTOPEN_MAGIC);
528 p += TCPOLEN_EXP_FASTOPEN_BASE;
529 } else {
530 len = TCPOLEN_FASTOPEN_BASE + foc->len;
531 *p++ = TCPOPT_FASTOPEN;
532 *p++ = len;
533 }
521 534
522 *ptr++ = htonl((TCPOPT_EXP << 24) | 535 memcpy(p, foc->val, foc->len);
523 ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) | 536 if ((len & 3) == 2) {
524 TCPOPT_FASTOPEN_MAGIC); 537 p[foc->len] = TCPOPT_NOP;
525 538 p[foc->len + 1] = TCPOPT_NOP;
526 memcpy(ptr, foc->val, foc->len);
527 if ((foc->len & 3) == 2) {
528 u8 *align = ((u8 *)ptr) + foc->len;
529 align[0] = align[1] = TCPOPT_NOP;
530 } 539 }
531 ptr += (foc->len + 3) >> 2; 540 ptr += (len + 3) >> 2;
532 } 541 }
533} 542}
534 543
@@ -565,7 +574,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
565 opts->mss = tcp_advertise_mss(sk); 574 opts->mss = tcp_advertise_mss(sk);
566 remaining -= TCPOLEN_MSS_ALIGNED; 575 remaining -= TCPOLEN_MSS_ALIGNED;
567 576
568 if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { 577 if (likely(sysctl_tcp_timestamps && !*md5)) {
569 opts->options |= OPTION_TS; 578 opts->options |= OPTION_TS;
570 opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset; 579 opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
571 opts->tsecr = tp->rx_opt.ts_recent; 580 opts->tsecr = tp->rx_opt.ts_recent;
@@ -583,13 +592,17 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
583 } 592 }
584 593
585 if (fastopen && fastopen->cookie.len >= 0) { 594 if (fastopen && fastopen->cookie.len >= 0) {
586 u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len; 595 u32 need = fastopen->cookie.len;
596
597 need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
598 TCPOLEN_FASTOPEN_BASE;
587 need = (need + 3) & ~3U; /* Align to 32 bits */ 599 need = (need + 3) & ~3U; /* Align to 32 bits */
588 if (remaining >= need) { 600 if (remaining >= need) {
589 opts->options |= OPTION_FAST_OPEN_COOKIE; 601 opts->options |= OPTION_FAST_OPEN_COOKIE;
590 opts->fastopen_cookie = &fastopen->cookie; 602 opts->fastopen_cookie = &fastopen->cookie;
591 remaining -= need; 603 remaining -= need;
592 tp->syn_fastopen = 1; 604 tp->syn_fastopen = 1;
605 tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
593 } 606 }
594 } 607 }
595 608
@@ -642,7 +655,10 @@ static unsigned int tcp_synack_options(struct sock *sk,
642 remaining -= TCPOLEN_SACKPERM_ALIGNED; 655 remaining -= TCPOLEN_SACKPERM_ALIGNED;
643 } 656 }
644 if (foc != NULL && foc->len >= 0) { 657 if (foc != NULL && foc->len >= 0) {
645 u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len; 658 u32 need = foc->len;
659
660 need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
661 TCPOLEN_FASTOPEN_BASE;
646 need = (need + 3) & ~3U; /* Align to 32 bits */ 662 need = (need + 3) & ~3U; /* Align to 32 bits */
647 if (remaining >= need) { 663 if (remaining >= need) {
648 opts->options |= OPTION_FAST_OPEN_COOKIE; 664 opts->options |= OPTION_FAST_OPEN_COOKIE;
@@ -1148,7 +1164,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1148 1164
1149 /* Get a new skb... force flag on. */ 1165 /* Get a new skb... force flag on. */
1150 buff = sk_stream_alloc_skb(sk, nsize, gfp); 1166 buff = sk_stream_alloc_skb(sk, nsize, gfp);
1151 if (buff == NULL) 1167 if (!buff)
1152 return -ENOMEM; /* We'll just try again later. */ 1168 return -ENOMEM; /* We'll just try again later. */
1153 1169
1154 sk->sk_wmem_queued += buff->truesize; 1170 sk->sk_wmem_queued += buff->truesize;
@@ -1707,7 +1723,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1707 return tcp_fragment(sk, skb, len, mss_now, gfp); 1723 return tcp_fragment(sk, skb, len, mss_now, gfp);
1708 1724
1709 buff = sk_stream_alloc_skb(sk, 0, gfp); 1725 buff = sk_stream_alloc_skb(sk, 0, gfp);
1710 if (unlikely(buff == NULL)) 1726 if (unlikely(!buff))
1711 return -ENOMEM; 1727 return -ENOMEM;
1712 1728
1713 sk->sk_wmem_queued += buff->truesize; 1729 sk->sk_wmem_queued += buff->truesize;
@@ -1925,7 +1941,8 @@ static int tcp_mtu_probe(struct sock *sk)
1925 } 1941 }
1926 1942
1927 /* We're allowed to probe. Build it now. */ 1943 /* We're allowed to probe. Build it now. */
1928 if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL) 1944 nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC);
1945 if (!nskb)
1929 return -1; 1946 return -1;
1930 sk->sk_wmem_queued += nskb->truesize; 1947 sk->sk_wmem_queued += nskb->truesize;
1931 sk_mem_charge(sk, nskb->truesize); 1948 sk_mem_charge(sk, nskb->truesize);
@@ -2223,7 +2240,7 @@ void tcp_send_loss_probe(struct sock *sk)
2223 int mss = tcp_current_mss(sk); 2240 int mss = tcp_current_mss(sk);
2224 int err = -1; 2241 int err = -1;
2225 2242
2226 if (tcp_send_head(sk) != NULL) { 2243 if (tcp_send_head(sk)) {
2227 err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); 2244 err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2228 goto rearm_timer; 2245 goto rearm_timer;
2229 } 2246 }
@@ -2733,7 +2750,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
2733 if (skb == tcp_send_head(sk)) 2750 if (skb == tcp_send_head(sk))
2734 break; 2751 break;
2735 /* we could do better than to assign each time */ 2752 /* we could do better than to assign each time */
2736 if (hole == NULL) 2753 if (!hole)
2737 tp->retransmit_skb_hint = skb; 2754 tp->retransmit_skb_hint = skb;
2738 2755
2739 /* Assume this retransmit will generate 2756 /* Assume this retransmit will generate
@@ -2757,7 +2774,7 @@ begin_fwd:
2757 if (!tcp_can_forward_retransmit(sk)) 2774 if (!tcp_can_forward_retransmit(sk))
2758 break; 2775 break;
2759 /* Backtrack if necessary to non-L'ed skb */ 2776 /* Backtrack if necessary to non-L'ed skb */
2760 if (hole != NULL) { 2777 if (hole) {
2761 skb = hole; 2778 skb = hole;
2762 hole = NULL; 2779 hole = NULL;
2763 } 2780 }
@@ -2765,7 +2782,7 @@ begin_fwd:
2765 goto begin_fwd; 2782 goto begin_fwd;
2766 2783
2767 } else if (!(sacked & TCPCB_LOST)) { 2784 } else if (!(sacked & TCPCB_LOST)) {
2768 if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) 2785 if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
2769 hole = skb; 2786 hole = skb;
2770 continue; 2787 continue;
2771 2788
@@ -2810,7 +2827,7 @@ void tcp_send_fin(struct sock *sk)
2810 */ 2827 */
2811 mss_now = tcp_current_mss(sk); 2828 mss_now = tcp_current_mss(sk);
2812 2829
2813 if (tcp_send_head(sk) != NULL) { 2830 if (tcp_send_head(sk)) {
2814 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN; 2831 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
2815 TCP_SKB_CB(skb)->end_seq++; 2832 TCP_SKB_CB(skb)->end_seq++;
2816 tp->write_seq++; 2833 tp->write_seq++;
@@ -2868,14 +2885,14 @@ int tcp_send_synack(struct sock *sk)
2868 struct sk_buff *skb; 2885 struct sk_buff *skb;
2869 2886
2870 skb = tcp_write_queue_head(sk); 2887 skb = tcp_write_queue_head(sk);
2871 if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 2888 if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
2872 pr_debug("%s: wrong queue state\n", __func__); 2889 pr_debug("%s: wrong queue state\n", __func__);
2873 return -EFAULT; 2890 return -EFAULT;
2874 } 2891 }
2875 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { 2892 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
2876 if (skb_cloned(skb)) { 2893 if (skb_cloned(skb)) {
2877 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 2894 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
2878 if (nskb == NULL) 2895 if (!nskb)
2879 return -ENOMEM; 2896 return -ENOMEM;
2880 tcp_unlink_write_queue(skb, sk); 2897 tcp_unlink_write_queue(skb, sk);
2881 __skb_header_release(nskb); 2898 __skb_header_release(nskb);
@@ -3014,7 +3031,7 @@ static void tcp_connect_init(struct sock *sk)
3014 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); 3031 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
3015 3032
3016#ifdef CONFIG_TCP_MD5SIG 3033#ifdef CONFIG_TCP_MD5SIG
3017 if (tp->af_specific->md5_lookup(sk, sk) != NULL) 3034 if (tp->af_specific->md5_lookup(sk, sk))
3018 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; 3035 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
3019#endif 3036#endif
3020 3037
@@ -3300,7 +3317,7 @@ void tcp_send_ack(struct sock *sk)
3300 * sock. 3317 * sock.
3301 */ 3318 */
3302 buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); 3319 buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
3303 if (buff == NULL) { 3320 if (!buff) {
3304 inet_csk_schedule_ack(sk); 3321 inet_csk_schedule_ack(sk);
3305 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; 3322 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
3306 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 3323 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
@@ -3344,7 +3361,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
3344 3361
3345 /* We don't queue it, tcp_transmit_skb() sets ownership. */ 3362 /* We don't queue it, tcp_transmit_skb() sets ownership. */
3346 skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); 3363 skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
3347 if (skb == NULL) 3364 if (!skb)
3348 return -1; 3365 return -1;
3349 3366
3350 /* Reserve space for headers and set control bits. */ 3367 /* Reserve space for headers and set control bits. */
@@ -3375,8 +3392,8 @@ int tcp_write_wakeup(struct sock *sk)
3375 if (sk->sk_state == TCP_CLOSE) 3392 if (sk->sk_state == TCP_CLOSE)
3376 return -1; 3393 return -1;
3377 3394
3378 if ((skb = tcp_send_head(sk)) != NULL && 3395 skb = tcp_send_head(sk);
3379 before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { 3396 if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
3380 int err; 3397 int err;
3381 unsigned int mss = tcp_current_mss(sk); 3398 unsigned int mss = tcp_current_mss(sk);
3382 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 3399 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 2568fd282873..8c65dc147d8b 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -167,7 +167,7 @@ static int tcp_write_timeout(struct sock *sk)
167 if (icsk->icsk_retransmits) { 167 if (icsk->icsk_retransmits) {
168 dst_negative_advice(sk); 168 dst_negative_advice(sk);
169 if (tp->syn_fastopen || tp->syn_data) 169 if (tp->syn_fastopen || tp->syn_data)
170 tcp_fastopen_cache_set(sk, 0, NULL, true); 170 tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
171 if (tp->syn_data) 171 if (tp->syn_data)
172 NET_INC_STATS_BH(sock_net(sk), 172 NET_INC_STATS_BH(sock_net(sk),
173 LINUX_MIB_TCPFASTOPENACTIVEFAIL); 173 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 294af16633af..2162fc6ce1c1 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -633,7 +633,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
633 633
634 sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, 634 sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
635 iph->saddr, uh->source, skb->dev->ifindex, udptable); 635 iph->saddr, uh->source, skb->dev->ifindex, udptable);
636 if (sk == NULL) { 636 if (!sk) {
637 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); 637 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
638 return; /* No socket for error */ 638 return; /* No socket for error */
639 } 639 }
@@ -1011,7 +1011,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1011 if (connected) 1011 if (connected)
1012 rt = (struct rtable *)sk_dst_check(sk, 0); 1012 rt = (struct rtable *)sk_dst_check(sk, 0);
1013 1013
1014 if (rt == NULL) { 1014 if (!rt) {
1015 struct net *net = sock_net(sk); 1015 struct net *net = sock_net(sk);
1016 1016
1017 fl4 = &fl4_stack; 1017 fl4 = &fl4_stack;
@@ -1522,7 +1522,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1522 1522
1523 /* if we're overly short, let UDP handle it */ 1523 /* if we're overly short, let UDP handle it */
1524 encap_rcv = ACCESS_ONCE(up->encap_rcv); 1524 encap_rcv = ACCESS_ONCE(up->encap_rcv);
1525 if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) { 1525 if (skb->len > sizeof(struct udphdr) && encap_rcv) {
1526 int ret; 1526 int ret;
1527 1527
1528 /* Verify checksum before giving to encap */ 1528 /* Verify checksum before giving to encap */
@@ -1619,7 +1619,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
1619 1619
1620 for (i = 0; i < count; i++) { 1620 for (i = 0; i < count; i++) {
1621 sk = stack[i]; 1621 sk = stack[i];
1622 if (likely(skb1 == NULL)) 1622 if (likely(!skb1))
1623 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); 1623 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
1624 1624
1625 if (!skb1) { 1625 if (!skb1) {
@@ -1802,7 +1802,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
1802 saddr, daddr, udptable, proto); 1802 saddr, daddr, udptable, proto);
1803 1803
1804 sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); 1804 sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
1805 if (sk != NULL) { 1805 if (sk) {
1806 int ret; 1806 int ret;
1807 1807
1808 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) 1808 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 2dbfc1f1f7b3..b763c39ae1d7 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -58,7 +58,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
58 goto out_nosk; 58 goto out_nosk;
59 59
60 err = -ENOENT; 60 err = -ENOENT;
61 if (sk == NULL) 61 if (!sk)
62 goto out_nosk; 62 goto out_nosk;
63 63
64 err = sock_diag_check_cookie(sk, req->id.idiag_cookie); 64 err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 4915d8284a86..f9386160cbee 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -285,7 +285,7 @@ void udp_del_offload(struct udp_offload *uo)
285 pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port)); 285 pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
286unlock: 286unlock:
287 spin_unlock(&udp_offload_lock); 287 spin_unlock(&udp_offload_lock);
288 if (uo_priv != NULL) 288 if (uo_priv)
289 call_rcu(&uo_priv->rcu, udp_offload_free_routine); 289 call_rcu(&uo_priv->rcu, udp_offload_free_routine);
290} 290}
291EXPORT_SYMBOL(udp_del_offload); 291EXPORT_SYMBOL(udp_del_offload);
@@ -394,7 +394,7 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
394 break; 394 break;
395 } 395 }
396 396
397 if (uo_priv != NULL) { 397 if (uo_priv) {
398 NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; 398 NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
399 err = uo_priv->offload->callbacks.gro_complete(skb, 399 err = uo_priv->offload->callbacks.gro_complete(skb,
400 nhoff + sizeof(struct udphdr), 400 nhoff + sizeof(struct udphdr),
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index c83b35485056..6bb98cc193c9 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -75,7 +75,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
75} 75}
76EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock); 76EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);
77 77
78int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb, 78int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
79 __be32 src, __be32 dst, __u8 tos, __u8 ttl, 79 __be32 src, __be32 dst, __u8 tos, __u8 ttl,
80 __be16 df, __be16 src_port, __be16 dst_port, 80 __be16 df, __be16 src_port, __be16 dst_port,
81 bool xnet, bool nocheck) 81 bool xnet, bool nocheck)
@@ -92,7 +92,7 @@ int udp_tunnel_xmit_skb(struct rtable *rt, struct sk_buff *skb,
92 92
93 udp_set_csum(nocheck, skb, src, dst, skb->len); 93 udp_set_csum(nocheck, skb, src, dst, skb->len);
94 94
95 return iptunnel_xmit(skb->sk, rt, skb, src, dst, IPPROTO_UDP, 95 return iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP,
96 tos, ttl, df, xnet); 96 tos, ttl, df, xnet);
97} 97}
98EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb); 98EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index aac6197b7a71..60b032f58ccc 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -22,9 +22,9 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb)
22 return xfrm4_extract_header(skb); 22 return xfrm4_extract_header(skb);
23} 23}
24 24
25static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb) 25static inline int xfrm4_rcv_encap_finish(struct sock *sk, struct sk_buff *skb)
26{ 26{
27 if (skb_dst(skb) == NULL) { 27 if (!skb_dst(skb)) {
28 const struct iphdr *iph = ip_hdr(skb); 28 const struct iphdr *iph = ip_hdr(skb);
29 29
30 if (ip_route_input_noref(skb, iph->daddr, iph->saddr, 30 if (ip_route_input_noref(skb, iph->daddr, iph->saddr,
@@ -52,7 +52,8 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
52 iph->tot_len = htons(skb->len); 52 iph->tot_len = htons(skb->len);
53 ip_send_check(iph); 53 ip_send_check(iph);
54 54
55 NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, 55 NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, NULL, skb,
56 skb->dev, NULL,
56 xfrm4_rcv_encap_finish); 57 xfrm4_rcv_encap_finish);
57 return 0; 58 return 0;
58} 59}
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index dab73813cb92..2878dbfffeb7 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -69,7 +69,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
69} 69}
70EXPORT_SYMBOL(xfrm4_prepare_output); 70EXPORT_SYMBOL(xfrm4_prepare_output);
71 71
72int xfrm4_output_finish(struct sk_buff *skb) 72int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb)
73{ 73{
74 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 74 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
75 75
@@ -77,26 +77,26 @@ int xfrm4_output_finish(struct sk_buff *skb)
77 IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; 77 IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
78#endif 78#endif
79 79
80 return xfrm_output(skb); 80 return xfrm_output(sk, skb);
81} 81}
82 82
83static int __xfrm4_output(struct sk_buff *skb) 83static int __xfrm4_output(struct sock *sk, struct sk_buff *skb)
84{ 84{
85 struct xfrm_state *x = skb_dst(skb)->xfrm; 85 struct xfrm_state *x = skb_dst(skb)->xfrm;
86 86
87#ifdef CONFIG_NETFILTER 87#ifdef CONFIG_NETFILTER
88 if (!x) { 88 if (!x) {
89 IPCB(skb)->flags |= IPSKB_REROUTED; 89 IPCB(skb)->flags |= IPSKB_REROUTED;
90 return dst_output(skb); 90 return dst_output_sk(sk, skb);
91 } 91 }
92#endif 92#endif
93 93
94 return x->outer_mode->afinfo->output_finish(skb); 94 return x->outer_mode->afinfo->output_finish(sk, skb);
95} 95}
96 96
97int xfrm4_output(struct sock *sk, struct sk_buff *skb) 97int xfrm4_output(struct sock *sk, struct sk_buff *skb)
98{ 98{
99 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, 99 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
100 NULL, skb_dst(skb)->dev, __xfrm4_output, 100 NULL, skb_dst(skb)->dev, __xfrm4_output,
101 !(IPCB(skb)->flags & IPSKB_REROUTED)); 101 !(IPCB(skb)->flags & IPSKB_REROUTED));
102} 102}
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index c224c856247b..bff69746e05f 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -298,7 +298,7 @@ static void __net_exit xfrm4_net_exit(struct net *net)
298{ 298{
299 struct ctl_table *table; 299 struct ctl_table *table;
300 300
301 if (net->ipv4.xfrm4_hdr == NULL) 301 if (!net->ipv4.xfrm4_hdr)
302 return; 302 return;
303 303
304 table = net->ipv4.xfrm4_hdr->ctl_table_arg; 304 table = net->ipv4.xfrm4_hdr->ctl_table_arg;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 5c9e94cb1b2c..37b70e82bff8 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4858,8 +4858,8 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
4858 (dev->addr_len && 4858 (dev->addr_len &&
4859 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || 4859 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
4860 nla_put_u32(skb, IFLA_MTU, dev->mtu) || 4860 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4861 (dev->ifindex != dev->iflink && 4861 (dev->ifindex != dev_get_iflink(dev) &&
4862 nla_put_u32(skb, IFLA_LINK, dev->iflink))) 4862 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
4863 goto nla_put_failure; 4863 goto nla_put_failure;
4864 protoinfo = nla_nest_start(skb, IFLA_PROTINFO); 4864 protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
4865 if (!protoinfo) 4865 if (!protoinfo)
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 61fb184b818d..2367a16eae58 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -315,7 +315,9 @@ out_fib6_rules_ops:
315 315
316static void __net_exit fib6_rules_net_exit(struct net *net) 316static void __net_exit fib6_rules_net_exit(struct net *net)
317{ 317{
318 rtnl_lock();
318 fib_rules_unregister(net->ipv6.fib6_rules_ops); 319 fib_rules_unregister(net->ipv6.fib6_rules_ops);
320 rtnl_unlock();
319} 321}
320 322
321static struct pernet_operations fib6_rules_net_ops = { 323static struct pernet_operations fib6_rules_net_ops = {
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 0f4e73da14e4..b5e6cc1d4a73 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -760,7 +760,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
760 760
761 skb_set_inner_protocol(skb, protocol); 761 skb_set_inner_protocol(skb, protocol);
762 762
763 ip6tunnel_xmit(skb, dev); 763 ip6tunnel_xmit(NULL, skb, dev);
764 if (ndst) 764 if (ndst)
765 ip6_tnl_dst_store(tunnel, ndst); 765 ip6_tnl_dst_store(tunnel, ndst);
766 return 0; 766 return 0;
@@ -1216,6 +1216,7 @@ static const struct net_device_ops ip6gre_netdev_ops = {
1216 .ndo_do_ioctl = ip6gre_tunnel_ioctl, 1216 .ndo_do_ioctl = ip6gre_tunnel_ioctl,
1217 .ndo_change_mtu = ip6gre_tunnel_change_mtu, 1217 .ndo_change_mtu = ip6gre_tunnel_change_mtu,
1218 .ndo_get_stats64 = ip_tunnel_get_stats64, 1218 .ndo_get_stats64 = ip_tunnel_get_stats64,
1219 .ndo_get_iflink = ip6_tnl_get_iflink,
1219}; 1220};
1220 1221
1221static void ip6gre_dev_free(struct net_device *dev) 1222static void ip6gre_dev_free(struct net_device *dev)
@@ -1238,7 +1239,6 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
1238 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1239 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1239 dev->mtu -= 8; 1240 dev->mtu -= 8;
1240 dev->flags |= IFF_NOARP; 1241 dev->flags |= IFF_NOARP;
1241 dev->iflink = 0;
1242 dev->addr_len = sizeof(struct in6_addr); 1242 dev->addr_len = sizeof(struct in6_addr);
1243 netif_keep_dst(dev); 1243 netif_keep_dst(dev);
1244} 1244}
@@ -1270,8 +1270,6 @@ static int ip6gre_tunnel_init(struct net_device *dev)
1270 u64_stats_init(&ip6gre_tunnel_stats->syncp); 1270 u64_stats_init(&ip6gre_tunnel_stats->syncp);
1271 } 1271 }
1272 1272
1273 dev->iflink = tunnel->parms.link;
1274
1275 return 0; 1273 return 0;
1276} 1274}
1277 1275
@@ -1480,8 +1478,6 @@ static int ip6gre_tap_init(struct net_device *dev)
1480 if (!dev->tstats) 1478 if (!dev->tstats)
1481 return -ENOMEM; 1479 return -ENOMEM;
1482 1480
1483 dev->iflink = tunnel->parms.link;
1484
1485 return 0; 1481 return 0;
1486} 1482}
1487 1483
@@ -1493,6 +1489,7 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
1493 .ndo_validate_addr = eth_validate_addr, 1489 .ndo_validate_addr = eth_validate_addr,
1494 .ndo_change_mtu = ip6gre_tunnel_change_mtu, 1490 .ndo_change_mtu = ip6gre_tunnel_change_mtu,
1495 .ndo_get_stats64 = ip_tunnel_get_stats64, 1491 .ndo_get_stats64 = ip_tunnel_get_stats64,
1492 .ndo_get_iflink = ip6_tnl_get_iflink,
1496}; 1493};
1497 1494
1498static void ip6gre_tap_setup(struct net_device *dev) 1495static void ip6gre_tap_setup(struct net_device *dev)
@@ -1503,7 +1500,6 @@ static void ip6gre_tap_setup(struct net_device *dev)
1503 dev->netdev_ops = &ip6gre_tap_netdev_ops; 1500 dev->netdev_ops = &ip6gre_tap_netdev_ops;
1504 dev->destructor = ip6gre_dev_free; 1501 dev->destructor = ip6gre_dev_free;
1505 1502
1506 dev->iflink = 0;
1507 dev->features |= NETIF_F_NETNS_LOCAL; 1503 dev->features |= NETIF_F_NETNS_LOCAL;
1508} 1504}
1509 1505
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index fb97f7f8d4ed..f2e464eba5ef 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -46,8 +46,7 @@
46#include <net/xfrm.h> 46#include <net/xfrm.h>
47#include <net/inet_ecn.h> 47#include <net/inet_ecn.h>
48 48
49 49int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb)
50int ip6_rcv_finish(struct sk_buff *skb)
51{ 50{
52 if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) { 51 if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
53 const struct inet6_protocol *ipprot; 52 const struct inet6_protocol *ipprot;
@@ -183,7 +182,8 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
183 /* Must drop socket now because of tproxy. */ 182 /* Must drop socket now because of tproxy. */
184 skb_orphan(skb); 183 skb_orphan(skb);
185 184
186 return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, dev, NULL, 185 return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, NULL, skb,
186 dev, NULL,
187 ip6_rcv_finish); 187 ip6_rcv_finish);
188err: 188err:
189 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS); 189 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
@@ -198,7 +198,7 @@ drop:
198 */ 198 */
199 199
200 200
201static int ip6_input_finish(struct sk_buff *skb) 201static int ip6_input_finish(struct sock *sk, struct sk_buff *skb)
202{ 202{
203 struct net *net = dev_net(skb_dst(skb)->dev); 203 struct net *net = dev_net(skb_dst(skb)->dev);
204 const struct inet6_protocol *ipprot; 204 const struct inet6_protocol *ipprot;
@@ -277,7 +277,8 @@ discard:
277 277
278int ip6_input(struct sk_buff *skb) 278int ip6_input(struct sk_buff *skb)
279{ 279{
280 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN, skb, skb->dev, NULL, 280 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN, NULL, skb,
281 skb->dev, NULL,
281 ip6_input_finish); 282 ip6_input_finish);
282} 283}
283 284
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 84c58da10f5c..7fde1f265c90 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -56,7 +56,7 @@
56#include <net/checksum.h> 56#include <net/checksum.h>
57#include <linux/mroute6.h> 57#include <linux/mroute6.h>
58 58
59static int ip6_finish_output2(struct sk_buff *skb) 59static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb)
60{ 60{
61 struct dst_entry *dst = skb_dst(skb); 61 struct dst_entry *dst = skb_dst(skb);
62 struct net_device *dev = dst->dev; 62 struct net_device *dev = dst->dev;
@@ -70,7 +70,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
70 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { 70 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
71 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); 71 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
72 72
73 if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) && 73 if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
74 ((mroute6_socket(dev_net(dev), skb) && 74 ((mroute6_socket(dev_net(dev), skb) &&
75 !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || 75 !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
76 ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, 76 ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
@@ -82,7 +82,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
82 */ 82 */
83 if (newskb) 83 if (newskb)
84 NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, 84 NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
85 newskb, NULL, newskb->dev, 85 sk, newskb, NULL, newskb->dev,
86 dev_loopback_xmit); 86 dev_loopback_xmit);
87 87
88 if (ipv6_hdr(skb)->hop_limit == 0) { 88 if (ipv6_hdr(skb)->hop_limit == 0) {
@@ -122,14 +122,14 @@ static int ip6_finish_output2(struct sk_buff *skb)
122 return -EINVAL; 122 return -EINVAL;
123} 123}
124 124
125static int ip6_finish_output(struct sk_buff *skb) 125static int ip6_finish_output(struct sock *sk, struct sk_buff *skb)
126{ 126{
127 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || 127 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
128 dst_allfrag(skb_dst(skb)) || 128 dst_allfrag(skb_dst(skb)) ||
129 (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size)) 129 (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
130 return ip6_fragment(skb, ip6_finish_output2); 130 return ip6_fragment(sk, skb, ip6_finish_output2);
131 else 131 else
132 return ip6_finish_output2(skb); 132 return ip6_finish_output2(sk, skb);
133} 133}
134 134
135int ip6_output(struct sock *sk, struct sk_buff *skb) 135int ip6_output(struct sock *sk, struct sk_buff *skb)
@@ -143,7 +143,8 @@ int ip6_output(struct sock *sk, struct sk_buff *skb)
143 return 0; 143 return 0;
144 } 144 }
145 145
146 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev, 146 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb,
147 NULL, dev,
147 ip6_finish_output, 148 ip6_finish_output,
148 !(IP6CB(skb)->flags & IP6SKB_REROUTED)); 149 !(IP6CB(skb)->flags & IP6SKB_REROUTED));
149} 150}
@@ -223,8 +224,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
223 if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) { 224 if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
224 IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)), 225 IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
225 IPSTATS_MIB_OUT, skb->len); 226 IPSTATS_MIB_OUT, skb->len);
226 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, 227 return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
227 dst->dev, dst_output); 228 NULL, dst->dev, dst_output_sk);
228 } 229 }
229 230
230 skb->dev = dst->dev; 231 skb->dev = dst->dev;
@@ -316,10 +317,10 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
316 return 0; 317 return 0;
317} 318}
318 319
319static inline int ip6_forward_finish(struct sk_buff *skb) 320static inline int ip6_forward_finish(struct sock *sk, struct sk_buff *skb)
320{ 321{
321 skb_sender_cpu_clear(skb); 322 skb_sender_cpu_clear(skb);
322 return dst_output(skb); 323 return dst_output_sk(sk, skb);
323} 324}
324 325
325static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst) 326static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
@@ -511,7 +512,8 @@ int ip6_forward(struct sk_buff *skb)
511 512
512 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); 513 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
513 IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); 514 IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
514 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev, 515 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, NULL, skb,
516 skb->dev, dst->dev,
515 ip6_forward_finish); 517 ip6_forward_finish);
516 518
517error: 519error:
@@ -538,11 +540,13 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
538 skb_copy_secmark(to, from); 540 skb_copy_secmark(to, from);
539} 541}
540 542
541int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) 543int ip6_fragment(struct sock *sk, struct sk_buff *skb,
544 int (*output)(struct sock *, struct sk_buff *))
542{ 545{
543 struct sk_buff *frag; 546 struct sk_buff *frag;
544 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); 547 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
545 struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; 548 struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
549 inet6_sk(skb->sk) : NULL;
546 struct ipv6hdr *tmp_hdr; 550 struct ipv6hdr *tmp_hdr;
547 struct frag_hdr *fh; 551 struct frag_hdr *fh;
548 unsigned int mtu, hlen, left, len; 552 unsigned int mtu, hlen, left, len;
@@ -666,7 +670,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
666 ip6_copy_metadata(frag, skb); 670 ip6_copy_metadata(frag, skb);
667 } 671 }
668 672
669 err = output(skb); 673 err = output(sk, skb);
670 if (!err) 674 if (!err)
671 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), 675 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
672 IPSTATS_MIB_FRAGCREATES); 676 IPSTATS_MIB_FRAGCREATES);
@@ -799,7 +803,7 @@ slow_path:
799 /* 803 /*
800 * Put this fragment into the sending queue. 804 * Put this fragment into the sending queue.
801 */ 805 */
802 err = output(frag); 806 err = output(sk, frag);
803 if (err) 807 if (err)
804 goto fail; 808 goto fail;
805 809
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9bd85f0dff69..5cafd92c2312 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1100,7 +1100,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
1100 ipv6h->nexthdr = proto; 1100 ipv6h->nexthdr = proto;
1101 ipv6h->saddr = fl6->saddr; 1101 ipv6h->saddr = fl6->saddr;
1102 ipv6h->daddr = fl6->daddr; 1102 ipv6h->daddr = fl6->daddr;
1103 ip6tunnel_xmit(skb, dev); 1103 ip6tunnel_xmit(NULL, skb, dev);
1104 if (ndst) 1104 if (ndst)
1105 ip6_tnl_dst_store(t, ndst); 1105 ip6_tnl_dst_store(t, ndst);
1106 return 0; 1106 return 0;
@@ -1264,8 +1264,6 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
1264 else 1264 else
1265 dev->flags &= ~IFF_POINTOPOINT; 1265 dev->flags &= ~IFF_POINTOPOINT;
1266 1266
1267 dev->iflink = p->link;
1268
1269 if (p->flags & IP6_TNL_F_CAP_XMIT) { 1267 if (p->flags & IP6_TNL_F_CAP_XMIT) {
1270 int strict = (ipv6_addr_type(&p->raddr) & 1268 int strict = (ipv6_addr_type(&p->raddr) &
1271 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL)); 1269 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
@@ -1517,6 +1515,13 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1517 return 0; 1515 return 0;
1518} 1516}
1519 1517
1518int ip6_tnl_get_iflink(const struct net_device *dev)
1519{
1520 struct ip6_tnl *t = netdev_priv(dev);
1521
1522 return t->parms.link;
1523}
1524EXPORT_SYMBOL(ip6_tnl_get_iflink);
1520 1525
1521static const struct net_device_ops ip6_tnl_netdev_ops = { 1526static const struct net_device_ops ip6_tnl_netdev_ops = {
1522 .ndo_init = ip6_tnl_dev_init, 1527 .ndo_init = ip6_tnl_dev_init,
@@ -1525,6 +1530,7 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
1525 .ndo_do_ioctl = ip6_tnl_ioctl, 1530 .ndo_do_ioctl = ip6_tnl_ioctl,
1526 .ndo_change_mtu = ip6_tnl_change_mtu, 1531 .ndo_change_mtu = ip6_tnl_change_mtu,
1527 .ndo_get_stats = ip6_get_stats, 1532 .ndo_get_stats = ip6_get_stats,
1533 .ndo_get_iflink = ip6_tnl_get_iflink,
1528}; 1534};
1529 1535
1530 1536
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index 32d9b268e7d8..bba8903e871f 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -62,7 +62,8 @@ error:
62} 62}
63EXPORT_SYMBOL_GPL(udp_sock_create6); 63EXPORT_SYMBOL_GPL(udp_sock_create6);
64 64
65int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb, 65int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
66 struct sk_buff *skb,
66 struct net_device *dev, struct in6_addr *saddr, 67 struct net_device *dev, struct in6_addr *saddr,
67 struct in6_addr *daddr, 68 struct in6_addr *daddr,
68 __u8 prio, __u8 ttl, __be16 src_port, 69 __u8 prio, __u8 ttl, __be16 src_port,
@@ -97,7 +98,7 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
97 ip6h->daddr = *daddr; 98 ip6h->daddr = *daddr;
98 ip6h->saddr = *saddr; 99 ip6h->saddr = *saddr;
99 100
100 ip6tunnel_xmit(skb, dev); 101 ip6tunnel_xmit(sk, skb, dev);
101 return 0; 102 return 0;
102} 103}
103EXPORT_SYMBOL_GPL(udp_tunnel6_xmit_skb); 104EXPORT_SYMBOL_GPL(udp_tunnel6_xmit_skb);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 53d90ed68905..b53148444e15 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -601,8 +601,6 @@ static void vti6_link_config(struct ip6_tnl *t)
601 dev->flags |= IFF_POINTOPOINT; 601 dev->flags |= IFF_POINTOPOINT;
602 else 602 else
603 dev->flags &= ~IFF_POINTOPOINT; 603 dev->flags &= ~IFF_POINTOPOINT;
604
605 dev->iflink = p->link;
606} 604}
607 605
608/** 606/**
@@ -808,6 +806,7 @@ static const struct net_device_ops vti6_netdev_ops = {
808 .ndo_do_ioctl = vti6_ioctl, 806 .ndo_do_ioctl = vti6_ioctl,
809 .ndo_change_mtu = vti6_change_mtu, 807 .ndo_change_mtu = vti6_change_mtu,
810 .ndo_get_stats64 = ip_tunnel_get_stats64, 808 .ndo_get_stats64 = ip_tunnel_get_stats64,
809 .ndo_get_iflink = ip6_tnl_get_iflink,
811}; 810};
812 811
813/** 812/**
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index caf6b99374e6..74ceb73c1c9a 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -250,7 +250,7 @@ static int __net_init ip6mr_rules_init(struct net *net)
250 return 0; 250 return 0;
251 251
252err2: 252err2:
253 kfree(mrt); 253 ip6mr_free_table(mrt);
254err1: 254err1:
255 fib_rules_unregister(ops); 255 fib_rules_unregister(ops);
256 return err; 256 return err;
@@ -265,8 +265,8 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
265 list_del(&mrt->list); 265 list_del(&mrt->list);
266 ip6mr_free_table(mrt); 266 ip6mr_free_table(mrt);
267 } 267 }
268 rtnl_unlock();
269 fib_rules_unregister(net->ipv6.mr6_rules_ops); 268 fib_rules_unregister(net->ipv6.mr6_rules_ops);
269 rtnl_unlock();
270} 270}
271#else 271#else
272#define ip6mr_for_each_table(mrt, net) \ 272#define ip6mr_for_each_table(mrt, net) \
@@ -334,7 +334,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
334 334
335static void ip6mr_free_table(struct mr6_table *mrt) 335static void ip6mr_free_table(struct mr6_table *mrt)
336{ 336{
337 del_timer(&mrt->ipmr_expire_timer); 337 del_timer_sync(&mrt->ipmr_expire_timer);
338 mroute_clean_tables(mrt); 338 mroute_clean_tables(mrt);
339 kfree(mrt); 339 kfree(mrt);
340} 340}
@@ -718,8 +718,14 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
718 return NETDEV_TX_OK; 718 return NETDEV_TX_OK;
719} 719}
720 720
721static int reg_vif_get_iflink(const struct net_device *dev)
722{
723 return 0;
724}
725
721static const struct net_device_ops reg_vif_netdev_ops = { 726static const struct net_device_ops reg_vif_netdev_ops = {
722 .ndo_start_xmit = reg_vif_xmit, 727 .ndo_start_xmit = reg_vif_xmit,
728 .ndo_get_iflink = reg_vif_get_iflink,
723}; 729};
724 730
725static void reg_vif_setup(struct net_device *dev) 731static void reg_vif_setup(struct net_device *dev)
@@ -752,7 +758,6 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
752 free_netdev(dev); 758 free_netdev(dev);
753 return NULL; 759 return NULL;
754 } 760 }
755 dev->iflink = 0;
756 761
757 if (dev_open(dev)) 762 if (dev_open(dev))
758 goto failure; 763 goto failure;
@@ -992,7 +997,7 @@ static int mif6_add(struct net *net, struct mr6_table *mrt,
992 v->pkt_out = 0; 997 v->pkt_out = 0;
993 v->link = dev->ifindex; 998 v->link = dev->ifindex;
994 if (v->flags & MIFF_REGISTER) 999 if (v->flags & MIFF_REGISTER)
995 v->link = dev->iflink; 1000 v->link = dev_get_iflink(dev);
996 1001
997 /* And finish update writing critical data */ 1002 /* And finish update writing critical data */
998 write_lock_bh(&mrt_lock); 1003 write_lock_bh(&mrt_lock);
@@ -1981,13 +1986,13 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1981} 1986}
1982#endif 1987#endif
1983 1988
1984static inline int ip6mr_forward2_finish(struct sk_buff *skb) 1989static inline int ip6mr_forward2_finish(struct sock *sk, struct sk_buff *skb)
1985{ 1990{
1986 IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), 1991 IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
1987 IPSTATS_MIB_OUTFORWDATAGRAMS); 1992 IPSTATS_MIB_OUTFORWDATAGRAMS);
1988 IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), 1993 IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
1989 IPSTATS_MIB_OUTOCTETS, skb->len); 1994 IPSTATS_MIB_OUTOCTETS, skb->len);
1990 return dst_output(skb); 1995 return dst_output_sk(sk, skb);
1991} 1996}
1992 1997
1993/* 1998/*
@@ -2059,7 +2064,8 @@ static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
2059 2064
2060 IP6CB(skb)->flags |= IP6SKB_FORWARDED; 2065 IP6CB(skb)->flags |= IP6SKB_FORWARDED;
2061 2066
2062 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev, 2067 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, NULL, skb,
2068 skb->dev, dev,
2063 ip6mr_forward2_finish); 2069 ip6mr_forward2_finish);
2064 2070
2065out_free: 2071out_free:
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index fac1f27e428e..083b2927fc67 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1644,8 +1644,9 @@ static void mld_sendpack(struct sk_buff *skb)
1644 1644
1645 payload_len = skb->len; 1645 payload_len = skb->len;
1646 1646
1647 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, 1647 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
1648 dst_output); 1648 net->ipv6.igmp_sk, skb, NULL, skb->dev,
1649 dst_output_sk);
1649out: 1650out:
1650 if (!err) { 1651 if (!err) {
1651 ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT); 1652 ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
@@ -2007,8 +2008,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
2007 } 2008 }
2008 2009
2009 skb_dst_set(skb, dst); 2010 skb_dst_set(skb, dst);
2010 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, 2011 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
2011 dst_output); 2012 NULL, skb->dev, dst_output_sk);
2012out: 2013out:
2013 if (!err) { 2014 if (!err) {
2014 ICMP6MSGOUT_INC_STATS(net, idev, type); 2015 ICMP6MSGOUT_INC_STATS(net, idev, type);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index c283827d60e2..96f153c0846b 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -463,8 +463,9 @@ static void ndisc_send_skb(struct sk_buff *skb,
463 idev = __in6_dev_get(dst->dev); 463 idev = __in6_dev_get(dst->dev);
464 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); 464 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
465 465
466 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, 466 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
467 dst_output); 467 NULL, dst->dev,
468 dst_output_sk);
468 if (!err) { 469 if (!err) {
469 ICMP6MSGOUT_INC_STATS(net, idev, type); 470 ICMP6MSGOUT_INC_STATS(net, idev, type);
470 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); 471 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
@@ -1225,7 +1226,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1225 if (rt) 1226 if (rt)
1226 rt6_set_expires(rt, jiffies + (HZ * lifetime)); 1227 rt6_set_expires(rt, jiffies + (HZ * lifetime));
1227 if (ra_msg->icmph.icmp6_hop_limit) { 1228 if (ra_msg->icmph.icmp6_hop_limit) {
1228 in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; 1229 /* Only set hop_limit on the interface if it is higher than
1230 * the current hop_limit.
1231 */
1232 if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
1233 in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
1234 } else {
1235 ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
1236 }
1229 if (rt) 1237 if (rt)
1230 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 1238 dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
1231 ra_msg->icmph.icmp6_hop_limit); 1239 ra_msg->icmph.icmp6_hop_limit);
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 398377a9d018..d958718b5031 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -84,7 +84,7 @@ static void nf_ip6_saveroute(const struct sk_buff *skb,
84{ 84{
85 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry); 85 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
86 86
87 if (entry->hook == NF_INET_LOCAL_OUT) { 87 if (entry->state.hook == NF_INET_LOCAL_OUT) {
88 const struct ipv6hdr *iph = ipv6_hdr(skb); 88 const struct ipv6hdr *iph = ipv6_hdr(skb);
89 89
90 rt_info->daddr = iph->daddr; 90 rt_info->daddr = iph->daddr;
@@ -98,7 +98,7 @@ static int nf_ip6_reroute(struct sk_buff *skb,
98{ 98{
99 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry); 99 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
100 100
101 if (entry->hook == NF_INET_LOCAL_OUT) { 101 if (entry->state.hook == NF_INET_LOCAL_OUT) {
102 const struct ipv6hdr *iph = ipv6_hdr(skb); 102 const struct ipv6hdr *iph = ipv6_hdr(skb);
103 if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) || 103 if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
104 !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) || 104 !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 83f59dc3cccc..1a732a1d3c8e 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -317,8 +317,7 @@ ip6t_next_entry(const struct ip6t_entry *entry)
317unsigned int 317unsigned int
318ip6t_do_table(struct sk_buff *skb, 318ip6t_do_table(struct sk_buff *skb,
319 unsigned int hook, 319 unsigned int hook,
320 const struct net_device *in, 320 const struct nf_hook_state *state,
321 const struct net_device *out,
322 struct xt_table *table) 321 struct xt_table *table)
323{ 322{
324 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); 323 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
@@ -333,8 +332,8 @@ ip6t_do_table(struct sk_buff *skb,
333 unsigned int addend; 332 unsigned int addend;
334 333
335 /* Initialization */ 334 /* Initialization */
336 indev = in ? in->name : nulldevname; 335 indev = state->in ? state->in->name : nulldevname;
337 outdev = out ? out->name : nulldevname; 336 outdev = state->out ? state->out->name : nulldevname;
338 /* We handle fragments by dealing with the first fragment as 337 /* We handle fragments by dealing with the first fragment as
339 * if it was a normal packet. All other fragments are treated 338 * if it was a normal packet. All other fragments are treated
340 * normally, except that they will NEVER match rules that ask 339 * normally, except that they will NEVER match rules that ask
@@ -342,8 +341,8 @@ ip6t_do_table(struct sk_buff *skb,
342 * rule is also a fragment-specific rule, non-fragments won't 341 * rule is also a fragment-specific rule, non-fragments won't
343 * match it. */ 342 * match it. */
344 acpar.hotdrop = false; 343 acpar.hotdrop = false;
345 acpar.in = in; 344 acpar.in = state->in;
346 acpar.out = out; 345 acpar.out = state->out;
347 acpar.family = NFPROTO_IPV6; 346 acpar.family = NFPROTO_IPV6;
348 acpar.hooknum = hook; 347 acpar.hooknum = hook;
349 348
@@ -393,7 +392,7 @@ ip6t_do_table(struct sk_buff *skb,
393#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) 392#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
394 /* The packet is traced: log it */ 393 /* The packet is traced: log it */
395 if (unlikely(skb->nf_trace)) 394 if (unlikely(skb->nf_trace))
396 trace_packet(skb, hook, in, out, 395 trace_packet(skb, hook, state->in, state->out,
397 table->name, private, e); 396 table->name, private, e);
398#endif 397#endif
399 /* Standard target? */ 398 /* Standard target? */
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index a0d17270117c..6edb7b106de7 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -315,11 +315,9 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
315 315
316static unsigned int ipv6_synproxy_hook(const struct nf_hook_ops *ops, 316static unsigned int ipv6_synproxy_hook(const struct nf_hook_ops *ops,
317 struct sk_buff *skb, 317 struct sk_buff *skb,
318 const struct net_device *in, 318 const struct nf_hook_state *nhs)
319 const struct net_device *out,
320 int (*okfn)(struct sk_buff *))
321{ 319{
322 struct synproxy_net *snet = synproxy_pernet(dev_net(in ? : out)); 320 struct synproxy_net *snet = synproxy_pernet(dev_net(nhs->in ? : nhs->out));
323 enum ip_conntrack_info ctinfo; 321 enum ip_conntrack_info ctinfo;
324 struct nf_conn *ct; 322 struct nf_conn *ct;
325 struct nf_conn_synproxy *synproxy; 323 struct nf_conn_synproxy *synproxy;
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index ca7f6c128086..5c33d8abc077 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -33,13 +33,11 @@ static const struct xt_table packet_filter = {
33/* The work comes in here from netfilter.c. */ 33/* The work comes in here from netfilter.c. */
34static unsigned int 34static unsigned int
35ip6table_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, 35ip6table_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
36 const struct net_device *in, const struct net_device *out, 36 const struct nf_hook_state *state)
37 int (*okfn)(struct sk_buff *))
38{ 37{
39 const struct net *net = dev_net((in != NULL) ? in : out); 38 const struct net *net = dev_net(state->in ? state->in : state->out);
40 39
41 return ip6t_do_table(skb, ops->hooknum, in, out, 40 return ip6t_do_table(skb, ops->hooknum, state, net->ipv6.ip6table_filter);
42 net->ipv6.ip6table_filter);
43} 41}
44 42
45static struct nf_hook_ops *filter_ops __read_mostly; 43static struct nf_hook_ops *filter_ops __read_mostly;
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 307bbb782d14..b551f5b79fe2 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -32,7 +32,7 @@ static const struct xt_table packet_mangler = {
32}; 32};
33 33
34static unsigned int 34static unsigned int
35ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out) 35ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
36{ 36{
37 unsigned int ret; 37 unsigned int ret;
38 struct in6_addr saddr, daddr; 38 struct in6_addr saddr, daddr;
@@ -57,8 +57,8 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
57 /* flowlabel and prio (includes version, which shouldn't change either */ 57 /* flowlabel and prio (includes version, which shouldn't change either */
58 flowlabel = *((u_int32_t *)ipv6_hdr(skb)); 58 flowlabel = *((u_int32_t *)ipv6_hdr(skb));
59 59
60 ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, NULL, out, 60 ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, state,
61 dev_net(out)->ipv6.ip6table_mangle); 61 dev_net(state->out)->ipv6.ip6table_mangle);
62 62
63 if (ret != NF_DROP && ret != NF_STOLEN && 63 if (ret != NF_DROP && ret != NF_STOLEN &&
64 (!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) || 64 (!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) ||
@@ -77,17 +77,16 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
77/* The work comes in here from netfilter.c. */ 77/* The work comes in here from netfilter.c. */
78static unsigned int 78static unsigned int
79ip6table_mangle_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, 79ip6table_mangle_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
80 const struct net_device *in, const struct net_device *out, 80 const struct nf_hook_state *state)
81 int (*okfn)(struct sk_buff *))
82{ 81{
83 if (ops->hooknum == NF_INET_LOCAL_OUT) 82 if (ops->hooknum == NF_INET_LOCAL_OUT)
84 return ip6t_mangle_out(skb, out); 83 return ip6t_mangle_out(skb, state);
85 if (ops->hooknum == NF_INET_POST_ROUTING) 84 if (ops->hooknum == NF_INET_POST_ROUTING)
86 return ip6t_do_table(skb, ops->hooknum, in, out, 85 return ip6t_do_table(skb, ops->hooknum, state,
87 dev_net(out)->ipv6.ip6table_mangle); 86 dev_net(state->out)->ipv6.ip6table_mangle);
88 /* INPUT/FORWARD */ 87 /* INPUT/FORWARD */
89 return ip6t_do_table(skb, ops->hooknum, in, out, 88 return ip6t_do_table(skb, ops->hooknum, state,
90 dev_net(in)->ipv6.ip6table_mangle); 89 dev_net(state->in)->ipv6.ip6table_mangle);
91} 90}
92 91
93static struct nf_hook_ops *mangle_ops __read_mostly; 92static struct nf_hook_ops *mangle_ops __read_mostly;
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index b0634ac996b7..c3a7f7af0ed4 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -32,49 +32,40 @@ static const struct xt_table nf_nat_ipv6_table = {
32 32
33static unsigned int ip6table_nat_do_chain(const struct nf_hook_ops *ops, 33static unsigned int ip6table_nat_do_chain(const struct nf_hook_ops *ops,
34 struct sk_buff *skb, 34 struct sk_buff *skb,
35 const struct net_device *in, 35 const struct nf_hook_state *state,
36 const struct net_device *out,
37 struct nf_conn *ct) 36 struct nf_conn *ct)
38{ 37{
39 struct net *net = nf_ct_net(ct); 38 struct net *net = nf_ct_net(ct);
40 39
41 return ip6t_do_table(skb, ops->hooknum, in, out, net->ipv6.ip6table_nat); 40 return ip6t_do_table(skb, ops->hooknum, state, net->ipv6.ip6table_nat);
42} 41}
43 42
44static unsigned int ip6table_nat_fn(const struct nf_hook_ops *ops, 43static unsigned int ip6table_nat_fn(const struct nf_hook_ops *ops,
45 struct sk_buff *skb, 44 struct sk_buff *skb,
46 const struct net_device *in, 45 const struct nf_hook_state *state)
47 const struct net_device *out,
48 int (*okfn)(struct sk_buff *))
49{ 46{
50 return nf_nat_ipv6_fn(ops, skb, in, out, ip6table_nat_do_chain); 47 return nf_nat_ipv6_fn(ops, skb, state, ip6table_nat_do_chain);
51} 48}
52 49
53static unsigned int ip6table_nat_in(const struct nf_hook_ops *ops, 50static unsigned int ip6table_nat_in(const struct nf_hook_ops *ops,
54 struct sk_buff *skb, 51 struct sk_buff *skb,
55 const struct net_device *in, 52 const struct nf_hook_state *state)
56 const struct net_device *out,
57 int (*okfn)(struct sk_buff *))
58{ 53{
59 return nf_nat_ipv6_in(ops, skb, in, out, ip6table_nat_do_chain); 54 return nf_nat_ipv6_in(ops, skb, state, ip6table_nat_do_chain);
60} 55}
61 56
62static unsigned int ip6table_nat_out(const struct nf_hook_ops *ops, 57static unsigned int ip6table_nat_out(const struct nf_hook_ops *ops,
63 struct sk_buff *skb, 58 struct sk_buff *skb,
64 const struct net_device *in, 59 const struct nf_hook_state *state)
65 const struct net_device *out,
66 int (*okfn)(struct sk_buff *))
67{ 60{
68 return nf_nat_ipv6_out(ops, skb, in, out, ip6table_nat_do_chain); 61 return nf_nat_ipv6_out(ops, skb, state, ip6table_nat_do_chain);
69} 62}
70 63
71static unsigned int ip6table_nat_local_fn(const struct nf_hook_ops *ops, 64static unsigned int ip6table_nat_local_fn(const struct nf_hook_ops *ops,
72 struct sk_buff *skb, 65 struct sk_buff *skb,
73 const struct net_device *in, 66 const struct nf_hook_state *state)
74 const struct net_device *out,
75 int (*okfn)(struct sk_buff *))
76{ 67{
77 return nf_nat_ipv6_local_fn(ops, skb, in, out, ip6table_nat_do_chain); 68 return nf_nat_ipv6_local_fn(ops, skb, state, ip6table_nat_do_chain);
78} 69}
79 70
80static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = { 71static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = {
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index 5274740acecc..0b33caad2b69 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -20,13 +20,11 @@ static const struct xt_table packet_raw = {
20/* The work comes in here from netfilter.c. */ 20/* The work comes in here from netfilter.c. */
21static unsigned int 21static unsigned int
22ip6table_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, 22ip6table_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
23 const struct net_device *in, const struct net_device *out, 23 const struct nf_hook_state *state)
24 int (*okfn)(struct sk_buff *))
25{ 24{
26 const struct net *net = dev_net((in != NULL) ? in : out); 25 const struct net *net = dev_net(state->in ? state->in : state->out);
27 26
28 return ip6t_do_table(skb, ops->hooknum, in, out, 27 return ip6t_do_table(skb, ops->hooknum, state, net->ipv6.ip6table_raw);
29 net->ipv6.ip6table_raw);
30} 28}
31 29
32static struct nf_hook_ops *rawtable_ops __read_mostly; 30static struct nf_hook_ops *rawtable_ops __read_mostly;
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
index ab3b0219ecfa..fcef83c25f7b 100644
--- a/net/ipv6/netfilter/ip6table_security.c
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -37,13 +37,11 @@ static const struct xt_table security_table = {
37 37
38static unsigned int 38static unsigned int
39ip6table_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb, 39ip6table_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
40 const struct net_device *in, 40 const struct nf_hook_state *state)
41 const struct net_device *out,
42 int (*okfn)(struct sk_buff *))
43{ 41{
44 const struct net *net = dev_net((in != NULL) ? in : out); 42 const struct net *net = dev_net(state->in ? state->in : state->out);
45 43
46 return ip6t_do_table(skb, ops->hooknum, in, out, 44 return ip6t_do_table(skb, ops->hooknum, state,
47 net->ipv6.ip6table_security); 45 net->ipv6.ip6table_security);
48} 46}
49 47
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index fba91c6fc7ca..4ba0c34c627b 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -97,9 +97,7 @@ static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
97 97
98static unsigned int ipv6_helper(const struct nf_hook_ops *ops, 98static unsigned int ipv6_helper(const struct nf_hook_ops *ops,
99 struct sk_buff *skb, 99 struct sk_buff *skb,
100 const struct net_device *in, 100 const struct nf_hook_state *state)
101 const struct net_device *out,
102 int (*okfn)(struct sk_buff *))
103{ 101{
104 struct nf_conn *ct; 102 struct nf_conn *ct;
105 const struct nf_conn_help *help; 103 const struct nf_conn_help *help;
@@ -135,9 +133,7 @@ static unsigned int ipv6_helper(const struct nf_hook_ops *ops,
135 133
136static unsigned int ipv6_confirm(const struct nf_hook_ops *ops, 134static unsigned int ipv6_confirm(const struct nf_hook_ops *ops,
137 struct sk_buff *skb, 135 struct sk_buff *skb,
138 const struct net_device *in, 136 const struct nf_hook_state *state)
139 const struct net_device *out,
140 int (*okfn)(struct sk_buff *))
141{ 137{
142 struct nf_conn *ct; 138 struct nf_conn *ct;
143 enum ip_conntrack_info ctinfo; 139 enum ip_conntrack_info ctinfo;
@@ -171,25 +167,21 @@ out:
171 167
172static unsigned int ipv6_conntrack_in(const struct nf_hook_ops *ops, 168static unsigned int ipv6_conntrack_in(const struct nf_hook_ops *ops,
173 struct sk_buff *skb, 169 struct sk_buff *skb,
174 const struct net_device *in, 170 const struct nf_hook_state *state)
175 const struct net_device *out,
176 int (*okfn)(struct sk_buff *))
177{ 171{
178 return nf_conntrack_in(dev_net(in), PF_INET6, ops->hooknum, skb); 172 return nf_conntrack_in(dev_net(state->in), PF_INET6, ops->hooknum, skb);
179} 173}
180 174
181static unsigned int ipv6_conntrack_local(const struct nf_hook_ops *ops, 175static unsigned int ipv6_conntrack_local(const struct nf_hook_ops *ops,
182 struct sk_buff *skb, 176 struct sk_buff *skb,
183 const struct net_device *in, 177 const struct nf_hook_state *state)
184 const struct net_device *out,
185 int (*okfn)(struct sk_buff *))
186{ 178{
187 /* root is playing with raw sockets. */ 179 /* root is playing with raw sockets. */
188 if (skb->len < sizeof(struct ipv6hdr)) { 180 if (skb->len < sizeof(struct ipv6hdr)) {
189 net_notice_ratelimited("ipv6_conntrack_local: packet too short\n"); 181 net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
190 return NF_ACCEPT; 182 return NF_ACCEPT;
191 } 183 }
192 return nf_conntrack_in(dev_net(out), PF_INET6, ops->hooknum, skb); 184 return nf_conntrack_in(dev_net(state->out), PF_INET6, ops->hooknum, skb);
193} 185}
194 186
195static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = { 187static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index e70382e4dfb5..a45db0b4785c 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -54,9 +54,7 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
54 54
55static unsigned int ipv6_defrag(const struct nf_hook_ops *ops, 55static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
56 struct sk_buff *skb, 56 struct sk_buff *skb,
57 const struct net_device *in, 57 const struct nf_hook_state *state)
58 const struct net_device *out,
59 int (*okfn)(struct sk_buff *))
60{ 58{
61 struct sk_buff *reasm; 59 struct sk_buff *reasm;
62 60
@@ -77,9 +75,9 @@ static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
77 75
78 nf_ct_frag6_consume_orig(reasm); 76 nf_ct_frag6_consume_orig(reasm);
79 77
80 NF_HOOK_THRESH(NFPROTO_IPV6, ops->hooknum, reasm, 78 NF_HOOK_THRESH(NFPROTO_IPV6, ops->hooknum, state->sk, reasm,
81 (struct net_device *) in, (struct net_device *) out, 79 state->in, state->out,
82 okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1); 80 state->okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
83 81
84 return NF_STOLEN; 82 return NF_STOLEN;
85} 83}
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index c5812e1c1ffb..e76900e0aa92 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -263,11 +263,10 @@ EXPORT_SYMBOL_GPL(nf_nat_icmpv6_reply_translation);
263 263
264unsigned int 264unsigned int
265nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, 265nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
266 const struct net_device *in, const struct net_device *out, 266 const struct nf_hook_state *state,
267 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 267 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
268 struct sk_buff *skb, 268 struct sk_buff *skb,
269 const struct net_device *in, 269 const struct nf_hook_state *state,
270 const struct net_device *out,
271 struct nf_conn *ct)) 270 struct nf_conn *ct))
272{ 271{
273 struct nf_conn *ct; 272 struct nf_conn *ct;
@@ -318,7 +317,7 @@ nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
318 if (!nf_nat_initialized(ct, maniptype)) { 317 if (!nf_nat_initialized(ct, maniptype)) {
319 unsigned int ret; 318 unsigned int ret;
320 319
321 ret = do_chain(ops, skb, in, out, ct); 320 ret = do_chain(ops, skb, state, ct);
322 if (ret != NF_ACCEPT) 321 if (ret != NF_ACCEPT)
323 return ret; 322 return ret;
324 323
@@ -332,7 +331,7 @@ nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
332 pr_debug("Already setup manip %s for ct %p\n", 331 pr_debug("Already setup manip %s for ct %p\n",
333 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST", 332 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
334 ct); 333 ct);
335 if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) 334 if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, state->out))
336 goto oif_changed; 335 goto oif_changed;
337 } 336 }
338 break; 337 break;
@@ -341,7 +340,7 @@ nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
341 /* ESTABLISHED */ 340 /* ESTABLISHED */
342 NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || 341 NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
343 ctinfo == IP_CT_ESTABLISHED_REPLY); 342 ctinfo == IP_CT_ESTABLISHED_REPLY);
344 if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) 343 if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, state->out))
345 goto oif_changed; 344 goto oif_changed;
346 } 345 }
347 346
@@ -355,17 +354,16 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv6_fn);
355 354
356unsigned int 355unsigned int
357nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb, 356nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
358 const struct net_device *in, const struct net_device *out, 357 const struct nf_hook_state *state,
359 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 358 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
360 struct sk_buff *skb, 359 struct sk_buff *skb,
361 const struct net_device *in, 360 const struct nf_hook_state *state,
362 const struct net_device *out,
363 struct nf_conn *ct)) 361 struct nf_conn *ct))
364{ 362{
365 unsigned int ret; 363 unsigned int ret;
366 struct in6_addr daddr = ipv6_hdr(skb)->daddr; 364 struct in6_addr daddr = ipv6_hdr(skb)->daddr;
367 365
368 ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain); 366 ret = nf_nat_ipv6_fn(ops, skb, state, do_chain);
369 if (ret != NF_DROP && ret != NF_STOLEN && 367 if (ret != NF_DROP && ret != NF_STOLEN &&
370 ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr)) 368 ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
371 skb_dst_drop(skb); 369 skb_dst_drop(skb);
@@ -376,11 +374,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv6_in);
376 374
377unsigned int 375unsigned int
378nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb, 376nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
379 const struct net_device *in, const struct net_device *out, 377 const struct nf_hook_state *state,
380 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 378 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
381 struct sk_buff *skb, 379 struct sk_buff *skb,
382 const struct net_device *in, 380 const struct nf_hook_state *state,
383 const struct net_device *out,
384 struct nf_conn *ct)) 381 struct nf_conn *ct))
385{ 382{
386#ifdef CONFIG_XFRM 383#ifdef CONFIG_XFRM
@@ -394,7 +391,7 @@ nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
394 if (skb->len < sizeof(struct ipv6hdr)) 391 if (skb->len < sizeof(struct ipv6hdr))
395 return NF_ACCEPT; 392 return NF_ACCEPT;
396 393
397 ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain); 394 ret = nf_nat_ipv6_fn(ops, skb, state, do_chain);
398#ifdef CONFIG_XFRM 395#ifdef CONFIG_XFRM
399 if (ret != NF_DROP && ret != NF_STOLEN && 396 if (ret != NF_DROP && ret != NF_STOLEN &&
400 !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && 397 !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
@@ -418,11 +415,10 @@ EXPORT_SYMBOL_GPL(nf_nat_ipv6_out);
418 415
419unsigned int 416unsigned int
420nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, 417nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
421 const struct net_device *in, const struct net_device *out, 418 const struct nf_hook_state *state,
422 unsigned int (*do_chain)(const struct nf_hook_ops *ops, 419 unsigned int (*do_chain)(const struct nf_hook_ops *ops,
423 struct sk_buff *skb, 420 struct sk_buff *skb,
424 const struct net_device *in, 421 const struct nf_hook_state *state,
425 const struct net_device *out,
426 struct nf_conn *ct)) 422 struct nf_conn *ct))
427{ 423{
428 const struct nf_conn *ct; 424 const struct nf_conn *ct;
@@ -434,7 +430,7 @@ nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
434 if (skb->len < sizeof(struct ipv6hdr)) 430 if (skb->len < sizeof(struct ipv6hdr))
435 return NF_ACCEPT; 431 return NF_ACCEPT;
436 432
437 ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain); 433 ret = nf_nat_ipv6_fn(ops, skb, state, do_chain);
438 if (ret != NF_DROP && ret != NF_STOLEN && 434 if (ret != NF_DROP && ret != NF_STOLEN &&
439 (ct = nf_ct_get(skb, &ctinfo)) != NULL) { 435 (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
440 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 436 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
diff --git a/net/ipv6/netfilter/nf_tables_ipv6.c b/net/ipv6/netfilter/nf_tables_ipv6.c
index 0d812b31277d..c8148ba76d1a 100644
--- a/net/ipv6/netfilter/nf_tables_ipv6.c
+++ b/net/ipv6/netfilter/nf_tables_ipv6.c
@@ -18,14 +18,12 @@
18 18
19static unsigned int nft_do_chain_ipv6(const struct nf_hook_ops *ops, 19static unsigned int nft_do_chain_ipv6(const struct nf_hook_ops *ops,
20 struct sk_buff *skb, 20 struct sk_buff *skb,
21 const struct net_device *in, 21 const struct nf_hook_state *state)
22 const struct net_device *out,
23 int (*okfn)(struct sk_buff *))
24{ 22{
25 struct nft_pktinfo pkt; 23 struct nft_pktinfo pkt;
26 24
27 /* malformed packet, drop it */ 25 /* malformed packet, drop it */
28 if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0) 26 if (nft_set_pktinfo_ipv6(&pkt, ops, skb, state) < 0)
29 return NF_DROP; 27 return NF_DROP;
30 28
31 return nft_do_chain(&pkt, ops); 29 return nft_do_chain(&pkt, ops);
@@ -33,9 +31,7 @@ static unsigned int nft_do_chain_ipv6(const struct nf_hook_ops *ops,
33 31
34static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops, 32static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops,
35 struct sk_buff *skb, 33 struct sk_buff *skb,
36 const struct net_device *in, 34 const struct nf_hook_state *state)
37 const struct net_device *out,
38 int (*okfn)(struct sk_buff *))
39{ 35{
40 if (unlikely(skb->len < sizeof(struct ipv6hdr))) { 36 if (unlikely(skb->len < sizeof(struct ipv6hdr))) {
41 if (net_ratelimit()) 37 if (net_ratelimit())
@@ -44,7 +40,7 @@ static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops,
44 return NF_ACCEPT; 40 return NF_ACCEPT;
45 } 41 }
46 42
47 return nft_do_chain_ipv6(ops, skb, in, out, okfn); 43 return nft_do_chain_ipv6(ops, skb, state);
48} 44}
49 45
50struct nft_af_info nft_af_ipv6 __read_mostly = { 46struct nft_af_info nft_af_ipv6 __read_mostly = {
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
index 1c4b75dd425b..951bb458b7bd 100644
--- a/net/ipv6/netfilter/nft_chain_nat_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
@@ -26,51 +26,42 @@
26 26
27static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops, 27static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops,
28 struct sk_buff *skb, 28 struct sk_buff *skb,
29 const struct net_device *in, 29 const struct nf_hook_state *state,
30 const struct net_device *out,
31 struct nf_conn *ct) 30 struct nf_conn *ct)
32{ 31{
33 struct nft_pktinfo pkt; 32 struct nft_pktinfo pkt;
34 33
35 nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out); 34 nft_set_pktinfo_ipv6(&pkt, ops, skb, state);
36 35
37 return nft_do_chain(&pkt, ops); 36 return nft_do_chain(&pkt, ops);
38} 37}
39 38
40static unsigned int nft_nat_ipv6_fn(const struct nf_hook_ops *ops, 39static unsigned int nft_nat_ipv6_fn(const struct nf_hook_ops *ops,
41 struct sk_buff *skb, 40 struct sk_buff *skb,
42 const struct net_device *in, 41 const struct nf_hook_state *state)
43 const struct net_device *out,
44 int (*okfn)(struct sk_buff *))
45{ 42{
46 return nf_nat_ipv6_fn(ops, skb, in, out, nft_nat_do_chain); 43 return nf_nat_ipv6_fn(ops, skb, state, nft_nat_do_chain);
47} 44}
48 45
49static unsigned int nft_nat_ipv6_in(const struct nf_hook_ops *ops, 46static unsigned int nft_nat_ipv6_in(const struct nf_hook_ops *ops,
50 struct sk_buff *skb, 47 struct sk_buff *skb,
51 const struct net_device *in, 48 const struct nf_hook_state *state)
52 const struct net_device *out,
53 int (*okfn)(struct sk_buff *))
54{ 49{
55 return nf_nat_ipv6_in(ops, skb, in, out, nft_nat_do_chain); 50 return nf_nat_ipv6_in(ops, skb, state, nft_nat_do_chain);
56} 51}
57 52
58static unsigned int nft_nat_ipv6_out(const struct nf_hook_ops *ops, 53static unsigned int nft_nat_ipv6_out(const struct nf_hook_ops *ops,
59 struct sk_buff *skb, 54 struct sk_buff *skb,
60 const struct net_device *in, 55 const struct nf_hook_state *state)
61 const struct net_device *out,
62 int (*okfn)(struct sk_buff *))
63{ 56{
64 return nf_nat_ipv6_out(ops, skb, in, out, nft_nat_do_chain); 57 return nf_nat_ipv6_out(ops, skb, state, nft_nat_do_chain);
65} 58}
66 59
67static unsigned int nft_nat_ipv6_local_fn(const struct nf_hook_ops *ops, 60static unsigned int nft_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
68 struct sk_buff *skb, 61 struct sk_buff *skb,
69 const struct net_device *in, 62 const struct nf_hook_state *state)
70 const struct net_device *out,
71 int (*okfn)(struct sk_buff *))
72{ 63{
73 return nf_nat_ipv6_local_fn(ops, skb, in, out, nft_nat_do_chain); 64 return nf_nat_ipv6_local_fn(ops, skb, state, nft_nat_do_chain);
74} 65}
75 66
76static const struct nf_chain_type nft_chain_nat_ipv6 = { 67static const struct nf_chain_type nft_chain_nat_ipv6 = {
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c
index 42031299585e..0dafdaac5e17 100644
--- a/net/ipv6/netfilter/nft_chain_route_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_route_ipv6.c
@@ -24,9 +24,7 @@
24 24
25static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops, 25static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
26 struct sk_buff *skb, 26 struct sk_buff *skb,
27 const struct net_device *in, 27 const struct nf_hook_state *state)
28 const struct net_device *out,
29 int (*okfn)(struct sk_buff *))
30{ 28{
31 unsigned int ret; 29 unsigned int ret;
32 struct nft_pktinfo pkt; 30 struct nft_pktinfo pkt;
@@ -35,7 +33,7 @@ static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
35 u32 mark, flowlabel; 33 u32 mark, flowlabel;
36 34
37 /* malformed packet, drop it */ 35 /* malformed packet, drop it */
38 if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0) 36 if (nft_set_pktinfo_ipv6(&pkt, ops, skb, state) < 0)
39 return NF_DROP; 37 return NF_DROP;
40 38
41 /* save source/dest address, mark, hoplimit, flowlabel, priority */ 39 /* save source/dest address, mark, hoplimit, flowlabel, priority */
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 4016a6ef9d61..85892af57364 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -136,7 +136,7 @@ int ip6_dst_hoplimit(struct dst_entry *dst)
136EXPORT_SYMBOL(ip6_dst_hoplimit); 136EXPORT_SYMBOL(ip6_dst_hoplimit);
137#endif 137#endif
138 138
139int __ip6_local_out(struct sk_buff *skb) 139static int __ip6_local_out_sk(struct sock *sk, struct sk_buff *skb)
140{ 140{
141 int len; 141 int len;
142 142
@@ -146,19 +146,30 @@ int __ip6_local_out(struct sk_buff *skb)
146 ipv6_hdr(skb)->payload_len = htons(len); 146 ipv6_hdr(skb)->payload_len = htons(len);
147 IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); 147 IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
148 148
149 return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, 149 return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
150 skb_dst(skb)->dev, dst_output); 150 NULL, skb_dst(skb)->dev, dst_output_sk);
151}
152
153int __ip6_local_out(struct sk_buff *skb)
154{
155 return __ip6_local_out_sk(skb->sk, skb);
151} 156}
152EXPORT_SYMBOL_GPL(__ip6_local_out); 157EXPORT_SYMBOL_GPL(__ip6_local_out);
153 158
154int ip6_local_out(struct sk_buff *skb) 159int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb)
155{ 160{
156 int err; 161 int err;
157 162
158 err = __ip6_local_out(skb); 163 err = __ip6_local_out_sk(sk, skb);
159 if (likely(err == 1)) 164 if (likely(err == 1))
160 err = dst_output(skb); 165 err = dst_output_sk(sk, skb);
161 166
162 return err; 167 return err;
163} 168}
169EXPORT_SYMBOL_GPL(ip6_local_out_sk);
170
171int ip6_local_out(struct sk_buff *skb)
172{
173 return ip6_local_out_sk(skb->sk, skb);
174}
164EXPORT_SYMBOL_GPL(ip6_local_out); 175EXPORT_SYMBOL_GPL(ip6_local_out);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 79ccdb4c1b33..8072bd4139b7 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -652,8 +652,8 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
652 goto error_fault; 652 goto error_fault;
653 653
654 IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); 654 IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
655 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, 655 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
656 rt->dst.dev, dst_output); 656 NULL, rt->dst.dev, dst_output_sk);
657 if (err > 0) 657 if (err > 0)
658 err = net_xmit_errno(err); 658 err = net_xmit_errno(err);
659 if (err) 659 if (err)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index e6b9f51b15e8..6cf2026a9cea 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1076,7 +1076,6 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
1076 if (dev->mtu < IPV6_MIN_MTU) 1076 if (dev->mtu < IPV6_MIN_MTU)
1077 dev->mtu = IPV6_MIN_MTU; 1077 dev->mtu = IPV6_MIN_MTU;
1078 } 1078 }
1079 dev->iflink = tunnel->parms.link;
1080} 1079}
1081 1080
1082static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p) 1081static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p)
@@ -1336,6 +1335,7 @@ static const struct net_device_ops ipip6_netdev_ops = {
1336 .ndo_do_ioctl = ipip6_tunnel_ioctl, 1335 .ndo_do_ioctl = ipip6_tunnel_ioctl,
1337 .ndo_change_mtu = ipip6_tunnel_change_mtu, 1336 .ndo_change_mtu = ipip6_tunnel_change_mtu,
1338 .ndo_get_stats64 = ip_tunnel_get_stats64, 1337 .ndo_get_stats64 = ip_tunnel_get_stats64,
1338 .ndo_get_iflink = ip_tunnel_get_iflink,
1339}; 1339};
1340 1340
1341static void ipip6_dev_free(struct net_device *dev) 1341static void ipip6_dev_free(struct net_device *dev)
@@ -1366,7 +1366,6 @@ static void ipip6_tunnel_setup(struct net_device *dev)
1366 dev->mtu = ETH_DATA_LEN - t_hlen; 1366 dev->mtu = ETH_DATA_LEN - t_hlen;
1367 dev->flags = IFF_NOARP; 1367 dev->flags = IFF_NOARP;
1368 netif_keep_dst(dev); 1368 netif_keep_dst(dev);
1369 dev->iflink = 0;
1370 dev->addr_len = 4; 1369 dev->addr_len = 4;
1371 dev->features |= NETIF_F_LLTX; 1370 dev->features |= NETIF_F_LLTX;
1372 dev->features |= SIT_FEATURES; 1371 dev->features |= SIT_FEATURES;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7cdad8401434..f73a97f6e68e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1348,6 +1348,15 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1348 TCP_SKB_CB(skb)->sacked = 0; 1348 TCP_SKB_CB(skb)->sacked = 0;
1349} 1349}
1350 1350
1351static void tcp_v6_restore_cb(struct sk_buff *skb)
1352{
1353 /* We need to move header back to the beginning if xfrm6_policy_check()
1354 * and tcp_v6_fill_cb() are going to be called again.
1355 */
1356 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1357 sizeof(struct inet6_skb_parm));
1358}
1359
1351static int tcp_v6_rcv(struct sk_buff *skb) 1360static int tcp_v6_rcv(struct sk_buff *skb)
1352{ 1361{
1353 const struct tcphdr *th; 1362 const struct tcphdr *th;
@@ -1480,6 +1489,7 @@ do_time_wait:
1480 inet_twsk_deschedule(tw, &tcp_death_row); 1489 inet_twsk_deschedule(tw, &tcp_death_row);
1481 inet_twsk_put(tw); 1490 inet_twsk_put(tw);
1482 sk = sk2; 1491 sk = sk2;
1492 tcp_v6_restore_cb(skb);
1483 goto process; 1493 goto process;
1484 } 1494 }
1485 /* Fall through to ACK */ 1495 /* Fall through to ACK */
@@ -1488,6 +1498,7 @@ do_time_wait:
1488 tcp_v6_timewait_ack(sk, skb); 1498 tcp_v6_timewait_ack(sk, skb);
1489 break; 1499 break;
1490 case TCP_TW_RST: 1500 case TCP_TW_RST:
1501 tcp_v6_restore_cb(skb);
1491 goto no_tcp_socket; 1502 goto no_tcp_socket;
1492 case TCP_TW_SUCCESS: 1503 case TCP_TW_SUCCESS:
1493 ; 1504 ;
@@ -1522,7 +1533,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
1522 skb->sk = sk; 1533 skb->sk = sk;
1523 skb->destructor = sock_edemux; 1534 skb->destructor = sock_edemux;
1524 if (sk_fullsock(sk)) { 1535 if (sk_fullsock(sk)) {
1525 struct dst_entry *dst = sk->sk_rx_dst; 1536 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1526 1537
1527 if (dst) 1538 if (dst)
1528 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); 1539 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index f48fbe4d16f5..74bd17882a2f 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -42,7 +42,8 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
42 ipv6_hdr(skb)->payload_len = htons(skb->len); 42 ipv6_hdr(skb)->payload_len = htons(skb->len);
43 __skb_push(skb, skb->data - skb_network_header(skb)); 43 __skb_push(skb, skb->data - skb_network_header(skb));
44 44
45 NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL, 45 NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, NULL, skb,
46 skb->dev, NULL,
46 ip6_rcv_finish); 47 ip6_rcv_finish);
47 return -1; 48 return -1;
48} 49}
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 010f8bd2d577..09c76a7b474d 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -120,7 +120,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
120} 120}
121EXPORT_SYMBOL(xfrm6_prepare_output); 121EXPORT_SYMBOL(xfrm6_prepare_output);
122 122
123int xfrm6_output_finish(struct sk_buff *skb) 123int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb)
124{ 124{
125 memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); 125 memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
126 126
@@ -128,10 +128,10 @@ int xfrm6_output_finish(struct sk_buff *skb)
128 IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; 128 IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
129#endif 129#endif
130 130
131 return xfrm_output(skb); 131 return xfrm_output(sk, skb);
132} 132}
133 133
134static int __xfrm6_output(struct sk_buff *skb) 134static int __xfrm6_output(struct sock *sk, struct sk_buff *skb)
135{ 135{
136 struct dst_entry *dst = skb_dst(skb); 136 struct dst_entry *dst = skb_dst(skb);
137 struct xfrm_state *x = dst->xfrm; 137 struct xfrm_state *x = dst->xfrm;
@@ -140,7 +140,7 @@ static int __xfrm6_output(struct sk_buff *skb)
140#ifdef CONFIG_NETFILTER 140#ifdef CONFIG_NETFILTER
141 if (!x) { 141 if (!x) {
142 IP6CB(skb)->flags |= IP6SKB_REROUTED; 142 IP6CB(skb)->flags |= IP6SKB_REROUTED;
143 return dst_output(skb); 143 return dst_output_sk(sk, skb);
144 } 144 }
145#endif 145#endif
146 146
@@ -160,14 +160,15 @@ static int __xfrm6_output(struct sk_buff *skb)
160 if (x->props.mode == XFRM_MODE_TUNNEL && 160 if (x->props.mode == XFRM_MODE_TUNNEL &&
161 ((skb->len > mtu && !skb_is_gso(skb)) || 161 ((skb->len > mtu && !skb_is_gso(skb)) ||
162 dst_allfrag(skb_dst(skb)))) { 162 dst_allfrag(skb_dst(skb)))) {
163 return ip6_fragment(skb, x->outer_mode->afinfo->output_finish); 163 return ip6_fragment(sk, skb,
164 x->outer_mode->afinfo->output_finish);
164 } 165 }
165 return x->outer_mode->afinfo->output_finish(skb); 166 return x->outer_mode->afinfo->output_finish(sk, skb);
166} 167}
167 168
168int xfrm6_output(struct sock *sk, struct sk_buff *skb) 169int xfrm6_output(struct sock *sk, struct sk_buff *skb)
169{ 170{
170 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, 171 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb,
171 NULL, skb_dst(skb)->dev, __xfrm6_output, 172 NULL, skb_dst(skb)->dev, __xfrm6_output,
172 !(IP6CB(skb)->flags & IP6SKB_REROUTED)); 173 !(IP6CB(skb)->flags & IP6SKB_REROUTED));
173} 174}
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 94b4c898a116..6daa52a18d40 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1114,10 +1114,8 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1114 noblock, &err); 1114 noblock, &err);
1115 else 1115 else
1116 skb = sock_alloc_send_skb(sk, len, noblock, &err); 1116 skb = sock_alloc_send_skb(sk, len, noblock, &err);
1117 if (!skb) { 1117 if (!skb)
1118 err = -ENOMEM;
1119 goto out; 1118 goto out;
1120 }
1121 if (iucv->transport == AF_IUCV_TRANS_HIPER) 1119 if (iucv->transport == AF_IUCV_TRANS_HIPER)
1122 skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN); 1120 skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
1123 if (memcpy_from_msg(skb_put(skb, len), msg, len)) { 1121 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 895348e44c7d..a29a504492af 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1871,6 +1871,7 @@ static int __init l2tp_init(void)
1871 l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0); 1871 l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
1872 if (!l2tp_wq) { 1872 if (!l2tp_wq) {
1873 pr_err("alloc_workqueue failed\n"); 1873 pr_err("alloc_workqueue failed\n");
1874 unregister_pernet_device(&l2tp_net_ops);
1874 rc = -ENOMEM; 1875 rc = -ENOMEM;
1875 goto out; 1876 goto out;
1876 } 1877 }
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 2c090c507391..5c564a68fb50 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -49,8 +49,6 @@ static void ieee80211_free_tid_rx(struct rcu_head *h)
49 container_of(h, struct tid_ampdu_rx, rcu_head); 49 container_of(h, struct tid_ampdu_rx, rcu_head);
50 int i; 50 int i;
51 51
52 del_timer_sync(&tid_rx->reorder_timer);
53
54 for (i = 0; i < tid_rx->buf_size; i++) 52 for (i = 0; i < tid_rx->buf_size; i++)
55 __skb_queue_purge(&tid_rx->reorder_buf[i]); 53 __skb_queue_purge(&tid_rx->reorder_buf[i]);
56 kfree(tid_rx->reorder_buf); 54 kfree(tid_rx->reorder_buf);
@@ -93,6 +91,12 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
93 91
94 del_timer_sync(&tid_rx->session_timer); 92 del_timer_sync(&tid_rx->session_timer);
95 93
94 /* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */
95 spin_lock_bh(&tid_rx->reorder_lock);
96 tid_rx->removed = true;
97 spin_unlock_bh(&tid_rx->reorder_lock);
98 del_timer_sync(&tid_rx->reorder_timer);
99
96 call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); 100 call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
97} 101}
98 102
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 4f7b922cfda4..2cd02278d4d4 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -873,9 +873,10 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
873 873
874 set_release_timer: 874 set_release_timer:
875 875
876 mod_timer(&tid_agg_rx->reorder_timer, 876 if (!tid_agg_rx->removed)
877 tid_agg_rx->reorder_time[j] + 1 + 877 mod_timer(&tid_agg_rx->reorder_timer,
878 HT_RX_REORDER_BUF_TIMEOUT); 878 tid_agg_rx->reorder_time[j] + 1 +
879 HT_RX_REORDER_BUF_TIMEOUT);
879 } else { 880 } else {
880 del_timer(&tid_agg_rx->reorder_timer); 881 del_timer(&tid_agg_rx->reorder_timer);
881 } 882 }
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 248f56e59ebc..7e2fa4018d41 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -175,6 +175,7 @@ struct tid_ampdu_tx {
175 * @reorder_lock: serializes access to reorder buffer, see below. 175 * @reorder_lock: serializes access to reorder buffer, see below.
176 * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and 176 * @auto_seq: used for offloaded BA sessions to automatically pick head_seq_and
177 * and ssn. 177 * and ssn.
178 * @removed: this session is removed (but might have been found due to RCU)
178 * 179 *
179 * This structure's lifetime is managed by RCU, assignments to 180 * This structure's lifetime is managed by RCU, assignments to
180 * the array holding it must hold the aggregation mutex. 181 * the array holding it must hold the aggregation mutex.
@@ -199,6 +200,7 @@ struct tid_ampdu_rx {
199 u16 timeout; 200 u16 timeout;
200 u8 dialog_token; 201 u8 dialog_token;
201 bool auto_seq; 202 bool auto_seq;
203 bool removed;
202}; 204};
203 205
204/** 206/**
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index fea9ef566427..e6163017c42d 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -120,12 +120,8 @@ EXPORT_SYMBOL(nf_unregister_hooks);
120 120
121unsigned int nf_iterate(struct list_head *head, 121unsigned int nf_iterate(struct list_head *head,
122 struct sk_buff *skb, 122 struct sk_buff *skb,
123 unsigned int hook, 123 struct nf_hook_state *state,
124 const struct net_device *indev, 124 struct nf_hook_ops **elemp)
125 const struct net_device *outdev,
126 struct nf_hook_ops **elemp,
127 int (*okfn)(struct sk_buff *),
128 int hook_thresh)
129{ 125{
130 unsigned int verdict; 126 unsigned int verdict;
131 127
@@ -134,19 +130,19 @@ unsigned int nf_iterate(struct list_head *head,
134 * function because of risk of continuing from deleted element. 130 * function because of risk of continuing from deleted element.
135 */ 131 */
136 list_for_each_entry_continue_rcu((*elemp), head, list) { 132 list_for_each_entry_continue_rcu((*elemp), head, list) {
137 if (hook_thresh > (*elemp)->priority) 133 if (state->thresh > (*elemp)->priority)
138 continue; 134 continue;
139 135
140 /* Optimization: we don't need to hold module 136 /* Optimization: we don't need to hold module
141 reference here, since function can't sleep. --RR */ 137 reference here, since function can't sleep. --RR */
142repeat: 138repeat:
143 verdict = (*elemp)->hook(*elemp, skb, indev, outdev, okfn); 139 verdict = (*elemp)->hook(*elemp, skb, state);
144 if (verdict != NF_ACCEPT) { 140 if (verdict != NF_ACCEPT) {
145#ifdef CONFIG_NETFILTER_DEBUG 141#ifdef CONFIG_NETFILTER_DEBUG
146 if (unlikely((verdict & NF_VERDICT_MASK) 142 if (unlikely((verdict & NF_VERDICT_MASK)
147 > NF_MAX_VERDICT)) { 143 > NF_MAX_VERDICT)) {
148 NFDEBUG("Evil return from %p(%u).\n", 144 NFDEBUG("Evil return from %p(%u).\n",
149 (*elemp)->hook, hook); 145 (*elemp)->hook, state->hook);
150 continue; 146 continue;
151 } 147 }
152#endif 148#endif
@@ -161,11 +157,7 @@ repeat:
161 157
162/* Returns 1 if okfn() needs to be executed by the caller, 158/* Returns 1 if okfn() needs to be executed by the caller,
163 * -EPERM for NF_DROP, 0 otherwise. */ 159 * -EPERM for NF_DROP, 0 otherwise. */
164int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, 160int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
165 struct net_device *indev,
166 struct net_device *outdev,
167 int (*okfn)(struct sk_buff *),
168 int hook_thresh)
169{ 161{
170 struct nf_hook_ops *elem; 162 struct nf_hook_ops *elem;
171 unsigned int verdict; 163 unsigned int verdict;
@@ -174,10 +166,11 @@ int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
174 /* We may already have this, but read-locks nest anyway */ 166 /* We may already have this, but read-locks nest anyway */
175 rcu_read_lock(); 167 rcu_read_lock();
176 168
177 elem = list_entry_rcu(&nf_hooks[pf][hook], struct nf_hook_ops, list); 169 elem = list_entry_rcu(&nf_hooks[state->pf][state->hook],
170 struct nf_hook_ops, list);
178next_hook: 171next_hook:
179 verdict = nf_iterate(&nf_hooks[pf][hook], skb, hook, indev, 172 verdict = nf_iterate(&nf_hooks[state->pf][state->hook], skb, state,
180 outdev, &elem, okfn, hook_thresh); 173 &elem);
181 if (verdict == NF_ACCEPT || verdict == NF_STOP) { 174 if (verdict == NF_ACCEPT || verdict == NF_STOP) {
182 ret = 1; 175 ret = 1;
183 } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) { 176 } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
@@ -186,8 +179,8 @@ next_hook:
186 if (ret == 0) 179 if (ret == 0)
187 ret = -EPERM; 180 ret = -EPERM;
188 } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) { 181 } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
189 int err = nf_queue(skb, elem, pf, hook, indev, outdev, okfn, 182 int err = nf_queue(skb, elem, state,
190 verdict >> NF_VERDICT_QBITS); 183 verdict >> NF_VERDICT_QBITS);
191 if (err < 0) { 184 if (err < 0) {
192 if (err == -ECANCELED) 185 if (err == -ECANCELED)
193 goto next_hook; 186 goto next_hook;
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 04dbd9c7213f..5d2b806a862e 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1272,8 +1272,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
1272 */ 1272 */
1273static unsigned int 1273static unsigned int
1274ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb, 1274ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1275 const struct net_device *in, const struct net_device *out, 1275 const struct nf_hook_state *state)
1276 int (*okfn)(struct sk_buff *))
1277{ 1276{
1278 return ip_vs_out(ops->hooknum, skb, AF_INET); 1277 return ip_vs_out(ops->hooknum, skb, AF_INET);
1279} 1278}
@@ -1284,8 +1283,7 @@ ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1284 */ 1283 */
1285static unsigned int 1284static unsigned int
1286ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb, 1285ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1287 const struct net_device *in, const struct net_device *out, 1286 const struct nf_hook_state *state)
1288 int (*okfn)(struct sk_buff *))
1289{ 1287{
1290 return ip_vs_out(ops->hooknum, skb, AF_INET); 1288 return ip_vs_out(ops->hooknum, skb, AF_INET);
1291} 1289}
@@ -1299,8 +1297,7 @@ ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1299 */ 1297 */
1300static unsigned int 1298static unsigned int
1301ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb, 1299ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1302 const struct net_device *in, const struct net_device *out, 1300 const struct nf_hook_state *state)
1303 int (*okfn)(struct sk_buff *))
1304{ 1301{
1305 return ip_vs_out(ops->hooknum, skb, AF_INET6); 1302 return ip_vs_out(ops->hooknum, skb, AF_INET6);
1306} 1303}
@@ -1311,8 +1308,7 @@ ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1311 */ 1308 */
1312static unsigned int 1309static unsigned int
1313ip_vs_local_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb, 1310ip_vs_local_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1314 const struct net_device *in, const struct net_device *out, 1311 const struct nf_hook_state *state)
1315 int (*okfn)(struct sk_buff *))
1316{ 1312{
1317 return ip_vs_out(ops->hooknum, skb, AF_INET6); 1313 return ip_vs_out(ops->hooknum, skb, AF_INET6);
1318} 1314}
@@ -1769,9 +1765,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
1769 */ 1765 */
1770static unsigned int 1766static unsigned int
1771ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb, 1767ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1772 const struct net_device *in, 1768 const struct nf_hook_state *state)
1773 const struct net_device *out,
1774 int (*okfn)(struct sk_buff *))
1775{ 1769{
1776 return ip_vs_in(ops->hooknum, skb, AF_INET); 1770 return ip_vs_in(ops->hooknum, skb, AF_INET);
1777} 1771}
@@ -1782,8 +1776,7 @@ ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1782 */ 1776 */
1783static unsigned int 1777static unsigned int
1784ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb, 1778ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1785 const struct net_device *in, const struct net_device *out, 1779 const struct nf_hook_state *state)
1786 int (*okfn)(struct sk_buff *))
1787{ 1780{
1788 return ip_vs_in(ops->hooknum, skb, AF_INET); 1781 return ip_vs_in(ops->hooknum, skb, AF_INET);
1789} 1782}
@@ -1796,9 +1789,7 @@ ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
1796 */ 1789 */
1797static unsigned int 1790static unsigned int
1798ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb, 1791ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1799 const struct net_device *in, 1792 const struct nf_hook_state *state)
1800 const struct net_device *out,
1801 int (*okfn)(struct sk_buff *))
1802{ 1793{
1803 return ip_vs_in(ops->hooknum, skb, AF_INET6); 1794 return ip_vs_in(ops->hooknum, skb, AF_INET6);
1804} 1795}
@@ -1809,8 +1800,7 @@ ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1809 */ 1800 */
1810static unsigned int 1801static unsigned int
1811ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb, 1802ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1812 const struct net_device *in, const struct net_device *out, 1803 const struct nf_hook_state *state)
1813 int (*okfn)(struct sk_buff *))
1814{ 1804{
1815 return ip_vs_in(ops->hooknum, skb, AF_INET6); 1805 return ip_vs_in(ops->hooknum, skb, AF_INET6);
1816} 1806}
@@ -1829,8 +1819,7 @@ ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1829 */ 1819 */
1830static unsigned int 1820static unsigned int
1831ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb, 1821ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb,
1832 const struct net_device *in, const struct net_device *out, 1822 const struct nf_hook_state *state)
1833 int (*okfn)(struct sk_buff *))
1834{ 1823{
1835 int r; 1824 int r;
1836 struct net *net; 1825 struct net *net;
@@ -1851,8 +1840,7 @@ ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb,
1851#ifdef CONFIG_IP_VS_IPV6 1840#ifdef CONFIG_IP_VS_IPV6
1852static unsigned int 1841static unsigned int
1853ip_vs_forward_icmp_v6(const struct nf_hook_ops *ops, struct sk_buff *skb, 1842ip_vs_forward_icmp_v6(const struct nf_hook_ops *ops, struct sk_buff *skb,
1854 const struct net_device *in, const struct net_device *out, 1843 const struct nf_hook_state *state)
1855 int (*okfn)(struct sk_buff *))
1856{ 1844{
1857 int r; 1845 int r;
1858 struct net *net; 1846 struct net *net;
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index bf02932b7188..19986ec5f21a 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -536,8 +536,8 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
536 ip_vs_update_conntrack(skb, cp, 1); 536 ip_vs_update_conntrack(skb, cp, 1);
537 if (!local) { 537 if (!local) {
538 skb_forward_csum(skb); 538 skb_forward_csum(skb);
539 NF_HOOK(pf, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev, 539 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
540 dst_output); 540 NULL, skb_dst(skb)->dev, dst_output_sk);
541 } else 541 } else
542 ret = NF_ACCEPT; 542 ret = NF_ACCEPT;
543 return ret; 543 return ret;
@@ -554,8 +554,8 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
554 ip_vs_notrack(skb); 554 ip_vs_notrack(skb);
555 if (!local) { 555 if (!local) {
556 skb_forward_csum(skb); 556 skb_forward_csum(skb);
557 NF_HOOK(pf, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev, 557 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
558 dst_output); 558 NULL, skb_dst(skb)->dev, dst_output_sk);
559 } else 559 } else
560 ret = NF_ACCEPT; 560 ret = NF_ACCEPT;
561 return ret; 561 return ret;
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 61a3c927e63c..ea7f36784b3d 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -14,16 +14,11 @@
14 14
15/* core.c */ 15/* core.c */
16unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb, 16unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb,
17 unsigned int hook, const struct net_device *indev, 17 struct nf_hook_state *state, struct nf_hook_ops **elemp);
18 const struct net_device *outdev,
19 struct nf_hook_ops **elemp,
20 int (*okfn)(struct sk_buff *), int hook_thresh);
21 18
22/* nf_queue.c */ 19/* nf_queue.c */
23int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem, u_int8_t pf, 20int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem,
24 unsigned int hook, struct net_device *indev, 21 struct nf_hook_state *state, unsigned int queuenum);
25 struct net_device *outdev, int (*okfn)(struct sk_buff *),
26 unsigned int queuenum);
27int __init netfilter_queue_init(void); 22int __init netfilter_queue_init(void);
28 23
29/* nf_log.c */ 24/* nf_log.c */
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index fb045b4c2966..2e88032cd5ad 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -48,11 +48,15 @@ EXPORT_SYMBOL(nf_unregister_queue_handler);
48 48
49void nf_queue_entry_release_refs(struct nf_queue_entry *entry) 49void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
50{ 50{
51 struct nf_hook_state *state = &entry->state;
52
51 /* Release those devices we held, or Alexey will kill me. */ 53 /* Release those devices we held, or Alexey will kill me. */
52 if (entry->indev) 54 if (state->in)
53 dev_put(entry->indev); 55 dev_put(state->in);
54 if (entry->outdev) 56 if (state->out)
55 dev_put(entry->outdev); 57 dev_put(state->out);
58 if (state->sk)
59 sock_put(state->sk);
56#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 60#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
57 if (entry->skb->nf_bridge) { 61 if (entry->skb->nf_bridge) {
58 struct net_device *physdev; 62 struct net_device *physdev;
@@ -73,13 +77,17 @@ EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
73/* Bump dev refs so they don't vanish while packet is out */ 77/* Bump dev refs so they don't vanish while packet is out */
74bool nf_queue_entry_get_refs(struct nf_queue_entry *entry) 78bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
75{ 79{
80 struct nf_hook_state *state = &entry->state;
81
76 if (!try_module_get(entry->elem->owner)) 82 if (!try_module_get(entry->elem->owner))
77 return false; 83 return false;
78 84
79 if (entry->indev) 85 if (state->in)
80 dev_hold(entry->indev); 86 dev_hold(state->in);
81 if (entry->outdev) 87 if (state->out)
82 dev_hold(entry->outdev); 88 dev_hold(state->out);
89 if (state->sk)
90 sock_hold(state->sk);
83#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 91#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
84 if (entry->skb->nf_bridge) { 92 if (entry->skb->nf_bridge) {
85 struct net_device *physdev; 93 struct net_device *physdev;
@@ -102,12 +110,9 @@ EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
102 * through nf_reinject(). 110 * through nf_reinject().
103 */ 111 */
104int nf_queue(struct sk_buff *skb, 112int nf_queue(struct sk_buff *skb,
105 struct nf_hook_ops *elem, 113 struct nf_hook_ops *elem,
106 u_int8_t pf, unsigned int hook, 114 struct nf_hook_state *state,
107 struct net_device *indev, 115 unsigned int queuenum)
108 struct net_device *outdev,
109 int (*okfn)(struct sk_buff *),
110 unsigned int queuenum)
111{ 116{
112 int status = -ENOENT; 117 int status = -ENOENT;
113 struct nf_queue_entry *entry = NULL; 118 struct nf_queue_entry *entry = NULL;
@@ -123,7 +128,7 @@ int nf_queue(struct sk_buff *skb,
123 goto err_unlock; 128 goto err_unlock;
124 } 129 }
125 130
126 afinfo = nf_get_afinfo(pf); 131 afinfo = nf_get_afinfo(state->pf);
127 if (!afinfo) 132 if (!afinfo)
128 goto err_unlock; 133 goto err_unlock;
129 134
@@ -136,11 +141,7 @@ int nf_queue(struct sk_buff *skb,
136 *entry = (struct nf_queue_entry) { 141 *entry = (struct nf_queue_entry) {
137 .skb = skb, 142 .skb = skb,
138 .elem = elem, 143 .elem = elem,
139 .pf = pf, 144 .state = *state,
140 .hook = hook,
141 .indev = indev,
142 .outdev = outdev,
143 .okfn = okfn,
144 .size = sizeof(*entry) + afinfo->route_key_size, 145 .size = sizeof(*entry) + afinfo->route_key_size,
145 }; 146 };
146 147
@@ -186,30 +187,29 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
186 } 187 }
187 188
188 if (verdict == NF_ACCEPT) { 189 if (verdict == NF_ACCEPT) {
189 afinfo = nf_get_afinfo(entry->pf); 190 afinfo = nf_get_afinfo(entry->state.pf);
190 if (!afinfo || afinfo->reroute(skb, entry) < 0) 191 if (!afinfo || afinfo->reroute(skb, entry) < 0)
191 verdict = NF_DROP; 192 verdict = NF_DROP;
192 } 193 }
193 194
195 entry->state.thresh = INT_MIN;
196
194 if (verdict == NF_ACCEPT) { 197 if (verdict == NF_ACCEPT) {
195 next_hook: 198 next_hook:
196 verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook], 199 verdict = nf_iterate(&nf_hooks[entry->state.pf][entry->state.hook],
197 skb, entry->hook, 200 skb, &entry->state, &elem);
198 entry->indev, entry->outdev, &elem,
199 entry->okfn, INT_MIN);
200 } 201 }
201 202
202 switch (verdict & NF_VERDICT_MASK) { 203 switch (verdict & NF_VERDICT_MASK) {
203 case NF_ACCEPT: 204 case NF_ACCEPT:
204 case NF_STOP: 205 case NF_STOP:
205 local_bh_disable(); 206 local_bh_disable();
206 entry->okfn(skb); 207 entry->state.okfn(entry->state.sk, skb);
207 local_bh_enable(); 208 local_bh_enable();
208 break; 209 break;
209 case NF_QUEUE: 210 case NF_QUEUE:
210 err = nf_queue(skb, elem, entry->pf, entry->hook, 211 err = nf_queue(skb, elem, &entry->state,
211 entry->indev, entry->outdev, entry->okfn, 212 verdict >> NF_VERDICT_QBITS);
212 verdict >> NF_VERDICT_QBITS);
213 if (err < 0) { 213 if (err < 0) {
214 if (err == -ECANCELED) 214 if (err == -ECANCELED)
215 goto next_hook; 215 goto next_hook;
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 94e1aaf86070..628afc350c02 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -315,13 +315,13 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
315 if (entskb->tstamp.tv64) 315 if (entskb->tstamp.tv64)
316 size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)); 316 size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
317 317
318 if (entry->hook <= NF_INET_FORWARD || 318 if (entry->state.hook <= NF_INET_FORWARD ||
319 (entry->hook == NF_INET_POST_ROUTING && entskb->sk == NULL)) 319 (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
320 csum_verify = !skb_csum_unnecessary(entskb); 320 csum_verify = !skb_csum_unnecessary(entskb);
321 else 321 else
322 csum_verify = false; 322 csum_verify = false;
323 323
324 outdev = entry->outdev; 324 outdev = entry->state.out;
325 325
326 switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) { 326 switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
327 case NFQNL_COPY_META: 327 case NFQNL_COPY_META:
@@ -369,23 +369,23 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
369 return NULL; 369 return NULL;
370 } 370 }
371 nfmsg = nlmsg_data(nlh); 371 nfmsg = nlmsg_data(nlh);
372 nfmsg->nfgen_family = entry->pf; 372 nfmsg->nfgen_family = entry->state.pf;
373 nfmsg->version = NFNETLINK_V0; 373 nfmsg->version = NFNETLINK_V0;
374 nfmsg->res_id = htons(queue->queue_num); 374 nfmsg->res_id = htons(queue->queue_num);
375 375
376 nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg)); 376 nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
377 pmsg = nla_data(nla); 377 pmsg = nla_data(nla);
378 pmsg->hw_protocol = entskb->protocol; 378 pmsg->hw_protocol = entskb->protocol;
379 pmsg->hook = entry->hook; 379 pmsg->hook = entry->state.hook;
380 *packet_id_ptr = &pmsg->packet_id; 380 *packet_id_ptr = &pmsg->packet_id;
381 381
382 indev = entry->indev; 382 indev = entry->state.in;
383 if (indev) { 383 if (indev) {
384#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 384#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
385 if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex))) 385 if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
386 goto nla_put_failure; 386 goto nla_put_failure;
387#else 387#else
388 if (entry->pf == PF_BRIDGE) { 388 if (entry->state.pf == PF_BRIDGE) {
389 /* Case 1: indev is physical input device, we need to 389 /* Case 1: indev is physical input device, we need to
390 * look for bridge group (when called from 390 * look for bridge group (when called from
391 * netfilter_bridge) */ 391 * netfilter_bridge) */
@@ -419,7 +419,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
419 if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex))) 419 if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
420 goto nla_put_failure; 420 goto nla_put_failure;
421#else 421#else
422 if (entry->pf == PF_BRIDGE) { 422 if (entry->state.pf == PF_BRIDGE) {
423 /* Case 1: outdev is physical output device, we need to 423 /* Case 1: outdev is physical output device, we need to
424 * look for bridge group (when called from 424 * look for bridge group (when called from
425 * netfilter_bridge) */ 425 * netfilter_bridge) */
@@ -642,8 +642,8 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
642 struct nfqnl_instance *queue; 642 struct nfqnl_instance *queue;
643 struct sk_buff *skb, *segs; 643 struct sk_buff *skb, *segs;
644 int err = -ENOBUFS; 644 int err = -ENOBUFS;
645 struct net *net = dev_net(entry->indev ? 645 struct net *net = dev_net(entry->state.in ?
646 entry->indev : entry->outdev); 646 entry->state.in : entry->state.out);
647 struct nfnl_queue_net *q = nfnl_queue_pernet(net); 647 struct nfnl_queue_net *q = nfnl_queue_pernet(net);
648 648
649 /* rcu_read_lock()ed by nf_hook_slow() */ 649 /* rcu_read_lock()ed by nf_hook_slow() */
@@ -656,7 +656,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
656 656
657 skb = entry->skb; 657 skb = entry->skb;
658 658
659 switch (entry->pf) { 659 switch (entry->state.pf) {
660 case NFPROTO_IPV4: 660 case NFPROTO_IPV4:
661 skb->protocol = htons(ETH_P_IP); 661 skb->protocol = htons(ETH_P_IP);
662 break; 662 break;
@@ -766,11 +766,11 @@ nfqnl_set_mode(struct nfqnl_instance *queue,
766static int 766static int
767dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex) 767dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
768{ 768{
769 if (entry->indev) 769 if (entry->state.in)
770 if (entry->indev->ifindex == ifindex) 770 if (entry->state.in->ifindex == ifindex)
771 return 1; 771 return 1;
772 if (entry->outdev) 772 if (entry->state.out)
773 if (entry->outdev->ifindex == ifindex) 773 if (entry->state.out->ifindex == ifindex)
774 return 1; 774 return 1;
775#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 775#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
776 if (entry->skb->nf_bridge) { 776 if (entry->skb->nf_bridge) {
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 3277a7520e31..6d39766e7828 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -222,7 +222,8 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
222{ 222{
223 struct net *net = ovs_dp_get_net(vport->dp); 223 struct net *net = ovs_dp_get_net(vport->dp);
224 struct vxlan_port *vxlan_port = vxlan_vport(vport); 224 struct vxlan_port *vxlan_port = vxlan_vport(vport);
225 __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport; 225 struct sock *sk = vxlan_port->vs->sock->sk;
226 __be16 dst_port = inet_sk(sk)->inet_sport;
226 const struct ovs_key_ipv4_tunnel *tun_key; 227 const struct ovs_key_ipv4_tunnel *tun_key;
227 struct vxlan_metadata md = {0}; 228 struct vxlan_metadata md = {0};
228 struct rtable *rt; 229 struct rtable *rt;
@@ -255,7 +256,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
255 vxflags = vxlan_port->exts | 256 vxflags = vxlan_port->exts |
256 (tun_key->tun_flags & TUNNEL_CSUM ? VXLAN_F_UDP_CSUM : 0); 257 (tun_key->tun_flags & TUNNEL_CSUM ? VXLAN_F_UDP_CSUM : 0);
257 258
258 err = vxlan_xmit_skb(rt, skb, fl.saddr, tun_key->ipv4_dst, 259 err = vxlan_xmit_skb(rt, sk, skb, fl.saddr, tun_key->ipv4_dst,
259 tun_key->ipv4_tos, tun_key->ipv4_ttl, df, 260 tun_key->ipv4_tos, tun_key->ipv4_ttl, df,
260 src_port, dst_port, 261 src_port, dst_port,
261 &md, false, vxflags); 262 &md, false, vxflags);
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index ec2954ffc690..067a3fff1d2c 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -274,10 +274,8 @@ void ovs_vport_del(struct vport *vport)
274 ASSERT_OVSL(); 274 ASSERT_OVSL();
275 275
276 hlist_del_rcu(&vport->hash_node); 276 hlist_del_rcu(&vport->hash_node);
277
278 vport->ops->destroy(vport);
279
280 module_put(vport->ops->owner); 277 module_put(vport->ops->owner);
278 vport->ops->destroy(vport);
281} 279}
282 280
283/** 281/**
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index dfcea20e3171..f377702d4b91 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -8,7 +8,7 @@
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 * 10 *
11 * Meant to be mostly used for localy generated traffic : 11 * Meant to be mostly used for locally generated traffic :
12 * Fast classification depends on skb->sk being set before reaching us. 12 * Fast classification depends on skb->sk being set before reaching us.
13 * If not, (router workload), we use rxhash as fallback, with 32 bits wide hash. 13 * If not, (router workload), we use rxhash as fallback, with 32 bits wide hash.
14 * All packets belonging to a socket are considered as a 'flow'. 14 * All packets belonging to a socket are considered as a 'flow'.
@@ -63,7 +63,7 @@ struct fq_flow {
63 struct sk_buff *tail; /* last skb in the list */ 63 struct sk_buff *tail; /* last skb in the list */
64 unsigned long age; /* jiffies when flow was emptied, for gc */ 64 unsigned long age; /* jiffies when flow was emptied, for gc */
65 }; 65 };
66 struct rb_node fq_node; /* anchor in fq_root[] trees */ 66 struct rb_node fq_node; /* anchor in fq_root[] trees */
67 struct sock *sk; 67 struct sock *sk;
68 int qlen; /* number of packets in flow queue */ 68 int qlen; /* number of packets in flow queue */
69 int credit; 69 int credit;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 612aa73bbc60..e6ce1517367f 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -303,9 +303,7 @@ static int rpc_client_register(struct rpc_clnt *clnt,
303 struct super_block *pipefs_sb; 303 struct super_block *pipefs_sb;
304 int err; 304 int err;
305 305
306 err = rpc_clnt_debugfs_register(clnt); 306 rpc_clnt_debugfs_register(clnt);
307 if (err)
308 return err;
309 307
310 pipefs_sb = rpc_get_sb_net(net); 308 pipefs_sb = rpc_get_sb_net(net);
311 if (pipefs_sb) { 309 if (pipefs_sb) {
diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c
index e811f390f9f6..82962f7e6e88 100644
--- a/net/sunrpc/debugfs.c
+++ b/net/sunrpc/debugfs.c
@@ -129,48 +129,52 @@ static const struct file_operations tasks_fops = {
129 .release = tasks_release, 129 .release = tasks_release,
130}; 130};
131 131
132int 132void
133rpc_clnt_debugfs_register(struct rpc_clnt *clnt) 133rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
134{ 134{
135 int len, err; 135 int len;
136 char name[24]; /* enough for "../../rpc_xprt/ + 8 hex digits + NULL */ 136 char name[24]; /* enough for "../../rpc_xprt/ + 8 hex digits + NULL */
137 struct rpc_xprt *xprt;
137 138
138 /* Already registered? */ 139 /* Already registered? */
139 if (clnt->cl_debugfs) 140 if (clnt->cl_debugfs || !rpc_clnt_dir)
140 return 0; 141 return;
141 142
142 len = snprintf(name, sizeof(name), "%x", clnt->cl_clid); 143 len = snprintf(name, sizeof(name), "%x", clnt->cl_clid);
143 if (len >= sizeof(name)) 144 if (len >= sizeof(name))
144 return -EINVAL; 145 return;
145 146
146 /* make the per-client dir */ 147 /* make the per-client dir */
147 clnt->cl_debugfs = debugfs_create_dir(name, rpc_clnt_dir); 148 clnt->cl_debugfs = debugfs_create_dir(name, rpc_clnt_dir);
148 if (!clnt->cl_debugfs) 149 if (!clnt->cl_debugfs)
149 return -ENOMEM; 150 return;
150 151
151 /* make tasks file */ 152 /* make tasks file */
152 err = -ENOMEM;
153 if (!debugfs_create_file("tasks", S_IFREG | S_IRUSR, clnt->cl_debugfs, 153 if (!debugfs_create_file("tasks", S_IFREG | S_IRUSR, clnt->cl_debugfs,
154 clnt, &tasks_fops)) 154 clnt, &tasks_fops))
155 goto out_err; 155 goto out_err;
156 156
157 err = -EINVAL;
158 rcu_read_lock(); 157 rcu_read_lock();
158 xprt = rcu_dereference(clnt->cl_xprt);
159 /* no "debugfs" dentry? Don't bother with the symlink. */
160 if (!xprt->debugfs) {
161 rcu_read_unlock();
162 return;
163 }
159 len = snprintf(name, sizeof(name), "../../rpc_xprt/%s", 164 len = snprintf(name, sizeof(name), "../../rpc_xprt/%s",
160 rcu_dereference(clnt->cl_xprt)->debugfs->d_name.name); 165 xprt->debugfs->d_name.name);
161 rcu_read_unlock(); 166 rcu_read_unlock();
167
162 if (len >= sizeof(name)) 168 if (len >= sizeof(name))
163 goto out_err; 169 goto out_err;
164 170
165 err = -ENOMEM;
166 if (!debugfs_create_symlink("xprt", clnt->cl_debugfs, name)) 171 if (!debugfs_create_symlink("xprt", clnt->cl_debugfs, name))
167 goto out_err; 172 goto out_err;
168 173
169 return 0; 174 return;
170out_err: 175out_err:
171 debugfs_remove_recursive(clnt->cl_debugfs); 176 debugfs_remove_recursive(clnt->cl_debugfs);
172 clnt->cl_debugfs = NULL; 177 clnt->cl_debugfs = NULL;
173 return err;
174} 178}
175 179
176void 180void
@@ -226,33 +230,33 @@ static const struct file_operations xprt_info_fops = {
226 .release = xprt_info_release, 230 .release = xprt_info_release,
227}; 231};
228 232
229int 233void
230rpc_xprt_debugfs_register(struct rpc_xprt *xprt) 234rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
231{ 235{
232 int len, id; 236 int len, id;
233 static atomic_t cur_id; 237 static atomic_t cur_id;
234 char name[9]; /* 8 hex digits + NULL term */ 238 char name[9]; /* 8 hex digits + NULL term */
235 239
240 if (!rpc_xprt_dir)
241 return;
242
236 id = (unsigned int)atomic_inc_return(&cur_id); 243 id = (unsigned int)atomic_inc_return(&cur_id);
237 244
238 len = snprintf(name, sizeof(name), "%x", id); 245 len = snprintf(name, sizeof(name), "%x", id);
239 if (len >= sizeof(name)) 246 if (len >= sizeof(name))
240 return -EINVAL; 247 return;
241 248
242 /* make the per-client dir */ 249 /* make the per-client dir */
243 xprt->debugfs = debugfs_create_dir(name, rpc_xprt_dir); 250 xprt->debugfs = debugfs_create_dir(name, rpc_xprt_dir);
244 if (!xprt->debugfs) 251 if (!xprt->debugfs)
245 return -ENOMEM; 252 return;
246 253
247 /* make tasks file */ 254 /* make tasks file */
248 if (!debugfs_create_file("info", S_IFREG | S_IRUSR, xprt->debugfs, 255 if (!debugfs_create_file("info", S_IFREG | S_IRUSR, xprt->debugfs,
249 xprt, &xprt_info_fops)) { 256 xprt, &xprt_info_fops)) {
250 debugfs_remove_recursive(xprt->debugfs); 257 debugfs_remove_recursive(xprt->debugfs);
251 xprt->debugfs = NULL; 258 xprt->debugfs = NULL;
252 return -ENOMEM;
253 } 259 }
254
255 return 0;
256} 260}
257 261
258void 262void
@@ -266,14 +270,17 @@ void __exit
266sunrpc_debugfs_exit(void) 270sunrpc_debugfs_exit(void)
267{ 271{
268 debugfs_remove_recursive(topdir); 272 debugfs_remove_recursive(topdir);
273 topdir = NULL;
274 rpc_clnt_dir = NULL;
275 rpc_xprt_dir = NULL;
269} 276}
270 277
271int __init 278void __init
272sunrpc_debugfs_init(void) 279sunrpc_debugfs_init(void)
273{ 280{
274 topdir = debugfs_create_dir("sunrpc", NULL); 281 topdir = debugfs_create_dir("sunrpc", NULL);
275 if (!topdir) 282 if (!topdir)
276 goto out; 283 return;
277 284
278 rpc_clnt_dir = debugfs_create_dir("rpc_clnt", topdir); 285 rpc_clnt_dir = debugfs_create_dir("rpc_clnt", topdir);
279 if (!rpc_clnt_dir) 286 if (!rpc_clnt_dir)
@@ -283,10 +290,9 @@ sunrpc_debugfs_init(void)
283 if (!rpc_xprt_dir) 290 if (!rpc_xprt_dir)
284 goto out_remove; 291 goto out_remove;
285 292
286 return 0; 293 return;
287out_remove: 294out_remove:
288 debugfs_remove_recursive(topdir); 295 debugfs_remove_recursive(topdir);
289 topdir = NULL; 296 topdir = NULL;
290out: 297 rpc_clnt_dir = NULL;
291 return -ENOMEM;
292} 298}
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index e37fbed87956..ee5d3d253102 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -98,10 +98,7 @@ init_sunrpc(void)
98 if (err) 98 if (err)
99 goto out4; 99 goto out4;
100 100
101 err = sunrpc_debugfs_init(); 101 sunrpc_debugfs_init();
102 if (err)
103 goto out5;
104
105#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 102#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
106 rpc_register_sysctl(); 103 rpc_register_sysctl();
107#endif 104#endif
@@ -109,8 +106,6 @@ init_sunrpc(void)
109 init_socket_xprt(); /* clnt sock transport */ 106 init_socket_xprt(); /* clnt sock transport */
110 return 0; 107 return 0;
111 108
112out5:
113 unregister_rpc_pipefs();
114out4: 109out4:
115 unregister_pernet_subsys(&sunrpc_net_ops); 110 unregister_pernet_subsys(&sunrpc_net_ops);
116out3: 111out3:
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index e3015aede0d9..9949722d99ce 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1331,7 +1331,6 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net)
1331 */ 1331 */
1332struct rpc_xprt *xprt_create_transport(struct xprt_create *args) 1332struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
1333{ 1333{
1334 int err;
1335 struct rpc_xprt *xprt; 1334 struct rpc_xprt *xprt;
1336 struct xprt_class *t; 1335 struct xprt_class *t;
1337 1336
@@ -1372,11 +1371,7 @@ found:
1372 return ERR_PTR(-ENOMEM); 1371 return ERR_PTR(-ENOMEM);
1373 } 1372 }
1374 1373
1375 err = rpc_xprt_debugfs_register(xprt); 1374 rpc_xprt_debugfs_register(xprt);
1376 if (err) {
1377 xprt_destroy(xprt);
1378 return ERR_PTR(err);
1379 }
1380 1375
1381 dprintk("RPC: created transport %p with %u slots\n", xprt, 1376 dprintk("RPC: created transport %p with %u slots\n", xprt,
1382 xprt->max_reqs); 1377 xprt->max_reqs);
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index ae558dd7f8ee..c5cbdcb1f0b5 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -413,7 +413,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
413 */ 413 */
414 if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) { 414 if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
415 tipc_link_proto_xmit(node->active_links[node->addr & 1], 415 tipc_link_proto_xmit(node->active_links[node->addr & 1],
416 STATE_MSG, 0, 0, 0, 0, 0); 416 STATE_MSG, 0, 0, 0, 0);
417 tn->bcl->stats.sent_acks++; 417 tn->bcl->stats.sent_acks++;
418 } 418 }
419} 419}
@@ -899,7 +899,7 @@ int tipc_bclink_init(struct net *net)
899 skb_queue_head_init(&bclink->inputq); 899 skb_queue_head_init(&bclink->inputq);
900 bcl->owner = &bclink->node; 900 bcl->owner = &bclink->node;
901 bcl->owner->net = net; 901 bcl->owner->net = net;
902 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; 902 bcl->mtu = MAX_PKT_DEFAULT_MCAST;
903 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); 903 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
904 bcl->bearer_id = MAX_BEARERS; 904 bcl->bearer_id = MAX_BEARERS;
905 rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer); 905 rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 935205e6bcfe..be1c9fa60b09 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -152,11 +152,11 @@ out_netlink:
152static void __exit tipc_exit(void) 152static void __exit tipc_exit(void)
153{ 153{
154 tipc_bearer_cleanup(); 154 tipc_bearer_cleanup();
155 unregister_pernet_subsys(&tipc_net_ops);
155 tipc_netlink_stop(); 156 tipc_netlink_stop();
156 tipc_netlink_compat_stop(); 157 tipc_netlink_compat_stop();
157 tipc_socket_stop(); 158 tipc_socket_stop();
158 tipc_unregister_sysctl(); 159 tipc_unregister_sysctl();
159 unregister_pernet_subsys(&tipc_net_ops);
160 160
161 pr_info("Deactivated\n"); 161 pr_info("Deactivated\n");
162} 162}
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 514466efc25c..a6b30df6ec02 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -89,24 +89,14 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
89#define TIMEOUT_EVT 560817u /* link timer expired */ 89#define TIMEOUT_EVT 560817u /* link timer expired */
90 90
91/* 91/*
92 * The following two 'message types' is really just implementation 92 * State value stored in 'failover_pkts'
93 * data conveniently stored in the message header.
94 * They must not be considered part of the protocol
95 */ 93 */
96#define OPEN_MSG 0 94#define FIRST_FAILOVER 0xffffu
97#define CLOSED_MSG 1
98
99/*
100 * State value stored in 'exp_msg_count'
101 */
102#define START_CHANGEOVER 100000u
103 95
104static void link_handle_out_of_seq_msg(struct tipc_link *link, 96static void link_handle_out_of_seq_msg(struct tipc_link *link,
105 struct sk_buff *skb); 97 struct sk_buff *skb);
106static void tipc_link_proto_rcv(struct tipc_link *link, 98static void tipc_link_proto_rcv(struct tipc_link *link,
107 struct sk_buff *skb); 99 struct sk_buff *skb);
108static int tipc_link_tunnel_rcv(struct tipc_node *node,
109 struct sk_buff **skb);
110static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol); 100static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
111static void link_state_event(struct tipc_link *l_ptr, u32 event); 101static void link_state_event(struct tipc_link *l_ptr, u32 event);
112static void link_reset_statistics(struct tipc_link *l_ptr); 102static void link_reset_statistics(struct tipc_link *l_ptr);
@@ -115,7 +105,7 @@ static void tipc_link_sync_xmit(struct tipc_link *l);
115static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf); 105static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
116static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb); 106static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
117static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb); 107static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
118 108static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
119/* 109/*
120 * Simple link routines 110 * Simple link routines
121 */ 111 */
@@ -146,34 +136,6 @@ static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
146 return l->owner->active_links[1]; 136 return l->owner->active_links[1];
147} 137}
148 138
149static void link_init_max_pkt(struct tipc_link *l_ptr)
150{
151 struct tipc_node *node = l_ptr->owner;
152 struct tipc_net *tn = net_generic(node->net, tipc_net_id);
153 struct tipc_bearer *b_ptr;
154 u32 max_pkt;
155
156 rcu_read_lock();
157 b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
158 if (!b_ptr) {
159 rcu_read_unlock();
160 return;
161 }
162 max_pkt = (b_ptr->mtu & ~3);
163 rcu_read_unlock();
164
165 if (max_pkt > MAX_MSG_SIZE)
166 max_pkt = MAX_MSG_SIZE;
167
168 l_ptr->max_pkt_target = max_pkt;
169 if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
170 l_ptr->max_pkt = l_ptr->max_pkt_target;
171 else
172 l_ptr->max_pkt = MAX_PKT_DEFAULT;
173
174 l_ptr->max_pkt_probes = 0;
175}
176
177/* 139/*
178 * Simple non-static link routines (i.e. referenced outside this file) 140 * Simple non-static link routines (i.e. referenced outside this file)
179 */ 141 */
@@ -314,7 +276,8 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
314 msg_set_bearer_id(msg, b_ptr->identity); 276 msg_set_bearer_id(msg, b_ptr->identity);
315 strcpy((char *)msg_data(msg), if_name); 277 strcpy((char *)msg_data(msg), if_name);
316 l_ptr->net_plane = b_ptr->net_plane; 278 l_ptr->net_plane = b_ptr->net_plane;
317 link_init_max_pkt(l_ptr); 279 l_ptr->advertised_mtu = b_ptr->mtu;
280 l_ptr->mtu = l_ptr->advertised_mtu;
318 l_ptr->priority = b_ptr->priority; 281 l_ptr->priority = b_ptr->priority;
319 tipc_link_set_queue_limits(l_ptr, b_ptr->window); 282 tipc_link_set_queue_limits(l_ptr, b_ptr->window);
320 l_ptr->next_out_no = 1; 283 l_ptr->next_out_no = 1;
@@ -333,15 +296,19 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
333} 296}
334 297
335/** 298/**
336 * link_delete - Conditional deletion of link. 299 * tipc_link_delete - Delete a link
337 * If timer still running, real delete is done when it expires 300 * @l: link to be deleted
338 * @link: link to be deleted
339 */ 301 */
340void tipc_link_delete(struct tipc_link *link) 302void tipc_link_delete(struct tipc_link *l)
341{ 303{
342 tipc_link_reset_fragments(link); 304 tipc_link_reset(l);
343 tipc_node_detach_link(link->owner, link); 305 if (del_timer(&l->timer))
344 tipc_link_put(link); 306 tipc_link_put(l);
307 l->flags |= LINK_STOPPED;
308 /* Delete link now, or when timer is finished: */
309 tipc_link_reset_fragments(l);
310 tipc_node_detach_link(l->owner, l);
311 tipc_link_put(l);
345} 312}
346 313
347void tipc_link_delete_list(struct net *net, unsigned int bearer_id, 314void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
@@ -350,23 +317,12 @@ void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
350 struct tipc_net *tn = net_generic(net, tipc_net_id); 317 struct tipc_net *tn = net_generic(net, tipc_net_id);
351 struct tipc_link *link; 318 struct tipc_link *link;
352 struct tipc_node *node; 319 struct tipc_node *node;
353 bool del_link;
354 320
355 rcu_read_lock(); 321 rcu_read_lock();
356 list_for_each_entry_rcu(node, &tn->node_list, list) { 322 list_for_each_entry_rcu(node, &tn->node_list, list) {
357 tipc_node_lock(node); 323 tipc_node_lock(node);
358 link = node->links[bearer_id]; 324 link = node->links[bearer_id];
359 if (!link) { 325 if (link)
360 tipc_node_unlock(node);
361 continue;
362 }
363 del_link = !tipc_link_is_up(link) && !link->exp_msg_count;
364 tipc_link_reset(link);
365 if (del_timer(&link->timer))
366 tipc_link_put(link);
367 link->flags |= LINK_STOPPED;
368 /* Delete link now, or when failover is finished: */
369 if (shutting_down || !tipc_node_is_up(node) || del_link)
370 tipc_link_delete(link); 326 tipc_link_delete(link);
371 tipc_node_unlock(node); 327 tipc_node_unlock(node);
372 } 328 }
@@ -473,17 +429,17 @@ void tipc_link_purge_queues(struct tipc_link *l_ptr)
473void tipc_link_reset(struct tipc_link *l_ptr) 429void tipc_link_reset(struct tipc_link *l_ptr)
474{ 430{
475 u32 prev_state = l_ptr->state; 431 u32 prev_state = l_ptr->state;
476 u32 checkpoint = l_ptr->next_in_no;
477 int was_active_link = tipc_link_is_active(l_ptr); 432 int was_active_link = tipc_link_is_active(l_ptr);
478 struct tipc_node *owner = l_ptr->owner; 433 struct tipc_node *owner = l_ptr->owner;
434 struct tipc_link *pl = tipc_parallel_link(l_ptr);
479 435
480 msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff)); 436 msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
481 437
482 /* Link is down, accept any session */ 438 /* Link is down, accept any session */
483 l_ptr->peer_session = INVALID_SESSION; 439 l_ptr->peer_session = INVALID_SESSION;
484 440
485 /* Prepare for max packet size negotiation */ 441 /* Prepare for renewed mtu size negotiation */
486 link_init_max_pkt(l_ptr); 442 l_ptr->mtu = l_ptr->advertised_mtu;
487 443
488 l_ptr->state = RESET_UNKNOWN; 444 l_ptr->state = RESET_UNKNOWN;
489 445
@@ -493,11 +449,15 @@ void tipc_link_reset(struct tipc_link *l_ptr)
493 tipc_node_link_down(l_ptr->owner, l_ptr); 449 tipc_node_link_down(l_ptr->owner, l_ptr);
494 tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr); 450 tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
495 451
496 if (was_active_link && tipc_node_active_links(l_ptr->owner)) { 452 if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
497 l_ptr->reset_checkpoint = checkpoint; 453 l_ptr->flags |= LINK_FAILINGOVER;
498 l_ptr->exp_msg_count = START_CHANGEOVER; 454 l_ptr->failover_checkpt = l_ptr->next_in_no;
455 pl->failover_pkts = FIRST_FAILOVER;
456 pl->failover_checkpt = l_ptr->next_in_no;
457 pl->failover_skb = l_ptr->reasm_buf;
458 } else {
459 kfree_skb(l_ptr->reasm_buf);
499 } 460 }
500
501 /* Clean up all queues, except inputq: */ 461 /* Clean up all queues, except inputq: */
502 __skb_queue_purge(&l_ptr->transmq); 462 __skb_queue_purge(&l_ptr->transmq);
503 __skb_queue_purge(&l_ptr->deferdq); 463 __skb_queue_purge(&l_ptr->deferdq);
@@ -507,6 +467,7 @@ void tipc_link_reset(struct tipc_link *l_ptr)
507 if (!skb_queue_empty(owner->inputq)) 467 if (!skb_queue_empty(owner->inputq))
508 owner->action_flags |= TIPC_MSG_EVT; 468 owner->action_flags |= TIPC_MSG_EVT;
509 tipc_link_purge_backlog(l_ptr); 469 tipc_link_purge_backlog(l_ptr);
470 l_ptr->reasm_buf = NULL;
510 l_ptr->rcv_unacked = 0; 471 l_ptr->rcv_unacked = 0;
511 l_ptr->checkpoint = 1; 472 l_ptr->checkpoint = 1;
512 l_ptr->next_out_no = 1; 473 l_ptr->next_out_no = 1;
@@ -558,8 +519,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
558 if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT)) 519 if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
559 return; /* Not yet. */ 520 return; /* Not yet. */
560 521
561 /* Check whether changeover is going on */ 522 if (l_ptr->flags & LINK_FAILINGOVER) {
562 if (l_ptr->exp_msg_count) {
563 if (event == TIMEOUT_EVT) 523 if (event == TIMEOUT_EVT)
564 link_set_timer(l_ptr, cont_intv); 524 link_set_timer(l_ptr, cont_intv);
565 return; 525 return;
@@ -576,11 +536,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
576 l_ptr->checkpoint = l_ptr->next_in_no; 536 l_ptr->checkpoint = l_ptr->next_in_no;
577 if (tipc_bclink_acks_missing(l_ptr->owner)) { 537 if (tipc_bclink_acks_missing(l_ptr->owner)) {
578 tipc_link_proto_xmit(l_ptr, STATE_MSG, 538 tipc_link_proto_xmit(l_ptr, STATE_MSG,
579 0, 0, 0, 0, 0); 539 0, 0, 0, 0);
580 l_ptr->fsm_msg_cnt++;
581 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
582 tipc_link_proto_xmit(l_ptr, STATE_MSG,
583 1, 0, 0, 0, 0);
584 l_ptr->fsm_msg_cnt++; 540 l_ptr->fsm_msg_cnt++;
585 } 541 }
586 link_set_timer(l_ptr, cont_intv); 542 link_set_timer(l_ptr, cont_intv);
@@ -588,7 +544,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
588 } 544 }
589 l_ptr->state = WORKING_UNKNOWN; 545 l_ptr->state = WORKING_UNKNOWN;
590 l_ptr->fsm_msg_cnt = 0; 546 l_ptr->fsm_msg_cnt = 0;
591 tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); 547 tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
592 l_ptr->fsm_msg_cnt++; 548 l_ptr->fsm_msg_cnt++;
593 link_set_timer(l_ptr, cont_intv / 4); 549 link_set_timer(l_ptr, cont_intv / 4);
594 break; 550 break;
@@ -599,7 +555,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
599 l_ptr->state = RESET_RESET; 555 l_ptr->state = RESET_RESET;
600 l_ptr->fsm_msg_cnt = 0; 556 l_ptr->fsm_msg_cnt = 0;
601 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, 557 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
602 0, 0, 0, 0, 0); 558 0, 0, 0, 0);
603 l_ptr->fsm_msg_cnt++; 559 l_ptr->fsm_msg_cnt++;
604 link_set_timer(l_ptr, cont_intv); 560 link_set_timer(l_ptr, cont_intv);
605 break; 561 break;
@@ -622,7 +578,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
622 l_ptr->state = RESET_RESET; 578 l_ptr->state = RESET_RESET;
623 l_ptr->fsm_msg_cnt = 0; 579 l_ptr->fsm_msg_cnt = 0;
624 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, 580 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
625 0, 0, 0, 0, 0); 581 0, 0, 0, 0);
626 l_ptr->fsm_msg_cnt++; 582 l_ptr->fsm_msg_cnt++;
627 link_set_timer(l_ptr, cont_intv); 583 link_set_timer(l_ptr, cont_intv);
628 break; 584 break;
@@ -633,13 +589,13 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
633 l_ptr->checkpoint = l_ptr->next_in_no; 589 l_ptr->checkpoint = l_ptr->next_in_no;
634 if (tipc_bclink_acks_missing(l_ptr->owner)) { 590 if (tipc_bclink_acks_missing(l_ptr->owner)) {
635 tipc_link_proto_xmit(l_ptr, STATE_MSG, 591 tipc_link_proto_xmit(l_ptr, STATE_MSG,
636 0, 0, 0, 0, 0); 592 0, 0, 0, 0);
637 l_ptr->fsm_msg_cnt++; 593 l_ptr->fsm_msg_cnt++;
638 } 594 }
639 link_set_timer(l_ptr, cont_intv); 595 link_set_timer(l_ptr, cont_intv);
640 } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) { 596 } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
641 tipc_link_proto_xmit(l_ptr, STATE_MSG, 597 tipc_link_proto_xmit(l_ptr, STATE_MSG,
642 1, 0, 0, 0, 0); 598 1, 0, 0, 0);
643 l_ptr->fsm_msg_cnt++; 599 l_ptr->fsm_msg_cnt++;
644 link_set_timer(l_ptr, cont_intv / 4); 600 link_set_timer(l_ptr, cont_intv / 4);
645 } else { /* Link has failed */ 601 } else { /* Link has failed */
@@ -649,7 +605,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
649 l_ptr->state = RESET_UNKNOWN; 605 l_ptr->state = RESET_UNKNOWN;
650 l_ptr->fsm_msg_cnt = 0; 606 l_ptr->fsm_msg_cnt = 0;
651 tipc_link_proto_xmit(l_ptr, RESET_MSG, 607 tipc_link_proto_xmit(l_ptr, RESET_MSG,
652 0, 0, 0, 0, 0); 608 0, 0, 0, 0);
653 l_ptr->fsm_msg_cnt++; 609 l_ptr->fsm_msg_cnt++;
654 link_set_timer(l_ptr, cont_intv); 610 link_set_timer(l_ptr, cont_intv);
655 } 611 }
@@ -669,7 +625,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
669 l_ptr->state = WORKING_WORKING; 625 l_ptr->state = WORKING_WORKING;
670 l_ptr->fsm_msg_cnt = 0; 626 l_ptr->fsm_msg_cnt = 0;
671 link_activate(l_ptr); 627 link_activate(l_ptr);
672 tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); 628 tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
673 l_ptr->fsm_msg_cnt++; 629 l_ptr->fsm_msg_cnt++;
674 if (l_ptr->owner->working_links == 1) 630 if (l_ptr->owner->working_links == 1)
675 tipc_link_sync_xmit(l_ptr); 631 tipc_link_sync_xmit(l_ptr);
@@ -679,7 +635,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
679 l_ptr->state = RESET_RESET; 635 l_ptr->state = RESET_RESET;
680 l_ptr->fsm_msg_cnt = 0; 636 l_ptr->fsm_msg_cnt = 0;
681 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, 637 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
682 1, 0, 0, 0, 0); 638 1, 0, 0, 0);
683 l_ptr->fsm_msg_cnt++; 639 l_ptr->fsm_msg_cnt++;
684 link_set_timer(l_ptr, cont_intv); 640 link_set_timer(l_ptr, cont_intv);
685 break; 641 break;
@@ -689,7 +645,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
689 link_set_timer(l_ptr, cont_intv); 645 link_set_timer(l_ptr, cont_intv);
690 break; 646 break;
691 case TIMEOUT_EVT: 647 case TIMEOUT_EVT:
692 tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0); 648 tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
693 l_ptr->fsm_msg_cnt++; 649 l_ptr->fsm_msg_cnt++;
694 link_set_timer(l_ptr, cont_intv); 650 link_set_timer(l_ptr, cont_intv);
695 break; 651 break;
@@ -707,7 +663,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
707 l_ptr->state = WORKING_WORKING; 663 l_ptr->state = WORKING_WORKING;
708 l_ptr->fsm_msg_cnt = 0; 664 l_ptr->fsm_msg_cnt = 0;
709 link_activate(l_ptr); 665 link_activate(l_ptr);
710 tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); 666 tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
711 l_ptr->fsm_msg_cnt++; 667 l_ptr->fsm_msg_cnt++;
712 if (l_ptr->owner->working_links == 1) 668 if (l_ptr->owner->working_links == 1)
713 tipc_link_sync_xmit(l_ptr); 669 tipc_link_sync_xmit(l_ptr);
@@ -717,7 +673,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
717 break; 673 break;
718 case TIMEOUT_EVT: 674 case TIMEOUT_EVT:
719 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG, 675 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
720 0, 0, 0, 0, 0); 676 0, 0, 0, 0);
721 l_ptr->fsm_msg_cnt++; 677 l_ptr->fsm_msg_cnt++;
722 link_set_timer(l_ptr, cont_intv); 678 link_set_timer(l_ptr, cont_intv);
723 break; 679 break;
@@ -746,7 +702,7 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
746 struct tipc_msg *msg = buf_msg(skb_peek(list)); 702 struct tipc_msg *msg = buf_msg(skb_peek(list));
747 unsigned int maxwin = link->window; 703 unsigned int maxwin = link->window;
748 unsigned int imp = msg_importance(msg); 704 unsigned int imp = msg_importance(msg);
749 uint mtu = link->max_pkt; 705 uint mtu = link->mtu;
750 uint ack = mod(link->next_in_no - 1); 706 uint ack = mod(link->next_in_no - 1);
751 uint seqno = link->next_out_no; 707 uint seqno = link->next_out_no;
752 uint bc_last_in = link->owner->bclink.last_in; 708 uint bc_last_in = link->owner->bclink.last_in;
@@ -1200,7 +1156,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
1200 link_retrieve_defq(l_ptr, &head); 1156 link_retrieve_defq(l_ptr, &head);
1201 if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) { 1157 if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
1202 l_ptr->stats.sent_acks++; 1158 l_ptr->stats.sent_acks++;
1203 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 1159 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
1204 } 1160 }
1205 tipc_link_input(l_ptr, skb); 1161 tipc_link_input(l_ptr, skb);
1206 skb = NULL; 1162 skb = NULL;
@@ -1243,7 +1199,7 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
1243 node->action_flags |= TIPC_NAMED_MSG_EVT; 1199 node->action_flags |= TIPC_NAMED_MSG_EVT;
1244 return true; 1200 return true;
1245 case MSG_BUNDLER: 1201 case MSG_BUNDLER:
1246 case CHANGEOVER_PROTOCOL: 1202 case TUNNEL_PROTOCOL:
1247 case MSG_FRAGMENTER: 1203 case MSG_FRAGMENTER:
1248 case BCAST_PROTOCOL: 1204 case BCAST_PROTOCOL:
1249 return false; 1205 return false;
@@ -1270,12 +1226,14 @@ static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
1270 return; 1226 return;
1271 1227
1272 switch (msg_user(msg)) { 1228 switch (msg_user(msg)) {
1273 case CHANGEOVER_PROTOCOL: 1229 case TUNNEL_PROTOCOL:
1274 if (msg_dup(msg)) { 1230 if (msg_dup(msg)) {
1275 link->flags |= LINK_SYNCHING; 1231 link->flags |= LINK_SYNCHING;
1276 link->synch_point = msg_seqno(msg_get_wrapped(msg)); 1232 link->synch_point = msg_seqno(msg_get_wrapped(msg));
1233 kfree_skb(skb);
1234 break;
1277 } 1235 }
1278 if (!tipc_link_tunnel_rcv(node, &skb)) 1236 if (!tipc_link_failover_rcv(link, &skb))
1279 break; 1237 break;
1280 if (msg_user(buf_msg(skb)) != MSG_BUNDLER) { 1238 if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
1281 tipc_data_input(link, skb); 1239 tipc_data_input(link, skb);
@@ -1373,7 +1331,7 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1373 if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) { 1331 if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
1374 l_ptr->stats.deferred_recv++; 1332 l_ptr->stats.deferred_recv++;
1375 if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1) 1333 if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
1376 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 1334 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
1377 } else { 1335 } else {
1378 l_ptr->stats.duplicates++; 1336 l_ptr->stats.duplicates++;
1379 } 1337 }
@@ -1383,15 +1341,15 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1383 * Send protocol message to the other endpoint. 1341 * Send protocol message to the other endpoint.
1384 */ 1342 */
1385void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, 1343void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1386 u32 gap, u32 tolerance, u32 priority, u32 ack_mtu) 1344 u32 gap, u32 tolerance, u32 priority)
1387{ 1345{
1388 struct sk_buff *buf = NULL; 1346 struct sk_buff *buf = NULL;
1389 struct tipc_msg *msg = l_ptr->pmsg; 1347 struct tipc_msg *msg = l_ptr->pmsg;
1390 u32 msg_size = sizeof(l_ptr->proto_msg); 1348 u32 msg_size = sizeof(l_ptr->proto_msg);
1391 int r_flag; 1349 int r_flag;
1392 1350
1393 /* Don't send protocol message during link changeover */ 1351 /* Don't send protocol message during link failover */
1394 if (l_ptr->exp_msg_count) 1352 if (l_ptr->flags & LINK_FAILINGOVER)
1395 return; 1353 return;
1396 1354
1397 /* Abort non-RESET send if communication with node is prohibited */ 1355 /* Abort non-RESET send if communication with node is prohibited */
@@ -1421,35 +1379,20 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1421 l_ptr->stats.sent_nacks++; 1379 l_ptr->stats.sent_nacks++;
1422 msg_set_link_tolerance(msg, tolerance); 1380 msg_set_link_tolerance(msg, tolerance);
1423 msg_set_linkprio(msg, priority); 1381 msg_set_linkprio(msg, priority);
1424 msg_set_max_pkt(msg, ack_mtu); 1382 msg_set_max_pkt(msg, l_ptr->mtu);
1425 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1383 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1426 msg_set_probe(msg, probe_msg != 0); 1384 msg_set_probe(msg, probe_msg != 0);
1427 if (probe_msg) { 1385 if (probe_msg)
1428 u32 mtu = l_ptr->max_pkt;
1429
1430 if ((mtu < l_ptr->max_pkt_target) &&
1431 link_working_working(l_ptr) &&
1432 l_ptr->fsm_msg_cnt) {
1433 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1434 if (l_ptr->max_pkt_probes == 10) {
1435 l_ptr->max_pkt_target = (msg_size - 4);
1436 l_ptr->max_pkt_probes = 0;
1437 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1438 }
1439 l_ptr->max_pkt_probes++;
1440 }
1441
1442 l_ptr->stats.sent_probes++; 1386 l_ptr->stats.sent_probes++;
1443 }
1444 l_ptr->stats.sent_states++; 1387 l_ptr->stats.sent_states++;
1445 } else { /* RESET_MSG or ACTIVATE_MSG */ 1388 } else { /* RESET_MSG or ACTIVATE_MSG */
1446 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1)); 1389 msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1));
1447 msg_set_seq_gap(msg, 0); 1390 msg_set_seq_gap(msg, 0);
1448 msg_set_next_sent(msg, 1); 1391 msg_set_next_sent(msg, 1);
1449 msg_set_probe(msg, 0); 1392 msg_set_probe(msg, 0);
1450 msg_set_link_tolerance(msg, l_ptr->tolerance); 1393 msg_set_link_tolerance(msg, l_ptr->tolerance);
1451 msg_set_linkprio(msg, l_ptr->priority); 1394 msg_set_linkprio(msg, l_ptr->priority);
1452 msg_set_max_pkt(msg, l_ptr->max_pkt_target); 1395 msg_set_max_pkt(msg, l_ptr->advertised_mtu);
1453 } 1396 }
1454 1397
1455 r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr)); 1398 r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
@@ -1480,13 +1423,10 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
1480 struct sk_buff *buf) 1423 struct sk_buff *buf)
1481{ 1424{
1482 u32 rec_gap = 0; 1425 u32 rec_gap = 0;
1483 u32 max_pkt_info;
1484 u32 max_pkt_ack;
1485 u32 msg_tol; 1426 u32 msg_tol;
1486 struct tipc_msg *msg = buf_msg(buf); 1427 struct tipc_msg *msg = buf_msg(buf);
1487 1428
1488 /* Discard protocol message during link changeover */ 1429 if (l_ptr->flags & LINK_FAILINGOVER)
1489 if (l_ptr->exp_msg_count)
1490 goto exit; 1430 goto exit;
1491 1431
1492 if (l_ptr->net_plane != msg_net_plane(msg)) 1432 if (l_ptr->net_plane != msg_net_plane(msg))
@@ -1525,15 +1465,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
1525 if (msg_linkprio(msg) > l_ptr->priority) 1465 if (msg_linkprio(msg) > l_ptr->priority)
1526 l_ptr->priority = msg_linkprio(msg); 1466 l_ptr->priority = msg_linkprio(msg);
1527 1467
1528 max_pkt_info = msg_max_pkt(msg); 1468 if (l_ptr->mtu > msg_max_pkt(msg))
1529 if (max_pkt_info) { 1469 l_ptr->mtu = msg_max_pkt(msg);
1530 if (max_pkt_info < l_ptr->max_pkt_target)
1531 l_ptr->max_pkt_target = max_pkt_info;
1532 if (l_ptr->max_pkt > l_ptr->max_pkt_target)
1533 l_ptr->max_pkt = l_ptr->max_pkt_target;
1534 } else {
1535 l_ptr->max_pkt = l_ptr->max_pkt_target;
1536 }
1537 1470
1538 /* Synchronize broadcast link info, if not done previously */ 1471 /* Synchronize broadcast link info, if not done previously */
1539 if (!tipc_node_is_up(l_ptr->owner)) { 1472 if (!tipc_node_is_up(l_ptr->owner)) {
@@ -1578,18 +1511,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
1578 mod(l_ptr->next_in_no)); 1511 mod(l_ptr->next_in_no));
1579 } 1512 }
1580 1513
1581 max_pkt_ack = msg_max_pkt(msg); 1514 if (msg_probe(msg))
1582 if (max_pkt_ack > l_ptr->max_pkt) {
1583 l_ptr->max_pkt = max_pkt_ack;
1584 l_ptr->max_pkt_probes = 0;
1585 }
1586
1587 max_pkt_ack = 0;
1588 if (msg_probe(msg)) {
1589 l_ptr->stats.recv_probes++; 1515 l_ptr->stats.recv_probes++;
1590 if (msg_size(msg) > sizeof(l_ptr->proto_msg))
1591 max_pkt_ack = msg_size(msg);
1592 }
1593 1516
1594 /* Protocol message before retransmits, reduce loss risk */ 1517 /* Protocol message before retransmits, reduce loss risk */
1595 if (l_ptr->owner->bclink.recv_permitted) 1518 if (l_ptr->owner->bclink.recv_permitted)
@@ -1597,8 +1520,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
1597 msg_last_bcast(msg)); 1520 msg_last_bcast(msg));
1598 1521
1599 if (rec_gap || (msg_probe(msg))) { 1522 if (rec_gap || (msg_probe(msg))) {
1600 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0, 1523 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0,
1601 0, max_pkt_ack); 1524 rec_gap, 0, 0);
1602 } 1525 }
1603 if (msg_seq_gap(msg)) { 1526 if (msg_seq_gap(msg)) {
1604 l_ptr->stats.recv_nacks++; 1527 l_ptr->stats.recv_nacks++;
@@ -1658,8 +1581,8 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
1658 if (!tunnel) 1581 if (!tunnel)
1659 return; 1582 return;
1660 1583
1661 tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL, 1584 tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
1662 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr); 1585 FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
1663 skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq); 1586 skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
1664 tipc_link_purge_backlog(l_ptr); 1587 tipc_link_purge_backlog(l_ptr);
1665 msgcount = skb_queue_len(&l_ptr->transmq); 1588 msgcount = skb_queue_len(&l_ptr->transmq);
@@ -1721,8 +1644,8 @@ void tipc_link_dup_queue_xmit(struct tipc_link *link,
1721 struct sk_buff_head *queue = &link->transmq; 1644 struct sk_buff_head *queue = &link->transmq;
1722 int mcnt; 1645 int mcnt;
1723 1646
1724 tipc_msg_init(link_own_addr(link), &tnl_hdr, CHANGEOVER_PROTOCOL, 1647 tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
1725 DUPLICATE_MSG, INT_H_SIZE, link->addr); 1648 SYNCH_MSG, INT_H_SIZE, link->addr);
1726 mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq); 1649 mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
1727 msg_set_msgcnt(&tnl_hdr, mcnt); 1650 msg_set_msgcnt(&tnl_hdr, mcnt);
1728 msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id); 1651 msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);
@@ -1755,101 +1678,63 @@ tunnel_queue:
1755 goto tunnel_queue; 1678 goto tunnel_queue;
1756} 1679}
1757 1680
1758/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet. 1681/* tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
1759 * Owner node is locked.
1760 */
1761static void tipc_link_dup_rcv(struct tipc_link *link,
1762 struct sk_buff *skb)
1763{
1764 struct sk_buff *iskb;
1765 int pos = 0;
1766
1767 if (!tipc_link_is_up(link))
1768 return;
1769
1770 if (!tipc_msg_extract(skb, &iskb, &pos)) {
1771 pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
1772 return;
1773 }
1774 /* Append buffer to deferred queue, if applicable: */
1775 link_handle_out_of_seq_msg(link, iskb);
1776}
1777
1778/* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
1779 * Owner node is locked. 1682 * Owner node is locked.
1780 */ 1683 */
1781static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr, 1684static bool tipc_link_failover_rcv(struct tipc_link *link,
1782 struct sk_buff *t_buf) 1685 struct sk_buff **skb)
1783{ 1686{
1784 struct tipc_msg *t_msg = buf_msg(t_buf); 1687 struct tipc_msg *msg = buf_msg(*skb);
1785 struct sk_buff *buf = NULL; 1688 struct sk_buff *iskb = NULL;
1786 struct tipc_msg *msg; 1689 struct tipc_link *pl = NULL;
1690 int bearer_id = msg_bearer_id(msg);
1787 int pos = 0; 1691 int pos = 0;
1788 1692
1789 if (tipc_link_is_up(l_ptr)) 1693 if (msg_type(msg) != FAILOVER_MSG) {
1790 tipc_link_reset(l_ptr); 1694 pr_warn("%sunknown tunnel pkt received\n", link_co_err);
1791 1695 goto exit;
1792 /* First failover packet? */
1793 if (l_ptr->exp_msg_count == START_CHANGEOVER)
1794 l_ptr->exp_msg_count = msg_msgcnt(t_msg);
1795
1796 /* Should there be an inner packet? */
1797 if (l_ptr->exp_msg_count) {
1798 l_ptr->exp_msg_count--;
1799 if (!tipc_msg_extract(t_buf, &buf, &pos)) {
1800 pr_warn("%sno inner failover pkt\n", link_co_err);
1801 goto exit;
1802 }
1803 msg = buf_msg(buf);
1804
1805 if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
1806 kfree_skb(buf);
1807 buf = NULL;
1808 goto exit;
1809 }
1810 if (msg_user(msg) == MSG_FRAGMENTER) {
1811 l_ptr->stats.recv_fragments++;
1812 tipc_buf_append(&l_ptr->reasm_buf, &buf);
1813 }
1814 } 1696 }
1815exit: 1697 if (bearer_id >= MAX_BEARERS)
1816 if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED)) 1698 goto exit;
1817 tipc_link_delete(l_ptr);
1818 return buf;
1819}
1820 1699
1821/* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent 1700 if (bearer_id == link->bearer_id)
1822 * via other link as result of a failover (ORIGINAL_MSG) or 1701 goto exit;
1823 * a new active link (DUPLICATE_MSG). Failover packets are
1824 * returned to the active link for delivery upwards.
1825 * Owner node is locked.
1826 */
1827static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
1828 struct sk_buff **buf)
1829{
1830 struct sk_buff *t_buf = *buf;
1831 struct tipc_link *l_ptr;
1832 struct tipc_msg *t_msg = buf_msg(t_buf);
1833 u32 bearer_id = msg_bearer_id(t_msg);
1834 1702
1835 *buf = NULL; 1703 pl = link->owner->links[bearer_id];
1704 if (pl && tipc_link_is_up(pl))
1705 tipc_link_reset(pl);
1836 1706
1837 if (bearer_id >= MAX_BEARERS) 1707 if (link->failover_pkts == FIRST_FAILOVER)
1708 link->failover_pkts = msg_msgcnt(msg);
1709
1710 /* Should we expect an inner packet? */
1711 if (!link->failover_pkts)
1838 goto exit; 1712 goto exit;
1839 1713
1840 l_ptr = n_ptr->links[bearer_id]; 1714 if (!tipc_msg_extract(*skb, &iskb, &pos)) {
1841 if (!l_ptr) 1715 pr_warn("%sno inner failover pkt\n", link_co_err);
1716 *skb = NULL;
1842 goto exit; 1717 goto exit;
1718 }
1719 link->failover_pkts--;
1720 *skb = NULL;
1843 1721
1844 if (msg_type(t_msg) == DUPLICATE_MSG) 1722 /* Was this packet already delivered? */
1845 tipc_link_dup_rcv(l_ptr, t_buf); 1723 if (less(buf_seqno(iskb), link->failover_checkpt)) {
1846 else if (msg_type(t_msg) == ORIGINAL_MSG) 1724 kfree_skb(iskb);
1847 *buf = tipc_link_failover_rcv(l_ptr, t_buf); 1725 iskb = NULL;
1848 else 1726 goto exit;
1849 pr_warn("%sunknown tunnel pkt received\n", link_co_err); 1727 }
1728 if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
1729 link->stats.recv_fragments++;
1730 tipc_buf_append(&link->failover_skb, &iskb);
1731 }
1850exit: 1732exit:
1851 kfree_skb(t_buf); 1733 if (!link->failover_pkts && pl)
1852 return *buf != NULL; 1734 pl->flags &= ~LINK_FAILINGOVER;
1735 kfree_skb(*skb);
1736 *skb = iskb;
1737 return *skb;
1853} 1738}
1854 1739
1855static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol) 1740static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
@@ -1866,7 +1751,7 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
1866 1751
1867void tipc_link_set_queue_limits(struct tipc_link *l, u32 win) 1752void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
1868{ 1753{
1869 int max_bulk = TIPC_MAX_PUBLICATIONS / (l->max_pkt / ITEM_SIZE); 1754 int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
1870 1755
1871 l->window = win; 1756 l->window = win;
1872 l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2; 1757 l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2;
@@ -2038,14 +1923,14 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
2038 1923
2039 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); 1924 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2040 link_set_supervision_props(link, tol); 1925 link_set_supervision_props(link, tol);
2041 tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0); 1926 tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
2042 } 1927 }
2043 if (props[TIPC_NLA_PROP_PRIO]) { 1928 if (props[TIPC_NLA_PROP_PRIO]) {
2044 u32 prio; 1929 u32 prio;
2045 1930
2046 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); 1931 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2047 link->priority = prio; 1932 link->priority = prio;
2048 tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0); 1933 tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
2049 } 1934 }
2050 if (props[TIPC_NLA_PROP_WIN]) { 1935 if (props[TIPC_NLA_PROP_WIN]) {
2051 u32 win; 1936 u32 win;
@@ -2150,7 +2035,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2150 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, 2035 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
2151 tipc_cluster_mask(tn->own_addr))) 2036 tipc_cluster_mask(tn->own_addr)))
2152 goto attr_msg_full; 2037 goto attr_msg_full;
2153 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt)) 2038 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
2154 goto attr_msg_full; 2039 goto attr_msg_full;
2155 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no)) 2040 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
2156 goto attr_msg_full; 2041 goto attr_msg_full;
diff --git a/net/tipc/link.h b/net/tipc/link.h
index d2b5663643da..b5b4e3554d4e 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -58,9 +58,10 @@
58 58
59/* Link endpoint execution states 59/* Link endpoint execution states
60 */ 60 */
61#define LINK_STARTED 0x0001 61#define LINK_STARTED 0x0001
62#define LINK_STOPPED 0x0002 62#define LINK_STOPPED 0x0002
63#define LINK_SYNCHING 0x0004 63#define LINK_SYNCHING 0x0004
64#define LINK_FAILINGOVER 0x0008
64 65
65/* Starting value for maximum packet size negotiation on unicast links 66/* Starting value for maximum packet size negotiation on unicast links
66 * (unless bearer MTU is less) 67 * (unless bearer MTU is less)
@@ -122,9 +123,8 @@ struct tipc_stats {
122 * @backlog_limit: backlog queue congestion thresholds (indexed by importance) 123 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
123 * @exp_msg_count: # of tunnelled messages expected during link changeover 124 * @exp_msg_count: # of tunnelled messages expected during link changeover
124 * @reset_checkpoint: seq # of last acknowledged message at time of link reset 125 * @reset_checkpoint: seq # of last acknowledged message at time of link reset
125 * @max_pkt: current maximum packet size for this link 126 * @mtu: current maximum packet size for this link
126 * @max_pkt_target: desired maximum packet size for this link 127 * @advertised_mtu: advertised own mtu when link is being established
127 * @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target)
128 * @transmitq: queue for sent, non-acked messages 128 * @transmitq: queue for sent, non-acked messages
129 * @backlogq: queue for messages waiting to be sent 129 * @backlogq: queue for messages waiting to be sent
130 * @next_out_no: next sequence number to use for outbound messages 130 * @next_out_no: next sequence number to use for outbound messages
@@ -167,16 +167,16 @@ struct tipc_link {
167 struct tipc_msg *pmsg; 167 struct tipc_msg *pmsg;
168 u32 priority; 168 u32 priority;
169 char net_plane; 169 char net_plane;
170 u16 synch_point;
170 171
171 /* Changeover */ 172 /* Failover */
172 u32 exp_msg_count; 173 u16 failover_pkts;
173 u32 reset_checkpoint; 174 u16 failover_checkpt;
174 u32 synch_point; 175 struct sk_buff *failover_skb;
175 176
176 /* Max packet negotiation */ 177 /* Max packet negotiation */
177 u32 max_pkt; 178 u16 mtu;
178 u32 max_pkt_target; 179 u16 advertised_mtu;
179 u32 max_pkt_probes;
180 180
181 /* Sending */ 181 /* Sending */
182 struct sk_buff_head transmq; 182 struct sk_buff_head transmq;
@@ -201,7 +201,6 @@ struct tipc_link {
201 struct sk_buff_head wakeupq; 201 struct sk_buff_head wakeupq;
202 202
203 /* Fragmentation/reassembly */ 203 /* Fragmentation/reassembly */
204 u32 long_msg_seq_no;
205 struct sk_buff *reasm_buf; 204 struct sk_buff *reasm_buf;
206 205
207 /* Statistics */ 206 /* Statistics */
@@ -232,7 +231,7 @@ int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest,
232int __tipc_link_xmit(struct net *net, struct tipc_link *link, 231int __tipc_link_xmit(struct net *net, struct tipc_link *link,
233 struct sk_buff_head *list); 232 struct sk_buff_head *list);
234void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob, 233void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
235 u32 gap, u32 tolerance, u32 priority, u32 acked_mtu); 234 u32 gap, u32 tolerance, u32 priority);
236void tipc_link_push_packets(struct tipc_link *l_ptr); 235void tipc_link_push_packets(struct tipc_link *l_ptr);
237u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf); 236u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf);
238void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window); 237void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 3bb499c61918..c3e96e815418 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -355,7 +355,7 @@ bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
355 start = align(bsz); 355 start = align(bsz);
356 pad = start - bsz; 356 pad = start - bsz;
357 357
358 if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL)) 358 if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL))
359 return false; 359 return false;
360 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) 360 if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
361 return false; 361 return false;
@@ -433,7 +433,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode)
433 433
434 if (msg_user(msg) == MSG_FRAGMENTER) 434 if (msg_user(msg) == MSG_FRAGMENTER)
435 return false; 435 return false;
436 if (msg_user(msg) == CHANGEOVER_PROTOCOL) 436 if (msg_user(msg) == TUNNEL_PROTOCOL)
437 return false; 437 return false;
438 if (msg_user(msg) == BCAST_PROTOCOL) 438 if (msg_user(msg) == BCAST_PROTOCOL)
439 return false; 439 return false;
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index d273207ede28..e1d3595e2ee9 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -72,7 +72,7 @@ struct plist;
72#define MSG_BUNDLER 6 72#define MSG_BUNDLER 6
73#define LINK_PROTOCOL 7 73#define LINK_PROTOCOL 7
74#define CONN_MANAGER 8 74#define CONN_MANAGER 8
75#define CHANGEOVER_PROTOCOL 10 75#define TUNNEL_PROTOCOL 10
76#define NAME_DISTRIBUTOR 11 76#define NAME_DISTRIBUTOR 11
77#define MSG_FRAGMENTER 12 77#define MSG_FRAGMENTER 12
78#define LINK_CONFIG 13 78#define LINK_CONFIG 13
@@ -512,8 +512,8 @@ static inline void msg_set_nameupper(struct tipc_msg *m, u32 n)
512/* 512/*
513 * Changeover tunnel message types 513 * Changeover tunnel message types
514 */ 514 */
515#define DUPLICATE_MSG 0 515#define SYNCH_MSG 0
516#define ORIGINAL_MSG 1 516#define FAILOVER_MSG 1
517 517
518/* 518/*
519 * Config protocol message types 519 * Config protocol message types
@@ -556,9 +556,9 @@ static inline void msg_set_node_capabilities(struct tipc_msg *m, u32 n)
556 556
557static inline bool msg_dup(struct tipc_msg *m) 557static inline bool msg_dup(struct tipc_msg *m)
558{ 558{
559 if (likely(msg_user(m) != CHANGEOVER_PROTOCOL)) 559 if (likely(msg_user(m) != TUNNEL_PROTOCOL))
560 return false; 560 return false;
561 if (msg_type(m) != DUPLICATE_MSG) 561 if (msg_type(m) != SYNCH_MSG)
562 return false; 562 return false;
563 return true; 563 return true;
564} 564}
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 3e4f04897c03..22c059ad2999 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -254,8 +254,8 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
254 active[0] = active[1] = l_ptr; 254 active[0] = active[1] = l_ptr;
255exit: 255exit:
256 /* Leave room for changeover header when returning 'mtu' to users: */ 256 /* Leave room for changeover header when returning 'mtu' to users: */
257 n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE; 257 n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
258 n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE; 258 n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
259} 259}
260 260
261/** 261/**
@@ -319,11 +319,10 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
319 319
320 /* Leave room for changeover header when returning 'mtu' to users: */ 320 /* Leave room for changeover header when returning 'mtu' to users: */
321 if (active[0]) { 321 if (active[0]) {
322 n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE; 322 n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
323 n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE; 323 n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
324 return; 324 return;
325 } 325 }
326
327 /* Loopback link went down? No fragmentation needed from now on. */ 326 /* Loopback link went down? No fragmentation needed from now on. */
328 if (n_ptr->addr == tn->own_addr) { 327 if (n_ptr->addr == tn->own_addr) {
329 n_ptr->act_mtus[0] = MAX_MSG_SIZE; 328 n_ptr->act_mtus[0] = MAX_MSG_SIZE;
@@ -394,18 +393,17 @@ static void node_lost_contact(struct tipc_node *n_ptr)
394 n_ptr->bclink.recv_permitted = false; 393 n_ptr->bclink.recv_permitted = false;
395 } 394 }
396 395
397 /* Abort link changeover */ 396 /* Abort any ongoing link failover */
398 for (i = 0; i < MAX_BEARERS; i++) { 397 for (i = 0; i < MAX_BEARERS; i++) {
399 struct tipc_link *l_ptr = n_ptr->links[i]; 398 struct tipc_link *l_ptr = n_ptr->links[i];
400 if (!l_ptr) 399 if (!l_ptr)
401 continue; 400 continue;
402 l_ptr->reset_checkpoint = l_ptr->next_in_no; 401 l_ptr->flags &= ~LINK_FAILINGOVER;
403 l_ptr->exp_msg_count = 0; 402 l_ptr->failover_checkpt = 0;
403 l_ptr->failover_pkts = 0;
404 kfree_skb(l_ptr->failover_skb);
405 l_ptr->failover_skb = NULL;
404 tipc_link_reset_fragments(l_ptr); 406 tipc_link_reset_fragments(l_ptr);
405
406 /* Link marked for deletion after failover? => do it now */
407 if (l_ptr->flags & LINK_STOPPED)
408 tipc_link_delete(l_ptr);
409 } 407 }
410 408
411 n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN; 409 n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index ef3d7aa2854a..66deebc66aa1 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -176,7 +176,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
176 goto tx_error; 176 goto tx_error;
177 } 177 }
178 ttl = ip4_dst_hoplimit(&rt->dst); 178 ttl = ip4_dst_hoplimit(&rt->dst);
179 err = udp_tunnel_xmit_skb(rt, clone, src->ipv4.s_addr, 179 err = udp_tunnel_xmit_skb(rt, ub->ubsock->sk, clone,
180 src->ipv4.s_addr,
180 dst->ipv4.s_addr, 0, ttl, 0, 181 dst->ipv4.s_addr, 0, ttl, 0,
181 src->udp_port, dst->udp_port, 182 src->udp_port, dst->udp_port,
182 false, true); 183 false, true);
@@ -197,7 +198,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
197 if (err) 198 if (err)
198 goto tx_error; 199 goto tx_error;
199 ttl = ip6_dst_hoplimit(ndst); 200 ttl = ip6_dst_hoplimit(ndst);
200 err = udp_tunnel6_xmit_skb(ndst, clone, ndst->dev, &src->ipv6, 201 err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, clone,
202 ndst->dev, &src->ipv6,
201 &dst->ipv6, 0, ttl, src->udp_port, 203 &dst->ipv6, 0, ttl, src->udp_port,
202 dst->udp_port, false); 204 dst->udp_port, false);
203#endif 205#endif
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 7c532856b398..fbcedbe33190 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -19,7 +19,7 @@
19#include <net/dst.h> 19#include <net/dst.h>
20#include <net/xfrm.h> 20#include <net/xfrm.h>
21 21
22static int xfrm_output2(struct sk_buff *skb); 22static int xfrm_output2(struct sock *sk, struct sk_buff *skb);
23 23
24static int xfrm_skb_check_space(struct sk_buff *skb) 24static int xfrm_skb_check_space(struct sk_buff *skb)
25{ 25{
@@ -130,7 +130,7 @@ int xfrm_output_resume(struct sk_buff *skb, int err)
130 return dst_output(skb); 130 return dst_output(skb);
131 131
132 err = nf_hook(skb_dst(skb)->ops->family, 132 err = nf_hook(skb_dst(skb)->ops->family,
133 NF_INET_POST_ROUTING, skb, 133 NF_INET_POST_ROUTING, skb->sk, skb,
134 NULL, skb_dst(skb)->dev, xfrm_output2); 134 NULL, skb_dst(skb)->dev, xfrm_output2);
135 if (unlikely(err != 1)) 135 if (unlikely(err != 1))
136 goto out; 136 goto out;
@@ -144,12 +144,12 @@ out:
144} 144}
145EXPORT_SYMBOL_GPL(xfrm_output_resume); 145EXPORT_SYMBOL_GPL(xfrm_output_resume);
146 146
147static int xfrm_output2(struct sk_buff *skb) 147static int xfrm_output2(struct sock *sk, struct sk_buff *skb)
148{ 148{
149 return xfrm_output_resume(skb, 1); 149 return xfrm_output_resume(skb, 1);
150} 150}
151 151
152static int xfrm_output_gso(struct sk_buff *skb) 152static int xfrm_output_gso(struct sock *sk, struct sk_buff *skb)
153{ 153{
154 struct sk_buff *segs; 154 struct sk_buff *segs;
155 155
@@ -165,7 +165,7 @@ static int xfrm_output_gso(struct sk_buff *skb)
165 int err; 165 int err;
166 166
167 segs->next = NULL; 167 segs->next = NULL;
168 err = xfrm_output2(segs); 168 err = xfrm_output2(sk, segs);
169 169
170 if (unlikely(err)) { 170 if (unlikely(err)) {
171 kfree_skb_list(nskb); 171 kfree_skb_list(nskb);
@@ -178,13 +178,13 @@ static int xfrm_output_gso(struct sk_buff *skb)
178 return 0; 178 return 0;
179} 179}
180 180
181int xfrm_output(struct sk_buff *skb) 181int xfrm_output(struct sock *sk, struct sk_buff *skb)
182{ 182{
183 struct net *net = dev_net(skb_dst(skb)->dev); 183 struct net *net = dev_net(skb_dst(skb)->dev);
184 int err; 184 int err;
185 185
186 if (skb_is_gso(skb)) 186 if (skb_is_gso(skb))
187 return xfrm_output_gso(skb); 187 return xfrm_output_gso(sk, skb);
188 188
189 if (skb->ip_summed == CHECKSUM_PARTIAL) { 189 if (skb->ip_summed == CHECKSUM_PARTIAL) {
190 err = skb_checksum_help(skb); 190 err = skb_checksum_help(skb);
@@ -195,7 +195,7 @@ int xfrm_output(struct sk_buff *skb)
195 } 195 }
196 } 196 }
197 197
198 return xfrm_output2(skb); 198 return xfrm_output2(sk, skb);
199} 199}
200EXPORT_SYMBOL_GPL(xfrm_output); 200EXPORT_SYMBOL_GPL(xfrm_output);
201 201
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index b5b3600dcdf5..d24f51bca465 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -17,6 +17,7 @@ sockex2-objs := bpf_load.o libbpf.o sockex2_user.o
17always := $(hostprogs-y) 17always := $(hostprogs-y)
18always += sockex1_kern.o 18always += sockex1_kern.o
19always += sockex2_kern.o 19always += sockex2_kern.o
20always += tcbpf1_kern.o
20 21
21HOSTCFLAGS += -I$(objtree)/usr/include 22HOSTCFLAGS += -I$(objtree)/usr/include
22 23
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index ca0333146006..72540ec1f003 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -37,4 +37,11 @@ struct bpf_map_def {
37 unsigned int max_entries; 37 unsigned int max_entries;
38}; 38};
39 39
40static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
41 (void *) BPF_FUNC_skb_store_bytes;
42static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) =
43 (void *) BPF_FUNC_l3_csum_replace;
44static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
45 (void *) BPF_FUNC_l4_csum_replace;
46
40#endif 47#endif
diff --git a/samples/bpf/tcbpf1_kern.c b/samples/bpf/tcbpf1_kern.c
new file mode 100644
index 000000000000..7cf3f42a6e39
--- /dev/null
+++ b/samples/bpf/tcbpf1_kern.c
@@ -0,0 +1,71 @@
1#include <uapi/linux/bpf.h>
2#include <uapi/linux/if_ether.h>
3#include <uapi/linux/if_packet.h>
4#include <uapi/linux/ip.h>
5#include <uapi/linux/in.h>
6#include <uapi/linux/tcp.h>
7#include "bpf_helpers.h"
8
9/* compiler workaround */
10#define _htonl __builtin_bswap32
11
12static inline void set_dst_mac(struct __sk_buff *skb, char *mac)
13{
14 bpf_skb_store_bytes(skb, 0, mac, ETH_ALEN, 1);
15}
16
17/* use 1 below for ingress qdisc and 0 for egress */
18#if 0
19#undef ETH_HLEN
20#define ETH_HLEN 0
21#endif
22
23#define IP_CSUM_OFF (ETH_HLEN + offsetof(struct iphdr, check))
24#define TOS_OFF (ETH_HLEN + offsetof(struct iphdr, tos))
25
26static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos)
27{
28 __u8 old_tos = load_byte(skb, TOS_OFF);
29
30 bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2);
31 bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0);
32}
33
34#define TCP_CSUM_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, check))
35#define IP_SRC_OFF (ETH_HLEN + offsetof(struct iphdr, saddr))
36
37#define IS_PSEUDO 0x10
38
39static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip)
40{
41 __u32 old_ip = _htonl(load_word(skb, IP_SRC_OFF));
42
43 bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip));
44 bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip));
45 bpf_skb_store_bytes(skb, IP_SRC_OFF, &new_ip, sizeof(new_ip), 0);
46}
47
48#define TCP_DPORT_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, dest))
49static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port)
50{
51 __u16 old_port = htons(load_half(skb, TCP_DPORT_OFF));
52
53 bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port));
54 bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0);
55}
56
57SEC("classifier")
58int bpf_prog1(struct __sk_buff *skb)
59{
60 __u8 proto = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol));
61 long *value;
62
63 if (proto == IPPROTO_TCP) {
64 set_ip_tos(skb, 8);
65 set_tcp_ip_src(skb, 0xA010101);
66 set_tcp_dest_port(skb, 5001);
67 }
68
69 return 0;
70}
71char _license[] SEC("license") = "GPL";
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index edc66de39f2e..7e392edaab97 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4852,21 +4852,17 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb,
4852 4852
4853static unsigned int selinux_ipv4_forward(const struct nf_hook_ops *ops, 4853static unsigned int selinux_ipv4_forward(const struct nf_hook_ops *ops,
4854 struct sk_buff *skb, 4854 struct sk_buff *skb,
4855 const struct net_device *in, 4855 const struct nf_hook_state *state)
4856 const struct net_device *out,
4857 int (*okfn)(struct sk_buff *))
4858{ 4856{
4859 return selinux_ip_forward(skb, in, PF_INET); 4857 return selinux_ip_forward(skb, state->in, PF_INET);
4860} 4858}
4861 4859
4862#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 4860#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
4863static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops, 4861static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops,
4864 struct sk_buff *skb, 4862 struct sk_buff *skb,
4865 const struct net_device *in, 4863 const struct nf_hook_state *state)
4866 const struct net_device *out,
4867 int (*okfn)(struct sk_buff *))
4868{ 4864{
4869 return selinux_ip_forward(skb, in, PF_INET6); 4865 return selinux_ip_forward(skb, state->in, PF_INET6);
4870} 4866}
4871#endif /* IPV6 */ 4867#endif /* IPV6 */
4872 4868
@@ -4914,9 +4910,7 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
4914 4910
4915static unsigned int selinux_ipv4_output(const struct nf_hook_ops *ops, 4911static unsigned int selinux_ipv4_output(const struct nf_hook_ops *ops,
4916 struct sk_buff *skb, 4912 struct sk_buff *skb,
4917 const struct net_device *in, 4913 const struct nf_hook_state *state)
4918 const struct net_device *out,
4919 int (*okfn)(struct sk_buff *))
4920{ 4914{
4921 return selinux_ip_output(skb, PF_INET); 4915 return selinux_ip_output(skb, PF_INET);
4922} 4916}
@@ -5091,21 +5085,17 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb,
5091 5085
5092static unsigned int selinux_ipv4_postroute(const struct nf_hook_ops *ops, 5086static unsigned int selinux_ipv4_postroute(const struct nf_hook_ops *ops,
5093 struct sk_buff *skb, 5087 struct sk_buff *skb,
5094 const struct net_device *in, 5088 const struct nf_hook_state *state)
5095 const struct net_device *out,
5096 int (*okfn)(struct sk_buff *))
5097{ 5089{
5098 return selinux_ip_postroute(skb, out, PF_INET); 5090 return selinux_ip_postroute(skb, state->out, PF_INET);
5099} 5091}
5100 5092
5101#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 5093#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
5102static unsigned int selinux_ipv6_postroute(const struct nf_hook_ops *ops, 5094static unsigned int selinux_ipv6_postroute(const struct nf_hook_ops *ops,
5103 struct sk_buff *skb, 5095 struct sk_buff *skb,
5104 const struct net_device *in, 5096 const struct nf_hook_state *state)
5105 const struct net_device *out,
5106 int (*okfn)(struct sk_buff *))
5107{ 5097{
5108 return selinux_ip_postroute(skb, out, PF_INET6); 5098 return selinux_ip_postroute(skb, state->out, PF_INET6);
5109} 5099}
5110#endif /* IPV6 */ 5100#endif /* IPV6 */
5111 5101
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 1684bcc78b34..5fde34326dcf 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -152,7 +152,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
152 goto out; 152 goto out;
153 153
154 /* No partial writes. */ 154 /* No partial writes. */
155 length = EINVAL; 155 length = -EINVAL;
156 if (*ppos != 0) 156 if (*ppos != 0)
157 goto out; 157 goto out;
158 158
diff --git a/security/smack/smack_netfilter.c b/security/smack/smack_netfilter.c
index c952632afb0d..a455cfc9ec1f 100644
--- a/security/smack/smack_netfilter.c
+++ b/security/smack/smack_netfilter.c
@@ -23,9 +23,7 @@
23 23
24static unsigned int smack_ipv6_output(const struct nf_hook_ops *ops, 24static unsigned int smack_ipv6_output(const struct nf_hook_ops *ops,
25 struct sk_buff *skb, 25 struct sk_buff *skb,
26 const struct net_device *in, 26 const struct nf_hook_state *state)
27 const struct net_device *out,
28 int (*okfn)(struct sk_buff *))
29{ 27{
30 struct socket_smack *ssp; 28 struct socket_smack *ssp;
31 struct smack_known *skp; 29 struct smack_known *skp;
@@ -42,9 +40,7 @@ static unsigned int smack_ipv6_output(const struct nf_hook_ops *ops,
42 40
43static unsigned int smack_ipv4_output(const struct nf_hook_ops *ops, 41static unsigned int smack_ipv4_output(const struct nf_hook_ops *ops,
44 struct sk_buff *skb, 42 struct sk_buff *skb,
45 const struct net_device *in, 43 const struct nf_hook_state *state)
46 const struct net_device *out,
47 int (*okfn)(struct sk_buff *))
48{ 44{
49 struct socket_smack *ssp; 45 struct socket_smack *ssp;
50 struct smack_known *skp; 46 struct smack_known *skp;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4ca3d5d02436..a8a1e14272a1 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1989,7 +1989,7 @@ static const struct pci_device_id azx_ids[] = {
1989 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, 1989 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
1990 /* Sunrise Point */ 1990 /* Sunrise Point */
1991 { PCI_DEVICE(0x8086, 0xa170), 1991 { PCI_DEVICE(0x8086, 0xa170),
1992 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, 1992 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
1993 /* Sunrise Point-LP */ 1993 /* Sunrise Point-LP */
1994 { PCI_DEVICE(0x8086, 0x9d70), 1994 { PCI_DEVICE(0x8086, 0x9d70),
1995 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, 1995 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 526398a4a442..74382137b9f5 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -396,7 +396,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
396{ 396{
397 /* We currently only handle front, HP */ 397 /* We currently only handle front, HP */
398 static hda_nid_t pins[] = { 398 static hda_nid_t pins[] = {
399 0x0f, 0x10, 0x14, 0x15, 0 399 0x0f, 0x10, 0x14, 0x15, 0x17, 0
400 }; 400 };
401 hda_nid_t *p; 401 hda_nid_t *p;
402 for (p = pins; *p; p++) 402 for (p = pins; *p; p++)
@@ -5036,6 +5036,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5036 SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), 5036 SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
5037 SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK), 5037 SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
5038 SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5038 SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5039 SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
5039 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5040 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5040 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), 5041 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
5041 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), 5042 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 4e511221a0c1..0db571340edb 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -22,6 +22,14 @@ TARGETS += vm
22TARGETS_HOTPLUG = cpu-hotplug 22TARGETS_HOTPLUG = cpu-hotplug
23TARGETS_HOTPLUG += memory-hotplug 23TARGETS_HOTPLUG += memory-hotplug
24 24
25# Clear LDFLAGS and MAKEFLAGS if called from main
26# Makefile to avoid test build failures when test
27# Makefile doesn't have explicit build rules.
28ifeq (1,$(MAKELEVEL))
29undefine LDFLAGS
30override MAKEFLAGS =
31endif
32
25all: 33all:
26 for TARGET in $(TARGETS); do \ 34 for TARGET in $(TARGETS); do \
27 make -C $$TARGET; \ 35 make -C $$TARGET; \
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a2214d9609bd..cc6a25d95fbf 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -471,7 +471,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
471 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX); 471 BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
472 472
473 r = -ENOMEM; 473 r = -ENOMEM;
474 kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); 474 kvm->memslots = kvm_kvzalloc(sizeof(struct kvm_memslots));
475 if (!kvm->memslots) 475 if (!kvm->memslots)
476 goto out_err_no_srcu; 476 goto out_err_no_srcu;
477 477
@@ -522,7 +522,7 @@ out_err_no_srcu:
522out_err_no_disable: 522out_err_no_disable:
523 for (i = 0; i < KVM_NR_BUSES; i++) 523 for (i = 0; i < KVM_NR_BUSES; i++)
524 kfree(kvm->buses[i]); 524 kfree(kvm->buses[i]);
525 kfree(kvm->memslots); 525 kvfree(kvm->memslots);
526 kvm_arch_free_vm(kvm); 526 kvm_arch_free_vm(kvm);
527 return ERR_PTR(r); 527 return ERR_PTR(r);
528} 528}
@@ -578,7 +578,7 @@ static void kvm_free_physmem(struct kvm *kvm)
578 kvm_for_each_memslot(memslot, slots) 578 kvm_for_each_memslot(memslot, slots)
579 kvm_free_physmem_slot(kvm, memslot, NULL); 579 kvm_free_physmem_slot(kvm, memslot, NULL);
580 580
581 kfree(kvm->memslots); 581 kvfree(kvm->memslots);
582} 582}
583 583
584static void kvm_destroy_devices(struct kvm *kvm) 584static void kvm_destroy_devices(struct kvm *kvm)
@@ -871,10 +871,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
871 goto out_free; 871 goto out_free;
872 } 872 }
873 873
874 slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots), 874 slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
875 GFP_KERNEL);
876 if (!slots) 875 if (!slots)
877 goto out_free; 876 goto out_free;
877 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
878 878
879 if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { 879 if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
880 slot = id_to_memslot(slots, mem->slot); 880 slot = id_to_memslot(slots, mem->slot);
@@ -917,7 +917,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
917 kvm_arch_commit_memory_region(kvm, mem, &old, change); 917 kvm_arch_commit_memory_region(kvm, mem, &old, change);
918 918
919 kvm_free_physmem_slot(kvm, &old, &new); 919 kvm_free_physmem_slot(kvm, &old, &new);
920 kfree(old_memslots); 920 kvfree(old_memslots);
921 921
922 /* 922 /*
923 * IOMMU mapping: New slots need to be mapped. Old slots need to be 923 * IOMMU mapping: New slots need to be mapped. Old slots need to be
@@ -936,7 +936,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
936 return 0; 936 return 0;
937 937
938out_slots: 938out_slots:
939 kfree(slots); 939 kvfree(slots);
940out_free: 940out_free:
941 kvm_free_physmem_slot(kvm, &new, &old); 941 kvm_free_physmem_slot(kvm, &new, &old);
942out: 942out: