aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.mailmap6
-rw-r--r--CREDITS1
-rw-r--r--Documentation/ABI/testing/configfs-usb-gadget-uvc58
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-proximity-as39352
-rw-r--r--Documentation/ABI/testing/sysfs-class-net-batman-adv20
-rw-r--r--Documentation/devicetree/bindings/hwmon/ina2xx.txt1
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt4
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt3
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt6
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt4
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt6
-rw-r--r--Documentation/devicetree/bindings/net/davinci-mdio.txt5
-rw-r--r--Documentation/devicetree/bindings/net/keystone-netcp.txt2
-rw-r--r--Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt8
-rw-r--r--Documentation/devicetree/bindings/net/stmmac.txt3
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt2
-rw-r--r--Documentation/leds/leds-class.txt4
-rw-r--r--Documentation/networking/nf_conntrack-sysctl.txt3
-rw-r--r--Documentation/networking/stmmac.txt1
-rw-r--r--Documentation/scsi/scsi_eh.txt8
-rw-r--r--MAINTAINERS91
-rw-r--r--Makefile4
-rw-r--r--arch/Kconfig7
-rw-r--r--arch/alpha/include/asm/pgalloc.h4
-rw-r--r--arch/arc/Makefile2
-rw-r--r--arch/arc/include/asm/pgalloc.h4
-rw-r--r--arch/arc/kernel/stacktrace.c2
-rw-r--r--arch/arm/boot/dts/Makefile1
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi2
-rw-r--r--arch/arm/boot/dts/am4372.dtsi2
-rw-r--r--arch/arm/boot/dts/am437x-sk-evm.dts2
-rw-r--r--arch/arm/boot/dts/am57xx-idk-common.dtsi32
-rw-r--r--arch/arm/boot/dts/dm8148-evm.dts8
-rw-r--r--arch/arm/boot/dts/dm8148-t410.dts9
-rw-r--r--arch/arm/boot/dts/dra7.dtsi4
-rw-r--r--arch/arm/boot/dts/dra74x.dtsi4
-rw-r--r--arch/arm/boot/dts/exynos5250-snow-common.dtsi13
-rw-r--r--arch/arm/boot/dts/exynos5420-peach-pit.dts13
-rw-r--r--arch/arm/boot/dts/omap3-evm-37xx.dts2
-rw-r--r--arch/arm/boot/dts/omap3-igep.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-igep0020-common.dtsi11
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts4
-rw-r--r--arch/arm/boot/dts/omap3-n950-n9.dtsi6
-rw-r--r--arch/arm/boot/dts/omap3-zoom3.dts6
-rw-r--r--arch/arm/boot/dts/omap5-board-common.dtsi48
-rw-r--r--arch/arm/boot/dts/omap5-igep0050.dts26
-rw-r--r--arch/arm/boot/dts/omap5-uevm.dts10
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts1
-rw-r--r--arch/arm/boot/dts/stih407-family.dtsi3
-rw-r--r--arch/arm/boot/dts/sun6i-a31s-primo81.dts2
-rw-r--r--arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts2
-rw-r--r--arch/arm/configs/exynos_defconfig1
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/include/asm/pgalloc.h2
-rw-r--r--arch/arm/include/asm/pgtable-2level.h1
-rw-r--r--arch/arm/include/asm/pgtable-3level.h5
-rw-r--r--arch/arm/include/asm/pgtable.h1
-rw-r--r--arch/arm/kernel/smp.c2
-rw-r--r--arch/arm/kvm/arm.c1
-rw-r--r--arch/arm/mach-exynos/Kconfig1
-rw-r--r--arch/arm/mach-imx/mach-imx6ul.c2
-rw-r--r--arch/arm/mach-omap1/ams-delta-fiq-handler.S6
-rw-r--r--arch/arm/mach-omap1/ams-delta-fiq.c5
-rw-r--r--arch/arm/mach-omap1/include/mach/ams-delta-fiq.h2
-rw-r--r--arch/arm/mach-omap2/Kconfig12
-rw-r--r--arch/arm/mach-omap2/omap-secure.h1
-rw-r--r--arch/arm/mach-omap2/omap-smp.c48
-rw-r--r--arch/arm/mach-omap2/powerdomain.c9
-rw-r--r--arch/arm/mach-omap2/powerdomains7xx_data.c76
-rw-r--r--arch/arm/mach-omap2/timer.c7
-rw-r--r--arch/arm/plat-samsung/devs.c2
-rw-r--r--arch/arm64/Makefile2
-rw-r--r--arch/arm64/boot/dts/lg/lg1312.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi2
-rw-r--r--arch/arm64/include/asm/kgdb.h45
-rw-r--r--arch/arm64/include/asm/pgalloc.h2
-rw-r--r--arch/arm64/include/asm/smp.h12
-rw-r--r--arch/arm64/include/asm/spinlock.h42
-rw-r--r--arch/arm64/kernel/hibernate.c6
-rw-r--r--arch/arm64/kernel/kgdb.c14
-rw-r--r--arch/arm64/kernel/smp.c18
-rw-r--r--arch/arm64/kernel/traps.c26
-rw-r--r--arch/arm64/mm/context.c9
-rw-r--r--arch/arm64/mm/fault.c4
-rw-r--r--arch/arm64/mm/flush.c4
-rw-r--r--arch/avr32/include/asm/pgalloc.h6
-rw-r--r--arch/cris/include/asm/pgalloc.h4
-rw-r--r--arch/frv/mm/pgalloc.c6
-rw-r--r--arch/hexagon/include/asm/pgalloc.h4
-rw-r--r--arch/ia64/Kconfig2
-rw-r--r--arch/ia64/include/asm/thread_info.h8
-rw-r--r--arch/ia64/kernel/init_task.c1
-rw-r--r--arch/m68k/include/asm/mcf_pgalloc.h4
-rw-r--r--arch/m68k/include/asm/motorola_pgalloc.h4
-rw-r--r--arch/m68k/include/asm/sun3_pgalloc.h4
-rw-r--r--arch/metag/include/asm/pgalloc.h5
-rw-r--r--arch/microblaze/include/asm/pgalloc.h4
-rw-r--r--arch/microblaze/mm/pgtable.c3
-rw-r--r--arch/mips/include/asm/kvm_host.h3
-rw-r--r--arch/mips/include/asm/pgalloc.h6
-rw-r--r--arch/mips/include/asm/pgtable.h10
-rw-r--r--arch/mips/kvm/emulate.c19
-rw-r--r--arch/mips/kvm/interrupt.h1
-rw-r--r--arch/mips/kvm/locore.S1
-rw-r--r--arch/mips/kvm/mips.c11
-rw-r--r--arch/mn10300/include/asm/thread_info.h2
-rw-r--r--arch/mn10300/kernel/kgdb.c3
-rw-r--r--arch/mn10300/mm/pgtable.c6
-rw-r--r--arch/nios2/include/asm/pgalloc.h5
-rw-r--r--arch/openrisc/include/asm/pgalloc.h2
-rw-r--r--arch/openrisc/mm/ioremap.c2
-rw-r--r--arch/parisc/include/asm/pgalloc.h7
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgalloc.h1
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu-hash.h1
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgalloc.h28
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h1
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix.h15
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush-radix.h3
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush.h14
-rw-r--r--arch/powerpc/include/asm/book3s/pgalloc.h5
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgalloc.h10
-rw-r--r--arch/powerpc/kernel/eeh_driver.c9
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S7
-rw-r--r--arch/powerpc/kernel/pci_64.c1
-rw-r--r--arch/powerpc/kernel/process.c10
-rw-r--r--arch/powerpc/kernel/prom_init.c2
-rw-r--r--arch/powerpc/kernel/ptrace.c4
-rw-r--r--arch/powerpc/kernel/tm.S61
-rw-r--r--arch/powerpc/mm/hash_native_64.c14
-rw-r--r--arch/powerpc/mm/hash_utils_64.c18
-rw-r--r--arch/powerpc/mm/hugetlbpage.c2
-rw-r--r--arch/powerpc/mm/mmu_context_book3s64.c2
-rw-r--r--arch/powerpc/mm/pgtable-radix.c14
-rw-r--r--arch/powerpc/mm/pgtable_32.c4
-rw-r--r--arch/powerpc/mm/pgtable_64.c3
-rw-r--r--arch/powerpc/mm/tlb-radix.c92
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c4
-rw-r--r--arch/s390/include/asm/fpu/api.h2
-rw-r--r--arch/s390/include/asm/kvm_host.h1
-rw-r--r--arch/s390/kernel/ipl.c7
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c8
-rw-r--r--arch/s390/kvm/intercept.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c3
-rw-r--r--arch/s390/mm/pgalloc.c2
-rw-r--r--arch/s390/mm/pgtable.c2
-rw-r--r--arch/score/include/asm/pgalloc.h5
-rw-r--r--arch/sh/include/asm/pgalloc.h4
-rw-r--r--arch/sh/mm/pgtable.c2
-rw-r--r--arch/sparc/include/asm/pgalloc_64.h6
-rw-r--r--arch/sparc/mm/init_64.c6
-rw-r--r--arch/tile/include/asm/thread_info.h2
-rw-r--r--arch/tile/kernel/process.c3
-rw-r--r--arch/tile/mm/pgtable.c2
-rw-r--r--arch/um/kernel/mem.c4
-rw-r--r--arch/unicore32/include/asm/pgalloc.h2
-rw-r--r--arch/x86/Kconfig9
-rw-r--r--arch/x86/boot/Makefile3
-rw-r--r--arch/x86/events/intel/rapl.c2
-rw-r--r--arch/x86/events/intel/uncore_snbep.c21
-rw-r--r--arch/x86/include/asm/intel-family.h68
-rw-r--r--arch/x86/include/asm/kprobes.h11
-rw-r--r--arch/x86/include/asm/kvm_host.h11
-rw-r--r--arch/x86/include/asm/msr.h4
-rw-r--r--arch/x86/include/asm/pgalloc.h4
-rw-r--r--arch/x86/include/asm/pvclock.h25
-rw-r--r--arch/x86/include/asm/stacktrace.h6
-rw-r--r--arch/x86/kernel/apic/io_apic.c2
-rw-r--r--arch/x86/kernel/cpu/amd.c4
-rw-r--r--arch/x86/kernel/dumpstack.c22
-rw-r--r--arch/x86/kernel/dumpstack_32.c4
-rw-r--r--arch/x86/kernel/dumpstack_64.c8
-rw-r--r--arch/x86/kernel/espfix_64.c2
-rw-r--r--arch/x86/kernel/irq_32.c2
-rw-r--r--arch/x86/kernel/kprobes/core.c12
-rw-r--r--arch/x86/kernel/pvclock.c11
-rw-r--r--arch/x86/kernel/traps.c20
-rw-r--r--arch/x86/kvm/lapic.c3
-rw-r--r--arch/x86/kvm/svm.c21
-rw-r--r--arch/x86/kvm/vmx.c38
-rw-r--r--arch/x86/kvm/x86.c6
-rw-r--r--arch/x86/kvm/x86.h7
-rw-r--r--arch/x86/mm/pgtable.c2
-rw-r--r--arch/x86/platform/efi/efi_64.c2
-rw-r--r--arch/x86/xen/mmu.c74
-rw-r--r--arch/x86/xen/p2m.c2
-rw-r--r--arch/xtensa/include/asm/pgalloc.h2
-rw-r--r--block/blk-lib.c12
-rw-r--r--block/blk-mq.c17
-rw-r--r--crypto/crypto_user.c1
-rw-r--r--drivers/acpi/acpica/exconfig.c2
-rw-r--r--drivers/acpi/acpica/hwregs.c146
-rw-r--r--drivers/acpi/acpica/nsparse.c9
-rw-r--r--drivers/acpi/nfit.c12
-rw-r--r--drivers/acpi/nfit.h10
-rw-r--r--drivers/acpi/pci_link.c2
-rw-r--r--drivers/acpi/utils.c6
-rw-r--r--drivers/ata/ahci_seattle.c2
-rw-r--r--drivers/ata/libata-eh.c2
-rw-r--r--drivers/ata/sata_mv.c2
-rw-r--r--drivers/atm/horizon.c4
-rw-r--r--drivers/base/Makefile2
-rw-r--r--drivers/base/isa.c2
-rw-r--r--drivers/base/module.c8
-rw-r--r--drivers/base/power/opp/cpu.c12
-rw-r--r--drivers/base/power/opp/of.c10
-rw-r--r--drivers/base/power/opp/opp.h8
-rw-r--r--drivers/block/aoe/aoecmd.c2
-rw-r--r--drivers/block/nbd.c2
-rw-r--r--drivers/block/xen-blkfront.c35
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c8
-rw-r--r--drivers/clk/clk-oxnas.c4
-rw-r--r--drivers/clk/rockchip/clk-cpu.c4
-rw-r--r--drivers/clk/rockchip/clk-mmc-phase.c12
-rw-r--r--drivers/clk/rockchip/clk-rk3399.c8
-rw-r--r--drivers/connector/cn_proc.c47
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c7
-rw-r--r--drivers/cpufreq/cpufreq.c4
-rw-r--r--drivers/cpufreq/intel_pstate.c27
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c2
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c4
-rw-r--r--drivers/crypto/vmx/aes_cbc.c2
-rw-r--r--drivers/crypto/vmx/aes_ctr.c2
-rw-r--r--drivers/crypto/vmx/ppc-xlate.pl2
-rw-r--r--drivers/devfreq/devfreq.c27
-rw-r--r--drivers/devfreq/event/exynos-nocp.c3
-rw-r--r--drivers/dma/at_xdmac.c82
-rw-r--r--drivers/dma/mv_xor.c10
-rw-r--r--drivers/extcon/extcon-palmas.c2
-rw-r--r--drivers/firmware/efi/arm-init.c14
-rw-r--r--drivers/gpio/Kconfig9
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c4
-rw-r--r--drivers/gpio/gpio-104-idi-48.c1
-rw-r--r--drivers/gpio/gpio-bcm-kona.c4
-rw-r--r--drivers/gpio/gpio-tegra.c9
-rw-r--r--drivers/gpio/gpio-zynq.c7
-rw-r--r--drivers/gpio/gpiolib-of.c1
-rw-r--r--drivers/gpio/gpiolib.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c15
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c70
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c2
-rw-r--r--drivers/gpu/drm/amd/include/atombios.h72
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c423
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c18
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c43
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h32
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h16
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c91
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu74.h75
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h43
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c50
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c10
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c2
-rw-r--r--drivers/gpu/drm/drm_atomic.c27
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c54
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c8
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c46
-rw-r--r--drivers/gpu/drm/i915/intel_display.c104
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c31
-rw-r--r--drivers/gpu/drm/i915/intel_dp_link_training.c26
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c7
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h4
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c3
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c3
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c3
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c2
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c16
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c23
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_dotclock.c39
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c50
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c16
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c4
-rw-r--r--drivers/hid/hid-elo.c2
-rw-r--r--drivers/hid/hid-multitouch.c26
-rw-r--r--drivers/hid/usbhid/hiddev.c10
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c85
-rw-r--r--drivers/hwmon/fam15h_power.c8
-rw-r--r--drivers/hwmon/lm90.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c11
-rw-r--r--drivers/hwtracing/coresight/coresight.c15
-rw-r--r--drivers/i2c/busses/i2c-i801.c99
-rw-r--r--drivers/i2c/busses/i2c-octeon.c17
-rw-r--r--drivers/i2c/muxes/i2c-mux-reg.c1
-rw-r--r--drivers/iio/accel/kxsd9.c4
-rw-r--r--drivers/iio/accel/st_accel_buffer.c2
-rw-r--r--drivers/iio/accel/st_accel_core.c1
-rw-r--r--drivers/iio/adc/ad7266.c7
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_buffer.c25
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c8
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c96
-rw-r--r--drivers/iio/dac/Kconfig2
-rw-r--r--drivers/iio/dac/ad5592r-base.c2
-rw-r--r--drivers/iio/gyro/st_gyro_buffer.c2
-rw-r--r--drivers/iio/gyro/st_gyro_core.c1
-rw-r--r--drivers/iio/humidity/am2315.c4
-rw-r--r--drivers/iio/humidity/hdc100x.c20
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c16
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c5
-rw-r--r--drivers/iio/industrialio-trigger.c23
-rw-r--r--drivers/iio/light/apds9960.c1
-rw-r--r--drivers/iio/light/bh1780.c10
-rw-r--r--drivers/iio/light/max44000.c1
-rw-r--r--drivers/iio/magnetometer/st_magn_buffer.c2
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c1
-rw-r--r--drivers/iio/pressure/bmp280.c4
-rw-r--r--drivers/iio/pressure/st_pressure_buffer.c2
-rw-r--r--drivers/iio/pressure/st_pressure_core.c81
-rw-r--r--drivers/iio/proximity/as3935.c17
-rw-r--r--drivers/infiniband/core/cache.c4
-rw-r--r--drivers/infiniband/core/cma.c62
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/core/verbs.c16
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c28
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c3
-rw-r--r--drivers/infiniband/hw/hfi1/init.c2
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c19
-rw-r--r--drivers/infiniband/hw/hfi1/mad.h2
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c26
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.c3
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.c4
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c17
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c24
-rw-r--r--drivers/infiniband/hw/mlx4/main.c3
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h2
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c34
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c6
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c21
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c5
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c14
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c4
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c3
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h1
-rw-r--r--drivers/input/joystick/xpad.c39
-rw-r--r--drivers/input/mouse/elantech.c8
-rw-r--r--drivers/input/mouse/vmmouse.c22
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c11
-rw-r--r--drivers/iommu/amd_iommu_init.c2
-rw-r--r--drivers/iommu/arm-smmu-v3.c1
-rw-r--r--drivers/iommu/intel-iommu.c19
-rw-r--r--drivers/iommu/iova.c8
-rw-r--r--drivers/iommu/rockchip-iommu.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c12
-rw-r--r--drivers/leds/led-core.c9
-rw-r--r--drivers/leds/trigger/ledtrig-heartbeat.c31
-rw-r--r--drivers/mcb/mcb-core.c17
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c97
-rw-r--r--drivers/media/v4l2-core/v4l2-mc.c2
-rw-r--r--drivers/memory/omap-gpmc.c2
-rw-r--r--drivers/mfd/max77620.c2
-rw-r--r--drivers/misc/mei/client.c2
-rw-r--r--drivers/mtd/ubi/build.c11
-rw-r--r--drivers/mtd/ubi/eba.c22
-rw-r--r--drivers/mtd/ubi/kapi.c8
-rw-r--r--drivers/net/bonding/bond_3ad.c75
-rw-r--r--drivers/net/bonding/bond_alb.c7
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/can/at91_can.c5
-rw-r--r--drivers/net/can/c_can/c_can.c38
-rw-r--r--drivers/net/can/dev.c9
-rw-r--r--drivers/net/can/usb/Kconfig5
-rw-r--r--drivers/net/can/usb/gs_usb.c14
-rw-r--r--drivers/net/can/usb/kvaser_usb.c8
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c30
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c4
-rw-r--r--drivers/net/ethernet/arc/emac.h1
-rw-r--r--drivers/net/ethernet/arc/emac_main.c65
-rw-r--r--drivers/net/ethernet/atheros/alx/alx.h4
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c61
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c47
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h17
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c54
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h87
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c65
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c24
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_device.c22
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_device.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn68xx_device.c13
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn68xx_device.h1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c73
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c325
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h35
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_config.h4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c50
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c99
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h7
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c42
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.h20
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_iq.h3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c24
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h12
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.c12
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c98
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.c19
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c103
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c16
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c91
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h12
-rw-r--r--drivers/net/ethernet/dnet.c48
-rw-r--r--drivers/net/ethernet/dnet.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c44
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h9
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c41
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c68
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c80
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c14
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c13
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c56
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c173
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c6
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c6
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c8
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h7
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c12
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c92
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c76
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_model.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c4
-rw-r--r--drivers/net/ethernet/lantiq_etop.c36
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c48
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c235
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c130
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h112
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c160
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c586
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c762
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c394
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h245
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c190
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c561
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c279
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h1206
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c1072
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h164
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c1814
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c116
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h5
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c19
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c4
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c45
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c31
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h6
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c55
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c145
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c4
-rw-r--r--drivers/net/ethernet/rdc/r6040.c91
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c3
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h1
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c31
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c32
-rw-r--r--drivers/net/ethernet/sfc/farch.c3
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c82
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h86
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c147
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c96
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c60
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c52
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h159
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c70
-rw-r--r--drivers/net/ethernet/ti/cpsw.c83
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c131
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c92
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c169
-rw-r--r--drivers/net/ethernet/tile/tilegx.c6
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c46
-rw-r--r--drivers/net/fddi/skfp/Makefile2
-rw-r--r--drivers/net/geneve.c31
-rw-r--r--drivers/net/hyperv/netvsc.c88
-rw-r--r--drivers/net/macsec.c50
-rw-r--r--drivers/net/phy/dp83867.c13
-rw-r--r--drivers/net/phy/fixed_phy.c22
-rw-r--r--drivers/net/phy/marvell.c82
-rw-r--r--drivers/net/phy/smsc.c17
-rw-r--r--drivers/net/team/team.c4
-rw-r--r--drivers/net/tun.c140
-rw-r--r--drivers/net/usb/cdc_ncm.c7
-rw-r--r--drivers/net/usb/r8152.c70
-rw-r--r--drivers/net/usb/usbnet.c10
-rw-r--r--drivers/net/vrf.c21
-rw-r--r--drivers/net/vxlan.c58
-rw-r--r--drivers/net/wan/Kconfig11
-rw-r--r--drivers/net/wan/Makefile1
-rw-r--r--drivers/net/wan/slic_ds26522.c255
-rw-r--r--drivers/net/wan/slic_ds26522.h134
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c16
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h8
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c12
-rw-r--r--drivers/net/wireless/ath/wil6210/p2p.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c8
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c6
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/scan.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c16
-rw-r--r--drivers/net/wireless/intersil/orinoco/scan.c12
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c130
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c12
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c10
-rw-r--r--drivers/net/wireless/st/cw1200/scan.c6
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c6
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c11
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c5
-rw-r--r--drivers/nvdimm/pfn_devs.c51
-rw-r--r--drivers/nvme/host/pci.c9
-rw-r--r--drivers/of/Kconfig1
-rw-r--r--drivers/of/fdt.c15
-rw-r--r--drivers/of/irq.c19
-rw-r--r--drivers/of/of_mdio.c2
-rw-r--r--drivers/of/of_reserved_mem.c11
-rw-r--r--drivers/pci/vc.c4
-rw-r--r--drivers/perf/arm_pmu.c2
-rw-r--r--drivers/phy/phy-bcm-ns-usb2.c4
-rw-r--r--drivers/phy/phy-exynos-mipi-video.c6
-rw-r--r--drivers/phy/phy-miphy28lp.c3
-rw-r--r--drivers/phy/phy-rcar-gen3-usb2.c14
-rw-r--r--drivers/phy/phy-rockchip-dp.c2
-rw-r--r--drivers/phy/phy-stih407-usb.c4
-rw-r--r--drivers/phy/phy-sun4i-usb.c14
-rw-r--r--drivers/phy/phy-ti-pipe3.c15
-rw-r--r--drivers/phy/phy-twl4030-usb.c14
-rw-r--r--drivers/pinctrl/Makefile2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c4
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c6
-rw-r--r--drivers/pinctrl/pinctrl-single.c3
-rw-r--r--drivers/pinctrl/tegra/Makefile2
-rw-r--r--drivers/platform/chrome/cros_ec_dev.c8
-rw-r--r--drivers/platform/x86/Kconfig10
-rw-r--r--drivers/platform/x86/ideapad-laptop.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c87
-rw-r--r--drivers/power/power_supply_core.c27
-rw-r--r--drivers/power/tps65217_charger.c6
-rw-r--r--drivers/pwm/core.c3
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c2
-rw-r--r--drivers/pwm/sysfs.c2
-rw-r--r--drivers/regulator/anatop-regulator.c2
-rw-r--r--drivers/regulator/max77620-regulator.c7
-rw-r--r--drivers/regulator/qcom_smd-regulator.c15
-rw-r--r--drivers/regulator/tps51632-regulator.c9
-rw-r--r--drivers/s390/net/qeth_l2_main.c1
-rw-r--r--drivers/s390/net/qeth_l3_main.c1
-rw-r--r--drivers/scsi/53c700.c4
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_error.c4
-rw-r--r--drivers/scsi/sd.c8
-rw-r--r--drivers/scsi/sd.h5
-rw-r--r--drivers/spi/spi-rockchip.c4
-rw-r--r--drivers/spi/spi-sun4i.c23
-rw-r--r--drivers/spi/spi-sun6i.c10
-rw-r--r--drivers/spi/spi-ti-qspi.c7
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c2
-rw-r--r--drivers/staging/iio/adc/ad7606_spi.c2
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c6
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c7
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c3
-rw-r--r--drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c11
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c12
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c5
-rw-r--r--drivers/thermal/cpu_cooling.c16
-rw-r--r--drivers/tty/pty.c7
-rw-r--r--drivers/tty/vt/vt.c1
-rw-r--r--drivers/usb/common/usb-otg-fsm.c2
-rw-r--r--drivers/usb/core/hcd.c17
-rw-r--r--drivers/usb/core/quirks.c23
-rw-r--r--drivers/usb/dwc2/core.h27
-rw-r--r--drivers/usb/dwc2/gadget.c24
-rw-r--r--drivers/usb/dwc3/core.h1
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c19
-rw-r--r--drivers/usb/dwc3/dwc3-st.c16
-rw-r--r--drivers/usb/dwc3/gadget.c30
-rw-r--r--drivers/usb/gadget/composite.c21
-rw-r--r--drivers/usb/gadget/configfs.c1
-rw-r--r--drivers/usb/gadget/function/f_fs.c30
-rw-r--r--drivers/usb/gadget/function/f_printer.c8
-rw-r--r--drivers/usb/gadget/function/f_tcm.c20
-rw-r--r--drivers/usb/gadget/function/f_uac2.c13
-rw-r--r--drivers/usb/gadget/function/storage_common.c4
-rw-r--r--drivers/usb/gadget/legacy/inode.c17
-rw-r--r--drivers/usb/gadget/udc/udc-core.c12
-rw-r--r--drivers/usb/host/ehci-hcd.c9
-rw-r--r--drivers/usb/host/ehci-hub.c14
-rw-r--r--drivers/usb/host/ehci-msm.c14
-rw-r--r--drivers/usb/host/ehci-st.c6
-rw-r--r--drivers/usb/host/ehci-tegra.c16
-rw-r--r--drivers/usb/host/ohci-q.c3
-rw-r--r--drivers/usb/host/ohci-st.c6
-rw-r--r--drivers/usb/host/xhci-pci.c5
-rw-r--r--drivers/usb/host/xhci-plat.c3
-rw-r--r--drivers/usb/host/xhci-ring.c30
-rw-r--r--drivers/usb/host/xhci.c29
-rw-r--r--drivers/usb/musb/musb_core.c85
-rw-r--r--drivers/usb/musb/musb_core.h3
-rw-r--r--drivers/usb/musb/musb_gadget.c34
-rw-r--r--drivers/usb/musb/musb_host.c68
-rw-r--r--drivers/usb/musb/omap2430.c257
-rw-r--r--drivers/usb/musb/sunxi.c54
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c29
-rw-r--r--drivers/usb/serial/mos7720.c1
-rw-r--r--drivers/usb/storage/uas.c2
-rw-r--r--drivers/usb/usbip/vhci_hcd.c2
-rw-r--r--drivers/vhost/net.c16
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--drivers/xen/balloon.c28
-rw-r--r--drivers/xen/xen-pciback/conf_space.c6
-rw-r--r--drivers/xen/xen-pciback/conf_space_header.c18
-rw-r--r--fs/9p/vfs_file.c6
-rw-r--r--fs/autofs4/autofs_i.h8
-rw-r--r--fs/autofs4/expire.c27
-rw-r--r--fs/autofs4/root.c2
-rw-r--r--fs/autofs4/waitq.c7
-rw-r--r--fs/btrfs/check-integrity.c2
-rw-r--r--fs/btrfs/ctree.c19
-rw-r--r--fs/btrfs/ctree.h2
-rw-r--r--fs/btrfs/delayed-inode.c27
-rw-r--r--fs/btrfs/delayed-inode.h10
-rw-r--r--fs/btrfs/disk-io.c54
-rw-r--r--fs/btrfs/disk-io.h2
-rw-r--r--fs/btrfs/extent-tree.c27
-rw-r--r--fs/btrfs/extent_io.c32
-rw-r--r--fs/btrfs/extent_io.h4
-rw-r--r--fs/btrfs/file.c44
-rw-r--r--fs/btrfs/free-space-cache.c18
-rw-r--r--fs/btrfs/hash.c5
-rw-r--r--fs/btrfs/hash.h1
-rw-r--r--fs/btrfs/inode.c22
-rw-r--r--fs/btrfs/ordered-data.c3
-rw-r--r--fs/btrfs/super.c61
-rw-r--r--fs/btrfs/tests/btrfs-tests.c8
-rw-r--r--fs/btrfs/tests/btrfs-tests.h27
-rw-r--r--fs/btrfs/tests/extent-buffer-tests.c13
-rw-r--r--fs/btrfs/tests/extent-io-tests.c86
-rw-r--r--fs/btrfs/tests/free-space-tests.c76
-rw-r--r--fs/btrfs/tests/free-space-tree-tests.c30
-rw-r--r--fs/btrfs/tests/inode-tests.c344
-rw-r--r--fs/btrfs/tests/qgroup-tests.c111
-rw-r--r--fs/btrfs/transaction.c10
-rw-r--r--fs/btrfs/transaction.h2
-rw-r--r--fs/btrfs/tree-log.c4
-rw-r--r--fs/btrfs/volumes.c117
-rw-r--r--fs/ceph/export.c10
-rw-r--r--fs/cifs/cifs_unicode.c33
-rw-r--r--fs/cifs/cifs_unicode.h2
-rw-r--r--fs/cifs/cifsfs.c3
-rw-r--r--fs/cifs/cifsglob.h1
-rw-r--r--fs/cifs/connect.c4
-rw-r--r--fs/cifs/file.c14
-rw-r--r--fs/cifs/ntlmssp.h2
-rw-r--r--fs/cifs/sess.c80
-rw-r--r--fs/cifs/smb2pdu.c37
-rw-r--r--fs/dax.c7
-rw-r--r--fs/dcache.c75
-rw-r--r--fs/debugfs/file.c7
-rw-r--r--fs/ecryptfs/kthread.c13
-rw-r--r--fs/fuse/dir.c4
-rw-r--r--fs/fuse/fuse_i.h9
-rw-r--r--fs/fuse/inode.c19
-rw-r--r--fs/internal.h1
-rw-r--r--fs/jbd2/journal.c32
-rw-r--r--fs/libfs.c113
-rw-r--r--fs/lockd/svc.c13
-rw-r--r--fs/locks.c2
-rw-r--r--fs/namespace.c5
-rw-r--r--fs/nfs/dir.c11
-rw-r--r--fs/nfs/direct.c10
-rw-r--r--fs/nfs/inode.c1
-rw-r--r--fs/nfs/nfs4proc.c18
-rw-r--r--fs/nfs/nfs4state.c2
-rw-r--r--fs/nfs/pnfs.c10
-rw-r--r--fs/nfs/pnfs_nfs.c12
-rw-r--r--fs/nfs/read.c4
-rw-r--r--fs/nfsd/blocklayout.c2
-rw-r--r--fs/nfsd/nfs2acl.c20
-rw-r--r--fs/nfsd/nfs3acl.c16
-rw-r--r--fs/nfsd/nfs4acl.c16
-rw-r--r--fs/nfsd/nfs4callback.c18
-rw-r--r--fs/nfsd/nfs4state.c67
-rw-r--r--fs/nfsd/state.h2
-rw-r--r--fs/nilfs2/the_nilfs.c2
-rw-r--r--fs/ocfs2/Makefile2
-rw-r--r--fs/ocfs2/buffer_head_io.c5
-rw-r--r--fs/overlayfs/dir.c13
-rw-r--r--fs/overlayfs/inode.c55
-rw-r--r--fs/overlayfs/super.c12
-rw-r--r--fs/posix_acl.c42
-rw-r--r--fs/proc/root.c7
-rw-r--r--fs/reiserfs/super.c9
-rw-r--r--fs/ubifs/file.c24
-rw-r--r--fs/udf/partition.c13
-rw-r--r--fs/udf/super.c22
-rw-r--r--fs/udf/udf_sb.h5
-rw-r--r--include/asm-generic/qspinlock.h53
-rw-r--r--include/drm/i915_pciids.h10
-rw-r--r--include/kvm/arm_pmu.h4
-rw-r--r--include/linux/audit.h24
-rw-r--r--include/linux/bpf.h36
-rw-r--r--include/linux/cgroup.h1
-rw-r--r--include/linux/dcache.h13
-rw-r--r--include/linux/efi.h2
-rw-r--r--include/linux/etherdevice.h23
-rw-r--r--include/linux/ieee80211.h32
-rw-r--r--include/linux/iio/common/st_sensors.h9
-rw-r--r--include/linux/inet_diag.h6
-rw-r--r--include/linux/init_task.h2
-rw-r--r--include/linux/isa.h5
-rw-r--r--include/linux/jump_label.h16
-rw-r--r--include/linux/kasan.h11
-rw-r--r--include/linux/leds.h23
-rw-r--r--include/linux/mfd/da9052/da9052.h2
-rw-r--r--include/linux/mlx4/device.h1
-rw-r--r--include/linux/mlx5/driver.h20
-rw-r--r--include/linux/mlx5/fs.h12
-rw-r--r--include/linux/mlx5/qp.h1
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/net.h4
-rw-r--r--include/linux/netdevice.h29
-rw-r--r--include/linux/netfilter/x_tables.h4
-rw-r--r--include/linux/netfilter_bridge/ebtables.h2
-rw-r--r--include/linux/of.h2
-rw-r--r--include/linux/of_mdio.h8
-rw-r--r--include/linux/of_pci.h2
-rw-r--r--include/linux/of_reserved_mem.h7
-rw-r--r--include/linux/ptr_ring.h77
-rw-r--r--include/linux/pwm.h19
-rw-r--r--include/linux/qed/qed_eth_if.h1
-rw-r--r--include/linux/reset.h211
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/seqlock.h7
-rw-r--r--include/linux/skb_array.h13
-rw-r--r--include/linux/skbuff.h30
-rw-r--r--include/linux/sock_diag.h6
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/sunrpc/clnt.h2
-rw-r--r--include/linux/sunrpc/svc_xprt.h1
-rw-r--r--include/linux/sunrpc/xprt.h1
-rw-r--r--include/linux/thermal.h2
-rw-r--r--include/linux/usb/ehci_def.h4
-rw-r--r--include/linux/usb/gadget.h3
-rw-r--r--include/linux/usb/musb.h5
-rw-r--r--include/media/v4l2-mc.h2
-rw-r--r--include/net/bonding.h7
-rw-r--r--include/net/cfg80211.h52
-rw-r--r--include/net/devlink.h3
-rw-r--r--include/net/gre.h2
-rw-r--r--include/net/ip.h5
-rw-r--r--include/net/mac80211.h5
-rw-r--r--include/net/netevent.h1
-rw-r--r--include/net/netfilter/nf_conntrack.h4
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h4
-rw-r--r--include/net/netfilter/nf_conntrack_zones.h45
-rw-r--r--include/net/netfilter/nf_log.h7
-rw-r--r--include/net/netfilter/nf_tables.h44
-rw-r--r--include/net/rtnetlink.h5
-rw-r--r--include/net/tc_act/tc_ife.h6
-rw-r--r--include/net/tc_act/tc_skbedit.h10
-rw-r--r--include/net/tcp.h5
-rw-r--r--include/rdma/rdma_vt.h4
-rw-r--r--include/uapi/linux/batman_adv.h114
-rw-r--r--include/uapi/linux/bpf.h46
-rw-r--r--include/uapi/linux/btrfs.h2
-rw-r--r--include/uapi/linux/devlink.h8
-rw-r--r--include/uapi/linux/fuse.h7
-rw-r--r--include/uapi/linux/if_bridge.h26
-rw-r--r--include/uapi/linux/if_link.h2
-rw-r--r--include/uapi/linux/inet_diag.h1
-rw-r--r--include/uapi/linux/input-event-codes.h31
-rw-r--r--include/uapi/linux/input.h1
-rw-r--r--include/uapi/linux/netfilter/Kbuild1
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h6
-rw-r--r--include/uapi/linux/netfilter/xt_NFLOG.h6
-rw-r--r--include/uapi/linux/netfilter/xt_SYNPROXY.h2
-rw-r--r--include/uapi/linux/nl80211.h76
-rw-r--r--include/uapi/linux/tc_act/tc_skbedit.h2
-rw-r--r--include/uapi/linux/tcp.h10
-rw-r--r--init/main.c8
-rw-r--r--kernel/audit.c17
-rw-r--r--kernel/audit.h4
-rw-r--r--kernel/auditsc.c8
-rw-r--r--kernel/bpf/arraymap.c47
-rw-r--r--kernel/bpf/core.c3
-rw-r--r--kernel/bpf/helpers.c2
-rw-r--r--kernel/bpf/syscall.c43
-rw-r--r--kernel/bpf/verifier.c49
-rw-r--r--kernel/cgroup.c183
-rw-r--r--kernel/events/core.c6
-rw-r--r--kernel/fork.c50
-rw-r--r--kernel/futex.c14
-rw-r--r--kernel/jump_label.c36
-rw-r--r--kernel/kcov.c7
-rw-r--r--kernel/locking/mutex-debug.c12
-rw-r--r--kernel/locking/mutex-debug.h4
-rw-r--r--kernel/locking/mutex.c15
-rw-r--r--kernel/locking/mutex.h2
-rw-r--r--kernel/locking/qspinlock.c60
-rw-r--r--kernel/power/process.c12
-rw-r--r--kernel/sched/core.c42
-rw-r--r--kernel/sched/debug.c15
-rw-r--r--kernel/sched/fair.c72
-rw-r--r--kernel/sched/sched.h2
-rw-r--r--kernel/sched/stats.h3
-rw-r--r--kernel/trace/bpf_trace.c36
-rw-r--r--kernel/trace/trace_printk.c7
-rw-r--r--mm/compaction.c39
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/hugetlb.c4
-rw-r--r--mm/internal.h3
-rw-r--r--mm/kasan/kasan.c6
-rw-r--r--mm/kmemleak.c2
-rw-r--r--mm/memcontrol.c7
-rw-r--r--mm/memory.c31
-rw-r--r--mm/mempool.c12
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/oom_kill.c7
-rw-r--r--mm/page-writeback.c21
-rw-r--r--mm/page_owner.c6
-rw-r--r--mm/percpu.c73
-rw-r--r--mm/shmem.c2
-rw-r--r--mm/swap.c11
-rw-r--r--net/8021q/vlan_dev.c2
-rw-r--r--net/atm/clip.c2
-rw-r--r--net/ax25/af_ax25.c3
-rw-r--r--net/ax25/ax25_ds_timer.c5
-rw-r--r--net/ax25/ax25_std_timer.c5
-rw-r--r--net/ax25/ax25_subr.c3
-rw-r--r--net/batman-adv/Kconfig2
-rw-r--r--net/batman-adv/Makefile5
-rw-r--r--net/batman-adv/bat_algo.c140
-rw-r--r--net/batman-adv/bat_algo.h32
-rw-r--r--net/batman-adv/bat_iv_ogm.c106
-rw-r--r--net/batman-adv/bat_iv_ogm.h25
-rw-r--r--net/batman-adv/bat_v.c58
-rw-r--r--net/batman-adv/bat_v.h52
-rw-r--r--net/batman-adv/bat_v_elp.c7
-rw-r--r--net/batman-adv/bat_v_elp.h4
-rw-r--r--net/batman-adv/bat_v_ogm.c9
-rw-r--r--net/batman-adv/bat_v_ogm.h4
-rw-r--r--net/batman-adv/bitarray.c2
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c1
-rw-r--r--net/batman-adv/debugfs.c240
-rw-r--r--net/batman-adv/distributed-arp-table.c2
-rw-r--r--net/batman-adv/fragmentation.c53
-rw-r--r--net/batman-adv/fragmentation.h6
-rw-r--r--net/batman-adv/gateway_client.c16
-rw-r--r--net/batman-adv/gateway_common.c10
-rw-r--r--net/batman-adv/hard-interface.c25
-rw-r--r--net/batman-adv/icmp_socket.c1
-rw-r--r--net/batman-adv/log.c231
-rw-r--r--net/batman-adv/log.h111
-rw-r--r--net/batman-adv/main.c709
-rw-r--r--net/batman-adv/main.h121
-rw-r--r--net/batman-adv/multicast.c501
-rw-r--r--net/batman-adv/multicast.h3
-rw-r--r--net/batman-adv/netlink.c424
-rw-r--r--net/batman-adv/netlink.h32
-rw-r--r--net/batman-adv/network-coding.c2
-rw-r--r--net/batman-adv/originator.c91
-rw-r--r--net/batman-adv/originator.h6
-rw-r--r--net/batman-adv/packet.h61
-rw-r--r--net/batman-adv/routing.c46
-rw-r--r--net/batman-adv/send.c100
-rw-r--r--net/batman-adv/send.h4
-rw-r--r--net/batman-adv/soft-interface.c22
-rw-r--r--net/batman-adv/sysfs.c29
-rw-r--r--net/batman-adv/tp_meter.c1507
-rw-r--r--net/batman-adv/tp_meter.h34
-rw-r--r--net/batman-adv/translation-table.c60
-rw-r--r--net/batman-adv/tvlv.c632
-rw-r--r--net/batman-adv/tvlv.h61
-rw-r--r--net/batman-adv/types.h260
-rw-r--r--net/bridge/br_device.c12
-rw-r--r--net/bridge/br_forward.c13
-rw-r--r--net/bridge/br_if.c9
-rw-r--r--net/bridge/br_input.c18
-rw-r--r--net/bridge/br_multicast.c221
-rw-r--r--net/bridge/br_netfilter_hooks.c2
-rw-r--r--net/bridge/br_netlink.c148
-rw-r--r--net/bridge/br_private.h64
-rw-r--r--net/bridge/br_sysfs_br.c25
-rw-r--r--net/bridge/netfilter/ebt_802_3.c6
-rw-r--r--net/bridge/netfilter/ebt_arp.c43
-rw-r--r--net/bridge/netfilter/ebt_ip.c28
-rw-r--r--net/bridge/netfilter/ebt_ip6.c41
-rw-r--r--net/bridge/netfilter/ebt_stp.c97
-rw-r--r--net/bridge/netfilter/ebtables.c32
-rw-r--r--net/caif/chnl_net.c1
-rw-r--r--net/core/dev.c90
-rw-r--r--net/core/devlink.c87
-rw-r--r--net/core/fib_rules.c49
-rw-r--r--net/core/filter.c320
-rw-r--r--net/core/flow_dissector.c43
-rw-r--r--net/core/neighbour.c12
-rw-r--r--net/core/net-sysfs.c15
-rw-r--r--net/core/pktgen.c42
-rw-r--r--net/core/rtnetlink.c66
-rw-r--r--net/core/skbuff.c18
-rw-r--r--net/core/utils.c8
-rw-r--r--net/decnet/dn_fib.c21
-rw-r--r--net/ieee802154/6lowpan/core.c2
-rw-r--r--net/ipv4/esp4.c52
-rw-r--r--net/ipv4/gre_demux.c10
-rw-r--r--net/ipv4/inet_diag.c25
-rw-r--r--net/ipv4/ip_gre.c26
-rw-r--r--net/ipv4/ip_output.c4
-rw-r--r--net/ipv4/ipconfig.c4
-rw-r--r--net/ipv4/ipmr.c4
-rw-r--r--net/ipv4/netfilter/arp_tables.c41
-rw-r--r--net/ipv4/netfilter/ip_tables.c20
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c4
-rw-r--r--net/ipv4/netfilter/nf_reject_ipv4.c3
-rw-r--r--net/ipv4/tcp.c67
-rw-r--r--net/ipv4/tcp_ipv4.c31
-rw-r--r--net/ipv4/tcp_output.c7
-rw-r--r--net/ipv4/udp.c80
-rw-r--r--net/ipv6/icmp.c2
-rw-r--r--net/ipv6/ip6_checksum.c7
-rw-r--r--net/ipv6/ip6_fib.c1
-rw-r--r--net/ipv6/ip6_gre.c2
-rw-r--r--net/ipv6/ip6mr.c1
-rw-r--r--net/ipv6/netfilter/ip6_tables.c16
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c4
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/sit.c4
-rw-r--r--net/ipv6/tcp_ipv6.c33
-rw-r--r--net/ipv6/udp.c71
-rw-r--r--net/kcm/kcmproc.c1
-rw-r--r--net/kcm/kcmsock.c8
-rw-r--r--net/mac80211/agg-rx.c18
-rw-r--r--net/mac80211/cfg.c1
-rw-r--r--net/mac80211/ieee80211_i.h1
-rw-r--r--net/mac80211/mesh.c17
-rw-r--r--net/mac80211/mesh_plink.c16
-rw-r--r--net/mac80211/rx.c7
-rw-r--r--net/mac80211/scan.c42
-rw-r--r--net/mac80211/spectmgmt.c45
-rw-r--r--net/mac80211/tdls.c1
-rw-r--r--net/mac80211/tx.c10
-rw-r--r--net/netfilter/Kconfig10
-rw-r--r--net/netfilter/nf_conntrack_core.c78
-rw-r--r--net/netfilter/nf_conntrack_helper.c61
-rw-r--r--net/netfilter/nf_conntrack_standalone.c36
-rw-r--r--net/netfilter/nf_log.c33
-rw-r--r--net/netfilter/nf_tables_api.c390
-rw-r--r--net/netfilter/nf_tables_core.c2
-rw-r--r--net/netfilter/nfnetlink_log.c9
-rw-r--r--net/netfilter/nft_dynset.c7
-rw-r--r--net/netfilter/nft_hash.c9
-rw-r--r--net/netfilter/nft_log.c21
-rw-r--r--net/netfilter/nft_lookup.c43
-rw-r--r--net/netfilter/nft_meta.c9
-rw-r--r--net/netfilter/nft_rbtree.c5
-rw-r--r--net/netfilter/x_tables.c3
-rw-r--r--net/netfilter/xt_NFLOG.c3
-rw-r--r--net/netfilter/xt_TRACE.c25
-rw-r--r--net/netfilter/xt_owner.c41
-rw-r--r--net/netfilter/xt_tcpudp.c7
-rw-r--r--net/openvswitch/conntrack.c14
-rw-r--r--net/packet/af_packet.c8
-rw-r--r--net/rds/connection.c39
-rw-r--r--net/rds/ib.c8
-rw-r--r--net/rds/ib.h8
-rw-r--r--net/rds/ib_cm.c8
-rw-r--r--net/rds/ib_recv.c3
-rw-r--r--net/rds/ib_send.c3
-rw-r--r--net/rds/loop.c19
-rw-r--r--net/rds/rds.h7
-rw-r--r--net/rds/recv.c4
-rw-r--r--net/rds/send.c16
-rw-r--r--net/rds/sysctl.c3
-rw-r--r--net/rds/tcp.c135
-rw-r--r--net/rds/tcp.h24
-rw-r--r--net/rds/tcp_connect.c60
-rw-r--r--net/rds/tcp_listen.c16
-rw-r--r--net/rds/tcp_recv.c41
-rw-r--r--net/rds/tcp_send.c34
-rw-r--r--net/rds/threads.c12
-rw-r--r--net/rds/transport.c3
-rw-r--r--net/sched/act_api.c2
-rw-r--r--net/sched/act_bpf.c7
-rw-r--r--net/sched/act_ife.c53
-rw-r--r--net/sched/act_ipt.c7
-rw-r--r--net/sched/act_mirred.c2
-rw-r--r--net/sched/act_skbedit.c26
-rw-r--r--net/sched/cls_bpf.c7
-rw-r--r--net/sched/sch_fifo.c4
-rw-r--r--net/sched/sch_hfsc.c64
-rw-r--r--net/sched/sch_htb.c2
-rw-r--r--net/sched/sch_netem.c24
-rw-r--r--net/sched/sch_prio.c67
-rw-r--r--net/sctp/sctp_diag.c6
-rw-r--r--net/sunrpc/clnt.c31
-rw-r--r--net/sunrpc/svc_xprt.c2
-rw-r--r--net/sunrpc/xprtsock.c1
-rw-r--r--net/tipc/bearer.c2
-rw-r--r--net/tipc/discover.c5
-rw-r--r--net/tipc/link.c3
-rw-r--r--net/tipc/msg.c6
-rw-r--r--net/tipc/msg.h11
-rw-r--r--net/tipc/netlink_compat.c2
-rw-r--r--net/tipc/socket.c54
-rw-r--r--net/tipc/udp_media.c24
-rw-r--r--net/unix/af_unix.c6
-rw-r--r--net/vmw_vsock/af_vsock.c12
-rw-r--r--net/wireless/core.c4
-rw-r--r--net/wireless/core.h12
-rw-r--r--net/wireless/nl80211.c173
-rw-r--r--net/wireless/scan.c18
-rw-r--r--net/wireless/trace.h33
-rw-r--r--net/wireless/util.c2
-rw-r--r--samples/bpf/Makefile3
-rw-r--r--samples/bpf/bpf_helpers.h2
-rw-r--r--samples/bpf/test_cgrp2_array_pin.c109
-rwxr-xr-xsamples/bpf/test_cgrp2_tc.sh184
-rw-r--r--samples/bpf/test_cgrp2_tc_kern.c69
-rwxr-xr-xsamples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh66
-rw-r--r--scripts/mod/file2alias.c2
-rw-r--r--security/keys/key.c2
-rw-r--r--sound/core/timer.c2
-rw-r--r--sound/drivers/dummy.c1
-rw-r--r--sound/hda/hdac_regmap.c4
-rw-r--r--sound/pci/au88x0/au88x0_core.c5
-rw-r--r--sound/pci/echoaudio/echoaudio.c4
-rw-r--r--sound/pci/hda/hda_generic.c2
-rw-r--r--sound/pci/hda/hda_intel.c6
-rw-r--r--sound/pci/hda/hda_tegra.c20
-rw-r--r--sound/pci/hda/patch_realtek.c7
-rw-r--r--sound/soc/codecs/Kconfig7
-rw-r--r--sound/soc/codecs/ak4613.c2
-rw-r--r--sound/soc/codecs/cx20442.c1
-rw-r--r--sound/soc/codecs/hdac_hdmi.c20
-rw-r--r--sound/soc/codecs/rt5645.c2
-rw-r--r--sound/soc/codecs/rt5670.c2
-rw-r--r--sound/soc/codecs/wm5102.c2
-rw-r--r--sound/soc/codecs/wm5110.c1
-rw-r--r--sound/soc/codecs/wm8940.c1
-rw-r--r--sound/soc/davinci/davinci-mcasp.c56
-rw-r--r--sound/soc/davinci/davinci-mcasp.h4
-rw-r--r--sound/soc/fsl/fsl_ssi.c12
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-compress.c9
-rw-r--r--sound/soc/intel/skylake/bxt-sst.c1
-rw-r--r--sound/soc/sh/rcar/adg.c2
-rw-r--r--tools/perf/util/data-convert-bt.c41
-rw-r--r--tools/perf/util/event.c2
-rw-r--r--tools/perf/util/symbol.c16
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc9
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc9
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc9
-rw-r--r--tools/testing/selftests/vm/compaction_test.c2
-rw-r--r--tools/virtio/ringtest/Makefile4
-rw-r--r--tools/virtio/ringtest/README4
-rw-r--r--tools/virtio/ringtest/noring.c69
-rw-r--r--tools/virtio/ringtest/ptr_ring.c5
-rwxr-xr-xtools/virtio/ringtest/run-on-all.sh4
-rw-r--r--tools/vm/slabinfo.c2
-rw-r--r--virt/kvm/kvm_main.c2
1171 files changed, 26558 insertions, 10390 deletions
diff --git a/.mailmap b/.mailmap
index 08b80428f583..52489f564069 100644
--- a/.mailmap
+++ b/.mailmap
@@ -21,6 +21,7 @@ Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
21Andrew Morton <akpm@linux-foundation.org> 21Andrew Morton <akpm@linux-foundation.org>
22Andrew Vasquez <andrew.vasquez@qlogic.com> 22Andrew Vasquez <andrew.vasquez@qlogic.com>
23Andy Adamson <andros@citi.umich.edu> 23Andy Adamson <andros@citi.umich.edu>
24Antoine Tenart <antoine.tenart@free-electrons.com>
24Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com> 25Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
25Archit Taneja <archit@ti.com> 26Archit Taneja <archit@ti.com>
26Arnaud Patard <arnaud.patard@rtp-net.org> 27Arnaud Patard <arnaud.patard@rtp-net.org>
@@ -30,6 +31,9 @@ Axel Lin <axel.lin@gmail.com>
30Ben Gardner <bgardner@wabtec.com> 31Ben Gardner <bgardner@wabtec.com>
31Ben M Cahill <ben.m.cahill@intel.com> 32Ben M Cahill <ben.m.cahill@intel.com>
32Björn Steinbrink <B.Steinbrink@gmx.de> 33Björn Steinbrink <B.Steinbrink@gmx.de>
34Boris Brezillon <boris.brezillon@free-electrons.com>
35Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon.dev@gmail.com>
36Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon@overkiz.com>
33Brian Avery <b.avery@hp.com> 37Brian Avery <b.avery@hp.com>
34Brian King <brking@us.ibm.com> 38Brian King <brking@us.ibm.com>
35Christoph Hellwig <hch@lst.de> 39Christoph Hellwig <hch@lst.de>
@@ -89,6 +93,7 @@ Leonid I Ananiev <leonid.i.ananiev@intel.com>
89Linas Vepstas <linas@austin.ibm.com> 93Linas Vepstas <linas@austin.ibm.com>
90Mark Brown <broonie@sirena.org.uk> 94Mark Brown <broonie@sirena.org.uk>
91Matthieu CASTET <castet.matthieu@free.fr> 95Matthieu CASTET <castet.matthieu@free.fr>
96Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com> <mchehab@infradead.org> <mchehab@redhat.com> <m.chehab@samsung.com> <mchehab@osg.samsung.com> <mchehab@s-opensource.com>
92Mayuresh Janorkar <mayur@ti.com> 97Mayuresh Janorkar <mayur@ti.com>
93Michael Buesch <m@bues.ch> 98Michael Buesch <m@bues.ch>
94Michel Dänzer <michel@tungstengraphics.com> 99Michel Dänzer <michel@tungstengraphics.com>
@@ -122,6 +127,7 @@ Santosh Shilimkar <santosh.shilimkar@oracle.org>
122Sascha Hauer <s.hauer@pengutronix.de> 127Sascha Hauer <s.hauer@pengutronix.de>
123S.Çağlar Onur <caglar@pardus.org.tr> 128S.Çağlar Onur <caglar@pardus.org.tr>
124Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com> 129Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
130Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com> <shuah.khan@hp.com> <shuahkh@osg.samsung.com> <shuah.kh@samsung.com>
125Simon Kelley <simon@thekelleys.org.uk> 131Simon Kelley <simon@thekelleys.org.uk>
126Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr> 132Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
127Stephen Hemminger <shemminger@osdl.org> 133Stephen Hemminger <shemminger@osdl.org>
diff --git a/CREDITS b/CREDITS
index 0f0bf22afe0c..2a3fbcd229e6 100644
--- a/CREDITS
+++ b/CREDITS
@@ -649,6 +649,7 @@ D: Configure, Menuconfig, xconfig
649 649
650N: Mauro Carvalho Chehab 650N: Mauro Carvalho Chehab
651E: m.chehab@samsung.org 651E: m.chehab@samsung.org
652E: mchehab@osg.samsung.com
652E: mchehab@infradead.org 653E: mchehab@infradead.org
653D: Media subsystem (V4L/DVB) drivers and core 654D: Media subsystem (V4L/DVB) drivers and core
654D: EDAC drivers and EDAC 3.0 core rework 655D: EDAC drivers and EDAC 3.0 core rework
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uvc b/Documentation/ABI/testing/configfs-usb-gadget-uvc
index 2f4a0051b32d..1ba0d0fda9c0 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-uvc
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uvc
@@ -1,6 +1,6 @@
1What: /config/usb-gadget/gadget/functions/uvc.name 1What: /config/usb-gadget/gadget/functions/uvc.name
2Date: Dec 2014 2Date: Dec 2014
3KernelVersion: 3.20 3KernelVersion: 4.0
4Description: UVC function directory 4Description: UVC function directory
5 5
6 streaming_maxburst - 0..15 (ss only) 6 streaming_maxburst - 0..15 (ss only)
@@ -9,37 +9,37 @@ Description: UVC function directory
9 9
10What: /config/usb-gadget/gadget/functions/uvc.name/control 10What: /config/usb-gadget/gadget/functions/uvc.name/control
11Date: Dec 2014 11Date: Dec 2014
12KernelVersion: 3.20 12KernelVersion: 4.0
13Description: Control descriptors 13Description: Control descriptors
14 14
15What: /config/usb-gadget/gadget/functions/uvc.name/control/class 15What: /config/usb-gadget/gadget/functions/uvc.name/control/class
16Date: Dec 2014 16Date: Dec 2014
17KernelVersion: 3.20 17KernelVersion: 4.0
18Description: Class descriptors 18Description: Class descriptors
19 19
20What: /config/usb-gadget/gadget/functions/uvc.name/control/class/ss 20What: /config/usb-gadget/gadget/functions/uvc.name/control/class/ss
21Date: Dec 2014 21Date: Dec 2014
22KernelVersion: 3.20 22KernelVersion: 4.0
23Description: Super speed control class descriptors 23Description: Super speed control class descriptors
24 24
25What: /config/usb-gadget/gadget/functions/uvc.name/control/class/fs 25What: /config/usb-gadget/gadget/functions/uvc.name/control/class/fs
26Date: Dec 2014 26Date: Dec 2014
27KernelVersion: 3.20 27KernelVersion: 4.0
28Description: Full speed control class descriptors 28Description: Full speed control class descriptors
29 29
30What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal 30What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal
31Date: Dec 2014 31Date: Dec 2014
32KernelVersion: 3.20 32KernelVersion: 4.0
33Description: Terminal descriptors 33Description: Terminal descriptors
34 34
35What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output 35What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output
36Date: Dec 2014 36Date: Dec 2014
37KernelVersion: 3.20 37KernelVersion: 4.0
38Description: Output terminal descriptors 38Description: Output terminal descriptors
39 39
40What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output/default 40What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output/default
41Date: Dec 2014 41Date: Dec 2014
42KernelVersion: 3.20 42KernelVersion: 4.0
43Description: Default output terminal descriptors 43Description: Default output terminal descriptors
44 44
45 All attributes read only: 45 All attributes read only:
@@ -53,12 +53,12 @@ Description: Default output terminal descriptors
53 53
54What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera 54What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera
55Date: Dec 2014 55Date: Dec 2014
56KernelVersion: 3.20 56KernelVersion: 4.0
57Description: Camera terminal descriptors 57Description: Camera terminal descriptors
58 58
59What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera/default 59What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera/default
60Date: Dec 2014 60Date: Dec 2014
61KernelVersion: 3.20 61KernelVersion: 4.0
62Description: Default camera terminal descriptors 62Description: Default camera terminal descriptors
63 63
64 All attributes read only: 64 All attributes read only:
@@ -75,12 +75,12 @@ Description: Default camera terminal descriptors
75 75
76What: /config/usb-gadget/gadget/functions/uvc.name/control/processing 76What: /config/usb-gadget/gadget/functions/uvc.name/control/processing
77Date: Dec 2014 77Date: Dec 2014
78KernelVersion: 3.20 78KernelVersion: 4.0
79Description: Processing unit descriptors 79Description: Processing unit descriptors
80 80
81What: /config/usb-gadget/gadget/functions/uvc.name/control/processing/default 81What: /config/usb-gadget/gadget/functions/uvc.name/control/processing/default
82Date: Dec 2014 82Date: Dec 2014
83KernelVersion: 3.20 83KernelVersion: 4.0
84Description: Default processing unit descriptors 84Description: Default processing unit descriptors
85 85
86 All attributes read only: 86 All attributes read only:
@@ -94,49 +94,49 @@ Description: Default processing unit descriptors
94 94
95What: /config/usb-gadget/gadget/functions/uvc.name/control/header 95What: /config/usb-gadget/gadget/functions/uvc.name/control/header
96Date: Dec 2014 96Date: Dec 2014
97KernelVersion: 3.20 97KernelVersion: 4.0
98Description: Control header descriptors 98Description: Control header descriptors
99 99
100What: /config/usb-gadget/gadget/functions/uvc.name/control/header/name 100What: /config/usb-gadget/gadget/functions/uvc.name/control/header/name
101Date: Dec 2014 101Date: Dec 2014
102KernelVersion: 3.20 102KernelVersion: 4.0
103Description: Specific control header descriptors 103Description: Specific control header descriptors
104 104
105dwClockFrequency 105dwClockFrequency
106bcdUVC 106bcdUVC
107What: /config/usb-gadget/gadget/functions/uvc.name/streaming 107What: /config/usb-gadget/gadget/functions/uvc.name/streaming
108Date: Dec 2014 108Date: Dec 2014
109KernelVersion: 3.20 109KernelVersion: 4.0
110Description: Streaming descriptors 110Description: Streaming descriptors
111 111
112What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class 112What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class
113Date: Dec 2014 113Date: Dec 2014
114KernelVersion: 3.20 114KernelVersion: 4.0
115Description: Streaming class descriptors 115Description: Streaming class descriptors
116 116
117What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/ss 117What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/ss
118Date: Dec 2014 118Date: Dec 2014
119KernelVersion: 3.20 119KernelVersion: 4.0
120Description: Super speed streaming class descriptors 120Description: Super speed streaming class descriptors
121 121
122What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/hs 122What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/hs
123Date: Dec 2014 123Date: Dec 2014
124KernelVersion: 3.20 124KernelVersion: 4.0
125Description: High speed streaming class descriptors 125Description: High speed streaming class descriptors
126 126
127What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/fs 127What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/fs
128Date: Dec 2014 128Date: Dec 2014
129KernelVersion: 3.20 129KernelVersion: 4.0
130Description: Full speed streaming class descriptors 130Description: Full speed streaming class descriptors
131 131
132What: /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching 132What: /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching
133Date: Dec 2014 133Date: Dec 2014
134KernelVersion: 3.20 134KernelVersion: 4.0
135Description: Color matching descriptors 135Description: Color matching descriptors
136 136
137What: /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching/default 137What: /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching/default
138Date: Dec 2014 138Date: Dec 2014
139KernelVersion: 3.20 139KernelVersion: 4.0
140Description: Default color matching descriptors 140Description: Default color matching descriptors
141 141
142 All attributes read only: 142 All attributes read only:
@@ -150,12 +150,12 @@ Description: Default color matching descriptors
150 150
151What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg 151What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg
152Date: Dec 2014 152Date: Dec 2014
153KernelVersion: 3.20 153KernelVersion: 4.0
154Description: MJPEG format descriptors 154Description: MJPEG format descriptors
155 155
156What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name 156What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name
157Date: Dec 2014 157Date: Dec 2014
158KernelVersion: 3.20 158KernelVersion: 4.0
159Description: Specific MJPEG format descriptors 159Description: Specific MJPEG format descriptors
160 160
161 All attributes read only, 161 All attributes read only,
@@ -174,7 +174,7 @@ Description: Specific MJPEG format descriptors
174 174
175What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name/name 175What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name/name
176Date: Dec 2014 176Date: Dec 2014
177KernelVersion: 3.20 177KernelVersion: 4.0
178Description: Specific MJPEG frame descriptors 178Description: Specific MJPEG frame descriptors
179 179
180 dwFrameInterval - indicates how frame interval can be 180 dwFrameInterval - indicates how frame interval can be
@@ -196,12 +196,12 @@ Description: Specific MJPEG frame descriptors
196 196
197What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed 197What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed
198Date: Dec 2014 198Date: Dec 2014
199KernelVersion: 3.20 199KernelVersion: 4.0
200Description: Uncompressed format descriptors 200Description: Uncompressed format descriptors
201 201
202What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name 202What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name
203Date: Dec 2014 203Date: Dec 2014
204KernelVersion: 3.20 204KernelVersion: 4.0
205Description: Specific uncompressed format descriptors 205Description: Specific uncompressed format descriptors
206 206
207 bmaControls - this format's data for bmaControls in 207 bmaControls - this format's data for bmaControls in
@@ -221,7 +221,7 @@ Description: Specific uncompressed format descriptors
221 221
222What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name/name 222What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name/name
223Date: Dec 2014 223Date: Dec 2014
224KernelVersion: 3.20 224KernelVersion: 4.0
225Description: Specific uncompressed frame descriptors 225Description: Specific uncompressed frame descriptors
226 226
227 dwFrameInterval - indicates how frame interval can be 227 dwFrameInterval - indicates how frame interval can be
@@ -243,12 +243,12 @@ Description: Specific uncompressed frame descriptors
243 243
244What: /config/usb-gadget/gadget/functions/uvc.name/streaming/header 244What: /config/usb-gadget/gadget/functions/uvc.name/streaming/header
245Date: Dec 2014 245Date: Dec 2014
246KernelVersion: 3.20 246KernelVersion: 4.0
247Description: Streaming header descriptors 247Description: Streaming header descriptors
248 248
249What: /config/usb-gadget/gadget/functions/uvc.name/streaming/header/name 249What: /config/usb-gadget/gadget/functions/uvc.name/streaming/header/name
250Date: Dec 2014 250Date: Dec 2014
251KernelVersion: 3.20 251KernelVersion: 4.0
252Description: Specific streaming header descriptors 252Description: Specific streaming header descriptors
253 253
254 All attributes read only: 254 All attributes read only:
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
index 6708c5e264aa..33e96f740639 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
+++ b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
@@ -1,4 +1,4 @@
1What /sys/bus/iio/devices/iio:deviceX/in_proximity_raw 1What /sys/bus/iio/devices/iio:deviceX/in_proximity_input
2Date: March 2014 2Date: March 2014
3KernelVersion: 3.15 3KernelVersion: 3.15
4Contact: Matt Ranostay <mranostay@gmail.com> 4Contact: Matt Ranostay <mranostay@gmail.com>
diff --git a/Documentation/ABI/testing/sysfs-class-net-batman-adv b/Documentation/ABI/testing/sysfs-class-net-batman-adv
index 518f6a1dbc0c..898106849e27 100644
--- a/Documentation/ABI/testing/sysfs-class-net-batman-adv
+++ b/Documentation/ABI/testing/sysfs-class-net-batman-adv
@@ -1,19 +1,10 @@
1 1
2What: /sys/class/net/<iface>/batman-adv/throughput_override
3Date: Feb 2014
4Contact: Antonio Quartulli <antonio@meshcoding.com>
5description:
6 Defines the throughput value to be used by B.A.T.M.A.N. V
7 when estimating the link throughput using this interface.
8 If the value is set to 0 then batman-adv will try to
9 estimate the throughput by itself.
10
11What: /sys/class/net/<iface>/batman-adv/elp_interval 2What: /sys/class/net/<iface>/batman-adv/elp_interval
12Date: Feb 2014 3Date: Feb 2014
13Contact: Linus Lüssing <linus.luessing@web.de> 4Contact: Linus Lüssing <linus.luessing@web.de>
14Description: 5Description:
15 Defines the interval in milliseconds in which batman 6 Defines the interval in milliseconds in which batman
16 sends its probing packets for link quality measurements. 7 emits probing packets for neighbor sensing (ELP).
17 8
18What: /sys/class/net/<iface>/batman-adv/iface_status 9What: /sys/class/net/<iface>/batman-adv/iface_status
19Date: May 2010 10Date: May 2010
@@ -28,3 +19,12 @@ Description:
28 The /sys/class/net/<iface>/batman-adv/mesh_iface file 19 The /sys/class/net/<iface>/batman-adv/mesh_iface file
29 displays the batman mesh interface this <iface> 20 displays the batman mesh interface this <iface>
30 currently is associated with. 21 currently is associated with.
22
23What: /sys/class/net/<iface>/batman-adv/throughput_override
24Date: Feb 2014
25Contact: Antonio Quartulli <a@unstable.cc>
26Description:
27 Defines the throughput value to be used by B.A.T.M.A.N. V
28 when estimating the link throughput using this interface.
29 If the value is set to 0 then batman-adv will try to
30 estimate the throughput by itself.
diff --git a/Documentation/devicetree/bindings/hwmon/ina2xx.txt b/Documentation/devicetree/bindings/hwmon/ina2xx.txt
index 9bcd5e87830d..02af0d94e921 100644
--- a/Documentation/devicetree/bindings/hwmon/ina2xx.txt
+++ b/Documentation/devicetree/bindings/hwmon/ina2xx.txt
@@ -7,6 +7,7 @@ Required properties:
7 - "ti,ina220" for ina220 7 - "ti,ina220" for ina220
8 - "ti,ina226" for ina226 8 - "ti,ina226" for ina226
9 - "ti,ina230" for ina230 9 - "ti,ina230" for ina230
10 - "ti,ina231" for ina231
10- reg: I2C address 11- reg: I2C address
11 12
12Optional properties: 13Optional properties:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt b/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt
index bfeabb843941..71191ff0e781 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt
@@ -44,8 +44,8 @@ Required properties:
44- our-claim-gpio: The GPIO that we use to claim the bus. 44- our-claim-gpio: The GPIO that we use to claim the bus.
45- their-claim-gpios: The GPIOs that the other sides use to claim the bus. 45- their-claim-gpios: The GPIOs that the other sides use to claim the bus.
46 Note that some implementations may only support a single other master. 46 Note that some implementations may only support a single other master.
47- Standard I2C mux properties. See mux.txt in this directory. 47- Standard I2C mux properties. See i2c-mux.txt in this directory.
48- Single I2C child bus node at reg 0. See mux.txt in this directory. 48- Single I2C child bus node at reg 0. See i2c-mux.txt in this directory.
49 49
50Optional properties: 50Optional properties:
51- slew-delay-us: microseconds to wait for a GPIO to go high. Default is 10 us. 51- slew-delay-us: microseconds to wait for a GPIO to go high. Default is 10 us.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt
index 6078aefe7ed4..7ce23ac61308 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt
@@ -27,7 +27,8 @@ Required properties:
27- i2c-bus-name: The name of this bus. Also needed as pinctrl-name for the I2C 27- i2c-bus-name: The name of this bus. Also needed as pinctrl-name for the I2C
28 parents. 28 parents.
29 29
30Furthermore, I2C mux properties and child nodes. See mux.txt in this directory. 30Furthermore, I2C mux properties and child nodes. See i2c-mux.txt in this
31directory.
31 32
32Example: 33Example:
33 34
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt
index 66709a825541..21da3ecbb370 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt
@@ -22,8 +22,8 @@ Required properties:
22- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side 22- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
23 port is connected to. 23 port is connected to.
24- mux-gpios: list of gpios used to control the muxer 24- mux-gpios: list of gpios used to control the muxer
25* Standard I2C mux properties. See mux.txt in this directory. 25* Standard I2C mux properties. See i2c-mux.txt in this directory.
26* I2C child bus nodes. See mux.txt in this directory. 26* I2C child bus nodes. See i2c-mux.txt in this directory.
27 27
28Optional properties: 28Optional properties:
29- idle-state: value to set the muxer to when idle. When no value is 29- idle-state: value to set the muxer to when idle. When no value is
@@ -33,7 +33,7 @@ For each i2c child node, an I2C child bus will be created. They will
33be numbered based on their order in the device tree. 33be numbered based on their order in the device tree.
34 34
35Whenever an access is made to a device on a child bus, the value set 35Whenever an access is made to a device on a child bus, the value set
36in the revelant node's reg property will be output using the list of 36in the relevant node's reg property will be output using the list of
37GPIOs, the first in the list holding the least-significant value. 37GPIOs, the first in the list holding the least-significant value.
38 38
39If an idle state is defined, using the idle-state (optional) property, 39If an idle state is defined, using the idle-state (optional) property,
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
index ae8af1694e95..33119a98e144 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
@@ -28,9 +28,9 @@ Also required are:
28* Standard pinctrl properties that specify the pin mux state for each child 28* Standard pinctrl properties that specify the pin mux state for each child
29 bus. See ../pinctrl/pinctrl-bindings.txt. 29 bus. See ../pinctrl/pinctrl-bindings.txt.
30 30
31* Standard I2C mux properties. See mux.txt in this directory. 31* Standard I2C mux properties. See i2c-mux.txt in this directory.
32 32
33* I2C child bus nodes. See mux.txt in this directory. 33* I2C child bus nodes. See i2c-mux.txt in this directory.
34 34
35For each named state defined in the pinctrl-names property, an I2C child bus 35For each named state defined in the pinctrl-names property, an I2C child bus
36will be created. I2C child bus numbers are assigned based on the index into 36will be created. I2C child bus numbers are assigned based on the index into
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt
index 688783fbe696..de00d7fc450b 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt
@@ -7,8 +7,8 @@ Required properties:
7- compatible: i2c-mux-reg 7- compatible: i2c-mux-reg
8- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side 8- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
9 port is connected to. 9 port is connected to.
10* Standard I2C mux properties. See mux.txt in this directory. 10* Standard I2C mux properties. See i2c-mux.txt in this directory.
11* I2C child bus nodes. See mux.txt in this directory. 11* I2C child bus nodes. See i2c-mux.txt in this directory.
12 12
13Optional properties: 13Optional properties:
14- reg: this pair of <offset size> specifies the register to control the mux. 14- reg: this pair of <offset size> specifies the register to control the mux.
@@ -24,7 +24,7 @@ Optional properties:
24 given, it defaults to the last value used. 24 given, it defaults to the last value used.
25 25
26Whenever an access is made to a device on a child bus, the value set 26Whenever an access is made to a device on a child bus, the value set
27in the revelant node's reg property will be output to the register. 27in the relevant node's reg property will be output to the register.
28 28
29If an idle state is defined, using the idle-state (optional) property, 29If an idle state is defined, using the idle-state (optional) property,
30whenever an access is not being made to a device on a child bus, the 30whenever an access is not being made to a device on a child bus, the
diff --git a/Documentation/devicetree/bindings/net/davinci-mdio.txt b/Documentation/devicetree/bindings/net/davinci-mdio.txt
index 0369e25aabd2..621156ca4ffd 100644
--- a/Documentation/devicetree/bindings/net/davinci-mdio.txt
+++ b/Documentation/devicetree/bindings/net/davinci-mdio.txt
@@ -2,7 +2,10 @@ TI SoC Davinci/Keystone2 MDIO Controller Device Tree Bindings
2--------------------------------------------------- 2---------------------------------------------------
3 3
4Required properties: 4Required properties:
5- compatible : Should be "ti,davinci_mdio" or "ti,keystone_mdio" 5- compatible : Should be "ti,davinci_mdio"
6 and "ti,keystone_mdio" for Keystone 2 SoCs
7 and "ti,cpsw-mdio" for am335x, am472x, am57xx/dra7, dm814x SoCs
8 and "ti,am4372-mdio" for am472x SoC
6- reg : physical base address and size of the davinci mdio 9- reg : physical base address and size of the davinci mdio
7 registers map 10 registers map
8- bus_freq : Mdio Bus frequency 11- bus_freq : Mdio Bus frequency
diff --git a/Documentation/devicetree/bindings/net/keystone-netcp.txt b/Documentation/devicetree/bindings/net/keystone-netcp.txt
index b30ab6b5cbfa..04ba1dc34fd6 100644
--- a/Documentation/devicetree/bindings/net/keystone-netcp.txt
+++ b/Documentation/devicetree/bindings/net/keystone-netcp.txt
@@ -2,7 +2,7 @@ This document describes the device tree bindings associated with the
2keystone network coprocessor(NetCP) driver support. 2keystone network coprocessor(NetCP) driver support.
3 3
4The network coprocessor (NetCP) is a hardware accelerator that processes 4The network coprocessor (NetCP) is a hardware accelerator that processes
5Ethernet packets. NetCP has a gigabit Ethernet (GbE) subsytem with a ethernet 5Ethernet packets. NetCP has a gigabit Ethernet (GbE) subsystem with an ethernet
6switch sub-module to send and receive packets. NetCP also includes a packet 6switch sub-module to send and receive packets. NetCP also includes a packet
7accelerator (PA) module to perform packet classification operations such as 7accelerator (PA) module to perform packet classification operations such as
8header matching, and packet modification operations such as checksum 8header matching, and packet modification operations such as checksum
diff --git a/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt b/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt
index 14aa6cf58201..6a9a63cb0543 100644
--- a/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt
+++ b/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt
@@ -13,10 +13,10 @@ Optional properties:
13 initialization. This is an array of 28 values(u8). 13 initialization. This is an array of 28 values(u8).
14 14
15 - marvell,wakeup-pin: It represents wakeup pin number of the bluetooth chip. 15 - marvell,wakeup-pin: It represents wakeup pin number of the bluetooth chip.
16 firmware will use the pin to wakeup host system. 16 firmware will use the pin to wake up the host system (u16).
17 - marvell,wakeup-gap-ms: wakeup gap represents wakeup latency of the host 17 - marvell,wakeup-gap-ms: wakeup gap represents wakeup latency of the host
18 platform. The value will be configured to firmware. This 18 platform. The value will be configured to firmware. This
19 is needed to work chip's sleep feature as expected. 19 is needed for the chip's sleep feature to work as expected (u16).
20 - interrupt-parent: phandle of the parent interrupt controller 20 - interrupt-parent: phandle of the parent interrupt controller
21 - interrupts : interrupt pin number to the cpu. Driver will request an irq based 21 - interrupts : interrupt pin number to the cpu. Driver will request an irq based
22 on this interrupt number. During system suspend, the irq will be 22 on this interrupt number. During system suspend, the irq will be
@@ -50,7 +50,7 @@ calibration data is also available in below example.
50 0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02 50 0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02
51 0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00 51 0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00
52 0x00 0x00 0xf0 0x00>; 52 0x00 0x00 0xf0 0x00>;
53 marvell,wakeup-pin = <0x0d>; 53 marvell,wakeup-pin = /bits/ 16 <0x0d>;
54 marvell,wakeup-gap-ms = <0x64>; 54 marvell,wakeup-gap-ms = /bits/ 16 <0x64>;
55 }; 55 };
56}; 56};
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 95816c5fc589..41b49e6075f5 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -47,6 +47,9 @@ Optional properties:
47 supported by this device instance 47 supported by this device instance
48- snps,perfect-filter-entries: Number of perfect filter entries supported 48- snps,perfect-filter-entries: Number of perfect filter entries supported
49 by this device instance 49 by this device instance
50- snps,ps-speed: port selection speed that can be passed to the core when
51 PCS is supported. For example, this is used in the case of SGMII
52 and MAC2MAC connections.
50- AXI BUS Mode parameters: below the list of all the parameters to program the 53- AXI BUS Mode parameters: below the list of all the parameters to program the
51 AXI register inside the DMA module: 54 AXI register inside the DMA module:
52 - snps,lpi_en: enable Low Power Interface 55 - snps,lpi_en: enable Low Power Interface
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index a7440bcd67ff..2c2500df0dce 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -255,6 +255,7 @@ synology Synology, Inc.
255SUNW Sun Microsystems, Inc 255SUNW Sun Microsystems, Inc
256tbs TBS Technologies 256tbs TBS Technologies
257tcl Toby Churchill Ltd. 257tcl Toby Churchill Ltd.
258technexion TechNexion
258technologic Technologic Systems 259technologic Technologic Systems
259thine THine Electronics, Inc. 260thine THine Electronics, Inc.
260ti Texas Instruments 261ti Texas Instruments
@@ -269,6 +270,7 @@ tronsmart Tronsmart
269truly Truly Semiconductors Limited 270truly Truly Semiconductors Limited
270tyan Tyan Computer Corporation 271tyan Tyan Computer Corporation
271upisemi uPI Semiconductor Corp. 272upisemi uPI Semiconductor Corp.
273uniwest United Western Technologies Corp (UniWest)
272urt United Radiant Technology Corporation 274urt United Radiant Technology Corporation
273usi Universal Scientific Industrial Co., Ltd. 275usi Universal Scientific Industrial Co., Ltd.
274v3 V3 Semiconductor 276v3 V3 Semiconductor
diff --git a/Documentation/leds/leds-class.txt b/Documentation/leds/leds-class.txt
index d406d98339b2..44f5e6bccd97 100644
--- a/Documentation/leds/leds-class.txt
+++ b/Documentation/leds/leds-class.txt
@@ -74,8 +74,8 @@ blink_set() function (see <linux/leds.h>). To set an LED to blinking,
74however, it is better to use the API function led_blink_set(), as it 74however, it is better to use the API function led_blink_set(), as it
75will check and implement software fallback if necessary. 75will check and implement software fallback if necessary.
76 76
77To turn off blinking again, use the API function led_brightness_set() 77To turn off blinking, use the API function led_brightness_set()
78as that will not just set the LED brightness but also stop any software 78with brightness value LED_OFF, which should stop any software
79timers that may have been required for blinking. 79timers that may have been required for blinking.
80 80
81The blink_set() function should choose a user friendly blinking value 81The blink_set() function should choose a user friendly blinking value
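The blink API described above can be exercised as follows — a minimal sketch, assuming a hypothetical trigger-style caller; led_blink_set() and led_set_brightness() are the in-tree entry points (the latter is what the text refers to as led_brightness_set()):

    #include <linux/leds.h>

    /* Hypothetical caller: start blinking, then stop it again. */
    static void example_blink_then_stop(struct led_classdev *cdev)
    {
            unsigned long delay_on = 500;   /* ms; 0 lets the driver choose */
            unsigned long delay_off = 500;  /* ms */

            /* Falls back to a software timer if the driver has no
             * hardware blink_set() op. */
            led_blink_set(cdev, &delay_on, &delay_off);

            /* Setting brightness to LED_OFF also tears down any
             * software blink timer, as the paragraph above notes. */
            led_set_brightness(cdev, LED_OFF);
    }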
diff --git a/Documentation/networking/nf_conntrack-sysctl.txt b/Documentation/networking/nf_conntrack-sysctl.txt
index f55599c62c9d..4fb51d32fccc 100644
--- a/Documentation/networking/nf_conntrack-sysctl.txt
+++ b/Documentation/networking/nf_conntrack-sysctl.txt
@@ -7,12 +7,13 @@ nf_conntrack_acct - BOOLEAN
7 Enable connection tracking flow accounting. 64-bit byte and packet 7 Enable connection tracking flow accounting. 64-bit byte and packet
8 counters per flow are added. 8 counters per flow are added.
9 9
10nf_conntrack_buckets - INTEGER (read-only) 10nf_conntrack_buckets - INTEGER
11 Size of hash table. If not specified as parameter during module 11 Size of hash table. If not specified as parameter during module
12 loading, the default size is calculated by dividing total memory 12 loading, the default size is calculated by dividing total memory
13 by 16384 to determine the number of buckets but the hash table will 13 by 16384 to determine the number of buckets but the hash table will
14 never have fewer than 32 and limited to 16384 buckets. For systems 14 never have fewer than 32 and limited to 16384 buckets. For systems
15 with more than 4GB of memory it will be 65536 buckets. 15 with more than 4GB of memory it will be 65536 buckets.
16 This sysctl is only writeable in the initial net namespace.
16 17
17nf_conntrack_checksum - BOOLEAN 18nf_conntrack_checksum - BOOLEAN
18 0 - disabled 19 0 - disabled
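A worked example of the sizing rule above: a 1 GiB machine gets 1073741824 / 16384 = 65536 buckets, which the 16384 cap then clamps to 16384; only systems above 4 GiB jump to the fixed 65536. The chosen value can be read back from procfs — a hypothetical userspace sketch:

    #include <stdio.h>

    int main(void)
    {
            unsigned long buckets;
            FILE *f = fopen("/proc/sys/net/netfilter/nf_conntrack_buckets", "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%lu", &buckets) == 1)
                    printf("nf_conntrack_buckets = %lu\n", buckets);
            fclose(f);
            return 0;
    }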
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index 671fe3dd56d3..e226f8925c9e 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -285,6 +285,7 @@ Please see the following document:
285 o mmc_core.c/mmc.h: Management MAC Counters; 285 o mmc_core.c/mmc.h: Management MAC Counters;
286 o stmmac_hwtstamp.c: HW timestamp support for PTP; 286 o stmmac_hwtstamp.c: HW timestamp support for PTP;
287 o stmmac_ptp.c: PTP 1588 clock; 287 o stmmac_ptp.c: PTP 1588 clock;
288 o stmmac_pcs.h: Physical Coding Sublayer common implementation;
288 o dwmac-<XXX>.c: these are for the platform glue-logic file; e.g. dwmac-sti.c 289 o dwmac-<XXX>.c: these are for the platform glue-logic file; e.g. dwmac-sti.c
289 for STMicroelectronics SoCs. 290 for STMicroelectronics SoCs.
290 291
diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
index 8638f61c8c9d..37eca00796ee 100644
--- a/Documentation/scsi/scsi_eh.txt
+++ b/Documentation/scsi/scsi_eh.txt
@@ -263,19 +263,23 @@ scmd->allowed.
263 263
264 3. scmd recovered 264 3. scmd recovered
265 ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd 265 ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
266 - shost->host_failed--
267 - clear scmd->eh_eflags 266 - clear scmd->eh_eflags
268 - scsi_setup_cmd_retry() 267 - scsi_setup_cmd_retry()
269 - move from local eh_work_q to local eh_done_q 268 - move from local eh_work_q to local eh_done_q
270 LOCKING: none 269 LOCKING: none
270 CONCURRENCY: at most one thread per separate eh_work_q to
271 keep queue manipulation lockless
271 272
272 4. EH completes 273 4. EH completes
273 ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper 274 ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
274 layer of failure. 275 layer of failure. May be called concurrently but must have
276 no more than one thread per separate eh_work_q to
277 manipulate the queue locklessly
275 - scmd is removed from eh_done_q and scmd->eh_entry is cleared 278 - scmd is removed from eh_done_q and scmd->eh_entry is cleared
276 - if retry is necessary, scmd is requeued using 279 - if retry is necessary, scmd is requeued using
277 scsi_queue_insert() 280 scsi_queue_insert()
278 - otherwise, scsi_finish_command() is invoked for scmd 281 - otherwise, scsi_finish_command() is invoked for scmd
282 - zero shost->host_failed
279 LOCKING: queue or finish function performs appropriate locking 283 LOCKING: queue or finish function performs appropriate locking
280 284
281 285
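The single-owner rule added above is what keeps the list handling lockless: exactly one thread works a given eh_work_q/eh_done_q pair, so commands move between the local lists with plain list operations. A sketch under that assumption (fields as in <scsi/scsi_cmnd.h>; the helper name is hypothetical):

    #include <linux/list.h>
    #include <scsi/scsi_cmnd.h>

    /* One thread owns both local queues, so no lock is needed. */
    static void eh_move_to_done(struct scsi_cmnd *scmd,
                                struct list_head *done_q)
    {
            list_move_tail(&scmd->eh_entry, done_q);
    }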
diff --git a/MAINTAINERS b/MAINTAINERS
index f5ddaa901133..6374be26dde3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -595,6 +595,10 @@ S: Odd Fixes
595L: linux-alpha@vger.kernel.org 595L: linux-alpha@vger.kernel.org
596F: arch/alpha/ 596F: arch/alpha/
597 597
598ALPS PS/2 TOUCHPAD DRIVER
599R: Pali Rohár <pali.rohar@gmail.com>
600F: drivers/input/mouse/alps.*
601
598ALTERA MAILBOX DRIVER 602ALTERA MAILBOX DRIVER
599M: Ley Foon Tan <lftan@altera.com> 603M: Ley Foon Tan <lftan@altera.com>
600L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) 604L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
@@ -1159,6 +1163,7 @@ F: arch/arm/mach-footbridge/
1159ARM/FREESCALE IMX / MXC ARM ARCHITECTURE 1163ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
1160M: Shawn Guo <shawnguo@kernel.org> 1164M: Shawn Guo <shawnguo@kernel.org>
1161M: Sascha Hauer <kernel@pengutronix.de> 1165M: Sascha Hauer <kernel@pengutronix.de>
1166R: Fabio Estevam <fabio.estevam@nxp.com>
1162L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1167L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1163S: Maintained 1168S: Maintained
1164T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git 1169T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
@@ -2242,7 +2247,8 @@ F: include/net/ax25.h
2242F: net/ax25/ 2247F: net/ax25/
2243 2248
2244AZ6007 DVB DRIVER 2249AZ6007 DVB DRIVER
2245M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 2250M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
2251M: Mauro Carvalho Chehab <mchehab@kernel.org>
2246L: linux-media@vger.kernel.org 2252L: linux-media@vger.kernel.org
2247W: https://linuxtv.org 2253W: https://linuxtv.org
2248T: git git://linuxtv.org/media_tree.git 2254T: git git://linuxtv.org/media_tree.git
@@ -2291,6 +2297,7 @@ S: Maintained
2291F: Documentation/ABI/testing/sysfs-class-net-batman-adv 2297F: Documentation/ABI/testing/sysfs-class-net-batman-adv
2292F: Documentation/ABI/testing/sysfs-class-net-mesh 2298F: Documentation/ABI/testing/sysfs-class-net-mesh
2293F: Documentation/networking/batman-adv.txt 2299F: Documentation/networking/batman-adv.txt
2300F: include/uapi/linux/batman_adv.h
2294F: net/batman-adv/ 2301F: net/batman-adv/
2295 2302
2296BAYCOM/HDLCDRV DRIVERS FOR AX.25 2303BAYCOM/HDLCDRV DRIVERS FOR AX.25
@@ -2716,7 +2723,8 @@ F: Documentation/filesystems/btrfs.txt
2716F: fs/btrfs/ 2723F: fs/btrfs/
2717 2724
2718BTTV VIDEO4LINUX DRIVER 2725BTTV VIDEO4LINUX DRIVER
2719M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 2726M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
2727M: Mauro Carvalho Chehab <mchehab@kernel.org>
2720L: linux-media@vger.kernel.org 2728L: linux-media@vger.kernel.org
2721W: https://linuxtv.org 2729W: https://linuxtv.org
2722T: git git://linuxtv.org/media_tree.git 2730T: git git://linuxtv.org/media_tree.git
@@ -2780,9 +2788,9 @@ F: include/net/caif/
2780F: net/caif/ 2788F: net/caif/
2781 2789
2782CALGARY x86-64 IOMMU 2790CALGARY x86-64 IOMMU
2783M: Muli Ben-Yehuda <muli@il.ibm.com> 2791M: Muli Ben-Yehuda <mulix@mulix.org>
2784M: "Jon D. Mason" <jdmason@kudzu.us> 2792M: Jon Mason <jdmason@kudzu.us>
2785L: discuss@x86-64.org 2793L: iommu@lists.linux-foundation.org
2786S: Maintained 2794S: Maintained
2787F: arch/x86/kernel/pci-calgary_64.c 2795F: arch/x86/kernel/pci-calgary_64.c
2788F: arch/x86/kernel/tce_64.c 2796F: arch/x86/kernel/tce_64.c
@@ -3352,7 +3360,8 @@ S: Maintained
3352F: drivers/media/dvb-frontends/cx24120* 3360F: drivers/media/dvb-frontends/cx24120*
3353 3361
3354CX88 VIDEO4LINUX DRIVER 3362CX88 VIDEO4LINUX DRIVER
3355M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 3363M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
3364M: Mauro Carvalho Chehab <mchehab@kernel.org>
3356L: linux-media@vger.kernel.org 3365L: linux-media@vger.kernel.org
3357W: https://linuxtv.org 3366W: https://linuxtv.org
3358T: git git://linuxtv.org/media_tree.git 3367T: git git://linuxtv.org/media_tree.git
@@ -3782,6 +3791,7 @@ Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
3782S: Maintained 3791S: Maintained
3783F: drivers/dma/ 3792F: drivers/dma/
3784F: include/linux/dmaengine.h 3793F: include/linux/dmaengine.h
3794F: Documentation/devicetree/bindings/dma/
3785F: Documentation/dmaengine/ 3795F: Documentation/dmaengine/
3786T: git git://git.infradead.org/users/vkoul/slave-dma.git 3796T: git git://git.infradead.org/users/vkoul/slave-dma.git
3787 3797
@@ -4299,7 +4309,8 @@ F: fs/ecryptfs/
4299EDAC-CORE 4309EDAC-CORE
4300M: Doug Thompson <dougthompson@xmission.com> 4310M: Doug Thompson <dougthompson@xmission.com>
4301M: Borislav Petkov <bp@alien8.de> 4311M: Borislav Petkov <bp@alien8.de>
4302M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 4312M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
4313M: Mauro Carvalho Chehab <mchehab@kernel.org>
4303L: linux-edac@vger.kernel.org 4314L: linux-edac@vger.kernel.org
4304T: git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next 4315T: git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next
4305T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next 4316T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next
@@ -4344,7 +4355,8 @@ S: Maintained
4344F: drivers/edac/e7xxx_edac.c 4355F: drivers/edac/e7xxx_edac.c
4345 4356
4346EDAC-GHES 4357EDAC-GHES
4347M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 4358M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
4359M: Mauro Carvalho Chehab <mchehab@kernel.org>
4348L: linux-edac@vger.kernel.org 4360L: linux-edac@vger.kernel.org
4349S: Maintained 4361S: Maintained
4350F: drivers/edac/ghes_edac.c 4362F: drivers/edac/ghes_edac.c
@@ -4368,19 +4380,22 @@ S: Maintained
4368F: drivers/edac/i5000_edac.c 4380F: drivers/edac/i5000_edac.c
4369 4381
4370EDAC-I5400 4382EDAC-I5400
4371M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 4383M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
4384M: Mauro Carvalho Chehab <mchehab@kernel.org>
4372L: linux-edac@vger.kernel.org 4385L: linux-edac@vger.kernel.org
4373S: Maintained 4386S: Maintained
4374F: drivers/edac/i5400_edac.c 4387F: drivers/edac/i5400_edac.c
4375 4388
4376EDAC-I7300 4389EDAC-I7300
4377M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 4390M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
4391M: Mauro Carvalho Chehab <mchehab@kernel.org>
4378L: linux-edac@vger.kernel.org 4392L: linux-edac@vger.kernel.org
4379S: Maintained 4393S: Maintained
4380F: drivers/edac/i7300_edac.c 4394F: drivers/edac/i7300_edac.c
4381 4395
4382EDAC-I7CORE 4396EDAC-I7CORE
4383M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 4397M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
4398M: Mauro Carvalho Chehab <mchehab@kernel.org>
4384L: linux-edac@vger.kernel.org 4399L: linux-edac@vger.kernel.org
4385S: Maintained 4400S: Maintained
4386F: drivers/edac/i7core_edac.c 4401F: drivers/edac/i7core_edac.c
@@ -4417,7 +4432,8 @@ S: Maintained
4417F: drivers/edac/r82600_edac.c 4432F: drivers/edac/r82600_edac.c
4418 4433
4419EDAC-SBRIDGE 4434EDAC-SBRIDGE
4420M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 4435M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
4436M: Mauro Carvalho Chehab <mchehab@kernel.org>
4421L: linux-edac@vger.kernel.org 4437L: linux-edac@vger.kernel.org
4422S: Maintained 4438S: Maintained
4423F: drivers/edac/sb_edac.c 4439F: drivers/edac/sb_edac.c
@@ -4476,7 +4492,8 @@ S: Maintained
4476F: drivers/net/ethernet/ibm/ehea/ 4492F: drivers/net/ethernet/ibm/ehea/
4477 4493
4478EM28XX VIDEO4LINUX DRIVER 4494EM28XX VIDEO4LINUX DRIVER
4479M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 4495M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
4496M: Mauro Carvalho Chehab <mchehab@kernel.org>
4480L: linux-media@vger.kernel.org 4497L: linux-media@vger.kernel.org
4481W: https://linuxtv.org 4498W: https://linuxtv.org
4482T: git git://linuxtv.org/media_tree.git 4499T: git git://linuxtv.org/media_tree.git
@@ -5421,6 +5438,15 @@ F: include/uapi/linux/if_hippi.h
5421F: net/802/hippi.c 5438F: net/802/hippi.c
5422F: drivers/net/hippi/ 5439F: drivers/net/hippi/
5423 5440
5441HISILICON NETWORK SUBSYSTEM DRIVER
5442M: Yisen Zhuang <yisen.zhuang@huawei.com>
5443M: Salil Mehta <salil.mehta@huawei.com>
5444L: netdev@vger.kernel.org
5445W: http://www.hisilicon.com
5446S: Maintained
5447F: drivers/net/ethernet/hisilicon/
5448F: Documentation/devicetree/bindings/net/hisilicon*.txt
5449
5424HISILICON SAS Controller 5450HISILICON SAS Controller
5425M: John Garry <john.garry@huawei.com> 5451M: John Garry <john.garry@huawei.com>
5426W: http://www.hisilicon.com 5452W: http://www.hisilicon.com
@@ -6502,6 +6528,7 @@ F: include/uapi/linux/sunrpc/
6502 6528
6503KERNEL SELFTEST FRAMEWORK 6529KERNEL SELFTEST FRAMEWORK
6504M: Shuah Khan <shuahkh@osg.samsung.com> 6530M: Shuah Khan <shuahkh@osg.samsung.com>
6531M: Shuah Khan <shuah@kernel.org>
6505L: linux-kselftest@vger.kernel.org 6532L: linux-kselftest@vger.kernel.org
6506T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest 6533T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest
6507S: Maintained 6534S: Maintained
@@ -7374,7 +7401,8 @@ S: Supported
7374F: drivers/media/pci/netup_unidvb/* 7401F: drivers/media/pci/netup_unidvb/*
7375 7402
7376MEDIA INPUT INFRASTRUCTURE (V4L/DVB) 7403MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
7377M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 7404M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
7405M: Mauro Carvalho Chehab <mchehab@kernel.org>
7378P: LinuxTV.org Project 7406P: LinuxTV.org Project
7379L: linux-media@vger.kernel.org 7407L: linux-media@vger.kernel.org
7380W: https://linuxtv.org 7408W: https://linuxtv.org
@@ -7422,7 +7450,7 @@ F: drivers/scsi/megaraid.*
7422F: drivers/scsi/megaraid/ 7450F: drivers/scsi/megaraid/
7423 7451
7424MELLANOX ETHERNET DRIVER (mlx4_en) 7452MELLANOX ETHERNET DRIVER (mlx4_en)
7425M: Eugenia Emantayev <eugenia@mellanox.com> 7453M: Tariq Toukan <tariqt@mellanox.com>
7426L: netdev@vger.kernel.org 7454L: netdev@vger.kernel.org
7427S: Supported 7455S: Supported
7428W: http://www.mellanox.com 7456W: http://www.mellanox.com
@@ -8423,10 +8451,9 @@ F: drivers/i2c/busses/i2c-ocores.c
8423OPEN FIRMWARE AND FLATTENED DEVICE TREE 8451OPEN FIRMWARE AND FLATTENED DEVICE TREE
8424M: Rob Herring <robh+dt@kernel.org> 8452M: Rob Herring <robh+dt@kernel.org>
8425M: Frank Rowand <frowand.list@gmail.com> 8453M: Frank Rowand <frowand.list@gmail.com>
8426M: Grant Likely <grant.likely@linaro.org>
8427L: devicetree@vger.kernel.org 8454L: devicetree@vger.kernel.org
8428W: http://www.devicetree.org/ 8455W: http://www.devicetree.org/
8429T: git git://git.kernel.org/pub/scm/linux/kernel/git/glikely/linux.git 8456T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
8430S: Maintained 8457S: Maintained
8431F: drivers/of/ 8458F: drivers/of/
8432F: include/linux/of*.h 8459F: include/linux/of*.h
@@ -8434,12 +8461,10 @@ F: scripts/dtc/
8434 8461
8435OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS 8462OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
8436M: Rob Herring <robh+dt@kernel.org> 8463M: Rob Herring <robh+dt@kernel.org>
8437M: Pawel Moll <pawel.moll@arm.com>
8438M: Mark Rutland <mark.rutland@arm.com> 8464M: Mark Rutland <mark.rutland@arm.com>
8439M: Ian Campbell <ijc+devicetree@hellion.org.uk>
8440M: Kumar Gala <galak@codeaurora.org>
8441L: devicetree@vger.kernel.org 8465L: devicetree@vger.kernel.org
8442T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git 8466T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
8467Q: http://patchwork.ozlabs.org/project/devicetree-bindings/list/
8443S: Maintained 8468S: Maintained
8444F: Documentation/devicetree/ 8469F: Documentation/devicetree/
8445F: arch/*/boot/dts/ 8470F: arch/*/boot/dts/
@@ -8964,6 +8989,7 @@ L: linux-gpio@vger.kernel.org
8964T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git 8989T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
8965S: Maintained 8990S: Maintained
8966F: Documentation/devicetree/bindings/pinctrl/ 8991F: Documentation/devicetree/bindings/pinctrl/
8992F: Documentation/pinctrl.txt
8967F: drivers/pinctrl/ 8993F: drivers/pinctrl/
8968F: include/linux/pinctrl/ 8994F: include/linux/pinctrl/
8969 8995
@@ -9533,7 +9559,7 @@ M: Florian Fainelli <florian@openwrt.org>
9533S: Maintained 9559S: Maintained
9534 9560
9535RDC R6040 FAST ETHERNET DRIVER 9561RDC R6040 FAST ETHERNET DRIVER
9536M: Florian Fainelli <florian@openwrt.org> 9562M: Florian Fainelli <f.fainelli@gmail.com>
9537L: netdev@vger.kernel.org 9563L: netdev@vger.kernel.org
9538S: Maintained 9564S: Maintained
9539F: drivers/net/ethernet/rdc/r6040.c 9565F: drivers/net/ethernet/rdc/r6040.c
@@ -9871,7 +9897,8 @@ S: Odd Fixes
9871F: drivers/media/i2c/saa6588* 9897F: drivers/media/i2c/saa6588*
9872 9898
9873SAA7134 VIDEO4LINUX DRIVER 9899SAA7134 VIDEO4LINUX DRIVER
9874M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 9900M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
9901M: Mauro Carvalho Chehab <mchehab@kernel.org>
9875L: linux-media@vger.kernel.org 9902L: linux-media@vger.kernel.org
9876W: https://linuxtv.org 9903W: https://linuxtv.org
9877T: git git://linuxtv.org/media_tree.git 9904T: git git://linuxtv.org/media_tree.git
@@ -10389,7 +10416,8 @@ S: Maintained
10389F: drivers/media/radio/si4713/radio-usb-si4713.c 10416F: drivers/media/radio/si4713/radio-usb-si4713.c
10390 10417
10391SIANO DVB DRIVER 10418SIANO DVB DRIVER
10392M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 10419M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
10420M: Mauro Carvalho Chehab <mchehab@kernel.org>
10393L: linux-media@vger.kernel.org 10421L: linux-media@vger.kernel.org
10394W: https://linuxtv.org 10422W: https://linuxtv.org
10395T: git git://linuxtv.org/media_tree.git 10423T: git git://linuxtv.org/media_tree.git
@@ -11155,7 +11183,8 @@ S: Maintained
11155F: drivers/media/i2c/tda9840* 11183F: drivers/media/i2c/tda9840*
11156 11184
11157TEA5761 TUNER DRIVER 11185TEA5761 TUNER DRIVER
11158M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 11186M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
11187M: Mauro Carvalho Chehab <mchehab@kernel.org>
11159L: linux-media@vger.kernel.org 11188L: linux-media@vger.kernel.org
11160W: https://linuxtv.org 11189W: https://linuxtv.org
11161T: git git://linuxtv.org/media_tree.git 11190T: git git://linuxtv.org/media_tree.git
@@ -11163,7 +11192,8 @@ S: Odd fixes
11163F: drivers/media/tuners/tea5761.* 11192F: drivers/media/tuners/tea5761.*
11164 11193
11165TEA5767 TUNER DRIVER 11194TEA5767 TUNER DRIVER
11166M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 11195M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
11196M: Mauro Carvalho Chehab <mchehab@kernel.org>
11167L: linux-media@vger.kernel.org 11197L: linux-media@vger.kernel.org
11168W: https://linuxtv.org 11198W: https://linuxtv.org
11169T: git git://linuxtv.org/media_tree.git 11199T: git git://linuxtv.org/media_tree.git
@@ -11550,7 +11580,8 @@ F: include/linux/shmem_fs.h
11550F: mm/shmem.c 11580F: mm/shmem.c
11551 11581
11552TM6000 VIDEO4LINUX DRIVER 11582TM6000 VIDEO4LINUX DRIVER
11553M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 11583M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
11584M: Mauro Carvalho Chehab <mchehab@kernel.org>
11554L: linux-media@vger.kernel.org 11585L: linux-media@vger.kernel.org
11555W: https://linuxtv.org 11586W: https://linuxtv.org
11556T: git git://linuxtv.org/media_tree.git 11587T: git git://linuxtv.org/media_tree.git
@@ -11904,7 +11935,8 @@ F: drivers/usb/common/usb-otg-fsm.c
11904 11935
11905USB OVER IP DRIVER 11936USB OVER IP DRIVER
11906M: Valentina Manea <valentina.manea.m@gmail.com> 11937M: Valentina Manea <valentina.manea.m@gmail.com>
11907M: Shuah Khan <shuah.kh@samsung.com> 11938M: Shuah Khan <shuahkh@osg.samsung.com>
11939M: Shuah Khan <shuah@kernel.org>
11908L: linux-usb@vger.kernel.org 11940L: linux-usb@vger.kernel.org
11909S: Maintained 11941S: Maintained
11910F: Documentation/usb/usbip_protocol.txt 11942F: Documentation/usb/usbip_protocol.txt
@@ -11975,6 +12007,7 @@ L: linux-usb@vger.kernel.org
11975W: http://www.linux-usb.org 12007W: http://www.linux-usb.org
11976T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git 12008T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git
11977S: Supported 12009S: Supported
12010F: Documentation/devicetree/bindings/usb/
11978F: Documentation/usb/ 12011F: Documentation/usb/
11979F: drivers/usb/ 12012F: drivers/usb/
11980F: include/linux/usb.h 12013F: include/linux/usb.h
@@ -12148,6 +12181,7 @@ VIRTIO CORE, NET AND BLOCK DRIVERS
12148M: "Michael S. Tsirkin" <mst@redhat.com> 12181M: "Michael S. Tsirkin" <mst@redhat.com>
12149L: virtualization@lists.linux-foundation.org 12182L: virtualization@lists.linux-foundation.org
12150S: Maintained 12183S: Maintained
12184F: Documentation/devicetree/bindings/virtio/
12151F: drivers/virtio/ 12185F: drivers/virtio/
12152F: tools/virtio/ 12186F: tools/virtio/
12153F: drivers/net/virtio_net.c 12187F: drivers/net/virtio_net.c
@@ -12536,7 +12570,8 @@ S: Maintained
12536F: arch/x86/entry/vdso/ 12570F: arch/x86/entry/vdso/
12537 12571
12538XC2028/3028 TUNER DRIVER 12572XC2028/3028 TUNER DRIVER
12539M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> 12573M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
12574M: Mauro Carvalho Chehab <mchehab@kernel.org>
12540L: linux-media@vger.kernel.org 12575L: linux-media@vger.kernel.org
12541W: https://linuxtv.org 12576W: https://linuxtv.org
12542T: git git://linuxtv.org/media_tree.git 12577T: git git://linuxtv.org/media_tree.git
diff --git a/Makefile b/Makefile
index 8d1301ab59fd..0d504893df6e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 7 2PATCHLEVEL = 7
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc2 4EXTRAVERSION = -rc6
5NAME = Psychotic Stoned Sheep 5NAME = Psychotic Stoned Sheep
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -363,11 +363,13 @@ CHECK = sparse
363 363
364CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \ 364CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
365 -Wbitwise -Wno-return-void $(CF) 365 -Wbitwise -Wno-return-void $(CF)
366NOSTDINC_FLAGS =
366CFLAGS_MODULE = 367CFLAGS_MODULE =
367AFLAGS_MODULE = 368AFLAGS_MODULE =
368LDFLAGS_MODULE = 369LDFLAGS_MODULE =
369CFLAGS_KERNEL = 370CFLAGS_KERNEL =
370AFLAGS_KERNEL = 371AFLAGS_KERNEL =
372LDFLAGS_vmlinux =
371CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized 373CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
372CFLAGS_KCOV = -fsanitize-coverage=trace-pc 374CFLAGS_KCOV = -fsanitize-coverage=trace-pc
373 375
diff --git a/arch/Kconfig b/arch/Kconfig
index d794384a0404..15996290fed4 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -226,8 +226,8 @@ config ARCH_INIT_TASK
226config ARCH_TASK_STRUCT_ALLOCATOR 226config ARCH_TASK_STRUCT_ALLOCATOR
227 bool 227 bool
228 228
229# Select if arch has its private alloc_thread_info() function 229# Select if arch has its private alloc_thread_stack() function
230config ARCH_THREAD_INFO_ALLOCATOR 230config ARCH_THREAD_STACK_ALLOCATOR
231 bool 231 bool
232 232
233# Select if arch wants to size task_struct dynamically via arch_task_struct_size: 233# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
@@ -606,6 +606,9 @@ config HAVE_ARCH_HASH
606 file which provides platform-specific implementations of some 606 file which provides platform-specific implementations of some
607 functions in <linux/hash.h> or fs/namei.c. 607 functions in <linux/hash.h> or fs/namei.c.
608 608
609config ISA_BUS_API
610 def_bool ISA
611
609# 612#
610# ABI hall of shame 613# ABI hall of shame
611# 614#
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index aab14a019c20..c2ebb6f36c9d 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -40,7 +40,7 @@ pgd_free(struct mm_struct *mm, pgd_t *pgd)
40static inline pmd_t * 40static inline pmd_t *
41pmd_alloc_one(struct mm_struct *mm, unsigned long address) 41pmd_alloc_one(struct mm_struct *mm, unsigned long address)
42{ 42{
43 pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 43 pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
44 return ret; 44 return ret;
45} 45}
46 46
@@ -53,7 +53,7 @@ pmd_free(struct mm_struct *mm, pmd_t *pmd)
53static inline pte_t * 53static inline pte_t *
54pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 54pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
55{ 55{
56 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 56 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
57 return pte; 57 return pte;
58} 58}
59 59
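The __GFP_REPEAT removals here and in the arc hunks below are behavior-neutral: the flag only influences costly allocations (order greater than PAGE_ALLOC_COSTLY_ORDER, i.e. 3), and page-table pages are order-0, so the flag was a no-op. An illustrative restatement of the new allocation call:

    #include <linux/gfp.h>

    /* Order-0 page-table page: __GFP_REPEAT would have no effect here. */
    static unsigned long example_pt_page(void)
    {
            return __get_free_page(GFP_KERNEL | __GFP_ZERO);
    }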
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index d4df6be66d58..85814e74677d 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -66,8 +66,6 @@ endif
66 66
67endif 67endif
68 68
69cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
70
71# By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok 69# By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
72ifeq ($(atleast_gcc48),y) 70ifeq ($(atleast_gcc48),y)
73cflags-$(CONFIG_ARC_DW2_UNWIND) += -gdwarf-2 71cflags-$(CONFIG_ARC_DW2_UNWIND) += -gdwarf-2
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 86ed671286df..3749234b7419 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -95,7 +95,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
95{ 95{
96 pte_t *pte; 96 pte_t *pte;
97 97
98 pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 98 pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
99 __get_order_pte()); 99 __get_order_pte());
100 100
101 return pte; 101 return pte;
@@ -107,7 +107,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
107 pgtable_t pte_pg; 107 pgtable_t pte_pg;
108 struct page *page; 108 struct page *page;
109 109
110 pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte()); 110 pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
111 if (!pte_pg) 111 if (!pte_pg)
112 return 0; 112 return 0;
113 memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t)); 113 memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index e0efff15a5ae..b9192a653b7e 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -142,7 +142,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
142 * prelogue is setup (callee regs saved and then fp set and not other 142 * prelogue is setup (callee regs saved and then fp set and not other
143 * way around 143 * way around
144 */ 144 */
145 pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n"); 145 pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
146 return 0; 146 return 0;
147 147
148#endif 148#endif
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 06b6c2d695bf..414b42710a36 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -741,6 +741,7 @@ dtb-$(CONFIG_MACH_SUN7I) += \
741 sun7i-a20-olimex-som-evb.dtb \ 741 sun7i-a20-olimex-som-evb.dtb \
742 sun7i-a20-olinuxino-lime.dtb \ 742 sun7i-a20-olinuxino-lime.dtb \
743 sun7i-a20-olinuxino-lime2.dtb \ 743 sun7i-a20-olinuxino-lime2.dtb \
744 sun7i-a20-olinuxino-lime2-emmc.dtb \
744 sun7i-a20-olinuxino-micro.dtb \ 745 sun7i-a20-olinuxino-micro.dtb \
745 sun7i-a20-orangepi.dtb \ 746 sun7i-a20-orangepi.dtb \
746 sun7i-a20-orangepi-mini.dtb \ 747 sun7i-a20-orangepi-mini.dtb \
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 702126ff4a56..7fa295155543 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -788,7 +788,7 @@
788 status = "disabled"; 788 status = "disabled";
789 789
790 davinci_mdio: mdio@4a101000 { 790 davinci_mdio: mdio@4a101000 {
791 compatible = "ti,davinci_mdio"; 791 compatible = "ti,cpsw-mdio","ti,davinci_mdio";
792 #address-cells = <1>; 792 #address-cells = <1>;
793 #size-cells = <0>; 793 #size-cells = <0>;
794 ti,hwmods = "davinci_mdio"; 794 ti,hwmods = "davinci_mdio";
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index a10fa7f95442..cd81ecf12731 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -635,7 +635,7 @@
635 syscon = <&scm_conf>; 635 syscon = <&scm_conf>;
636 636
637 davinci_mdio: mdio@4a101000 { 637 davinci_mdio: mdio@4a101000 {
638 compatible = "ti,am4372-mdio","ti,davinci_mdio"; 638 compatible = "ti,am4372-mdio","ti,cpsw-mdio","ti,davinci_mdio";
639 reg = <0x4a101000 0x100>; 639 reg = <0x4a101000 0x100>;
640 #address-cells = <1>; 640 #address-cells = <1>;
641 #size-cells = <0>; 641 #size-cells = <0>;
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index d82dd6e3f9b1..5687d6b4da60 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -418,7 +418,7 @@
418 status = "okay"; 418 status = "okay";
419 pinctrl-names = "default"; 419 pinctrl-names = "default";
420 pinctrl-0 = <&i2c0_pins>; 420 pinctrl-0 = <&i2c0_pins>;
421 clock-frequency = <400000>; 421 clock-frequency = <100000>;
422 422
423 tps@24 { 423 tps@24 {
424 compatible = "ti,tps65218"; 424 compatible = "ti,tps65218";
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index b01a5948cdd0..0e63b9dff6e7 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -60,10 +60,26 @@
60 60
61 tps659038_pmic { 61 tps659038_pmic {
62 compatible = "ti,tps659038-pmic"; 62 compatible = "ti,tps659038-pmic";
63
64 smps12-in-supply = <&vmain>;
65 smps3-in-supply = <&vmain>;
66 smps45-in-supply = <&vmain>;
67 smps6-in-supply = <&vmain>;
68 smps7-in-supply = <&vmain>;
69 smps8-in-supply = <&vmain>;
70 smps9-in-supply = <&vmain>;
71 ldo1-in-supply = <&vmain>;
72 ldo2-in-supply = <&vmain>;
73 ldo3-in-supply = <&vmain>;
74 ldo4-in-supply = <&vmain>;
75 ldo9-in-supply = <&vmain>;
76 ldoln-in-supply = <&vmain>;
77 ldousb-in-supply = <&vmain>;
78 ldortc-in-supply = <&vmain>;
79
63 regulators { 80 regulators {
64 smps12_reg: smps12 { 81 smps12_reg: smps12 {
65 /* VDD_MPU */ 82 /* VDD_MPU */
66 vin-supply = <&vmain>;
67 regulator-name = "smps12"; 83 regulator-name = "smps12";
68 regulator-min-microvolt = <850000>; 84 regulator-min-microvolt = <850000>;
69 regulator-max-microvolt = <1250000>; 85 regulator-max-microvolt = <1250000>;
@@ -73,7 +89,6 @@

 			smps3_reg: smps3 {
 				/* VDD_DDR EMIF1 EMIF2 */
-				vin-supply = <&vmain>;
 				regulator-name = "smps3";
 				regulator-min-microvolt = <1350000>;
 				regulator-max-microvolt = <1350000>;
@@ -84,7 +99,6 @@
 			smps45_reg: smps45 {
 				/* VDD_DSPEVE on AM572 */
 				/* VDD_IVA + VDD_DSP on AM571 */
-				vin-supply = <&vmain>;
 				regulator-name = "smps45";
 				regulator-min-microvolt = <850000>;
 				regulator-max-microvolt = <1250000>;
@@ -94,7 +108,6 @@

 			smps6_reg: smps6 {
 				/* VDD_GPU */
-				vin-supply = <&vmain>;
 				regulator-name = "smps6";
 				regulator-min-microvolt = <850000>;
 				regulator-max-microvolt = <1250000>;
@@ -104,7 +117,6 @@

 			smps7_reg: smps7 {
 				/* VDD_CORE */
-				vin-supply = <&vmain>;
 				regulator-name = "smps7";
 				regulator-min-microvolt = <850000>;
 				regulator-max-microvolt = <1150000>;
@@ -115,13 +127,11 @@
 			smps8_reg: smps8 {
 				/* 5728 - VDD_IVAHD */
 				/* 5718 - N.C. test point */
-				vin-supply = <&vmain>;
 				regulator-name = "smps8";
 			};

 			smps9_reg: smps9 {
 				/* VDD_3_3D */
-				vin-supply = <&vmain>;
 				regulator-name = "smps9";
 				regulator-min-microvolt = <3300000>;
 				regulator-max-microvolt = <3300000>;
@@ -132,7 +142,6 @@
 			ldo1_reg: ldo1 {
 				/* VDDSHV8 - VSDMMC */
 				/* NOTE: on rev 1.3a, data supply */
-				vin-supply = <&vmain>;
 				regulator-name = "ldo1";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <3300000>;
@@ -142,7 +151,6 @@

 			ldo2_reg: ldo2 {
 				/* VDDSH18V */
-				vin-supply = <&vmain>;
 				regulator-name = "ldo2";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
@@ -152,7 +160,6 @@

 			ldo3_reg: ldo3 {
 				/* R1.3a 572x V1_8PHY_LDO3: USB, SATA */
-				vin-supply = <&vmain>;
 				regulator-name = "ldo3";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
@@ -162,7 +169,6 @@

 			ldo4_reg: ldo4 {
 				/* R1.3a 572x V1_8PHY_LDO4: PCIE, HDMI*/
-				vin-supply = <&vmain>;
 				regulator-name = "ldo4";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
@@ -174,7 +180,6 @@

 			ldo9_reg: ldo9 {
 				/* VDD_RTC */
-				vin-supply = <&vmain>;
 				regulator-name = "ldo9";
 				regulator-min-microvolt = <840000>;
 				regulator-max-microvolt = <1160000>;
@@ -184,7 +189,6 @@

 			ldoln_reg: ldoln {
 				/* VDDA_1V8_PLL */
-				vin-supply = <&vmain>;
 				regulator-name = "ldoln";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
@@ -194,7 +198,6 @@

 			ldousb_reg: ldousb {
 				/* VDDA_3V_USB: VDDA_USBHS33 */
-				vin-supply = <&vmain>;
 				regulator-name = "ldousb";
 				regulator-min-microvolt = <3300000>;
 				regulator-max-microvolt = <3300000>;
@@ -204,7 +207,6 @@

 			ldortc_reg: ldortc {
 				/* VDDA_RTC */
-				vin-supply = <&vmain>;
 				regulator-name = "ldortc";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
diff --git a/arch/arm/boot/dts/dm8148-evm.dts b/arch/arm/boot/dts/dm8148-evm.dts
index cbc17b0794b1..4128fa91823c 100644
--- a/arch/arm/boot/dts/dm8148-evm.dts
+++ b/arch/arm/boot/dts/dm8148-evm.dts
@@ -93,6 +93,10 @@
 	};
 };

+&mmc1 {
+	status = "disabled";
+};
+
 &mmc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&sd1_pins>;
@@ -101,6 +105,10 @@
 	cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
 };

+&mmc3 {
+	status = "disabled";
+};
+
 &pincntl {
 	sd1_pins: pinmux_sd1_pins {
 		pinctrl-single,pins = <
diff --git a/arch/arm/boot/dts/dm8148-t410.dts b/arch/arm/boot/dts/dm8148-t410.dts
index 5d4313fd5a46..3f184863e0c5 100644
--- a/arch/arm/boot/dts/dm8148-t410.dts
+++ b/arch/arm/boot/dts/dm8148-t410.dts
@@ -45,6 +45,14 @@
 	phy-mode = "rgmii";
 };

+&mmc1 {
+	status = "disabled";
+};
+
+&mmc2 {
+	status = "disabled";
+};
+
 &mmc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&sd2_pins>;
@@ -53,6 +61,7 @@
 	dmas = <&edma_xbar 8 0 1	/* use SDTXEVT1 instead of MCASP0TX */
 		&edma_xbar 9 0 2>;	/* use SDRXEVT1 instead of MCASP0RX */
 	dma-names = "tx", "rx";
+	non-removable;
 };

 &pincntl {
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index b7ddc645dd52..de559f6e4fee 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1451,6 +1451,8 @@
 			ti,hwmods = "gpmc";
 			reg = <0x50000000 0x37c>;	/* device IO registers */
 			interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+			dmas = <&edma_xbar 4 0>;
+			dma-names = "rxtx";
 			gpmc,num-cs = <8>;
 			gpmc,num-waitpins = <2>;
 			#address-cells = <2>;
@@ -1660,7 +1662,7 @@
 			status = "disabled";

 			davinci_mdio: mdio@48485000 {
-				compatible = "ti,davinci_mdio";
+				compatible = "ti,cpsw-mdio","ti,davinci_mdio";
 				#address-cells = <1>;
 				#size-cells = <0>;
 				ti,hwmods = "davinci_mdio";
diff --git a/arch/arm/boot/dts/dra74x.dtsi b/arch/arm/boot/dts/dra74x.dtsi
index 4220eeffc65a..5e06020f450b 100644
--- a/arch/arm/boot/dts/dra74x.dtsi
+++ b/arch/arm/boot/dts/dra74x.dtsi
@@ -107,8 +107,8 @@
 			reg = <0x58000000 0x80>,
 			      <0x58004054 0x4>,
 			      <0x58004300 0x20>,
-			      <0x58005054 0x4>,
-			      <0x58005300 0x20>;
+			      <0x58009054 0x4>,
+			      <0x58009300 0x20>;
 			reg-names = "dss", "pll1_clkctrl", "pll1",
 				    "pll2_clkctrl", "pll2";

diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
index ddfe1f558c10..fa14f77df563 100644
--- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
+++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
@@ -242,7 +242,7 @@
 	hpd-gpios = <&gpx0 7 GPIO_ACTIVE_HIGH>;

 	ports {
-		port0 {
+		port {
 			dp_out: endpoint {
 				remote-endpoint = <&bridge_in>;
 			};
@@ -485,13 +485,20 @@
 	edid-emulation = <5>;

 	ports {
-		port0 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		port@0 {
+			reg = <0>;
+
 			bridge_out: endpoint {
 				remote-endpoint = <&panel_in>;
 			};
 		};

-		port1 {
+		port@1 {
+			reg = <1>;
+
 			bridge_in: endpoint {
 				remote-endpoint = <&dp_out>;
 			};
diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
index f9d2e4f1a0e0..1de972d46a87 100644
--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
+++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
@@ -163,7 +163,7 @@
 	hpd-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>;

 	ports {
-		port0 {
+		port {
 			dp_out: endpoint {
 				remote-endpoint = <&bridge_in>;
 			};
@@ -631,13 +631,20 @@
 	use-external-pwm;

 	ports {
-		port0 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		port@0 {
+			reg = <0>;
+
 			bridge_out: endpoint {
 				remote-endpoint = <&panel_in>;
 			};
 		};

-		port1 {
+		port@1 {
+			reg = <1>;
+
 			bridge_in: endpoint {
 				remote-endpoint = <&dp_out>;
 			};
diff --git a/arch/arm/boot/dts/omap3-evm-37xx.dts b/arch/arm/boot/dts/omap3-evm-37xx.dts
index 76056ba92ced..ed449827c3d3 100644
--- a/arch/arm/boot/dts/omap3-evm-37xx.dts
+++ b/arch/arm/boot/dts/omap3-evm-37xx.dts
@@ -85,7 +85,7 @@
 			OMAP3_CORE1_IOPAD(0x2158, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk.sdmmc2_clk */
 			OMAP3_CORE1_IOPAD(0x215a, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd.sdmmc2_cmd */
 			OMAP3_CORE1_IOPAD(0x215c, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0.sdmmc2_dat0 */
-			OMAP3_CORE1_IOPAD(0x215e, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
+			OMAP3_CORE1_IOPAD(0x215e, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
 			OMAP3_CORE1_IOPAD(0x2160, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */
 			OMAP3_CORE1_IOPAD(0x2162, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */
 		>;
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index 41f5d386f21f..f4f2ce46d681 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -188,6 +188,7 @@
 	vmmc-supply = <&vmmc1>;
 	vmmc_aux-supply = <&vsim>;
 	bus-width = <4>;
+	cd-gpios = <&twl_gpio 0 GPIO_ACTIVE_LOW>;
 };

 &mmc3 {
diff --git a/arch/arm/boot/dts/omap3-igep0020-common.dtsi b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
index d6f839cab649..b6971060648a 100644
--- a/arch/arm/boot/dts/omap3-igep0020-common.dtsi
+++ b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
@@ -194,6 +194,12 @@
 			OMAP3630_CORE2_IOPAD(0x25f8, PIN_OUTPUT | MUX_MODE4) /* etk_d14.gpio_28 */
 		>;
 	};
+
+	mmc1_wp_pins: pinmux_mmc1_cd_pins {
+		pinctrl-single,pins = <
+			OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT | MUX_MODE4) /* etk_d15.gpio_29 */
+		>;
+	};
 };

 &i2c3 {
@@ -250,3 +256,8 @@
 		};
 	};
 };
+
+&mmc1 {
+	pinctrl-0 = <&mmc1_pins &mmc1_wp_pins>;
+	wp-gpios = <&gpio1 29 GPIO_ACTIVE_LOW>;	/* gpio_29 */
+};
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index d9e2d9c6e999..2b74a81d1de2 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -288,7 +288,7 @@
 		pinctrl-single,pins = <
 			OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */
 			OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1) /* ssi1_flag_tx */
-			OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+			OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4) /* ssi1_wake_tx (cawake) */
 			OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1) /* ssi1_dat_tx */
 			OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1) /* ssi1_dat_rx */
 			OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1) /* ssi1_flag_rx */
@@ -300,7 +300,7 @@
 	modem_pins: pinmux_modem {
 		pinctrl-single,pins = <
 			OMAP3_CORE1_IOPAD(0x20dc, PIN_OUTPUT | MUX_MODE4) /* gpio 70 => cmt_apeslpx */
-			OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* gpio 72 => ape_rst_rq */
+			OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | MUX_MODE4) /* gpio 72 => ape_rst_rq */
 			OMAP3_CORE1_IOPAD(0x20e2, PIN_OUTPUT | MUX_MODE4) /* gpio 73 => cmt_rst_rq */
 			OMAP3_CORE1_IOPAD(0x20e4, PIN_OUTPUT | MUX_MODE4) /* gpio 74 => cmt_en */
 			OMAP3_CORE1_IOPAD(0x20e6, PIN_OUTPUT | MUX_MODE4) /* gpio 75 => cmt_rst */
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index a00ca761675d..927b17fc4ed8 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -97,7 +97,7 @@
 			OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1) /* ssi1_dat_tx */
 			OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1) /* ssi1_flag_tx */
 			OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */
-			OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+			OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4) /* ssi1_wake_tx (cawake) */
 			OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1) /* ssi1_dat_rx */
 			OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1) /* ssi1_flag_rx */
 			OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE1) /* ssi1_rdy_rx */
@@ -110,7 +110,7 @@
 			OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE7) /* ssi1_dat_tx */
 			OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE7) /* ssi1_flag_tx */
 			OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLDOWN | MUX_MODE7) /* ssi1_rdy_tx */
-			OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+			OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4) /* ssi1_wake_tx (cawake) */
 			OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE7) /* ssi1_dat_rx */
 			OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE7) /* ssi1_flag_rx */
 			OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE4) /* ssi1_rdy_rx */
@@ -120,7 +120,7 @@

 	modem_pins1: pinmux_modem_core1_pins {
 		pinctrl-single,pins = <
-			OMAP3_CORE1_IOPAD(0x207a, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* gpio_34 (ape_rst_rq) */
+			OMAP3_CORE1_IOPAD(0x207a, PIN_INPUT | MUX_MODE4) /* gpio_34 (ape_rst_rq) */
 			OMAP3_CORE1_IOPAD(0x2100, PIN_OUTPUT | MUX_MODE4) /* gpio_88 (cmt_rst_rq) */
 			OMAP3_CORE1_IOPAD(0x210a, PIN_OUTPUT | MUX_MODE4) /* gpio_93 (cmt_apeslpx) */
 		>;
diff --git a/arch/arm/boot/dts/omap3-zoom3.dts b/arch/arm/boot/dts/omap3-zoom3.dts
index f19170bdcc1f..c29b41dc7b95 100644
--- a/arch/arm/boot/dts/omap3-zoom3.dts
+++ b/arch/arm/boot/dts/omap3-zoom3.dts
@@ -98,7 +98,7 @@
 		pinctrl-single,pins = <
 			OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_cts.uart2_cts */
 			OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0) /* uart2_rts.uart2_rts */
-			OMAP3_CORE1_IOPAD(0x217a, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */
+			OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */
 			OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0) /* uart2_tx.uart2_tx */
 		>;
 	};
@@ -107,7 +107,7 @@
 		pinctrl-single,pins = <
 			OMAP3_CORE1_IOPAD(0x219a, PIN_INPUT_PULLDOWN | MUX_MODE0) /* uart3_cts_rctx.uart3_cts_rctx */
 			OMAP3_CORE1_IOPAD(0x219c, PIN_OUTPUT | MUX_MODE0) /* uart3_rts_sd.uart3_rts_sd */
-			OMAP3_CORE1_IOPAD(0x219e, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
+			OMAP3_CORE1_IOPAD(0x219e, PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
 			OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0) /* uart3_tx_irtx.uart3_tx_irtx */
 		>;
 	};
@@ -125,7 +125,7 @@
 		pinctrl-single,pins = <
 			OMAP3630_CORE2_IOPAD(0x25d8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_clk.sdmmc3_clk */
 			OMAP3630_CORE2_IOPAD(0x25e4, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d4.sdmmc3_dat0 */
-			OMAP3630_CORE2_IOPAD(0x25e6, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d5.sdmmc3_dat1 */
+			OMAP3630_CORE2_IOPAD(0x25e6, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d5.sdmmc3_dat1 */
 			OMAP3630_CORE2_IOPAD(0x25e8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d6.sdmmc3_dat2 */
 			OMAP3630_CORE2_IOPAD(0x25e2, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d3.sdmmc3_dat3 */
 		>;
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index dc759a3028b7..5d5b620b7d9b 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -14,6 +14,29 @@
 		display0 = &hdmi0;
 	};

+	vmain: fixedregulator-vmain {
+		compatible = "regulator-fixed";
+		regulator-name = "vmain";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+	};
+
+	vsys_cobra: fixedregulator-vsys_cobra {
+		compatible = "regulator-fixed";
+		regulator-name = "vsys_cobra";
+		vin-supply = <&vmain>;
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+	};
+
+	vdds_1v8_main: fixedregulator-vdds_1v8_main {
+		compatible = "regulator-fixed";
+		regulator-name = "vdds_1v8_main";
+		vin-supply = <&smps7_reg>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
 	vmmcsd_fixed: fixedregulator-mmcsd {
 		compatible = "regulator-fixed";
 		regulator-name = "vmmcsd_fixed";
@@ -309,7 +332,7 @@

 	wlcore_irq_pin: pinmux_wlcore_irq_pin {
 		pinctrl-single,pins = <
-			OMAP5_IOPAD(0x40, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE6) /* llia_wakereqin.gpio1_wk14 */
+			OMAP5_IOPAD(0x40, PIN_INPUT_PULLUP | MUX_MODE6) /* llia_wakereqin.gpio1_wk14 */
 		>;
 	};
 };
@@ -409,6 +432,26 @@

 	ti,ldo6-vibrator;

+	smps123-in-supply = <&vsys_cobra>;
+	smps45-in-supply = <&vsys_cobra>;
+	smps6-in-supply = <&vsys_cobra>;
+	smps7-in-supply = <&vsys_cobra>;
+	smps8-in-supply = <&vsys_cobra>;
+	smps9-in-supply = <&vsys_cobra>;
+	smps10_out2-in-supply = <&vsys_cobra>;
+	smps10_out1-in-supply = <&vsys_cobra>;
+	ldo1-in-supply = <&vsys_cobra>;
+	ldo2-in-supply = <&vsys_cobra>;
+	ldo3-in-supply = <&vdds_1v8_main>;
+	ldo4-in-supply = <&vdds_1v8_main>;
+	ldo5-in-supply = <&vsys_cobra>;
+	ldo6-in-supply = <&vdds_1v8_main>;
+	ldo7-in-supply = <&vsys_cobra>;
+	ldo8-in-supply = <&vsys_cobra>;
+	ldo9-in-supply = <&vmmcsd_fixed>;
+	ldoln-in-supply = <&vsys_cobra>;
+	ldousb-in-supply = <&vsys_cobra>;
+
 	regulators {
 		smps123_reg: smps123 {
 			/* VDD_OPP_MPU */
@@ -600,7 +643,8 @@
 	pinctrl-0 = <&twl6040_pins>;

 	interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
-	ti,audpwron-gpio = <&gpio5 13 GPIO_ACTIVE_HIGH>; /* gpio line 141 */
+
+	/* audpwron gpio defined in the board specific dts */

 	vio-supply = <&smps7_reg>;
 	v2v1-supply = <&smps9_reg>;
diff --git a/arch/arm/boot/dts/omap5-igep0050.dts b/arch/arm/boot/dts/omap5-igep0050.dts
index 46ecb1dd3b5c..f75ce02fb398 100644
--- a/arch/arm/boot/dts/omap5-igep0050.dts
+++ b/arch/arm/boot/dts/omap5-igep0050.dts
@@ -35,6 +35,22 @@
 	};
 };

+/* LDO4 is VPP1 - ball AD9 */
+&ldo4_reg {
+	regulator-min-microvolt = <2000000>;
+	regulator-max-microvolt = <2000000>;
+};
+
+/*
+ * LDO7 is used for HDMI: VDDA_DSIPORTA - ball AA33, VDDA_DSIPORTC - ball AE33,
+ * VDDA_HDMI - ball AN25
+ */
+&ldo7_reg {
+	status = "okay";
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+};
+
 &omap5_pmx_core {
 	i2c4_pins: pinmux_i2c4_pins {
 		pinctrl-single,pins = <
@@ -52,3 +68,13 @@
 		<&gpio7 3 0>;	/* 195, SDA */
 };

+&twl6040 {
+	ti,audpwron-gpio = <&gpio5 16 GPIO_ACTIVE_HIGH>; /* gpio line 144 */
+};
+
+&twl6040_pins {
+	pinctrl-single,pins = <
+		OMAP5_IOPAD(0x1c4, PIN_OUTPUT | MUX_MODE6) /* mcspi1_somi.gpio5_144 */
+		OMAP5_IOPAD(0x1ca, PIN_OUTPUT | MUX_MODE6) /* perslimbus2_clock.gpio5_145 */
+	>;
+};
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 60b3fbb3bf07..a51e60518eb6 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -51,3 +51,13 @@
 		<&gpio9 1 GPIO_ACTIVE_HIGH>,	/* TCA6424A P00, LS OE */
 		<&gpio7 1 GPIO_ACTIVE_HIGH>;	/* GPIO 193, HPD */
 };
+
+&twl6040 {
+	ti,audpwron-gpio = <&gpio5 13 GPIO_ACTIVE_HIGH>; /* gpio line 141 */
+};
+
+&twl6040_pins {
+	pinctrl-single,pins = <
+		OMAP5_IOPAD(0x1be, PIN_OUTPUT | MUX_MODE6) /* mcspi1_somi.gpio5_141 */
+	>;
+};
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
index a3601e4c0a2e..b844473601d2 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
@@ -136,6 +136,7 @@
 &gmac1 {
 	status = "okay";
 	phy-mode = "rgmii";
+	phy-handle = <&phy1>;

 	snps,reset-gpio = <&porta 0 GPIO_ACTIVE_LOW>;
 	snps,reset-active-low;
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index ad8ba10764a3..d294e82447a2 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -24,18 +24,21 @@
 			compatible = "shared-dma-pool";
 			reg = <0x40000000 0x01000000>;
 			no-map;
+			status = "disabled";
 		};

 		gp1_reserved: rproc@41000000 {
 			compatible = "shared-dma-pool";
 			reg = <0x41000000 0x01000000>;
 			no-map;
+			status = "disabled";
 		};

 		audio_reserved: rproc@42000000 {
 			compatible = "shared-dma-pool";
 			reg = <0x42000000 0x01000000>;
 			no-map;
+			status = "disabled";
 		};

 		dmu_reserved: rproc@43000000 {
diff --git a/arch/arm/boot/dts/sun6i-a31s-primo81.dts b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
index 68b479b8772c..73c133f5e79c 100644
--- a/arch/arm/boot/dts/sun6i-a31s-primo81.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
@@ -176,8 +176,6 @@
 };

 &reg_dc1sw {
-	regulator-min-microvolt = <3000000>;
-	regulator-max-microvolt = <3000000>;
 	regulator-name = "vcc-lcd";
 };

diff --git a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
index 360adfb1e9ca..d6ad6196a768 100644
--- a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
@@ -135,8 +135,6 @@

 &reg_dc1sw {
 	regulator-name = "vcc-lcd-usb2";
-	regulator-min-microvolt = <3000000>;
-	regulator-max-microvolt = <3000000>;
 };

 &reg_dc5ldo {
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 10f49ab5328e..47195e8690b4 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -82,6 +82,7 @@ CONFIG_TOUCHSCREEN_MMS114=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_MAX77693_HAPTIC=y
 CONFIG_INPUT_MAX8997_HAPTIC=y
+CONFIG_KEYBOARD_SAMSUNG=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_SAMSUNG=y
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 8f857564657f..8a5fff1b7f6f 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -264,6 +264,7 @@ CONFIG_KEYBOARD_TEGRA=y
 CONFIG_KEYBOARD_SPEAR=y
 CONFIG_KEYBOARD_ST_KEYSCAN=y
 CONFIG_KEYBOARD_CROS_EC=m
+CONFIG_KEYBOARD_SAMSUNG=m
 CONFIG_MOUSE_PS2_ELANTECH=y
 CONFIG_MOUSE_CYAPA=m
 CONFIG_MOUSE_ELAN_I2C=y
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 19cfab526d13..20febb368844 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -29,7 +29,7 @@

 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+	return (pmd_t *)get_zeroed_page(GFP_KERNEL);
 }

 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index aeddd28b3595..92fd2c8a9af0 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -193,6 +193,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)

 #define pmd_large(pmd)		(pmd_val(pmd) & 2)
 #define pmd_bad(pmd)		(pmd_val(pmd) & 2)
+#define pmd_present(pmd)	(pmd_val(pmd))

 #define copy_pmd(pmdpd,pmdps)		\
 	do {				\
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index fa70db7c714b..2a029bceaf2f 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -211,6 +211,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 						: !!(pmd_val(pmd) & (val)))
 #define pmd_isclear(pmd, val)	(!(pmd_val(pmd) & (val)))

+#define pmd_present(pmd)	(pmd_isset((pmd), L_PMD_SECT_VALID))
 #define pmd_young(pmd)		(pmd_isset((pmd), PMD_SECT_AF))
 #define pte_special(pte)	(pte_isset((pte), L_PTE_SPECIAL))
 static inline pte_t pte_mkspecial(pte_t pte)
@@ -249,10 +250,10 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
 #define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
 #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

-/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
+/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
 static inline pmd_t pmd_mknotpresent(pmd_t pmd)
 {
-	return __pmd(0);
+	return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
 }

 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 348caabb7625..d62204060cbe 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 #define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

 #define pmd_none(pmd)		(!pmd_val(pmd))
-#define pmd_present(pmd)	(pmd_val(pmd))

 static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 {
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index df90bc59bfce..861521606c6d 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -486,7 +486,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {

 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
-	trace_ipi_raise(target, ipi_types[ipinr]);
+	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
 	__smp_cross_call(target, ipinr);
 }

diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 893941ec98dc..f1bde7c4e736 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -263,6 +263,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 	kvm_timer_vcpu_terminate(vcpu);
 	kvm_vgic_vcpu_destroy(vcpu);
 	kvm_pmu_vcpu_destroy(vcpu);
+	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }

diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index e65aa7d11b20..20dcf6e904b2 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -61,7 +61,6 @@ config ARCH_EXYNOS4
 	select CLKSRC_SAMSUNG_PWM if CPU_EXYNOS4210
 	select CPU_EXYNOS4210
 	select GIC_NON_BANKED
-	select KEYBOARD_SAMSUNG if INPUT_KEYBOARD
 	select MIGHT_HAVE_CACHE_L2X0
 	help
 	  Samsung EXYNOS4 (Cortex-A9) SoC based systems
diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c
index a38b16b69923..b56de4b8cdf2 100644
--- a/arch/arm/mach-imx/mach-imx6ul.c
+++ b/arch/arm/mach-imx/mach-imx6ul.c
@@ -46,7 +46,7 @@ static int ksz8081_phy_fixup(struct phy_device *dev)
 static void __init imx6ul_enet_phy_init(void)
 {
 	if (IS_BUILTIN(CONFIG_PHYLIB))
-		phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff,
+		phy_register_fixup_for_uid(PHY_ID_KSZ8081, MICREL_PHY_ID_MASK,
 					   ksz8081_phy_fixup);
 }

diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
index 5d7fb596bf4a..bf608441b357 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
@@ -43,8 +43,8 @@
 #define OTHERS_MASK	(MODEM_IRQ_MASK | HOOK_SWITCH_MASK)

 /* IRQ handler register bitmasks */
-#define DEFERRED_FIQ_MASK	(0x1 << (INT_DEFERRED_FIQ % IH2_BASE))
-#define GPIO_BANK1_MASK		(0x1 << INT_GPIO_BANK1)
+#define DEFERRED_FIQ_MASK	OMAP_IRQ_BIT(INT_DEFERRED_FIQ)
+#define GPIO_BANK1_MASK		OMAP_IRQ_BIT(INT_GPIO_BANK1)

 /* Driver buffer byte offsets */
 #define BUF_MASK	(FIQ_MASK * 4)
@@ -110,7 +110,7 @@ ENTRY(qwerty_fiqin_start)
 	mov r8, #2			@ reset FIQ agreement
 	str r8, [r12, #IRQ_CONTROL_REG_OFFSET]

-	cmp r10, #INT_GPIO_BANK1	@ is it GPIO bank interrupt?
+	cmp r10, #(INT_GPIO_BANK1 - NR_IRQS_LEGACY)	@ is it GPIO interrupt?
 	beq gpio			@ yes - process it

 	mov r8, #1
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
index d1f12095f315..ec760ae2f917 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq.c
+++ b/arch/arm/mach-omap1/ams-delta-fiq.c
@@ -109,7 +109,8 @@ void __init ams_delta_init_fiq(void)
 	 * Since no set_type() method is provided by OMAP irq chip,
 	 * switch to edge triggered interrupt type manually.
 	 */
-	offset = IRQ_ILR0_REG_OFFSET + INT_DEFERRED_FIQ * 0x4;
+	offset = IRQ_ILR0_REG_OFFSET +
+			((INT_DEFERRED_FIQ - NR_IRQS_LEGACY) & 0x1f) * 0x4;
 	val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1);
 	omap_writel(val, DEFERRED_FIQ_IH_BASE + offset);

@@ -149,7 +150,7 @@ void __init ams_delta_init_fiq(void)
 	/*
 	 * Redirect GPIO interrupts to FIQ
 	 */
-	offset = IRQ_ILR0_REG_OFFSET + INT_GPIO_BANK1 * 0x4;
+	offset = IRQ_ILR0_REG_OFFSET + (INT_GPIO_BANK1 - NR_IRQS_LEGACY) * 0x4;
 	val = omap_readl(OMAP_IH1_BASE + offset) | 1;
 	omap_writel(val, OMAP_IH1_BASE + offset);
 }
diff --git a/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h b/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
index adb5e7649659..6dfc3e1210a3 100644
--- a/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
+++ b/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
@@ -14,6 +14,8 @@
 #ifndef __AMS_DELTA_FIQ_H
 #define __AMS_DELTA_FIQ_H

+#include <mach/irqs.h>
+
 /*
  * Interrupt number used for passing control from FIQ to IRQ.
  * IRQ12, described as reserved, has been selected.
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 0517f0c1581a..1a648e9dfaa0 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -17,6 +17,7 @@ config ARCH_OMAP3
 	select PM_OPP if PM
 	select PM if CPU_IDLE
 	select SOC_HAS_OMAP2_SDRC
+	select ARM_ERRATA_430973

 config ARCH_OMAP4
 	bool "TI OMAP4"
@@ -36,6 +37,7 @@ config ARCH_OMAP4
 	select PM if CPU_IDLE
 	select ARM_ERRATA_754322
 	select ARM_ERRATA_775420
+	select OMAP_INTERCONNECT

 config SOC_OMAP5
 	bool "TI OMAP5"
@@ -67,6 +69,8 @@ config SOC_AM43XX
 	select HAVE_ARM_SCU
 	select GENERIC_CLOCKEVENTS_BROADCAST
 	select HAVE_ARM_TWD
+	select ARM_ERRATA_754322
+	select ARM_ERRATA_775420

 config SOC_DRA7XX
 	bool "TI DRA7XX"
@@ -240,4 +244,12 @@ endmenu

 endif

+config OMAP5_ERRATA_801819
+	bool "Errata 801819: An eviction from L1 data cache might stall indefinitely"
+	depends on SOC_OMAP5 || SOC_DRA7XX
+	help
+	  A livelock can occur in the L2 cache arbitration that might prevent
+	  a snoop from completing. Under certain conditions this can cause the
+	  system to deadlock.
+
 endmenu
diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
index af2851fbcdf0..bae263fba640 100644
--- a/arch/arm/mach-omap2/omap-secure.h
+++ b/arch/arm/mach-omap2/omap-secure.h
@@ -46,6 +46,7 @@

 #define OMAP5_DRA7_MON_SET_CNTFRQ_INDEX	0x109
 #define OMAP5_MON_AMBA_IF_INDEX		0x108
+#define OMAP5_DRA7_MON_SET_ACR_INDEX	0x107

 /* Secure PPA(Primary Protected Application) APIs */
 #define OMAP4_PPA_L2_POR_INDEX		0x23
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index c625cc10d9f9..8cd1de914ee4 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -50,6 +50,39 @@ void __iomem *omap4_get_scu_base(void)
 	return scu_base;
 }

+#ifdef CONFIG_OMAP5_ERRATA_801819
+void omap5_erratum_workaround_801819(void)
+{
+	u32 acr, revidr;
+	u32 acr_mask;
+
+	/* REVIDR[3] indicates erratum fix available on silicon */
+	asm volatile ("mrc p15, 0, %0, c0, c0, 6" : "=r" (revidr));
+	if (revidr & (0x1 << 3))
+		return;
+
+	asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
+	/*
+	 * BIT(27) - Disables streaming. All write-allocate lines allocate in
+	 * the L1 or L2 cache.
+	 * BIT(25) - Disables streaming. All write-allocate lines allocate in
+	 * the L1 cache.
+	 */
+	acr_mask = (0x3 << 25) | (0x3 << 27);
+	/* do we already have it done.. if yes, skip expensive smc */
+	if ((acr & acr_mask) == acr_mask)
+		return;
+
+	acr |= acr_mask;
+	omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
+
+	pr_debug("%s: ARM erratum workaround 801819 applied on CPU%d\n",
+		 __func__, smp_processor_id());
+}
+#else
+static inline void omap5_erratum_workaround_801819(void) { }
+#endif
+
 static void omap4_secondary_init(unsigned int cpu)
 {
 	/*
@@ -64,12 +97,15 @@ static void omap4_secondary_init(unsigned int cpu)
 	omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
 							4, 0, 0, 0, 0, 0);

-	/*
-	 * Configure the CNTFRQ register for the secondary cpu's which
-	 * indicates the frequency of the cpu local timers.
-	 */
-	if (soc_is_omap54xx() || soc_is_dra7xx())
-		set_cntfreq();
+	if (soc_is_omap54xx() || soc_is_dra7xx()) {
+		/*
+		 * Configure the CNTFRQ register for the secondary cpu's which
+		 * indicates the frequency of the cpu local timers.
+		 */
+		set_cntfreq();
+		/* Configure ACR to disable streaming WA for 801819 */
+		omap5_erratum_workaround_801819();
+	}

 	/*
 	 * Synchronise with the boot thread.
@@ -218,6 +254,8 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)

 	if (cpu_is_omap446x())
 		startup_addr = omap4460_secondary_startup;
+	if (soc_is_dra74x() || soc_is_omap54xx())
+		omap5_erratum_workaround_801819();

 	/*
 	 * Write the address of secondary startup routine into the
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 78af6d8cf2e2..daf2753de7aa 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -186,8 +186,9 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag)
 			trace_state = (PWRDM_TRACE_STATES_FLAG |
 				       ((next & OMAP_POWERSTATE_MASK) << 8) |
 				       ((prev & OMAP_POWERSTATE_MASK) << 0));
-			trace_power_domain_target(pwrdm->name, trace_state,
-						  smp_processor_id());
+			trace_power_domain_target_rcuidle(pwrdm->name,
+							  trace_state,
+							  smp_processor_id());
 		}
 		break;
 	default:
@@ -523,8 +524,8 @@ int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)

 	if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) {
 		/* Trace the pwrdm desired target state */
-		trace_power_domain_target(pwrdm->name, pwrst,
-					  smp_processor_id());
+		trace_power_domain_target_rcuidle(pwrdm->name, pwrst,
+						  smp_processor_id());
 		/* Program the pwrdm desired target state */
 		ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst);
 	}
diff --git a/arch/arm/mach-omap2/powerdomains7xx_data.c b/arch/arm/mach-omap2/powerdomains7xx_data.c
index 0ec2d00f4237..eb350a673133 100644
--- a/arch/arm/mach-omap2/powerdomains7xx_data.c
+++ b/arch/arm/mach-omap2/powerdomains7xx_data.c
@@ -36,14 +36,7 @@ static struct powerdomain iva_7xx_pwrdm = {
 	.prcm_offs	  = DRA7XX_PRM_IVA_INST,
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
 	.pwrsts		  = PWRSTS_OFF_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF,
 	.banks		  = 4,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* hwa_mem */
-		[1] = PWRSTS_OFF_RET,	/* sl2_mem */
-		[2] = PWRSTS_OFF_RET,	/* tcm1_mem */
-		[3] = PWRSTS_OFF_RET,	/* tcm2_mem */
-	},
 	.pwrsts_mem_on	= {
 		[0] = PWRSTS_ON,	/* hwa_mem */
 		[1] = PWRSTS_ON,	/* sl2_mem */
@@ -76,12 +69,7 @@ static struct powerdomain ipu_7xx_pwrdm = {
 	.prcm_offs	  = DRA7XX_PRM_IPU_INST,
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
 	.pwrsts		  = PWRSTS_OFF_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF,
 	.banks		  = 2,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* aessmem */
-		[1] = PWRSTS_OFF_RET,	/* periphmem */
-	},
 	.pwrsts_mem_on	= {
 		[0] = PWRSTS_ON,	/* aessmem */
 		[1] = PWRSTS_ON,	/* periphmem */
@@ -95,11 +83,7 @@ static struct powerdomain dss_7xx_pwrdm = {
 	.prcm_offs	  = DRA7XX_PRM_DSS_INST,
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
 	.pwrsts		  = PWRSTS_OFF_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF,
 	.banks		  = 1,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* dss_mem */
-	},
 	.pwrsts_mem_on	= {
 		[0] = PWRSTS_ON,	/* dss_mem */
 	},
@@ -111,13 +95,8 @@ static struct powerdomain l4per_7xx_pwrdm = {
 	.name		  = "l4per_pwrdm",
 	.prcm_offs	  = DRA7XX_PRM_L4PER_INST,
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
-	.pwrsts		  = PWRSTS_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_RET,
+	.pwrsts		  = PWRSTS_ON,
 	.banks		  = 2,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* nonretained_bank */
-		[1] = PWRSTS_OFF_RET,	/* retained_bank */
-	},
 	.pwrsts_mem_on	= {
 		[0] = PWRSTS_ON,	/* nonretained_bank */
 		[1] = PWRSTS_ON,	/* retained_bank */
@@ -132,9 +111,6 @@ static struct powerdomain gpu_7xx_pwrdm = {
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
 	.pwrsts		  = PWRSTS_OFF_ON,
 	.banks		  = 1,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* gpu_mem */
-	},
 	.pwrsts_mem_on	= {
 		[0] = PWRSTS_ON,	/* gpu_mem */
 	},
@@ -148,8 +124,6 @@ static struct powerdomain wkupaon_7xx_pwrdm = {
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
 	.pwrsts		  = PWRSTS_ON,
 	.banks		  = 1,
-	.pwrsts_mem_ret	= {
-	},
 	.pwrsts_mem_on	= {
 		[0] = PWRSTS_ON,	/* wkup_bank */
 	},
@@ -161,15 +135,7 @@ static struct powerdomain core_7xx_pwrdm = {
 	.prcm_offs	  = DRA7XX_PRM_CORE_INST,
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
 	.pwrsts		  = PWRSTS_ON,
-	.pwrsts_logic_ret = PWRSTS_RET,
 	.banks		  = 5,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* core_nret_bank */
-		[1] = PWRSTS_OFF_RET,	/* core_ocmram */
-		[2] = PWRSTS_OFF_RET,	/* core_other_bank */
-		[3] = PWRSTS_OFF_RET,	/* ipu_l2ram */
-		[4] = PWRSTS_OFF_RET,	/* ipu_unicache */
-	},
 	.pwrsts_mem_on	= {
 		[0] = PWRSTS_ON,	/* core_nret_bank */
 		[1] = PWRSTS_ON,	/* core_ocmram */
@@ -226,11 +192,7 @@ static struct powerdomain vpe_7xx_pwrdm = {
 	.prcm_offs	  = DRA7XX_PRM_VPE_INST,
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
 	.pwrsts		  = PWRSTS_OFF_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF,
 	.banks		  = 1,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* vpe_bank */
-	},
 	.pwrsts_mem_on	= {
 		[0] = PWRSTS_ON,	/* vpe_bank */
 	},
@@ -260,14 +222,8 @@ static struct powerdomain l3init_7xx_pwrdm = {
 	.name		  = "l3init_pwrdm",
 	.prcm_offs	  = DRA7XX_PRM_L3INIT_INST,
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
-	.pwrsts		  = PWRSTS_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_RET,
+	.pwrsts		  = PWRSTS_ON,
 	.banks		  = 3,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* gmac_bank */
-		[1] = PWRSTS_OFF_RET,	/* l3init_bank1 */
-		[2] = PWRSTS_OFF_RET,	/* l3init_bank2 */
-	},
 	.pwrsts_mem_on	= {
 		[0] = PWRSTS_ON,	/* gmac_bank */
 		[1] = PWRSTS_ON,	/* l3init_bank1 */
@@ -283,9 +239,6 @@ static struct powerdomain eve3_7xx_pwrdm = {
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
 	.pwrsts		  = PWRSTS_OFF_ON,
 	.banks		  = 1,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* eve3_bank */
-	},
 	.pwrsts_mem_on	= {
 		[0] = PWRSTS_ON,	/* eve3_bank */
 	},
@@ -299,9 +252,6 @@ static struct powerdomain emu_7xx_pwrdm = {
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
 	.pwrsts		  = PWRSTS_OFF_ON,
 	.banks		  = 1,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* emu_bank */
-	},
 	.pwrsts_mem_on	= {
 		[0] = PWRSTS_ON,	/* emu_bank */
 	},
@@ -314,11 +264,6 @@ static struct powerdomain dsp2_7xx_pwrdm = {
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
 	.pwrsts		  = PWRSTS_OFF_ON,
 	.banks		  = 3,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* dsp2_edma */
-		[1] = PWRSTS_OFF_RET,	/* dsp2_l1 */
-		[2] = PWRSTS_OFF_RET,	/* dsp2_l2 */
-	},
 	.pwrsts_mem_on	= {
 		[0] = PWRSTS_ON,	/* dsp2_edma */
 		[1] = PWRSTS_ON,	/* dsp2_l1 */
@@ -334,11 +279,6 @@ static struct powerdomain dsp1_7xx_pwrdm = {
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
 	.pwrsts		  = PWRSTS_OFF_ON,
 	.banks		  = 3,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* dsp1_edma */
-		[1] = PWRSTS_OFF_RET,	/* dsp1_l1 */
-		[2] = PWRSTS_OFF_RET,	/* dsp1_l2 */
-	},
 	.pwrsts_mem_on	= {
 		[0] = PWRSTS_ON,	/* dsp1_edma */
 		[1] = PWRSTS_ON,	/* dsp1_l1 */
@@ -354,9 +294,6 @@ static struct powerdomain cam_7xx_pwrdm = {
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
 	.pwrsts		  = PWRSTS_OFF_ON,
 	.banks		  = 1,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* vip_bank */
-	},
 	.pwrsts_mem_on	= {
 		[0] = PWRSTS_ON,	/* vip_bank */
 	},
@@ -370,9 +307,6 @@ static struct powerdomain eve4_7xx_pwrdm = {
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
 	.pwrsts		  = PWRSTS_OFF_ON,
 	.banks		  = 1,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* eve4_bank */
-	},
 	.pwrsts_mem_on	= {
 		[0] = PWRSTS_ON,	/* eve4_bank */
 	},
@@ -386,9 +320,6 @@ static struct powerdomain eve2_7xx_pwrdm = {
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
 	.pwrsts		  = PWRSTS_OFF_ON,
 	.banks		  = 1,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* eve2_bank */
-	},
 	.pwrsts_mem_on	= {
 		[0] = PWRSTS_ON,	/* eve2_bank */
 	},
@@ -402,9 +333,6 @@ static struct powerdomain eve1_7xx_pwrdm = {
 	.prcm_partition	  = DRA7XX_PRM_PARTITION,
 	.pwrsts		  = PWRSTS_OFF_ON,
 	.banks		  = 1,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* eve1_bank */
-	},
 	.pwrsts_mem_on	= {
 		[0] = PWRSTS_ON,	/* eve1_bank */
 	},
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 5b385bb8aff9..cb9497a20fb3 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -496,8 +496,7 @@ void __init omap_init_time(void)
 	__omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",
 				  2, "timer_sys_ck", NULL, false);

-	if (of_have_populated_dt())
-		clocksource_probe();
+	clocksource_probe();
 }

 #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM43XX)
@@ -505,6 +504,8 @@ void __init omap3_secure_sync32k_timer_init(void)
 {
 	__omap_sync32k_timer_init(12, "secure_32k_fck", "ti,timer-secure",
 				  2, "timer_sys_ck", NULL, false);
+
+	clocksource_probe();
 }
 #endif /* CONFIG_ARCH_OMAP3 */

@@ -513,6 +514,8 @@ void __init omap3_gptimer_timer_init(void)
 {
 	__omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
 				  1, "timer_sys_ck", "ti,timer-alwon", true);
+
+	clocksource_probe();
 }
 #endif

diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 84baa16f4c0b..e93aa6734147 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -68,7 +68,7 @@
68#include <linux/platform_data/asoc-s3c.h> 68#include <linux/platform_data/asoc-s3c.h>
69#include <linux/platform_data/spi-s3c64xx.h> 69#include <linux/platform_data/spi-s3c64xx.h>
70 70
71static u64 samsung_device_dma_mask = DMA_BIT_MASK(32); 71#define samsung_device_dma_mask (*((u64[]) { DMA_BIT_MASK(32) }))
72 72
73/* AC97 */ 73/* AC97 */
74#ifdef CONFIG_CPU_S3C2440 74#ifdef CONFIG_CPU_S3C2440
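Note on the devs.c hunk above: it replaces a single shared u64 with a macro that expands to a C99 compound literal, so every expansion yields a fresh anonymous u64 whose address can still be taken. A minimal userspace sketch of the idiom, assuming only the standard DMA_BIT_MASK() arithmetic (the other names here are illustrative, not the kernel's headers):

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the kernel's DMA_BIT_MASK(). */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Each expansion creates a distinct, addressable u64 object. */
#define device_dma_mask (*((uint64_t[]) { DMA_BIT_MASK(32) }))

int main(void)
{
	uint64_t *a = &device_dma_mask;
	uint64_t *b = &device_dma_mask;

	/* Two expansions, two objects: unlike the old shared variable,
	 * a driver writing through one pointer cannot corrupt another
	 * device's mask. */
	printf("distinct objects: %d\n", a != b);
	printf("mask = 0x%llx\n", (unsigned long long)*a);
	return 0;
}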
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 7085e322dc42..648a32c89541 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -95,7 +95,7 @@ boot := arch/arm64/boot
95Image: vmlinux 95Image: vmlinux
96 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 96 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
97 97
98Image.%: vmlinux 98Image.%: Image
99 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 99 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
100 100
101zinstall install: 101zinstall install:
diff --git a/arch/arm64/boot/dts/lg/lg1312.dtsi b/arch/arm64/boot/dts/lg/lg1312.dtsi
index 3a4e9a2ab313..fbafa24cd533 100644
--- a/arch/arm64/boot/dts/lg/lg1312.dtsi
+++ b/arch/arm64/boot/dts/lg/lg1312.dtsi
@@ -125,7 +125,7 @@
125 #size-cells = <1>; 125 #size-cells = <1>;
126 #interrupts-cells = <3>; 126 #interrupts-cells = <3>;
127 127
128 compatible = "arm,amba-bus"; 128 compatible = "simple-bus";
129 interrupt-parent = <&gic>; 129 interrupt-parent = <&gic>;
130 ranges; 130 ranges;
131 131
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 46f325a143b0..d7f8e06910bc 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -163,7 +163,7 @@
163 }; 163 };
164 164
165 amba { 165 amba {
166 compatible = "arm,amba-bus"; 166 compatible = "simple-bus";
167 #address-cells = <2>; 167 #address-cells = <2>;
168 #size-cells = <2>; 168 #size-cells = <2>;
169 ranges; 169 ranges;
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h
index f69f69c8120c..da84645525b9 100644
--- a/arch/arm64/include/asm/kgdb.h
+++ b/arch/arm64/include/asm/kgdb.h
@@ -38,25 +38,54 @@ extern int kgdb_fault_expected;
38#endif /* !__ASSEMBLY__ */ 38#endif /* !__ASSEMBLY__ */
39 39
40/* 40/*
41 * gdb is expecting the following registers layout. 41 * gdb remote protocol (well most versions of it) expects the following
42 * register layout.
42 * 43 *
43 * General purpose regs: 44 * General purpose regs:
44 * r0-r30: 64 bit 45 * r0-r30: 64 bit
45 * sp,pc : 64 bit 46 * sp,pc : 64 bit
46 * pstate : 64 bit 47 * pstate : 32 bit
47 * Total: 34 48 * Total: 33 + 1
48 * FPU regs: 49 * FPU regs:
49 * f0-f31: 128 bit 50 * f0-f31: 128 bit
50 * Total: 32
51 * Extra regs
52 * fpsr & fpcr: 32 bit 51 * fpsr & fpcr: 32 bit
53 * Total: 2 52 * Total: 32 + 2
54 * 53 *
54 * To expand a little on the "most versions of it"... when the gdb remote
55 * protocol for AArch64 was developed it depended on a statement in the
56 * Architecture Reference Manual that claimed "SPSR_ELx is a 32-bit register",
57 * and, as a result, allocated only 32-bits for the PSTATE in the remote
58 * protocol. In fact this statement is still present in ARM DDI 0487A.i.
59 *
60 * Unfortunately "is a 32-bit register" has a very special meaning for
61 * system registers. It means that "the upper bits, bits[63:32], are
62 * RES0.". RES0 is heavily used in the ARM architecture documents as a
63 * way to leave space for future architecture changes. So to translate a
64 * little for people who don't spend their spare time reading ARM architecture
65 * manuals, what "is a 32-bit register" actually means in this context is
66 * "is a 64-bit register but one with no meaning allocated to any of the
67 * upper 32-bits... *yet*".
68 *
69 * Perhaps then we should not be surprised that this has led to some
70 * confusion. Specifically, a patch, influenced by the above translation,
71 * that extended PSTATE to 64-bit was accepted into gdb-7.7 but the patch
72 * was reverted in gdb-7.8.1 and all later releases, when this was
73 * discovered to be an undocumented protocol change.
74 *
75 * So... it is *not* wrong for us to only allocate 32-bits to PSTATE
76 * here even though the kernel itself allocates 64-bits for the same
77 * state. That is because this bit of code tells the kernel how the gdb
78 * remote protocol (well most versions of it) describes the register state.
79 *
80 * Note that if you are using one of the versions of gdb that supports
81 * the gdb-7.7 version of the protocol you cannot use kgdb directly
82 * without providing a custom register description (gdb can load new
83 * protocol descriptions at runtime).
55 */ 84 */
56 85
57#define _GP_REGS 34 86#define _GP_REGS 33
58#define _FP_REGS 32 87#define _FP_REGS 32
59#define _EXTRA_REGS 2 88#define _EXTRA_REGS 3
60/* 89/*
61 * general purpose registers size in bytes. 90 * general purpose registers size in bytes.
62 * pstate is only 4 bytes. subtract 4 bytes 91 * pstate is only 4 bytes. subtract 4 bytes
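To make the corrected layout concrete: gdb now expects 33 general-purpose registers of 8 bytes each plus a 4-byte pstate, followed by 32 vector registers of 16 bytes each and the 4-byte fpsr and fpcr. A small sketch of the byte accounting implied by the comment above (the header's own macro derivation is not reproduced here):

#include <stdio.h>

#define GP_REGS 33 /* x0-x30, sp, pc: 8 bytes each */
#define FP_REGS 32 /* v0-v31: 16 bytes each */

int main(void)
{
	int gp_bytes = GP_REGS * 8 + 4;      /* + 4 for the 32-bit pstate */
	int fp_bytes = FP_REGS * 16 + 2 * 4; /* + fpsr and fpcr */

	/* 268 + 520 = 788 bytes of gdb-visible register state */
	printf("gp=%d fp=%d total=%d\n", gp_bytes, fp_bytes,
	       gp_bytes + fp_bytes);
	return 0;
}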
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index ff98585d085a..d25f4f137c2a 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -26,7 +26,7 @@
26 26
27#define check_pgt_cache() do { } while (0) 27#define check_pgt_cache() do { } while (0)
28 28
29#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) 29#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
30#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) 30#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
31 31
32#if CONFIG_PGTABLE_LEVELS > 2 32#if CONFIG_PGTABLE_LEVELS > 2
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 433e50405274..022644704a93 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -124,6 +124,18 @@ static inline void cpu_panic_kernel(void)
124 cpu_park_loop(); 124 cpu_park_loop();
125} 125}
126 126
127/*
128 * If a secondary CPU enters the kernel but fails to come online
129 * (e.g. due to mismatched features) and cannot exit the kernel,
130 * we increment cpus_stuck_in_kernel and leave the CPU in a
131 * quiescent loop within the kernel text. The memory containing
132 * this loop must not be re-used for anything else as the 'stuck'
133 * core is executing it.
134 *
135 * This function is used to inhibit features like kexec and hibernate.
136 */
137bool cpus_are_stuck_in_kernel(void);
138
127#endif /* ifndef __ASSEMBLY__ */ 139#endif /* ifndef __ASSEMBLY__ */
128 140
129#endif /* ifndef __ASM_SMP_H */ 141#endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index fc9682bfe002..e875a5a551d7 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -30,22 +30,53 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
30{ 30{
31 unsigned int tmp; 31 unsigned int tmp;
32 arch_spinlock_t lockval; 32 arch_spinlock_t lockval;
33 u32 owner;
34
35 /*
36 * Ensure prior spin_lock operations to other locks have completed
37 * on this CPU before we test whether "lock" is locked.
38 */
39 smp_mb();
40 owner = READ_ONCE(lock->owner) << 16;
33 41
34 asm volatile( 42 asm volatile(
35" sevl\n" 43" sevl\n"
36"1: wfe\n" 44"1: wfe\n"
37"2: ldaxr %w0, %2\n" 45"2: ldaxr %w0, %2\n"
46 /* Is the lock free? */
38" eor %w1, %w0, %w0, ror #16\n" 47" eor %w1, %w0, %w0, ror #16\n"
39" cbnz %w1, 1b\n" 48" cbz %w1, 3f\n"
49 /* Lock taken -- has there been a subsequent unlock->lock transition? */
50" eor %w1, %w3, %w0, lsl #16\n"
51" cbz %w1, 1b\n"
52 /*
53 * The owner has been updated, so there was an unlock->lock
54 * transition that we missed. That means we can rely on the
55 * store-release of the unlock operation paired with the
56 * load-acquire of the lock operation to publish any of our
57 * previous stores to the new lock owner and therefore don't
58 * need to bother with the writeback below.
59 */
60" b 4f\n"
61"3:\n"
62 /*
63 * Serialise against any concurrent lockers by writing back the
64 * unlocked lock value
65 */
40 ARM64_LSE_ATOMIC_INSN( 66 ARM64_LSE_ATOMIC_INSN(
41 /* LL/SC */ 67 /* LL/SC */
42" stxr %w1, %w0, %2\n" 68" stxr %w1, %w0, %2\n"
43" cbnz %w1, 2b\n", /* Serialise against any concurrent lockers */
44 /* LSE atomics */
45" nop\n" 69" nop\n"
46" nop\n") 70" nop\n",
71 /* LSE atomics */
72" mov %w1, %w0\n"
73" cas %w0, %w0, %2\n"
74" eor %w1, %w1, %w0\n")
75 /* Somebody else wrote to the lock; go back to 2 and reload the value */
76" cbnz %w1, 2b\n"
77"4:"
47 : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock) 78 : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
48 : 79 : "r" (owner)
49 : "memory"); 80 : "memory");
50} 81}
51 82
@@ -148,6 +179,7 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
148 179
149static inline int arch_spin_is_locked(arch_spinlock_t *lock) 180static inline int arch_spin_is_locked(arch_spinlock_t *lock)
150{ 181{
182 smp_mb(); /* See arch_spin_unlock_wait */
151 return !arch_spin_value_unlocked(READ_ONCE(*lock)); 183 return !arch_spin_value_unlocked(READ_ONCE(*lock));
152} 184}
153 185
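In C terms, the rewritten arch_spin_unlock_wait() snapshots the owner half of the ticket, then loops until it either observes the lock free (writing the value back to serialise against concurrent lockers) or sees the owner change, which proves an unlock->lock transition already happened. A rough C11 model of that control flow, assuming the arm64 ticket layout (owner in the low halfword, next in the high halfword) and omitting the wfe/sevl wait and the leading smp_mb():

#include <stdatomic.h>
#include <stdint.h>

/* Rough model only: the real code relies on ldaxr/stxr (or LSE cas). */
static void spin_unlock_wait_model(_Atomic uint32_t *lock)
{
	uint32_t owner = atomic_load(lock) & 0xffff; /* snapshot owner */

	for (;;) {
		uint32_t val = atomic_load_explicit(lock, memory_order_acquire);

		if ((val & 0xffff) != owner)
			return; /* owner moved on: a lock handover happened */

		if ((val & 0xffff) == (val >> 16) &&    /* lock is free... */
		    atomic_compare_exchange_weak(lock, &val, val))
			return; /* ...and the writeback serialised us */
	}
}

int main(void)
{
	_Atomic uint32_t lock = 0; /* owner == next: unlocked */

	spin_unlock_wait_model(&lock);
	return 0;
}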
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index f8df75d740f4..21ab5df9fa76 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -33,6 +33,7 @@
33#include <asm/pgtable.h> 33#include <asm/pgtable.h>
34#include <asm/pgtable-hwdef.h> 34#include <asm/pgtable-hwdef.h>
35#include <asm/sections.h> 35#include <asm/sections.h>
36#include <asm/smp.h>
36#include <asm/suspend.h> 37#include <asm/suspend.h>
37#include <asm/virt.h> 38#include <asm/virt.h>
38 39
@@ -236,6 +237,11 @@ int swsusp_arch_suspend(void)
236 unsigned long flags; 237 unsigned long flags;
237 struct sleep_stack_data state; 238 struct sleep_stack_data state;
238 239
240 if (cpus_are_stuck_in_kernel()) {
241 pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
242 return -EBUSY;
243 }
244
239 local_dbg_save(flags); 245 local_dbg_save(flags);
240 246
241 if (__cpu_suspend_enter(&state)) { 247 if (__cpu_suspend_enter(&state)) {
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index b67531a13136..b5f063e5eff7 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -58,7 +58,17 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
58 { "x30", 8, offsetof(struct pt_regs, regs[30])}, 58 { "x30", 8, offsetof(struct pt_regs, regs[30])},
59 { "sp", 8, offsetof(struct pt_regs, sp)}, 59 { "sp", 8, offsetof(struct pt_regs, sp)},
60 { "pc", 8, offsetof(struct pt_regs, pc)}, 60 { "pc", 8, offsetof(struct pt_regs, pc)},
61 { "pstate", 8, offsetof(struct pt_regs, pstate)}, 61 /*
62 * struct pt_regs thinks PSTATE is 64-bits wide but gdb remote
63 * protocol disagrees. Therefore we must extract only the lower
64 * 32-bits. Look for the big comment in asm/kgdb.h for more
65 * detail.
66 */
67 { "pstate", 4, offsetof(struct pt_regs, pstate)
68#ifdef CONFIG_CPU_BIG_ENDIAN
69 + 4
70#endif
71 },
62 { "v0", 16, -1 }, 72 { "v0", 16, -1 },
63 { "v1", 16, -1 }, 73 { "v1", 16, -1 },
64 { "v2", 16, -1 }, 74 { "v2", 16, -1 },
@@ -128,6 +138,8 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
128 memset((char *)gdb_regs, 0, NUMREGBYTES); 138 memset((char *)gdb_regs, 0, NUMREGBYTES);
129 thread_regs = task_pt_regs(task); 139 thread_regs = task_pt_regs(task);
130 memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES); 140 memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES);
141 /* Special case for PSTATE (check comments in asm/kgdb.h for details) */
142 dbg_get_reg(33, gdb_regs + GP_REG_BYTES, thread_regs);
131} 143}
132 144
133void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) 145void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 678e0842cb3b..62ff3c0622e2 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -909,3 +909,21 @@ int setup_profiling_timer(unsigned int multiplier)
909{ 909{
910 return -EINVAL; 910 return -EINVAL;
911} 911}
912
913static bool have_cpu_die(void)
914{
915#ifdef CONFIG_HOTPLUG_CPU
916 int any_cpu = raw_smp_processor_id();
917
918 if (cpu_ops[any_cpu]->cpu_die)
919 return true;
920#endif
921 return false;
922}
923
924bool cpus_are_stuck_in_kernel(void)
925{
926 bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
927
928 return !!cpus_stuck_in_kernel || smp_spin_tables;
929}
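The new predicate treats two situations as "stuck": CPUs that actually failed to come online (counted in cpus_stuck_in_kernel) and spin-table SMP systems, where secondaries can never be offlined because no cpu_die method exists. A small sketch of the decision logic, with parameters standing in for the kernel globals:

#include <stdbool.h>
#include <stdio.h>

static bool cpus_are_stuck_in_kernel(int cpus_stuck, int num_possible,
				     bool have_cpu_die)
{
	/* Spin-table secondaries park in kernel text and cannot be
	 * offlined, so they count as stuck even before any failure. */
	bool smp_spin_tables = (num_possible > 1 && !have_cpu_die);

	return cpus_stuck > 0 || smp_spin_tables;
}

int main(void)
{
	printf("%d\n", cpus_are_stuck_in_kernel(0, 4, true));  /* 0: ok */
	printf("%d\n", cpus_are_stuck_in_kernel(0, 4, false)); /* 1: spin-table */
	printf("%d\n", cpus_are_stuck_in_kernel(2, 4, true));  /* 1: failures */
	return 0;
}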
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index f7cf463107df..2a43012616b7 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -64,8 +64,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
64 64
65 /* 65 /*
66 * We need to switch to kernel mode so that we can use __get_user 66 * We need to switch to kernel mode so that we can use __get_user
67 * to safely read from kernel space. Note that we now dump the 67 * to safely read from kernel space.
68 * code first, just in case the backtrace kills us.
69 */ 68 */
70 fs = get_fs(); 69 fs = get_fs();
71 set_fs(KERNEL_DS); 70 set_fs(KERNEL_DS);
@@ -111,21 +110,12 @@ static void dump_backtrace_entry(unsigned long where)
111 print_ip_sym(where); 110 print_ip_sym(where);
112} 111}
113 112
114static void dump_instr(const char *lvl, struct pt_regs *regs) 113static void __dump_instr(const char *lvl, struct pt_regs *regs)
115{ 114{
116 unsigned long addr = instruction_pointer(regs); 115 unsigned long addr = instruction_pointer(regs);
117 mm_segment_t fs;
118 char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; 116 char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
119 int i; 117 int i;
120 118
121 /*
122 * We need to switch to kernel mode so that we can use __get_user
123 * to safely read from kernel space. Note that we now dump the
124 * code first, just in case the backtrace kills us.
125 */
126 fs = get_fs();
127 set_fs(KERNEL_DS);
128
129 for (i = -4; i < 1; i++) { 119 for (i = -4; i < 1; i++) {
130 unsigned int val, bad; 120 unsigned int val, bad;
131 121
@@ -139,8 +129,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
139 } 129 }
140 } 130 }
141 printk("%sCode: %s\n", lvl, str); 131 printk("%sCode: %s\n", lvl, str);
132}
142 133
143 set_fs(fs); 134static void dump_instr(const char *lvl, struct pt_regs *regs)
135{
136 if (!user_mode(regs)) {
137 mm_segment_t fs = get_fs();
138 set_fs(KERNEL_DS);
139 __dump_instr(lvl, regs);
140 set_fs(fs);
141 } else {
142 __dump_instr(lvl, regs);
143 }
144} 144}
145 145
146static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) 146static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
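The dump_instr() refactor above isolates the set_fs() dance in a wrapper so the address-space override is applied only when dumping kernel-mode state; user-mode dumps now run with the caller's segment untouched. A toy sketch of the wrap-only-when-needed pattern (the segment type and accessors are stand-ins, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

typedef int seg_t;
enum { USER_DS, KERNEL_DS };

static seg_t current_seg = USER_DS;
static seg_t get_fs(void) { return current_seg; }
static void set_fs(seg_t s) { current_seg = s; }

static void __dump(void) { printf("dumping with seg=%d\n", current_seg); }

static void dump(bool user_mode)
{
	if (!user_mode) {
		seg_t fs = get_fs(); /* widen access only for kernel mode */
		set_fs(KERNEL_DS);
		__dump();
		set_fs(fs);          /* always restore */
	} else {
		__dump();
	}
}

int main(void)
{
	dump(true);  /* segment stays USER_DS */
	dump(false); /* temporarily KERNEL_DS */
	return 0;
}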
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index b7b397802088..efcf1f7ef1e4 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -179,7 +179,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
179 &asid_generation); 179 &asid_generation);
180 flush_context(cpu); 180 flush_context(cpu);
181 181
182 /* We have at least 1 ASID per CPU, so this will always succeed */ 182 /* We have more ASIDs than CPUs, so this will always succeed */
183 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); 183 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
184 184
185set_asid: 185set_asid:
@@ -227,8 +227,11 @@ switch_mm_fastpath:
227static int asids_init(void) 227static int asids_init(void)
228{ 228{
229 asid_bits = get_cpu_asid_bits(); 229 asid_bits = get_cpu_asid_bits();
230 /* If we end up with more CPUs than ASIDs, expect things to crash */ 230 /*
231 WARN_ON(NUM_USER_ASIDS < num_possible_cpus()); 231 * Expect allocation after rollover to fail if we don't have at least
232 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
233 */
234 WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
232 atomic64_set(&asid_generation, ASID_FIRST_VERSION); 235 atomic64_set(&asid_generation, ASID_FIRST_VERSION);
233 asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map), 236 asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
234 GFP_KERNEL); 237 GFP_KERNEL);
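The strengthened warning encodes a worst case: immediately after rollover every running CPU keeps its active ASID reserved and ASID #0 stays reserved for init_mm, so a new allocation can only succeed if there are strictly more usable ASIDs than CPUs. A worked check mirroring the WARN_ON condition:

#include <stdio.h>

/* Mirrors WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus()). */
static int rollover_alloc_can_fail(unsigned long num_user_asids,
				   unsigned int num_cpus)
{
	/* num_cpus ASIDs pinned by running tasks + ASID #0 for init_mm:
	 * one slot must still be free for the allocating mm. */
	return (num_user_asids - 1) <= num_cpus;
}

int main(void)
{
	/* e.g. 8-bit ASIDs give 256 values */
	printf("%d\n", rollover_alloc_can_fail(256, 254)); /* 0: safe   */
	printf("%d\n", rollover_alloc_can_fail(256, 255)); /* 1: unsafe */
	return 0;
}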
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 5954881a35ac..013e2cbe7924 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -109,7 +109,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
109 * PTE_RDONLY is cleared by default in the asm below, so set it 109 * PTE_RDONLY is cleared by default in the asm below, so set it
110 * back if necessary (read-only or clean PTE). 110 * back if necessary (read-only or clean PTE).
111 */ 111 */
112 if (!pte_write(entry) || !dirty) 112 if (!pte_write(entry) || !pte_sw_dirty(entry))
113 pte_val(entry) |= PTE_RDONLY; 113 pte_val(entry) |= PTE_RDONLY;
114 114
115 /* 115 /*
@@ -441,7 +441,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
441 return 1; 441 return 1;
442} 442}
443 443
444static struct fault_info { 444static const struct fault_info {
445 int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs); 445 int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
446 int sig; 446 int sig;
447 int code; 447 int code;
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index dbd12ea8ce68..43a76b07eb32 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -71,10 +71,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
71{ 71{
72 struct page *page = pte_page(pte); 72 struct page *page = pte_page(pte);
73 73
74 /* no flushing needed for anonymous pages */
75 if (!page_mapping(page))
76 return;
77
78 if (!test_and_set_bit(PG_dcache_clean, &page->flags)) 74 if (!test_and_set_bit(PG_dcache_clean, &page->flags))
79 sync_icache_aliases(page_address(page), 75 sync_icache_aliases(page_address(page),
80 PAGE_SIZE << compound_order(page)); 76 PAGE_SIZE << compound_order(page));
diff --git a/arch/avr32/include/asm/pgalloc.h b/arch/avr32/include/asm/pgalloc.h
index 1aba19d68c5e..db039cb368be 100644
--- a/arch/avr32/include/asm/pgalloc.h
+++ b/arch/avr32/include/asm/pgalloc.h
@@ -43,7 +43,7 @@ static inline void pgd_ctor(void *x)
43 */ 43 */
44static inline pgd_t *pgd_alloc(struct mm_struct *mm) 44static inline pgd_t *pgd_alloc(struct mm_struct *mm)
45{ 45{
46 return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor); 46 return quicklist_alloc(QUICK_PGD, GFP_KERNEL, pgd_ctor);
47} 47}
48 48
49static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) 49static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -54,7 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
54static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 54static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
55 unsigned long address) 55 unsigned long address)
56{ 56{
57 return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL); 57 return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
58} 58}
59 59
60static inline pgtable_t pte_alloc_one(struct mm_struct *mm, 60static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -63,7 +63,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
63 struct page *page; 63 struct page *page;
64 void *pg; 64 void *pg;
65 65
66 pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL); 66 pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
67 if (!pg) 67 if (!pg)
68 return NULL; 68 return NULL;
69 69
diff --git a/arch/cris/include/asm/pgalloc.h b/arch/cris/include/asm/pgalloc.h
index 235ece437ddd..42f1affb9c2d 100644
--- a/arch/cris/include/asm/pgalloc.h
+++ b/arch/cris/include/asm/pgalloc.h
@@ -24,14 +24,14 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
24 24
25static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 25static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
26{ 26{
27 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 27 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
28 return pte; 28 return pte;
29} 29}
30 30
31static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) 31static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
32{ 32{
33 struct page *pte; 33 struct page *pte;
34 pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); 34 pte = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
35 if (!pte) 35 if (!pte)
36 return NULL; 36 return NULL;
37 if (!pgtable_page_ctor(pte)) { 37 if (!pgtable_page_ctor(pte)) {
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index 41907d25ed38..c9ed14f6c67d 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
@@ -22,7 +22,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
22 22
23pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 23pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
24{ 24{
25 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); 25 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
26 if (pte) 26 if (pte)
27 clear_page(pte); 27 clear_page(pte);
28 return pte; 28 return pte;
@@ -33,9 +33,9 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
33 struct page *page; 33 struct page *page;
34 34
35#ifdef CONFIG_HIGHPTE 35#ifdef CONFIG_HIGHPTE
36 page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0); 36 page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
37#else 37#else
38 page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); 38 page = alloc_pages(GFP_KERNEL, 0);
39#endif 39#endif
40 if (!page) 40 if (!page)
41 return NULL; 41 return NULL;
diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h
index 77da3b0ae3c2..eeebf862c46c 100644
--- a/arch/hexagon/include/asm/pgalloc.h
+++ b/arch/hexagon/include/asm/pgalloc.h
@@ -64,7 +64,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
64{ 64{
65 struct page *pte; 65 struct page *pte;
66 66
67 pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO); 67 pte = alloc_page(GFP_KERNEL | __GFP_ZERO);
68 if (!pte) 68 if (!pte)
69 return NULL; 69 return NULL;
70 if (!pgtable_page_ctor(pte)) { 70 if (!pgtable_page_ctor(pte)) {
@@ -78,7 +78,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
78static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 78static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
79 unsigned long address) 79 unsigned long address)
80{ 80{
81 gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO; 81 gfp_t flags = GFP_KERNEL | __GFP_ZERO;
82 return (pte_t *) __get_free_page(flags); 82 return (pte_t *) __get_free_page(flags);
83} 83}
84 84
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index f80758cb7157..e109ee95e919 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -45,7 +45,7 @@ config IA64
45 select GENERIC_SMP_IDLE_THREAD 45 select GENERIC_SMP_IDLE_THREAD
46 select ARCH_INIT_TASK 46 select ARCH_INIT_TASK
47 select ARCH_TASK_STRUCT_ALLOCATOR 47 select ARCH_TASK_STRUCT_ALLOCATOR
48 select ARCH_THREAD_INFO_ALLOCATOR 48 select ARCH_THREAD_STACK_ALLOCATOR
49 select ARCH_CLOCKSOURCE_DATA 49 select ARCH_CLOCKSOURCE_DATA
50 select GENERIC_TIME_VSYSCALL_OLD 50 select GENERIC_TIME_VSYSCALL_OLD
51 select SYSCTL_ARCH_UNALIGN_NO_WARN 51 select SYSCTL_ARCH_UNALIGN_NO_WARN
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index aa995b67c3f5..d1212b84fb83 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -48,15 +48,15 @@ struct thread_info {
48#ifndef ASM_OFFSETS_C 48#ifndef ASM_OFFSETS_C
49/* how to get the thread information struct from C */ 49/* how to get the thread information struct from C */
50#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE)) 50#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
51#define alloc_thread_info_node(tsk, node) \ 51#define alloc_thread_stack_node(tsk, node) \
52 ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE)) 52 ((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE))
53#define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE)) 53#define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
54#else 54#else
55#define current_thread_info() ((struct thread_info *) 0) 55#define current_thread_info() ((struct thread_info *) 0)
56#define alloc_thread_info_node(tsk, node) ((struct thread_info *) 0) 56#define alloc_thread_stack_node(tsk, node) ((unsigned long *) 0)
57#define task_thread_info(tsk) ((struct thread_info *) 0) 57#define task_thread_info(tsk) ((struct thread_info *) 0)
58#endif 58#endif
59#define free_thread_info(ti) /* nothing */ 59#define free_thread_stack(ti) /* nothing */
60#define task_stack_page(tsk) ((void *)(tsk)) 60#define task_stack_page(tsk) ((void *)(tsk))
61 61
62#define __HAVE_THREAD_FUNCTIONS 62#define __HAVE_THREAD_FUNCTIONS
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index f9efe9739d3f..0eaa89f3defd 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -26,6 +26,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
26 * handled. This is done by having a special ".data..init_task" section... 26 * handled. This is done by having a special ".data..init_task" section...
27 */ 27 */
28#define init_thread_info init_task_mem.s.thread_info 28#define init_thread_info init_task_mem.s.thread_info
29#define init_stack init_task_mem.stack
29 30
30union { 31union {
31 struct { 32 struct {
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index f9924fbcfe42..fb95aed5f428 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -14,7 +14,7 @@ extern const char bad_pmd_string[];
14extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 14extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
15 unsigned long address) 15 unsigned long address)
16{ 16{
17 unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT); 17 unsigned long page = __get_free_page(GFP_DMA);
18 18
19 if (!page) 19 if (!page)
20 return NULL; 20 return NULL;
@@ -51,7 +51,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
51static inline struct page *pte_alloc_one(struct mm_struct *mm, 51static inline struct page *pte_alloc_one(struct mm_struct *mm,
52 unsigned long address) 52 unsigned long address)
53{ 53{
54 struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0); 54 struct page *page = alloc_pages(GFP_DMA, 0);
55 pte_t *pte; 55 pte_t *pte;
56 56
57 if (!page) 57 if (!page)
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index 24bcba496c75..c895b987202c 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -11,7 +11,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long ad
11{ 11{
12 pte_t *pte; 12 pte_t *pte;
13 13
14 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 14 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
15 if (pte) { 15 if (pte) {
16 __flush_page_to_ram(pte); 16 __flush_page_to_ram(pte);
17 flush_tlb_kernel_page(pte); 17 flush_tlb_kernel_page(pte);
@@ -32,7 +32,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addres
32 struct page *page; 32 struct page *page;
33 pte_t *pte; 33 pte_t *pte;
34 34
35 page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); 35 page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
36 if(!page) 36 if(!page)
37 return NULL; 37 return NULL;
38 if (!pgtable_page_ctor(page)) { 38 if (!pgtable_page_ctor(page)) {
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 0931388de47f..1901f61f926f 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -37,7 +37,7 @@ do { \
37static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 37static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
38 unsigned long address) 38 unsigned long address)
39{ 39{
40 unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT); 40 unsigned long page = __get_free_page(GFP_KERNEL);
41 41
42 if (!page) 42 if (!page)
43 return NULL; 43 return NULL;
@@ -49,7 +49,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
49static inline pgtable_t pte_alloc_one(struct mm_struct *mm, 49static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
50 unsigned long address) 50 unsigned long address)
51{ 51{
52 struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); 52 struct page *page = alloc_pages(GFP_KERNEL, 0);
53 53
54 if (page == NULL) 54 if (page == NULL)
55 return NULL; 55 return NULL;
diff --git a/arch/metag/include/asm/pgalloc.h b/arch/metag/include/asm/pgalloc.h
index 3104df0a4822..c2caa1ee4360 100644
--- a/arch/metag/include/asm/pgalloc.h
+++ b/arch/metag/include/asm/pgalloc.h
@@ -42,8 +42,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
42static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 42static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
43 unsigned long address) 43 unsigned long address)
44{ 44{
45 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | 45 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
46 __GFP_ZERO);
47 return pte; 46 return pte;
48} 47}
49 48
@@ -51,7 +50,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
51 unsigned long address) 50 unsigned long address)
52{ 51{
53 struct page *pte; 52 struct page *pte;
54 pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0); 53 pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
55 if (!pte) 54 if (!pte)
56 return NULL; 55 return NULL;
57 if (!pgtable_page_ctor(pte)) { 56 if (!pgtable_page_ctor(pte)) {
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 61436d69775c..7c89390c0c13 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -116,9 +116,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
116 struct page *ptepage; 116 struct page *ptepage;
117 117
118#ifdef CONFIG_HIGHPTE 118#ifdef CONFIG_HIGHPTE
119 int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT; 119 int flags = GFP_KERNEL | __GFP_HIGHMEM;
120#else 120#else
121 int flags = GFP_KERNEL | __GFP_REPEAT; 121 int flags = GFP_KERNEL;
122#endif 122#endif
123 123
124 ptepage = alloc_pages(flags, 0); 124 ptepage = alloc_pages(flags, 0);
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 4f4520e779a5..eb99fcc76088 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -239,8 +239,7 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
239{ 239{
240 pte_t *pte; 240 pte_t *pte;
241 if (mem_init_done) { 241 if (mem_init_done) {
242 pte = (pte_t *)__get_free_page(GFP_KERNEL | 242 pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
243 __GFP_REPEAT | __GFP_ZERO);
244 } else { 243 } else {
245 pte = (pte_t *)early_get_page(); 244 pte = (pte_t *)early_get_page();
246 if (pte) 245 if (pte)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 6733ac575da4..36a391d289aa 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -74,7 +74,7 @@
74#define KVM_GUEST_KUSEG 0x00000000UL 74#define KVM_GUEST_KUSEG 0x00000000UL
75#define KVM_GUEST_KSEG0 0x40000000UL 75#define KVM_GUEST_KSEG0 0x40000000UL
76#define KVM_GUEST_KSEG23 0x60000000UL 76#define KVM_GUEST_KSEG23 0x60000000UL
77#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0x60000000) 77#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0xe0000000)
78#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff) 78#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
79 79
80#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0) 80#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
@@ -338,6 +338,7 @@ struct kvm_mips_tlb {
338#define KVM_MIPS_GUEST_TLB_SIZE 64 338#define KVM_MIPS_GUEST_TLB_SIZE 64
339struct kvm_vcpu_arch { 339struct kvm_vcpu_arch {
340 void *host_ebase, *guest_ebase; 340 void *host_ebase, *guest_ebase;
341 int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
341 unsigned long host_stack; 342 unsigned long host_stack;
342 unsigned long host_gp; 343 unsigned long host_gp;
343 344
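The first hunk above widens KVM_GUEST_KSEGX's mask from 0x60000000 to 0xe0000000. The old mask dropped address bit 31, so a 0xC0000000 address masked to 0x40000000 and was misclassified as guest KSEG0; the wider mask keeps all three segment-selecting bits. A worked demonstration of the two masks:

#include <stdio.h>

int main(void)
{
	unsigned int addrs[] = { 0x00400000, 0x40001000, 0x60002000,
				 0xc0003000 };
	unsigned int i;

	for (i = 0; i < 4; i++) {
		unsigned int old_seg = addrs[i] & 0x60000000; /* loses bit 31 */
		unsigned int new_seg = addrs[i] & 0xe0000000; /* keeps it */

		printf("%08x -> old %08x, new %08x\n",
		       addrs[i], old_seg, new_seg);
	}
	/* 0xc0003000: the old mask yields 0x40000000 (looks like KSEG0),
	 * the new mask yields 0xc0000000 (correctly distinct). */
	return 0;
}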
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index b336037e8768..93c079a1cfc8 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -69,7 +69,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
69{ 69{
70 pte_t *pte; 70 pte_t *pte;
71 71
72 pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER); 72 pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
73 73
74 return pte; 74 return pte;
75} 75}
@@ -79,7 +79,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
79{ 79{
80 struct page *pte; 80 struct page *pte;
81 81
82 pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); 82 pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
83 if (!pte) 83 if (!pte)
84 return NULL; 84 return NULL;
85 clear_highpage(pte); 85 clear_highpage(pte);
@@ -113,7 +113,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
113{ 113{
114 pmd_t *pmd; 114 pmd_t *pmd;
115 115
116 pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER); 116 pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
117 if (pmd) 117 if (pmd)
118 pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table); 118 pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
119 return pmd; 119 return pmd;
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index a6b611f1da43..f53816744d60 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -24,7 +24,7 @@ struct mm_struct;
24struct vm_area_struct; 24struct vm_area_struct;
25 25
26#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \ 26#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
27 _CACHE_CACHABLE_NONCOHERENT) 27 _page_cachable_default)
28#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \ 28#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
29 _page_cachable_default) 29 _page_cachable_default)
30#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \ 30#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
@@ -476,7 +476,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
476 pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK); 476 pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
477 pte.pte_high &= (_PFN_MASK | _CACHE_MASK); 477 pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
478 pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK; 478 pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK;
479 pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK; 479 pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
480 return pte; 480 return pte;
481} 481}
482#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 482#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
@@ -491,7 +491,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
491#else 491#else
492static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 492static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
493{ 493{
494 return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); 494 return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
495 (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
495} 496}
496#endif 497#endif
497 498
@@ -632,7 +633,8 @@ static inline struct page *pmd_page(pmd_t pmd)
632 633
633static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 634static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
634{ 635{
635 pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot); 636 pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) |
637 (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
636 return pmd; 638 return pmd;
637} 639}
638 640
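Both pte_modify() fixes in the hunks above apply the same rule: bits the old PTE must keep (PFN, accessed/modified, cache selection) are taken from the old value, and only bits outside _PAGE_CHG_MASK may come from newprot. A generic sketch of the update pattern (the bit layout is illustrative, not MIPS's real one):

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit layout, not the real MIPS encoding. */
#define PAGE_PFN_MASK 0xfffff000u
#define PAGE_ACCESSED 0x00000020u
#define PAGE_MODIFIED 0x00000040u
#define PAGE_CHG_MASK (PAGE_PFN_MASK | PAGE_ACCESSED | PAGE_MODIFIED)

static uint32_t pte_modify_sketch(uint32_t pte, uint32_t newprot)
{
	/* Keep the preserved fields from the old PTE and take only
	 * genuine protection bits from newprot. */
	return (pte & PAGE_CHG_MASK) | (newprot & ~PAGE_CHG_MASK);
}

int main(void)
{
	uint32_t pte  = 0x12345064u;          /* PFN + accessed/modified */
	uint32_t prot = PAGE_PFN_MASK | 0x3u; /* bogus PFN bits + prot bits */

	/* Without the ~PAGE_CHG_MASK masking, prot would corrupt the PFN. */
	printf("0x%08x\n", pte_modify_sketch(pte, prot)); /* PFN preserved */
	return 0;
}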
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 396df6eb0a12..645c8a1982a7 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1636,6 +1636,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
1636 if (index < 0) { 1636 if (index < 0) {
1637 vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); 1637 vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
1638 vcpu->arch.host_cp0_badvaddr = va; 1638 vcpu->arch.host_cp0_badvaddr = va;
1639 vcpu->arch.pc = curr_pc;
1639 er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, 1640 er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
1640 vcpu); 1641 vcpu);
1641 preempt_enable(); 1642 preempt_enable();
@@ -1647,6 +1648,8 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
1647 * invalid exception to the guest 1648 * invalid exception to the guest
1648 */ 1649 */
1649 if (!TLB_IS_VALID(*tlb, va)) { 1650 if (!TLB_IS_VALID(*tlb, va)) {
1651 vcpu->arch.host_cp0_badvaddr = va;
1652 vcpu->arch.pc = curr_pc;
1650 er = kvm_mips_emulate_tlbinv_ld(cause, NULL, 1653 er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1651 run, vcpu); 1654 run, vcpu);
1652 preempt_enable(); 1655 preempt_enable();
@@ -1666,7 +1669,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
1666 cache, op, base, arch->gprs[base], offset); 1669 cache, op, base, arch->gprs[base], offset);
1667 er = EMULATE_FAIL; 1670 er = EMULATE_FAIL;
1668 preempt_enable(); 1671 preempt_enable();
1669 goto dont_update_pc; 1672 goto done;
1670 1673
1671 } 1674 }
1672 1675
@@ -1694,16 +1697,20 @@ skip_fault:
1694 kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", 1697 kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1695 cache, op, base, arch->gprs[base], offset); 1698 cache, op, base, arch->gprs[base], offset);
1696 er = EMULATE_FAIL; 1699 er = EMULATE_FAIL;
1697 preempt_enable();
1698 goto dont_update_pc;
1699 } 1700 }
1700 1701
1701 preempt_enable(); 1702 preempt_enable();
1703done:
1704 /* Rollback PC only if emulation was unsuccessful */
1705 if (er == EMULATE_FAIL)
1706 vcpu->arch.pc = curr_pc;
1702 1707
1703dont_update_pc: 1708dont_update_pc:
1704 /* Rollback PC */ 1709 /*
1705 vcpu->arch.pc = curr_pc; 1710 * This is for exceptions whose emulation updates the PC, so do not
1706done: 1711 * overwrite the PC under any circumstances
1712 */
1713
1707 return er; 1714 return er;
1708} 1715}
1709 1716
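The emulate.c changes fix when the guest PC is rolled back: paths that inject a TLB exception set their own PC and must not have it overwritten, while a failed emulation must restore the PC it optimistically advanced. A sketch of the corrected control flow (the structure and the forced-outcome parameter are illustrative):

#include <stdio.h>

enum emulation_result { EMULATE_DONE, EMULATE_FAIL };

struct vcpu { unsigned long pc; };

static enum emulation_result emulate_step(struct vcpu *vcpu,
					  enum emulation_result er)
{
	unsigned long curr_pc = vcpu->pc;

	vcpu->pc += 4; /* optimistically advance past the instruction */

	/* ... emulation would happen here; an exception path would set
	 * vcpu->pc to a vector and must not be overwritten below ... */

	if (er == EMULATE_FAIL)
		vcpu->pc = curr_pc; /* roll back only on failure */
	return er;
}

int main(void)
{
	struct vcpu v = { .pc = 0x1000 };

	emulate_step(&v, EMULATE_DONE);
	printf("pc after success: %#lx\n", v.pc); /* 0x1004 */
	emulate_step(&v, EMULATE_FAIL);
	printf("pc after failure: %#lx\n", v.pc); /* unchanged: 0x1004 */
	return 0;
}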
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
index 4ab4bdfad703..2143884709e4 100644
--- a/arch/mips/kvm/interrupt.h
+++ b/arch/mips/kvm/interrupt.h
@@ -28,6 +28,7 @@
28#define MIPS_EXC_MAX 12 28#define MIPS_EXC_MAX 12
29/* XXXSL More to follow */ 29/* XXXSL More to follow */
30 30
31extern char __kvm_mips_vcpu_run_end[];
31extern char mips32_exception[], mips32_exceptionEnd[]; 32extern char mips32_exception[], mips32_exceptionEnd[];
32extern char mips32_GuestException[], mips32_GuestExceptionEnd[]; 33extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
33 34
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
index 3ef03009de5f..828fcfc1cd7f 100644
--- a/arch/mips/kvm/locore.S
+++ b/arch/mips/kvm/locore.S
@@ -202,6 +202,7 @@ FEXPORT(__kvm_mips_load_k0k1)
202 202
203 /* Jump to guest */ 203 /* Jump to guest */
204 eret 204 eret
205EXPORT(__kvm_mips_vcpu_run_end)
205 206
206VECTOR(MIPSX(exception), unknown) 207VECTOR(MIPSX(exception), unknown)
207/* Find out what mode we came from and jump to the proper handler. */ 208/* Find out what mode we came from and jump to the proper handler. */
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index dc052fb5c7a2..44da5259f390 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -315,6 +315,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
315 memcpy(gebase + offset, mips32_GuestException, 315 memcpy(gebase + offset, mips32_GuestException,
316 mips32_GuestExceptionEnd - mips32_GuestException); 316 mips32_GuestExceptionEnd - mips32_GuestException);
317 317
318#ifdef MODULE
319 offset += mips32_GuestExceptionEnd - mips32_GuestException;
320 memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
321 __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
322 vcpu->arch.vcpu_run = gebase + offset;
323#else
324 vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
325#endif
326
318 /* Invalidate the icache for these ranges */ 327 /* Invalidate the icache for these ranges */
319 local_flush_icache_range((unsigned long)gebase, 328 local_flush_icache_range((unsigned long)gebase,
320 (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); 329 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
@@ -404,7 +413,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
404 /* Disable hardware page table walking while in guest */ 413 /* Disable hardware page table walking while in guest */
405 htw_stop(); 414 htw_stop();
406 415
407 r = __kvm_mips_vcpu_run(run, vcpu); 416 r = vcpu->arch.vcpu_run(run, vcpu);
408 417
409 /* Re-enable HTW before enabling interrupts */ 418 /* Re-enable HTW before enabling interrupts */
410 htw_start(); 419 htw_start();
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 4861a78c7160..f5f90bbf019d 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -115,7 +115,7 @@ static inline unsigned long current_stack_pointer(void)
115} 115}
116 116
117#ifndef CONFIG_KGDB 117#ifndef CONFIG_KGDB
118void arch_release_thread_info(struct thread_info *ti); 118void arch_release_thread_stack(unsigned long *stack);
119#endif 119#endif
120#define get_thread_info(ti) get_task_struct((ti)->task) 120#define get_thread_info(ti) get_task_struct((ti)->task)
121#define put_thread_info(ti) put_task_struct((ti)->task) 121#define put_thread_info(ti) put_task_struct((ti)->task)
diff --git a/arch/mn10300/kernel/kgdb.c b/arch/mn10300/kernel/kgdb.c
index 99770823451a..2d7986c386fe 100644
--- a/arch/mn10300/kernel/kgdb.c
+++ b/arch/mn10300/kernel/kgdb.c
@@ -397,8 +397,9 @@ static bool kgdb_arch_undo_singlestep(struct pt_regs *regs)
397 * single-step state is cleared. At this point the breakpoints should have 397 * single-step state is cleared. At this point the breakpoints should have
398 * been removed by __switch_to(). 398 * been removed by __switch_to().
399 */ 399 */
400void arch_release_thread_info(struct thread_info *ti) 400void arch_release_thread_stack(unsigned long *stack)
401{ 401{
402 struct thread_info *ti = (void *)stack;
402 if (kgdb_sstep_thread == ti) { 403 if (kgdb_sstep_thread == ti) {
403 kgdb_sstep_thread = NULL; 404 kgdb_sstep_thread = NULL;
404 405
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
index e77a7c728081..9577cf768875 100644
--- a/arch/mn10300/mm/pgtable.c
+++ b/arch/mn10300/mm/pgtable.c
@@ -63,7 +63,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
63 63
64pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 64pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
65{ 65{
66 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); 66 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
67 if (pte) 67 if (pte)
68 clear_page(pte); 68 clear_page(pte);
69 return pte; 69 return pte;
@@ -74,9 +74,9 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
74 struct page *pte; 74 struct page *pte;
75 75
76#ifdef CONFIG_HIGHPTE 76#ifdef CONFIG_HIGHPTE
77 pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0); 77 pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
78#else 78#else
79 pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); 79 pte = alloc_pages(GFP_KERNEL, 0);
80#endif 80#endif
81 if (!pte) 81 if (!pte)
82 return NULL; 82 return NULL;
diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h
index 6e2985e0a7b9..bb47d08c8ef7 100644
--- a/arch/nios2/include/asm/pgalloc.h
+++ b/arch/nios2/include/asm/pgalloc.h
@@ -42,8 +42,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
42{ 42{
43 pte_t *pte; 43 pte_t *pte;
44 44
45 pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 45 pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
46 PTE_ORDER);
47 46
48 return pte; 47 return pte;
49} 48}
@@ -53,7 +52,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
53{ 52{
54 struct page *pte; 53 struct page *pte;
55 54
56 pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); 55 pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
57 if (pte) { 56 if (pte) {
58 if (!pgtable_page_ctor(pte)) { 57 if (!pgtable_page_ctor(pte)) {
59 __free_page(pte); 58 __free_page(pte);
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index 21484e5b9e9a..87eebd185089 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -77,7 +77,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
77 unsigned long address) 77 unsigned long address)
78{ 78{
79 struct page *pte; 79 struct page *pte;
80 pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); 80 pte = alloc_pages(GFP_KERNEL, 0);
81 if (!pte) 81 if (!pte)
82 return NULL; 82 return NULL;
83 clear_page(page_address(pte)); 83 clear_page(page_address(pte));
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index 62b08ef392be..5b2a95116e8f 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -122,7 +122,7 @@ pte_t __init_refok *pte_alloc_one_kernel(struct mm_struct *mm,
122 pte_t *pte; 122 pte_t *pte;
123 123
124 if (likely(mem_init_done)) { 124 if (likely(mem_init_done)) {
125 pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT); 125 pte = (pte_t *) __get_free_page(GFP_KERNEL);
126 } else { 126 } else {
127 pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); 127 pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
128#if 0 128#if 0
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index f2fd327dce2e..f08dda3f0995 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -63,8 +63,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
63 63
64static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) 64static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
65{ 65{
66 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 66 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
67 PMD_ORDER);
68 if (pmd) 67 if (pmd)
69 memset(pmd, 0, PAGE_SIZE<<PMD_ORDER); 68 memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
70 return pmd; 69 return pmd;
@@ -124,7 +123,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
124static inline pgtable_t 123static inline pgtable_t
125pte_alloc_one(struct mm_struct *mm, unsigned long address) 124pte_alloc_one(struct mm_struct *mm, unsigned long address)
126{ 125{
127 struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 126 struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);
128 if (!page) 127 if (!page)
129 return NULL; 128 return NULL;
130 if (!pgtable_page_ctor(page)) { 129 if (!pgtable_page_ctor(page)) {
@@ -137,7 +136,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
137static inline pte_t * 136static inline pte_t *
138pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) 137pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
139{ 138{
140 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 139 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
141 return pte; 140 return pte;
142} 141}
143 142
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 01f7464d9fea..0a9d439bcda6 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -128,7 +128,7 @@ config PPC
128 select IRQ_FORCED_THREADING 128 select IRQ_FORCED_THREADING
129 select HAVE_RCU_TABLE_FREE if SMP 129 select HAVE_RCU_TABLE_FREE if SMP
130 select HAVE_SYSCALL_TRACEPOINTS 130 select HAVE_SYSCALL_TRACEPOINTS
131 select HAVE_CBPF_JIT 131 select HAVE_CBPF_JIT if CPU_BIG_ENDIAN
132 select HAVE_ARCH_JUMP_LABEL 132 select HAVE_ARCH_JUMP_LABEL
133 select ARCH_HAVE_NMI_SAFE_CMPXCHG 133 select ARCH_HAVE_NMI_SAFE_CMPXCHG
134 select ARCH_HAS_GCOV_PROFILE_ALL 134 select ARCH_HAS_GCOV_PROFILE_ALL
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index a2350194fc76..8e21bb492dca 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -102,7 +102,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
102static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, 102static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
103 unsigned long address) 103 unsigned long address)
104{ 104{
105 tlb_flush_pgtable(tlb, address);
106 pgtable_page_dtor(table); 105 pgtable_page_dtor(table);
107 pgtable_free_tlb(tlb, page_address(table), 0); 106 pgtable_free_tlb(tlb, page_address(table), 0);
108} 107}
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 290157e8d5b2..74839f24f412 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -88,6 +88,7 @@
88#define HPTE_R_RPN_SHIFT 12 88#define HPTE_R_RPN_SHIFT 12
89#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000) 89#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
90#define HPTE_R_PP ASM_CONST(0x0000000000000003) 90#define HPTE_R_PP ASM_CONST(0x0000000000000003)
91#define HPTE_R_PPP ASM_CONST(0x8000000000000003)
91#define HPTE_R_N ASM_CONST(0x0000000000000004) 92#define HPTE_R_N ASM_CONST(0x0000000000000004)
92#define HPTE_R_G ASM_CONST(0x0000000000000008) 93#define HPTE_R_G ASM_CONST(0x0000000000000008)
93#define HPTE_R_M ASM_CONST(0x0000000000000010) 94#define HPTE_R_M ASM_CONST(0x0000000000000010)
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 488279edb1f0..cd5e7aa8cc34 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -41,7 +41,7 @@ extern struct kmem_cache *pgtable_cache[];
41 pgtable_cache[(shift) - 1]; \ 41 pgtable_cache[(shift) - 1]; \
42 }) 42 })
43 43
44#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO 44#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
45 45
46extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int); 46extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
47extern void pte_fragment_free(unsigned long *, int); 47extern void pte_fragment_free(unsigned long *, int);
@@ -56,7 +56,7 @@ static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
56 return (pgd_t *)__get_free_page(PGALLOC_GFP); 56 return (pgd_t *)__get_free_page(PGALLOC_GFP);
57#else 57#else
58 struct page *page; 58 struct page *page;
59 page = alloc_pages(PGALLOC_GFP, 4); 59 page = alloc_pages(PGALLOC_GFP | __GFP_REPEAT, 4);
60 if (!page) 60 if (!page)
61 return NULL; 61 return NULL;
62 return (pgd_t *) page_address(page); 62 return (pgd_t *) page_address(page);
@@ -93,8 +93,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
93 93
94static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) 94static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
95{ 95{
96 return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), 96 return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
97 GFP_KERNEL|__GFP_REPEAT);
98} 97}
99 98
100static inline void pud_free(struct mm_struct *mm, pud_t *pud) 99static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -110,13 +109,17 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
110static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, 109static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
111 unsigned long address) 110 unsigned long address)
112{ 111{
112 /*
113 * By now all the pud entries should be none entries. So go
114 * ahead and flush the page walk cache
115 */
116 flush_tlb_pgtable(tlb, address);
113 pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE); 117 pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
114} 118}
115 119
116static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) 120static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
117{ 121{
118 return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), 122 return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
119 GFP_KERNEL|__GFP_REPEAT);
120} 123}
121 124
122static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) 125static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -127,6 +130,11 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
127static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, 130static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
128 unsigned long address) 131 unsigned long address)
129{ 132{
133 /*
134 * By now all the pmd entries should be none entries. So go
135 * ahead and flush the page walk cache
136 */
137 flush_tlb_pgtable(tlb, address);
130 return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX); 138 return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
131} 139}
132 140
@@ -151,7 +159,7 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd)
151static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 159static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
152 unsigned long address) 160 unsigned long address)
153{ 161{
154 return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO); 162 return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
155} 163}
156 164
157static inline pgtable_t pte_alloc_one(struct mm_struct *mm, 165static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -198,7 +206,11 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
198static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, 206static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
199 unsigned long address) 207 unsigned long address)
200{ 208{
201 tlb_flush_pgtable(tlb, address); 209 /*
210 * By now all the pte entries should be none entries. So go
211 * ahead and flush the page walk cache
212 */
213 flush_tlb_pgtable(tlb, address);
202 pgtable_free_tlb(tlb, table, 0); 214 pgtable_free_tlb(tlb, table, 0);
203} 215}
204 216
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 88a5ecaa157b..ab84c89c9e98 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -230,6 +230,7 @@ extern unsigned long __kernel_virt_size;
230#define KERN_VIRT_SIZE __kernel_virt_size 230#define KERN_VIRT_SIZE __kernel_virt_size
231extern struct page *vmemmap; 231extern struct page *vmemmap;
232extern unsigned long ioremap_bot; 232extern unsigned long ioremap_bot;
233extern unsigned long pci_io_base;
233#endif /* __ASSEMBLY__ */ 234#endif /* __ASSEMBLY__ */
234 235
235#include <asm/book3s/64/hash.h> 236#include <asm/book3s/64/hash.h>
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 937d4e247ac3..df294224e280 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -228,5 +228,20 @@ extern void radix__vmemmap_remove_mapping(unsigned long start,
228 228
229extern int radix__map_kernel_page(unsigned long ea, unsigned long pa, 229extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
230 pgprot_t flags, unsigned int psz); 230 pgprot_t flags, unsigned int psz);
231
232static inline unsigned long radix__get_tree_size(void)
233{
234 unsigned long rts_field;
235 /*
236 * we support 52 bits, hence 52-31 = 21, 0b10101
237 * RTS encoding details
238 * bits 0 - 3 of rts -> bits 6 - 8 unsigned long
239 * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
240 */
241 rts_field = (0x5UL << 5); /* 6 - 8 bits */
242 rts_field |= (0x2UL << 61);
243
244 return rts_field;
245}
231#endif /* __ASSEMBLY__ */ 246#endif /* __ASSEMBLY__ */
232#endif 247#endif
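The RTS value for a 52-bit radix tree is 52 - 31 = 21 (0b10101), split across two fields of the partition-table entry by the shifts in radix__get_tree_size(). A worked computation of the packing, following the shifts the code actually uses (assumes a 64-bit unsigned long, as on ppc64):

#include <stdio.h>

int main(void)
{
	unsigned long rts = 52 - 31; /* 21 = 0b10101 */
	unsigned long rts_field;

	/* Low 3 bits of RTS go in one field, the top 2 bits in another,
	 * matching (0x5UL << 5) | (0x2UL << 61) in the code above. */
	rts_field  = (rts & 0x7) << 5;
	rts_field |= (rts >> 3) << 61;

	printf("rts_field = 0x%lx\n", rts_field);
	printf("matches kernel constant: %d\n",
	       rts_field == ((0x5UL << 5) | (0x2UL << 61)));
	return 0;
}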
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 13ef38828dfe..3fa94fcac628 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -18,16 +18,19 @@ extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
18extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); 18extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
19extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, 19extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
20 unsigned long ap, int nid); 20 unsigned long ap, int nid);
21extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
21extern void radix__tlb_flush(struct mmu_gather *tlb); 22extern void radix__tlb_flush(struct mmu_gather *tlb);
22#ifdef CONFIG_SMP 23#ifdef CONFIG_SMP
23extern void radix__flush_tlb_mm(struct mm_struct *mm); 24extern void radix__flush_tlb_mm(struct mm_struct *mm);
24extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); 25extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
25extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, 26extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
26 unsigned long ap, int nid); 27 unsigned long ap, int nid);
28extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
27#else 29#else
28#define radix__flush_tlb_mm(mm) radix__local_flush_tlb_mm(mm) 30#define radix__flush_tlb_mm(mm) radix__local_flush_tlb_mm(mm)
29#define radix__flush_tlb_page(vma,addr) radix__local_flush_tlb_page(vma,addr) 31#define radix__flush_tlb_page(vma,addr) radix__local_flush_tlb_page(vma,addr)
30#define radix___flush_tlb_page(mm,addr,p,i) radix___local_flush_tlb_page(mm,addr,p,i) 32#define radix___flush_tlb_page(mm,addr,p,i) radix___local_flush_tlb_page(mm,addr,p,i)
33#define radix__flush_tlb_pwc(tlb, addr) radix__local_flush_tlb_pwc(tlb, addr)
31#endif 34#endif
32 35
33#endif 36#endif
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index d98424ae356c..96e5769b18b0 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -72,5 +72,19 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
72#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) 72#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
73#define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr) 73#define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr)
74#endif /* CONFIG_SMP */ 74#endif /* CONFIG_SMP */
75/*
76 * flush the page walk cache for the address
77 */
78static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
79{
80 /*
81 * Flush the page table walk cache on freeing a page table. We already
82 * have marked the upper/higher level page table entry none by now.
83 * So it is safe to flush PWC here.
84 */
85 if (!radix_enabled())
86 return;
75 87
88 radix__flush_tlb_pwc(tlb, address);
89}
76#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */ 90#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
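
flush_tlb_pgtable() is what the pgalloc hunks earlier in this patch call; on hash it degenerates to nothing because only the radix walker keeps a page walk cache. The guard pattern, as a compile-anywhere sketch with stand-ins for radix_enabled() and the flush:

static int radix_enabled_stub;	/* stand-in for radix_enabled() */

static void flush_pwc_stub(unsigned long address)
{
	(void)address;	/* kernel: radix__flush_tlb_pwc(tlb, address) */
}

static void flush_tlb_pgtable_sketch(unsigned long address)
{
	if (!radix_enabled_stub)
		return;	/* hash MMU: nothing to flush */
	flush_pwc_stub(address);
}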
diff --git a/arch/powerpc/include/asm/book3s/pgalloc.h b/arch/powerpc/include/asm/book3s/pgalloc.h
index 54f591e9572e..c0a69ae92256 100644
--- a/arch/powerpc/include/asm/book3s/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/pgalloc.h
@@ -4,11 +4,6 @@
4#include <linux/mm.h> 4#include <linux/mm.h>
5 5
6extern void tlb_remove_table(struct mmu_gather *tlb, void *table); 6extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
7static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
8 unsigned long address)
9{
10
11}
12 7
13#ifdef CONFIG_PPC64 8#ifdef CONFIG_PPC64
14#include <asm/book3s/64/pgalloc.h> 9#include <asm/book3s/64/pgalloc.h>
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
index 0c12a3bfe2ab..897d2e1c8a9b 100644
--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -57,8 +57,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
57 57
58static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) 58static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
59{ 59{
60 return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), 60 return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
61 GFP_KERNEL|__GFP_REPEAT);
62} 61}
63 62
64static inline void pud_free(struct mm_struct *mm, pud_t *pud) 63static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -88,7 +87,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
88static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 87static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
89 unsigned long address) 88 unsigned long address)
90{ 89{
91 return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO); 90 return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
92} 91}
93 92
94static inline pgtable_t pte_alloc_one(struct mm_struct *mm, 93static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -172,7 +171,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
172 171
173static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) 172static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
174{ 173{
175 pte_fragment_fre((unsigned long *)pte, 1); 174 pte_fragment_free((unsigned long *)pte, 1);
176} 175}
177 176
178static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) 177static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
@@ -190,8 +189,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
190 189
191static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) 190static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
192{ 191{
193 return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), 192 return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
194 GFP_KERNEL|__GFP_REPEAT);
195} 193}
196 194
197static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) 195static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 2714a3b81d24..d70101e1e25c 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -642,13 +642,12 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
642 if (pe->type & EEH_PE_VF) { 642 if (pe->type & EEH_PE_VF) {
643 eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL); 643 eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
644 } else { 644 } else {
645 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
646 pci_lock_rescan_remove(); 645 pci_lock_rescan_remove();
647 pci_hp_remove_devices(bus); 646 pci_hp_remove_devices(bus);
648 pci_unlock_rescan_remove(); 647 pci_unlock_rescan_remove();
649 } 648 }
650 } else if (frozen_bus) { 649 } else if (frozen_bus) {
651 eeh_pe_dev_traverse(pe, eeh_rmv_device, &rmv_data); 650 eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
652 } 651 }
653 652
654 /* 653 /*
@@ -692,10 +691,12 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
692 */ 691 */
693 edev = list_first_entry(&pe->edevs, struct eeh_dev, list); 692 edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
694 eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL); 693 eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
695 if (pe->type & EEH_PE_VF) 694 if (pe->type & EEH_PE_VF) {
696 eeh_add_virt_device(edev, NULL); 695 eeh_add_virt_device(edev, NULL);
697 else 696 } else {
697 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
698 pci_hp_add_devices(bus); 698 pci_hp_add_devices(bus);
699 }
699 } else if (frozen_bus && rmv_data->removed) { 700 } else if (frozen_bus && rmv_data->removed) {
700 pr_info("EEH: Sleep 5s ahead of partial hotplug\n"); 701 pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
701 ssleep(5); 702 ssleep(5);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 4c9440629128..8bcc1b457115 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1399,11 +1399,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX)
1399 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ 1399 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1400 1400
1401 mtlr r10 1401 mtlr r10
1402BEGIN_MMU_FTR_SECTION
1403 b 2f
1404END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
1405 andi. r10,r12,MSR_RI /* check for unrecoverable exception */ 1402 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1403BEGIN_MMU_FTR_SECTION
1406 beq- 2f 1404 beq- 2f
1405FTR_SECTION_ELSE
1406 b 2f
1407ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
1407 1408
1408.machine push 1409.machine push
1409.machine "power4" 1410.machine "power4"
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 3759df52bd67..a5ae49a2dcc4 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -47,7 +47,6 @@ static int __init pcibios_init(void)
47 47
48 printk(KERN_INFO "PCI: Probing PCI hardware\n"); 48 printk(KERN_INFO "PCI: Probing PCI hardware\n");
49 49
50 pci_io_base = ISA_IO_BASE;
51 /* For now, override phys_mem_access_prot. If we need it 50 /* For now, override phys_mem_access_prot. If we need it
52 * later, we may move that initialization to each ppc_md 51 * later, we may move that initialization to each ppc_md
53 */ 52 */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e2f12cbcade9..0b93893424f5 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1505,6 +1505,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1505 current->thread.regs = regs - 1; 1505 current->thread.regs = regs - 1;
1506 } 1506 }
1507 1507
1508#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1509 /*
1510 * Clear any transactional state, we're exec()ing. The cause is
1511 * not important as there will never be a recheckpoint so it's not
1512 * user visible.
1513 */
1514 if (MSR_TM_SUSPENDED(mfmsr()))
1515 tm_reclaim_current(0);
1516#endif
1517
1508 memset(regs->gpr, 0, sizeof(regs->gpr)); 1518 memset(regs->gpr, 0, sizeof(regs->gpr));
1509 regs->ctr = 0; 1519 regs->ctr = 0;
1510 regs->link = 0; 1520 regs->link = 0;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index ccd2037c797f..6ee4b72cda42 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -719,7 +719,7 @@ unsigned char ibm_architecture_vec[] = {
719 * must be matched by the macro below. Update the definition if 719 * must be matched by the macro below. Update the definition if
720 * the structure layout changes. 720 * the structure layout changes.
721 */ 721 */
722#define IBM_ARCH_VEC_NRCORES_OFFSET 125 722#define IBM_ARCH_VEC_NRCORES_OFFSET 133
723 W(NR_CPUS), /* number of cores supported */ 723 W(NR_CPUS), /* number of cores supported */
724 0, 724 0,
725 0, 725 0,
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 30a03c03fe73..060b140f03c6 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -377,7 +377,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
377 377
378#else 378#else
379 BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != 379 BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
380 offsetof(struct thread_fp_state, fpr[32][0])); 380 offsetof(struct thread_fp_state, fpr[32]));
381 381
382 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 382 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
383 &target->thread.fp_state, 0, -1); 383 &target->thread.fp_state, 0, -1);
@@ -405,7 +405,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
405 return 0; 405 return 0;
406#else 406#else
407 BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != 407 BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
408 offsetof(struct thread_fp_state, fpr[32][0])); 408 offsetof(struct thread_fp_state, fpr[32]));
409 409
410 return user_regset_copyin(&pos, &count, &kbuf, &ubuf, 410 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
411 &target->thread.fp_state, 0, -1); 411 &target->thread.fp_state, 0, -1);
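
Both BUILD_BUG_ONs now take the offset of the one-past-the-end element fpr[32] instead of fpr[32][0]; the two expressions denote the same byte offset, but the latter indexes past the array, which newer compilers flag (the motivation is inferred from the shape of the fix; the diff itself only shows the change). A stand-alone demo of the equivalence:

#include <stddef.h>
#include <stdio.h>

/* Shape of the relevant part of thread_fp_state; the inner
 * dimension 2 is a stand-in for TS_FPRWIDTH. */
struct fp_state_demo {
	unsigned long fpr[32][2];
	unsigned long fpscr;
};

int main(void)
{
	printf("offsetof fpr[32]: %zu\n", offsetof(struct fp_state_demo, fpr[32]));
	printf("offsetof fpscr:   %zu\n", offsetof(struct fp_state_demo, fpscr));
	return 0;
}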
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index bf8f34a58670..b7019b559ddb 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -110,17 +110,11 @@ _GLOBAL(tm_reclaim)
110 std r3, STK_PARAM(R3)(r1) 110 std r3, STK_PARAM(R3)(r1)
111 SAVE_NVGPRS(r1) 111 SAVE_NVGPRS(r1)
112 112
113 /* We need to setup MSR for VSX register save instructions. Here we 113 /* We need to setup MSR for VSX register save instructions. */
114 * also clear the MSR RI since when we do the treclaim, we won't have a
115 * valid kernel pointer for a while. We clear RI here as it avoids
116 * adding another mtmsr closer to the treclaim. This makes the region
117 * marked as non-recoverable wider than it needs to be but it saves on
118 * inserting another mtmsrd later.
119 */
120 mfmsr r14 114 mfmsr r14
121 mr r15, r14 115 mr r15, r14
122 ori r15, r15, MSR_FP 116 ori r15, r15, MSR_FP
123 li r16, MSR_RI 117 li r16, 0
124 ori r16, r16, MSR_EE /* IRQs hard off */ 118 ori r16, r16, MSR_EE /* IRQs hard off */
125 andc r15, r15, r16 119 andc r15, r15, r16
126 oris r15, r15, MSR_VEC@h 120 oris r15, r15, MSR_VEC@h
@@ -176,7 +170,17 @@ dont_backup_fp:
1761: tdeqi r6, 0 1701: tdeqi r6, 0
177 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0 171 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
178 172
179 /* The moment we treclaim, ALL of our GPRs will switch 173 /* Clear MSR RI since we are about to change r1, EE is already off. */
174 li r4, 0
175 mtmsrd r4, 1
176
177 /*
178 * BE CAREFUL HERE:
179 * At this point we can't take an SLB miss since we have MSR_RI
180 * off. Load only to/from the stack/paca which are in SLB bolted regions
181 * until we turn MSR RI back on.
182 *
183 * The moment we treclaim, ALL of our GPRs will switch
180 * to user register state. (FPRs, CCR etc. also!) 184 * to user register state. (FPRs, CCR etc. also!)
181 * Use an sprg and a tm_scratch in the PACA to shuffle. 185 * Use an sprg and a tm_scratch in the PACA to shuffle.
182 */ 186 */
@@ -197,6 +201,11 @@ dont_backup_fp:
197 201
198 /* Store the PPR in r11 and reset to decent value */ 202 /* Store the PPR in r11 and reset to decent value */
199 std r11, GPR11(r1) /* Temporary stash */ 203 std r11, GPR11(r1) /* Temporary stash */
204
205 /* Reset MSR RI so we can take SLB faults again */
206 li r11, MSR_RI
207 mtmsrd r11, 1
208
200 mfspr r11, SPRN_PPR 209 mfspr r11, SPRN_PPR
201 HMT_MEDIUM 210 HMT_MEDIUM
202 211
@@ -397,11 +406,6 @@ restore_gprs:
397 ld r5, THREAD_TM_DSCR(r3) 406 ld r5, THREAD_TM_DSCR(r3)
398 ld r6, THREAD_TM_PPR(r3) 407 ld r6, THREAD_TM_PPR(r3)
399 408
400 /* Clear the MSR RI since we are about to change R1. EE is already off
401 */
402 li r4, 0
403 mtmsrd r4, 1
404
405 REST_GPR(0, r7) /* GPR0 */ 409 REST_GPR(0, r7) /* GPR0 */
406 REST_2GPRS(2, r7) /* GPR2-3 */ 410 REST_2GPRS(2, r7) /* GPR2-3 */
407 REST_GPR(4, r7) /* GPR4 */ 411 REST_GPR(4, r7) /* GPR4 */
@@ -439,10 +443,33 @@ restore_gprs:
439 ld r6, _CCR(r7) 443 ld r6, _CCR(r7)
440 mtcr r6 444 mtcr r6
441 445
442 REST_GPR(1, r7) /* GPR1 */
443 REST_GPR(5, r7) /* GPR5-7 */
444 REST_GPR(6, r7) 446 REST_GPR(6, r7)
445 ld r7, GPR7(r7) 447
448 /*
449 * Store r1 and r5 on the stack so that we can access them
450 * after we clear MSR RI.
451 */
452
453 REST_GPR(5, r7)
454 std r5, -8(r1)
455 ld r5, GPR1(r7)
456 std r5, -16(r1)
457
458 REST_GPR(7, r7)
459
460 /* Clear MSR RI since we are about to change r1. EE is already off */
461 li r5, 0
462 mtmsrd r5, 1
463
464 /*
465 * BE CAREFUL HERE:
466 * At this point we can't take an SLB miss since we have MSR_RI
467 * off. Load only to/from the stack/paca which are in SLB bolted regions
468 * until we turn MSR RI back on.
469 */
470
471 ld r5, -8(r1)
472 ld r1, -16(r1)
446 473
447 /* Commit register state as checkpointed state: */ 474 /* Commit register state as checkpointed state: */
448 TRECHKPT 475 TRECHKPT
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index d873f6507f72..f8a871a72985 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -316,8 +316,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
316 DBG_LOW(" -> hit\n"); 316 DBG_LOW(" -> hit\n");
317 /* Update the HPTE */ 317 /* Update the HPTE */
318 hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & 318 hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
319 ~(HPTE_R_PP | HPTE_R_N)) | 319 ~(HPTE_R_PPP | HPTE_R_N)) |
320 (newpp & (HPTE_R_PP | HPTE_R_N | 320 (newpp & (HPTE_R_PPP | HPTE_R_N |
321 HPTE_R_C))); 321 HPTE_R_C)));
322 } 322 }
323 native_unlock_hpte(hptep); 323 native_unlock_hpte(hptep);
@@ -385,8 +385,8 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
385 385
386 /* Update the HPTE */ 386 /* Update the HPTE */
387 hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & 387 hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
388 ~(HPTE_R_PP | HPTE_R_N)) | 388 ~(HPTE_R_PPP | HPTE_R_N)) |
389 (newpp & (HPTE_R_PP | HPTE_R_N))); 389 (newpp & (HPTE_R_PPP | HPTE_R_N)));
390 /* 390 /*
391 * Ensure it is out of the tlb too. Bolted entries' base and 391 * Ensure it is out of the tlb too. Bolted entries' base and
392 * actual page size will be the same. 392 * actual page size will be the same.
@@ -550,7 +550,11 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
550 } 550 }
551 } 551 }
552 /* This works for all page sizes, and for 256M and 1T segments */ 552 /* This works for all page sizes, and for 256M and 1T segments */
553 *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT; 553 if (cpu_has_feature(CPU_FTR_ARCH_300))
554 *ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT;
555 else
556 *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
557
554 shift = mmu_psize_defs[size].shift; 558 shift = mmu_psize_defs[size].shift;
555 559
556 avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm); 560 avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index b2740c67e172..2971ea18c768 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -201,9 +201,8 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
201 /* 201 /*
202 * We can't allow hardware to update hpte bits. Hence always 202 * We can't allow hardware to update hpte bits. Hence always
203 * set 'R' bit and set 'C' if it is a write fault 203 * set 'R' bit and set 'C' if it is a write fault
204 * Memory coherence is always enabled
205 */ 204 */
206 rflags |= HPTE_R_R | HPTE_R_M; 205 rflags |= HPTE_R_R;
207 206
208 if (pteflags & _PAGE_DIRTY) 207 if (pteflags & _PAGE_DIRTY)
209 rflags |= HPTE_R_C; 208 rflags |= HPTE_R_C;
@@ -213,10 +212,15 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
213 212
214 if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT) 213 if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
215 rflags |= HPTE_R_I; 214 rflags |= HPTE_R_I;
216 if ((pteflags & _PAGE_CACHE_CTL ) == _PAGE_NON_IDEMPOTENT) 215 else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
217 rflags |= (HPTE_R_I | HPTE_R_G); 216 rflags |= (HPTE_R_I | HPTE_R_G);
218 if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO) 217 else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
219 rflags |= (HPTE_R_I | HPTE_R_W); 218 rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
219 else
220 /*
221 * Add memory coherence if cache inhibited is not set
222 */
223 rflags |= HPTE_R_M;
220 224
221 return rflags; 225 return rflags;
222} 226}
@@ -918,6 +922,10 @@ void __init hash__early_init_mmu(void)
918 vmemmap = (struct page *)H_VMEMMAP_BASE; 922 vmemmap = (struct page *)H_VMEMMAP_BASE;
919 ioremap_bot = IOREMAP_BASE; 923 ioremap_bot = IOREMAP_BASE;
920 924
925#ifdef CONFIG_PCI
926 pci_io_base = ISA_IO_BASE;
927#endif
928
921 /* Initialize the MMU Hash table and create the linear mapping 929 /* Initialize the MMU Hash table and create the linear mapping
922 * of memory. Has to be done before SLB initialization as this is 930 * of memory. Has to be done before SLB initialization as this is
923 * currently where the page size encoding is obtained. 931 * currently where the page size encoding is obtained.
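
The rewritten block above makes the three cache-control cases mutually exclusive and adds HPTE_R_M (memory coherence) to everything except the cache-inhibited mappings. A restatement of just that decision tree, with invented flag values standing in for the real HPTE_R_* bits:

#define R_I 0x8UL	/* cache inhibited */
#define R_G 0x4UL	/* guarded */
#define R_W 0x2UL	/* writethrough; W|I|M encodes SAO in the patch */
#define R_M 0x1UL	/* memory coherence */

enum cache_ctl { CACHEABLE, TOLERANT, NON_IDEMPOTENT, SAO };

static unsigned long cache_rflags(enum cache_ctl ctl)
{
	if (ctl == TOLERANT)
		return R_I;
	else if (ctl == NON_IDEMPOTENT)
		return R_I | R_G;
	else if (ctl == SAO)
		return R_W | R_I | R_M;
	/* plain cacheable memory: coherence only */
	return R_M;
}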
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 5aac1a3f86cd..119d18611500 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -73,7 +73,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
73 cachep = PGT_CACHE(pdshift - pshift); 73 cachep = PGT_CACHE(pdshift - pshift);
74#endif 74#endif
75 75
76 new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT); 76 new = kmem_cache_zalloc(cachep, GFP_KERNEL);
77 77
78 BUG_ON(pshift > HUGEPD_SHIFT_MASK); 78 BUG_ON(pshift > HUGEPD_SHIFT_MASK);
79 BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK); 79 BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 227b2a6c4544..196222227e82 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -65,7 +65,7 @@ static int radix__init_new_context(struct mm_struct *mm, int index)
65 /* 65 /*
66 * set the process table entry, 66 * set the process table entry,
67 */ 67 */
68 rts_field = 3ull << PPC_BITLSHIFT(2); 68 rts_field = radix__get_tree_size();
69 process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE); 69 process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);
70 return 0; 70 return 0;
71} 71}
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index c939e6e57a9e..7931e1496f0d 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -160,9 +160,8 @@ redo:
160 process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT); 160 process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
161 /* 161 /*
162 * Fill in the process table. 162 * Fill in the process table.
163 * we support 52 bits, hence 52-28 = 24, 11000
164 */ 163 */
165 rts_field = 3ull << PPC_BITLSHIFT(2); 164 rts_field = radix__get_tree_size();
166 process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE); 165 process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
167 /* 166 /*
168 * Fill in the partition table. We are supposed to use effective address 167 * Fill in the partition table. We are supposed to use effective address
@@ -176,10 +175,8 @@ redo:
176static void __init radix_init_partition_table(void) 175static void __init radix_init_partition_table(void)
177{ 176{
178 unsigned long rts_field; 177 unsigned long rts_field;
179 /* 178
180 * we support 52 bits, hence 52-28 = 24, 11000 179 rts_field = radix__get_tree_size();
181 */
182 rts_field = 3ull << PPC_BITLSHIFT(2);
183 180
184 BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large."); 181 BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
185 partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT); 182 partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
@@ -331,6 +328,11 @@ void __init radix__early_init_mmu(void)
331 __vmalloc_end = RADIX_VMALLOC_END; 328 __vmalloc_end = RADIX_VMALLOC_END;
332 vmemmap = (struct page *)RADIX_VMEMMAP_BASE; 329 vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
333 ioremap_bot = IOREMAP_BASE; 330 ioremap_bot = IOREMAP_BASE;
331
332#ifdef CONFIG_PCI
333 pci_io_base = ISA_IO_BASE;
334#endif
335
334 /* 336 /*
335 * For now radix also use the same frag size 337 * For now radix also use the same frag size
336 */ 338 */
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index bf7bf32b54f8..7f922f557936 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -84,7 +84,7 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long add
84 pte_t *pte; 84 pte_t *pte;
85 85
86 if (slab_is_available()) { 86 if (slab_is_available()) {
87 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 87 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
88 } else { 88 } else {
89 pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); 89 pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
90 if (pte) 90 if (pte)
@@ -97,7 +97,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
97{ 97{
98 struct page *ptepage; 98 struct page *ptepage;
99 99
100 gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO; 100 gfp_t flags = GFP_KERNEL | __GFP_ZERO;
101 101
102 ptepage = alloc_pages(flags, 0); 102 ptepage = alloc_pages(flags, 0);
103 if (!ptepage) 103 if (!ptepage)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index e009e0604a8a..f5e8d4edb808 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -350,8 +350,7 @@ static pte_t *get_from_cache(struct mm_struct *mm)
350static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel) 350static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
351{ 351{
352 void *ret = NULL; 352 void *ret = NULL;
353 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | 353 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
354 __GFP_REPEAT | __GFP_ZERO);
355 if (!page) 354 if (!page)
356 return NULL; 355 return NULL;
357 if (!kernel && !pgtable_page_ctor(page)) { 356 if (!kernel && !pgtable_page_ctor(page)) {
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 0fdaf93a3e09..ab2f60e812e2 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -18,16 +18,20 @@
18 18
19static DEFINE_RAW_SPINLOCK(native_tlbie_lock); 19static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
20 20
21static inline void __tlbiel_pid(unsigned long pid, int set) 21#define RIC_FLUSH_TLB 0
22#define RIC_FLUSH_PWC 1
23#define RIC_FLUSH_ALL 2
24
25static inline void __tlbiel_pid(unsigned long pid, int set,
26 unsigned long ric)
22{ 27{
23 unsigned long rb,rs,ric,prs,r; 28 unsigned long rb,rs,prs,r;
24 29
25 rb = PPC_BIT(53); /* IS = 1 */ 30 rb = PPC_BIT(53); /* IS = 1 */
26 rb |= set << PPC_BITLSHIFT(51); 31 rb |= set << PPC_BITLSHIFT(51);
27 rs = ((unsigned long)pid) << PPC_BITLSHIFT(31); 32 rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
28 prs = 1; /* process scoped */ 33 prs = 1; /* process scoped */
29 r = 1; /* radix format */ 34 r = 1; /* radix format */
30 ric = 2; /* invalidate all the caches */
31 35
32 asm volatile("ptesync": : :"memory"); 36 asm volatile("ptesync": : :"memory");
33 asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |" 37 asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
@@ -39,25 +43,24 @@ static inline void __tlbiel_pid(unsigned long pid, int set)
39/* 43/*
40 * We use 128 sets in radix mode and 256 sets in hpt mode. 44 * We use 128 sets in radix mode and 256 sets in hpt mode.
41 */ 45 */
42static inline void _tlbiel_pid(unsigned long pid) 46static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
43{ 47{
44 int set; 48 int set;
45 49
46 for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) { 50 for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
47 __tlbiel_pid(pid, set); 51 __tlbiel_pid(pid, set, ric);
48 } 52 }
49 return; 53 return;
50} 54}
51 55
52static inline void _tlbie_pid(unsigned long pid) 56static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
53{ 57{
54 unsigned long rb,rs,ric,prs,r; 58 unsigned long rb,rs,prs,r;
55 59
56 rb = PPC_BIT(53); /* IS = 1 */ 60 rb = PPC_BIT(53); /* IS = 1 */
57 rs = pid << PPC_BITLSHIFT(31); 61 rs = pid << PPC_BITLSHIFT(31);
58 prs = 1; /* process scoped */ 62 prs = 1; /* process scoped */
59 r = 1; /* radix format */ 63 r = 1; /* radix format */
60 ric = 2; /* invalidate all the caches */
61 64
62 asm volatile("ptesync": : :"memory"); 65 asm volatile("ptesync": : :"memory");
63 asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |" 66 asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
@@ -67,16 +70,15 @@ static inline void _tlbie_pid(unsigned long pid)
67} 70}
68 71
69static inline void _tlbiel_va(unsigned long va, unsigned long pid, 72static inline void _tlbiel_va(unsigned long va, unsigned long pid,
70 unsigned long ap) 73 unsigned long ap, unsigned long ric)
71{ 74{
72 unsigned long rb,rs,ric,prs,r; 75 unsigned long rb,rs,prs,r;
73 76
74 rb = va & ~(PPC_BITMASK(52, 63)); 77 rb = va & ~(PPC_BITMASK(52, 63));
75 rb |= ap << PPC_BITLSHIFT(58); 78 rb |= ap << PPC_BITLSHIFT(58);
76 rs = pid << PPC_BITLSHIFT(31); 79 rs = pid << PPC_BITLSHIFT(31);
77 prs = 1; /* process scoped */ 80 prs = 1; /* process scoped */
78 r = 1; /* radix format */ 81 r = 1; /* radix format */
79 ric = 0; /* no cluster flush yet */
80 82
81 asm volatile("ptesync": : :"memory"); 83 asm volatile("ptesync": : :"memory");
82 asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |" 84 asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
@@ -86,16 +88,15 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
86} 88}
87 89
88static inline void _tlbie_va(unsigned long va, unsigned long pid, 90static inline void _tlbie_va(unsigned long va, unsigned long pid,
89 unsigned long ap) 91 unsigned long ap, unsigned long ric)
90{ 92{
91 unsigned long rb,rs,ric,prs,r; 93 unsigned long rb,rs,prs,r;
92 94
93 rb = va & ~(PPC_BITMASK(52, 63)); 95 rb = va & ~(PPC_BITMASK(52, 63));
94 rb |= ap << PPC_BITLSHIFT(58); 96 rb |= ap << PPC_BITLSHIFT(58);
95 rs = pid << PPC_BITLSHIFT(31); 97 rs = pid << PPC_BITLSHIFT(31);
96 prs = 1; /* process scoped */ 98 prs = 1; /* process scoped */
97 r = 1; /* radix format */ 99 r = 1; /* radix format */
98 ric = 0; /* no cluster flush yet */
99 100
100 asm volatile("ptesync": : :"memory"); 101 asm volatile("ptesync": : :"memory");
101 asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |" 102 asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
@@ -117,25 +118,40 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
117 */ 118 */
118void radix__local_flush_tlb_mm(struct mm_struct *mm) 119void radix__local_flush_tlb_mm(struct mm_struct *mm)
119{ 120{
120 unsigned int pid; 121 unsigned long pid;
121 122
122 preempt_disable(); 123 preempt_disable();
123 pid = mm->context.id; 124 pid = mm->context.id;
124 if (pid != MMU_NO_CONTEXT) 125 if (pid != MMU_NO_CONTEXT)
125 _tlbiel_pid(pid); 126 _tlbiel_pid(pid, RIC_FLUSH_ALL);
126 preempt_enable(); 127 preempt_enable();
127} 128}
128EXPORT_SYMBOL(radix__local_flush_tlb_mm); 129EXPORT_SYMBOL(radix__local_flush_tlb_mm);
129 130
131void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
132{
133 unsigned long pid;
134 struct mm_struct *mm = tlb->mm;
135
136 preempt_disable();
137
138 pid = mm->context.id;
139 if (pid != MMU_NO_CONTEXT)
140 _tlbiel_pid(pid, RIC_FLUSH_PWC);
141
142 preempt_enable();
143}
144EXPORT_SYMBOL(radix__local_flush_tlb_pwc);
145
130void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, 146void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
131 unsigned long ap, int nid) 147 unsigned long ap, int nid)
132{ 148{
133 unsigned int pid; 149 unsigned long pid;
134 150
135 preempt_disable(); 151 preempt_disable();
136 pid = mm ? mm->context.id : 0; 152 pid = mm ? mm->context.id : 0;
137 if (pid != MMU_NO_CONTEXT) 153 if (pid != MMU_NO_CONTEXT)
138 _tlbiel_va(vmaddr, pid, ap); 154 _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
139 preempt_enable(); 155 preempt_enable();
140} 156}
141 157
@@ -160,7 +176,7 @@ static int mm_is_core_local(struct mm_struct *mm)
160 176
161void radix__flush_tlb_mm(struct mm_struct *mm) 177void radix__flush_tlb_mm(struct mm_struct *mm)
162{ 178{
163 unsigned int pid; 179 unsigned long pid;
164 180
165 preempt_disable(); 181 preempt_disable();
166 pid = mm->context.id; 182 pid = mm->context.id;
@@ -172,20 +188,46 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
172 188
173 if (lock_tlbie) 189 if (lock_tlbie)
174 raw_spin_lock(&native_tlbie_lock); 190 raw_spin_lock(&native_tlbie_lock);
175 _tlbie_pid(pid); 191 _tlbie_pid(pid, RIC_FLUSH_ALL);
176 if (lock_tlbie) 192 if (lock_tlbie)
177 raw_spin_unlock(&native_tlbie_lock); 193 raw_spin_unlock(&native_tlbie_lock);
178 } else 194 } else
179 _tlbiel_pid(pid); 195 _tlbiel_pid(pid, RIC_FLUSH_ALL);
180no_context: 196no_context:
181 preempt_enable(); 197 preempt_enable();
182} 198}
183EXPORT_SYMBOL(radix__flush_tlb_mm); 199EXPORT_SYMBOL(radix__flush_tlb_mm);
184 200
201void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
202{
203 unsigned long pid;
204 struct mm_struct *mm = tlb->mm;
205
206 preempt_disable();
207
208 pid = mm->context.id;
209 if (unlikely(pid == MMU_NO_CONTEXT))
210 goto no_context;
211
212 if (!mm_is_core_local(mm)) {
213 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
214
215 if (lock_tlbie)
216 raw_spin_lock(&native_tlbie_lock);
217 _tlbie_pid(pid, RIC_FLUSH_PWC);
218 if (lock_tlbie)
219 raw_spin_unlock(&native_tlbie_lock);
220 } else
221 _tlbiel_pid(pid, RIC_FLUSH_PWC);
222no_context:
223 preempt_enable();
224}
225EXPORT_SYMBOL(radix__flush_tlb_pwc);
226
185void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, 227void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
186 unsigned long ap, int nid) 228 unsigned long ap, int nid)
187{ 229{
188 unsigned int pid; 230 unsigned long pid;
189 231
190 preempt_disable(); 232 preempt_disable();
191 pid = mm ? mm->context.id : 0; 233 pid = mm ? mm->context.id : 0;
@@ -196,11 +238,11 @@ void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
196 238
197 if (lock_tlbie) 239 if (lock_tlbie)
198 raw_spin_lock(&native_tlbie_lock); 240 raw_spin_lock(&native_tlbie_lock);
199 _tlbie_va(vmaddr, pid, ap); 241 _tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
200 if (lock_tlbie) 242 if (lock_tlbie)
201 raw_spin_unlock(&native_tlbie_lock); 243 raw_spin_unlock(&native_tlbie_lock);
202 } else 244 } else
203 _tlbiel_va(vmaddr, pid, ap); 245 _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
204bail: 246bail:
205 preempt_enable(); 247 preempt_enable();
206} 248}
@@ -224,7 +266,7 @@ void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
224 266
225 if (lock_tlbie) 267 if (lock_tlbie)
226 raw_spin_lock(&native_tlbie_lock); 268 raw_spin_lock(&native_tlbie_lock);
227 _tlbie_pid(0); 269 _tlbie_pid(0, RIC_FLUSH_ALL);
228 if (lock_tlbie) 270 if (lock_tlbie)
229 raw_spin_unlock(&native_tlbie_lock); 271 raw_spin_unlock(&native_tlbie_lock);
230} 272}
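
With RIC threaded through every wrapper, one tlbie/tlbiel encoding covers three flush scopes. The mapping, restated as a helper (illustrative only; the kernel just feeds the value into the instruction's RIC field):

enum flush_scope { FLUSH_TLB, FLUSH_PWC, FLUSH_ALL };

static unsigned long ric_for(enum flush_scope scope)
{
	switch (scope) {
	case FLUSH_TLB:
		return 0;	/* RIC_FLUSH_TLB: TLB entries only */
	case FLUSH_PWC:
		return 1;	/* RIC_FLUSH_PWC: page walk cache only */
	default:
		return 2;	/* RIC_FLUSH_ALL: both */
	}
}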
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index b7dfc1359d01..3e8865b187de 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -927,7 +927,7 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
927 dn = pci_device_to_OF_node(dev); 927 dn = pci_device_to_OF_node(dev);
928 pdn = PCI_DN(dn); 928 pdn = PCI_DN(dn);
929 buid = pdn->phb->buid; 929 buid = pdn->phb->buid;
930 cfg_addr = (pdn->busno << 8) | pdn->devfn; 930 cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
931 931
932 ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query, 932 ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
933 cfg_addr, BUID_HI(buid), BUID_LO(buid)); 933 cfg_addr, BUID_HI(buid), BUID_LO(buid));
@@ -956,7 +956,7 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
956 dn = pci_device_to_OF_node(dev); 956 dn = pci_device_to_OF_node(dev);
957 pdn = PCI_DN(dn); 957 pdn = PCI_DN(dn);
958 buid = pdn->phb->buid; 958 buid = pdn->phb->buid;
959 cfg_addr = (pdn->busno << 8) | pdn->devfn; 959 cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
960 960
961 do { 961 do {
962 /* extra outputs are LIOBN and dma-addr (hi, lo) */ 962 /* extra outputs are LIOBN and dma-addr (hi, lo) */
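
Judging by the fix, the ibm,query/create-pe-dma-window RTAS calls expect the bus number in byte 2 and devfn in byte 1 of the config address, so the old (busno << 8) | devfn value sat one byte too low. A demo that prints both encodings for a made-up bus/device/function:

#include <stdio.h>

int main(void)
{
	unsigned int busno = 0x05, devfn = 0x48;	/* sample values */

	printf("old: 0x%06x\n", (busno << 8) | devfn);		/* 0x000548 */
	printf("new: 0x%06x\n", (busno << 16) | (devfn << 8));	/* 0x054800 */
	return 0;
}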
diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
index 5e04f3cbd320..8ae236b0f80b 100644
--- a/arch/s390/include/asm/fpu/api.h
+++ b/arch/s390/include/asm/fpu/api.h
@@ -22,7 +22,7 @@ static inline int test_fp_ctl(u32 fpc)
22 " la %0,0\n" 22 " la %0,0\n"
23 "1:\n" 23 "1:\n"
24 EX_TABLE(0b,1b) 24 EX_TABLE(0b,1b)
25 : "=d" (rc), "=d" (orig_fpc) 25 : "=d" (rc), "=&d" (orig_fpc)
26 : "d" (fpc), "0" (-EINVAL)); 26 : "d" (fpc), "0" (-EINVAL));
27 return rc; 27 return rc;
28} 28}
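
The added '&' marks orig_fpc as earlyclobber, telling the compiler it may not share a register with the fpc input, which the asm body still reads after its first write. A portable, constraint-only demo (empty template, generic 'r' constraints instead of s390's 'd'):

static inline int earlyclobber_demo(unsigned int in)
{
	int rc;
	unsigned int scratch;

	asm volatile(""
		     : "=r" (rc), "=&r" (scratch)
		     : "r" (in), "0" (-22 /* -EINVAL */));
	(void)scratch;
	return rc;
}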
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 37b9017c6a96..ac82e8eb936d 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -245,6 +245,7 @@ struct kvm_vcpu_stat {
245 u32 exit_stop_request; 245 u32 exit_stop_request;
246 u32 exit_validity; 246 u32 exit_validity;
247 u32 exit_instruction; 247 u32 exit_instruction;
248 u32 exit_pei;
248 u32 halt_successful_poll; 249 u32 halt_successful_poll;
249 u32 halt_attempted_poll; 250 u32 halt_attempted_poll;
250 u32 halt_poll_invalid; 251 u32 halt_poll_invalid;
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index f20abdb5630a..d14069d4b88d 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2064,12 +2064,5 @@ void s390_reset_system(void)
2064 S390_lowcore.program_new_psw.addr = 2064 S390_lowcore.program_new_psw.addr =
2065 (unsigned long) s390_base_pgm_handler; 2065 (unsigned long) s390_base_pgm_handler;
2066 2066
2067 /*
2068 * Clear subchannel ID and number to signal new kernel that no CCW or
2069 * SCSI IPL has been done (for kexec and kdump)
2070 */
2071 S390_lowcore.subchannel_id = 0;
2072 S390_lowcore.subchannel_nr = 0;
2073
2074 do_reset_calls(); 2067 do_reset_calls();
2075} 2068}
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 59215c518f37..7ec63b1d920d 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -649,6 +649,8 @@ static int cpumf_pmu_commit_txn(struct pmu *pmu)
649 649
650/* Performance monitoring unit for s390x */ 650/* Performance monitoring unit for s390x */
651static struct pmu cpumf_pmu = { 651static struct pmu cpumf_pmu = {
652 .task_ctx_nr = perf_sw_context,
653 .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
652 .pmu_enable = cpumf_pmu_enable, 654 .pmu_enable = cpumf_pmu_enable,
653 .pmu_disable = cpumf_pmu_disable, 655 .pmu_disable = cpumf_pmu_disable,
654 .event_init = cpumf_pmu_event_init, 656 .event_init = cpumf_pmu_event_init,
@@ -708,12 +710,6 @@ static int __init cpumf_pmu_init(void)
708 goto out; 710 goto out;
709 } 711 }
710 712
711 /* The CPU measurement counter facility does not have overflow
712 * interrupts to do sampling. Sampling must be provided by
713 * external means, for example, by timers.
714 */
715 cpumf_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
716
717 cpumf_pmu.attr_groups = cpumf_cf_event_group(); 713 cpumf_pmu.attr_groups = cpumf_cf_event_group();
718 rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); 714 rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
719 if (rc) { 715 if (rc) {
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 2e6b54e4d3f9..252157181302 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -341,6 +341,8 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
341 341
342static int handle_partial_execution(struct kvm_vcpu *vcpu) 342static int handle_partial_execution(struct kvm_vcpu *vcpu)
343{ 343{
344 vcpu->stat.exit_pei++;
345
344 if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */ 346 if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */
345 return handle_mvpg_pei(vcpu); 347 return handle_mvpg_pei(vcpu);
346 if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */ 348 if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 6d8ec3ac9dd8..43f2a2b80490 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -61,6 +61,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
61 { "exit_external_request", VCPU_STAT(exit_external_request) }, 61 { "exit_external_request", VCPU_STAT(exit_external_request) },
62 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) }, 62 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
63 { "exit_instruction", VCPU_STAT(exit_instruction) }, 63 { "exit_instruction", VCPU_STAT(exit_instruction) },
64 { "exit_pei", VCPU_STAT(exit_pei) },
64 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) }, 65 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
65 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, 66 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
66 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, 67 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
@@ -657,7 +658,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
657 kvm->arch.model.cpuid = proc->cpuid; 658 kvm->arch.model.cpuid = proc->cpuid;
658 lowest_ibc = sclp.ibc >> 16 & 0xfff; 659 lowest_ibc = sclp.ibc >> 16 & 0xfff;
659 unblocked_ibc = sclp.ibc & 0xfff; 660 unblocked_ibc = sclp.ibc & 0xfff;
660 if (lowest_ibc) { 661 if (lowest_ibc && proc->ibc) {
661 if (proc->ibc > unblocked_ibc) 662 if (proc->ibc > unblocked_ibc)
662 kvm->arch.model.ibc = unblocked_ibc; 663 kvm->arch.model.ibc = unblocked_ibc;
663 else if (proc->ibc < lowest_ibc) 664 else if (proc->ibc < lowest_ibc)
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index e8b5962ac12a..e2565d2d0c32 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -169,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
169 return table; 169 return table;
170 } 170 }
171 /* Allocate a fresh page */ 171 /* Allocate a fresh page */
172 page = alloc_page(GFP_KERNEL|__GFP_REPEAT); 172 page = alloc_page(GFP_KERNEL);
173 if (!page) 173 if (!page)
174 return NULL; 174 return NULL;
175 if (!pgtable_page_ctor(page)) { 175 if (!pgtable_page_ctor(page)) {
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 4324b87f9398..9f0ce0e6eeb4 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -437,7 +437,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
437 pgste = pgste_get_lock(ptep); 437 pgste = pgste_get_lock(ptep);
438 pgstev = pgste_val(pgste); 438 pgstev = pgste_val(pgste);
439 pte = *ptep; 439 pte = *ptep;
440 if (pte_swap(pte) && 440 if (!reset && pte_swap(pte) &&
441 ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED || 441 ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
442 (pgstev & _PGSTE_GPS_ZERO))) { 442 (pgstev & _PGSTE_GPS_ZERO))) {
443 ptep_zap_swap_entry(mm, pte_to_swp_entry(pte)); 443 ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
diff --git a/arch/score/include/asm/pgalloc.h b/arch/score/include/asm/pgalloc.h
index 2e067657db98..49b012d78c1a 100644
--- a/arch/score/include/asm/pgalloc.h
+++ b/arch/score/include/asm/pgalloc.h
@@ -42,8 +42,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
42{ 42{
43 pte_t *pte; 43 pte_t *pte;
44 44
45 pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 45 pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
46 PTE_ORDER);
47 46
48 return pte; 47 return pte;
49} 48}
@@ -53,7 +52,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
53{ 52{
54 struct page *pte; 53 struct page *pte;
55 54
56 pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); 55 pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
57 if (!pte) 56 if (!pte)
58 return NULL; 57 return NULL;
59 clear_highpage(pte); 58 clear_highpage(pte);
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index a33673b3687d..f3f42c84c40f 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -34,7 +34,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
34static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 34static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
35 unsigned long address) 35 unsigned long address)
36{ 36{
37 return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL); 37 return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
38} 38}
39 39
40static inline pgtable_t pte_alloc_one(struct mm_struct *mm, 40static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -43,7 +43,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
43 struct page *page; 43 struct page *page;
44 void *pg; 44 void *pg;
45 45
46 pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL); 46 pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
47 if (!pg) 47 if (!pg)
48 return NULL; 48 return NULL;
49 page = virt_to_page(pg); 49 page = virt_to_page(pg);
diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c
index 26e03a1f7ca4..a62bd8696779 100644
--- a/arch/sh/mm/pgtable.c
+++ b/arch/sh/mm/pgtable.c
@@ -1,7 +1,7 @@
1#include <linux/mm.h> 1#include <linux/mm.h>
2#include <linux/slab.h> 2#include <linux/slab.h>
3 3
4#define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO 4#define PGALLOC_GFP GFP_KERNEL | __GFP_ZERO
5 5
6static struct kmem_cache *pgd_cachep; 6static struct kmem_cache *pgd_cachep;
7#if PAGETABLE_LEVELS > 2 7#if PAGETABLE_LEVELS > 2
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
index 5e3187185b4a..3529f1378cd8 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -41,8 +41,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
41 41
42static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) 42static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
43{ 43{
44 return kmem_cache_alloc(pgtable_cache, 44 return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
45 GFP_KERNEL|__GFP_REPEAT);
46} 45}
47 46
48static inline void pud_free(struct mm_struct *mm, pud_t *pud) 47static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -52,8 +51,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
52 51
53static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) 52static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
54{ 53{
55 return kmem_cache_alloc(pgtable_cache, 54 return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
56 GFP_KERNEL|__GFP_REPEAT);
57} 55}
58 56
59static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) 57static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 14bb0d5ed3c6..aec508e37490 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2704,8 +2704,7 @@ void __flush_tlb_all(void)
2704pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 2704pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
2705 unsigned long address) 2705 unsigned long address)
2706{ 2706{
2707 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | 2707 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
2708 __GFP_REPEAT | __GFP_ZERO);
2709 pte_t *pte = NULL; 2708 pte_t *pte = NULL;
2710 2709
2711 if (page) 2710 if (page)
@@ -2717,8 +2716,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
2717pgtable_t pte_alloc_one(struct mm_struct *mm, 2716pgtable_t pte_alloc_one(struct mm_struct *mm,
2718 unsigned long address) 2717 unsigned long address)
2719{ 2718{
2720 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | 2719 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
2721 __GFP_REPEAT | __GFP_ZERO);
2722 if (!page) 2720 if (!page)
2723 return NULL; 2721 return NULL;
2724 if (!pgtable_page_ctor(page)) { 2722 if (!pgtable_page_ctor(page)) {
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 4b7cef9e94e0..c1467ac59ce6 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -78,7 +78,7 @@ struct thread_info {
78 78
79#ifndef __ASSEMBLY__ 79#ifndef __ASSEMBLY__
80 80
81void arch_release_thread_info(struct thread_info *info); 81void arch_release_thread_stack(unsigned long *stack);
82 82
83/* How to get the thread information struct from C. */ 83/* How to get the thread information struct from C. */
84register unsigned long stack_pointer __asm__("sp"); 84register unsigned long stack_pointer __asm__("sp");
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 6b705ccc9cc1..a465d8372edd 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -73,8 +73,9 @@ void arch_cpu_idle(void)
73/* 73/*
74 * Release a thread_info structure 74 * Release a thread_info structure
75 */ 75 */
76void arch_release_thread_info(struct thread_info *info) 76void arch_release_thread_stack(unsigned long *stack)
77{ 77{
78 struct thread_info *info = (void *)stack;
78 struct single_step_state *step_state = info->step_state; 79 struct single_step_state *step_state = info->step_state;
79 80
80 if (step_state) { 81 if (step_state) {
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 7bf2491a9c1f..c4d5bf841a7f 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -231,7 +231,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
231struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address, 231struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
232 int order) 232 int order)
233{ 233{
234 gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO; 234 gfp_t flags = GFP_KERNEL|__GFP_ZERO;
235 struct page *p; 235 struct page *p;
236 int i; 236 int i;
237 237
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index b2a2dff50b4e..e7437ec62710 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -204,7 +204,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
204{ 204{
205 pte_t *pte; 205 pte_t *pte;
206 206
207 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 207 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
208 return pte; 208 return pte;
209} 209}
210 210
@@ -212,7 +212,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
212{ 212{
213 struct page *pte; 213 struct page *pte;
214 214
215 pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 215 pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
216 if (!pte) 216 if (!pte)
217 return NULL; 217 return NULL;
218 if (!pgtable_page_ctor(pte)) { 218 if (!pgtable_page_ctor(pte)) {
diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h
index 2e02d1356fdf..26775793c204 100644
--- a/arch/unicore32/include/asm/pgalloc.h
+++ b/arch/unicore32/include/asm/pgalloc.h
@@ -28,7 +28,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
28#define pgd_alloc(mm) get_pgd_slow(mm) 28#define pgd_alloc(mm) get_pgd_slow(mm)
29#define pgd_free(mm, pgd) free_pgd_slow(mm, pgd) 29#define pgd_free(mm, pgd) free_pgd_slow(mm, pgd)
30 30
31#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) 31#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
32 32
33/* 33/*
34 * Allocate one PTE table. 34 * Allocate one PTE table.
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0a7b885964ba..d9a94da0c29f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2439,6 +2439,15 @@ config PCI_CNB20LE_QUIRK
2439 2439
2440source "drivers/pci/Kconfig" 2440source "drivers/pci/Kconfig"
2441 2441
2442config ISA_BUS
2443 bool "ISA-style bus support on modern systems" if EXPERT
2444 select ISA_BUS_API
2445 help
2446 Enables ISA-style drivers on modern systems. This is necessary to
2447 support PC/104 devices on X86_64 platforms.
2448
2449 If unsure, say N.
2450
2442# x86_64 has no ISA slots, but can have ISA-style DMA. 2451# x86_64 has no ISA slots, but can have ISA-style DMA.
2443config ISA_DMA_API 2452config ISA_DMA_API
2444 bool "ISA-style DMA support" if (X86_64 && EXPERT) 2453 bool "ISA-style DMA support" if (X86_64 && EXPERT)
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 700a9c6e6159..be8e688fa0d4 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -162,6 +162,9 @@ isoimage: $(obj)/bzImage
162 for i in lib lib64 share end ; do \ 162 for i in lib lib64 share end ; do \
163 if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \ 163 if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
164 cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \ 164 cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
165 if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
166 cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
167 fi ; \
165 break ; \ 168 break ; \
166 fi ; \ 169 fi ; \
167 if [ $$i = end ] ; then exit 1 ; fi ; \ 170 if [ $$i = end ] ; then exit 1 ; fi ; \
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 99c4bab123cd..e30eef4f29a6 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -714,7 +714,7 @@ static void cleanup_rapl_pmus(void)
714 int i; 714 int i;
715 715
716 for (i = 0; i < rapl_pmus->maxpkg; i++) 716 for (i = 0; i < rapl_pmus->maxpkg; i++)
717 kfree(rapl_pmus->pmus + i); 717 kfree(rapl_pmus->pmus[i]);
718 kfree(rapl_pmus); 718 kfree(rapl_pmus);
719} 719}
720 720
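
rapl_pmus->pmus appears to be an array of pointers to individually allocated per-package objects, so each element has to be freed; the old kfree(rapl_pmus->pmus + i) passed kfree() interior addresses of the containing allocation instead. The generic shape of the fix:

#include <stdlib.h>

struct obj { int payload; };

static void cleanup(struct obj **objs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		free(objs[i]);	/* was: free(objs + i), an interior pointer */
	free(objs);		/* then the pointer array itself */
}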
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index b2625867ebd1..874e8bd64d1d 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -2868,27 +2868,10 @@ static struct intel_uncore_type bdx_uncore_cbox = {
2868 .format_group = &hswep_uncore_cbox_format_group, 2868 .format_group = &hswep_uncore_cbox_format_group,
2869}; 2869};
2870 2870
2871static struct intel_uncore_type bdx_uncore_sbox = {
2872 .name = "sbox",
2873 .num_counters = 4,
2874 .num_boxes = 4,
2875 .perf_ctr_bits = 48,
2876 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
2877 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
2878 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
2879 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
2880 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
2881 .ops = &hswep_uncore_sbox_msr_ops,
2882 .format_group = &hswep_uncore_sbox_format_group,
2883};
2884
2885#define BDX_MSR_UNCORE_SBOX 3
2886
2887static struct intel_uncore_type *bdx_msr_uncores[] = { 2871static struct intel_uncore_type *bdx_msr_uncores[] = {
2888 &bdx_uncore_ubox, 2872 &bdx_uncore_ubox,
2889 &bdx_uncore_cbox, 2873 &bdx_uncore_cbox,
2890 &hswep_uncore_pcu, 2874 &hswep_uncore_pcu,
2891 &bdx_uncore_sbox,
2892 NULL, 2875 NULL,
2893}; 2876};
2894 2877
@@ -2897,10 +2880,6 @@ void bdx_uncore_cpu_init(void)
2897 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) 2880 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2898 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; 2881 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2899 uncore_msr_uncores = bdx_msr_uncores; 2882 uncore_msr_uncores = bdx_msr_uncores;
2900
2901 /* BDX-DE doesn't have SBOX */
2902 if (boot_cpu_data.x86_model == 86)
2903 uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
2904} 2883}
2905 2884
2906static struct intel_uncore_type bdx_uncore_ha = { 2885static struct intel_uncore_type bdx_uncore_ha = {
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
new file mode 100644
index 000000000000..6999f7d01a0d
--- /dev/null
+++ b/arch/x86/include/asm/intel-family.h
@@ -0,0 +1,68 @@
1#ifndef _ASM_X86_INTEL_FAMILY_H
2#define _ASM_X86_INTEL_FAMILY_H
3
4/*
5 * "Big Core" Processors (Branded as Core, Xeon, etc...)
6 *
7 * The "_X" parts are generally the EP and EX Xeons, or the
8 * "Extreme" ones, like Broadwell-E.
9 *
10 * Things ending in "2" are usually because we have no better
11 * name for them. There's no processor called "WESTMERE2".
12 */
13
14#define INTEL_FAM6_CORE_YONAH 0x0E
15#define INTEL_FAM6_CORE2_MEROM 0x0F
16#define INTEL_FAM6_CORE2_MEROM_L 0x16
17#define INTEL_FAM6_CORE2_PENRYN 0x17
18#define INTEL_FAM6_CORE2_DUNNINGTON 0x1D
19
20#define INTEL_FAM6_NEHALEM 0x1E
21#define INTEL_FAM6_NEHALEM_EP 0x1A
22#define INTEL_FAM6_NEHALEM_EX 0x2E
23#define INTEL_FAM6_WESTMERE 0x25
24#define INTEL_FAM6_WESTMERE2 0x1F
25#define INTEL_FAM6_WESTMERE_EP 0x2C
26#define INTEL_FAM6_WESTMERE_EX 0x2F
27
28#define INTEL_FAM6_SANDYBRIDGE 0x2A
29#define INTEL_FAM6_SANDYBRIDGE_X 0x2D
30#define INTEL_FAM6_IVYBRIDGE 0x3A
31#define INTEL_FAM6_IVYBRIDGE_X 0x3E
32
33#define INTEL_FAM6_HASWELL_CORE 0x3C
34#define INTEL_FAM6_HASWELL_X 0x3F
35#define INTEL_FAM6_HASWELL_ULT 0x45
36#define INTEL_FAM6_HASWELL_GT3E 0x46
37
38#define INTEL_FAM6_BROADWELL_CORE 0x3D
39#define INTEL_FAM6_BROADWELL_XEON_D 0x56
40#define INTEL_FAM6_BROADWELL_GT3E 0x47
41#define INTEL_FAM6_BROADWELL_X 0x4F
42
43#define INTEL_FAM6_SKYLAKE_MOBILE 0x4E
44#define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E
45#define INTEL_FAM6_SKYLAKE_X 0x55
46#define INTEL_FAM6_KABYLAKE_MOBILE 0x8E
47#define INTEL_FAM6_KABYLAKE_DESKTOP 0x9E
48
49/* "Small Core" Processors (Atom) */
50
51#define INTEL_FAM6_ATOM_PINEVIEW 0x1C
52#define INTEL_FAM6_ATOM_LINCROFT 0x26
53#define INTEL_FAM6_ATOM_PENWELL 0x27
54#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35
55#define INTEL_FAM6_ATOM_CEDARVIEW 0x36
56#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */
57#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avoton/Rangeley */
58#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
59#define INTEL_FAM6_ATOM_MERRIFIELD1 0x4A /* Tangier */
60#define INTEL_FAM6_ATOM_MERRIFIELD2 0x5A /* Anniedale */
61#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
62#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
63
64/* Xeon Phi */
65
66#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
67
68#endif /* _ASM_X86_INTEL_FAMILY_H */
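The point of this header shows up earlier in this very series: the uncore_snbep.c hunk above tests boot_cpu_data.x86_model against a bare 86, which is 0x56, i.e. INTEL_FAM6_BROADWELL_XEON_D. A minimal userspace sketch of the intended usage (the two defines are copied from the header; model_name() and the sample value are made up for illustration):

#include <stdio.h>

#define INTEL_FAM6_SKYLAKE_X        0x55
#define INTEL_FAM6_BROADWELL_XEON_D 0x56

static const char *model_name(unsigned int model)
{
        switch (model) {
        case INTEL_FAM6_SKYLAKE_X:
                return "Skylake-X";
        case INTEL_FAM6_BROADWELL_XEON_D:
                return "Broadwell-DE";
        default:
                return "unknown";
        }
}

int main(void)
{
        /* in kernel code the model comes from boot_cpu_data.x86_model */
        printf("model 0x56 is %s\n", model_name(0x56));
        return 0;
}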
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 4421b5da409d..d1d1e5094c28 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -38,12 +38,11 @@ typedef u8 kprobe_opcode_t;
38#define RELATIVECALL_OPCODE 0xe8 38#define RELATIVECALL_OPCODE 0xe8
39#define RELATIVE_ADDR_SIZE 4 39#define RELATIVE_ADDR_SIZE 4
40#define MAX_STACK_SIZE 64 40#define MAX_STACK_SIZE 64
41#define MIN_STACK_SIZE(ADDR) \ 41#define CUR_STACK_SIZE(ADDR) \
42 (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \ 42 (current_top_of_stack() - (unsigned long)(ADDR))
43 THREAD_SIZE - (unsigned long)(ADDR))) \ 43#define MIN_STACK_SIZE(ADDR) \
44 ? (MAX_STACK_SIZE) \ 44 (MAX_STACK_SIZE < CUR_STACK_SIZE(ADDR) ? \
45 : (((unsigned long)current_thread_info()) + \ 45 MAX_STACK_SIZE : CUR_STACK_SIZE(ADDR))
46 THREAD_SIZE - (unsigned long)(ADDR)))
47 46
48#define flush_insn_slot(p) do { } while (0) 47#define flush_insn_slot(p) do { } while (0)
49 48
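The rewrite folds the old open-coded ternary into two macros: CUR_STACK_SIZE() measures the bytes between ADDR and the top of the current stack (now via current_top_of_stack() rather than current_thread_info() + THREAD_SIZE), and MIN_STACK_SIZE() clamps that distance to MAX_STACK_SIZE. A minimal userspace sketch of the same clamping, with stack_top passed in as a stand-in for current_top_of_stack():

#include <stdio.h>

#define MAX_STACK_SIZE 64
#define CUR_STACK_SIZE(top, addr) ((top) - (unsigned long)(addr))
#define MIN_STACK_SIZE(top, addr)                       \
        (MAX_STACK_SIZE < CUR_STACK_SIZE(top, addr) ?   \
         MAX_STACK_SIZE : CUR_STACK_SIZE(top, addr))

int main(void)
{
        unsigned long top = 0x1000;

        printf("%lu\n", MIN_STACK_SIZE(top, 0xff0)); /* 16: below the cap */
        printf("%lu\n", MIN_STACK_SIZE(top, 0xe00)); /* 64: clamped */
        return 0;
}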
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e0fbe7e70dc1..69e62862b622 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -27,6 +27,7 @@
27#include <linux/irqbypass.h> 27#include <linux/irqbypass.h>
28#include <linux/hyperv.h> 28#include <linux/hyperv.h>
29 29
30#include <asm/apic.h>
30#include <asm/pvclock-abi.h> 31#include <asm/pvclock-abi.h>
31#include <asm/desc.h> 32#include <asm/desc.h>
32#include <asm/mtrr.h> 33#include <asm/mtrr.h>
@@ -1368,4 +1369,14 @@ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
1368 1369
1369static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} 1370static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
1370 1371
1372static inline int kvm_cpu_get_apicid(int mps_cpu)
1373{
1374#ifdef CONFIG_X86_LOCAL_APIC
1375 return __default_cpu_present_to_apicid(mps_cpu);
1376#else
1377 WARN_ON_ONCE(1);
1378 return BAD_APICID;
1379#endif
1380}
1381
1371#endif /* _ASM_X86_KVM_HOST_H */ 1382#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 7dc1d8fef7fd..b5fee97813cd 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -122,7 +122,7 @@ notrace static inline void native_write_msr(unsigned int msr,
122 "2:\n" 122 "2:\n"
123 _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe) 123 _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
124 : : "c" (msr), "a"(low), "d" (high) : "memory"); 124 : : "c" (msr), "a"(low), "d" (high) : "memory");
125 if (msr_tracepoint_active(__tracepoint_read_msr)) 125 if (msr_tracepoint_active(__tracepoint_write_msr))
126 do_trace_write_msr(msr, ((u64)high << 32 | low), 0); 126 do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
127} 127}
128 128
@@ -141,7 +141,7 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
141 : "c" (msr), "0" (low), "d" (high), 141 : "c" (msr), "0" (low), "d" (high),
142 [fault] "i" (-EIO) 142 [fault] "i" (-EIO)
143 : "memory"); 143 : "memory");
144 if (msr_tracepoint_active(__tracepoint_read_msr)) 144 if (msr_tracepoint_active(__tracepoint_write_msr))
145 do_trace_write_msr(msr, ((u64)high << 32 | low), err); 145 do_trace_write_msr(msr, ((u64)high << 32 | low), err);
146 return err; 146 return err;
147} 147}
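Both hunks fix the same copy-paste slip: the two write paths gated their trace emission on the read tracepoint, so write events fired exactly when the wrong tracepoint happened to be enabled. A minimal sketch of the rule the fix restores, with plain booleans standing in for msr_tracepoint_active():

#include <stdbool.h>
#include <stdio.h>

static bool read_trace_on;
static bool write_trace_on;

static void write_msr(unsigned int msr, unsigned long long val)
{
        /* correct: the guard matches the event being emitted */
        if (write_trace_on)
                printf("write_msr %#x = %#llx\n", msr, val);
}

int main(void)
{
        write_trace_on = true;
        write_msr(0x1a0, 0x42);         /* traced */

        write_trace_on = false;
        read_trace_on = true;           /* must not re-enable write events */
        write_msr(0x1a0, 0x43);         /* correctly silent */
        return 0;
}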
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index bf7f8b55b0f9..574c23cf761a 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -81,7 +81,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
81static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) 81static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
82{ 82{
83 struct page *page; 83 struct page *page;
84 page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0); 84 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
85 if (!page) 85 if (!page)
86 return NULL; 86 return NULL;
87 if (!pgtable_pmd_page_ctor(page)) { 87 if (!pgtable_pmd_page_ctor(page)) {
@@ -125,7 +125,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
125 125
126static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) 126static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
127{ 127{
128 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); 128 return (pud_t *)get_zeroed_page(GFP_KERNEL);
129} 129}
130 130
131static inline void pud_free(struct mm_struct *mm, pud_t *pud) 131static inline void pud_free(struct mm_struct *mm, pud_t *pud)
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index fdcc04020636..7c1c89598688 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -69,29 +69,22 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
69} 69}
70 70
71static __always_inline 71static __always_inline
72u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
73{
74 u64 delta = rdtsc_ordered() - src->tsc_timestamp;
75 return pvclock_scale_delta(delta, src->tsc_to_system_mul,
76 src->tsc_shift);
77}
78
79static __always_inline
80unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, 72unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
81 cycle_t *cycles, u8 *flags) 73 cycle_t *cycles, u8 *flags)
82{ 74{
83 unsigned version; 75 unsigned version;
84 cycle_t ret, offset; 76 cycle_t offset;
85 u8 ret_flags; 77 u64 delta;
86 78
87 version = src->version; 79 version = src->version;
80 /* Make the latest version visible */
81 smp_rmb();
88 82
89 offset = pvclock_get_nsec_offset(src); 83 delta = rdtsc_ordered() - src->tsc_timestamp;
90 ret = src->system_time + offset; 84 offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
91 ret_flags = src->flags; 85 src->tsc_shift);
92 86 *cycles = src->system_time + offset;
93 *cycles = ret; 87 *flags = src->flags;
94 *flags = ret_flags;
95 return version; 88 return version;
96} 89}
97 90
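The restructured reader is a seqcount-style pattern: load the version, order that load before the payload reads with smp_rmb(), then return the version so the caller can retry if a writer was active (odd version) or raced past. A minimal C11 userspace sketch, with atomic_thread_fence(memory_order_acquire) standing in for smp_rmb() and the struct reduced to one payload field:

#include <stdatomic.h>
#include <stdio.h>

struct time_info {
        unsigned int version;           /* odd while a writer is mid-update */
        unsigned long long system_time;
};

static unsigned int read_record(const struct time_info *src,
                                unsigned long long *t)
{
        unsigned int version = src->version;

        atomic_thread_fence(memory_order_acquire);      /* "smp_rmb()" */
        *t = src->system_time;
        return version;
}

int main(void)
{
        struct time_info ti = { .version = 2, .system_time = 123 };
        unsigned long long t;
        unsigned int v = read_record(&ti, &t);

        printf("version=%u t=%llu\n", v, t);
        return 0;
}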
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 7c247e7404be..0944218af9e2 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -14,7 +14,7 @@ extern int kstack_depth_to_print;
14struct thread_info; 14struct thread_info;
15struct stacktrace_ops; 15struct stacktrace_ops;
16 16
17typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo, 17typedef unsigned long (*walk_stack_t)(struct task_struct *task,
18 unsigned long *stack, 18 unsigned long *stack,
19 unsigned long bp, 19 unsigned long bp,
20 const struct stacktrace_ops *ops, 20 const struct stacktrace_ops *ops,
@@ -23,13 +23,13 @@ typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
23 int *graph); 23 int *graph);
24 24
25extern unsigned long 25extern unsigned long
26print_context_stack(struct thread_info *tinfo, 26print_context_stack(struct task_struct *task,
27 unsigned long *stack, unsigned long bp, 27 unsigned long *stack, unsigned long bp,
28 const struct stacktrace_ops *ops, void *data, 28 const struct stacktrace_ops *ops, void *data,
29 unsigned long *end, int *graph); 29 unsigned long *end, int *graph);
30 30
31extern unsigned long 31extern unsigned long
32print_context_stack_bp(struct thread_info *tinfo, 32print_context_stack_bp(struct task_struct *task,
33 unsigned long *stack, unsigned long bp, 33 unsigned long *stack, unsigned long bp,
34 const struct stacktrace_ops *ops, void *data, 34 const struct stacktrace_ops *ops, void *data,
35 unsigned long *end, int *graph); 35 unsigned long *end, int *graph);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 84e33ff5a6d5..446702ed99dc 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2588,8 +2588,8 @@ static struct resource * __init ioapic_setup_resources(void)
2588 res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY; 2588 res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
2589 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); 2589 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
2590 mem += IOAPIC_RESOURCE_NAME_SIZE; 2590 mem += IOAPIC_RESOURCE_NAME_SIZE;
2591 ioapics[i].iomem_res = &res[num];
2591 num++; 2592 num++;
2592 ioapics[i].iomem_res = res;
2593 } 2593 }
2594 2594
2595 ioapic_resources = res; 2595 ioapic_resources = res;
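The old code stored the array base res into every entry, and did so only after num++ had already moved past the slot; the fix records &res[num] for the current IOAPIC before the increment. A minimal sketch of the corrected pattern (struct contents simplified for illustration):

#include <stdio.h>

struct resource { unsigned long start; };
struct ioapic   { struct resource *iomem_res; };

int main(void)
{
        struct resource res[3] = { { 0x100 }, { 0x200 }, { 0x300 } };
        struct ioapic ioapics[3];
        int num = 0;

        for (int i = 0; i < 3; i++) {
                ioapics[i].iomem_res = &res[num];       /* before num++ */
                num++;
        }
        printf("%lx %lx\n", ioapics[1].iomem_res->start,
               ioapics[2].iomem_res->start);            /* 200 300 */
        return 0;
}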
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c343a54bed39..f5c69d8974e1 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -674,14 +674,14 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
674 u64 value; 674 u64 value;
675 675
676 /* re-enable TopologyExtensions if switched off by BIOS */ 676 /* re-enable TopologyExtensions if switched off by BIOS */
677 if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) && 677 if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
678 !cpu_has(c, X86_FEATURE_TOPOEXT)) { 678 !cpu_has(c, X86_FEATURE_TOPOEXT)) {
679 679
680 if (msr_set_bit(0xc0011005, 54) > 0) { 680 if (msr_set_bit(0xc0011005, 54) > 0) {
681 rdmsrl(0xc0011005, value); 681 rdmsrl(0xc0011005, value);
682 if (value & BIT_64(54)) { 682 if (value & BIT_64(54)) {
683 set_cpu_cap(c, X86_FEATURE_TOPOEXT); 683 set_cpu_cap(c, X86_FEATURE_TOPOEXT);
684 pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n"); 684 pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
685 } 685 }
686 } 686 }
687 } 687 }
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 2bb25c3fe2e8..ef8017ca5ba9 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -42,16 +42,14 @@ void printk_address(unsigned long address)
42static void 42static void
43print_ftrace_graph_addr(unsigned long addr, void *data, 43print_ftrace_graph_addr(unsigned long addr, void *data,
44 const struct stacktrace_ops *ops, 44 const struct stacktrace_ops *ops,
45 struct thread_info *tinfo, int *graph) 45 struct task_struct *task, int *graph)
46{ 46{
47 struct task_struct *task;
48 unsigned long ret_addr; 47 unsigned long ret_addr;
49 int index; 48 int index;
50 49
51 if (addr != (unsigned long)return_to_handler) 50 if (addr != (unsigned long)return_to_handler)
52 return; 51 return;
53 52
54 task = tinfo->task;
55 index = task->curr_ret_stack; 53 index = task->curr_ret_stack;
56 54
57 if (!task->ret_stack || index < *graph) 55 if (!task->ret_stack || index < *graph)
@@ -68,7 +66,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
68static inline void 66static inline void
69print_ftrace_graph_addr(unsigned long addr, void *data, 67print_ftrace_graph_addr(unsigned long addr, void *data,
70 const struct stacktrace_ops *ops, 68 const struct stacktrace_ops *ops,
71 struct thread_info *tinfo, int *graph) 69 struct task_struct *task, int *graph)
72{ } 70{ }
73#endif 71#endif
74 72
@@ -79,10 +77,10 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
79 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack 77 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
80 */ 78 */
81 79
82static inline int valid_stack_ptr(struct thread_info *tinfo, 80static inline int valid_stack_ptr(struct task_struct *task,
83 void *p, unsigned int size, void *end) 81 void *p, unsigned int size, void *end)
84{ 82{
85 void *t = tinfo; 83 void *t = task_stack_page(task);
86 if (end) { 84 if (end) {
87 if (p < end && p >= (end-THREAD_SIZE)) 85 if (p < end && p >= (end-THREAD_SIZE))
88 return 1; 86 return 1;
@@ -93,14 +91,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
93} 91}
94 92
95unsigned long 93unsigned long
96print_context_stack(struct thread_info *tinfo, 94print_context_stack(struct task_struct *task,
97 unsigned long *stack, unsigned long bp, 95 unsigned long *stack, unsigned long bp,
98 const struct stacktrace_ops *ops, void *data, 96 const struct stacktrace_ops *ops, void *data,
99 unsigned long *end, int *graph) 97 unsigned long *end, int *graph)
100{ 98{
101 struct stack_frame *frame = (struct stack_frame *)bp; 99 struct stack_frame *frame = (struct stack_frame *)bp;
102 100
103 while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) { 101 while (valid_stack_ptr(task, stack, sizeof(*stack), end)) {
104 unsigned long addr; 102 unsigned long addr;
105 103
106 addr = *stack; 104 addr = *stack;
@@ -112,7 +110,7 @@ print_context_stack(struct thread_info *tinfo,
112 } else { 110 } else {
113 ops->address(data, addr, 0); 111 ops->address(data, addr, 0);
114 } 112 }
115 print_ftrace_graph_addr(addr, data, ops, tinfo, graph); 113 print_ftrace_graph_addr(addr, data, ops, task, graph);
116 } 114 }
117 stack++; 115 stack++;
118 } 116 }
@@ -121,7 +119,7 @@ print_context_stack(struct thread_info *tinfo,
121EXPORT_SYMBOL_GPL(print_context_stack); 119EXPORT_SYMBOL_GPL(print_context_stack);
122 120
123unsigned long 121unsigned long
124print_context_stack_bp(struct thread_info *tinfo, 122print_context_stack_bp(struct task_struct *task,
125 unsigned long *stack, unsigned long bp, 123 unsigned long *stack, unsigned long bp,
126 const struct stacktrace_ops *ops, void *data, 124 const struct stacktrace_ops *ops, void *data,
127 unsigned long *end, int *graph) 125 unsigned long *end, int *graph)
@@ -129,7 +127,7 @@ print_context_stack_bp(struct thread_info *tinfo,
129 struct stack_frame *frame = (struct stack_frame *)bp; 127 struct stack_frame *frame = (struct stack_frame *)bp;
130 unsigned long *ret_addr = &frame->return_address; 128 unsigned long *ret_addr = &frame->return_address;
131 129
132 while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) { 130 while (valid_stack_ptr(task, ret_addr, sizeof(*ret_addr), end)) {
133 unsigned long addr = *ret_addr; 131 unsigned long addr = *ret_addr;
134 132
135 if (!__kernel_text_address(addr)) 133 if (!__kernel_text_address(addr))
@@ -139,7 +137,7 @@ print_context_stack_bp(struct thread_info *tinfo,
139 break; 137 break;
140 frame = frame->next_frame; 138 frame = frame->next_frame;
141 ret_addr = &frame->return_address; 139 ret_addr = &frame->return_address;
142 print_ftrace_graph_addr(addr, data, ops, tinfo, graph); 140 print_ftrace_graph_addr(addr, data, ops, task, graph);
143 } 141 }
144 142
145 return (unsigned long)frame; 143 return (unsigned long)frame;
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 464ffd69b92e..fef917e79b9d 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -61,15 +61,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
61 bp = stack_frame(task, regs); 61 bp = stack_frame(task, regs);
62 62
63 for (;;) { 63 for (;;) {
64 struct thread_info *context;
65 void *end_stack; 64 void *end_stack;
66 65
67 end_stack = is_hardirq_stack(stack, cpu); 66 end_stack = is_hardirq_stack(stack, cpu);
68 if (!end_stack) 67 if (!end_stack)
69 end_stack = is_softirq_stack(stack, cpu); 68 end_stack = is_softirq_stack(stack, cpu);
70 69
71 context = task_thread_info(task); 70 bp = ops->walk_stack(task, stack, bp, ops, data,
72 bp = ops->walk_stack(context, stack, bp, ops, data,
73 end_stack, &graph); 71 end_stack, &graph);
74 72
75 /* Stop if not on irq stack */ 73 /* Stop if not on irq stack */
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 5f1c6266eb30..d558a8a49016 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -153,7 +153,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
153 const struct stacktrace_ops *ops, void *data) 153 const struct stacktrace_ops *ops, void *data)
154{ 154{
155 const unsigned cpu = get_cpu(); 155 const unsigned cpu = get_cpu();
156 struct thread_info *tinfo;
157 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu); 156 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
158 unsigned long dummy; 157 unsigned long dummy;
159 unsigned used = 0; 158 unsigned used = 0;
@@ -179,7 +178,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
179 * current stack address. If the stacks consist of nested 178 * current stack address. If the stacks consist of nested
180 * exceptions 179 * exceptions
181 */ 180 */
182 tinfo = task_thread_info(task);
183 while (!done) { 181 while (!done) {
184 unsigned long *stack_end; 182 unsigned long *stack_end;
185 enum stack_type stype; 183 enum stack_type stype;
@@ -202,7 +200,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
202 if (ops->stack(data, id) < 0) 200 if (ops->stack(data, id) < 0)
203 break; 201 break;
204 202
205 bp = ops->walk_stack(tinfo, stack, bp, ops, 203 bp = ops->walk_stack(task, stack, bp, ops,
206 data, stack_end, &graph); 204 data, stack_end, &graph);
207 ops->stack(data, "<EOE>"); 205 ops->stack(data, "<EOE>");
208 /* 206 /*
@@ -218,7 +216,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
218 216
219 if (ops->stack(data, "IRQ") < 0) 217 if (ops->stack(data, "IRQ") < 0)
220 break; 218 break;
221 bp = ops->walk_stack(tinfo, stack, bp, 219 bp = ops->walk_stack(task, stack, bp,
222 ops, data, stack_end, &graph); 220 ops, data, stack_end, &graph);
223 /* 221 /*
224 * We link to the next stack (which would be 222 * We link to the next stack (which would be
@@ -240,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
240 /* 238 /*
241 * This handles the process stack: 239 * This handles the process stack:
242 */ 240 */
243 bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph); 241 bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
244 put_cpu(); 242 put_cpu();
245} 243}
246EXPORT_SYMBOL(dump_trace); 244EXPORT_SYMBOL(dump_trace);
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 4d38416e2a7f..04f89caef9c4 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -57,7 +57,7 @@
57# error "Need more than one PGD for the ESPFIX hack" 57# error "Need more than one PGD for the ESPFIX hack"
58#endif 58#endif
59 59
60#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) 60#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
61 61
62/* This contains the *bottom* address of the espfix stack */ 62/* This contains the *bottom* address of the espfix stack */
63DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack); 63DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 38da8f29a9c8..c627bf8d98ad 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -130,11 +130,9 @@ void irq_ctx_init(int cpu)
130 130
131void do_softirq_own_stack(void) 131void do_softirq_own_stack(void)
132{ 132{
133 struct thread_info *curstk;
134 struct irq_stack *irqstk; 133 struct irq_stack *irqstk;
135 u32 *isp, *prev_esp; 134 u32 *isp, *prev_esp;
136 135
137 curstk = current_stack();
138 irqstk = __this_cpu_read(softirq_stack); 136 irqstk = __this_cpu_read(softirq_stack);
139 137
140 /* build the stack frame on the softirq stack */ 138 /* build the stack frame on the softirq stack */
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 38cf7a741250..7847e5c0e0b5 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -961,7 +961,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
961 * normal page fault. 961 * normal page fault.
962 */ 962 */
963 regs->ip = (unsigned long)cur->addr; 963 regs->ip = (unsigned long)cur->addr;
964 /*
965 * Trap flag (TF) has been set here because this fault
966 * happened where the single stepping will be done.
967 * So clear it by resetting the current kprobe:
968 */
969 regs->flags &= ~X86_EFLAGS_TF;
970
971 /*
972 * If the TF flag was set before the kprobe hit,
973 * don't touch it:
974 */
964 regs->flags |= kcb->kprobe_old_flags; 975 regs->flags |= kcb->kprobe_old_flags;
976
965 if (kcb->kprobe_status == KPROBE_REENTER) 977 if (kcb->kprobe_status == KPROBE_REENTER)
966 restore_previous_kprobe(kcb); 978 restore_previous_kprobe(kcb);
967 else 979 else
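The order of the two added statements matters: TF is cleared unconditionally because the fault happened while our single-step TF was set, and only then are the flags saved at probe entry OR-ed back, so a TF the probed context had set on its own survives. A minimal sketch of that clear-then-restore dance (old_flags carries just the saved TF bit here, for brevity):

#include <stdio.h>

#define X86_EFLAGS_TF 0x100UL

static unsigned long fixup_flags(unsigned long flags, unsigned long old_flags)
{
        flags &= ~X86_EFLAGS_TF;        /* drop our single-step TF */
        flags |= old_flags;             /* re-apply what the caller had */
        return flags;
}

int main(void)
{
        /* TF was clear before the probe hit: it stays clear. */
        printf("%#lx\n", fixup_flags(0x346, 0));                /* 0x246 */
        /* TF was already set before the probe hit: it is preserved. */
        printf("%#lx\n", fixup_flags(0x346, X86_EFLAGS_TF));    /* 0x346 */
        return 0;
}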
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 99bfc025111d..06c58ce46762 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -61,11 +61,16 @@ void pvclock_resume(void)
61u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src) 61u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
62{ 62{
63 unsigned version; 63 unsigned version;
64 cycle_t ret;
65 u8 flags; 64 u8 flags;
66 65
67 do { 66 do {
68 version = __pvclock_read_cycles(src, &ret, &flags); 67 version = src->version;
68 /* Make the latest version visible */
69 smp_rmb();
70
71 flags = src->flags;
72 /* Make sure that the version double-check is last. */
73 smp_rmb();
69 } while ((src->version & 1) || version != src->version); 74 } while ((src->version & 1) || version != src->version);
70 75
71 return flags & valid_flags; 76 return flags & valid_flags;
@@ -80,6 +85,8 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
80 85
81 do { 86 do {
82 version = __pvclock_read_cycles(src, &ret, &flags); 87 version = __pvclock_read_cycles(src, &ret, &flags);
88 /* Make sure that the version double-check is last. */
89 smp_rmb();
83 } while ((src->version & 1) || version != src->version); 90 } while ((src->version & 1) || version != src->version);
84 91
85 if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) { 92 if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
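pvclock_read_flags() now open-codes the same versioned read, and both loops gain a second smp_rmb() so the version double-check cannot be hoisted above the payload reads; the loop repeats while the version is odd (writer mid-update) or changed underneath us. A minimal userspace sketch of the retry loop, again with acquire fences standing in for smp_rmb():

#include <stdatomic.h>
#include <stdio.h>

struct time_info {
        unsigned int version;   /* odd while a writer is mid-update */
        unsigned char flags;
};

int main(void)
{
        struct time_info ti = { .version = 2, .flags = 3 };
        unsigned int version;
        unsigned char flags;

        do {
                version = ti.version;
                atomic_thread_fence(memory_order_acquire);      /* smp_rmb() */
                flags = ti.flags;
                /* keep the version double-check last */
                atomic_thread_fence(memory_order_acquire);      /* smp_rmb() */
        } while ((ti.version & 1) || version != ti.version);

        printf("flags=%u\n", flags);
        return 0;
}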
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index d1590486204a..00f03d82e69a 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -96,6 +96,12 @@ static inline void cond_local_irq_disable(struct pt_regs *regs)
96 local_irq_disable(); 96 local_irq_disable();
97} 97}
98 98
99/*
100 * In IST context, we explicitly disable preemption. This serves two
101 * purposes: it makes it much less likely that we would accidentally
102 * schedule in IST context and it will force a warning if we somehow
103 * manage to schedule by accident.
104 */
99void ist_enter(struct pt_regs *regs) 105void ist_enter(struct pt_regs *regs)
100{ 106{
101 if (user_mode(regs)) { 107 if (user_mode(regs)) {
@@ -110,13 +116,7 @@ void ist_enter(struct pt_regs *regs)
110 rcu_nmi_enter(); 116 rcu_nmi_enter();
111 } 117 }
112 118
113 /* 119 preempt_disable();
114 * We are atomic because we're on the IST stack; or we're on
115 * x86_32, in which case we still shouldn't schedule; or we're
116 * on x86_64 and entered from user mode, in which case we're
117 * still atomic unless ist_begin_non_atomic is called.
118 */
119 preempt_count_add(HARDIRQ_OFFSET);
120 120
121 /* This code is a bit fragile. Test it. */ 121 /* This code is a bit fragile. Test it. */
122 RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work"); 122 RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
@@ -124,7 +124,7 @@ void ist_enter(struct pt_regs *regs)
124 124
125void ist_exit(struct pt_regs *regs) 125void ist_exit(struct pt_regs *regs)
126{ 126{
127 preempt_count_sub(HARDIRQ_OFFSET); 127 preempt_enable_no_resched();
128 128
129 if (!user_mode(regs)) 129 if (!user_mode(regs))
130 rcu_nmi_exit(); 130 rcu_nmi_exit();
@@ -155,7 +155,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
155 BUG_ON((unsigned long)(current_top_of_stack() - 155 BUG_ON((unsigned long)(current_top_of_stack() -
156 current_stack_pointer()) >= THREAD_SIZE); 156 current_stack_pointer()) >= THREAD_SIZE);
157 157
158 preempt_count_sub(HARDIRQ_OFFSET); 158 preempt_enable_no_resched();
159} 159}
160 160
161/** 161/**
@@ -165,7 +165,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
165 */ 165 */
166void ist_end_non_atomic(void) 166void ist_end_non_atomic(void)
167{ 167{
168 preempt_count_add(HARDIRQ_OFFSET); 168 preempt_disable();
169} 169}
170 170
171static nokprobe_inline int 171static nokprobe_inline int
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index bbb5b283ff63..a397200281c1 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1310,7 +1310,8 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
1310 1310
1311 /* __delay is delay_tsc whenever the hardware has TSC, thus always. */ 1311 /* __delay is delay_tsc whenever the hardware has TSC, thus always. */
1312 if (guest_tsc < tsc_deadline) 1312 if (guest_tsc < tsc_deadline)
1313 __delay(tsc_deadline - guest_tsc); 1313 __delay(min(tsc_deadline - guest_tsc,
1314 nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
1314} 1315}
1315 1316
1316static void start_apic_timer(struct kvm_lapic *apic) 1317static void start_apic_timer(struct kvm_lapic *apic)
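Without the clamp, a guest TSC far behind the deadline (after migration, say) turned the pre-wakeup busy-wait into a nearly unbounded spin; the fix caps it at the configured advance window, converted to cycles by the nsec_to_cycles() helper that a later hunk in this series moves into x86.h. A minimal sketch of the clamped wait (all numbers invented for illustration):

#include <stdio.h>

static unsigned long long min_u64(unsigned long long a, unsigned long long b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned long long guest_tsc = 1000;
        unsigned long long tsc_deadline = 901000;       /* far in the future */
        unsigned long long advance_cycles = 2000;       /* nsec_to_cycles(...) */

        if (guest_tsc < tsc_deadline)
                printf("spin for %llu cycles\n",
                       min_u64(tsc_deadline - guest_tsc, advance_cycles));
        return 0;
}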
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1163e8173e5a..16ef31b87452 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -238,7 +238,9 @@ module_param(nested, int, S_IRUGO);
238 238
239/* enable / disable AVIC */ 239/* enable / disable AVIC */
240static int avic; 240static int avic;
241#ifdef CONFIG_X86_LOCAL_APIC
241module_param(avic, int, S_IRUGO); 242module_param(avic, int, S_IRUGO);
243#endif
242 244
243static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); 245static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
244static void svm_flush_tlb(struct kvm_vcpu *vcpu); 246static void svm_flush_tlb(struct kvm_vcpu *vcpu);
@@ -981,11 +983,14 @@ static __init int svm_hardware_setup(void)
981 } else 983 } else
982 kvm_disable_tdp(); 984 kvm_disable_tdp();
983 985
984 if (avic && (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC))) 986 if (avic) {
985 avic = false; 987 if (!npt_enabled ||
986 988 !boot_cpu_has(X86_FEATURE_AVIC) ||
987 if (avic) 989 !IS_ENABLED(CONFIG_X86_LOCAL_APIC))
988 pr_info("AVIC enabled\n"); 990 avic = false;
991 else
992 pr_info("AVIC enabled\n");
993 }
989 994
990 return 0; 995 return 0;
991 996
@@ -1324,7 +1329,7 @@ free_avic:
1324static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run) 1329static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
1325{ 1330{
1326 u64 entry; 1331 u64 entry;
1327 int h_physical_id = __default_cpu_present_to_apicid(vcpu->cpu); 1332 int h_physical_id = kvm_cpu_get_apicid(vcpu->cpu);
1328 struct vcpu_svm *svm = to_svm(vcpu); 1333 struct vcpu_svm *svm = to_svm(vcpu);
1329 1334
1330 if (!kvm_vcpu_apicv_active(vcpu)) 1335 if (!kvm_vcpu_apicv_active(vcpu))
@@ -1349,7 +1354,7 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1349{ 1354{
1350 u64 entry; 1355 u64 entry;
1351 /* ID = 0xff (broadcast), ID > 0xff (reserved) */ 1356 /* ID = 0xff (broadcast), ID > 0xff (reserved) */
1352 int h_physical_id = __default_cpu_present_to_apicid(cpu); 1357 int h_physical_id = kvm_cpu_get_apicid(cpu);
1353 struct vcpu_svm *svm = to_svm(vcpu); 1358 struct vcpu_svm *svm = to_svm(vcpu);
1354 1359
1355 if (!kvm_vcpu_apicv_active(vcpu)) 1360 if (!kvm_vcpu_apicv_active(vcpu))
@@ -4236,7 +4241,7 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
4236 4241
4237 if (avic_vcpu_is_running(vcpu)) 4242 if (avic_vcpu_is_running(vcpu))
4238 wrmsrl(SVM_AVIC_DOORBELL, 4243 wrmsrl(SVM_AVIC_DOORBELL,
4239 __default_cpu_present_to_apicid(vcpu->cpu)); 4244 kvm_cpu_get_apicid(vcpu->cpu));
4240 else 4245 else
4241 kvm_vcpu_wake_up(vcpu); 4246 kvm_vcpu_wake_up(vcpu);
4242} 4247}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index fb93010beaa4..64a79f271276 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2072,7 +2072,8 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
2072 unsigned int dest; 2072 unsigned int dest;
2073 2073
2074 if (!kvm_arch_has_assigned_device(vcpu->kvm) || 2074 if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
2075 !irq_remapping_cap(IRQ_POSTING_CAP)) 2075 !irq_remapping_cap(IRQ_POSTING_CAP) ||
2076 !kvm_vcpu_apicv_active(vcpu))
2076 return; 2077 return;
2077 2078
2078 do { 2079 do {
@@ -2180,7 +2181,8 @@ static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
2180 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); 2181 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2181 2182
2182 if (!kvm_arch_has_assigned_device(vcpu->kvm) || 2183 if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
2183 !irq_remapping_cap(IRQ_POSTING_CAP)) 2184 !irq_remapping_cap(IRQ_POSTING_CAP) ||
2185 !kvm_vcpu_apicv_active(vcpu))
2184 return; 2186 return;
2185 2187
2186 /* Set SN when the vCPU is preempted */ 2188 /* Set SN when the vCPU is preempted */
@@ -6669,7 +6671,13 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
6669 6671
6670 /* Checks for #GP/#SS exceptions. */ 6672 /* Checks for #GP/#SS exceptions. */
6671 exn = false; 6673 exn = false;
6672 if (is_protmode(vcpu)) { 6674 if (is_long_mode(vcpu)) {
6675 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
6676 * non-canonical form. This is the only check on the memory
6677 * destination for long mode!
6678 */
6679 exn = is_noncanonical_address(*ret);
6680 } else if (is_protmode(vcpu)) {
6673 /* Protected mode: apply checks for segment validity in the 6681 /* Protected mode: apply checks for segment validity in the
6674 * following order: 6682 * following order:
6675 * - segment type check (#GP(0) may be thrown) 6683 * - segment type check (#GP(0) may be thrown)
@@ -6686,17 +6694,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
6686 * execute-only code segment 6694 * execute-only code segment
6687 */ 6695 */
6688 exn = ((s.type & 0xa) == 8); 6696 exn = ((s.type & 0xa) == 8);
6689 } 6697 if (exn) {
6690 if (exn) { 6698 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
6691 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 6699 return 1;
6692 return 1; 6700 }
6693 }
6694 if (is_long_mode(vcpu)) {
6695 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
6696 * non-canonical form. This is an only check for long mode.
6697 */
6698 exn = is_noncanonical_address(*ret);
6699 } else if (is_protmode(vcpu)) {
6700 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. 6701 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
6701 */ 6702 */
6702 exn = (s.unusable != 0); 6703 exn = (s.unusable != 0);
@@ -10714,7 +10715,8 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
10714 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); 10715 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
10715 10716
10716 if (!kvm_arch_has_assigned_device(vcpu->kvm) || 10717 if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
10717 !irq_remapping_cap(IRQ_POSTING_CAP)) 10718 !irq_remapping_cap(IRQ_POSTING_CAP) ||
10719 !kvm_vcpu_apicv_active(vcpu))
10718 return 0; 10720 return 0;
10719 10721
10720 vcpu->pre_pcpu = vcpu->cpu; 10722 vcpu->pre_pcpu = vcpu->cpu;
@@ -10780,7 +10782,8 @@ static void vmx_post_block(struct kvm_vcpu *vcpu)
10780 unsigned long flags; 10782 unsigned long flags;
10781 10783
10782 if (!kvm_arch_has_assigned_device(vcpu->kvm) || 10784 if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
10783 !irq_remapping_cap(IRQ_POSTING_CAP)) 10785 !irq_remapping_cap(IRQ_POSTING_CAP) ||
10786 !kvm_vcpu_apicv_active(vcpu))
10784 return; 10787 return;
10785 10788
10786 do { 10789 do {
@@ -10833,7 +10836,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
10833 int idx, ret = -EINVAL; 10836 int idx, ret = -EINVAL;
10834 10837
10835 if (!kvm_arch_has_assigned_device(kvm) || 10838 if (!kvm_arch_has_assigned_device(kvm) ||
10836 !irq_remapping_cap(IRQ_POSTING_CAP)) 10839 !irq_remapping_cap(IRQ_POSTING_CAP) ||
10840 !kvm_vcpu_apicv_active(kvm->vcpus[0]))
10837 return 0; 10841 return 0;
10838 10842
10839 idx = srcu_read_lock(&kvm->irq_srcu); 10843 idx = srcu_read_lock(&kvm->irq_srcu);
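The get_vmx_mem_address() reordering makes the long-mode branch self-contained: in long mode the only check on the operand address is canonicality, and the protected-mode segment checks no longer run afterwards and clobber exn. A minimal sketch of a canonicality test, assuming 48-bit virtual addresses (bits 63:47 must be a sign extension of bit 47):

#include <stdint.h>
#include <stdio.h>

static int is_noncanonical(uint64_t va)
{
        /* sign-extend the low 48 bits; a canonical address round-trips */
        uint64_t canon = (uint64_t)((int64_t)(va << 16) >> 16);

        return canon != va;
}

int main(void)
{
        printf("%d\n", is_noncanonical(0x00007fffffffffffULL));  /* 0 */
        printf("%d\n", is_noncanonical(0xffff800000000000ULL));  /* 0 */
        printf("%d\n", is_noncanonical(0x0000800000000000ULL));  /* 1 */
        return 0;
}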
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 902d9da12392..7da5dd2057a9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1244,12 +1244,6 @@ static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
1244static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); 1244static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
1245static unsigned long max_tsc_khz; 1245static unsigned long max_tsc_khz;
1246 1246
1247static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
1248{
1249 return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
1250 vcpu->arch.virtual_tsc_shift);
1251}
1252
1253static u32 adjust_tsc_khz(u32 khz, s32 ppm) 1247static u32 adjust_tsc_khz(u32 khz, s32 ppm)
1254{ 1248{
1255 u64 v = (u64)khz * (1000000 + ppm); 1249 u64 v = (u64)khz * (1000000 + ppm);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 7ce3634ab5fe..a82ca466b62e 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -2,6 +2,7 @@
2#define ARCH_X86_KVM_X86_H 2#define ARCH_X86_KVM_X86_H
3 3
4#include <linux/kvm_host.h> 4#include <linux/kvm_host.h>
5#include <asm/pvclock.h>
5#include "kvm_cache_regs.h" 6#include "kvm_cache_regs.h"
6 7
7#define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL 8#define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL
@@ -195,6 +196,12 @@ extern unsigned int lapic_timer_advance_ns;
195 196
196extern struct static_key kvm_no_apic_vcpu; 197extern struct static_key kvm_no_apic_vcpu;
197 198
199static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
200{
201 return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
202 vcpu->arch.virtual_tsc_shift);
203}
204
198/* Same "calling convention" as do_div: 205/* Same "calling convention" as do_div:
199 * - divide (n << 32) by base 206 * - divide (n << 32) by base
200 * - put result in n 207 * - put result in n
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 4eb287e25043..aa0ff4b02a96 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -6,7 +6,7 @@
6#include <asm/fixmap.h> 6#include <asm/fixmap.h>
7#include <asm/mtrr.h> 7#include <asm/mtrr.h>
8 8
9#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO 9#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
10 10
11#ifdef CONFIG_HIGHPTE 11#ifdef CONFIG_HIGHPTE
12#define PGALLOC_USER_GFP __GFP_HIGHMEM 12#define PGALLOC_USER_GFP __GFP_HIGHMEM
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 6e7242be1c87..b226b3f497f1 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -139,7 +139,7 @@ int __init efi_alloc_page_tables(void)
139 if (efi_enabled(EFI_OLD_MEMMAP)) 139 if (efi_enabled(EFI_OLD_MEMMAP))
140 return 0; 140 return 0;
141 141
142 gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO; 142 gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
143 efi_pgd = (pgd_t *)__get_free_page(gfp_mask); 143 efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
144 if (!efi_pgd) 144 if (!efi_pgd)
145 return -ENOMEM; 145 return -ENOMEM;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 478a2de543a5..67433714b791 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1113,7 +1113,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
1113 1113
1114 /* NOTE: The loop is more greedy than the cleanup_highmap variant. 1114 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1115 * We include the PMD passed in on _both_ boundaries. */ 1115 * We include the PMD passed in on _both_ boundaries. */
1116 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE)); 1116 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
1117 pmd++, vaddr += PMD_SIZE) { 1117 pmd++, vaddr += PMD_SIZE) {
1118 if (pmd_none(*pmd)) 1118 if (pmd_none(*pmd))
1119 continue; 1119 continue;
@@ -1551,41 +1551,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1551#endif 1551#endif
1552} 1552}
1553 1553
1554#ifdef CONFIG_X86_32
1555static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
1556{
1557 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1558 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1559 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1560 pte_val_ma(pte));
1561
1562 return pte;
1563}
1564#else /* CONFIG_X86_64 */
1565static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
1566{
1567 unsigned long pfn;
1568
1569 if (xen_feature(XENFEAT_writable_page_tables) ||
1570 xen_feature(XENFEAT_auto_translated_physmap) ||
1571 xen_start_info->mfn_list >= __START_KERNEL_map)
1572 return pte;
1573
1574 /*
1575 * Pages belonging to the initial p2m list mapped outside the default
1576 * address range must be mapped read-only. This region contains the
1577 * page tables for mapping the p2m list, too, and page tables MUST be
1578 * mapped read-only.
1579 */
1580 pfn = pte_pfn(pte);
1581 if (pfn >= xen_start_info->first_p2m_pfn &&
1582 pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
1583 pte = __pte_ma(pte_val_ma(pte) & ~_PAGE_RW);
1584
1585 return pte;
1586}
1587#endif /* CONFIG_X86_64 */
1588
1589/* 1554/*
1590 * Init-time set_pte while constructing initial pagetables, which 1555 * Init-time set_pte while constructing initial pagetables, which
1591 * doesn't allow RO page table pages to be remapped RW. 1556 * doesn't allow RO page table pages to be remapped RW.
@@ -1600,13 +1565,37 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
1600 * so always write the PTE directly and rely on Xen trapping and 1565 * so always write the PTE directly and rely on Xen trapping and
1601 * emulating any updates as necessary. 1566 * emulating any updates as necessary.
1602 */ 1567 */
1603static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) 1568__visible pte_t xen_make_pte_init(pteval_t pte)
1604{ 1569{
1605 if (pte_mfn(pte) != INVALID_P2M_ENTRY) 1570#ifdef CONFIG_X86_64
1606 pte = mask_rw_pte(ptep, pte); 1571 unsigned long pfn;
1607 else 1572
1608 pte = __pte_ma(0); 1573 /*
1574 * Pages belonging to the initial p2m list mapped outside the default
1575 * address range must be mapped read-only. This region contains the
1576 * page tables for mapping the p2m list, too, and page tables MUST be
1577 * mapped read-only.
1578 */
1579 pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
1580 if (xen_start_info->mfn_list < __START_KERNEL_map &&
1581 pfn >= xen_start_info->first_p2m_pfn &&
1582 pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
1583 pte &= ~_PAGE_RW;
1584#endif
1585 pte = pte_pfn_to_mfn(pte);
1586 return native_make_pte(pte);
1587}
1588PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
1609 1589
1590static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1591{
1592#ifdef CONFIG_X86_32
1593 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1594 if (pte_mfn(pte) != INVALID_P2M_ENTRY
1595 && pte_val_ma(*ptep) & _PAGE_PRESENT)
1596 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1597 pte_val_ma(pte));
1598#endif
1610 native_set_pte(ptep, pte); 1599 native_set_pte(ptep, pte);
1611} 1600}
1612 1601
@@ -2407,6 +2396,7 @@ static void __init xen_post_allocator_init(void)
2407 pv_mmu_ops.alloc_pud = xen_alloc_pud; 2396 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2408 pv_mmu_ops.release_pud = xen_release_pud; 2397 pv_mmu_ops.release_pud = xen_release_pud;
2409#endif 2398#endif
2399 pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
2410 2400
2411#ifdef CONFIG_X86_64 2401#ifdef CONFIG_X86_64
2412 pv_mmu_ops.write_cr3 = &xen_write_cr3; 2402 pv_mmu_ops.write_cr3 = &xen_write_cr3;
@@ -2455,7 +2445,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2455 .pte_val = PV_CALLEE_SAVE(xen_pte_val), 2445 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2456 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val), 2446 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
2457 2447
2458 .make_pte = PV_CALLEE_SAVE(xen_make_pte), 2448 .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
2459 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd), 2449 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
2460 2450
2461#ifdef CONFIG_X86_PAE 2451#ifdef CONFIG_X86_PAE
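The first hunk in this file is a pointer-arithmetic fix: level2_kernel_pgt is one page holding PTRS_PER_PMD (512) eight-byte entries, but pmd < level2_kernel_pgt + PAGE_SIZE advances the bound by PAGE_SIZE entries, eight times too far. A minimal sketch of the distinction:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

int main(void)
{
        /* pointer arithmetic scales by the element size */
        size_t ptrs_per_pmd = PAGE_SIZE / sizeof(uint64_t);

        printf("entries in one page:           %zu\n", ptrs_per_pmd); /* 512 */
        printf("entries the old bound covered: %d\n", PAGE_SIZE);     /* 4096 */
        printf("overshoot:                     %zux\n",
               PAGE_SIZE / ptrs_per_pmd);                             /* 8x */
        return 0;
}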
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index cab9f766bb06..dd2a49a8aacc 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -182,7 +182,7 @@ static void * __ref alloc_p2m_page(void)
182 if (unlikely(!slab_is_available())) 182 if (unlikely(!slab_is_available()))
183 return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE); 183 return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
184 184
185 return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT); 185 return (void *)__get_free_page(GFP_KERNEL);
186} 186}
187 187
188static void __ref free_p2m_page(void *p) 188static void __ref free_p2m_page(void *p)
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index d38eb9237e64..1065bc8bcae5 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -44,7 +44,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
44 pte_t *ptep; 44 pte_t *ptep;
45 int i; 45 int i;
46 46
47 ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); 47 ptep = (pte_t *)__get_free_page(GFP_KERNEL);
48 if (!ptep) 48 if (!ptep)
49 return NULL; 49 return NULL;
50 for (i = 0; i < 1024; i++) 50 for (i = 0; i < 1024; i++)
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 23d7f301a196..9e29dc351695 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -113,6 +113,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
113 ret = submit_bio_wait(type, bio); 113 ret = submit_bio_wait(type, bio);
114 if (ret == -EOPNOTSUPP) 114 if (ret == -EOPNOTSUPP)
115 ret = 0; 115 ret = 0;
116 bio_put(bio);
116 } 117 }
117 blk_finish_plug(&plug); 118 blk_finish_plug(&plug);
118 119
@@ -165,8 +166,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
165 } 166 }
166 } 167 }
167 168
168 if (bio) 169 if (bio) {
169 ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio); 170 ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
171 bio_put(bio);
172 }
170 return ret != -EOPNOTSUPP ? ret : 0; 173 return ret != -EOPNOTSUPP ? ret : 0;
171} 174}
172EXPORT_SYMBOL(blkdev_issue_write_same); 175EXPORT_SYMBOL(blkdev_issue_write_same);
@@ -206,8 +209,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
206 } 209 }
207 } 210 }
208 211
209 if (bio) 212 if (bio) {
210 return submit_bio_wait(WRITE, bio); 213 ret = submit_bio_wait(WRITE, bio);
214 bio_put(bio);
215 return ret;
216 }
211 return 0; 217 return 0;
212} 218}
213 219
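All three hunks plug the same leak: submit_bio_wait() waits on its own completion and returns with the caller's bio reference still held, so each synchronous path must bio_put() the bio it allocated. A minimal sketch of that ownership rule (names mirror the block layer, but the toy refcount implementation is invented):

#include <stdio.h>
#include <stdlib.h>

struct bio { int refcount; };

static struct bio *bio_alloc_toy(void)
{
        struct bio *b = malloc(sizeof(*b));

        if (!b)
                abort();
        b->refcount = 1;
        return b;
}

static void bio_put(struct bio *b)
{
        if (--b->refcount == 0) {
                free(b);
                puts("bio freed");
        }
}

static int submit_bio_wait(struct bio *b)
{
        (void)b;        /* waits for I/O; keeps the caller's reference */
        return 0;
}

int main(void)
{
        struct bio *bio = bio_alloc_toy();
        int ret = submit_bio_wait(bio);

        bio_put(bio);   /* without this, the bio leaks */
        return ret;
}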
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 29cbc1b5fbdb..f9b9049b1284 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1262,12 +1262,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1262 1262
1263 blk_queue_split(q, &bio, q->bio_split); 1263 blk_queue_split(q, &bio, q->bio_split);
1264 1264
1265 if (!is_flush_fua && !blk_queue_nomerges(q)) { 1265 if (!is_flush_fua && !blk_queue_nomerges(q) &&
1266 if (blk_attempt_plug_merge(q, bio, &request_count, 1266 blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1267 &same_queue_rq)) 1267 return BLK_QC_T_NONE;
1268 return BLK_QC_T_NONE;
1269 } else
1270 request_count = blk_plug_queued_count(q);
1271 1268
1272 rq = blk_mq_map_request(q, bio, &data); 1269 rq = blk_mq_map_request(q, bio, &data);
1273 if (unlikely(!rq)) 1270 if (unlikely(!rq))
@@ -1358,9 +1355,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
1358 1355
1359 blk_queue_split(q, &bio, q->bio_split); 1356 blk_queue_split(q, &bio, q->bio_split);
1360 1357
1361 if (!is_flush_fua && !blk_queue_nomerges(q) && 1358 if (!is_flush_fua && !blk_queue_nomerges(q)) {
1362 blk_attempt_plug_merge(q, bio, &request_count, NULL)) 1359 if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
1363 return BLK_QC_T_NONE; 1360 return BLK_QC_T_NONE;
1361 } else
1362 request_count = blk_plug_queued_count(q);
1364 1363
1365 rq = blk_mq_map_request(q, bio, &data); 1364 rq = blk_mq_map_request(q, bio, &data);
1366 if (unlikely(!rq)) 1365 if (unlikely(!rq))
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 43fe85f20d57..7097a3395b25 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -455,6 +455,7 @@ static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
455 [CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), 455 [CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
456 [CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), 456 [CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
457 [CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg), 457 [CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
458 [CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
458 [CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = 0, 459 [CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = 0,
459}; 460};
460 461
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index a1d177d58254..21932d640a41 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -108,7 +108,9 @@ acpi_ex_add_table(u32 table_index,
108 108
109 /* Add the table to the namespace */ 109 /* Add the table to the namespace */
110 110
111 acpi_ex_exit_interpreter();
111 status = acpi_ns_load_table(table_index, parent_node); 112 status = acpi_ns_load_table(table_index, parent_node);
113 acpi_ex_enter_interpreter();
112 if (ACPI_FAILURE(status)) { 114 if (ACPI_FAILURE(status)) {
113 acpi_ut_remove_reference(obj_desc); 115 acpi_ut_remove_reference(obj_desc);
114 *ddb_handle = NULL; 116 *ddb_handle = NULL;
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index daceb80022b0..3b7fb99362b6 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -306,12 +306,6 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
306acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg) 306acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
307{ 307{
308 u64 address; 308 u64 address;
309 u8 access_width;
310 u32 bit_width;
311 u8 bit_offset;
312 u64 value64;
313 u32 new_value32, old_value32;
314 u8 index;
315 acpi_status status; 309 acpi_status status;
316 310
317 ACPI_FUNCTION_NAME(hw_write); 311 ACPI_FUNCTION_NAME(hw_write);
@@ -323,145 +317,23 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
323 return (status); 317 return (status);
324 } 318 }
325 319
326 /* Convert access_width into number of bits based */
327
328 access_width = acpi_hw_get_access_bit_width(reg, 32);
329 bit_width = reg->bit_offset + reg->bit_width;
330 bit_offset = reg->bit_offset;
331
332 /* 320 /*
333 * Two address spaces supported: Memory or IO. PCI_Config is 321 * Two address spaces supported: Memory or IO. PCI_Config is
334 * not supported here because the GAS structure is insufficient 322 * not supported here because the GAS structure is insufficient
335 */ 323 */
336 index = 0; 324 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
337 while (bit_width) { 325 status = acpi_os_write_memory((acpi_physical_address)
338 /* 326 address, (u64)value,
339 * Use offset style bit reads because "Index * AccessWidth" is 327 reg->bit_width);
340 * ensured to be less than 32-bits by acpi_hw_validate_register(). 328 } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
341 */ 329
342 new_value32 = ACPI_GET_BITS(&value, index * access_width, 330 status = acpi_hw_write_port((acpi_io_address)
343 ACPI_MASK_BITS_ABOVE_32 331 address, value, reg->bit_width);
344 (access_width));
345
346 if (bit_offset >= access_width) {
347 bit_offset -= access_width;
348 } else {
349 /*
350 * Use offset style bit masks because access_width is ensured
351 * to be less than 32-bits by acpi_hw_validate_register() and
352 * bit_offset/bit_width is less than access_width here.
353 */
354 if (bit_offset) {
355 new_value32 &= ACPI_MASK_BITS_BELOW(bit_offset);
356 }
357 if (bit_width < access_width) {
358 new_value32 &= ACPI_MASK_BITS_ABOVE(bit_width);
359 }
360
361 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
362 if (bit_offset || bit_width < access_width) {
363 /*
364 * Read old values in order not to modify the bits that
365 * are beyond the register bit_width/bit_offset setting.
366 */
367 status =
368 acpi_os_read_memory((acpi_physical_address)
369 address +
370 index *
371 ACPI_DIV_8
372 (access_width),
373 &value64,
374 access_width);
375 old_value32 = (u32)value64;
376
377 /*
378 * Use offset style bit masks because access_width is
379 * ensured to be less than 32-bits by
380 * acpi_hw_validate_register() and bit_offset/bit_width is
381 * less than access_width here.
382 */
383 if (bit_offset) {
384 old_value32 &=
385 ACPI_MASK_BITS_ABOVE
386 (bit_offset);
387 bit_offset = 0;
388 }
389 if (bit_width < access_width) {
390 old_value32 &=
391 ACPI_MASK_BITS_BELOW
392 (bit_width);
393 }
394
395 new_value32 |= old_value32;
396 }
397
398 value64 = (u64)new_value32;
399 status =
400 acpi_os_write_memory((acpi_physical_address)
401 address +
402 index *
403 ACPI_DIV_8
404 (access_width),
405 value64, access_width);
406 } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
407
408 if (bit_offset || bit_width < access_width) {
409 /*
410 * Read old values in order not to modify the bits that
411 * are beyond the register bit_width/bit_offset setting.
412 */
413 status =
414 acpi_hw_read_port((acpi_io_address)
415 address +
416 index *
417 ACPI_DIV_8
418 (access_width),
419 &old_value32,
420 access_width);
421
422 /*
423 * Use offset style bit masks because access_width is
424 * ensured to be less than 32-bits by
425 * acpi_hw_validate_register() and bit_offset/bit_width is
426 * less than access_width here.
427 */
428 if (bit_offset) {
429 old_value32 &=
430 ACPI_MASK_BITS_ABOVE
431 (bit_offset);
432 bit_offset = 0;
433 }
434 if (bit_width < access_width) {
435 old_value32 &=
436 ACPI_MASK_BITS_BELOW
437 (bit_width);
438 }
439
440 new_value32 |= old_value32;
441 }
442
443 status = acpi_hw_write_port((acpi_io_address)
444 address +
445 index *
446 ACPI_DIV_8
447 (access_width),
448 new_value32,
449 access_width);
450 }
451 }
452
453 /*
454 * Index * access_width is ensured to be less than 32-bits by
455 * acpi_hw_validate_register().
456 */
457 bit_width -=
458 bit_width > access_width ? access_width : bit_width;
459 index++;
460 } 332 }
461 333
462 ACPI_DEBUG_PRINT((ACPI_DB_IO, 334 ACPI_DEBUG_PRINT((ACPI_DB_IO,
463 "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n", 335 "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n",
464 value, access_width, ACPI_FORMAT_UINT64(address), 336 value, reg->bit_width, ACPI_FORMAT_UINT64(address),
465 acpi_ut_get_region_name(reg->space_id))); 337 acpi_ut_get_region_name(reg->space_id)));
466 338
467 return (status); 339 return (status);
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index f631a47724f0..1783cd7e1446 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -47,6 +47,7 @@
47#include "acparser.h" 47#include "acparser.h"
48#include "acdispat.h" 48#include "acdispat.h"
49#include "actables.h" 49#include "actables.h"
50#include "acinterp.h"
50 51
51#define _COMPONENT ACPI_NAMESPACE 52#define _COMPONENT ACPI_NAMESPACE
52ACPI_MODULE_NAME("nsparse") 53ACPI_MODULE_NAME("nsparse")
@@ -170,6 +171,8 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
170 171
171 ACPI_FUNCTION_TRACE(ns_parse_table); 172 ACPI_FUNCTION_TRACE(ns_parse_table);
172 173
174 acpi_ex_enter_interpreter();
175
173 /* 176 /*
174 * AML Parse, pass 1 177 * AML Parse, pass 1
175 * 178 *
@@ -185,7 +188,7 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
185 status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, 188 status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1,
186 table_index, start_node); 189 table_index, start_node);
187 if (ACPI_FAILURE(status)) { 190 if (ACPI_FAILURE(status)) {
188 return_ACPI_STATUS(status); 191 goto error_exit;
189 } 192 }
190 193
191 /* 194 /*
@@ -201,8 +204,10 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
201 status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, 204 status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2,
202 table_index, start_node); 205 table_index, start_node);
203 if (ACPI_FAILURE(status)) { 206 if (ACPI_FAILURE(status)) {
204 return_ACPI_STATUS(status); 207 goto error_exit;
205 } 208 }
206 209
210error_exit:
211 acpi_ex_exit_interpreter();
207 return_ACPI_STATUS(status); 212 return_ACPI_STATUS(status);
208} 213}
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 2215fc847fa9..ac6ddcc080d4 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -928,7 +928,7 @@ static ssize_t format_show(struct device *dev,
928{ 928{
929 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 929 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
930 930
931 return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->code)); 931 return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
932} 932}
933static DEVICE_ATTR_RO(format); 933static DEVICE_ATTR_RO(format);
934 934
@@ -961,8 +961,8 @@ static ssize_t format1_show(struct device *dev,
961 continue; 961 continue;
962 if (nfit_dcr->dcr->code == dcr->code) 962 if (nfit_dcr->dcr->code == dcr->code)
963 continue; 963 continue;
964 rc = sprintf(buf, "%#x\n", 964 rc = sprintf(buf, "0x%04x\n",
965 be16_to_cpu(nfit_dcr->dcr->code)); 965 le16_to_cpu(nfit_dcr->dcr->code));
966 break; 966 break;
967 } 967 }
968 if (rc != ENXIO) 968 if (rc != ENXIO)
@@ -1131,11 +1131,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1131 1131
1132 /* 1132 /*
1133 * Until standardization materializes we need to consider up to 3 1133 * Until standardization materializes we need to consider up to 3
1134 * different command sets. Note, that checking for function0 (bit0) 1134 * different command sets. Note, that checking for zero functions
1135 * tells us if any commands are reachable through this uuid. 1135 * tells us if any commands might be reachable through this uuid.
1136 */ 1136 */
1137 for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_HPE2; i++) 1137 for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_HPE2; i++)
1138 if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) 1138 if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 0))
1139 break; 1139 break;
1140 1140
1141 /* limit the supported commands to those that are publicly documented */ 1141 /* limit the supported commands to those that are publicly documented */
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index 11cb38348aef..02b9ea1e8d2e 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -53,12 +53,12 @@ enum nfit_uuids {
53}; 53};
54 54
55/* 55/*
56 * Region format interface codes are stored as an array of bytes in the 56 * Region format interface codes are stored with the interface as the
57 * NFIT DIMM Control Region structure 57 * LSB and the function as the MSB.
58 */ 58 */
59#define NFIT_FIC_BYTE cpu_to_be16(0x101) /* byte-addressable energy backed */ 59#define NFIT_FIC_BYTE cpu_to_le16(0x101) /* byte-addressable energy backed */
60#define NFIT_FIC_BLK cpu_to_be16(0x201) /* block-addressable non-energy backed */ 60#define NFIT_FIC_BLK cpu_to_le16(0x201) /* block-addressable non-energy backed */
61#define NFIT_FIC_BYTEN cpu_to_be16(0x301) /* byte-addressable non-energy backed */ 61#define NFIT_FIC_BYTEN cpu_to_le16(0x301) /* byte-addressable non-energy backed */
62 62
63enum { 63enum {
64 NFIT_BLK_READ_FLUSH = 1, 64 NFIT_BLK_READ_FLUSH = 1,
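Per the corrected comment, the format interface code is a little-endian u16 with the interface in the low byte and the function in the high byte, so decoding it big-endian on x86 byte-swapped every value. A minimal sketch of the intended decoding (variable names invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t fic = 0x0101;          /* NFIT_FIC_BYTE */
        uint8_t iface = fic & 0xff;     /* interface: LSB */
        uint8_t func  = fic >> 8;       /* function:  MSB */

        printf("function=%u interface=%u\n", func, iface);
        return 0;
}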
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 8fc7323ed3e8..4ed4061813e6 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -839,7 +839,7 @@ void acpi_penalize_isa_irq(int irq, int active)
839{ 839{
840 if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty))) 840 if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
841 acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) + 841 acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
842 active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING; 842 (active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
843} 843}
844 844
845bool acpi_isa_irq_available(int irq) 845bool acpi_isa_irq_available(int irq)
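
The pci_link.c hunk above is a pure operator-precedence fix: in C, '+' binds tighter than '?:', so without the added parentheses the expression computed "(penalty + active) ? A : B" rather than adding the selected penalty. A standalone sketch of the bug (the constants are hypothetical stand-ins, not the kernel's PIRQ_* values):

    #include <stdio.h>

    #define PENALTY_USED   16  /* hypothetical stand-ins */
    #define PENALTY_USING   4

    int main(void)
    {
        int base = 100, active = 0;

        /* '+' binds tighter than '?:', so this is (base + active) ? ... */
        int buggy = base + active ? PENALTY_USED : PENALTY_USING;

        /* parentheses force the intended grouping */
        int fixed = base + (active ? PENALTY_USED : PENALTY_USING);

        printf("buggy=%d fixed=%d\n", buggy, fixed);  /* buggy=16 fixed=104 */
        return 0;
    }
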
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 22c09952e177..b4de130f2d57 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -680,9 +680,6 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
680 u64 mask = 0; 680 u64 mask = 0;
681 union acpi_object *obj; 681 union acpi_object *obj;
682 682
683 if (funcs == 0)
684 return false;
685
686 obj = acpi_evaluate_dsm(handle, uuid, rev, 0, NULL); 683 obj = acpi_evaluate_dsm(handle, uuid, rev, 0, NULL);
687 if (!obj) 684 if (!obj)
688 return false; 685 return false;
@@ -695,6 +692,9 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
695 mask |= (((u64)obj->buffer.pointer[i]) << (i * 8)); 692 mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
696 ACPI_FREE(obj); 693 ACPI_FREE(obj);
697 694
695 if (funcs == 0)
696 return true;
697
698 /* 698 /*
699 * Bit 0 indicates whether there's support for any functions other than 699 * Bit 0 indicates whether there's support for any functions other than
700 * function 0 for the specified UUID and revision. 700 * function 0 for the specified UUID and revision.
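
After the utils.c hunk above, acpi_check_dsm() evaluates the _DSM first and only then treats funcs == 0 as "report whether the UUID answers at all", which is exactly what the nfit.c call site above now passes. A rough userspace sketch of the resulting mask test (the helper name and values are illustrative, not the kernel API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* 'mask' models the function bitmask returned by _DSM function 0 */
    static bool check_funcs(uint64_t mask, uint64_t funcs)
    {
        if (funcs == 0)
            return true;  /* the evaluation succeeded; that is enough */

        /* bit 0 advertises the other functions; all requested bits must be set */
        return (mask & 0x1) && (mask & funcs) == funcs;
    }

    int main(void)
    {
        printf("%d\n", check_funcs(0x3, 0));    /* 1: any-functions probe */
        printf("%d\n", check_funcs(0x3, 0x2));  /* 1: function 1 supported */
        printf("%d\n", check_funcs(0x1, 0x2));  /* 0: function 1 missing */
        return 0;
    }
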
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
index 6e702ab57220..1d31c0c0fc20 100644
--- a/drivers/ata/ahci_seattle.c
+++ b/drivers/ata/ahci_seattle.c
@@ -137,7 +137,7 @@ static const struct ata_port_info *ahci_seattle_get_port_info(
137 u32 val; 137 u32 val;
138 138
139 plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL); 139 plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL);
140 if (IS_ERR(plat_data)) 140 if (!plat_data)
141 return &ahci_port_info; 141 return &ahci_port_info;
142 142
143 plat_data->sgpio_ctrl = devm_ioremap_resource(dev, 143 plat_data->sgpio_ctrl = devm_ioremap_resource(dev,
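
The ahci_seattle hunk above fixes a mismatched error check: devm_kzalloc() returns NULL on failure, and IS_ERR(NULL) is false, so the failure path could never trigger. A standalone re-creation of the kernel's pointer-error encoding (the macros mirror include/linux/err.h from memory and are an assumption, not a quotation):

    #include <stdio.h>

    #define MAX_ERRNO 4095  /* the top page of the address space encodes errors */
    #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    static int IS_ERR(const void *ptr)
    {
        return IS_ERR_VALUE(ptr);
    }

    int main(void)
    {
        void *alloc = NULL;          /* kzalloc-style failure */
        void *err = (void *)-12L;    /* what ERR_PTR(-ENOMEM) encodes */

        printf("IS_ERR(NULL) = %d\n", IS_ERR(alloc));  /* 0: check !ptr instead */
        printf("IS_ERR(-12)  = %d\n", IS_ERR(err));    /* 1 */
        return 0;
    }

The clk-oxnas hunk further down fixes the inverse mistake: syscon_node_to_regmap() returns ERR_PTR() values rather than NULL, so it needs IS_ERR() instead of a NULL test.
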
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 61dc7a99e89a..c6f017458958 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -606,7 +606,7 @@ void ata_scsi_error(struct Scsi_Host *host)
606 ata_scsi_port_error_handler(host, ap); 606 ata_scsi_port_error_handler(host, ap);
607 607
608 /* finish or retry handled scmd's and clean up */ 608 /* finish or retry handled scmd's and clean up */
609 WARN_ON(host->host_failed || !list_empty(&eh_work_q)); 609 WARN_ON(!list_empty(&eh_work_q));
610 610
611 DPRINTK("EXIT\n"); 611 DPRINTK("EXIT\n");
612} 612}
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index bd74ee555278..745489a1c86a 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -986,7 +986,7 @@ static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
986 * Looks like a lot of fuss, but it avoids an unnecessary 986 * Looks like a lot of fuss, but it avoids an unnecessary
987 * +1 usec read-after-write delay for unaffected registers. 987 * +1 usec read-after-write delay for unaffected registers.
988 */ 988 */
989 laddr = (long)addr & 0xffff; 989 laddr = (unsigned long)addr & 0xffff;
990 if (laddr >= 0x300 && laddr <= 0x33c) { 990 if (laddr >= 0x300 && laddr <= 0x33c) {
991 laddr &= 0x000f; 991 laddr &= 0x000f;
992 if (laddr == 0x4 || laddr == 0xc) { 992 if (laddr == 0x4 || laddr == 0xc) {
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 527bbd595e37..5fc81e240c24 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -2795,9 +2795,7 @@ static int hrz_probe(struct pci_dev *pci_dev,
2795 dev->atm_dev->ci_range.vpi_bits = vpi_bits; 2795 dev->atm_dev->ci_range.vpi_bits = vpi_bits;
2796 dev->atm_dev->ci_range.vci_bits = 10-vpi_bits; 2796 dev->atm_dev->ci_range.vci_bits = 10-vpi_bits;
2797 2797
2798 init_timer(&dev->housekeeping); 2798 setup_timer(&dev->housekeeping, do_housekeeping, (unsigned long) dev);
2799 dev->housekeeping.function = do_housekeeping;
2800 dev->housekeeping.data = (unsigned long) dev;
2801 mod_timer(&dev->housekeeping, jiffies); 2799 mod_timer(&dev->housekeeping, jiffies);
2802 2800
2803out: 2801out:
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 6b2a84e7f2be..2609ba20b396 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
10obj-y += power/ 10obj-y += power/
11obj-$(CONFIG_HAS_DMA) += dma-mapping.o 11obj-$(CONFIG_HAS_DMA) += dma-mapping.o
12obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o 12obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
13obj-$(CONFIG_ISA) += isa.o 13obj-$(CONFIG_ISA_BUS_API) += isa.o
14obj-$(CONFIG_FW_LOADER) += firmware_class.o 14obj-$(CONFIG_FW_LOADER) += firmware_class.o
15obj-$(CONFIG_NUMA) += node.o 15obj-$(CONFIG_NUMA) += node.o
16obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o 16obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index 91dba65d7264..cd6ccdcf9df0 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -180,4 +180,4 @@ static int __init isa_bus_init(void)
180 return error; 180 return error;
181} 181}
182 182
183device_initcall(isa_bus_init); 183postcore_initcall(isa_bus_init);
diff --git a/drivers/base/module.c b/drivers/base/module.c
index db930d3ee312..2a215780eda2 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)
24 24
25static void module_create_drivers_dir(struct module_kobject *mk) 25static void module_create_drivers_dir(struct module_kobject *mk)
26{ 26{
27 if (!mk || mk->drivers_dir) 27 static DEFINE_MUTEX(drivers_dir_mutex);
28 return;
29 28
30 mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj); 29 mutex_lock(&drivers_dir_mutex);
30 if (mk && !mk->drivers_dir)
31 mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
32 mutex_unlock(&drivers_dir_mutex);
31} 33}
32 34
33void module_add_driver(struct module *mod, struct device_driver *drv) 35void module_add_driver(struct module *mod, struct device_driver *drv)
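
The module.c hunk above makes a lazy check-then-create sequence atomic: without the mutex, two concurrent driver registrations could both see drivers_dir as NULL and each create a "drivers" kobject. A userspace analogue with pthreads (malloc stands in for kobject_create_and_add()):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t dir_mutex = PTHREAD_MUTEX_INITIALIZER;
    static void *drivers_dir;

    static void create_drivers_dir(void)
    {
        pthread_mutex_lock(&dir_mutex);
        if (!drivers_dir)                 /* check and create under one lock */
            drivers_dir = malloc(1);
        pthread_mutex_unlock(&dir_mutex);
    }

    static void *worker(void *arg)
    {
        (void)arg;
        create_drivers_dir();
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("dir=%p\n", drivers_dir);  /* created exactly once */
        return 0;
    }
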
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
index 83d6e7ba1a34..8c3434bdb26d 100644
--- a/drivers/base/power/opp/cpu.c
+++ b/drivers/base/power/opp/cpu.c
@@ -211,7 +211,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
211 } 211 }
212 212
213 /* Mark opp-table as multiple CPUs are sharing it now */ 213 /* Mark opp-table as multiple CPUs are sharing it now */
214 opp_table->shared_opp = true; 214 opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
215 } 215 }
216unlock: 216unlock:
217 mutex_unlock(&opp_table_lock); 217 mutex_unlock(&opp_table_lock);
@@ -227,7 +227,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
227 * 227 *
228 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev. 228 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
229 * 229 *
230 * Returns -ENODEV if OPP table isn't already present. 230 * Returns -ENODEV if OPP table isn't already present and -EINVAL if the OPP
231 * table's status is access-unknown.
231 * 232 *
232 * Locking: The internal opp_table and opp structures are RCU protected. 233 * Locking: The internal opp_table and opp structures are RCU protected.
233 * Hence this function internally uses RCU updater strategy with mutex locks 234 * Hence this function internally uses RCU updater strategy with mutex locks
@@ -249,9 +250,14 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
249 goto unlock; 250 goto unlock;
250 } 251 }
251 252
253 if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) {
254 ret = -EINVAL;
255 goto unlock;
256 }
257
252 cpumask_clear(cpumask); 258 cpumask_clear(cpumask);
253 259
254 if (opp_table->shared_opp) { 260 if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
255 list_for_each_entry(opp_dev, &opp_table->dev_list, node) 261 list_for_each_entry(opp_dev, &opp_table->dev_list, node)
256 cpumask_set_cpu(opp_dev->dev->id, cpumask); 262 cpumask_set_cpu(opp_dev->dev->id, cpumask);
257 } else { 263 } else {
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
index 94d2010558e3..1dfd3dd92624 100644
--- a/drivers/base/power/opp/of.c
+++ b/drivers/base/power/opp/of.c
@@ -34,7 +34,10 @@ static struct opp_table *_managed_opp(const struct device_node *np)
34 * But the OPPs will be considered as shared only if the 34 * But the OPPs will be considered as shared only if the
35 * OPP table contains a "opp-shared" property. 35 * OPP table contains a "opp-shared" property.
36 */ 36 */
37 return opp_table->shared_opp ? opp_table : NULL; 37 if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED)
38 return opp_table;
39
40 return NULL;
38 } 41 }
39 } 42 }
40 43
@@ -353,7 +356,10 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
353 } 356 }
354 357
355 opp_table->np = opp_np; 358 opp_table->np = opp_np;
356 opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared"); 359 if (of_property_read_bool(opp_np, "opp-shared"))
360 opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
361 else
362 opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
357 363
358 mutex_unlock(&opp_table_lock); 364 mutex_unlock(&opp_table_lock);
359 365
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index 20f3be22e060..fabd5ca1a083 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -119,6 +119,12 @@ struct opp_device {
119#endif 119#endif
120}; 120};
121 121
122enum opp_table_access {
123 OPP_TABLE_ACCESS_UNKNOWN = 0,
124 OPP_TABLE_ACCESS_EXCLUSIVE = 1,
125 OPP_TABLE_ACCESS_SHARED = 2,
126};
127
122/** 128/**
123 * struct opp_table - Device opp structure 129 * struct opp_table - Device opp structure
124 * @node: table node - contains the devices with OPPs that 130 * @node: table node - contains the devices with OPPs that
@@ -166,7 +172,7 @@ struct opp_table {
166 /* For backward compatibility with v1 bindings */ 172 /* For backward compatibility with v1 bindings */
167 unsigned int voltage_tolerance_v1; 173 unsigned int voltage_tolerance_v1;
168 174
169 bool shared_opp; 175 enum opp_table_access shared_opp;
170 struct dev_pm_opp *suspend_opp; 176 struct dev_pm_opp *suspend_opp;
171 177
172 unsigned int *supported_hw; 178 unsigned int *supported_hw;
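
The opp.h hunk above widens a bool into a tri-state: a freshly kzalloc'd table carrying a plain bool reads as "exclusive", indistinguishable from a table whose sharing mode was simply never parsed. Reserving the zero value for UNKNOWN lets dev_pm_opp_get_sharing_cpus() fail with -EINVAL in that window, as the cpu.c hunk earlier does. A small sketch of the idea:

    #include <stdio.h>

    enum opp_table_access {
        OPP_TABLE_ACCESS_UNKNOWN = 0,    /* the zero-initialized state */
        OPP_TABLE_ACCESS_EXCLUSIVE = 1,
        OPP_TABLE_ACCESS_SHARED = 2,
    };

    struct table { enum opp_table_access shared_opp; };

    int main(void)
    {
        struct table t = { 0 };  /* as after kzalloc() */

        if (t.shared_opp == OPP_TABLE_ACCESS_UNKNOWN)
            printf("mode not known yet: caller can return -EINVAL\n");
        return 0;
    }
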
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d597e432e195..ab19adb07a12 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1750,7 +1750,7 @@ aoecmd_init(void)
1750 int ret; 1750 int ret;
1751 1751
1752 /* get_zeroed_page returns page with ref count 1 */ 1752 /* get_zeroed_page returns page with ref count 1 */
1753 p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT); 1753 p = (void *) get_zeroed_page(GFP_KERNEL);
1754 if (!p) 1754 if (!p)
1755 return -ENOMEM; 1755 return -ENOMEM;
1756 empty_page = virt_to_page(p); 1756 empty_page = virt_to_page(p);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 31e73a7a40f2..6a48ed41963f 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -941,7 +941,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
941 debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize); 941 debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
942 debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout); 942 debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
943 debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize); 943 debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
944 debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops); 944 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
945 945
946 return 0; 946 return 0;
947} 947}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index ca13df854639..2e6d1e9c3345 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -874,8 +874,12 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
874 const struct blk_mq_queue_data *qd) 874 const struct blk_mq_queue_data *qd)
875{ 875{
876 unsigned long flags; 876 unsigned long flags;
877 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data; 877 int qid = hctx->queue_num;
878 struct blkfront_info *info = hctx->queue->queuedata;
879 struct blkfront_ring_info *rinfo = NULL;
878 880
881 BUG_ON(info->nr_rings <= qid);
882 rinfo = &info->rinfo[qid];
879 blk_mq_start_request(qd->rq); 883 blk_mq_start_request(qd->rq);
880 spin_lock_irqsave(&rinfo->ring_lock, flags); 884 spin_lock_irqsave(&rinfo->ring_lock, flags);
881 if (RING_FULL(&rinfo->ring)) 885 if (RING_FULL(&rinfo->ring))
@@ -901,20 +905,9 @@ out_busy:
901 return BLK_MQ_RQ_QUEUE_BUSY; 905 return BLK_MQ_RQ_QUEUE_BUSY;
902} 906}
903 907
904static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
905 unsigned int index)
906{
907 struct blkfront_info *info = (struct blkfront_info *)data;
908
909 BUG_ON(info->nr_rings <= index);
910 hctx->driver_data = &info->rinfo[index];
911 return 0;
912}
913
914static struct blk_mq_ops blkfront_mq_ops = { 908static struct blk_mq_ops blkfront_mq_ops = {
915 .queue_rq = blkif_queue_rq, 909 .queue_rq = blkif_queue_rq,
916 .map_queue = blk_mq_map_queue, 910 .map_queue = blk_mq_map_queue,
917 .init_hctx = blk_mq_init_hctx,
918}; 911};
919 912
920static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, 913static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
@@ -950,6 +943,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
950 return PTR_ERR(rq); 943 return PTR_ERR(rq);
951 } 944 }
952 945
946 rq->queuedata = info;
953 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); 947 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
954 948
955 if (info->feature_discard) { 949 if (info->feature_discard) {
@@ -2149,6 +2143,8 @@ static int blkfront_resume(struct xenbus_device *dev)
2149 return err; 2143 return err;
2150 2144
2151 err = talk_to_blkback(dev, info); 2145 err = talk_to_blkback(dev, info);
2146 if (!err)
2147 blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
2152 2148
2153 /* 2149 /*
2154 * We have to wait for the backend to switch to 2150 * We have to wait for the backend to switch to
@@ -2485,10 +2481,23 @@ static void blkback_changed(struct xenbus_device *dev,
2485 break; 2481 break;
2486 2482
2487 case XenbusStateConnected: 2483 case XenbusStateConnected:
2488 if (dev->state != XenbusStateInitialised) { 2484 /*
2485 * talk_to_blkback sets state to XenbusStateInitialised
2486 * and blkfront_connect sets it to XenbusStateConnected
2487 * (if connection went OK).
2488 *
2489 * If the backend (or toolstack) decides to poke at backend
2490 * state (and re-trigger the watch by setting the state repeatedly
 2491 * to XenbusStateConnected (4)), we need to deal with this.
2492 * This is allowed as this is used to communicate to the guest
2493 * that the size of disk has changed!
2494 */
2495 if ((dev->state != XenbusStateInitialised) &&
2496 (dev->state != XenbusStateConnected)) {
2489 if (talk_to_blkback(dev, info)) 2497 if (talk_to_blkback(dev, info))
2490 break; 2498 break;
2491 } 2499 }
2500
2492 blkfront_connect(info); 2501 blkfront_connect(info);
2493 break; 2502 break;
2494 2503
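
The xen-blkfront changes above stop caching a ring pointer in hctx->driver_data and instead derive it from queue->queuedata plus hctx->queue_num on every request, which stays correct after blk_mq_update_nr_hw_queues() resizes the ring array across a resume. A userspace analogue of why the cached pointer goes stale (struct names are loose stand-ins):

    #include <stdio.h>
    #include <stdlib.h>

    struct ring { int id; };
    struct info { struct ring *rings; int nr; };

    int main(void)
    {
        struct info info = { calloc(2, sizeof(struct ring)), 2 };
        struct ring *cached = &info.rings[1];  /* init_hctx-style caching */

        /* resume path: the ring array is reallocated with a new count
         * (the old array is deliberately leaked so 'cached' stays safe
         * to compare) */
        info.rings = calloc(4, sizeof(struct ring));
        info.nr = 4;
        info.rings[1].id = 42;

        struct ring *fresh = &info.rings[1];   /* queue_rq-style lookup */
        printf("cached %s stale, fresh id=%d\n",
               cached == fresh ? "is not" : "is", fresh->id);
        return 0;
    }
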
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 94fb407d8561..44b1bd6baa38 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3820,6 +3820,7 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
3820 while (!list_empty(&intf->waiting_rcv_msgs)) { 3820 while (!list_empty(&intf->waiting_rcv_msgs)) {
3821 smi_msg = list_entry(intf->waiting_rcv_msgs.next, 3821 smi_msg = list_entry(intf->waiting_rcv_msgs.next,
3822 struct ipmi_smi_msg, link); 3822 struct ipmi_smi_msg, link);
3823 list_del(&smi_msg->link);
3823 if (!run_to_completion) 3824 if (!run_to_completion)
3824 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 3825 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
3825 flags); 3826 flags);
@@ -3829,11 +3830,14 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
3829 if (rv > 0) { 3830 if (rv > 0) {
3830 /* 3831 /*
3831 * To preserve message order, quit if we 3832 * To preserve message order, quit if we
3832 * can't handle a message. 3833 * can't handle a message. Add the message
 3834 * back at the head; this is safe because this
3835 * tasklet is the only thing that pulls the
3836 * messages.
3833 */ 3837 */
3838 list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
3834 break; 3839 break;
3835 } else { 3840 } else {
3836 list_del(&smi_msg->link);
3837 if (rv == 0) 3841 if (rv == 0)
3838 /* Message handled */ 3842 /* Message handled */
3839 ipmi_free_smi_msg(smi_msg); 3843 ipmi_free_smi_msg(smi_msg);
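
The ipmi_msghandler hunk above pops each message before handling it and, when the handler reports busy, re-inserts it at the list head so delivery order is preserved. A runnable analogue using BSD TAILQ lists in place of the kernel's list_head (the messages and the busy condition are made up):

    #include <stdio.h>
    #include <sys/queue.h>

    struct msg {
        int seq;
        TAILQ_ENTRY(msg) link;
    };
    TAILQ_HEAD(msg_list, msg);

    static int handle(struct msg *m)
    {
        return m->seq == 1;  /* pretend message 1 hits a busy handler */
    }

    int main(void)
    {
        struct msg_list q = TAILQ_HEAD_INITIALIZER(q);
        struct msg msgs[3] = { { .seq = 0 }, { .seq = 1 }, { .seq = 2 } };

        for (int i = 0; i < 3; i++)
            TAILQ_INSERT_TAIL(&q, &msgs[i], link);

        while (!TAILQ_EMPTY(&q)) {
            struct msg *m = TAILQ_FIRST(&q);

            TAILQ_REMOVE(&q, m, link);           /* list_del() */
            if (handle(m)) {
                TAILQ_INSERT_HEAD(&q, m, link);  /* put it back at the head */
                break;                           /* quit to preserve order */
            }
            printf("handled %d\n", m->seq);
        }
        printf("next in line: %d\n", TAILQ_FIRST(&q)->seq);  /* 1 */
        return 0;
    }
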
diff --git a/drivers/clk/clk-oxnas.c b/drivers/clk/clk-oxnas.c
index efba7d4dbcfc..79bcb2e42060 100644
--- a/drivers/clk/clk-oxnas.c
+++ b/drivers/clk/clk-oxnas.c
@@ -144,9 +144,9 @@ static int oxnas_stdclk_probe(struct platform_device *pdev)
144 return -ENOMEM; 144 return -ENOMEM;
145 145
146 regmap = syscon_node_to_regmap(of_get_parent(np)); 146 regmap = syscon_node_to_regmap(of_get_parent(np));
147 if (!regmap) { 147 if (IS_ERR(regmap)) {
148 dev_err(&pdev->dev, "failed to have parent regmap\n"); 148 dev_err(&pdev->dev, "failed to have parent regmap\n");
149 return -EINVAL; 149 return PTR_ERR(regmap);
150 } 150 }
151 151
152 for (i = 0; i < ARRAY_SIZE(clk_oxnas_init); i++) { 152 for (i = 0; i < ARRAY_SIZE(clk_oxnas_init); i++) {
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index 4bb130cd0062..05b3d73bfefa 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -321,9 +321,9 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
321 } 321 }
322 322
323 cclk = clk_register(NULL, &cpuclk->hw); 323 cclk = clk_register(NULL, &cpuclk->hw);
324 if (IS_ERR(clk)) { 324 if (IS_ERR(cclk)) {
325 pr_err("%s: could not register cpuclk %s\n", __func__, name); 325 pr_err("%s: could not register cpuclk %s\n", __func__, name);
326 ret = PTR_ERR(clk); 326 ret = PTR_ERR(cclk);
327 goto free_rate_table; 327 goto free_rate_table;
328 } 328 }
329 329
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index bc856f21f6b2..077fcdc7908b 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -41,8 +41,6 @@ static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
41#define ROCKCHIP_MMC_DEGREE_MASK 0x3 41#define ROCKCHIP_MMC_DEGREE_MASK 0x3
42#define ROCKCHIP_MMC_DELAYNUM_OFFSET 2 42#define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
43#define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET) 43#define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
44#define ROCKCHIP_MMC_INIT_STATE_RESET 0x1
45#define ROCKCHIP_MMC_INIT_STATE_SHIFT 1
46 44
47#define PSECS_PER_SEC 1000000000000LL 45#define PSECS_PER_SEC 1000000000000LL
48 46
@@ -154,6 +152,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
154 return ERR_PTR(-ENOMEM); 152 return ERR_PTR(-ENOMEM);
155 153
156 init.name = name; 154 init.name = name;
155 init.flags = 0;
157 init.num_parents = num_parents; 156 init.num_parents = num_parents;
158 init.parent_names = parent_names; 157 init.parent_names = parent_names;
159 init.ops = &rockchip_mmc_clk_ops; 158 init.ops = &rockchip_mmc_clk_ops;
@@ -162,15 +161,6 @@ struct clk *rockchip_clk_register_mmc(const char *name,
162 mmc_clock->reg = reg; 161 mmc_clock->reg = reg;
163 mmc_clock->shift = shift; 162 mmc_clock->shift = shift;
164 163
165 /*
166 * Assert init_state to soft reset the CLKGEN
167 * for mmc tuning phase and degree
168 */
169 if (mmc_clock->shift == ROCKCHIP_MMC_INIT_STATE_SHIFT)
170 writel(HIWORD_UPDATE(ROCKCHIP_MMC_INIT_STATE_RESET,
171 ROCKCHIP_MMC_INIT_STATE_RESET,
172 mmc_clock->shift), mmc_clock->reg);
173
174 clk = clk_register(NULL, &mmc_clock->hw); 164 clk = clk_register(NULL, &mmc_clock->hw);
175 if (IS_ERR(clk)) 165 if (IS_ERR(clk))
176 kfree(mmc_clock); 166 kfree(mmc_clock);
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 291543f52caa..8059a8d3ea36 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -832,9 +832,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
832 RK3399_CLKGATE_CON(13), 1, GFLAGS), 832 RK3399_CLKGATE_CON(13), 1, GFLAGS),
833 833
834 /* perihp */ 834 /* perihp */
835 GATE(0, "cpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED, 835 GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
836 RK3399_CLKGATE_CON(5), 0, GFLAGS), 836 RK3399_CLKGATE_CON(5), 0, GFLAGS),
837 GATE(0, "gpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED, 837 GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
838 RK3399_CLKGATE_CON(5), 1, GFLAGS), 838 RK3399_CLKGATE_CON(5), 1, GFLAGS),
839 COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED, 839 COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED,
840 RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS, 840 RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS,
@@ -1466,6 +1466,8 @@ static struct rockchip_clk_branch rk3399_clk_pmu_branches[] __initdata = {
1466 1466
1467static const char *const rk3399_cru_critical_clocks[] __initconst = { 1467static const char *const rk3399_cru_critical_clocks[] __initconst = {
1468 "aclk_cci_pre", 1468 "aclk_cci_pre",
1469 "aclk_gic",
1470 "aclk_gic_noc",
1469 "pclk_perilp0", 1471 "pclk_perilp0",
1470 "pclk_perilp0", 1472 "pclk_perilp0",
1471 "hclk_perilp0", 1473 "hclk_perilp0",
@@ -1508,6 +1510,7 @@ static void __init rk3399_clk_init(struct device_node *np)
1508 ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); 1510 ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
1509 if (IS_ERR(ctx)) { 1511 if (IS_ERR(ctx)) {
1510 pr_err("%s: rockchip clk init failed\n", __func__); 1512 pr_err("%s: rockchip clk init failed\n", __func__);
1513 iounmap(reg_base);
1511 return; 1514 return;
1512 } 1515 }
1513 1516
@@ -1553,6 +1556,7 @@ static void __init rk3399_pmu_clk_init(struct device_node *np)
1553 ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS); 1556 ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS);
1554 if (IS_ERR(ctx)) { 1557 if (IS_ERR(ctx)) {
1555 pr_err("%s: rockchip pmu clk init failed\n", __func__); 1558 pr_err("%s: rockchip pmu clk init failed\n", __func__);
1559 iounmap(reg_base);
1556 return; 1560 return;
1557 } 1561 }
1558 1562
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 15d06fcf0b50..a782ce87715c 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -22,7 +22,6 @@
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */ 23 */
24 24
25#include <linux/module.h>
26#include <linux/kernel.h> 25#include <linux/kernel.h>
27#include <linux/ktime.h> 26#include <linux/ktime.h>
28#include <linux/init.h> 27#include <linux/init.h>
@@ -56,11 +55,21 @@ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
56/* proc_event_counts is used as the sequence number of the netlink message */ 55/* proc_event_counts is used as the sequence number of the netlink message */
57static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 }; 56static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
58 57
59static inline void get_seq(__u32 *ts, int *cpu) 58static inline void send_msg(struct cn_msg *msg)
60{ 59{
61 preempt_disable(); 60 preempt_disable();
62 *ts = __this_cpu_inc_return(proc_event_counts) - 1; 61
63 *cpu = smp_processor_id(); 62 msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
63 ((struct proc_event *)msg->data)->cpu = smp_processor_id();
64
65 /*
66 * Preemption remains disabled during send to ensure the messages are
67 * ordered according to their sequence numbers.
68 *
69 * If cn_netlink_send() fails, the data is not sent.
70 */
71 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);
72
64 preempt_enable(); 73 preempt_enable();
65} 74}
66 75
@@ -77,7 +86,6 @@ void proc_fork_connector(struct task_struct *task)
77 msg = buffer_to_cn_msg(buffer); 86 msg = buffer_to_cn_msg(buffer);
78 ev = (struct proc_event *)msg->data; 87 ev = (struct proc_event *)msg->data;
79 memset(&ev->event_data, 0, sizeof(ev->event_data)); 88 memset(&ev->event_data, 0, sizeof(ev->event_data));
80 get_seq(&msg->seq, &ev->cpu);
81 ev->timestamp_ns = ktime_get_ns(); 89 ev->timestamp_ns = ktime_get_ns();
82 ev->what = PROC_EVENT_FORK; 90 ev->what = PROC_EVENT_FORK;
83 rcu_read_lock(); 91 rcu_read_lock();
@@ -92,8 +100,7 @@ void proc_fork_connector(struct task_struct *task)
92 msg->ack = 0; /* not used */ 100 msg->ack = 0; /* not used */
93 msg->len = sizeof(*ev); 101 msg->len = sizeof(*ev);
94 msg->flags = 0; /* not used */ 102 msg->flags = 0; /* not used */
95 /* If cn_netlink_send() failed, the data is not sent */ 103 send_msg(msg);
96 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
97} 104}
98 105
99void proc_exec_connector(struct task_struct *task) 106void proc_exec_connector(struct task_struct *task)
@@ -108,7 +115,6 @@ void proc_exec_connector(struct task_struct *task)
108 msg = buffer_to_cn_msg(buffer); 115 msg = buffer_to_cn_msg(buffer);
109 ev = (struct proc_event *)msg->data; 116 ev = (struct proc_event *)msg->data;
110 memset(&ev->event_data, 0, sizeof(ev->event_data)); 117 memset(&ev->event_data, 0, sizeof(ev->event_data));
111 get_seq(&msg->seq, &ev->cpu);
112 ev->timestamp_ns = ktime_get_ns(); 118 ev->timestamp_ns = ktime_get_ns();
113 ev->what = PROC_EVENT_EXEC; 119 ev->what = PROC_EVENT_EXEC;
114 ev->event_data.exec.process_pid = task->pid; 120 ev->event_data.exec.process_pid = task->pid;
@@ -118,7 +124,7 @@ void proc_exec_connector(struct task_struct *task)
118 msg->ack = 0; /* not used */ 124 msg->ack = 0; /* not used */
119 msg->len = sizeof(*ev); 125 msg->len = sizeof(*ev);
120 msg->flags = 0; /* not used */ 126 msg->flags = 0; /* not used */
121 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); 127 send_msg(msg);
122} 128}
123 129
124void proc_id_connector(struct task_struct *task, int which_id) 130void proc_id_connector(struct task_struct *task, int which_id)
@@ -150,14 +156,13 @@ void proc_id_connector(struct task_struct *task, int which_id)
150 return; 156 return;
151 } 157 }
152 rcu_read_unlock(); 158 rcu_read_unlock();
153 get_seq(&msg->seq, &ev->cpu);
154 ev->timestamp_ns = ktime_get_ns(); 159 ev->timestamp_ns = ktime_get_ns();
155 160
156 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 161 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
157 msg->ack = 0; /* not used */ 162 msg->ack = 0; /* not used */
158 msg->len = sizeof(*ev); 163 msg->len = sizeof(*ev);
159 msg->flags = 0; /* not used */ 164 msg->flags = 0; /* not used */
160 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); 165 send_msg(msg);
161} 166}
162 167
163void proc_sid_connector(struct task_struct *task) 168void proc_sid_connector(struct task_struct *task)
@@ -172,7 +177,6 @@ void proc_sid_connector(struct task_struct *task)
172 msg = buffer_to_cn_msg(buffer); 177 msg = buffer_to_cn_msg(buffer);
173 ev = (struct proc_event *)msg->data; 178 ev = (struct proc_event *)msg->data;
174 memset(&ev->event_data, 0, sizeof(ev->event_data)); 179 memset(&ev->event_data, 0, sizeof(ev->event_data));
175 get_seq(&msg->seq, &ev->cpu);
176 ev->timestamp_ns = ktime_get_ns(); 180 ev->timestamp_ns = ktime_get_ns();
177 ev->what = PROC_EVENT_SID; 181 ev->what = PROC_EVENT_SID;
178 ev->event_data.sid.process_pid = task->pid; 182 ev->event_data.sid.process_pid = task->pid;
@@ -182,7 +186,7 @@ void proc_sid_connector(struct task_struct *task)
182 msg->ack = 0; /* not used */ 186 msg->ack = 0; /* not used */
183 msg->len = sizeof(*ev); 187 msg->len = sizeof(*ev);
184 msg->flags = 0; /* not used */ 188 msg->flags = 0; /* not used */
185 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); 189 send_msg(msg);
186} 190}
187 191
188void proc_ptrace_connector(struct task_struct *task, int ptrace_id) 192void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
@@ -197,7 +201,6 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
197 msg = buffer_to_cn_msg(buffer); 201 msg = buffer_to_cn_msg(buffer);
198 ev = (struct proc_event *)msg->data; 202 ev = (struct proc_event *)msg->data;
199 memset(&ev->event_data, 0, sizeof(ev->event_data)); 203 memset(&ev->event_data, 0, sizeof(ev->event_data));
200 get_seq(&msg->seq, &ev->cpu);
201 ev->timestamp_ns = ktime_get_ns(); 204 ev->timestamp_ns = ktime_get_ns();
202 ev->what = PROC_EVENT_PTRACE; 205 ev->what = PROC_EVENT_PTRACE;
203 ev->event_data.ptrace.process_pid = task->pid; 206 ev->event_data.ptrace.process_pid = task->pid;
@@ -215,7 +218,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
215 msg->ack = 0; /* not used */ 218 msg->ack = 0; /* not used */
216 msg->len = sizeof(*ev); 219 msg->len = sizeof(*ev);
217 msg->flags = 0; /* not used */ 220 msg->flags = 0; /* not used */
218 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); 221 send_msg(msg);
219} 222}
220 223
221void proc_comm_connector(struct task_struct *task) 224void proc_comm_connector(struct task_struct *task)
@@ -230,7 +233,6 @@ void proc_comm_connector(struct task_struct *task)
230 msg = buffer_to_cn_msg(buffer); 233 msg = buffer_to_cn_msg(buffer);
231 ev = (struct proc_event *)msg->data; 234 ev = (struct proc_event *)msg->data;
232 memset(&ev->event_data, 0, sizeof(ev->event_data)); 235 memset(&ev->event_data, 0, sizeof(ev->event_data));
233 get_seq(&msg->seq, &ev->cpu);
234 ev->timestamp_ns = ktime_get_ns(); 236 ev->timestamp_ns = ktime_get_ns();
235 ev->what = PROC_EVENT_COMM; 237 ev->what = PROC_EVENT_COMM;
236 ev->event_data.comm.process_pid = task->pid; 238 ev->event_data.comm.process_pid = task->pid;
@@ -241,7 +243,7 @@ void proc_comm_connector(struct task_struct *task)
241 msg->ack = 0; /* not used */ 243 msg->ack = 0; /* not used */
242 msg->len = sizeof(*ev); 244 msg->len = sizeof(*ev);
243 msg->flags = 0; /* not used */ 245 msg->flags = 0; /* not used */
244 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); 246 send_msg(msg);
245} 247}
246 248
247void proc_coredump_connector(struct task_struct *task) 249void proc_coredump_connector(struct task_struct *task)
@@ -256,7 +258,6 @@ void proc_coredump_connector(struct task_struct *task)
256 msg = buffer_to_cn_msg(buffer); 258 msg = buffer_to_cn_msg(buffer);
257 ev = (struct proc_event *)msg->data; 259 ev = (struct proc_event *)msg->data;
258 memset(&ev->event_data, 0, sizeof(ev->event_data)); 260 memset(&ev->event_data, 0, sizeof(ev->event_data));
259 get_seq(&msg->seq, &ev->cpu);
260 ev->timestamp_ns = ktime_get_ns(); 261 ev->timestamp_ns = ktime_get_ns();
261 ev->what = PROC_EVENT_COREDUMP; 262 ev->what = PROC_EVENT_COREDUMP;
262 ev->event_data.coredump.process_pid = task->pid; 263 ev->event_data.coredump.process_pid = task->pid;
@@ -266,7 +267,7 @@ void proc_coredump_connector(struct task_struct *task)
266 msg->ack = 0; /* not used */ 267 msg->ack = 0; /* not used */
267 msg->len = sizeof(*ev); 268 msg->len = sizeof(*ev);
268 msg->flags = 0; /* not used */ 269 msg->flags = 0; /* not used */
269 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); 270 send_msg(msg);
270} 271}
271 272
272void proc_exit_connector(struct task_struct *task) 273void proc_exit_connector(struct task_struct *task)
@@ -281,7 +282,6 @@ void proc_exit_connector(struct task_struct *task)
281 msg = buffer_to_cn_msg(buffer); 282 msg = buffer_to_cn_msg(buffer);
282 ev = (struct proc_event *)msg->data; 283 ev = (struct proc_event *)msg->data;
283 memset(&ev->event_data, 0, sizeof(ev->event_data)); 284 memset(&ev->event_data, 0, sizeof(ev->event_data));
284 get_seq(&msg->seq, &ev->cpu);
285 ev->timestamp_ns = ktime_get_ns(); 285 ev->timestamp_ns = ktime_get_ns();
286 ev->what = PROC_EVENT_EXIT; 286 ev->what = PROC_EVENT_EXIT;
287 ev->event_data.exit.process_pid = task->pid; 287 ev->event_data.exit.process_pid = task->pid;
@@ -293,7 +293,7 @@ void proc_exit_connector(struct task_struct *task)
293 msg->ack = 0; /* not used */ 293 msg->ack = 0; /* not used */
294 msg->len = sizeof(*ev); 294 msg->len = sizeof(*ev);
295 msg->flags = 0; /* not used */ 295 msg->flags = 0; /* not used */
296 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); 296 send_msg(msg);
297} 297}
298 298
299/* 299/*
@@ -325,7 +325,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
325 msg->ack = rcvd_ack + 1; 325 msg->ack = rcvd_ack + 1;
326 msg->len = sizeof(*ev); 326 msg->len = sizeof(*ev);
327 msg->flags = 0; /* not used */ 327 msg->flags = 0; /* not used */
328 cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL); 328 send_msg(msg);
329} 329}
330 330
331/** 331/**
@@ -389,5 +389,4 @@ static int __init cn_proc_init(void)
389 } 389 }
390 return 0; 390 return 0;
391} 391}
392 392device_initcall(cn_proc_init);
393module_init(cn_proc_init);
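
The cn_proc rework above folds the sequence-number bump and the netlink send into one send_msg() helper that keeps preemption disabled across both (hence GFP_NOWAIT instead of GFP_KERNEL), so events from one CPU cannot be published out of order relative to their per-CPU sequence numbers. A userspace sketch of that invariant, with a mutex standing in for preempt_disable():

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int seq;

    static void send_msg(const char *what)
    {
        pthread_mutex_lock(&lock);             /* kernel: preempt_disable() */
        unsigned int s = seq++;                /* take the sequence number */
        printf("seq=%u event=%s\n", s, what);  /* publish under the same
                                                  critical section */
        pthread_mutex_unlock(&lock);           /* kernel: preempt_enable() */
    }

    int main(void)
    {
        send_msg("fork");
        send_msg("exec");
        return 0;
    }
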
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 3646b143bbf5..0bb44d5b5df4 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -79,15 +79,16 @@ static const struct of_device_id machines[] __initconst = {
79static int __init cpufreq_dt_platdev_init(void) 79static int __init cpufreq_dt_platdev_init(void)
80{ 80{
81 struct device_node *np = of_find_node_by_path("/"); 81 struct device_node *np = of_find_node_by_path("/");
82 const struct of_device_id *match;
82 83
83 if (!np) 84 if (!np)
84 return -ENODEV; 85 return -ENODEV;
85 86
86 if (!of_match_node(machines, np)) 87 match = of_match_node(machines, np);
88 of_node_put(np);
89 if (!match)
87 return -ENODEV; 90 return -ENODEV;
88 91
89 of_node_put(of_root);
90
91 return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt", -1, 92 return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt", -1,
92 NULL, 0)); 93 NULL, 0));
93} 94}
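
The cpufreq-dt-platdev hunk above repairs two reference-counting slips in one move: the reference taken by of_find_node_by_path() was being dropped on the wrong node (of_root instead of np), and on the no-match path it was not dropped at all. The rule it restores, as a tiny refcount sketch (stand-in helpers, not the OF API):

    #include <stdio.h>

    struct node { int refcount; };

    static struct node root = { .refcount = 1 };

    static struct node *node_get(struct node *n) { n->refcount++; return n; }
    static void node_put(struct node *n) { n->refcount--; }

    int main(void)
    {
        struct node *np = node_get(&root);  /* lookup takes a reference */
        int match = 0;                      /* pretend nothing matched */

        node_put(np);  /* drop it once, after the last use, on every path */
        if (!match) {
            printf("refcount on early exit: %d\n", root.refcount);  /* 1 */
            return 0;
        }
        return 0;
    }
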
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 9009295f5134..5617c7087d77 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2261,6 +2261,10 @@ int cpufreq_update_policy(unsigned int cpu)
2261 * -> ask driver for current freq and notify governors about a change 2261 * -> ask driver for current freq and notify governors about a change
2262 */ 2262 */
2263 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 2263 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2264 if (cpufreq_suspended) {
2265 ret = -EAGAIN;
2266 goto unlock;
2267 }
2264 new_policy.cur = cpufreq_update_current_freq(policy); 2268 new_policy.cur = cpufreq_update_current_freq(policy);
2265 if (WARN_ON(!new_policy.cur)) { 2269 if (WARN_ON(!new_policy.cur)) {
2266 ret = -EIO; 2270 ret = -EIO;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index ee367e9b7d2e..1fa1a32928d7 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -372,26 +372,9 @@ static bool intel_pstate_get_ppc_enable_status(void)
372 return acpi_ppc; 372 return acpi_ppc;
373} 373}
374 374
375/*
376 * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
377 * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
378 * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
379 * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
380 * target ratio 0x17. The _PSS control value stores in a format which can be
381 * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
382 * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
383 * This function converts the _PSS control value to intel pstate driver format
384 * for comparison and assignment.
385 */
386static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
387{
388 return cpu->acpi_perf_data.states[index].control >> 8;
389}
390
391static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 375static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
392{ 376{
393 struct cpudata *cpu; 377 struct cpudata *cpu;
394 int turbo_pss_ctl;
395 int ret; 378 int ret;
396 int i; 379 int i;
397 380
@@ -441,11 +424,10 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
441 * max frequency, which will cause a reduced performance as 424 * max frequency, which will cause a reduced performance as
442 * this driver uses real max turbo frequency as the max 425 * this driver uses real max turbo frequency as the max
443 * frequency. So correct this frequency in _PSS table to 426 * frequency. So correct this frequency in _PSS table to
444 * correct max turbo frequency based on the turbo ratio. 427 * correct max turbo frequency based on the turbo state.
445 * Also need to convert to MHz as _PSS freq is in MHz. 428 * Also need to convert to MHz as _PSS freq is in MHz.
446 */ 429 */
447 turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0); 430 if (!limits->turbo_disabled)
448 if (turbo_pss_ctl > cpu->pstate.max_pstate)
449 cpu->acpi_perf_data.states[0].core_frequency = 431 cpu->acpi_perf_data.states[0].core_frequency =
450 policy->cpuinfo.max_freq / 1000; 432 policy->cpuinfo.max_freq / 1000;
451 cpu->valid_pss_table = true; 433 cpu->valid_pss_table = true;
@@ -1418,6 +1400,9 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
1418{ 1400{
1419 struct cpudata *cpu = all_cpu_data[cpu_num]; 1401 struct cpudata *cpu = all_cpu_data[cpu_num];
1420 1402
1403 if (cpu->update_util_set)
1404 return;
1405
1421 /* Prevent intel_pstate_update_util() from using stale data. */ 1406 /* Prevent intel_pstate_update_util() from using stale data. */
1422 cpu->sample.time = 0; 1407 cpu->sample.time = 0;
1423 cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, 1408 cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
@@ -1458,8 +1443,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1458 if (!policy->cpuinfo.max_freq) 1443 if (!policy->cpuinfo.max_freq)
1459 return -ENODEV; 1444 return -ENODEV;
1460 1445
1461 intel_pstate_clear_update_util_hook(policy->cpu);
1462
1463 pr_debug("set_policy cpuinfo.max %u policy->max %u\n", 1446 pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
1464 policy->cpuinfo.max_freq, policy->max); 1447 policy->cpuinfo.max_freq, policy->max);
1465 1448
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 808a320e9d5d..a7ecb9a84c15 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -487,7 +487,7 @@ static int __init pcc_cpufreq_probe(void)
487 doorbell.space_id = reg_resource->space_id; 487 doorbell.space_id = reg_resource->space_id;
488 doorbell.bit_width = reg_resource->bit_width; 488 doorbell.bit_width = reg_resource->bit_width;
489 doorbell.bit_offset = reg_resource->bit_offset; 489 doorbell.bit_offset = reg_resource->bit_offset;
490 doorbell.access_width = 64; 490 doorbell.access_width = 4;
491 doorbell.address = reg_resource->address; 491 doorbell.address = reg_resource->address;
492 492
493 pr_debug("probe: doorbell: space_id is %d, bit_width is %d, " 493 pr_debug("probe: doorbell: space_id is %d, bit_width is %d, "
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 574e87c7f2b8..9acccad26928 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -781,7 +781,7 @@ static int hash_process_data(struct hash_device_data *device_data,
781 &device_data->state); 781 &device_data->state);
782 memmove(req_ctx->state.buffer, 782 memmove(req_ctx->state.buffer,
783 device_data->state.buffer, 783 device_data->state.buffer,
784 HASH_BLOCK_SIZE / sizeof(u32)); 784 HASH_BLOCK_SIZE);
785 if (ret) { 785 if (ret) {
786 dev_err(device_data->dev, 786 dev_err(device_data->dev,
787 "%s: hash_resume_state() failed!\n", 787 "%s: hash_resume_state() failed!\n",
@@ -832,7 +832,7 @@ static int hash_process_data(struct hash_device_data *device_data,
832 832
833 memmove(device_data->state.buffer, 833 memmove(device_data->state.buffer,
834 req_ctx->state.buffer, 834 req_ctx->state.buffer,
835 HASH_BLOCK_SIZE / sizeof(u32)); 835 HASH_BLOCK_SIZE);
836 if (ret) { 836 if (ret) {
837 dev_err(device_data->dev, "%s: hash_save_state() failed!\n", 837 dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
838 __func__); 838 __func__);
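
The ux500 hash hunks above fix a size bug: memmove() counts bytes, so dividing HASH_BLOCK_SIZE by sizeof(u32) copied only a quarter of the state buffer. A standalone demonstration (a 64-byte block size is assumed here, matching the usual SHA-1/SHA-256 block):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define HASH_BLOCK_SIZE 64  /* bytes (assumed) */

    int main(void)
    {
        uint8_t src[HASH_BLOCK_SIZE], dst[HASH_BLOCK_SIZE];

        memset(src, 0xab, sizeof(src));
        memset(dst, 0x00, sizeof(dst));

        /* buggy: memmove takes a byte count, so this copies only 16 bytes */
        memmove(dst, src, HASH_BLOCK_SIZE / sizeof(uint32_t));
        printf("byte 16 after buggy copy: 0x%02x\n", dst[16]);  /* 0x00 */

        /* fixed: pass the byte count directly */
        memmove(dst, src, HASH_BLOCK_SIZE);
        printf("byte 16 after fixed copy: 0x%02x\n", dst[16]);  /* 0xab */
        return 0;
    }
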
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 495577b6d31b..94ad5c0adbcb 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -182,7 +182,7 @@ struct crypto_alg p8_aes_cbc_alg = {
182 .cra_name = "cbc(aes)", 182 .cra_name = "cbc(aes)",
183 .cra_driver_name = "p8_aes_cbc", 183 .cra_driver_name = "p8_aes_cbc",
184 .cra_module = THIS_MODULE, 184 .cra_module = THIS_MODULE,
185 .cra_priority = 1000, 185 .cra_priority = 2000,
186 .cra_type = &crypto_blkcipher_type, 186 .cra_type = &crypto_blkcipher_type,
187 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, 187 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
188 .cra_alignmask = 0, 188 .cra_alignmask = 0,
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 0a3c1b04cf3c..38ed10d761d0 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -166,7 +166,7 @@ struct crypto_alg p8_aes_ctr_alg = {
166 .cra_name = "ctr(aes)", 166 .cra_name = "ctr(aes)",
167 .cra_driver_name = "p8_aes_ctr", 167 .cra_driver_name = "p8_aes_ctr",
168 .cra_module = THIS_MODULE, 168 .cra_module = THIS_MODULE,
169 .cra_priority = 1000, 169 .cra_priority = 2000,
170 .cra_type = &crypto_blkcipher_type, 170 .cra_type = &crypto_blkcipher_type,
171 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, 171 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
172 .cra_alignmask = 0, 172 .cra_alignmask = 0,
diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl
index 9f4994cabcc7..b18e67d0e065 100644
--- a/drivers/crypto/vmx/ppc-xlate.pl
+++ b/drivers/crypto/vmx/ppc-xlate.pl
@@ -141,7 +141,7 @@ my $vmr = sub {
141 141
142# Some ABIs specify vrsave, special-purpose register #256, as reserved 142# Some ABIs specify vrsave, special-purpose register #256, as reserved
143# for system use. 143# for system use.
144my $no_vrsave = ($flavour =~ /aix|linux64le/); 144my $no_vrsave = ($flavour =~ /linux-ppc64le/);
145my $mtspr = sub { 145my $mtspr = sub {
146 my ($f,$idx,$ra) = @_; 146 my ($f,$idx,$ra) = @_;
147 if ($idx == 256 && $no_vrsave) { 147 if ($idx == 256 && $no_vrsave) {
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 1d6c803804d5..e92418facc92 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -268,8 +268,11 @@ int update_devfreq(struct devfreq *devfreq)
268 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE); 268 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
269 269
270 err = devfreq->profile->target(devfreq->dev.parent, &freq, flags); 270 err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
271 if (err) 271 if (err) {
272 freqs.new = cur_freq;
273 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
272 return err; 274 return err;
275 }
273 276
274 freqs.new = freq; 277 freqs.new = freq;
275 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE); 278 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
@@ -552,6 +555,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
552 devfreq->profile = profile; 555 devfreq->profile = profile;
553 strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN); 556 strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
554 devfreq->previous_freq = profile->initial_freq; 557 devfreq->previous_freq = profile->initial_freq;
558 devfreq->last_status.current_frequency = profile->initial_freq;
555 devfreq->data = data; 559 devfreq->data = data;
556 devfreq->nb.notifier_call = devfreq_notifier_call; 560 devfreq->nb.notifier_call = devfreq_notifier_call;
557 561
@@ -561,23 +565,22 @@ struct devfreq *devfreq_add_device(struct device *dev,
561 mutex_lock(&devfreq->lock); 565 mutex_lock(&devfreq->lock);
562 } 566 }
563 567
564 devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
565 devfreq->profile->max_state *
566 devfreq->profile->max_state,
567 GFP_KERNEL);
568 devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) *
569 devfreq->profile->max_state,
570 GFP_KERNEL);
571 devfreq->last_stat_updated = jiffies;
572
573 dev_set_name(&devfreq->dev, "%s", dev_name(dev)); 568 dev_set_name(&devfreq->dev, "%s", dev_name(dev));
574 err = device_register(&devfreq->dev); 569 err = device_register(&devfreq->dev);
575 if (err) { 570 if (err) {
576 put_device(&devfreq->dev);
577 mutex_unlock(&devfreq->lock); 571 mutex_unlock(&devfreq->lock);
578 goto err_out; 572 goto err_out;
579 } 573 }
580 574
575 devfreq->trans_table = devm_kzalloc(&devfreq->dev, sizeof(unsigned int) *
576 devfreq->profile->max_state *
577 devfreq->profile->max_state,
578 GFP_KERNEL);
579 devfreq->time_in_state = devm_kzalloc(&devfreq->dev, sizeof(unsigned long) *
580 devfreq->profile->max_state,
581 GFP_KERNEL);
582 devfreq->last_stat_updated = jiffies;
583
581 srcu_init_notifier_head(&devfreq->transition_notifier_list); 584 srcu_init_notifier_head(&devfreq->transition_notifier_list);
582 585
583 mutex_unlock(&devfreq->lock); 586 mutex_unlock(&devfreq->lock);
@@ -603,7 +606,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
603err_init: 606err_init:
604 list_del(&devfreq->node); 607 list_del(&devfreq->node);
605 device_unregister(&devfreq->dev); 608 device_unregister(&devfreq->dev);
606 kfree(devfreq);
607err_out: 609err_out:
608 return ERR_PTR(err); 610 return ERR_PTR(err);
609} 611}
@@ -621,7 +623,6 @@ int devfreq_remove_device(struct devfreq *devfreq)
621 return -EINVAL; 623 return -EINVAL;
622 624
623 device_unregister(&devfreq->dev); 625 device_unregister(&devfreq->dev);
624 put_device(&devfreq->dev);
625 626
626 return 0; 627 return 0;
627} 628}
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index 6b6a5f310486..a5841403bde8 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -220,9 +220,6 @@ static int exynos_nocp_parse_dt(struct platform_device *pdev,
220 220
221 /* Maps the memory mapped IO to control nocp register */ 221 /* Maps the memory mapped IO to control nocp register */
222 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 222 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
223 if (IS_ERR(res))
224 return PTR_ERR(res);
225
226 base = devm_ioremap_resource(dev, res); 223 base = devm_ioremap_resource(dev, res);
227 if (IS_ERR(base)) 224 if (IS_ERR(base))
228 return PTR_ERR(base); 225 return PTR_ERR(base);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 8e304b1befc5..75bd6621dc5d 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -242,7 +242,7 @@ struct at_xdmac_lld {
242 u32 mbr_dus; /* Destination Microblock Stride Register */ 242 u32 mbr_dus; /* Destination Microblock Stride Register */
243}; 243};
244 244
245 245/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
246struct at_xdmac_desc { 246struct at_xdmac_desc {
247 struct at_xdmac_lld lld; 247 struct at_xdmac_lld lld;
248 enum dma_transfer_direction direction; 248 enum dma_transfer_direction direction;
@@ -253,7 +253,7 @@ struct at_xdmac_desc {
253 unsigned int xfer_size; 253 unsigned int xfer_size;
254 struct list_head descs_list; 254 struct list_head descs_list;
255 struct list_head xfer_node; 255 struct list_head xfer_node;
256}; 256} __aligned(sizeof(u64));
257 257
258static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb) 258static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
259{ 259{
@@ -1400,6 +1400,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1400 u32 cur_nda, check_nda, cur_ubc, mask, value; 1400 u32 cur_nda, check_nda, cur_ubc, mask, value;
1401 u8 dwidth = 0; 1401 u8 dwidth = 0;
1402 unsigned long flags; 1402 unsigned long flags;
1403 bool initd;
1403 1404
1404 ret = dma_cookie_status(chan, cookie, txstate); 1405 ret = dma_cookie_status(chan, cookie, txstate);
1405 if (ret == DMA_COMPLETE) 1406 if (ret == DMA_COMPLETE)
@@ -1424,7 +1425,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1424 residue = desc->xfer_size; 1425 residue = desc->xfer_size;
1425 /* 1426 /*
1426 * Flush FIFO: only relevant when the transfer is source peripheral 1427 * Flush FIFO: only relevant when the transfer is source peripheral
1427 * synchronized. 1428 * synchronized. Flush is needed before reading CUBC because data in
1429 * the FIFO are not reported by CUBC. Reporting a residue of the
1430 * transfer length while we have data in FIFO can cause issue.
1431 * Usecase: atmel USART has a timeout which means I have received
1432 * characters but there is no more character received for a while. On
1433 * timeout, it requests the residue. If the data are in the DMA FIFO,
1434 * we will return a residue of the transfer length. It means no data
1435 * received. If an application is waiting for these data, it will hang
1436 * since we won't have another USART timeout without receiving new
1437 * data.
1428 */ 1438 */
1429 mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC; 1439 mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
1430 value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM; 1440 value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
@@ -1435,34 +1445,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1435 } 1445 }
1436 1446
1437 /* 1447 /*
1438 * When processing the residue, we need to read two registers but we 1448 * The easiest way to compute the residue should be to pause the DMA
 1439 * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where 1449 * but doing so can lead to data loss, as some devices don't
 1440 * we stand in the descriptor list and AT_XDMAC_CUBC is used 1450 * have a FIFO.
1441 * to know how many data are remaining for the current descriptor. 1451 * We need to read several registers because:
1442 * Since the dma channel is not paused to not loose data, between the 1452 * - DMA is running therefore a descriptor change is possible while
1443 * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of 1453 * reading these registers
1444 * descriptor. 1454 * - When the block transfer is done, the value of the CUBC register
1445 * For that reason, after reading AT_XDMAC_CUBC, we check if we are 1455 * is set to its initial value until the fetch of the next descriptor.
1446 * still using the same descriptor by reading a second time 1456 * This value will corrupt the residue calculation so we have to skip
1447 * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to 1457 * it.
1448 * read again AT_XDMAC_CUBC. 1458 *
1459 * INITD -------- ------------
1460 * |____________________|
1461 * _______________________ _______________
1462 * NDA @desc2 \/ @desc3
1463 * _______________________/\_______________
1464 * __________ ___________ _______________
1465 * CUBC 0 \/ MAX desc1 \/ MAX desc2
1466 * __________/\___________/\_______________
1467 *
1468 * Since descriptors are aligned on 64 bits, we can assume that
1469 * the update of NDA and CUBC is atomic.
1449 * Memory barriers are used to ensure the read order of the registers. 1470 * Memory barriers are used to ensure the read order of the registers.
 1450 * A max number of retries is set because unlikely it can never ends if 1471 * A maximum number of retries is set because, although unlikely, the loop could otherwise never end.
1451 * we are transferring a lot of data with small buffers.
1452 */ 1472 */
1453 cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1454 rmb();
1455 cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
1456 for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) { 1473 for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
1457 rmb();
1458 check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; 1474 check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1459 1475 rmb();
1460 if (likely(cur_nda == check_nda)) 1476 initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
1461 break;
1462
1463 cur_nda = check_nda;
1464 rmb(); 1477 rmb();
1465 cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); 1478 cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
1479 rmb();
1480 cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1481 rmb();
1482
1483 if ((check_nda == cur_nda) && initd)
1484 break;
1466 } 1485 }
1467 1486
1468 if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) { 1487 if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
@@ -1471,6 +1490,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1471 } 1490 }
1472 1491
1473 /* 1492 /*
1493 * Flush FIFO: only relevant when the transfer is source peripheral
1494 * synchronized. Another flush is needed here because CUBC is updated
1495 * when the controller sends the data write command. It can lead to
1496 * report data that are not written in the memory or the device. The
1497 * FIFO flush ensures that data are really written.
1498 */
1499 if ((desc->lld.mbr_cfg & mask) == value) {
1500 at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
1501 while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
1502 cpu_relax();
1503 }
1504
1505 /*
1474 * Remove size of all microblocks already transferred and the current 1506 * Remove size of all microblocks already transferred and the current
1475 * one. Then add the remaining size to transfer of the current 1507 * one. Then add the remaining size to transfer of the current
1476 * microblock. 1508 * microblock.
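
The core of the at_xdmac change above is a consistent-snapshot loop: several registers are read with barriers between the reads, the loop retries until NDA is stable, and the INITD flag masks the window in which CUBC transiently holds its reset value. The generic shape of such a loop, with C11 sequentially-consistent atomics standing in for the ordered register reads (this is the pattern, not the driver):

    #include <stdatomic.h>
    #include <stdio.h>

    #define MAX_RETRIES 1000

    static _Atomic unsigned int nda, cubc;  /* stand-ins for CNDA/CUBC */
    static _Atomic int initd = 1;           /* stand-in for the INITD bit */

    int main(void)
    {
        unsigned int check_nda = 0, cur_nda = 0, cur_ubc = 0;
        int retry, valid = 0;

        for (retry = 0; retry < MAX_RETRIES; retry++) {
            check_nda = atomic_load(&nda);  /* each load is ordered, like */
            valid = atomic_load(&initd);    /* the rmb()s in the driver   */
            cur_ubc = atomic_load(&cubc);
            cur_nda = atomic_load(&nda);
            if (check_nda == cur_nda && valid)
                break;                      /* consistent snapshot */
        }
        printf("retries=%d nda=%u ubc=%u\n", retry, cur_nda, cur_ubc);
        return 0;
    }
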
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 25d1dadcddd1..d0446a75990a 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -703,8 +703,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
703 goto free_resources; 703 goto free_resources;
704 } 704 }
705 705
706 src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0, 706 src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
707 PAGE_SIZE, DMA_TO_DEVICE); 707 (size_t)src & ~PAGE_MASK, PAGE_SIZE,
708 DMA_TO_DEVICE);
708 unmap->addr[0] = src_dma; 709 unmap->addr[0] = src_dma;
709 710
710 ret = dma_mapping_error(dma_chan->device->dev, src_dma); 711 ret = dma_mapping_error(dma_chan->device->dev, src_dma);
@@ -714,8 +715,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
714 } 715 }
715 unmap->to_cnt = 1; 716 unmap->to_cnt = 1;
716 717
717 dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0, 718 dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
718 PAGE_SIZE, DMA_FROM_DEVICE); 719 (size_t)dest & ~PAGE_MASK, PAGE_SIZE,
720 DMA_FROM_DEVICE);
719 unmap->addr[1] = dest_dma; 721 unmap->addr[1] = dest_dma;
720 722
721 ret = dma_mapping_error(dma_chan->device->dev, dest_dma); 723 ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
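
The mv_xor hunk above passes the buffer's offset within its page to dma_map_page() instead of 0; for a buffer that is not page-aligned, mapping offset 0 would hand the device the wrong bytes. The offset arithmetic on its own (4 KiB pages assumed):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        uintptr_t buf = 0x12345678;        /* not page-aligned */

        uintptr_t page = buf & PAGE_MASK;  /* what virt_to_page() selects */
        uintptr_t off = buf & ~PAGE_MASK;  /* the offset the hunk now passes */

        printf("page 0x%lx + off 0x%lx = 0x%lx\n",
               (unsigned long)page, (unsigned long)off,
               (unsigned long)(page + off));  /* reconstructs buf */
        return 0;
    }
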
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 8b3226dca1d9..caff46c0e214 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -360,6 +360,8 @@ static int palmas_usb_probe(struct platform_device *pdev)
360 360
361 palmas_enable_irq(palmas_usb); 361 palmas_enable_irq(palmas_usb);
362 /* perform initial detection */ 362 /* perform initial detection */
363 if (palmas_usb->enable_gpio_vbus_detection)
364 palmas_vbus_irq_handler(palmas_usb->gpio_vbus_irq, palmas_usb);
363 palmas_gpio_id_detect(&palmas_usb->wq_detectid.work); 365 palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
364 device_set_wakeup_capable(&pdev->dev, true); 366 device_set_wakeup_capable(&pdev->dev, true);
365 return 0; 367 return 0;
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index a850cbc48d8d..c49d50e68aee 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -174,6 +174,7 @@ static __init void reserve_regions(void)
174{ 174{
175 efi_memory_desc_t *md; 175 efi_memory_desc_t *md;
176 u64 paddr, npages, size; 176 u64 paddr, npages, size;
177 int resv;
177 178
178 if (efi_enabled(EFI_DBG)) 179 if (efi_enabled(EFI_DBG))
179 pr_info("Processing EFI memory map:\n"); 180 pr_info("Processing EFI memory map:\n");
@@ -190,12 +191,14 @@ static __init void reserve_regions(void)
190 paddr = md->phys_addr; 191 paddr = md->phys_addr;
191 npages = md->num_pages; 192 npages = md->num_pages;
192 193
194 resv = is_reserve_region(md);
193 if (efi_enabled(EFI_DBG)) { 195 if (efi_enabled(EFI_DBG)) {
194 char buf[64]; 196 char buf[64];
195 197
196 pr_info(" 0x%012llx-0x%012llx %s", 198 pr_info(" 0x%012llx-0x%012llx %s%s\n",
197 paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, 199 paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
198 efi_md_typeattr_format(buf, sizeof(buf), md)); 200 efi_md_typeattr_format(buf, sizeof(buf), md),
201 resv ? "*" : "");
199 } 202 }
200 203
201 memrange_efi_to_native(&paddr, &npages); 204 memrange_efi_to_native(&paddr, &npages);
@@ -204,14 +207,9 @@ static __init void reserve_regions(void)
204 if (is_normal_ram(md)) 207 if (is_normal_ram(md))
205 early_init_dt_add_memory_arch(paddr, size); 208 early_init_dt_add_memory_arch(paddr, size);
206 209
207 if (is_reserve_region(md)) { 210 if (resv)
208 memblock_mark_nomap(paddr, size); 211 memblock_mark_nomap(paddr, size);
209 if (efi_enabled(EFI_DBG))
210 pr_cont("*");
211 }
212 212
213 if (efi_enabled(EFI_DBG))
214 pr_cont("\n");
215 } 213 }
216 214
217 set_bit(EFI_MEMMAP, &efi.flags); 215 set_bit(EFI_MEMMAP, &efi.flags);
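
The arm-init.c hunk hoists is_reserve_region() into resv and folds the reserved marker into the single pr_info(), replacing pr_cont() continuations that can interleave with output from other CPUs. The same shape in a standalone sketch; struct region is a stand-in for efi_memory_desc_t:

    #include <stdbool.h>
    #include <stdio.h>

    struct region { unsigned long long start, end; bool reserved; };

    static void log_region(const struct region *r)
    {
            /* one complete line per region: no separate "*" continuation */
            printf(" 0x%012llx-0x%012llx%s\n",
                   r->start, r->end, r->reserved ? " *" : "");
    }

    int main(void)
    {
            struct region r = { 0x80000000ULL, 0x80ffffffULL, true };
            log_region(&r);
            return 0;
    }
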
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 48da857f4774..cebcb405812e 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -33,6 +33,7 @@ config ARCH_REQUIRE_GPIOLIB
33 33
34menuconfig GPIOLIB 34menuconfig GPIOLIB
35 bool "GPIO Support" 35 bool "GPIO Support"
36 select ANON_INODES
36 help 37 help
37 This enables GPIO support through the generic GPIO library. 38 This enables GPIO support through the generic GPIO library.
38 You only need to enable this, if you also want to enable 39 You only need to enable this, if you also want to enable
@@ -530,7 +531,7 @@ menu "Port-mapped I/O GPIO drivers"
530 531
531config GPIO_104_DIO_48E 532config GPIO_104_DIO_48E
532 tristate "ACCES 104-DIO-48E GPIO support" 533 tristate "ACCES 104-DIO-48E GPIO support"
533 depends on ISA 534 depends on ISA_BUS_API
534 select GPIOLIB_IRQCHIP 535 select GPIOLIB_IRQCHIP
535 help 536 help
536 Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E, 537 Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E,
@@ -540,7 +541,7 @@ config GPIO_104_DIO_48E
540 541
541config GPIO_104_IDIO_16 542config GPIO_104_IDIO_16
542 tristate "ACCES 104-IDIO-16 GPIO support" 543 tristate "ACCES 104-IDIO-16 GPIO support"
543 depends on ISA 544 depends on ISA_BUS_API
544 select GPIOLIB_IRQCHIP 545 select GPIOLIB_IRQCHIP
545 help 546 help
546 Enables GPIO support for the ACCES 104-IDIO-16 family (104-IDIO-16, 547 Enables GPIO support for the ACCES 104-IDIO-16 family (104-IDIO-16,
@@ -551,7 +552,7 @@ config GPIO_104_IDIO_16
551 552
552config GPIO_104_IDI_48 553config GPIO_104_IDI_48
553 tristate "ACCES 104-IDI-48 GPIO support" 554 tristate "ACCES 104-IDI-48 GPIO support"
554 depends on ISA 555 depends on ISA_BUS_API
555 select GPIOLIB_IRQCHIP 556 select GPIOLIB_IRQCHIP
556 help 557 help
557 Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A, 558 Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A,
@@ -627,7 +628,7 @@ config GPIO_TS5500
627 628
628config GPIO_WS16C48 629config GPIO_WS16C48
629 tristate "WinSystems WS16C48 GPIO support" 630 tristate "WinSystems WS16C48 GPIO support"
630 depends on ISA 631 depends on ISA_BUS_API
631 select GPIOLIB_IRQCHIP 632 select GPIOLIB_IRQCHIP
632 help 633 help
633 Enables GPIO support for the WinSystems WS16C48. The base port 634 Enables GPIO support for the WinSystems WS16C48. The base port
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index 1a647c07be67..fcf776971ca9 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -75,7 +75,7 @@ static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
75{ 75{
76 struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); 76 struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
77 const unsigned io_port = offset / 8; 77 const unsigned io_port = offset / 8;
78 const unsigned control_port = io_port / 2; 78 const unsigned int control_port = io_port / 3;
79 const unsigned control_addr = dio48egpio->base + 3 + control_port*4; 79 const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
80 unsigned long flags; 80 unsigned long flags;
81 unsigned control; 81 unsigned control;
@@ -115,7 +115,7 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
115{ 115{
116 struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); 116 struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
117 const unsigned io_port = offset / 8; 117 const unsigned io_port = offset / 8;
118 const unsigned control_port = io_port / 2; 118 const unsigned int control_port = io_port / 3;
119 const unsigned mask = BIT(offset % 8); 119 const unsigned mask = BIT(offset % 8);
120 const unsigned control_addr = dio48egpio->base + 3 + control_port*4; 120 const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
121 const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port; 121 const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port;
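
The dio-48e fix changes the control-register divisor from 2 to 3: on these 82C55-style parts, each group of three 8-bit data ports shares one control register. A runnable check of the resulting addressing; the 0x200 base is an assumption, not the driver's default:

    #include <stdio.h>

    int main(void)
    {
            const unsigned base = 0x200;                 /* assumed I/O base */

            for (unsigned offset = 0; offset < 48; offset += 8) {
                    unsigned io_port = offset / 8;       /* 8 lines per port  */
                    unsigned control_port = io_port / 3; /* 3 ports per group */
                    unsigned control_addr = base + 3 + control_port * 4;

                    printf("gpio %2u: port %u, control reg 0x%x\n",
                           offset, io_port, control_addr);
            }
            return 0;
    }
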
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index 6c75c83baf5a..2d2763ea1a68 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -247,6 +247,7 @@ static int idi_48_probe(struct device *dev, unsigned int id)
247 idi48gpio->irq = irq[id]; 247 idi48gpio->irq = irq[id];
248 248
249 spin_lock_init(&idi48gpio->lock); 249 spin_lock_init(&idi48gpio->lock);
250 spin_lock_init(&idi48gpio->ack_lock);
250 251
251 dev_set_drvdata(dev, idi48gpio); 252 dev_set_drvdata(dev, idi48gpio);
252 253
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 9aabc48ff5de..953e4b829e32 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -547,11 +547,11 @@ static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio)
547 /* disable interrupts and clear status */ 547 /* disable interrupts and clear status */
548 for (i = 0; i < kona_gpio->num_bank; i++) { 548 for (i = 0; i < kona_gpio->num_bank; i++) {
549 /* Unlock the entire bank first */ 549 /* Unlock the entire bank first */
550 bcm_kona_gpio_write_lock_regs(kona_gpio, i, UNLOCK_CODE); 550 bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE);
551 writel(0xffffffff, reg_base + GPIO_INT_MASK(i)); 551 writel(0xffffffff, reg_base + GPIO_INT_MASK(i));
552 writel(0xffffffff, reg_base + GPIO_INT_STATUS(i)); 552 writel(0xffffffff, reg_base + GPIO_INT_STATUS(i));
553 /* Now re-lock the bank */ 553 /* Now re-lock the bank */
554 bcm_kona_gpio_write_lock_regs(kona_gpio, i, LOCK_CODE); 554 bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE);
555 } 555 }
556} 556}
557 557
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index ec891a27952f..661b0e34e067 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -98,7 +98,6 @@ struct tegra_gpio_info {
98 const struct tegra_gpio_soc_config *soc; 98 const struct tegra_gpio_soc_config *soc;
99 struct gpio_chip gc; 99 struct gpio_chip gc;
100 struct irq_chip ic; 100 struct irq_chip ic;
101 struct lock_class_key lock_class;
102 u32 bank_count; 101 u32 bank_count;
103}; 102};
104 103
@@ -547,6 +546,12 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
547 SET_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume) 546 SET_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
548}; 547};
549 548
549/*
550 * This lock class tells lockdep that GPIO irqs are in a different category
551 * than their parents, so it won't report false recursion.
552 */
553static struct lock_class_key gpio_lock_class;
554
550static int tegra_gpio_probe(struct platform_device *pdev) 555static int tegra_gpio_probe(struct platform_device *pdev)
551{ 556{
552 const struct tegra_gpio_soc_config *config; 557 const struct tegra_gpio_soc_config *config;
@@ -660,7 +665,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
660 665
661 bank = &tgi->bank_info[GPIO_BANK(gpio)]; 666 bank = &tgi->bank_info[GPIO_BANK(gpio)];
662 667
663 irq_set_lockdep_class(irq, &tgi->lock_class); 668 irq_set_lockdep_class(irq, &gpio_lock_class);
664 irq_set_chip_data(irq, bank); 669 irq_set_chip_data(irq, bank);
665 irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq); 670 irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
666 } 671 }
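
The tegra change moves the lockdep key out of the dynamically allocated tegra_gpio_info and into file scope: lockdep rejects class keys that live in dynamically allocated memory, and one static key per driver is enough to separate the GPIO irqs from their parent. The pattern, reduced to a sketch:

    #include <linux/irq.h>
    #include <linux/lockdep.h>

    /* One class for every GPIO irq of the driver; must be static storage,
     * since lockdep rejects keys living in dynamically allocated memory. */
    static struct lock_class_key my_gpio_lock_class;

    static void classify_irq(unsigned int irq)
    {
            irq_set_lockdep_class(irq, &my_gpio_lock_class);
    }
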
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 75c6355b018d..e72794e463aa 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -709,7 +709,13 @@ static int zynq_gpio_probe(struct platform_device *pdev)
709 dev_err(&pdev->dev, "input clock not found.\n"); 709 dev_err(&pdev->dev, "input clock not found.\n");
710 return PTR_ERR(gpio->clk); 710 return PTR_ERR(gpio->clk);
711 } 711 }
712 ret = clk_prepare_enable(gpio->clk);
713 if (ret) {
714 dev_err(&pdev->dev, "Unable to enable clock.\n");
715 return ret;
716 }
712 717
718 pm_runtime_set_active(&pdev->dev);
713 pm_runtime_enable(&pdev->dev); 719 pm_runtime_enable(&pdev->dev);
714 ret = pm_runtime_get_sync(&pdev->dev); 720 ret = pm_runtime_get_sync(&pdev->dev);
715 if (ret < 0) 721 if (ret < 0)
@@ -747,6 +753,7 @@ err_pm_put:
747 pm_runtime_put(&pdev->dev); 753 pm_runtime_put(&pdev->dev);
748err_pm_dis: 754err_pm_dis:
749 pm_runtime_disable(&pdev->dev); 755 pm_runtime_disable(&pdev->dev);
756 clk_disable_unprepare(gpio->clk);
750 757
751 return ret; 758 return ret;
752} 759}
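
The zynq hunk enables the clock before runtime PM, marks the device active so the PM core's view matches the hardware, and adds clk_disable_unprepare() to the error path. A condensed sketch of that probe shape, with the driver specifics elided:

    #include <linux/clk.h>
    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int example_probe(struct platform_device *pdev, struct clk *clk)
    {
            int ret;

            ret = clk_prepare_enable(clk);          /* clock on before PM */
            if (ret)
                    return ret;

            pm_runtime_set_active(&pdev->dev);      /* PM state matches hardware */
            pm_runtime_enable(&pdev->dev);

            ret = pm_runtime_get_sync(&pdev->dev);
            if (ret < 0)
                    goto err_pm_dis;

            return 0;

    err_pm_dis:
            pm_runtime_disable(&pdev->dev);
            clk_disable_unprepare(clk);             /* undo on every failure */
            return ret;
    }
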
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index d22dcc38179d..4aabddb38b59 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -16,6 +16,7 @@
16#include <linux/errno.h> 16#include <linux/errno.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/io-mapping.h>
19#include <linux/gpio/consumer.h> 20#include <linux/gpio/consumer.h>
20#include <linux/of.h> 21#include <linux/of.h>
21#include <linux/of_address.h> 22#include <linux/of_address.h>
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 24f60d28f0c0..570771ed19e6 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -449,7 +449,6 @@ static void gpiodevice_release(struct device *dev)
449{ 449{
450 struct gpio_device *gdev = dev_get_drvdata(dev); 450 struct gpio_device *gdev = dev_get_drvdata(dev);
451 451
452 cdev_del(&gdev->chrdev);
453 list_del(&gdev->list); 452 list_del(&gdev->list);
454 ida_simple_remove(&gpio_ida, gdev->id); 453 ida_simple_remove(&gpio_ida, gdev->id);
455 kfree(gdev->label); 454 kfree(gdev->label);
@@ -482,7 +481,6 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
482 481
483 /* From this point, the .release() function cleans up gpio_device */ 482 /* From this point, the .release() function cleans up gpio_device */
484 gdev->dev.release = gpiodevice_release; 483 gdev->dev.release = gpiodevice_release;
485 get_device(&gdev->dev);
486 pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n", 484 pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
487 __func__, gdev->base, gdev->base + gdev->ngpio - 1, 485 __func__, gdev->base, gdev->base + gdev->ngpio - 1,
488 dev_name(&gdev->dev), gdev->chip->label ? : "generic"); 486 dev_name(&gdev->dev), gdev->chip->label ? : "generic");
@@ -770,6 +768,8 @@ void gpiochip_remove(struct gpio_chip *chip)
770 * be removed, else it will be dangling until the last user is 768 * be removed, else it will be dangling until the last user is
771 * gone. 769 * gone.
772 */ 770 */
771 cdev_del(&gdev->chrdev);
772 device_del(&gdev->dev);
773 put_device(&gdev->dev); 773 put_device(&gdev->dev);
774} 774}
775EXPORT_SYMBOL_GPL(gpiochip_remove); 775EXPORT_SYMBOL_GPL(gpiochip_remove);
@@ -869,7 +869,7 @@ struct gpio_chip *gpiochip_find(void *data,
869 869
870 spin_lock_irqsave(&gpio_lock, flags); 870 spin_lock_irqsave(&gpio_lock, flags);
871 list_for_each_entry(gdev, &gpio_devices, list) 871 list_for_each_entry(gdev, &gpio_devices, list)
872 if (match(gdev->chip, data)) 872 if (gdev->chip && match(gdev->chip, data))
873 break; 873 break;
874 874
875 /* No match? */ 875 /* No match? */
@@ -1373,8 +1373,12 @@ done:
1373#define VALIDATE_DESC(desc) do { \ 1373#define VALIDATE_DESC(desc) do { \
1374 if (!desc) \ 1374 if (!desc) \
1375 return 0; \ 1375 return 0; \
1376 if (IS_ERR(desc)) { \
1377 pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
1378 return PTR_ERR(desc); \
1379 } \
1376 if (!desc->gdev) { \ 1380 if (!desc->gdev) { \
1377 pr_warn("%s: invalid GPIO\n", __func__); \ 1381 pr_warn("%s: invalid GPIO (no device)\n", __func__); \
1378 return -EINVAL; \ 1382 return -EINVAL; \
1379 } \ 1383 } \
1380 if ( !desc->gdev->chip ) { \ 1384 if ( !desc->gdev->chip ) { \
@@ -1386,8 +1390,12 @@ done:
1386#define VALIDATE_DESC_VOID(desc) do { \ 1390#define VALIDATE_DESC_VOID(desc) do { \
1387 if (!desc) \ 1391 if (!desc) \
1388 return; \ 1392 return; \
1393 if (IS_ERR(desc)) { \
1394 pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
1395 return; \
1396 } \
1389 if (!desc->gdev) { \ 1397 if (!desc->gdev) { \
1390 pr_warn("%s: invalid GPIO\n", __func__); \ 1398 pr_warn("%s: invalid GPIO (no device)\n", __func__); \
1391 return; \ 1399 return; \
1392 } \ 1400 } \
1393 if (!desc->gdev->chip) { \ 1401 if (!desc->gdev->chip) { \
@@ -2056,7 +2064,14 @@ int gpiod_to_irq(const struct gpio_desc *desc)
2056 struct gpio_chip *chip; 2064 struct gpio_chip *chip;
2057 int offset; 2065 int offset;
2058 2066
2059 VALIDATE_DESC(desc); 2067 /*
2068 * Cannot VALIDATE_DESC() here as gpiod_to_irq() consumer semantics
2069 * requires this function to not return zero on an invalid descriptor
2070 * but rather a negative error number.
2071 */
2072 if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip)
2073 return -EINVAL;
2074
2060 chip = desc->gdev->chip; 2075 chip = desc->gdev->chip;
2061 offset = gpio_chip_hwgpio(desc); 2076 offset = gpio_chip_hwgpio(desc);
2062 if (chip->to_irq) { 2077 if (chip->to_irq) {
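
The VALIDATE_DESC additions distinguish three descriptor states: NULL, ERR_PTR-encoded, and genuinely valid. The same three-way check as a plain function, with the descriptor type collapsed to void *:

    #include <linux/err.h>

    static int check_desc(const void *p)
    {
            if (!p)
                    return 0;                   /* "no GPIO": ignore quietly */
            if (IS_ERR(p))
                    return (int)PTR_ERR(p);     /* propagate encoded errno */
            return 1;                           /* real object, safe to use */
    }

gpiod_to_irq() above opts out of the macro precisely because its contract is a negative errno, never 0, for anything invalid.
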
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 01c36b8d6222..e055d5be1c3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -799,7 +799,6 @@ struct amdgpu_ring {
799 unsigned cond_exe_offs; 799 unsigned cond_exe_offs;
800 u64 cond_exe_gpu_addr; 800 u64 cond_exe_gpu_addr;
801 volatile u32 *cond_exe_cpu_addr; 801 volatile u32 *cond_exe_cpu_addr;
802 int vmid;
803}; 802};
804 803
805/* 804/*
@@ -937,8 +936,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
937 unsigned vm_id, uint64_t pd_addr, 936 unsigned vm_id, uint64_t pd_addr,
938 uint32_t gds_base, uint32_t gds_size, 937 uint32_t gds_base, uint32_t gds_size,
939 uint32_t gws_base, uint32_t gws_size, 938 uint32_t gws_base, uint32_t gws_size,
940 uint32_t oa_base, uint32_t oa_size, 939 uint32_t oa_base, uint32_t oa_size);
941 bool vmid_switch);
942void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); 940void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
943uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); 941uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
944int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, 942int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
@@ -1822,6 +1820,8 @@ struct amdgpu_asic_funcs {
1822 /* MM block clocks */ 1820 /* MM block clocks */
1823 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); 1821 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
1824 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); 1822 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
1823 /* query virtual capabilities */
1824 u32 (*get_virtual_caps)(struct amdgpu_device *adev);
1825}; 1825};
1826 1826
1827/* 1827/*
@@ -1916,8 +1916,12 @@ void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
1916 1916
1917 1917
1918/* GPU virtualization */ 1918/* GPU virtualization */
1919#define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0)
1920#define AMDGPU_VIRT_CAPS_IS_VF (1 << 1)
1919struct amdgpu_virtualization { 1921struct amdgpu_virtualization {
1920 bool supports_sr_iov; 1922 bool supports_sr_iov;
1923 bool is_virtual;
1924 u32 caps;
1921}; 1925};
1922 1926
1923/* 1927/*
@@ -2206,6 +2210,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
2206#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) 2210#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
2207#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) 2211#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
2208#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) 2212#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
2213#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
2209#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) 2214#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
2210#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) 2215#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
2211#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) 2216#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 8943099eb135..cf6f49fc1c75 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -909,7 +909,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
909 struct cgs_acpi_method_argument *argument = NULL; 909 struct cgs_acpi_method_argument *argument = NULL;
910 uint32_t i, count; 910 uint32_t i, count;
911 acpi_status status; 911 acpi_status status;
912 int result; 912 int result = 0;
913 uint32_t func_no = 0xFFFFFFFF; 913 uint32_t func_no = 0xFFFFFFFF;
914 914
915 handle = ACPI_HANDLE(&adev->pdev->dev); 915 handle = ACPI_HANDLE(&adev->pdev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 964f31404f17..6e920086af46 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1385,6 +1385,15 @@ static int amdgpu_resume(struct amdgpu_device *adev)
1385 return 0; 1385 return 0;
1386} 1386}
1387 1387
1388static bool amdgpu_device_is_virtual(void)
1389{
1390#ifdef CONFIG_X86
1391 return boot_cpu_has(X86_FEATURE_HYPERVISOR);
1392#else
1393 return false;
1394#endif
1395}
1396
1388/** 1397/**
1389 * amdgpu_device_init - initialize the driver 1398 * amdgpu_device_init - initialize the driver
1390 * 1399 *
@@ -1519,8 +1528,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1519 adev->virtualization.supports_sr_iov = 1528 adev->virtualization.supports_sr_iov =
1520 amdgpu_atombios_has_gpu_virtualization_table(adev); 1529 amdgpu_atombios_has_gpu_virtualization_table(adev);
1521 1530
1531 /* Check if we are executing in a virtualized environment */
1532 adev->virtualization.is_virtual = amdgpu_device_is_virtual();
1533 adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
1534
1522 /* Post card if necessary */ 1535 /* Post card if necessary */
1523 if (!amdgpu_card_posted(adev)) { 1536 if (!amdgpu_card_posted(adev) ||
1537 (adev->virtualization.is_virtual &&
1538 !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
1524 if (!adev->bios) { 1539 if (!adev->bios) {
1525 dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); 1540 dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
1526 return -EINVAL; 1541 return -EINVAL;
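
amdgpu_device_is_virtual() keys off X86_FEATURE_HYPERVISOR, which reflects CPUID leaf 1, ECX bit 31. A userspace equivalent, x86 with GCC/Clang only:

    #include <cpuid.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool running_under_hypervisor(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                    return false;
            return ecx & (1u << 31);        /* hypervisor-present bit */
    }

    int main(void)
    {
            printf("virtual: %s\n", running_under_hypervisor() ? "yes" : "no");
            return 0;
    }
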
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 7a0b1e50f293..34e35423b78e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -122,7 +122,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
122 bool skip_preamble, need_ctx_switch; 122 bool skip_preamble, need_ctx_switch;
123 unsigned patch_offset = ~0; 123 unsigned patch_offset = ~0;
124 struct amdgpu_vm *vm; 124 struct amdgpu_vm *vm;
125 int vmid = 0, old_vmid = ring->vmid;
126 struct fence *hwf; 125 struct fence *hwf;
127 uint64_t ctx; 126 uint64_t ctx;
128 127
@@ -136,11 +135,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
136 if (job) { 135 if (job) {
137 vm = job->vm; 136 vm = job->vm;
138 ctx = job->ctx; 137 ctx = job->ctx;
139 vmid = job->vm_id;
140 } else { 138 } else {
141 vm = NULL; 139 vm = NULL;
142 ctx = 0; 140 ctx = 0;
143 vmid = 0;
144 } 141 }
145 142
146 if (!ring->ready) { 143 if (!ring->ready) {
@@ -166,8 +163,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
166 r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr, 163 r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
167 job->gds_base, job->gds_size, 164 job->gds_base, job->gds_size,
168 job->gws_base, job->gws_size, 165 job->gws_base, job->gws_size,
169 job->oa_base, job->oa_size, 166 job->oa_base, job->oa_size);
170 (ring->current_ctx == ctx) && (old_vmid != vmid));
171 if (r) { 167 if (r) {
172 amdgpu_ring_undo(ring); 168 amdgpu_ring_undo(ring);
173 return r; 169 return r;
@@ -184,6 +180,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
184 need_ctx_switch = ring->current_ctx != ctx; 180 need_ctx_switch = ring->current_ctx != ctx;
185 for (i = 0; i < num_ibs; ++i) { 181 for (i = 0; i < num_ibs; ++i) {
186 ib = &ibs[i]; 182 ib = &ibs[i];
183
187 /* drop preamble IBs if we don't have a context switch */ 184 /* drop preamble IBs if we don't have a context switch */
188 if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble) 185 if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
189 continue; 186 continue;
@@ -191,7 +188,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
191 amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0, 188 amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
192 need_ctx_switch); 189 need_ctx_switch);
193 need_ctx_switch = false; 190 need_ctx_switch = false;
194 ring->vmid = vmid;
195 } 191 }
196 192
197 if (ring->funcs->emit_hdp_invalidate) 193 if (ring->funcs->emit_hdp_invalidate)
@@ -202,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
202 dev_err(adev->dev, "failed to emit fence (%d)\n", r); 198 dev_err(adev->dev, "failed to emit fence (%d)\n", r);
203 if (job && job->vm_id) 199 if (job && job->vm_id)
204 amdgpu_vm_reset_id(adev, job->vm_id); 200 amdgpu_vm_reset_id(adev, job->vm_id);
205 ring->vmid = old_vmid;
206 amdgpu_ring_undo(ring); 201 amdgpu_ring_undo(ring);
207 return r; 202 return r;
208 } 203 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 40a23704a981..d851ea15059f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -447,7 +447,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
447 dev_info.max_memory_clock = adev->pm.default_mclk * 10; 447 dev_info.max_memory_clock = adev->pm.default_mclk * 10;
448 } 448 }
449 dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; 449 dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
450 dev_info.num_rb_pipes = adev->gfx.config.num_rbs; 450 dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
451 adev->gfx.config.max_shader_engines;
451 dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; 452 dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
452 dev_info._pad = 0; 453 dev_info._pad = 0;
453 dev_info.ids_flags = 0; 454 dev_info.ids_flags = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 589b36e8c5cf..0e13d80d2a95 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -270,30 +270,28 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
270 struct drm_device *ddev = dev_get_drvdata(dev); 270 struct drm_device *ddev = dev_get_drvdata(dev);
271 struct amdgpu_device *adev = ddev->dev_private; 271 struct amdgpu_device *adev = ddev->dev_private;
272 enum amd_pm_state_type state = 0; 272 enum amd_pm_state_type state = 0;
273 long idx; 273 unsigned long idx;
274 int ret; 274 int ret;
275 275
276 if (strlen(buf) == 1) 276 if (strlen(buf) == 1)
277 adev->pp_force_state_enabled = false; 277 adev->pp_force_state_enabled = false;
278 else { 278 else if (adev->pp_enabled) {
279 ret = kstrtol(buf, 0, &idx); 279 struct pp_states_info data;
280 280
281 if (ret) { 281 ret = kstrtoul(buf, 0, &idx);
282 if (ret || idx >= ARRAY_SIZE(data.states)) {
282 count = -EINVAL; 283 count = -EINVAL;
283 goto fail; 284 goto fail;
284 } 285 }
285 286
286 if (adev->pp_enabled) { 287 amdgpu_dpm_get_pp_num_states(adev, &data);
287 struct pp_states_info data; 288 state = data.states[idx];
288 amdgpu_dpm_get_pp_num_states(adev, &data); 289 /* only set user selected power states */
289 state = data.states[idx]; 290 if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
290 /* only set user selected power states */ 291 state != POWER_STATE_TYPE_DEFAULT) {
291 if (state != POWER_STATE_TYPE_INTERNAL_BOOT && 292 amdgpu_dpm_dispatch_task(adev,
292 state != POWER_STATE_TYPE_DEFAULT) { 293 AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
293 amdgpu_dpm_dispatch_task(adev, 294 adev->pp_force_state_enabled = true;
294 AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
295 adev->pp_force_state_enabled = true;
296 }
297 } 295 }
298 } 296 }
299fail: 297fail:
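
The pp_force_state fix switches to kstrtoul() and rejects indices at or beyond ARRAY_SIZE(data.states) before any array access, so a negative or oversized value can never index the table. A standalone stand-in for that parse-and-bound step; NUM_STATES is an assumed table size:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_STATES 16                    /* assumed table size */

    static int parse_state_index(const char *buf, unsigned long *out)
    {
            char *end;

            errno = 0;
            unsigned long idx = strtoul(buf, &end, 0);
            if (errno || end == buf || idx >= NUM_STATES)
                    return -EINVAL;          /* reject before any array use */
            *out = idx;
            return 0;
    }

    int main(void)
    {
            unsigned long idx;
            printf("parse \"3\":  %d\n", parse_state_index("3", &idx));
            printf("parse \"99\": %d\n", parse_state_index("99", &idx));
            return 0;
    }
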
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e19520c4b4b6..d9c88d13f8db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1106,6 +1106,10 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
1106 if (fences == 0 && handles == 0) { 1106 if (fences == 0 && handles == 0) {
1107 if (adev->pm.dpm_enabled) { 1107 if (adev->pm.dpm_enabled) {
1108 amdgpu_dpm_enable_uvd(adev, false); 1108 amdgpu_dpm_enable_uvd(adev, false);
1109 /* just a workaround for the UVD clock remaining high even
1110 * when UVD DPM is disabled on Polaris10 */
1111 if (adev->asic_type == CHIP_POLARIS10)
1112 amdgpu_asic_set_uvd_clocks(adev, 0, 0);
1109 } else { 1113 } else {
1110 amdgpu_asic_set_uvd_clocks(adev, 0, 0); 1114 amdgpu_asic_set_uvd_clocks(adev, 0, 0);
1111 } 1115 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 62a4c127620f..9f36ed30ba11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -298,8 +298,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
298 unsigned vm_id, uint64_t pd_addr, 298 unsigned vm_id, uint64_t pd_addr,
299 uint32_t gds_base, uint32_t gds_size, 299 uint32_t gds_base, uint32_t gds_size,
300 uint32_t gws_base, uint32_t gws_size, 300 uint32_t gws_base, uint32_t gws_size,
301 uint32_t oa_base, uint32_t oa_size, 301 uint32_t oa_base, uint32_t oa_size)
302 bool vmid_switch)
303{ 302{
304 struct amdgpu_device *adev = ring->adev; 303 struct amdgpu_device *adev = ring->adev;
305 struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id]; 304 struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
@@ -313,7 +312,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
313 int r; 312 int r;
314 313
315 if (ring->funcs->emit_pipeline_sync && ( 314 if (ring->funcs->emit_pipeline_sync && (
316 pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || vmid_switch)) 315 pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
316 ring->type == AMDGPU_RING_TYPE_COMPUTE))
317 amdgpu_ring_emit_pipeline_sync(ring); 317 amdgpu_ring_emit_pipeline_sync(ring);
318 318
319 if (ring->funcs->emit_vm_flush && 319 if (ring->funcs->emit_vm_flush &&
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 07bc795a4ca9..910431808542 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -962,6 +962,12 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
962 return true; 962 return true;
963} 963}
964 964
965static u32 cik_get_virtual_caps(struct amdgpu_device *adev)
966{
967 /* CIK does not support SR-IOV */
968 return 0;
969}
970
965static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { 971static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
966 {mmGRBM_STATUS, false}, 972 {mmGRBM_STATUS, false},
967 {mmGB_ADDR_CONFIG, false}, 973 {mmGB_ADDR_CONFIG, false},
@@ -2007,6 +2013,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
2007 .get_xclk = &cik_get_xclk, 2013 .get_xclk = &cik_get_xclk,
2008 .set_uvd_clocks = &cik_set_uvd_clocks, 2014 .set_uvd_clocks = &cik_set_uvd_clocks,
2009 .set_vce_clocks = &cik_set_vce_clocks, 2015 .set_vce_clocks = &cik_set_vce_clocks,
2016 .get_virtual_caps = &cik_get_virtual_caps,
2010 /* these should be moved to their own ip modules */ 2017 /* these should be moved to their own ip modules */
2011 .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, 2018 .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
2012 .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle, 2019 .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 8c6ad1e72f02..fc8ff4d3ccf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4833,7 +4833,7 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
4833 case 2: 4833 case 2:
4834 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4834 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4835 ring = &adev->gfx.compute_ring[i]; 4835 ring = &adev->gfx.compute_ring[i];
4836 if ((ring->me == me_id) & (ring->pipe == pipe_id)) 4836 if ((ring->me == me_id) && (ring->pipe == pipe_id))
4837 amdgpu_fence_process(ring); 4837 amdgpu_fence_process(ring);
4838 } 4838 }
4839 break; 4839 break;
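
The gfx_v7 change replaces & with &&. Both operands here are == comparisons yielding 0 or 1, so the bitwise form happened to compute the right value, but it does not short-circuit and diverges for general integers, as this self-contained demonstration shows:

    #include <stdio.h>

    int main(void)
    {
            int me_match = 2, pipe_match = 1;   /* both "true" as conditions */

            printf("bitwise: %d\n", (me_match & pipe_match) ? 1 : 0);  /* 0 */
            printf("logical: %d\n", (me_match && pipe_match) ? 1 : 0); /* 1 */
            return 0;
    }
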
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 9f6f8669edc3..b2ebd4fef6cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -47,6 +47,8 @@
47#include "dce/dce_10_0_d.h" 47#include "dce/dce_10_0_d.h"
48#include "dce/dce_10_0_sh_mask.h" 48#include "dce/dce_10_0_sh_mask.h"
49 49
50#include "smu/smu_7_1_3_d.h"
51
50#define GFX8_NUM_GFX_RINGS 1 52#define GFX8_NUM_GFX_RINGS 1
51#define GFX8_NUM_COMPUTE_RINGS 8 53#define GFX8_NUM_COMPUTE_RINGS 8
52 54
@@ -297,7 +299,8 @@ static const u32 polaris11_golden_common_all[] =
297static const u32 golden_settings_polaris10_a11[] = 299static const u32 golden_settings_polaris10_a11[] =
298{ 300{
299 mmATC_MISC_CG, 0x000c0fc0, 0x000c0200, 301 mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
300 mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208, 302 mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
303 mmCB_HW_CONTROL_2, 0, 0x0f000000,
301 mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, 304 mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
302 mmDB_DEBUG2, 0xf00fffff, 0x00000400, 305 mmDB_DEBUG2, 0xf00fffff, 0x00000400,
303 mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, 306 mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -692,6 +695,7 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
692 amdgpu_program_register_sequence(adev, 695 amdgpu_program_register_sequence(adev,
693 polaris10_golden_common_all, 696 polaris10_golden_common_all,
694 (const u32)ARRAY_SIZE(polaris10_golden_common_all)); 697 (const u32)ARRAY_SIZE(polaris10_golden_common_all));
698 WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
695 break; 699 break;
696 case CHIP_CARRIZO: 700 case CHIP_CARRIZO:
697 amdgpu_program_register_sequence(adev, 701 amdgpu_program_register_sequence(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 2c88d0b66cf3..a65c96029476 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -421,6 +421,20 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
421 return true; 421 return true;
422} 422}
423 423
424static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
425{
426 u32 caps = 0;
427 u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
428
429 if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
430 caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;
431
432 if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
433 caps |= AMDGPU_VIRT_CAPS_IS_VF;
434
435 return caps;
436}
437
424static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { 438static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
425 {mmGB_MACROTILE_MODE7, true}, 439 {mmGB_MACROTILE_MODE7, true},
426}; 440};
@@ -1118,6 +1132,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
1118 .get_xclk = &vi_get_xclk, 1132 .get_xclk = &vi_get_xclk,
1119 .set_uvd_clocks = &vi_set_uvd_clocks, 1133 .set_uvd_clocks = &vi_set_uvd_clocks,
1120 .set_vce_clocks = &vi_set_vce_clocks, 1134 .set_vce_clocks = &vi_set_vce_clocks,
1135 .get_virtual_caps = &vi_get_virtual_caps,
1121 /* these should be moved to their own ip modules */ 1136 /* these should be moved to their own ip modules */
1122 .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, 1137 .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
1123 .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle, 1138 .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
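
vi_get_virtual_caps() reads BIF_IOV_FUNC_IDENTIFIER and extracts fields with REG_GET_FIELD, which is mask-and-shift underneath. A runnable illustration with made-up field positions, not the real register layout:

    #include <stdint.h>
    #include <stdio.h>

    #define FIELD_GET(reg, mask, shift) (((reg) & (mask)) >> (shift))

    #define IOV_ENABLE_MASK  0x00000001u   /* assumed bit 0 */
    #define IOV_ENABLE_SHIFT 0
    #define FUNC_ID_MASK     0x0000ff00u   /* assumed bits 8..15 */
    #define FUNC_ID_SHIFT    8

    int main(void)
    {
            uint32_t reg = 0x00002a01;     /* pretend register readout */

            printf("sriov enabled: %u\n",
                   (unsigned)FIELD_GET(reg, IOV_ENABLE_MASK, IOV_ENABLE_SHIFT));
            printf("function id:   %u\n",
                   (unsigned)FIELD_GET(reg, FUNC_ID_MASK, FUNC_ID_SHIFT));
            return 0;
    }
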
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index ac005796b71c..7708d90b9da9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -242,13 +242,19 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
242 pqm_uninit(&p->pqm); 242 pqm_uninit(&p->pqm);
243 243
244 /* Iterate over all process device data structure and check 244 /* Iterate over all process device data structure and check
245 * if we should reset all wavefronts */ 245 * if we should delete debug managers and reset all wavefronts
246 list_for_each_entry(pdd, &p->per_device_data, per_device_list) 246 */
247 list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
248 if ((pdd->dev->dbgmgr) &&
249 (pdd->dev->dbgmgr->pasid == p->pasid))
250 kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
251
247 if (pdd->reset_wavefronts) { 252 if (pdd->reset_wavefronts) {
248 pr_warn("amdkfd: Resetting all wave fronts\n"); 253 pr_warn("amdkfd: Resetting all wave fronts\n");
249 dbgdev_wave_reset_wavefronts(pdd->dev, p); 254 dbgdev_wave_reset_wavefronts(pdd->dev, p);
250 pdd->reset_wavefronts = false; 255 pdd->reset_wavefronts = false;
251 } 256 }
257 }
252 258
253 mutex_unlock(&p->mutex); 259 mutex_unlock(&p->mutex);
254 260
@@ -404,42 +410,52 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
404 410
405 idx = srcu_read_lock(&kfd_processes_srcu); 411 idx = srcu_read_lock(&kfd_processes_srcu);
406 412
413 /*
414 * Look for the process that matches the pasid. If there is no such
415 * process, we either released it in amdkfd's own notifier, or there
416 * is a bug. Unfortunately, there is no way to tell...
417 */
407 hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes) 418 hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
408 if (p->pasid == pasid) 419 if (p->pasid == pasid) {
409 break;
410 420
411 srcu_read_unlock(&kfd_processes_srcu, idx); 421 srcu_read_unlock(&kfd_processes_srcu, idx);
412 422
413 BUG_ON(p->pasid != pasid); 423 pr_debug("Unbinding process %d from IOMMU\n", pasid);
414 424
415 mutex_lock(&p->mutex); 425 mutex_lock(&p->mutex);
416 426
417 if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid)) 427 if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
418 kfd_dbgmgr_destroy(dev->dbgmgr); 428 kfd_dbgmgr_destroy(dev->dbgmgr);
419 429
420 pqm_uninit(&p->pqm); 430 pqm_uninit(&p->pqm);
421 431
422 pdd = kfd_get_process_device_data(dev, p); 432 pdd = kfd_get_process_device_data(dev, p);
423 433
424 if (!pdd) { 434 if (!pdd) {
425 mutex_unlock(&p->mutex); 435 mutex_unlock(&p->mutex);
426 return; 436 return;
427 } 437 }
428 438
429 if (pdd->reset_wavefronts) { 439 if (pdd->reset_wavefronts) {
430 dbgdev_wave_reset_wavefronts(pdd->dev, p); 440 dbgdev_wave_reset_wavefronts(pdd->dev, p);
431 pdd->reset_wavefronts = false; 441 pdd->reset_wavefronts = false;
432 } 442 }
433 443
434 /* 444 /*
435 * Just mark pdd as unbound, because we still need it to call 445 * Just mark pdd as unbound, because we still need it
436 * amd_iommu_unbind_pasid() when the process exits. 446 * to call amd_iommu_unbind_pasid() when the
437 * We don't call amd_iommu_unbind_pasid() here 447 * process exits.
438 * because the IOMMU called us. 448 * We don't call amd_iommu_unbind_pasid() here
439 */ 449 * because the IOMMU called us.
440 pdd->bound = false; 450 */
451 pdd->bound = false;
441 452
442 mutex_unlock(&p->mutex); 453 mutex_unlock(&p->mutex);
454
455 return;
456 }
457
458 srcu_read_unlock(&kfd_processes_srcu, idx);
443} 459}
444 460
445struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p) 461struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
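
The kfd_process.c rework drops the BUG_ON on a possibly stale pointer and keeps the SRCU read section balanced on both the found and the not-found path. The control-flow skeleton, with lookup_process() and do_unbind() as hypothetical stand-ins:

    #include <linux/srcu.h>
    #include <linux/types.h>

    bool lookup_process(unsigned int pasid);        /* hypothetical */
    void do_unbind(unsigned int pasid);             /* hypothetical */

    static void unbind_by_pasid(struct srcu_struct *ss, unsigned int pasid)
    {
            int idx = srcu_read_lock(ss);

            if (lookup_process(pasid)) {
                    srcu_read_unlock(ss, idx);      /* drop before heavy work */
                    do_unbind(pasid);
                    return;
            }

            srcu_read_unlock(ss, idx);              /* not-found path unlocks too */
    }
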
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 74909e72a009..884c96f50c3d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -666,7 +666,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
666 dev->node_props.simd_count); 666 dev->node_props.simd_count);
667 667
668 if (dev->mem_bank_count < dev->node_props.mem_banks_count) { 668 if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
669 pr_warn("kfd: mem_banks_count truncated from %d to %d\n", 669 pr_info_once("kfd: mem_banks_count truncated from %d to %d\n",
670 dev->node_props.mem_banks_count, 670 dev->node_props.mem_banks_count,
671 dev->mem_bank_count); 671 dev->mem_bank_count);
672 sysfs_show_32bit_prop(buffer, "mem_banks_count", 672 sysfs_show_32bit_prop(buffer, "mem_banks_count",
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index 32f3e345de08..3493da5c8f0e 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -5538,6 +5538,78 @@ typedef struct _ATOM_ASIC_PROFILING_INFO_V3_5
5538 ULONG ulReserved[12]; 5538 ULONG ulReserved[12];
5539}ATOM_ASIC_PROFILING_INFO_V3_5; 5539}ATOM_ASIC_PROFILING_INFO_V3_5;
5540 5540
5541/* for Polaris10/11 AVFS parameters */
5542typedef struct _ATOM_ASIC_PROFILING_INFO_V3_6
5543{
5544 ATOM_COMMON_TABLE_HEADER asHeader;
5545 ULONG ulMaxVddc;
5546 ULONG ulMinVddc;
5547 USHORT usLkgEuseIndex;
5548 UCHAR ucLkgEfuseBitLSB;
5549 UCHAR ucLkgEfuseLength;
5550 ULONG ulLkgEncodeLn_MaxDivMin;
5551 ULONG ulLkgEncodeMax;
5552 ULONG ulLkgEncodeMin;
5553 EFUSE_LINEAR_FUNC_PARAM sRoFuse;
5554 ULONG ulEvvDefaultVddc;
5555 ULONG ulEvvNoCalcVddc;
5556 ULONG ulSpeed_Model;
5557 ULONG ulSM_A0;
5558 ULONG ulSM_A1;
5559 ULONG ulSM_A2;
5560 ULONG ulSM_A3;
5561 ULONG ulSM_A4;
5562 ULONG ulSM_A5;
5563 ULONG ulSM_A6;
5564 ULONG ulSM_A7;
5565 UCHAR ucSM_A0_sign;
5566 UCHAR ucSM_A1_sign;
5567 UCHAR ucSM_A2_sign;
5568 UCHAR ucSM_A3_sign;
5569 UCHAR ucSM_A4_sign;
5570 UCHAR ucSM_A5_sign;
5571 UCHAR ucSM_A6_sign;
5572 UCHAR ucSM_A7_sign;
5573 ULONG ulMargin_RO_a;
5574 ULONG ulMargin_RO_b;
5575 ULONG ulMargin_RO_c;
5576 ULONG ulMargin_fixed;
5577 ULONG ulMargin_Fmax_mean;
5578 ULONG ulMargin_plat_mean;
5579 ULONG ulMargin_Fmax_sigma;
5580 ULONG ulMargin_plat_sigma;
5581 ULONG ulMargin_DC_sigma;
5582 ULONG ulLoadLineSlop;
5583 ULONG ulaTDClimitPerDPM[8];
5584 ULONG ulaNoCalcVddcPerDPM[8];
5585 ULONG ulAVFS_meanNsigma_Acontant0;
5586 ULONG ulAVFS_meanNsigma_Acontant1;
5587 ULONG ulAVFS_meanNsigma_Acontant2;
5588 USHORT usAVFS_meanNsigma_DC_tol_sigma;
5589 USHORT usAVFS_meanNsigma_Platform_mean;
5590 USHORT usAVFS_meanNsigma_Platform_sigma;
5591 ULONG ulGB_VDROOP_TABLE_CKSOFF_a0;
5592 ULONG ulGB_VDROOP_TABLE_CKSOFF_a1;
5593 ULONG ulGB_VDROOP_TABLE_CKSOFF_a2;
5594 ULONG ulGB_VDROOP_TABLE_CKSON_a0;
5595 ULONG ulGB_VDROOP_TABLE_CKSON_a1;
5596 ULONG ulGB_VDROOP_TABLE_CKSON_a2;
5597 ULONG ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
5598 USHORT usAVFSGB_FUSE_TABLE_CKSOFF_m2;
5599 ULONG ulAVFSGB_FUSE_TABLE_CKSOFF_b;
5600 ULONG ulAVFSGB_FUSE_TABLE_CKSON_m1;
5601 USHORT usAVFSGB_FUSE_TABLE_CKSON_m2;
5602 ULONG ulAVFSGB_FUSE_TABLE_CKSON_b;
5603 USHORT usMaxVoltage_0_25mv;
5604 UCHAR ucEnableGB_VDROOP_TABLE_CKSOFF;
5605 UCHAR ucEnableGB_VDROOP_TABLE_CKSON;
5606 UCHAR ucEnableGB_FUSE_TABLE_CKSOFF;
5607 UCHAR ucEnableGB_FUSE_TABLE_CKSON;
5608 USHORT usPSM_Age_ComFactor;
5609 UCHAR ucEnableApplyAVFS_CKS_OFF_Voltage;
5610 UCHAR ucReserved;
5611}ATOM_ASIC_PROFILING_INFO_V3_6;
5612
5541 5613
5542typedef struct _ATOM_SCLK_FCW_RANGE_ENTRY_V1{ 5614typedef struct _ATOM_SCLK_FCW_RANGE_ENTRY_V1{
5543 ULONG ulMaxSclkFreq; 5615 ULONG ulMaxSclkFreq;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 586f73276226..92912ab20944 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -633,6 +633,8 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
633 data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE; 633 data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE;
634 data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE; 634 data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE;
635 635
636 data->force_pcie_gen = PP_PCIEGenInvalid;
637
636 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, 638 if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
637 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) 639 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
638 data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2; 640 data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index fa208ada6892..efb77eda7508 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -306,10 +306,14 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
306{ 306{
307 PHM_FUNC_CHECK(hwmgr); 307 PHM_FUNC_CHECK(hwmgr);
308 308
309 if (hwmgr->hwmgr_func->store_cc6_data == NULL) 309 if (display_config == NULL)
310 return -EINVAL; 310 return -EINVAL;
311 311
312 hwmgr->display_config = *display_config; 312 hwmgr->display_config = *display_config;
313
314 if (hwmgr->hwmgr_func->store_cc6_data == NULL)
315 return -EINVAL;
316
313 /* to do pass other display configuration in future */ 317 /* to do pass other display configuration in future */
314 318
315 if (hwmgr->hwmgr_func->store_cc6_data) 319 if (hwmgr->hwmgr_func->store_cc6_data)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
index 347fef127ce9..2930a3355948 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
@@ -39,6 +39,7 @@ struct phm_ppt_v1_clock_voltage_dependency_record {
39 uint8_t phases; 39 uint8_t phases;
40 uint8_t cks_enable; 40 uint8_t cks_enable;
41 uint8_t cks_voffset; 41 uint8_t cks_voffset;
42 uint32_t sclk_offset;
42}; 43};
43 44
44typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record; 45typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
index aa6be033f21b..ec2a7ada346a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
@@ -98,6 +98,7 @@
98#define PCIE_BUS_CLK 10000 98#define PCIE_BUS_CLK 10000
99#define TCLK (PCIE_BUS_CLK / 10) 99#define TCLK (PCIE_BUS_CLK / 10)
100 100
101#define CEILING_UCHAR(double) ((double-(uint8_t)(double)) > 0 ? (uint8_t)(double+1) : (uint8_t)(double))
101 102
102static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] = 103static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] =
103{ {600, 1050, 3, 0}, {600, 1050, 6, 1} }; 104{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
@@ -999,7 +1000,7 @@ static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
999 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), 1000 vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
1000 (dep_table->entries[i].vddc - 1001 (dep_table->entries[i].vddc -
1001 (uint16_t)data->vddc_vddci_delta)); 1002 (uint16_t)data->vddc_vddci_delta));
1002 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; 1003 *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1003 } 1004 }
1004 1005
1005 if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) 1006 if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
@@ -1296,7 +1297,6 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1296 } 1297 }
1297 1298
1298 mem_level->MclkFrequency = clock; 1299 mem_level->MclkFrequency = clock;
1299 mem_level->StutterEnable = 0;
1300 mem_level->EnabledForThrottle = 1; 1300 mem_level->EnabledForThrottle = 1;
1301 mem_level->EnabledForActivity = 0; 1301 mem_level->EnabledForActivity = 0;
1302 mem_level->UpHyst = 0; 1302 mem_level->UpHyst = 0;
@@ -1304,7 +1304,6 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1304 mem_level->VoltageDownHyst = 0; 1304 mem_level->VoltageDownHyst = 0;
1305 mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; 1305 mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
1306 mem_level->StutterEnable = false; 1306 mem_level->StutterEnable = false;
1307
1308 mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; 1307 mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1309 1308
1310 data->display_timing.num_existing_displays = info.display_count; 1309 data->display_timing.num_existing_displays = info.display_count;
@@ -1363,7 +1362,7 @@ static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1363 * a higher state by default such that we are not affected by 1362 * a higher state by default such that we are not affected by
1364 * the up threshold or MCLK DPM latency. 1363 * the up threshold or MCLK DPM latency.
1365 */ 1364 */
1366 levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target; 1365 levels[0].ActivityLevel = 0x1f;
1367 CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel); 1366 CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
1368 1367
1369 data->smc_state_table.MemoryDpmLevelCount = 1368 data->smc_state_table.MemoryDpmLevelCount =
@@ -1424,22 +1423,19 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1424 1423
1425 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; 1424 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
1426 1425
1427 if (!data->sclk_dpm_key_disabled) { 1426
1428 /* Get MinVoltage and Frequency from DPM0, 1427 /* Get MinVoltage and Frequency from DPM0,
1429 * already converted to SMC_UL */ 1428 * already converted to SMC_UL */
1430 sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value; 1429 sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value;
1431 result = polaris10_get_dependency_volt_by_clk(hwmgr, 1430 result = polaris10_get_dependency_volt_by_clk(hwmgr,
1432 table_info->vdd_dep_on_sclk, 1431 table_info->vdd_dep_on_sclk,
1433 table->ACPILevel.SclkFrequency, 1432 sclk_frequency,
1434 &table->ACPILevel.MinVoltage, &mvdd); 1433 &table->ACPILevel.MinVoltage, &mvdd);
1435 PP_ASSERT_WITH_CODE((0 == result), 1434 PP_ASSERT_WITH_CODE((0 == result),
1436 "Cannot find ACPI VDDC voltage value " 1435 "Cannot find ACPI VDDC voltage value "
1437 "in Clock Dependency Table", ); 1436 "in Clock Dependency Table",
1438 } else { 1437 );
1439 sclk_frequency = data->vbios_boot_state.sclk_bootup_value; 1438
1440 table->ACPILevel.MinVoltage =
1441 data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
1442 }
1443 1439
1444 result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting)); 1440 result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
1445 PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result); 1441 PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
@@ -1464,24 +1460,18 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1464 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac); 1460 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
1465 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate); 1461 CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
1466 1462
1467 if (!data->mclk_dpm_key_disabled) { 1463
1468 /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ 1464 /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
1469 table->MemoryACPILevel.MclkFrequency = 1465 table->MemoryACPILevel.MclkFrequency =
1470 data->dpm_table.mclk_table.dpm_levels[0].value; 1466 data->dpm_table.mclk_table.dpm_levels[0].value;
1471 result = polaris10_get_dependency_volt_by_clk(hwmgr, 1467 result = polaris10_get_dependency_volt_by_clk(hwmgr,
1472 table_info->vdd_dep_on_mclk, 1468 table_info->vdd_dep_on_mclk,
1473 table->MemoryACPILevel.MclkFrequency, 1469 table->MemoryACPILevel.MclkFrequency,
1474 &table->MemoryACPILevel.MinVoltage, &mvdd); 1470 &table->MemoryACPILevel.MinVoltage, &mvdd);
1475 PP_ASSERT_WITH_CODE((0 == result), 1471 PP_ASSERT_WITH_CODE((0 == result),
1476 "Cannot find ACPI VDDCI voltage value " 1472 "Cannot find ACPI VDDCI voltage value "
1477 "in Clock Dependency Table", 1473 "in Clock Dependency Table",
1478 ); 1474 );
1479 } else {
1480 table->MemoryACPILevel.MclkFrequency =
1481 data->vbios_boot_state.mclk_bootup_value;
1482 table->MemoryACPILevel.MinVoltage =
1483 data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
1484 }
1485 1475
1486 us_mvdd = 0; 1476 us_mvdd = 0;
1487 if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) || 1477 if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
@@ -1526,6 +1516,7 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1526 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = 1516 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1527 table_info->mm_dep_table; 1517 table_info->mm_dep_table;
1528 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 1518 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
1519 uint32_t vddci;
1529 1520
1530 table->VceLevelCount = (uint8_t)(mm_table->count); 1521 table->VceLevelCount = (uint8_t)(mm_table->count);
1531 table->VceBootLevel = 0; 1522 table->VceBootLevel = 0;
@@ -1535,9 +1526,18 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1535 table->VceLevel[count].MinVoltage = 0; 1526 table->VceLevel[count].MinVoltage = 0;
1536 table->VceLevel[count].MinVoltage |= 1527 table->VceLevel[count].MinVoltage |=
1537 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; 1528 (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
1529
1530 if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1531 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1532 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1533 else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1534 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1535 else
1536 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1537
1538
1538 table->VceLevel[count].MinVoltage |= 1539 table->VceLevel[count].MinVoltage |=
1539 ((mm_table->entries[count].vddc - data->vddc_vddci_delta) * 1540 (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1540 VOLTAGE_SCALE) << VDDCI_SHIFT;
1541 table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT; 1541 table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1542 1542
1543 /*retrieve divider value for VBIOS */ 1543 /*retrieve divider value for VBIOS */
@@ -1566,6 +1566,7 @@ static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1566 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = 1566 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1567 table_info->mm_dep_table; 1567 table_info->mm_dep_table;
1568 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 1568 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
1569 uint32_t vddci;
1569 1570
1570 table->SamuBootLevel = 0; 1571 table->SamuBootLevel = 0;
1571 table->SamuLevelCount = (uint8_t)(mm_table->count); 1572 table->SamuLevelCount = (uint8_t)(mm_table->count);
@@ -1576,8 +1577,16 @@ static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
1576 table->SamuLevel[count].Frequency = mm_table->entries[count].samclock; 1577 table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
1577 table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * 1578 table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1578 VOLTAGE_SCALE) << VDDC_SHIFT; 1579 VOLTAGE_SCALE) << VDDC_SHIFT;
1579 table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - 1580
1580 data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT; 1581 if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1582 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1583 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1584 else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1585 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1586 else
1587 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1588
1589 table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1581 table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT; 1590 table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1582 1591
1583 /* retrieve divider value for VBIOS */ 1592 /* retrieve divider value for VBIOS */
@@ -1660,6 +1669,7 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1660 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = 1669 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1661 table_info->mm_dep_table; 1670 table_info->mm_dep_table;
1662 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 1671 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
1672 uint32_t vddci;
1663 1673
1664 table->UvdLevelCount = (uint8_t)(mm_table->count); 1674 table->UvdLevelCount = (uint8_t)(mm_table->count);
1665 table->UvdBootLevel = 0; 1675 table->UvdBootLevel = 0;
@@ -1670,8 +1680,16 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1670 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; 1680 table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
1671 table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * 1681 table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
1672 VOLTAGE_SCALE) << VDDC_SHIFT; 1682 VOLTAGE_SCALE) << VDDC_SHIFT;
1673 table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - 1683
1674 data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT; 1684 if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
1685 vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
1686 mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
1687 else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
1688 vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
1689 else
1690 vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
1691
1692 table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
1675 table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT; 1693 table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
1676 1694
1677 /* retrieve divider value for VBIOS */ 1695 /* retrieve divider value for VBIOS */
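
The same GPIO/SVID2/bootup vddci selection is now pasted into the VCE, SAMU and UVD populate loops above; a helper of this shape would factor it. Note that the bootup branch in the diff already applies VOLTAGE_SCALE and VDDCI_SHIFT, which the subsequent (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT would appear to apply a second time; the sketch returns the raw value instead. This is a suggestion derived from the diff, not driver code, and find_closest_vddci() is a stand-in for phm_find_closest_vddci():

    #include <stdint.h>

    enum vddci_control { CTRL_BY_GPIO, CTRL_BY_SVID2, CTRL_NONE };

    uint16_t find_closest_vddci(uint16_t target);   /* hypothetical stand-in */

    static uint32_t mm_level_vddci(enum vddci_control ctrl, uint16_t vddc,
                                   uint16_t delta, uint32_t bootup_vddci)
    {
            if (ctrl == CTRL_BY_GPIO)
                    return find_closest_vddci(vddc - delta);
            if (ctrl == CTRL_BY_SVID2)
                    return vddc - delta;
            return bootup_vddci;    /* raw; caller applies VOLTAGE_SCALE/shift */
    }
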
@@ -1692,8 +1710,8 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
 		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
 		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
 		CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
-
 	}
+
 	return result;
 }
 
@@ -1761,12 +1779,9 @@ static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
 
 static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 {
-	uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
-			volt_with_cks, value;
-	uint16_t clock_freq_u16;
+	uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
-			volt_offset = 0;
+	uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
 	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1778,50 +1793,46 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 	 * if the part is SS or FF. if RO >= 1660MHz, part is FF.
 	 */
 	efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixSMU_EFUSE_0 + (146 * 4));
-	efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixSMU_EFUSE_0 + (148 * 4));
+			ixSMU_EFUSE_0 + (67 * 4));
 	efuse &= 0xFF000000;
 	efuse = efuse >> 24;
-	efuse2 &= 0xF;
-
-	if (efuse2 == 1)
-		ro = (2300 - 1350) * efuse / 255 + 1350;
-	else
-		ro = (2500 - 1000) * efuse / 255 + 1000;
 
-	if (ro >= 1660)
-		type = 0;
-	else
-		type = 1;
+	if (hwmgr->chip_id == CHIP_POLARIS10) {
+		min = 1000;
+		max = 2300;
+	} else {
+		min = 1100;
+		max = 2100;
+	}
 
-	/* Populate Stretch amount */
-	data->smc_state_table.ClockStretcherAmount = stretch_amount;
+	ro = efuse * (max -min)/255 + min;
 
-	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+	/* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset
+	 * there is a little difference in calculating
+	 * volt_with_cks with windows */
 	for (i = 0; i < sclk_table->count; i++) {
 		data->smc_state_table.Sclk_CKS_masterEn0_7 |=
 				sclk_table->entries[i].cks_enable << i;
-		volt_without_cks = (uint32_t)((14041 *
-			(sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
-			(4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
-		volt_with_cks = (uint32_t)((13946 *
-			(sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
-			(3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+		if (hwmgr->chip_id == CHIP_POLARIS10) {
+			volt_without_cks = (uint32_t)((2753594000 + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
+						(2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
+			volt_with_cks = (uint32_t)((279720200 + sclk_table->entries[i].clk * 3232 - (ro - 65) * 100000000) / \
+					(252248000 - sclk_table->entries[i].clk/100 * 115764));
+		} else {
+			volt_without_cks = (uint32_t)((2416794800 + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
+						(2625416 - (sclk_table->entries[i].clk/100) * 12586807/10000));
+			volt_with_cks = (uint32_t)((2999656000 + sclk_table->entries[i].clk * 392803/100 - (ro - 44) * 1000000) / \
+					(3422454 - sclk_table->entries[i].clk/100 * 18886376/10000));
+		}
 
 		if (volt_without_cks >= volt_with_cks)
-			volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
-					sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+			volt_offset = (uint8_t)CEILING_UCHAR((volt_without_cks - volt_with_cks +
+					sclk_table->entries[i].cks_voffset) * 100 / 625);
+
 		data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
 	}
 
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-			STRETCH_ENABLE, 0x0);
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-			masterReset, 0x1);
-	/* PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, staticEnable, 0x1); */
-	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-			masterReset, 0x0);
-
+	data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
 	/* Populate CKS Lookup Table */
 	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
 		stretch_amount2 = 0;
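The rework above drops the two-fuse, two-curve derivation and instead reads a single efuse byte, then linearly interpolates the ring-oscillator figure between chip-specific bounds (1000-2300 for Polaris10, 1100-2100 otherwise). A self-contained sketch of that interpolation with a worked value; the bounds mirror the Polaris10 branch of the hunk:

    #include <stdint.h>
    #include <stdio.h>

    /* Linear interpolation used in the patch: an 8-bit fuse value selects
     * a ring-oscillator figure between chip-specific min and max bounds. */
    static uint32_t fuse_to_ro(uint8_t efuse, uint32_t min, uint32_t max)
    {
        return efuse * (max - min) / 255 + min;
    }

    int main(void)
    {
        /* efuse = 0   -> ro = 1000 (slow corner)
         * efuse = 255 -> ro = 2300 (fast corner)
         * efuse = 128 -> ro = 1652 (integer division truncates) */
        printf("%u\n", fuse_to_ro(128, 1000, 2300));
        return 0;
    }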
@@ -1835,69 +1846,6 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 				return -EINVAL);
 	}
 
-	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixPWR_CKS_CNTL);
-	value &= 0xFFC2FF87;
-	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
-			polaris10_clock_stretcher_lookup_table[stretch_amount2][0];
-	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
-			polaris10_clock_stretcher_lookup_table[stretch_amount2][1];
-	clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table.
-			GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].SclkSetting.SclkFrequency) / 100);
-	if (polaris10_clock_stretcher_lookup_table[stretch_amount2][0] < clock_freq_u16
-			&& polaris10_clock_stretcher_lookup_table[stretch_amount2][1] > clock_freq_u16) {
-		/* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
-		value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
-		/* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
-		value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
-		/* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
-		value |= (polaris10_clock_stretch_amount_conversion
-				[polaris10_clock_stretcher_lookup_table[stretch_amount2][3]]
-				[stretch_amount]) << 3;
-	}
-	CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq);
-	CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq);
-	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
-			polaris10_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
-	data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
-			(polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
-	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-			ixPWR_CKS_CNTL, value);
-
-	/* Populate DDT Lookup Table */
-	for (i = 0; i < 4; i++) {
-		/* Assign the minimum and maximum VID stored
-		 * in the last row of Clock Stretcher Voltage Table.
-		 */
-		data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].minVID =
-				(uint8_t) polaris10_clock_stretcher_ddt_table[type][i][2];
-		data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].maxVID =
-				(uint8_t) polaris10_clock_stretcher_ddt_table[type][i][3];
-		/* Loop through each SCLK and check the frequency
-		 * to see if it lies within the frequency for clock stretcher.
-		 */
-		for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) {
-			cks_setting = 0;
-			clock_freq = PP_SMC_TO_HOST_UL(
-					data->smc_state_table.GraphicsLevel[j].SclkSetting.SclkFrequency);
-			/* Check the allowed frequency against the sclk level[j].
-			 * Sclk's endianness has already been converted,
-			 * and it's in 10Khz unit,
-			 * as opposed to Data table, which is in Mhz unit.
-			 */
-			if (clock_freq >= (polaris10_clock_stretcher_ddt_table[type][i][0]) * 100) {
-				cks_setting |= 0x2;
-				if (clock_freq < (polaris10_clock_stretcher_ddt_table[type][i][1]) * 100)
-					cks_setting |= 0x1;
-			}
-			data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting
-					|= cks_setting << (j * 2);
-		}
-		CONVERT_FROM_HOST_TO_SMC_US(
-				data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting);
-	}
-
 	value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
 	value &= 0xFFFFFFFE;
 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
@@ -1956,6 +1904,90 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
 	return 0;
 }
 
+
+int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
+	int result = 0;
+	struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
+	AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
+	AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
+	uint32_t tmp, i;
+	struct pp_smumgr *smumgr = hwmgr->smumgr;
+	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)hwmgr->pptable;
+	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+			table_info->vdd_dep_on_sclk;
+
+
+	if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+		return result;
+
+	result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
+
+	if (0 == result) {
+		table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
+		table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
+		table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
+		table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
+		table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
+		table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
+		table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
+		table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
+		table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
+		table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
+		table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
+		table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+		table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+		table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+		table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
+		table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
+		table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
+		AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
+		AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
+		AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
+		AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
+		AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
+		AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
+		AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
+
+		for (i = 0; i < NUM_VFT_COLUMNS; i++) {
+			AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
+			AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
+		}
+
+		result = polaris10_read_smc_sram_dword(smumgr,
+				SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
+				&tmp, data->sram_end);
+
+		polaris10_copy_bytes_to_smc(smumgr,
+					tmp,
+					(uint8_t *)&AVFS_meanNsigma,
+					sizeof(AVFS_meanNsigma_t),
+					data->sram_end);
+
+		result = polaris10_read_smc_sram_dword(smumgr,
+				SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
+				&tmp, data->sram_end);
+		polaris10_copy_bytes_to_smc(smumgr,
+					tmp,
+					(uint8_t *)&AVFS_SclkOffset,
+					sizeof(AVFS_Sclk_Offset_t),
+					data->sram_end);
+
+		data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
+						(avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
+						(avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
+						(avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
+	data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
+	}
+	return result;
+}
+
+
 /**
 * Initializes the SMC table and uploads it
 *
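polaris10_populate_avfs_parameters above follows a read-offset-then-copy pattern: the SMU firmware header in SMC SRAM stores the address of each AVFS table, so the host first reads that dword, then copies the populated struct to the returned address. Below is a toy model of the pattern; the flat sram array, the helper names and the addresses are stand-ins, not the driver's real interfaces.

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    static uint8_t sram[0x1000]; /* pretend SMC SRAM */

    static uint32_t smc_read_dword(uint32_t addr)
    {
        uint32_t v;
        memcpy(&v, &sram[addr], sizeof(v));
        return v;
    }

    static void smc_copy(uint32_t addr, const void *src, size_t len)
    {
        memcpy(&sram[addr], src, len);
    }

    struct fw_header { uint32_t avfs_mean_n_sigma; /* ... more table pointers */ };

    int main(void)
    {
        const uint32_t header_loc = 0x100;            /* assumed header location */
        struct fw_header hdr = { .avfs_mean_n_sigma = 0x200 };
        uint16_t table[4] = { 1, 2, 3, 4 };           /* stand-in AVFS table */
        uint32_t dest;

        smc_copy(header_loc, &hdr, sizeof(hdr));      /* firmware would do this */

        /* 1. read the table pointer out of the header ... */
        dest = smc_read_dword(header_loc + offsetof(struct fw_header, avfs_mean_n_sigma));
        /* 2. ... then copy the populated table to that address */
        smc_copy(dest, table, sizeof(table));

        printf("table uploaded to 0x%x\n", dest);
        return 0;
    }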
@@ -2056,6 +2088,10 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
 				"Failed to populate Clock Stretcher Data Table!",
 				return result);
 	}
+
+	result = polaris10_populate_avfs_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
+
 	table->CurrSclkPllRange = 0xff;
 	table->GraphicsVoltageChangeEnable = 1;
 	table->GraphicsThermThrottleEnable = 1;
@@ -2252,6 +2288,9 @@ static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
 static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 {
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+	uint32_t soft_register_value = 0;
+	uint32_t handshake_disables_offset = data->soft_regs_start
+				+ offsetof(SMU74_SoftRegisters, HandshakeDisables);
 
 	/* enable SCLK dpm */
 	if (!data->sclk_dpm_key_disabled)
@@ -2262,6 +2301,12 @@ static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 
 	/* enable MCLK dpm */
 	if (0 == data->mclk_dpm_key_disabled) {
+		/* Disable UVD - SMU handshake for MCLK. */
+		soft_register_value = cgs_read_ind_register(hwmgr->device,
+					CGS_IND_REG__SMC, handshake_disables_offset);
+		soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+					handshake_disables_offset, soft_register_value);
 
 		PP_ASSERT_WITH_CODE(
 		(0 == smum_send_msg_to_smc(hwmgr->smumgr,
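The added MCLK block is a read-modify-write against an indirectly addressed soft register: read HandshakeDisables, OR in the UVD bit, write the merged value back, so any other handshake-disable bits survive. A compilable toy model of the idiom; the register file, accessors and flag value are illustrative stand-ins, not the driver API:

    #include <stdint.h>
    #include <stdio.h>

    #define UVD_MCLK_HANDSHAKE_DISABLE 0x00000001u /* assumed bit position */

    static uint32_t regs[16]; /* pretend SMC soft-register space */

    static uint32_t reg_read(uint32_t offset) { return regs[offset]; }
    static void reg_write(uint32_t offset, uint32_t v) { regs[offset] = v; }

    int main(void)
    {
        uint32_t off = 3;                    /* stand-in for handshake_disables_offset */
        uint32_t v = reg_read(off);          /* read the current disables word  */
        v |= UVD_MCLK_HANDSHAKE_DISABLE;     /* set only the UVD handshake bit  */
        reg_write(off, v);                   /* write the merged value back     */
        printf("0x%x\n", reg_read(off));
        return 0;
    }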
@@ -2269,7 +2314,6 @@ static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 			"Failed to enable MCLK DPM during DPM Start Function!",
 			return -1);
 
-
 		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
 
 		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
@@ -2471,6 +2515,8 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 			"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
 
+	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
+
 	tmp_result = polaris10_enable_sclk_control(hwmgr);
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 			"Failed to enable SCLK control!", result = tmp_result);
@@ -2606,6 +2652,7 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
 
 	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
 	if (hwmgr->chip_id == CHIP_POLARIS11)
 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 			PHM_PlatformCaps_SPLLShutdownSupport);
@@ -2896,6 +2943,31 @@ static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+{
+	struct phm_ppt_v1_information *table_info =
+			(struct phm_ppt_v1_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
+			table_info->vdd_dep_on_mclk;
+	struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+			table_info->vddc_lookup_table;
+	uint32_t i;
+
+	if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) {
+		if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
+			return 0;
+
+		for (i = 0; i < lookup_table->count; i++) {
+			if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
+				dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
+				return 0;
+			}
+		}
+	}
+	return 0;
+}
+
+
 int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -2938,6 +3010,11 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 	data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE;
 	data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE;
 
+	data->enable_tdc_limit_feature = true;
+	data->enable_pkg_pwr_tracking_feature = true;
+	data->force_pcie_gen = PP_PCIEGenInvalid;
+	data->mclk_stutter_mode_threshold = 40000;
+
 	if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
 			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
 		data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
@@ -2962,8 +3039,13 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 			data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
 	}
 
+	if (table_info->cac_dtp_table->usClockStretchAmount != 0)
+		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+				PHM_PlatformCaps_ClockStretcher);
+
 	polaris10_set_features_platform_caps(hwmgr);
 
+	polaris10_patch_voltage_workaround(hwmgr);
 	polaris10_init_dpm_defaults(hwmgr);
 
 	/* Get leakage voltage based on leakage ID. */
@@ -3520,10 +3602,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
 	ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
 	ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
 			(ATOM_Tonga_POWERPLAYTABLE *)pp_table;
-	ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
-			(ATOM_Tonga_SCLK_Dependency_Table *)
+	PPTable_Generic_SubTable_Header *sclk_dep_table =
+			(PPTable_Generic_SubTable_Header *)
 			(((unsigned long)powerplay_table) +
 			le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
+
 	ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
 			(ATOM_Tonga_MCLK_Dependency_Table *)
 			(((unsigned long)powerplay_table) +
@@ -3575,7 +3658,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
 	/* Performance levels are arranged from low to high. */
 	performance_level->memory_clock = mclk_dep_table->entries
 			[state_entry->ucMemoryClockIndexLow].ulMclk;
-	performance_level->engine_clock = sclk_dep_table->entries
+	if (sclk_dep_table->ucRevId == 0)
+		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+			[state_entry->ucEngineClockIndexLow].ulSclk;
+	else if (sclk_dep_table->ucRevId == 1)
+		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
 			[state_entry->ucEngineClockIndexLow].ulSclk;
 	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
 			state_entry->ucPCIEGenLow);
@@ -3586,8 +3673,14 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
 			[polaris10_power_state->performance_level_count++]);
 	performance_level->memory_clock = mclk_dep_table->entries
 			[state_entry->ucMemoryClockIndexHigh].ulMclk;
-	performance_level->engine_clock = sclk_dep_table->entries
+
+	if (sclk_dep_table->ucRevId == 0)
+		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+			[state_entry->ucEngineClockIndexHigh].ulSclk;
+	else if (sclk_dep_table->ucRevId == 1)
+		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
 			[state_entry->ucEngineClockIndexHigh].ulSclk;
+
 	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
 			state_entry->ucPCIEGenHigh);
 	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
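Both hunks above use the same dispatch idiom: the SCLK dependency table is referenced through a generic sub-table header and cast to the Tonga or Polaris layout according to ucRevId. A reduced model of that dispatch; the struct names are shortened and the record layouts are illustrative, not the ATOM ones:

    #include <stdint.h>
    #include <stdio.h>

    struct generic_header { uint8_t rev_id; uint8_t num_entries; };

    struct tonga_entry   { uint8_t vdd_ind; uint32_t sclk; };
    struct polaris_entry { uint8_t vdd_ind; uint32_t sclk; uint32_t sclk_offset; };

    struct tonga_table   { struct generic_header hdr; struct tonga_entry entries[1]; };
    struct polaris_table { struct generic_header hdr; struct polaris_entry entries[1]; };

    /* rev_id selects which concrete layout the header fronts. */
    static uint32_t engine_clock(const struct generic_header *tbl, int idx)
    {
        if (tbl->rev_id == 0) /* legacy Tonga layout */
            return ((const struct tonga_table *)tbl)->entries[idx].sclk;
        if (tbl->rev_id == 1) /* extended Polaris layout */
            return ((const struct polaris_table *)tbl)->entries[idx].sclk;
        return 0; /* unknown revision: caller decides how to fail */
    }

    int main(void)
    {
        struct polaris_table t = {
            .hdr = { .rev_id = 1, .num_entries = 1 },
            .entries = { { .vdd_ind = 0, .sclk = 112000, .sclk_offset = 0 } },
        };
        printf("%u\n", engine_clock(&t.hdr, 0)); /* prints 112000 */
        return 0;
    }

The cast back through the first member is well defined here because the generic header is the initial field of both concrete tables, which is exactly the property the patch relies on.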
@@ -3645,7 +3738,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
 	switch (state->classification.ui_label) {
 	case PP_StateUILabel_Performance:
 		data->use_pcie_performance_levels = true;
-
 		for (i = 0; i < ps->performance_level_count; i++) {
 			if (data->pcie_gen_performance.max <
 					ps->performance_levels[i].pcie_gen)
@@ -3661,7 +3753,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
 					ps->performance_levels[i].pcie_lane)
 				data->pcie_lane_performance.max =
 						ps->performance_levels[i].pcie_lane;
-
 			if (data->pcie_lane_performance.min >
 					ps->performance_levels[i].pcie_lane)
 				data->pcie_lane_performance.min =
@@ -4187,12 +4278,9 @@ int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
 {
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
 	uint32_t mm_boot_level_offset, mm_boot_level_value;
-	struct phm_ppt_v1_information *table_info =
-			(struct phm_ppt_v1_information *)(hwmgr->pptable);
 
 	if (!bgate) {
-		data->smc_state_table.SamuBootLevel =
-				(uint8_t) (table_info->mm_dep_table->count - 1);
+		data->smc_state_table.SamuBootLevel = 0;
 		mm_boot_level_offset = data->dpm_table_start +
 				offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
 		mm_boot_level_offset /= 4;
@@ -4327,6 +4415,15 @@ static int polaris10_notify_link_speed_change_after_state_change(
 	return 0;
 }
 
+static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr)
+{
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+			(PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+	return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
+}
+
 static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
 {
 	int tmp_result, result = 0;
@@ -4375,6 +4472,11 @@ static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *i
 			"Failed to program memory timing parameters!",
 			result = tmp_result);
 
+	tmp_result = polaris10_notify_smc_display(hwmgr);
+	PP_ASSERT_WITH_CODE((0 == tmp_result),
+			"Failed to notify smc display settings!",
+			result = tmp_result);
+
 	tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr);
 	PP_ASSERT_WITH_CODE((0 == tmp_result),
 			"Failed to unfreeze SCLK MCLK DPM!",
@@ -4409,6 +4511,7 @@ static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_
 			PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
 }
 
+
 int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
 {
 	PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
@@ -4428,8 +4531,6 @@ int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwm
 
 	if (num_active_displays > 1)  /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
 		polaris10_notify_smc_display_change(hwmgr, false);
-	else
-		polaris10_notify_smc_display_change(hwmgr, true);
 
 	return 0;
 }
@@ -4470,6 +4571,8 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
 		frame_time_in_us = 1000000 / refresh_rate;
 
 	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+	data->frame_time_x2 = frame_time_in_us * 2 / 100;
+
 	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
 
 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
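frame_time_x2 stores twice the frame time in 100 us units, which is what the new polaris10_notify_smc_display helper earlier in the patch hands to PPSMC_MSG_SetVBITimeout. Worked through for a 60 Hz display:

    #include <stdio.h>

    int main(void)
    {
        /* Worked example of the frame_time_x2 bookkeeping above. */
        unsigned refresh_rate = 60;
        unsigned frame_time_in_us = 1000000 / refresh_rate;  /* 16666 us  */
        unsigned frame_time_x2 = frame_time_in_us * 2 / 100; /* 333: twice
                                                                the frame time,
                                                                in 100 us units */
        printf("%u\n", frame_time_x2);
        return 0;
    }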
@@ -4478,8 +4581,6 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
 
 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
 
-	polaris10_notify_smc_display_change(hwmgr, num_active_displays != 0);
-
 	return 0;
 }
 
@@ -4591,7 +4692,7 @@ int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
 		return 0;
 	}
 
-	data->need_long_memory_training = true;
+	data->need_long_memory_training = false;
 
 /*
  *	PPMCME_FirmwareDescriptorEntry *pfd = NULL;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
index beedf35cbfa6..afc3434822d1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
@@ -312,6 +312,10 @@ struct polaris10_hwmgr {
 
 	/* soft pptable for re-uploading into smu */
 	void *soft_pp_table;
+
+	uint32_t avfs_vdroop_override_setting;
+	bool apply_avfs_cks_off_voltage;
+	uint32_t frame_time_x2;
 };
 
 /* To convert to Q8.8 format for firmware */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
index aba167f7d167..b206632d4650 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
@@ -625,10 +625,14 @@ static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
 	int ret;
 	struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
 
-	if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
+	if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
 		return 0;
 
+	ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+			PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+
 	ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
 			0 : -1;
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
index 58742e0d1492..a3c38bbd1e94 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
@@ -44,6 +44,20 @@ bool acpi_atcs_functions_supported(void *device, uint32_t index)
 	return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false;
 }
 
+bool acpi_atcs_notify_pcie_device_ready(void *device)
+{
+	int32_t temp_buffer = 1;
+
+	return cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS,
+				ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION,
+				&temp_buffer,
+				NULL,
+				0,
+				sizeof(temp_buffer),
+				0);
+}
+
+
 int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
 {
 	struct atcs_pref_req_input atcs_input;
@@ -52,7 +66,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
 	int result;
 	struct cgs_system_info info = {0};
 
-	if (!acpi_atcs_functions_supported(device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST))
+	if( 0 != acpi_atcs_notify_pcie_device_ready(device))
 		return -EINVAL;
 
 	info.size = sizeof(struct cgs_system_info);
@@ -77,7 +91,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
 				ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST,
 				&atcs_input,
 				&atcs_output,
-				0,
+				1,
 				sizeof(atcs_input),
 				sizeof(atcs_output));
 	if (result != 0)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index da9f5f1b6dc2..bf4e18fd3872 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1302,3 +1302,46 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr
 
 	return 0;
 }
+
+int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param)
+{
+	ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;
+
+	if (param == NULL)
+		return -EINVAL;
+
+	profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *)
+			cgs_atom_get_data_table(hwmgr->device,
+					GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
+					NULL, NULL, NULL);
+	if (!profile)
+		return -1;
+
+	param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0;
+	param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1;
+	param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2;
+	param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma;
+	param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean;
+	param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma;
+	param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0;
+	param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1;
+	param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2;
+	param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0;
+	param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1;
+	param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2;
+	param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+	param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+	param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+	param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSON_m1;
+	param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2;
+	param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b;
+	param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv;
+	param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
+	param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
+	param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
+	param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
+	param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor;
+	param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index d24ebb566905..248c5db5f380 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -250,6 +250,35 @@ struct pp_atomctrl_gpio_pin_assignment {
 };
 typedef struct pp_atomctrl_gpio_pin_assignment pp_atomctrl_gpio_pin_assignment;
 
+struct pp_atom_ctrl__avfs_parameters {
+	uint32_t ulAVFS_meanNsigma_Acontant0;
+	uint32_t ulAVFS_meanNsigma_Acontant1;
+	uint32_t ulAVFS_meanNsigma_Acontant2;
+	uint16_t usAVFS_meanNsigma_DC_tol_sigma;
+	uint16_t usAVFS_meanNsigma_Platform_mean;
+	uint16_t usAVFS_meanNsigma_Platform_sigma;
+	uint32_t ulGB_VDROOP_TABLE_CKSOFF_a0;
+	uint32_t ulGB_VDROOP_TABLE_CKSOFF_a1;
+	uint32_t ulGB_VDROOP_TABLE_CKSOFF_a2;
+	uint32_t ulGB_VDROOP_TABLE_CKSON_a0;
+	uint32_t ulGB_VDROOP_TABLE_CKSON_a1;
+	uint32_t ulGB_VDROOP_TABLE_CKSON_a2;
+	uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+	uint16_t usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+	uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+	uint32_t ulAVFSGB_FUSE_TABLE_CKSON_m1;
+	uint16_t usAVFSGB_FUSE_TABLE_CKSON_m2;
+	uint32_t ulAVFSGB_FUSE_TABLE_CKSON_b;
+	uint16_t usMaxVoltage_0_25mv;
+	uint8_t  ucEnableGB_VDROOP_TABLE_CKSOFF;
+	uint8_t  ucEnableGB_VDROOP_TABLE_CKSON;
+	uint8_t  ucEnableGB_FUSE_TABLE_CKSOFF;
+	uint8_t  ucEnableGB_FUSE_TABLE_CKSON;
+	uint16_t usPSM_Age_ComFactor;
+	uint8_t  ucEnableApplyAVFS_CKS_OFF_Voltage;
+	uint8_t  ucReserved;
+};
+
 extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment);
 extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
 extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr);
@@ -278,5 +307,8 @@ extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clo
 extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
 		uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
 extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table);
+
+extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param);
+
 #endif
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index d27e8c40602a..233eb7f36c1d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -4489,6 +4489,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 	data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE;
 	data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE;
 	data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE;
+	data->force_pcie_gen = PP_PCIEGenInvalid;
 
 	if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
 			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
index 1b44f4e9b8f5..f127198aafc4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
@@ -197,6 +197,22 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
 	ATOM_Tonga_SCLK_Dependency_Record entries[1];	/* Dynamically allocate entries. */
 } ATOM_Tonga_SCLK_Dependency_Table;
 
+typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+	UCHAR ucVddInd;	/* Base voltage */
+	USHORT usVddcOffset;	/* Offset relative to base voltage */
+	ULONG ulSclk;
+	USHORT usEdcCurrent;
+	UCHAR ucReliabilityTemperature;
+	UCHAR ucCKSVOffsetandDisable;	/* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */
+	ULONG ulSclkOffset;
+} ATOM_Polaris_SCLK_Dependency_Record;
+
+typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
+	UCHAR ucRevId;
+	UCHAR ucNumEntries;	/* Number of entries. */
+	ATOM_Polaris_SCLK_Dependency_Record entries[1];	/* Dynamically allocate entries. */
+} ATOM_Polaris_SCLK_Dependency_Table;
+
 typedef struct _ATOM_Tonga_PCIE_Record {
 	UCHAR ucPCIEGenSpeed;
 	UCHAR usPCIELaneWidth;
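As the comment in the new record says, ucCKSVOffsetandDisable is a packed byte: bits 0-6 carry the CKS voltage offset and bit 7 disables the level. This matches the `& 0x7F` / `& 0x80` decode used in tonga_processpptables.c below. A minimal decoder:

    #include <stdint.h>
    #include <stdio.h>

    static void decode_cks(uint8_t v, uint8_t *voffset, int *enabled)
    {
        *voffset = v & 0x7F;    /* low 7 bits: CKS voltage offset */
        *enabled = !(v & 0x80); /* bit 7 set means the level is disabled */
    }

    int main(void)
    {
        uint8_t off;
        int en;

        decode_cks(0x85, &off, &en);
        printf("offset=%u enabled=%d\n", off, en); /* offset=5 enabled=0 */
        return 0;
    }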
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
index 296ec7ef6d45..671fdb4d615a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
@@ -408,41 +408,78 @@ static int get_mclk_voltage_dependency_table(
 static int get_sclk_voltage_dependency_table(
 		struct pp_hwmgr *hwmgr,
 		phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
-		const ATOM_Tonga_SCLK_Dependency_Table * sclk_dep_table
+		const PPTable_Generic_SubTable_Header *sclk_dep_table
 		)
 {
 	uint32_t table_size, i;
 	phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
 
-	PP_ASSERT_WITH_CODE((0 != sclk_dep_table->ucNumEntries),
-		"Invalid PowerPlay Table!", return -1);
+	if (sclk_dep_table->ucRevId < 1) {
+		const ATOM_Tonga_SCLK_Dependency_Table *tonga_table =
+			(ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table;
 
-	table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
-		* sclk_dep_table->ucNumEntries;
+		PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries),
+			"Invalid PowerPlay Table!", return -1);
 
-	sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
-		kzalloc(table_size, GFP_KERNEL);
+		table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+			* tonga_table->ucNumEntries;
 
-	if (NULL == sclk_table)
-		return -ENOMEM;
+		sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
+			kzalloc(table_size, GFP_KERNEL);
 
-	memset(sclk_table, 0x00, table_size);
-
-	sclk_table->count = (uint32_t)sclk_dep_table->ucNumEntries;
-
-	for (i = 0; i < sclk_dep_table->ucNumEntries; i++) {
-		sclk_table->entries[i].vddInd =
-			sclk_dep_table->entries[i].ucVddInd;
-		sclk_table->entries[i].vdd_offset =
-			sclk_dep_table->entries[i].usVddcOffset;
-		sclk_table->entries[i].clk =
-			sclk_dep_table->entries[i].ulSclk;
-		sclk_table->entries[i].cks_enable =
-			(((sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
-		sclk_table->entries[i].cks_voffset =
-			(sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
-	}
+		if (NULL == sclk_table)
+			return -ENOMEM;
+
+		memset(sclk_table, 0x00, table_size);
+
+		sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
+
+		for (i = 0; i < tonga_table->ucNumEntries; i++) {
+			sclk_table->entries[i].vddInd =
+				tonga_table->entries[i].ucVddInd;
+			sclk_table->entries[i].vdd_offset =
+				tonga_table->entries[i].usVddcOffset;
+			sclk_table->entries[i].clk =
+				tonga_table->entries[i].ulSclk;
+			sclk_table->entries[i].cks_enable =
+				(((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+			sclk_table->entries[i].cks_voffset =
+				(tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+		}
+	} else {
+		const ATOM_Polaris_SCLK_Dependency_Table *polaris_table =
+			(ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table;
 
+		PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries),
+			"Invalid PowerPlay Table!", return -1);
+
+		table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+			* polaris_table->ucNumEntries;
+
+		sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
+			kzalloc(table_size, GFP_KERNEL);
+
+		if (NULL == sclk_table)
+			return -ENOMEM;
+
+		memset(sclk_table, 0x00, table_size);
+
+		sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
+
+		for (i = 0; i < polaris_table->ucNumEntries; i++) {
+			sclk_table->entries[i].vddInd =
+				polaris_table->entries[i].ucVddInd;
+			sclk_table->entries[i].vdd_offset =
+				polaris_table->entries[i].usVddcOffset;
+			sclk_table->entries[i].clk =
+				polaris_table->entries[i].ulSclk;
+			sclk_table->entries[i].cks_enable =
+				(((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+			sclk_table->entries[i].cks_voffset =
+				(polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+			sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset;
+		}
+	}
 	*pp_tonga_sclk_dep_table = sclk_table;
 
 	return 0;
708 const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = 745 const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
709 (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) + 746 (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
710 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); 747 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
711 const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = 748 const PPTable_Generic_SubTable_Header *sclk_dep_table =
712 (const ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long) powerplay_table) + 749 (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
713 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); 750 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
714 const ATOM_Tonga_Hard_Limit_Table *pHardLimits = 751 const ATOM_Tonga_Hard_Limit_Table *pHardLimits =
715 (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) + 752 (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) +
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 28f571449495..77e8e33d5870 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -411,6 +411,8 @@ struct phm_cac_tdp_table {
 	uint8_t  ucVr_I2C_Line;
 	uint8_t  ucPlx_I2C_address;
 	uint8_t  ucPlx_I2C_Line;
+	uint32_t usBoostPowerLimit;
+	uint8_t  ucCKS_LDO_REFSEL;
 };
 
 struct phm_ppm_table {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
index 0c6a413eaa5b..b8f4b73c322e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
@@ -27,6 +27,7 @@
 
 #pragma pack(push, 1)
 
+#define PPSMC_MSG_SetGBDroopSettings          ((uint16_t) 0x305)
 
 #define PPSMC_SWSTATE_FLAG_DC                           0x01
 #define PPSMC_SWSTATE_FLAG_UVD                          0x02
@@ -391,6 +392,8 @@ typedef uint16_t PPSMC_Result;
 #define PPSMC_MSG_SetGpuPllDfsForSclk         ((uint16_t) 0x300)
 #define PPSMC_MSG_Didt_Block_Function         ((uint16_t) 0x301)
 
+#define PPSMC_MSG_SetVBITimeout               ((uint16_t) 0x306)
+
 #define PPSMC_MSG_SecureSRBMWrite             ((uint16_t) 0x600)
 #define PPSMC_MSG_SecureSRBMRead              ((uint16_t) 0x601)
 #define PPSMC_MSG_SetAddress                  ((uint16_t) 0x800)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
index 3bd5e69b9045..3df5de2cdab0 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
@@ -26,3 +26,4 @@ extern bool acpi_atcs_functions_supported(void *device,
 extern int acpi_pcie_perf_request(void *device,
 				  uint8_t perf_req,
 				  bool advertise);
+extern bool acpi_atcs_notify_pcie_device_ready(void *device);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74.h b/drivers/gpu/drm/amd/powerplay/inc/smu74.h
index 1a12d85b8e97..fd10a9fa843d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu74.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu74.h
@@ -34,6 +34,30 @@
 #define SMU__NUM_LCLK_DPM_LEVELS 8
 #define SMU__NUM_PCIE_DPM_LEVELS 8
 
+#define EXP_M1  35
+#define EXP_M2  92821
+#define EXP_B   66629747
+
+#define EXP_M1_1  365
+#define EXP_M2_1  658700
+#define EXP_B_1   305506134
+
+#define EXP_M1_2  189
+#define EXP_M2_2  379692
+#define EXP_B_2   194609469
+
+#define EXP_M1_3  99
+#define EXP_M2_3  217915
+#define EXP_B_3   122255994
+
+#define EXP_M1_4  51
+#define EXP_M2_4  122643
+#define EXP_B_4   74893384
+
+#define EXP_M1_5  423
+#define EXP_M2_5  1103326
+#define EXP_B_5   728122621
+
 enum SID_OPTION {
 	SID_OPTION_HI,
 	SID_OPTION_LO,
@@ -548,20 +572,20 @@ struct SMU74_Firmware_Header {
 	uint32_t CacConfigTable;
 	uint32_t CacStatusTable;
 
-
 	uint32_t mcRegisterTable;
 
-
 	uint32_t mcArbDramTimingTable;
 
-
-
-
 	uint32_t PmFuseTable;
 	uint32_t Globals;
 	uint32_t ClockStretcherTable;
 	uint32_t VftTable;
-	uint32_t Reserved[21];
+	uint32_t Reserved1;
+	uint32_t AvfsTable;
+	uint32_t AvfsCksOffGbvTable;
+	uint32_t AvfsMeanNSigma;
+	uint32_t AvfsSclkOffsetTable;
+	uint32_t Reserved[16];
 	uint32_t Signature;
 };
 
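The header edit above carves five named dwords (Reserved1 plus four AVFS table pointers) out of the front of the old Reserved[21] block, leaving Reserved[16], so the firmware header keeps its size and the Signature offset is unchanged. The arithmetic can be checked with a static assertion; the struct names here are ad hoc, only the field set is copied from the hunk:

    #include <stdint.h>

    struct tail_old { uint32_t Reserved[21]; };
    struct tail_new {
        uint32_t Reserved1;
        uint32_t AvfsTable;
        uint32_t AvfsCksOffGbvTable;
        uint32_t AvfsMeanNSigma;
        uint32_t AvfsSclkOffsetTable;
        uint32_t Reserved[16]; /* 1 + 4 + 16 = 21 dwords, as before */
    };

    _Static_assert(sizeof(struct tail_old) == sizeof(struct tail_new),
                   "firmware header tail must not change size");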
@@ -701,8 +725,6 @@ VR Config info is contained in dpmTable.VRConfig */
 struct SMU_ClockStretcherDataTableEntry {
 	uint8_t minVID;
 	uint8_t maxVID;
-
-
 	uint16_t setting;
 };
 typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry;
@@ -769,6 +791,43 @@ struct VFT_TABLE_t {
 typedef struct VFT_TABLE_t VFT_TABLE_t;
 
 
+/* Total margin, root mean square of Fmax + DC + Platform */
+struct AVFS_Margin_t {
+	VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_Margin_t AVFS_Margin_t;
+
+#define BTCGB_VDROOP_TABLE_MAX_ENTRIES 2
+#define AVFSGB_VDROOP_TABLE_MAX_ENTRIES 2
+
+struct GB_VDROOP_TABLE_t {
+	int32_t a0;
+	int32_t a1;
+	int32_t a2;
+	uint32_t spare;
+};
+typedef struct GB_VDROOP_TABLE_t GB_VDROOP_TABLE_t;
+
+struct AVFS_CksOff_Gbv_t {
+	VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_CksOff_Gbv_t AVFS_CksOff_Gbv_t;
+
+struct AVFS_meanNsigma_t {
+	uint32_t Aconstant[3];
+	uint16_t DC_tol_sigma;
+	uint16_t Platform_mean;
+	uint16_t Platform_sigma;
+	uint16_t PSM_Age_CompFactor;
+	uint8_t  Static_Voltage_Offset[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_meanNsigma_t AVFS_meanNsigma_t;
+
+struct AVFS_Sclk_Offset_t {
+	uint16_t Sclk_Offset[8];
+};
+typedef struct AVFS_Sclk_Offset_t AVFS_Sclk_Offset_t;
+
 #endif
 
 
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h
index 0dfe82336dc7..899d6d8108c2 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h
@@ -223,6 +223,16 @@ struct SMU74_Discrete_StateInfo {
 
 typedef struct SMU74_Discrete_StateInfo SMU74_Discrete_StateInfo;
 
+struct SMU_QuadraticCoeffs {
+	int32_t m1;
+	uint32_t b;
+
+	int16_t m2;
+	uint8_t m1_shift;
+	uint8_t m2_shift;
+};
+typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
+
 struct SMU74_Discrete_DpmTable {
 
 	SMU74_PIDController GraphicsPIDController;
@@ -258,7 +268,15 @@ struct SMU74_Discrete_DpmTable {
 	uint8_t  ThermOutPolarity;
 	uint8_t  ThermOutMode;
 	uint8_t  BootPhases;
-	uint32_t Reserved[4];
+
+	uint8_t  VRHotLevel;
+	uint8_t  LdoRefSel;
+	uint8_t  Reserved1[2];
+	uint16_t FanStartTemperature;
+	uint16_t FanStopTemperature;
+	uint16_t MaxVoltage;
+	uint16_t Reserved2;
+	uint32_t Reserved[1];
 
 	SMU74_Discrete_GraphicsLevel GraphicsLevel[SMU74_MAX_LEVELS_GRAPHICS];
 	SMU74_Discrete_MemoryLevel   MemoryACPILevel;
@@ -347,6 +365,8 @@ struct SMU74_Discrete_DpmTable {
347 365
348 uint32_t CurrSclkPllRange; 366 uint32_t CurrSclkPllRange;
349 sclkFcwRange_t SclkFcwRangeTable[NUM_SCLK_RANGE]; 367 sclkFcwRange_t SclkFcwRangeTable[NUM_SCLK_RANGE];
368 GB_VDROOP_TABLE_t BTCGB_VDROOP_TABLE[BTCGB_VDROOP_TABLE_MAX_ENTRIES];
369 SMU_QuadraticCoeffs AVFSGB_VDROOP_TABLE[AVFSGB_VDROOP_TABLE_MAX_ENTRIES];
350}; 370};
351 371
352typedef struct SMU74_Discrete_DpmTable SMU74_Discrete_DpmTable; 372typedef struct SMU74_Discrete_DpmTable SMU74_Discrete_DpmTable;
@@ -550,16 +570,6 @@ struct SMU7_AcpiScoreboard {
550 570
551typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard; 571typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard;
552 572
553struct SMU_QuadraticCoeffs {
554 int32_t m1;
555 uint32_t b;
556
557 int16_t m2;
558 uint8_t m1_shift;
559 uint8_t m2_shift;
560};
561typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
562
563struct SMU74_Discrete_PmFuses { 573struct SMU74_Discrete_PmFuses {
564 uint8_t BapmVddCVidHiSidd[8]; 574 uint8_t BapmVddCVidHiSidd[8];
565 uint8_t BapmVddCVidLoSidd[8]; 575 uint8_t BapmVddCVidLoSidd[8];
@@ -821,6 +831,17 @@ typedef struct SMU7_GfxCuPgScoreboard SMU7_GfxCuPgScoreboard;
821#define DB_PCC_SHIFT 26 831#define DB_PCC_SHIFT 26
822#define DB_EDC_SHIFT 27 832#define DB_EDC_SHIFT 27
823 833
834#define BTCGB0_Vdroop_Enable_MASK 0x1
835#define BTCGB1_Vdroop_Enable_MASK 0x2
836#define AVFSGB0_Vdroop_Enable_MASK 0x4
837#define AVFSGB1_Vdroop_Enable_MASK 0x8
838
839#define BTCGB0_Vdroop_Enable_SHIFT 0
840#define BTCGB1_Vdroop_Enable_SHIFT 1
841#define AVFSGB0_Vdroop_Enable_SHIFT 2
842#define AVFSGB1_Vdroop_Enable_SHIFT 3
843
844
824#pragma pack(pop) 845#pragma pack(pop)
825 846
826 847
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 043b6ac09d5f..5dba7c509710 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -52,19 +52,18 @@
52static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { 52static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
53 /* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */ 53 /* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */
54 /* Voltage, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, DynRm, DynRm1 Did, Padding,ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */ 54 /* Voltage, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, DynRm, DynRm1 Did, Padding,ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */
55 { 0x3c0fd047, 0x00, 0x03, 0x1e00, 0x00200410, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x30750000, 0, 0, 0, 0, 0, 0, 0 } }, 55 { 0x100ea446, 0x00, 0x03, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x30750000, 0x3000, 0, 0x2600, 0, 0, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } },
56 { 0xa00fd047, 0x01, 0x04, 0x1e00, 0x00800510, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x409c0000, 0, 0, 0, 0, 0, 0, 0 } }, 56 { 0x400ea446, 0x01, 0x04, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x409c0000, 0x2000, 0, 0x1e00, 1, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } },
57 { 0x0410d047, 0x01, 0x00, 0x1e00, 0x00600410, 0x87020000, 0, 0, 0x0e, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x50c30000, 0, 0, 0, 0, 0, 0, 0 } }, 57 { 0x740ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x50c30000, 0x2800, 0, 0x2000, 1, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } },
58 { 0x6810d047, 0x01, 0x00, 0x1e00, 0x00800410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x60ea0000, 0, 0, 0, 0, 0, 0, 0 } }, 58 { 0xa40ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x60ea0000, 0x3000, 0, 0x2600, 1, 1, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } },
59 { 0xcc10d047, 0x01, 0x00, 0x1e00, 0x00e00410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xe8fd0000, 0, 0, 0, 0, 0, 0, 0 } }, 59 { 0xd80ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x70110100, 0x3800, 0, 0x2c00, 1, 1, 0x0004, 0x1203, 0xffff, 0x3600, 0xc9e2, 0x2e00 } },
60 { 0x3011d047, 0x01, 0x00, 0x1e00, 0x00400510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x70110100, 0, 0, 0, 0, 0, 0, 0 } }, 60 { 0x3c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x80380100, 0x2000, 0, 0x1e00, 2, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } },
61 { 0x9411d047, 0x01, 0x00, 0x1e00, 0x00a00510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xf8240100, 0, 0, 0, 0, 0, 0, 0 } }, 61 { 0x6c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x905f0100, 0x2400, 0, 0x1e00, 2, 1, 0x0004, 0x8901, 0xffff, 0x2300, 0x314c, 0x1d00 } },
62 { 0xf811d047, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x80380100, 0, 0, 0, 0, 0, 0, 0 } } 62 { 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }
63}; 63};
64 64
65static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = 65static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 =
66 {0x50140000, 0x50140000, 0x00320000, 0x00, 0x00, 66 {0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
67 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0000, 0x00, 0x00};
68 67
69/** 68/**
70* Set the address for reading/writing the SMC SRAM space. 69* Set the address for reading/writing the SMC SRAM space.
@@ -219,6 +218,18 @@ bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr)
219 && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); 218 && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
220} 219}
221 220
221static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
222{
223 uint32_t efuse;
224
225 efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
226 efuse &= 0x00000001;
227 if (efuse)
228 return true;
229
230 return false;
231}
232
222/** 233/**
223* Send a message to the SMC, and wait for its response. 234* Send a message to the SMC, and wait for its response.
224* 235*
@@ -228,21 +239,27 @@ bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr)
228*/ 239*/
229int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) 240int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
230{ 241{
242 int ret;
243
231 if (!polaris10_is_smc_ram_running(smumgr)) 244 if (!polaris10_is_smc_ram_running(smumgr))
232 return -1; 245 return -1;
233 246
247
234 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); 248 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
235 249
236 if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) 250 ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
237 printk("Failed to send Previous Message.\n");
238 251
252 if (ret != 1)
 253 printk("failed to send previous message %x, ret is %d\n", msg, ret);
239 254
240 cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); 255 cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
241 256
242 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); 257 SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
243 258
244 if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) 259 ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
245 printk("Failed to send Message.\n"); 260
261 if (ret != 1)
262 printk("\n failed to send message %x ret is %d \n", msg, ret);
246 263
247 return 0; 264 return 0;
248} 265}
@@ -953,6 +970,11 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr)
953 (cgs_handle_t)smu_data->smu_buffer.handle); 970 (cgs_handle_t)smu_data->smu_buffer.handle);
954 return -1;); 971 return -1;);
955 972
973 if (polaris10_is_hw_avfs_present(smumgr))
974 smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
975 else
976 smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
977
956 return 0; 978 return 0;
957} 979}
958 980
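The polaris10_is_hw_avfs_present() helper added above gates AVFS support on a single fuse bit: word 49 of the SMU efuse block, bit 0. A standalone model of that check, with the cgs_read_ind_register() read replaced by an array lookup; everything beyond the word index and mask is illustrative.

#include <stdbool.h>
#include <stdint.h>

/* Standalone model of the efuse gate above: word 49, bit 0 of the
 * efuse block decides whether hardware AVFS is fused on. */
static bool hw_avfs_present(const uint32_t *efuse_words)
{
        return (efuse_words[49] & 0x00000001) != 0;
}

int main(void)
{
        uint32_t fuses[64] = { [49] = 0x1 };
        return hw_avfs_present(fuses) ? 0 : 1;
}
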
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 39802c0539b6..3d34fc4ca826 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -266,9 +266,10 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev)
266 if (!ret) 266 if (!ret)
267 ret = atmel_hlcdc_check_endpoint(dev, &ep); 267 ret = atmel_hlcdc_check_endpoint(dev, &ep);
268 268
269 of_node_put(ep_np); 269 if (ret) {
270 if (ret) 270 of_node_put(ep_np);
271 return ret; 271 return ret;
272 }
272 } 273 }
273 274
274 for_each_endpoint_of_node(dev->dev->of_node, ep_np) { 275 for_each_endpoint_of_node(dev->dev->of_node, ep_np) {
@@ -276,9 +277,10 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev)
276 if (!ret) 277 if (!ret)
277 ret = atmel_hlcdc_attach_endpoint(dev, &ep); 278 ret = atmel_hlcdc_attach_endpoint(dev, &ep);
278 279
279 of_node_put(ep_np); 280 if (ret) {
280 if (ret) 281 of_node_put(ep_np);
281 return ret; 282 return ret;
283 }
282 } 284 }
283 285
284 return 0; 286 return 0;
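Both hunks above move the of_node_put() so it runs only on the early-return path: for_each_endpoint_of_node() already drops the previous endpoint's reference when it advances, so the old unconditional put released each reference twice. A toy refcount model of the corrected pattern, where node_get/node_put are stand-ins for the OF helpers:

#include <assert.h>

/* The iterator is assumed to take a reference at the top of each
 * iteration and drop it when it advances; the body therefore puts the
 * node only when it bails out early. */
struct node { int refs; };

static void node_get(struct node *n) { n->refs++; }
static void node_put(struct node *n) { assert(n->refs > 0); n->refs--; }

static int body(struct node *n, int fail)
{
        if (fail) {
                node_put(n);    /* early return: we still own the reference */
                return -1;
        }
        return 0;               /* normal path: the iterator drops it */
}

int main(void)
{
        struct node n = { 0 };

        node_get(&n);                   /* iterator takes a reference */
        if (body(&n, 0) == 0)
                node_put(&n);           /* iterator advance drops it */
        assert(n.refs == 0);
        return 0;
}
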
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index aef3ca8a81fa..016c191221f3 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -339,6 +339,8 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
339 339
340 atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 340 atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff,
341 factor_reg); 341 factor_reg);
342 } else {
343 atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0);
342 } 344 }
343} 345}
344 346
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index c204ef32df16..9bb99e274d23 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1296,14 +1296,39 @@ EXPORT_SYMBOL(drm_atomic_add_affected_planes);
1296 */ 1296 */
1297void drm_atomic_legacy_backoff(struct drm_atomic_state *state) 1297void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
1298{ 1298{
1299 struct drm_device *dev = state->dev;
1300 unsigned crtc_mask = 0;
1301 struct drm_crtc *crtc;
1299 int ret; 1302 int ret;
1303 bool global = false;
1304
1305 drm_for_each_crtc(crtc, dev) {
1306 if (crtc->acquire_ctx != state->acquire_ctx)
1307 continue;
1308
1309 crtc_mask |= drm_crtc_mask(crtc);
1310 crtc->acquire_ctx = NULL;
1311 }
1312
1313 if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
1314 global = true;
1315
1316 dev->mode_config.acquire_ctx = NULL;
1317 }
1300 1318
1301retry: 1319retry:
1302 drm_modeset_backoff(state->acquire_ctx); 1320 drm_modeset_backoff(state->acquire_ctx);
1303 1321
1304 ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx); 1322 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
1305 if (ret) 1323 if (ret)
1306 goto retry; 1324 goto retry;
1325
1326 drm_for_each_crtc(crtc, dev)
1327 if (drm_crtc_mask(crtc) & crtc_mask)
1328 crtc->acquire_ctx = state->acquire_ctx;
1329
1330 if (global)
1331 dev->mode_config.acquire_ctx = state->acquire_ctx;
1307} 1332}
1308EXPORT_SYMBOL(drm_atomic_legacy_backoff); 1333EXPORT_SYMBOL(drm_atomic_legacy_backoff);
1309 1334
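The reworked drm_atomic_legacy_backoff() above has to remember which CRTCs (and possibly the global slot) pointed at this acquire context before the locks are dropped, then point exactly those back at it once drm_modeset_lock_all_ctx() succeeds. A minimal standalone model of that mask bookkeeping; the slot struct and function names are invented for the sketch:

#include <stdint.h>

/* detach_ctx() records which slots referenced ctx and clears them;
 * reattach_ctx() restores only those slots after the locks have been
 * retaken. */
struct crtc_slot { void *acquire_ctx; };

static uint32_t detach_ctx(struct crtc_slot *c, int n, void *ctx)
{
        uint32_t mask = 0;

        for (int i = 0; i < n; i++) {
                if (c[i].acquire_ctx != ctx)
                        continue;
                mask |= 1u << i;
                c[i].acquire_ctx = (void *)0;
        }
        return mask;
}

static void reattach_ctx(struct crtc_slot *c, int n, void *ctx, uint32_t mask)
{
        for (int i = 0; i < n; i++)
                if (mask & (1u << i))
                        c[i].acquire_ctx = ctx;
}

int main(void)
{
        int dummy;
        struct crtc_slot slots[2] = { { &dummy }, { 0 } };
        uint32_t mask = detach_ctx(slots, 2, &dummy);

        reattach_ctx(slots, 2, &dummy, mask);
        return slots[0].acquire_ctx == &dummy ? 0 : 1;
}
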
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index a6e42433ef0e..26feb2f8453f 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -528,11 +528,11 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
528int drm_crtc_helper_set_config(struct drm_mode_set *set) 528int drm_crtc_helper_set_config(struct drm_mode_set *set)
529{ 529{
530 struct drm_device *dev; 530 struct drm_device *dev;
531 struct drm_crtc *new_crtc; 531 struct drm_crtc **save_encoder_crtcs, *new_crtc;
532 struct drm_encoder *save_encoders, *new_encoder, *encoder; 532 struct drm_encoder **save_connector_encoders, *new_encoder, *encoder;
533 bool mode_changed = false; /* if true do a full mode set */ 533 bool mode_changed = false; /* if true do a full mode set */
534 bool fb_changed = false; /* if true and !mode_changed just do a flip */ 534 bool fb_changed = false; /* if true and !mode_changed just do a flip */
535 struct drm_connector *save_connectors, *connector; 535 struct drm_connector *connector;
536 int count = 0, ro, fail = 0; 536 int count = 0, ro, fail = 0;
537 const struct drm_crtc_helper_funcs *crtc_funcs; 537 const struct drm_crtc_helper_funcs *crtc_funcs;
538 struct drm_mode_set save_set; 538 struct drm_mode_set save_set;
@@ -574,15 +574,15 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
574 * Allocate space for the backup of all (non-pointer) encoder and 574 * Allocate space for the backup of all (non-pointer) encoder and
575 * connector data. 575 * connector data.
576 */ 576 */
577 save_encoders = kzalloc(dev->mode_config.num_encoder * 577 save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder *
578 sizeof(struct drm_encoder), GFP_KERNEL); 578 sizeof(struct drm_crtc *), GFP_KERNEL);
579 if (!save_encoders) 579 if (!save_encoder_crtcs)
580 return -ENOMEM; 580 return -ENOMEM;
581 581
582 save_connectors = kzalloc(dev->mode_config.num_connector * 582 save_connector_encoders = kzalloc(dev->mode_config.num_connector *
583 sizeof(struct drm_connector), GFP_KERNEL); 583 sizeof(struct drm_encoder *), GFP_KERNEL);
584 if (!save_connectors) { 584 if (!save_connector_encoders) {
585 kfree(save_encoders); 585 kfree(save_encoder_crtcs);
586 return -ENOMEM; 586 return -ENOMEM;
587 } 587 }
588 588
@@ -593,12 +593,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
593 */ 593 */
594 count = 0; 594 count = 0;
595 drm_for_each_encoder(encoder, dev) { 595 drm_for_each_encoder(encoder, dev) {
596 save_encoders[count++] = *encoder; 596 save_encoder_crtcs[count++] = encoder->crtc;
597 } 597 }
598 598
599 count = 0; 599 count = 0;
600 drm_for_each_connector(connector, dev) { 600 drm_for_each_connector(connector, dev) {
601 save_connectors[count++] = *connector; 601 save_connector_encoders[count++] = connector->encoder;
602 } 602 }
603 603
604 save_set.crtc = set->crtc; 604 save_set.crtc = set->crtc;
@@ -631,8 +631,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
631 mode_changed = true; 631 mode_changed = true;
632 } 632 }
633 633
634 /* take a reference on all connectors in set */ 634 /* take a reference on all unbound connectors in set, reuse the
635 * already taken reference for bound connectors
636 */
635 for (ro = 0; ro < set->num_connectors; ro++) { 637 for (ro = 0; ro < set->num_connectors; ro++) {
638 if (set->connectors[ro]->encoder)
639 continue;
636 drm_connector_reference(set->connectors[ro]); 640 drm_connector_reference(set->connectors[ro]);
637 } 641 }
638 642
@@ -754,30 +758,28 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
754 } 758 }
755 } 759 }
756 760
757 /* after fail drop reference on all connectors in save set */ 761 kfree(save_connector_encoders);
758 count = 0; 762 kfree(save_encoder_crtcs);
759 drm_for_each_connector(connector, dev) {
760 drm_connector_unreference(&save_connectors[count++]);
761 }
762
763 kfree(save_connectors);
764 kfree(save_encoders);
765 return 0; 763 return 0;
766 764
767fail: 765fail:
768 /* Restore all previous data. */ 766 /* Restore all previous data. */
769 count = 0; 767 count = 0;
770 drm_for_each_encoder(encoder, dev) { 768 drm_for_each_encoder(encoder, dev) {
771 *encoder = save_encoders[count++]; 769 encoder->crtc = save_encoder_crtcs[count++];
772 } 770 }
773 771
774 count = 0; 772 count = 0;
775 drm_for_each_connector(connector, dev) { 773 drm_for_each_connector(connector, dev) {
776 *connector = save_connectors[count++]; 774 connector->encoder = save_connector_encoders[count++];
777 } 775 }
778 776
779 /* after fail drop reference on all connectors in set */ 777 /* after fail drop reference on all unbound connectors in set, let
778 * bound connectors keep their reference
779 */
780 for (ro = 0; ro < set->num_connectors; ro++) { 780 for (ro = 0; ro < set->num_connectors; ro++) {
781 if (set->connectors[ro]->encoder)
782 continue;
781 drm_connector_unreference(set->connectors[ro]); 783 drm_connector_unreference(set->connectors[ro]);
782 } 784 }
783 785
@@ -787,8 +789,8 @@ fail:
787 save_set.y, save_set.fb)) 789 save_set.y, save_set.fb))
788 DRM_ERROR("failed to restore config after modeset failure\n"); 790 DRM_ERROR("failed to restore config after modeset failure\n");
789 791
790 kfree(save_connectors); 792 kfree(save_connector_encoders);
791 kfree(save_encoders); 793 kfree(save_encoder_crtcs);
792 return ret; 794 return ret;
793} 795}
794EXPORT_SYMBOL(drm_crtc_helper_set_config); 796EXPORT_SYMBOL(drm_crtc_helper_set_config);
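The drm_crtc_helper_set_config() fix above stops snapshotting whole drm_encoder and drm_connector objects (a struct copy that also duplicated refcounts and list pointers) and saves only the pointer fields the function may rewrite. The shape of that backup strategy, reduced to a standalone sketch around the encoder->crtc field:

#include <stdlib.h>

struct crtc;    /* opaque: only the pointer is saved */
struct encoder { struct crtc *crtc; /* ...fields we must not copy... */ };

/* Snapshot just the mutable field, not the whole object, so restoring
 * cannot corrupt refcounts or list heads embedded in the struct. */
static struct crtc **save_encoder_crtcs(const struct encoder *e, int n)
{
        struct crtc **save = calloc(n, sizeof(*save));

        if (!save)
                return NULL;
        for (int i = 0; i < n; i++)
                save[i] = e[i].crtc;
        return save;
}

static void restore_encoder_crtcs(struct encoder *e, struct crtc **save, int n)
{
        for (int i = 0; i < n; i++)
                e[i].crtc = save[i];
}

int main(void)
{
        struct encoder enc[2] = { { 0 }, { 0 } };
        struct crtc **save = save_encoder_crtcs(enc, 2);

        if (!save)
                return 1;
        restore_encoder_crtcs(enc, save, 2);
        free(save);
        return 0;
}
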
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index a13edf5de2d6..6537908050d7 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2927,11 +2927,9 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
2927 drm_dp_port_teardown_pdt(port, port->pdt); 2927 drm_dp_port_teardown_pdt(port, port->pdt);
2928 2928
2929 if (!port->input && port->vcpi.vcpi > 0) { 2929 if (!port->input && port->vcpi.vcpi > 0) {
2930 if (mgr->mst_state) { 2930 drm_dp_mst_reset_vcpi_slots(mgr, port);
2931 drm_dp_mst_reset_vcpi_slots(mgr, port); 2931 drm_dp_update_payload_part1(mgr);
2932 drm_dp_update_payload_part1(mgr); 2932 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2933 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2934 }
2935 } 2933 }
2936 2934
2937 kref_put(&port->kref, drm_dp_free_mst_port); 2935 kref_put(&port->kref, drm_dp_free_mst_port);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 522cfd447892..16353ee81651 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -225,6 +225,7 @@ struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
225 225
226 etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING; 226 etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
227 etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops; 227 etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
228 etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
228 etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START; 229 etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
229 etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1; 230 etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
230 231
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index f6223f907c15..7f9901b7777b 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -31,7 +31,6 @@
31#include "exynos_drm_plane.h" 31#include "exynos_drm_plane.h"
32#include "exynos_drm_drv.h" 32#include "exynos_drm_drv.h"
33#include "exynos_drm_fb.h" 33#include "exynos_drm_fb.h"
34#include "exynos_drm_fbdev.h"
35#include "exynos_drm_iommu.h" 34#include "exynos_drm_iommu.h"
36 35
37/* 36/*
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 468498e3fec1..4c1fb3f8b5a6 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -34,7 +34,7 @@
34 34
35struct exynos_dp_device { 35struct exynos_dp_device {
36 struct drm_encoder encoder; 36 struct drm_encoder encoder;
37 struct drm_connector connector; 37 struct drm_connector *connector;
38 struct drm_bridge *ptn_bridge; 38 struct drm_bridge *ptn_bridge;
39 struct drm_device *drm_dev; 39 struct drm_device *drm_dev;
40 struct device *dev; 40 struct device *dev;
@@ -70,7 +70,7 @@ static int exynos_dp_poweroff(struct analogix_dp_plat_data *plat_data)
70static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data) 70static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data)
71{ 71{
72 struct exynos_dp_device *dp = to_dp(plat_data); 72 struct exynos_dp_device *dp = to_dp(plat_data);
73 struct drm_connector *connector = &dp->connector; 73 struct drm_connector *connector = dp->connector;
74 struct drm_display_mode *mode; 74 struct drm_display_mode *mode;
75 int num_modes = 0; 75 int num_modes = 0;
76 76
@@ -103,6 +103,7 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
103 int ret; 103 int ret;
104 104
105 drm_connector_register(connector); 105 drm_connector_register(connector);
106 dp->connector = connector;
106 107
107 /* Pre-empt DP connector creation if there's a bridge */ 108 /* Pre-empt DP connector creation if there's a bridge */
108 if (dp->ptn_bridge) { 109 if (dp->ptn_bridge) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 011211e4167d..edbd98ff293e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -15,7 +15,6 @@
15#include <drm/drmP.h> 15#include <drm/drmP.h>
16#include "exynos_drm_drv.h" 16#include "exynos_drm_drv.h"
17#include "exynos_drm_crtc.h" 17#include "exynos_drm_crtc.h"
18#include "exynos_drm_fbdev.h"
19 18
20static LIST_HEAD(exynos_drm_subdrv_list); 19static LIST_HEAD(exynos_drm_subdrv_list);
21 20
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 3efe1aa89416..d47216488985 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -30,7 +30,6 @@
30 30
31#include "exynos_drm_drv.h" 31#include "exynos_drm_drv.h"
32#include "exynos_drm_fb.h" 32#include "exynos_drm_fb.h"
33#include "exynos_drm_fbdev.h"
34#include "exynos_drm_crtc.h" 33#include "exynos_drm_crtc.h"
35#include "exynos_drm_plane.h" 34#include "exynos_drm_plane.h"
36#include "exynos_drm_iommu.h" 35#include "exynos_drm_iommu.h"
@@ -120,7 +119,6 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = {
120 .timing_base = 0x0, 119 .timing_base = 0x0,
121 .has_clksel = 1, 120 .has_clksel = 1,
122 .has_limited_fmt = 1, 121 .has_limited_fmt = 1,
123 .has_hw_trigger = 1,
124}; 122};
125 123
126static struct fimd_driver_data exynos3_fimd_driver_data = { 124static struct fimd_driver_data exynos3_fimd_driver_data = {
@@ -171,14 +169,11 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = {
171 .lcdblk_vt_shift = 24, 169 .lcdblk_vt_shift = 24,
172 .lcdblk_bypass_shift = 15, 170 .lcdblk_bypass_shift = 15,
173 .lcdblk_mic_bypass_shift = 11, 171 .lcdblk_mic_bypass_shift = 11,
174 .trg_type = I80_HW_TRG,
175 .has_shadowcon = 1, 172 .has_shadowcon = 1,
176 .has_vidoutcon = 1, 173 .has_vidoutcon = 1,
177 .has_vtsel = 1, 174 .has_vtsel = 1,
178 .has_mic_bypass = 1, 175 .has_mic_bypass = 1,
179 .has_dp_clk = 1, 176 .has_dp_clk = 1,
180 .has_hw_trigger = 1,
181 .has_trigger_per_te = 1,
182}; 177};
183 178
184struct fimd_context { 179struct fimd_context {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 493552368295..8564c3da0d22 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -48,13 +48,13 @@
48 48
49/* registers for base address */ 49/* registers for base address */
50#define G2D_SRC_BASE_ADDR 0x0304 50#define G2D_SRC_BASE_ADDR 0x0304
51#define G2D_SRC_STRIDE_REG 0x0308 51#define G2D_SRC_STRIDE 0x0308
52#define G2D_SRC_COLOR_MODE 0x030C 52#define G2D_SRC_COLOR_MODE 0x030C
53#define G2D_SRC_LEFT_TOP 0x0310 53#define G2D_SRC_LEFT_TOP 0x0310
54#define G2D_SRC_RIGHT_BOTTOM 0x0314 54#define G2D_SRC_RIGHT_BOTTOM 0x0314
55#define G2D_SRC_PLANE2_BASE_ADDR 0x0318 55#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
56#define G2D_DST_BASE_ADDR 0x0404 56#define G2D_DST_BASE_ADDR 0x0404
57#define G2D_DST_STRIDE_REG 0x0408 57#define G2D_DST_STRIDE 0x0408
58#define G2D_DST_COLOR_MODE 0x040C 58#define G2D_DST_COLOR_MODE 0x040C
59#define G2D_DST_LEFT_TOP 0x0410 59#define G2D_DST_LEFT_TOP 0x0410
60#define G2D_DST_RIGHT_BOTTOM 0x0414 60#define G2D_DST_RIGHT_BOTTOM 0x0414
@@ -563,7 +563,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
563 563
564 switch (reg_offset) { 564 switch (reg_offset) {
565 case G2D_SRC_BASE_ADDR: 565 case G2D_SRC_BASE_ADDR:
566 case G2D_SRC_STRIDE_REG: 566 case G2D_SRC_STRIDE:
567 case G2D_SRC_COLOR_MODE: 567 case G2D_SRC_COLOR_MODE:
568 case G2D_SRC_LEFT_TOP: 568 case G2D_SRC_LEFT_TOP:
569 case G2D_SRC_RIGHT_BOTTOM: 569 case G2D_SRC_RIGHT_BOTTOM:
@@ -573,7 +573,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
573 reg_type = REG_TYPE_SRC_PLANE2; 573 reg_type = REG_TYPE_SRC_PLANE2;
574 break; 574 break;
575 case G2D_DST_BASE_ADDR: 575 case G2D_DST_BASE_ADDR:
576 case G2D_DST_STRIDE_REG: 576 case G2D_DST_STRIDE:
577 case G2D_DST_COLOR_MODE: 577 case G2D_DST_COLOR_MODE:
578 case G2D_DST_LEFT_TOP: 578 case G2D_DST_LEFT_TOP:
579 case G2D_DST_RIGHT_BOTTOM: 579 case G2D_DST_RIGHT_BOTTOM:
@@ -968,8 +968,8 @@ static int g2d_check_reg_offset(struct device *dev,
968 } else 968 } else
969 buf_info->types[reg_type] = BUF_TYPE_GEM; 969 buf_info->types[reg_type] = BUF_TYPE_GEM;
970 break; 970 break;
971 case G2D_SRC_STRIDE_REG: 971 case G2D_SRC_STRIDE:
972 case G2D_DST_STRIDE_REG: 972 case G2D_DST_STRIDE:
973 if (for_addr) 973 if (for_addr)
974 goto err; 974 goto err;
975 975
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 55f1d37c666a..77f12c00abf9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -242,7 +242,7 @@ exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
242 state->v_ratio == (1 << 15)) 242 state->v_ratio == (1 << 15))
243 height_ok = true; 243 height_ok = true;
244 244
245 if (width_ok & height_ok) 245 if (width_ok && height_ok)
246 return 0; 246 return 0;
247 247
248 DRM_DEBUG_KMS("scaling mode is not supported"); 248 DRM_DEBUG_KMS("scaling mode is not supported");
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 32690332d441..103546834b60 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2365,16 +2365,16 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
2365 task = get_pid_task(file->pid, PIDTYPE_PID); 2365 task = get_pid_task(file->pid, PIDTYPE_PID);
2366 if (!task) { 2366 if (!task) {
2367 ret = -ESRCH; 2367 ret = -ESRCH;
2368 goto out_put; 2368 goto out_unlock;
2369 } 2369 }
2370 seq_printf(m, "\nproc: %s\n", task->comm); 2370 seq_printf(m, "\nproc: %s\n", task->comm);
2371 put_task_struct(task); 2371 put_task_struct(task);
2372 idr_for_each(&file_priv->context_idr, per_file_ctx, 2372 idr_for_each(&file_priv->context_idr, per_file_ctx,
2373 (void *)(unsigned long)m); 2373 (void *)(unsigned long)m);
2374 } 2374 }
2375out_unlock:
2375 mutex_unlock(&dev->filelist_mutex); 2376 mutex_unlock(&dev->filelist_mutex);
2376 2377
2377out_put:
2378 intel_runtime_pm_put(dev_priv); 2378 intel_runtime_pm_put(dev_priv);
2379 mutex_unlock(&dev->struct_mutex); 2379 mutex_unlock(&dev->struct_mutex);
2380 2380
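The i915_ppgtt_info() fix above reorders the error labels so that a failure after taking dev->filelist_mutex unwinds through the mutex_unlock before the runtime-PM put. A minimal sketch of that reverse-order unwind discipline, with the two resources modeled as counters:

#include <assert.h>

/* Resources are released in the reverse of their acquisition order,
 * and the error label sits before every release the failure point
 * still owes. */
static int pm_refs, mutex_held;

static int do_work(int fail_late)
{
        int ret = 0;

        pm_refs++;              /* stands in for intel_runtime_pm_get() */
        mutex_held++;           /* stands in for mutex_lock() */

        if (fail_late) {
                ret = -1;
                goto out_unlock;        /* must still drop the mutex */
        }

out_unlock:
        mutex_held--;
        pm_refs--;
        return ret;
}

int main(void)
{
        do_work(1);
        assert(pm_refs == 0 && mutex_held == 0);
        return 0;
}
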
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5faacc6e548d..7c334e902266 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3481,6 +3481,7 @@ int intel_bios_init(struct drm_i915_private *dev_priv);
3481bool intel_bios_is_valid_vbt(const void *buf, size_t size); 3481bool intel_bios_is_valid_vbt(const void *buf, size_t size);
3482bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); 3482bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
3483bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); 3483bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
3484bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
3484bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); 3485bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
3485bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); 3486bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
3486bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); 3487bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b235b6e88ead..b9022fa053d6 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -139,6 +139,11 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
139 else 139 else
140 panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; 140 panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
141 141
142 panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) |
143 dvo_timing->himage_lo;
144 panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) |
145 dvo_timing->vimage_lo;
146
142 /* Some VBTs have bogus h/vtotal values */ 147 /* Some VBTs have bogus h/vtotal values */
143 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) 148 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
144 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; 149 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
@@ -1187,7 +1192,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
1187 } 1192 }
1188 if (bdb->version < 106) { 1193 if (bdb->version < 106) {
1189 expected_size = 22; 1194 expected_size = 22;
1190 } else if (bdb->version < 109) { 1195 } else if (bdb->version < 111) {
1191 expected_size = 27; 1196 expected_size = 27;
1192 } else if (bdb->version < 195) { 1197 } else if (bdb->version < 195) {
1193 BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33); 1198 BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
@@ -1546,6 +1551,45 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
1546} 1551}
1547 1552
1548/** 1553/**
1554 * intel_bios_is_port_present - is the specified digital port present
1555 * @dev_priv: i915 device instance
1556 * @port: port to check
1557 *
1558 * Return true if the device in %port is present.
1559 */
1560bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
1561{
1562 static const struct {
1563 u16 dp, hdmi;
1564 } port_mapping[] = {
1565 [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
1566 [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
1567 [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
1568 [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
1569 };
1570 int i;
1571
1572 /* FIXME maybe deal with port A as well? */
1573 if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
1574 return false;
1575
1576 if (!dev_priv->vbt.child_dev_num)
1577 return false;
1578
1579 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
1580 const union child_device_config *p_child =
1581 &dev_priv->vbt.child_dev[i];
1582 if ((p_child->common.dvo_port == port_mapping[port].dp ||
1583 p_child->common.dvo_port == port_mapping[port].hdmi) &&
1584 (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
1585 DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
1586 return true;
1587 }
1588
1589 return false;
1590}
1591
1592/**
1549 * intel_bios_is_port_edp - is the device in given port eDP 1593 * intel_bios_is_port_edp - is the device in given port eDP
1550 * @dev_priv: i915 device instance 1594 * @dev_priv: i915 device instance
1551 * @port: port to check 1595 * @port: port to check
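intel_bios_is_port_present() above pairs a designated-initializer lookup table, keyed by the port enum, with a bounds check before indexing. The same pattern reduced to a standalone sketch; the numeric DVO values below are invented placeholders, not the real VBT constants.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E };

/* Placeholder DP/HDMI encodings for each port; a child device matches
 * the port if its dvo_port equals either encoding. */
static const struct { uint16_t dp, hdmi; } port_mapping[] = {
        [PORT_B] = { 0x107, 0x101 },
        [PORT_C] = { 0x108, 0x102 },
        [PORT_D] = { 0x109, 0x103 },
        [PORT_E] = { 0x10b, 0x104 },
};

static bool port_matches(enum port port, uint16_t dvo_port)
{
        if (port == PORT_A ||
            (size_t)port >= sizeof(port_mapping) / sizeof(port_mapping[0]))
                return false;
        return dvo_port == port_mapping[port].dp ||
               dvo_port == port_mapping[port].hdmi;
}

int main(void)
{
        return port_matches(PORT_B, 0x107) ? 0 : 1;
}
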
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2113f401f0ba..04452cf3eae8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8275,12 +8275,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8275{ 8275{
8276 struct drm_i915_private *dev_priv = dev->dev_private; 8276 struct drm_i915_private *dev_priv = dev->dev_private;
8277 struct intel_encoder *encoder; 8277 struct intel_encoder *encoder;
8278 int i;
8278 u32 val, final; 8279 u32 val, final;
8279 bool has_lvds = false; 8280 bool has_lvds = false;
8280 bool has_cpu_edp = false; 8281 bool has_cpu_edp = false;
8281 bool has_panel = false; 8282 bool has_panel = false;
8282 bool has_ck505 = false; 8283 bool has_ck505 = false;
8283 bool can_ssc = false; 8284 bool can_ssc = false;
8285 bool using_ssc_source = false;
8284 8286
8285 /* We need to take the global config into account */ 8287 /* We need to take the global config into account */
8286 for_each_intel_encoder(dev, encoder) { 8288 for_each_intel_encoder(dev, encoder) {
@@ -8307,8 +8309,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8307 can_ssc = true; 8309 can_ssc = true;
8308 } 8310 }
8309 8311
8310 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n", 8312 /* Check if any DPLLs are using the SSC source */
8311 has_panel, has_lvds, has_ck505); 8313 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8314 u32 temp = I915_READ(PCH_DPLL(i));
8315
8316 if (!(temp & DPLL_VCO_ENABLE))
8317 continue;
8318
8319 if ((temp & PLL_REF_INPUT_MASK) ==
8320 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8321 using_ssc_source = true;
8322 break;
8323 }
8324 }
8325
8326 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8327 has_panel, has_lvds, has_ck505, using_ssc_source);
8312 8328
8313 /* Ironlake: try to setup display ref clock before DPLL 8329 /* Ironlake: try to setup display ref clock before DPLL
8314 * enabling. This is only under driver's control after 8330 * enabling. This is only under driver's control after
@@ -8345,9 +8361,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8345 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 8361 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8346 } else 8362 } else
8347 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8363 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8348 } else { 8364 } else if (using_ssc_source) {
8349 final |= DREF_SSC_SOURCE_DISABLE; 8365 final |= DREF_SSC_SOURCE_ENABLE;
8350 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8366 final |= DREF_SSC1_ENABLE;
8351 } 8367 }
8352 8368
8353 if (final == val) 8369 if (final == val)
@@ -8393,7 +8409,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8393 POSTING_READ(PCH_DREF_CONTROL); 8409 POSTING_READ(PCH_DREF_CONTROL);
8394 udelay(200); 8410 udelay(200);
8395 } else { 8411 } else {
8396 DRM_DEBUG_KMS("Disabling SSC entirely\n"); 8412 DRM_DEBUG_KMS("Disabling CPU source output\n");
8397 8413
8398 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 8414 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8399 8415
@@ -8404,16 +8420,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
8404 POSTING_READ(PCH_DREF_CONTROL); 8420 POSTING_READ(PCH_DREF_CONTROL);
8405 udelay(200); 8421 udelay(200);
8406 8422
8407 /* Turn off the SSC source */ 8423 if (!using_ssc_source) {
8408 val &= ~DREF_SSC_SOURCE_MASK; 8424 DRM_DEBUG_KMS("Disabling SSC source\n");
8409 val |= DREF_SSC_SOURCE_DISABLE;
8410 8425
8411 /* Turn off SSC1 */ 8426 /* Turn off the SSC source */
8412 val &= ~DREF_SSC1_ENABLE; 8427 val &= ~DREF_SSC_SOURCE_MASK;
8428 val |= DREF_SSC_SOURCE_DISABLE;
8413 8429
8414 I915_WRITE(PCH_DREF_CONTROL, val); 8430 /* Turn off SSC1 */
8415 POSTING_READ(PCH_DREF_CONTROL); 8431 val &= ~DREF_SSC1_ENABLE;
8416 udelay(200); 8432
8433 I915_WRITE(PCH_DREF_CONTROL, val);
8434 POSTING_READ(PCH_DREF_CONTROL);
8435 udelay(200);
8436 }
8417 } 8437 }
8418 8438
8419 BUG_ON(val != final); 8439 BUG_ON(val != final);
@@ -8427,16 +8447,16 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8427 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 8447 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8428 I915_WRITE(SOUTH_CHICKEN2, tmp); 8448 I915_WRITE(SOUTH_CHICKEN2, tmp);
8429 8449
8430 if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) & 8450 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
8431 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 8451 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8432 DRM_ERROR("FDI mPHY reset assert timeout\n"); 8452 DRM_ERROR("FDI mPHY reset assert timeout\n");
8433 8453
8434 tmp = I915_READ(SOUTH_CHICKEN2); 8454 tmp = I915_READ(SOUTH_CHICKEN2);
8435 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 8455 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8436 I915_WRITE(SOUTH_CHICKEN2, tmp); 8456 I915_WRITE(SOUTH_CHICKEN2, tmp);
8437 8457
8438 if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) & 8458 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
8439 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) 8459 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8440 DRM_ERROR("FDI mPHY reset de-assert timeout\n"); 8460 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8441} 8461}
8442 8462
@@ -9420,8 +9440,8 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9420 val |= LCPLL_CD_SOURCE_FCLK; 9440 val |= LCPLL_CD_SOURCE_FCLK;
9421 I915_WRITE(LCPLL_CTL, val); 9441 I915_WRITE(LCPLL_CTL, val);
9422 9442
9423 if (wait_for_atomic_us(I915_READ(LCPLL_CTL) & 9443 if (wait_for_us(I915_READ(LCPLL_CTL) &
9424 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 9444 LCPLL_CD_SOURCE_FCLK_DONE, 1))
9425 DRM_ERROR("Switching to FCLK failed\n"); 9445 DRM_ERROR("Switching to FCLK failed\n");
9426 9446
9427 val = I915_READ(LCPLL_CTL); 9447 val = I915_READ(LCPLL_CTL);
@@ -9494,8 +9514,8 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9494 val &= ~LCPLL_CD_SOURCE_FCLK; 9514 val &= ~LCPLL_CD_SOURCE_FCLK;
9495 I915_WRITE(LCPLL_CTL, val); 9515 I915_WRITE(LCPLL_CTL, val);
9496 9516
9497 if (wait_for_atomic_us((I915_READ(LCPLL_CTL) & 9517 if (wait_for_us((I915_READ(LCPLL_CTL) &
9498 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 9518 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9499 DRM_ERROR("Switching back to LCPLL failed\n"); 9519 DRM_ERROR("Switching back to LCPLL failed\n");
9500 } 9520 }
9501 9521
@@ -14554,6 +14574,8 @@ static void intel_setup_outputs(struct drm_device *dev)
14554 if (I915_READ(PCH_DP_D) & DP_DETECTED) 14574 if (I915_READ(PCH_DP_D) & DP_DETECTED)
14555 intel_dp_init(dev, PCH_DP_D, PORT_D); 14575 intel_dp_init(dev, PCH_DP_D, PORT_D);
14556 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 14576 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
14577 bool has_edp, has_port;
14578
14557 /* 14579 /*
14558 * The DP_DETECTED bit is the latched state of the DDC 14580 * The DP_DETECTED bit is the latched state of the DDC
14559 * SDA pin at boot. However since eDP doesn't require DDC 14581 * SDA pin at boot. However since eDP doesn't require DDC
@@ -14562,27 +14584,37 @@ static void intel_setup_outputs(struct drm_device *dev)
14562 * Thus we can't rely on the DP_DETECTED bit alone to detect 14584 * Thus we can't rely on the DP_DETECTED bit alone to detect
14563 * eDP ports. Consult the VBT as well as DP_DETECTED to 14585 * eDP ports. Consult the VBT as well as DP_DETECTED to
14564 * detect eDP ports. 14586 * detect eDP ports.
14587 *
14588 * Sadly the straps seem to be missing sometimes even for HDMI
 14589 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both the strap
 14590 * and the VBT for the presence of the port. Additionally we can't
 14591 * trust the port type the VBT declares, as we've seen at least
 14592 * HDMI ports that the VBT claims are DP or eDP.
14565 */ 14593 */
14566 if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && 14594 has_edp = intel_dp_is_edp(dev, PORT_B);
14567 !intel_dp_is_edp(dev, PORT_B)) 14595 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
14596 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
14597 has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
14598 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
14568 intel_hdmi_init(dev, VLV_HDMIB, PORT_B); 14599 intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
14569 if (I915_READ(VLV_DP_B) & DP_DETECTED ||
14570 intel_dp_is_edp(dev, PORT_B))
14571 intel_dp_init(dev, VLV_DP_B, PORT_B);
14572 14600
14573 if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && 14601 has_edp = intel_dp_is_edp(dev, PORT_C);
14574 !intel_dp_is_edp(dev, PORT_C)) 14602 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
14603 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
14604 has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
14605 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
14575 intel_hdmi_init(dev, VLV_HDMIC, PORT_C); 14606 intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
14576 if (I915_READ(VLV_DP_C) & DP_DETECTED ||
14577 intel_dp_is_edp(dev, PORT_C))
14578 intel_dp_init(dev, VLV_DP_C, PORT_C);
14579 14607
14580 if (IS_CHERRYVIEW(dev)) { 14608 if (IS_CHERRYVIEW(dev)) {
14581 /* eDP not supported on port D, so don't check VBT */ 14609 /*
14582 if (I915_READ(CHV_HDMID) & SDVO_DETECTED) 14610 * eDP not supported on port D,
14583 intel_hdmi_init(dev, CHV_HDMID, PORT_D); 14611 * so no need to worry about it
14584 if (I915_READ(CHV_DP_D) & DP_DETECTED) 14612 */
14613 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
14614 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
14585 intel_dp_init(dev, CHV_DP_D, PORT_D); 14615 intel_dp_init(dev, CHV_DP_D, PORT_D);
14616 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
14617 intel_hdmi_init(dev, CHV_HDMID, PORT_D);
14586 } 14618 }
14587 14619
14588 intel_dsi_init(dev); 14620 intel_dsi_init(dev);
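The new loop in ironlake_init_pch_refclk() above treats the SSC reference as in use whenever any enabled shared DPLL selects the spread-spectrum input, and only then keeps the source on instead of disabling it. A standalone model of that scan, with I915_READ(PCH_DPLL(i)) replaced by an array lookup; the bit positions mirror the i915 definitions but are restated here from memory, so treat them as illustrative.

#include <stdbool.h>
#include <stdint.h>

#define DPLL_VCO_ENABLE                 (1u << 31)
#define PLL_REF_INPUT_MASK              (3u << 13)
#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3u << 13)

/* A DPLL counts as an SSC user only when its VCO is enabled and its
 * reference input selects the spread-spectrum source. */
static bool any_pll_using_ssc(const uint32_t *pll_regs, int num_plls)
{
        for (int i = 0; i < num_plls; i++) {
                if (!(pll_regs[i] & DPLL_VCO_ENABLE))
                        continue;
                if ((pll_regs[i] & PLL_REF_INPUT_MASK) ==
                    PLLB_REF_INPUT_SPREADSPECTRUMIN)
                        return true;
        }
        return false;
}

int main(void)
{
        uint32_t plls[2] = {
                DPLL_VCO_ENABLE | PLLB_REF_INPUT_SPREADSPECTRUMIN, 0
        };
        return any_pll_using_ssc(plls, 2) ? 0 : 1;
}
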
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f192f58708c2..40745e38d438 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -663,7 +663,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
663 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 663 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
664 msecs_to_jiffies_timeout(10)); 664 msecs_to_jiffies_timeout(10));
665 else 665 else
666 done = wait_for_atomic(C, 10) == 0; 666 done = wait_for(C, 10) == 0;
667 if (!done) 667 if (!done)
668 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n", 668 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
669 has_aux_irq); 669 has_aux_irq);
@@ -4899,13 +4899,15 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4899 4899
4900void intel_dp_encoder_reset(struct drm_encoder *encoder) 4900void intel_dp_encoder_reset(struct drm_encoder *encoder)
4901{ 4901{
4902 struct intel_dp *intel_dp; 4902 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
4903 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4904
4905 if (!HAS_DDI(dev_priv))
4906 intel_dp->DP = I915_READ(intel_dp->output_reg);
4903 4907
4904 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP) 4908 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4905 return; 4909 return;
4906 4910
4907 intel_dp = enc_to_intel_dp(encoder);
4908
4909 pps_lock(intel_dp); 4911 pps_lock(intel_dp);
4910 4912
4911 /* 4913 /*
@@ -4977,9 +4979,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4977 intel_display_power_get(dev_priv, power_domain); 4979 intel_display_power_get(dev_priv, power_domain);
4978 4980
4979 if (long_hpd) { 4981 if (long_hpd) {
4980 /* indicate that we need to restart link training */
4981 intel_dp->train_set_valid = false;
4982
4983 intel_dp_long_pulse(intel_dp->attached_connector); 4982 intel_dp_long_pulse(intel_dp->attached_connector);
4984 if (intel_dp->is_mst) 4983 if (intel_dp->is_mst)
4985 ret = IRQ_HANDLED; 4984 ret = IRQ_HANDLED;
@@ -5725,8 +5724,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5725 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { 5724 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5726 fixed_mode = drm_mode_duplicate(dev, 5725 fixed_mode = drm_mode_duplicate(dev,
5727 dev_priv->vbt.lfp_lvds_vbt_mode); 5726 dev_priv->vbt.lfp_lvds_vbt_mode);
5728 if (fixed_mode) 5727 if (fixed_mode) {
5729 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 5728 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5729 connector->display_info.width_mm = fixed_mode->width_mm;
5730 connector->display_info.height_mm = fixed_mode->height_mm;
5731 }
5730 } 5732 }
5731 mutex_unlock(&dev->mode_config.mutex); 5733 mutex_unlock(&dev->mode_config.mutex);
5732 5734
@@ -5923,9 +5925,9 @@ fail:
5923 return false; 5925 return false;
5924} 5926}
5925 5927
5926void 5928bool intel_dp_init(struct drm_device *dev,
5927intel_dp_init(struct drm_device *dev, 5929 i915_reg_t output_reg,
5928 i915_reg_t output_reg, enum port port) 5930 enum port port)
5929{ 5931{
5930 struct drm_i915_private *dev_priv = dev->dev_private; 5932 struct drm_i915_private *dev_priv = dev->dev_private;
5931 struct intel_digital_port *intel_dig_port; 5933 struct intel_digital_port *intel_dig_port;
@@ -5935,7 +5937,7 @@ intel_dp_init(struct drm_device *dev,
5935 5937
5936 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); 5938 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
5937 if (!intel_dig_port) 5939 if (!intel_dig_port)
5938 return; 5940 return false;
5939 5941
5940 intel_connector = intel_connector_alloc(); 5942 intel_connector = intel_connector_alloc();
5941 if (!intel_connector) 5943 if (!intel_connector)
@@ -5992,7 +5994,7 @@ intel_dp_init(struct drm_device *dev,
5992 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) 5994 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
5993 goto err_init_connector; 5995 goto err_init_connector;
5994 5996
5995 return; 5997 return true;
5996 5998
5997err_init_connector: 5999err_init_connector:
5998 drm_encoder_cleanup(encoder); 6000 drm_encoder_cleanup(encoder);
@@ -6000,8 +6002,7 @@ err_encoder_init:
6000 kfree(intel_connector); 6002 kfree(intel_connector);
6001err_connector_alloc: 6003err_connector_alloc:
6002 kfree(intel_dig_port); 6004 kfree(intel_dig_port);
6003 6005 return false;
6004 return;
6005} 6006}
6006 6007
6007void intel_dp_mst_suspend(struct drm_device *dev) 6008void intel_dp_mst_suspend(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 0b8eefc2acc5..60fb39cd220b 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -85,8 +85,7 @@ static bool
85intel_dp_reset_link_train(struct intel_dp *intel_dp, 85intel_dp_reset_link_train(struct intel_dp *intel_dp,
86 uint8_t dp_train_pat) 86 uint8_t dp_train_pat)
87{ 87{
88 if (!intel_dp->train_set_valid) 88 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
89 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
90 intel_dp_set_signal_levels(intel_dp); 89 intel_dp_set_signal_levels(intel_dp);
91 return intel_dp_set_link_train(intel_dp, dp_train_pat); 90 return intel_dp_set_link_train(intel_dp, dp_train_pat);
92} 91}
@@ -161,23 +160,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
161 break; 160 break;
162 } 161 }
163 162
164 /*
165 * if we used previously trained voltage and pre-emphasis values
166 * and we don't get clock recovery, reset link training values
167 */
168 if (intel_dp->train_set_valid) {
169 DRM_DEBUG_KMS("clock recovery not ok, reset");
170 /* clear the flag as we are not reusing train set */
171 intel_dp->train_set_valid = false;
172 if (!intel_dp_reset_link_train(intel_dp,
173 DP_TRAINING_PATTERN_1 |
174 DP_LINK_SCRAMBLING_DISABLE)) {
175 DRM_ERROR("failed to enable link training\n");
176 return;
177 }
178 continue;
179 }
180
181 /* Check to see if we've tried the max voltage */ 163 /* Check to see if we've tried the max voltage */
182 for (i = 0; i < intel_dp->lane_count; i++) 164 for (i = 0; i < intel_dp->lane_count; i++)
183 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 165 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
@@ -284,7 +266,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
284 /* Make sure clock is still ok */ 266 /* Make sure clock is still ok */
285 if (!drm_dp_clock_recovery_ok(link_status, 267 if (!drm_dp_clock_recovery_ok(link_status,
286 intel_dp->lane_count)) { 268 intel_dp->lane_count)) {
287 intel_dp->train_set_valid = false;
288 intel_dp_link_training_clock_recovery(intel_dp); 269 intel_dp_link_training_clock_recovery(intel_dp);
289 intel_dp_set_link_train(intel_dp, 270 intel_dp_set_link_train(intel_dp,
290 training_pattern | 271 training_pattern |
@@ -301,7 +282,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
301 282
302 /* Try 5 times, then try clock recovery if that fails */ 283 /* Try 5 times, then try clock recovery if that fails */
303 if (tries > 5) { 284 if (tries > 5) {
304 intel_dp->train_set_valid = false;
305 intel_dp_link_training_clock_recovery(intel_dp); 285 intel_dp_link_training_clock_recovery(intel_dp);
306 intel_dp_set_link_train(intel_dp, 286 intel_dp_set_link_train(intel_dp,
307 training_pattern | 287 training_pattern |
@@ -322,10 +302,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
322 302
323 intel_dp_set_idle_link_train(intel_dp); 303 intel_dp_set_idle_link_train(intel_dp);
324 304
325 if (channel_eq) { 305 if (channel_eq)
326 intel_dp->train_set_valid = true;
327 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n"); 306 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
328 }
329} 307}
330 308
331void intel_dp_stop_link_train(struct intel_dp *intel_dp) 309void intel_dp_stop_link_train(struct intel_dp *intel_dp)
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 3ac705936b04..58f60b27837e 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -366,6 +366,9 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
366 DPLL_ID_PCH_PLL_B); 366 DPLL_ID_PCH_PLL_B);
367 } 367 }
368 368
369 if (!pll)
370 return NULL;
371
369 /* reference the pll */ 372 /* reference the pll */
370 intel_reference_shared_dpll(pll, crtc_state); 373 intel_reference_shared_dpll(pll, crtc_state);
371 374
@@ -1374,8 +1377,8 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1374 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); 1377 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1375 POSTING_READ(BXT_PORT_PLL_ENABLE(port)); 1378 POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1376 1379
1377 if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & 1380 if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1378 PORT_PLL_LOCK), 200)) 1381 200))
1379 DRM_ERROR("PLL %d not locked\n", port); 1382 DRM_ERROR("PLL %d not locked\n", port);
1380 1383
1381 /* 1384 /*
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a28b4aac1e02..f7f0f01814f6 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -863,8 +863,6 @@ struct intel_dp {
 863 /* This is called before a link training is started */ 863 /* This is called before a link training is started */
864 void (*prepare_link_retrain)(struct intel_dp *intel_dp); 864 void (*prepare_link_retrain)(struct intel_dp *intel_dp);
865 865
866 bool train_set_valid;
867
868 /* Displayport compliance testing */ 866 /* Displayport compliance testing */
869 unsigned long compliance_test_type; 867 unsigned long compliance_test_type;
870 unsigned long compliance_test_data; 868 unsigned long compliance_test_data;
@@ -1284,7 +1282,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
1284void intel_csr_ucode_resume(struct drm_i915_private *); 1282void intel_csr_ucode_resume(struct drm_i915_private *);
1285 1283
1286/* intel_dp.c */ 1284/* intel_dp.c */
1287void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); 1285bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
1288bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 1286bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
1289 struct intel_connector *intel_connector); 1287 struct intel_connector *intel_connector);
1290void intel_dp_set_link_params(struct intel_dp *intel_dp, 1288void intel_dp_set_link_params(struct intel_dp *intel_dp,
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 366ad6c67ce4..4756ef639648 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -1545,6 +1545,9 @@ void intel_dsi_init(struct drm_device *dev)
1545 goto err; 1545 goto err;
1546 } 1546 }
1547 1547
1548 connector->display_info.width_mm = fixed_mode->width_mm;
1549 connector->display_info.height_mm = fixed_mode->height_mm;
1550
1548 intel_panel_init(&intel_connector->panel, fixed_mode, NULL); 1551 intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
1549 1552
1550 intel_dsi_add_properties(intel_connector); 1553 intel_dsi_add_properties(intel_connector);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index d5a7cfec589b..647127f3aaff 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -824,8 +824,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
824{ 824{
825 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 825 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
826 struct intel_fbc *fbc = &dev_priv->fbc; 826 struct intel_fbc *fbc = &dev_priv->fbc;
827 bool enable_by_default = IS_HASWELL(dev_priv) || 827 bool enable_by_default = IS_BROADWELL(dev_priv);
828 IS_BROADWELL(dev_priv);
829 828
830 if (intel_vgpu_active(dev_priv->dev)) { 829 if (intel_vgpu_active(dev_priv->dev)) {
831 fbc->no_fbc_reason = "VGPU is active"; 830 fbc->no_fbc_reason = "VGPU is active";
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2c3bd9c2573e..a8844702d11b 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -2142,6 +2142,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
2142 enum port port = intel_dig_port->port; 2142 enum port port = intel_dig_port->port;
2143 uint8_t alternate_ddc_pin; 2143 uint8_t alternate_ddc_pin;
2144 2144
2145 DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
2146 port_name(port));
2147
2145 if (WARN(intel_dig_port->max_lanes < 4, 2148 if (WARN(intel_dig_port->max_lanes < 4,
2146 "Not enough lanes (%d) for HDMI on port %c\n", 2149 "Not enough lanes (%d) for HDMI on port %c\n",
2147 intel_dig_port->max_lanes, port_name(port))) 2150 intel_dig_port->max_lanes, port_name(port)))
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index bc53c0dd34d0..96281e628d2a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -1082,6 +1082,8 @@ void intel_lvds_init(struct drm_device *dev)
1082 fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); 1082 fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
1083 if (fixed_mode) { 1083 if (fixed_mode) {
1084 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 1084 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
1085 connector->display_info.width_mm = fixed_mode->width_mm;
1086 connector->display_info.height_mm = fixed_mode->height_mm;
1085 goto out; 1087 goto out;
1086 } 1088 }
1087 } 1089 }
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index c15051de8023..44fb0b35eed3 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -403,9 +403,10 @@ struct lvds_dvo_timing {
403 u8 vsync_off:4; 403 u8 vsync_off:4;
404 u8 rsvd0:6; 404 u8 rsvd0:6;
405 u8 hsync_off_hi:2; 405 u8 hsync_off_hi:2;
406 u8 h_image; 406 u8 himage_lo;
407 u8 v_image; 407 u8 vimage_lo;
408 u8 max_hv; 408 u8 vimage_hi:4;
409 u8 himage_hi:4;
409 u8 h_border; 410 u8 h_border;
410 u8 v_border; 411 u8 v_border;
411 u8 rsvd1:3; 412 u8 rsvd1:3;
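The repacked bitfields above split each physical image dimension into a low byte plus a 4-bit high nibble; the fill_detail_timing_data() hunk earlier in this diff recombines them into width_mm/height_mm. The same arithmetic as a standalone check (the struct name here is invented for the sketch):

#include <stdint.h>
#include <stdio.h>

struct dvo_image_size {
        uint8_t himage_lo;
        uint8_t vimage_lo;
        uint8_t vimage_hi:4;
        uint8_t himage_hi:4;
};

/* width_mm = (himage_hi << 8) | himage_lo, as in intel_bios.c above */
static unsigned width_mm(const struct dvo_image_size *t)
{
        return ((unsigned)t->himage_hi << 8) | t->himage_lo;
}

static unsigned height_mm(const struct dvo_image_size *t)
{
        return ((unsigned)t->vimage_hi << 8) | t->vimage_lo;
}

int main(void)
{
        struct dvo_image_size t = { .himage_lo = 0x2a, .himage_hi = 0x1 };
        printf("%u mm\n", width_mm(&t));        /* 0x12a = 298 mm */
        printf("%u mm\n", height_mm(&t));
        return 0;
}
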
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 300ea03be8f0..d1f248fd3506 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -552,7 +552,8 @@ nouveau_fbcon_init(struct drm_device *dev)
552 if (ret) 552 if (ret)
553 goto fini; 553 goto fini;
554 554
555 fbcon->helper.fbdev->pixmap.buf_align = 4; 555 if (fbcon->helper.fbdev)
556 fbcon->helper.fbdev->pixmap.buf_align = 4;
556 return 0; 557 return 0;
557 558
558fini: 559fini:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 18fab3973ce5..62ad0300cfa5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1614,7 +1614,7 @@ nvkm_device_pci_func = {
1614 .fini = nvkm_device_pci_fini, 1614 .fini = nvkm_device_pci_fini,
1615 .resource_addr = nvkm_device_pci_resource_addr, 1615 .resource_addr = nvkm_device_pci_resource_addr,
1616 .resource_size = nvkm_device_pci_resource_size, 1616 .resource_size = nvkm_device_pci_resource_size,
1617 .cpu_coherent = !IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64), 1617 .cpu_coherent = !IS_ENABLED(CONFIG_ARM),
1618}; 1618};
1619 1619
1620int 1620int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
index 323c79abe468..41bd5d0f7692 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
@@ -276,6 +276,8 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
276 struct pwr_rail_t *r = &stbl.rail[i]; 276 struct pwr_rail_t *r = &stbl.rail[i];
277 struct nvkm_iccsense_rail *rail; 277 struct nvkm_iccsense_rail *rail;
278 struct nvkm_iccsense_sensor *sensor; 278 struct nvkm_iccsense_sensor *sensor;
279 int (*read)(struct nvkm_iccsense *,
280 struct nvkm_iccsense_rail *);
279 281
280 if (!r->mode || r->resistor_mohm == 0) 282 if (!r->mode || r->resistor_mohm == 0)
281 continue; 283 continue;
@@ -284,31 +286,31 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
284 if (!sensor) 286 if (!sensor)
285 continue; 287 continue;
286 288
287 rail = kmalloc(sizeof(*rail), GFP_KERNEL);
288 if (!rail)
289 return -ENOMEM;
290
291 switch (sensor->type) { 289 switch (sensor->type) {
292 case NVBIOS_EXTDEV_INA209: 290 case NVBIOS_EXTDEV_INA209:
293 if (r->rail != 0) 291 if (r->rail != 0)
294 continue; 292 continue;
295 rail->read = nvkm_iccsense_ina209_read; 293 read = nvkm_iccsense_ina209_read;
296 break; 294 break;
297 case NVBIOS_EXTDEV_INA219: 295 case NVBIOS_EXTDEV_INA219:
298 if (r->rail != 0) 296 if (r->rail != 0)
299 continue; 297 continue;
300 rail->read = nvkm_iccsense_ina219_read; 298 read = nvkm_iccsense_ina219_read;
301 break; 299 break;
302 case NVBIOS_EXTDEV_INA3221: 300 case NVBIOS_EXTDEV_INA3221:
303 if (r->rail >= 3) 301 if (r->rail >= 3)
304 continue; 302 continue;
305 rail->read = nvkm_iccsense_ina3221_read; 303 read = nvkm_iccsense_ina3221_read;
306 break; 304 break;
307 default: 305 default:
308 continue; 306 continue;
309 } 307 }
310 308
309 rail = kmalloc(sizeof(*rail), GFP_KERNEL);
310 if (!rail)
311 return -ENOMEM;
311 sensor->rail_mask |= 1 << r->rail; 312 sensor->rail_mask |= 1 << r->rail;
313 rail->read = read;
312 rail->sensor = sensor; 314 rail->sensor = sensor;
313 rail->idx = r->rail; 315 rail->idx = r->rail;
314 rail->mohm = r->resistor_mohm; 316 rail->mohm = r->resistor_mohm;
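The reordering above is a leak fix: the rail was previously kmalloc()ed before the switch, so every continue taken for an unsupported sensor type or out-of-range rail leaked the allocation. Deciding the read callback first and allocating only for valid entries avoids that. A minimal userspace sketch of the same allocate-after-validate pattern (all names here are illustrative, not the nvkm API):

#include <stdio.h>
#include <stdlib.h>

struct rail { int (*read)(int idx); int idx; };

static int read_a(int idx) { return idx * 10; }
static int read_b(int idx) { return idx * 20; }

static struct rail *make_rail(int type, int idx)
{
    int (*read)(int);
    struct rail *r;

    /* Validate first: pick the reader before touching the heap. */
    switch (type) {
    case 0: read = read_a; break;
    case 1: read = read_b; break;
    default: return NULL;   /* nothing allocated, nothing leaked */
    }

    /* Allocate only once we know the entry is usable. */
    r = malloc(sizeof(*r));
    if (!r)
        return NULL;
    r->read = read;
    r->idx = idx;
    return r;
}

int main(void)
{
    struct rail *r = make_rail(1, 3);

    if (r) {
        printf("rail value: %d\n", r->read(r->idx));
        free(r);
    }
    return 0;
}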
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 2e216e2ea78c..259cd6e6d71c 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -589,7 +589,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
589 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev)) 589 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
590 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 590 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
591 /* use frac fb div on RS780/RS880 */ 591 /* use frac fb div on RS780/RS880 */
592 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) 592 if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
593 && !radeon_crtc->ss_enabled)
593 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 594 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
594 if (ASIC_IS_DCE32(rdev) && mode->clock > 165000) 595 if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
595 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 596 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
@@ -626,7 +627,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
626 if (radeon_crtc->ss.refdiv) { 627 if (radeon_crtc->ss.refdiv) {
627 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; 628 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
628 radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; 629 radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
629 if (ASIC_IS_AVIVO(rdev)) 630 if (rdev->family >= CHIP_RV770)
630 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 631 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
631 } 632 }
632 } 633 }
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e721e6b2766e..21c44b2293bc 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -630,6 +630,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
630/* 630/*
631 * GPU helpers function. 631 * GPU helpers function.
632 */ 632 */
633
634/**
635 * radeon_device_is_virtual - check if we are running in a virtual environment
636 *
637 * Check if the asic has been passed through to a VM (all asics).
638 * Used at driver startup.
639 * Returns true if virtual or false if not.
640 */
641static bool radeon_device_is_virtual(void)
642{
643#ifdef CONFIG_X86
644 return boot_cpu_has(X86_FEATURE_HYPERVISOR);
645#else
646 return false;
647#endif
648}
649
633/** 650/**
634 * radeon_card_posted - check if the hw has already been initialized 651 * radeon_card_posted - check if the hw has already been initialized
635 * 652 *
@@ -643,6 +660,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
643{ 660{
644 uint32_t reg; 661 uint32_t reg;
645 662
663 /* for pass through, always force asic_init */
664 if (radeon_device_is_virtual())
665 return false;
666
646 /* required for EFI mode on macbook2,1 which uses an r5xx asic */ 667 /* required for EFI mode on macbook2,1 which uses an r5xx asic */
647 if (efi_enabled(EFI_BOOT) && 668 if (efi_enabled(EFI_BOOT) &&
648 (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && 669 (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
@@ -1631,7 +1652,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
1631 radeon_agp_suspend(rdev); 1652 radeon_agp_suspend(rdev);
1632 1653
1633 pci_save_state(dev->pdev); 1654 pci_save_state(dev->pdev);
1634 if (freeze && rdev->family >= CHIP_R600) { 1655 if (freeze && rdev->family >= CHIP_CEDAR) {
1635 rdev->asic->asic_reset(rdev, true); 1656 rdev->asic->asic_reset(rdev, true);
1636 pci_restore_state(dev->pdev); 1657 pci_restore_state(dev->pdev);
1637 } else if (suspend) { 1658 } else if (suspend) {
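For the virtualization check: boot_cpu_has(X86_FEATURE_HYPERVISOR) tests the hypervisor-present bit that VMMs advertise in CPUID leaf 1, ECX bit 31, which is why the helper can only say something useful on x86 and falls back to false elsewhere. A hedged userspace equivalent using GCC's cpuid.h:

#include <stdio.h>
#include <cpuid.h>

/* CPUID leaf 1, ECX bit 31 is reserved for "hypervisor present". */
static int running_in_vm(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
        return 0;
    return !!(ecx & (1u << 31));
}

int main(void)
{
    printf("hypervisor: %s\n", running_in_vm() ? "yes" : "no");
    return 0;
}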
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 99510e64e91a..a4b357db8856 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -1,6 +1,6 @@
1config DRM_SUN4I 1config DRM_SUN4I
2 tristate "DRM Support for Allwinner A10 Display Engine" 2 tristate "DRM Support for Allwinner A10 Display Engine"
3 depends on DRM && ARM 3 depends on DRM && ARM && COMMON_CLK
4 depends on ARCH_SUNXI || COMPILE_TEST 4 depends on ARCH_SUNXI || COMPILE_TEST
5 select DRM_GEM_CMA_HELPER 5 select DRM_GEM_CMA_HELPER
6 select DRM_KMS_HELPER 6 select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index f7a15c1a93bf..3ab560450a82 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -190,7 +190,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
190 /* Get the physical address of the buffer in memory */ 190 /* Get the physical address of the buffer in memory */
191 gem = drm_fb_cma_get_gem_obj(fb, 0); 191 gem = drm_fb_cma_get_gem_obj(fb, 0);
192 192
193 DRM_DEBUG_DRIVER("Using GEM @ 0x%x\n", gem->paddr); 193 DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
194 194
195 /* Compute the start of the displayed memory */ 195 /* Compute the start of the displayed memory */
196 bpp = drm_format_plane_cpp(fb->pixel_format, 0); 196 bpp = drm_format_plane_cpp(fb->pixel_format, 0);
@@ -198,7 +198,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
198 paddr += (state->src_x >> 16) * bpp; 198 paddr += (state->src_x >> 16) * bpp;
199 paddr += (state->src_y >> 16) * fb->pitches[0]; 199 paddr += (state->src_y >> 16) * fb->pitches[0];
200 200
201 DRM_DEBUG_DRIVER("Setting buffer address to 0x%x\n", paddr); 201 DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
202 202
203 /* Write the 32 lower bits of the address (in bits) */ 203 /* Write the 32 lower bits of the address (in bits) */
204 lo_paddr = paddr << 3; 204 lo_paddr = paddr << 3;
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
index 3ff668cb463c..5b3463197c48 100644
--- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
@@ -72,14 +72,40 @@ static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw,
72static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate, 72static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
73 unsigned long *parent_rate) 73 unsigned long *parent_rate)
74{ 74{
75 return *parent_rate / DIV_ROUND_CLOSEST(*parent_rate, rate); 75 unsigned long best_parent = 0;
76 u8 best_div = 1;
77 int i;
78
79 for (i = 6; i < 127; i++) {
80 unsigned long ideal = rate * i;
81 unsigned long rounded;
82
83 rounded = clk_hw_round_rate(clk_hw_get_parent(hw),
84 ideal);
85
86 if (rounded == ideal) {
87 best_parent = rounded;
88 best_div = i;
89 goto out;
90 }
91
92 if ((rounded < ideal) && (rounded > best_parent)) {
93 best_parent = rounded;
94 best_div = i;
95 }
96 }
97
98out:
99 *parent_rate = best_parent;
100
101 return best_parent / best_div;
76} 102}
77 103
78static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate, 104static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
79 unsigned long parent_rate) 105 unsigned long parent_rate)
80{ 106{
81 struct sun4i_dclk *dclk = hw_to_dclk(hw); 107 struct sun4i_dclk *dclk = hw_to_dclk(hw);
82 int div = DIV_ROUND_CLOSEST(parent_rate, rate); 108 u8 div = parent_rate / rate;
83 109
84 return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG, 110 return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG,
85 GENMASK(6, 0), div); 111 GENMASK(6, 0), div);
@@ -127,10 +153,14 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon)
127 const char *clk_name, *parent_name; 153 const char *clk_name, *parent_name;
128 struct clk_init_data init; 154 struct clk_init_data init;
129 struct sun4i_dclk *dclk; 155 struct sun4i_dclk *dclk;
156 int ret;
130 157
131 parent_name = __clk_get_name(tcon->sclk0); 158 parent_name = __clk_get_name(tcon->sclk0);
132 of_property_read_string_index(dev->of_node, "clock-output-names", 0, 159 ret = of_property_read_string_index(dev->of_node,
133 &clk_name); 160 "clock-output-names", 0,
161 &clk_name);
162 if (ret)
163 return ret;
134 164
135 dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL); 165 dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL);
136 if (!dclk) 166 if (!dclk)
@@ -140,6 +170,7 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon)
140 init.ops = &sun4i_dclk_ops; 170 init.ops = &sun4i_dclk_ops;
141 init.parent_names = &parent_name; 171 init.parent_names = &parent_name;
142 init.num_parents = 1; 172 init.num_parents = 1;
173 init.flags = CLK_SET_RATE_PARENT;
143 174
144 dclk->regmap = tcon->regs; 175 dclk->regmap = tcon->regs;
145 dclk->hw.init = &init; 176 dclk->hw.init = &init;
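The rewritten round_rate searches every divider the 7-bit dot-clock divider field can hold (the driver restricts itself to 6..127), asks the parent clock what it can actually deliver for rate * div, and keeps the largest parent rate that does not overshoot. A standalone sketch of that search; the 3 MHz parent granularity is an invented stand-in for clk_hw_round_rate(), not a property of the hardware:

#include <stdio.h>

/* Stand-in for clk_hw_round_rate(): pretend the parent PLL only
 * produces multiples of 3 MHz. Purely illustrative. */
static unsigned long parent_round(unsigned long rate)
{
    return rate - (rate % 3000000UL);
}

static unsigned long dclk_round(unsigned long rate, unsigned long *parent)
{
    unsigned long best_parent = 0;
    unsigned int best_div = 1, i;

    for (i = 6; i < 127; i++) {
        unsigned long ideal = rate * i;
        unsigned long rounded = parent_round(ideal);

        if (rounded == ideal) { /* exact match: stop searching */
            best_parent = rounded;
            best_div = i;
            break;
        }
        if (rounded < ideal && rounded > best_parent) {
            best_parent = rounded;  /* best undershoot so far */
            best_div = i;
        }
    }

    *parent = best_parent;
    return best_parent / best_div;
}

int main(void)
{
    unsigned long parent, dclk = dclk_round(33000000UL, &parent);

    printf("parent=%lu dclk=%lu\n", parent, dclk);
    return 0;
}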
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 76e922bb60e5..257d2b4f3645 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -24,34 +24,6 @@
24#include "sun4i_layer.h" 24#include "sun4i_layer.h"
25#include "sun4i_tcon.h" 25#include "sun4i_tcon.h"
26 26
27static int sun4i_drv_connector_plug_all(struct drm_device *drm)
28{
29 struct drm_connector *connector, *failed;
30 int ret;
31
32 mutex_lock(&drm->mode_config.mutex);
33 list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
34 ret = drm_connector_register(connector);
35 if (ret) {
36 failed = connector;
37 goto err;
38 }
39 }
40 mutex_unlock(&drm->mode_config.mutex);
41 return 0;
42
43err:
44 list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
45 if (failed == connector)
46 break;
47
48 drm_connector_unregister(connector);
49 }
50 mutex_unlock(&drm->mode_config.mutex);
51
52 return ret;
53}
54
55static int sun4i_drv_enable_vblank(struct drm_device *drm, unsigned int pipe) 27static int sun4i_drv_enable_vblank(struct drm_device *drm, unsigned int pipe)
56{ 28{
57 struct sun4i_drv *drv = drm->dev_private; 29 struct sun4i_drv *drv = drm->dev_private;
@@ -125,6 +97,22 @@ static struct drm_driver sun4i_drv_driver = {
125 .disable_vblank = sun4i_drv_disable_vblank, 97 .disable_vblank = sun4i_drv_disable_vblank,
126}; 98};
127 99
100static void sun4i_remove_framebuffers(void)
101{
102 struct apertures_struct *ap;
103
104 ap = alloc_apertures(1);
105 if (!ap)
106 return;
107
108 /* The framebuffer can be located anywhere in RAM */
109 ap->ranges[0].base = 0;
110 ap->ranges[0].size = ~0;
111
112 remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false);
113 kfree(ap);
114}
115
128static int sun4i_drv_bind(struct device *dev) 116static int sun4i_drv_bind(struct device *dev)
129{ 117{
130 struct drm_device *drm; 118 struct drm_device *drm;
@@ -172,6 +160,9 @@ static int sun4i_drv_bind(struct device *dev)
172 } 160 }
173 drm->irq_enabled = true; 161 drm->irq_enabled = true;
174 162
163 /* Remove early framebuffers (ie. simplefb) */
164 sun4i_remove_framebuffers();
165
175 /* Create our framebuffer */ 166 /* Create our framebuffer */
176 drv->fbdev = sun4i_framebuffer_init(drm); 167 drv->fbdev = sun4i_framebuffer_init(drm);
177 if (IS_ERR(drv->fbdev)) { 168 if (IS_ERR(drv->fbdev)) {
@@ -187,7 +178,7 @@ static int sun4i_drv_bind(struct device *dev)
187 if (ret) 178 if (ret)
188 goto free_drm; 179 goto free_drm;
189 180
190 ret = sun4i_drv_connector_plug_all(drm); 181 ret = drm_connector_register_all(drm);
191 if (ret) 182 if (ret)
192 goto unregister_drm; 183 goto unregister_drm;
193 184
@@ -204,6 +195,7 @@ static void sun4i_drv_unbind(struct device *dev)
204{ 195{
205 struct drm_device *drm = dev_get_drvdata(dev); 196 struct drm_device *drm = dev_get_drvdata(dev);
206 197
198 drm_connector_unregister_all(drm);
207 drm_dev_unregister(drm); 199 drm_dev_unregister(drm);
208 drm_kms_helper_poll_fini(drm); 200 drm_kms_helper_poll_fini(drm);
209 sun4i_framebuffer_free(drm); 201 sun4i_framebuffer_free(drm);
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index ab6494818050..aaffe9e64ffb 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -54,8 +54,13 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector)
54static int sun4i_rgb_mode_valid(struct drm_connector *connector, 54static int sun4i_rgb_mode_valid(struct drm_connector *connector,
55 struct drm_display_mode *mode) 55 struct drm_display_mode *mode)
56{ 56{
57 struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
58 struct sun4i_drv *drv = rgb->drv;
59 struct sun4i_tcon *tcon = drv->tcon;
57 u32 hsync = mode->hsync_end - mode->hsync_start; 60 u32 hsync = mode->hsync_end - mode->hsync_start;
58 u32 vsync = mode->vsync_end - mode->vsync_start; 61 u32 vsync = mode->vsync_end - mode->vsync_start;
62 unsigned long rate = mode->clock * 1000;
63 long rounded_rate;
59 64
60 DRM_DEBUG_DRIVER("Validating modes...\n"); 65 DRM_DEBUG_DRIVER("Validating modes...\n");
61 66
@@ -87,6 +92,15 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
87 92
88 DRM_DEBUG_DRIVER("Vertical parameters OK\n"); 93 DRM_DEBUG_DRIVER("Vertical parameters OK\n");
89 94
95 rounded_rate = clk_round_rate(tcon->dclk, rate);
96 if (rounded_rate < rate)
97 return MODE_CLOCK_LOW;
98
99 if (rounded_rate > rate)
100 return MODE_CLOCK_HIGH;
101
102 DRM_DEBUG_DRIVER("Clock rate OK\n");
103
90 return MODE_OK; 104 return MODE_OK;
91} 105}
92 106
@@ -203,7 +217,7 @@ int sun4i_rgb_init(struct drm_device *drm)
203 int ret; 217 int ret;
204 218
205 /* If we don't have a panel, there's no point in going on */ 219 /* If we don't have a panel, there's no point in going on */
206 if (!tcon->panel) 220 if (IS_ERR(tcon->panel))
207 return -ENODEV; 221 return -ENODEV;
208 222
209 rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL); 223 rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 9f19b0e08560..652385f09735 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -425,11 +425,11 @@ static struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
425 425
426 remote = of_graph_get_remote_port_parent(end_node); 426 remote = of_graph_get_remote_port_parent(end_node);
427 if (!remote) { 427 if (!remote) {
428 DRM_DEBUG_DRIVER("Enable to parse remote node\n"); 428 DRM_DEBUG_DRIVER("Unable to parse remote node\n");
429 return ERR_PTR(-EINVAL); 429 return ERR_PTR(-EINVAL);
430 } 430 }
431 431
432 return of_drm_find_panel(remote); 432 return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER);
433} 433}
434 434
435static int sun4i_tcon_bind(struct device *dev, struct device *master, 435static int sun4i_tcon_bind(struct device *dev, struct device *master,
@@ -490,7 +490,11 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
490 return 0; 490 return 0;
491 } 491 }
492 492
493 return sun4i_rgb_init(drm); 493 ret = sun4i_rgb_init(drm);
494 if (ret < 0)
495 goto err_free_clocks;
496
497 return 0;
494 498
495err_free_clocks: 499err_free_clocks:
496 sun4i_tcon_free_clocks(tcon); 500 sun4i_tcon_free_clocks(tcon);
@@ -522,12 +526,13 @@ static int sun4i_tcon_probe(struct platform_device *pdev)
522 * Defer the probe. 526 * Defer the probe.
523 */ 527 */
524 panel = sun4i_tcon_find_panel(node); 528 panel = sun4i_tcon_find_panel(node);
525 if (IS_ERR(panel)) { 529
526 /* 530 /*
527 * If we don't have a panel endpoint, just go on 531 * If we don't have a panel endpoint, just go on
528 */ 532 */
529 if (PTR_ERR(panel) != -ENODEV) 533 if (PTR_ERR(panel) == -EPROBE_DEFER) {
530 return -EPROBE_DEFER; 534 DRM_DEBUG_DRIVER("Still waiting for our panel. Deferring...\n");
535 return -EPROBE_DEFER;
531 } 536 }
532 537
533 return component_add(&pdev->dev, &sun4i_tcon_ops); 538 return component_add(&pdev->dev, &sun4i_tcon_ops);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 6de283c8fa3e..f0374f9b56ca 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -28,6 +28,7 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/frame.h>
31#include <asm/hypervisor.h> 32#include <asm/hypervisor.h>
32#include "drmP.h" 33#include "drmP.h"
33#include "vmwgfx_msg.h" 34#include "vmwgfx_msg.h"
@@ -194,7 +195,7 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
194 195
195 return -EINVAL; 196 return -EINVAL;
196} 197}
197 198STACK_FRAME_NON_STANDARD(vmw_send_msg);
198 199
199 200
200/** 201/**
@@ -304,6 +305,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
304 305
305 return 0; 306 return 0;
306} 307}
308STACK_FRAME_NON_STANDARD(vmw_recv_msg);
307 309
308 310
309/** 311/**
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index aad8c162a825..0cd4f7216239 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -261,7 +261,7 @@ static void elo_remove(struct hid_device *hdev)
261 struct elo_priv *priv = hid_get_drvdata(hdev); 261 struct elo_priv *priv = hid_get_drvdata(hdev);
262 262
263 hid_hw_stop(hdev); 263 hid_hw_stop(hdev);
264 flush_workqueue(wq); 264 cancel_delayed_work_sync(&priv->work);
265 kfree(priv); 265 kfree(priv);
266} 266}
267 267
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index c741f5e50a66..fb6f1f447279 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -61,6 +61,7 @@ MODULE_LICENSE("GPL");
61#define MT_QUIRK_ALWAYS_VALID (1 << 4) 61#define MT_QUIRK_ALWAYS_VALID (1 << 4)
62#define MT_QUIRK_VALID_IS_INRANGE (1 << 5) 62#define MT_QUIRK_VALID_IS_INRANGE (1 << 5)
63#define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6) 63#define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6)
64#define MT_QUIRK_CONFIDENCE (1 << 7)
64#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8) 65#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8)
65#define MT_QUIRK_NO_AREA (1 << 9) 66#define MT_QUIRK_NO_AREA (1 << 9)
66#define MT_QUIRK_IGNORE_DUPLICATES (1 << 10) 67#define MT_QUIRK_IGNORE_DUPLICATES (1 << 10)
@@ -78,6 +79,7 @@ struct mt_slot {
78 __s32 contactid; /* the device ContactID assigned to this slot */ 79 __s32 contactid; /* the device ContactID assigned to this slot */
79 bool touch_state; /* is the touch valid? */ 80 bool touch_state; /* is the touch valid? */
80 bool inrange_state; /* is the finger in proximity of the sensor? */ 81 bool inrange_state; /* is the finger in proximity of the sensor? */
82 bool confidence_state; /* is the touch made by a finger? */
81}; 83};
82 84
83struct mt_class { 85struct mt_class {
@@ -503,10 +505,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
503 return 1; 505 return 1;
504 case HID_DG_CONFIDENCE: 506 case HID_DG_CONFIDENCE:
505 if (cls->name == MT_CLS_WIN_8 && 507 if (cls->name == MT_CLS_WIN_8 &&
506 field->application == HID_DG_TOUCHPAD) { 508 field->application == HID_DG_TOUCHPAD)
507 cls->quirks &= ~MT_QUIRK_ALWAYS_VALID; 509 cls->quirks |= MT_QUIRK_CONFIDENCE;
508 cls->quirks |= MT_QUIRK_VALID_IS_CONFIDENCE;
509 }
510 mt_store_field(usage, td, hi); 510 mt_store_field(usage, td, hi);
511 return 1; 511 return 1;
512 case HID_DG_TIPSWITCH: 512 case HID_DG_TIPSWITCH:
@@ -619,6 +619,7 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
619 return; 619 return;
620 620
621 if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) { 621 if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) {
622 int active;
622 int slotnum = mt_compute_slot(td, input); 623 int slotnum = mt_compute_slot(td, input);
623 struct mt_slot *s = &td->curdata; 624 struct mt_slot *s = &td->curdata;
624 struct input_mt *mt = input->mt; 625 struct input_mt *mt = input->mt;
@@ -633,10 +634,14 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
633 return; 634 return;
634 } 635 }
635 636
637 if (!(td->mtclass.quirks & MT_QUIRK_CONFIDENCE))
638 s->confidence_state = 1;
639 active = (s->touch_state || s->inrange_state) &&
640 s->confidence_state;
641
636 input_mt_slot(input, slotnum); 642 input_mt_slot(input, slotnum);
637 input_mt_report_slot_state(input, MT_TOOL_FINGER, 643 input_mt_report_slot_state(input, MT_TOOL_FINGER, active);
638 s->touch_state || s->inrange_state); 644 if (active) {
639 if (s->touch_state || s->inrange_state) {
640 /* this finger is in proximity of the sensor */ 645 /* this finger is in proximity of the sensor */
641 int wide = (s->w > s->h); 646 int wide = (s->w > s->h);
642 /* divided by two to match visual scale of touch */ 647 /* divided by two to match visual scale of touch */
@@ -701,6 +706,8 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
701 td->curdata.touch_state = value; 706 td->curdata.touch_state = value;
702 break; 707 break;
703 case HID_DG_CONFIDENCE: 708 case HID_DG_CONFIDENCE:
709 if (quirks & MT_QUIRK_CONFIDENCE)
710 td->curdata.confidence_state = value;
704 if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE) 711 if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
705 td->curvalid = value; 712 td->curvalid = value;
706 break; 713 break;
@@ -1401,6 +1408,11 @@ static const struct hid_device_id mt_devices[] = {
1401 MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK, 1408 MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
1402 USB_DEVICE_ID_NOVATEK_PCT) }, 1409 USB_DEVICE_ID_NOVATEK_PCT) },
1403 1410
1411 /* Ntrig Panel */
1412 { .driver_data = MT_CLS_NSMU,
1413 HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
1414 USB_VENDOR_ID_NTRIG, 0x1b05) },
1415
1404 /* PixArt optical touch screen */ 1416 /* PixArt optical touch screen */
1405 { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER, 1417 { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
1406 MT_USB_DEVICE(USB_VENDOR_ID_PIXART, 1418 MT_USB_DEVICE(USB_VENDOR_ID_PIXART,
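The net effect of the confidence rework: on Win8 touchpads a contact is reported as an active finger only when the firmware also asserts confidence, while devices without MT_QUIRK_CONFIDENCE behave as before because the state defaults to true. A compact sketch of the decision (struct and function are illustrative, mirroring the driver's fields):

#include <stdbool.h>
#include <stdio.h>

struct slot {
    bool touch_state;       /* tip switch down? */
    bool inrange_state;     /* hovering in range? */
    bool confidence_state;  /* firmware says "real finger"? */
};

static bool slot_active(const struct slot *s, bool has_confidence_quirk)
{
    /* Without the quirk the confidence bit is not reported,
     * so treat every contact as confident. */
    bool confident = has_confidence_quirk ? s->confidence_state : true;

    return (s->touch_state || s->inrange_state) && confident;
}

int main(void)
{
    struct slot palm = { .touch_state = true, .confidence_state = false };

    printf("palm active (quirk on):  %d\n", slot_active(&palm, true));
    printf("palm active (quirk off): %d\n", slot_active(&palm, false));
    return 0;
}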
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 2f1ddca6f2e0..700145b15088 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
516 goto inval; 516 goto inval;
517 } else if (uref->usage_index >= field->report_count) 517 } else if (uref->usage_index >= field->report_count)
518 goto inval; 518 goto inval;
519
520 else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
521 (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
522 uref->usage_index + uref_multi->num_values > field->report_count))
523 goto inval;
524 } 519 }
525 520
521 if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
522 (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
523 uref->usage_index + uref_multi->num_values > field->report_count))
524 goto inval;
525
526 switch (cmd) { 526 switch (cmd) {
527 case HIDIOCGUSAGE: 527 case HIDIOCGUSAGE:
528 uref->value = field->value[uref->usage_index]; 528 uref->value = field->value[uref->usage_index];
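The hiddev change is a security fix (tracked as CVE-2016-5829): the multi-usage bounds check used to live inside one branch of the if/else chain, so HIDIOCGUSAGES/HIDIOCSUSAGES could reach field->value[] with an unchecked usage_index + num_values on the other paths. Hoisting it out makes the check unconditional. A sketch of the safe shape, with illustrative names:

#include <errno.h>
#include <stdio.h>

#define MAX_VALUES 8

/* Copy n values starting at idx out of a fixed-size report.
 * The bounds check must cover the *whole* span on every path,
 * not just one branch of an if/else chain. */
static int get_usages(const int *report, unsigned int count,
                      unsigned int idx, unsigned int n, int *out)
{
    if (n > MAX_VALUES || idx > count || n > count - idx)
        return -EINVAL; /* also rejects overflow of idx + n */

    for (unsigned int i = 0; i < n; i++)
        out[i] = report[idx + i];
    return 0;
}

int main(void)
{
    int report[4] = { 1, 2, 3, 4 }, out[MAX_VALUES];

    printf("ok:  %d\n", get_usages(report, 4, 1, 2, out));
    printf("oob: %d\n", get_usages(report, 4, 3, 2, out));
    return 0;
}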
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index c43318d3416e..2ac87d553e22 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -35,6 +35,7 @@
35#include <linux/uaccess.h> 35#include <linux/uaccess.h>
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/sched.h> 37#include <linux/sched.h>
38#include <linux/ctype.h>
38 39
39#include <linux/i8k.h> 40#include <linux/i8k.h>
40 41
@@ -66,11 +67,13 @@
66 67
67static DEFINE_MUTEX(i8k_mutex); 68static DEFINE_MUTEX(i8k_mutex);
68static char bios_version[4]; 69static char bios_version[4];
70static char bios_machineid[16];
69static struct device *i8k_hwmon_dev; 71static struct device *i8k_hwmon_dev;
70static u32 i8k_hwmon_flags; 72static u32 i8k_hwmon_flags;
71static uint i8k_fan_mult = I8K_FAN_MULT; 73static uint i8k_fan_mult = I8K_FAN_MULT;
72static uint i8k_pwm_mult; 74static uint i8k_pwm_mult;
73static uint i8k_fan_max = I8K_FAN_HIGH; 75static uint i8k_fan_max = I8K_FAN_HIGH;
76static bool disallow_fan_type_call;
74 77
75#define I8K_HWMON_HAVE_TEMP1 (1 << 0) 78#define I8K_HWMON_HAVE_TEMP1 (1 << 0)
76#define I8K_HWMON_HAVE_TEMP2 (1 << 1) 79#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
@@ -94,13 +97,13 @@ module_param(ignore_dmi, bool, 0);
94MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match"); 97MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
95 98
96#if IS_ENABLED(CONFIG_I8K) 99#if IS_ENABLED(CONFIG_I8K)
97static bool restricted; 100static bool restricted = true;
98module_param(restricted, bool, 0); 101module_param(restricted, bool, 0);
99MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set"); 102MODULE_PARM_DESC(restricted, "Restrict fan control and serial number to CAP_SYS_ADMIN (default: 1)");
100 103
101static bool power_status; 104static bool power_status;
102module_param(power_status, bool, 0600); 105module_param(power_status, bool, 0600);
103MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k"); 106MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k (default: 0)");
104#endif 107#endif
105 108
106static uint fan_mult; 109static uint fan_mult;
@@ -235,14 +238,28 @@ static int i8k_get_fan_speed(int fan)
235/* 238/*
236 * Read the fan type. 239 * Read the fan type.
237 */ 240 */
238static int i8k_get_fan_type(int fan) 241static int _i8k_get_fan_type(int fan)
239{ 242{
240 struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, }; 243 struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
241 244
245 if (disallow_fan_type_call)
246 return -EINVAL;
247
242 regs.ebx = fan & 0xff; 248 regs.ebx = fan & 0xff;
243 return i8k_smm(&regs) ? : regs.eax & 0xff; 249 return i8k_smm(&regs) ? : regs.eax & 0xff;
244} 250}
245 251
252static int i8k_get_fan_type(int fan)
253{
254 /* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */
255 static int types[2] = { INT_MIN, INT_MIN };
256
257 if (types[fan] == INT_MIN)
258 types[fan] = _i8k_get_fan_type(fan);
259
260 return types[fan];
261}
262
246/* 263/*
247 * Read the fan nominal rpm for specific fan speed. 264 * Read the fan nominal rpm for specific fan speed.
248 */ 265 */
@@ -387,14 +404,20 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
387 404
388 switch (cmd) { 405 switch (cmd) {
389 case I8K_BIOS_VERSION: 406 case I8K_BIOS_VERSION:
407 if (!isdigit(bios_version[0]) || !isdigit(bios_version[1]) ||
408 !isdigit(bios_version[2]))
409 return -EINVAL;
410
390 val = (bios_version[0] << 16) | 411 val = (bios_version[0] << 16) |
391 (bios_version[1] << 8) | bios_version[2]; 412 (bios_version[1] << 8) | bios_version[2];
392 break; 413 break;
393 414
394 case I8K_MACHINE_ID: 415 case I8K_MACHINE_ID:
395 memset(buff, 0, 16); 416 if (restricted && !capable(CAP_SYS_ADMIN))
396 strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL), 417 return -EPERM;
397 sizeof(buff)); 418
419 memset(buff, 0, sizeof(buff));
420 strlcpy(buff, bios_machineid, sizeof(buff));
398 break; 421 break;
399 422
400 case I8K_FN_STATUS: 423 case I8K_FN_STATUS:
@@ -511,7 +534,7 @@ static int i8k_proc_show(struct seq_file *seq, void *offset)
511 seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n", 534 seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n",
512 I8K_PROC_FMT, 535 I8K_PROC_FMT,
513 bios_version, 536 bios_version,
514 i8k_get_dmi_data(DMI_PRODUCT_SERIAL), 537 (restricted && !capable(CAP_SYS_ADMIN)) ? "-1" : bios_machineid,
515 cpu_temp, 538 cpu_temp,
516 left_fan, right_fan, left_speed, right_speed, 539 left_fan, right_fan, left_speed, right_speed,
517 ac_power, fn_key); 540 ac_power, fn_key);
@@ -718,6 +741,9 @@ static struct attribute *i8k_attrs[] = {
718static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr, 741static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
719 int index) 742 int index)
720{ 743{
744 if (disallow_fan_type_call &&
745 (index == 9 || index == 12))
746 return 0;
721 if (index >= 0 && index <= 1 && 747 if (index >= 0 && index <= 1 &&
722 !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1)) 748 !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
723 return 0; 749 return 0;
@@ -767,13 +793,17 @@ static int __init i8k_init_hwmon(void)
767 if (err >= 0) 793 if (err >= 0)
768 i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4; 794 i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
769 795
770 /* First fan attributes, if fan type is OK */ 796 /* First fan attributes, if fan status or type is OK */
771 err = i8k_get_fan_type(0); 797 err = i8k_get_fan_status(0);
798 if (err < 0)
799 err = i8k_get_fan_type(0);
772 if (err >= 0) 800 if (err >= 0)
773 i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1; 801 i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1;
774 802
775 /* Second fan attributes, if fan type is OK */ 803 /* Second fan attributes, if fan status or type is OK */
776 err = i8k_get_fan_type(1); 804 err = i8k_get_fan_status(1);
805 if (err < 0)
806 err = i8k_get_fan_type(1);
777 if (err >= 0) 807 if (err >= 0)
778 i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2; 808 i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
779 809
@@ -929,12 +959,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
929 959
930MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); 960MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
931 961
932static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = { 962/*
963 * On some machines, once I8K_SMM_GET_FAN_TYPE is issued the CPU fan speed
964 * starts randomly going up and down due to a bug in Dell SMM or BIOS. Here is a
965 * blacklist of affected Dell machines for which we disallow the I8K_SMM_GET_FAN_TYPE call.
966 * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121
967 */
968static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = {
933 { 969 {
934 /*
935 * CPU fan speed going up and down on Dell Studio XPS 8000
936 * for unknown reasons.
937 */
938 .ident = "Dell Studio XPS 8000", 970 .ident = "Dell Studio XPS 8000",
939 .matches = { 971 .matches = {
940 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 972 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -942,16 +974,19 @@ static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
942 }, 974 },
943 }, 975 },
944 { 976 {
945 /*
946 * CPU fan speed going up and down on Dell Studio XPS 8100
947 * for unknown reasons.
948 */
949 .ident = "Dell Studio XPS 8100", 977 .ident = "Dell Studio XPS 8100",
950 .matches = { 978 .matches = {
951 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 979 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
952 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"), 980 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
953 }, 981 },
954 }, 982 },
983 {
984 .ident = "Dell Inspiron 580",
985 .matches = {
986 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
987 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 580 "),
988 },
989 },
955 { } 990 { }
956}; 991};
957 992
@@ -966,8 +1001,7 @@ static int __init i8k_probe(void)
966 /* 1001 /*
967 * Get DMI information 1002 * Get DMI information
968 */ 1003 */
969 if (!dmi_check_system(i8k_dmi_table) || 1004 if (!dmi_check_system(i8k_dmi_table)) {
970 dmi_check_system(i8k_blacklist_dmi_table)) {
971 if (!ignore_dmi && !force) 1005 if (!ignore_dmi && !force)
972 return -ENODEV; 1006 return -ENODEV;
973 1007
@@ -978,8 +1012,13 @@ static int __init i8k_probe(void)
978 i8k_get_dmi_data(DMI_BIOS_VERSION)); 1012 i8k_get_dmi_data(DMI_BIOS_VERSION));
979 } 1013 }
980 1014
1015 if (dmi_check_system(i8k_blacklist_fan_type_dmi_table))
1016 disallow_fan_type_call = true;
1017
981 strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION), 1018 strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
982 sizeof(bios_version)); 1019 sizeof(bios_version));
1020 strlcpy(bios_machineid, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
1021 sizeof(bios_machineid));
983 1022
984 /* 1023 /*
985 * Get SMM Dell signature 1024 * Get SMM Dell signature
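Two ideas in the dell-smm rework are worth calling out: expensive SMM calls are now memoized (i8k_get_fan_type() probes each fan once and caches the result, with INT_MIN marking "not yet probed"), and the machine serial number is read once at probe time and gated behind CAP_SYS_ADMIN. The caching shape in plain C, with a simulated probe:

#include <limits.h>
#include <stdio.h>

/* Pretend this traps into SMM and is expensive. */
static int probe_fan_type(int fan)
{
    printf("(expensive probe of fan %d)\n", fan);
    return fan == 0 ? 2 : 1;
}

static int get_fan_type(int fan)
{
    /* INT_MIN never collides with a real type or a -errno. */
    static int cache[2] = { INT_MIN, INT_MIN };

    if (cache[fan] == INT_MIN)
        cache[fan] = probe_fan_type(fan);
    return cache[fan];
}

int main(void)
{
    get_fan_type(0);
    get_fan_type(0);    /* served from the cache, no second probe */
    printf("type: %d\n", get_fan_type(0));
    return 0;
}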
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index eb97a9241d17..15aa49d082c4 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -172,9 +172,9 @@ static void do_read_registers_on_cu(void *_data)
172 */ 172 */
173static int read_registers(struct fam15h_power_data *data) 173static int read_registers(struct fam15h_power_data *data)
174{ 174{
175 int this_cpu, ret, cpu;
176 int core, this_core; 175 int core, this_core;
177 cpumask_var_t mask; 176 cpumask_var_t mask;
177 int ret, cpu;
178 178
179 ret = zalloc_cpumask_var(&mask, GFP_KERNEL); 179 ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
180 if (!ret) 180 if (!ret)
@@ -183,7 +183,6 @@ static int read_registers(struct fam15h_power_data *data)
183 memset(data->cu_on, 0, sizeof(int) * MAX_CUS); 183 memset(data->cu_on, 0, sizeof(int) * MAX_CUS);
184 184
185 get_online_cpus(); 185 get_online_cpus();
186 this_cpu = smp_processor_id();
187 186
188 /* 187 /*
189 * Choose the first online core of each compute unit, and then 188 * Choose the first online core of each compute unit, and then
@@ -205,12 +204,9 @@ static int read_registers(struct fam15h_power_data *data)
205 cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask); 204 cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask);
206 } 205 }
207 206
208 if (cpumask_test_cpu(this_cpu, mask)) 207 on_each_cpu_mask(mask, do_read_registers_on_cu, data, true);
209 do_read_registers_on_cu(data);
210 208
211 smp_call_function_many(mask, do_read_registers_on_cu, data, true);
212 put_online_cpus(); 209 put_online_cpus();
213
214 free_cpumask_var(mask); 210 free_cpumask_var(mask);
215 211
216 return 0; 212 return 0;
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index c9ff08dbe10c..e30a5939dc0d 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -375,7 +375,7 @@ struct lm90_data {
375 int kind; 375 int kind;
376 u32 flags; 376 u32 flags;
377 377
378 int update_interval; /* in milliseconds */ 378 unsigned int update_interval; /* in milliseconds */
379 379
380 u8 config_orig; /* Original configuration register value */ 380 u8 config_orig; /* Original configuration register value */
381 u8 convrate_orig; /* Original conversion rate register value */ 381 u8 convrate_orig; /* Original conversion rate register value */
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 847d1b5f2c13..688be9e060fc 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -300,13 +300,10 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
300 if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { 300 if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
301 /* 301 /*
302 * The trace run will continue with the same allocated trace 302 * The trace run will continue with the same allocated trace
303 * buffer. As such zero-out the buffer so that we don't end 303 * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
304 * up with stale data. 304 * so we don't have to explicitly clear it. Also, since the
305 * 305 * tracer is still enabled drvdata::buf can't be NULL.
306 * Since the tracer is still enabled drvdata::buf
307 * can't be NULL.
308 */ 306 */
309 memset(drvdata->buf, 0, drvdata->size);
310 tmc_etr_enable_hw(drvdata); 307 tmc_etr_enable_hw(drvdata);
311 } else { 308 } else {
312 /* 309 /*
@@ -315,7 +312,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
315 */ 312 */
316 vaddr = drvdata->vaddr; 313 vaddr = drvdata->vaddr;
317 paddr = drvdata->paddr; 314 paddr = drvdata->paddr;
318 drvdata->buf = NULL; 315 drvdata->buf = drvdata->vaddr = NULL;
319 } 316 }
320 317
321 drvdata->reading = false; 318 drvdata->reading = false;
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 5443d03a1eec..d08d1ab9bba5 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -385,7 +385,6 @@ static int _coresight_build_path(struct coresight_device *csdev,
385 int i; 385 int i;
386 bool found = false; 386 bool found = false;
387 struct coresight_node *node; 387 struct coresight_node *node;
388 struct coresight_connection *conn;
389 388
390 /* An activated sink has been found. Enqueue the element */ 389 /* An activated sink has been found. Enqueue the element */
391 if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || 390 if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
@@ -394,8 +393,9 @@ static int _coresight_build_path(struct coresight_device *csdev,
394 393
395 /* Not a sink - recursively explore each port found on this element */ 394 /* Not a sink - recursively explore each port found on this element */
396 for (i = 0; i < csdev->nr_outport; i++) { 395 for (i = 0; i < csdev->nr_outport; i++) {
397 conn = &csdev->conns[i]; 396 struct coresight_device *child_dev = csdev->conns[i].child_dev;
398 if (_coresight_build_path(conn->child_dev, path) == 0) { 397
398 if (child_dev && _coresight_build_path(child_dev, path) == 0) {
399 found = true; 399 found = true;
400 break; 400 break;
401 } 401 }
@@ -425,6 +425,7 @@ out:
425struct list_head *coresight_build_path(struct coresight_device *csdev) 425struct list_head *coresight_build_path(struct coresight_device *csdev)
426{ 426{
427 struct list_head *path; 427 struct list_head *path;
428 int rc;
428 429
429 path = kzalloc(sizeof(struct list_head), GFP_KERNEL); 430 path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
430 if (!path) 431 if (!path)
@@ -432,9 +433,10 @@ struct list_head *coresight_build_path(struct coresight_device *csdev)
432 433
433 INIT_LIST_HEAD(path); 434 INIT_LIST_HEAD(path);
434 435
435 if (_coresight_build_path(csdev, path)) { 436 rc = _coresight_build_path(csdev, path);
437 if (rc) {
436 kfree(path); 438 kfree(path);
437 path = NULL; 439 return ERR_PTR(rc);
438 } 440 }
439 441
440 return path; 442 return path;
@@ -507,8 +509,9 @@ int coresight_enable(struct coresight_device *csdev)
507 goto out; 509 goto out;
508 510
509 path = coresight_build_path(csdev); 511 path = coresight_build_path(csdev);
510 if (!path) { 512 if (IS_ERR(path)) {
511 pr_err("building path(s) failed\n"); 513 pr_err("building path(s) failed\n");
514 ret = PTR_ERR(path);
512 goto out; 515 goto out;
513 } 516 }
514 517
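Switching coresight_build_path() from NULL to ERR_PTR(rc) lets the caller recover the precise errno with PTR_ERR() instead of collapsing every failure into one code. The kernel implements these helpers by encoding small negative errnos in the top page of the address range; a self-contained re-implementation under that assumption (errno values below 4096):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long err) { return (void *)err; }
static inline long ptr_err(const void *p) { return (long)p; }
static inline int is_err(const void *p)
{
    return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *build_path(int fail)
{
    void *path = malloc(16);

    if (!path)
        return err_ptr(-ENOMEM);
    if (fail) {
        free(path);
        return err_ptr(-ENODEV);    /* propagate the real cause */
    }
    return path;
}

int main(void)
{
    void *p = build_path(1);

    if (is_err(p))
        printf("failed: %ld\n", ptr_err(p));
    else
        free(p);
    return 0;
}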
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 64b1208bca5e..4a60ad214747 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -245,6 +245,13 @@ struct i801_priv {
245 struct platform_device *mux_pdev; 245 struct platform_device *mux_pdev;
246#endif 246#endif
247 struct platform_device *tco_pdev; 247 struct platform_device *tco_pdev;
248
249 /*
250 * If set to true the host controller registers are reserved for
251 * ACPI AML use. Protected by acpi_lock.
252 */
253 bool acpi_reserved;
254 struct mutex acpi_lock;
248}; 255};
249 256
250#define FEATURE_SMBUS_PEC (1 << 0) 257#define FEATURE_SMBUS_PEC (1 << 0)
@@ -718,6 +725,12 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
718 int ret = 0, xact = 0; 725 int ret = 0, xact = 0;
719 struct i801_priv *priv = i2c_get_adapdata(adap); 726 struct i801_priv *priv = i2c_get_adapdata(adap);
720 727
728 mutex_lock(&priv->acpi_lock);
729 if (priv->acpi_reserved) {
730 mutex_unlock(&priv->acpi_lock);
731 return -EBUSY;
732 }
733
721 pm_runtime_get_sync(&priv->pci_dev->dev); 734 pm_runtime_get_sync(&priv->pci_dev->dev);
722 735
723 hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) 736 hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
@@ -820,6 +833,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
820out: 833out:
821 pm_runtime_mark_last_busy(&priv->pci_dev->dev); 834 pm_runtime_mark_last_busy(&priv->pci_dev->dev);
822 pm_runtime_put_autosuspend(&priv->pci_dev->dev); 835 pm_runtime_put_autosuspend(&priv->pci_dev->dev);
836 mutex_unlock(&priv->acpi_lock);
823 return ret; 837 return ret;
824} 838}
825 839
@@ -1257,6 +1271,83 @@ static void i801_add_tco(struct i801_priv *priv)
1257 priv->tco_pdev = pdev; 1271 priv->tco_pdev = pdev;
1258} 1272}
1259 1273
1274#ifdef CONFIG_ACPI
1275static acpi_status
1276i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
1277 u64 *value, void *handler_context, void *region_context)
1278{
1279 struct i801_priv *priv = handler_context;
1280 struct pci_dev *pdev = priv->pci_dev;
1281 acpi_status status;
1282
1283 /*
1284 * Once BIOS AML code touches the OpRegion we warn and inhibit any
1285 * further access from the driver itself. This device is now owned
1286 * by the system firmware.
1287 */
1288 mutex_lock(&priv->acpi_lock);
1289
1290 if (!priv->acpi_reserved) {
1291 priv->acpi_reserved = true;
1292
1293 dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
1294 dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n");
1295
1296 /*
1297 * BIOS is accessing the host controller so prevent it from
1298 * suspending automatically from now on.
1299 */
1300 pm_runtime_get_sync(&pdev->dev);
1301 }
1302
1303 if ((function & ACPI_IO_MASK) == ACPI_READ)
1304 status = acpi_os_read_port(address, (u32 *)value, bits);
1305 else
1306 status = acpi_os_write_port(address, (u32)*value, bits);
1307
1308 mutex_unlock(&priv->acpi_lock);
1309
1310 return status;
1311}
1312
1313static int i801_acpi_probe(struct i801_priv *priv)
1314{
1315 struct acpi_device *adev;
1316 acpi_status status;
1317
1318 adev = ACPI_COMPANION(&priv->pci_dev->dev);
1319 if (adev) {
1320 status = acpi_install_address_space_handler(adev->handle,
1321 ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler,
1322 NULL, priv);
1323 if (ACPI_SUCCESS(status))
1324 return 0;
1325 }
1326
1327 return acpi_check_resource_conflict(&priv->pci_dev->resource[SMBBAR]);
1328}
1329
1330static void i801_acpi_remove(struct i801_priv *priv)
1331{
1332 struct acpi_device *adev;
1333
1334 adev = ACPI_COMPANION(&priv->pci_dev->dev);
1335 if (!adev)
1336 return;
1337
1338 acpi_remove_address_space_handler(adev->handle,
1339 ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler);
1340
1341 mutex_lock(&priv->acpi_lock);
1342 if (priv->acpi_reserved)
1343 pm_runtime_put(&priv->pci_dev->dev);
1344 mutex_unlock(&priv->acpi_lock);
1345}
1346#else
1347static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; }
1348static inline void i801_acpi_remove(struct i801_priv *priv) { }
1349#endif
1350
1260static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) 1351static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1261{ 1352{
1262 unsigned char temp; 1353 unsigned char temp;
@@ -1274,6 +1365,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1274 priv->adapter.dev.parent = &dev->dev; 1365 priv->adapter.dev.parent = &dev->dev;
1275 ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev)); 1366 ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev));
1276 priv->adapter.retries = 3; 1367 priv->adapter.retries = 3;
1368 mutex_init(&priv->acpi_lock);
1277 1369
1278 priv->pci_dev = dev; 1370 priv->pci_dev = dev;
1279 switch (dev->device) { 1371 switch (dev->device) {
@@ -1336,10 +1428,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1336 return -ENODEV; 1428 return -ENODEV;
1337 } 1429 }
1338 1430
1339 err = acpi_check_resource_conflict(&dev->resource[SMBBAR]); 1431 if (i801_acpi_probe(priv))
1340 if (err) {
1341 return -ENODEV; 1432 return -ENODEV;
1342 }
1343 1433
1344 err = pcim_iomap_regions(dev, 1 << SMBBAR, 1434 err = pcim_iomap_regions(dev, 1 << SMBBAR,
1345 dev_driver_string(&dev->dev)); 1435 dev_driver_string(&dev->dev));
@@ -1348,6 +1438,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1348 "Failed to request SMBus region 0x%lx-0x%Lx\n", 1438 "Failed to request SMBus region 0x%lx-0x%Lx\n",
1349 priv->smba, 1439 priv->smba,
1350 (unsigned long long)pci_resource_end(dev, SMBBAR)); 1440 (unsigned long long)pci_resource_end(dev, SMBBAR));
1441 i801_acpi_remove(priv);
1351 return err; 1442 return err;
1352 } 1443 }
1353 1444
@@ -1412,6 +1503,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1412 err = i2c_add_adapter(&priv->adapter); 1503 err = i2c_add_adapter(&priv->adapter);
1413 if (err) { 1504 if (err) {
1414 dev_err(&dev->dev, "Failed to add SMBus adapter\n"); 1505 dev_err(&dev->dev, "Failed to add SMBus adapter\n");
1506 i801_acpi_remove(priv);
1415 return err; 1507 return err;
1416 } 1508 }
1417 1509
@@ -1438,6 +1530,7 @@ static void i801_remove(struct pci_dev *dev)
1438 1530
1439 i801_del_mux(priv); 1531 i801_del_mux(priv);
1440 i2c_del_adapter(&priv->adapter); 1532 i2c_del_adapter(&priv->adapter);
1533 i801_acpi_remove(priv);
1441 pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); 1534 pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
1442 1535
1443 platform_device_unregister(priv->tco_pdev); 1536 platform_device_unregister(priv->tco_pdev);
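The acpi_reserved flag is a one-way hand-over: the first time firmware AML touches the SMBus OpRegion, the driver logs a warning, pins the device awake, and from then on every transfer bails out with -EBUSY, all under the same mutex the ACPI handler takes. A userspace sketch of that mutex-guarded reservation, with pthreads standing in for the kernel mutex:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool reserved;       /* protected by lock */

/* Firmware path: claim the device, never give it back. */
static void firmware_touch(void)
{
    pthread_mutex_lock(&lock);
    if (!reserved) {
        reserved = true;
        puts("firmware owns the controller now");
    }
    pthread_mutex_unlock(&lock);
}

/* Driver path: fail fast with -EBUSY once ownership moved. */
static int driver_transfer(void)
{
    pthread_mutex_lock(&lock);
    if (reserved) {
        pthread_mutex_unlock(&lock);
        return -EBUSY;
    }
    puts("doing an SMBus transaction");
    pthread_mutex_unlock(&lock);
    return 0;
}

int main(void)
{
    printf("before: %d\n", driver_transfer());
    firmware_touch();
    printf("after:  %d\n", driver_transfer());
    return 0;
}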
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index aa5f01efd826..30ae35146723 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -934,8 +934,15 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
934 return result; 934 return result;
935 935
936 for (i = 0; i < length; i++) { 936 for (i = 0; i < length; i++) {
937 /* for the last byte TWSI_CTL_AAK must not be set */ 937 /*
938 if (i + 1 == length) 938 * For the last byte to receive TWSI_CTL_AAK must not be set.
939 *
940 * A special case is I2C_M_RECV_LEN where we don't know the
941 * additional length yet. If recv_len is set we assume we're
942 * not reading the final byte and therefore need to set
943 * TWSI_CTL_AAK.
944 */
945 if ((i + 1 == length) && !(recv_len && i == 0))
939 final_read = true; 946 final_read = true;
940 947
941 /* clear iflg to allow next event */ 948 /* clear iflg to allow next event */
@@ -950,12 +957,8 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
950 957
951 data[i] = octeon_i2c_data_read(i2c); 958 data[i] = octeon_i2c_data_read(i2c);
952 if (recv_len && i == 0) { 959 if (recv_len && i == 0) {
953 if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) { 960 if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
954 dev_err(i2c->dev,
955 "%s: read len > I2C_SMBUS_BLOCK_MAX %d\n",
956 __func__, data[i]);
957 return -EPROTO; 961 return -EPROTO;
958 }
959 length += data[i]; 962 length += data[i];
960 } 963 }
961 964
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index 6773cadf7c9f..26e7c5187a58 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -260,6 +260,7 @@ static struct platform_driver i2c_mux_reg_driver = {
260 .remove = i2c_mux_reg_remove, 260 .remove = i2c_mux_reg_remove,
261 .driver = { 261 .driver = {
262 .name = "i2c-mux-reg", 262 .name = "i2c-mux-reg",
263 .of_match_table = of_match_ptr(i2c_mux_reg_of_match),
263 }, 264 },
264}; 265};
265 266
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 923f56598d4b..3a9f106787d2 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)
81 81
82 mutex_lock(&st->buf_lock); 82 mutex_lock(&st->buf_lock);
83 ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C)); 83 ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
84 if (ret) 84 if (ret < 0)
85 goto error_ret; 85 goto error_ret;
86 st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C); 86 st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
87 st->tx[1] = (ret & ~KXSD9_FS_MASK) | i; 87 st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
@@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
163 break; 163 break;
164 case IIO_CHAN_INFO_SCALE: 164 case IIO_CHAN_INFO_SCALE:
165 ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C)); 165 ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
166 if (ret) 166 if (ret < 0)
167 goto error_ret; 167 goto error_ret;
168 *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK]; 168 *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
169 ret = IIO_VAL_INT_PLUS_MICRO; 169 ret = IIO_VAL_INT_PLUS_MICRO;
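spi_w8r8() follows the usual kernel convention of returning the byte read (0..255) on success and a negative errno on failure, so the old "if (ret)" treated every nonzero register value as an error. A minimal illustration of why the sign check is the right one:

#include <errno.h>
#include <stdio.h>

/* Mimics spi_w8r8(): byte value on success, -errno on failure. */
static int read_reg(int fail)
{
    return fail ? -EIO : 0x5a;  /* 0x5a is perfectly valid data */
}

int main(void)
{
    int ret = read_reg(0);

    if (ret)        /* WRONG: 0x5a != 0, looks like an error */
        printf("buggy check rejects value 0x%02x\n", ret);

    if (ret < 0)    /* RIGHT: only negative values are errors */
        printf("real error %d\n", ret);
    else
        printf("read 0x%02x\n", ret);
    return 0;
}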
diff --git a/drivers/iio/accel/st_accel_buffer.c b/drivers/iio/accel/st_accel_buffer.c
index a1e642ee13d6..7fddc137e91e 100644
--- a/drivers/iio/accel/st_accel_buffer.c
+++ b/drivers/iio/accel/st_accel_buffer.c
@@ -91,7 +91,7 @@ static const struct iio_buffer_setup_ops st_accel_buffer_setup_ops = {
91 91
92int st_accel_allocate_ring(struct iio_dev *indio_dev) 92int st_accel_allocate_ring(struct iio_dev *indio_dev)
93{ 93{
94 return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, 94 return iio_triggered_buffer_setup(indio_dev, NULL,
95 &st_sensors_trigger_handler, &st_accel_buffer_setup_ops); 95 &st_sensors_trigger_handler, &st_accel_buffer_setup_ops);
96} 96}
97 97
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index dc73f2d85e6d..4d95bfc4786c 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -741,6 +741,7 @@ static const struct iio_info accel_info = {
741static const struct iio_trigger_ops st_accel_trigger_ops = { 741static const struct iio_trigger_ops st_accel_trigger_ops = {
742 .owner = THIS_MODULE, 742 .owner = THIS_MODULE,
743 .set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE, 743 .set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE,
744 .validate_device = st_sensors_validate_device,
744}; 745};
745#define ST_ACCEL_TRIGGER_OPS (&st_accel_trigger_ops) 746#define ST_ACCEL_TRIGGER_OPS (&st_accel_trigger_ops)
746#else 747#else
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 21e19b60e2b9..2123f0ac2e2a 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -396,8 +396,8 @@ static int ad7266_probe(struct spi_device *spi)
396 396
397 st = iio_priv(indio_dev); 397 st = iio_priv(indio_dev);
398 398
399 st->reg = devm_regulator_get(&spi->dev, "vref"); 399 st->reg = devm_regulator_get_optional(&spi->dev, "vref");
400 if (!IS_ERR_OR_NULL(st->reg)) { 400 if (!IS_ERR(st->reg)) {
401 ret = regulator_enable(st->reg); 401 ret = regulator_enable(st->reg);
402 if (ret) 402 if (ret)
403 return ret; 403 return ret;
@@ -408,6 +408,9 @@ static int ad7266_probe(struct spi_device *spi)
408 408
409 st->vref_mv = ret / 1000; 409 st->vref_mv = ret / 1000;
410 } else { 410 } else {
411 /* Any other error indicates that the regulator does exist */
412 if (PTR_ERR(st->reg) != -ENODEV)
413 return PTR_ERR(st->reg);
411 /* Use internal reference */ 414 /* Use internal reference */
412 st->vref_mv = 2500; 415 st->vref_mv = 2500;
413 } 416 }
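devm_regulator_get_optional() makes the three outcomes explicit: a usable regulator, -ENODEV (genuinely absent, fall back to the internal 2.5 V reference), or any other error such as -EPROBE_DEFER that must be propagated. The same triage in a standalone sketch; the EPROBE_DEFER value is the kernel-internal one, copied here only so the example compiles:

#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER 517    /* kernel-internal errno, for illustration */

struct vref { int microvolts; };

/* Pretend lookup: returns a regulator, or NULL with *err set. */
static struct vref *get_optional_vref(int scenario, int *err)
{
    static struct vref ext = { 3300000 };

    switch (scenario) {
    case 0: *err = 0; return &ext;          /* reference is wired up */
    case 1: *err = -ENODEV; return NULL;    /* simply not present */
    default: *err = -EPROBE_DEFER; return NULL;
    }
}

static int probe(int scenario)
{
    int err, vref_mv;
    struct vref *reg = get_optional_vref(scenario, &err);

    if (reg)
        vref_mv = reg->microvolts / 1000;
    else if (err == -ENODEV)
        vref_mv = 2500;     /* fall back to internal reference */
    else
        return err;         /* real failure: propagate it */

    printf("scenario %d: vref = %d mV\n", scenario, vref_mv);
    return 0;
}

int main(void)
{
    probe(0);
    probe(1);
    printf("scenario 2 -> %d\n", probe(2));
    return 0;
}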
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
index c55898543a47..f1693dbebb8a 100644
--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
+++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
@@ -57,31 +57,20 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p)
57 struct iio_poll_func *pf = p; 57 struct iio_poll_func *pf = p;
58 struct iio_dev *indio_dev = pf->indio_dev; 58 struct iio_dev *indio_dev = pf->indio_dev;
59 struct st_sensor_data *sdata = iio_priv(indio_dev); 59 struct st_sensor_data *sdata = iio_priv(indio_dev);
60 s64 timestamp;
60 61
61 /* If we have a status register, check if this IRQ came from us */ 62 /* If we do timestamping here, do it before reading the values */
62 if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) { 63 if (sdata->hw_irq_trigger)
63 u8 status; 64 timestamp = sdata->hw_timestamp;
64 65 else
65 len = sdata->tf->read_byte(&sdata->tb, sdata->dev, 66 timestamp = iio_get_time_ns();
66 sdata->sensor_settings->drdy_irq.addr_stat_drdy,
67 &status);
68 if (len < 0)
69 dev_err(sdata->dev, "could not read channel status\n");
70
71 /*
72 * If this was not caused by any channels on this sensor,
73 * return IRQ_NONE
74 */
75 if (!(status & (u8)indio_dev->active_scan_mask[0]))
76 return IRQ_NONE;
77 }
78 67
79 len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data); 68 len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data);
80 if (len < 0) 69 if (len < 0)
81 goto st_sensors_get_buffer_element_error; 70 goto st_sensors_get_buffer_element_error;
82 71
83 iio_push_to_buffers_with_timestamp(indio_dev, sdata->buffer_data, 72 iio_push_to_buffers_with_timestamp(indio_dev, sdata->buffer_data,
84 pf->timestamp); 73 timestamp);
85 74
86st_sensors_get_buffer_element_error: 75st_sensors_get_buffer_element_error:
87 iio_trigger_notify_done(indio_dev->trig); 76 iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index dffe00692169..9e59c90f6a8d 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -363,6 +363,11 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
363 if (err < 0) 363 if (err < 0)
364 return err; 364 return err;
365 365
366 /* Disable DRDY, this might still be enabled after reboot. */
367 err = st_sensors_set_dataready_irq(indio_dev, false);
368 if (err < 0)
369 return err;
370
366 if (sdata->current_fullscale) { 371 if (sdata->current_fullscale) {
367 err = st_sensors_set_fullscale(indio_dev, 372 err = st_sensors_set_fullscale(indio_dev,
368 sdata->current_fullscale->num); 373 sdata->current_fullscale->num);
@@ -424,6 +429,9 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
424 else 429 else
425 drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2; 430 drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2;
426 431
432 /* Flag to the poll function that the hardware trigger is in use */
433 sdata->hw_irq_trigger = enable;
434
427 /* Enable/Disable the interrupt generator for data ready. */ 435 /* Enable/Disable the interrupt generator for data ready. */
428 err = st_sensors_write_data_with_mask(indio_dev, 436 err = st_sensors_write_data_with_mask(indio_dev,
429 sdata->sensor_settings->drdy_irq.addr, 437 sdata->sensor_settings->drdy_irq.addr,
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index da72279fcf99..296e4ff19ae8 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -17,6 +17,73 @@
17#include <linux/iio/common/st_sensors.h> 17#include <linux/iio/common/st_sensors.h>
18#include "st_sensors_core.h" 18#include "st_sensors_core.h"
19 19
20/**
21 * st_sensors_irq_handler() - top half of the IRQ-based triggers
22 * @irq: irq number
23 * @p: private handler data
24 */
25irqreturn_t st_sensors_irq_handler(int irq, void *p)
26{
27 struct iio_trigger *trig = p;
28 struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
29 struct st_sensor_data *sdata = iio_priv(indio_dev);
30
31 /* Get the time stamp as close in time as possible */
32 sdata->hw_timestamp = iio_get_time_ns();
33 return IRQ_WAKE_THREAD;
34}
35
36/**
37 * st_sensors_irq_thread() - bottom half of the IRQ-based triggers
38 * @irq: irq number
39 * @p: private handler data
40 */
41irqreturn_t st_sensors_irq_thread(int irq, void *p)
42{
43 struct iio_trigger *trig = p;
44 struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
45 struct st_sensor_data *sdata = iio_priv(indio_dev);
46 int ret;
47
48 /*
49 * If this trigger is backed by a hardware interrupt and we have a
50 * status register, check if this IRQ came from us
51 */
52 if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
53 u8 status;
54
55 ret = sdata->tf->read_byte(&sdata->tb, sdata->dev,
56 sdata->sensor_settings->drdy_irq.addr_stat_drdy,
57 &status);
58 if (ret < 0) {
59 dev_err(sdata->dev, "could not read channel status\n");
60 goto out_poll;
61 }
62 /*
63 * the lower bits of .active_scan_mask[0] is directly mapped
64 * to the channels on the sensor: either bit 0 for
65 * one-dimensional sensors, or e.g. x,y,z for accelerometers,
66 * gyroscopes or magnetometers. No sensor use more than 3
67 * channels, so cut the other status bits here.
68 */
69 status &= 0x07;
70
71 /*
72 * If this was not caused by any channels on this sensor,
73 * return IRQ_NONE
74 */
75 if (!indio_dev->active_scan_mask)
76 return IRQ_NONE;
77 if (!(status & (u8)indio_dev->active_scan_mask[0]))
78 return IRQ_NONE;
79 }
80
81out_poll:
82 /* It's our IRQ: proceed to handle the register polling */
83 iio_trigger_poll_chained(p);
84 return IRQ_HANDLED;
85}
86
20int st_sensors_allocate_trigger(struct iio_dev *indio_dev, 87int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
21 const struct iio_trigger_ops *trigger_ops) 88 const struct iio_trigger_ops *trigger_ops)
22{ 89{
@@ -30,6 +97,10 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
30 return -ENOMEM; 97 return -ENOMEM;
31 } 98 }
32 99
100 iio_trigger_set_drvdata(sdata->trig, indio_dev);
101 sdata->trig->ops = trigger_ops;
102 sdata->trig->dev.parent = sdata->dev;
103
33 irq = sdata->get_irq_data_ready(indio_dev); 104 irq = sdata->get_irq_data_ready(indio_dev);
34 irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq)); 105 irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
35 /* 106 /*
@@ -77,9 +148,12 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
77 sdata->sensor_settings->drdy_irq.addr_stat_drdy) 148 sdata->sensor_settings->drdy_irq.addr_stat_drdy)
78 irq_trig |= IRQF_SHARED; 149 irq_trig |= IRQF_SHARED;
79 150
80 err = request_threaded_irq(irq, 151 /* Let's create an interrupt thread masking the hard IRQ here */
81 iio_trigger_generic_data_rdy_poll, 152 irq_trig |= IRQF_ONESHOT;
82 NULL, 153
154 err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
155 st_sensors_irq_handler,
156 st_sensors_irq_thread,
83 irq_trig, 157 irq_trig,
84 sdata->trig->name, 158 sdata->trig->name,
85 sdata->trig); 159 sdata->trig);
@@ -88,10 +162,6 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
88 goto iio_trigger_free; 162 goto iio_trigger_free;
89 } 163 }
90 164
91 iio_trigger_set_drvdata(sdata->trig, indio_dev);
92 sdata->trig->ops = trigger_ops;
93 sdata->trig->dev.parent = sdata->dev;
94
95 err = iio_trigger_register(sdata->trig); 165 err = iio_trigger_register(sdata->trig);
96 if (err < 0) { 166 if (err < 0) {
97 dev_err(&indio_dev->dev, "failed to register iio trigger.\n"); 167 dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
@@ -119,6 +189,18 @@ void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
119} 189}
120EXPORT_SYMBOL(st_sensors_deallocate_trigger); 190EXPORT_SYMBOL(st_sensors_deallocate_trigger);
121 191
192int st_sensors_validate_device(struct iio_trigger *trig,
193 struct iio_dev *indio_dev)
194{
195 struct iio_dev *indio = iio_trigger_get_drvdata(trig);
196
197 if (indio != indio_dev)
198 return -EINVAL;
199
200 return 0;
201}
202EXPORT_SYMBOL(st_sensors_validate_device);
203
122MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>"); 204MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
123MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger"); 205MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger");
124MODULE_LICENSE("GPL v2"); 206MODULE_LICENSE("GPL v2");
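The trigger rework follows the canonical threaded-IRQ split: a top half that only grabs a timestamp and returns IRQ_WAKE_THREAD, and a bottom half that is allowed to sleep on the I2C/SPI bus to read the status register, with IRQF_ONESHOT keeping the line masked until the thread finishes. Note also that iio_trigger_set_drvdata() now runs before request_threaded_irq(), since the handlers dereference that drvdata as soon as the IRQ can fire. A generic sketch of the split, with placeholder demo_* names:

static irqreturn_t demo_hard_irq(int irq, void *p)
{
	struct demo_state *st = p;

	/* Top half runs in hard IRQ context, so only capture the time */
	st->timestamp = iio_get_time_ns();
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_irq_thread(int irq, void *p)
{
	struct demo_state *st = p;

	/* Bottom half may sleep, e.g. for a bus read of a status reg */
	if (!demo_irq_is_ours(st))
		return IRQ_NONE;	/* lets a shared line keep working */
	demo_handle_data(st);
	return IRQ_HANDLED;
}

/* IRQF_ONESHOT keeps the IRQ masked until the thread has returned */
err = request_threaded_irq(irq, demo_hard_irq, demo_irq_thread,
			   irq_type | IRQF_ONESHOT, "demo", st);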
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index e63b957c985f..f7c71da42f15 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -247,7 +247,7 @@ config MCP4922
247 247
248config STX104 248config STX104
249 tristate "Apex Embedded Systems STX104 DAC driver" 249 tristate "Apex Embedded Systems STX104 DAC driver"
250 depends on X86 && ISA 250 depends on X86 && ISA_BUS_API
251 help 251 help
252 Say yes here to build support for the 2-channel DAC on the Apex 252 Say yes here to build support for the 2-channel DAC on the Apex
253 Embedded Systems STX104 integrated analog PC/104 card. The base port 253 Embedded Systems STX104 integrated analog PC/104 card. The base port
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 948f600e7059..69bde5909854 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -525,7 +525,7 @@ static int ad5592r_alloc_channels(struct ad5592r_state *st)
525 525
526 device_for_each_child_node(st->dev, child) { 526 device_for_each_child_node(st->dev, child) {
527 ret = fwnode_property_read_u32(child, "reg", &reg); 527 ret = fwnode_property_read_u32(child, "reg", &reg);
528 if (ret || reg > ARRAY_SIZE(st->channel_modes)) 528 if (ret || reg >= ARRAY_SIZE(st->channel_modes))
529 continue; 529 continue;
530 530
531 ret = fwnode_property_read_u32(child, "adi,mode", &tmp); 531 ret = fwnode_property_read_u32(child, "adi,mode", &tmp);
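The ad5592r change is a textbook off-by-one: for an array of N entries the valid indices are 0..N-1, so the rejection test must be reg >= ARRAY_SIZE(), not reg >. With the strict comparison alone, reg == N slips through and writes one element past the end. In miniature:

u32 channel_modes[8];			/* valid indices are 0..7 */

if (ret || reg >= ARRAY_SIZE(channel_modes))
	continue;			/* reg == 8 is now rejected too */
channel_modes[reg] = tmp;		/* with '>', reg == 8 overflowed */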
diff --git a/drivers/iio/gyro/st_gyro_buffer.c b/drivers/iio/gyro/st_gyro_buffer.c
index d67b17b6a7aa..a5377044e42f 100644
--- a/drivers/iio/gyro/st_gyro_buffer.c
+++ b/drivers/iio/gyro/st_gyro_buffer.c
@@ -91,7 +91,7 @@ static const struct iio_buffer_setup_ops st_gyro_buffer_setup_ops = {
91 91
92int st_gyro_allocate_ring(struct iio_dev *indio_dev) 92int st_gyro_allocate_ring(struct iio_dev *indio_dev)
93{ 93{
94 return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, 94 return iio_triggered_buffer_setup(indio_dev, NULL,
95 &st_sensors_trigger_handler, &st_gyro_buffer_setup_ops); 95 &st_sensors_trigger_handler, &st_gyro_buffer_setup_ops);
96} 96}
97 97
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index 52a3c87c375c..a8012955a1f6 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -409,6 +409,7 @@ static const struct iio_info gyro_info = {
409static const struct iio_trigger_ops st_gyro_trigger_ops = { 409static const struct iio_trigger_ops st_gyro_trigger_ops = {
410 .owner = THIS_MODULE, 410 .owner = THIS_MODULE,
411 .set_trigger_state = ST_GYRO_TRIGGER_SET_STATE, 411 .set_trigger_state = ST_GYRO_TRIGGER_SET_STATE,
412 .validate_device = st_sensors_validate_device,
412}; 413};
413#define ST_GYRO_TRIGGER_OPS (&st_gyro_trigger_ops) 414#define ST_GYRO_TRIGGER_OPS (&st_gyro_trigger_ops)
414#else 415#else
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index 3be6d209a159..11535911a5c6 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -165,10 +165,8 @@ static irqreturn_t am2315_trigger_handler(int irq, void *p)
165 struct am2315_sensor_data sensor_data; 165 struct am2315_sensor_data sensor_data;
166 166
167 ret = am2315_read_data(data, &sensor_data); 167 ret = am2315_read_data(data, &sensor_data);
168 if (ret < 0) { 168 if (ret < 0)
169 mutex_unlock(&data->lock);
170 goto err; 169 goto err;
171 }
172 170
173 mutex_lock(&data->lock); 171 mutex_lock(&data->lock);
174 if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) { 172 if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) {
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index fa4767613173..a03832a5fc95 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -55,7 +55,7 @@ static const struct {
55 }, 55 },
56 { /* IIO_HUMIDITYRELATIVE channel */ 56 { /* IIO_HUMIDITYRELATIVE channel */
57 .shift = 8, 57 .shift = 8,
58 .mask = 2, 58 .mask = 3,
59 }, 59 },
60}; 60};
61 61
@@ -164,14 +164,14 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
164 dev_err(&client->dev, "cannot read high byte measurement"); 164 dev_err(&client->dev, "cannot read high byte measurement");
165 return ret; 165 return ret;
166 } 166 }
167 val = ret << 6; 167 val = ret << 8;
168 168
169 ret = i2c_smbus_read_byte(client); 169 ret = i2c_smbus_read_byte(client);
170 if (ret < 0) { 170 if (ret < 0) {
171 dev_err(&client->dev, "cannot read low byte measurement"); 171 dev_err(&client->dev, "cannot read low byte measurement");
172 return ret; 172 return ret;
173 } 173 }
174 val |= ret >> 2; 174 val |= ret;
175 175
176 return val; 176 return val;
177} 177}
@@ -211,18 +211,18 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
211 return IIO_VAL_INT_PLUS_MICRO; 211 return IIO_VAL_INT_PLUS_MICRO;
212 case IIO_CHAN_INFO_SCALE: 212 case IIO_CHAN_INFO_SCALE:
213 if (chan->type == IIO_TEMP) { 213 if (chan->type == IIO_TEMP) {
214 *val = 165; 214 *val = 165000;
215 *val2 = 65536 >> 2; 215 *val2 = 65536;
216 return IIO_VAL_FRACTIONAL; 216 return IIO_VAL_FRACTIONAL;
217 } else { 217 } else {
218 *val = 0; 218 *val = 100;
219 *val2 = 10000; 219 *val2 = 65536;
220 return IIO_VAL_INT_PLUS_MICRO; 220 return IIO_VAL_FRACTIONAL;
221 } 221 }
222 break; 222 break;
223 case IIO_CHAN_INFO_OFFSET: 223 case IIO_CHAN_INFO_OFFSET:
224 *val = -3971; 224 *val = -15887;
225 *val2 = 879096; 225 *val2 = 515151;
226 return IIO_VAL_INT_PLUS_MICRO; 226 return IIO_VAL_INT_PLUS_MICRO;
227 default: 227 default:
228 return -EINVAL; 228 return -EINVAL;
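The corrected hdc100x numbers come straight from the datasheet transfer functions, T(degC) = raw / 2^16 * 165 - 40 and RH(%) = raw / 2^16 * 100, now applied to the full 16-bit word instead of a wrongly right-shifted one. IIO reports (raw + offset) * scale, so for temperature in millidegrees the scale is 165000/65536 and the offset is -40000 / (165000/65536), roughly -15887.515151. A quick sanity check of those constants, assuming the datasheet formula rather than quoting driver code:

long long raw = 25818;		/* ~25 degC: (25 + 40) / 165 * 65536 */
long long milli_c = (raw - 15887) * 165000 / 65536;
/* milli_c == 25003 -> ~25.0 degC, so scale and offset agree */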
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index 0bf92b06d7d8..b8a290ec984e 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -209,11 +209,11 @@ static const struct bmi160_scale_item bmi160_scale_table[] = {
209}; 209};
210 210
211static const struct bmi160_odr bmi160_accel_odr[] = { 211static const struct bmi160_odr bmi160_accel_odr[] = {
212 {0x01, 0, 78125}, 212 {0x01, 0, 781250},
213 {0x02, 1, 5625}, 213 {0x02, 1, 562500},
214 {0x03, 3, 125}, 214 {0x03, 3, 125000},
215 {0x04, 6, 25}, 215 {0x04, 6, 250000},
216 {0x05, 12, 5}, 216 {0x05, 12, 500000},
217 {0x06, 25, 0}, 217 {0x06, 25, 0},
218 {0x07, 50, 0}, 218 {0x07, 50, 0},
219 {0x08, 100, 0}, 219 {0x08, 100, 0},
@@ -229,7 +229,7 @@ static const struct bmi160_odr bmi160_gyro_odr[] = {
229 {0x08, 100, 0}, 229 {0x08, 100, 0},
230 {0x09, 200, 0}, 230 {0x09, 200, 0},
231 {0x0A, 400, 0}, 231 {0x0A, 400, 0},
232 {0x0B, 8000, 0}, 232 {0x0B, 800, 0},
233 {0x0C, 1600, 0}, 233 {0x0C, 1600, 0},
234 {0x0D, 3200, 0}, 234 {0x0D, 3200, 0},
235}; 235};
@@ -364,8 +364,8 @@ int bmi160_set_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
364 364
365 return regmap_update_bits(data->regmap, 365 return regmap_update_bits(data->regmap,
366 bmi160_regs[t].config, 366 bmi160_regs[t].config,
367 bmi160_odr_table[t].tbl[i].bits, 367 bmi160_regs[t].config_odr_mask,
368 bmi160_regs[t].config_odr_mask); 368 bmi160_odr_table[t].tbl[i].bits);
369} 369}
370 370
371static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t, 371static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
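The last bmi160 hunk fixes swapped arguments: the regmap API is regmap_update_bits(map, reg, mask, val), mask third and new value fourth. With the two reversed, the ODR bits were acting as the mask, clobbering unrelated config bits. The correct shape, with illustrative register and mask names:

#include <linux/regmap.h>

ret = regmap_update_bits(data->regmap, DEMO_REG_CONF,
			 DEMO_CONF_ODR_MASK,	/* which bits may change */
			 odr_bits);		/* their new value */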
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
index f62b8bd9ad7e..dd6fc6d21f9d 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
@@ -56,6 +56,7 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev,
56 int i; 56 int i;
57 acpi_status status; 57 acpi_status status;
58 union acpi_object *cpm; 58 union acpi_object *cpm;
59 int ret;
59 60
60 status = acpi_evaluate_object(adev->handle, "CNF0", NULL, &buffer); 61 status = acpi_evaluate_object(adev->handle, "CNF0", NULL, &buffer);
61 if (ACPI_FAILURE(status)) 62 if (ACPI_FAILURE(status))
@@ -82,10 +83,10 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev,
82 } 83 }
83 } 84 }
84 } 85 }
85 86 ret = cpm->package.count;
86 kfree(buffer.pointer); 87 kfree(buffer.pointer);
87 88
88 return cpm->package.count; 89 return ret;
89} 90}
90 91
91static int acpi_i2c_check_resource(struct acpi_resource *ares, void *data) 92static int acpi_i2c_check_resource(struct acpi_resource *ares, void *data)
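The inv_mpu6050 hunk cures a use-after-free: cpm points into buffer.pointer, so cpm->package.count was being read from freed memory. The fix is the standard save-before-free shape:

union acpi_object *cpm = buffer.pointer;
int ret;

ret = cpm->package.count;	/* copy out while the buffer is alive */
kfree(buffer.pointer);		/* cpm dangles from this point on */
return ret;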
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index ae2806aafb72..0c52dfe64977 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -210,22 +210,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
210 210
211 /* Prevent the module from being removed whilst attached to a trigger */ 211 /* Prevent the module from being removed whilst attached to a trigger */
212 __module_get(pf->indio_dev->info->driver_module); 212 __module_get(pf->indio_dev->info->driver_module);
213
214 /* Get irq number */
213 pf->irq = iio_trigger_get_irq(trig); 215 pf->irq = iio_trigger_get_irq(trig);
216 if (pf->irq < 0)
217 goto out_put_module;
218
219 /* Request irq */
214 ret = request_threaded_irq(pf->irq, pf->h, pf->thread, 220 ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
215 pf->type, pf->name, 221 pf->type, pf->name,
216 pf); 222 pf);
217 if (ret < 0) { 223 if (ret < 0)
218 module_put(pf->indio_dev->info->driver_module); 224 goto out_put_irq;
219 return ret;
220 }
221 225
226 /* Enable trigger in driver */
222 if (trig->ops && trig->ops->set_trigger_state && notinuse) { 227 if (trig->ops && trig->ops->set_trigger_state && notinuse) {
223 ret = trig->ops->set_trigger_state(trig, true); 228 ret = trig->ops->set_trigger_state(trig, true);
224 if (ret < 0) 229 if (ret < 0)
225 module_put(pf->indio_dev->info->driver_module); 230 goto out_free_irq;
226 } 231 }
227 232
228 return ret; 233 return ret;
234
235out_free_irq:
236 free_irq(pf->irq, pf);
237out_put_irq:
238 iio_trigger_put_irq(trig, pf->irq);
239out_put_module:
240 module_put(pf->indio_dev->info->driver_module);
241 return ret;
229} 242}
230 243
231static int iio_trigger_detach_poll_func(struct iio_trigger *trig, 244static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
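The attach path now uses the kernel's usual goto-unwind idiom: acquire resources in order and, on any failure, jump to a label that releases everything obtained so far in reverse order, so there is exactly one exit sequence to audit. Schematically, with placeholder get_a/get_b helpers:

	ret = get_a(dev);		/* e.g. reserving the irq slot */
	if (ret < 0)
		goto out_put_module;

	ret = get_b(dev);		/* e.g. request_threaded_irq() */
	if (ret < 0)
		goto out_put_a;

	return 0;

out_put_a:
	put_a(dev);
out_put_module:
	module_put(mod);		/* undone in reverse order */
	return ret;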
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index b4dbb3912977..651d57b8abbf 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -1011,6 +1011,7 @@ static int apds9960_probe(struct i2c_client *client,
1011 1011
1012 iio_device_attach_buffer(indio_dev, buffer); 1012 iio_device_attach_buffer(indio_dev, buffer);
1013 1013
1014 indio_dev->dev.parent = &client->dev;
1014 indio_dev->info = &apds9960_info; 1015 indio_dev->info = &apds9960_info;
1015 indio_dev->name = APDS9960_DRV_NAME; 1016 indio_dev->name = APDS9960_DRV_NAME;
1016 indio_dev->channels = apds9960_channels; 1017 indio_dev->channels = apds9960_channels;
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
index 72b364e4aa72..b54dcba05a82 100644
--- a/drivers/iio/light/bh1780.c
+++ b/drivers/iio/light/bh1780.c
@@ -84,7 +84,7 @@ static int bh1780_debugfs_reg_access(struct iio_dev *indio_dev,
84 int ret; 84 int ret;
85 85
86 if (!readval) 86 if (!readval)
87 bh1780_write(bh1780, (u8)reg, (u8)writeval); 87 return bh1780_write(bh1780, (u8)reg, (u8)writeval);
88 88
89 ret = bh1780_read(bh1780, (u8)reg); 89 ret = bh1780_read(bh1780, (u8)reg);
90 if (ret < 0) 90 if (ret < 0)
@@ -187,7 +187,7 @@ static int bh1780_probe(struct i2c_client *client,
187 187
188 indio_dev->dev.parent = &client->dev; 188 indio_dev->dev.parent = &client->dev;
189 indio_dev->info = &bh1780_info; 189 indio_dev->info = &bh1780_info;
190 indio_dev->name = id->name; 190 indio_dev->name = "bh1780";
191 indio_dev->channels = bh1780_channels; 191 indio_dev->channels = bh1780_channels;
192 indio_dev->num_channels = ARRAY_SIZE(bh1780_channels); 192 indio_dev->num_channels = ARRAY_SIZE(bh1780_channels);
193 indio_dev->modes = INDIO_DIRECT_MODE; 193 indio_dev->modes = INDIO_DIRECT_MODE;
@@ -226,7 +226,8 @@ static int bh1780_remove(struct i2c_client *client)
226static int bh1780_runtime_suspend(struct device *dev) 226static int bh1780_runtime_suspend(struct device *dev)
227{ 227{
228 struct i2c_client *client = to_i2c_client(dev); 228 struct i2c_client *client = to_i2c_client(dev);
229 struct bh1780_data *bh1780 = i2c_get_clientdata(client); 229 struct iio_dev *indio_dev = i2c_get_clientdata(client);
230 struct bh1780_data *bh1780 = iio_priv(indio_dev);
230 int ret; 231 int ret;
231 232
232 ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF); 233 ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF);
@@ -241,7 +242,8 @@ static int bh1780_runtime_suspend(struct device *dev)
241static int bh1780_runtime_resume(struct device *dev) 242static int bh1780_runtime_resume(struct device *dev)
242{ 243{
243 struct i2c_client *client = to_i2c_client(dev); 244 struct i2c_client *client = to_i2c_client(dev);
244 struct bh1780_data *bh1780 = i2c_get_clientdata(client); 245 struct iio_dev *indio_dev = i2c_get_clientdata(client);
246 struct bh1780_data *bh1780 = iio_priv(indio_dev);
245 int ret; 247 int ret;
246 248
247 ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON); 249 ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON);
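The bh1780 suspend/resume crash came down to what probe stores as the i2c clientdata: an IIO driver conventionally stashes the struct iio_dev, so the PM callbacks must unwrap it in two steps rather than casting the clientdata to the private struct directly. A sketch of that convention:

/* In probe (conventional IIO pattern) */
i2c_set_clientdata(client, indio_dev);

/* In the PM callbacks */
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct bh1780_data *bh1780 = iio_priv(indio_dev);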
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index e01e58a9bd14..f17cb2ea18f5 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -147,7 +147,6 @@ static const struct iio_chan_spec max44000_channels[] = {
147 { 147 {
148 .type = IIO_PROXIMITY, 148 .type = IIO_PROXIMITY,
149 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), 149 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
150 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
151 .scan_index = MAX44000_SCAN_INDEX_PRX, 150 .scan_index = MAX44000_SCAN_INDEX_PRX,
152 .scan_type = { 151 .scan_type = {
153 .sign = 'u', 152 .sign = 'u',
diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c
index ecd3bd0a9769..0a9e8fadfa9d 100644
--- a/drivers/iio/magnetometer/st_magn_buffer.c
+++ b/drivers/iio/magnetometer/st_magn_buffer.c
@@ -82,7 +82,7 @@ static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = {
82 82
83int st_magn_allocate_ring(struct iio_dev *indio_dev) 83int st_magn_allocate_ring(struct iio_dev *indio_dev)
84{ 84{
85 return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, 85 return iio_triggered_buffer_setup(indio_dev, NULL,
86 &st_sensors_trigger_handler, &st_magn_buffer_setup_ops); 86 &st_sensors_trigger_handler, &st_magn_buffer_setup_ops);
87} 87}
88 88
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 62036d2a9956..8250fc322c56 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -572,6 +572,7 @@ static const struct iio_info magn_info = {
572static const struct iio_trigger_ops st_magn_trigger_ops = { 572static const struct iio_trigger_ops st_magn_trigger_ops = {
573 .owner = THIS_MODULE, 573 .owner = THIS_MODULE,
574 .set_trigger_state = ST_MAGN_TRIGGER_SET_STATE, 574 .set_trigger_state = ST_MAGN_TRIGGER_SET_STATE,
575 .validate_device = st_sensors_validate_device,
575}; 576};
576#define ST_MAGN_TRIGGER_OPS (&st_magn_trigger_ops) 577#define ST_MAGN_TRIGGER_OPS (&st_magn_trigger_ops)
577#else 578#else
diff --git a/drivers/iio/pressure/bmp280.c b/drivers/iio/pressure/bmp280.c
index 2f1498e12bb2..724452d61846 100644
--- a/drivers/iio/pressure/bmp280.c
+++ b/drivers/iio/pressure/bmp280.c
@@ -879,8 +879,8 @@ static int bmp280_probe(struct i2c_client *client,
879 if (ret < 0) 879 if (ret < 0)
880 return ret; 880 return ret;
881 if (chip_id != id->driver_data) { 881 if (chip_id != id->driver_data) {
882 dev_err(&client->dev, "bad chip id. expected %x got %x\n", 882 dev_err(&client->dev, "bad chip id. expected %lx got %x\n",
883 BMP280_CHIP_ID, chip_id); 883 id->driver_data, chip_id);
884 return -EINVAL; 884 return -EINVAL;
885 } 885 }
886 886
diff --git a/drivers/iio/pressure/st_pressure_buffer.c b/drivers/iio/pressure/st_pressure_buffer.c
index 2ff53f222352..99468d0a64e7 100644
--- a/drivers/iio/pressure/st_pressure_buffer.c
+++ b/drivers/iio/pressure/st_pressure_buffer.c
@@ -82,7 +82,7 @@ static const struct iio_buffer_setup_ops st_press_buffer_setup_ops = {
82 82
83int st_press_allocate_ring(struct iio_dev *indio_dev) 83int st_press_allocate_ring(struct iio_dev *indio_dev)
84{ 84{
85 return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, 85 return iio_triggered_buffer_setup(indio_dev, NULL,
86 &st_sensors_trigger_handler, &st_press_buffer_setup_ops); 86 &st_sensors_trigger_handler, &st_press_buffer_setup_ops);
87} 87}
88 88
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 9e9b72a8f18f..92a118c3c4ac 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -28,15 +28,21 @@
28#include <linux/iio/common/st_sensors.h> 28#include <linux/iio/common/st_sensors.h>
29#include "st_pressure.h" 29#include "st_pressure.h"
30 30
31#define MCELSIUS_PER_CELSIUS 1000
32
33/* Default pressure sensitivity */
31#define ST_PRESS_LSB_PER_MBAR 4096UL 34#define ST_PRESS_LSB_PER_MBAR 4096UL
32#define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \ 35#define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \
33 ST_PRESS_LSB_PER_MBAR) 36 ST_PRESS_LSB_PER_MBAR)
37
38/* Default temperature sensitivity */
34#define ST_PRESS_LSB_PER_CELSIUS 480UL 39#define ST_PRESS_LSB_PER_CELSIUS 480UL
35#define ST_PRESS_CELSIUS_NANO_SCALE (1000000000UL / \ 40#define ST_PRESS_MILLI_CELSIUS_OFFSET 42500UL
36 ST_PRESS_LSB_PER_CELSIUS) 41
37#define ST_PRESS_NUMBER_DATA_CHANNELS 1 42#define ST_PRESS_NUMBER_DATA_CHANNELS 1
38 43
39/* FULLSCALE */ 44/* FULLSCALE */
45#define ST_PRESS_FS_AVL_1100MB 1100
40#define ST_PRESS_FS_AVL_1260MB 1260 46#define ST_PRESS_FS_AVL_1260MB 1260
41 47
42#define ST_PRESS_1_OUT_XL_ADDR 0x28 48#define ST_PRESS_1_OUT_XL_ADDR 0x28
@@ -54,9 +60,6 @@
54#define ST_PRESS_LPS331AP_PW_MASK 0x80 60#define ST_PRESS_LPS331AP_PW_MASK 0x80
55#define ST_PRESS_LPS331AP_FS_ADDR 0x23 61#define ST_PRESS_LPS331AP_FS_ADDR 0x23
56#define ST_PRESS_LPS331AP_FS_MASK 0x30 62#define ST_PRESS_LPS331AP_FS_MASK 0x30
57#define ST_PRESS_LPS331AP_FS_AVL_1260_VAL 0x00
58#define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
59#define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
60#define ST_PRESS_LPS331AP_BDU_ADDR 0x20 63#define ST_PRESS_LPS331AP_BDU_ADDR 0x20
61#define ST_PRESS_LPS331AP_BDU_MASK 0x04 64#define ST_PRESS_LPS331AP_BDU_MASK 0x04
62#define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22 65#define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22
@@ -67,9 +70,14 @@
67#define ST_PRESS_LPS331AP_OD_IRQ_ADDR 0x22 70#define ST_PRESS_LPS331AP_OD_IRQ_ADDR 0x22
68#define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40 71#define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40
69#define ST_PRESS_LPS331AP_MULTIREAD_BIT true 72#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
70#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500
71 73
72/* CUSTOM VALUES FOR LPS001WP SENSOR */ 74/* CUSTOM VALUES FOR LPS001WP SENSOR */
75
76/* LPS001WP pressure resolution */
77#define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL
78/* LPS001WP temperature resolution */
79#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS 64UL
80
73#define ST_PRESS_LPS001WP_WAI_EXP 0xba 81#define ST_PRESS_LPS001WP_WAI_EXP 0xba
74#define ST_PRESS_LPS001WP_ODR_ADDR 0x20 82#define ST_PRESS_LPS001WP_ODR_ADDR 0x20
75#define ST_PRESS_LPS001WP_ODR_MASK 0x30 83#define ST_PRESS_LPS001WP_ODR_MASK 0x30
@@ -78,6 +86,8 @@
78#define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03 86#define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03
79#define ST_PRESS_LPS001WP_PW_ADDR 0x20 87#define ST_PRESS_LPS001WP_PW_ADDR 0x20
80#define ST_PRESS_LPS001WP_PW_MASK 0x40 88#define ST_PRESS_LPS001WP_PW_MASK 0x40
89#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \
90 (100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR)
81#define ST_PRESS_LPS001WP_BDU_ADDR 0x20 91#define ST_PRESS_LPS001WP_BDU_ADDR 0x20
82#define ST_PRESS_LPS001WP_BDU_MASK 0x04 92#define ST_PRESS_LPS001WP_BDU_MASK 0x04
83#define ST_PRESS_LPS001WP_MULTIREAD_BIT true 93#define ST_PRESS_LPS001WP_MULTIREAD_BIT true
@@ -94,11 +104,6 @@
94#define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04 104#define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04
95#define ST_PRESS_LPS25H_PW_ADDR 0x20 105#define ST_PRESS_LPS25H_PW_ADDR 0x20
96#define ST_PRESS_LPS25H_PW_MASK 0x80 106#define ST_PRESS_LPS25H_PW_MASK 0x80
97#define ST_PRESS_LPS25H_FS_ADDR 0x00
98#define ST_PRESS_LPS25H_FS_MASK 0x00
99#define ST_PRESS_LPS25H_FS_AVL_1260_VAL 0x00
100#define ST_PRESS_LPS25H_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
101#define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
102#define ST_PRESS_LPS25H_BDU_ADDR 0x20 107#define ST_PRESS_LPS25H_BDU_ADDR 0x20
103#define ST_PRESS_LPS25H_BDU_MASK 0x04 108#define ST_PRESS_LPS25H_BDU_MASK 0x04
104#define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23 109#define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23
@@ -109,7 +114,6 @@
109#define ST_PRESS_LPS25H_OD_IRQ_ADDR 0x22 114#define ST_PRESS_LPS25H_OD_IRQ_ADDR 0x22
110#define ST_PRESS_LPS25H_OD_IRQ_MASK 0x40 115#define ST_PRESS_LPS25H_OD_IRQ_MASK 0x40
111#define ST_PRESS_LPS25H_MULTIREAD_BIT true 116#define ST_PRESS_LPS25H_MULTIREAD_BIT true
112#define ST_PRESS_LPS25H_TEMP_OFFSET 42500
113#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28 117#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
114#define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b 118#define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b
115 119
@@ -161,7 +165,9 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
161 .storagebits = 16, 165 .storagebits = 16,
162 .endianness = IIO_LE, 166 .endianness = IIO_LE,
163 }, 167 },
164 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), 168 .info_mask_separate =
169 BIT(IIO_CHAN_INFO_RAW) |
170 BIT(IIO_CHAN_INFO_SCALE),
165 .modified = 0, 171 .modified = 0,
166 }, 172 },
167 { 173 {
@@ -177,7 +183,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
177 }, 183 },
178 .info_mask_separate = 184 .info_mask_separate =
179 BIT(IIO_CHAN_INFO_RAW) | 185 BIT(IIO_CHAN_INFO_RAW) |
180 BIT(IIO_CHAN_INFO_OFFSET), 186 BIT(IIO_CHAN_INFO_SCALE),
181 .modified = 0, 187 .modified = 0,
182 }, 188 },
183 IIO_CHAN_SOFT_TIMESTAMP(1) 189 IIO_CHAN_SOFT_TIMESTAMP(1)
@@ -212,11 +218,14 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
212 .addr = ST_PRESS_LPS331AP_FS_ADDR, 218 .addr = ST_PRESS_LPS331AP_FS_ADDR,
213 .mask = ST_PRESS_LPS331AP_FS_MASK, 219 .mask = ST_PRESS_LPS331AP_FS_MASK,
214 .fs_avl = { 220 .fs_avl = {
221 /*
222 * Pressure and temperature sensitivity values
223 * as defined in table 3 of LPS331AP datasheet.
224 */
215 [0] = { 225 [0] = {
216 .num = ST_PRESS_FS_AVL_1260MB, 226 .num = ST_PRESS_FS_AVL_1260MB,
217 .value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL, 227 .gain = ST_PRESS_KPASCAL_NANO_SCALE,
218 .gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN, 228 .gain2 = ST_PRESS_LSB_PER_CELSIUS,
219 .gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN,
220 }, 229 },
221 }, 230 },
222 }, 231 },
@@ -261,7 +270,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
261 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, 270 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
262 }, 271 },
263 .fs = { 272 .fs = {
264 .addr = 0, 273 .fs_avl = {
274 /*
275 * Pressure and temperature resolution values
276 * as defined in table 3 of LPS001WP datasheet.
277 */
278 [0] = {
279 .num = ST_PRESS_FS_AVL_1100MB,
280 .gain = ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN,
281 .gain2 = ST_PRESS_LPS001WP_LSB_PER_CELSIUS,
282 },
283 },
265 }, 284 },
266 .bdu = { 285 .bdu = {
267 .addr = ST_PRESS_LPS001WP_BDU_ADDR, 286 .addr = ST_PRESS_LPS001WP_BDU_ADDR,
@@ -298,14 +317,15 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
298 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, 317 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
299 }, 318 },
300 .fs = { 319 .fs = {
301 .addr = ST_PRESS_LPS25H_FS_ADDR,
302 .mask = ST_PRESS_LPS25H_FS_MASK,
303 .fs_avl = { 320 .fs_avl = {
321 /*
322 * Pressure and temperature sensitivity values
323 * as defined in table 3 of LPS25H datasheet.
324 */
304 [0] = { 325 [0] = {
305 .num = ST_PRESS_FS_AVL_1260MB, 326 .num = ST_PRESS_FS_AVL_1260MB,
306 .value = ST_PRESS_LPS25H_FS_AVL_1260_VAL, 327 .gain = ST_PRESS_KPASCAL_NANO_SCALE,
307 .gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN, 328 .gain2 = ST_PRESS_LSB_PER_CELSIUS,
308 .gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN,
309 }, 329 },
310 }, 330 },
311 }, 331 },
@@ -364,26 +384,26 @@ static int st_press_read_raw(struct iio_dev *indio_dev,
364 384
365 return IIO_VAL_INT; 385 return IIO_VAL_INT;
366 case IIO_CHAN_INFO_SCALE: 386 case IIO_CHAN_INFO_SCALE:
367 *val = 0;
368
369 switch (ch->type) { 387 switch (ch->type) {
370 case IIO_PRESSURE: 388 case IIO_PRESSURE:
389 *val = 0;
371 *val2 = press_data->current_fullscale->gain; 390 *val2 = press_data->current_fullscale->gain;
372 break; 391 return IIO_VAL_INT_PLUS_NANO;
373 case IIO_TEMP: 392 case IIO_TEMP:
393 *val = MCELSIUS_PER_CELSIUS;
374 *val2 = press_data->current_fullscale->gain2; 394 *val2 = press_data->current_fullscale->gain2;
375 break; 395 return IIO_VAL_FRACTIONAL;
376 default: 396 default:
377 err = -EINVAL; 397 err = -EINVAL;
378 goto read_error; 398 goto read_error;
379 } 399 }
380 400
381 return IIO_VAL_INT_PLUS_NANO;
382 case IIO_CHAN_INFO_OFFSET: 401 case IIO_CHAN_INFO_OFFSET:
383 switch (ch->type) { 402 switch (ch->type) {
384 case IIO_TEMP: 403 case IIO_TEMP:
385 *val = 425; 404 *val = ST_PRESS_MILLI_CELSIUS_OFFSET *
386 *val2 = 10; 405 press_data->current_fullscale->gain2;
406 *val2 = MCELSIUS_PER_CELSIUS;
387 break; 407 break;
388 default: 408 default:
389 err = -EINVAL; 409 err = -EINVAL;
@@ -425,6 +445,7 @@ static const struct iio_info press_info = {
425static const struct iio_trigger_ops st_press_trigger_ops = { 445static const struct iio_trigger_ops st_press_trigger_ops = {
426 .owner = THIS_MODULE, 446 .owner = THIS_MODULE,
427 .set_trigger_state = ST_PRESS_TRIGGER_SET_STATE, 447 .set_trigger_state = ST_PRESS_TRIGGER_SET_STATE,
448 .validate_device = st_sensors_validate_device,
428}; 449};
429#define ST_PRESS_TRIGGER_OPS (&st_press_trigger_ops) 450#define ST_PRESS_TRIGGER_OPS (&st_press_trigger_ops)
430#else 451#else
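With the settings above, the LPS331AP/LPS25H temperature channel encodes the datasheet formula T = 42.5 degC + raw / 480: the scale becomes MCELSIUS_PER_CELSIUS / gain2 = 1000/480 millidegrees per LSB, and the offset 42500 * 480 / 1000 = 20400 LSB. A quick numeric check of that encoding, assuming the (raw + offset) * scale pipeline:

long raw = 960;				/* 960 / 480 = 2 degC above 42.5 */
long offset = 42500L * 480 / 1000;	/* 20400 LSB */
long milli_c = (raw + offset) * 1000 / 480;
/* milli_c == 44500 -> 44.5 degC, as the datasheet formula predicts */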
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index f4d29d5dbd5f..e2f926cdcad2 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -64,6 +64,7 @@ struct as3935_state {
64 struct delayed_work work; 64 struct delayed_work work;
65 65
66 u32 tune_cap; 66 u32 tune_cap;
67 u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
67 u8 buf[2] ____cacheline_aligned; 68 u8 buf[2] ____cacheline_aligned;
68}; 69};
69 70
@@ -72,7 +73,8 @@ static const struct iio_chan_spec as3935_channels[] = {
72 .type = IIO_PROXIMITY, 73 .type = IIO_PROXIMITY,
73 .info_mask_separate = 74 .info_mask_separate =
74 BIT(IIO_CHAN_INFO_RAW) | 75 BIT(IIO_CHAN_INFO_RAW) |
75 BIT(IIO_CHAN_INFO_PROCESSED), 76 BIT(IIO_CHAN_INFO_PROCESSED) |
77 BIT(IIO_CHAN_INFO_SCALE),
76 .scan_index = 0, 78 .scan_index = 0,
77 .scan_type = { 79 .scan_type = {
78 .sign = 'u', 80 .sign = 'u',
@@ -181,7 +183,12 @@ static int as3935_read_raw(struct iio_dev *indio_dev,
181 /* storm out of range */ 183 /* storm out of range */
182 if (*val == AS3935_DATA_MASK) 184 if (*val == AS3935_DATA_MASK)
183 return -EINVAL; 185 return -EINVAL;
184 *val *= 1000; 186
187 if (m == IIO_CHAN_INFO_PROCESSED)
188 *val *= 1000;
189 break;
190 case IIO_CHAN_INFO_SCALE:
191 *val = 1000;
185 break; 192 break;
186 default: 193 default:
187 return -EINVAL; 194 return -EINVAL;
@@ -206,10 +213,10 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
206 ret = as3935_read(st, AS3935_DATA, &val); 213 ret = as3935_read(st, AS3935_DATA, &val);
207 if (ret) 214 if (ret)
208 goto err_read; 215 goto err_read;
209 val &= AS3935_DATA_MASK;
210 val *= 1000;
211 216
212 iio_push_to_buffers_with_timestamp(indio_dev, &val, pf->timestamp); 217 st->buffer[0] = val & AS3935_DATA_MASK;
218 iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
219 pf->timestamp);
213err_read: 220err_read:
214 iio_trigger_notify_done(indio_dev->trig); 221 iio_trigger_notify_done(indio_dev->trig);
215 222
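The as3935 change also shows the layout contract behind iio_push_to_buffers_with_timestamp(): the buffer handed over must cover the whole scan, with the s64 timestamp in an 8-byte-aligned slot at the end; pushing a bare stack u32, as before, under-sizes it once the timestamp channel is enabled. The new 16-byte buffer corresponds to a layout like the following sketch, not the driver's actual type:

struct as3935_scan {
	u8 chan;		/* 8-bit proximity sample */
	u8 pad[7];		/* 56 bits of padding for alignment */
	s64 timestamp;		/* filled in by the IIO core */
};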
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 040966775f40..1a2984c28b95 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -411,7 +411,9 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
411 411
412 for (ix = 0; ix < table->sz; ix++) 412 for (ix = 0; ix < table->sz; ix++)
413 if (table->data_vec[ix].attr.ndev == ndev) 413 if (table->data_vec[ix].attr.ndev == ndev)
414 if (!del_gid(ib_dev, port, table, ix, false)) 414 if (!del_gid(ib_dev, port, table, ix,
415 !!(table->data_vec[ix].props &
416 GID_TABLE_ENTRY_DEFAULT)))
415 deleted = true; 417 deleted = true;
416 418
417 write_unlock_irq(&table->rwlock); 419 write_unlock_irq(&table->rwlock);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f0c91ba3178a..ad1b1adcf6f0 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -708,17 +708,6 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
708 complete(&id_priv->comp); 708 complete(&id_priv->comp);
709} 709}
710 710
711static int cma_disable_callback(struct rdma_id_private *id_priv,
712 enum rdma_cm_state state)
713{
714 mutex_lock(&id_priv->handler_mutex);
715 if (id_priv->state != state) {
716 mutex_unlock(&id_priv->handler_mutex);
717 return -EINVAL;
718 }
719 return 0;
720}
721
722struct rdma_cm_id *rdma_create_id(struct net *net, 711struct rdma_cm_id *rdma_create_id(struct net *net,
723 rdma_cm_event_handler event_handler, 712 rdma_cm_event_handler event_handler,
724 void *context, enum rdma_port_space ps, 713 void *context, enum rdma_port_space ps,
@@ -1671,11 +1660,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1671 struct rdma_cm_event event; 1660 struct rdma_cm_event event;
1672 int ret = 0; 1661 int ret = 0;
1673 1662
1663 mutex_lock(&id_priv->handler_mutex);
1674 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && 1664 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
1675 cma_disable_callback(id_priv, RDMA_CM_CONNECT)) || 1665 id_priv->state != RDMA_CM_CONNECT) ||
1676 (ib_event->event == IB_CM_TIMEWAIT_EXIT && 1666 (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
1677 cma_disable_callback(id_priv, RDMA_CM_DISCONNECT))) 1667 id_priv->state != RDMA_CM_DISCONNECT))
1678 return 0; 1668 goto out;
1679 1669
1680 memset(&event, 0, sizeof event); 1670 memset(&event, 0, sizeof event);
1681 switch (ib_event->event) { 1671 switch (ib_event->event) {
@@ -1870,7 +1860,7 @@ static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_e
1870 1860
1871static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 1861static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1872{ 1862{
1873 struct rdma_id_private *listen_id, *conn_id; 1863 struct rdma_id_private *listen_id, *conn_id = NULL;
1874 struct rdma_cm_event event; 1864 struct rdma_cm_event event;
1875 struct net_device *net_dev; 1865 struct net_device *net_dev;
1876 int offset, ret; 1866 int offset, ret;
@@ -1884,9 +1874,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1884 goto net_dev_put; 1874 goto net_dev_put;
1885 } 1875 }
1886 1876
1887 if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) { 1877 mutex_lock(&listen_id->handler_mutex);
1878 if (listen_id->state != RDMA_CM_LISTEN) {
1888 ret = -ECONNABORTED; 1879 ret = -ECONNABORTED;
1889 goto net_dev_put; 1880 goto err1;
1890 } 1881 }
1891 1882
1892 memset(&event, 0, sizeof event); 1883 memset(&event, 0, sizeof event);
@@ -1976,8 +1967,9 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1976 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 1967 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
1977 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 1968 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
1978 1969
1979 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) 1970 mutex_lock(&id_priv->handler_mutex);
1980 return 0; 1971 if (id_priv->state != RDMA_CM_CONNECT)
1972 goto out;
1981 1973
1982 memset(&event, 0, sizeof event); 1974 memset(&event, 0, sizeof event);
1983 switch (iw_event->event) { 1975 switch (iw_event->event) {
@@ -2029,6 +2021,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
2029 return ret; 2021 return ret;
2030 } 2022 }
2031 2023
2024out:
2032 mutex_unlock(&id_priv->handler_mutex); 2025 mutex_unlock(&id_priv->handler_mutex);
2033 return ret; 2026 return ret;
2034} 2027}
@@ -2039,13 +2032,15 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
2039 struct rdma_cm_id *new_cm_id; 2032 struct rdma_cm_id *new_cm_id;
2040 struct rdma_id_private *listen_id, *conn_id; 2033 struct rdma_id_private *listen_id, *conn_id;
2041 struct rdma_cm_event event; 2034 struct rdma_cm_event event;
2042 int ret; 2035 int ret = -ECONNABORTED;
2043 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 2036 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
2044 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 2037 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
2045 2038
2046 listen_id = cm_id->context; 2039 listen_id = cm_id->context;
2047 if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) 2040
2048 return -ECONNABORTED; 2041 mutex_lock(&listen_id->handler_mutex);
2042 if (listen_id->state != RDMA_CM_LISTEN)
2043 goto out;
2049 2044
2050 /* Create a new RDMA id for the new IW CM ID */ 2045 /* Create a new RDMA id for the new IW CM ID */
2051 new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net, 2046 new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net,
@@ -3216,8 +3211,9 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
3216 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; 3211 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
3217 int ret = 0; 3212 int ret = 0;
3218 3213
3219 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) 3214 mutex_lock(&id_priv->handler_mutex);
3220 return 0; 3215 if (id_priv->state != RDMA_CM_CONNECT)
3216 goto out;
3221 3217
3222 memset(&event, 0, sizeof event); 3218 memset(&event, 0, sizeof event);
3223 switch (ib_event->event) { 3219 switch (ib_event->event) {
@@ -3673,12 +3669,13 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
3673 struct rdma_id_private *id_priv; 3669 struct rdma_id_private *id_priv;
3674 struct cma_multicast *mc = multicast->context; 3670 struct cma_multicast *mc = multicast->context;
3675 struct rdma_cm_event event; 3671 struct rdma_cm_event event;
3676 int ret; 3672 int ret = 0;
3677 3673
3678 id_priv = mc->id_priv; 3674 id_priv = mc->id_priv;
3679 if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) && 3675 mutex_lock(&id_priv->handler_mutex);
3680 cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED)) 3676 if (id_priv->state != RDMA_CM_ADDR_BOUND &&
3681 return 0; 3677 id_priv->state != RDMA_CM_ADDR_RESOLVED)
3678 goto out;
3682 3679
3683 if (!status) 3680 if (!status)
3684 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); 3681 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
@@ -3720,6 +3717,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
3720 return 0; 3717 return 0;
3721 } 3718 }
3722 3719
3720out:
3723 mutex_unlock(&id_priv->handler_mutex); 3721 mutex_unlock(&id_priv->handler_mutex);
3724 return 0; 3722 return 0;
3725} 3723}
@@ -3878,12 +3876,12 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
3878 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - 3876 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
3879 rdma_start_port(id_priv->cma_dev->device)]; 3877 rdma_start_port(id_priv->cma_dev->device)];
3880 if (addr->sa_family == AF_INET) { 3878 if (addr->sa_family == AF_INET) {
3881 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) 3879 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
3880 mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
3882 err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, 3881 err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
3883 true); 3882 true);
3884 if (!err) { 3883 if (!err)
3885 mc->igmp_joined = true; 3884 mc->igmp_joined = true;
3886 mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
3887 } 3885 }
3888 } else { 3886 } else {
3889 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) 3887 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
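The cma.c rework removes cma_disable_callback(), which returned with handler_mutex held on success but unlocked on failure, a shape that is easy to misuse. Every handler now takes the lock unconditionally, checks the state, and funnels all exits through one unlock:

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != RDMA_CM_CONNECT)
		goto out;	/* wrong state: still falls through to unlock */

	/* event handling runs here, under the mutex */

out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;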
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 1a8babb8ee3c..825021d1008b 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1747,7 +1747,7 @@ static int create_qp(struct ib_uverbs_file *file,
1747 struct ib_srq *srq = NULL; 1747 struct ib_srq *srq = NULL;
1748 struct ib_qp *qp; 1748 struct ib_qp *qp;
1749 char *buf; 1749 char *buf;
1750 struct ib_qp_init_attr attr; 1750 struct ib_qp_init_attr attr = {};
1751 struct ib_uverbs_ex_create_qp_resp resp; 1751 struct ib_uverbs_ex_create_qp_resp resp;
1752 int ret; 1752 int ret;
1753 1753
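The one-character create_qp() fix matters because struct ib_qp_init_attr is only partially filled before use: an empty initializer zeroes every member (and padding), so nothing uninitialized leaks from the kernel stack:

struct ib_qp_init_attr attr = {};	/* all fields start at 0/NULL */

attr.qp_type = qp_type;			/* illustrative field assignment */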
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 1d7d4cf442e3..6298f54b4137 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -511,12 +511,16 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
511 ah_attr->grh.dgid = sgid; 511 ah_attr->grh.dgid = sgid;
512 512
513 if (!rdma_cap_eth_ah(device, port_num)) { 513 if (!rdma_cap_eth_ah(device, port_num)) {
514 ret = ib_find_cached_gid_by_port(device, &dgid, 514 if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
515 IB_GID_TYPE_IB, 515 ret = ib_find_cached_gid_by_port(device, &dgid,
516 port_num, NULL, 516 IB_GID_TYPE_IB,
517 &gid_index); 517 port_num, NULL,
518 if (ret) 518 &gid_index);
519 return ret; 519 if (ret)
520 return ret;
521 } else {
522 gid_index = 0;
523 }
520 } 524 }
521 525
522 ah_attr->grh.sgid_index = (u8) gid_index; 526 ah_attr->grh.sgid_index = (u8) gid_index;
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 81619fbb5842..f5de85178055 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1037,7 +1037,7 @@ static void dc_shutdown(struct hfi1_devdata *);
1037static void dc_start(struct hfi1_devdata *); 1037static void dc_start(struct hfi1_devdata *);
1038static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, 1038static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1039 unsigned int *np); 1039 unsigned int *np);
1040static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd); 1040static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1041 1041
1042/* 1042/*
1043 * Error interrupt table entry. This is used as input to the interrupt 1043 * Error interrupt table entry. This is used as input to the interrupt
@@ -6962,8 +6962,6 @@ void handle_link_down(struct work_struct *work)
6962 } 6962 }
6963 6963
6964 reset_neighbor_info(ppd); 6964 reset_neighbor_info(ppd);
6965 if (ppd->mgmt_allowed)
6966 remove_full_mgmt_pkey(ppd);
6967 6965
6968 /* disable the port */ 6966 /* disable the port */
6969 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); 6967 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
@@ -7070,12 +7068,16 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7070 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY); 7068 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
7071 ppd->pkeys[2] = FULL_MGMT_P_KEY; 7069 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7072 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); 7070 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7071 hfi1_event_pkey_change(ppd->dd, ppd->port);
7073} 7072}
7074 7073
7075static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd) 7074static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7076{ 7075{
7077 ppd->pkeys[2] = 0; 7076 if (ppd->pkeys[2] != 0) {
7078 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); 7077 ppd->pkeys[2] = 0;
7078 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7079 hfi1_event_pkey_change(ppd->dd, ppd->port);
7080 }
7079} 7081}
7080 7082
7081/* 7083/*
@@ -9168,6 +9170,13 @@ int start_link(struct hfi1_pportdata *ppd)
9168 return 0; 9170 return 0;
9169 } 9171 }
9170 9172
9173 /*
9174 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9175 * pkey table can be configured properly if the HFI unit is connected
 9176 * to a switch port with MgmtAllowed=NO
9177 */
9178 clear_full_mgmt_pkey(ppd);
9179
9171 return set_link_state(ppd, HLS_DN_POLL); 9180 return set_link_state(ppd, HLS_DN_POLL);
9172} 9181}
9173 9182
@@ -9777,7 +9786,7 @@ static void set_send_length(struct hfi1_pportdata *ppd)
9777 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2) 9786 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9778 & SEND_LEN_CHECK1_LEN_VL15_MASK) << 9787 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9779 SEND_LEN_CHECK1_LEN_VL15_SHIFT; 9788 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9780 int i; 9789 int i, j;
9781 u32 thres; 9790 u32 thres;
9782 9791
9783 for (i = 0; i < ppd->vls_supported; i++) { 9792 for (i = 0; i < ppd->vls_supported; i++) {
@@ -9801,7 +9810,10 @@ static void set_send_length(struct hfi1_pportdata *ppd)
9801 sc_mtu_to_threshold(dd->vld[i].sc, 9810 sc_mtu_to_threshold(dd->vld[i].sc,
9802 dd->vld[i].mtu, 9811 dd->vld[i].mtu,
9803 dd->rcd[0]->rcvhdrqentsize)); 9812 dd->rcd[0]->rcvhdrqentsize));
9804 sc_set_cr_threshold(dd->vld[i].sc, thres); 9813 for (j = 0; j < INIT_SC_PER_VL; j++)
9814 sc_set_cr_threshold(
9815 pio_select_send_context_vl(dd, j, i),
9816 thres);
9805 } 9817 }
9806 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50), 9818 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9807 sc_mtu_to_threshold(dd->vld[15].sc, 9819 sc_mtu_to_threshold(dd->vld[15].sc,
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 7a5b0e676cc7..c702a009608f 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -203,6 +203,9 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
203 203
204 switch (cmd) { 204 switch (cmd) {
205 case HFI1_IOCTL_ASSIGN_CTXT: 205 case HFI1_IOCTL_ASSIGN_CTXT:
206 if (uctxt)
207 return -EINVAL;
208
206 if (copy_from_user(&uinfo, 209 if (copy_from_user(&uinfo,
207 (struct hfi1_user_info __user *)arg, 210 (struct hfi1_user_info __user *)arg,
208 sizeof(uinfo))) 211 sizeof(uinfo)))
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 0d28a5a40fae..eed971ccd2a1 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1383,7 +1383,7 @@ static void postinit_cleanup(struct hfi1_devdata *dd)
1383static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1383static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1384{ 1384{
1385 int ret = 0, j, pidx, initfail; 1385 int ret = 0, j, pidx, initfail;
1386 struct hfi1_devdata *dd = NULL; 1386 struct hfi1_devdata *dd = ERR_PTR(-EINVAL);
1387 struct hfi1_pportdata *ppd; 1387 struct hfi1_pportdata *ppd;
1388 1388
1389 /* First, lock the non-writable module parameters */ 1389 /* First, lock the non-writable module parameters */
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 219029576ba0..fca07a1d6c28 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -78,6 +78,16 @@ static inline void clear_opa_smp_data(struct opa_smp *smp)
78 memset(data, 0, size); 78 memset(data, 0, size);
79} 79}
80 80
81void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
82{
83 struct ib_event event;
84
85 event.event = IB_EVENT_PKEY_CHANGE;
86 event.device = &dd->verbs_dev.rdi.ibdev;
87 event.element.port_num = port;
88 ib_dispatch_event(&event);
89}
90
81static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len) 91static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
82{ 92{
83 struct ib_mad_send_buf *send_buf; 93 struct ib_mad_send_buf *send_buf;
@@ -1418,15 +1428,10 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
1418 } 1428 }
1419 1429
1420 if (changed) { 1430 if (changed) {
1421 struct ib_event event;
1422
1423 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); 1431 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
1424 1432 hfi1_event_pkey_change(dd, port);
1425 event.event = IB_EVENT_PKEY_CHANGE;
1426 event.device = &dd->verbs_dev.rdi.ibdev;
1427 event.element.port_num = port;
1428 ib_dispatch_event(&event);
1429 } 1433 }
1434
1430 return 0; 1435 return 0;
1431} 1436}
1432 1437
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index 55ee08675333..8b734aaae88a 100644
--- a/drivers/infiniband/hw/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -434,4 +434,6 @@ struct sc2vlnt {
434 COUNTER_MASK(1, 3) | \ 434 COUNTER_MASK(1, 3) | \
435 COUNTER_MASK(1, 4)) 435 COUNTER_MASK(1, 4))
436 436
437void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port);
438
437#endif /* _HFI1_MAD_H */ 439#endif /* _HFI1_MAD_H */
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index d5edb1afbb8f..d4022450b73f 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -995,7 +995,7 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
995 /* counter is reset if occupancy count changes */ 995 /* counter is reset if occupancy count changes */
996 if (reg != reg_prev) 996 if (reg != reg_prev)
997 loop = 0; 997 loop = 0;
998 if (loop > 500) { 998 if (loop > 50000) {
999 /* timed out - bounce the link */ 999 /* timed out - bounce the link */
1000 dd_dev_err(dd, 1000 dd_dev_err(dd,
1001 "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n", 1001 "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
@@ -1798,6 +1798,21 @@ static void pio_map_rcu_callback(struct rcu_head *list)
1798} 1798}
1799 1799
1800/* 1800/*
1801 * Set credit return threshold for the kernel send context
1802 */
1803static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
1804{
1805 u32 thres;
1806
1807 thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
1808 50),
1809 sc_mtu_to_threshold(dd->kernel_send_context[scontext],
1810 dd->vld[i].mtu,
1811 dd->rcd[0]->rcvhdrqentsize));
1812 sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
1813}
1814
1815/*
1801 * pio_map_init - called when #vls change 1816 * pio_map_init - called when #vls change
1802 * @dd: hfi1_devdata 1817 * @dd: hfi1_devdata
1803 * @port: port number 1818 * @port: port number
@@ -1872,11 +1887,16 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
1872 if (!newmap->map[i]) 1887 if (!newmap->map[i])
1873 goto bail; 1888 goto bail;
1874 newmap->map[i]->mask = (1 << ilog2(sz)) - 1; 1889 newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
1875 /* assign send contexts */ 1890 /*
1891 * assign send contexts and
1892 * adjust credit return threshold
1893 */
1876 for (j = 0; j < sz; j++) { 1894 for (j = 0; j < sz; j++) {
1877 if (dd->kernel_send_context[scontext]) 1895 if (dd->kernel_send_context[scontext]) {
1878 newmap->map[i]->ksc[j] = 1896 newmap->map[i]->ksc[j] =
1879 dd->kernel_send_context[scontext]; 1897 dd->kernel_send_context[scontext];
1898 set_threshold(dd, scontext, i);
1899 }
1880 if (++scontext >= first_scontext + 1900 if (++scontext >= first_scontext +
1881 vl_scontexts[i]) 1901 vl_scontexts[i])
1882 /* wrap back to first send context */ 1902 /* wrap back to first send context */
diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c
index 2441669f0817..9fb561682c66 100644
--- a/drivers/infiniband/hw/hfi1/qsfp.c
+++ b/drivers/infiniband/hw/hfi1/qsfp.c
@@ -579,7 +579,8 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
579 579
580 if (ppd->qsfp_info.cache_valid) { 580 if (ppd->qsfp_info.cache_valid) {
581 if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS])) 581 if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
582 sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]); 582 snprintf(lenstr, sizeof(lenstr), "%dM ",
583 cache[QSFP_MOD_LEN_OFFS]);
583 584
584 power_byte = cache[QSFP_MOD_PWR_OFFS]; 585 power_byte = cache[QSFP_MOD_PWR_OFFS];
585 sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", 586 sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n",
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index bc95c4112c61..d8fb056526f8 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -92,11 +92,10 @@ void hfi1_put_txreq(struct verbs_txreq *tx)
92 92
93struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, 93struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
94 struct rvt_qp *qp) 94 struct rvt_qp *qp)
95 __must_hold(&qp->s_lock)
95{ 96{
96 struct verbs_txreq *tx = ERR_PTR(-EBUSY); 97 struct verbs_txreq *tx = ERR_PTR(-EBUSY);
97 unsigned long flags;
98 98
99 spin_lock_irqsave(&qp->s_lock, flags);
100 write_seqlock(&dev->iowait_lock); 99 write_seqlock(&dev->iowait_lock);
101 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { 100 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
102 struct hfi1_qp_priv *priv; 101 struct hfi1_qp_priv *priv;
@@ -116,7 +115,6 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
116 } 115 }
117out: 116out:
118 write_sequnlock(&dev->iowait_lock); 117 write_sequnlock(&dev->iowait_lock);
119 spin_unlock_irqrestore(&qp->s_lock, flags);
120 return tx; 118 return tx;
121} 119}
122 120
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 1cf69b2fe4a5..a1d6e0807f97 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -73,6 +73,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
73 73
74static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, 74static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
75 struct rvt_qp *qp) 75 struct rvt_qp *qp)
 76 __must_hold(&qp->s_lock)
76{ 77{
77 struct verbs_txreq *tx; 78 struct verbs_txreq *tx;
78 struct hfi1_qp_priv *priv = qp->priv; 79 struct hfi1_qp_priv *priv = qp->priv;
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 8b9532034558..b738acdb9b02 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -113,6 +113,8 @@
113 113
114#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types) 114#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
115#define IW_CFG_FPM_QP_COUNT 32768 115#define IW_CFG_FPM_QP_COUNT 32768
116#define I40IW_MAX_PAGES_PER_FMR 512
117#define I40IW_MIN_PAGES_PER_FMR 1
116 118
117#define I40IW_MTU_TO_MSS 40 119#define I40IW_MTU_TO_MSS 40
118#define I40IW_DEFAULT_MSS 1460 120#define I40IW_DEFAULT_MSS 1460
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 02a735b64208..33959ed14563 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -79,6 +79,7 @@ static int i40iw_query_device(struct ib_device *ibdev,
 	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
 	props->atomic_cap = IB_ATOMIC_NONE;
 	props->max_map_per_fmr = 1;
+	props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
 	return 0;
 }
 
@@ -1527,7 +1528,7 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
 	mutex_lock(&iwdev->pbl_mutex);
 	status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
 	mutex_unlock(&iwdev->pbl_mutex);
-	if (!status)
+	if (status)
 		goto err1;
 
 	if (palloc->level != I40IW_LEVEL_1)
@@ -2149,6 +2150,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
 			struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
 			struct i40iw_fast_reg_stag_info info;
 
+			memset(&info, 0, sizeof(info));
 			info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
 			info.access_rights |= i40iw_get_user_access(flags);
 			info.stag_key = reg_wr(ib_wr)->key & 0xff;
@@ -2158,10 +2160,14 @@ static int i40iw_post_send(struct ib_qp *ibqp,
 			info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
 			info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
 			info.total_len = iwmr->ibmr.length;
+			info.reg_addr_pa = *(u64 *)palloc->level1.addr;
 			info.first_pm_pbl_index = palloc->level1.idx;
 			info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
 			info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;
 
+			if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
+				info.chunk_size = 1;
+
 			if (page_shift == 21)
 				info.page_size = 1; /* 2M page */
 
@@ -2327,13 +2333,16 @@ static int i40iw_req_notify_cq(struct ib_cq *ibcq,
 {
 	struct i40iw_cq *iwcq;
 	struct i40iw_cq_uk *ukcq;
-	enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED;
+	unsigned long flags;
+	enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;
 
 	iwcq = (struct i40iw_cq *)ibcq;
 	ukcq = &iwcq->sc_cq.cq_uk;
-	if (notify_flags == IB_CQ_NEXT_COMP)
-		cq_notify = IW_CQ_COMPL_EVENT;
+	if (notify_flags == IB_CQ_SOLICITED)
+		cq_notify = IW_CQ_COMPL_SOLICITED;
+	spin_lock_irqsave(&iwcq->lock, flags);
 	ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
+	spin_unlock_irqrestore(&iwcq->lock, flags);
 	return 0;
 }
 
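The memset() added in i40iw_post_send() matters because `info` lives on the stack: any field the code does not explicitly assign (chunk_size on the single-page path, for instance) would otherwise carry whatever the stack last held. A runnable illustration of the hazard (the struct and values are made up):

#include <stdio.h>
#include <string.h>

struct reg_info {
	unsigned int access_rights;
	unsigned int chunk_size;	/* only assigned on some paths */
};

int main(void)
{
	struct reg_info info;

	/* Without this, info.chunk_size is indeterminate. */
	memset(&info, 0, sizeof(info));

	info.access_rights = 0x1;	/* fields set on every path */
	printf("chunk_size=%u\n", info.chunk_size);	/* reliably 0 */
	return 0;
}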
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 105246fba2e7..5fc623362731 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -47,6 +47,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
 
 	ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
 	ah->av.ib.g_slid = ah_attr->src_path_bits;
+	ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
 	if (ah_attr->ah_flags & IB_AH_GRH) {
 		ah->av.ib.g_slid |= 0x80;
 		ah->av.ib.gid_index = ah_attr->grh.sgid_index;
@@ -64,7 +65,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
 		    !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
 			--ah->av.ib.stat_rate;
 	}
-	ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
 
 	return &ah->ibah;
 }
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index d68f506c1922..9c2e53d28f98 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -527,7 +527,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 	tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
 	spin_unlock(&tun_qp->tx_lock);
 	if (ret)
-		goto out;
+		goto end;
 
 	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
 	if (tun_qp->tx_ring[tun_tx_ix].ah)
@@ -596,9 +596,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 	wr.wr.send_flags = IB_SEND_SIGNALED;
 
 	ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
-out:
-	if (ret)
-		ib_destroy_ah(ah);
+	if (!ret)
+		return 0;
+out:
+	spin_lock(&tun_qp->tx_lock);
+	tun_qp->tx_ix_tail++;
+	spin_unlock(&tun_qp->tx_lock);
+	tun_qp->tx_ring[tun_tx_ix].ah = NULL;
+end:
+	ib_destroy_ah(ah);
 	return ret;
 }
 
@@ -1326,9 +1332,15 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 
 
 	ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
+	if (!ret)
+		return 0;
+
+	spin_lock(&sqp->tx_lock);
+	sqp->tx_ix_tail++;
+	spin_unlock(&sqp->tx_lock);
+	sqp->tx_ring[wire_tx_ix].ah = NULL;
 out:
-	if (ret)
-		ib_destroy_ah(ah);
+	ib_destroy_ah(ah);
 	return ret;
 }
 
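Both mad.c send paths now follow the same shape: claim a ring slot under the lock, and if the post fails, give the slot back (advance the tail index under the same lock) before destroying the AH exactly once. A runnable userspace analog of that claim-then-rollback pattern, with a pthread mutex standing in for the spinlock (all names are illustrative):

#include <pthread.h>
#include <stdio.h>

#define RING_SIZE 16

struct ring {
	pthread_mutex_t lock;
	unsigned int head, tail;	/* head - tail = slots in flight */
};

/* Claim a slot; if the downstream post fails, reclaim it under the
 * same lock (mirrors the tx_ix_tail++ rollback above). */
static int send_one(struct ring *r, int (*post)(unsigned int slot))
{
	unsigned int slot;
	int ret;

	pthread_mutex_lock(&r->lock);
	slot = (++r->head) & (RING_SIZE - 1);
	pthread_mutex_unlock(&r->lock);

	ret = post(slot);
	if (!ret)
		return 0;	/* slot handed off to the completion path */

	pthread_mutex_lock(&r->lock);
	r->tail++;		/* post failed: give the slot back */
	pthread_mutex_unlock(&r->lock);
	return ret;
}

static int post_fail(unsigned int slot) { (void)slot; return -1; }

int main(void)
{
	struct ring r = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	printf("ret=%d tail=%u\n", send_one(&r, post_fail), r.tail);
	return 0;
}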
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 0eb09e104542..42a46078d7d5 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1704,6 +1704,9 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 	struct mlx4_dev	*dev = (to_mdev(qp->device))->dev;
 	int is_bonded = mlx4_is_bonded(dev);
 
+	if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
+		return ERR_PTR(-EINVAL);
+
 	if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
 	    (flow_attr->type != IB_FLOW_ATTR_NORMAL))
 		return ERR_PTR(-EOPNOTSUPP);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6c5ac5d8f32f..29acda249612 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -139,7 +139,7 @@ struct mlx4_ib_mr {
 	u32			max_pages;
 	struct mlx4_mr		mmr;
 	struct ib_umem	       *umem;
-	void			*pages_alloc;
+	size_t			page_map_size;
 };
 
 struct mlx4_ib_mw {
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 631272172a0b..5d73989d9771 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -277,20 +277,23 @@ mlx4_alloc_priv_pages(struct ib_device *device,
 		      struct mlx4_ib_mr *mr,
 		      int max_pages)
 {
-	int size = max_pages * sizeof(u64);
-	int add_size;
 	int ret;
 
-	add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+	/* Ensure that size is aligned to DMA cacheline
+	 * requirements.
+	 * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
+	 * so page_map_size will never cross PAGE_SIZE.
+	 */
+	mr->page_map_size = roundup(max_pages * sizeof(u64),
+				    MLX4_MR_PAGES_ALIGN);
 
-	mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
-	if (!mr->pages_alloc)
+	/* Prevent cross page boundary allocation. */
+	mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
+	if (!mr->pages)
 		return -ENOMEM;
 
-	mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);
-
 	mr->page_map = dma_map_single(device->dma_device, mr->pages,
-				      size, DMA_TO_DEVICE);
+				      mr->page_map_size, DMA_TO_DEVICE);
 
 	if (dma_mapping_error(device->dma_device, mr->page_map)) {
 		ret = -ENOMEM;
@@ -298,9 +301,9 @@ mlx4_alloc_priv_pages(struct ib_device *device,
 	}
 
 	return 0;
-err:
-	kfree(mr->pages_alloc);
 
+err:
+	free_page((unsigned long)mr->pages);
 	return ret;
 }
 
@@ -309,11 +312,10 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
 {
 	if (mr->pages) {
 		struct ib_device *device = mr->ibmr.device;
-		int size = mr->max_pages * sizeof(u64);
 
 		dma_unmap_single(device->dma_device, mr->page_map,
-				 size, DMA_TO_DEVICE);
-		kfree(mr->pages_alloc);
+				 mr->page_map_size, DMA_TO_DEVICE);
+		free_page((unsigned long)mr->pages);
 		mr->pages = NULL;
 	}
 }
@@ -537,14 +539,12 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	mr->npages = 0;
 
 	ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
-				   sizeof(u64) * mr->max_pages,
-				   DMA_TO_DEVICE);
+				   mr->page_map_size, DMA_TO_DEVICE);
 
 	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
 
 	ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
-				      sizeof(u64) * mr->max_pages,
-				      DMA_TO_DEVICE);
+				      mr->page_map_size, DMA_TO_DEVICE);
 
 	return rc;
 }
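The allocator change trades a kzalloc()-plus-PTR_ALIGN dance for one zeroed page: since max_pages * sizeof(u64) rounded up to MLX4_MR_PAGES_ALIGN never exceeds PAGE_SIZE, get_zeroed_page() yields a buffer that is both aligned and guaranteed not to straddle a page boundary when handed to dma_map_single(). A minimal sketch of the same pattern, assuming a kernel build context (the alignment constant is a stand-in):

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define MY_PAGES_ALIGN	64	/* stand-in for MLX4_MR_PAGES_ALIGN */

static __be64 *pages;
static size_t page_map_size;

static int alloc_page_list(int max_pages)
{
	/* Round the byte count up to the DMA alignment requirement;
	 * callers bound max_pages so this stays within PAGE_SIZE. */
	page_map_size = roundup(max_pages * sizeof(u64), MY_PAGES_ALIGN);

	/* One zeroed page: aligned, and cannot cross a page boundary. */
	pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;
	return 0;
}

static void free_page_list(void)
{
	free_page((unsigned long)pages);
}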
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 81b0e1fbec1d..8db8405c1e99 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -362,7 +362,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
 			sizeof (struct mlx4_wqe_raddr_seg);
 	case MLX4_IB_QPT_RC:
 		return sizeof (struct mlx4_wqe_ctrl_seg) +
-			sizeof (struct mlx4_wqe_atomic_seg) +
+			sizeof (struct mlx4_wqe_masked_atomic_seg) +
 			sizeof (struct mlx4_wqe_raddr_seg);
 	case MLX4_IB_QPT_SMI:
 	case MLX4_IB_QPT_GSI:
@@ -1191,8 +1191,10 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 	{
 		err = create_qp_common(to_mdev(pd->device), pd, init_attr,
 				       udata, 0, &qp, gfp);
-		if (err)
+		if (err) {
+			kfree(qp);
 			return ERR_PTR(err);
+		}
 
 		qp->ibqp.qp_num = qp->mqp.qpn;
 		qp->xrcdn = xrcdn;
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 1534af113058..364aab9f3c9e 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -121,7 +121,7 @@ static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
 	pma_cnt_ext->port_xmit_data =
 		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
 					 transmitted_ib_multicast.octets) >> 2);
-	pma_cnt_ext->port_xmit_data =
+	pma_cnt_ext->port_rcv_data =
 		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
 					 received_ib_multicast.octets) >> 2);
 	pma_cnt_ext->port_xmit_packets =
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b48ad85315dc..dad63f038bb8 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1528,21 +1528,18 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 {
 	struct mlx5_flow_table	*ft = ft_prio->flow_table;
 	struct mlx5_ib_flow_handler *handler;
+	struct mlx5_flow_spec *spec;
 	void *ib_flow = flow_attr + 1;
-	u8 match_criteria_enable = 0;
 	unsigned int spec_index;
-	u32 *match_c;
-	u32 *match_v;
 	u32 action;
 	int err = 0;
 
 	if (!is_valid_attr(flow_attr))
 		return ERR_PTR(-EINVAL);
 
-	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+	spec = mlx5_vzalloc(sizeof(*spec));
 	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
-	if (!handler || !match_c || !match_v) {
+	if (!handler || !spec) {
 		err = -ENOMEM;
 		goto free;
 	}
@@ -1550,7 +1547,8 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 	INIT_LIST_HEAD(&handler->list);
 
 	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
-		err = parse_flow_attr(match_c, match_v, ib_flow);
+		err = parse_flow_attr(spec->match_criteria,
+				      spec->match_value, ib_flow);
 		if (err < 0)
 			goto free;
 
@@ -1558,11 +1556,11 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 	}
 
 	/* Outer header support only */
-	match_criteria_enable = (!outer_header_zero(match_c)) << 0;
+	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria))
+		<< 0;
 	action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
 		MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
-	handler->rule = mlx5_add_flow_rule(ft, match_criteria_enable,
-					   match_c, match_v,
+	handler->rule = mlx5_add_flow_rule(ft, spec,
 					   action,
 					   MLX5_FS_DEFAULT_FLOW_TAG,
 					   dst);
@@ -1578,8 +1576,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 free:
 	if (err)
 		kfree(handler);
-	kfree(match_c);
-	kfree(match_v);
+	kvfree(spec);
 	return err ? ERR_PTR(err) : handler;
 }
 
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index ce434228a5ea..ce0a7ab35a22 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3332,10 +3332,11 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
 			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
 		else
 			return fence;
-
-	} else {
-		return 0;
+	} else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
+		return MLX5_FENCE_MODE_FENCE;
 	}
+
+	return 0;
 }
 
 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index ff946d5f59e4..382466a90da7 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -2178,6 +2178,11 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
 
 	switch (cmd.type) {
 	case QIB_CMD_ASSIGN_CTXT:
+		if (rcd) {
+			ret = -EINVAL;
+			goto bail;
+		}
+
 		ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
 		if (ret)
 			goto bail;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 7de5134bec85..41ba7e9cadaa 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -369,8 +369,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
 			/* wrap to first map page, invert bit 0 */
 			offset = qpt->incr | ((offset & 1) ^ 1);
 		}
-		/* there can be no bits at shift and below */
-		WARN_ON(offset & (rdi->dparms.qos_shift - 1));
+		/* there can be no set bits in low-order QoS bits */
+		WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
 		qpn = mk_qpn(qpt, map, offset);
 	}
 
@@ -576,12 +576,6 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
 		qp->s_ssn = 1;
 		qp->s_lsn = 0;
 		qp->s_mig_state = IB_MIG_MIGRATED;
-		if (qp->s_ack_queue)
-			memset(
-				qp->s_ack_queue,
-				0,
-				rvt_max_atomic(rdi) *
-					sizeof(*qp->s_ack_queue));
 		qp->r_head_ack_queue = 0;
 		qp->s_tail_ack_queue = 0;
 		qp->s_num_rd_atomic = 0;
@@ -705,8 +699,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 		 * initialization that is needed.
 		 */
 		priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
-		if (!priv)
+		if (IS_ERR(priv)) {
+			ret = priv;
 			goto bail_qp;
+		}
 		qp->priv = priv;
 		qp->timeout_jiffies =
 			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
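The rvt_create_qp() fix reflects a contract change: qp_priv_alloc() now returns ERR_PTR(-errno) rather than NULL on failure, so the caller must test with IS_ERR() and propagate the encoded errno. The idiom in a small kernel-style sketch (the allocator is illustrative):

#include <linux/err.h>
#include <linux/slab.h>

/* Illustrative allocator that encodes its errno in the pointer. */
static void *priv_alloc_demo(gfp_t gfp)
{
	void *priv = kzalloc(64, gfp);

	if (!priv)
		return ERR_PTR(-ENOMEM);
	return priv;
}

static int create_demo(void)
{
	void *priv = priv_alloc_demo(GFP_KERNEL);

	/* NULL would sail straight through IS_ERR(), so once callers
	 * check IS_ERR() the callee must never return NULL. */
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	kfree(priv);
	return 0;
}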
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index e1cc2cc42f25..30c4fda7a05a 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -501,9 +501,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
 		    !rdi->driver_f.quiesce_qp ||
 		    !rdi->driver_f.notify_error_qp ||
 		    !rdi->driver_f.mtu_from_qp ||
-		    !rdi->driver_f.mtu_to_path_mtu ||
-		    !rdi->driver_f.shut_down_port ||
-		    !rdi->driver_f.cap_mask_chg)
+		    !rdi->driver_f.mtu_to_path_mtu)
 			return -EINVAL;
 		break;
 
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index e68b20cba70b..4a4155640d51 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1638,8 +1638,7 @@ retry:
 	 */
 	qp_init->cap.max_send_wr = srp_sq_size / 2;
 	qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
-	qp_init->cap.max_send_sge = max(sdev->device->attrs.max_sge_rd,
-					sdev->device->attrs.max_sge);
+	qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
 	qp_init->port_num = ch->sport->port;
 
 	ch->qp = ib_create_qp(sdev->pd, qp_init);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index fee6bfd7ca21..389030487da7 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -106,6 +106,7 @@ enum {
 	SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
 
 	SRPT_DEF_SG_TABLESIZE = 128,
+	SRPT_DEF_SG_PER_WQE = 16,
 
 	MIN_SRPT_SQ_SIZE = 16,
 	DEF_SRPT_SQ_SIZE = 4096,
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 804dbcc37d3f..3438e98c145a 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -1031,17 +1031,17 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
 
 	case XTYPE_XBOXONE:
 		packet->data[0] = 0x09; /* activate rumble */
-		packet->data[1] = 0x08;
+		packet->data[1] = 0x00;
 		packet->data[2] = xpad->odata_serial++;
-		packet->data[3] = 0x08; /* continuous effect */
-		packet->data[4] = 0x00; /* simple rumble mode */
-		packet->data[5] = 0x03; /* L and R actuator only */
-		packet->data[6] = 0x00; /* TODO: LT actuator */
-		packet->data[7] = 0x00; /* TODO: RT actuator */
+		packet->data[3] = 0x09;
+		packet->data[4] = 0x00;
+		packet->data[5] = 0x0F;
+		packet->data[6] = 0x00;
+		packet->data[7] = 0x00;
 		packet->data[8] = strong / 512;	/* left actuator */
 		packet->data[9] = weak / 512;	/* right actuator */
-		packet->data[10] = 0x80; /* length of pulse */
-		packet->data[11] = 0x00; /* stop period of pulse */
+		packet->data[10] = 0xFF;
+		packet->data[11] = 0x00;
 		packet->data[12] = 0x00;
 		packet->len = 13;
 		packet->pending = true;
@@ -1437,16 +1437,6 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
 		break;
 	}
 
-	if (xpad_device[i].xtype == XTYPE_XBOXONE &&
-	    intf->cur_altsetting->desc.bInterfaceNumber != 0) {
-		/*
-		 * The Xbox One controller lists three interfaces all with the
-		 * same interface class, subclass and protocol. Differentiate by
-		 * interface number.
-		 */
-		return -ENODEV;
-	}
-
 	xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL);
 	if (!xpad)
 		return -ENOMEM;
@@ -1478,6 +1468,8 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
 	if (intf->cur_altsetting->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
 		if (intf->cur_altsetting->desc.bInterfaceProtocol == 129)
 			xpad->xtype = XTYPE_XBOX360W;
+		else if (intf->cur_altsetting->desc.bInterfaceProtocol == 208)
+			xpad->xtype = XTYPE_XBOXONE;
 		else
 			xpad->xtype = XTYPE_XBOX360;
 	} else {
@@ -1492,6 +1484,17 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
 		xpad->mapping |= MAP_STICKS_TO_NULL;
 	}
 
+	if (xpad->xtype == XTYPE_XBOXONE &&
+	    intf->cur_altsetting->desc.bInterfaceNumber != 0) {
+		/*
+		 * The Xbox One controller lists three interfaces all with the
+		 * same interface class, subclass and protocol. Differentiate by
+		 * interface number.
+		 */
+		error = -ENODEV;
+		goto err_free_in_urb;
+	}
+
 	error = xpad_init_output(intf, xpad);
 	if (error)
 		goto err_free_in_urb;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 78f93cf68840..be5b399da5d3 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1568,13 +1568,7 @@ static int elantech_set_properties(struct elantech_data *etd)
 	case 5:
 		etd->hw_version = 3;
 		break;
-	case 6:
-	case 7:
-	case 8:
-	case 9:
-	case 10:
-	case 13:
-	case 14:
+	case 6 ... 14:
 		etd->hw_version = 4;
 		break;
 	default:
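`case 6 ... 14:` is the GCC/Clang case-range extension, used widely in the kernel; note it also matches 11 and 12, which the old explicit list skipped (that widening is intentional in this patch). A runnable illustration:

#include <stdio.h>

static int hw_version(int ic_type)
{
	switch (ic_type) {
	case 5:
		return 3;
	case 6 ... 14:	/* GNU extension: inclusive range */
		return 4;
	default:
		return -1;
	}
}

int main(void)
{
	/* prints: 3 4 -1 */
	printf("%d %d %d\n", hw_version(5), hw_version(11), hw_version(15));
	return 0;
}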
diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
index a3f0f5a47490..0f586780ceb4 100644
--- a/drivers/input/mouse/vmmouse.c
+++ b/drivers/input/mouse/vmmouse.c
@@ -355,18 +355,11 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
 		return -ENXIO;
 	}
 
-	if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
-		psmouse_dbg(psmouse, "VMMouse port in use.\n");
-		return -EBUSY;
-	}
-
 	/* Check if the device is present */
 	response = ~VMMOUSE_PROTO_MAGIC;
 	VMMOUSE_CMD(GETVERSION, 0, version, response, dummy1, dummy2);
-	if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU) {
-		release_region(VMMOUSE_PROTO_PORT, 4);
+	if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU)
 		return -ENXIO;
-	}
 
 	if (set_properties) {
 		psmouse->vendor = VMMOUSE_VENDOR;
@@ -374,8 +367,6 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
 		psmouse->model = version;
 	}
 
-	release_region(VMMOUSE_PROTO_PORT, 4);
-
 	return 0;
 }
 
@@ -394,7 +385,6 @@ static void vmmouse_disconnect(struct psmouse *psmouse)
 	psmouse_reset(psmouse);
 	input_unregister_device(priv->abs_dev);
 	kfree(priv);
-	release_region(VMMOUSE_PROTO_PORT, 4);
 }
 
 /**
@@ -438,15 +428,10 @@ int vmmouse_init(struct psmouse *psmouse)
 	struct input_dev *rel_dev = psmouse->dev, *abs_dev;
 	int error;
 
-	if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
-		psmouse_dbg(psmouse, "VMMouse port in use.\n");
-		return -EBUSY;
-	}
-
 	psmouse_reset(psmouse);
 	error = vmmouse_enable(psmouse);
 	if (error)
-		goto release_region;
+		return error;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	abs_dev = input_allocate_device();
@@ -502,8 +487,5 @@ init_fail:
 	kfree(priv);
 	psmouse->private = NULL;
 
-release_region:
-	release_region(VMMOUSE_PROTO_PORT, 4);
-
 	return error;
 }
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index bab3c6acf6a2..0c9191cf324d 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -27,7 +27,7 @@ MODULE_AUTHOR("Jaya Kumar <jayakumar.lkml@gmail.com>");
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
 
-#define W8001_MAX_LENGTH	11
+#define W8001_MAX_LENGTH	13
 #define W8001_LEAD_MASK		0x80
 #define W8001_LEAD_BYTE		0x80
 #define W8001_TAB_MASK		0x40
@@ -339,6 +339,15 @@ static irqreturn_t w8001_interrupt(struct serio *serio,
 		w8001->idx = 0;
 		parse_multi_touch(w8001);
 		break;
+
+	default:
+		/*
+		 * ThinkPad X60 Tablet PC (pen only device) sometimes
+		 * sends invalid data packets that are larger than
+		 * W8001_PKTLEN_TPCPEN. Let's start over again.
+		 */
+		if (!w8001->touch_dev && w8001->idx > W8001_PKTLEN_TPCPEN - 1)
+			w8001->idx = 0;
 	}
 
 	return IRQ_HANDLED;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 9e0034196e10..d091defc3426 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1107,13 +1107,13 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
 			break;
 		}
 
+		devid = e->devid;
 		DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
 			    hid, uid,
 			    PCI_BUS_NUM(devid),
 			    PCI_SLOT(devid),
 			    PCI_FUNC(devid));
 
-		devid  = e->devid;
 		flags = e->flags;
 
 		ret = add_acpi_hid_device(hid, uid, &devid, false);
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 94b68213c50d..5f6b3bcab078 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1941,6 +1941,7 @@ static struct iommu_ops arm_smmu_ops = {
 	.attach_dev		= arm_smmu_attach_dev,
 	.map			= arm_smmu_map,
 	.unmap			= arm_smmu_unmap,
+	.map_sg			= default_iommu_map_sg,
 	.iova_to_phys		= arm_smmu_iova_to_phys,
 	.add_device		= arm_smmu_add_device,
 	.remove_device		= arm_smmu_remove_device,
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a644d0cec2d8..cfe410eedaf0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3222,11 +3222,6 @@ static int __init init_dmars(void)
 			}
 		}
 
-		iommu_flush_write_buffer(iommu);
-		iommu_set_root_entry(iommu);
-		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
-		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-
 		if (!ecap_pass_through(iommu->ecap))
 			hw_pass_through = 0;
 #ifdef CONFIG_INTEL_IOMMU_SVM
@@ -3235,6 +3230,18 @@ static int __init init_dmars(void)
 #endif
 	}
 
+	/*
+	 * Now that qi is enabled on all iommus, set the root entry and flush
+	 * caches. This is required on some Intel X58 chipsets, otherwise the
+	 * flush_context function will loop forever and the boot hangs.
+	 */
+	for_each_active_iommu(iommu, drhd) {
+		iommu_flush_write_buffer(iommu);
+		iommu_set_root_entry(iommu);
+		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+	}
+
 	if (iommu_pass_through)
 		iommu_identity_mapping |= IDENTMAP_ALL;
 
@@ -4600,7 +4607,7 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
 		if (!iommu)
 			continue;
 
-		for (did = 0; did < 0xffff; did++) {
+		for (did = 0; did < cap_ndoms(iommu->cap); did++) {
 			domain = get_iommu_domain(iommu, did);
 
 			if (!domain)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index ba764a0835d3..e23001bfcfee 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -420,8 +420,10 @@ retry:
 
 		/* Try replenishing IOVAs by flushing rcache. */
 		flushed_rcache = true;
+		preempt_disable();
 		for_each_online_cpu(cpu)
 			free_cpu_cached_iovas(cpu, iovad);
+		preempt_enable();
 		goto retry;
 	}
 
@@ -749,7 +751,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
 	bool can_insert = false;
 	unsigned long flags;
 
-	cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
+	cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
 	spin_lock_irqsave(&cpu_rcache->lock, flags);
 
 	if (!iova_magazine_full(cpu_rcache->loaded)) {
@@ -779,6 +781,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
 		iova_magazine_push(cpu_rcache->loaded, iova_pfn);
 
 	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+	put_cpu_ptr(rcache->cpu_rcaches);
 
 	if (mag_to_free) {
 		iova_magazine_free_pfns(mag_to_free, iovad);
@@ -812,7 +815,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
 	bool has_pfn = false;
 	unsigned long flags;
 
-	cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
+	cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
 	spin_lock_irqsave(&cpu_rcache->lock, flags);
 
 	if (!iova_magazine_empty(cpu_rcache->loaded)) {
@@ -834,6 +837,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
 		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
 
 	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+	put_cpu_ptr(rcache->cpu_rcaches);
 
 	return iova_pfn;
 }
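this_cpu_ptr() is only safe with preemption already disabled; otherwise the task can migrate between reading the per-CPU pointer and using it, ending up locking one CPU's rcache while running on another. get_cpu_ptr()/put_cpu_ptr() wrap the access in preempt_disable()/preempt_enable(), which is what the iova.c change does. A sketch of the corrected shape, assuming a kernel build context (allocation of the per-CPU area is not shown):

#include <linux/percpu.h>
#include <linux/spinlock.h>

struct cpu_cache {
	spinlock_t lock;
	unsigned long val;
};

/* Assumed allocated elsewhere with alloc_percpu(struct cpu_cache). */
static struct cpu_cache __percpu *caches;

static void cache_store(unsigned long v)
{
	struct cpu_cache *c;
	unsigned long flags;

	c = get_cpu_ptr(caches);	/* disables preemption */
	spin_lock_irqsave(&c->lock, flags);
	c->val = v;
	spin_unlock_irqrestore(&c->lock, flags);
	put_cpu_ptr(caches);		/* re-enables preemption */
}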
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index c7d6156ff536..25b4627cb57f 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -815,7 +815,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 	dte_addr = virt_to_phys(rk_domain->dt);
 	for (i = 0; i < iommu->num_mmu; i++) {
 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
-		rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
 	}
 
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 3b5e10aa48ab..8a4adbeb2b8c 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -746,6 +746,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
 		/* verify that it doesn't conflict with an IPI irq */
 		if (test_bit(spec->hwirq, ipi_resrv))
 			return -EBUSY;
+
+		hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);
+
+		return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+						     &gic_level_irq_controller,
+						     NULL);
 	} else {
 		base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
 		if (base_hwirq == gic_shared_intrs) {
@@ -867,10 +873,14 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
 						    &gic_level_irq_controller,
 						    NULL);
 		if (ret)
-			return ret;
+			goto error;
 	}
 
 	return 0;
+
+error:
+	irq_domain_free_irqs_parent(d, virq, nr_irqs);
+	return ret;
 }
 
 void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 3495d5d6547f..3bce44893021 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -53,11 +53,12 @@ static void led_timer_function(unsigned long data)
 
 	if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
 		led_set_brightness_nosleep(led_cdev, LED_OFF);
+		led_cdev->flags &= ~LED_BLINK_SW;
 		return;
 	}
 
 	if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) {
-		led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
+		led_cdev->flags &= ~(LED_BLINK_ONESHOT_STOP | LED_BLINK_SW);
 		return;
 	}
 
@@ -151,6 +152,7 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
 		return;
 	}
 
+	led_cdev->flags |= LED_BLINK_SW;
 	mod_timer(&led_cdev->blink_timer, jiffies + 1);
 }
 
@@ -219,6 +221,7 @@ void led_stop_software_blink(struct led_classdev *led_cdev)
 	del_timer_sync(&led_cdev->blink_timer);
 	led_cdev->blink_delay_on = 0;
 	led_cdev->blink_delay_off = 0;
+	led_cdev->flags &= ~LED_BLINK_SW;
 }
 EXPORT_SYMBOL_GPL(led_stop_software_blink);
 
@@ -226,10 +229,10 @@ void led_set_brightness(struct led_classdev *led_cdev,
 			enum led_brightness brightness)
 {
 	/*
-	 * In case blinking is on delay brightness setting
+	 * If software blink is active, delay brightness setting
 	 * until the next timer tick.
 	 */
-	if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) {
+	if (led_cdev->flags & LED_BLINK_SW) {
 		/*
 		 * If we need to disable soft blinking delegate this to the
 		 * work queue task to avoid problems in case we are called
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index 410c39c62dc7..c9f386213e9e 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -19,6 +19,7 @@
 #include <linux/sched.h>
 #include <linux/leds.h>
 #include <linux/reboot.h>
+#include <linux/suspend.h>
 #include "../leds.h"
 
 static int panic_heartbeats;
@@ -154,6 +155,30 @@ static struct led_trigger heartbeat_led_trigger = {
 	.deactivate = heartbeat_trig_deactivate,
 };
 
+static int heartbeat_pm_notifier(struct notifier_block *nb,
+				 unsigned long pm_event, void *unused)
+{
+	int rc;
+
+	switch (pm_event) {
+	case PM_SUSPEND_PREPARE:
+	case PM_HIBERNATION_PREPARE:
+	case PM_RESTORE_PREPARE:
+		led_trigger_unregister(&heartbeat_led_trigger);
+		break;
+	case PM_POST_SUSPEND:
+	case PM_POST_HIBERNATION:
+	case PM_POST_RESTORE:
+		rc = led_trigger_register(&heartbeat_led_trigger);
+		if (rc)
+			pr_err("could not re-register heartbeat trigger\n");
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
 static int heartbeat_reboot_notifier(struct notifier_block *nb,
 				     unsigned long code, void *unused)
 {
@@ -168,6 +193,10 @@ static int heartbeat_panic_notifier(struct notifier_block *nb,
 	return NOTIFY_DONE;
 }
 
+static struct notifier_block heartbeat_pm_nb = {
+	.notifier_call = heartbeat_pm_notifier,
+};
+
 static struct notifier_block heartbeat_reboot_nb = {
 	.notifier_call = heartbeat_reboot_notifier,
 };
@@ -184,12 +213,14 @@ static int __init heartbeat_trig_init(void)
 		atomic_notifier_chain_register(&panic_notifier_list,
 					       &heartbeat_panic_nb);
 		register_reboot_notifier(&heartbeat_reboot_nb);
+		register_pm_notifier(&heartbeat_pm_nb);
 	}
 	return rc;
 }
 
 static void __exit heartbeat_trig_exit(void)
 {
+	unregister_pm_notifier(&heartbeat_pm_nb);
 	unregister_reboot_notifier(&heartbeat_reboot_nb);
 	atomic_notifier_chain_unregister(&panic_notifier_list,
 					 &heartbeat_panic_nb);
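The trigger is torn down before suspend and re-registered on resume via a PM notifier, so its blink timer cannot fire while the system is freezing. The registration half of that pattern in isolation, as a sketch assuming a kernel module context:

#include <linux/module.h>
#include <linux/suspend.h>

static int demo_pm_notifier(struct notifier_block *nb,
			    unsigned long pm_event, void *unused)
{
	switch (pm_event) {
	case PM_SUSPEND_PREPARE:
		/* quiesce timers/work before the freeze */
		break;
	case PM_POST_SUSPEND:
		/* restart them after resume */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_pm_nb = {
	.notifier_call = demo_pm_notifier,
};

static int __init demo_init(void)
{
	return register_pm_notifier(&demo_pm_nb);
}

static void __exit demo_exit(void)
{
	unregister_pm_notifier(&demo_pm_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");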
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index b73c6e7d28e4..6f2c8522e14a 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -61,21 +61,36 @@ static int mcb_probe(struct device *dev)
 	struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
 	struct mcb_device *mdev = to_mcb_device(dev);
 	const struct mcb_device_id *found_id;
+	struct module *carrier_mod;
+	int ret;
 
 	found_id = mcb_match_id(mdrv->id_table, mdev);
 	if (!found_id)
 		return -ENODEV;
 
-	return mdrv->probe(mdev, found_id);
+	carrier_mod = mdev->dev.parent->driver->owner;
+	if (!try_module_get(carrier_mod))
+		return -EINVAL;
+
+	get_device(dev);
+	ret = mdrv->probe(mdev, found_id);
+	if (ret)
+		module_put(carrier_mod);
+
+	return ret;
 }
 
 static int mcb_remove(struct device *dev)
 {
 	struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
 	struct mcb_device *mdev = to_mcb_device(dev);
+	struct module *carrier_mod;
 
 	mdrv->remove(mdev);
 
+	carrier_mod = mdev->dev.parent->driver->owner;
+	module_put(carrier_mod);
+
 	put_device(&mdev->dev);
 
 	return 0;
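Pinning the carrier module for the lifetime of a bound device keeps its code from unloading while the bus driver can still call into it; the module_put() in mcb_remove() pairs with the successful try_module_get() in mcb_probe(). The refcount pattern in miniature, as a sketch assuming a kernel build context (function names are illustrative):

#include <linux/device.h>
#include <linux/module.h>

/* Take a reference on the module that owns @parent_drv before
 * depending on it; drop it on the matching teardown path. */
static int demo_bind(struct device_driver *parent_drv)
{
	if (!try_module_get(parent_drv->owner))
		return -EINVAL;	/* owner is unloading; refuse to bind */
	return 0;
}

static void demo_unbind(struct device_driver *parent_drv)
{
	module_put(parent_drv->owner);
}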
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index d7723ce772b3..c04bc6afb965 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -1274,8 +1274,6 @@ struct uvc_xu_control_mapping32 {
 static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
 			const struct uvc_xu_control_mapping32 __user *up)
 {
-	struct uvc_menu_info __user *umenus;
-	struct uvc_menu_info __user *kmenus;
 	compat_caddr_t p;
 
 	if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
@@ -1292,17 +1290,7 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
 
 	if (__get_user(p, &up->menu_info))
 		return -EFAULT;
-	umenus = compat_ptr(p);
-	if (!access_ok(VERIFY_READ, umenus, kp->menu_count * sizeof(*umenus)))
-		return -EFAULT;
-
-	kmenus = compat_alloc_user_space(kp->menu_count * sizeof(*kmenus));
-	if (kmenus == NULL)
-		return -EFAULT;
-	kp->menu_info = kmenus;
-
-	if (copy_in_user(kmenus, umenus, kp->menu_count * sizeof(*umenus)))
-		return -EFAULT;
+	kp->menu_info = compat_ptr(p);
 
 	return 0;
 }
@@ -1310,10 +1298,6 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
 static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
 			struct uvc_xu_control_mapping32 __user *up)
 {
-	struct uvc_menu_info __user *umenus;
-	struct uvc_menu_info __user *kmenus = kp->menu_info;
-	compat_caddr_t p;
-
 	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
 	    __copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
 	    __put_user(kp->menu_count, &up->menu_count))
@@ -1322,16 +1306,6 @@ static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
 	if (__clear_user(up->reserved, sizeof(up->reserved)))
 		return -EFAULT;
 
-	if (kp->menu_count == 0)
-		return 0;
-
-	if (get_user(p, &up->menu_info))
-		return -EFAULT;
-	umenus = compat_ptr(p);
-
-	if (copy_in_user(umenus, kmenus, kp->menu_count * sizeof(*umenus)))
-		return -EFAULT;
-
 	return 0;
 }
 
@@ -1346,8 +1320,6 @@ struct uvc_xu_control_query32 {
 static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
 			const struct uvc_xu_control_query32 __user *up)
 {
-	u8 __user *udata;
-	u8 __user *kdata;
 	compat_caddr_t p;
 
 	if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
@@ -1361,17 +1333,7 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
 
 	if (__get_user(p, &up->data))
 		return -EFAULT;
-	udata = compat_ptr(p);
-	if (!access_ok(VERIFY_READ, udata, kp->size))
-		return -EFAULT;
-
-	kdata = compat_alloc_user_space(kp->size);
-	if (kdata == NULL)
-		return -EFAULT;
-	kp->data = kdata;
-
-	if (copy_in_user(kdata, udata, kp->size))
-		return -EFAULT;
+	kp->data = compat_ptr(p);
 
 	return 0;
 }
@@ -1379,26 +1341,10 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
 static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
 			struct uvc_xu_control_query32 __user *up)
 {
-	u8 __user *udata;
-	u8 __user *kdata = kp->data;
-	compat_caddr_t p;
-
 	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
 	    __copy_to_user(up, kp, offsetof(typeof(*up), data)))
 		return -EFAULT;
 
-	if (kp->size == 0)
-		return 0;
-
-	if (get_user(p, &up->data))
-		return -EFAULT;
-	udata = compat_ptr(p);
-	if (!access_ok(VERIFY_READ, udata, kp->size))
-		return -EFAULT;
-
-	if (copy_in_user(udata, kdata, kp->size))
-		return -EFAULT;
-
 	return 0;
 }
 
@@ -1408,47 +1354,44 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
 static long uvc_v4l2_compat_ioctl32(struct file *file,
 		     unsigned int cmd, unsigned long arg)
 {
+	struct uvc_fh *handle = file->private_data;
 	union {
 		struct uvc_xu_control_mapping xmap;
 		struct uvc_xu_control_query xqry;
 	} karg;
 	void __user *up = compat_ptr(arg);
-	mm_segment_t old_fs;
 	long ret;
 
 	switch (cmd) {
 	case UVCIOC_CTRL_MAP32:
-		cmd = UVCIOC_CTRL_MAP;
 		ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up);
+		if (ret)
+			return ret;
+		ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap);
+		if (ret)
+			return ret;
+		ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
+		if (ret)
+			return ret;
+
 		break;
 
 	case UVCIOC_CTRL_QUERY32:
-		cmd = UVCIOC_CTRL_QUERY;
 		ret = uvc_v4l2_get_xu_query(&karg.xqry, up);
+		if (ret)
+			return ret;
+		ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry);
+		if (ret)
+			return ret;
+		ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
+		if (ret)
+			return ret;
 		break;
 
 	default:
 		return -ENOIOCTLCMD;
 	}
 
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = video_ioctl2(file, cmd, (unsigned long)&karg);
-	set_fs(old_fs);
-
-	if (ret < 0)
-		return ret;
-
-	switch (cmd) {
-	case UVCIOC_CTRL_MAP:
-		ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
-		break;
-
-	case UVCIOC_CTRL_QUERY:
-		ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
-		break;
-	}
-
 	return ret;
 }
 #endif
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
index ca94bded3386..8bef4331bd51 100644
--- a/drivers/media/v4l2-core/v4l2-mc.c
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -1,7 +1,7 @@
 /*
  * Media Controller ancillary functions
  *
- * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+ * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@kernel.org>
  * Copyright (C) 2016 Shuah Khan <shuahkh@osg.samsung.com>
  * Copyright (C) 2006-2010 Nokia Corporation
  * Copyright (c) 2016 Intel Corporation.
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index af4884ba6b7c..15508df24e5d 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -398,7 +398,7 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
 	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
 			   GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
 	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
-			   GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay);
+			   GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
 	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
 			   GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
 			   p->cycle2cyclesamecsen);
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index 199d261990be..f32fbb8e8129 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -203,6 +203,7 @@ static int max77620_get_fps_period_reg_value(struct max77620_chip *chip,
 		break;
 	case MAX77620:
 		fps_min_period = MAX77620_FPS_PERIOD_MIN_US;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -236,6 +237,7 @@ static int max77620_config_fps(struct max77620_chip *chip,
 		break;
 	case MAX77620:
 		fps_max_period = MAX77620_FPS_PERIOD_MAX_US;
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index eed254da63a8..641c1a566687 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -730,7 +730,7 @@ static void mei_cl_wake_all(struct mei_cl *cl)
 	/* synchronized under device mutex */
 	if (waitqueue_active(&cl->wait)) {
 		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
-		wake_up_interruptible(&cl->wait);
+		wake_up(&cl->wait);
 	}
 }
 
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 16baeb51b2bd..ef3618299494 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1147,11 +1147,17 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
  */
 static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
 {
-	struct kstat stat;
 	int err, minor;
+	struct path path;
+	struct kstat stat;
 
 	/* Probably this is an MTD character device node path */
-	err = vfs_stat(mtd_dev, &stat);
+	err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
+	if (err)
+		return ERR_PTR(err);
+
+	err = vfs_getattr(&path, &stat);
+	path_put(&path);
 	if (err)
 		return ERR_PTR(err);
 
@@ -1160,6 +1166,7 @@ static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
 		return ERR_PTR(-EINVAL);
 
 	minor = MINOR(stat.rdev);
+
 	if (minor & 1)
 		/*
 		 * Just do not think the "/dev/mtdrX" devices support is need,
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 5780dd1ba79d..ebf517271d29 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -575,6 +575,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
575 int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0; 575 int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
576 struct ubi_volume *vol = ubi->volumes[idx]; 576 struct ubi_volume *vol = ubi->volumes[idx];
577 struct ubi_vid_hdr *vid_hdr; 577 struct ubi_vid_hdr *vid_hdr;
578 uint32_t crc;
578 579
579 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 580 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
580 if (!vid_hdr) 581 if (!vid_hdr)
@@ -599,14 +600,8 @@ retry:
599 goto out_put; 600 goto out_put;
600 } 601 }
601 602
602 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 603 ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
603 err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
604 if (err) {
605 up_read(&ubi->fm_eba_sem);
606 goto write_error;
607 }
608 604
609 data_size = offset + len;
610 mutex_lock(&ubi->buf_mutex); 605 mutex_lock(&ubi->buf_mutex);
611 memset(ubi->peb_buf + offset, 0xFF, len); 606 memset(ubi->peb_buf + offset, 0xFF, len);
612 607
@@ -621,6 +616,19 @@ retry:
621 616
622 memcpy(ubi->peb_buf + offset, buf, len); 617 memcpy(ubi->peb_buf + offset, buf, len);
623 618
619 data_size = offset + len;
620 crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
621 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
622 vid_hdr->copy_flag = 1;
623 vid_hdr->data_size = cpu_to_be32(data_size);
624 vid_hdr->data_crc = cpu_to_be32(crc);
625 err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
626 if (err) {
627 mutex_unlock(&ubi->buf_mutex);
628 up_read(&ubi->fm_eba_sem);
629 goto write_error;
630 }
631
624 err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size); 632 err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
625 if (err) { 633 if (err) {
626 mutex_unlock(&ubi->buf_mutex); 634 mutex_unlock(&ubi->buf_mutex);
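
The recover_peb() rework stages the full data region first (old contents read back, the recovered range padded with 0xFF, the new bytes copied in), then recomputes data_size and the data CRC and writes the VID header with copy_flag set, so the header can never describe data that was not actually written. Below is a self-contained sketch of the checksum involved: kernel crc32() seeded with UBI_CRC32_INIT (0xFFFFFFFF) and no final inversion, written as a bitwise loop over the standard 0xEDB88320 polynomial.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* crc32_le with init 0xFFFFFFFF and no final xor -- what
     * crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size) evaluates to. */
    static uint32_t crc32_le(uint32_t crc, const uint8_t *buf, size_t len)
    {
        while (len--) {
            crc ^= *buf++;
            for (int bit = 0; bit < 8; bit++)
                crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
        }
        return crc;
    }

    int main(void)
    {
        uint8_t peb_buf[64];

        /* Pad the region being recovered with 0xFF, then lay the new
         * payload over it, as the reworked recover_peb() does. */
        memset(peb_buf, 0xFF, sizeof(peb_buf));
        memcpy(peb_buf, "recovered data", 14);

        printf("data_crc = 0x%08x over %zu bytes\n",
               crc32_le(0xFFFFFFFFu, peb_buf, sizeof(peb_buf)),
               sizeof(peb_buf));
        return 0;
    }
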
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 348dbbcbedc8..a9e2cef7c95c 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -302,6 +302,7 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
302struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) 302struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
303{ 303{
304 int error, ubi_num, vol_id; 304 int error, ubi_num, vol_id;
305 struct path path;
305 struct kstat stat; 306 struct kstat stat;
306 307
307 dbg_gen("open volume %s, mode %d", pathname, mode); 308 dbg_gen("open volume %s, mode %d", pathname, mode);
@@ -309,7 +310,12 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
309 if (!pathname || !*pathname) 310 if (!pathname || !*pathname)
310 return ERR_PTR(-EINVAL); 311 return ERR_PTR(-EINVAL);
311 312
312 error = vfs_stat(pathname, &stat); 313 error = kern_path(pathname, LOOKUP_FOLLOW, &path);
314 if (error)
315 return ERR_PTR(error);
316
317 error = vfs_getattr(&path, &stat);
318 path_put(&path);
313 if (error) 319 if (error)
314 return ERR_PTR(error); 320 return ERR_PTR(error);
315 321
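
Both UBI call sites replace the single vfs_stat() with an explicit kern_path() + vfs_getattr() + path_put() sequence, separating the path lookup from the attribute fetch. The same flow in user space, using stat(2) (which follows symlinks, like LOOKUP_FOLLOW) with the device number pulled out of st_rdev the way the kernel code goes on to take MINOR(stat.rdev):

    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/sysmacros.h>      /* major(), minor() */

    int main(int argc, char **argv)
    {
        struct stat st;

        if (argc != 2) {
            fprintf(stderr, "usage: %s /dev/mtdX\n", argv[0]);
            return 1;
        }
        if (stat(argv[1], &st) != 0) {
            perror("stat");
            return 1;
        }
        if (!S_ISCHR(st.st_mode)) {
            fprintf(stderr, "%s: not a character device\n", argv[1]);
            return 1;
        }
        /* Odd minors are the read-only /dev/mtdrX nodes that
         * open_mtd_by_chdev() rejects. */
        printf("major %u, minor %u%s\n",
               major(st.st_rdev), minor(st.st_rdev),
               (minor(st.st_rdev) & 1) ? " (read-only mtdr node)" : "");
        return 0;
    }
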
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index b9304a295f86..edc70ffad660 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -101,11 +101,14 @@ enum ad_link_speed_type {
101#define MAC_ADDRESS_EQUAL(A, B) \ 101#define MAC_ADDRESS_EQUAL(A, B) \
102 ether_addr_equal_64bits((const u8 *)A, (const u8 *)B) 102 ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
103 103
104static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } }; 104static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
105 0, 0, 0, 0, 0, 0
106};
105static u16 ad_ticks_per_sec; 107static u16 ad_ticks_per_sec;
106static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000; 108static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
107 109
108static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR; 110static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
111 MULTICAST_LACPDU_ADDR;
109 112
110/* ================= main 802.3ad protocol functions ================== */ 113/* ================= main 802.3ad protocol functions ================== */
111static int ad_lacpdu_send(struct port *port); 114static int ad_lacpdu_send(struct port *port);
@@ -657,6 +660,20 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
657 } 660 }
658} 661}
659 662
663static int __agg_active_ports(struct aggregator *agg)
664{
665 struct port *port;
666 int active = 0;
667
668 for (port = agg->lag_ports; port;
669 port = port->next_port_in_aggregator) {
670 if (port->is_enabled)
671 active++;
672 }
673
674 return active;
675}
676
660/** 677/**
661 * __get_agg_bandwidth - get the total bandwidth of an aggregator 678 * __get_agg_bandwidth - get the total bandwidth of an aggregator
662 * @aggregator: the aggregator we're looking at 679 * @aggregator: the aggregator we're looking at
@@ -664,39 +681,40 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
664 */ 681 */
665static u32 __get_agg_bandwidth(struct aggregator *aggregator) 682static u32 __get_agg_bandwidth(struct aggregator *aggregator)
666{ 683{
684 int nports = __agg_active_ports(aggregator);
667 u32 bandwidth = 0; 685 u32 bandwidth = 0;
668 686
669 if (aggregator->num_of_ports) { 687 if (nports) {
670 switch (__get_link_speed(aggregator->lag_ports)) { 688 switch (__get_link_speed(aggregator->lag_ports)) {
671 case AD_LINK_SPEED_1MBPS: 689 case AD_LINK_SPEED_1MBPS:
672 bandwidth = aggregator->num_of_ports; 690 bandwidth = nports;
673 break; 691 break;
674 case AD_LINK_SPEED_10MBPS: 692 case AD_LINK_SPEED_10MBPS:
675 bandwidth = aggregator->num_of_ports * 10; 693 bandwidth = nports * 10;
676 break; 694 break;
677 case AD_LINK_SPEED_100MBPS: 695 case AD_LINK_SPEED_100MBPS:
678 bandwidth = aggregator->num_of_ports * 100; 696 bandwidth = nports * 100;
679 break; 697 break;
680 case AD_LINK_SPEED_1000MBPS: 698 case AD_LINK_SPEED_1000MBPS:
681 bandwidth = aggregator->num_of_ports * 1000; 699 bandwidth = nports * 1000;
682 break; 700 break;
683 case AD_LINK_SPEED_2500MBPS: 701 case AD_LINK_SPEED_2500MBPS:
684 bandwidth = aggregator->num_of_ports * 2500; 702 bandwidth = nports * 2500;
685 break; 703 break;
686 case AD_LINK_SPEED_10000MBPS: 704 case AD_LINK_SPEED_10000MBPS:
687 bandwidth = aggregator->num_of_ports * 10000; 705 bandwidth = nports * 10000;
688 break; 706 break;
689 case AD_LINK_SPEED_20000MBPS: 707 case AD_LINK_SPEED_20000MBPS:
690 bandwidth = aggregator->num_of_ports * 20000; 708 bandwidth = nports * 20000;
691 break; 709 break;
692 case AD_LINK_SPEED_40000MBPS: 710 case AD_LINK_SPEED_40000MBPS:
693 bandwidth = aggregator->num_of_ports * 40000; 711 bandwidth = nports * 40000;
694 break; 712 break;
695 case AD_LINK_SPEED_56000MBPS: 713 case AD_LINK_SPEED_56000MBPS:
696 bandwidth = aggregator->num_of_ports * 56000; 714 bandwidth = nports * 56000;
697 break; 715 break;
698 case AD_LINK_SPEED_100000MBPS: 716 case AD_LINK_SPEED_100000MBPS:
699 bandwidth = aggregator->num_of_ports * 100000; 717 bandwidth = nports * 100000;
700 break; 718 break;
701 default: 719 default:
702 bandwidth = 0; /* to silence the compiler */ 720 bandwidth = 0; /* to silence the compiler */
@@ -1530,10 +1548,10 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
1530 1548
1531 switch (__get_agg_selection_mode(curr->lag_ports)) { 1549 switch (__get_agg_selection_mode(curr->lag_ports)) {
1532 case BOND_AD_COUNT: 1550 case BOND_AD_COUNT:
1533 if (curr->num_of_ports > best->num_of_ports) 1551 if (__agg_active_ports(curr) > __agg_active_ports(best))
1534 return curr; 1552 return curr;
1535 1553
1536 if (curr->num_of_ports < best->num_of_ports) 1554 if (__agg_active_ports(curr) < __agg_active_ports(best))
1537 return best; 1555 return best;
1538 1556
1539 /*FALLTHROUGH*/ 1557 /*FALLTHROUGH*/
@@ -1561,8 +1579,14 @@ static int agg_device_up(const struct aggregator *agg)
1561 if (!port) 1579 if (!port)
1562 return 0; 1580 return 0;
1563 1581
1564 return netif_running(port->slave->dev) && 1582 for (port = agg->lag_ports; port;
1565 netif_carrier_ok(port->slave->dev); 1583 port = port->next_port_in_aggregator) {
1584 if (netif_running(port->slave->dev) &&
1585 netif_carrier_ok(port->slave->dev))
1586 return 1;
1587 }
1588
1589 return 0;
1566} 1590}
1567 1591
1568/** 1592/**
@@ -1610,7 +1634,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
1610 1634
1611 agg->is_active = 0; 1635 agg->is_active = 0;
1612 1636
1613 if (agg->num_of_ports && agg_device_up(agg)) 1637 if (__agg_active_ports(agg) && agg_device_up(agg))
1614 best = ad_agg_selection_test(best, agg); 1638 best = ad_agg_selection_test(best, agg);
1615 } 1639 }
1616 1640
@@ -1622,7 +1646,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
1622 * answering partner. 1646 * answering partner.
1623 */ 1647 */
1624 if (active && active->lag_ports && 1648 if (active && active->lag_ports &&
1625 active->lag_ports->is_enabled && 1649 __agg_active_ports(active) &&
1626 (__agg_has_partner(active) || 1650 (__agg_has_partner(active) ||
1627 (!__agg_has_partner(active) && 1651 (!__agg_has_partner(active) &&
1628 !__agg_has_partner(best)))) { 1652 !__agg_has_partner(best)))) {
@@ -1718,7 +1742,7 @@ static void ad_clear_agg(struct aggregator *aggregator)
1718 aggregator->is_individual = false; 1742 aggregator->is_individual = false;
1719 aggregator->actor_admin_aggregator_key = 0; 1743 aggregator->actor_admin_aggregator_key = 0;
1720 aggregator->actor_oper_aggregator_key = 0; 1744 aggregator->actor_oper_aggregator_key = 0;
1721 aggregator->partner_system = null_mac_addr; 1745 eth_zero_addr(aggregator->partner_system.mac_addr_value);
1722 aggregator->partner_system_priority = 0; 1746 aggregator->partner_system_priority = 0;
1723 aggregator->partner_oper_aggregator_key = 0; 1747 aggregator->partner_oper_aggregator_key = 0;
1724 aggregator->receive_state = 0; 1748 aggregator->receive_state = 0;
@@ -1740,7 +1764,7 @@ static void ad_initialize_agg(struct aggregator *aggregator)
1740 if (aggregator) { 1764 if (aggregator) {
1741 ad_clear_agg(aggregator); 1765 ad_clear_agg(aggregator);
1742 1766
1743 aggregator->aggregator_mac_address = null_mac_addr; 1767 eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value);
1744 aggregator->aggregator_identifier = 0; 1768 aggregator->aggregator_identifier = 0;
1745 aggregator->slave = NULL; 1769 aggregator->slave = NULL;
1746 } 1770 }
@@ -2133,7 +2157,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
2133 else 2157 else
2134 temp_aggregator->lag_ports = temp_port->next_port_in_aggregator; 2158 temp_aggregator->lag_ports = temp_port->next_port_in_aggregator;
2135 temp_aggregator->num_of_ports--; 2159 temp_aggregator->num_of_ports--;
2136 if (temp_aggregator->num_of_ports == 0) { 2160 if (__agg_active_ports(temp_aggregator) == 0) {
2137 select_new_active_agg = temp_aggregator->is_active; 2161 select_new_active_agg = temp_aggregator->is_active;
2138 ad_clear_agg(temp_aggregator); 2162 ad_clear_agg(temp_aggregator);
2139 if (select_new_active_agg) { 2163 if (select_new_active_agg) {
@@ -2432,7 +2456,9 @@ void bond_3ad_adapter_speed_duplex_changed(struct slave *slave)
2432 */ 2456 */
2433void bond_3ad_handle_link_change(struct slave *slave, char link) 2457void bond_3ad_handle_link_change(struct slave *slave, char link)
2434{ 2458{
2459 struct aggregator *agg;
2435 struct port *port; 2460 struct port *port;
2461 bool dummy;
2436 2462
2437 port = &(SLAVE_AD_INFO(slave)->port); 2463 port = &(SLAVE_AD_INFO(slave)->port);
2438 2464
@@ -2459,6 +2485,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
2459 port->is_enabled = false; 2485 port->is_enabled = false;
2460 ad_update_actor_keys(port, true); 2486 ad_update_actor_keys(port, true);
2461 } 2487 }
2488 agg = __get_first_agg(port);
2489 ad_agg_selection_logic(agg, &dummy);
2490
2462 netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n", 2491 netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
2463 port->actor_port_number, 2492 port->actor_port_number,
2464 link == BOND_LINK_UP ? "UP" : "DOWN"); 2493 link == BOND_LINK_UP ? "UP" : "DOWN");
@@ -2499,7 +2528,7 @@ int bond_3ad_set_carrier(struct bonding *bond)
2499 active = __get_active_agg(&(SLAVE_AD_INFO(first_slave)->aggregator)); 2528 active = __get_active_agg(&(SLAVE_AD_INFO(first_slave)->aggregator));
2500 if (active) { 2529 if (active) {
2501 /* are enough slaves available to consider link up? */ 2530 /* are enough slaves available to consider link up? */
2502 if (active->num_of_ports < bond->params.min_links) { 2531 if (__agg_active_ports(active) < bond->params.min_links) {
2503 if (netif_carrier_ok(bond->dev)) { 2532 if (netif_carrier_ok(bond->dev)) {
2504 netif_carrier_off(bond->dev); 2533 netif_carrier_off(bond->dev);
2505 goto out; 2534 goto out;
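
The common thread of the bond_3ad.c changes is that aggregator decisions (reported bandwidth, BOND_AD_COUNT selection, min_links carrier, and teardown in bond_3ad_unbind_slave) now count ports whose link is actually up rather than trusting aggregator->num_of_ports. A compact user-space rendering of the new helper, using simplified stand-in structs:

    #include <stdbool.h>
    #include <stdio.h>

    struct port {
        bool is_enabled;
        struct port *next_port_in_aggregator;
    };

    /* Walk the aggregator's singly linked port list and count only
     * ports with link up -- the logic of __agg_active_ports(). */
    static int agg_active_ports(struct port *lag_ports)
    {
        int active = 0;

        for (struct port *p = lag_ports; p; p = p->next_port_in_aggregator)
            if (p->is_enabled)
                active++;
        return active;
    }

    int main(void)
    {
        struct port p2 = { .is_enabled = false, .next_port_in_aggregator = NULL };
        struct port p1 = { .is_enabled = true,  .next_port_in_aggregator = &p2 };

        /* Two member ports, one with link down: bandwidth and
         * min_links now see 1 active port, not num_of_ports == 2. */
        printf("active ports: %d\n", agg_active_ports(&p1));
        return 0;
    }
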
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c5ac160a8ae9..551f0f8dead3 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -42,13 +42,10 @@
42 42
43 43
44 44
45#ifndef __long_aligned 45static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = {
46#define __long_aligned __attribute__((aligned((sizeof(long)))))
47#endif
48static const u8 mac_bcast[ETH_ALEN] __long_aligned = {
49 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 46 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
50}; 47};
51static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = { 48static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
52 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 49 0x33, 0x33, 0x00, 0x00, 0x00, 0x01
53}; 50};
54static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC; 51static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
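
Here (and in bond_3ad.c above) the constant MAC addresses grow to ETH_ALEN + 2 bytes with natural long alignment because ether_addr_equal_64bits() compares addresses via single 8-byte loads on 64-bit machines; a bare 6-byte array could be read past its end. A user-space sketch of the trick (the mask constant assumes a little-endian host):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6
    #define __long_aligned __attribute__((aligned(sizeof(long))))

    static const uint8_t mac_bcast[ETH_ALEN + 2] __long_aligned = {
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff
    };

    /* One 8-byte load per address, with the two padding bytes masked
     * off -- the comparison ether_addr_equal_64bits() performs. */
    static int mac_equal_64bits(const uint8_t *a, const uint8_t *b)
    {
        uint64_t x, y;

        memcpy(&x, a, 8);       /* safe: both buffers are 8 bytes */
        memcpy(&y, b, 8);
        /* Little-endian: the address sits in the low 48 bits. */
        return ((x ^ y) & 0x0000FFFFFFFFFFFFull) == 0;
    }

    int main(void)
    {
        uint8_t probe[ETH_ALEN + 2] __long_aligned = {
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xAA, 0xBB  /* padding differs */
        };

        printf("broadcast match: %d\n", mac_equal_64bits(probe, mac_bcast));
        return 0;
    }
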
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 90157e20357e..b571ed9fd63d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1584,6 +1584,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1584 } 1584 }
1585 1585
1586 /* check for initial state */ 1586 /* check for initial state */
1587 new_slave->link = BOND_LINK_NOCHANGE;
1587 if (bond->params.miimon) { 1588 if (bond->params.miimon) {
1588 if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) { 1589 if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
1589 if (bond->params.updelay) { 1590 if (bond->params.updelay) {
@@ -4137,6 +4138,8 @@ static const struct net_device_ops bond_netdev_ops = {
4137 .ndo_add_slave = bond_enslave, 4138 .ndo_add_slave = bond_enslave,
4138 .ndo_del_slave = bond_release, 4139 .ndo_del_slave = bond_release,
4139 .ndo_fix_features = bond_fix_features, 4140 .ndo_fix_features = bond_fix_features,
4141 .ndo_neigh_construct = netdev_default_l2upper_neigh_construct,
4142 .ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy,
4140 .ndo_bridge_setlink = switchdev_port_bridge_setlink, 4143 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
4141 .ndo_bridge_getlink = switchdev_port_bridge_getlink, 4144 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
4142 .ndo_bridge_dellink = switchdev_port_bridge_dellink, 4145 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 8b3275d7792a..8f5e93cb7975 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -712,9 +712,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
712 712
713 /* upper group completed, look again in lower */ 713 /* upper group completed, look again in lower */
714 if (priv->rx_next > get_mb_rx_low_last(priv) && 714 if (priv->rx_next > get_mb_rx_low_last(priv) &&
715 quota > 0 && mb > get_mb_rx_last(priv)) { 715 mb > get_mb_rx_last(priv)) {
716 priv->rx_next = get_mb_rx_first(priv); 716 priv->rx_next = get_mb_rx_first(priv);
717 goto again; 717 if (quota > 0)
718 goto again;
718 } 719 }
719 720
720 return received; 721 return received;
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index f91b094288da..e3dccd3200d5 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -332,9 +332,23 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
332 332
333 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl); 333 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
334 334
335 for (i = 0; i < frame->can_dlc; i += 2) { 335 if (priv->type == BOSCH_D_CAN) {
336 priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2, 336 u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);
337 frame->data[i] | (frame->data[i + 1] << 8)); 337
338 for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
339 data = (u32)frame->data[i];
340 data |= (u32)frame->data[i + 1] << 8;
341 data |= (u32)frame->data[i + 2] << 16;
342 data |= (u32)frame->data[i + 3] << 24;
343 priv->write_reg32(priv, dreg, data);
344 }
345 } else {
346 for (i = 0; i < frame->can_dlc; i += 2) {
347 priv->write_reg(priv,
348 C_CAN_IFACE(DATA1_REG, iface) + i / 2,
349 frame->data[i] |
350 (frame->data[i + 1] << 8));
351 }
338 } 352 }
339} 353}
340 354
@@ -402,10 +416,20 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
402 } else { 416 } else {
403 int i, dreg = C_CAN_IFACE(DATA1_REG, iface); 417 int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
404 418
405 for (i = 0; i < frame->can_dlc; i += 2, dreg ++) { 419 if (priv->type == BOSCH_D_CAN) {
406 data = priv->read_reg(priv, dreg); 420 for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
407 frame->data[i] = data; 421 data = priv->read_reg32(priv, dreg);
408 frame->data[i + 1] = data >> 8; 422 frame->data[i] = data;
423 frame->data[i + 1] = data >> 8;
424 frame->data[i + 2] = data >> 16;
425 frame->data[i + 3] = data >> 24;
426 }
427 } else {
428 for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
429 data = priv->read_reg(priv, dreg);
430 frame->data[i] = data;
431 frame->data[i + 1] = data >> 8;
432 }
409 } 433 }
410 } 434 }
411 435
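
The D_CAN data registers are 32 bits wide, so the fixed paths move four payload bytes per read_reg32()/write_reg32() access instead of two per 16-bit access. The byte shifts from the hunks, isolated into a standalone little-endian pack/unpack pair (no register I/O, just the arithmetic):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pack32(const uint8_t *d)
    {
        return (uint32_t)d[0] |
               (uint32_t)d[1] << 8 |
               (uint32_t)d[2] << 16 |
               (uint32_t)d[3] << 24;
    }

    static void unpack32(uint32_t v, uint8_t *d)
    {
        d[0] = v;
        d[1] = v >> 8;
        d[2] = v >> 16;
        d[3] = v >> 24;
    }

    int main(void)
    {
        uint8_t data[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
        uint8_t out[8];

        /* An 8-byte frame is now two 32-bit accesses (i += 4)
         * instead of four 16-bit ones (i += 2). */
        for (int i = 0; i < 8; i += 4)
            unpack32(pack32(&data[i]), &out[i]);

        for (int i = 0; i < 8; i++)
            printf("%02x ", out[i]);
        printf("\n");
        return 0;
    }
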
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 7188137fa08e..e21f7cc5ae4d 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -828,6 +828,9 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[])
828 * - control mode with CAN_CTRLMODE_FD set 828 * - control mode with CAN_CTRLMODE_FD set
829 */ 829 */
830 830
831 if (!data)
832 return 0;
833
831 if (data[IFLA_CAN_CTRLMODE]) { 834 if (data[IFLA_CAN_CTRLMODE]) {
832 struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]); 835 struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
833 836
@@ -1038,6 +1041,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
1038 return -EOPNOTSUPP; 1041 return -EOPNOTSUPP;
1039} 1042}
1040 1043
1044static void can_dellink(struct net_device *dev, struct list_head *head)
1045{
1046 return;
1047}
1048
1041static struct rtnl_link_ops can_link_ops __read_mostly = { 1049static struct rtnl_link_ops can_link_ops __read_mostly = {
1042 .kind = "can", 1050 .kind = "can",
1043 .maxtype = IFLA_CAN_MAX, 1051 .maxtype = IFLA_CAN_MAX,
@@ -1046,6 +1054,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
1046 .validate = can_validate, 1054 .validate = can_validate,
1047 .newlink = can_newlink, 1055 .newlink = can_newlink,
1048 .changelink = can_changelink, 1056 .changelink = can_changelink,
1057 .dellink = can_dellink,
1049 .get_size = can_get_size, 1058 .get_size = can_get_size,
1050 .fill_info = can_fill_info, 1059 .fill_info = can_fill_info,
1051 .get_xstats_size = can_get_xstats_size, 1060 .get_xstats_size = can_get_xstats_size,
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index bcb272f6c68a..8483a40e7e9e 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -16,7 +16,8 @@ config CAN_ESD_USB2
16config CAN_GS_USB 16config CAN_GS_USB
17 tristate "Geschwister Schneider UG interfaces" 17 tristate "Geschwister Schneider UG interfaces"
18 ---help--- 18 ---help---
19 This driver supports the Geschwister Schneider USB/CAN devices. 19 This driver supports the Geschwister Schneider and bytewerk.org
 20 candleLight USB/CAN interfaces.
20 If unsure choose N, 21 If unsure choose N,
21 choose Y for built in support, 22 choose Y for built in support,
22 M to compile as module (module will be named: gs_usb). 23 M to compile as module (module will be named: gs_usb).
@@ -46,6 +47,8 @@ config CAN_KVASER_USB
46 - Kvaser USBcan R 47 - Kvaser USBcan R
47 - Kvaser Leaf Light v2 48 - Kvaser Leaf Light v2
48 - Kvaser Mini PCI Express HS 49 - Kvaser Mini PCI Express HS
50 - Kvaser Mini PCI Express 2xHS
51 - Kvaser USBcan Light 2xHS
49 - Kvaser USBcan II HS/HS 52 - Kvaser USBcan II HS/HS
50 - Kvaser USBcan II HS/LS 53 - Kvaser USBcan II HS/LS
51 - Kvaser USBcan Rugged ("USBcan Rev B") 54 - Kvaser USBcan Rugged ("USBcan Rev B")
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 360764324c54..6f0cbc38782e 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -1,7 +1,9 @@
1/* CAN driver for Geschwister Schneider USB/CAN devices. 1/* CAN driver for Geschwister Schneider USB/CAN devices
2 * and bytewerk.org candleLight USB CAN interfaces.
2 * 3 *
3 * Copyright (C) 2013 Geschwister Schneider Technologie-, 4 * Copyright (C) 2013-2016 Geschwister Schneider Technologie-,
4 * Entwicklungs- und Vertriebs UG (Haftungsbeschränkt). 5 * Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
6 * Copyright (C) 2016 Hubert Denkmair
5 * 7 *
6 * Many thanks to all socketcan devs! 8 * Many thanks to all socketcan devs!
7 * 9 *
@@ -29,6 +31,9 @@
29#define USB_GSUSB_1_VENDOR_ID 0x1d50 31#define USB_GSUSB_1_VENDOR_ID 0x1d50
30#define USB_GSUSB_1_PRODUCT_ID 0x606f 32#define USB_GSUSB_1_PRODUCT_ID 0x606f
31 33
34#define USB_CANDLELIGHT_VENDOR_ID 0x1209
35#define USB_CANDLELIGHT_PRODUCT_ID 0x2323
36
32#define GSUSB_ENDPOINT_IN 1 37#define GSUSB_ENDPOINT_IN 1
33#define GSUSB_ENDPOINT_OUT 2 38#define GSUSB_ENDPOINT_OUT 2
34 39
@@ -1009,6 +1014,8 @@ static void gs_usb_disconnect(struct usb_interface *intf)
1009static const struct usb_device_id gs_usb_table[] = { 1014static const struct usb_device_id gs_usb_table[] = {
1010 { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID, 1015 { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID,
1011 USB_GSUSB_1_PRODUCT_ID, 0) }, 1016 USB_GSUSB_1_PRODUCT_ID, 0) },
1017 { USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
1018 USB_CANDLELIGHT_PRODUCT_ID, 0) },
1012 {} /* Terminating entry */ 1019 {} /* Terminating entry */
1013}; 1020};
1014 1021
@@ -1026,5 +1033,6 @@ module_usb_driver(gs_usb_driver);
1026MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>"); 1033MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>");
1027MODULE_DESCRIPTION( 1034MODULE_DESCRIPTION(
1028"Socket CAN device driver for Geschwister Schneider Technologie-, " 1035"Socket CAN device driver for Geschwister Schneider Technologie-, "
1029"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces."); 1036"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces\n"
1037"and bytewerk.org candleLight USB CAN interfaces.");
1030MODULE_LICENSE("GPL v2"); 1038MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 022bfa13ebfa..6f1f3b675ff5 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -59,11 +59,14 @@
59#define USB_CAN_R_PRODUCT_ID 39 59#define USB_CAN_R_PRODUCT_ID 39
60#define USB_LEAF_LITE_V2_PRODUCT_ID 288 60#define USB_LEAF_LITE_V2_PRODUCT_ID 288
61#define USB_MINI_PCIE_HS_PRODUCT_ID 289 61#define USB_MINI_PCIE_HS_PRODUCT_ID 289
62#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
63#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291
64#define USB_MINI_PCIE_2HS_PRODUCT_ID 292
62 65
63static inline bool kvaser_is_leaf(const struct usb_device_id *id) 66static inline bool kvaser_is_leaf(const struct usb_device_id *id)
64{ 67{
65 return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID && 68 return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
66 id->idProduct <= USB_MINI_PCIE_HS_PRODUCT_ID; 69 id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID;
67} 70}
68 71
69/* Kvaser USBCan-II devices */ 72/* Kvaser USBCan-II devices */
@@ -537,6 +540,9 @@ static const struct usb_device_id kvaser_usb_table[] = {
537 .driver_info = KVASER_HAS_TXRX_ERRORS }, 540 .driver_info = KVASER_HAS_TXRX_ERRORS },
538 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) }, 541 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
539 { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) }, 542 { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
543 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
544 { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
545 { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
540 546
541 /* USBCANII family IDs */ 547 /* USBCANII family IDs */
542 { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID), 548 { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index b70daf9174d4..21f1068b0804 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -70,6 +70,8 @@ static int b53_mmap_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
70 70
71static int b53_mmap_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val) 71static int b53_mmap_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
72{ 72{
73 u8 __iomem *regs = dev->priv;
74
73 if (WARN_ON(reg % 2)) 75 if (WARN_ON(reg % 2))
74 return -EINVAL; 76 return -EINVAL;
75 77
@@ -77,16 +79,26 @@ static int b53_mmap_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
77 u16 lo; 79 u16 lo;
78 u32 hi; 80 u32 hi;
79 81
80 b53_mmap_read16(dev, page, reg, &lo); 82 if (dev->pdata && dev->pdata->big_endian) {
81 b53_mmap_read32(dev, page, reg + 2, &hi); 83 lo = ioread16be(regs + (page << 8) + reg);
84 hi = ioread32be(regs + (page << 8) + reg + 2);
85 } else {
86 lo = readw(regs + (page << 8) + reg);
87 hi = readl(regs + (page << 8) + reg + 2);
88 }
82 89
83 *val = ((u64)hi << 16) | lo; 90 *val = ((u64)hi << 16) | lo;
84 } else { 91 } else {
85 u32 lo; 92 u32 lo;
86 u16 hi; 93 u16 hi;
87 94
88 b53_mmap_read32(dev, page, reg, &lo); 95 if (dev->pdata && dev->pdata->big_endian) {
89 b53_mmap_read16(dev, page, reg + 4, &hi); 96 lo = ioread32be(regs + (page << 8) + reg);
97 hi = ioread16be(regs + (page << 8) + reg + 4);
98 } else {
99 lo = readl(regs + (page << 8) + reg);
100 hi = readw(regs + (page << 8) + reg + 4);
101 }
90 102
91 *val = ((u64)hi << 32) | lo; 103 *val = ((u64)hi << 32) | lo;
92 } 104 }
@@ -96,13 +108,19 @@ static int b53_mmap_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
96 108
97static int b53_mmap_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val) 109static int b53_mmap_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
98{ 110{
111 u8 __iomem *regs = dev->priv;
99 u32 hi, lo; 112 u32 hi, lo;
100 113
101 if (WARN_ON(reg % 4)) 114 if (WARN_ON(reg % 4))
102 return -EINVAL; 115 return -EINVAL;
103 116
104 b53_mmap_read32(dev, page, reg, &lo); 117 if (dev->pdata && dev->pdata->big_endian) {
105 b53_mmap_read32(dev, page, reg + 4, &hi); 118 lo = ioread32be(regs + (page << 8) + reg);
119 hi = ioread32be(regs + (page << 8) + reg + 4);
120 } else {
121 lo = readl(regs + (page << 8) + reg);
122 hi = readl(regs + (page << 8) + reg + 4);
123 }
106 124
107 *val = ((u64)hi << 32) | lo; 125 *val = ((u64)hi << 32) | lo;
108 126
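
b53_mmap_read48()/read64() now fetch the raw halves themselves so they can honour a big-endian controller (ioread16be()/ioread32be()) versus a little-endian one (readw()/readl()). The surrounding branch condition is outside the hunk; assuming the reg % 4 split that the lo/hi widths imply, the 48-bit composition reduces to:

    #include <stdint.h>
    #include <stdio.h>

    /* Compose a 48-bit register value from its two MMIO halves.
     * For reg % 4 == 2 the low half is 16 bits and the high half 32
     * bits; otherwise the widths are reversed (reg is always even). */
    static uint64_t compose48(unsigned reg, uint32_t lo, uint32_t hi)
    {
        if (reg % 4 == 2)
            return (uint64_t)hi << 16 | (uint16_t)lo;
        return (uint64_t)hi << 32 | lo;
    }

    int main(void)
    {
        /* 0x112233445566 split both ways round-trips identically. */
        printf("%llx\n", (unsigned long long)compose48(2, 0x5566, 0x11223344));
        printf("%llx\n", (unsigned long long)compose48(4, 0x33445566, 0x1122));
        return 0;
    }
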
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index e0fb0f1122db..20760e10211a 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -509,8 +509,8 @@ static int au1000_mii_probe(struct net_device *dev)
509 * on the current MAC's MII bus 509 * on the current MAC's MII bus
510 */ 510 */
511 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) 511 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
512 if (mdiobus_get_phy(aup->mii_bus, aup->phy_addr)) { 512 if (mdiobus_get_phy(aup->mii_bus, phy_addr)) {
513 phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr); 513 phydev = mdiobus_get_phy(aup->mii_bus, phy_addr);
514 if (!aup->phy_search_highest_addr) 514 if (!aup->phy_search_highest_addr)
515 /* break out with first one found */ 515 /* break out with first one found */
516 break; 516 break;
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index ca562bc034c3..e4feb712d4f2 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -134,7 +134,6 @@ struct arc_emac_priv {
134 134
135 /* Devices */ 135 /* Devices */
136 struct device *dev; 136 struct device *dev;
137 struct phy_device *phy_dev;
138 struct mii_bus *bus; 137 struct mii_bus *bus;
139 struct arc_emac_mdio_bus_data bus_data; 138 struct arc_emac_mdio_bus_data bus_data;
140 139
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index a3a9392a4954..586bedac457d 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -47,7 +47,7 @@ static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
47static void arc_emac_adjust_link(struct net_device *ndev) 47static void arc_emac_adjust_link(struct net_device *ndev)
48{ 48{
49 struct arc_emac_priv *priv = netdev_priv(ndev); 49 struct arc_emac_priv *priv = netdev_priv(ndev);
50 struct phy_device *phy_dev = priv->phy_dev; 50 struct phy_device *phy_dev = ndev->phydev;
51 unsigned int reg, state_changed = 0; 51 unsigned int reg, state_changed = 0;
52 52
53 if (priv->link != phy_dev->link) { 53 if (priv->link != phy_dev->link) {
@@ -80,46 +80,6 @@ static void arc_emac_adjust_link(struct net_device *ndev)
80} 80}
81 81
82/** 82/**
83 * arc_emac_get_settings - Get PHY settings.
84 * @ndev: Pointer to net_device structure.
85 * @cmd: Pointer to ethtool_cmd structure.
86 *
87 * This implements ethtool command for getting PHY settings. If PHY could
88 * not be found, the function returns -ENODEV. This function calls the
89 * relevant PHY ethtool API to get the PHY settings.
90 * Issue "ethtool ethX" under linux prompt to execute this function.
91 */
92static int arc_emac_get_settings(struct net_device *ndev,
93 struct ethtool_cmd *cmd)
94{
95 struct arc_emac_priv *priv = netdev_priv(ndev);
96
97 return phy_ethtool_gset(priv->phy_dev, cmd);
98}
99
100/**
101 * arc_emac_set_settings - Set PHY settings as passed in the argument.
102 * @ndev: Pointer to net_device structure.
103 * @cmd: Pointer to ethtool_cmd structure.
104 *
105 * This implements ethtool command for setting various PHY settings. If PHY
106 * could not be found, the function returns -ENODEV. This function calls the
107 * relevant PHY ethtool API to set the PHY.
108 * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
109 * function.
110 */
111static int arc_emac_set_settings(struct net_device *ndev,
112 struct ethtool_cmd *cmd)
113{
114 struct arc_emac_priv *priv = netdev_priv(ndev);
115
116 if (!capable(CAP_NET_ADMIN))
117 return -EPERM;
118
119 return phy_ethtool_sset(priv->phy_dev, cmd);
120}
121
122/**
123 * arc_emac_get_drvinfo - Get EMAC driver information. 83 * arc_emac_get_drvinfo - Get EMAC driver information.
124 * @ndev: Pointer to net_device structure. 84 * @ndev: Pointer to net_device structure.
125 * @info: Pointer to ethtool_drvinfo structure. 85 * @info: Pointer to ethtool_drvinfo structure.
@@ -137,10 +97,10 @@ static void arc_emac_get_drvinfo(struct net_device *ndev,
137} 97}
138 98
139static const struct ethtool_ops arc_emac_ethtool_ops = { 99static const struct ethtool_ops arc_emac_ethtool_ops = {
140 .get_settings = arc_emac_get_settings,
141 .set_settings = arc_emac_set_settings,
142 .get_drvinfo = arc_emac_get_drvinfo, 100 .get_drvinfo = arc_emac_get_drvinfo,
143 .get_link = ethtool_op_get_link, 101 .get_link = ethtool_op_get_link,
102 .get_link_ksettings = phy_ethtool_get_link_ksettings,
103 .set_link_ksettings = phy_ethtool_set_link_ksettings,
144}; 104};
145 105
146#define FIRST_OR_LAST_MASK (FIRST_MASK | LAST_MASK) 106#define FIRST_OR_LAST_MASK (FIRST_MASK | LAST_MASK)
@@ -403,7 +363,7 @@ static void arc_emac_poll_controller(struct net_device *dev)
403static int arc_emac_open(struct net_device *ndev) 363static int arc_emac_open(struct net_device *ndev)
404{ 364{
405 struct arc_emac_priv *priv = netdev_priv(ndev); 365 struct arc_emac_priv *priv = netdev_priv(ndev);
406 struct phy_device *phy_dev = priv->phy_dev; 366 struct phy_device *phy_dev = ndev->phydev;
407 int i; 367 int i;
408 368
409 phy_dev->autoneg = AUTONEG_ENABLE; 369 phy_dev->autoneg = AUTONEG_ENABLE;
@@ -474,7 +434,7 @@ static int arc_emac_open(struct net_device *ndev)
474 /* Enable EMAC */ 434 /* Enable EMAC */
475 arc_reg_or(priv, R_CTRL, EN_MASK); 435 arc_reg_or(priv, R_CTRL, EN_MASK);
476 436
477 phy_start_aneg(priv->phy_dev); 437 phy_start_aneg(ndev->phydev);
478 438
479 netif_start_queue(ndev); 439 netif_start_queue(ndev);
480 440
@@ -772,6 +732,7 @@ int arc_emac_probe(struct net_device *ndev, int interface)
772 struct device *dev = ndev->dev.parent; 732 struct device *dev = ndev->dev.parent;
773 struct resource res_regs; 733 struct resource res_regs;
774 struct device_node *phy_node; 734 struct device_node *phy_node;
735 struct phy_device *phydev = NULL;
775 struct arc_emac_priv *priv; 736 struct arc_emac_priv *priv;
776 const char *mac_addr; 737 const char *mac_addr;
777 unsigned int id, clock_frequency, irq; 738 unsigned int id, clock_frequency, irq;
@@ -887,16 +848,16 @@ int arc_emac_probe(struct net_device *ndev, int interface)
887 goto out_clken; 848 goto out_clken;
888 } 849 }
889 850
890 priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0, 851 phydev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
891 interface); 852 interface);
892 if (!priv->phy_dev) { 853 if (!phydev) {
893 dev_err(dev, "of_phy_connect() failed\n"); 854 dev_err(dev, "of_phy_connect() failed\n");
894 err = -ENODEV; 855 err = -ENODEV;
895 goto out_mdio; 856 goto out_mdio;
896 } 857 }
897 858
898 dev_info(dev, "connected to %s phy with id 0x%x\n", 859 dev_info(dev, "connected to %s phy with id 0x%x\n",
899 priv->phy_dev->drv->name, priv->phy_dev->phy_id); 860 phydev->drv->name, phydev->phy_id);
900 861
901 netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT); 862 netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT);
902 863
@@ -910,8 +871,7 @@ int arc_emac_probe(struct net_device *ndev, int interface)
910 871
911out_netif_api: 872out_netif_api:
912 netif_napi_del(&priv->napi); 873 netif_napi_del(&priv->napi);
913 phy_disconnect(priv->phy_dev); 874 phy_disconnect(phydev);
914 priv->phy_dev = NULL;
915out_mdio: 875out_mdio:
916 arc_mdio_remove(priv); 876 arc_mdio_remove(priv);
917out_clken: 877out_clken:
@@ -925,8 +885,7 @@ int arc_emac_remove(struct net_device *ndev)
925{ 885{
926 struct arc_emac_priv *priv = netdev_priv(ndev); 886 struct arc_emac_priv *priv = netdev_priv(ndev);
927 887
928 phy_disconnect(priv->phy_dev); 888 phy_disconnect(ndev->phydev);
929 priv->phy_dev = NULL;
930 arc_mdio_remove(priv); 889 arc_mdio_remove(priv);
931 unregister_netdev(ndev); 890 unregister_netdev(ndev);
932 netif_napi_del(&priv->napi); 891 netif_napi_del(&priv->napi);
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index d02c4240b7df..8fc93c5f6abc 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -96,10 +96,6 @@ struct alx_priv {
96 unsigned int rx_ringsz; 96 unsigned int rx_ringsz;
97 unsigned int rxbuf_size; 97 unsigned int rxbuf_size;
98 98
99 struct page *rx_page;
100 unsigned int rx_page_offset;
101 unsigned int rx_frag_size;
102
103 struct napi_struct napi; 99 struct napi_struct napi;
104 struct alx_tx_queue txq; 100 struct alx_tx_queue txq;
105 struct alx_rx_queue rxq; 101 struct alx_rx_queue rxq;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c98acdc0d14f..e708e360a9e3 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -70,35 +70,6 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry)
70 } 70 }
71} 71}
72 72
73static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp)
74{
75 struct sk_buff *skb;
76 struct page *page;
77
78 if (alx->rx_frag_size > PAGE_SIZE)
79 return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
80
81 page = alx->rx_page;
82 if (!page) {
83 alx->rx_page = page = alloc_page(gfp);
84 if (unlikely(!page))
85 return NULL;
86 alx->rx_page_offset = 0;
87 }
88
89 skb = build_skb(page_address(page) + alx->rx_page_offset,
90 alx->rx_frag_size);
91 if (likely(skb)) {
92 alx->rx_page_offset += alx->rx_frag_size;
93 if (alx->rx_page_offset >= PAGE_SIZE)
94 alx->rx_page = NULL;
95 else
96 get_page(page);
97 }
98 return skb;
99}
100
101
102static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) 73static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
103{ 74{
104 struct alx_rx_queue *rxq = &alx->rxq; 75 struct alx_rx_queue *rxq = &alx->rxq;
@@ -115,9 +86,22 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
115 while (!cur_buf->skb && next != rxq->read_idx) { 86 while (!cur_buf->skb && next != rxq->read_idx) {
116 struct alx_rfd *rfd = &rxq->rfd[cur]; 87 struct alx_rfd *rfd = &rxq->rfd[cur];
117 88
118 skb = alx_alloc_skb(alx, gfp); 89 /*
90 * When DMA RX address is set to something like
91 * 0x....fc0, it will be very likely to cause DMA
92 * RFD overflow issue.
93 *
94 * To work around it, we apply rx skb with 64 bytes
95 * longer space, and offset the address whenever
96 * 0x....fc0 is detected.
97 */
98 skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
119 if (!skb) 99 if (!skb)
120 break; 100 break;
101
102 if (((unsigned long)skb->data & 0xfff) == 0xfc0)
103 skb_reserve(skb, 64);
104
121 dma = dma_map_single(&alx->hw.pdev->dev, 105 dma = dma_map_single(&alx->hw.pdev->dev,
122 skb->data, alx->rxbuf_size, 106 skb->data, alx->rxbuf_size,
123 DMA_FROM_DEVICE); 107 DMA_FROM_DEVICE);
@@ -153,7 +137,6 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
153 alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur); 137 alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
154 } 138 }
155 139
156
157 return count; 140 return count;
158} 141}
159 142
@@ -622,11 +605,6 @@ static void alx_free_rings(struct alx_priv *alx)
622 kfree(alx->txq.bufs); 605 kfree(alx->txq.bufs);
623 kfree(alx->rxq.bufs); 606 kfree(alx->rxq.bufs);
624 607
625 if (alx->rx_page) {
626 put_page(alx->rx_page);
627 alx->rx_page = NULL;
628 }
629
630 dma_free_coherent(&alx->hw.pdev->dev, 608 dma_free_coherent(&alx->hw.pdev->dev,
631 alx->descmem.size, 609 alx->descmem.size,
632 alx->descmem.virt, 610 alx->descmem.virt,
@@ -681,7 +659,6 @@ static int alx_request_irq(struct alx_priv *alx)
681 alx->dev->name, alx); 659 alx->dev->name, alx);
682 if (!err) 660 if (!err)
683 goto out; 661 goto out;
684
685 /* fall back to legacy interrupt */ 662 /* fall back to legacy interrupt */
686 pci_disable_msi(alx->hw.pdev); 663 pci_disable_msi(alx->hw.pdev);
687 } 664 }
@@ -725,7 +702,6 @@ static int alx_init_sw(struct alx_priv *alx)
725 struct pci_dev *pdev = alx->hw.pdev; 702 struct pci_dev *pdev = alx->hw.pdev;
726 struct alx_hw *hw = &alx->hw; 703 struct alx_hw *hw = &alx->hw;
727 int err; 704 int err;
728 unsigned int head_size;
729 705
730 err = alx_identify_hw(alx); 706 err = alx_identify_hw(alx);
731 if (err) { 707 if (err) {
@@ -741,12 +717,7 @@ static int alx_init_sw(struct alx_priv *alx)
741 717
742 hw->smb_timer = 400; 718 hw->smb_timer = 400;
743 hw->mtu = alx->dev->mtu; 719 hw->mtu = alx->dev->mtu;
744
745 alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu); 720 alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
746 head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
747 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
748 alx->rx_frag_size = roundup_pow_of_two(head_size);
749
750 alx->tx_ringsz = 256; 721 alx->tx_ringsz = 256;
751 alx->rx_ringsz = 512; 722 alx->rx_ringsz = 512;
752 hw->imt = 200; 723 hw->imt = 200;
@@ -848,7 +819,6 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
848{ 819{
849 struct alx_priv *alx = netdev_priv(netdev); 820 struct alx_priv *alx = netdev_priv(netdev);
850 int max_frame = ALX_MAX_FRAME_LEN(mtu); 821 int max_frame = ALX_MAX_FRAME_LEN(mtu);
851 unsigned int head_size;
852 822
853 if ((max_frame < ALX_MIN_FRAME_SIZE) || 823 if ((max_frame < ALX_MIN_FRAME_SIZE) ||
854 (max_frame > ALX_MAX_FRAME_SIZE)) 824 (max_frame > ALX_MAX_FRAME_SIZE))
@@ -860,9 +830,6 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
860 netdev->mtu = mtu; 830 netdev->mtu = mtu;
861 alx->hw.mtu = mtu; 831 alx->hw.mtu = mtu;
862 alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE); 832 alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
863 head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
864 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
865 alx->rx_frag_size = roundup_pow_of_two(head_size);
866 netdev_update_features(netdev); 833 netdev_update_features(netdev);
867 if (netif_running(netdev)) 834 if (netif_running(netdev))
868 alx_reinit(alx); 835 alx_reinit(alx);
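
The alx rewrite removes the page-fragment allocator entirely and attacks the underlying quirk head-on: RX DMA addresses whose low 12 bits are 0x...fc0 were seen to overflow the RFD, so every receive buffer now carries 64 spare bytes and is shifted forward when that pattern appears. In miniature, with malloc() standing in for __netdev_alloc_skb() and a pointer bump for skb_reserve():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        size_t rxbuf_size = 1536;                       /* stand-in for alx->rxbuf_size */
        unsigned char *raw = malloc(rxbuf_size + 64);   /* 64 bytes of slack */
        unsigned char *data = raw;

        if (!raw)
            return 1;
        if (((uintptr_t)raw & 0xfff) == 0xfc0)
            data += 64;                         /* mimic skb_reserve(skb, 64) */

        printf("rx data at %p (low 12 bits 0x%03lx)\n",
               (void *)data, (unsigned long)((uintptr_t)data & 0xfff));
        free(raw);                              /* free the original pointer */
        return 0;
    }
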
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 834afbb51aff..b2d30863caeb 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -370,7 +370,7 @@ static void bcm_sysport_get_stats(struct net_device *dev,
370 else 370 else
371 p = (char *)priv; 371 p = (char *)priv;
372 p += s->stat_offset; 372 p += s->stat_offset;
373 data[i] = *(u32 *)p; 373 data[i] = *(unsigned long *)p;
374 } 374 }
375} 375}
376 376
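
The one-character-type fix above matters because netdev statistics are unsigned long: reading them through a u32 pointer truncates any counter past 4G on 64-bit kernels (and reads the wrong half on big-endian ones). The driver's cast pattern, reduced to a short demonstration (assumes an LP64 host):

    #include <stdio.h>

    int main(void)
    {
        unsigned long stat = 0x100000001UL;     /* a counter past 4G */
        char *p = (char *)&stat;                /* the driver's byte-offset cast */

        printf("read as u32:           %u\n", *(unsigned int *)p);
        printf("read as unsigned long: %lu\n", *(unsigned long *)p);
        return 0;
    }
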
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index e6e74ca86e60..b045dc072c40 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -269,15 +269,16 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
269 while (ring->start != ring->end) { 269 while (ring->start != ring->end) {
270 int slot_idx = ring->start % BGMAC_TX_RING_SLOTS; 270 int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
271 struct bgmac_slot_info *slot = &ring->slots[slot_idx]; 271 struct bgmac_slot_info *slot = &ring->slots[slot_idx];
272 u32 ctl1; 272 u32 ctl0, ctl1;
273 int len; 273 int len;
274 274
275 if (slot_idx == empty_slot) 275 if (slot_idx == empty_slot)
276 break; 276 break;
277 277
278 ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
278 ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1); 279 ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
279 len = ctl1 & BGMAC_DESC_CTL1_LEN; 280 len = ctl1 & BGMAC_DESC_CTL1_LEN;
280 if (ctl1 & BGMAC_DESC_CTL0_SOF) 281 if (ctl0 & BGMAC_DESC_CTL0_SOF)
281 /* Unmap no longer used buffer */ 282 /* Unmap no longer used buffer */
282 dma_unmap_single(dma_dev, slot->dma_addr, len, 283 dma_unmap_single(dma_dev, slot->dma_addr, len,
283 DMA_TO_DEVICE); 284 DMA_TO_DEVICE);
@@ -1322,7 +1323,8 @@ static int bgmac_open(struct net_device *net_dev)
1322 1323
1323 phy_start(net_dev->phydev); 1324 phy_start(net_dev->phydev);
1324 1325
1325 netif_carrier_on(net_dev); 1326 netif_start_queue(net_dev);
1327
1326 return 0; 1328 return 0;
1327} 1329}
1328 1330
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 673f4d62e73e..70b148a10ec8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3414,7 +3414,8 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
3414 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); 3414 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
3415 /* Only RSS support for now TBD: COS & LB */ 3415 /* Only RSS support for now TBD: COS & LB */
3416 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP | 3416 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
3417 VNIC_CFG_REQ_ENABLES_RSS_RULE); 3417 VNIC_CFG_REQ_ENABLES_RSS_RULE |
3418 VNIC_CFG_REQ_ENABLES_MRU);
3418 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx); 3419 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
3419 req.cos_rule = cpu_to_le16(0xffff); 3420 req.cos_rule = cpu_to_le16(0xffff);
3420 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 3421 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
@@ -3951,7 +3952,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
3951 3952
3952 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); 3953 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
3953 3954
3954 req.update_period_ms = cpu_to_le32(1000); 3955 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
3955 3956
3956 mutex_lock(&bp->hwrm_cmd_lock); 3957 mutex_lock(&bp->hwrm_cmd_lock);
3957 for (i = 0; i < bp->cp_nr_rings; i++) { 3958 for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -4025,6 +4026,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
4025 4026
4026 pf->fw_fid = le16_to_cpu(resp->fid); 4027 pf->fw_fid = le16_to_cpu(resp->fid);
4027 pf->port_id = le16_to_cpu(resp->port_id); 4028 pf->port_id = le16_to_cpu(resp->port_id);
4029 bp->dev->dev_port = pf->port_id;
4028 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 4030 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
4029 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN); 4031 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
4030 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 4032 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
@@ -4315,6 +4317,16 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
4315#endif 4317#endif
4316} 4318}
4317 4319
4320/* Allow PF and VF with default VLAN to be in promiscuous mode */
4321static bool bnxt_promisc_ok(struct bnxt *bp)
4322{
4323#ifdef CONFIG_BNXT_SRIOV
4324 if (BNXT_VF(bp) && !bp->vf.vlan)
4325 return false;
4326#endif
4327 return true;
4328}
4329
4318static int bnxt_cfg_rx_mode(struct bnxt *); 4330static int bnxt_cfg_rx_mode(struct bnxt *);
4319static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 4331static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
4320 4332
@@ -4380,7 +4392,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
4380 4392
4381 vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 4393 vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
4382 4394
4383 if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp)) 4395 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
4384 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 4396 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
4385 4397
4386 if (bp->dev->flags & IFF_ALLMULTI) { 4398 if (bp->dev->flags & IFF_ALLMULTI) {
@@ -5295,12 +5307,19 @@ static int bnxt_open(struct net_device *dev)
5295 struct bnxt *bp = netdev_priv(dev); 5307 struct bnxt *bp = netdev_priv(dev);
5296 int rc = 0; 5308 int rc = 0;
5297 5309
5298 rc = bnxt_hwrm_func_reset(bp); 5310 if (!test_bit(BNXT_STATE_FN_RST_DONE, &bp->state)) {
5299 if (rc) { 5311 rc = bnxt_hwrm_func_reset(bp);
5300 netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n", 5312 if (rc) {
5301 rc); 5313 netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
5302 rc = -1; 5314 rc);
5303 return rc; 5315 rc = -EBUSY;
5316 return rc;
5317 }
5318 /* Do func_reset during the 1st PF open only to prevent killing
5319 * the VFs when the PF is brought down and up.
5320 */
5321 if (BNXT_PF(bp))
5322 set_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
5304 } 5323 }
5305 return __bnxt_open_nic(bp, true, true); 5324 return __bnxt_open_nic(bp, true, true);
5306} 5325}
@@ -5520,8 +5539,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
5520 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 5539 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
5521 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST); 5540 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
5522 5541
5523 /* Only allow PF to be in promiscuous mode */ 5542 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
5524 if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
5525 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 5543 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5526 5544
5527 uc_update = bnxt_uc_list_updated(bp); 5545 uc_update = bnxt_uc_list_updated(bp);
@@ -5976,6 +5994,8 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
5976 bp->tx_coal_ticks_irq = 2; 5994 bp->tx_coal_ticks_irq = 2;
5977 bp->tx_coal_bufs_irq = 2; 5995 bp->tx_coal_bufs_irq = 2;
5978 5996
5997 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
5998
5979 init_timer(&bp->timer); 5999 init_timer(&bp->timer);
5980 bp->timer.data = (unsigned long)bp; 6000 bp->timer.data = (unsigned long)bp;
5981 bp->timer.function = bnxt_timer; 6001 bp->timer.function = bnxt_timer;
@@ -6041,7 +6061,7 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
6041{ 6061{
6042 struct bnxt *bp = netdev_priv(dev); 6062 struct bnxt *bp = netdev_priv(dev);
6043 6063
6044 if (new_mtu < 60 || new_mtu > 9000) 6064 if (new_mtu < 60 || new_mtu > 9500)
6045 return -EINVAL; 6065 return -EINVAL;
6046 6066
6047 if (netif_running(dev)) 6067 if (netif_running(dev))
@@ -6676,6 +6696,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
6676 pci_channel_state_t state) 6696 pci_channel_state_t state)
6677{ 6697{
6678 struct net_device *netdev = pci_get_drvdata(pdev); 6698 struct net_device *netdev = pci_get_drvdata(pdev);
6699 struct bnxt *bp = netdev_priv(netdev);
6679 6700
6680 netdev_info(netdev, "PCI I/O error detected\n"); 6701 netdev_info(netdev, "PCI I/O error detected\n");
6681 6702
@@ -6690,6 +6711,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
6690 if (netif_running(netdev)) 6711 if (netif_running(netdev))
6691 bnxt_close(netdev); 6712 bnxt_close(netdev);
6692 6713
6714 /* So that func_reset will be done during slot_reset */
6715 clear_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
6693 pci_disable_device(pdev); 6716 pci_disable_device(pdev);
6694 rtnl_unlock(); 6717 rtnl_unlock();
6695 6718
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 927ece9c408a..2313e37e6eb5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -11,10 +11,10 @@
11#define BNXT_H 11#define BNXT_H
12 12
13#define DRV_MODULE_NAME "bnxt_en" 13#define DRV_MODULE_NAME "bnxt_en"
14#define DRV_MODULE_VERSION "1.2.0" 14#define DRV_MODULE_VERSION "1.3.0"
15 15
16#define DRV_VER_MAJ 1 16#define DRV_VER_MAJ 1
17#define DRV_VER_MIN 0 17#define DRV_VER_MIN 3
18#define DRV_VER_UPD 0 18#define DRV_VER_UPD 0
19 19
20struct tx_bd { 20struct tx_bd {
@@ -359,7 +359,8 @@ struct rx_tpa_end_cmp {
359 RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO) 359 RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
360 360
361#define TPA_END_GRO_TS(rx_tpa_end) \ 361#define TPA_END_GRO_TS(rx_tpa_end) \
362 ((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & cpu_to_le32(RX_TPA_END_GRO_TS)) 362 (!!((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & \
363 cpu_to_le32(RX_TPA_END_GRO_TS)))
363 364
364struct rx_tpa_end_cmp_ext { 365struct rx_tpa_end_cmp_ext {
365 __le32 rx_tpa_end_cmp_dup_acks; 366 __le32 rx_tpa_end_cmp_dup_acks;
@@ -753,8 +754,8 @@ struct bnxt_vf_info {
753struct bnxt_pf_info { 754struct bnxt_pf_info {
754#define BNXT_FIRST_PF_FID 1 755#define BNXT_FIRST_PF_FID 1
755#define BNXT_FIRST_VF_FID 128 756#define BNXT_FIRST_VF_FID 128
756 u32 fw_fid; 757 u16 fw_fid;
757 u8 port_id; 758 u16 port_id;
758 u8 mac_addr[ETH_ALEN]; 759 u8 mac_addr[ETH_ALEN];
759 u16 max_rsscos_ctxs; 760 u16 max_rsscos_ctxs;
760 u16 max_cp_rings; 761 u16 max_cp_rings;
@@ -1017,6 +1018,7 @@ struct bnxt {
1017 unsigned long state; 1018 unsigned long state;
1018#define BNXT_STATE_OPEN 0 1019#define BNXT_STATE_OPEN 0
1019#define BNXT_STATE_IN_SP_TASK 1 1020#define BNXT_STATE_IN_SP_TASK 1
1021#define BNXT_STATE_FN_RST_DONE 2
1020 1022
1021 struct bnxt_irq *irq_tbl; 1023 struct bnxt_irq *irq_tbl;
1022 u8 mac_addr[ETH_ALEN]; 1024 u8 mac_addr[ETH_ALEN];
@@ -1065,6 +1067,11 @@ struct bnxt {
1065 1067
1066#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2) 1068#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
1067 1069
1070 u32 stats_coal_ticks;
1071#define BNXT_DEF_STATS_COAL_TICKS 1000000
1072#define BNXT_MIN_STATS_COAL_TICKS 250000
1073#define BNXT_MAX_STATS_COAL_TICKS 1000000
1074
1068 struct work_struct sp_task; 1075 struct work_struct sp_task;
1069 unsigned long sp_event; 1076 unsigned long sp_event;
1070#define BNXT_RX_MASK_SP_EVENT 0 1077#define BNXT_RX_MASK_SP_EVENT 0
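
The !! wrapped around TPA_END_GRO_TS forces the macro to evaluate to 0 or 1 instead of the raw masked bit, so callers can hand it to anything expecting a boolean without the set bit being lost to a narrowing conversion. The bit position below is illustrative, not the hardware's:

    #include <stdint.h>
    #include <stdio.h>

    #define RX_TPA_END_GRO_TS (1u << 31)    /* illustrative bit position */

    int main(void)
    {
        uint32_t tsdelta = RX_TPA_END_GRO_TS | 0x1234;

        printf("raw masked value: 0x%x\n",
               (unsigned)(tsdelta & RX_TPA_END_GRO_TS));
        printf("with !!:          %d\n",
               !!(tsdelta & RX_TPA_END_GRO_TS));
        return 0;
    }
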
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index d7ab2d7982c2..0f7dd861ab4d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -56,6 +56,8 @@ static int bnxt_get_coalesce(struct net_device *dev,
56 coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq; 56 coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq;
57 coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq; 57 coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq;
58 58
59 coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
60
59 return 0; 61 return 0;
60} 62}
61 63
@@ -63,6 +65,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
63 struct ethtool_coalesce *coal) 65 struct ethtool_coalesce *coal)
64{ 66{
65 struct bnxt *bp = netdev_priv(dev); 67 struct bnxt *bp = netdev_priv(dev);
68 bool update_stats = false;
66 int rc = 0; 69 int rc = 0;
67 70
68 bp->rx_coal_ticks = coal->rx_coalesce_usecs; 71 bp->rx_coal_ticks = coal->rx_coalesce_usecs;
@@ -76,8 +79,26 @@ static int bnxt_set_coalesce(struct net_device *dev,
76 bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq; 79 bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq;
77 bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq; 80 bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq;
78 81
79 if (netif_running(dev)) 82 if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
80 rc = bnxt_hwrm_set_coal(bp); 83 u32 stats_ticks = coal->stats_block_coalesce_usecs;
84
85 stats_ticks = clamp_t(u32, stats_ticks,
86 BNXT_MIN_STATS_COAL_TICKS,
87 BNXT_MAX_STATS_COAL_TICKS);
88 stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
89 bp->stats_coal_ticks = stats_ticks;
90 update_stats = true;
91 }
92
93 if (netif_running(dev)) {
94 if (update_stats) {
95 rc = bnxt_close_nic(bp, true, false);
96 if (!rc)
97 rc = bnxt_open_nic(bp, true, false);
98 } else {
99 rc = bnxt_hwrm_set_coal(bp);
100 }
101 }
81 102
82 return rc; 103 return rc;
83} 104}
@@ -961,7 +982,7 @@ static int bnxt_set_pauseparam(struct net_device *dev,
961 struct bnxt_link_info *link_info = &bp->link_info; 982 struct bnxt_link_info *link_info = &bp->link_info;
962 983
963 if (!BNXT_SINGLE_PF(bp)) 984 if (!BNXT_SINGLE_PF(bp))
964 return rc; 985 return -EOPNOTSUPP;
965 986
966 if (epause->autoneg) { 987 if (epause->autoneg) {
967 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) 988 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
@@ -1059,6 +1080,8 @@ static int bnxt_firmware_reset(struct net_device *dev,
1059 case BNX_DIR_TYPE_APE_FW: 1080 case BNX_DIR_TYPE_APE_FW:
1060 case BNX_DIR_TYPE_APE_PATCH: 1081 case BNX_DIR_TYPE_APE_PATCH:
1061 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; 1082 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
1083 /* Self-reset APE upon next PCIe reset: */
1084 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
1062 break; 1085 break;
1063 case BNX_DIR_TYPE_KONG_FW: 1086 case BNX_DIR_TYPE_KONG_FW:
1064 case BNX_DIR_TYPE_KONG_PATCH: 1087 case BNX_DIR_TYPE_KONG_PATCH:
@@ -1092,9 +1115,27 @@ static int bnxt_flash_firmware(struct net_device *dev,
1092 case BNX_DIR_TYPE_BOOTCODE_2: 1115 case BNX_DIR_TYPE_BOOTCODE_2:
1093 code_type = CODE_BOOT; 1116 code_type = CODE_BOOT;
1094 break; 1117 break;
1118 case BNX_DIR_TYPE_CHIMP_PATCH:
1119 code_type = CODE_CHIMP_PATCH;
1120 break;
1095 case BNX_DIR_TYPE_APE_FW: 1121 case BNX_DIR_TYPE_APE_FW:
1096 code_type = CODE_MCTP_PASSTHRU; 1122 code_type = CODE_MCTP_PASSTHRU;
1097 break; 1123 break;
1124 case BNX_DIR_TYPE_APE_PATCH:
1125 code_type = CODE_APE_PATCH;
1126 break;
1127 case BNX_DIR_TYPE_KONG_FW:
1128 code_type = CODE_KONG_FW;
1129 break;
1130 case BNX_DIR_TYPE_KONG_PATCH:
1131 code_type = CODE_KONG_PATCH;
1132 break;
1133 case BNX_DIR_TYPE_BONO_FW:
1134 code_type = CODE_BONO_FW;
1135 break;
1136 case BNX_DIR_TYPE_BONO_PATCH:
1137 code_type = CODE_BONO_PATCH;
1138 break;
1098 default: 1139 default:
1099 netdev_err(dev, "Unsupported directory entry type: %u\n", 1140 netdev_err(dev, "Unsupported directory entry type: %u\n",
1100 dir_type); 1141 dir_type);
@@ -1149,6 +1190,8 @@ static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
1149 case BNX_DIR_TYPE_APE_PATCH: 1190 case BNX_DIR_TYPE_APE_PATCH:
1150 case BNX_DIR_TYPE_KONG_FW: 1191 case BNX_DIR_TYPE_KONG_FW:
1151 case BNX_DIR_TYPE_KONG_PATCH: 1192 case BNX_DIR_TYPE_KONG_PATCH:
1193 case BNX_DIR_TYPE_BONO_FW:
1194 case BNX_DIR_TYPE_BONO_PATCH:
1152 return true; 1195 return true;
1153 } 1196 }
1154 1197
@@ -1186,7 +1229,8 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
1186 const struct firmware *fw; 1229 const struct firmware *fw;
1187 int rc; 1230 int rc;
1188 1231
1189 if (bnxt_dir_type_is_executable(dir_type) == false) 1232 if (dir_type != BNX_DIR_TYPE_UPDATE &&
1233 bnxt_dir_type_is_executable(dir_type) == false)
1190 return -EINVAL; 1234 return -EINVAL;
1191 1235
1192 rc = request_firmware(&fw, filename, &dev->dev); 1236 rc = request_firmware(&fw, filename, &dev->dev);
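
For context, a minimal sketch of the request_firmware() flow that bnxt_flash_firmware_from_file() builds on; the flashing step is elided and the function name is hypothetical.

	#include <linux/firmware.h>
	#include <linux/netdevice.h>

	static int example_flash_from_file(struct net_device *dev,
					   const char *filename)
	{
		const struct firmware *fw;
		int rc;

		/* Looks the file up via the firmware loader (/lib/firmware). */
		rc = request_firmware(&fw, filename, &dev->dev);
		if (rc)
			return rc;

		/* ... write fw->data (fw->size bytes) to NVRAM here ... */

		release_firmware(fw);
		return rc;
	}
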
@@ -1483,7 +1527,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
1483 int rc = 0; 1527 int rc = 0;
1484 1528
1485 if (!BNXT_SINGLE_PF(bp)) 1529 if (!BNXT_SINGLE_PF(bp))
1486 return 0; 1530 return -EOPNOTSUPP;
1487 1531
1488 if (!(bp->flags & BNXT_FLAG_EEE_CAP)) 1532 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
1489 return -EOPNOTSUPP; 1533 return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
index 461675caaacd..82bf44ab811b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -70,6 +70,7 @@ enum SUPPORTED_CODE {
70 CODE_KONG_PATCH, /* 18 - KONG Patch firmware */ 70 CODE_KONG_PATCH, /* 18 - KONG Patch firmware */
71 CODE_BONO_FW, /* 19 - BONO firmware */ 71 CODE_BONO_FW, /* 19 - BONO firmware */
72 CODE_BONO_PATCH, /* 20 - BONO Patch firmware */ 72 CODE_BONO_PATCH, /* 20 - BONO Patch firmware */
73 CODE_CHIMP_PATCH, /* 21 - ChiMP Patch firmware */
73 74
74 MAX_CODE_TYPE, 75 MAX_CODE_TYPE,
75}; 76};
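
The new CODE_CHIMP_PATCH value is appended just before the sentinel so existing entries keep their on-wire numbering while MAX_CODE_TYPE grows automatically. A generic sketch of the pattern, with illustrative names:

	enum example_code_type {
		EXAMPLE_CODE_FIRST,	/* existing entries keep their values */
		/* ... */
		EXAMPLE_CODE_NEW,	/* additions go here, before the sentinel */
		EXAMPLE_MAX_CODE_TYPE,	/* sentinel: count of valid entries */
	};
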
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 05e3c49a7677..517567f6d651 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -105,6 +105,7 @@ struct hwrm_async_event_cmpl {
105 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0) 105 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
106 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0) 106 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
107 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0) 107 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
108 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE (0x7UL << 0)
108 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0) 109 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
109 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0) 110 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
110 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0) 111 #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
@@ -484,12 +485,12 @@ struct hwrm_async_event_cmpl_hwrm_error {
484 #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL 485 #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
485}; 486};
486 487
487/* HW Resource Manager Specification 1.2.2 */ 488/* HW Resource Manager Specification 1.3.0 */
488#define HWRM_VERSION_MAJOR 1 489#define HWRM_VERSION_MAJOR 1
489#define HWRM_VERSION_MINOR 2 490#define HWRM_VERSION_MINOR 3
490#define HWRM_VERSION_UPDATE 2 491#define HWRM_VERSION_UPDATE 0
491 492
492#define HWRM_VERSION_STR "1.2.2" 493#define HWRM_VERSION_STR "1.3.0"
493/* 494/*
494 * Following is the signature for HWRM message field that indicates not 495 * Following is the signature for HWRM message field that indicates not
495 * applicable (All F's). Need to cast it the size of the field if needed. 496 * applicable (All F's). Need to cast it the size of the field if needed.
@@ -611,6 +612,9 @@ struct cmd_nums {
611 #define HWRM_FWD_RESP (0xd2UL) 612 #define HWRM_FWD_RESP (0xd2UL)
612 #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL) 613 #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL)
613 #define HWRM_TEMP_MONITOR_QUERY (0xe0UL) 614 #define HWRM_TEMP_MONITOR_QUERY (0xe0UL)
615 #define HWRM_WOL_FILTER_ALLOC (0xf0UL)
616 #define HWRM_WOL_FILTER_FREE (0xf1UL)
617 #define HWRM_WOL_FILTER_QCFG (0xf2UL)
614 #define HWRM_DBG_READ_DIRECT (0xff10UL) 618 #define HWRM_DBG_READ_DIRECT (0xff10UL)
615 #define HWRM_DBG_READ_INDIRECT (0xff11UL) 619 #define HWRM_DBG_READ_INDIRECT (0xff11UL)
616 #define HWRM_DBG_WRITE_DIRECT (0xff12UL) 620 #define HWRM_DBG_WRITE_DIRECT (0xff12UL)
@@ -1020,6 +1024,10 @@ struct hwrm_func_qcaps_output {
1020 #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL 1024 #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
1021 #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL 1025 #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
1022 #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL 1026 #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
1027 #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
1028 #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
1029 #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
1030 #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
1023 u8 mac_address[6]; 1031 u8 mac_address[6];
1024 __le16 max_rsscos_ctx; 1032 __le16 max_rsscos_ctx;
1025 __le16 max_cmpl_rings; 1033 __le16 max_cmpl_rings;
@@ -1066,8 +1074,9 @@ struct hwrm_func_qcfg_output {
1066 __le16 fid; 1074 __le16 fid;
1067 __le16 port_id; 1075 __le16 port_id;
1068 __le16 vlan; 1076 __le16 vlan;
1069 u8 unused_0; 1077 __le16 flags;
1070 u8 unused_1; 1078 #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
1079 #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
1071 u8 mac_address[6]; 1080 u8 mac_address[6];
1072 __le16 pci_id; 1081 __le16 pci_id;
1073 __le16 alloc_rsscos_ctx; 1082 __le16 alloc_rsscos_ctx;
@@ -1086,23 +1095,23 @@ struct hwrm_func_qcfg_output {
1086 #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 (0x3UL << 0) 1095 #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 (0x3UL << 0)
1087 #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 (0x4UL << 0) 1096 #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 (0x4UL << 0)
1088 #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN (0xffUL << 0) 1097 #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN (0xffUL << 0)
1089 u8 unused_2; 1098 u8 unused_0;
1090 __le16 dflt_vnic_id; 1099 __le16 dflt_vnic_id;
1091 u8 unused_3; 1100 u8 unused_1;
1092 u8 unused_4; 1101 u8 unused_2;
1093 __le32 min_bw; 1102 __le32 min_bw;
1094 __le32 max_bw; 1103 __le32 max_bw;
1095 u8 evb_mode; 1104 u8 evb_mode;
1096 #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB (0x0UL << 0) 1105 #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB (0x0UL << 0)
1097 #define FUNC_QCFG_RESP_EVB_MODE_VEB (0x1UL << 0) 1106 #define FUNC_QCFG_RESP_EVB_MODE_VEB (0x1UL << 0)
1098 #define FUNC_QCFG_RESP_EVB_MODE_VEPA (0x2UL << 0) 1107 #define FUNC_QCFG_RESP_EVB_MODE_VEPA (0x2UL << 0)
1099 u8 unused_5; 1108 u8 unused_3;
1100 __le16 unused_6; 1109 __le16 unused_4;
1101 __le32 alloc_mcast_filters; 1110 __le32 alloc_mcast_filters;
1102 __le32 alloc_hw_ring_grps; 1111 __le32 alloc_hw_ring_grps;
1112 u8 unused_5;
1113 u8 unused_6;
1103 u8 unused_7; 1114 u8 unused_7;
1104 u8 unused_8;
1105 u8 unused_9;
1106 u8 valid; 1115 u8 valid;
1107}; 1116};
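
Note how the qcfg response swaps two u8 pads for a single __le16 flags word and renumbers the remaining unused_N fields: the structure's wire size must stay exactly what firmware emits. A sketch of the idea with hypothetical fields:

	#include <linux/types.h>

	/* Hypothetical HWRM-style response fragment: replacing two u8 pads
	 * with one little-endian u16 keeps the byte layout identical.
	 */
	struct example_qcfg_resp {
		__le16 fid;
		__le16 flags;		/* was: u8 unused_0; u8 unused_1; */
	#define EXAMPLE_RESP_FLAGS_WOL_ENABLED	0x1UL
		u8 mac_address[6];
	};
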
1108 1117
@@ -1410,8 +1419,8 @@ struct hwrm_func_buf_rgtr_input {
1410 #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K (0xcUL << 0) 1419 #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K (0xcUL << 0)
1411 #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K (0xdUL << 0) 1420 #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K (0xdUL << 0)
1412 #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K (0x10UL << 0) 1421 #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K (0x10UL << 0)
1413 #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x16UL << 0) 1422 #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x15UL << 0)
1414 #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x17UL << 0) 1423 #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x16UL << 0)
1415 #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G (0x1eUL << 0) 1424 #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G (0x1eUL << 0)
1416 __le16 req_buf_len; 1425 __le16 req_buf_len;
1417 __le16 resp_buf_len; 1426 __le16 resp_buf_len;
@@ -1499,6 +1508,12 @@ struct hwrm_port_phy_cfg_input {
1499 #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL 1508 #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
1500 #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL 1509 #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
1501 #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL 1510 #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
1511 #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
1512 #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
1513 #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
1514 #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
1515 #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
1516 #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
1502 __le32 enables; 1517 __le32 enables;
1503 #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL 1518 #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
1504 #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL 1519 #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
@@ -1815,13 +1830,22 @@ struct hwrm_port_phy_qcfg_output {
1815 #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24) 1830 #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
1816 #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24) 1831 #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
1817 #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24) 1832 #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
1818 __le32 unused_1; 1833 __le16 fec_cfg;
1834 #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
1835 #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
1836 #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
1837 #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
1838 #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
1839 #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
1840 #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
1841 u8 unused_1;
1842 u8 unused_2;
1819 char phy_vendor_name[16]; 1843 char phy_vendor_name[16];
1820 char phy_vendor_partnumber[16]; 1844 char phy_vendor_partnumber[16];
1821 __le32 unused_2; 1845 __le32 unused_3;
1822 u8 unused_3;
1823 u8 unused_4; 1846 u8 unused_4;
1824 u8 unused_5; 1847 u8 unused_5;
1848 u8 unused_6;
1825 u8 valid; 1849 u8 valid;
1826}; 1850};
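
The new fec_cfg word pairs each FEC mode's SUPPORTED bit with an ENABLED bit. A sketch of how a consumer might test such a pair; the values mirror the Clause 74 defines above, but the helper itself is hypothetical.

	#include <linux/types.h>

	#define EXAMPLE_FEC_CLAUSE74_SUPPORTED	0x8U
	#define EXAMPLE_FEC_CLAUSE74_ENABLED	0x10U

	/* A mode is active only when firmware reports it both supported
	 * and currently enabled.
	 */
	static bool example_fec_clause74_active(u16 fec_cfg)
	{
		return (fec_cfg & EXAMPLE_FEC_CLAUSE74_SUPPORTED) &&
		       (fec_cfg & EXAMPLE_FEC_CLAUSE74_ENABLED);
	}
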
1827 1851
@@ -1842,6 +1866,8 @@ struct hwrm_port_mac_cfg_input {
1842 #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL 1866 #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
1843 #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL 1867 #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
1844 #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL 1868 #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
1869 #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
1870 #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
1845 __le32 enables; 1871 __le32 enables;
1846 #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL 1872 #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
1847 #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL 1873 #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
@@ -2127,6 +2153,7 @@ struct hwrm_port_phy_i2c_read_output {
2127 u8 valid; 2153 u8 valid;
2128}; 2154};
2129 2155
2156/* hwrm_queue_qportcfg */
2130/* Input (24 bytes) */ 2157/* Input (24 bytes) */
2131struct hwrm_queue_qportcfg_input { 2158struct hwrm_queue_qportcfg_input {
2132 __le16 req_type; 2159 __le16 req_type;
@@ -2382,7 +2409,7 @@ struct hwrm_queue_cos2bw_cfg_input {
2382 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP (0x0UL << 0) 2409 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP (0x0UL << 0)
2383 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS (0x1UL << 0) 2410 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS (0x1UL << 0)
2384 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) 2411 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
2385 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) 2412 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
2386 u8 queue_id0_pri_lvl; 2413 u8 queue_id0_pri_lvl;
2387 u8 queue_id0_bw_weight; 2414 u8 queue_id0_bw_weight;
2388 u8 queue_id1; 2415 u8 queue_id1;
@@ -2392,7 +2419,7 @@ struct hwrm_queue_cos2bw_cfg_input {
2392 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP (0x0UL << 0) 2419 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP (0x0UL << 0)
2393 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS (0x1UL << 0) 2420 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS (0x1UL << 0)
2394 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) 2421 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
2395 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) 2422 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
2396 u8 queue_id1_pri_lvl; 2423 u8 queue_id1_pri_lvl;
2397 u8 queue_id1_bw_weight; 2424 u8 queue_id1_bw_weight;
2398 u8 queue_id2; 2425 u8 queue_id2;
@@ -2402,7 +2429,7 @@ struct hwrm_queue_cos2bw_cfg_input {
2402 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP (0x0UL << 0) 2429 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP (0x0UL << 0)
2403 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS (0x1UL << 0) 2430 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS (0x1UL << 0)
2404 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) 2431 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
2405 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) 2432 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
2406 u8 queue_id2_pri_lvl; 2433 u8 queue_id2_pri_lvl;
2407 u8 queue_id2_bw_weight; 2434 u8 queue_id2_bw_weight;
2408 u8 queue_id3; 2435 u8 queue_id3;
@@ -2412,7 +2439,7 @@ struct hwrm_queue_cos2bw_cfg_input {
2412 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP (0x0UL << 0) 2439 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP (0x0UL << 0)
2413 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS (0x1UL << 0) 2440 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS (0x1UL << 0)
2414 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) 2441 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
2415 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) 2442 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
2416 u8 queue_id3_pri_lvl; 2443 u8 queue_id3_pri_lvl;
2417 u8 queue_id3_bw_weight; 2444 u8 queue_id3_bw_weight;
2418 u8 queue_id4; 2445 u8 queue_id4;
@@ -2422,7 +2449,7 @@ struct hwrm_queue_cos2bw_cfg_input {
2422 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP (0x0UL << 0) 2449 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP (0x0UL << 0)
2423 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS (0x1UL << 0) 2450 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS (0x1UL << 0)
2424 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) 2451 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
2425 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) 2452 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
2426 u8 queue_id4_pri_lvl; 2453 u8 queue_id4_pri_lvl;
2427 u8 queue_id4_bw_weight; 2454 u8 queue_id4_bw_weight;
2428 u8 queue_id5; 2455 u8 queue_id5;
@@ -2432,7 +2459,7 @@ struct hwrm_queue_cos2bw_cfg_input {
2432 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP (0x0UL << 0) 2459 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP (0x0UL << 0)
2433 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS (0x1UL << 0) 2460 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS (0x1UL << 0)
2434 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) 2461 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
2435 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) 2462 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
2436 u8 queue_id5_pri_lvl; 2463 u8 queue_id5_pri_lvl;
2437 u8 queue_id5_bw_weight; 2464 u8 queue_id5_bw_weight;
2438 u8 queue_id6; 2465 u8 queue_id6;
@@ -2442,7 +2469,7 @@ struct hwrm_queue_cos2bw_cfg_input {
2442 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP (0x0UL << 0) 2469 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP (0x0UL << 0)
2443 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS (0x1UL << 0) 2470 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS (0x1UL << 0)
2444 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) 2471 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
2445 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) 2472 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
2446 u8 queue_id6_pri_lvl; 2473 u8 queue_id6_pri_lvl;
2447 u8 queue_id6_bw_weight; 2474 u8 queue_id6_bw_weight;
2448 u8 queue_id7; 2475 u8 queue_id7;
@@ -2452,7 +2479,7 @@ struct hwrm_queue_cos2bw_cfg_input {
2452 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP (0x0UL << 0) 2479 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP (0x0UL << 0)
2453 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS (0x1UL << 0) 2480 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS (0x1UL << 0)
2454 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) 2481 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
2455 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) 2482 #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
2456 u8 queue_id7_pri_lvl; 2483 u8 queue_id7_pri_lvl;
2457 u8 queue_id7_bw_weight; 2484 u8 queue_id7_bw_weight;
2458 u8 unused_1[5]; 2485 u8 unused_1[5];
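
The RESERVED_LAST fix above matters because each queue_idN_tsa_assign field is a u8: a 0xffffUL value can never match the field and silently truncates on assignment. A hypothetical compile-time guard for values destined for a u8 field:

	#include <linux/bug.h>
	#include <linux/kernel.h>	/* U8_MAX */

	#define EXAMPLE_TSA_ASSIGN_RESERVED_LAST	0xffUL

	static inline void example_tsa_sanity_check(void)
	{
		/* Fails the build if the define outgrows the u8 it describes. */
		BUILD_BUG_ON(EXAMPLE_TSA_ASSIGN_RESERVED_LAST > U8_MAX);
	}
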
@@ -3150,7 +3177,7 @@ struct hwrm_cfa_l2_filter_cfg_output {
3150}; 3177};
3151 3178
3152/* hwrm_cfa_l2_set_rx_mask */ 3179/* hwrm_cfa_l2_set_rx_mask */
3153/* Input (40 bytes) */ 3180/* Input (56 bytes) */
3154struct hwrm_cfa_l2_set_rx_mask_input { 3181struct hwrm_cfa_l2_set_rx_mask_input {
3155 __le16 req_type; 3182 __le16 req_type;
3156 __le16 cmpl_ring; 3183 __le16 cmpl_ring;
@@ -3165,9 +3192,15 @@ struct hwrm_cfa_l2_set_rx_mask_input {
3165 #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL 3192 #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
3166 #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL 3193 #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
3167 #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL 3194 #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
3195 #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
3196 #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
3197 #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
3168 __le64 mc_tbl_addr; 3198 __le64 mc_tbl_addr;
3169 __le32 num_mc_entries; 3199 __le32 num_mc_entries;
3170 __le32 unused_0; 3200 __le32 unused_0;
3201 __le64 vlan_tag_tbl_addr;
3202 __le32 num_vlan_tags;
3203 __le32 unused_1;
3171}; 3204};
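
The extended rx-mask request grows from 40 to 56 bytes by adding a second table reference (VLAN tags) alongside the multicast one. Both follow the same DMA-address-plus-count convention, sketched here with illustrative names:

	#include <linux/types.h>

	/* The host DMA-maps an array, then hands firmware its bus address
	 * and element count; the trailing pad keeps 8-byte alignment.
	 */
	struct example_tbl_ref {
		__le64 tbl_addr;	/* DMA address of the table */
		__le32 num_entries;	/* valid elements in the table */
		__le32 unused;		/* pad */
	};
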
3172 3205
3173/* Output (16 bytes) */ 3206/* Output (16 bytes) */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
index 40a7b0e09612..73f2249555b5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -13,6 +13,7 @@
13enum bnxt_nvm_directory_type { 13enum bnxt_nvm_directory_type {
14 BNX_DIR_TYPE_UNUSED = 0, 14 BNX_DIR_TYPE_UNUSED = 0,
15 BNX_DIR_TYPE_PKG_LOG = 1, 15 BNX_DIR_TYPE_PKG_LOG = 1,
16 BNX_DIR_TYPE_UPDATE = 2,
16 BNX_DIR_TYPE_CHIMP_PATCH = 3, 17 BNX_DIR_TYPE_CHIMP_PATCH = 3,
17 BNX_DIR_TYPE_BOOTCODE = 4, 18 BNX_DIR_TYPE_BOOTCODE = 4,
18 BNX_DIR_TYPE_VPD = 5, 19 BNX_DIR_TYPE_VPD = 5,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 541456398dfb..76ed6df0fe53 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -450,34 +450,6 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
450 genet_dma_ring_regs[r]); 450 genet_dma_ring_regs[r]);
451} 451}
452 452
453static int bcmgenet_get_settings(struct net_device *dev,
454 struct ethtool_cmd *cmd)
455{
456 struct bcmgenet_priv *priv = netdev_priv(dev);
457
458 if (!netif_running(dev))
459 return -EINVAL;
460
461 if (!priv->phydev)
462 return -ENODEV;
463
464 return phy_ethtool_gset(priv->phydev, cmd);
465}
466
467static int bcmgenet_set_settings(struct net_device *dev,
468 struct ethtool_cmd *cmd)
469{
470 struct bcmgenet_priv *priv = netdev_priv(dev);
471
472 if (!netif_running(dev))
473 return -EINVAL;
474
475 if (!priv->phydev)
476 return -ENODEV;
477
478 return phy_ethtool_sset(priv->phydev, cmd);
479}
480
481static int bcmgenet_set_rx_csum(struct net_device *dev, 453static int bcmgenet_set_rx_csum(struct net_device *dev,
482 netdev_features_t wanted) 454 netdev_features_t wanted)
483{ 455{
@@ -941,7 +913,7 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
941 e->eee_active = p->eee_active; 913 e->eee_active = p->eee_active;
942 e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); 914 e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
943 915
944 return phy_ethtool_get_eee(priv->phydev, e); 916 return phy_ethtool_get_eee(dev->phydev, e);
945} 917}
946 918
947static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) 919static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
@@ -958,7 +930,7 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
958 if (!p->eee_enabled) { 930 if (!p->eee_enabled) {
959 bcmgenet_eee_enable_set(dev, false); 931 bcmgenet_eee_enable_set(dev, false);
960 } else { 932 } else {
961 ret = phy_init_eee(priv->phydev, 0); 933 ret = phy_init_eee(dev->phydev, 0);
962 if (ret) { 934 if (ret) {
963 netif_err(priv, hw, dev, "EEE initialization failed\n"); 935 netif_err(priv, hw, dev, "EEE initialization failed\n");
964 return ret; 936 return ret;
@@ -968,14 +940,12 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
968 bcmgenet_eee_enable_set(dev, true); 940 bcmgenet_eee_enable_set(dev, true);
969 } 941 }
970 942
971 return phy_ethtool_set_eee(priv->phydev, e); 943 return phy_ethtool_set_eee(dev->phydev, e);
972} 944}
973 945
974static int bcmgenet_nway_reset(struct net_device *dev) 946static int bcmgenet_nway_reset(struct net_device *dev)
975{ 947{
976 struct bcmgenet_priv *priv = netdev_priv(dev); 948 return genphy_restart_aneg(dev->phydev);
977
978 return genphy_restart_aneg(priv->phydev);
979} 949}
980 950
981/* standard ethtool support functions. */ 951/* standard ethtool support functions. */
@@ -983,8 +953,6 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
983 .get_strings = bcmgenet_get_strings, 953 .get_strings = bcmgenet_get_strings,
984 .get_sset_count = bcmgenet_get_sset_count, 954 .get_sset_count = bcmgenet_get_sset_count,
985 .get_ethtool_stats = bcmgenet_get_ethtool_stats, 955 .get_ethtool_stats = bcmgenet_get_ethtool_stats,
986 .get_settings = bcmgenet_get_settings,
987 .set_settings = bcmgenet_set_settings,
988 .get_drvinfo = bcmgenet_get_drvinfo, 956 .get_drvinfo = bcmgenet_get_drvinfo,
989 .get_link = ethtool_op_get_link, 957 .get_link = ethtool_op_get_link,
990 .get_msglevel = bcmgenet_get_msglevel, 958 .get_msglevel = bcmgenet_get_msglevel,
@@ -996,18 +964,21 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
996 .nway_reset = bcmgenet_nway_reset, 964 .nway_reset = bcmgenet_nway_reset,
997 .get_coalesce = bcmgenet_get_coalesce, 965 .get_coalesce = bcmgenet_get_coalesce,
998 .set_coalesce = bcmgenet_set_coalesce, 966 .set_coalesce = bcmgenet_set_coalesce,
967 .get_link_ksettings = phy_ethtool_get_link_ksettings,
968 .set_link_ksettings = phy_ethtool_set_link_ksettings,
999}; 969};
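
With the PHY now reached through dev->phydev, the driver can drop its hand-rolled get/set_settings wrappers and point ethtool at the generic phylib helpers directly, as sketched below for a hypothetical driver:

	#include <linux/ethtool.h>
	#include <linux/phy.h>

	static const struct ethtool_ops example_ethtool_ops = {
		.get_link = ethtool_op_get_link,
		/* The generic phylib helpers operate on dev->phydev, so no
		 * per-driver wrapper is needed any more.
		 */
		.get_link_ksettings = phy_ethtool_get_link_ksettings,
		.set_link_ksettings = phy_ethtool_set_link_ksettings,
	};
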
1000 970
1001/* Power down the unimac, based on mode. */ 971/* Power down the unimac, based on mode. */
1002static int bcmgenet_power_down(struct bcmgenet_priv *priv, 972static int bcmgenet_power_down(struct bcmgenet_priv *priv,
1003 enum bcmgenet_power_mode mode) 973 enum bcmgenet_power_mode mode)
1004{ 974{
975 struct net_device *ndev = priv->dev;
1005 int ret = 0; 976 int ret = 0;
1006 u32 reg; 977 u32 reg;
1007 978
1008 switch (mode) { 979 switch (mode) {
1009 case GENET_POWER_CABLE_SENSE: 980 case GENET_POWER_CABLE_SENSE:
1010 phy_detach(priv->phydev); 981 phy_detach(ndev->phydev);
1011 break; 982 break;
1012 983
1013 case GENET_POWER_WOL_MAGIC: 984 case GENET_POWER_WOL_MAGIC:
@@ -1068,7 +1039,6 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
1068/* ioctl handles special commands that are not present in ethtool. */ 1039
1069static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1040static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1070{ 1041{
1071 struct bcmgenet_priv *priv = netdev_priv(dev);
1072 int val = 0; 1042 int val = 0;
1073 1043
1074 if (!netif_running(dev)) 1044 if (!netif_running(dev))
@@ -1078,10 +1048,10 @@ static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1078 case SIOCGMIIPHY: 1048 case SIOCGMIIPHY:
1079 case SIOCGMIIREG: 1049 case SIOCGMIIREG:
1080 case SIOCSMIIREG: 1050 case SIOCSMIIREG:
1081 if (!priv->phydev) 1051 if (!dev->phydev)
1082 val = -ENODEV; 1052 val = -ENODEV;
1083 else 1053 else
1084 val = phy_mii_ioctl(priv->phydev, rq, cmd); 1054 val = phy_mii_ioctl(dev->phydev, rq, cmd);
1085 break; 1055 break;
1086 1056
1087 default: 1057 default:
@@ -2464,6 +2434,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
2464{ 2434{
2465 struct bcmgenet_priv *priv = container_of( 2435 struct bcmgenet_priv *priv = container_of(
2466 work, struct bcmgenet_priv, bcmgenet_irq_work); 2436 work, struct bcmgenet_priv, bcmgenet_irq_work);
2437 struct net_device *ndev = priv->dev;
2467 2438
2468 netif_dbg(priv, intr, priv->dev, "%s\n", __func__); 2439 netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
2469 2440
@@ -2476,7 +2447,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
2476 2447
2477 /* Link UP/DOWN event */ 2448 /* Link UP/DOWN event */
2478 if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) { 2449 if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
2479 phy_mac_interrupt(priv->phydev, 2450 phy_mac_interrupt(ndev->phydev,
2480 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP)); 2451 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
2481 priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT; 2452 priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
2482 } 2453 }
@@ -2838,7 +2809,7 @@ static void bcmgenet_netif_start(struct net_device *dev)
2838 /* Monitor link interrupts now */ 2809 /* Monitor link interrupts now */
2839 bcmgenet_link_intr_enable(priv); 2810 bcmgenet_link_intr_enable(priv);
2840 2811
2841 phy_start(priv->phydev); 2812 phy_start(dev->phydev);
2842} 2813}
2843 2814
2844static int bcmgenet_open(struct net_device *dev) 2815static int bcmgenet_open(struct net_device *dev)
@@ -2937,7 +2908,7 @@ static void bcmgenet_netif_stop(struct net_device *dev)
2937 struct bcmgenet_priv *priv = netdev_priv(dev); 2908 struct bcmgenet_priv *priv = netdev_priv(dev);
2938 2909
2939 netif_tx_stop_all_queues(dev); 2910 netif_tx_stop_all_queues(dev);
2940 phy_stop(priv->phydev); 2911 phy_stop(dev->phydev);
2941 bcmgenet_intr_disable(priv); 2912 bcmgenet_intr_disable(priv);
2942 bcmgenet_disable_rx_napi(priv); 2913 bcmgenet_disable_rx_napi(priv);
2943 bcmgenet_disable_tx_napi(priv); 2914 bcmgenet_disable_tx_napi(priv);
@@ -2963,7 +2934,7 @@ static int bcmgenet_close(struct net_device *dev)
2963 bcmgenet_netif_stop(dev); 2934 bcmgenet_netif_stop(dev);
2964 2935
2965 /* Really kill the PHY state machine and disconnect from it */ 2936 /* Really kill the PHY state machine and disconnect from it */
2966 phy_disconnect(priv->phydev); 2937 phy_disconnect(dev->phydev);
2967 2938
2968 /* Disable MAC receive */ 2939 /* Disable MAC receive */
2969 umac_enable_set(priv, CMD_RX_EN, false); 2940 umac_enable_set(priv, CMD_RX_EN, false);
@@ -3522,7 +3493,7 @@ static int bcmgenet_suspend(struct device *d)
3522 3493
3523 bcmgenet_netif_stop(dev); 3494 bcmgenet_netif_stop(dev);
3524 3495
3525 phy_suspend(priv->phydev); 3496 phy_suspend(dev->phydev);
3526 3497
3527 netif_device_detach(dev); 3498 netif_device_detach(dev);
3528 3499
@@ -3586,7 +3557,7 @@ static int bcmgenet_resume(struct device *d)
3586 if (priv->wolopts) 3557 if (priv->wolopts)
3587 clk_disable_unprepare(priv->clk_wol); 3558 clk_disable_unprepare(priv->clk_wol);
3588 3559
3589 phy_init_hw(priv->phydev); 3560 phy_init_hw(dev->phydev);
3590 /* Speed settings must be restored */ 3561 /* Speed settings must be restored */
3591 bcmgenet_mii_config(priv->dev); 3562 bcmgenet_mii_config(priv->dev);
3592 3563
@@ -3619,7 +3590,7 @@ static int bcmgenet_resume(struct device *d)
3619 3590
3620 netif_device_attach(dev); 3591 netif_device_attach(dev);
3621 3592
3622 phy_resume(priv->phydev); 3593 phy_resume(dev->phydev);
3623 3594
3624 if (priv->eee.eee_enabled) 3595 if (priv->eee.eee_enabled)
3625 bcmgenet_eee_enable_set(dev, true); 3596 bcmgenet_eee_enable_set(dev, true);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 1e2dc34d331a..0f0868c56f05 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -597,7 +597,6 @@ struct bcmgenet_priv {
597 597
598 /* MDIO bus variables */ 598 /* MDIO bus variables */
599 wait_queue_head_t wq; 599 wait_queue_head_t wq;
600 struct phy_device *phydev;
601 bool internal_phy; 600 bool internal_phy;
602 struct device_node *phy_dn; 601 struct device_node *phy_dn;
603 struct device_node *mdio_dn; 602 struct device_node *mdio_dn;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 457c3bc8cfff..e907acd81da9 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -86,7 +86,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
86void bcmgenet_mii_setup(struct net_device *dev) 86void bcmgenet_mii_setup(struct net_device *dev)
87{ 87{
88 struct bcmgenet_priv *priv = netdev_priv(dev); 88 struct bcmgenet_priv *priv = netdev_priv(dev);
89 struct phy_device *phydev = priv->phydev; 89 struct phy_device *phydev = dev->phydev;
90 u32 reg, cmd_bits = 0; 90 u32 reg, cmd_bits = 0;
91 bool status_changed = false; 91 bool status_changed = false;
92 92
@@ -183,9 +183,9 @@ void bcmgenet_mii_reset(struct net_device *dev)
183 if (GENET_IS_V4(priv)) 183 if (GENET_IS_V4(priv))
184 return; 184 return;
185 185
186 if (priv->phydev) { 186 if (dev->phydev) {
187 phy_init_hw(priv->phydev); 187 phy_init_hw(dev->phydev);
188 phy_start_aneg(priv->phydev); 188 phy_start_aneg(dev->phydev);
189 } 189 }
190} 190}
191 191
@@ -236,6 +236,7 @@ static void bcmgenet_internal_phy_setup(struct net_device *dev)
236 236
237static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) 237static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
238{ 238{
239 struct net_device *ndev = priv->dev;
239 u32 reg; 240 u32 reg;
240 241
241 /* Speed settings are set in bcmgenet_mii_setup() */ 242 /* Speed settings are set in bcmgenet_mii_setup() */
@@ -244,14 +245,14 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
244 bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); 245 bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
245 246
246 if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) 247 if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
247 fixed_phy_set_link_update(priv->phydev, 248 fixed_phy_set_link_update(ndev->phydev,
248 bcmgenet_fixed_phy_link_update); 249 bcmgenet_fixed_phy_link_update);
249} 250}
250 251
251int bcmgenet_mii_config(struct net_device *dev) 252int bcmgenet_mii_config(struct net_device *dev)
252{ 253{
253 struct bcmgenet_priv *priv = netdev_priv(dev); 254 struct bcmgenet_priv *priv = netdev_priv(dev);
254 struct phy_device *phydev = priv->phydev; 255 struct phy_device *phydev = dev->phydev;
255 struct device *kdev = &priv->pdev->dev; 256 struct device *kdev = &priv->pdev->dev;
256 const char *phy_name = NULL; 257 const char *phy_name = NULL;
257 u32 id_mode_dis = 0; 258 u32 id_mode_dis = 0;
@@ -302,7 +303,7 @@ int bcmgenet_mii_config(struct net_device *dev)
302 * capabilities, use that knowledge to also configure the 303 * capabilities, use that knowledge to also configure the
303 * Reverse MII interface correctly. 304 * Reverse MII interface correctly.
304 */ 305 */
305 if ((priv->phydev->supported & PHY_BASIC_FEATURES) == 306 if ((phydev->supported & PHY_BASIC_FEATURES) ==
306 PHY_BASIC_FEATURES) 307 PHY_BASIC_FEATURES)
307 port_ctrl = PORT_MODE_EXT_RVMII_25; 308 port_ctrl = PORT_MODE_EXT_RVMII_25;
308 else 309 else
@@ -371,7 +372,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
371 return -ENODEV; 372 return -ENODEV;
372 } 373 }
373 } else { 374 } else {
374 phydev = priv->phydev; 375 phydev = dev->phydev;
375 phydev->dev_flags = phy_flags; 376 phydev->dev_flags = phy_flags;
376 377
377 ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup, 378 ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
@@ -382,8 +383,6 @@ int bcmgenet_mii_probe(struct net_device *dev)
382 } 383 }
383 } 384 }
384 385
385 priv->phydev = phydev;
386
387 /* Configure port multiplexer based on the probed PHY device, since 386
388 * reading the 'max-speed' property determines the maximum supported 387 * reading the 'max-speed' property determines the maximum supported
389 * PHY speed which is needed for bcmgenet_mii_config() to configure 388 * PHY speed which is needed for bcmgenet_mii_config() to configure
@@ -391,7 +390,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
391 */ 390 */
392 ret = bcmgenet_mii_config(dev); 391 ret = bcmgenet_mii_config(dev);
393 if (ret) { 392 if (ret) {
394 phy_disconnect(priv->phydev); 393 phy_disconnect(phydev);
395 return ret; 394 return ret;
396 } 395 }
397 396
@@ -401,7 +400,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
401 * Ethernet MAC ISRs 400 * Ethernet MAC ISRs
402 */ 401 */
403 if (priv->internal_phy) 402 if (priv->internal_phy)
404 priv->phydev->irq = PHY_IGNORE_INTERRUPT; 403 phydev->irq = PHY_IGNORE_INTERRUPT;
405 404
406 return 0; 405 return 0;
407} 406}
@@ -606,7 +605,6 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
606 605
607 } 606 }
608 607
609 priv->phydev = phydev;
610 priv->phy_interface = pd->phy_interface; 608 priv->phy_interface = pd->phy_interface;
611 609
612 return 0; 610 return 0;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index d35864ada9a3..c03d37016a48 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -19,26 +19,16 @@
19* This file may also be available under a different license from Cavium. 19* This file may also be available under a different license from Cavium.
20* Contact Cavium, Inc. for more information 20* Contact Cavium, Inc. for more information
21**********************************************************************/ 21**********************************************************************/
22#include <linux/version.h>
23#include <linux/types.h>
24#include <linux/list.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h> 22#include <linux/pci.h>
27#include <linux/kthread.h>
28#include <linux/netdevice.h> 23#include <linux/netdevice.h>
29#include "octeon_config.h"
30#include "liquidio_common.h" 24#include "liquidio_common.h"
31#include "octeon_droq.h" 25#include "octeon_droq.h"
32#include "octeon_iq.h" 26#include "octeon_iq.h"
33#include "response_manager.h" 27#include "response_manager.h"
34#include "octeon_device.h" 28#include "octeon_device.h"
35#include "octeon_nic.h"
36#include "octeon_main.h" 29#include "octeon_main.h"
37#include "octeon_network.h"
38#include "cn66xx_regs.h" 30#include "cn66xx_regs.h"
39#include "cn66xx_device.h" 31#include "cn66xx_device.h"
40#include "liquidio_image.h"
41#include "octeon_mem_ops.h"
42 32
43int lio_cn6xxx_soft_reset(struct octeon_device *oct) 33int lio_cn6xxx_soft_reset(struct octeon_device *oct)
44{ 34{
@@ -74,9 +64,9 @@ void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
74 u32 val; 64 u32 val;
75 65
76 pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val); 66 pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
77 if (val & 0x000f0000) { 67 if (val & 0x000c0000) {
78 dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n", 68 dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
79 val & 0x000f0000); 69 val & 0x000c0000);
80 } 70 }
81 71
82 val |= 0xf; /* Enable Link error reporting */ 72 val |= 0xf; /* Enable Link error reporting */
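
The narrowed 0x000c0000 mask warns on a subset of the four error-status bits the old 0x000f0000 covered. A sketch of the read-test-report pattern; the register offset and mask here are illustrative, not the CN6XXX values.

	#include <linux/pci.h>

	#define EXAMPLE_DEVCTL_OFFSET	0x78		/* illustrative */
	#define EXAMPLE_ERR_BITS	0x000c0000	/* bits of interest */

	static void example_report_link_errors(struct pci_dev *pdev)
	{
		u32 val;

		pci_read_config_dword(pdev, EXAMPLE_DEVCTL_OFFSET, &val);
		if (val & EXAMPLE_ERR_BITS)
			dev_err(&pdev->dev, "PCI-E link error detected: 0x%08x\n",
				val & EXAMPLE_ERR_BITS);
	}
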
@@ -229,7 +219,7 @@ void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
229 /* / Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */ 219 /* / Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
230 octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0); 220 octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);
231 221
232 /* / Select ES,RO,NS setting from register for Output Queue Packet 222 /* Select ES, RO, NS setting from register for Output Queue Packet
233 * Address 223 * Address
234 */ 224 */
235 octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF); 225 octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);
@@ -547,14 +537,14 @@ static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
547 dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port); 537 dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
548} 538}
549 539
550void 540static void
551lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64) 541lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
552{ 542{
553 dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n", 543 dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
554 CVM_CAST64(intr64)); 544 CVM_CAST64(intr64));
555} 545}
556 546
557int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct) 547static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
558{ 548{
559 struct octeon_droq *droq; 549 struct octeon_droq *droq;
560 int oq_no; 550 int oq_no;
@@ -579,7 +569,7 @@ int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
579 continue; 569 continue;
580 570
581 droq = oct->droq[oq_no]; 571 droq = oct->droq[oq_no];
582 pkt_count = octeon_droq_check_hw_for_pkts(oct, droq); 572 pkt_count = octeon_droq_check_hw_for_pkts(droq);
583 if (pkt_count) { 573 if (pkt_count) {
584 oct->droq_intr |= (1ULL << oq_no); 574 oct->droq_intr |= (1ULL << oq_no);
585 if (droq->ops.poll_mode) { 575 if (droq->ops.poll_mode) {
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
index fe2932cb7ed8..28c47224221a 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -82,8 +82,6 @@ void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no);
82void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no); 82void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no);
83void lio_cn6xxx_enable_io_queues(struct octeon_device *oct); 83void lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
84void lio_cn6xxx_disable_io_queues(struct octeon_device *oct); 84void lio_cn6xxx_disable_io_queues(struct octeon_device *oct);
85void lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64);
86int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct);
87irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev); 85irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev);
88void lio_cn6xxx_reinit_regs(struct octeon_device *oct); 86void lio_cn6xxx_reinit_regs(struct octeon_device *oct);
89void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr, 87void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
index 8e830d0c0754..29755bc68f12 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -19,28 +19,17 @@
19* This file may also be available under a different license from Cavium. 19* This file may also be available under a different license from Cavium.
20* Contact Cavium, Inc. for more information 20* Contact Cavium, Inc. for more information
21**********************************************************************/ 21**********************************************************************/
22#include <linux/version.h>
23#include <linux/types.h>
24#include <linux/list.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h> 22#include <linux/pci.h>
27#include <linux/kthread.h>
28#include <linux/netdevice.h> 23#include <linux/netdevice.h>
29#include "octeon_config.h"
30#include "liquidio_common.h" 24#include "liquidio_common.h"
31#include "octeon_droq.h" 25#include "octeon_droq.h"
32#include "octeon_iq.h" 26#include "octeon_iq.h"
33#include "response_manager.h" 27#include "response_manager.h"
34#include "octeon_device.h" 28#include "octeon_device.h"
35#include "octeon_nic.h"
36#include "octeon_main.h" 29#include "octeon_main.h"
37#include "octeon_network.h"
38#include "cn66xx_regs.h" 30#include "cn66xx_regs.h"
39#include "cn66xx_device.h" 31#include "cn66xx_device.h"
40#include "cn68xx_regs.h" 32#include "cn68xx_regs.h"
41#include "cn68xx_device.h"
42#include "liquidio_image.h"
43#include "octeon_mem_ops.h"
44 33
45static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct) 34static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
46{ 35{
@@ -129,7 +118,7 @@ static inline void lio_cn68xx_vendor_message_fix(struct octeon_device *oct)
129 pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val); 118 pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val);
130} 119}
131 120
132int lio_is_210nv(struct octeon_device *oct) 121static int lio_is_210nv(struct octeon_device *oct)
133{ 122{
134 u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG); 123 u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG);
135 124
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
index d4e1c9fb0bf2..ea7bdcce6044 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
@@ -28,6 +28,5 @@
28#define __CN68XX_DEVICE_H__ 28#define __CN68XX_DEVICE_H__
29 29
30int lio_setup_cn68xx_octeon_device(struct octeon_device *oct); 30int lio_setup_cn68xx_octeon_device(struct octeon_device *oct);
31int lio_is_210nv(struct octeon_device *oct);
32 31
33#endif 32#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
index 38cddbd107b6..d45a0f4aaf1f 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
@@ -29,7 +29,6 @@
29 29
30#ifndef __CN68XX_REGS_H__ 30#ifndef __CN68XX_REGS_H__
31#define __CN68XX_REGS_H__ 31#define __CN68XX_REGS_H__
32#include "cn66xx_regs.h"
33 32
34/*###################### REQUEST QUEUE #########################*/ 33/*###################### REQUEST QUEUE #########################*/
35 34
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 03bfa9771e4d..289eb8907922 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -19,13 +19,9 @@
19* This file may also be available under a different license from Cavium. 19* This file may also be available under a different license from Cavium.
20* Contact Cavium, Inc. for more information 20* Contact Cavium, Inc. for more information
21**********************************************************************/ 21**********************************************************************/
22#include <linux/version.h>
23#include <linux/netdevice.h> 22#include <linux/netdevice.h>
24#include <linux/net_tstamp.h> 23#include <linux/net_tstamp.h>
25#include <linux/ethtool.h>
26#include <linux/dma-mapping.h>
27#include <linux/pci.h> 24#include <linux/pci.h>
28#include "octeon_config.h"
29#include "liquidio_common.h" 25#include "liquidio_common.h"
30#include "octeon_droq.h" 26#include "octeon_droq.h"
31#include "octeon_iq.h" 27#include "octeon_iq.h"
@@ -36,9 +32,6 @@
36#include "octeon_network.h" 32#include "octeon_network.h"
37#include "cn66xx_regs.h" 33#include "cn66xx_regs.h"
38#include "cn66xx_device.h" 34#include "cn66xx_device.h"
39#include "cn68xx_regs.h"
40#include "cn68xx_device.h"
41#include "liquidio_image.h"
42 35
43static int octnet_get_link_stats(struct net_device *netdev); 36static int octnet_get_link_stats(struct net_device *netdev);
44 37
@@ -106,6 +99,7 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
106 "tx_tso", 99 "tx_tso",
107 "tx_tso_packets", 100 "tx_tso_packets",
108 "tx_tso_err", 101 "tx_tso_err",
102 "tx_vxlan",
109 103
110 "mac_tx_total_pkts", 104 "mac_tx_total_pkts",
111 "mac_tx_total_bytes", 105 "mac_tx_total_bytes",
@@ -129,6 +123,9 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
129 "rx_err_link", 123 "rx_err_link",
130 "rx_err_drop", 124 "rx_err_drop",
131 125
126 "rx_vxlan",
127 "rx_vxlan_err",
128
132 "rx_lro_pkts", 129 "rx_lro_pkts",
133 "rx_lro_bytes", 130 "rx_lro_bytes",
134 "rx_total_lro", 131 "rx_total_lro",
@@ -167,6 +164,7 @@ static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
167 "fw_bytes_sent", 164 "fw_bytes_sent",
168 165
169 "tso", 166 "tso",
167 "vxlan",
170 "txq_restart", 168 "txq_restart",
171}; 169};
172 170
@@ -186,6 +184,7 @@ static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
186 "fw_bytes_received", 184 "fw_bytes_received",
187 "fw_dropped_nodispatch", 185 "fw_dropped_nodispatch",
188 186
187 "vxlan",
189 "buffer_alloc_failure", 188 "buffer_alloc_failure",
190}; 189};
191 190
@@ -340,20 +339,18 @@ static void octnet_mdio_resp_callback(struct octeon_device *oct,
340 u32 status, 339 u32 status,
341 void *buf) 340 void *buf)
342{ 341{
343 struct oct_mdio_cmd_resp *mdio_cmd_rsp;
344 struct oct_mdio_cmd_context *mdio_cmd_ctx; 342 struct oct_mdio_cmd_context *mdio_cmd_ctx;
345 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; 343 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
346 344
347 mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
348 mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr; 345 mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
349 346
350 oct = lio_get_device(mdio_cmd_ctx->octeon_id); 347 oct = lio_get_device(mdio_cmd_ctx->octeon_id);
351 if (status) { 348 if (status) {
352 dev_err(&oct->pci_dev->dev, "MDIO instruction failed. Status: %llx\n", 349
353 CVM_CAST64(status)); 350 CVM_CAST64(status));
354 ACCESS_ONCE(mdio_cmd_ctx->cond) = -1; 351 WRITE_ONCE(mdio_cmd_ctx->cond, -1);
355 } else { 352 } else {
356 ACCESS_ONCE(mdio_cmd_ctx->cond) = 1; 353 WRITE_ONCE(mdio_cmd_ctx->cond, 1);
357 } 354 }
358 wake_up_interruptible(&mdio_cmd_ctx->wc); 355 wake_up_interruptible(&mdio_cmd_ctx->wc);
359} 356}
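
ACCESS_ONCE() is being phased out in favour of READ_ONCE()/WRITE_ONCE(), which also work on non-scalar types and make the direction of each access explicit. A sketch of the completion pattern the callback uses, with a hypothetical context type:

	#include <linux/compiler.h>
	#include <linux/types.h>
	#include <linux/wait.h>

	struct example_ctx {
		int cond;		/* -1 = failed, 1 = done, 0 = pending */
		wait_queue_head_t wc;
	};

	static void example_complete(struct example_ctx *ctx, int status)
	{
		WRITE_ONCE(ctx->cond, status ? -1 : 1);
		wake_up_interruptible(&ctx->wc);
	}

	static bool example_succeeded(struct example_ctx *ctx)
	{
		return READ_ONCE(ctx->cond) == 1;
	}
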
@@ -384,7 +381,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
384 mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr; 381 mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
385 mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr; 382 mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
386 383
387 ACCESS_ONCE(mdio_cmd_ctx->cond) = 0; 384 WRITE_ONCE(mdio_cmd_ctx->cond, 0);
388 mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev); 385 mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
389 mdio_cmd->op = op; 386 mdio_cmd->op = op;
390 mdio_cmd->mdio_addr = loc; 387 mdio_cmd->mdio_addr = loc;
@@ -423,7 +420,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
423 octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp), 420 octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
424 sizeof(struct oct_mdio_cmd) / 8); 421 sizeof(struct oct_mdio_cmd) / 8);
425 422
426 if (ACCESS_ONCE(mdio_cmd_ctx->cond) == 1) { 423 if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
427 if (!op) 424 if (!op)
428 *value = mdio_cmd_rsp->resp.value1; 425 *value = mdio_cmd_rsp->resp.value1;
429 } else { 426 } else {
@@ -467,18 +464,16 @@ static int lio_set_phys_id(struct net_device *netdev,
467 464
468 /* Configure Beacon values */ 465 /* Configure Beacon values */
469 value = LIO68XX_LED_BEACON_CFGON; 466 value = LIO68XX_LED_BEACON_CFGON;
470 ret = 467 ret = octnet_mdio45_access(lio, 1,
471 octnet_mdio45_access(lio, 1, 468 LIO68XX_LED_BEACON_ADDR,
472 LIO68XX_LED_BEACON_ADDR, 469 &value);
473 &value);
474 if (ret) 470 if (ret)
475 return ret; 471 return ret;
476 472
477 value = LIO68XX_LED_CTRL_CFGON; 473 value = LIO68XX_LED_CTRL_CFGON;
478 ret = 474 ret = octnet_mdio45_access(lio, 1,
479 octnet_mdio45_access(lio, 1, 475 LIO68XX_LED_CTRL_ADDR,
480 LIO68XX_LED_CTRL_ADDR, 476 &value);
481 &value);
482 if (ret) 477 if (ret)
483 return ret; 478 return ret;
484 } else { 479 } else {
@@ -557,7 +552,7 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
557 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx); 552 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
558 } 553 }
559 554
560 if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) { 555 if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) {
561 ering->rx_pending = 0; 556 ering->rx_pending = 0;
562 ering->rx_max_pending = 0; 557 ering->rx_max_pending = 0;
563 ering->rx_mini_pending = 0; 558 ering->rx_mini_pending = 0;
@@ -617,7 +612,8 @@ lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
617 612
618static void 613static void
619lio_get_ethtool_stats(struct net_device *netdev, 614lio_get_ethtool_stats(struct net_device *netdev,
620 struct ethtool_stats *stats, u64 *data) 615 struct ethtool_stats *stats __attribute__((unused)),
616 u64 *data)
621{ 617{
622 struct lio *lio = GET_LIO(netdev); 618 struct lio *lio = GET_LIO(netdev);
623 struct octeon_device *oct_dev = lio->oct_dev; 619 struct octeon_device *oct_dev = lio->oct_dev;
@@ -675,6 +671,10 @@ lio_get_ethtool_stats(struct net_device *netdev,
675 *fw_err_tso 671 *fw_err_tso
676 */ 672 */
677 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso); 673 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
674 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
675 *fw_tx_vxlan
676 */
677 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
678 678
679 /* mac tx statistics */ 679 /* mac tx statistics */
680 /*CVMX_BGXX_CMRX_TX_STAT5 */ 680 /*CVMX_BGXX_CMRX_TX_STAT5 */
@@ -729,6 +729,15 @@ lio_get_ethtool_stats(struct net_device *netdev,
729 */ 729 */
730 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop); 730 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
731 731
732 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
733 *fromwire.fw_rx_vxlan
734 */
735 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
736 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
737 *fromwire.fw_rx_vxlan_err
738 */
739 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);
740
732 /* LRO */ 741 /* LRO */
733 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 742 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
734 *fw_lro_pkts 743 *fw_lro_pkts
@@ -822,6 +831,8 @@ lio_get_ethtool_stats(struct net_device *netdev,
822 831
823 /*tso request*/ 832 /*tso request*/
824 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso); 833 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
834 /*vxlan request*/
835 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
825 /*txq restart*/ 836 /*txq restart*/
826 data[i++] = 837 data[i++] =
827 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart); 838 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
@@ -858,6 +869,9 @@ lio_get_ethtool_stats(struct net_device *netdev,
858 CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); 869 CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
859 data[i++] = 870 data[i++] =
860 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch); 871 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
872
873 data[i++] =
874 CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
861 data[i++] = 875 data[i++] =
862 CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure); 876 CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
863 } 877 }
@@ -945,7 +959,6 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
945 intr_coal->rx_max_coalesced_frames = 959 intr_coal->rx_max_coalesced_frames =
946 CFG_GET_OQ_INTR_PKT(cn6xxx->conf); 960 CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
947 } 961 }
948
949 iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no]; 962 iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
950 intr_coal->tx_max_coalesced_frames = iq->fill_threshold; 963 intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
951 break; 964 break;
@@ -1043,7 +1056,7 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
1043 return 0; 1056 return 0;
1044} 1057}
1045 1058
1046void 1059static void
1047octnet_nic_stats_callback(struct octeon_device *oct_dev, 1060octnet_nic_stats_callback(struct octeon_device *oct_dev,
1048 u32 status, void *ptr) 1061 u32 status, void *ptr)
1049{ 1062{
@@ -1083,6 +1096,9 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev,
1083 rstats->fw_err_pko = rsp_rstats->fw_err_pko; 1096 rstats->fw_err_pko = rsp_rstats->fw_err_pko;
1084 rstats->fw_err_link = rsp_rstats->fw_err_link; 1097 rstats->fw_err_link = rsp_rstats->fw_err_link;
1085 rstats->fw_err_drop = rsp_rstats->fw_err_drop; 1098 rstats->fw_err_drop = rsp_rstats->fw_err_drop;
1099 rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
1100 rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
1101
1086 /* Number of packets that are LROed */ 1102 /* Number of packets that are LROed */
1087 rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts; 1103 rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
1088 /* Number of octets that are LROed */ 1104 /* Number of octets that are LROed */
@@ -1127,6 +1143,8 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev,
1127 tstats->fw_tso = rsp_tstats->fw_tso; 1143 tstats->fw_tso = rsp_tstats->fw_tso;
1128 tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd; 1144 tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
1129 tstats->fw_err_tso = rsp_tstats->fw_err_tso; 1145 tstats->fw_err_tso = rsp_tstats->fw_err_tso;
1146 tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
1147
1130 resp->status = 1; 1148 resp->status = 1;
1131 } else { 1149 } else {
1132 resp->status = -1; 1150 resp->status = -1;
@@ -1523,7 +1541,7 @@ static int lio_nway_reset(struct net_device *netdev)
1523} 1541}
1524 1542
1525/* Return register dump len. */ 1543/* Return register dump len. */
1526static int lio_get_regs_len(struct net_device *dev) 1544static int lio_get_regs_len(struct net_device *dev __attribute__((unused)))
1527{ 1545{
1528 return OCT_ETHTOOL_REGDUMP_LEN; 1546 return OCT_ETHTOOL_REGDUMP_LEN;
1529} 1547}
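
Annotating a callback parameter that the prototype requires but the body never touches keeps -Wunused-parameter builds quiet; __maybe_unused is an equivalent, arguably more idiomatic kernel spelling, shown here on a hypothetical callback with an illustrative dump size.

	#include <linux/compiler.h>
	#include <linux/netdevice.h>

	#define EXAMPLE_REGDUMP_LEN	4096	/* illustrative */

	static int example_get_regs_len(struct net_device *dev __maybe_unused)
	{
		return EXAMPLE_REGDUMP_LEN;	/* fixed-size dump */
	}
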
@@ -1667,13 +1685,12 @@ static void lio_get_regs(struct net_device *dev,
1667 int len = 0; 1685 int len = 0;
1668 struct octeon_device *oct = lio->oct_dev; 1686 struct octeon_device *oct = lio->oct_dev;
1669 1687
1670 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
1671 regs->version = OCT_ETHTOOL_REGSVER; 1688 regs->version = OCT_ETHTOOL_REGSVER;
1672 1689
1673 switch (oct->chip_id) { 1690 switch (oct->chip_id) {
1674 /* case OCTEON_CN73XX: Todo */
1675 case OCTEON_CN68XX: 1691 case OCTEON_CN68XX:
1676 case OCTEON_CN66XX: 1692 case OCTEON_CN66XX:
1693 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
1677 len += cn6xxx_read_csr_reg(regbuf + len, oct); 1694 len += cn6xxx_read_csr_reg(regbuf + len, oct);
1678 len += cn6xxx_read_config_reg(regbuf + len, oct); 1695 len += cn6xxx_read_config_reg(regbuf + len, oct);
1679 break; 1696 break;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 1a584ebde42c..20d6942edf40 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -20,24 +20,12 @@
20* Contact Cavium, Inc. for more information 20* Contact Cavium, Inc. for more information
21**********************************************************************/ 21**********************************************************************/
22#include <linux/version.h> 22#include <linux/version.h>
23#include <linux/module.h>
24#include <linux/crc32.h>
25#include <linux/dma-mapping.h>
26#include <linux/pci.h> 23#include <linux/pci.h>
27#include <linux/pci_ids.h>
28#include <linux/ip.h>
29#include <net/ip.h>
30#include <linux/ipv6.h>
31#include <linux/net_tstamp.h> 24#include <linux/net_tstamp.h>
32#include <linux/if_vlan.h> 25#include <linux/if_vlan.h>
33#include <linux/firmware.h> 26#include <linux/firmware.h>
34#include <linux/ethtool.h>
35#include <linux/ptp_clock_kernel.h> 27#include <linux/ptp_clock_kernel.h>
36#include <linux/types.h> 28#include <net/vxlan.h>
37#include <linux/list.h>
38#include <linux/workqueue.h>
39#include <linux/interrupt.h>
40#include "octeon_config.h"
41#include "liquidio_common.h" 29#include "liquidio_common.h"
42#include "octeon_droq.h" 30#include "octeon_droq.h"
43#include "octeon_iq.h" 31#include "octeon_iq.h"
@@ -48,7 +36,6 @@
48#include "octeon_network.h" 36#include "octeon_network.h"
49#include "cn66xx_regs.h" 37#include "cn66xx_regs.h"
50#include "cn66xx_device.h" 38#include "cn66xx_device.h"
51#include "cn68xx_regs.h"
52#include "cn68xx_device.h" 39#include "cn68xx_device.h"
53#include "liquidio_image.h" 40#include "liquidio_image.h"
54 41
@@ -251,8 +238,7 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
251 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 238 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
252 if (!(oct->io_qmask.oq & (1ULL << i))) 239 if (!(oct->io_qmask.oq & (1ULL << i)))
253 continue; 240 continue;
254 pkt_cnt += octeon_droq_check_hw_for_pkts(oct, 241 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
255 oct->droq[i]);
256 } 242 }
257 if (pkt_cnt > 0) { 243 if (pkt_cnt > 0) {
258 pending_pkts += pkt_cnt; 244 pending_pkts += pkt_cnt;
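
The rewritten loop above walks only the active output queues by testing a 64-bit mask. A runnable sketch of that mask walk, with an illustrative mask value (not read from hardware):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t oq_mask = 0x5ULL; /* queues 0 and 2 active, illustrative */

        for (int i = 0; i < 64; i++) {
                if (!(oq_mask & (1ULL << i)))
                        continue; /* skip inactive queue slots cheaply */
                printf("checking droq %d for pending packets\n", i);
        }
        return 0;
}
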
@@ -507,7 +493,8 @@ static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
507 * \brief mmio handler 493 * \brief mmio handler
508 * @param pdev Pointer to PCI device 494 * @param pdev Pointer to PCI device
509 */ 495 */
510static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev) 496static pci_ers_result_t liquidio_pcie_mmio_enabled(
497 struct pci_dev *pdev __attribute__((unused)))
511{ 498{
512 /* We should never hit this since we never ask for a reset for a Fatal 499 /* We should never hit this since we never ask for a reset for a Fatal
513 * Error. We always return DISCONNECT in io_error above. 500 * Error. We always return DISCONNECT in io_error above.
@@ -523,7 +510,8 @@ static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev)
523 * Restart the card from scratch, as if from a cold-boot. Implementation 510 * Restart the card from scratch, as if from a cold-boot. Implementation
524 * resembles the first-half of the octeon_resume routine. 511 * resembles the first-half of the octeon_resume routine.
525 */ 512 */
526static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev) 513static pci_ers_result_t liquidio_pcie_slot_reset(
514 struct pci_dev *pdev __attribute__((unused)))
527{ 515{
528 /* We should never hit this since we never ask for a reset for a Fatal 516 /* We should never hit this since we never ask for a reset for a Fatal
529 * Error. We always return DISCONNECT in io_error above. 517 * Error. We always return DISCONNECT in io_error above.
@@ -540,7 +528,7 @@ static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev)
540 * it's OK to resume normal operation. Implementation resembles the 528 * it's OK to resume normal operation. Implementation resembles the
541 * second-half of the octeon_resume routine. 529 * second-half of the octeon_resume routine.
542 */ 530 */
543static void liquidio_pcie_resume(struct pci_dev *pdev) 531static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
544{ 532{
545 /* Nothing to be done here. */ 533 /* Nothing to be done here. */
546} 534}
@@ -551,7 +539,8 @@ static void liquidio_pcie_resume(struct pci_dev *pdev)
551 * @param pdev Pointer to PCI device 539 * @param pdev Pointer to PCI device
552 * @param state state to suspend to 540 * @param state state to suspend to
553 */ 541 */
554static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state) 542static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
543 pm_message_t state __attribute__((unused)))
555{ 544{
556 return 0; 545 return 0;
557} 546}
@@ -560,7 +549,7 @@ static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state)
560 * \brief called when resuming 549 * \brief called when resuming
561 * @param pdev Pointer to PCI device 550 * @param pdev Pointer to PCI device
562 */ 551 */
563static int liquidio_resume(struct pci_dev *pdev) 552static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
564{ 553{
565 return 0; 554 return 0;
566} 555}
@@ -1104,7 +1093,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
1104 * @param pdev PCI device structure 1093 * @param pdev PCI device structure
1105 * @param ent unused 1094 * @param ent unused
1106 */ 1095 */
1107static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1096static int
1097liquidio_probe(struct pci_dev *pdev,
1098 const struct pci_device_id *ent __attribute__((unused)))
1108{ 1099{
1109 struct octeon_device *oct_dev = NULL; 1100 struct octeon_device *oct_dev = NULL;
1110 struct handshake *hs; 1101 struct handshake *hs;
@@ -1267,7 +1258,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
1267 1258
1268 /* Nothing to be done here either */ 1259 /* Nothing to be done here either */
1269 break; 1260 break;
1270 } /* end switch(oct->status) */ 1261 } /* end switch (oct->status) */
1271 1262
1272 tasklet_kill(&oct_priv->droq_tasklet); 1263 tasklet_kill(&oct_priv->droq_tasklet);
1273} 1264}
@@ -1724,8 +1715,10 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1724 * @param rq request 1715 * @param rq request
1725 * @param on is it on 1716 * @param on is it on
1726 */ 1717 */
1727static int liquidio_ptp_enable(struct ptp_clock_info *ptp, 1718static int
1728 struct ptp_clock_request *rq, int on) 1719liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
1720 struct ptp_clock_request *rq __attribute__((unused)),
1721 int on __attribute__((unused)))
1729{ 1722{
1730 return -EOPNOTSUPP; 1723 return -EOPNOTSUPP;
1731} 1724}
@@ -1866,7 +1859,7 @@ static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
1866 * @param buf pointer to resp structure 1859 * @param buf pointer to resp structure
1867 */ 1860 */
1868static void if_cfg_callback(struct octeon_device *oct, 1861static void if_cfg_callback(struct octeon_device *oct,
1869 u32 status, 1862 u32 status __attribute__((unused)),
1870 void *buf) 1863 void *buf)
1871{ 1864{
1872 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; 1865 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
@@ -1880,7 +1873,7 @@ static void if_cfg_callback(struct octeon_device *oct,
1880 if (resp->status) 1873 if (resp->status)
1881 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n", 1874 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
1882 CVM_CAST64(resp->status)); 1875 CVM_CAST64(resp->status));
1883 ACCESS_ONCE(ctx->cond) = 1; 1876 WRITE_ONCE(ctx->cond, 1);
1884 1877
1885 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", 1878 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
1886 resp->cfg_info.liquidio_firmware_version); 1879 resp->cfg_info.liquidio_firmware_version);
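
This hunk swaps the deprecated ACCESS_ONCE() for WRITE_ONCE(). A rough user-space approximation of the READ_ONCE()/WRITE_ONCE() pair, assuming GCC/Clang __typeof__; the real kernel macros live in <linux/compiler.h>:

#include <stdio.h>

/* Approximation only: a volatile access forces exactly one untorn
 * load/store that the compiler may not elide, merge, or refetch.
 */
#define WRITE_ONCE_SKETCH(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE_SKETCH(x)       (*(volatile __typeof__(x) *)&(x))

static int cond;

int main(void)
{
        WRITE_ONCE_SKETCH(cond, 1); /* completion flag, as in ctx->cond */
        printf("cond = %d\n", READ_ONCE_SKETCH(cond));
        return 0;
}
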
@@ -1900,7 +1893,8 @@ static void if_cfg_callback(struct octeon_device *oct,
1900 * @returns selected queue number 1893 * @returns selected queue number
1901 */ 1894 */
1902static u16 select_q(struct net_device *dev, struct sk_buff *skb, 1895static u16 select_q(struct net_device *dev, struct sk_buff *skb,
1903 void *accel_priv, select_queue_fallback_t fallback) 1896 void *accel_priv __attribute__((unused)),
1897 select_queue_fallback_t fallback __attribute__((unused)))
1904{ 1898{
1905 u32 qindex = 0; 1899 u32 qindex = 0;
1906 struct lio *lio; 1900 struct lio *lio;
@@ -1920,7 +1914,7 @@ static u16 select_q(struct net_device *dev, struct sk_buff *skb,
1920 * @param arg - farg registered in droq_ops 1914 * @param arg - farg registered in droq_ops
1921 */ 1915 */
1922static void 1916static void
1923liquidio_push_packet(u32 octeon_id, 1917liquidio_push_packet(u32 octeon_id __attribute__((unused)),
1924 void *skbuff, 1918 void *skbuff,
1925 u32 len, 1919 u32 len,
1926 union octeon_rh *rh, 1920 union octeon_rh *rh,
@@ -2000,14 +1994,25 @@ liquidio_push_packet(u32 octeon_id,
2000 } 1994 }
2001 1995
2002 skb->protocol = eth_type_trans(skb, skb->dev); 1996 skb->protocol = eth_type_trans(skb, skb->dev);
2003
2004 if ((netdev->features & NETIF_F_RXCSUM) && 1997 if ((netdev->features & NETIF_F_RXCSUM) &&
2005 (rh->r_dh.csum_verified == CNNIC_CSUM_VERIFIED)) 1998 (((rh->r_dh.encap_on) &&
1999 (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
2000 (!(rh->r_dh.encap_on) &&
2001 (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
2006 /* checksum has already been verified */ 2002 /* checksum has already been verified */
2007 skb->ip_summed = CHECKSUM_UNNECESSARY; 2003 skb->ip_summed = CHECKSUM_UNNECESSARY;
2008 else 2004 else
2009 skb->ip_summed = CHECKSUM_NONE; 2005 skb->ip_summed = CHECKSUM_NONE;
2010 2006
2007 /* Set the encapsulation field based on the status received
2008 * from the firmware
2009 */
2010 if (rh->r_dh.encap_on) {
2011 skb->encapsulation = 1;
2012 skb->csum_level = 1;
2013 droq->stats.rx_vxlan++;
2014 }
2015
2011 /* inbound VLAN tag */ 2016 /* inbound VLAN tag */
2012 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && 2017 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
2013 (rh->r_dh.vlan != 0)) { 2018 (rh->r_dh.vlan != 0)) {
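
The RX path above now accepts firmware checksum verification for both plain and VXLAN-encapsulated frames before marking the skb CHECKSUM_UNNECESSARY. A condensed, runnable model of that decision; flag names follow the patch, but the bit values here are illustrative:

#include <stdio.h>

#define CNNIC_CSUM_VERIFIED     0x1 /* illustrative values only */
#define CNNIC_TUN_CSUM_VERIFIED 0x2

/* Nonzero when the skb may be marked CHECKSUM_UNNECESSARY, mirroring
 * the condition in liquidio_push_packet() above: tunnelled frames need
 * the tunnel verification bit, plain frames the ordinary one.
 */
static int rx_csum_ok(int encap_on, int csum_verified)
{
        if (encap_on)
                return csum_verified & CNNIC_TUN_CSUM_VERIFIED;
        return csum_verified & CNNIC_CSUM_VERIFIED;
}

int main(void)
{
        printf("tunnel ok:  %d\n", rx_csum_ok(1, CNNIC_TUN_CSUM_VERIFIED));
        printf("plain ok:   %d\n", rx_csum_ok(0, CNNIC_CSUM_VERIFIED));
        printf("tunnel bad: %d\n", rx_csum_ok(1, CNNIC_CSUM_VERIFIED));
        return 0;
}
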
@@ -2120,7 +2125,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
2120/** 2125/**
2121 * \brief Setup input and output queues 2126 * \brief Setup input and output queues
2122 * @param octeon_dev octeon device 2127 * @param octeon_dev octeon device
2123 * @param net_device Net device 2128 * @param ifidx Interface Index
2124 * 2129 *
2125 * Note: Queues are with respect to the octeon device. Thus 2130 * Note: Queues are with respect to the octeon device. Thus
2126 * an input queue is for egress packets, and output queues 2131 * an input queue is for egress packets, and output queues
@@ -2331,7 +2336,6 @@ static int liquidio_stop(struct net_device *netdev)
2331 } 2336 }
2332 2337
2333 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); 2338 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
2334 module_put(THIS_MODULE);
2335 2339
2336 return 0; 2340 return 0;
2337} 2341}
@@ -2342,6 +2346,7 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
2342 struct net_device *netdev = (struct net_device *)nctrl->netpndev; 2346 struct net_device *netdev = (struct net_device *)nctrl->netpndev;
2343 struct lio *lio = GET_LIO(netdev); 2347 struct lio *lio = GET_LIO(netdev);
2344 struct octeon_device *oct = lio->oct_dev; 2348 struct octeon_device *oct = lio->oct_dev;
2349 u8 *mac;
2345 2350
2346 switch (nctrl->ncmd.s.cmd) { 2351 switch (nctrl->ncmd.s.cmd) {
2347 case OCTNET_CMD_CHANGE_DEVFLAGS: 2352 case OCTNET_CMD_CHANGE_DEVFLAGS:
@@ -2349,22 +2354,24 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
2349 break; 2354 break;
2350 2355
2351 case OCTNET_CMD_CHANGE_MACADDR: 2356 case OCTNET_CMD_CHANGE_MACADDR:
2352 /* If command is successful, change the MACADDR. */ 2357 mac = ((u8 *)&nctrl->udd[0]) + 2;
2353 netif_info(lio, probe, lio->netdev, " MACAddr changed to 0x%llx\n", 2358 netif_info(lio, probe, lio->netdev,
2354 CVM_CAST64(nctrl->udd[0])); 2359 "%s %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
2355 dev_info(&oct->pci_dev->dev, "%s MACAddr changed to 0x%llx\n", 2360 "MACAddr changed to", mac[0], mac[1],
2356 netdev->name, CVM_CAST64(nctrl->udd[0])); 2361 mac[2], mac[3], mac[4], mac[5]);
2357 memcpy(netdev->dev_addr, ((u8 *)&nctrl->udd[0]) + 2, ETH_ALEN);
2358 break; 2362 break;
2359 2363
2360 case OCTNET_CMD_CHANGE_MTU: 2364 case OCTNET_CMD_CHANGE_MTU:
2361 /* If command is successful, change the MTU. */ 2365 /* If command is successful, change the MTU. */
2362 netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n", 2366 netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n",
2363 netdev->mtu, nctrl->ncmd.s.param2); 2367 netdev->mtu, nctrl->ncmd.s.param1);
2364 dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n", 2368 dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
2365 netdev->name, netdev->mtu, 2369 netdev->name, netdev->mtu,
2366 nctrl->ncmd.s.param2); 2370 nctrl->ncmd.s.param1);
2367 netdev->mtu = nctrl->ncmd.s.param2; 2371 rtnl_lock();
2372 netdev->mtu = nctrl->ncmd.s.param1;
2373 call_netdevice_notifiers(NETDEV_CHANGEMTU, netdev);
2374 rtnl_unlock();
2368 break; 2375 break;
2369 2376
2370 case OCTNET_CMD_GPIO_ACCESS: 2377 case OCTNET_CMD_GPIO_ACCESS:
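
The MTU completion above now updates netdev->mtu under the RTNL lock and fires the notifier chain so stacked devices (VLANs, bonding) can react. A kernel-context sketch of just that pattern, not a standalone program; apply_fw_mtu() is a hypothetical helper:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Sketch: MTU writes from an asynchronous firmware completion must be
 * serialized by RTNL, and interested subsystems learn about the change
 * through the NETDEV_CHANGEMTU notifier.
 */
static void apply_fw_mtu(struct net_device *netdev, int new_mtu)
{
        rtnl_lock();
        netdev->mtu = new_mtu; /* param1 carried back by the firmware */
        call_netdevice_notifiers(NETDEV_CHANGEMTU, netdev);
        rtnl_unlock();
}
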
@@ -2410,6 +2417,55 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
2410 netdev->name); 2417 netdev->name);
2411 2418
2412 break; 2419 break;
2420 /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
2421 * Command passed by NIC driver
2422 */
2423 case OCTNET_CMD_TNL_RX_CSUM_CTL:
2424 if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
2425 netif_info(lio, probe, lio->netdev,
2426 "%s RX Checksum Offload Enabled\n",
2427 netdev->name);
2428 } else if (nctrl->ncmd.s.param1 ==
2429 OCTNET_CMD_RXCSUM_DISABLE) {
2430 netif_info(lio, probe, lio->netdev,
2431 "%s RX Checksum Offload Disabled\n",
2432 netdev->name);
2433 }
2434 break;
2435
2436 /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
2437 * Command passed by NIC driver
2438 */
2439 case OCTNET_CMD_TNL_TX_CSUM_CTL:
2440 if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
2441 netif_info(lio, probe, lio->netdev,
2442 "%s TX Checksum Offload Enabled\n",
2443 netdev->name);
2444 } else if (nctrl->ncmd.s.param1 ==
2445 OCTNET_CMD_TXCSUM_DISABLE) {
2446 netif_info(lio, probe, lio->netdev,
2447 "%s TX Checksum Offload Disabled\n",
2448 netdev->name);
2449 }
2450 break;
2451
2452 /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
2453 * Command passed by NIC driver
2454 */
2455 case OCTNET_CMD_VXLAN_PORT_CONFIG:
2456 if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
2457 netif_info(lio, probe, lio->netdev,
2458 "%s VxLAN Destination UDP PORT:%d ADDED\n",
2459 netdev->name,
2460 nctrl->ncmd.s.param1);
2461 } else if (nctrl->ncmd.s.more ==
2462 OCTNET_CMD_VXLAN_PORT_DEL) {
2463 netif_info(lio, probe, lio->netdev,
2464 "%s VxLAN Destination UDP PORT:%d DELETED\n",
2465 netdev->name,
2466 nctrl->ncmd.s.param1);
2467 }
2468 break;
2413 2469
2414 case OCTNET_CMD_SET_FLOW_CTL: 2470 case OCTNET_CMD_SET_FLOW_CTL:
2415 netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n"); 2471 netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
@@ -2465,7 +2521,7 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
2465 struct octnic_ctrl_pkt nctrl; 2521 struct octnic_ctrl_pkt nctrl;
2466 struct netdev_hw_addr *ha; 2522 struct netdev_hw_addr *ha;
2467 u64 *mc; 2523 u64 *mc;
2468 int ret, i; 2524 int ret;
2469 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); 2525 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
2470 2526
2471 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2527 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
@@ -2481,7 +2537,6 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
2481 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2537 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2482 2538
2483 /* copy all the addresses into the udd */ 2539 /* copy all the addresses into the udd */
2484 i = 0;
2485 mc = &nctrl.udd[0]; 2540 mc = &nctrl.udd[0];
2486 netdev_for_each_mc_addr(ha, netdev) { 2541 netdev_for_each_mc_addr(ha, netdev) {
2487 *mc = 0; 2542 *mc = 0;
@@ -2604,18 +2659,16 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
2604 struct lio *lio = GET_LIO(netdev); 2659 struct lio *lio = GET_LIO(netdev);
2605 struct octeon_device *oct = lio->oct_dev; 2660 struct octeon_device *oct = lio->oct_dev;
2606 struct octnic_ctrl_pkt nctrl; 2661 struct octnic_ctrl_pkt nctrl;
2607 int max_frm_size = new_mtu + OCTNET_FRM_HEADER_SIZE;
2608 int ret = 0; 2662 int ret = 0;
2609 2663
2610 /* Limit the MTU to make sure the ethernet packets are between 64 bytes 2664 /* Limit the MTU to make sure the ethernet packets are between 68 bytes
2611 * and 65535 bytes 2665 * and 16000 bytes
2612 */ 2666 */
2613 if ((max_frm_size < OCTNET_MIN_FRM_SIZE) || 2667 if ((new_mtu < LIO_MIN_MTU_SIZE) ||
2614 (max_frm_size > OCTNET_MAX_FRM_SIZE)) { 2668 (new_mtu > LIO_MAX_MTU_SIZE)) {
2615 dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu); 2669 dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu);
2616 dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n", 2670 dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n",
2617 (OCTNET_MIN_FRM_SIZE - OCTNET_FRM_HEADER_SIZE), 2671 LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE);
2618 (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE));
2619 return -EINVAL; 2672 return -EINVAL;
2620 } 2673 }
2621 2674
@@ -2646,7 +2699,7 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
2646 * @param ifr interface request 2699 * @param ifr interface request
2647 * @param cmd command 2700 * @param cmd command
2648 */ 2701 */
2649static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2702static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2650{ 2703{
2651 struct hwtstamp_config conf; 2704 struct hwtstamp_config conf;
2652 struct lio *lio = GET_LIO(netdev); 2705 struct lio *lio = GET_LIO(netdev);
@@ -2707,7 +2760,7 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2707{ 2760{
2708 switch (cmd) { 2761 switch (cmd) {
2709 case SIOCSHWTSTAMP: 2762 case SIOCSHWTSTAMP:
2710 return hwtstamp_ioctl(netdev, ifr, cmd); 2763 return hwtstamp_ioctl(netdev, ifr);
2711 default: 2764 default:
2712 return -EOPNOTSUPP; 2765 return -EOPNOTSUPP;
2713 } 2766 }
@@ -2886,12 +2939,12 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2886 /* defer sending if queue is full */ 2939 /* defer sending if queue is full */
2887 stats->tx_iq_busy++; 2940 stats->tx_iq_busy++;
2888 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2941 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2889 ndata.q_no); 2942 lio->txq);
2890 return NETDEV_TX_BUSY; 2943 return NETDEV_TX_BUSY;
2891 } 2944 }
2892 } 2945 }
2893 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", 2946 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
2894 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no ); 2947 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2895 */ 2948 */
2896 2949
2897 ndata.datasize = skb->len; 2950 ndata.datasize = skb->len;
@@ -2899,9 +2952,14 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2899 cmdsetup.u64 = 0; 2952 cmdsetup.u64 = 0;
2900 cmdsetup.s.iq_no = iq_no; 2953 cmdsetup.s.iq_no = iq_no;
2901 2954
2902 if (skb->ip_summed == CHECKSUM_PARTIAL) 2955 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2903 cmdsetup.s.transport_csum = 1; 2956 if (skb->encapsulation) {
2904 2957 cmdsetup.s.tnl_csum = 1;
2958 stats->tx_vxlan++;
2959 } else {
2960 cmdsetup.s.transport_csum = 1;
2961 }
2962 }
2905 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 2963 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2906 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2964 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2907 cmdsetup.s.timestamp = 1; 2965 cmdsetup.s.timestamp = 1;
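
On transmit, a CHECKSUM_PARTIAL skb now selects the tunnel-aware offload bit when it is encapsulated, and the plain transport bit otherwise. A runnable model of that selection; the bitfield mirrors cmdsetup.s from the patch:

#include <stdio.h>

struct cmdsetup_sketch {
        unsigned tnl_csum:1;
        unsigned transport_csum:1;
};

/* Mirrors the liquidio_xmit() logic above; csum_partial stands in for
 * skb->ip_summed == CHECKSUM_PARTIAL, encapsulated for skb->encapsulation.
 */
static void set_tx_csum_bits(struct cmdsetup_sketch *cs,
                             int csum_partial, int encapsulated)
{
        if (!csum_partial)
                return;
        if (encapsulated)
                cs->tnl_csum = 1;       /* the driver also bumps tx_vxlan */
        else
                cs->transport_csum = 1;
}

int main(void)
{
        struct cmdsetup_sketch cs = {0};

        set_tx_csum_bits(&cs, 1, 1);
        printf("tnl=%u transport=%u\n", cs.tnl_csum, cs.transport_csum);
        return 0;
}
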
@@ -2910,6 +2968,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2910 if (skb_shinfo(skb)->nr_frags == 0) { 2968 if (skb_shinfo(skb)->nr_frags == 0) {
2911 cmdsetup.s.u.datasize = skb->len; 2969 cmdsetup.s.u.datasize = skb->len;
2912 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2970 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2971
2913 /* Offload checksum calculation for TCP/UDP packets */ 2972 /* Offload checksum calculation for TCP/UDP packets */
2914 dptr = dma_map_single(&oct->pci_dev->dev, 2973 dptr = dma_map_single(&oct->pci_dev->dev,
2915 skb->data, 2974 skb->data,
@@ -3124,6 +3183,72 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
3124 return ret; 3183 return ret;
3125} 3184}
3126 3185
3186/** Sending command to enable/disable RX checksum offload
3187 * @param netdev pointer to network device
3188 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
3189 * @param rx_cmd OCTNET_CMD_RXCSUM_ENABLE/
3190 * OCTNET_CMD_RXCSUM_DISABLE
3191 * @returns SUCCESS or FAILURE
3192 */
3193int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
3194 u8 rx_cmd)
3195{
3196 struct lio *lio = GET_LIO(netdev);
3197 struct octeon_device *oct = lio->oct_dev;
3198 struct octnic_ctrl_pkt nctrl;
3199 int ret = 0;
3200
3201 nctrl.ncmd.u64 = 0;
3202 nctrl.ncmd.s.cmd = command;
3203 nctrl.ncmd.s.param1 = rx_cmd;
3204 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3205 nctrl.wait_time = 100;
3206 nctrl.netpndev = (u64)netdev;
3207 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3208
3209 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3210 if (ret < 0) {
3211 dev_err(&oct->pci_dev->dev,
3212 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
3213 ret);
3214 }
3215 return ret;
3216}
3217
3218/** Sending command to add/delete VxLAN UDP port to firmware
3219 * @param netdev pointer to network device
3220 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
3221 * @param vxlan_port VxLAN port to be added or deleted
3222 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
3223 * OCTNET_CMD_VXLAN_PORT_DEL
3224 * @returns SUCCESS or FAILURE
3225 */
3226static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
3227 u16 vxlan_port, u8 vxlan_cmd_bit)
3228{
3229 struct lio *lio = GET_LIO(netdev);
3230 struct octeon_device *oct = lio->oct_dev;
3231 struct octnic_ctrl_pkt nctrl;
3232 int ret = 0;
3233
3234 nctrl.ncmd.u64 = 0;
3235 nctrl.ncmd.s.cmd = command;
3236 nctrl.ncmd.s.more = vxlan_cmd_bit;
3237 nctrl.ncmd.s.param1 = vxlan_port;
3238 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3239 nctrl.wait_time = 100;
3240 nctrl.netpndev = (u64)netdev;
3241 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3242
3243 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3244 if (ret < 0) {
3245 dev_err(&oct->pci_dev->dev,
3246 "VxLAN port add/delete failed in core (ret:0x%x)\n",
3247 ret);
3248 }
3249 return ret;
3250}
3251
3127int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1) 3252int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
3128{ 3253{
3129 struct lio *lio = GET_LIO(netdev); 3254 struct lio *lio = GET_LIO(netdev);
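
Both new helpers above follow the driver's control-packet pattern: fill an octnic_ctrl_pkt, point it at instruction queue 0, and hand it to octnet_send_nic_ctrl_pkt() with the shared completion callback. A kernel-context sketch of the pattern as one hypothetical helper (assumes the surrounding lio_main.c headers); note that other call sites in this file, liquidio_set_mcast_list for example, zero the whole structure first, which the sketch also does:

static int send_nic_cmd(struct net_device *netdev, int command,
                        u16 param1, u8 more)
{
        struct lio *lio = GET_LIO(netdev);
        struct octnic_ctrl_pkt nctrl;

        memset(&nctrl, 0, sizeof(nctrl));
        nctrl.ncmd.s.cmd = command;
        nctrl.ncmd.s.param1 = param1;
        nctrl.ncmd.s.more = more;
        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; /* control uses queue 0 */
        nctrl.wait_time = 100;
        nctrl.netpndev = (u64)netdev;
        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

        return octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
}
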
@@ -3204,9 +3329,48 @@ static int liquidio_set_features(struct net_device *netdev,
3204 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, 3329 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
3205 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 3330 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3206 3331
3332 /* Sending command to firmware to enable/disable RX checksum
3333 * offload settings using ethtool
3334 */
3335 if (!(netdev->features & NETIF_F_RXCSUM) &&
3336 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
3337 (features & NETIF_F_RXCSUM))
3338 liquidio_set_rxcsum_command(netdev,
3339 OCTNET_CMD_TNL_RX_CSUM_CTL,
3340 OCTNET_CMD_RXCSUM_ENABLE);
3341 else if ((netdev->features & NETIF_F_RXCSUM) &&
3342 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
3343 !(features & NETIF_F_RXCSUM))
3344 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3345 OCTNET_CMD_RXCSUM_DISABLE);
3346
3207 return 0; 3347 return 0;
3208} 3348}
3209 3349
3350static void liquidio_add_vxlan_port(struct net_device *netdev,
3351 struct udp_tunnel_info *ti)
3352{
3353 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3354 return;
3355
3356 liquidio_vxlan_port_command(netdev,
3357 OCTNET_CMD_VXLAN_PORT_CONFIG,
3358 htons(ti->port),
3359 OCTNET_CMD_VXLAN_PORT_ADD);
3360}
3361
3362static void liquidio_del_vxlan_port(struct net_device *netdev,
3363 struct udp_tunnel_info *ti)
3364{
3365 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3366 return;
3367
3368 liquidio_vxlan_port_command(netdev,
3369 OCTNET_CMD_VXLAN_PORT_CONFIG,
3370 htons(ti->port),
3371 OCTNET_CMD_VXLAN_PORT_DEL);
3372}
3373
3210static struct net_device_ops lionetdevops = { 3374static struct net_device_ops lionetdevops = {
3211 .ndo_open = liquidio_open, 3375 .ndo_open = liquidio_open,
3212 .ndo_stop = liquidio_stop, 3376 .ndo_stop = liquidio_stop,
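
The two callbacks wired up above are how the core notifies a driver of UDP tunnel sockets: every add or delete of a tunnel port invokes ndo_udp_tunnel_add/ndo_udp_tunnel_del with a struct udp_tunnel_info, and the driver must filter on ti->type since GENEVE and other tunnels arrive through the same hook. A kernel-context sketch of the filter, with the firmware push stubbed out:

#include <net/udp_tunnel.h>

static void example_udp_tunnel_add(struct net_device *netdev,
                                   struct udp_tunnel_info *ti)
{
        if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
                return;         /* ignore non-VXLAN tunnel types */
        /* ti->port is a big-endian __be16; forward it to the device here */
}
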
@@ -3222,6 +3386,8 @@ static struct net_device_ops lionetdevops = {
3222 .ndo_do_ioctl = liquidio_ioctl, 3386 .ndo_do_ioctl = liquidio_ioctl,
3223 .ndo_fix_features = liquidio_fix_features, 3387 .ndo_fix_features = liquidio_fix_features,
3224 .ndo_set_features = liquidio_set_features, 3388 .ndo_set_features = liquidio_set_features,
3389 .ndo_udp_tunnel_add = liquidio_add_vxlan_port,
3390 .ndo_udp_tunnel_del = liquidio_del_vxlan_port,
3225}; 3391};
3226 3392
3227/** \brief Entry point for the liquidio module 3393/** \brief Entry point for the liquidio module
@@ -3323,7 +3489,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
3323 struct liquidio_if_cfg_resp *resp; 3489 struct liquidio_if_cfg_resp *resp;
3324 struct octdev_props *props; 3490 struct octdev_props *props;
3325 int retval, num_iqueues, num_oqueues; 3491 int retval, num_iqueues, num_oqueues;
3326 int num_cpus = num_online_cpus();
3327 union oct_nic_if_cfg if_cfg; 3492 union oct_nic_if_cfg if_cfg;
3328 unsigned int base_queue; 3493 unsigned int base_queue;
3329 unsigned int gmx_port_id; 3494 unsigned int gmx_port_id;
@@ -3365,14 +3530,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
3365 gmx_port_id = 3530 gmx_port_id =
3366 CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i); 3531 CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i);
3367 ifidx_or_pfnum = i; 3532 ifidx_or_pfnum = i;
3368 if (num_iqueues > num_cpus) 3533
3369 num_iqueues = num_cpus;
3370 if (num_oqueues > num_cpus)
3371 num_oqueues = num_cpus;
3372 dev_dbg(&octeon_dev->pci_dev->dev, 3534 dev_dbg(&octeon_dev->pci_dev->dev,
3373 "requesting config for interface %d, iqs %d, oqs %d\n", 3535 "requesting config for interface %d, iqs %d, oqs %d\n",
3374 ifidx_or_pfnum, num_iqueues, num_oqueues); 3536 ifidx_or_pfnum, num_iqueues, num_oqueues);
3375 ACCESS_ONCE(ctx->cond) = 0; 3537 WRITE_ONCE(ctx->cond, 0);
3376 ctx->octeon_id = lio_get_device_id(octeon_dev); 3538 ctx->octeon_id = lio_get_device_id(octeon_dev);
3377 init_waitqueue_head(&ctx->wc); 3539 init_waitqueue_head(&ctx->wc);
3378 3540
@@ -3390,7 +3552,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
3390 3552
3391 sc->callback = if_cfg_callback; 3553 sc->callback = if_cfg_callback;
3392 sc->callback_arg = sc; 3554 sc->callback_arg = sc;
3393 sc->wait_time = 1000; 3555 sc->wait_time = 3000;
3394 3556
3395 retval = octeon_send_soft_command(octeon_dev, sc); 3557 retval = octeon_send_soft_command(octeon_dev, sc);
3396 if (retval == IQ_SEND_FAILED) { 3558 if (retval == IQ_SEND_FAILED) {
@@ -3479,6 +3641,22 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
3479 | NETIF_F_LRO; 3641 | NETIF_F_LRO;
3480 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); 3642 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3481 3643
3644 /* Copy of transmit encapsulation capabilities:
3645 * TSO, TSO6, Checksums for this device
3646 */
3647 lio->enc_dev_capability = NETIF_F_IP_CSUM
3648 | NETIF_F_IPV6_CSUM
3649 | NETIF_F_GSO_UDP_TUNNEL
3650 | NETIF_F_HW_CSUM | NETIF_F_SG
3651 | NETIF_F_RXCSUM
3652 | NETIF_F_TSO | NETIF_F_TSO6
3653 | NETIF_F_LRO;
3654
3655 netdev->hw_enc_features = (lio->enc_dev_capability &
3656 ~NETIF_F_LRO);
3657
3658 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3659
3482 netdev->vlan_features = lio->dev_capability; 3660 netdev->vlan_features = lio->dev_capability;
3483 /* Add any unchangeable hw features */ 3661 /* Add any unchangeable hw features */
3484 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | 3662 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
@@ -3538,8 +3716,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
3538 octeon_dev->priv_flags = 0x0; 3716 octeon_dev->priv_flags = 0x0;
3539 3717
3540 if (netdev->features & NETIF_F_LRO) 3718 if (netdev->features & NETIF_F_LRO)
3541 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 3719 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3542 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 3720 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3543 3721
3544 liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0); 3722 liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);
3545 3723
@@ -3561,6 +3739,15 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
3561 3739
3562 ifstate_set(lio, LIO_IFSTATE_REGISTERED); 3740 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3563 3741
3742 /* Sending command to firmware to enable Rx checksum offload
3743 * by default at the time of setup of Liquidio driver for
3744 * this device
3745 */
3746 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3747 OCTNET_CMD_RXCSUM_ENABLE);
3748 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3749 OCTNET_CMD_TXCSUM_ENABLE);
3750
3564 dev_dbg(&octeon_dev->pci_dev->dev, 3751 dev_dbg(&octeon_dev->pci_dev->dev,
3565 "NIC ifidx:%d Setup successful\n", i); 3752 "NIC ifidx:%d Setup successful\n", i);
3566 3753
@@ -3771,6 +3958,7 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
3771 /* Release any previously allocated queues */ 3958 /* Release any previously allocated queues */
3772 for (j = 0; j < octeon_dev->num_oqs; j++) 3959 for (j = 0; j < octeon_dev->num_oqs; j++)
3773 octeon_delete_droq(octeon_dev, j); 3960 octeon_delete_droq(octeon_dev, j);
3961 return 1;
3774 } 3962 }
3775 3963
3776 atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE); 3964 atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
@@ -3793,7 +3981,8 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
3793 3981
3794 /* Setup the interrupt handler and record the INT SUM register address 3982 /* Setup the interrupt handler and record the INT SUM register address
3795 */ 3983 */
3796 octeon_setup_interrupt(octeon_dev); 3984 if (octeon_setup_interrupt(octeon_dev))
3985 return 1;
3797 3986
3798 /* Enable Octeon device interrupts */ 3987 /* Enable Octeon device interrupts */
3799 octeon_dev->fn_list.enable_interrupt(octeon_dev->chip); 3988 octeon_dev->fn_list.enable_interrupt(octeon_dev->chip);
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 5aa01f427d4a..199a8b9c7dc5 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -34,6 +34,7 @@
34#define LIQUIDIO_MICRO_VERSION ".1" 34#define LIQUIDIO_MICRO_VERSION ".1"
35#define LIQUIDIO_PACKAGE "" 35#define LIQUIDIO_PACKAGE ""
36#define LIQUIDIO_VERSION "1.4.1" 36#define LIQUIDIO_VERSION "1.4.1"
37
37#define CONTROL_IQ 0 38#define CONTROL_IQ 0
38/** Tag types used by Octeon cores in its work. */ 39/** Tag types used by Octeon cores in its work. */
39enum octeon_tag_type { 40enum octeon_tag_type {
@@ -216,6 +217,13 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
216#define OCTNET_CMD_ENABLE_VLAN_FILTER 0x16 217#define OCTNET_CMD_ENABLE_VLAN_FILTER 0x16
217#define OCTNET_CMD_ADD_VLAN_FILTER 0x17 218#define OCTNET_CMD_ADD_VLAN_FILTER 0x17
218#define OCTNET_CMD_DEL_VLAN_FILTER 0x18 219#define OCTNET_CMD_DEL_VLAN_FILTER 0x18
220#define OCTNET_CMD_VXLAN_PORT_CONFIG 0x19
221#define OCTNET_CMD_VXLAN_PORT_ADD 0x0
222#define OCTNET_CMD_VXLAN_PORT_DEL 0x1
223#define OCTNET_CMD_RXCSUM_ENABLE 0x0
224#define OCTNET_CMD_RXCSUM_DISABLE 0x1
225#define OCTNET_CMD_TXCSUM_ENABLE 0x0
226#define OCTNET_CMD_TXCSUM_DISABLE 0x1
219 227
220/* RX(packets coming from wire) Checksum verification flags */ 228/* RX(packets coming from wire) Checksum verification flags */
221/* TCP/UDP csum */ 229/* TCP/UDP csum */
@@ -288,7 +296,7 @@ union octnet_cmd {
288 296
289#define OCTNET_CMD_SIZE (sizeof(union octnet_cmd)) 297#define OCTNET_CMD_SIZE (sizeof(union octnet_cmd))
290 298
291/* Instruction Header (DPI - CN23xx) - for OCTEON-III models */ 299/* Instruction Header(DPI) - for OCTEON-III models */
292struct octeon_instr_ih3 { 300struct octeon_instr_ih3 {
293#ifdef __BIG_ENDIAN_BITFIELD 301#ifdef __BIG_ENDIAN_BITFIELD
294 302
@@ -338,7 +346,7 @@ struct octeon_instr_ih3 {
338#endif 346#endif
339}; 347};
340 348
341/* Optional PKI Instruction Header(PKI IH) - for OCTEON CN23XX models */ 349/* Optional PKI Instruction Header(PKI IH) - for OCTEON-III models */
342/** BIG ENDIAN format. */ 350/** BIG ENDIAN format. */
343struct octeon_instr_pki_ih3 { 351struct octeon_instr_pki_ih3 {
344#ifdef __BIG_ENDIAN_BITFIELD 352#ifdef __BIG_ENDIAN_BITFIELD
@@ -533,6 +541,8 @@ union octeon_rh {
533 u64 priority:3; 541 u64 priority:3;
534 u64 csum_verified:3; /** checksum verified. */ 542 u64 csum_verified:3; /** checksum verified. */
535 u64 has_hwtstamp:1; /** Has hardware timestamp. 1 = yes. */ 543 u64 has_hwtstamp:1; /** Has hardware timestamp. 1 = yes. */
544 u64 encap_on:1;
545 u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */
536 } r_dh; 546 } r_dh;
537 struct { 547 struct {
538 u64 opcode:4; 548 u64 opcode:4;
@@ -542,7 +552,8 @@ union octeon_rh {
542 u64 num_gmx_ports:8; 552 u64 num_gmx_ports:8;
543 u64 max_nic_ports:10; 553 u64 max_nic_ports:10;
544 u64 app_cap_flags:4; 554 u64 app_cap_flags:4;
545 u64 app_mode:16; 555 u64 app_mode:8;
556 u64 pkind:8;
546 } r_core_drv_init; 557 } r_core_drv_init;
547 struct { 558 struct {
548 u64 opcode:4; 559 u64 opcode:4;
@@ -562,6 +573,8 @@ union octeon_rh {
562 u64 opcode:4; 573 u64 opcode:4;
563 } r; 574 } r;
564 struct { 575 struct {
576 u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */
577 u64 encap_on:1;
565 u64 has_hwtstamp:1; /** 1 = has hwtstamp */ 578 u64 has_hwtstamp:1; /** 1 = has hwtstamp */
566 u64 csum_verified:3; /** checksum verified. */ 579 u64 csum_verified:3; /** checksum verified. */
567 u64 priority:3; 580 u64 priority:3;
@@ -572,7 +585,8 @@ union octeon_rh {
572 u64 opcode:4; 585 u64 opcode:4;
573 } r_dh; 586 } r_dh;
574 struct { 587 struct {
575 u64 app_mode:16; 588 u64 pkind:8;
589 u64 app_mode:8;
576 u64 app_cap_flags:4; 590 u64 app_cap_flags:4;
577 u64 max_nic_ports:10; 591 u64 max_nic_ports:10;
578 u64 num_gmx_ports:8; 592 u64 num_gmx_ports:8;
@@ -630,9 +644,11 @@ union oct_link_status {
630 u64 autoneg:1; 644 u64 autoneg:1;
631 u64 if_mode:5; 645 u64 if_mode:5;
632 u64 pause:1; 646 u64 pause:1;
633 u64 reserved:16; 647 u64 flashing:1;
648 u64 reserved:15;
634#else 649#else
635 u64 reserved:16; 650 u64 reserved:15;
651 u64 flashing:1;
636 u64 pause:1; 652 u64 pause:1;
637 u64 if_mode:5; 653 u64 if_mode:5;
638 u64 autoneg:1; 654 u64 autoneg:1;
@@ -736,6 +752,8 @@ struct nic_rx_stats {
736 u64 fw_err_pko; 752 u64 fw_err_pko;
737 u64 fw_err_link; 753 u64 fw_err_link;
738 u64 fw_err_drop; 754 u64 fw_err_drop;
755 u64 fw_rx_vxlan;
756 u64 fw_rx_vxlan_err;
739 757
740 /* LRO */ 758 /* LRO */
741 u64 fw_lro_pkts; /* Number of packets that are LROed */ 759 u64 fw_lro_pkts; /* Number of packets that are LROed */
@@ -776,6 +794,7 @@ struct nic_tx_stats {
776 u64 fw_err_tso; 794 u64 fw_err_tso;
777 u64 fw_tso; /* number of tso requests */ 795 u64 fw_tso; /* number of tso requests */
778 u64 fw_tso_fwd; /* number of packets segmented in tso */ 796 u64 fw_tso_fwd; /* number of packets segmented in tso */
797 u64 fw_tx_vxlan;
779}; 798};
780 799
781struct oct_link_stats { 800struct oct_link_stats {
@@ -856,9 +875,9 @@ union oct_nic_if_cfg {
856 u64 num_iqueues:16; 875 u64 num_iqueues:16;
857 u64 num_oqueues:16; 876 u64 num_oqueues:16;
858 u64 gmx_port_id:8; 877 u64 gmx_port_id:8;
859 u64 reserved:8; 878 u64 vf_id:8;
860#else 879#else
861 u64 reserved:8; 880 u64 vf_id:8;
862 u64 gmx_port_id:8; 881 u64 gmx_port_id:8;
863 u64 num_oqueues:16; 882 u64 num_oqueues:16;
864 u64 num_iqueues:16; 883 u64 num_iqueues:16;
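
liquidio_common.h mirrors every bitfield layout under __BIG_ENDIAN_BITFIELD so a 64-bit word has the same wire layout on either endianness; the vf_id and flashing bits above are therefore added to both halves in reverse order. A compilable sketch of the idiom using the oct_nic_if_cfg widths from this hunk, with the remaining bits folded into an illustrative reserved field:

#include <stdint.h>
#include <stdio.h>

union if_cfg_sketch {
        uint64_t u64;
        struct {
#ifdef __BIG_ENDIAN_BITFIELD
                uint64_t num_iqueues:16;
                uint64_t num_oqueues:16;
                uint64_t gmx_port_id:8;
                uint64_t vf_id:8;
                uint64_t rsvd:16;       /* remaining bits, illustrative */
#else
                uint64_t rsvd:16;
                uint64_t vf_id:8;
                uint64_t gmx_port_id:8;
                uint64_t num_oqueues:16;
                uint64_t num_iqueues:16;
#endif
        } s;
};

int main(void)
{
        union if_cfg_sketch cfg = { .u64 = 0 };

        cfg.s.num_iqueues = 4;
        printf("raw word: 0x%016llx\n", (unsigned long long)cfg.u64);
        return 0;
}
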
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index 4b8c948400be..b3396e3a8bab 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -226,7 +226,7 @@ struct octeon_oq_config {
226 */ 226 */
227 u64 refill_threshold:16; 227 u64 refill_threshold:16;
228 228
229 /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */ 229 /** If set, the Output queue uses info-pointer mode. (Default: 1) */
230 u64 info_ptr:32; 230 u64 info_ptr:32;
231 231
232 /* Max number of OQs available */ 232 /* Max number of OQs available */
@@ -236,7 +236,7 @@ struct octeon_oq_config {
236 /* Max number of OQs available */ 236 /* Max number of OQs available */
237 u64 max_oqs:8; 237 u64 max_oqs:8;
238 238
239 /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */ 239 /** If set, the Output queue uses info-pointer mode. (Default: 1) */
240 u64 info_ptr:32; 240 u64 info_ptr:32;
241 241
242 /** The number of buffers that were consumed during packet processing by 242 /** The number of buffers that were consumed during packet processing by
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index 466147e409c9..bbb50ea66f16 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -23,27 +23,14 @@
23/** 23/**
24 * @file octeon_console.c 24 * @file octeon_console.c
25 */ 25 */
26#include <linux/version.h>
27#include <linux/types.h>
28#include <linux/list.h>
29#include <linux/interrupt.h>
30#include <linux/pci.h> 26#include <linux/pci.h>
31#include <linux/kthread.h>
32#include <linux/netdevice.h> 27#include <linux/netdevice.h>
33#include "octeon_config.h"
34#include "liquidio_common.h" 28#include "liquidio_common.h"
35#include "octeon_droq.h" 29#include "octeon_droq.h"
36#include "octeon_iq.h" 30#include "octeon_iq.h"
37#include "response_manager.h" 31#include "response_manager.h"
38#include "octeon_device.h" 32#include "octeon_device.h"
39#include "octeon_nic.h"
40#include "octeon_main.h" 33#include "octeon_main.h"
41#include "octeon_network.h"
42#include "cn66xx_regs.h"
43#include "cn66xx_device.h"
44#include "cn68xx_regs.h"
45#include "cn68xx_device.h"
46#include "liquidio_image.h"
47#include "octeon_mem_ops.h" 34#include "octeon_mem_ops.h"
48 35
49static void octeon_remote_lock(void); 36static void octeon_remote_lock(void);
@@ -51,6 +38,8 @@ static void octeon_remote_unlock(void);
51static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct, 38static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
52 const char *name, 39 const char *name,
53 u32 flags); 40 u32 flags);
41static int octeon_console_read(struct octeon_device *oct, u32 console_num,
42 char *buffer, u32 buf_size);
54 43
55#define MIN(a, b) min((a), (b)) 44#define MIN(a, b) min((a), (b))
56#define CAST_ULL(v) ((u64)(v)) 45#define CAST_ULL(v) ((u64)(v))
@@ -170,8 +159,8 @@ struct octeon_pci_console_desc {
170 offsetof(struct cvmx_bootmem_desc, field), \ 159 offsetof(struct cvmx_bootmem_desc, field), \
171 SIZEOF_FIELD(struct cvmx_bootmem_desc, field)) 160 SIZEOF_FIELD(struct cvmx_bootmem_desc, field))
172 161
173#define __cvmx_bootmem_lock(flags) 162#define __cvmx_bootmem_lock(flags) (flags = flags)
174#define __cvmx_bootmem_unlock(flags) 163#define __cvmx_bootmem_unlock(flags) (flags = flags)
175 164
176/** 165/**
177 * This macro returns a member of the 166 * This macro returns a member of the
@@ -234,7 +223,7 @@ static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct,
234 u32 len) 223 u32 len)
235{ 224{
236 addr += offsetof(struct cvmx_bootmem_named_block_desc, name); 225 addr += offsetof(struct cvmx_bootmem_named_block_desc, name);
237 octeon_pci_read_core_mem(oct, addr, str, len); 226 octeon_pci_read_core_mem(oct, addr, (u8 *)str, len);
238 str[len] = 0; 227 str[len] = 0;
239} 228}
240 229
@@ -323,6 +312,9 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
323 if (name && named_size) { 312 if (name && named_size) {
324 char *name_tmp = 313 char *name_tmp =
325 kmalloc(name_length + 1, GFP_KERNEL); 314 kmalloc(name_length + 1, GFP_KERNEL);
315 if (!name_tmp)
316 break;
317
326 CVMX_BOOTMEM_NAMED_GET_NAME(oct, named_addr, 318 CVMX_BOOTMEM_NAMED_GET_NAME(oct, named_addr,
327 name_tmp, 319 name_tmp,
328 name_length); 320 name_length);
@@ -383,7 +375,7 @@ static void octeon_remote_unlock(void)
383int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str, 375int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str,
384 u32 wait_hundredths) 376 u32 wait_hundredths)
385{ 377{
386 u32 len = strlen(cmd_str); 378 u32 len = (u32)strlen(cmd_str);
387 379
388 dev_dbg(&oct->pci_dev->dev, "sending \"%s\" to bootloader\n", cmd_str); 380 dev_dbg(&oct->pci_dev->dev, "sending \"%s\" to bootloader\n", cmd_str);
389 381
@@ -440,8 +432,7 @@ int octeon_wait_for_bootloader(struct octeon_device *oct,
440} 432}
441 433
442static void octeon_console_handle_result(struct octeon_device *oct, 434static void octeon_console_handle_result(struct octeon_device *oct,
443 size_t console_num, 435 size_t console_num)
444 char *buffer, s32 bytes_read)
445{ 436{
446 struct octeon_console *console; 437 struct octeon_console *console;
447 438
@@ -492,7 +483,7 @@ static void check_console(struct work_struct *work)
492 struct octeon_console *console; 483 struct octeon_console *console;
493 struct cavium_wk *wk = (struct cavium_wk *)work; 484 struct cavium_wk *wk = (struct cavium_wk *)work;
494 struct octeon_device *oct = (struct octeon_device *)wk->ctxptr; 485 struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
495 size_t console_num = wk->ctxul; 486 u32 console_num = (u32)wk->ctxul;
496 u32 delay; 487 u32 delay;
497 488
498 console = &oct->console[console_num]; 489 console = &oct->console[console_num];
@@ -505,20 +496,17 @@ static void check_console(struct work_struct *work)
505 */ 496 */
506 bytes_read = 497 bytes_read =
507 octeon_console_read(oct, console_num, console_buffer, 498 octeon_console_read(oct, console_num, console_buffer,
508 sizeof(console_buffer) - 1, 0); 499 sizeof(console_buffer) - 1);
509 if (bytes_read > 0) { 500 if (bytes_read > 0) {
510 total_read += bytes_read; 501 total_read += bytes_read;
511 if (console->waiting) { 502 if (console->waiting)
512 octeon_console_handle_result(oct, console_num, 503 octeon_console_handle_result(oct, console_num);
513 console_buffer,
514 bytes_read);
515 }
516 if (octeon_console_debug_enabled(console_num)) { 504 if (octeon_console_debug_enabled(console_num)) {
517 output_console_line(oct, console, console_num, 505 output_console_line(oct, console, console_num,
518 console_buffer, bytes_read); 506 console_buffer, bytes_read);
519 } 507 }
520 } else if (bytes_read < 0) { 508 } else if (bytes_read < 0) {
521 dev_err(&oct->pci_dev->dev, "Error reading console %lu, ret=%d\n", 509 dev_err(&oct->pci_dev->dev, "Error reading console %u, ret=%d\n",
522 console_num, bytes_read); 510 console_num, bytes_read);
523 } 511 }
524 512
@@ -530,7 +518,7 @@ static void check_console(struct work_struct *work)
530 */ 518 */
531 if (octeon_console_debug_enabled(console_num) && 519 if (octeon_console_debug_enabled(console_num) &&
532 (total_read == 0) && (console->leftover[0])) { 520 (total_read == 0) && (console->leftover[0])) {
533 dev_info(&oct->pci_dev->dev, "%lu: %s\n", 521 dev_info(&oct->pci_dev->dev, "%u: %s\n",
534 console_num, console->leftover); 522 console_num, console->leftover);
535 console->leftover[0] = '\0'; 523 console->leftover[0] = '\0';
536 } 524 }
@@ -675,8 +663,8 @@ static inline int octeon_console_avail_bytes(u32 buffer_size,
675 octeon_console_free_bytes(buffer_size, wr_idx, rd_idx); 663 octeon_console_free_bytes(buffer_size, wr_idx, rd_idx);
676} 664}
677 665
678int octeon_console_read(struct octeon_device *oct, u32 console_num, 666static int octeon_console_read(struct octeon_device *oct, u32 console_num,
679 char *buffer, u32 buf_size, u32 flags) 667 char *buffer, u32 buf_size)
680{ 668{
681 int bytes_to_read; 669 int bytes_to_read;
682 u32 rd_idx, wr_idx; 670 u32 rd_idx, wr_idx;
@@ -712,7 +700,7 @@ int octeon_console_read(struct octeon_device *oct, u32 console_num,
712 bytes_to_read = console->buffer_size - rd_idx; 700 bytes_to_read = console->buffer_size - rd_idx;
713 701
714 octeon_pci_read_core_mem(oct, console->output_base_addr + rd_idx, 702 octeon_pci_read_core_mem(oct, console->output_base_addr + rd_idx,
715 buffer, bytes_to_read); 703 (u8 *)buffer, bytes_to_read);
716 octeon_write_device_mem32(oct, console->addr + 704 octeon_write_device_mem32(oct, console->addr +
717 offsetof(struct octeon_pci_console, 705 offsetof(struct octeon_pci_console,
718 output_read_index), 706 output_read_index),
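
octeon_console_read() above pulls from a circular byte buffer in device memory: it computes the bytes available between the read and write indices and clamps a single copy at the wrap point. A runnable model of the clamp; buffer sizes are illustrative:

#include <stdint.h>
#include <stdio.h>

/* A single read never crosses the wrap point, so it is limited to the
 * bytes between the read index and the end of the buffer; the caller
 * loops to pick up the remainder from offset zero.
 */
static uint32_t clamp_to_wrap(uint32_t buffer_size, uint32_t rd_idx,
                              uint32_t avail)
{
        uint32_t to_read = avail;

        if (rd_idx + to_read > buffer_size)
                to_read = buffer_size - rd_idx;
        return to_read;
}

int main(void)
{
        printf("%u\n", clamp_to_wrap(1024, 1000, 100)); /* prints 24 */
        return 0;
}
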
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 337220721632..0eb504a4379a 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -19,27 +19,19 @@
19* This file may also be available under a different license from Cavium. 19* This file may also be available under a different license from Cavium.
20* Contact Cavium, Inc. for more information 20* Contact Cavium, Inc. for more information
21**********************************************************************/ 21**********************************************************************/
22#include <linux/types.h>
23#include <linux/list.h>
24#include <linux/interrupt.h>
25#include <linux/pci.h> 22#include <linux/pci.h>
26#include <linux/crc32.h> 23#include <linux/crc32.h>
27#include <linux/kthread.h>
28#include <linux/netdevice.h> 24#include <linux/netdevice.h>
29#include <linux/vmalloc.h> 25#include <linux/vmalloc.h>
30#include "octeon_config.h"
31#include "liquidio_common.h" 26#include "liquidio_common.h"
32#include "octeon_droq.h" 27#include "octeon_droq.h"
33#include "octeon_iq.h" 28#include "octeon_iq.h"
34#include "response_manager.h" 29#include "response_manager.h"
35#include "octeon_device.h" 30#include "octeon_device.h"
36#include "octeon_nic.h"
37#include "octeon_main.h" 31#include "octeon_main.h"
38#include "octeon_network.h" 32#include "octeon_network.h"
39#include "cn66xx_regs.h" 33#include "cn66xx_regs.h"
40#include "cn66xx_device.h" 34#include "cn66xx_device.h"
41#include "cn68xx_regs.h"
42#include "cn68xx_device.h"
43#include "liquidio_image.h" 35#include "liquidio_image.h"
44#include "octeon_mem_ops.h" 36#include "octeon_mem_ops.h"
45 37
@@ -448,10 +440,10 @@ static struct octeon_config_ptr {
448}; 440};
449 441
450static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = { 442static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
451 "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE", 443 "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
452 "IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE", 444 "IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
453 "DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE", 445 "DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
454 "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET", 446 "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
455 "INVALID" 447 "INVALID"
456}; 448};
457 449
@@ -652,16 +644,16 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
652 644
653void octeon_free_device_mem(struct octeon_device *oct) 645void octeon_free_device_mem(struct octeon_device *oct)
654{ 646{
655 u32 i; 647 int i;
656 648
657 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 649 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
658 /* could check mask as well */ 650 if (oct->io_qmask.oq & (1ULL << i))
659 vfree(oct->droq[i]); 651 vfree(oct->droq[i]);
660 } 652 }
661 653
662 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { 654 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
663 /* could check mask as well */ 655 if (oct->io_qmask.iq & (1ULL << i))
664 vfree(oct->instr_queue[i]); 656 vfree(oct->instr_queue[i]);
665 } 657 }
666 658
667 i = oct->octeon_id; 659 i = oct->octeon_id;
@@ -752,13 +744,11 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
752/* this function is only for setting up the first queue */ 744/* this function is only for setting up the first queue */
753int octeon_setup_instr_queues(struct octeon_device *oct) 745int octeon_setup_instr_queues(struct octeon_device *oct)
754{ 746{
755 u32 num_iqs = 0;
756 u32 num_descs = 0; 747 u32 num_descs = 0;
757 u32 iq_no = 0; 748 u32 iq_no = 0;
758 union oct_txpciq txpciq; 749 union oct_txpciq txpciq;
759 int numa_node = cpu_to_node(iq_no % num_online_cpus()); 750 int numa_node = cpu_to_node(iq_no % num_online_cpus());
760 751
761 num_iqs = 1;
762 /* this causes queue 0 to be default queue */ 752 /* this causes queue 0 to be default queue */
763 if (OCTEON_CN6XXX(oct)) 753 if (OCTEON_CN6XXX(oct))
764 num_descs = 754 num_descs =
@@ -793,13 +783,11 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
793 783
794int octeon_setup_output_queues(struct octeon_device *oct) 784int octeon_setup_output_queues(struct octeon_device *oct)
795{ 785{
796 u32 num_oqs = 0;
797 u32 num_descs = 0; 786 u32 num_descs = 0;
798 u32 desc_size = 0; 787 u32 desc_size = 0;
799 u32 oq_no = 0; 788 u32 oq_no = 0;
800 int numa_node = cpu_to_node(oq_no % num_online_cpus()); 789 int numa_node = cpu_to_node(oq_no % num_online_cpus());
801 790
802 num_oqs = 1;
803 /* this causes queue 0 to be default queue */ 791 /* this causes queue 0 to be default queue */
804 if (OCTEON_CN6XXX(oct)) { 792 if (OCTEON_CN6XXX(oct)) {
805 num_descs = 793 num_descs =
@@ -1019,79 +1007,6 @@ octeon_register_dispatch_fn(struct octeon_device *oct,
1019 return 0; 1007 return 0;
1020} 1008}
1021 1009
1022/* octeon_unregister_dispatch_fn
1023 * Parameters:
1024 * oct - octeon device
1025 * opcode - driver should unregister the function for this opcode
1026 * subcode - driver should unregister the function for this subcode
1027 * Description:
1028 * Unregister the function set for this opcode+subcode.
1029 * Returns:
1030 * Success: 0
1031 * Failure: 1
1032 * Locks:
1033 * No locks are held.
1034 */
1035int
1036octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode,
1037 u16 subcode)
1038{
1039 int retval = 0;
1040 u32 idx;
1041 struct list_head *dispatch, *dfree = NULL, *tmp2;
1042 u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
1043
1044 idx = combined_opcode & OCTEON_OPCODE_MASK;
1045
1046 spin_lock_bh(&oct->dispatch.lock);
1047
1048 if (oct->dispatch.count == 0) {
1049 spin_unlock_bh(&oct->dispatch.lock);
1050 dev_err(&oct->pci_dev->dev,
1051 "No dispatch functions registered for this device\n");
1052 return 1;
1053 }
1054
1055 if (oct->dispatch.dlist[idx].opcode == combined_opcode) {
1056 dispatch = &oct->dispatch.dlist[idx].list;
1057 if (dispatch->next != dispatch) {
1058 dispatch = dispatch->next;
1059 oct->dispatch.dlist[idx].opcode =
1060 ((struct octeon_dispatch *)dispatch)->opcode;
1061 oct->dispatch.dlist[idx].dispatch_fn =
1062 ((struct octeon_dispatch *)
1063 dispatch)->dispatch_fn;
1064 oct->dispatch.dlist[idx].arg =
1065 ((struct octeon_dispatch *)dispatch)->arg;
1066 list_del(dispatch);
1067 dfree = dispatch;
1068 } else {
1069 oct->dispatch.dlist[idx].opcode = 0;
1070 oct->dispatch.dlist[idx].dispatch_fn = NULL;
1071 oct->dispatch.dlist[idx].arg = NULL;
1072 }
1073 } else {
1074 retval = 1;
1075 list_for_each_safe(dispatch, tmp2,
1076 &(oct->dispatch.dlist[idx].
1077 list)) {
1078 if (((struct octeon_dispatch *)dispatch)->opcode ==
1079 combined_opcode) {
1080 list_del(dispatch);
1081 dfree = dispatch;
1082 retval = 0;
1083 }
1084 }
1085 }
1086
1087 if (!retval)
1088 oct->dispatch.count--;
1089
1090 spin_unlock_bh(&oct->dispatch.lock);
1091 vfree(dfree);
1092 return retval;
1093}
1094
1095int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf) 1010int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
1096{ 1011{
1097 u32 i; 1012 u32 i;
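
The deleted octeon_unregister_dispatch_fn() walked the same structure its registration counterpart still uses: opcode and subcode are folded into one key, masked into a fixed-size table slot, and collisions hang off a per-slot list. A runnable sketch of the slot computation; the shift and mask values are illustrative, not the driver's OPCODE_SUBCODE()/OCTEON_OPCODE_MASK definitions:

#include <stdint.h>
#include <stdio.h>

#define OPCODE_SUBCODE_SKETCH(op, sub) \
        ((uint16_t)(((op) << 8) | ((sub) & 0x7f)))
#define OPCODE_MASK_SKETCH 0x3f

int main(void)
{
        uint16_t key = OPCODE_SUBCODE_SKETCH(0x2, 0x11);

        /* every key hashes to one table slot; same-slot entries are
         * chained on a list and matched by their full key
         */
        printf("slot %u for key 0x%x\n",
               (unsigned)(key & OPCODE_MASK_SKETCH), (unsigned)key);
        return 0;
}
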
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index b4e566dea008..01edfb404346 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -221,7 +221,7 @@ struct octeon_fn_list {
221 221
222/* Structure for named memory blocks 222/* Structure for named memory blocks
223 * Number of descriptors 223 * Number of descriptors
224 * available can be changed without affecting compatiblity, 224 * available can be changed without affecting compatibility,
225 * but name length changes require a bump in the bootmem 225 * but name length changes require a bump in the bootmem
226 * descriptor version 226 * descriptor version
227 * Note: This structure must be naturally 64 bit aligned, as a single 227 * Note: This structure must be naturally 64 bit aligned, as a single
@@ -254,7 +254,7 @@ struct oct_fw_info {
254struct cavium_wk { 254struct cavium_wk {
255 struct delayed_work work; 255 struct delayed_work work;
256 void *ctxptr; 256 void *ctxptr;
257 size_t ctxul; 257 u64 ctxul;
258}; 258};
259 259
260struct cavium_wq { 260struct cavium_wq {
@@ -585,8 +585,7 @@ int octeon_add_console(struct octeon_device *oct, u32 console_num);
585int octeon_console_write(struct octeon_device *oct, u32 console_num, 585int octeon_console_write(struct octeon_device *oct, u32 console_num,
586 char *buffer, u32 write_request_size, u32 flags); 586 char *buffer, u32 write_request_size, u32 flags);
587int octeon_console_write_avail(struct octeon_device *oct, u32 console_num); 587int octeon_console_write_avail(struct octeon_device *oct, u32 console_num);
588int octeon_console_read(struct octeon_device *oct, u32 console_num, 588
589 char *buffer, u32 buf_size, u32 flags);
590int octeon_console_read_avail(struct octeon_device *oct, u32 console_num); 589int octeon_console_read_avail(struct octeon_device *oct, u32 console_num);
591 590
592/** Removes all attached consoles. */ 591/** Removes all attached consoles. */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index d9bb2f7e0836..e0afe4c1fd01 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -19,30 +19,18 @@
19* This file may also be available under a different license from Cavium. 19* This file may also be available under a different license from Cavium.
20* Contact Cavium, Inc. for more information 20* Contact Cavium, Inc. for more information
21**********************************************************************/ 21**********************************************************************/
22#include <linux/version.h>
23#include <linux/types.h>
24#include <linux/list.h>
25#include <linux/pci.h> 22#include <linux/pci.h>
26#include <linux/kthread.h>
27#include <linux/netdevice.h> 23#include <linux/netdevice.h>
28#include <linux/vmalloc.h> 24#include <linux/vmalloc.h>
29#include "octeon_config.h"
30#include "liquidio_common.h" 25#include "liquidio_common.h"
31#include "octeon_droq.h" 26#include "octeon_droq.h"
32#include "octeon_iq.h" 27#include "octeon_iq.h"
33#include "response_manager.h" 28#include "response_manager.h"
34#include "octeon_device.h" 29#include "octeon_device.h"
35#include "octeon_nic.h"
36#include "octeon_main.h" 30#include "octeon_main.h"
37#include "octeon_network.h" 31#include "octeon_network.h"
38#include "cn66xx_regs.h" 32#include "cn66xx_regs.h"
39#include "cn66xx_device.h" 33#include "cn66xx_device.h"
40#include "cn68xx_regs.h"
41#include "cn68xx_device.h"
42#include "liquidio_image.h"
43#include "octeon_mem_ops.h"
44
45/* #define CAVIUM_ONLY_PERF_MODE */
46 34
47#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2)) 35#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2))
48#define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2)) 36#define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2))
@@ -104,8 +92,12 @@ static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
104 return fn_arg; 92 return fn_arg;
105} 93}
106 94
107u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct, 95/** Check for packets on Droq. This function should be called with
108 struct octeon_droq *droq) 96 * lock held.
97 * @param droq - Droq on which count is checked.
98 * @return Returns packet count.
99 */
100u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
109{ 101{
110 u32 pkt_count = 0; 102 u32 pkt_count = 0;
111 103
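The narrowed signature also encodes the locking contract stated in the new kerneldoc: octeon_droq_check_hw_for_pkts() touches only per-queue state and expects the caller to already hold droq->lock, as the poll path later in this file does. A sketch of a conforming call site:

	spin_lock(&droq->lock);
	pkt_count = octeon_droq_check_hw_for_pkts(droq);
	spin_unlock(&droq->lock);
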
@@ -196,7 +188,6 @@ octeon_droq_setup_ring_buffers(struct octeon_device *oct,
196 188
197 droq->recv_buf_list[i].buffer = buf; 189 droq->recv_buf_list[i].buffer = buf;
198 droq->recv_buf_list[i].data = get_rbd(buf); 190 droq->recv_buf_list[i].data = get_rbd(buf);
199
200 droq->info_list[i].length = 0; 191 droq->info_list[i].length = 0;
201 192
202 /* map ring buffers into memory */ 193 /* map ring buffers into memory */
@@ -569,7 +560,9 @@ octeon_droq_dispatch_pkt(struct octeon_device *oct,
569 droq->stats.dropped_nomem++; 560 droq->stats.dropped_nomem++;
570 } 561 }
571 } else { 562 } else {
572 dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function\n"); 563 dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n",
564 (unsigned int)rh->r.opcode,
565 (unsigned int)rh->r.subcode);
573 droq->stats.dropped_nodispatch++; 566 droq->stats.dropped_nodispatch++;
574 } /* else (dispatch_fn ... */ 567 } /* else (dispatch_fn ... */
575 568
@@ -654,6 +647,7 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
654 pg_info->page = NULL; 647 pg_info->page = NULL;
655 droq->recv_buf_list[droq->read_idx].buffer = 648 droq->recv_buf_list[droq->read_idx].buffer =
656 NULL; 649 NULL;
650
657 INCR_INDEX_BY1(droq->read_idx, droq->max_count); 651 INCR_INDEX_BY1(droq->read_idx, droq->max_count);
658 droq->refill_count++; 652 droq->refill_count++;
659 } else { 653 } else {
@@ -748,7 +742,7 @@ octeon_droq_process_packets(struct octeon_device *oct,
748 if (pkt_count > budget) 742 if (pkt_count > budget)
749 pkt_count = budget; 743 pkt_count = budget;
750 744
751 /* Grab the lock */ 745 /* Grab the droq lock */
752 spin_lock(&droq->lock); 746 spin_lock(&droq->lock);
753 747
754 pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count); 748 pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
@@ -810,7 +804,7 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
810 804
811 total_pkts_processed += pkts_processed; 805 total_pkts_processed += pkts_processed;
812 806
813 octeon_droq_check_hw_for_pkts(oct, droq); 807 octeon_droq_check_hw_for_pkts(droq);
814 } 808 }
815 809
816 spin_unlock(&droq->lock); 810 spin_unlock(&droq->lock);
@@ -834,18 +828,6 @@ octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
834 u32 arg) 828 u32 arg)
835{ 829{
836 struct octeon_droq *droq; 830 struct octeon_droq *droq;
837 struct octeon_config *oct_cfg = NULL;
838
839 oct_cfg = octeon_get_conf(oct);
840
841 if (!oct_cfg)
842 return -EINVAL;
843
844 if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
845 dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
846 __func__, q_no, (oct->num_oqs - 1));
847 return -EINVAL;
848 }
849 831
850 droq = oct->droq[q_no]; 832 droq = oct->droq[q_no];
851 833
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index 1ca9c4f05702..5a6fb9113bbd 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -121,6 +121,9 @@ struct oct_droq_stats {
121 /** Num of Packets dropped due to receive path failures. */ 121 /** Num of Packets dropped due to receive path failures. */
122 u64 rx_dropped; 122 u64 rx_dropped;
123 123
124 /** Num of vxlan packets received; */
125 u64 rx_vxlan;
126
124 /** Num of failures of recv_buffer_alloc() */ 127 /** Num of failures of recv_buffer_alloc() */
125 u64 rx_alloc_failure; 128 u64 rx_alloc_failure;
126 129
@@ -413,24 +416,9 @@ int octeon_register_dispatch_fn(struct octeon_device *oct,
413 u16 subcode, 416 u16 subcode,
414 octeon_dispatch_fn_t fn, void *fn_arg); 417 octeon_dispatch_fn_t fn, void *fn_arg);
415 418
416/** Remove registration for an opcode/subcode. This will delete the mapping for
417 * an opcode/subcode. The dispatch function will be unregistered and will no
418 * longer be called if a packet with the opcode/subcode arrives in the driver
419 * output queues.
420 * @param oct - the octeon device to unregister from.
421 * @param opcode - the opcode to be unregistered.
422 * @param subcode - the subcode to be unregistered.
423 *
424 * @return Success: 0; Failure: 1
425 */
426int octeon_unregister_dispatch_fn(struct octeon_device *oct,
427 u16 opcode,
428 u16 subcode);
429
430void octeon_droq_print_stats(void); 419void octeon_droq_print_stats(void);
431 420
432u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct, 421u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq);
433 struct octeon_droq *droq);
434 422
435int octeon_create_droq(struct octeon_device *oct, u32 q_no, 423int octeon_create_droq(struct octeon_device *oct, u32 q_no,
436 u32 num_descs, u32 desc_size, void *app_ctx); 424 u32 num_descs, u32 desc_size, void *app_ctx);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index caa2b4f30717..ff4b1d6f007b 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -66,6 +66,7 @@ struct oct_iq_stats {
 66 u64 tx_dropped;/**< Num of pkts dropped due to xmit path errors. */ 66 u64 tx_dropped;/**< Num of pkts dropped due to xmit path errors. */
 67 u64 tx_tot_bytes;/**< Total count of bytes sent to network. */ 67 u64 tx_tot_bytes;/**< Total count of bytes sent to network. */
68 u64 tx_gso; /* count of tso */ 68 u64 tx_gso; /* count of tso */
69 u64 tx_vxlan; /* tunnel */
69 u64 tx_dmamap_fail; 70 u64 tx_dmamap_fail;
70 u64 tx_restart; 71 u64 tx_restart;
71 /*u64 tx_timeout_count;*/ 72 /*u64 tx_timeout_count;*/
@@ -98,7 +99,7 @@ struct octeon_instr_queue {
98 99
99 u32 rsvd:17; 100 u32 rsvd:17;
100 101
101 /* Controls the periodic flushing of iq */ 102 /* Controls whether extra flushing of IQ is done on Tx */
102 u32 do_auto_flush:1; 103 u32 do_auto_flush:1;
103 104
104 u32 status:8; 105 u32 status:8;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index 0ff3efc67b84..bc14e4c27332 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -174,7 +174,7 @@ sleep_cond(wait_queue_head_t *wait_queue, int *condition)
174 174
175 init_waitqueue_entry(&we, current); 175 init_waitqueue_entry(&we, current);
176 add_wait_queue(wait_queue, &we); 176 add_wait_queue(wait_queue, &we);
177 while (!(ACCESS_ONCE(*condition))) { 177 while (!(READ_ONCE(*condition))) {
178 set_current_state(TASK_INTERRUPTIBLE); 178 set_current_state(TASK_INTERRUPTIBLE);
179 if (signal_pending(current)) 179 if (signal_pending(current))
180 goto out; 180 goto out;
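ACCESS_ONCE() was deprecated in favor of READ_ONCE()/WRITE_ONCE(), which behave correctly for non-scalar types and make the intent explicit: a forced reload on every iteration that the compiler may not cache away. The loop's behavior is unchanged. The general wait-loop shape, as a sketch:

	/* Poll a flag that another context updates with WRITE_ONCE(). */
	while (!READ_ONCE(*condition)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);
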
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
index 5aecef870377..95a4bbedf557 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -19,43 +19,29 @@
19 * This file may also be available under a different license from Cavium. 19 * This file may also be available under a different license from Cavium.
20 * Contact Cavium, Inc. for more information 20 * Contact Cavium, Inc. for more information
21 **********************************************************************/ 21 **********************************************************************/
22#include <linux/version.h>
23#include <linux/types.h>
24#include <linux/list.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h> 22#include <linux/pci.h>
27#include <linux/kthread.h>
28#include <linux/netdevice.h> 23#include <linux/netdevice.h>
29#include "octeon_config.h"
30#include "liquidio_common.h" 24#include "liquidio_common.h"
31#include "octeon_droq.h" 25#include "octeon_droq.h"
32#include "octeon_iq.h" 26#include "octeon_iq.h"
33#include "response_manager.h" 27#include "response_manager.h"
34#include "octeon_device.h" 28#include "octeon_device.h"
35#include "octeon_nic.h"
36#include "octeon_main.h"
37#include "octeon_network.h"
38#include "cn66xx_regs.h"
39#include "cn66xx_device.h"
40#include "cn68xx_regs.h"
41#include "cn68xx_device.h"
42#include "liquidio_image.h"
43#include "octeon_mem_ops.h"
44 29
45#define MEMOPS_IDX MAX_BAR1_MAP_INDEX 30#define MEMOPS_IDX MAX_BAR1_MAP_INDEX
46 31
32#ifdef __BIG_ENDIAN_BITFIELD
47static inline void 33static inline void
48octeon_toggle_bar1_swapmode(struct octeon_device *oct __attribute__((unused)), 34octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx)
49 u32 idx __attribute__((unused)))
50{ 35{
51#ifdef __BIG_ENDIAN_BITFIELD
52 u32 mask; 36 u32 mask;
53 37
54 mask = oct->fn_list.bar1_idx_read(oct, idx); 38 mask = oct->fn_list.bar1_idx_read(oct, idx);
55 mask = (mask & 0x2) ? (mask & ~2) : (mask | 2); 39 mask = (mask & 0x2) ? (mask & ~2) : (mask | 2);
56 oct->fn_list.bar1_idx_write(oct, idx, mask); 40 oct->fn_list.bar1_idx_write(oct, idx, mask);
57#endif
58} 41}
42#else
43#define octeon_toggle_bar1_swapmode(oct, idx) (oct = oct)
44#endif
59 45
60static void 46static void
61octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr, 47octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
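Hoisting the #ifdef out of the function body means little-endian builds never emit (or call) an empty function; the (oct = oct) macro stub exists only to consume the argument without tripping unused-variable warnings. Since the driver's ternary is just a toggle of bit 1, the helper could equivalently be sketched as:

#ifdef __BIG_ENDIAN_BITFIELD
static inline void octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx)
{
	u32 mask = oct->fn_list.bar1_idx_read(oct, idx);

	mask ^= 0x2;	/* flip the byte-swap enable bit */
	oct->fn_list.bar1_idx_write(oct, idx, mask);
}
#else
#define octeon_toggle_bar1_swapmode(oct, idx) (oct = oct)
#endif
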
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index b481edc56c6e..fb820dc7fcb7 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -30,6 +30,9 @@
30#include <linux/dma-mapping.h> 30#include <linux/dma-mapping.h>
31#include <linux/ptp_clock_kernel.h> 31#include <linux/ptp_clock_kernel.h>
32 32
33#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
34#define LIO_MIN_MTU_SIZE 68
35
33struct oct_nic_stats_resp { 36struct oct_nic_stats_resp {
34 u64 rh; 37 u64 rh;
35 struct oct_link_stats stats; 38 struct oct_link_stats stats;
@@ -96,6 +99,12 @@ struct lio {
 96 /** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */ 99 /** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
97 u64 dev_capability; 100 u64 dev_capability;
98 101
102 /* Copy of transmit encapsulation capabilities:
103 * TSO, TSO6, Checksums for this device for Kernel
104 * 3.10.0 onwards
105 */
106 u64 enc_dev_capability;
107
 99 /** Copy of beacon reg in phy */ 108 /** Copy of beacon reg in phy */
100 u32 phy_beacon_val; 109 u32 phy_beacon_val;
101 110
@@ -115,7 +124,6 @@ struct lio {
115 124
116 /* work queue for txq status */ 125 /* work queue for txq status */
117 struct cavium_wq txq_status_wq; 126 struct cavium_wq txq_status_wq;
118
119}; 127};
120 128
121#define LIO_SIZE (sizeof(struct lio)) 129#define LIO_SIZE (sizeof(struct lio))
@@ -351,7 +359,7 @@ lio_map_ring_info(struct octeon_droq *droq, u32 i)
351 dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i], 359 dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
352 OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE); 360 OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);
353 361
354 BUG_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr)); 362 WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
355 363
356 return (u64)dma_addr; 364 return (u64)dma_addr;
357} 365}
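The BUG_ON() here becomes WARN_ON(), so a failed DMA mapping no longer panics the box, but lio_map_ring_info() still returns whatever dma_map_single() produced. A more defensive variant would check the mapping and report failure to the caller; a sketch, under the assumption that the caller can treat 0 as "no mapping":

	dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
				  OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(&oct->pci_dev->dev, dma_addr)) {
		droq->stats.rx_alloc_failure++;	/* counter reuse is illustrative */
		return 0;			/* caller-defined "no mapping" value */
	}
	return (u64)dma_addr;
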
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
index 36f1970a860e..166727be928f 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -19,14 +19,9 @@
19 * This file may also be available under a different license from Cavium. 19 * This file may also be available under a different license from Cavium.
20 * Contact Cavium, Inc. for more information 20 * Contact Cavium, Inc. for more information
21 **********************************************************************/ 21 **********************************************************************/
22#include <linux/version.h>
23#include <linux/types.h>
24#include <linux/list.h>
25#include <linux/interrupt.h> 22#include <linux/interrupt.h>
26#include <linux/pci.h> 23#include <linux/pci.h>
27#include <linux/kthread.h>
28#include <linux/netdevice.h> 24#include <linux/netdevice.h>
29#include "octeon_config.h"
30#include "liquidio_common.h" 25#include "liquidio_common.h"
31#include "octeon_droq.h" 26#include "octeon_droq.h"
32#include "octeon_iq.h" 27#include "octeon_iq.h"
@@ -34,13 +29,6 @@
34#include "octeon_device.h" 29#include "octeon_device.h"
35#include "octeon_nic.h" 30#include "octeon_nic.h"
36#include "octeon_main.h" 31#include "octeon_main.h"
37#include "octeon_network.h"
38#include "cn66xx_regs.h"
39#include "cn66xx_device.h"
40#include "cn68xx_regs.h"
41#include "cn68xx_device.h"
42#include "liquidio_image.h"
43#include "octeon_mem_ops.h"
44 32
45void * 33void *
46octeon_alloc_soft_command_resp(struct octeon_device *oct, 34octeon_alloc_soft_command_resp(struct octeon_device *oct,
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 7eafa75ac095..d32492f185ff 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -19,28 +19,17 @@
19 * This file may also be available under a different license from Cavium. 19 * This file may also be available under a different license from Cavium.
20 * Contact Cavium, Inc. for more information 20 * Contact Cavium, Inc. for more information
21 **********************************************************************/ 21 **********************************************************************/
22#include <linux/version.h>
23#include <linux/types.h>
24#include <linux/list.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h> 22#include <linux/pci.h>
27#include <linux/kthread.h>
28#include <linux/netdevice.h> 23#include <linux/netdevice.h>
29#include <linux/vmalloc.h> 24#include <linux/vmalloc.h>
30#include "octeon_config.h"
31#include "liquidio_common.h" 25#include "liquidio_common.h"
32#include "octeon_droq.h" 26#include "octeon_droq.h"
33#include "octeon_iq.h" 27#include "octeon_iq.h"
34#include "response_manager.h" 28#include "response_manager.h"
35#include "octeon_device.h" 29#include "octeon_device.h"
36#include "octeon_nic.h"
37#include "octeon_main.h" 30#include "octeon_main.h"
38#include "octeon_network.h" 31#include "octeon_network.h"
39#include "cn66xx_regs.h"
40#include "cn66xx_device.h" 32#include "cn66xx_device.h"
41#include "cn68xx_regs.h"
42#include "cn68xx_device.h"
43#include "liquidio_image.h"
44 33
45#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \ 34#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
46 (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count) 35 (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
@@ -301,40 +290,8 @@ static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
301 memcpy(iqptr, cmd, cmdsize); 290 memcpy(iqptr, cmd, cmdsize);
302} 291}
303 292
304static inline int
305__post_command(struct octeon_device *octeon_dev __attribute__((unused)),
306 struct octeon_instr_queue *iq,
307 u32 force_db __attribute__((unused)), u8 *cmd)
308{
309 u32 index = -1;
310
311 /* This ensures that the read index does not wrap around to the same
312 * position if queue gets full before Octeon could fetch any instr.
313 */
314 if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1))
315 return -1;
316
317 __copy_cmd_into_iq(iq, cmd);
318
319 /* "index" is returned, host_write_index is modified. */
320 index = iq->host_write_index;
321 INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
322 iq->fill_cnt++;
323
324 /* Flush the command into memory. We need to be sure the data is in
325 * memory before indicating that the instruction is pending.
326 */
327 wmb();
328
329 atomic_inc(&iq->instr_pending);
330
331 return index;
332}
333
334static inline struct iq_post_status 293static inline struct iq_post_status
335__post_command2(struct octeon_device *octeon_dev __attribute__((unused)), 294__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
336 struct octeon_instr_queue *iq,
337 u32 force_db __attribute__((unused)), u8 *cmd)
338{ 295{
339 struct iq_post_status st; 296 struct iq_post_status st;
340 297
@@ -392,6 +349,7 @@ __add_to_request_list(struct octeon_instr_queue *iq,
392 iq->request_list[idx].reqtype = reqtype; 349 iq->request_list[idx].reqtype = reqtype;
393} 350}
394 351
352/* Can only run in process context */
395int 353int
396lio_process_iq_request_list(struct octeon_device *oct, 354lio_process_iq_request_list(struct octeon_device *oct,
397 struct octeon_instr_queue *iq, u32 napi_budget) 355 struct octeon_instr_queue *iq, u32 napi_budget)
@@ -403,6 +361,7 @@ lio_process_iq_request_list(struct octeon_device *oct,
403 unsigned int pkts_compl = 0, bytes_compl = 0; 361 unsigned int pkts_compl = 0, bytes_compl = 0;
404 struct octeon_soft_command *sc; 362 struct octeon_soft_command *sc;
405 struct octeon_instr_irh *irh; 363 struct octeon_instr_irh *irh;
364 unsigned long flags;
406 365
407 while (old != iq->octeon_read_index) { 366 while (old != iq->octeon_read_index) {
408 reqtype = iq->request_list[old].reqtype; 367 reqtype = iq->request_list[old].reqtype;
@@ -432,17 +391,22 @@ lio_process_iq_request_list(struct octeon_device *oct,
432 * command response list because we expect 391 * command response list because we expect
433 * a response from Octeon. 392 * a response from Octeon.
434 */ 393 */
435 spin_lock_bh(&oct->response_list 394 spin_lock_irqsave
436 [OCTEON_ORDERED_SC_LIST].lock); 395 (&oct->response_list
396 [OCTEON_ORDERED_SC_LIST].lock,
397 flags);
437 atomic_inc(&oct->response_list 398 atomic_inc(&oct->response_list
438 [OCTEON_ORDERED_SC_LIST]. 399 [OCTEON_ORDERED_SC_LIST].
439 pending_req_count); 400 pending_req_count);
440 list_add_tail(&sc->node, &oct->response_list 401 list_add_tail(&sc->node, &oct->response_list
441 [OCTEON_ORDERED_SC_LIST].head); 402 [OCTEON_ORDERED_SC_LIST].head);
442 spin_unlock_bh(&oct->response_list 403 spin_unlock_irqrestore
443 [OCTEON_ORDERED_SC_LIST].lock); 404 (&oct->response_list
405 [OCTEON_ORDERED_SC_LIST].lock,
406 flags);
444 } else { 407 } else {
445 if (sc->callback) { 408 if (sc->callback) {
409 /* This callback must not sleep */
446 sc->callback(oct, OCTEON_REQUEST_DONE, 410 sc->callback(oct, OCTEON_REQUEST_DONE,
447 sc->callback_arg); 411 sc->callback_arg);
448 } 412 }
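The ordered-response list now takes spin_lock_irqsave() instead of the _bh variant, making the critical section safe to enter from process, softirq, or hardirq context at the cost of briefly disabling local interrupts. The idiom in isolation (resp_list abbreviates &oct->response_list[OCTEON_ORDERED_SC_LIST]):

	unsigned long flags;

	spin_lock_irqsave(&resp_list->lock, flags);
	atomic_inc(&resp_list->pending_req_count);
	list_add_tail(&sc->node, &resp_list->head);
	spin_unlock_irqrestore(&resp_list->lock, flags);
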
@@ -559,11 +523,12 @@ static void check_db_timeout(struct work_struct *work)
559{ 523{
560 struct cavium_wk *wk = (struct cavium_wk *)work; 524 struct cavium_wk *wk = (struct cavium_wk *)work;
561 struct octeon_device *oct = (struct octeon_device *)wk->ctxptr; 525 struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
562 unsigned long iq_no = wk->ctxul; 526 u64 iq_no = wk->ctxul;
563 struct cavium_wq *db_wq = &oct->check_db_wq[iq_no]; 527 struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
528 u32 delay = 10;
564 529
565 __check_db_timeout(oct, iq_no); 530 __check_db_timeout(oct, iq_no);
566 queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1)); 531 queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
567} 532}
568 533
569int 534int
@@ -579,7 +544,7 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
579 */ 544 */
580 spin_lock_bh(&iq->post_lock); 545 spin_lock_bh(&iq->post_lock);
581 546
582 st = __post_command2(oct, iq, force_db, cmd); 547 st = __post_command2(iq, cmd);
583 548
584 if (st.status != IQ_SEND_FAILED) { 549 if (st.status != IQ_SEND_FAILED) {
585 octeon_report_sent_bytes_to_bql(buf, reqtype); 550 octeon_report_sent_bytes_to_bql(buf, reqtype);
@@ -587,7 +552,7 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
587 INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize); 552 INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
588 INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1); 553 INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
589 554
590 if (iq->fill_cnt >= iq->fill_threshold || force_db) 555 if (force_db)
591 ring_doorbell(oct, iq); 556 ring_doorbell(oct, iq);
592 } else { 557 } else {
593 INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1); 558 INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
@@ -618,8 +583,8 @@ octeon_prepare_soft_command(struct octeon_device *oct,
618 struct octeon_instr_irh *irh; 583 struct octeon_instr_irh *irh;
619 struct octeon_instr_rdp *rdp; 584 struct octeon_instr_rdp *rdp;
620 585
621 BUG_ON(opcode > 15); 586 WARN_ON(opcode > 15);
622 BUG_ON(subcode > 127); 587 WARN_ON(subcode > 127);
623 588
624 oct_cfg = octeon_get_conf(oct); 589 oct_cfg = octeon_get_conf(oct);
625 590
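The BUG_ON() to WARN_ON() conversions throughout this file follow standard kernel guidance: halting the machine is rarely the right response to a driver-level sanity failure, so WARN_ON() logs a backtrace and continues. Where the operation should also be aborted, the stricter idiom (not what this patch does, which only warns and proceeds) would be sketched as:

	if (WARN_ON(opcode > 15 || subcode > 127))
		return;		/* warn once, then refuse to build the command */
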
@@ -661,7 +626,6 @@ int octeon_send_soft_command(struct octeon_device *oct,
661{ 626{
662 struct octeon_instr_ih2 *ih2; 627 struct octeon_instr_ih2 *ih2;
663 struct octeon_instr_irh *irh; 628 struct octeon_instr_irh *irh;
664 struct octeon_instr_rdp *rdp;
665 u32 len; 629 u32 len;
666 630
667 ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2; 631 ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
@@ -671,12 +635,10 @@ int octeon_send_soft_command(struct octeon_device *oct,
671 } 635 }
672 irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh; 636 irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
673 if (irh->rflag) { 637 if (irh->rflag) {
674 BUG_ON(!sc->dmarptr); 638 WARN_ON(!sc->dmarptr);
675 BUG_ON(!sc->status_word); 639 WARN_ON(!sc->status_word);
676 *sc->status_word = COMPLETION_WORD_INIT; 640 *sc->status_word = COMPLETION_WORD_INIT;
677 641
678 rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
679
680 sc->cmd.cmd2.rptr = sc->dmarptr; 642 sc->cmd.cmd2.rptr = sc->dmarptr;
681 } 643 }
682 len = (u32)ih2->dlengsz; 644 len = (u32)ih2->dlengsz;
@@ -720,7 +682,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct)
720 struct list_head *tmp, *tmp2; 682 struct list_head *tmp, *tmp2;
721 struct octeon_soft_command *sc; 683 struct octeon_soft_command *sc;
722 684
723 spin_lock(&oct->sc_buf_pool.lock); 685 spin_lock_bh(&oct->sc_buf_pool.lock);
724 686
725 list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) { 687 list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
726 list_del(tmp); 688 list_del(tmp);
@@ -732,7 +694,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct)
732 694
733 INIT_LIST_HEAD(&oct->sc_buf_pool.head); 695 INIT_LIST_HEAD(&oct->sc_buf_pool.head);
734 696
735 spin_unlock(&oct->sc_buf_pool.lock); 697 spin_unlock_bh(&oct->sc_buf_pool.lock);
736 698
737 return 0; 699 return 0;
738} 700}
@@ -748,13 +710,13 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
748 struct octeon_soft_command *sc = NULL; 710 struct octeon_soft_command *sc = NULL;
749 struct list_head *tmp; 711 struct list_head *tmp;
750 712
751 BUG_ON((offset + datasize + rdatasize + ctxsize) > 713 WARN_ON((offset + datasize + rdatasize + ctxsize) >
752 SOFT_COMMAND_BUFFER_SIZE); 714 SOFT_COMMAND_BUFFER_SIZE);
753 715
754 spin_lock(&oct->sc_buf_pool.lock); 716 spin_lock_bh(&oct->sc_buf_pool.lock);
755 717
756 if (list_empty(&oct->sc_buf_pool.head)) { 718 if (list_empty(&oct->sc_buf_pool.head)) {
757 spin_unlock(&oct->sc_buf_pool.lock); 719 spin_unlock_bh(&oct->sc_buf_pool.lock);
758 return NULL; 720 return NULL;
759 } 721 }
760 722
@@ -765,7 +727,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
765 727
766 atomic_inc(&oct->sc_buf_pool.alloc_buf_count); 728 atomic_inc(&oct->sc_buf_pool.alloc_buf_count);
767 729
768 spin_unlock(&oct->sc_buf_pool.lock); 730 spin_unlock_bh(&oct->sc_buf_pool.lock);
769 731
770 sc = (struct octeon_soft_command *)tmp; 732 sc = (struct octeon_soft_command *)tmp;
771 733
@@ -795,7 +757,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
795 offset = (offset + datasize + 127) & 0xffffff80; 757 offset = (offset + datasize + 127) & 0xffffff80;
796 758
797 if (rdatasize) { 759 if (rdatasize) {
798 BUG_ON(rdatasize < 16); 760 WARN_ON(rdatasize < 16);
799 sc->virtrptr = (u8 *)sc + offset; 761 sc->virtrptr = (u8 *)sc + offset;
800 sc->dmarptr = dma_addr + offset; 762 sc->dmarptr = dma_addr + offset;
801 sc->rdatasize = rdatasize; 763 sc->rdatasize = rdatasize;
@@ -808,11 +770,11 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
808void octeon_free_soft_command(struct octeon_device *oct, 770void octeon_free_soft_command(struct octeon_device *oct,
809 struct octeon_soft_command *sc) 771 struct octeon_soft_command *sc)
810{ 772{
811 spin_lock(&oct->sc_buf_pool.lock); 773 spin_lock_bh(&oct->sc_buf_pool.lock);
812 774
813 list_add_tail(&sc->node, &oct->sc_buf_pool.head); 775 list_add_tail(&sc->node, &oct->sc_buf_pool.head);
814 776
815 atomic_dec(&oct->sc_buf_pool.alloc_buf_count); 777 atomic_dec(&oct->sc_buf_pool.alloc_buf_count);
816 778
817 spin_unlock(&oct->sc_buf_pool.lock); 779 spin_unlock_bh(&oct->sc_buf_pool.lock);
818} 780}
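The soft-command buffer-pool lock is upgraded from plain spin_lock() to spin_lock_bh() throughout, closing a potential self-deadlock: if process context held the lock and a softirq (for example, completion processing) fired on the same CPU and tried to take it again, the CPU would spin forever. Sketch of the protected pattern:

	/* spin_lock_bh() disables softirqs locally while the lock is held,
	 * so a softirq taking the same lock can never preempt the holder
	 * on this CPU.
	 */
	spin_lock_bh(&oct->sc_buf_pool.lock);
	list_add_tail(&sc->node, &oct->sc_buf_pool.head);
	spin_unlock_bh(&oct->sc_buf_pool.lock);
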
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index c93210f99dda..709049e36627 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -19,28 +19,14 @@
19 * This file may also be available under a different license from Cavium. 19 * This file may also be available under a different license from Cavium.
20 * Contact Cavium, Inc. for more information 20 * Contact Cavium, Inc. for more information
21 **********************************************************************/ 21 **********************************************************************/
22#include <linux/version.h>
23#include <linux/types.h>
24#include <linux/list.h>
25#include <linux/interrupt.h>
26#include <linux/dma-mapping.h>
27#include <linux/pci.h> 22#include <linux/pci.h>
28#include <linux/kthread.h>
29#include <linux/netdevice.h> 23#include <linux/netdevice.h>
30#include "octeon_config.h"
31#include "liquidio_common.h" 24#include "liquidio_common.h"
32#include "octeon_droq.h" 25#include "octeon_droq.h"
33#include "octeon_iq.h" 26#include "octeon_iq.h"
34#include "response_manager.h" 27#include "response_manager.h"
35#include "octeon_device.h" 28#include "octeon_device.h"
36#include "octeon_nic.h"
37#include "octeon_main.h" 29#include "octeon_main.h"
38#include "octeon_network.h"
39#include "cn66xx_regs.h"
40#include "cn66xx_device.h"
41#include "cn68xx_regs.h"
42#include "cn68xx_device.h"
43#include "liquidio_image.h"
44 30
45static void oct_poll_req_completion(struct work_struct *work); 31static void oct_poll_req_completion(struct work_struct *work);
46 32
@@ -66,7 +52,7 @@ int octeon_setup_response_list(struct octeon_device *oct)
66 INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion); 52 INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
67 cwq->wk.ctxptr = oct; 53 cwq->wk.ctxptr = oct;
68 oct->cmd_resp_state = OCT_DRV_ONLINE; 54 oct->cmd_resp_state = OCT_DRV_ONLINE;
69 queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100)); 55 queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50));
70 56
71 return ret; 57 return ret;
72} 58}
@@ -176,6 +162,5 @@ static void oct_poll_req_completion(struct work_struct *work)
176 struct cavium_wq *cwq = &oct->dma_comp_wq; 162 struct cavium_wq *cwq = &oct->dma_comp_wq;
177 163
178 lio_process_ordered_list(oct, 0); 164 lio_process_ordered_list(oct, 0);
179 165 queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50));
180 queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
181} 166}
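Halving the completion-poll period from 100 ms to 50 ms trades a little background CPU for lower soft-command completion latency. Because the worker re-arms itself, the period is simply the delay passed to the final queue_delayed_work(); the same function, written with container_of() for clarity:

static void oct_poll_req_completion(struct work_struct *work)
{
	struct cavium_wk *wk = container_of(work, struct cavium_wk, work.work);
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	struct cavium_wq *cwq = &oct->dma_comp_wq;

	lio_process_ordered_list(oct, 0);
	queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50));	/* re-arm */
}
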
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 388cd799d9ed..e8bc15bcde70 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -146,7 +146,6 @@ struct octeon_mgmt {
146 struct device *dev; 146 struct device *dev;
147 struct napi_struct napi; 147 struct napi_struct napi;
148 struct tasklet_struct tx_clean_tasklet; 148 struct tasklet_struct tx_clean_tasklet;
149 struct phy_device *phydev;
150 struct device_node *phy_np; 149 struct device_node *phy_np;
151 resource_size_t mix_phys; 150 resource_size_t mix_phys;
152 resource_size_t mix_size; 151 resource_size_t mix_size;
@@ -787,14 +786,12 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
787static int octeon_mgmt_ioctl(struct net_device *netdev, 786static int octeon_mgmt_ioctl(struct net_device *netdev,
788 struct ifreq *rq, int cmd) 787 struct ifreq *rq, int cmd)
789{ 788{
790 struct octeon_mgmt *p = netdev_priv(netdev);
791
792 switch (cmd) { 789 switch (cmd) {
793 case SIOCSHWTSTAMP: 790 case SIOCSHWTSTAMP:
794 return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd); 791 return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
795 default: 792 default:
796 if (p->phydev) 793 if (netdev->phydev)
797 return phy_mii_ioctl(p->phydev, rq, cmd); 794 return phy_mii_ioctl(netdev->phydev, rq, cmd);
798 return -EINVAL; 795 return -EINVAL;
799 } 796 }
800} 797}
@@ -836,16 +833,18 @@ static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
836 833
837static void octeon_mgmt_update_link(struct octeon_mgmt *p) 834static void octeon_mgmt_update_link(struct octeon_mgmt *p)
838{ 835{
836 struct net_device *ndev = p->netdev;
837 struct phy_device *phydev = ndev->phydev;
839 union cvmx_agl_gmx_prtx_cfg prtx_cfg; 838 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
840 839
841 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); 840 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
842 841
843 if (!p->phydev->link) 842 if (!phydev->link)
844 prtx_cfg.s.duplex = 1; 843 prtx_cfg.s.duplex = 1;
845 else 844 else
846 prtx_cfg.s.duplex = p->phydev->duplex; 845 prtx_cfg.s.duplex = phydev->duplex;
847 846
848 switch (p->phydev->speed) { 847 switch (phydev->speed) {
849 case 10: 848 case 10:
850 prtx_cfg.s.speed = 0; 849 prtx_cfg.s.speed = 0;
851 prtx_cfg.s.slottime = 0; 850 prtx_cfg.s.slottime = 0;
@@ -871,7 +870,7 @@ static void octeon_mgmt_update_link(struct octeon_mgmt *p)
871 prtx_cfg.s.speed_msb = 0; 870 prtx_cfg.s.speed_msb = 0;
872 /* Only matters for half-duplex */ 871 /* Only matters for half-duplex */
873 prtx_cfg.s.slottime = 1; 872 prtx_cfg.s.slottime = 1;
874 prtx_cfg.s.burst = p->phydev->duplex; 873 prtx_cfg.s.burst = phydev->duplex;
875 } 874 }
876 break; 875 break;
877 case 0: /* No link */ 876 case 0: /* No link */
@@ -894,9 +893,9 @@ static void octeon_mgmt_update_link(struct octeon_mgmt *p)
894 /* MII (both speeds) and RGMII 1000 speed. */ 893 /* MII (both speeds) and RGMII 1000 speed. */
895 agl_clk.s.clk_cnt = 1; 894 agl_clk.s.clk_cnt = 1;
896 if (prtx_ctl.s.mode == 0) { /* RGMII mode */ 895 if (prtx_ctl.s.mode == 0) { /* RGMII mode */
897 if (p->phydev->speed == 10) 896 if (phydev->speed == 10)
898 agl_clk.s.clk_cnt = 50; 897 agl_clk.s.clk_cnt = 50;
899 else if (p->phydev->speed == 100) 898 else if (phydev->speed == 100)
900 agl_clk.s.clk_cnt = 5; 899 agl_clk.s.clk_cnt = 5;
901 } 900 }
902 cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64); 901 cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
@@ -906,39 +905,40 @@ static void octeon_mgmt_update_link(struct octeon_mgmt *p)
906static void octeon_mgmt_adjust_link(struct net_device *netdev) 905static void octeon_mgmt_adjust_link(struct net_device *netdev)
907{ 906{
908 struct octeon_mgmt *p = netdev_priv(netdev); 907 struct octeon_mgmt *p = netdev_priv(netdev);
908 struct phy_device *phydev = netdev->phydev;
909 unsigned long flags; 909 unsigned long flags;
910 int link_changed = 0; 910 int link_changed = 0;
911 911
912 if (!p->phydev) 912 if (!phydev)
913 return; 913 return;
914 914
915 spin_lock_irqsave(&p->lock, flags); 915 spin_lock_irqsave(&p->lock, flags);
916 916
917 917
918 if (!p->phydev->link && p->last_link) 918 if (!phydev->link && p->last_link)
919 link_changed = -1; 919 link_changed = -1;
920 920
921 if (p->phydev->link 921 if (phydev->link &&
922 && (p->last_duplex != p->phydev->duplex 922 (p->last_duplex != phydev->duplex ||
923 || p->last_link != p->phydev->link 923 p->last_link != phydev->link ||
924 || p->last_speed != p->phydev->speed)) { 924 p->last_speed != phydev->speed)) {
925 octeon_mgmt_disable_link(p); 925 octeon_mgmt_disable_link(p);
926 link_changed = 1; 926 link_changed = 1;
927 octeon_mgmt_update_link(p); 927 octeon_mgmt_update_link(p);
928 octeon_mgmt_enable_link(p); 928 octeon_mgmt_enable_link(p);
929 } 929 }
930 930
931 p->last_link = p->phydev->link; 931 p->last_link = phydev->link;
932 p->last_speed = p->phydev->speed; 932 p->last_speed = phydev->speed;
933 p->last_duplex = p->phydev->duplex; 933 p->last_duplex = phydev->duplex;
934 934
935 spin_unlock_irqrestore(&p->lock, flags); 935 spin_unlock_irqrestore(&p->lock, flags);
936 936
937 if (link_changed != 0) { 937 if (link_changed != 0) {
938 if (link_changed > 0) { 938 if (link_changed > 0) {
939 pr_info("%s: Link is up - %d/%s\n", netdev->name, 939 pr_info("%s: Link is up - %d/%s\n", netdev->name,
940 p->phydev->speed, 940 phydev->speed,
941 DUPLEX_FULL == p->phydev->duplex ? 941 phydev->duplex == DUPLEX_FULL ?
942 "Full" : "Half"); 942 "Full" : "Half");
943 } else { 943 } else {
944 pr_info("%s: Link is down\n", netdev->name); 944 pr_info("%s: Link is down\n", netdev->name);
@@ -949,6 +949,7 @@ static void octeon_mgmt_adjust_link(struct net_device *netdev)
949static int octeon_mgmt_init_phy(struct net_device *netdev) 949static int octeon_mgmt_init_phy(struct net_device *netdev)
950{ 950{
951 struct octeon_mgmt *p = netdev_priv(netdev); 951 struct octeon_mgmt *p = netdev_priv(netdev);
952 struct phy_device *phydev = NULL;
952 953
953 if (octeon_is_simulation() || p->phy_np == NULL) { 954 if (octeon_is_simulation() || p->phy_np == NULL) {
954 /* No PHYs in the simulator. */ 955 /* No PHYs in the simulator. */
@@ -956,11 +957,11 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
956 return 0; 957 return 0;
957 } 958 }
958 959
959 p->phydev = of_phy_connect(netdev, p->phy_np, 960 phydev = of_phy_connect(netdev, p->phy_np,
960 octeon_mgmt_adjust_link, 0, 961 octeon_mgmt_adjust_link, 0,
961 PHY_INTERFACE_MODE_MII); 962 PHY_INTERFACE_MODE_MII);
962 963
963 if (!p->phydev) 964 if (!phydev)
964 return -ENODEV; 965 return -ENODEV;
965 966
966 return 0; 967 return 0;
@@ -1080,9 +1081,9 @@ static int octeon_mgmt_open(struct net_device *netdev)
1080 } 1081 }
1081 1082
1082 /* Set the mode of the interface, RGMII/MII. */ 1083 /* Set the mode of the interface, RGMII/MII. */
1083 if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) { 1084 if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
1084 union cvmx_agl_prtx_ctl agl_prtx_ctl; 1085 union cvmx_agl_prtx_ctl agl_prtx_ctl;
1085 int rgmii_mode = (p->phydev->supported & 1086 int rgmii_mode = (netdev->phydev->supported &
1086 (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0; 1087 (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;
1087 1088
1088 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); 1089 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
@@ -1205,7 +1206,7 @@ static int octeon_mgmt_open(struct net_device *netdev)
1205 1206
1206 /* Configure the port duplex, speed and enables */ 1207 /* Configure the port duplex, speed and enables */
1207 octeon_mgmt_disable_link(p); 1208 octeon_mgmt_disable_link(p);
1208 if (p->phydev) 1209 if (netdev->phydev)
1209 octeon_mgmt_update_link(p); 1210 octeon_mgmt_update_link(p);
1210 octeon_mgmt_enable_link(p); 1211 octeon_mgmt_enable_link(p);
1211 1212
@@ -1214,9 +1215,9 @@ static int octeon_mgmt_open(struct net_device *netdev)
1214 /* PHY is not present in simulator. The carrier is enabled 1215 /* PHY is not present in simulator. The carrier is enabled
1215 * while initializing the phy for simulator, leave it enabled. 1216 * while initializing the phy for simulator, leave it enabled.
1216 */ 1217 */
1217 if (p->phydev) { 1218 if (netdev->phydev) {
1218 netif_carrier_off(netdev); 1219 netif_carrier_off(netdev);
1219 phy_start_aneg(p->phydev); 1220 phy_start_aneg(netdev->phydev);
1220 } 1221 }
1221 1222
1222 netif_wake_queue(netdev); 1223 netif_wake_queue(netdev);
@@ -1244,9 +1245,8 @@ static int octeon_mgmt_stop(struct net_device *netdev)
1244 napi_disable(&p->napi); 1245 napi_disable(&p->napi);
1245 netif_stop_queue(netdev); 1246 netif_stop_queue(netdev);
1246 1247
1247 if (p->phydev) 1248 if (netdev->phydev)
1248 phy_disconnect(p->phydev); 1249 phy_disconnect(netdev->phydev);
1249 p->phydev = NULL;
1250 1250
1251 netif_carrier_off(netdev); 1251 netif_carrier_off(netdev);
1252 1252
@@ -1346,50 +1346,23 @@ static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
1346 strlcpy(info->bus_info, "N/A", sizeof(info->bus_info)); 1346 strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
1347} 1347}
1348 1348
1349static int octeon_mgmt_get_settings(struct net_device *netdev,
1350 struct ethtool_cmd *cmd)
1351{
1352 struct octeon_mgmt *p = netdev_priv(netdev);
1353
1354 if (p->phydev)
1355 return phy_ethtool_gset(p->phydev, cmd);
1356
1357 return -EOPNOTSUPP;
1358}
1359
1360static int octeon_mgmt_set_settings(struct net_device *netdev,
1361 struct ethtool_cmd *cmd)
1362{
1363 struct octeon_mgmt *p = netdev_priv(netdev);
1364
1365 if (!capable(CAP_NET_ADMIN))
1366 return -EPERM;
1367
1368 if (p->phydev)
1369 return phy_ethtool_sset(p->phydev, cmd);
1370
1371 return -EOPNOTSUPP;
1372}
1373
1374static int octeon_mgmt_nway_reset(struct net_device *dev) 1349static int octeon_mgmt_nway_reset(struct net_device *dev)
1375{ 1350{
1376 struct octeon_mgmt *p = netdev_priv(dev);
1377
1378 if (!capable(CAP_NET_ADMIN)) 1351 if (!capable(CAP_NET_ADMIN))
1379 return -EPERM; 1352 return -EPERM;
1380 1353
1381 if (p->phydev) 1354 if (dev->phydev)
1382 return phy_start_aneg(p->phydev); 1355 return phy_start_aneg(dev->phydev);
1383 1356
1384 return -EOPNOTSUPP; 1357 return -EOPNOTSUPP;
1385} 1358}
1386 1359
1387static const struct ethtool_ops octeon_mgmt_ethtool_ops = { 1360static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
1388 .get_drvinfo = octeon_mgmt_get_drvinfo, 1361 .get_drvinfo = octeon_mgmt_get_drvinfo,
1389 .get_settings = octeon_mgmt_get_settings,
1390 .set_settings = octeon_mgmt_set_settings,
1391 .nway_reset = octeon_mgmt_nway_reset, 1362 .nway_reset = octeon_mgmt_nway_reset,
1392 .get_link = ethtool_op_get_link, 1363 .get_link = ethtool_op_get_link,
1364 .get_link_ksettings = phy_ethtool_get_link_ksettings,
1365 .set_link_ksettings = phy_ethtool_set_link_ksettings,
1393}; 1366};
1394 1367
1395static const struct net_device_ops octeon_mgmt_ops = { 1368static const struct net_device_ops octeon_mgmt_ops = {
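With the PHY pointer moved into struct net_device, the driver drops its hand-rolled get_settings/set_settings and delegates to the generic phylib helpers, which look up dev->phydev themselves and return -ENODEV when no PHY is attached. The entire ethtool wiring reduces to:

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
	.get_drvinfo		= octeon_mgmt_get_drvinfo,
	.nway_reset		= octeon_mgmt_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
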
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 95f17f8cadac..16ed20357c5c 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -499,6 +499,7 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
499 u32 rr_quantum; 499 u32 rr_quantum;
500 u8 sq_idx = sq->sq_num; 500 u8 sq_idx = sq->sq_num;
501 u8 pqs_vnic; 501 u8 pqs_vnic;
502 int svf;
502 503
503 if (sq->sqs_mode) 504 if (sq->sqs_mode)
504 pqs_vnic = nic->pqs_vf[vnic]; 505 pqs_vnic = nic->pqs_vf[vnic];
@@ -511,10 +512,19 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
511 /* 24 bytes for FCS, IPG and preamble */ 512 /* 24 bytes for FCS, IPG and preamble */
512 rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4); 513 rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
513 514
514 tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX); 515 if (!sq->sqs_mode) {
516 tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
517 } else {
518 for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
519 if (nic->vf_sqs[pqs_vnic][svf] == vnic)
520 break;
521 }
522 tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
523 tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
524 tl4 += (svf * NIC_TL4_PER_LMAC);
525 tl4 += (bgx * NIC_TL4_PER_BGX);
526 }
515 tl4 += sq_idx; 527 tl4 += sq_idx;
516 if (sq->sqs_mode)
517 tl4 += vnic * 8;
518 528
519 tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3); 529 tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
520 nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 | 530 nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
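The reworked TL4 assignment gives each secondary-Qset (SQS) VF its own NIC_TL4_PER_LMAC-sized block, keyed by which slot (svf) the VF occupies under its primary, instead of the old flat vnic * 8 offset that could collide across BGX/LMAC pairs. The index math is plain integer arithmetic and can be sanity-checked in isolation; the constants below are stand-ins for the NIC_* macros, not their real values:

#include <stdio.h>

#define MAX_LMAC_PER_BGX 4
#define NIC_TL4_PER_LMAC 64	/* illustrative, not the driver's value */
#define NIC_TL4_PER_BGX  (NIC_TL4_PER_LMAC * MAX_LMAC_PER_BGX)
#define MAX_SQS_PER_VF   11	/* illustrative */

static int tl4_base(int bgx, int lmac, int svf, int sqs_mode)
{
	if (!sqs_mode)
		return lmac * NIC_TL4_PER_LMAC + bgx * NIC_TL4_PER_BGX;
	return MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC
	       + lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF
	       + svf * NIC_TL4_PER_LMAC
	       + bgx * NIC_TL4_PER_BGX;
}

int main(void)
{
	printf("primary VF, bgx0/lmac1:     %d\n", tl4_base(0, 1, 0, 0));
	printf("secondary VF slot 2, lmac1: %d\n", tl4_base(0, 1, 2, 1));
	return 0;
}
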
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 3ed21988626b..63a39ac97d53 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -551,7 +551,9 @@ static int bgx_xaui_check_link(struct lmac *lmac)
551 } 551 }
552 552
553 /* Clear rcvflt bit (latching high) and read it back */ 553 /* Clear rcvflt bit (latching high) and read it back */
554 bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT); 554 if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
555 bgx_reg_modify(bgx, lmacid,
556 BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
555 if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) { 557 if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
556 dev_err(&bgx->pdev->dev, "Receive fault, retry training\n"); 558 dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
557 if (bgx->use_training) { 559 if (bgx->use_training) {
@@ -570,13 +572,6 @@ static int bgx_xaui_check_link(struct lmac *lmac)
570 return -1; 572 return -1;
571 } 573 }
572 574
573 /* Wait for MAC RX to be ready */
574 if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
575 SMU_RX_CTL_STATUS, true)) {
576 dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
577 return -1;
578 }
579
580 /* Wait for BGX RX to be idle */ 575 /* Wait for BGX RX to be idle */
581 if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) { 576 if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
582 dev_err(&bgx->pdev->dev, "SMU RX not idle\n"); 577 dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
@@ -589,29 +584,30 @@ static int bgx_xaui_check_link(struct lmac *lmac)
589 return -1; 584 return -1;
590 } 585 }
591 586
592 if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) { 587 /* Clear receive packet disable */
593 dev_err(&bgx->pdev->dev, "Receive fault\n");
594 return -1;
595 }
596
597 /* Receive link is latching low. Force it high and verify it */
598 bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
599 if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
600 SPU_STATUS1_RCV_LNK, false)) {
601 dev_err(&bgx->pdev->dev, "SPU receive link down\n");
602 return -1;
603 }
604
605 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL); 588 cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
606 cfg &= ~SPU_MISC_CTL_RX_DIS; 589 cfg &= ~SPU_MISC_CTL_RX_DIS;
607 bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg); 590 bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
608 return 0; 591
592 /* Check for MAC RX faults */
593 cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
594 /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
595 cfg &= SMU_RX_CTL_STATUS;
596 if (!cfg)
597 return 0;
598
599 /* Rx local/remote fault seen.
600 * Do lmac reinit to see if condition recovers
601 */
602 bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
603
604 return -1;
609} 605}
610 606
611static void bgx_poll_for_link(struct work_struct *work) 607static void bgx_poll_for_link(struct work_struct *work)
612{ 608{
613 struct lmac *lmac; 609 struct lmac *lmac;
614 u64 link; 610 u64 spu_link, smu_link;
615 611
616 lmac = container_of(work, struct lmac, dwork.work); 612 lmac = container_of(work, struct lmac, dwork.work);
617 613
@@ -621,8 +617,11 @@ static void bgx_poll_for_link(struct work_struct *work)
621 bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1, 617 bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
622 SPU_STATUS1_RCV_LNK, false); 618 SPU_STATUS1_RCV_LNK, false);
623 619
624 link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1); 620 spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
625 if (link & SPU_STATUS1_RCV_LNK) { 621 smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
622
623 if ((spu_link & SPU_STATUS1_RCV_LNK) &&
624 !(smu_link & SMU_RX_CTL_STATUS)) {
626 lmac->link_up = 1; 625 lmac->link_up = 1;
627 if (lmac->bgx->lmac_type == BGX_MODE_XLAUI) 626 if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
628 lmac->last_speed = 40000; 627 lmac->last_speed = 40000;
@@ -636,9 +635,15 @@ static void bgx_poll_for_link(struct work_struct *work)
636 } 635 }
637 636
638 if (lmac->last_link != lmac->link_up) { 637 if (lmac->last_link != lmac->link_up) {
638 if (lmac->link_up) {
639 if (bgx_xaui_check_link(lmac)) {
640 /* Errors, clear link_up state */
641 lmac->link_up = 0;
642 lmac->last_speed = SPEED_UNKNOWN;
643 lmac->last_duplex = DUPLEX_UNKNOWN;
644 }
645 }
639 lmac->last_link = lmac->link_up; 646 lmac->last_link = lmac->link_up;
640 if (lmac->link_up)
641 bgx_xaui_check_link(lmac);
642 } 647 }
643 648
644 queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2); 649 queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
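bgx_poll_for_link() now requires two conditions before declaring link up: the SPU receive-link bit set and the SMU RX status field (0 = OK, 1 = local fault, 2 = remote fault) clear. The old SPU-only test could report link while the MAC layer was still faulting. The predicate is small enough to verify standalone; the bit positions below are assumed for illustration:

#include <assert.h>
#include <stdint.h>

#define SPU_STATUS1_RCV_LNK (1ull << 12)	/* assumed bit position */
#define SMU_RX_CTL_STATUS   0x3ull		/* assumed field mask */

static int link_ok(uint64_t spu_status1, uint64_t smu_rx_ctl)
{
	return (spu_status1 & SPU_STATUS1_RCV_LNK) &&
	       !(smu_rx_ctl & SMU_RX_CTL_STATUS);
}

int main(void)
{
	assert(link_ok(SPU_STATUS1_RCV_LNK, 0));	/* clean link */
	assert(!link_ok(SPU_STATUS1_RCV_LNK, 2));	/* remote fault */
	assert(!link_ok(0, 0));				/* no SPU link */
	return 0;
}
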
@@ -710,7 +715,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
710static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) 715static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
711{ 716{
712 struct lmac *lmac; 717 struct lmac *lmac;
713 u64 cmrx_cfg; 718 u64 cfg;
714 719
715 lmac = &bgx->lmac[lmacid]; 720 lmac = &bgx->lmac[lmacid];
716 if (lmac->check_link) { 721 if (lmac->check_link) {
@@ -719,9 +724,33 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
719 destroy_workqueue(lmac->check_link); 724 destroy_workqueue(lmac->check_link);
720 } 725 }
721 726
722 cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); 727 /* Disable packet reception */
723 cmrx_cfg &= ~(1 << 15); 728 cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
724 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg); 729 cfg &= ~CMR_PKT_RX_EN;
730 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
731
732 /* Give chance for Rx/Tx FIFO to get drained */
733 bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
734 bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
735
736 /* Disable packet transmission */
737 cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
738 cfg &= ~CMR_PKT_TX_EN;
739 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
740
741 /* Disable serdes lanes */
742 if (!lmac->is_sgmii)
743 bgx_reg_modify(bgx, lmacid,
744 BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
745 else
746 bgx_reg_modify(bgx, lmacid,
747 BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
748
749 /* Disable LMAC */
750 cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
751 cfg &= ~CMR_EN;
752 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
753
725 bgx_flush_dmac_addrs(bgx, lmacid); 754 bgx_flush_dmac_addrs(bgx, lmacid);
726 755
727 if ((bgx->lmac_type != BGX_MODE_XFI) && 756 if ((bgx->lmac_type != BGX_MODE_XFI) &&
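The expanded bgx_lmac_disable() performs a full quiesce in the canonical order for a MAC block; doing these steps out of order risks truncating an in-flight frame or powering down lanes that still hold data. The sequence, annotated:

	/* 1. Clear CMR_PKT_RX_EN    - stop accepting new packets into RX   */
	/* 2. Poll RX/TX FIFO_LEN    - wait for queued data to drain        */
	/* 3. Clear CMR_PKT_TX_EN    - stop the transmitter cleanly         */
	/* 4. Low-power the serdes   - SPU_CTL_LOW_POWER for XAUI-class,    */
	/*    PCS_MRX_CTL_PWR_DN for SGMII                                  */
	/* 5. Clear CMR_EN           - finally disable the LMAC itself      */
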
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 149e179363a1..42010d2e5ddf 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -41,6 +41,7 @@
41#define BGX_CMRX_RX_STAT10 0xC0 41#define BGX_CMRX_RX_STAT10 0xC0
42#define BGX_CMRX_RX_BP_DROP 0xC8 42#define BGX_CMRX_RX_BP_DROP 0xC8
43#define BGX_CMRX_RX_DMAC_CTL 0x0E8 43#define BGX_CMRX_RX_DMAC_CTL 0x0E8
44#define BGX_CMRX_RX_FIFO_LEN 0x108
44#define BGX_CMR_RX_DMACX_CAM 0x200 45#define BGX_CMR_RX_DMACX_CAM 0x200
45#define RX_DMACX_CAM_EN BIT_ULL(48) 46#define RX_DMACX_CAM_EN BIT_ULL(48)
46#define RX_DMACX_CAM_LMACID(x) (x << 49) 47#define RX_DMACX_CAM_LMACID(x) (x << 49)
@@ -50,6 +51,7 @@
50#define BGX_CMR_CHAN_MSK_AND 0x450 51#define BGX_CMR_CHAN_MSK_AND 0x450
51#define BGX_CMR_BIST_STATUS 0x460 52#define BGX_CMR_BIST_STATUS 0x460
52#define BGX_CMR_RX_LMACS 0x468 53#define BGX_CMR_RX_LMACS 0x468
54#define BGX_CMRX_TX_FIFO_LEN 0x518
53#define BGX_CMRX_TX_STAT0 0x600 55#define BGX_CMRX_TX_STAT0 0x600
54#define BGX_CMRX_TX_STAT1 0x608 56#define BGX_CMRX_TX_STAT1 0x608
55#define BGX_CMRX_TX_STAT2 0x610 57#define BGX_CMRX_TX_STAT2 0x610
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index c4b262ca7d43..2accab386323 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
36#define __T4FW_VERSION_H__ 36#define __T4FW_VERSION_H__
37 37
38#define T4FW_VERSION_MAJOR 0x01 38#define T4FW_VERSION_MAJOR 0x01
39#define T4FW_VERSION_MINOR 0x0E 39#define T4FW_VERSION_MINOR 0x0F
40#define T4FW_VERSION_MICRO 0x04 40#define T4FW_VERSION_MICRO 0x25
41#define T4FW_VERSION_BUILD 0x00 41#define T4FW_VERSION_BUILD 0x00
42 42
43#define T4FW_MIN_VERSION_MAJOR 0x01 43#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
45#define T4FW_MIN_VERSION_MICRO 0x00 45#define T4FW_MIN_VERSION_MICRO 0x00
46 46
47#define T5FW_VERSION_MAJOR 0x01 47#define T5FW_VERSION_MAJOR 0x01
48#define T5FW_VERSION_MINOR 0x0E 48#define T5FW_VERSION_MINOR 0x0F
49#define T5FW_VERSION_MICRO 0x04 49#define T5FW_VERSION_MICRO 0x25
50#define T5FW_VERSION_BUILD 0x00 50#define T5FW_VERSION_BUILD 0x00
51 51
52#define T5FW_MIN_VERSION_MAJOR 0x00 52#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
54#define T5FW_MIN_VERSION_MICRO 0x00 54#define T5FW_MIN_VERSION_MICRO 0x00
55 55
56#define T6FW_VERSION_MAJOR 0x01 56#define T6FW_VERSION_MAJOR 0x01
57#define T6FW_VERSION_MINOR 0x0E 57#define T6FW_VERSION_MINOR 0x0F
58#define T6FW_VERSION_MICRO 0x04 58#define T6FW_VERSION_MICRO 0x25
59#define T6FW_VERSION_BUILD 0x00 59#define T6FW_VERSION_BUILD 0x00
60 60
61#define T6FW_MIN_VERSION_MAJOR 0x00 61#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index b69a9eacc531..c3b64cdd0dec 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -173,7 +173,7 @@ static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
173static void dnet_handle_link_change(struct net_device *dev) 173static void dnet_handle_link_change(struct net_device *dev)
174{ 174{
175 struct dnet *bp = netdev_priv(dev); 175 struct dnet *bp = netdev_priv(dev);
176 struct phy_device *phydev = bp->phy_dev; 176 struct phy_device *phydev = dev->phydev;
177 unsigned long flags; 177 unsigned long flags;
178 u32 mode_reg, ctl_reg; 178 u32 mode_reg, ctl_reg;
179 179
@@ -295,7 +295,6 @@ static int dnet_mii_probe(struct net_device *dev)
295 bp->link = 0; 295 bp->link = 0;
296 bp->speed = 0; 296 bp->speed = 0;
297 bp->duplex = -1; 297 bp->duplex = -1;
298 bp->phy_dev = phydev;
299 298
300 return 0; 299 return 0;
301} 300}
@@ -629,16 +628,16 @@ static int dnet_open(struct net_device *dev)
629 struct dnet *bp = netdev_priv(dev); 628 struct dnet *bp = netdev_priv(dev);
630 629
 631 /* if the phy is not yet registered, retry later */ 630 /* if the phy is not yet registered, retry later */
632 if (!bp->phy_dev) 631 if (!dev->phydev)
633 return -EAGAIN; 632 return -EAGAIN;
634 633
635 napi_enable(&bp->napi); 634 napi_enable(&bp->napi);
636 dnet_init_hw(bp); 635 dnet_init_hw(bp);
637 636
638 phy_start_aneg(bp->phy_dev); 637 phy_start_aneg(dev->phydev);
639 638
640 /* schedule a link state check */ 639 /* schedule a link state check */
641 phy_start(bp->phy_dev); 640 phy_start(dev->phydev);
642 641
643 netif_start_queue(dev); 642 netif_start_queue(dev);
644 643
@@ -652,8 +651,8 @@ static int dnet_close(struct net_device *dev)
652 netif_stop_queue(dev); 651 netif_stop_queue(dev);
653 napi_disable(&bp->napi); 652 napi_disable(&bp->napi);
654 653
655 if (bp->phy_dev) 654 if (dev->phydev)
656 phy_stop(bp->phy_dev); 655 phy_stop(dev->phydev);
657 656
658 dnet_reset_hw(bp); 657 dnet_reset_hw(bp);
659 netif_carrier_off(dev); 658 netif_carrier_off(dev);
@@ -731,32 +730,9 @@ static struct net_device_stats *dnet_get_stats(struct net_device *dev)
731 return nstat; 730 return nstat;
732} 731}
733 732
734static int dnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
735{
736 struct dnet *bp = netdev_priv(dev);
737 struct phy_device *phydev = bp->phy_dev;
738
739 if (!phydev)
740 return -ENODEV;
741
742 return phy_ethtool_gset(phydev, cmd);
743}
744
745static int dnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
746{
747 struct dnet *bp = netdev_priv(dev);
748 struct phy_device *phydev = bp->phy_dev;
749
750 if (!phydev)
751 return -ENODEV;
752
753 return phy_ethtool_sset(phydev, cmd);
754}
755
756static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 733static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
757{ 734{
758 struct dnet *bp = netdev_priv(dev); 735 struct phy_device *phydev = dev->phydev;
759 struct phy_device *phydev = bp->phy_dev;
760 736
761 if (!netif_running(dev)) 737 if (!netif_running(dev))
762 return -EINVAL; 738 return -EINVAL;
@@ -776,11 +752,11 @@ static void dnet_get_drvinfo(struct net_device *dev,
776} 752}
777 753
778static const struct ethtool_ops dnet_ethtool_ops = { 754static const struct ethtool_ops dnet_ethtool_ops = {
779 .get_settings = dnet_get_settings,
780 .set_settings = dnet_set_settings,
781 .get_drvinfo = dnet_get_drvinfo, 755 .get_drvinfo = dnet_get_drvinfo,
782 .get_link = ethtool_op_get_link, 756 .get_link = ethtool_op_get_link,
783 .get_ts_info = ethtool_op_get_ts_info, 757 .get_ts_info = ethtool_op_get_ts_info,
758 .get_link_ksettings = phy_ethtool_get_link_ksettings,
759 .set_link_ksettings = phy_ethtool_set_link_ksettings,
784}; 760};
785 761
786static const struct net_device_ops dnet_netdev_ops = { 762static const struct net_device_ops dnet_netdev_ops = {
@@ -875,7 +851,7 @@ static int dnet_probe(struct platform_device *pdev)
875 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ", 851 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
876 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ", 852 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
877 (bp->capabilities & DNET_HAS_DMA) ? "" : "no "); 853 (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
878 phydev = bp->phy_dev; 854 phydev = dev->phydev;
879 phy_attached_info(phydev); 855 phy_attached_info(phydev);
880 856
881 return 0; 857 return 0;
@@ -899,8 +875,8 @@ static int dnet_remove(struct platform_device *pdev)
899 875
900 if (dev) { 876 if (dev) {
901 bp = netdev_priv(dev); 877 bp = netdev_priv(dev);
902 if (bp->phy_dev) 878 if (dev->phydev)
903 phy_disconnect(bp->phy_dev); 879 phy_disconnect(dev->phydev);
904 mdiobus_unregister(bp->mii_bus); 880 mdiobus_unregister(bp->mii_bus);
905 mdiobus_free(bp->mii_bus); 881 mdiobus_free(bp->mii_bus);
906 unregister_netdev(dev); 882 unregister_netdev(dev);
diff --git a/drivers/net/ethernet/dnet.h b/drivers/net/ethernet/dnet.h
index 37f5b30fa78b..d985080bbd5d 100644
--- a/drivers/net/ethernet/dnet.h
+++ b/drivers/net/ethernet/dnet.h
@@ -216,7 +216,6 @@ struct dnet {
216 216
217 /* PHY stuff */ 217 /* PHY stuff */
218 struct mii_bus *mii_bus; 218 struct mii_bus *mii_bus;
219 struct phy_device *phy_dev;
220 unsigned int link; 219 unsigned int link;
221 unsigned int speed; 220 unsigned int speed;
222 unsigned int duplex; 221 unsigned int duplex;
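dnet follows the same conversion as the other drivers in this series: the private phy_dev pointer disappears, and the attach/detach lifecycle is tracked entirely through net_device. The pattern, sketched here with of_phy_connect() (which sets dev->phydev on success; phy_disconnect() clears it again):

	/* open/probe path */
	phydev = of_phy_connect(dev, np, handle_link_change, 0,
				PHY_INTERFACE_MODE_MII);
	if (!phydev)
		return -ENODEV;		/* dev->phydev now points at the PHY */

	/* teardown path */
	if (dev->phydev)
		phy_disconnect(dev->phydev);	/* dev->phydev becomes NULL */
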
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 1873c74638cd..1f16e73f6d0c 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3251,8 +3251,9 @@ static void be_msix_disable(struct be_adapter *adapter)
3251 3251
3252static int be_msix_enable(struct be_adapter *adapter) 3252static int be_msix_enable(struct be_adapter *adapter)
3253{ 3253{
3254 unsigned int i, num_vec, max_roce_eqs; 3254 unsigned int i, max_roce_eqs;
3255 struct device *dev = &adapter->pdev->dev; 3255 struct device *dev = &adapter->pdev->dev;
3256 int num_vec;
3256 3257
3257 /* If RoCE is supported, program the max number of vectors that 3258 /* If RoCE is supported, program the max number of vectors that
3258 * could be used for NIC and RoCE, else, just program the number 3259 * could be used for NIC and RoCE, else, just program the number
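The benet hunk is a type fix rather than churn: num_vec receives the result of pci_enable_msix_range() later in the function, and that call reports failure as a negative errno, so the variable cannot stay in the unsigned declaration with i and max_roce_eqs. A reduced sketch of the hazard, using a hypothetical foo_msix_enable():

#include <linux/pci.h>

static int foo_msix_enable(struct pci_dev *pdev, struct msix_entry *entries,
			   int want)
{
	int num_vec;	/* signed: may hold -ENOSPC or another errno */

	num_vec = pci_enable_msix_range(pdev, entries, 1, want);
	if (num_vec < 0)	/* with an unsigned num_vec this never fires */
		return num_vec;

	/* ... set up the num_vec vectors actually granted ... */
	return 0;
}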
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index b9f2ea59308a..275618bb4646 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -218,7 +218,6 @@ struct hix5hd2_priv {
218 struct device *dev; 218 struct device *dev;
219 struct net_device *netdev; 219 struct net_device *netdev;
220 220
221 struct phy_device *phy;
222 struct device_node *phy_node; 221 struct device_node *phy_node;
223 phy_interface_t phy_mode; 222 phy_interface_t phy_mode;
224 223
@@ -402,7 +401,7 @@ static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p)
402static void hix5hd2_adjust_link(struct net_device *dev) 401static void hix5hd2_adjust_link(struct net_device *dev)
403{ 402{
404 struct hix5hd2_priv *priv = netdev_priv(dev); 403 struct hix5hd2_priv *priv = netdev_priv(dev);
405 struct phy_device *phy = priv->phy; 404 struct phy_device *phy = dev->phydev;
406 405
407 if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) { 406 if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
408 hix5hd2_config_port(dev, phy->speed, phy->duplex); 407 hix5hd2_config_port(dev, phy->speed, phy->duplex);
@@ -679,6 +678,7 @@ static void hix5hd2_free_dma_desc_rings(struct hix5hd2_priv *priv)
679static int hix5hd2_net_open(struct net_device *dev) 678static int hix5hd2_net_open(struct net_device *dev)
680{ 679{
681 struct hix5hd2_priv *priv = netdev_priv(dev); 680 struct hix5hd2_priv *priv = netdev_priv(dev);
681 struct phy_device *phy;
682 int ret; 682 int ret;
683 683
684 ret = clk_prepare_enable(priv->clk); 684 ret = clk_prepare_enable(priv->clk);
@@ -687,12 +687,12 @@ static int hix5hd2_net_open(struct net_device *dev)
687 return ret; 687 return ret;
688 } 688 }
689 689
690 priv->phy = of_phy_connect(dev, priv->phy_node, 690 phy = of_phy_connect(dev, priv->phy_node,
691 &hix5hd2_adjust_link, 0, priv->phy_mode); 691 &hix5hd2_adjust_link, 0, priv->phy_mode);
692 if (!priv->phy) 692 if (!phy)
693 return -ENODEV; 693 return -ENODEV;
694 694
695 phy_start(priv->phy); 695 phy_start(phy);
696 hix5hd2_hw_init(priv); 696 hix5hd2_hw_init(priv);
697 hix5hd2_rx_refill(priv); 697 hix5hd2_rx_refill(priv);
698 698
@@ -716,9 +716,9 @@ static int hix5hd2_net_close(struct net_device *dev)
716 netif_stop_queue(dev); 716 netif_stop_queue(dev);
717 hix5hd2_free_dma_desc_rings(priv); 717 hix5hd2_free_dma_desc_rings(priv);
718 718
719 if (priv->phy) { 719 if (dev->phydev) {
720 phy_stop(priv->phy); 720 phy_stop(dev->phydev);
721 phy_disconnect(priv->phy); 721 phy_disconnect(dev->phydev);
722 } 722 }
723 723
724 clk_disable_unprepare(priv->clk); 724 clk_disable_unprepare(priv->clk);
@@ -750,32 +750,10 @@ static const struct net_device_ops hix5hd2_netdev_ops = {
750 .ndo_set_mac_address = hix5hd2_net_set_mac_address, 750 .ndo_set_mac_address = hix5hd2_net_set_mac_address,
751}; 751};
752 752
753static int hix5hd2_get_settings(struct net_device *net_dev,
754 struct ethtool_cmd *cmd)
755{
756 struct hix5hd2_priv *priv = netdev_priv(net_dev);
757
758 if (!priv->phy)
759 return -ENODEV;
760
761 return phy_ethtool_gset(priv->phy, cmd);
762}
763
764static int hix5hd2_set_settings(struct net_device *net_dev,
765 struct ethtool_cmd *cmd)
766{
767 struct hix5hd2_priv *priv = netdev_priv(net_dev);
768
769 if (!priv->phy)
770 return -ENODEV;
771
772 return phy_ethtool_sset(priv->phy, cmd);
773}
774
775static struct ethtool_ops hix5hd2_ethtools_ops = { 753static struct ethtool_ops hix5hd2_ethtools_ops = {
776 .get_link = ethtool_op_get_link, 754 .get_link = ethtool_op_get_link,
777 .get_settings = hix5hd2_get_settings, 755 .get_link_ksettings = phy_ethtool_get_link_ksettings,
778 .set_settings = hix5hd2_set_settings, 756 .set_link_ksettings = phy_ethtool_set_link_ksettings,
779}; 757};
780 758
781static int hix5hd2_mdio_wait_ready(struct mii_bus *bus) 759static int hix5hd2_mdio_wait_ready(struct mii_bus *bus)
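As in the dnet patch, the hix5hd2 conversion works because of_phy_connect() already records the attached PHY in dev->phydev (and phy_disconnect() clears it again), so a private copy only risks going stale. A condensed sketch of the open/close pairing this relies on, with foo_adjust_link standing in for the driver's link handler:

#include <linux/of_mdio.h>
#include <linux/phy.h>

static void foo_adjust_link(struct net_device *dev)
{
	/* react to dev->phydev->speed / duplex changes here */
}

static int foo_open(struct net_device *dev, struct device_node *phy_node,
		    phy_interface_t mode)
{
	struct phy_device *phy;

	/* of_phy_connect() attaches the PHY and sets dev->phydev */
	phy = of_phy_connect(dev, phy_node, foo_adjust_link, 0, mode);
	if (!phy)
		return -ENODEV;

	phy_start(phy);
	return 0;
}

static int foo_close(struct net_device *dev)
{
	if (dev->phydev) {
		phy_stop(dev->phydev);
		phy_disconnect(dev->phydev);	/* also clears dev->phydev */
	}
	return 0;
}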
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 3869322690ac..e093cbf26c8c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -363,6 +363,14 @@ enum hnae_port_type {
363 HNAE_PORT_DEBUG 363 HNAE_PORT_DEBUG
364}; 364};
365 365
366/* mac media type */
367enum hnae_media_type {
368 HNAE_MEDIA_TYPE_UNKNOWN = 0,
369 HNAE_MEDIA_TYPE_FIBER,
370 HNAE_MEDIA_TYPE_COPPER,
371 HNAE_MEDIA_TYPE_BACKPLANE,
372};
373
366/* This struct defines the operation on the handle. 374/* This struct defines the operation on the handle.
367 * 375 *
368 * get_handle(): (mandatory) 376 * get_handle(): (mandatory)
@@ -525,6 +533,7 @@ struct hnae_handle {
525 u32 eport_id; 533 u32 eport_id;
526 u32 dport_id; /* v2 tx bd should fill the dport_id */ 534 u32 dport_id; /* v2 tx bd should fill the dport_id */
527 enum hnae_port_type port_type; 535 enum hnae_port_type port_type;
536 enum hnae_media_type media_type;
528 struct list_head node; /* list to hnae_ae_dev->handle_list */ 537 struct list_head node; /* list to hnae_ae_dev->handle_list */
529 struct hnae_buf_ops *bops; /* operation for the buffer */ 538 struct hnae_buf_ops *bops; /* operation for the buffer */
530 struct hnae_queue **qs; /* array base of all queues */ 539 struct hnae_queue **qs; /* array base of all queues */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 835521bf1bbc..e28d960997af 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -134,6 +134,7 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
134 ae_handle->phy_dev = vf_cb->mac_cb->phy_dev; 134 ae_handle->phy_dev = vf_cb->mac_cb->phy_dev;
135 ae_handle->if_support = vf_cb->mac_cb->if_support; 135 ae_handle->if_support = vf_cb->mac_cb->if_support;
136 ae_handle->port_type = vf_cb->mac_cb->mac_type; 136 ae_handle->port_type = vf_cb->mac_cb->mac_type;
137 ae_handle->media_type = vf_cb->mac_cb->media_type;
137 ae_handle->dport_id = port_id; 138 ae_handle->dport_id = port_id;
138 139
139 return ae_handle; 140 return ae_handle;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index c526558e6367..3fb87e233c49 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -56,20 +56,6 @@ static const enum mac_mode g_mac_mode_1000[] = {
56 [PHY_INTERFACE_MODE_RTBI] = MAC_MODE_RTBI_1000 56 [PHY_INTERFACE_MODE_RTBI] = MAC_MODE_RTBI_1000
57}; 57};
58 58
59static enum mac_mode hns_mac_dev_to_enet_if(const struct hns_mac_cb *mac_cb)
60{
61 switch (mac_cb->max_speed) {
62 case MAC_SPEED_100:
63 return g_mac_mode_100[mac_cb->phy_if];
64 case MAC_SPEED_1000:
65 return g_mac_mode_1000[mac_cb->phy_if];
66 case MAC_SPEED_10000:
67 return MAC_MODE_XGMII_10000;
68 default:
69 return MAC_MODE_MII_100;
70 }
71}
72
73static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb) 59static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
74{ 60{
75 switch (mac_cb->max_speed) { 61 switch (mac_cb->max_speed) {
@@ -134,7 +120,6 @@ void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
134 120
135 mac_cb->speed = speed; 121 mac_cb->speed = speed;
136 mac_cb->half_duplex = !duplex; 122 mac_cb->half_duplex = !duplex;
137 mac_ctrl_drv->mac_mode = hns_mac_dev_to_enet_if(mac_cb);
138 123
139 if (mac_ctrl_drv->adjust_link) { 124 if (mac_ctrl_drv->adjust_link) {
140 ret = mac_ctrl_drv->adjust_link(mac_ctrl_drv, 125 ret = mac_ctrl_drv->adjust_link(mac_ctrl_drv,
@@ -748,6 +733,18 @@ static void hns_mac_register_phy(struct hns_mac_cb *mac_cb)
748 mac_cb->mac_id, addr); 733 mac_cb->mac_id, addr);
749} 734}
750 735
736#define MAC_MEDIA_TYPE_MAX_LEN 16
737
738static const struct {
739 enum hnae_media_type value;
740 const char *name;
741} media_type_defs[] = {
742 {HNAE_MEDIA_TYPE_UNKNOWN, "unknown" },
743 {HNAE_MEDIA_TYPE_FIBER, "fiber" },
744 {HNAE_MEDIA_TYPE_COPPER, "copper" },
745 {HNAE_MEDIA_TYPE_BACKPLANE, "backplane" },
746};
747
751/** 748/**
752 *hns_mac_get_info - get mac information from device node 749 *hns_mac_get_info - get mac information from device node
753 *@mac_cb: mac device 750 *@mac_cb: mac device
@@ -759,10 +756,13 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
759 struct device_node *np; 756 struct device_node *np;
760 struct regmap *syscon; 757 struct regmap *syscon;
761 struct of_phandle_args cpld_args; 758 struct of_phandle_args cpld_args;
759 const char *media_type;
760 u32 i;
762 u32 ret; 761 u32 ret;
763 762
764 mac_cb->link = false; 763 mac_cb->link = false;
765 mac_cb->half_duplex = false; 764 mac_cb->half_duplex = false;
765 mac_cb->media_type = HNAE_MEDIA_TYPE_UNKNOWN;
766 mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if]; 766 mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if];
767 mac_cb->max_speed = mac_cb->speed; 767 mac_cb->max_speed = mac_cb->speed;
768 768
@@ -864,6 +864,17 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
864 mac_cb->mac_id); 864 mac_cb->mac_id);
865 } 865 }
866 866
867 if (!fwnode_property_read_string(mac_cb->fw_port, "media-type",
868 &media_type)) {
869 for (i = 0; i < ARRAY_SIZE(media_type_defs); i++) {
870 if (!strncmp(media_type_defs[i].name, media_type,
871 MAC_MEDIA_TYPE_MAX_LEN)) {
872 mac_cb->media_type = media_type_defs[i].value;
873 break;
874 }
875 }
876 }
877
867 return 0; 878 return 0;
868} 879}
869 880
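The media-type parsing added to hns_mac_get_info() is the usual firmware-string-to-enum table walk: an absent or unrecognised string simply leaves the HNAE_MEDIA_TYPE_UNKNOWN default in place. A generic sketch of the same pattern (hypothetical enum and property; the real code bounds the comparison with strncmp() and MAC_MEDIA_TYPE_MAX_LEN):

#include <linux/kernel.h>
#include <linux/property.h>
#include <linux/string.h>

enum foo_media { FOO_MEDIA_UNKNOWN = 0, FOO_MEDIA_FIBER, FOO_MEDIA_COPPER };

static const struct {
	enum foo_media value;
	const char *name;
} foo_media_defs[] = {
	{ FOO_MEDIA_FIBER,  "fiber"  },
	{ FOO_MEDIA_COPPER, "copper" },
};

static enum foo_media foo_parse_media(struct fwnode_handle *fwnode)
{
	const char *s;
	unsigned int i;

	/* property absent or unreadable: keep the "unknown" default */
	if (fwnode_property_read_string(fwnode, "media-type", &s))
		return FOO_MEDIA_UNKNOWN;

	for (i = 0; i < ARRAY_SIZE(foo_media_defs); i++)
		if (!strcmp(foo_media_defs[i].name, s))
			return foo_media_defs[i].value;

	return FOO_MEDIA_UNKNOWN;
}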
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 05a6e8f7a419..4cbdf14f5c16 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -335,6 +335,7 @@ struct hns_mac_cb {
335 u64 txpkt_for_led; 335 u64 txpkt_for_led;
336 u64 rxpkt_for_led; 336 u64 rxpkt_for_led;
337 enum hnae_port_type mac_type; 337 enum hnae_port_type mac_type;
338 enum hnae_media_type media_type;
338 phy_interface_t phy_if; 339 phy_interface_t phy_if;
339 enum hnae_loop loop_mode; 340 enum hnae_loop loop_mode;
340 341
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 67e8e1323205..2ef4277d00b3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -114,9 +114,9 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
114 114
115 dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev, 115 dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev,
116 res); 116 res);
117 if (!dsaf_dev->sc_base) { 117 if (IS_ERR(dsaf_dev->sc_base)) {
118 dev_err(dsaf_dev->dev, "subctrl can not map!\n"); 118 dev_err(dsaf_dev->dev, "subctrl can not map!\n");
119 return -ENOMEM; 119 return PTR_ERR(dsaf_dev->sc_base);
120 } 120 }
121 121
122 res = platform_get_resource(pdev, IORESOURCE_MEM, 122 res = platform_get_resource(pdev, IORESOURCE_MEM,
@@ -128,9 +128,9 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
128 128
129 dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev, 129 dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev,
130 res); 130 res);
131 if (!dsaf_dev->sds_base) { 131 if (IS_ERR(dsaf_dev->sds_base)) {
132 dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n"); 132 dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n");
133 return -ENOMEM; 133 return PTR_ERR(dsaf_dev->sds_base);
134 } 134 }
135 } else { 135 } else {
136 dsaf_dev->sub_ctrl = syscon; 136 dsaf_dev->sub_ctrl = syscon;
@@ -146,9 +146,9 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
146 } 146 }
147 } 147 }
148 dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res); 148 dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res);
149 if (!dsaf_dev->ppe_base) { 149 if (IS_ERR(dsaf_dev->ppe_base)) {
150 dev_err(dsaf_dev->dev, "ppe-base resource can not map!\n"); 150 dev_err(dsaf_dev->dev, "ppe-base resource can not map!\n");
151 return -ENOMEM; 151 return PTR_ERR(dsaf_dev->ppe_base);
152 } 152 }
153 dsaf_dev->ppe_paddr = res->start; 153 dsaf_dev->ppe_paddr = res->start;
154 154
@@ -165,9 +165,9 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
165 } 165 }
166 } 166 }
167 dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res); 167 dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res);
168 if (!dsaf_dev->io_base) { 168 if (IS_ERR(dsaf_dev->io_base)) {
169 dev_err(dsaf_dev->dev, "dsaf-base resource can not map!\n"); 169 dev_err(dsaf_dev->dev, "dsaf-base resource can not map!\n");
170 return -ENOMEM; 170 return PTR_ERR(dsaf_dev->io_base);
171 } 171 }
172 } 172 }
173 173
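All four hns_dsaf_get_cfg() fixes correct the same mistake: devm_ioremap_resource() reports failure through ERR_PTR(), never NULL, so a NULL check silently accepts an error pointer and the hard-coded -ENOMEM masks the real cause. The idiomatic shape, sketched for a hypothetical mapping helper:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_map(struct platform_device *pdev, unsigned int idx,
		   void __iomem **basep)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, idx);
	void __iomem *base = devm_ioremap_resource(&pdev->dev, res);

	/* a missing resource or failed remap comes back as ERR_PTR(-E...) */
	if (IS_ERR(base))
		return PTR_ERR(base);

	*basep = base;
	return 0;
}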
@@ -2540,45 +2540,45 @@ static char *hns_dsaf_get_node_stats_strings(char *data, int node,
2540 bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver); 2540 bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
2541 2541
2542 snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node); 2542 snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node);
2543 buff = buff + ETH_GSTRING_LEN; 2543 buff += ETH_GSTRING_LEN;
2544 snprintf(buff, ETH_GSTRING_LEN, "innod%d_manage_pkts", node); 2544 snprintf(buff, ETH_GSTRING_LEN, "innod%d_manage_pkts", node);
2545 buff = buff + ETH_GSTRING_LEN; 2545 buff += ETH_GSTRING_LEN;
2546 snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkts", node); 2546 snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkts", node);
2547 buff = buff + ETH_GSTRING_LEN; 2547 buff += ETH_GSTRING_LEN;
2548 snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkt_id", node); 2548 snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkt_id", node);
2549 buff = buff + ETH_GSTRING_LEN; 2549 buff += ETH_GSTRING_LEN;
2550 snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pause_frame", node); 2550 snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pause_frame", node);
2551 buff = buff + ETH_GSTRING_LEN; 2551 buff += ETH_GSTRING_LEN;
2552 snprintf(buff, ETH_GSTRING_LEN, "innod%d_release_buf_num", node); 2552 snprintf(buff, ETH_GSTRING_LEN, "innod%d_release_buf_num", node);
2553 buff = buff + ETH_GSTRING_LEN; 2553 buff += ETH_GSTRING_LEN;
2554 snprintf(buff, ETH_GSTRING_LEN, "innod%d_sbm_drop_pkts", node); 2554 snprintf(buff, ETH_GSTRING_LEN, "innod%d_sbm_drop_pkts", node);
2555 buff = buff + ETH_GSTRING_LEN; 2555 buff += ETH_GSTRING_LEN;
2556 snprintf(buff, ETH_GSTRING_LEN, "innod%d_crc_false_pkts", node); 2556 snprintf(buff, ETH_GSTRING_LEN, "innod%d_crc_false_pkts", node);
2557 buff = buff + ETH_GSTRING_LEN; 2557 buff += ETH_GSTRING_LEN;
2558 snprintf(buff, ETH_GSTRING_LEN, "innod%d_bp_drop_pkts", node); 2558 snprintf(buff, ETH_GSTRING_LEN, "innod%d_bp_drop_pkts", node);
2559 buff = buff + ETH_GSTRING_LEN; 2559 buff += ETH_GSTRING_LEN;
2560 snprintf(buff, ETH_GSTRING_LEN, "innod%d_lookup_rslt_drop_pkts", node); 2560 snprintf(buff, ETH_GSTRING_LEN, "innod%d_lookup_rslt_drop_pkts", node);
2561 buff = buff + ETH_GSTRING_LEN; 2561 buff += ETH_GSTRING_LEN;
2562 snprintf(buff, ETH_GSTRING_LEN, "innod%d_local_rslt_fail_pkts", node); 2562 snprintf(buff, ETH_GSTRING_LEN, "innod%d_local_rslt_fail_pkts", node);
2563 buff = buff + ETH_GSTRING_LEN; 2563 buff += ETH_GSTRING_LEN;
2564 snprintf(buff, ETH_GSTRING_LEN, "innod%d_vlan_drop_pkts", node); 2564 snprintf(buff, ETH_GSTRING_LEN, "innod%d_vlan_drop_pkts", node);
2565 buff = buff + ETH_GSTRING_LEN; 2565 buff += ETH_GSTRING_LEN;
2566 snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node); 2566 snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node);
2567 buff = buff + ETH_GSTRING_LEN; 2567 buff += ETH_GSTRING_LEN;
2568 if ((node < DSAF_SERVICE_NW_NUM) && (!is_ver1)) { 2568 if (node < DSAF_SERVICE_NW_NUM && !is_ver1) {
2569 for (i = 0; i < DSAF_PRIO_NR; i++) {
2570 snprintf(buff, ETH_GSTRING_LEN,
2571 "inod%d_pfc_prio%d_pkts", node, i);
2572 buff = buff + ETH_GSTRING_LEN;
2573 }
2574 for (i = 0; i < DSAF_PRIO_NR; i++) { 2569 for (i = 0; i < DSAF_PRIO_NR; i++) {
2575 snprintf(buff, ETH_GSTRING_LEN, 2570 snprintf(buff + 0 * ETH_GSTRING_LEN * DSAF_PRIO_NR,
2576 "onod%d_pfc_prio%d_pkts", node, i); 2571 ETH_GSTRING_LEN, "inod%d_pfc_prio%d_pkts",
2577 buff = buff + ETH_GSTRING_LEN; 2572 node, i);
2573 snprintf(buff + 1 * ETH_GSTRING_LEN * DSAF_PRIO_NR,
2574 ETH_GSTRING_LEN, "onod%d_pfc_prio%d_pkts",
2575 node, i);
2576 buff += ETH_GSTRING_LEN;
2578 } 2577 }
2578 buff += 1 * DSAF_PRIO_NR * ETH_GSTRING_LEN;
2579 } 2579 }
2580 snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node); 2580 snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node);
2581 buff = buff + ETH_GSTRING_LEN; 2581 buff += ETH_GSTRING_LEN;
2582 2582
2583 return buff; 2583 return buff;
2584} 2584}
@@ -2604,10 +2604,10 @@ static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data,
2604 p[10] = hw_stats->local_addr_false; 2604 p[10] = hw_stats->local_addr_false;
2605 p[11] = hw_stats->vlan_drop; 2605 p[11] = hw_stats->vlan_drop;
2606 p[12] = hw_stats->stp_drop; 2606 p[12] = hw_stats->stp_drop;
2607 if ((node_num < DSAF_SERVICE_NW_NUM) && (!is_ver1)) { 2607 if (node_num < DSAF_SERVICE_NW_NUM && !is_ver1) {
2608 for (i = 0; i < DSAF_PRIO_NR; i++) { 2608 for (i = 0; i < DSAF_PRIO_NR; i++) {
2609 p[13 + i] = hw_stats->rx_pfc[i]; 2609 p[13 + i + 0 * DSAF_PRIO_NR] = hw_stats->rx_pfc[i];
2610 p[13 + i + DSAF_PRIO_NR] = hw_stats->tx_pfc[i]; 2610 p[13 + i + 1 * DSAF_PRIO_NR] = hw_stats->tx_pfc[i];
2611 } 2611 }
2612 p[29] = hw_stats->tx_pkts; 2612 p[29] = hw_stats->tx_pkts;
2613 return &p[30]; 2613 return &p[30];
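The string/stat reshuffle in hns_dsaf_get_node_stats_strings() keeps the two PFC blocks adjacent and emitted in the same order as the values: slots 13..13+DSAF_PRIO_NR-1 carry the rx ("inod") counters and the next DSAF_PRIO_NR slots the tx ("onod") counters, so each name lines up with p[13 + i + k * DSAF_PRIO_NR]. A toy sketch of the layout invariant, with NR standing in for DSAF_PRIO_NR:

#include <linux/ethtool.h>
#include <linux/kernel.h>

#define NR 8	/* stand-in for DSAF_PRIO_NR */

static char *foo_fill_pfc_strings(char *buff, int node)
{
	int i;

	for (i = 0; i < NR; i++) {
		/* rx name into the current slot ... */
		snprintf(buff + 0 * ETH_GSTRING_LEN * NR, ETH_GSTRING_LEN,
			 "inod%d_pfc_prio%d_pkts", node, i);
		/* ... matching tx name exactly NR slots later */
		snprintf(buff + 1 * ETH_GSTRING_LEN * NR, ETH_GSTRING_LEN,
			 "onod%d_pfc_prio%d_pkts", node, i);
		buff += ETH_GSTRING_LEN;
	}
	return buff + NR * ETH_GSTRING_LEN;	/* skip past the tx block too */
}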
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 8473287d4c8b..611b67b6f450 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -253,10 +253,9 @@ static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
253 reg_val_1 = 0x1 << port; 253 reg_val_1 = 0x1 << port;
254 port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off; 254 port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
255 /* there is difference between V1 and V2 in register.*/ 255 /* there is difference between V1 and V2 in register.*/
256 if (AE_IS_VER1(dsaf_dev->dsaf_ver)) 256 reg_val_2 = AE_IS_VER1(dsaf_dev->dsaf_ver) ?
257 reg_val_2 = 0x1041041 << port_rst_off; 257 0x1041041 : 0x2082082;
258 else 258 reg_val_2 <<= port_rst_off;
259 reg_val_2 = 0x2082082 << port_rst_off;
260 259
261 if (!dereset) { 260 if (!dereset) {
262 dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG, 261 dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
@@ -272,12 +271,11 @@ static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
272 reg_val_1); 271 reg_val_1);
273 } 272 }
274 } else { 273 } else {
275 reg_val_1 = 0x15540 << dsaf_dev->reset_offset; 274 reg_val_1 = 0x15540;
275 reg_val_2 = AE_IS_VER1(dsaf_dev->dsaf_ver) ? 0x100 : 0x40;
276 276
277 if (AE_IS_VER1(dsaf_dev->dsaf_ver)) 277 reg_val_1 <<= dsaf_dev->reset_offset;
278 reg_val_2 = 0x100 << dsaf_dev->reset_offset; 278 reg_val_2 <<= dsaf_dev->reset_offset;
279 else
280 reg_val_2 = 0x40 << dsaf_dev->reset_offset;
281 279
282 if (!dereset) { 280 if (!dereset) {
283 dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG, 281 dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index d5297ecfe4a5..d7e1f8c7ae92 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -762,13 +762,13 @@ static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
762 recv_pkts = 0, recv_bds = 0, clean_count = 0; 762 recv_pkts = 0, recv_bds = 0, clean_count = 0;
763recv: 763recv:
764 while (recv_pkts < budget && recv_bds < num) { 764 while (recv_pkts < budget && recv_bds < num) {
765 /* reuse or realloc buffers*/ 765 /* reuse or realloc buffers */
766 if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { 766 if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
767 hns_nic_alloc_rx_buffers(ring_data, clean_count); 767 hns_nic_alloc_rx_buffers(ring_data, clean_count);
768 clean_count = 0; 768 clean_count = 0;
769 } 769 }
770 770
771 /* poll one pkt*/ 771 /* poll one pkt */
772 err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum); 772 err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
773 if (unlikely(!skb)) /* this fault cannot be repaired */ 773 if (unlikely(!skb)) /* this fault cannot be repaired */
774 goto out; 774 goto out;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index a395ca1405c3..ab33487a5321 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -165,13 +165,21 @@ static int hns_nic_get_settings(struct net_device *net_dev,
165 cmd->advertising |= ADVERTISED_10000baseKR_Full; 165 cmd->advertising |= ADVERTISED_10000baseKR_Full;
166 } 166 }
167 167
168 if (h->port_type == HNAE_PORT_SERVICE) { 168 switch (h->media_type) {
169 case HNAE_MEDIA_TYPE_FIBER:
169 cmd->port = PORT_FIBRE; 170 cmd->port = PORT_FIBRE;
170 cmd->supported |= SUPPORTED_Pause; 171 break;
171 } else { 172 case HNAE_MEDIA_TYPE_COPPER:
172 cmd->port = PORT_TP; 173 cmd->port = PORT_TP;
174 break;
175 case HNAE_MEDIA_TYPE_UNKNOWN:
176 default:
177 break;
173 } 178 }
174 179
180 if (!(AE_IS_VER1(priv->enet_ver) && h->port_type == HNAE_PORT_DEBUG))
181 cmd->supported |= SUPPORTED_Pause;
182
175 cmd->transceiver = XCVR_EXTERNAL; 183 cmd->transceiver = XCVR_EXTERNAL;
176 cmd->mdio_support = (ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22); 184 cmd->mdio_support = (ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22);
177 hns_get_mdix_mode(net_dev, cmd); 185 hns_get_mdix_mode(net_dev, cmd);
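With the media type threaded through from the MAC controller, the ethtool port can be reported from what the board actually wires up instead of being inferred from the service/debug role of the port. One plausible standalone mapping, using the enum introduced in hnae.h above (the hunk itself simply leaves cmd->port untouched for unknown or backplane media):

#include <linux/ethtool.h>
#include "hnae.h"	/* for enum hnae_media_type, quoted above */

static u8 foo_media_to_port(enum hnae_media_type media)
{
	switch (media) {
	case HNAE_MEDIA_TYPE_FIBER:
		return PORT_FIBRE;
	case HNAE_MEDIA_TYPE_COPPER:
		return PORT_TP;
	default:
		return PORT_OTHER;	/* unknown/backplane */
	}
}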
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 761a32fceceb..33f4c483af0f 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -37,9 +37,19 @@
37 37
38#define MDIO_TIMEOUT 1000000 38#define MDIO_TIMEOUT 1000000
39 39
40struct hns_mdio_sc_reg {
41 u16 mdio_clk_en;
42 u16 mdio_clk_dis;
43 u16 mdio_reset_req;
44 u16 mdio_reset_dreq;
45 u16 mdio_clk_st;
46 u16 mdio_reset_st;
47};
48
40struct hns_mdio_device { 49struct hns_mdio_device {
41 void *vbase; /* mdio reg base address */ 50 void *vbase; /* mdio reg base address */
42 struct regmap *subctrl_vbase; 51 struct regmap *subctrl_vbase;
52 struct hns_mdio_sc_reg sc_reg;
43}; 53};
44 54
45/* mdio reg */ 55/* mdio reg */
@@ -93,7 +103,6 @@ enum mdio_c45_op_seq {
93#define MDIO_SC_CLK_DIS 0x33C 103#define MDIO_SC_CLK_DIS 0x33C
94#define MDIO_SC_RESET_REQ 0xA38 104#define MDIO_SC_RESET_REQ 0xA38
95#define MDIO_SC_RESET_DREQ 0xA3C 105#define MDIO_SC_RESET_DREQ 0xA3C
96#define MDIO_SC_CTRL 0x2010
97#define MDIO_SC_CLK_ST 0x531C 106#define MDIO_SC_CLK_ST 0x531C
98#define MDIO_SC_RESET_ST 0x5A1C 107#define MDIO_SC_RESET_ST 0x5A1C
99 108
@@ -353,6 +362,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
353static int hns_mdio_reset(struct mii_bus *bus) 362static int hns_mdio_reset(struct mii_bus *bus)
354{ 363{
355 struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv; 364 struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
365 const struct hns_mdio_sc_reg *sc_reg;
356 int ret; 366 int ret;
357 367
358 if (dev_of_node(bus->parent)) { 368 if (dev_of_node(bus->parent)) {
@@ -361,9 +371,10 @@ static int hns_mdio_reset(struct mii_bus *bus)
361 return -ENODEV; 371 return -ENODEV;
362 } 372 }
363 373
374 sc_reg = &mdio_dev->sc_reg;
364 /* 1. reset req, and read reset st check */ 375 /* 1. reset req, and read reset st check */
365 ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_REQ, 0x1, 376 ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_reset_req,
366 MDIO_SC_RESET_ST, 0x1, 377 0x1, sc_reg->mdio_reset_st, 0x1,
367 MDIO_CHECK_SET_ST); 378 MDIO_CHECK_SET_ST);
368 if (ret) { 379 if (ret) {
369 dev_err(&bus->dev, "MDIO reset fail\n"); 380 dev_err(&bus->dev, "MDIO reset fail\n");
@@ -371,8 +382,8 @@ static int hns_mdio_reset(struct mii_bus *bus)
371 } 382 }
372 383
373 /* 2. dis clk, and read clk st check */ 384 /* 2. dis clk, and read clk st check */
374 ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_DIS, 385 ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_clk_dis,
375 0x1, MDIO_SC_CLK_ST, 0x1, 386 0x1, sc_reg->mdio_clk_st, 0x1,
376 MDIO_CHECK_CLR_ST); 387 MDIO_CHECK_CLR_ST);
377 if (ret) { 388 if (ret) {
378 dev_err(&bus->dev, "MDIO dis clk fail\n"); 389 dev_err(&bus->dev, "MDIO dis clk fail\n");
@@ -380,8 +391,8 @@ static int hns_mdio_reset(struct mii_bus *bus)
380 } 391 }
381 392
382 /* 3. reset dreq, and read reset st check */ 393 /* 3. reset dreq, and read reset st check */
383 ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_DREQ, 0x1, 394 ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_reset_dreq,
384 MDIO_SC_RESET_ST, 0x1, 395 0x1, sc_reg->mdio_reset_st, 0x1,
385 MDIO_CHECK_CLR_ST); 396 MDIO_CHECK_CLR_ST);
386 if (ret) { 397 if (ret) {
 387 dev_err(&bus->dev, "MDIO reset dreq fail\n"); 398 dev_err(&bus->dev, "MDIO reset dreq fail\n");
@@ -389,8 +400,8 @@ static int hns_mdio_reset(struct mii_bus *bus)
389 } 400 }
390 401
391 /* 4. en clk, and read clk st check */ 402 /* 4. en clk, and read clk st check */
392 ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_EN, 403 ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_clk_en,
393 0x1, MDIO_SC_CLK_ST, 0x1, 404 0x1, sc_reg->mdio_clk_st, 0x1,
394 MDIO_CHECK_SET_ST); 405 MDIO_CHECK_SET_ST);
395 if (ret) 406 if (ret)
396 dev_err(&bus->dev, "MDIO en clk fail\n"); 407 dev_err(&bus->dev, "MDIO en clk fail\n");
@@ -458,13 +469,54 @@ static int hns_mdio_probe(struct platform_device *pdev)
458 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%s", "Mii", 469 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%s", "Mii",
459 dev_name(&pdev->dev)); 470 dev_name(&pdev->dev));
460 if (dev_of_node(&pdev->dev)) { 471 if (dev_of_node(&pdev->dev)) {
461 mdio_dev->subctrl_vbase = syscon_node_to_regmap( 472 struct of_phandle_args reg_args;
462 of_parse_phandle(pdev->dev.of_node, 473
463 "subctrl-vbase", 0)); 474 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
464 if (IS_ERR(mdio_dev->subctrl_vbase)) { 475 "subctrl-vbase",
465 dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n"); 476 4,
477 0,
478 &reg_args);
479 if (!ret) {
480 mdio_dev->subctrl_vbase =
481 syscon_node_to_regmap(reg_args.np);
482 if (IS_ERR(mdio_dev->subctrl_vbase)) {
483 dev_warn(&pdev->dev, "syscon_node_to_regmap error\n");
484 mdio_dev->subctrl_vbase = NULL;
485 } else {
486 if (reg_args.args_count == 4) {
487 mdio_dev->sc_reg.mdio_clk_en =
488 (u16)reg_args.args[0];
489 mdio_dev->sc_reg.mdio_clk_dis =
490 (u16)reg_args.args[0] + 4;
491 mdio_dev->sc_reg.mdio_reset_req =
492 (u16)reg_args.args[1];
493 mdio_dev->sc_reg.mdio_reset_dreq =
494 (u16)reg_args.args[1] + 4;
495 mdio_dev->sc_reg.mdio_clk_st =
496 (u16)reg_args.args[2];
497 mdio_dev->sc_reg.mdio_reset_st =
498 (u16)reg_args.args[3];
499 } else {
 500 /* fall back to the fixed layout for compatibility */
501 mdio_dev->sc_reg.mdio_clk_en =
502 MDIO_SC_CLK_EN;
503 mdio_dev->sc_reg.mdio_clk_dis =
504 MDIO_SC_CLK_DIS;
505 mdio_dev->sc_reg.mdio_reset_req =
506 MDIO_SC_RESET_REQ;
507 mdio_dev->sc_reg.mdio_reset_dreq =
508 MDIO_SC_RESET_DREQ;
509 mdio_dev->sc_reg.mdio_clk_st =
510 MDIO_SC_CLK_ST;
511 mdio_dev->sc_reg.mdio_reset_st =
512 MDIO_SC_RESET_ST;
513 }
514 }
515 } else {
516 dev_warn(&pdev->dev, "find syscon ret = %#x\n", ret);
466 mdio_dev->subctrl_vbase = NULL; 517 mdio_dev->subctrl_vbase = NULL;
467 } 518 }
519
468 ret = of_mdiobus_register(new_bus, pdev->dev.of_node); 520 ret = of_mdiobus_register(new_bus, pdev->dev.of_node);
469 } else if (is_acpi_node(pdev->dev.fwnode)) { 521 } else if (is_acpi_node(pdev->dev.fwnode)) {
470 /* Clear all the IRQ properties */ 522 /* Clear all the IRQ properties */
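The hns_mdio probe rework fetches the subctrl syscon through of_parse_phandle_with_fixed_args() so that four register offsets (clock enable, reset request, clock status, reset status) can ride along with the phandle, while devicetrees that supply no argument cells fall back to the historical MDIO_SC_* constants. A condensed sketch of the lookup, reusing the "subctrl-vbase" property name from the hunk:

#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static struct regmap *foo_get_subctrl(struct device_node *np,
				      struct of_phandle_args *args)
{
	/* expect exactly four argument cells after the phandle */
	if (of_parse_phandle_with_fixed_args(np, "subctrl-vbase", 4, 0, args))
		return NULL;

	/* args->args[0..3] now carry the per-board register offsets;
	 * the caller must still IS_ERR()-check the regmap.
	 */
	return syscon_node_to_regmap(args->np);
}

hns_mdio_reset() then reads the offsets from sc_reg instead of the build-time constants, which is what lets one binary drive subctrl blocks with different register layouts.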
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 864cb21351a4..ecdb6854a898 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2121,7 +2121,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
2121 struct ibmvnic_adapter *adapter) 2121 struct ibmvnic_adapter *adapter)
2122{ 2122{
2123 struct device *dev = &adapter->vdev->dev; 2123 struct device *dev = &adapter->vdev->dev;
2124 struct ibmvnic_error_buff *error_buff; 2124 struct ibmvnic_error_buff *error_buff, *tmp;
2125 unsigned long flags; 2125 unsigned long flags;
2126 bool found = false; 2126 bool found = false;
2127 int i; 2127 int i;
@@ -2133,7 +2133,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
2133 } 2133 }
2134 2134
2135 spin_lock_irqsave(&adapter->error_list_lock, flags); 2135 spin_lock_irqsave(&adapter->error_list_lock, flags);
2136 list_for_each_entry(error_buff, &adapter->errors, list) 2136 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
2137 if (error_buff->error_id == crq->request_error_rsp.error_id) { 2137 if (error_buff->error_id == crq->request_error_rsp.error_id) {
2138 found = true; 2138 found = true;
2139 list_del(&error_buff->list); 2139 list_del(&error_buff->list);
@@ -3141,14 +3141,14 @@ static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
3141 3141
3142static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter) 3142static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
3143{ 3143{
3144 struct ibmvnic_inflight_cmd *inflight_cmd; 3144 struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
3145 struct device *dev = &adapter->vdev->dev; 3145 struct device *dev = &adapter->vdev->dev;
3146 struct ibmvnic_error_buff *error_buff; 3146 struct ibmvnic_error_buff *error_buff, *tmp2;
3147 unsigned long flags; 3147 unsigned long flags;
3148 unsigned long flags2; 3148 unsigned long flags2;
3149 3149
3150 spin_lock_irqsave(&adapter->inflight_lock, flags); 3150 spin_lock_irqsave(&adapter->inflight_lock, flags);
3151 list_for_each_entry(inflight_cmd, &adapter->inflight, list) { 3151 list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
3152 switch (inflight_cmd->crq.generic.cmd) { 3152 switch (inflight_cmd->crq.generic.cmd) {
3153 case LOGIN: 3153 case LOGIN:
3154 dma_unmap_single(dev, adapter->login_buf_token, 3154 dma_unmap_single(dev, adapter->login_buf_token,
@@ -3165,8 +3165,8 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
3165 break; 3165 break;
3166 case REQUEST_ERROR_INFO: 3166 case REQUEST_ERROR_INFO:
3167 spin_lock_irqsave(&adapter->error_list_lock, flags2); 3167 spin_lock_irqsave(&adapter->error_list_lock, flags2);
3168 list_for_each_entry(error_buff, &adapter->errors, 3168 list_for_each_entry_safe(error_buff, tmp2,
3169 list) { 3169 &adapter->errors, list) {
3170 dma_unmap_single(dev, error_buff->dma, 3170 dma_unmap_single(dev, error_buff->dma,
3171 error_buff->len, 3171 error_buff->len,
3172 DMA_FROM_DEVICE); 3172 DMA_FROM_DEVICE);
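All three ibmvnic hunks fix the same iterator bug: calling list_del() inside a plain list_for_each_entry() leaves the cursor pointing at an unlinked (and here immediately freed) node, so the next step of the walk is a use-after-free; the _safe variant caches the successor first. A minimal illustration:

#include <linux/list.h>
#include <linux/slab.h>

struct foo_buff {
	struct list_head list;
	int id;
};

static void foo_purge(struct list_head *head, int id)
{
	struct foo_buff *buf, *tmp;

	/* tmp holds the successor, so deleting buf cannot break the walk */
	list_for_each_entry_safe(buf, tmp, head, list) {
		if (buf->id == id) {
			list_del(&buf->list);
			kfree(buf);
		}
	}
}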
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 75e60897b7e7..41f32c0b341e 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2789,7 +2789,7 @@ static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
2789} 2789}
2790 2790
2791/** 2791/**
2792 * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping 2792 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
2793 * @adapter: board private structure to initialize 2793 * @adapter: board private structure to initialize
2794 **/ 2794 **/
2795static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter) 2795static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
@@ -4352,7 +4352,8 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
4352 4352
4353 time_delta = systim_next - systim; 4353 time_delta = systim_next - systim;
4354 temp = time_delta; 4354 temp = time_delta;
 4355 rem = do_div(temp, incvalue); 4355 /* VMware users have seen incvalue of zero, don't div / 0 */
4356 rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
4356 4357
4357 systim = systim_next; 4358 systim = systim_next;
4358 4359
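The cyclecounter guard matters because do_div() divides its 64-bit first argument in place and returns the remainder; with the incvalue of zero that the comment says VMware users have hit, the division itself would trap. A reduced sketch of the guarded form:

#include <asm/div64.h>
#include <linux/types.h>

static u64 foo_cycles_per_inc(u64 time_delta, u32 incvalue, u32 *rem)
{
	u64 temp = time_delta;

	/* do_div() modifies temp in place and returns the remainder;
	 * a zero divisor skips the division and just flags a nonzero delta.
	 */
	*rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
	return temp;
}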
@@ -6915,6 +6916,14 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,
6915 if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN)) 6916 if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
6916 features &= ~NETIF_F_RXFCS; 6917 features &= ~NETIF_F_RXFCS;
6917 6918
6919 /* Since there is no support for separate Rx/Tx vlan accel
6920 * enable/disable make sure Tx flag is always in same state as Rx.
6921 */
6922 if (features & NETIF_F_HW_VLAN_CTAG_RX)
6923 features |= NETIF_F_HW_VLAN_CTAG_TX;
6924 else
6925 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
6926
6918 return features; 6927 return features;
6919} 6928}
6920 6929
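ndo_fix_features runs under rtnl before a requested feature set is committed, which makes it the right hook for hardware that can only toggle Rx and Tx VLAN offload together: whatever userspace asks for, the two CTAG flags leave the hook in lockstep. The added e1000e logic, restated in isolation:

#include <linux/netdevice.h>

static netdev_features_t foo_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	/* the MAC strips and inserts VLAN tags as a single setting */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}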
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index fcf106e545c5..e98b86bf0ca1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -406,7 +406,7 @@ static inline u16 fm10k_desc_unused(struct fm10k_ring *ring)
406 (&(((union fm10k_rx_desc *)((R)->desc))[i])) 406 (&(((union fm10k_rx_desc *)((R)->desc))[i]))
407 407
408#define FM10K_MAX_TXD_PWR 14 408#define FM10K_MAX_TXD_PWR 14
409#define FM10K_MAX_DATA_PER_TXD BIT(FM10K_MAX_TXD_PWR) 409#define FM10K_MAX_DATA_PER_TXD (1u << FM10K_MAX_TXD_PWR)
410 410
411/* Tx Descriptors needed, worst case */ 411/* Tx Descriptors needed, worst case */
412#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD) 412#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 9c0d87503977..9b5195435c87 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -983,9 +983,10 @@ void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir)
983 /* generate a new table if we weren't given one */ 983 /* generate a new table if we weren't given one */
984 for (j = 0; j < 4; j++) { 984 for (j = 0; j < 4; j++) {
985 if (indir) 985 if (indir)
986 n = indir[i + j]; 986 n = indir[4 * i + j];
987 else 987 else
988 n = ethtool_rxfh_indir_default(i + j, rss_i); 988 n = ethtool_rxfh_indir_default(4 * i + j,
989 rss_i);
989 990
990 table[j] = n; 991 table[j] = n;
991 } 992 }
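The fm10k indirection-table fix follows from how the RETA is packed: each 32-bit register holds four byte-sized queue indices, so the logical entry feeding byte j of register i is 4 * i + j; the old i + j re-used the first quarter of the table four times over. One way to sketch the packing (foo_reta_word is hypothetical):

#include <linux/ethtool.h>
#include <linux/types.h>

static u32 foo_reta_word(const u32 *indir, u32 i, u16 rss_i)
{
	u32 word = 0;
	int j;

	for (j = 0; j < 4; j++) {
		/* logical RETA entry for byte j of register i */
		u32 n = indir ? indir[4 * i + j]
			      : ethtool_rxfh_indir_default(4 * i + j, rss_i);

		word |= (n & 0xff) << (8 * j);
	}
	return word;
}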
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 0e166e9c90c8..a9ccc1eb3ea4 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -56,7 +56,7 @@ static int __init fm10k_init_module(void)
56 pr_info("%s\n", fm10k_copyright); 56 pr_info("%s\n", fm10k_copyright);
57 57
58 /* create driver workqueue */ 58 /* create driver workqueue */
59 fm10k_workqueue = create_workqueue("fm10k"); 59 fm10k_workqueue = alloc_workqueue("fm10k", WQ_MEM_RECLAIM, 0);
60 60
61 fm10k_dbg_init(); 61 fm10k_dbg_init();
62 62
@@ -77,7 +77,6 @@ static void __exit fm10k_exit_module(void)
77 fm10k_dbg_exit(); 77 fm10k_dbg_exit();
78 78
79 /* destroy driver workqueue */ 79 /* destroy driver workqueue */
80 flush_workqueue(fm10k_workqueue);
81 destroy_workqueue(fm10k_workqueue); 80 destroy_workqueue(fm10k_workqueue);
82} 81}
83module_exit(fm10k_exit_module); 82module_exit(fm10k_exit_module);
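create_workqueue() is a legacy wrapper; a driver whose work items sit on the packet-reclaim path should request WQ_MEM_RECLAIM explicitly so the queue keeps a rescuer thread for forward progress under memory pressure. The separate flush_workqueue() on exit could go because destroy_workqueue() already drains pending work. Sketched module plumbing under those assumptions:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *foo_wq;

static int __init foo_init(void)
{
	/* WQ_MEM_RECLAIM guarantees forward progress under memory pressure */
	foo_wq = alloc_workqueue("foo", WQ_MEM_RECLAIM, 0);
	return foo_wq ? 0 : -ENOMEM;
}

static void __exit foo_exit(void)
{
	destroy_workqueue(foo_wq);	/* implies a final drain */
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");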
@@ -272,7 +271,7 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
272#if (PAGE_SIZE < 8192) 271#if (PAGE_SIZE < 8192)
273 unsigned int truesize = FM10K_RX_BUFSZ; 272 unsigned int truesize = FM10K_RX_BUFSZ;
274#else 273#else
275 unsigned int truesize = SKB_DATA_ALIGN(size); 274 unsigned int truesize = ALIGN(size, 512);
276#endif 275#endif
277 unsigned int pull_len; 276 unsigned int pull_len;
278 277
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 9c44739da5e2..e83fc8afb30f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -283,6 +283,7 @@ struct i40e_pf {
283#endif /* I40E_FCOE */ 283#endif /* I40E_FCOE */
284 u16 num_lan_qps; /* num lan queues this PF has set up */ 284 u16 num_lan_qps; /* num lan queues this PF has set up */
285 u16 num_lan_msix; /* num queue vectors for the base PF vsi */ 285 u16 num_lan_msix; /* num queue vectors for the base PF vsi */
286 u16 num_fdsb_msix; /* num queue vectors for sideband Fdir */
286 u16 num_iwarp_msix; /* num of iwarp vectors for this PF */ 287 u16 num_iwarp_msix; /* num of iwarp vectors for this PF */
287 int iwarp_base_vector; 288 int iwarp_base_vector;
288 int queues_left; /* queues left unclaimed */ 289 int queues_left; /* queues left unclaimed */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 422b41d61c9a..e447dc435464 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1967,6 +1967,62 @@ aq_add_vsi_exit:
1967} 1967}
1968 1968
1969/** 1969/**
1970 * i40e_aq_set_default_vsi
1971 * @hw: pointer to the hw struct
1972 * @seid: vsi number
1973 * @cmd_details: pointer to command details structure or NULL
1974 **/
1975i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
1976 u16 seid,
1977 struct i40e_asq_cmd_details *cmd_details)
1978{
1979 struct i40e_aq_desc desc;
1980 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
1981 (struct i40e_aqc_set_vsi_promiscuous_modes *)
1982 &desc.params.raw;
1983 i40e_status status;
1984
1985 i40e_fill_default_direct_cmd_desc(&desc,
1986 i40e_aqc_opc_set_vsi_promiscuous_modes);
1987
1988 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1989 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
1990 cmd->seid = cpu_to_le16(seid);
1991
1992 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1993
1994 return status;
1995}
1996
1997/**
1998 * i40e_aq_clear_default_vsi
1999 * @hw: pointer to the hw struct
2000 * @seid: vsi number
2001 * @cmd_details: pointer to command details structure or NULL
2002 **/
2003i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
2004 u16 seid,
2005 struct i40e_asq_cmd_details *cmd_details)
2006{
2007 struct i40e_aq_desc desc;
2008 struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
2009 (struct i40e_aqc_set_vsi_promiscuous_modes *)
2010 &desc.params.raw;
2011 i40e_status status;
2012
2013 i40e_fill_default_direct_cmd_desc(&desc,
2014 i40e_aqc_opc_set_vsi_promiscuous_modes);
2015
2016 cmd->promiscuous_flags = cpu_to_le16(0);
2017 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
2018 cmd->seid = cpu_to_le16(seid);
2019
2020 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2021
2022 return status;
2023}
2024
2025/**
1970 * i40e_aq_set_vsi_unicast_promiscuous 2026 * i40e_aq_set_vsi_unicast_promiscuous
1971 * @hw: pointer to the hw struct 2027 * @hw: pointer to the hw struct
1972 * @seid: vsi number 2028 * @seid: vsi number
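The new set/clear helpers differ only in whether I40E_AQC_SET_VSI_DEFAULT lands in promiscuous_flags; valid_flags carries the bit either way so firmware knows which field is being written. A compressed sketch of the shared shape, assuming the driver-internal headers quoted above (foo_aq_default_vsi is hypothetical):

#include "i40e_prototype.h"	/* driver-internal AQ plumbing */

static i40e_status foo_aq_default_vsi(struct i40e_hw *hw, u16 seid, bool set)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);
	/* valid_flags selects the field; promiscuous_flags sets or clears it */
	cmd->promiscuous_flags = cpu_to_le16(set ? I40E_AQC_SET_VSI_DEFAULT : 0);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
	cmd->seid = cpu_to_le16(seid);

	return i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
}

i40e_main.c below calls the set variant when promiscuous mode turns on and the clear variant when it turns off, replacing the old full-PF reset on every toggle.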
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 5e8d84ff7d5f..4962e855fbd3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -313,8 +313,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
313 *advertising |= ADVERTISED_Autoneg | 313 *advertising |= ADVERTISED_Autoneg |
314 ADVERTISED_40000baseCR4_Full; 314 ADVERTISED_40000baseCR4_Full;
315 } 315 }
316 if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) && 316 if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
317 !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
318 *supported |= SUPPORTED_Autoneg | 317 *supported |= SUPPORTED_Autoneg |
319 SUPPORTED_100baseT_Full; 318 SUPPORTED_100baseT_Full;
320 *advertising |= ADVERTISED_Autoneg | 319 *advertising |= ADVERTISED_Autoneg |
@@ -663,6 +662,7 @@ static int i40e_set_settings(struct net_device *netdev,
663 if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && 662 if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
664 hw->phy.media_type != I40E_MEDIA_TYPE_FIBER && 663 hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
665 hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE && 664 hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
665 hw->phy.media_type != I40E_MEDIA_TYPE_DA &&
666 hw->phy.link_info.link_info & I40E_AQ_LINK_UP) 666 hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
667 return -EOPNOTSUPP; 667 return -EOPNOTSUPP;
668 668
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 734cba693d16..2b1140563a64 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -40,8 +40,8 @@ static const char i40e_driver_string[] =
40#define DRV_KERN "-k" 40#define DRV_KERN "-k"
41 41
42#define DRV_VERSION_MAJOR 1 42#define DRV_VERSION_MAJOR 1
43#define DRV_VERSION_MINOR 5 43#define DRV_VERSION_MINOR 6
44#define DRV_VERSION_BUILD 16 44#define DRV_VERSION_BUILD 4
45#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 45#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
46 __stringify(DRV_VERSION_MINOR) "." \ 46 __stringify(DRV_VERSION_MINOR) "." \
47 __stringify(DRV_VERSION_BUILD) DRV_KERN 47 __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -1579,14 +1579,8 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1579 vsi->tc_config.numtc = numtc; 1579 vsi->tc_config.numtc = numtc;
1580 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; 1580 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1581 /* Number of queues per enabled TC */ 1581 /* Number of queues per enabled TC */
1582 /* In MFP case we can have a much lower count of MSIx 1582 qcount = vsi->alloc_queue_pairs;
1583 * vectors available and so we need to lower the used 1583
1584 * q count.
1585 */
1586 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1587 qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
1588 else
1589 qcount = vsi->alloc_queue_pairs;
1590 num_tc_qps = qcount / numtc; 1584 num_tc_qps = qcount / numtc;
1591 num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf)); 1585 num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
1592 1586
@@ -1840,8 +1834,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1840{ 1834{
1841 struct list_head tmp_del_list, tmp_add_list; 1835 struct list_head tmp_del_list, tmp_add_list;
1842 struct i40e_mac_filter *f, *ftmp, *fclone; 1836 struct i40e_mac_filter *f, *ftmp, *fclone;
1837 struct i40e_hw *hw = &vsi->back->hw;
1843 bool promisc_forced_on = false; 1838 bool promisc_forced_on = false;
1844 bool add_happened = false; 1839 bool add_happened = false;
1840 char vsi_name[16] = "PF";
1845 int filter_list_len = 0; 1841 int filter_list_len = 0;
1846 u32 changed_flags = 0; 1842 u32 changed_flags = 0;
1847 i40e_status aq_ret = 0; 1843 i40e_status aq_ret = 0;
@@ -1869,6 +1865,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1869 INIT_LIST_HEAD(&tmp_del_list); 1865 INIT_LIST_HEAD(&tmp_del_list);
1870 INIT_LIST_HEAD(&tmp_add_list); 1866 INIT_LIST_HEAD(&tmp_add_list);
1871 1867
1868 if (vsi->type == I40E_VSI_SRIOV)
1869 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
1870 else if (vsi->type != I40E_VSI_MAIN)
1871 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
1872
1872 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { 1873 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1873 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; 1874 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1874 1875
@@ -1920,7 +1921,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1920 if (!list_empty(&tmp_del_list)) { 1921 if (!list_empty(&tmp_del_list)) {
1921 int del_list_size; 1922 int del_list_size;
1922 1923
1923 filter_list_len = pf->hw.aq.asq_buf_size / 1924 filter_list_len = hw->aq.asq_buf_size /
1924 sizeof(struct i40e_aqc_remove_macvlan_element_data); 1925 sizeof(struct i40e_aqc_remove_macvlan_element_data);
1925 del_list_size = filter_list_len * 1926 del_list_size = filter_list_len *
1926 sizeof(struct i40e_aqc_remove_macvlan_element_data); 1927 sizeof(struct i40e_aqc_remove_macvlan_element_data);
@@ -1952,21 +1953,21 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1952 1953
1953 /* flush a full buffer */ 1954 /* flush a full buffer */
1954 if (num_del == filter_list_len) { 1955 if (num_del == filter_list_len) {
1955 aq_ret = i40e_aq_remove_macvlan(&pf->hw, 1956 aq_ret =
1956 vsi->seid, 1957 i40e_aq_remove_macvlan(hw, vsi->seid,
1957 del_list, 1958 del_list,
1958 num_del, 1959 num_del, NULL);
1959 NULL); 1960 aq_err = hw->aq.asq_last_status;
1960 aq_err = pf->hw.aq.asq_last_status;
1961 num_del = 0; 1961 num_del = 0;
1962 memset(del_list, 0, del_list_size); 1962 memset(del_list, 0, del_list_size);
1963 1963
1964 if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) { 1964 if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
1965 retval = -EIO; 1965 retval = -EIO;
1966 dev_err(&pf->pdev->dev, 1966 dev_err(&pf->pdev->dev,
1967 "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", 1967 "ignoring delete macvlan error on %s, err %s, aq_err %s while flushing a full buffer\n",
1968 i40e_stat_str(&pf->hw, aq_ret), 1968 vsi_name,
1969 i40e_aq_str(&pf->hw, aq_err)); 1969 i40e_stat_str(hw, aq_ret),
1970 i40e_aq_str(hw, aq_err));
1970 } 1971 }
1971 } 1972 }
1972 /* Release memory for MAC filter entries which were 1973 /* Release memory for MAC filter entries which were
@@ -1977,17 +1978,17 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1977 } 1978 }
1978 1979
1979 if (num_del) { 1980 if (num_del) {
1980 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, 1981 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list,
1981 del_list, num_del, 1982 num_del, NULL);
1982 NULL); 1983 aq_err = hw->aq.asq_last_status;
1983 aq_err = pf->hw.aq.asq_last_status;
1984 num_del = 0; 1984 num_del = 0;
1985 1985
1986 if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) 1986 if (aq_ret && aq_err != I40E_AQ_RC_ENOENT)
1987 dev_info(&pf->pdev->dev, 1987 dev_info(&pf->pdev->dev,
1988 "ignoring delete macvlan error, err %s aq_err %s\n", 1988 "ignoring delete macvlan error on %s, err %s aq_err %s\n",
1989 i40e_stat_str(&pf->hw, aq_ret), 1989 vsi_name,
1990 i40e_aq_str(&pf->hw, aq_err)); 1990 i40e_stat_str(hw, aq_ret),
1991 i40e_aq_str(hw, aq_err));
1991 } 1992 }
1992 1993
1993 kfree(del_list); 1994 kfree(del_list);
@@ -1998,7 +1999,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1998 int add_list_size; 1999 int add_list_size;
1999 2000
2000 /* do all the adds now */ 2001 /* do all the adds now */
2001 filter_list_len = pf->hw.aq.asq_buf_size / 2002 filter_list_len = hw->aq.asq_buf_size /
2002 sizeof(struct i40e_aqc_add_macvlan_element_data), 2003 sizeof(struct i40e_aqc_add_macvlan_element_data),
2003 add_list_size = filter_list_len * 2004 add_list_size = filter_list_len *
2004 sizeof(struct i40e_aqc_add_macvlan_element_data); 2005 sizeof(struct i40e_aqc_add_macvlan_element_data);
@@ -2033,10 +2034,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2033 2034
2034 /* flush a full buffer */ 2035 /* flush a full buffer */
2035 if (num_add == filter_list_len) { 2036 if (num_add == filter_list_len) {
2036 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 2037 aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
2037 add_list, num_add, 2038 add_list, num_add,
2038 NULL); 2039 NULL);
2039 aq_err = pf->hw.aq.asq_last_status; 2040 aq_err = hw->aq.asq_last_status;
2040 num_add = 0; 2041 num_add = 0;
2041 2042
2042 if (aq_ret) 2043 if (aq_ret)
@@ -2051,9 +2052,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2051 } 2052 }
2052 2053
2053 if (num_add) { 2054 if (num_add) {
2054 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, 2055 aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
2055 add_list, num_add, NULL); 2056 add_list, num_add, NULL);
2056 aq_err = pf->hw.aq.asq_last_status; 2057 aq_err = hw->aq.asq_last_status;
2057 num_add = 0; 2058 num_add = 0;
2058 } 2059 }
2059 kfree(add_list); 2060 kfree(add_list);
@@ -2062,16 +2063,18 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2062 if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) { 2063 if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) {
2063 retval = i40e_aq_rc_to_posix(aq_ret, aq_err); 2064 retval = i40e_aq_rc_to_posix(aq_ret, aq_err);
2064 dev_info(&pf->pdev->dev, 2065 dev_info(&pf->pdev->dev,
2065 "add filter failed, err %s aq_err %s\n", 2066 "add filter failed on %s, err %s aq_err %s\n",
2066 i40e_stat_str(&pf->hw, aq_ret), 2067 vsi_name,
2067 i40e_aq_str(&pf->hw, aq_err)); 2068 i40e_stat_str(hw, aq_ret),
2068 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && 2069 i40e_aq_str(hw, aq_err));
2070 if ((hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
2069 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, 2071 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2070 &vsi->state)) { 2072 &vsi->state)) {
2071 promisc_forced_on = true; 2073 promisc_forced_on = true;
2072 set_bit(__I40E_FILTER_OVERFLOW_PROMISC, 2074 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2073 &vsi->state); 2075 &vsi->state);
2074 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n"); 2076 dev_info(&pf->pdev->dev, "promiscuous mode forced on %s\n",
2077 vsi_name);
2075 } 2078 }
2076 } 2079 }
2077 } 2080 }
@@ -2093,12 +2096,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2093 NULL); 2096 NULL);
2094 if (aq_ret) { 2097 if (aq_ret) {
2095 retval = i40e_aq_rc_to_posix(aq_ret, 2098 retval = i40e_aq_rc_to_posix(aq_ret,
2096 pf->hw.aq.asq_last_status); 2099 hw->aq.asq_last_status);
2097 dev_info(&pf->pdev->dev, 2100 dev_info(&pf->pdev->dev,
2098 "set multi promisc failed, err %s aq_err %s\n", 2101 "set multi promisc failed on %s, err %s aq_err %s\n",
2099 i40e_stat_str(&pf->hw, aq_ret), 2102 vsi_name,
2100 i40e_aq_str(&pf->hw, 2103 i40e_stat_str(hw, aq_ret),
2101 pf->hw.aq.asq_last_status)); 2104 i40e_aq_str(hw, hw->aq.asq_last_status));
2102 } 2105 }
2103 } 2106 }
2104 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { 2107 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
@@ -2117,33 +2120,58 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2117 */ 2120 */
2118 if (pf->cur_promisc != cur_promisc) { 2121 if (pf->cur_promisc != cur_promisc) {
2119 pf->cur_promisc = cur_promisc; 2122 pf->cur_promisc = cur_promisc;
2120 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 2123 if (cur_promisc)
2124 aq_ret =
2125 i40e_aq_set_default_vsi(hw,
2126 vsi->seid,
2127 NULL);
2128 else
2129 aq_ret =
2130 i40e_aq_clear_default_vsi(hw,
2131 vsi->seid,
2132 NULL);
2133 if (aq_ret) {
2134 retval = i40e_aq_rc_to_posix(aq_ret,
2135 hw->aq.asq_last_status);
2136 dev_info(&pf->pdev->dev,
2137 "Set default VSI failed on %s, err %s, aq_err %s\n",
2138 vsi_name,
2139 i40e_stat_str(hw, aq_ret),
2140 i40e_aq_str(hw,
2141 hw->aq.asq_last_status));
2142 }
2121 } 2143 }
2122 } else { 2144 } else {
2123 aq_ret = i40e_aq_set_vsi_unicast_promiscuous( 2145 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2124 &vsi->back->hw, 2146 hw,
2125 vsi->seid, 2147 vsi->seid,
2126 cur_promisc, NULL, 2148 cur_promisc, NULL,
2127 true); 2149 true);
2128 if (aq_ret) { 2150 if (aq_ret) {
2129 retval = 2151 retval =
2130 i40e_aq_rc_to_posix(aq_ret, 2152 i40e_aq_rc_to_posix(aq_ret,
2131 pf->hw.aq.asq_last_status); 2153 hw->aq.asq_last_status);
2132 dev_info(&pf->pdev->dev, 2154 dev_info(&pf->pdev->dev,
2133 "set unicast promisc failed, err %d, aq_err %d\n", 2155 "set unicast promisc failed on %s, err %s, aq_err %s\n",
2134 aq_ret, pf->hw.aq.asq_last_status); 2156 vsi_name,
2157 i40e_stat_str(hw, aq_ret),
2158 i40e_aq_str(hw,
2159 hw->aq.asq_last_status));
2135 } 2160 }
2136 aq_ret = i40e_aq_set_vsi_multicast_promiscuous( 2161 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2137 &vsi->back->hw, 2162 hw,
2138 vsi->seid, 2163 vsi->seid,
2139 cur_promisc, NULL); 2164 cur_promisc, NULL);
2140 if (aq_ret) { 2165 if (aq_ret) {
2141 retval = 2166 retval =
2142 i40e_aq_rc_to_posix(aq_ret, 2167 i40e_aq_rc_to_posix(aq_ret,
2143 pf->hw.aq.asq_last_status); 2168 hw->aq.asq_last_status);
2144 dev_info(&pf->pdev->dev, 2169 dev_info(&pf->pdev->dev,
2145 "set multicast promisc failed, err %d, aq_err %d\n", 2170 "set multicast promisc failed on %s, err %s, aq_err %s\n",
2146 aq_ret, pf->hw.aq.asq_last_status); 2171 vsi_name,
2172 i40e_stat_str(hw, aq_ret),
2173 i40e_aq_str(hw,
2174 hw->aq.asq_last_status));
2147 } 2175 }
2148 } 2176 }
2149 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, 2177 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
@@ -2154,9 +2182,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2154 pf->hw.aq.asq_last_status); 2182 pf->hw.aq.asq_last_status);
2155 dev_info(&pf->pdev->dev, 2183 dev_info(&pf->pdev->dev,
2156 "set brdcast promisc failed, err %s, aq_err %s\n", 2184 "set brdcast promisc failed, err %s, aq_err %s\n",
2157 i40e_stat_str(&pf->hw, aq_ret), 2185 i40e_stat_str(hw, aq_ret),
2158 i40e_aq_str(&pf->hw, 2186 i40e_aq_str(hw,
2159 pf->hw.aq.asq_last_status)); 2187 hw->aq.asq_last_status));
2160 } 2188 }
2161 } 2189 }
2162out: 2190out:
@@ -3947,6 +3975,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3947 /* clear the affinity_mask in the IRQ descriptor */ 3975 /* clear the affinity_mask in the IRQ descriptor */
3948 irq_set_affinity_hint(pf->msix_entries[vector].vector, 3976 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3949 NULL); 3977 NULL);
3978 synchronize_irq(pf->msix_entries[vector].vector);
3950 free_irq(pf->msix_entries[vector].vector, 3979 free_irq(pf->msix_entries[vector].vector,
3951 vsi->q_vectors[i]); 3980 vsi->q_vectors[i]);
3952 3981
@@ -4953,7 +4982,6 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4953 if (pf->vsi[v]->netdev) 4982 if (pf->vsi[v]->netdev)
4954 i40e_dcbnl_set_all(pf->vsi[v]); 4983 i40e_dcbnl_set_all(pf->vsi[v]);
4955 } 4984 }
4956 i40e_notify_client_of_l2_param_changes(pf->vsi[v]);
4957 } 4985 }
4958} 4986}
4959 4987
@@ -5178,12 +5206,6 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
5178 usleep_range(1000, 2000); 5206 usleep_range(1000, 2000);
5179 i40e_down(vsi); 5207 i40e_down(vsi);
5180 5208
5181 /* Give a VF some time to respond to the reset. The
5182 * two second wait is based upon the watchdog cycle in
5183 * the VF driver.
5184 */
5185 if (vsi->type == I40E_VSI_SRIOV)
5186 msleep(2000);
5187 i40e_up(vsi); 5209 i40e_up(vsi);
5188 clear_bit(__I40E_CONFIG_BUSY, &pf->state); 5210 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
5189} 5211}
@@ -5226,6 +5248,9 @@ void i40e_down(struct i40e_vsi *vsi)
5226 i40e_clean_tx_ring(vsi->tx_rings[i]); 5248 i40e_clean_tx_ring(vsi->tx_rings[i]);
5227 i40e_clean_rx_ring(vsi->rx_rings[i]); 5249 i40e_clean_rx_ring(vsi->rx_rings[i]);
5228 } 5250 }
5251
5252 i40e_notify_client_of_netdev_close(vsi, false);
5253
5229} 5254}
5230 5255
5231/** 5256/**
@@ -5704,6 +5729,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
5704 i40e_service_event_schedule(pf); 5729 i40e_service_event_schedule(pf);
5705 } else { 5730 } else {
5706 i40e_pf_unquiesce_all_vsi(pf); 5731 i40e_pf_unquiesce_all_vsi(pf);
 5732 /* Notify the client of the DCB changes */
5733 i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
5707 } 5734 }
5708 5735
5709exit: 5736exit:
@@ -5928,7 +5955,6 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
5928 if (I40E_DEBUG_FD & pf->hw.debug_mask) 5955 if (I40E_DEBUG_FD & pf->hw.debug_mask)
5929 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); 5956 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
5930 } 5957 }
5931
5932} 5958}
5933 5959
5934/** 5960/**
@@ -7160,7 +7186,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
7160 vsi->alloc_queue_pairs = 1; 7186 vsi->alloc_queue_pairs = 1;
7161 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT, 7187 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
7162 I40E_REQ_DESCRIPTOR_MULTIPLE); 7188 I40E_REQ_DESCRIPTOR_MULTIPLE);
7163 vsi->num_q_vectors = 1; 7189 vsi->num_q_vectors = pf->num_fdsb_msix;
7164 break; 7190 break;
7165 7191
7166 case I40E_VSI_VMDQ2: 7192 case I40E_VSI_VMDQ2:
@@ -7544,9 +7570,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
7544 /* reserve one vector for sideband flow director */ 7570 /* reserve one vector for sideband flow director */
7545 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 7571 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7546 if (vectors_left) { 7572 if (vectors_left) {
7573 pf->num_fdsb_msix = 1;
7547 v_budget++; 7574 v_budget++;
7548 vectors_left--; 7575 vectors_left--;
7549 } else { 7576 } else {
7577 pf->num_fdsb_msix = 0;
7550 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7578 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7551 } 7579 }
7552 } 7580 }
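
The new pf->num_fdsb_msix counter records whether the sideband flow-director function actually received a vector from the shrinking MSI-X budget; the i40e_set_num_rings_in_vsi and i40e_set_ntuple hunks consume it instead of assuming the vector exists. A condensed sketch of the reserve-and-record pattern, with illustrative names:

	/* Sketch: optional vector reservation from a fixed budget. */
	if (want_sideband && vectors_left) {
		num_sideband_vectors = 1;     /* got one: feature stays on */
		v_budget++;
		vectors_left--;
	} else if (want_sideband) {
		num_sideband_vectors = 0;     /* budget exhausted */
		flags &= ~FLAG_SIDEBAND_ENABLED;
	}

	/* later, when the feature is requested at runtime: */
	if (num_sideband_vectors > 0)
		flags |= FLAG_SIDEBAND_ENABLED;
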
@@ -8565,7 +8593,9 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
8565 /* Enable filters and mark for reset */ 8593 /* Enable filters and mark for reset */
8566 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 8594 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
8567 need_reset = true; 8595 need_reset = true;
8568 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 8596 /* enable FD_SB only if there is MSI-X vector */
8597 if (pf->num_fdsb_msix > 0)
8598 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8569 } else { 8599 } else {
8570 /* turn off filters, mark for reset and clear SW filter list */ 8600 /* turn off filters, mark for reset and clear SW filter list */
8571 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { 8601 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
@@ -10053,14 +10083,14 @@ void i40e_veb_release(struct i40e_veb *veb)
10053static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) 10083static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
10054{ 10084{
10055 struct i40e_pf *pf = veb->pf; 10085 struct i40e_pf *pf = veb->pf;
10056 bool is_default = veb->pf->cur_promisc;
10057 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); 10086 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
10058 int ret; 10087 int ret;
10059 10088
10060 /* get a VEB from the hardware */
10061 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, 10089 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
10062 veb->enabled_tc, is_default, 10090 veb->enabled_tc, false,
10063 &veb->seid, enable_stats, NULL); 10091 &veb->seid, enable_stats, NULL);
10092
10093 /* get a VEB from the hardware */
10064 if (ret) { 10094 if (ret) {
10065 dev_info(&pf->pdev->dev, 10095 dev_info(&pf->pdev->dev,
10066 "couldn't add VEB, err %s aq_err %s\n", 10096 "couldn't add VEB, err %s aq_err %s\n",
@@ -11441,6 +11471,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
11441{ 11471{
11442 struct i40e_pf *pf = pci_get_drvdata(pdev); 11472 struct i40e_pf *pf = pci_get_drvdata(pdev);
11443 struct i40e_hw *hw = &pf->hw; 11473 struct i40e_hw *hw = &pf->hw;
11474 int retval = 0;
11444 11475
11445 set_bit(__I40E_SUSPENDED, &pf->state); 11476 set_bit(__I40E_SUSPENDED, &pf->state);
11446 set_bit(__I40E_DOWN, &pf->state); 11477 set_bit(__I40E_DOWN, &pf->state);
@@ -11452,10 +11483,16 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
11452 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); 11483 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11453 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); 11484 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11454 11485
11486 i40e_stop_misc_vector(pf);
11487
11488 retval = pci_save_state(pdev);
11489 if (retval)
11490 return retval;
11491
11455 pci_wake_from_d3(pdev, pf->wol_en); 11492 pci_wake_from_d3(pdev, pf->wol_en);
11456 pci_set_power_state(pdev, PCI_D3hot); 11493 pci_set_power_state(pdev, PCI_D3hot);
11457 11494
11458 return 0; 11495 return retval;
11459} 11496}
11460 11497
11461/** 11498/**
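
The reworked suspend handler now saves PCI configuration space and propagates any failure instead of returning 0 unconditionally, and it stops the misc vector before the device drops to D3hot. A minimal sketch of that ordering, assuming the standard PCI power-management helpers (stop_misc_vector() stands in for the driver's teardown step):

	/* Sketch: legacy PCI suspend with error propagation. */
	static int example_suspend(struct pci_dev *pdev, pm_message_t state)
	{
		int ret;

		stop_misc_vector(pdev);       /* quiesce remaining interrupts */

		ret = pci_save_state(pdev);   /* must succeed before D3 entry */
		if (ret)
			return ret;

		pci_wake_from_d3(pdev, wol_enabled);
		pci_set_power_state(pdev, PCI_D3hot);
		return 0;
	}
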
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 80403c6ee7f0..4660c5abc855 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -98,6 +98,8 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
98 struct i40e_asq_cmd_details *cmd_details); 98 struct i40e_asq_cmd_details *cmd_details);
99i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, 99i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
100 struct i40e_asq_cmd_details *cmd_details); 100 struct i40e_asq_cmd_details *cmd_details);
101i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
102 struct i40e_asq_cmd_details *cmd_details);
101enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, 103enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
102 bool qualified_modules, bool report_init, 104 bool qualified_modules, bool report_init,
103 struct i40e_aq_get_phy_abilities_resp *abilities, 105 struct i40e_aq_get_phy_abilities_resp *abilities,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 1fcafcfa8f14..6fcbf764f32b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -665,6 +665,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
665 goto error_alloc_vsi_res; 665 goto error_alloc_vsi_res;
666 } 666 }
667 if (type == I40E_VSI_SRIOV) { 667 if (type == I40E_VSI_SRIOV) {
668 u64 hena = i40e_pf_get_default_rss_hena(pf);
669
668 vf->lan_vsi_idx = vsi->idx; 670 vf->lan_vsi_idx = vsi->idx;
669 vf->lan_vsi_id = vsi->id; 671 vf->lan_vsi_id = vsi->id;
670 /* If the port VLAN has been configured and then the 672 /* If the port VLAN has been configured and then the
@@ -687,6 +689,10 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
687 vf->default_lan_addr.addr, vf->vf_id); 689 vf->default_lan_addr.addr, vf->vf_id);
688 } 690 }
689 spin_unlock_bh(&vsi->mac_filter_list_lock); 691 spin_unlock_bh(&vsi->mac_filter_list_lock);
692 i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id),
693 (u32)hena);
694 i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
695 (u32)(hena >> 32));
690 } 696 }
691 697
692 /* program mac filter */ 698 /* program mac filter */
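
The SR-IOV allocation path now seeds each VF with the PF's default RSS hash-enable (HENA) mask; since the mask is 64 bits wide and the VFQF_HENA1 registers are 32 bits, it is written low word first, then high word. The splitting pattern, with hypothetical register names:

	/* Sketch: write a 64-bit mask into a pair of 32-bit registers. */
	static void write_hena(u64 hena)
	{
		write_reg32(HENA_REG_LO, (u32)hena);          /* bits 31:0  */
		write_reg32(HENA_REG_HI, (u32)(hena >> 32));  /* bits 63:32 */
	}
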
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 16c552952860..eac057b88055 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -37,8 +37,8 @@ static const char i40evf_driver_string[] =
37#define DRV_KERN "-k" 37#define DRV_KERN "-k"
38 38
39#define DRV_VERSION_MAJOR 1 39#define DRV_VERSION_MAJOR 1
40#define DRV_VERSION_MINOR 5 40#define DRV_VERSION_MINOR 6
41#define DRV_VERSION_BUILD 10 41#define DRV_VERSION_BUILD 4
42#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 42#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
43 __stringify(DRV_VERSION_MINOR) "." \ 43 __stringify(DRV_VERSION_MINOR) "." \
44 __stringify(DRV_VERSION_BUILD) \ 44 __stringify(DRV_VERSION_BUILD) \
@@ -825,7 +825,7 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
825 825
826 ether_addr_copy(f->macaddr, macaddr); 826 ether_addr_copy(f->macaddr, macaddr);
827 827
828 list_add(&f->list, &adapter->mac_filter_list); 828 list_add_tail(&f->list, &adapter->mac_filter_list);
829 f->add = true; 829 f->add = true;
830 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; 830 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
831 } 831 }
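
Switching list_add() to list_add_tail() makes the VF's MAC filter list FIFO-ordered, so filters are handed to the PF in the order they were added; the bounded batching fix in i40evf_virtchnl.c below relies on a stable walk order across retries. A sketch of the difference, assuming the usual linux/list.h semantics:

	/* Sketch: head insertion vs. tail insertion and walk order. */
	LIST_HEAD(filters);

	list_add(&a->list, &filters);       /* walk order: a           */
	list_add(&b->list, &filters);       /* walk order: b, a (LIFO) */
	list_add_tail(&c->list, &filters);  /* walk order: b, a, c     */
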
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index f13445691507..d76c221d4c8a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -434,6 +434,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
434 ether_addr_copy(veal->list[i].addr, f->macaddr); 434 ether_addr_copy(veal->list[i].addr, f->macaddr);
435 i++; 435 i++;
436 f->add = false; 436 f->add = false;
437 if (i == count)
438 break;
437 } 439 }
438 } 440 }
439 if (!more) 441 if (!more)
@@ -497,6 +499,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
497 i++; 499 i++;
498 list_del(&f->list); 500 list_del(&f->list);
499 kfree(f); 501 kfree(f);
502 if (i == count)
503 break;
500 } 504 }
501 } 505 }
502 if (!more) 506 if (!more)
@@ -560,6 +564,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
560 vvfl->vlan_id[i] = f->vlan; 564 vvfl->vlan_id[i] = f->vlan;
561 i++; 565 i++;
562 f->add = false; 566 f->add = false;
567 if (i == count)
568 break;
563 } 569 }
564 } 570 }
565 if (!more) 571 if (!more)
@@ -623,6 +629,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
623 i++; 629 i++;
624 list_del(&f->list); 630 list_del(&f->list);
625 kfree(f); 631 kfree(f);
632 if (i == count)
633 break;
626 } 634 }
627 } 635 }
628 if (!more) 636 if (!more)
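
All four virtchnl message builders size their buffer for `count` entries and then walk the entire filter list, so an entry added between the count and the walk would be written past the end of the allocation; the added `if (i == count) break;` caps the walk at the buffer size and leaves the remainder for the next message. The guard pattern in isolation:

	/* Sketch: fill a fixed-size message from a list without overrun. */
	i = 0;
	list_for_each_entry(f, &filter_list, list) {
		if (f->add) {
			msg->list[i++] = f->entry;
			f->add = false;
			if (i == count)   /* buffer full: stop, send, retry */
				break;
		}
	}
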
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index b9609afa5ca3..5387b3a96489 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -445,6 +445,7 @@ struct igb_adapter {
445 unsigned long ptp_tx_start; 445 unsigned long ptp_tx_start;
446 unsigned long last_rx_ptp_check; 446 unsigned long last_rx_ptp_check;
447 unsigned long last_rx_timestamp; 447 unsigned long last_rx_timestamp;
448 unsigned int ptp_flags;
448 spinlock_t tmreg_lock; 449 spinlock_t tmreg_lock;
449 struct cyclecounter cc; 450 struct cyclecounter cc;
450 struct timecounter tc; 451 struct timecounter tc;
@@ -474,12 +475,15 @@ struct igb_adapter {
474 u16 eee_advert; 475 u16 eee_advert;
475}; 476};
476 477
478/* flags controlling PTP/1588 function */
479#define IGB_PTP_ENABLED BIT(0)
480#define IGB_PTP_OVERFLOW_CHECK BIT(1)
481
477#define IGB_FLAG_HAS_MSI BIT(0) 482#define IGB_FLAG_HAS_MSI BIT(0)
478#define IGB_FLAG_DCA_ENABLED BIT(1) 483#define IGB_FLAG_DCA_ENABLED BIT(1)
479#define IGB_FLAG_QUAD_PORT_A BIT(2) 484#define IGB_FLAG_QUAD_PORT_A BIT(2)
480#define IGB_FLAG_QUEUE_PAIRS BIT(3) 485#define IGB_FLAG_QUEUE_PAIRS BIT(3)
481#define IGB_FLAG_DMAC BIT(4) 486#define IGB_FLAG_DMAC BIT(4)
482#define IGB_FLAG_PTP BIT(5)
483#define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6) 487#define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
484#define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7) 488#define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
485#define IGB_FLAG_WOL_SUPPORTED BIT(8) 489#define IGB_FLAG_WOL_SUPPORTED BIT(8)
@@ -546,6 +550,7 @@ void igb_set_fw_version(struct igb_adapter *);
546void igb_ptp_init(struct igb_adapter *adapter); 550void igb_ptp_init(struct igb_adapter *adapter);
547void igb_ptp_stop(struct igb_adapter *adapter); 551void igb_ptp_stop(struct igb_adapter *adapter);
548void igb_ptp_reset(struct igb_adapter *adapter); 552void igb_ptp_reset(struct igb_adapter *adapter);
553void igb_ptp_suspend(struct igb_adapter *adapter);
549void igb_ptp_rx_hang(struct igb_adapter *adapter); 554void igb_ptp_rx_hang(struct igb_adapter *adapter);
550void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); 555void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
551void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, 556void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ef3d642f5ff2..9bcba42abb91 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2027,7 +2027,8 @@ void igb_reset(struct igb_adapter *adapter)
2027 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); 2027 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
2028 2028
2029 /* Re-enable PTP, where applicable. */ 2029 /* Re-enable PTP, where applicable. */
2030 igb_ptp_reset(adapter); 2030 if (adapter->ptp_flags & IGB_PTP_ENABLED)
2031 igb_ptp_reset(adapter);
2031 2032
2032 igb_get_phy_info(hw); 2033 igb_get_phy_info(hw);
2033} 2034}
@@ -6855,12 +6856,12 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6855 **/ 6856 **/
6856static bool igb_add_rx_frag(struct igb_ring *rx_ring, 6857static bool igb_add_rx_frag(struct igb_ring *rx_ring,
6857 struct igb_rx_buffer *rx_buffer, 6858 struct igb_rx_buffer *rx_buffer,
6859 unsigned int size,
6858 union e1000_adv_rx_desc *rx_desc, 6860 union e1000_adv_rx_desc *rx_desc,
6859 struct sk_buff *skb) 6861 struct sk_buff *skb)
6860{ 6862{
6861 struct page *page = rx_buffer->page; 6863 struct page *page = rx_buffer->page;
6862 unsigned char *va = page_address(page) + rx_buffer->page_offset; 6864 unsigned char *va = page_address(page) + rx_buffer->page_offset;
6863 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
6864#if (PAGE_SIZE < 8192) 6865#if (PAGE_SIZE < 8192)
6865 unsigned int truesize = IGB_RX_BUFSZ; 6866 unsigned int truesize = IGB_RX_BUFSZ;
6866#else 6867#else
@@ -6912,6 +6913,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
6912 union e1000_adv_rx_desc *rx_desc, 6913 union e1000_adv_rx_desc *rx_desc,
6913 struct sk_buff *skb) 6914 struct sk_buff *skb)
6914{ 6915{
6916 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
6915 struct igb_rx_buffer *rx_buffer; 6917 struct igb_rx_buffer *rx_buffer;
6916 struct page *page; 6918 struct page *page;
6917 6919
@@ -6947,11 +6949,11 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
6947 dma_sync_single_range_for_cpu(rx_ring->dev, 6949 dma_sync_single_range_for_cpu(rx_ring->dev,
6948 rx_buffer->dma, 6950 rx_buffer->dma,
6949 rx_buffer->page_offset, 6951 rx_buffer->page_offset,
6950 IGB_RX_BUFSZ, 6952 size,
6951 DMA_FROM_DEVICE); 6953 DMA_FROM_DEVICE);
6952 6954
6953 /* pull page into skb */ 6955 /* pull page into skb */
6954 if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { 6956 if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
6955 /* hand second half of page back to the ring */ 6957 /* hand second half of page back to the ring */
6956 igb_reuse_rx_page(rx_ring, rx_buffer); 6958 igb_reuse_rx_page(rx_ring, rx_buffer);
6957 } else { 6959 } else {
@@ -7527,6 +7529,8 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
7527 if (netif_running(netdev)) 7529 if (netif_running(netdev))
7528 __igb_close(netdev, true); 7530 __igb_close(netdev, true);
7529 7531
7532 igb_ptp_suspend(adapter);
7533
7530 igb_clear_interrupt_scheme(adapter); 7534 igb_clear_interrupt_scheme(adapter);
7531 7535
7532#ifdef CONFIG_PM 7536#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index f097c5a8ab93..e61b647f5f2a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -684,6 +684,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
684 u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); 684 u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL);
685 unsigned long rx_event; 685 unsigned long rx_event;
686 686
687 /* Other hardware uses per-packet timestamps */
687 if (hw->mac.type != e1000_82576) 688 if (hw->mac.type != e1000_82576)
688 return; 689 return;
689 690
@@ -1042,6 +1043,13 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
1042 -EFAULT : 0; 1043 -EFAULT : 0;
1043} 1044}
1044 1045
1046/**
1047 * igb_ptp_init - Initialize PTP functionality
1048 * @adapter: Board private structure
1049 *
1050 * This function is called at device probe to initialize the PTP
1051 * functionality.
1052 */
1045void igb_ptp_init(struct igb_adapter *adapter) 1053void igb_ptp_init(struct igb_adapter *adapter)
1046{ 1054{
1047 struct e1000_hw *hw = &adapter->hw; 1055 struct e1000_hw *hw = &adapter->hw;
@@ -1064,8 +1072,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
1064 adapter->cc.mask = CYCLECOUNTER_MASK(64); 1072 adapter->cc.mask = CYCLECOUNTER_MASK(64);
1065 adapter->cc.mult = 1; 1073 adapter->cc.mult = 1;
1066 adapter->cc.shift = IGB_82576_TSYNC_SHIFT; 1074 adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
1067 /* Dial the nominal frequency. */ 1075 adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK;
1068 wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
1069 break; 1076 break;
1070 case e1000_82580: 1077 case e1000_82580:
1071 case e1000_i354: 1078 case e1000_i354:
@@ -1084,8 +1091,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
1084 adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580); 1091 adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580);
1085 adapter->cc.mult = 1; 1092 adapter->cc.mult = 1;
1086 adapter->cc.shift = 0; 1093 adapter->cc.shift = 0;
1087 /* Enable the timer functions by clearing bit 31. */ 1094 adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK;
1088 wr32(E1000_TSAUXC, 0x0);
1089 break; 1095 break;
1090 case e1000_i210: 1096 case e1000_i210:
1091 case e1000_i211: 1097 case e1000_i211:
@@ -1110,44 +1116,24 @@ void igb_ptp_init(struct igb_adapter *adapter)
1110 adapter->ptp_caps.settime64 = igb_ptp_settime_i210; 1116 adapter->ptp_caps.settime64 = igb_ptp_settime_i210;
1111 adapter->ptp_caps.enable = igb_ptp_feature_enable_i210; 1117 adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
1112 adapter->ptp_caps.verify = igb_ptp_verify_pin; 1118 adapter->ptp_caps.verify = igb_ptp_verify_pin;
1113 /* Enable the timer functions by clearing bit 31. */
1114 wr32(E1000_TSAUXC, 0x0);
1115 break; 1119 break;
1116 default: 1120 default:
1117 adapter->ptp_clock = NULL; 1121 adapter->ptp_clock = NULL;
1118 return; 1122 return;
1119 } 1123 }
1120 1124
1121 wrfl();
1122
1123 spin_lock_init(&adapter->tmreg_lock); 1125 spin_lock_init(&adapter->tmreg_lock);
1124 INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); 1126 INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
1125 1127
1126 /* Initialize the clock and overflow work for devices that need it. */ 1128 if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
1127 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
1128 struct timespec64 ts = ktime_to_timespec64(ktime_get_real());
1129
1130 igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
1131 } else {
1132 timecounter_init(&adapter->tc, &adapter->cc,
1133 ktime_to_ns(ktime_get_real()));
1134
1135 INIT_DELAYED_WORK(&adapter->ptp_overflow_work, 1129 INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
1136 igb_ptp_overflow_check); 1130 igb_ptp_overflow_check);
1137 1131
1138 schedule_delayed_work(&adapter->ptp_overflow_work,
1139 IGB_SYSTIM_OVERFLOW_PERIOD);
1140 }
1141
1142 /* Initialize the time sync interrupts for devices that support it. */
1143 if (hw->mac.type >= e1000_82580) {
1144 wr32(E1000_TSIM, TSYNC_INTERRUPTS);
1145 wr32(E1000_IMS, E1000_IMS_TS);
1146 }
1147
1148 adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 1132 adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
1149 adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; 1133 adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
1150 1134
1135 igb_ptp_reset(adapter);
1136
1151 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, 1137 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
1152 &adapter->pdev->dev); 1138 &adapter->pdev->dev);
1153 if (IS_ERR(adapter->ptp_clock)) { 1139 if (IS_ERR(adapter->ptp_clock)) {
@@ -1156,32 +1142,24 @@ void igb_ptp_init(struct igb_adapter *adapter)
1156 } else { 1142 } else {
1157 dev_info(&adapter->pdev->dev, "added PHC on %s\n", 1143 dev_info(&adapter->pdev->dev, "added PHC on %s\n",
1158 adapter->netdev->name); 1144 adapter->netdev->name);
1159 adapter->flags |= IGB_FLAG_PTP; 1145 adapter->ptp_flags |= IGB_PTP_ENABLED;
1160 } 1146 }
1161} 1147}
1162 1148
1163/** 1149/**
1164 * igb_ptp_stop - Disable PTP device and stop the overflow check. 1150 * igb_ptp_suspend - Disable PTP work items and prepare for suspend
1165 * @adapter: Board private structure. 1151 * @adapter: Board private structure
1166 * 1152 *
1167 * This function stops the PTP support and cancels the delayed work. 1153 * This function stops the overflow check work and PTP Tx timestamp work, and
1168 **/ 1154 * will prepare the device for OS suspend.
1169void igb_ptp_stop(struct igb_adapter *adapter) 1155 */
1156void igb_ptp_suspend(struct igb_adapter *adapter)
1170{ 1157{
1171 switch (adapter->hw.mac.type) { 1158 if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
1172 case e1000_82576:
1173 case e1000_82580:
1174 case e1000_i354:
1175 case e1000_i350:
1176 cancel_delayed_work_sync(&adapter->ptp_overflow_work);
1177 break;
1178 case e1000_i210:
1179 case e1000_i211:
1180 /* No delayed work to cancel. */
1181 break;
1182 default:
1183 return; 1159 return;
1184 } 1160
1161 if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
1162 cancel_delayed_work_sync(&adapter->ptp_overflow_work);
1185 1163
1186 cancel_work_sync(&adapter->ptp_tx_work); 1164 cancel_work_sync(&adapter->ptp_tx_work);
1187 if (adapter->ptp_tx_skb) { 1165 if (adapter->ptp_tx_skb) {
@@ -1189,12 +1167,23 @@ void igb_ptp_stop(struct igb_adapter *adapter)
1189 adapter->ptp_tx_skb = NULL; 1167 adapter->ptp_tx_skb = NULL;
1190 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); 1168 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
1191 } 1169 }
1170}
1171
1172/**
1173 * igb_ptp_stop - Disable PTP device and stop the overflow check.
1174 * @adapter: Board private structure.
1175 *
1176 * This function stops the PTP support and cancels the delayed work.
1177 **/
1178void igb_ptp_stop(struct igb_adapter *adapter)
1179{
1180 igb_ptp_suspend(adapter);
1192 1181
1193 if (adapter->ptp_clock) { 1182 if (adapter->ptp_clock) {
1194 ptp_clock_unregister(adapter->ptp_clock); 1183 ptp_clock_unregister(adapter->ptp_clock);
1195 dev_info(&adapter->pdev->dev, "removed PHC on %s\n", 1184 dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
1196 adapter->netdev->name); 1185 adapter->netdev->name);
1197 adapter->flags &= ~IGB_FLAG_PTP; 1186 adapter->ptp_flags &= ~IGB_PTP_ENABLED;
1198 } 1187 }
1199} 1188}
1200 1189
@@ -1209,9 +1198,6 @@ void igb_ptp_reset(struct igb_adapter *adapter)
1209 struct e1000_hw *hw = &adapter->hw; 1198 struct e1000_hw *hw = &adapter->hw;
1210 unsigned long flags; 1199 unsigned long flags;
1211 1200
1212 if (!(adapter->flags & IGB_FLAG_PTP))
1213 return;
1214
1215 /* reset the tstamp_config */ 1201 /* reset the tstamp_config */
1216 igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); 1202 igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
1217 1203
@@ -1248,4 +1234,10 @@ void igb_ptp_reset(struct igb_adapter *adapter)
1248 } 1234 }
1249out: 1235out:
1250 spin_unlock_irqrestore(&adapter->tmreg_lock, flags); 1236 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
1237
1238 wrfl();
1239
1240 if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
1241 schedule_delayed_work(&adapter->ptp_overflow_work,
1242 IGB_SYSTIM_OVERFLOW_PERIOD);
1251} 1243}
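
The igb PTP rework replaces per-MAC special cases with two feature bits: IGB_PTP_ENABLED gates reset and suspend entirely, while IGB_PTP_OVERFLOW_CHECK records whether the timecounter overflow worker exists for this MAC. igb_ptp_stop() is now layered on the new igb_ptp_suspend(), so shutdown and OS suspend share one quiesce path, and igb_ptp_reset() takes over the register programming that used to live in igb_ptp_init(). A sketch of the flag-driven layering, with generic names:

	/* Sketch: suspend cancels deferred work; stop = suspend + unregister. */
	static void ptp_suspend(struct adapter *ad)
	{
		if (!(ad->ptp_flags & PTP_ENABLED))
			return;
		if (ad->ptp_flags & PTP_OVERFLOW_CHECK)
			cancel_delayed_work_sync(&ad->overflow_work);
		cancel_work_sync(&ad->tx_work);
	}

	static void ptp_stop(struct adapter *ad)
	{
		ptp_suspend(ad);              /* shared quiesce path */
		if (ad->clock) {
			ptp_clock_unregister(ad->clock);
			ad->ptp_flags &= ~PTP_ENABLED;
		}
	}
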
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 59b771b9b354..8a8450788124 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2991,10 +2991,15 @@ static int ixgbe_get_ts_info(struct net_device *dev,
2991{ 2991{
2992 struct ixgbe_adapter *adapter = netdev_priv(dev); 2992 struct ixgbe_adapter *adapter = netdev_priv(dev);
2993 2993
2994 /* we always support timestamping disabled */
2995 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
2996
2994 switch (adapter->hw.mac.type) { 2997 switch (adapter->hw.mac.type) {
2995 case ixgbe_mac_X550: 2998 case ixgbe_mac_X550:
2996 case ixgbe_mac_X550EM_x: 2999 case ixgbe_mac_X550EM_x:
2997 case ixgbe_mac_x550em_a: 3000 case ixgbe_mac_x550em_a:
3001 info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
3002 /* fallthrough */
2998 case ixgbe_mac_X540: 3003 case ixgbe_mac_X540:
2999 case ixgbe_mac_82599EB: 3004 case ixgbe_mac_82599EB:
3000 info->so_timestamping = 3005 info->so_timestamping =
@@ -3014,8 +3019,7 @@ static int ixgbe_get_ts_info(struct net_device *dev,
3014 BIT(HWTSTAMP_TX_OFF) | 3019 BIT(HWTSTAMP_TX_OFF) |
3015 BIT(HWTSTAMP_TX_ON); 3020 BIT(HWTSTAMP_TX_ON);
3016 3021
3017 info->rx_filters = 3022 info->rx_filters |=
3018 BIT(HWTSTAMP_FILTER_NONE) |
3019 BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | 3023 BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
3020 BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | 3024 BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
3021 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); 3025 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
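
ixgbe_get_ts_info() now builds rx_filters additively: HWTSTAMP_FILTER_NONE is always advertised, and X550-class parts OR in HWTSTAMP_FILTER_ALL before falling through to the PTP filters shared with X540/82599. The accumulate-and-fallthrough shape, reduced to its essentials:

	/* Sketch: accumulate capability bits across a fallthrough switch. */
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);  /* always supported */

	switch (mac_type) {
	case MAC_NEWER:
		info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
		/* fallthrough: newer parts support the older filters too */
	case MAC_OLDER:
		info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
		break;
	default:
		break;  /* nothing beyond NONE */
	}
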
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 468fa9ddfa06..fd5a761c68f3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8300,14 +8300,53 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
8300static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, 8300static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
8301 struct tc_cls_u32_offload *cls) 8301 struct tc_cls_u32_offload *cls)
8302{ 8302{
8303 u32 hdl = cls->knode.handle;
8303 u32 uhtid = TC_U32_USERHTID(cls->knode.handle); 8304 u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
8304 u32 loc; 8305 u32 loc = cls->knode.handle & 0xfffff;
8305 int err; 8306 int err = 0, i, j;
8307 struct ixgbe_jump_table *jump = NULL;
8308
8309 if (loc > IXGBE_MAX_HW_ENTRIES)
8310 return -EINVAL;
8306 8311
8307 if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE)) 8312 if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
8308 return -EINVAL; 8313 return -EINVAL;
8309 8314
8310 loc = cls->knode.handle & 0xfffff; 8315 /* Clear this filter in the link data it is associated with */
8316 if (uhtid != 0x800) {
8317 jump = adapter->jump_tables[uhtid];
8318 if (!jump)
8319 return -EINVAL;
8320 if (!test_bit(loc - 1, jump->child_loc_map))
8321 return -EINVAL;
8322 clear_bit(loc - 1, jump->child_loc_map);
8323 }
8324
8325 /* Check if the filter being deleted is a link */
8326 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
8327 jump = adapter->jump_tables[i];
8328 if (jump && jump->link_hdl == hdl) {
8329 /* Delete filters in the hardware in the child hash
8330 * table associated with this link
8331 */
8332 for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
8333 if (!test_bit(j, jump->child_loc_map))
8334 continue;
8335 spin_lock(&adapter->fdir_perfect_lock);
8336 err = ixgbe_update_ethtool_fdir_entry(adapter,
8337 NULL,
8338 j + 1);
8339 spin_unlock(&adapter->fdir_perfect_lock);
8340 clear_bit(j, jump->child_loc_map);
8341 }
8342 /* Remove resources for this link */
8343 kfree(jump->input);
8344 kfree(jump->mask);
8345 kfree(jump);
8346 adapter->jump_tables[i] = NULL;
8347 return err;
8348 }
8349 }
8311 8350
8312 spin_lock(&adapter->fdir_perfect_lock); 8351 spin_lock(&adapter->fdir_perfect_lock);
8313 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc); 8352 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
@@ -8541,6 +8580,18 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
8541 if (!test_bit(link_uhtid - 1, &adapter->tables)) 8580 if (!test_bit(link_uhtid - 1, &adapter->tables))
8542 return err; 8581 return err;
8543 8582
8583 /* Multiple filters as links to the same hash table are not
8584 * supported. To add a new filter with the same next header
8585 * but different match/jump conditions, create a new hash table
8586 * and link to it.
8587 */
8588 if (adapter->jump_tables[link_uhtid] &&
8589 (adapter->jump_tables[link_uhtid])->link_hdl) {
8590 e_err(drv, "Link filter exists for link: %x\n",
8591 link_uhtid);
8592 return err;
8593 }
8594
8544 for (i = 0; nexthdr[i].jump; i++) { 8595 for (i = 0; nexthdr[i].jump; i++) {
8545 if (nexthdr[i].o != cls->knode.sel->offoff || 8596 if (nexthdr[i].o != cls->knode.sel->offoff ||
8546 nexthdr[i].s != cls->knode.sel->offshift || 8597 nexthdr[i].s != cls->knode.sel->offshift ||
@@ -8562,6 +8613,8 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
8562 } 8613 }
8563 jump->input = input; 8614 jump->input = input;
8564 jump->mask = mask; 8615 jump->mask = mask;
8616 jump->link_hdl = cls->knode.handle;
8617
8565 err = ixgbe_clsu32_build_input(input, mask, cls, 8618 err = ixgbe_clsu32_build_input(input, mask, cls,
8566 field_ptr, &nexthdr[i]); 8619 field_ptr, &nexthdr[i]);
8567 if (!err) { 8620 if (!err) {
@@ -8589,6 +8642,20 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
8589 if ((adapter->jump_tables[uhtid])->mask) 8642 if ((adapter->jump_tables[uhtid])->mask)
8590 memcpy(mask, (adapter->jump_tables[uhtid])->mask, 8643 memcpy(mask, (adapter->jump_tables[uhtid])->mask,
8591 sizeof(*mask)); 8644 sizeof(*mask));
8645
8646 /* Lookup in all child hash tables if this location is already
8647 * filled with a filter
8648 */
8649 for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
8650 struct ixgbe_jump_table *link = adapter->jump_tables[i];
8651
8652 if (link && (test_bit(loc - 1, link->child_loc_map))) {
8653 e_err(drv, "Filter exists in location: %x\n",
8654 loc);
8655 err = -EINVAL;
8656 goto err_out;
8657 }
8658 }
8592 } 8659 }
8593 err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL); 8660 err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
8594 if (err) 8661 if (err)
@@ -8620,6 +8687,9 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
8620 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); 8687 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
8621 spin_unlock(&adapter->fdir_perfect_lock); 8688 spin_unlock(&adapter->fdir_perfect_lock);
8622 8689
8690 if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
8691 set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);
8692
8623 kfree(mask); 8693 kfree(mask);
8624 return err; 8694 return err;
8625err_out_w_lock: 8695err_out_w_lock:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
index a8bed3d887f7..538a1c5475b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
@@ -42,8 +42,12 @@ struct ixgbe_jump_table {
42 struct ixgbe_mat_field *mat; 42 struct ixgbe_mat_field *mat;
43 struct ixgbe_fdir_filter *input; 43 struct ixgbe_fdir_filter *input;
44 union ixgbe_atr_input *mask; 44 union ixgbe_atr_input *mask;
45 u32 link_hdl;
46 unsigned long child_loc_map[32];
45}; 47};
46 48
49#define IXGBE_MAX_HW_ENTRIES 2045
50
47static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input, 51static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input,
48 union ixgbe_atr_input *mask, 52 union ixgbe_atr_input *mask,
49 u32 val, u32 m) 53 u32 val, u32 m)
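
Together with the ixgbe_main.c hunks above, each jump table now carries link_hdl, the u32 handle of the link filter that created it, and child_loc_map, a bitmap of hardware filter slots owned by its child hash table; deleting the link can then sweep exactly the children it owns. The bitmap bookkeeping, with remove_hw_filter() as an illustrative helper:

	/* Sketch: per-table bitmap of owned filter slots (1-based 'loc'). */

	/* on add: record ownership */
	set_bit(loc - 1, jump->child_loc_map);

	/* on single delete: slot must be owned before it can go */
	if (!test_bit(loc - 1, jump->child_loc_map))
		return -EINVAL;
	clear_bit(loc - 1, jump->child_loc_map);

	/* on deleting the link itself: sweep every owned slot */
	for_each_set_bit(j, jump->child_loc_map, IXGBE_MAX_HW_ENTRIES)
		remove_hw_filter(j + 1);
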
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index c5caacdd193d..8618599dfd6f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -954,6 +954,7 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
954 struct ixgbe_hw *hw = &adapter->hw; 954 struct ixgbe_hw *hw = &adapter->hw;
955 955
956 hw->mac.ops.set_mac_anti_spoofing(hw, false, vf); 956 hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
957 hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
957 } 958 }
958 } 959 }
959 960
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index 61a80da8b6f0..2819abc454c7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -85,7 +85,7 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
85static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) 85static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
86{ 86{
87 struct ixgbe_mbx_info *mbx = &hw->mbx; 87 struct ixgbe_mbx_info *mbx = &hw->mbx;
88 s32 ret_val = -IXGBE_ERR_MBX; 88 s32 ret_val = IXGBE_ERR_MBX;
89 89
90 if (!mbx->ops.read) 90 if (!mbx->ops.read)
91 goto out; 91 goto out;
@@ -111,7 +111,7 @@ out:
111static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) 111static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
112{ 112{
113 struct ixgbe_mbx_info *mbx = &hw->mbx; 113 struct ixgbe_mbx_info *mbx = &hw->mbx;
114 s32 ret_val = -IXGBE_ERR_MBX; 114 s32 ret_val = IXGBE_ERR_MBX;
115 115
116 /* exit if either we can't write or there isn't a defined timeout */ 116 /* exit if either we can't write or there isn't a defined timeout */
117 if (!mbx->ops.write || !mbx->timeout) 117 if (!mbx->ops.write || !mbx->timeout)
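
The mailbox error constants in this driver carry their sign in the definition, so the old `-IXGBE_ERR_MBX` initializer double-negated the value into a positive number that `ret < 0` checks treated as success; the fix keeps the defined value. The pitfall in two lines, assuming for illustration a definition like `#define ERR_MBX -100`:

	s32 bad = -ERR_MBX;  /* evaluates to +100: looks like success */
	s32 ok  = ERR_MBX;   /* keeps the intended negative error    */
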
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 0d2f8e934c59..91e09d68b7e2 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -102,7 +102,6 @@ struct ltq_etop_priv {
102 struct resource *res; 102 struct resource *res;
103 103
104 struct mii_bus *mii_bus; 104 struct mii_bus *mii_bus;
105 struct phy_device *phydev;
106 105
107 struct ltq_etop_chan ch[MAX_DMA_CHAN]; 106 struct ltq_etop_chan ch[MAX_DMA_CHAN];
108 int tx_free[MAX_DMA_CHAN >> 1]; 107 int tx_free[MAX_DMA_CHAN >> 1];
@@ -305,34 +304,16 @@ ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
305} 304}
306 305
307static int 306static int
308ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
309{
310 struct ltq_etop_priv *priv = netdev_priv(dev);
311
312 return phy_ethtool_gset(priv->phydev, cmd);
313}
314
315static int
316ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
317{
318 struct ltq_etop_priv *priv = netdev_priv(dev);
319
320 return phy_ethtool_sset(priv->phydev, cmd);
321}
322
323static int
324ltq_etop_nway_reset(struct net_device *dev) 307ltq_etop_nway_reset(struct net_device *dev)
325{ 308{
326 struct ltq_etop_priv *priv = netdev_priv(dev); 309 return phy_start_aneg(dev->phydev);
327
328 return phy_start_aneg(priv->phydev);
329} 310}
330 311
331static const struct ethtool_ops ltq_etop_ethtool_ops = { 312static const struct ethtool_ops ltq_etop_ethtool_ops = {
332 .get_drvinfo = ltq_etop_get_drvinfo, 313 .get_drvinfo = ltq_etop_get_drvinfo,
333 .get_settings = ltq_etop_get_settings,
334 .set_settings = ltq_etop_set_settings,
335 .nway_reset = ltq_etop_nway_reset, 314 .nway_reset = ltq_etop_nway_reset,
315 .get_link_ksettings = phy_ethtool_get_link_ksettings,
316 .set_link_ksettings = phy_ethtool_set_link_ksettings,
336}; 317};
337 318
338static int 319static int
@@ -401,7 +382,6 @@ ltq_etop_mdio_probe(struct net_device *dev)
401 | SUPPORTED_TP); 382 | SUPPORTED_TP);
402 383
403 phydev->advertising = phydev->supported; 384 phydev->advertising = phydev->supported;
404 priv->phydev = phydev;
405 phy_attached_info(phydev); 385 phy_attached_info(phydev);
406 386
407 return 0; 387 return 0;
@@ -450,7 +430,7 @@ ltq_etop_mdio_cleanup(struct net_device *dev)
450{ 430{
451 struct ltq_etop_priv *priv = netdev_priv(dev); 431 struct ltq_etop_priv *priv = netdev_priv(dev);
452 432
453 phy_disconnect(priv->phydev); 433 phy_disconnect(dev->phydev);
454 mdiobus_unregister(priv->mii_bus); 434 mdiobus_unregister(priv->mii_bus);
455 mdiobus_free(priv->mii_bus); 435 mdiobus_free(priv->mii_bus);
456} 436}
@@ -469,7 +449,7 @@ ltq_etop_open(struct net_device *dev)
469 ltq_dma_open(&ch->dma); 449 ltq_dma_open(&ch->dma);
470 napi_enable(&ch->napi); 450 napi_enable(&ch->napi);
471 } 451 }
472 phy_start(priv->phydev); 452 phy_start(dev->phydev);
473 netif_tx_start_all_queues(dev); 453 netif_tx_start_all_queues(dev);
474 return 0; 454 return 0;
475} 455}
@@ -481,7 +461,7 @@ ltq_etop_stop(struct net_device *dev)
481 int i; 461 int i;
482 462
483 netif_tx_stop_all_queues(dev); 463 netif_tx_stop_all_queues(dev);
484 phy_stop(priv->phydev); 464 phy_stop(dev->phydev);
485 for (i = 0; i < MAX_DMA_CHAN; i++) { 465 for (i = 0; i < MAX_DMA_CHAN; i++) {
486 struct ltq_etop_chan *ch = &priv->ch[i]; 466 struct ltq_etop_chan *ch = &priv->ch[i];
487 467
@@ -556,10 +536,8 @@ ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
556static int 536static int
557ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 537ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
558{ 538{
559 struct ltq_etop_priv *priv = netdev_priv(dev);
560
561 /* TODO: mii-toll reports "No MII transceiver present!." ?!*/ 539 /* TODO: mii-toll reports "No MII transceiver present!." ?!*/
562 return phy_mii_ioctl(priv->phydev, rq, cmd); 540 return phy_mii_ioctl(dev->phydev, rq, cmd);
563} 541}
564 542
565static int 543static int
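
With phylib publishing the attached PHY through net_device->phydev, the driver drops its private priv->phydev copy along with the hand-rolled get/set_settings pair, and delegates to the generic phy_ethtool_{get,set}_link_ksettings helpers; the mvpp2 hunks below make the same conversion. A sketch of the resulting ethtool ops:

	/* Sketch: link settings delegated to phylib via dev->phydev. */
	static int example_nway_reset(struct net_device *dev)
	{
		return phy_start_aneg(dev->phydev);  /* restart autoneg */
	}

	static const struct ethtool_ops example_ethtool_ops = {
		.nway_reset          = example_nway_reset,
		.get_link_ksettings  = phy_ethtool_get_link_ksettings,
		.set_link_ksettings  = phy_ethtool_set_link_ksettings,
	};
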
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a6d26d351dfc..d5d263bda333 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3458,6 +3458,8 @@ static int mvneta_open(struct net_device *dev)
3458 return 0; 3458 return 0;
3459 3459
3460err_free_irq: 3460err_free_irq:
3461 unregister_cpu_notifier(&pp->cpu_notifier);
3462 on_each_cpu(mvneta_percpu_disable, pp, true);
3461 free_percpu_irq(pp->dev->irq, pp->ports); 3463 free_percpu_irq(pp->dev->irq, pp->ports);
3462err_cleanup_txqs: 3464err_cleanup_txqs:
3463 mvneta_cleanup_txqs(pp); 3465 mvneta_cleanup_txqs(pp);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 868a957f24bb..0b047178cda1 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -699,7 +699,6 @@ struct mvpp2_port {
699 u16 rx_ring_size; 699 u16 rx_ring_size;
700 struct mvpp2_pcpu_stats __percpu *stats; 700 struct mvpp2_pcpu_stats __percpu *stats;
701 701
702 struct phy_device *phy_dev;
703 phy_interface_t phy_interface; 702 phy_interface_t phy_interface;
704 struct device_node *phy_node; 703 struct device_node *phy_node;
705 unsigned int link; 704 unsigned int link;
@@ -4850,7 +4849,7 @@ static irqreturn_t mvpp2_isr(int irq, void *dev_id)
4850static void mvpp2_link_event(struct net_device *dev) 4849static void mvpp2_link_event(struct net_device *dev)
4851{ 4850{
4852 struct mvpp2_port *port = netdev_priv(dev); 4851 struct mvpp2_port *port = netdev_priv(dev);
4853 struct phy_device *phydev = port->phy_dev; 4852 struct phy_device *phydev = dev->phydev;
4854 int status_change = 0; 4853 int status_change = 0;
4855 u32 val; 4854 u32 val;
4856 4855
@@ -5416,6 +5415,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
5416/* Set hw internals when starting port */ 5415/* Set hw internals when starting port */
5417static void mvpp2_start_dev(struct mvpp2_port *port) 5416static void mvpp2_start_dev(struct mvpp2_port *port)
5418{ 5417{
5418 struct net_device *ndev = port->dev;
5419
5419 mvpp2_gmac_max_rx_size_set(port); 5420 mvpp2_gmac_max_rx_size_set(port);
5420 mvpp2_txp_max_tx_size_set(port); 5421 mvpp2_txp_max_tx_size_set(port);
5421 5422
@@ -5425,13 +5426,15 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
5425 mvpp2_interrupts_enable(port); 5426 mvpp2_interrupts_enable(port);
5426 5427
5427 mvpp2_port_enable(port); 5428 mvpp2_port_enable(port);
5428 phy_start(port->phy_dev); 5429 phy_start(ndev->phydev);
5429 netif_tx_start_all_queues(port->dev); 5430 netif_tx_start_all_queues(port->dev);
5430} 5431}
5431 5432
5432/* Set hw internals when stopping port */ 5433/* Set hw internals when stopping port */
5433static void mvpp2_stop_dev(struct mvpp2_port *port) 5434static void mvpp2_stop_dev(struct mvpp2_port *port)
5434{ 5435{
5436 struct net_device *ndev = port->dev;
5437
5435 /* Stop new packets from arriving to RXQs */ 5438 /* Stop new packets from arriving to RXQs */
5436 mvpp2_ingress_disable(port); 5439 mvpp2_ingress_disable(port);
5437 5440
@@ -5447,7 +5450,7 @@ static void mvpp2_stop_dev(struct mvpp2_port *port)
5447 5450
5448 mvpp2_egress_disable(port); 5451 mvpp2_egress_disable(port);
5449 mvpp2_port_disable(port); 5452 mvpp2_port_disable(port);
5450 phy_stop(port->phy_dev); 5453 phy_stop(ndev->phydev);
5451} 5454}
5452 5455
5453/* Return positive if MTU is valid */ 5456/* Return positive if MTU is valid */
@@ -5535,7 +5538,6 @@ static int mvpp2_phy_connect(struct mvpp2_port *port)
5535 phy_dev->supported &= PHY_GBIT_FEATURES; 5538 phy_dev->supported &= PHY_GBIT_FEATURES;
5536 phy_dev->advertising = phy_dev->supported; 5539 phy_dev->advertising = phy_dev->supported;
5537 5540
5538 port->phy_dev = phy_dev;
5539 port->link = 0; 5541 port->link = 0;
5540 port->duplex = 0; 5542 port->duplex = 0;
5541 port->speed = 0; 5543 port->speed = 0;
@@ -5545,8 +5547,9 @@ static int mvpp2_phy_connect(struct mvpp2_port *port)
5545 5547
5546static void mvpp2_phy_disconnect(struct mvpp2_port *port) 5548static void mvpp2_phy_disconnect(struct mvpp2_port *port)
5547{ 5549{
5548 phy_disconnect(port->phy_dev); 5550 struct net_device *ndev = port->dev;
5549 port->phy_dev = NULL; 5551
5552 phy_disconnect(ndev->phydev);
5550} 5553}
5551 5554
5552static int mvpp2_open(struct net_device *dev) 5555static int mvpp2_open(struct net_device *dev)
@@ -5796,13 +5799,12 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5796 5799
5797static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 5800static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5798{ 5801{
5799 struct mvpp2_port *port = netdev_priv(dev);
5800 int ret; 5802 int ret;
5801 5803
5802 if (!port->phy_dev) 5804 if (!dev->phydev)
5803 return -ENOTSUPP; 5805 return -ENOTSUPP;
5804 5806
5805 ret = phy_mii_ioctl(port->phy_dev, ifr, cmd); 5807 ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
5806 if (!ret) 5808 if (!ret)
5807 mvpp2_link_event(dev); 5809 mvpp2_link_event(dev);
5808 5810
@@ -5811,28 +5813,6 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5811 5813
5812/* Ethtool methods */ 5814/* Ethtool methods */
5813 5815
5814/* Get settings (phy address, speed) for ethtools */
5815static int mvpp2_ethtool_get_settings(struct net_device *dev,
5816 struct ethtool_cmd *cmd)
5817{
5818 struct mvpp2_port *port = netdev_priv(dev);
5819
5820 if (!port->phy_dev)
5821 return -ENODEV;
5822 return phy_ethtool_gset(port->phy_dev, cmd);
5823}
5824
5825/* Set settings (phy address, speed) for ethtools */
5826static int mvpp2_ethtool_set_settings(struct net_device *dev,
5827 struct ethtool_cmd *cmd)
5828{
5829 struct mvpp2_port *port = netdev_priv(dev);
5830
5831 if (!port->phy_dev)
5832 return -ENODEV;
5833 return phy_ethtool_sset(port->phy_dev, cmd);
5834}
5835
5836/* Set interrupt coalescing for ethtools */ 5816/* Set interrupt coalescing for ethtools */
5837static int mvpp2_ethtool_set_coalesce(struct net_device *dev, 5817static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
5838 struct ethtool_coalesce *c) 5818 struct ethtool_coalesce *c)
@@ -5967,13 +5947,13 @@ static const struct net_device_ops mvpp2_netdev_ops = {
5967 5947
5968static const struct ethtool_ops mvpp2_eth_tool_ops = { 5948static const struct ethtool_ops mvpp2_eth_tool_ops = {
5969 .get_link = ethtool_op_get_link, 5949 .get_link = ethtool_op_get_link,
5970 .get_settings = mvpp2_ethtool_get_settings,
5971 .set_settings = mvpp2_ethtool_set_settings,
5972 .set_coalesce = mvpp2_ethtool_set_coalesce, 5950 .set_coalesce = mvpp2_ethtool_set_coalesce,
5973 .get_coalesce = mvpp2_ethtool_get_coalesce, 5951 .get_coalesce = mvpp2_ethtool_get_coalesce,
5974 .get_drvinfo = mvpp2_ethtool_get_drvinfo, 5952 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
5975 .get_ringparam = mvpp2_ethtool_get_ringparam, 5953 .get_ringparam = mvpp2_ethtool_get_ringparam,
5976 .set_ringparam = mvpp2_ethtool_set_ringparam, 5954 .set_ringparam = mvpp2_ethtool_set_ringparam,
5955 .get_link_ksettings = phy_ethtool_get_link_ksettings,
5956 .set_link_ksettings = phy_ethtool_set_link_ksettings,
5977}; 5957};
5978 5958
5979/* Driver initialization */ 5959/* Driver initialization */
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 4763252bbf85..760f3d71eda3 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -328,22 +328,24 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
328 328
329static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) 329static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
330{ 330{
331 unsigned long flags;
331 u32 val; 332 u32 val;
332 333
334 spin_lock_irqsave(&eth->irq_lock, flags);
333 val = mtk_r32(eth, MTK_QDMA_INT_MASK); 335 val = mtk_r32(eth, MTK_QDMA_INT_MASK);
334 mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK); 336 mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
335 /* flush write */ 337 spin_unlock_irqrestore(&eth->irq_lock, flags);
336 mtk_r32(eth, MTK_QDMA_INT_MASK);
337} 338}
338 339
339static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask) 340static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
340{ 341{
342 unsigned long flags;
341 u32 val; 343 u32 val;
342 344
345 spin_lock_irqsave(&eth->irq_lock, flags);
343 val = mtk_r32(eth, MTK_QDMA_INT_MASK); 346 val = mtk_r32(eth, MTK_QDMA_INT_MASK);
344 mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK); 347 mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
345 /* flush write */ 348 spin_unlock_irqrestore(&eth->irq_lock, flags);
346 mtk_r32(eth, MTK_QDMA_INT_MASK);
347} 349}
348 350
349static int mtk_set_mac_address(struct net_device *dev, void *p) 351static int mtk_set_mac_address(struct net_device *dev, void *p)
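
Once the TX and RX completion paths run in separate NAPI contexts (see the later hunks), the QDMA interrupt mask becomes a register updated concurrently from both, so its read-modify-write is serialised with the new irq_lock rather than relying on a flushing read. The locked RMW in isolation, with reg_read()/reg_write() standing in for mtk_r32()/mtk_w32():

	/* Sketch: serialised read-modify-write of a shared mask register. */
	static void mask_irq(struct dev_ctx *ctx, u32 mask)
	{
		unsigned long flags;
		u32 val;

		spin_lock_irqsave(&ctx->irq_lock, flags);
		val = reg_read(ctx, INT_MASK_REG);
		reg_write(ctx, val & ~mask, INT_MASK_REG);
		spin_unlock_irqrestore(&ctx->irq_lock, flags);
	}
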
@@ -481,20 +483,23 @@ static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
481/* the qdma core needs scratch memory to be setup */ 483/* the qdma core needs scratch memory to be setup */
482static int mtk_init_fq_dma(struct mtk_eth *eth) 484static int mtk_init_fq_dma(struct mtk_eth *eth)
483{ 485{
484 dma_addr_t phy_ring_head, phy_ring_tail; 486 dma_addr_t phy_ring_tail;
485 int cnt = MTK_DMA_SIZE; 487 int cnt = MTK_DMA_SIZE;
486 dma_addr_t dma_addr; 488 dma_addr_t dma_addr;
487 int i; 489 int i;
488 490
489 eth->scratch_ring = dma_alloc_coherent(eth->dev, 491 eth->scratch_ring = dma_alloc_coherent(eth->dev,
490 cnt * sizeof(struct mtk_tx_dma), 492 cnt * sizeof(struct mtk_tx_dma),
491 &phy_ring_head, 493 &eth->phy_scratch_ring,
492 GFP_ATOMIC | __GFP_ZERO); 494 GFP_ATOMIC | __GFP_ZERO);
493 if (unlikely(!eth->scratch_ring)) 495 if (unlikely(!eth->scratch_ring))
494 return -ENOMEM; 496 return -ENOMEM;
495 497
496 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, 498 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
497 GFP_KERNEL); 499 GFP_KERNEL);
500 if (unlikely(!eth->scratch_head))
501 return -ENOMEM;
502
498 dma_addr = dma_map_single(eth->dev, 503 dma_addr = dma_map_single(eth->dev,
499 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, 504 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
500 DMA_FROM_DEVICE); 505 DMA_FROM_DEVICE);
@@ -502,19 +507,19 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
502 return -ENOMEM; 507 return -ENOMEM;
503 508
504 memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt); 509 memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
505 phy_ring_tail = phy_ring_head + 510 phy_ring_tail = eth->phy_scratch_ring +
506 (sizeof(struct mtk_tx_dma) * (cnt - 1)); 511 (sizeof(struct mtk_tx_dma) * (cnt - 1));
507 512
508 for (i = 0; i < cnt; i++) { 513 for (i = 0; i < cnt; i++) {
509 eth->scratch_ring[i].txd1 = 514 eth->scratch_ring[i].txd1 =
510 (dma_addr + (i * MTK_QDMA_PAGE_SIZE)); 515 (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
511 if (i < cnt - 1) 516 if (i < cnt - 1)
512 eth->scratch_ring[i].txd2 = (phy_ring_head + 517 eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
513 ((i + 1) * sizeof(struct mtk_tx_dma))); 518 ((i + 1) * sizeof(struct mtk_tx_dma)));
514 eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE); 519 eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
515 } 520 }
516 521
517 mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD); 522 mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
518 mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL); 523 mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
519 mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT); 524 mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
520 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN); 525 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
@@ -671,7 +676,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
671 676
672err_dma: 677err_dma:
673 do { 678 do {
674 tx_buf = mtk_desc_to_tx_buf(ring, txd); 679 tx_buf = mtk_desc_to_tx_buf(ring, itxd);
675 680
676 /* unmap dma */ 681 /* unmap dma */
677 mtk_tx_unmap(&dev->dev, tx_buf); 682 mtk_tx_unmap(&dev->dev, tx_buf);
@@ -701,6 +706,20 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
701 return nfrags; 706 return nfrags;
702} 707}
703 708
709static int mtk_queue_stopped(struct mtk_eth *eth)
710{
711 int i;
712
713 for (i = 0; i < MTK_MAC_COUNT; i++) {
714 if (!eth->netdev[i])
715 continue;
716 if (netif_queue_stopped(eth->netdev[i]))
717 return 1;
718 }
719
720 return 0;
721}
722
704static void mtk_wake_queue(struct mtk_eth *eth) 723static void mtk_wake_queue(struct mtk_eth *eth)
705{ 724{
706 int i; 725 int i;
@@ -766,12 +785,9 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
766 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0) 785 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
767 goto drop; 786 goto drop;
768 787
769 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) { 788 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
770 mtk_stop_queue(eth); 789 mtk_stop_queue(eth);
771 if (unlikely(atomic_read(&ring->free_count) > 790
772 ring->thresh))
773 mtk_wake_queue(eth);
774 }
775 spin_unlock_irqrestore(&eth->page_lock, flags); 791 spin_unlock_irqrestore(&eth->page_lock, flags);
776 792
777 return NETDEV_TX_OK; 793 return NETDEV_TX_OK;
@@ -784,7 +800,7 @@ drop:
784} 800}
785 801
786static int mtk_poll_rx(struct napi_struct *napi, int budget, 802static int mtk_poll_rx(struct napi_struct *napi, int budget,
787 struct mtk_eth *eth, u32 rx_intr) 803 struct mtk_eth *eth)
788{ 804{
789 struct mtk_rx_ring *ring = &eth->rx_ring; 805 struct mtk_rx_ring *ring = &eth->rx_ring;
790 int idx = ring->calc_idx; 806 int idx = ring->calc_idx;
@@ -826,6 +842,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
826 DMA_FROM_DEVICE); 842 DMA_FROM_DEVICE);
827 if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) { 843 if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
828 skb_free_frag(new_data); 844 skb_free_frag(new_data);
845 netdev->stats.rx_dropped++;
829 goto release_desc; 846 goto release_desc;
830 } 847 }
831 848
@@ -833,6 +850,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
833 skb = build_skb(data, ring->frag_size); 850 skb = build_skb(data, ring->frag_size);
834 if (unlikely(!skb)) { 851 if (unlikely(!skb)) {
835 put_page(virt_to_head_page(new_data)); 852 put_page(virt_to_head_page(new_data));
853 netdev->stats.rx_dropped++;
836 goto release_desc; 854 goto release_desc;
837 } 855 }
838 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); 856 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
@@ -870,22 +888,22 @@ release_desc:
870 } 888 }
871 889
872 if (done < budget) 890 if (done < budget)
873 mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS); 891 mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
874 892
875 return done; 893 return done;
876} 894}
877 895
878static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again) 896static int mtk_poll_tx(struct mtk_eth *eth, int budget)
879{ 897{
880 struct mtk_tx_ring *ring = &eth->tx_ring; 898 struct mtk_tx_ring *ring = &eth->tx_ring;
881 struct mtk_tx_dma *desc; 899 struct mtk_tx_dma *desc;
882 struct sk_buff *skb; 900 struct sk_buff *skb;
883 struct mtk_tx_buf *tx_buf; 901 struct mtk_tx_buf *tx_buf;
884 int total = 0, done[MTK_MAX_DEVS]; 902 unsigned int done[MTK_MAX_DEVS];
885 unsigned int bytes[MTK_MAX_DEVS]; 903 unsigned int bytes[MTK_MAX_DEVS];
886 u32 cpu, dma; 904 u32 cpu, dma;
887 static int condition; 905 static int condition;
888 int i; 906 int total = 0, i;
889 907
890 memset(done, 0, sizeof(done)); 908 memset(done, 0, sizeof(done));
891 memset(bytes, 0, sizeof(bytes)); 909 memset(bytes, 0, sizeof(bytes));
@@ -921,7 +939,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
921 } 939 }
922 mtk_tx_unmap(eth->dev, tx_buf); 940 mtk_tx_unmap(eth->dev, tx_buf);
923 941
924 ring->last_free->txd2 = next_cpu;
925 ring->last_free = desc; 942 ring->last_free = desc;
926 atomic_inc(&ring->free_count); 943 atomic_inc(&ring->free_count);
927 944
@@ -937,64 +954,82 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
937 total += done[i]; 954 total += done[i];
938 } 955 }
939 956
940 /* read hw index again make sure no new tx packet */ 957 if (mtk_queue_stopped(eth) &&
941 if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR)) 958 (atomic_read(&ring->free_count) > ring->thresh))
942 *tx_again = true;
943 else
944 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
945
946 if (!total)
947 return 0;
948
949 if (atomic_read(&ring->free_count) > ring->thresh)
950 mtk_wake_queue(eth); 959 mtk_wake_queue(eth);
951 960
952 return total; 961 return total;
953} 962}
954 963
955static int mtk_poll(struct napi_struct *napi, int budget) 964static void mtk_handle_status_irq(struct mtk_eth *eth)
956{ 965{
957 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); 966 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
958 u32 status, status2, mask, tx_intr, rx_intr, status_intr;
959 int tx_done, rx_done;
960 bool tx_again = false;
961 967
962 status = mtk_r32(eth, MTK_QMTK_INT_STATUS); 968 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
963 status2 = mtk_r32(eth, MTK_INT_STATUS2); 969 mtk_stats_update(eth);
964 tx_intr = MTK_TX_DONE_INT; 970 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
965 rx_intr = MTK_RX_DONE_INT; 971 MTK_INT_STATUS2);
966 status_intr = (MTK_GDM1_AF | MTK_GDM2_AF); 972 }
967 tx_done = 0; 973}
968 rx_done = 0;
969 tx_again = 0;
970 974
971 if (status & tx_intr) 975static int mtk_napi_tx(struct napi_struct *napi, int budget)
972 tx_done = mtk_poll_tx(eth, budget, &tx_again); 976{
977 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
978 u32 status, mask;
979 int tx_done = 0;
973 980
974 if (status & rx_intr) 981 mtk_handle_status_irq(eth);
975 rx_done = mtk_poll_rx(napi, budget, eth, rx_intr); 982 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
983 tx_done = mtk_poll_tx(eth, budget);
976 984
977 if (unlikely(status2 & status_intr)) { 985 if (unlikely(netif_msg_intr(eth))) {
978 mtk_stats_update(eth); 986 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
979 mtk_w32(eth, status_intr, MTK_INT_STATUS2); 987 mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
988 dev_info(eth->dev,
989 "done tx %d, intr 0x%08x/0x%x\n",
990 tx_done, status, mask);
980 } 991 }
981 992
993 if (tx_done == budget)
994 return budget;
995
996 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
997 if (status & MTK_TX_DONE_INT)
998 return budget;
999
1000 napi_complete(napi);
1001 mtk_irq_enable(eth, MTK_TX_DONE_INT);
1002
1003 return tx_done;
1004}
1005
1006static int mtk_napi_rx(struct napi_struct *napi, int budget)
1007{
1008 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
1009 u32 status, mask;
1010 int rx_done = 0;
1011
1012 mtk_handle_status_irq(eth);
1013 mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
1014 rx_done = mtk_poll_rx(napi, budget, eth);
1015
982 if (unlikely(netif_msg_intr(eth))) { 1016 if (unlikely(netif_msg_intr(eth))) {
1017 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
983 mask = mtk_r32(eth, MTK_QDMA_INT_MASK); 1018 mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
984 netdev_info(eth->netdev[0], 1019 dev_info(eth->dev,
985 "done tx %d, rx %d, intr 0x%08x/0x%x\n", 1020 "done rx %d, intr 0x%08x/0x%x\n",
986 tx_done, rx_done, status, mask); 1021 rx_done, status, mask);
987 } 1022 }
988 1023
989 if (tx_again || rx_done == budget) 1024 if (rx_done == budget)
990 return budget; 1025 return budget;
991 1026
992 status = mtk_r32(eth, MTK_QMTK_INT_STATUS); 1027 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
993 if (status & (tx_intr | rx_intr)) 1028 if (status & MTK_RX_DONE_INT)
994 return budget; 1029 return budget;
995 1030
996 napi_complete(napi); 1031 napi_complete(napi);
997 mtk_irq_enable(eth, tx_intr | rx_intr); 1032 mtk_irq_enable(eth, MTK_RX_DONE_INT);
998 1033
999 return rx_done; 1034 return rx_done;
1000} 1035}
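
The combined poller is split into mtk_napi_tx() and mtk_napi_rx(): each acks only its own status bit, walks only its own ring, and re-enables only its own interrupt once under budget, re-reading the status register before napi_complete() so an event that raced in after the ring walk keeps the poller scheduled. The shape of one such poller, with generic names:

	/* Sketch: per-direction NAPI poll with ack / complete / re-enable. */
	static int example_napi_rx(struct napi_struct *napi, int budget)
	{
		struct dev_ctx *ctx = container_of(napi, struct dev_ctx, rx_napi);
		int done;

		reg_write(ctx, RX_DONE_INT, INT_STATUS_REG); /* ack our bit */
		done = poll_rx_ring(ctx, budget);

		if (done == budget)
			return budget;                       /* keep polling */
		if (reg_read(ctx, INT_STATUS_REG) & RX_DONE_INT)
			return budget;                       /* work raced in */

		napi_complete(napi);
		irq_unmask(ctx, RX_DONE_INT);                /* IRQ mode again */
		return done;
	}
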
@@ -1027,9 +1062,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
1027 1062
1028 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2); 1063 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
1029 ring->next_free = &ring->dma[0]; 1064 ring->next_free = &ring->dma[0];
1030 ring->last_free = &ring->dma[MTK_DMA_SIZE - 2]; 1065 ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
1031 ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2, 1066 ring->thresh = MAX_SKB_FRAGS;
1032 MAX_SKB_FRAGS);
1033 1067
1034 /* make sure that all changes to the dma ring are flushed before we 1068 /* make sure that all changes to the dma ring are flushed before we
1035 * continue 1069 * continue
@@ -1207,6 +1241,14 @@ static void mtk_dma_free(struct mtk_eth *eth)
1207 for (i = 0; i < MTK_MAC_COUNT; i++) 1241 for (i = 0; i < MTK_MAC_COUNT; i++)
1208 if (eth->netdev[i]) 1242 if (eth->netdev[i])
1209 netdev_reset_queue(eth->netdev[i]); 1243 netdev_reset_queue(eth->netdev[i]);
1244 if (eth->scratch_ring) {
1245 dma_free_coherent(eth->dev,
1246 MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
1247 eth->scratch_ring,
1248 eth->phy_scratch_ring);
1249 eth->scratch_ring = NULL;
1250 eth->phy_scratch_ring = 0;
1251 }
1210 mtk_tx_clean(eth); 1252 mtk_tx_clean(eth);
1211 mtk_rx_clean(eth); 1253 mtk_rx_clean(eth);
1212 kfree(eth->scratch_head); 1254 kfree(eth->scratch_head);
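
The added block in this hunk plugs a leak: the coherent scratch ring allocated for the hardware-managed TX ring was never returned. A sketch of the idiom follows, with hypothetical my_* names, assuming the size and DMA handle were recorded at dma_alloc_coherent() time; clearing both handles makes a repeated teardown call a no-op.

	#include <linux/dma-mapping.h>

	struct my_dma_ctx {
		struct device *dev;
		void *scratch_ring;		/* CPU address */
		dma_addr_t phy_scratch_ring;	/* device address */
		size_t scratch_sz;
	};

	static void my_free_scratch(struct my_dma_ctx *c)
	{
		if (!c->scratch_ring)
			return;
		dma_free_coherent(c->dev, c->scratch_sz,
				  c->scratch_ring, c->phy_scratch_ring);
		/* clear both handles so a second teardown call is safe */
		c->scratch_ring = NULL;
		c->phy_scratch_ring = 0;
	}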
@@ -1223,22 +1265,26 @@ static void mtk_tx_timeout(struct net_device *dev)
1223 schedule_work(&eth->pending_work); 1265 schedule_work(&eth->pending_work);
1224} 1266}
1225 1267
1226static irqreturn_t mtk_handle_irq(int irq, void *_eth) 1268static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
1227{ 1269{
1228 struct mtk_eth *eth = _eth; 1270 struct mtk_eth *eth = _eth;
1229 u32 status;
1230 1271
1231 status = mtk_r32(eth, MTK_QMTK_INT_STATUS); 1272 if (likely(napi_schedule_prep(&eth->rx_napi))) {
1232 if (unlikely(!status)) 1273 __napi_schedule(&eth->rx_napi);
1233 return IRQ_NONE; 1274 mtk_irq_disable(eth, MTK_RX_DONE_INT);
1275 }
1234 1276
1235 if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) { 1277 return IRQ_HANDLED;
1236 if (likely(napi_schedule_prep(&eth->rx_napi))) 1278}
1237 __napi_schedule(&eth->rx_napi); 1279
1238 } else { 1280static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
1239 mtk_w32(eth, status, MTK_QMTK_INT_STATUS); 1281{
1282 struct mtk_eth *eth = _eth;
1283
1284 if (likely(napi_schedule_prep(&eth->tx_napi))) {
1285 __napi_schedule(&eth->tx_napi);
1286 mtk_irq_disable(eth, MTK_TX_DONE_INT);
1240 } 1287 }
1241 mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));
1242 1288
1243 return IRQ_HANDLED; 1289 return IRQ_HANDLED;
1244} 1290}
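
The two top halves above follow the standard NAPI hand-off: schedule the poller and mask only the interrupt source being polled, leaving the other direction live. A hedged sketch of the same shape, with my_* identifiers standing in for the driver's helpers:

	#include <linux/netdevice.h>
	#include <linux/interrupt.h>
	#include <linux/bits.h>

	#define MY_TX_DONE_INT BIT(0)	/* hypothetical mask */

	struct my_eth {
		struct napi_struct tx_napi;
	};

	void my_irq_disable(struct my_eth *eth, u32 mask);	/* hypothetical */

	static irqreturn_t my_handle_irq_tx(int irq, void *data)
	{
		struct my_eth *eth = data;

		if (likely(napi_schedule_prep(&eth->tx_napi))) {
			__napi_schedule(&eth->tx_napi);
			/* mask TX completions only; RX keeps interrupting */
			my_irq_disable(eth, MY_TX_DONE_INT);
		}
		return IRQ_HANDLED;
	}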
@@ -1251,7 +1297,7 @@ static void mtk_poll_controller(struct net_device *dev)
1251 u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT; 1297 u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
1252 1298
1253 mtk_irq_disable(eth, int_mask); 1299 mtk_irq_disable(eth, int_mask);
1254 mtk_handle_irq(dev->irq, dev); 1300 mtk_handle_irq_rx(eth->irq[2], dev);
1255 mtk_irq_enable(eth, int_mask); 1301 mtk_irq_enable(eth, int_mask);
1256} 1302}
1257#endif 1303#endif
@@ -1269,7 +1315,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
1269 mtk_w32(eth, 1315 mtk_w32(eth,
1270 MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN | 1316 MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
1271 MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS | 1317 MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
1272 MTK_RX_BT_32DWORDS, 1318 MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
1273 MTK_QDMA_GLO_CFG); 1319 MTK_QDMA_GLO_CFG);
1274 1320
1275 return 0; 1321 return 0;
@@ -1287,6 +1333,7 @@ static int mtk_open(struct net_device *dev)
1287 if (err) 1333 if (err)
1288 return err; 1334 return err;
1289 1335
1336 napi_enable(&eth->tx_napi);
1290 napi_enable(&eth->rx_napi); 1337 napi_enable(&eth->rx_napi);
1291 mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); 1338 mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
1292 } 1339 }
@@ -1335,6 +1382,7 @@ static int mtk_stop(struct net_device *dev)
1335 return 0; 1382 return 0;
1336 1383
1337 mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); 1384 mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
1385 napi_disable(&eth->tx_napi);
1338 napi_disable(&eth->rx_napi); 1386 napi_disable(&eth->rx_napi);
1339 1387
1340 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); 1388 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
@@ -1372,7 +1420,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
1372 /* Enable RX VLan Offloading */ 1420 /* Enable RX VLan Offloading */
1373 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); 1421 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
1374 1422
1375 err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0, 1423 err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
1424 dev_name(eth->dev), eth);
1425 if (err)
1426 return err;
1427 err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
1376 dev_name(eth->dev), eth); 1428 dev_name(eth->dev), eth);
1377 if (err) 1429 if (err)
1378 return err; 1430 return err;
@@ -1383,12 +1435,16 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
1383 1435
1384 /* disable delay and normal interrupt */ 1436 /* disable delay and normal interrupt */
1385 mtk_w32(eth, 0, MTK_QDMA_DELAY_INT); 1437 mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
1386 mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); 1438 mtk_irq_disable(eth, ~0);
1387 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); 1439 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
1388 mtk_w32(eth, 0, MTK_RST_GL); 1440 mtk_w32(eth, 0, MTK_RST_GL);
1389 1441
1390 /* FE int grouping */ 1442 /* FE int grouping */
1391 mtk_w32(eth, 0, MTK_FE_INT_GRP); 1443 mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
1444 mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
1445 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
1446 mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
1447 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
1392 1448
1393 for (i = 0; i < 2; i++) { 1449 for (i = 0; i < 2; i++) {
1394 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); 1450 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
@@ -1436,7 +1492,8 @@ static void mtk_uninit(struct net_device *dev)
1436 phy_disconnect(mac->phy_dev); 1492 phy_disconnect(mac->phy_dev);
1437 mtk_mdio_cleanup(eth); 1493 mtk_mdio_cleanup(eth);
1438 mtk_irq_disable(eth, ~0); 1494 mtk_irq_disable(eth, ~0);
1439 free_irq(dev->irq, dev); 1495 free_irq(eth->irq[1], dev);
1496 free_irq(eth->irq[2], dev);
1440} 1497}
1441 1498
1442static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1499static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1697,7 +1754,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
1697 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; 1754 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
1698 1755
1699 SET_NETDEV_DEV(eth->netdev[id], eth->dev); 1756 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
1700 eth->netdev[id]->watchdog_timeo = HZ; 1757 eth->netdev[id]->watchdog_timeo = 5 * HZ;
1701 eth->netdev[id]->netdev_ops = &mtk_netdev_ops; 1758 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
1702 eth->netdev[id]->base_addr = (unsigned long)eth->base; 1759 eth->netdev[id]->base_addr = (unsigned long)eth->base;
1703 eth->netdev[id]->vlan_features = MTK_HW_FEATURES & 1760 eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
@@ -1710,10 +1767,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
1710 dev_err(eth->dev, "error bringing up device\n"); 1767 dev_err(eth->dev, "error bringing up device\n");
1711 goto free_netdev; 1768 goto free_netdev;
1712 } 1769 }
1713 eth->netdev[id]->irq = eth->irq; 1770 eth->netdev[id]->irq = eth->irq[0];
1714 netif_info(eth, probe, eth->netdev[id], 1771 netif_info(eth, probe, eth->netdev[id],
1715 "mediatek frame engine at 0x%08lx, irq %d\n", 1772 "mediatek frame engine at 0x%08lx, irq %d\n",
1716 eth->netdev[id]->base_addr, eth->netdev[id]->irq); 1773 eth->netdev[id]->base_addr, eth->irq[0]);
1717 1774
1718 return 0; 1775 return 0;
1719 1776
@@ -1730,6 +1787,7 @@ static int mtk_probe(struct platform_device *pdev)
1730 struct mtk_soc_data *soc; 1787 struct mtk_soc_data *soc;
1731 struct mtk_eth *eth; 1788 struct mtk_eth *eth;
1732 int err; 1789 int err;
1790 int i;
1733 1791
1734 match = of_match_device(of_mtk_match, &pdev->dev); 1792 match = of_match_device(of_mtk_match, &pdev->dev);
1735 soc = (struct mtk_soc_data *)match->data; 1793 soc = (struct mtk_soc_data *)match->data;
@@ -1743,6 +1801,7 @@ static int mtk_probe(struct platform_device *pdev)
1743 return PTR_ERR(eth->base); 1801 return PTR_ERR(eth->base);
1744 1802
1745 spin_lock_init(&eth->page_lock); 1803 spin_lock_init(&eth->page_lock);
1804 spin_lock_init(&eth->irq_lock);
1746 1805
1747 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, 1806 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
1748 "mediatek,ethsys"); 1807 "mediatek,ethsys");
@@ -1764,10 +1823,12 @@ static int mtk_probe(struct platform_device *pdev)
1764 return PTR_ERR(eth->rstc); 1823 return PTR_ERR(eth->rstc);
1765 } 1824 }
1766 1825
1767 eth->irq = platform_get_irq(pdev, 0); 1826 for (i = 0; i < 3; i++) {
1768 if (eth->irq < 0) { 1827 eth->irq[i] = platform_get_irq(pdev, i);
1769 dev_err(&pdev->dev, "no IRQ resource found\n"); 1828 if (eth->irq[i] < 0) {
1770 return -ENXIO; 1829 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
1830 return -ENXIO;
1831 }
1771 } 1832 }
1772 1833
1773 eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif"); 1834 eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
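
Probe-time note: with three interrupt lines instead of one, the lookup becomes a loop and any missing resource fails the probe early. A direct sketch of that change (platform_get_irq() and dev_err() are the real APIs; my_get_irqs is illustrative):

	#include <linux/platform_device.h>
	#include <linux/errno.h>

	static int my_get_irqs(struct platform_device *pdev, int *irqs, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			irqs[i] = platform_get_irq(pdev, i);
			if (irqs[i] < 0) {
				dev_err(&pdev->dev,
					"no IRQ%d resource found\n", i);
				return -ENXIO;
			}
		}
		return 0;
	}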
@@ -1808,7 +1869,9 @@ static int mtk_probe(struct platform_device *pdev)
1808 * for NAPI to work 1869 * for NAPI to work
1809 */ 1870 */
1810 init_dummy_netdev(&eth->dummy_dev); 1871 init_dummy_netdev(&eth->dummy_dev);
1811 netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll, 1872 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
1873 MTK_NAPI_WEIGHT);
1874 netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
1812 MTK_NAPI_WEIGHT); 1875 MTK_NAPI_WEIGHT);
1813 1876
1814 platform_set_drvdata(pdev, eth); 1877 platform_set_drvdata(pdev, eth);
@@ -1829,6 +1892,7 @@ static int mtk_remove(struct platform_device *pdev)
1829 clk_disable_unprepare(eth->clk_gp1); 1892 clk_disable_unprepare(eth->clk_gp1);
1830 clk_disable_unprepare(eth->clk_gp2); 1893 clk_disable_unprepare(eth->clk_gp2);
1831 1894
1895 netif_napi_del(&eth->tx_napi);
1832 netif_napi_del(&eth->rx_napi); 1896 netif_napi_del(&eth->rx_napi);
1833 mtk_cleanup(eth); 1897 mtk_cleanup(eth);
1834 platform_set_drvdata(pdev, NULL); 1898 platform_set_drvdata(pdev, NULL);
@@ -1846,7 +1910,6 @@ static struct platform_driver mtk_driver = {
1846 .remove = mtk_remove, 1910 .remove = mtk_remove,
1847 .driver = { 1911 .driver = {
1848 .name = "mtk_soc_eth", 1912 .name = "mtk_soc_eth",
1849 .owner = THIS_MODULE,
1850 .of_match_table = of_mtk_match, 1913 .of_match_table = of_mtk_match,
1851 }, 1914 },
1852}; 1915};
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index eed626d56ea4..f82e3acb947b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -68,6 +68,10 @@
68/* Unicast Filter MAC Address Register - High */ 68/* Unicast Filter MAC Address Register - High */
69#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000)) 69#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
70 70
71/* PDMA Interrupt grouping registers */
72#define MTK_PDMA_INT_GRP1 0xa50
73#define MTK_PDMA_INT_GRP2 0xa54
74
71/* QDMA TX Queue Configuration Registers */ 75/* QDMA TX Queue Configuration Registers */
72#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10)) 76#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
73#define QDMA_RES_THRES 4 77#define QDMA_RES_THRES 4
@@ -91,6 +95,7 @@
91#define MTK_QDMA_GLO_CFG 0x1A04 95#define MTK_QDMA_GLO_CFG 0x1A04
92#define MTK_RX_2B_OFFSET BIT(31) 96#define MTK_RX_2B_OFFSET BIT(31)
93#define MTK_RX_BT_32DWORDS (3 << 11) 97#define MTK_RX_BT_32DWORDS (3 << 11)
98#define MTK_NDP_CO_PRO BIT(10)
94#define MTK_TX_WB_DDONE BIT(6) 99#define MTK_TX_WB_DDONE BIT(6)
95#define MTK_DMA_SIZE_16DWORDS (2 << 4) 100#define MTK_DMA_SIZE_16DWORDS (2 << 4)
96#define MTK_RX_DMA_BUSY BIT(3) 101#define MTK_RX_DMA_BUSY BIT(3)
@@ -124,6 +129,11 @@
124#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \ 129#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
125 MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3) 130 MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
126 131
132/* QDMA Interrupt grouping registers */
133#define MTK_QDMA_INT_GRP1 0x1a20
134#define MTK_QDMA_INT_GRP2 0x1a24
135#define MTK_RLS_DONE_INT BIT(0)
136
127/* QDMA Interrupt Status Register */ 137/* QDMA Interrupt Status Register */
128#define MTK_QDMA_INT_MASK 0x1A1C 138#define MTK_QDMA_INT_MASK 0x1A1C
129 139
@@ -355,8 +365,10 @@ struct mtk_rx_ring {
355 * @dma_refcnt: track how many netdevs are using the DMA engine 365 * @dma_refcnt: track how many netdevs are using the DMA engine
356 * @tx_ring: Pointer to the memore holding info about the TX ring 366 * @tx_ring: Pointer to the memore holding info about the TX ring
357 * @rx_ring: Pointer to the memore holding info about the RX ring 367 * @rx_ring: Pointer to the memore holding info about the RX ring
358 * @rx_napi: The NAPI struct 368 * @tx_napi: The TX NAPI struct
369 * @rx_napi: The RX NAPI struct
359 * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring 370 * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
371 * @phy_scratch_ring: physical address of scratch_ring
360 * @scratch_head: The scratch memory that scratch_ring points to. 372 * @scratch_head: The scratch memory that scratch_ring points to.
361 * @clk_ethif: The ethif clock 373 * @clk_ethif: The ethif clock
362 * @clk_esw: The switch clock 374 * @clk_esw: The switch clock
@@ -371,10 +383,11 @@ struct mtk_eth {
371 void __iomem *base; 383 void __iomem *base;
372 struct reset_control *rstc; 384 struct reset_control *rstc;
373 spinlock_t page_lock; 385 spinlock_t page_lock;
386 spinlock_t irq_lock;
374 struct net_device dummy_dev; 387 struct net_device dummy_dev;
375 struct net_device *netdev[MTK_MAX_DEVS]; 388 struct net_device *netdev[MTK_MAX_DEVS];
376 struct mtk_mac *mac[MTK_MAX_DEVS]; 389 struct mtk_mac *mac[MTK_MAX_DEVS];
377 int irq; 390 int irq[3];
378 u32 msg_enable; 391 u32 msg_enable;
379 unsigned long sysclk; 392 unsigned long sysclk;
380 struct regmap *ethsys; 393 struct regmap *ethsys;
@@ -382,8 +395,10 @@ struct mtk_eth {
382 atomic_t dma_refcnt; 395 atomic_t dma_refcnt;
383 struct mtk_tx_ring tx_ring; 396 struct mtk_tx_ring tx_ring;
384 struct mtk_rx_ring rx_ring; 397 struct mtk_rx_ring rx_ring;
398 struct napi_struct tx_napi;
385 struct napi_struct rx_napi; 399 struct napi_struct rx_napi;
386 struct mtk_tx_dma *scratch_ring; 400 struct mtk_tx_dma *scratch_ring;
401 dma_addr_t phy_scratch_ring;
387 void *scratch_head; 402 void *scratch_head;
388 struct clk *clk_ethif; 403 struct clk *clk_ethif;
389 struct clk *clk_esw; 404 struct clk *clk_esw;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e94ca1c3fc7c..f04a423ff79d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2597,7 +2597,6 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
2597 priv->cmd.free_head = 0; 2597 priv->cmd.free_head = 0;
2598 2598
2599 sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds); 2599 sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
2600 spin_lock_init(&priv->cmd.context_lock);
2601 2600
2602 for (priv->cmd.token_mask = 1; 2601 for (priv->cmd.token_mask = 1;
2603 priv->cmd.token_mask < priv->cmd.max_cmds; 2602 priv->cmd.token_mask < priv->cmd.max_cmds;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index d42083a8a104..6083775dae16 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -417,14 +417,18 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
417 mutex_lock(&mdev->state_lock); 417 mutex_lock(&mdev->state_lock);
418 if (mdev->device_up && priv->port_up) { 418 if (mdev->device_up && priv->port_up) {
419 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 419 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
420 if (err) 420 if (err) {
421 en_err(priv, "Failed configuring VLAN filter\n"); 421 en_err(priv, "Failed configuring VLAN filter\n");
422 goto out;
423 }
422 } 424 }
423 if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx)) 425 err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
424 en_dbg(HW, priv, "failed adding vlan %d\n", vid); 426 if (err)
425 mutex_unlock(&mdev->state_lock); 427 en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
426 428
427 return 0; 429out:
430 mutex_unlock(&mdev->state_lock);
431 return err;
428} 432}
429 433
430static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, 434static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
@@ -432,7 +436,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
432{ 436{
433 struct mlx4_en_priv *priv = netdev_priv(dev); 437 struct mlx4_en_priv *priv = netdev_priv(dev);
434 struct mlx4_en_dev *mdev = priv->mdev; 438 struct mlx4_en_dev *mdev = priv->mdev;
435 int err; 439 int err = 0;
436 440
437 en_dbg(HW, priv, "Killing VID:%d\n", vid); 441 en_dbg(HW, priv, "Killing VID:%d\n", vid);
438 442
@@ -449,7 +453,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
449 } 453 }
450 mutex_unlock(&mdev->state_lock); 454 mutex_unlock(&mdev->state_lock);
451 455
452 return 0; 456 return err;
453} 457}
454 458
455static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) 459static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
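
The rewritten add-VID and kill-VID paths above now report failures instead of unconditionally returning 0, and the add path funnels every exit through a single unlock. A sketch of the shape, with hypothetical my_* helpers standing in for mlx4_SET_VLAN_FLTR() and mlx4_register_vlan():

	#include <linux/mutex.h>
	#include <linux/types.h>

	struct my_priv {
		struct mutex state_lock;
		bool port_up;
	};

	int my_set_vlan_filter(struct my_priv *priv);		/* hypothetical */
	int my_register_vlan(struct my_priv *priv, u16 vid);	/* hypothetical */

	static int my_vlan_add(struct my_priv *priv, u16 vid)
	{
		int err = 0;

		mutex_lock(&priv->state_lock);
		if (priv->port_up) {
			err = my_set_vlan_filter(priv);
			if (err)
				goto out;	/* skip registration on failure */
		}
		err = my_register_vlan(priv, vid);
	out:
		mutex_unlock(&priv->state_lock);
		return err;	/* the 8021q core now sees the real result */
	}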
@@ -2042,11 +2046,20 @@ err:
2042 return -ENOMEM; 2046 return -ENOMEM;
2043} 2047}
2044 2048
2049static void mlx4_en_shutdown(struct net_device *dev)
2050{
2051 rtnl_lock();
2052 netif_device_detach(dev);
2053 mlx4_en_close(dev);
2054 rtnl_unlock();
2055}
2045 2056
2046void mlx4_en_destroy_netdev(struct net_device *dev) 2057void mlx4_en_destroy_netdev(struct net_device *dev)
2047{ 2058{
2048 struct mlx4_en_priv *priv = netdev_priv(dev); 2059 struct mlx4_en_priv *priv = netdev_priv(dev);
2049 struct mlx4_en_dev *mdev = priv->mdev; 2060 struct mlx4_en_dev *mdev = priv->mdev;
2061 bool shutdown = mdev->dev->persist->interface_state &
2062 MLX4_INTERFACE_STATE_SHUTDOWN;
2050 2063
2051 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); 2064 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
2052 2065
@@ -2054,7 +2067,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
2054 if (priv->registered) { 2067 if (priv->registered) {
2055 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev, 2068 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
2056 priv->port)); 2069 priv->port));
2057 unregister_netdev(dev); 2070 if (shutdown)
2071 mlx4_en_shutdown(dev);
2072 else
2073 unregister_netdev(dev);
2058 } 2074 }
2059 2075
2060 if (priv->allocated) 2076 if (priv->allocated)
@@ -2079,7 +2095,8 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
2079 kfree(priv->tx_ring); 2095 kfree(priv->tx_ring);
2080 kfree(priv->tx_cq); 2096 kfree(priv->tx_cq);
2081 2097
2082 free_netdev(dev); 2098 if (!shutdown)
2099 free_netdev(dev);
2083} 2100}
2084 2101
2085static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) 2102static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
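
The destroy path above branches on a shutdown flag: during a system shutdown the netdev is detached and closed under RTNL rather than fully unregistered, and free_netdev() is skipped since the kernel is going down anyway. A hedged sketch of that path (my_close stands in for the driver's stop routine):

	#include <linux/netdevice.h>
	#include <linux/rtnetlink.h>

	int my_close(struct net_device *dev);	/* hypothetical stop path */

	static void my_shutdown_netdev(struct net_device *dev)
	{
		rtnl_lock();
		netif_device_detach(dev);	/* mark absent, stop TX */
		my_close(dev);			/* quiesce queues and HW */
		rtnl_unlock();
		/* unregister_netdev()/free_netdev() are deliberately
		 * skipped on this path */
	}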
@@ -2464,9 +2481,14 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
2464 * strip that feature if this is an IPv6 encapsulated frame. 2481 * strip that feature if this is an IPv6 encapsulated frame.
2465 */ 2482 */
2466 if (skb->encapsulation && 2483 if (skb->encapsulation &&
2467 (skb->ip_summed == CHECKSUM_PARTIAL) && 2484 (skb->ip_summed == CHECKSUM_PARTIAL)) {
2468 (ip_hdr(skb)->version != 4)) 2485 struct mlx4_en_priv *priv = netdev_priv(dev);
2469 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 2486
2487 if (!priv->vxlan_port ||
2488 (ip_hdr(skb)->version != 4) ||
2489 (udp_hdr(skb)->dest != priv->vxlan_port))
2490 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2491 }
2470 2492
2471 return features; 2493 return features;
2472} 2494}
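
The features check above narrows hardware offload for encapsulated frames: only IPv4/UDP traffic aimed at the configured VXLAN port keeps checksum and GSO offload, everything else falls back to software. A helper-shaped sketch (not the exact ndo_features_check signature; vxlan_port is assumed stored big-endian, as in the hunk):

	#include <linux/netdevice.h>
	#include <linux/ip.h>
	#include <linux/udp.h>

	static netdev_features_t my_check_encap(struct sk_buff *skb,
						__be16 vxlan_port,
						netdev_features_t features)
	{
		if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) {
			if (!vxlan_port ||
			    ip_hdr(skb)->version != 4 ||
			    udp_hdr(skb)->dest != vxlan_port)
				features &= ~(NETIF_F_CSUM_MASK |
					      NETIF_F_GSO_MASK);
		}
		return features;
	}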
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index dec77d6f0ac9..7ae1cdad9bf0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -147,7 +147,7 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
147 if (enable) { 147 if (enable) {
148 dev->flags |= MLX4_FLAG_BONDED; 148 dev->flags |= MLX4_FLAG_BONDED;
149 } else { 149 } else {
150 ret = mlx4_virt2phy_port_map(dev, 1, 2); 150 ret = mlx4_virt2phy_port_map(dev, 1, 2);
151 if (ret) { 151 if (ret) {
152 mlx4_err(dev, "Fail to reset port map\n"); 152 mlx4_err(dev, "Fail to reset port map\n");
153 return ret; 153 return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 3564aad778a3..75dd2e3d3059 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2600,7 +2600,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
2600 err = mlx4_init_uar_table(dev); 2600 err = mlx4_init_uar_table(dev);
2601 if (err) { 2601 if (err) {
2602 mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); 2602 mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
2603 return err; 2603 return err;
2604 } 2604 }
2605 2605
2606 err = mlx4_uar_alloc(dev, &priv->driver_uar); 2606 err = mlx4_uar_alloc(dev, &priv->driver_uar);
@@ -3223,6 +3223,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
3223 3223
3224 INIT_LIST_HEAD(&priv->pgdir_list); 3224 INIT_LIST_HEAD(&priv->pgdir_list);
3225 mutex_init(&priv->pgdir_mutex); 3225 mutex_init(&priv->pgdir_mutex);
3226 spin_lock_init(&priv->cmd.context_lock);
3226 3227
3227 INIT_LIST_HEAD(&priv->bf_list); 3228 INIT_LIST_HEAD(&priv->bf_list);
3228 mutex_init(&priv->bf_mutex); 3229 mutex_init(&priv->bf_mutex);
@@ -4135,8 +4136,11 @@ static void mlx4_shutdown(struct pci_dev *pdev)
4135 4136
4136 mlx4_info(persist->dev, "mlx4_shutdown was called\n"); 4137 mlx4_info(persist->dev, "mlx4_shutdown was called\n");
4137 mutex_lock(&persist->interface_state_mutex); 4138 mutex_lock(&persist->interface_state_mutex);
4138 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 4139 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) {
4140 /* Notify mlx4 clients that the kernel is being shut down */
4141 persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN;
4139 mlx4_unload_one(pdev); 4142 mlx4_unload_one(pdev);
4143 }
4140 mutex_unlock(&persist->interface_state_mutex); 4144 mutex_unlock(&persist->interface_state_mutex);
4141} 4145}
4142 4146
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f2d0920018a5..94b891c118c1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -618,8 +618,8 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
618 err = mlx4_READ_ENTRY(dev, 618 err = mlx4_READ_ENTRY(dev,
619 entry->index, 619 entry->index,
620 mailbox); 620 mailbox);
621 if (err) 621 if (err)
622 goto out_mailbox; 622 goto out_mailbox;
623 members_count = 623 members_count =
624 be32_to_cpu(mgm->members_count) & 624 be32_to_cpu(mgm->members_count) &
625 0xffffff; 625 0xffffff;
@@ -657,8 +657,8 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
657 err = mlx4_WRITE_ENTRY(dev, 657 err = mlx4_WRITE_ENTRY(dev,
658 entry->index, 658 entry->index,
659 mailbox); 659 mailbox);
660 if (err) 660 if (err)
661 goto out_mailbox; 661 goto out_mailbox;
662 } 662 }
663 } 663 }
664 } 664 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 93195191f45b..395b5463cfd9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -248,7 +248,7 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
248 offset, order); 248 offset, order);
249 return; 249 return;
250 } 250 }
251 __mlx4_free_mtt_range(dev, offset, order); 251 __mlx4_free_mtt_range(dev, offset, order);
252} 252}
253 253
254void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) 254void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index cd9b2b28df88..8b81114bdc72 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2372,16 +2372,15 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2372 __mlx4_mpt_release(dev, index); 2372 __mlx4_mpt_release(dev, index);
2373 break; 2373 break;
2374 case RES_OP_MAP_ICM: 2374 case RES_OP_MAP_ICM:
2375 index = get_param_l(&in_param); 2375 index = get_param_l(&in_param);
2376 id = index & mpt_mask(dev); 2376 id = index & mpt_mask(dev);
2377 err = mr_res_start_move_to(dev, slave, id, 2377 err = mr_res_start_move_to(dev, slave, id,
2378 RES_MPT_RESERVED, &mpt); 2378 RES_MPT_RESERVED, &mpt);
2379 if (err) 2379 if (err)
2380 return err;
2381
2382 __mlx4_mpt_free_icm(dev, mpt->key);
2383 res_end_move(dev, slave, RES_MPT, id);
2384 return err; 2380 return err;
2381
2382 __mlx4_mpt_free_icm(dev, mpt->key);
2383 res_end_move(dev, slave, RES_MPT, id);
2385 break; 2384 break;
2386 default: 2385 default:
2387 err = -EINVAL; 2386 err = -EINVAL;
@@ -4253,9 +4252,8 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4253 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) && 4252 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4254 !(dev->caps.flags2 & 4253 !(dev->caps.flags2 &
4255 MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) { 4254 MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
4256 mlx4_warn(dev, 4255 mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
4257 "Src check LB for slave %d isn't supported\n", 4256 slave);
4258 slave);
4259 return -ENOTSUPP; 4257 return -ENOTSUPP;
4260 } 4258 }
4261 4259
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 1cf722eba607..aae46884bf93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -4,6 +4,7 @@
4 4
5config MLX5_CORE 5config MLX5_CORE
6 tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver" 6 tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
7 depends on MAY_USE_DEVLINK
7 depends on PCI 8 depends on PCI
8 default n 9 default n
9 ---help--- 10 ---help---
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index c4f450f1c658..05cc1effc13c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -5,9 +5,9 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
5 mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ 5 mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
6 fs_counters.o rl.o 6 fs_counters.o rl.o
7 7
8mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ 8mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
9 en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \ 9 en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
10 en_rx_am.o en_txrx.o en_clock.o vxlan.o en_tc.o \ 10 en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \
11 en_arfs.o 11 en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o
12 12
13mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o 13mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index dcd2df6518de..d6e2a1cae19a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -295,6 +295,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
295 case MLX5_CMD_OP_DESTROY_FLOW_GROUP: 295 case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
296 case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: 296 case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
297 case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER: 297 case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
298 case MLX5_CMD_OP_2ERR_QP:
299 case MLX5_CMD_OP_2RST_QP:
300 case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
301 case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
302 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
303 case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
298 return MLX5_CMD_STAT_OK; 304 return MLX5_CMD_STAT_OK;
299 305
300 case MLX5_CMD_OP_QUERY_HCA_CAP: 306 case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -321,8 +327,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
321 case MLX5_CMD_OP_RTR2RTS_QP: 327 case MLX5_CMD_OP_RTR2RTS_QP:
322 case MLX5_CMD_OP_RTS2RTS_QP: 328 case MLX5_CMD_OP_RTS2RTS_QP:
323 case MLX5_CMD_OP_SQERR2RTS_QP: 329 case MLX5_CMD_OP_SQERR2RTS_QP:
324 case MLX5_CMD_OP_2ERR_QP:
325 case MLX5_CMD_OP_2RST_QP:
326 case MLX5_CMD_OP_QUERY_QP: 330 case MLX5_CMD_OP_QUERY_QP:
327 case MLX5_CMD_OP_SQD_RTS_QP: 331 case MLX5_CMD_OP_SQD_RTS_QP:
328 case MLX5_CMD_OP_INIT2INIT_QP: 332 case MLX5_CMD_OP_INIT2INIT_QP:
@@ -342,7 +346,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
342 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: 346 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
343 case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: 347 case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
344 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: 348 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
345 case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
346 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: 349 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
347 case MLX5_CMD_OP_SET_ROCE_ADDRESS: 350 case MLX5_CMD_OP_SET_ROCE_ADDRESS:
348 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: 351 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
@@ -390,11 +393,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
390 case MLX5_CMD_OP_CREATE_RQT: 393 case MLX5_CMD_OP_CREATE_RQT:
391 case MLX5_CMD_OP_MODIFY_RQT: 394 case MLX5_CMD_OP_MODIFY_RQT:
392 case MLX5_CMD_OP_QUERY_RQT: 395 case MLX5_CMD_OP_QUERY_RQT:
396
393 case MLX5_CMD_OP_CREATE_FLOW_TABLE: 397 case MLX5_CMD_OP_CREATE_FLOW_TABLE:
394 case MLX5_CMD_OP_QUERY_FLOW_TABLE: 398 case MLX5_CMD_OP_QUERY_FLOW_TABLE:
395 case MLX5_CMD_OP_CREATE_FLOW_GROUP: 399 case MLX5_CMD_OP_CREATE_FLOW_GROUP:
396 case MLX5_CMD_OP_QUERY_FLOW_GROUP: 400 case MLX5_CMD_OP_QUERY_FLOW_GROUP:
397 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: 401
398 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: 402 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
399 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: 403 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
400 case MLX5_CMD_OP_QUERY_FLOW_COUNTER: 404 case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
@@ -545,6 +549,7 @@ const char *mlx5_command_str(int command)
545 MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER); 549 MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
546 MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER); 550 MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
547 MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER); 551 MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
552 MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
548 default: return "unknown command opcode"; 553 default: return "unknown command opcode";
549 } 554 }
550} 555}
@@ -601,11 +606,36 @@ static void dump_command(struct mlx5_core_dev *dev,
601 pr_debug("\n"); 606 pr_debug("\n");
602} 607}
603 608
609static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
610{
611 struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
612
613 return be16_to_cpu(hdr->opcode);
614}
615
616static void cb_timeout_handler(struct work_struct *work)
617{
618 struct delayed_work *dwork = container_of(work, struct delayed_work,
619 work);
620 struct mlx5_cmd_work_ent *ent = container_of(dwork,
621 struct mlx5_cmd_work_ent,
622 cb_timeout_work);
623 struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
624 cmd);
625
626 ent->ret = -ETIMEDOUT;
627 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
628 mlx5_command_str(msg_to_opcode(ent->in)),
629 msg_to_opcode(ent->in));
630 mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
631}
632
604static void cmd_work_handler(struct work_struct *work) 633static void cmd_work_handler(struct work_struct *work)
605{ 634{
606 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); 635 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
607 struct mlx5_cmd *cmd = ent->cmd; 636 struct mlx5_cmd *cmd = ent->cmd;
608 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); 637 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
638 unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
609 struct mlx5_cmd_layout *lay; 639 struct mlx5_cmd_layout *lay;
610 struct semaphore *sem; 640 struct semaphore *sem;
611 unsigned long flags; 641 unsigned long flags;
@@ -646,6 +676,9 @@ static void cmd_work_handler(struct work_struct *work)
646 dump_command(dev, ent, 1); 676 dump_command(dev, ent, 1);
647 ent->ts1 = ktime_get_ns(); 677 ent->ts1 = ktime_get_ns();
648 678
679 if (ent->callback)
680 schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
681
649 /* ring doorbell after the descriptor is valid */ 682 /* ring doorbell after the descriptor is valid */
650 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); 683 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
651 wmb(); 684 wmb();
@@ -690,13 +723,6 @@ static const char *deliv_status_to_str(u8 status)
690 } 723 }
691} 724}
692 725
693static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
694{
695 struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
696
697 return be16_to_cpu(hdr->opcode);
698}
699
700static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) 726static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
701{ 727{
702 unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); 728 unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
@@ -705,13 +731,13 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
705 731
706 if (cmd->mode == CMD_MODE_POLLING) { 732 if (cmd->mode == CMD_MODE_POLLING) {
707 wait_for_completion(&ent->done); 733 wait_for_completion(&ent->done);
708 err = ent->ret; 734 } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
709 } else { 735 ent->ret = -ETIMEDOUT;
710 if (!wait_for_completion_timeout(&ent->done, timeout)) 736 mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
711 err = -ETIMEDOUT;
712 else
713 err = 0;
714 } 737 }
738
739 err = ent->ret;
740
715 if (err == -ETIMEDOUT) { 741 if (err == -ETIMEDOUT) {
716 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", 742 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
717 mlx5_command_str(msg_to_opcode(ent->in)), 743 mlx5_command_str(msg_to_opcode(ent->in)),
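
Taken together, the command-path hunks above bound both command flavours: asynchronous commands arm a delayed work when posted (cancelled on normal completion in a later hunk), and synchronous waiters convert a wait_for_completion_timeout() expiry into a forced completion for that slot. A hedged sketch of the async half, with my_force_completion standing in for mlx5_cmd_comp_handler():

	#include <linux/workqueue.h>
	#include <linux/errno.h>

	struct my_cmd_ent {
		struct delayed_work cb_timeout_work;
		int ret;
		int idx;
	};

	void my_force_completion(unsigned long vec);	/* hypothetical */

	static void my_cb_timeout(struct work_struct *work)
	{
		struct delayed_work *dw = to_delayed_work(work);
		struct my_cmd_ent *ent =
			container_of(dw, struct my_cmd_ent, cb_timeout_work);

		ent->ret = -ETIMEDOUT;
		my_force_completion(1UL << ent->idx);	/* run slot completion */
	}

	/* at post time:
	 *	INIT_DELAYED_WORK(&ent->cb_timeout_work, my_cb_timeout);
	 *	schedule_delayed_work(&ent->cb_timeout_work, timeout);
	 * on real completion:
	 *	cancel_delayed_work(&ent->cb_timeout_work);
	 */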
@@ -760,6 +786,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
760 if (!callback) 786 if (!callback)
761 init_completion(&ent->done); 787 init_completion(&ent->done);
762 788
789 INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
763 INIT_WORK(&ent->work, cmd_work_handler); 790 INIT_WORK(&ent->work, cmd_work_handler);
764 if (page_queue) { 791 if (page_queue) {
765 cmd_work_handler(&ent->work); 792 cmd_work_handler(&ent->work);
@@ -769,28 +796,26 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
769 goto out_free; 796 goto out_free;
770 } 797 }
771 798
772 if (!callback) { 799 if (callback)
773 err = wait_func(dev, ent); 800 goto out;
774 if (err == -ETIMEDOUT)
775 goto out;
776
777 ds = ent->ts2 - ent->ts1;
778 op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
779 if (op < ARRAY_SIZE(cmd->stats)) {
780 stats = &cmd->stats[op];
781 spin_lock_irq(&stats->lock);
782 stats->sum += ds;
783 ++stats->n;
784 spin_unlock_irq(&stats->lock);
785 }
786 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
787 "fw exec time for %s is %lld nsec\n",
788 mlx5_command_str(op), ds);
789 *status = ent->status;
790 free_cmd(ent);
791 }
792 801
793 return err; 802 err = wait_func(dev, ent);
803 if (err == -ETIMEDOUT)
804 goto out_free;
805
806 ds = ent->ts2 - ent->ts1;
807 op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
808 if (op < ARRAY_SIZE(cmd->stats)) {
809 stats = &cmd->stats[op];
810 spin_lock_irq(&stats->lock);
811 stats->sum += ds;
812 ++stats->n;
813 spin_unlock_irq(&stats->lock);
814 }
815 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
816 "fw exec time for %s is %lld nsec\n",
817 mlx5_command_str(op), ds);
818 *status = ent->status;
794 819
795out_free: 820out_free:
796 free_cmd(ent); 821 free_cmd(ent);
@@ -1180,41 +1205,30 @@ err_dbg:
1180 return err; 1205 return err;
1181} 1206}
1182 1207
1183void mlx5_cmd_use_events(struct mlx5_core_dev *dev) 1208static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
1184{ 1209{
1185 struct mlx5_cmd *cmd = &dev->cmd; 1210 struct mlx5_cmd *cmd = &dev->cmd;
1186 int i; 1211 int i;
1187 1212
1188 for (i = 0; i < cmd->max_reg_cmds; i++) 1213 for (i = 0; i < cmd->max_reg_cmds; i++)
1189 down(&cmd->sem); 1214 down(&cmd->sem);
1190
1191 down(&cmd->pages_sem); 1215 down(&cmd->pages_sem);
1192 1216
1193 flush_workqueue(cmd->wq); 1217 cmd->mode = mode;
1194
1195 cmd->mode = CMD_MODE_EVENTS;
1196 1218
1197 up(&cmd->pages_sem); 1219 up(&cmd->pages_sem);
1198 for (i = 0; i < cmd->max_reg_cmds; i++) 1220 for (i = 0; i < cmd->max_reg_cmds; i++)
1199 up(&cmd->sem); 1221 up(&cmd->sem);
1200} 1222}
1201 1223
1202void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) 1224void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
1203{ 1225{
1204 struct mlx5_cmd *cmd = &dev->cmd; 1226 mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
1205 int i; 1227}
1206
1207 for (i = 0; i < cmd->max_reg_cmds; i++)
1208 down(&cmd->sem);
1209
1210 down(&cmd->pages_sem);
1211
1212 flush_workqueue(cmd->wq);
1213 cmd->mode = CMD_MODE_POLLING;
1214 1228
1215 up(&cmd->pages_sem); 1229void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
1216 for (i = 0; i < cmd->max_reg_cmds; i++) 1230{
1217 up(&cmd->sem); 1231 mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
1218} 1232}
1219 1233
1220static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) 1234static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
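
The refactor above collapses two near-identical bodies into one helper: quiesce the command interface by draining every regular-command slot plus the page-request slot, flip the mode, then release in reverse order. Sketch under the assumption that the sem field is a counting semaphore initialised to max_reg_cmds:

	#include <linux/semaphore.h>

	struct my_cmd {
		struct semaphore sem;		/* count == max_reg_cmds */
		struct semaphore pages_sem;	/* count == 1 */
		int max_reg_cmds;
		int mode;
	};

	static void my_cmd_change_mode(struct my_cmd *cmd, int mode)
	{
		int i;

		for (i = 0; i < cmd->max_reg_cmds; i++)
			down(&cmd->sem);	/* drain all command slots */
		down(&cmd->pages_sem);

		cmd->mode = mode;		/* no command can race this */

		up(&cmd->pages_sem);
		for (i = 0; i < cmd->max_reg_cmds; i++)
			up(&cmd->sem);
	}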
@@ -1250,6 +1264,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
1250 struct semaphore *sem; 1264 struct semaphore *sem;
1251 1265
1252 ent = cmd->ent_arr[i]; 1266 ent = cmd->ent_arr[i];
1267 if (ent->callback)
1268 cancel_delayed_work(&ent->cb_timeout_work);
1253 if (ent->page_queue) 1269 if (ent->page_queue)
1254 sem = &cmd->pages_sem; 1270 sem = &cmd->pages_sem;
1255 else 1271 else
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index da885c0dfebe..4cbd452fec25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -44,6 +44,7 @@
44#include <linux/mlx5/vport.h> 44#include <linux/mlx5/vport.h>
45#include <linux/mlx5/transobj.h> 45#include <linux/mlx5/transobj.h>
46#include <linux/rhashtable.h> 46#include <linux/rhashtable.h>
47#include <net/switchdev.h>
47#include "wq.h" 48#include "wq.h"
48#include "mlx5_core.h" 49#include "mlx5_core.h"
49#include "en_stats.h" 50#include "en_stats.h"
@@ -163,7 +164,6 @@ enum mlx5e_priv_flag {
163 164
164#ifdef CONFIG_MLX5_CORE_EN_DCB 165#ifdef CONFIG_MLX5_CORE_EN_DCB
165#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ 166#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
166#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */
167#endif 167#endif
168 168
169struct mlx5e_cq_moder { 169struct mlx5e_cq_moder {
@@ -214,6 +214,7 @@ struct mlx5e_tstamp {
214enum { 214enum {
215 MLX5E_RQ_STATE_POST_WQES_ENABLE, 215 MLX5E_RQ_STATE_POST_WQES_ENABLE,
216 MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, 216 MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
217 MLX5E_RQ_STATE_FLUSH_TIMEOUT,
217 MLX5E_RQ_STATE_AM, 218 MLX5E_RQ_STATE_AM,
218}; 219};
219 220
@@ -245,6 +246,8 @@ typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
245typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, 246typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
246 u16 ix); 247 u16 ix);
247 248
249typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
250
248struct mlx5e_dma_info { 251struct mlx5e_dma_info {
249 struct page *page; 252 struct page *page;
250 dma_addr_t addr; 253 dma_addr_t addr;
@@ -290,6 +293,7 @@ struct mlx5e_rq {
290 struct mlx5e_cq cq; 293 struct mlx5e_cq cq;
291 mlx5e_fp_handle_rx_cqe handle_rx_cqe; 294 mlx5e_fp_handle_rx_cqe handle_rx_cqe;
292 mlx5e_fp_alloc_wqe alloc_wqe; 295 mlx5e_fp_alloc_wqe alloc_wqe;
296 mlx5e_fp_dealloc_wqe dealloc_wqe;
293 297
294 unsigned long state; 298 unsigned long state;
295 int ix; 299 int ix;
@@ -356,6 +360,7 @@ struct mlx5e_sq_dma {
356enum { 360enum {
357 MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, 361 MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
358 MLX5E_SQ_STATE_BF_ENABLE, 362 MLX5E_SQ_STATE_BF_ENABLE,
363 MLX5E_SQ_STATE_TX_TIMEOUT,
359}; 364};
360 365
361struct mlx5e_ico_wqe_info { 366struct mlx5e_ico_wqe_info {
@@ -453,7 +458,7 @@ enum mlx5e_traffic_types {
453}; 458};
454 459
455enum { 460enum {
456 MLX5E_STATE_ASYNC_EVENTS_ENABLE, 461 MLX5E_STATE_ASYNC_EVENTS_ENABLED,
457 MLX5E_STATE_OPENED, 462 MLX5E_STATE_OPENED,
458 MLX5E_STATE_DESTROYING, 463 MLX5E_STATE_DESTROYING,
459}; 464};
@@ -543,8 +548,24 @@ enum {
543 MLX5E_ARFS_FT_LEVEL 548 MLX5E_ARFS_FT_LEVEL
544}; 549};
545 550
551struct mlx5e_ethtool_table {
552 struct mlx5_flow_table *ft;
553 int num_rules;
554};
555
556#define ETHTOOL_NUM_L3_L4_FTS 7
557#define ETHTOOL_NUM_L2_FTS 4
558
559struct mlx5e_ethtool_steering {
560 struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
561 struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
562 struct list_head rules;
563 int tot_num_rules;
564};
565
546struct mlx5e_flow_steering { 566struct mlx5e_flow_steering {
547 struct mlx5_flow_namespace *ns; 567 struct mlx5_flow_namespace *ns;
568 struct mlx5e_ethtool_steering ethtool;
548 struct mlx5e_tc_table tc; 569 struct mlx5e_tc_table tc;
549 struct mlx5e_vlan_table vlan; 570 struct mlx5e_vlan_table vlan;
550 struct mlx5e_l2_table l2; 571 struct mlx5e_l2_table l2;
@@ -552,9 +573,15 @@ struct mlx5e_flow_steering {
552 struct mlx5e_arfs_tables arfs; 573 struct mlx5e_arfs_tables arfs;
553}; 574};
554 575
555struct mlx5e_direct_tir { 576struct mlx5e_rqt {
556 u32 tirn;
557 u32 rqtn; 577 u32 rqtn;
578 bool enabled;
579};
580
581struct mlx5e_tir {
582 u32 tirn;
583 struct mlx5e_rqt rqt;
584 struct list_head list;
558}; 585};
559 586
560enum { 587enum {
@@ -562,6 +589,22 @@ enum {
562 MLX5E_NIC_PRIO 589 MLX5E_NIC_PRIO
563}; 590};
564 591
592struct mlx5e_profile {
593 void (*init)(struct mlx5_core_dev *mdev,
594 struct net_device *netdev,
595 const struct mlx5e_profile *profile, void *ppriv);
596 void (*cleanup)(struct mlx5e_priv *priv);
597 int (*init_rx)(struct mlx5e_priv *priv);
598 void (*cleanup_rx)(struct mlx5e_priv *priv);
599 int (*init_tx)(struct mlx5e_priv *priv);
600 void (*cleanup_tx)(struct mlx5e_priv *priv);
601 void (*enable)(struct mlx5e_priv *priv);
602 void (*disable)(struct mlx5e_priv *priv);
603 void (*update_stats)(struct mlx5e_priv *priv);
604 int (*max_nch)(struct mlx5_core_dev *mdev);
605 int max_tc;
606};
607
565struct mlx5e_priv { 608struct mlx5e_priv {
566 /* priv data path fields - start */ 609 /* priv data path fields - start */
567 struct mlx5e_sq **txq_to_sq_map; 610 struct mlx5e_sq **txq_to_sq_map;
@@ -570,18 +613,14 @@ struct mlx5e_priv {
570 613
571 unsigned long state; 614 unsigned long state;
572 struct mutex state_lock; /* Protects Interface state */ 615 struct mutex state_lock; /* Protects Interface state */
573 struct mlx5_uar cq_uar;
574 u32 pdn;
575 u32 tdn;
576 struct mlx5_core_mkey mkey;
577 struct mlx5_core_mkey umr_mkey; 616 struct mlx5_core_mkey umr_mkey;
578 struct mlx5e_rq drop_rq; 617 struct mlx5e_rq drop_rq;
579 618
580 struct mlx5e_channel **channel; 619 struct mlx5e_channel **channel;
581 u32 tisn[MLX5E_MAX_NUM_TC]; 620 u32 tisn[MLX5E_MAX_NUM_TC];
582 u32 indir_rqtn; 621 struct mlx5e_rqt indir_rqt;
583 u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; 622 struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
584 struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; 623 struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
585 u32 tx_rates[MLX5E_MAX_NUM_SQS]; 624 u32 tx_rates[MLX5E_MAX_NUM_SQS];
586 625
587 struct mlx5e_flow_steering fs; 626 struct mlx5e_flow_steering fs;
@@ -591,6 +630,7 @@ struct mlx5e_priv {
591 struct workqueue_struct *wq; 630 struct workqueue_struct *wq;
592 struct work_struct update_carrier_work; 631 struct work_struct update_carrier_work;
593 struct work_struct set_rx_mode_work; 632 struct work_struct set_rx_mode_work;
633 struct work_struct tx_timeout_work;
594 struct delayed_work update_stats_work; 634 struct delayed_work update_stats_work;
595 635
596 u32 pflags; 636 u32 pflags;
@@ -599,6 +639,8 @@ struct mlx5e_priv {
599 struct mlx5e_stats stats; 639 struct mlx5e_stats stats;
600 struct mlx5e_tstamp tstamp; 640 struct mlx5e_tstamp tstamp;
601 u16 q_counter; 641 u16 q_counter;
642 const struct mlx5e_profile *profile;
643 void *ppriv;
602}; 644};
603 645
604enum mlx5e_link_mode { 646enum mlx5e_link_mode {
@@ -647,12 +689,16 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
647int mlx5e_napi_poll(struct napi_struct *napi, int budget); 689int mlx5e_napi_poll(struct napi_struct *napi, int budget);
648bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); 690bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
649int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); 691int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
692void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
693void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
650 694
651void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); 695void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
652void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); 696void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
653bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); 697bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
654int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); 698int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
655int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); 699int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
700void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
701void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
656void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq); 702void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
657void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq, 703void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
658 struct mlx5_cqe64 *cqe, 704 struct mlx5_cqe64 *cqe,
@@ -680,6 +726,16 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
680void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); 726void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
681void mlx5e_init_l2_addr(struct mlx5e_priv *priv); 727void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
682void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft); 728void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
729int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
730 int location);
731int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
732 struct ethtool_rxnfc *info, u32 *rule_locs);
733int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
734 struct ethtool_rx_flow_spec *fs);
735int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
736 int location);
737void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
738void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
683void mlx5e_set_rx_mode_work(struct work_struct *work); 739void mlx5e_set_rx_mode_work(struct work_struct *work);
684 740
685void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp, 741void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
@@ -788,5 +844,39 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
788#endif 844#endif
789 845
790u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev); 846u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
847int mlx5e_create_tir(struct mlx5_core_dev *mdev,
848 struct mlx5e_tir *tir, u32 *in, int inlen);
849void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
850 struct mlx5e_tir *tir);
851int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
852void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
853int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev);
854
855struct mlx5_eswitch_rep;
856int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
857 struct mlx5_eswitch_rep *rep);
858void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
859 struct mlx5_eswitch_rep *rep);
860int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep);
861void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
862 struct mlx5_eswitch_rep *rep);
863int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
864void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
865int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
866
867int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
868void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
869int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
870void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
871int mlx5e_create_tises(struct mlx5e_priv *priv);
872void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
873int mlx5e_close(struct net_device *netdev);
874int mlx5e_open(struct net_device *netdev);
875void mlx5e_update_stats_work(struct work_struct *work);
876void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
877 const struct mlx5e_profile *profile, void *ppriv);
878void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
879struct rtnl_link_stats64 *
880mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
791 881
792#endif /* __MLX5_EN_H__ */ 882#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 3515e78ba68f..a8cb38789774 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -93,14 +93,14 @@ static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
93static int arfs_disable(struct mlx5e_priv *priv) 93static int arfs_disable(struct mlx5e_priv *priv)
94{ 94{
95 struct mlx5_flow_destination dest; 95 struct mlx5_flow_destination dest;
96 u32 *tirn = priv->indir_tirn; 96 struct mlx5e_tir *tir = priv->indir_tir;
97 int err = 0; 97 int err = 0;
98 int tt; 98 int tt;
99 int i; 99 int i;
100 100
101 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; 101 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
102 for (i = 0; i < ARFS_NUM_TYPES; i++) { 102 for (i = 0; i < ARFS_NUM_TYPES; i++) {
103 dest.tir_num = tirn[i]; 103 dest.tir_num = tir[i].tirn;
104 tt = arfs_get_tt(i); 104 tt = arfs_get_tt(i);
105 /* Modify ttc rules destination to bypass the aRFS tables*/ 105 /* Modify ttc rules destination to bypass the aRFS tables*/
106 err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt], 106 err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
@@ -175,15 +175,12 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
175{ 175{
176 struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type]; 176 struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
177 struct mlx5_flow_destination dest; 177 struct mlx5_flow_destination dest;
178 u8 match_criteria_enable = 0; 178 struct mlx5e_tir *tir = priv->indir_tir;
179 u32 *tirn = priv->indir_tirn; 179 struct mlx5_flow_spec *spec;
180 u32 *match_criteria;
181 u32 *match_value;
182 int err = 0; 180 int err = 0;
183 181
184 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); 182 spec = mlx5_vzalloc(sizeof(*spec));
185 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); 183 if (!spec) {
186 if (!match_value || !match_criteria) {
187 netdev_err(priv->netdev, "%s: alloc failed\n", __func__); 184 netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
188 err = -ENOMEM; 185 err = -ENOMEM;
189 goto out; 186 goto out;
@@ -192,24 +189,23 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
192 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; 189 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
193 switch (type) { 190 switch (type) {
194 case ARFS_IPV4_TCP: 191 case ARFS_IPV4_TCP:
195 dest.tir_num = tirn[MLX5E_TT_IPV4_TCP]; 192 dest.tir_num = tir[MLX5E_TT_IPV4_TCP].tirn;
196 break; 193 break;
197 case ARFS_IPV4_UDP: 194 case ARFS_IPV4_UDP:
198 dest.tir_num = tirn[MLX5E_TT_IPV4_UDP]; 195 dest.tir_num = tir[MLX5E_TT_IPV4_UDP].tirn;
199 break; 196 break;
200 case ARFS_IPV6_TCP: 197 case ARFS_IPV6_TCP:
201 dest.tir_num = tirn[MLX5E_TT_IPV6_TCP]; 198 dest.tir_num = tir[MLX5E_TT_IPV6_TCP].tirn;
202 break; 199 break;
203 case ARFS_IPV6_UDP: 200 case ARFS_IPV6_UDP:
204 dest.tir_num = tirn[MLX5E_TT_IPV6_UDP]; 201 dest.tir_num = tir[MLX5E_TT_IPV6_UDP].tirn;
205 break; 202 break;
206 default: 203 default:
207 err = -EINVAL; 204 err = -EINVAL;
208 goto out; 205 goto out;
209 } 206 }
210 207
211 arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, match_criteria_enable, 208 arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, spec,
212 match_criteria, match_value,
213 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 209 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
214 MLX5_FS_DEFAULT_FLOW_TAG, 210 MLX5_FS_DEFAULT_FLOW_TAG,
215 &dest); 211 &dest);
@@ -220,8 +216,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
220 __func__, type); 216 __func__, type);
221 } 217 }
222out: 218out:
223 kvfree(match_criteria); 219 kvfree(spec);
224 kvfree(match_value);
225 return err; 220 return err;
226} 221}
227 222
@@ -475,23 +470,20 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
475 struct mlx5_flow_rule *rule = NULL; 470 struct mlx5_flow_rule *rule = NULL;
476 struct mlx5_flow_destination dest; 471 struct mlx5_flow_destination dest;
477 struct arfs_table *arfs_table; 472 struct arfs_table *arfs_table;
478 u8 match_criteria_enable = 0; 473 struct mlx5_flow_spec *spec;
479 struct mlx5_flow_table *ft; 474 struct mlx5_flow_table *ft;
480 u32 *match_criteria;
481 u32 *match_value;
482 int err = 0; 475 int err = 0;
483 476
484 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); 477 spec = mlx5_vzalloc(sizeof(*spec));
485 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); 478 if (!spec) {
486 if (!match_value || !match_criteria) {
487 netdev_err(priv->netdev, "%s: alloc failed\n", __func__); 479 netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
488 err = -ENOMEM; 480 err = -ENOMEM;
489 goto out; 481 goto out;
490 } 482 }
491 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 483 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
492 MLX5_SET_TO_ONES(fte_match_param, match_criteria, 484 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
493 outer_headers.ethertype); 485 outer_headers.ethertype);
494 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, 486 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype,
495 ntohs(tuple->etype)); 487 ntohs(tuple->etype));
496 arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype); 488 arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
497 if (!arfs_table) { 489 if (!arfs_table) {
@@ -501,59 +493,58 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
501 493
502 ft = arfs_table->ft.t; 494 ft = arfs_table->ft.t;
503 if (tuple->ip_proto == IPPROTO_TCP) { 495 if (tuple->ip_proto == IPPROTO_TCP) {
504 MLX5_SET_TO_ONES(fte_match_param, match_criteria, 496 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
505 outer_headers.tcp_dport); 497 outer_headers.tcp_dport);
506 MLX5_SET_TO_ONES(fte_match_param, match_criteria, 498 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
507 outer_headers.tcp_sport); 499 outer_headers.tcp_sport);
508 MLX5_SET(fte_match_param, match_value, outer_headers.tcp_dport, 500 MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
509 ntohs(tuple->dst_port)); 501 ntohs(tuple->dst_port));
510 MLX5_SET(fte_match_param, match_value, outer_headers.tcp_sport, 502 MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
511 ntohs(tuple->src_port)); 503 ntohs(tuple->src_port));
512 } else { 504 } else {
513 MLX5_SET_TO_ONES(fte_match_param, match_criteria, 505 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
514 outer_headers.udp_dport); 506 outer_headers.udp_dport);
515 MLX5_SET_TO_ONES(fte_match_param, match_criteria, 507 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
516 outer_headers.udp_sport); 508 outer_headers.udp_sport);
517 MLX5_SET(fte_match_param, match_value, outer_headers.udp_dport, 509 MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport,
518 ntohs(tuple->dst_port)); 510 ntohs(tuple->dst_port));
519 MLX5_SET(fte_match_param, match_value, outer_headers.udp_sport, 511 MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport,
520 ntohs(tuple->src_port)); 512 ntohs(tuple->src_port));
521 } 513 }
522 if (tuple->etype == htons(ETH_P_IP)) { 514 if (tuple->etype == htons(ETH_P_IP)) {
523 memcpy(MLX5_ADDR_OF(fte_match_param, match_value, 515 memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
524 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), 516 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
525 &tuple->src_ipv4, 517 &tuple->src_ipv4,
526 4); 518 4);
527 memcpy(MLX5_ADDR_OF(fte_match_param, match_value, 519 memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
528 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 520 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
529 &tuple->dst_ipv4, 521 &tuple->dst_ipv4,
530 4); 522 4);
531 MLX5_SET_TO_ONES(fte_match_param, match_criteria, 523 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
532 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4); 524 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
533 MLX5_SET_TO_ONES(fte_match_param, match_criteria, 525 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
534 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); 526 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
535 } else { 527 } else {
536 memcpy(MLX5_ADDR_OF(fte_match_param, match_value, 528 memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
537 outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 529 outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
538 &tuple->src_ipv6, 530 &tuple->src_ipv6,
539 16); 531 16);
540 memcpy(MLX5_ADDR_OF(fte_match_param, match_value, 532 memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
541 outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 533 outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
542 &tuple->dst_ipv6, 534 &tuple->dst_ipv6,
543 16); 535 16);
544 memset(MLX5_ADDR_OF(fte_match_param, match_criteria, 536 memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
545 outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 537 outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
546 0xff, 538 0xff,
547 16); 539 16);
548 memset(MLX5_ADDR_OF(fte_match_param, match_criteria, 540 memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
549 outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 541 outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
550 0xff, 542 0xff,
551 16); 543 16);
552 } 544 }
553 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; 545 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
554 dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn; 546 dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
555 rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria, 547 rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
556 match_value, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
557 MLX5_FS_DEFAULT_FLOW_TAG, 548 MLX5_FS_DEFAULT_FLOW_TAG,
558 &dest); 549 &dest);
559 if (IS_ERR(rule)) { 550 if (IS_ERR(rule)) {
@@ -563,8 +554,7 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
563 } 554 }
564 555
565out: 556out:
566 kvfree(match_criteria); 557 kvfree(spec);
567 kvfree(match_value);
568 return err ? ERR_PTR(err) : rule; 558 return err ? ERR_PTR(err) : rule;
569} 559}
570 560
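Reviewer note on the arfs hunks above: the refactor folds the two separately allocated match_criteria/match_value buffers plus the match_criteria_enable byte into a single struct mlx5_flow_spec, so mlx5_add_flow_rule() takes one spec argument and every error path frees one buffer instead of two. A minimal standalone C sketch of the same consolidation pattern; MATCH_PARAM_SZ and struct flow_spec here are illustrative stand-ins, not the real MLX5_ST_SZ_BYTES(fte_match_param) layout or struct mlx5_flow_spec:

	/* Sketch of the flow-spec consolidation above, in plain C.
	 * The sizes and field names are illustrative, not the mlx5_ifc layout. */
	#include <stdlib.h>
	#include <string.h>

	#define MATCH_PARAM_SZ 512

	struct flow_spec {
		unsigned char match_criteria_enable;
		unsigned char match_criteria[MATCH_PARAM_SZ];
		unsigned char match_value[MATCH_PARAM_SZ];
	};

	int main(void)
	{
		/* One zeroed allocation replaces the paired mlx5_vzalloc() calls
		 * for match_criteria and match_value that the hunks above remove. */
		struct flow_spec *spec = calloc(1, sizeof(*spec));

		if (!spec)
			return 1;
		spec->match_criteria_enable = 1 << 0;	/* e.g. MLX5_MATCH_OUTER_HEADERS */
		memset(spec->match_criteria, 0xff, 2);	/* mask a header field */
		spec->match_value[0] = 0x08;		/* and give it a value */
		free(spec);	/* single cleanup path instead of two kvfree() calls */
		return 0;
	}

The payoff is visible in the error paths above: the paired kvfree() calls and the labels guarding them collapse into a single cleanup statement.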
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
new file mode 100644
index 000000000000..673043ccd76c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -0,0 +1,160 @@
1/*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "en.h"
34
35/* mlx5e global resources should be placed in this file.
36 * Global resources are common to all the netdevices created on the same NIC.
37 */
38
39int mlx5e_create_tir(struct mlx5_core_dev *mdev,
40 struct mlx5e_tir *tir, u32 *in, int inlen)
41{
42 int err;
43
44 err = mlx5_core_create_tir(mdev, in, inlen, &tir->tirn);
45 if (err)
46 return err;
47
48 list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
49
50 return 0;
51}
52
53void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
54 struct mlx5e_tir *tir)
55{
56 mlx5_core_destroy_tir(mdev, tir->tirn);
57 list_del(&tir->list);
58}
59
60static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
61 struct mlx5_core_mkey *mkey)
62{
63 struct mlx5_create_mkey_mbox_in *in;
64 int err;
65
66 in = mlx5_vzalloc(sizeof(*in));
67 if (!in)
68 return -ENOMEM;
69
70 in->seg.flags = MLX5_PERM_LOCAL_WRITE |
71 MLX5_PERM_LOCAL_READ |
72 MLX5_ACCESS_MODE_PA;
73 in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
74 in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
75
76 err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL,
77 NULL);
78
79 kvfree(in);
80
81 return err;
82}
83
84int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
85{
86 struct mlx5e_resources *res = &mdev->mlx5e_res;
87 int err;
88
89 err = mlx5_alloc_map_uar(mdev, &res->cq_uar, false);
90 if (err) {
91 mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
92 return err;
93 }
94
95 err = mlx5_core_alloc_pd(mdev, &res->pdn);
96 if (err) {
97 mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
98 goto err_unmap_free_uar;
99 }
100
101 err = mlx5_core_alloc_transport_domain(mdev, &res->td.tdn);
102 if (err) {
103 mlx5_core_err(mdev, "alloc td failed, %d\n", err);
104 goto err_dealloc_pd;
105 }
106
107 err = mlx5e_create_mkey(mdev, res->pdn, &res->mkey);
108 if (err) {
109 mlx5_core_err(mdev, "create mkey failed, %d\n", err);
110 goto err_dealloc_transport_domain;
111 }
112
113 INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
114
115 return 0;
116
117err_dealloc_transport_domain:
118 mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
119err_dealloc_pd:
120 mlx5_core_dealloc_pd(mdev, res->pdn);
121err_unmap_free_uar:
122 mlx5_unmap_free_uar(mdev, &res->cq_uar);
123
124 return err;
125}
126
127void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
128{
129 struct mlx5e_resources *res = &mdev->mlx5e_res;
130
131 mlx5_core_destroy_mkey(mdev, &res->mkey);
132 mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
133 mlx5_core_dealloc_pd(mdev, res->pdn);
134 mlx5_unmap_free_uar(mdev, &res->cq_uar);
135}
136
137int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
138{
139 struct mlx5e_tir *tir;
140 void *in;
141 int inlen;
142	int err = 0;
143
144 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
145 in = mlx5_vzalloc(inlen);
146 if (!in)
147 return -ENOMEM;
148
149 MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
150
151 list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
152 err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
153		if (err)
154			break;	/* don't leak 'in' on error; fall through to kvfree() */
155 }
156
157 kvfree(in);
158
159	return err;
160}
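The new en_common.c acquires its device resources in order (UAR, PD, transport domain, MKey) and releases them in reverse, with the error path unwinding through labels that mirror the setup sequence. A self-contained sketch of that goto-unwind idiom, using placeholder acquire/release functions rather than the mlx5 API:

	/* Sketch of the error-unwind idiom used by mlx5e_create_mdev_resources()
	 * above; acquire_a/b/c and release_a/b are placeholders. */
	#include <stdio.h>

	static int acquire_a(void) { return 0; }
	static int acquire_b(void) { return 0; }
	static int acquire_c(void) { return -1; }	/* pretend the last step fails */
	static void release_a(void) { puts("release a"); }
	static void release_b(void) { puts("release b"); }

	static int create_resources(void)
	{
		int err;

		err = acquire_a();
		if (err)
			return err;
		err = acquire_b();
		if (err)
			goto err_release_a;	/* unwind only what succeeded */
		err = acquire_c();
		if (err)
			goto err_release_b;
		return 0;

	err_release_b:
		release_b();			/* reverse order of acquisition */
	err_release_a:
		release_a();
		return err;
	}

	int main(void)
	{
		return create_resources() ? 1 : 0;
	}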
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index e6883132b555..caa9a3ccc3f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -96,7 +96,7 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
96 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; 96 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
97 break; 97 break;
98 case IEEE_8021QAZ_TSA_ETS: 98 case IEEE_8021QAZ_TSA_ETS:
99 tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC; 99 tc_tx_bw[i] = ets->tc_tx_bw[i];
100 break; 100 break;
101 } 101 }
102 } 102 }
@@ -140,8 +140,12 @@ static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
140 140
141 /* Validate Bandwidth Sum */ 141 /* Validate Bandwidth Sum */
142 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 142 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
143 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) 143 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
144 if (!ets->tc_tx_bw[i])
145 return -EINVAL;
146
144 bw_sum += ets->tc_tx_bw[i]; 147 bw_sum += ets->tc_tx_bw[i];
148 }
145 } 149 }
146 150
147 if (bw_sum != 0 && bw_sum != 100) 151 if (bw_sum != 0 && bw_sum != 100)
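The dcbnl change above tightens validation: a traffic class whose TSA is ETS must now carry a non-zero bandwidth share (the old ?: MLX5E_MIN_BW_ALLOC fallback is dropped), and the shares must still sum to exactly 0 or 100. A worked check with the same semantics in standalone C; TSA_ETS stands in for the dcbnl constant IEEE_8021QAZ_TSA_ETS:

	/* Sketch of the validation rule added above: every ETS TC needs
	 * tc_tx_bw > 0, and the total must be exactly 0 or 100. */
	#include <stdio.h>

	#define MAX_TCS 8
	#define TSA_ETS 2	/* stand-in for IEEE_8021QAZ_TSA_ETS */

	static int validate_ets(const int tsa[MAX_TCS], const int bw[MAX_TCS])
	{
		int i, bw_sum = 0;

		for (i = 0; i < MAX_TCS; i++) {
			if (tsa[i] == TSA_ETS) {
				if (!bw[i])
					return -1;	/* zero share on an ETS TC: rejected */
				bw_sum += bw[i];
			}
		}
		return (bw_sum == 0 || bw_sum == 100) ? 0 : -1;
	}

	int main(void)
	{
		int tsa[MAX_TCS] = { TSA_ETS, TSA_ETS, 0 };
		int ok_bw[MAX_TCS] = { 60, 40, 0 };	/* sums to 100: accepted */
		int bad_bw[MAX_TCS] = { 100, 0, 0 };	/* TC1 is ETS with 0%: rejected */

		printf("%d %d\n", validate_ets(tsa, ok_bw), validate_ets(tsa, bad_bw));
		return 0;
	}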
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 39a4d961a58e..4a3757e60441 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -139,6 +139,18 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
139 return err ? 0 : pfc_en_tx | pfc_en_rx; 139 return err ? 0 : pfc_en_tx | pfc_en_rx;
140} 140}
141 141
142static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
143{
144 struct mlx5_core_dev *mdev = priv->mdev;
145 u32 rx_pause;
146 u32 tx_pause;
147 int err;
148
149 err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
150
151 return err ? false : rx_pause | tx_pause;
152}
153
142#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter)) 154#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
143#define MLX5E_NUM_RQ_STATS(priv) \ 155#define MLX5E_NUM_RQ_STATS(priv) \
144 (NUM_RQ_STATS * priv->params.num_channels * \ 156 (NUM_RQ_STATS * priv->params.num_channels * \
@@ -146,7 +158,9 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
146#define MLX5E_NUM_SQ_STATS(priv) \ 158#define MLX5E_NUM_SQ_STATS(priv) \
147 (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \ 159 (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
148 test_bit(MLX5E_STATE_OPENED, &priv->state)) 160 test_bit(MLX5E_STATE_OPENED, &priv->state))
149#define MLX5E_NUM_PFC_COUNTERS(priv) hweight8(mlx5e_query_pfc_combined(priv)) 161#define MLX5E_NUM_PFC_COUNTERS(priv) \
162 ((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
163 NUM_PPORT_PER_PRIO_PFC_COUNTERS)
150 164
151static int mlx5e_get_sset_count(struct net_device *dev, int sset) 165static int mlx5e_get_sset_count(struct net_device *dev, int sset)
152{ 166{
@@ -175,42 +189,51 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
175 189
176 /* SW counters */ 190 /* SW counters */
177 for (i = 0; i < NUM_SW_COUNTERS; i++) 191 for (i = 0; i < NUM_SW_COUNTERS; i++)
178 strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].name); 192 strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
179 193
180 /* Q counters */ 194 /* Q counters */
181 for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++) 195 for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
182 strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].name); 196 strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
183 197
184 /* VPORT counters */ 198 /* VPORT counters */
185 for (i = 0; i < NUM_VPORT_COUNTERS; i++) 199 for (i = 0; i < NUM_VPORT_COUNTERS; i++)
186 strcpy(data + (idx++) * ETH_GSTRING_LEN, 200 strcpy(data + (idx++) * ETH_GSTRING_LEN,
187 vport_stats_desc[i].name); 201 vport_stats_desc[i].format);
188 202
189 /* PPORT counters */ 203 /* PPORT counters */
190 for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++) 204 for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
191 strcpy(data + (idx++) * ETH_GSTRING_LEN, 205 strcpy(data + (idx++) * ETH_GSTRING_LEN,
192 pport_802_3_stats_desc[i].name); 206 pport_802_3_stats_desc[i].format);
193 207
194 for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++) 208 for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
195 strcpy(data + (idx++) * ETH_GSTRING_LEN, 209 strcpy(data + (idx++) * ETH_GSTRING_LEN,
196 pport_2863_stats_desc[i].name); 210 pport_2863_stats_desc[i].format);
197 211
198 for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++) 212 for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
199 strcpy(data + (idx++) * ETH_GSTRING_LEN, 213 strcpy(data + (idx++) * ETH_GSTRING_LEN,
200 pport_2819_stats_desc[i].name); 214 pport_2819_stats_desc[i].format);
201 215
202 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { 216 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
203 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) 217 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
204 sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s", 218 sprintf(data + (idx++) * ETH_GSTRING_LEN,
205 prio, 219 pport_per_prio_traffic_stats_desc[i].format, prio);
206 pport_per_prio_traffic_stats_desc[i].name);
207 } 220 }
208 221
209 pfc_combined = mlx5e_query_pfc_combined(priv); 222 pfc_combined = mlx5e_query_pfc_combined(priv);
210 for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) { 223 for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
211 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { 224 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
212 sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s", 225 char pfc_string[ETH_GSTRING_LEN];
213 prio, pport_per_prio_pfc_stats_desc[i].name); 226
227 snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
228 sprintf(data + (idx++) * ETH_GSTRING_LEN,
229 pport_per_prio_pfc_stats_desc[i].format, pfc_string);
230 }
231 }
232
233 if (mlx5e_query_global_pause_combined(priv)) {
234 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
235 sprintf(data + (idx++) * ETH_GSTRING_LEN,
236 pport_per_prio_pfc_stats_desc[i].format, "global");
214 } 237 }
215 } 238 }
216 239
@@ -220,16 +243,15 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
220 /* per channel counters */ 243 /* per channel counters */
221 for (i = 0; i < priv->params.num_channels; i++) 244 for (i = 0; i < priv->params.num_channels; i++)
222 for (j = 0; j < NUM_RQ_STATS; j++) 245 for (j = 0; j < NUM_RQ_STATS; j++)
223 sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i, 246 sprintf(data + (idx++) * ETH_GSTRING_LEN,
224 rq_stats_desc[j].name); 247 rq_stats_desc[j].format, i);
225 248
226 for (tc = 0; tc < priv->params.num_tc; tc++) 249 for (tc = 0; tc < priv->params.num_tc; tc++)
227 for (i = 0; i < priv->params.num_channels; i++) 250 for (i = 0; i < priv->params.num_channels; i++)
228 for (j = 0; j < NUM_SQ_STATS; j++) 251 for (j = 0; j < NUM_SQ_STATS; j++)
229 sprintf(data + (idx++) * ETH_GSTRING_LEN, 252 sprintf(data + (idx++) * ETH_GSTRING_LEN,
230 "tx%d_%s", 253 sq_stats_desc[j].format,
231 priv->channeltc_to_txq_map[i][tc], 254 priv->channeltc_to_txq_map[i][tc]);
232 sq_stats_desc[j].name);
233} 255}
234 256
235static void mlx5e_get_strings(struct net_device *dev, 257static void mlx5e_get_strings(struct net_device *dev,
@@ -306,6 +328,13 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
306 } 328 }
307 } 329 }
308 330
331 if (mlx5e_query_global_pause_combined(priv)) {
332 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
333 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
334						  pport_per_prio_pfc_stats_desc, i);
335 }
336 }
337
309 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) 338 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
310 return; 339 return;
311 340
@@ -876,7 +905,7 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
876 mlx5e_build_tir_ctx_hash(tirc, priv); 905 mlx5e_build_tir_ctx_hash(tirc, priv);
877 906
878 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) 907 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
879 mlx5_core_modify_tir(mdev, priv->indir_tirn[i], in, inlen); 908 mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
880} 909}
881 910
882static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, 911static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -898,7 +927,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
898 mutex_lock(&priv->state_lock); 927 mutex_lock(&priv->state_lock);
899 928
900 if (indir) { 929 if (indir) {
901 u32 rqtn = priv->indir_rqtn; 930 u32 rqtn = priv->indir_rqt.rqtn;
902 931
903 memcpy(priv->params.indirection_rqt, indir, 932 memcpy(priv->params.indirection_rqt, indir,
904 sizeof(priv->params.indirection_rqt)); 933 sizeof(priv->params.indirection_rqt));
@@ -931,6 +960,15 @@ static int mlx5e_get_rxnfc(struct net_device *netdev,
931 case ETHTOOL_GRXRINGS: 960 case ETHTOOL_GRXRINGS:
932 info->data = priv->params.num_channels; 961 info->data = priv->params.num_channels;
933 break; 962 break;
963 case ETHTOOL_GRXCLSRLCNT:
964 info->rule_cnt = priv->fs.ethtool.tot_num_rules;
965 break;
966 case ETHTOOL_GRXCLSRULE:
967 err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
968 break;
969 case ETHTOOL_GRXCLSRLALL:
970 err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
971 break;
934 default: 972 default:
935 err = -EOPNOTSUPP; 973 err = -EOPNOTSUPP;
936 break; 974 break;
@@ -1368,6 +1406,26 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
1368 return priv->pflags; 1406 return priv->pflags;
1369} 1407}
1370 1408
1409static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1410{
1411 int err = 0;
1412 struct mlx5e_priv *priv = netdev_priv(dev);
1413
1414 switch (cmd->cmd) {
1415 case ETHTOOL_SRXCLSRLINS:
1416 err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
1417 break;
1418 case ETHTOOL_SRXCLSRLDEL:
1419 err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
1420 break;
1421 default:
1422 err = -EOPNOTSUPP;
1423 break;
1424 }
1425
1426 return err;
1427}
1428
1371const struct ethtool_ops mlx5e_ethtool_ops = { 1429const struct ethtool_ops mlx5e_ethtool_ops = {
1372 .get_drvinfo = mlx5e_get_drvinfo, 1430 .get_drvinfo = mlx5e_get_drvinfo,
1373 .get_link = ethtool_op_get_link, 1431 .get_link = ethtool_op_get_link,
@@ -1387,6 +1445,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
1387 .get_rxfh = mlx5e_get_rxfh, 1445 .get_rxfh = mlx5e_get_rxfh,
1388 .set_rxfh = mlx5e_set_rxfh, 1446 .set_rxfh = mlx5e_set_rxfh,
1389 .get_rxnfc = mlx5e_get_rxnfc, 1447 .get_rxnfc = mlx5e_get_rxnfc,
1448 .set_rxnfc = mlx5e_set_rxnfc,
1390 .get_tunable = mlx5e_get_tunable, 1449 .get_tunable = mlx5e_get_tunable,
1391 .set_tunable = mlx5e_set_tunable, 1450 .set_tunable = mlx5e_set_tunable,
1392 .get_pauseparam = mlx5e_get_pauseparam, 1451 .get_pauseparam = mlx5e_get_pauseparam,
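On the counter bookkeeping in the en_ethtool.c hunks above: the string and value arrays must stay in step, so MLX5E_NUM_PFC_COUNTERS now counts one block of per-prio PFC counters per PFC-enabled priority plus, when global pause is active, one extra block reported under the "global" prefix. A worked check of that arithmetic; the PER_PRIO_PFC_COUNTERS value is illustrative, not the driver's NUM_PPORT_PER_PRIO_PFC_COUNTERS:

	/* Sketch of the sset-count arithmetic above: pfc_combined is a bitmap
	 * of PFC-enabled priorities; global pause adds one more block. */
	#include <stdio.h>

	#define PER_PRIO_PFC_COUNTERS 5	/* illustrative value */

	static int popcount8(unsigned v)
	{
		int n = 0;

		for (; v; v &= v - 1)	/* clear lowest set bit per step */
			n++;
		return n;
	}

	int main(void)
	{
		unsigned pfc_combined = 0x07;	/* PFC on priorities 0..2 */
		int global_pause = 1;
		int num = (global_pause + popcount8(pfc_combined)) *
			  PER_PRIO_PFC_COUNTERS;

		printf("%d\n", num);	/* (1 + 3) * 5 = 20 strings/values */
		return 0;
	}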
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index b32740092854..1587a9fd5724 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -156,19 +156,18 @@ enum mlx5e_vlan_rule_type {
156 156
157static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, 157static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
158 enum mlx5e_vlan_rule_type rule_type, 158 enum mlx5e_vlan_rule_type rule_type,
159 u16 vid, u32 *mc, u32 *mv) 159 u16 vid, struct mlx5_flow_spec *spec)
160{ 160{
161 struct mlx5_flow_table *ft = priv->fs.vlan.ft.t; 161 struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
162 struct mlx5_flow_destination dest; 162 struct mlx5_flow_destination dest;
163 u8 match_criteria_enable = 0;
164 struct mlx5_flow_rule **rule_p; 163 struct mlx5_flow_rule **rule_p;
165 int err = 0; 164 int err = 0;
166 165
167 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 166 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
168 dest.ft = priv->fs.l2.ft.t; 167 dest.ft = priv->fs.l2.ft.t;
169 168
170 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 169 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
171 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); 170 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
172 171
173 switch (rule_type) { 172 switch (rule_type) {
174 case MLX5E_VLAN_RULE_TYPE_UNTAGGED: 173 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
@@ -176,17 +175,19 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
176 break; 175 break;
177 case MLX5E_VLAN_RULE_TYPE_ANY_VID: 176 case MLX5E_VLAN_RULE_TYPE_ANY_VID:
178 rule_p = &priv->fs.vlan.any_vlan_rule; 177 rule_p = &priv->fs.vlan.any_vlan_rule;
179 MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1); 178 MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
180 break; 179 break;
181 default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ 180 default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
182 rule_p = &priv->fs.vlan.active_vlans_rule[vid]; 181 rule_p = &priv->fs.vlan.active_vlans_rule[vid];
183 MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1); 182 MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
184 MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); 183 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
185 MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid); 184 outer_headers.first_vid);
185 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
186 vid);
186 break; 187 break;
187 } 188 }
188 189
189 *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, 190 *rule_p = mlx5_add_flow_rule(ft, spec,
190 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 191 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
191 MLX5_FS_DEFAULT_FLOW_TAG, 192 MLX5_FS_DEFAULT_FLOW_TAG,
192 &dest); 193 &dest);
@@ -203,27 +204,21 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
203static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv, 204static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
204 enum mlx5e_vlan_rule_type rule_type, u16 vid) 205 enum mlx5e_vlan_rule_type rule_type, u16 vid)
205{ 206{
206 u32 *match_criteria; 207 struct mlx5_flow_spec *spec;
207 u32 *match_value;
208 int err = 0; 208 int err = 0;
209 209
210 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); 210 spec = mlx5_vzalloc(sizeof(*spec));
211 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); 211 if (!spec) {
212 if (!match_value || !match_criteria) {
213 netdev_err(priv->netdev, "%s: alloc failed\n", __func__); 212 netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
214 err = -ENOMEM; 213 return -ENOMEM;
215 goto add_vlan_rule_out;
216 } 214 }
217 215
218 if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID) 216 if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
219 mlx5e_vport_context_update_vlans(priv); 217 mlx5e_vport_context_update_vlans(priv);
220 218
221 err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria, 219 err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
222 match_value);
223 220
224add_vlan_rule_out: 221 kvfree(spec);
225 kvfree(match_criteria);
226 kvfree(match_value);
227 222
228 return err; 223 return err;
229} 224}
@@ -598,32 +593,27 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
598 u8 proto) 593 u8 proto)
599{ 594{
600 struct mlx5_flow_rule *rule; 595 struct mlx5_flow_rule *rule;
601 u8 match_criteria_enable = 0; 596 struct mlx5_flow_spec *spec;
602 u32 *match_criteria;
603 u32 *match_value;
604 int err = 0; 597 int err = 0;
605 598
606 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); 599 spec = mlx5_vzalloc(sizeof(*spec));
607 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); 600 if (!spec) {
608 if (!match_value || !match_criteria) {
609 netdev_err(priv->netdev, "%s: alloc failed\n", __func__); 601 netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
610 err = -ENOMEM; 602 return ERR_PTR(-ENOMEM);
611 goto out;
612 } 603 }
613 604
614 if (proto) { 605 if (proto) {
615 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 606 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
616 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ip_protocol); 607 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
617 MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, proto); 608 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
618 } 609 }
619 if (etype) { 610 if (etype) {
620 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 611 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
621 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ethertype); 612 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
622 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, etype); 613 MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
623 } 614 }
624 615
625 rule = mlx5_add_flow_rule(ft, match_criteria_enable, 616 rule = mlx5_add_flow_rule(ft, spec,
626 match_criteria, match_value,
627 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 617 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
628 MLX5_FS_DEFAULT_FLOW_TAG, 618 MLX5_FS_DEFAULT_FLOW_TAG,
629 dest); 619 dest);
@@ -631,9 +621,8 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
631 err = PTR_ERR(rule); 621 err = PTR_ERR(rule);
632 netdev_err(priv->netdev, "%s: add rule failed\n", __func__); 622 netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
633 } 623 }
634out: 624
635 kvfree(match_criteria); 625 kvfree(spec);
636 kvfree(match_value);
637 return err ? ERR_PTR(err) : rule; 626 return err ? ERR_PTR(err) : rule;
638} 627}
639 628
@@ -655,7 +644,7 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
655 if (tt == MLX5E_TT_ANY) 644 if (tt == MLX5E_TT_ANY)
656 dest.tir_num = priv->direct_tir[0].tirn; 645 dest.tir_num = priv->direct_tir[0].tirn;
657 else 646 else
658 dest.tir_num = priv->indir_tirn[tt]; 647 dest.tir_num = priv->indir_tir[tt].tirn;
659 rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, 648 rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
660 ttc_rules[tt].etype, 649 ttc_rules[tt].etype,
661 ttc_rules[tt].proto); 650 ttc_rules[tt].proto);
@@ -792,24 +781,20 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
792{ 781{
793 struct mlx5_flow_table *ft = priv->fs.l2.ft.t; 782 struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
794 struct mlx5_flow_destination dest; 783 struct mlx5_flow_destination dest;
795 u8 match_criteria_enable = 0; 784 struct mlx5_flow_spec *spec;
796 u32 *match_criteria;
797 u32 *match_value;
798 int err = 0; 785 int err = 0;
799 u8 *mc_dmac; 786 u8 *mc_dmac;
800 u8 *mv_dmac; 787 u8 *mv_dmac;
801 788
802 match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); 789 spec = mlx5_vzalloc(sizeof(*spec));
803 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); 790 if (!spec) {
804 if (!match_value || !match_criteria) {
805 netdev_err(priv->netdev, "%s: alloc failed\n", __func__); 791 netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
806 err = -ENOMEM; 792 return -ENOMEM;
807 goto add_l2_rule_out;
808 } 793 }
809 794
810 mc_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, 795 mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
811 outer_headers.dmac_47_16); 796 outer_headers.dmac_47_16);
812 mv_dmac = MLX5_ADDR_OF(fte_match_param, match_value, 797 mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
813 outer_headers.dmac_47_16); 798 outer_headers.dmac_47_16);
814 799
815 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; 800 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
@@ -817,13 +802,13 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
817 802
818 switch (type) { 803 switch (type) {
819 case MLX5E_FULLMATCH: 804 case MLX5E_FULLMATCH:
820 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 805 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
821 eth_broadcast_addr(mc_dmac); 806 eth_broadcast_addr(mc_dmac);
822 ether_addr_copy(mv_dmac, ai->addr); 807 ether_addr_copy(mv_dmac, ai->addr);
823 break; 808 break;
824 809
825 case MLX5E_ALLMULTI: 810 case MLX5E_ALLMULTI:
826 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 811 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
827 mc_dmac[0] = 0x01; 812 mc_dmac[0] = 0x01;
828 mv_dmac[0] = 0x01; 813 mv_dmac[0] = 0x01;
829 break; 814 break;
@@ -832,8 +817,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
832 break; 817 break;
833 } 818 }
834 819
835 ai->rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria, 820 ai->rule = mlx5_add_flow_rule(ft, spec,
836 match_value,
837 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 821 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
838 MLX5_FS_DEFAULT_FLOW_TAG, &dest); 822 MLX5_FS_DEFAULT_FLOW_TAG, &dest);
839 if (IS_ERR(ai->rule)) { 823 if (IS_ERR(ai->rule)) {
@@ -843,9 +827,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
843 ai->rule = NULL; 827 ai->rule = NULL;
844 } 828 }
845 829
846add_l2_rule_out: 830 kvfree(spec);
847 kvfree(match_criteria);
848 kvfree(match_value);
849 831
850 return err; 832 return err;
851} 833}
@@ -1102,6 +1084,8 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
1102 goto err_destroy_l2_table; 1084 goto err_destroy_l2_table;
1103 } 1085 }
1104 1086
1087 mlx5e_ethtool_init_steering(priv);
1088
1105 return 0; 1089 return 0;
1106 1090
1107err_destroy_l2_table: 1091err_destroy_l2_table:
@@ -1121,4 +1105,5 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
1121 mlx5e_destroy_l2_table(priv); 1105 mlx5e_destroy_l2_table(priv);
1122 mlx5e_destroy_ttc_table(priv); 1106 mlx5e_destroy_ttc_table(priv);
1123 mlx5e_arfs_destroy_tables(priv); 1107 mlx5e_arfs_destroy_tables(priv);
1108 mlx5e_ethtool_cleanup_steering(priv);
1124} 1109}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
new file mode 100644
index 000000000000..d17c24227900
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -0,0 +1,586 @@
1/*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/mlx5/fs.h>
34#include "en.h"
35
36struct mlx5e_ethtool_rule {
37 struct list_head list;
38 struct ethtool_rx_flow_spec flow_spec;
39 struct mlx5_flow_rule *rule;
40 struct mlx5e_ethtool_table *eth_ft;
41};
42
43static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
44{
45 if (!--eth_ft->num_rules) {
46 mlx5_destroy_flow_table(eth_ft->ft);
47 eth_ft->ft = NULL;
48 }
49}
50
51#define MLX5E_ETHTOOL_L3_L4_PRIO 0
52#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
53#define MLX5E_ETHTOOL_NUM_ENTRIES 64000
54#define MLX5E_ETHTOOL_NUM_GROUPS 10
55static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
56 struct ethtool_rx_flow_spec *fs,
57 int num_tuples)
58{
59 struct mlx5e_ethtool_table *eth_ft;
60 struct mlx5_flow_namespace *ns;
61 struct mlx5_flow_table *ft;
62 int max_tuples;
63 int table_size;
64 int prio;
65
66 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
67 case TCP_V4_FLOW:
68 case UDP_V4_FLOW:
69 max_tuples = ETHTOOL_NUM_L3_L4_FTS;
70 prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
71 eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
72 break;
73 case IP_USER_FLOW:
74 max_tuples = ETHTOOL_NUM_L3_L4_FTS;
75 prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
76 eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
77 break;
78 case ETHER_FLOW:
79 max_tuples = ETHTOOL_NUM_L2_FTS;
80 prio = max_tuples - num_tuples;
81 eth_ft = &priv->fs.ethtool.l2_ft[prio];
82 prio += MLX5E_ETHTOOL_L2_PRIO;
83 break;
84 default:
85 return ERR_PTR(-EINVAL);
86 }
87
88 eth_ft->num_rules++;
89 if (eth_ft->ft)
90 return eth_ft;
91
92 ns = mlx5_get_flow_namespace(priv->mdev,
93 MLX5_FLOW_NAMESPACE_ETHTOOL);
94 if (!ns)
95 return ERR_PTR(-ENOTSUPP);
96
97 table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
98 flow_table_properties_nic_receive.log_max_ft_size)),
99 MLX5E_ETHTOOL_NUM_ENTRIES);
100 ft = mlx5_create_auto_grouped_flow_table(ns, prio,
101 table_size,
102 MLX5E_ETHTOOL_NUM_GROUPS, 0);
103 if (IS_ERR(ft))
104 return (void *)ft;
105
106 eth_ft->ft = ft;
107 return eth_ft;
108}
109
110static void mask_spec(u8 *mask, u8 *val, size_t size)
111{
112 unsigned int i;
113
114 for (i = 0; i < size; i++, mask++, val++)
115		*val = *mask & *val;
116}
117
118static void set_ips(void *outer_headers_v, void *outer_headers_c, __be32 ip4src_m,
119 __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
120{
121 if (ip4src_m) {
122 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
123 src_ipv4_src_ipv6.ipv4_layout.ipv4),
124 &ip4src_v, sizeof(ip4src_v));
125 memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
126 src_ipv4_src_ipv6.ipv4_layout.ipv4),
127 0xff, sizeof(ip4src_m));
128 }
129 if (ip4dst_m) {
130 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
131 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
132 &ip4dst_v, sizeof(ip4dst_v));
133 memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
134 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
135 0xff, sizeof(ip4dst_m));
136 }
137 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
138 ethertype, ETH_P_IP);
139 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
140 ethertype, 0xffff);
141}
142
143static int set_flow_attrs(u32 *match_c, u32 *match_v,
144 struct ethtool_rx_flow_spec *fs)
145{
146 void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
147 outer_headers);
148 void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
149 outer_headers);
150 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
151 struct ethtool_tcpip4_spec *l4_mask;
152 struct ethtool_tcpip4_spec *l4_val;
153 struct ethtool_usrip4_spec *l3_mask;
154 struct ethtool_usrip4_spec *l3_val;
155 struct ethhdr *eth_val;
156 struct ethhdr *eth_mask;
157
158 switch (flow_type) {
159 case TCP_V4_FLOW:
160 l4_mask = &fs->m_u.tcp_ip4_spec;
161 l4_val = &fs->h_u.tcp_ip4_spec;
162 set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
163 l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
164
165 if (l4_mask->psrc) {
166 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
167 0xffff);
168 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
169 ntohs(l4_val->psrc));
170 }
171 if (l4_mask->pdst) {
172 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
173 0xffff);
174 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
175 ntohs(l4_val->pdst));
176 }
177 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
178 0xffff);
179 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
180 IPPROTO_TCP);
181 break;
182 case UDP_V4_FLOW:
183 l4_mask = &fs->m_u.tcp_ip4_spec;
184 l4_val = &fs->h_u.tcp_ip4_spec;
185 set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
186 l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
187
188 if (l4_mask->psrc) {
189 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
190 0xffff);
191 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
192 ntohs(l4_val->psrc));
193 }
194 if (l4_mask->pdst) {
195 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
196 0xffff);
197 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
198 ntohs(l4_val->pdst));
199 }
200 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
201 0xffff);
202 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
203 IPPROTO_UDP);
204 break;
205 case IP_USER_FLOW:
206 l3_mask = &fs->m_u.usr_ip4_spec;
207 l3_val = &fs->h_u.usr_ip4_spec;
208 set_ips(outer_headers_v, outer_headers_c, l3_mask->ip4src,
209 l3_val->ip4src, l3_mask->ip4dst, l3_val->ip4dst);
210 break;
211 case ETHER_FLOW:
212 eth_mask = &fs->m_u.ether_spec;
213 eth_val = &fs->h_u.ether_spec;
214
215 mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
216 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
217 outer_headers_c, smac_47_16),
218 eth_mask->h_source);
219 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
220 outer_headers_v, smac_47_16),
221 eth_val->h_source);
222 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
223 outer_headers_c, dmac_47_16),
224 eth_mask->h_dest);
225 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
226 outer_headers_v, dmac_47_16),
227 eth_val->h_dest);
228 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ethertype,
229 ntohs(eth_mask->h_proto));
230 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ethertype,
231 ntohs(eth_val->h_proto));
232 break;
233 default:
234 return -EINVAL;
235 }
236
237 if ((fs->flow_type & FLOW_EXT) &&
238 (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
239 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
240 vlan_tag, 1);
241 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
242 vlan_tag, 1);
243 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
244 first_vid, 0xfff);
245 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
246 first_vid, ntohs(fs->h_ext.vlan_tci));
247 }
248 if (fs->flow_type & FLOW_MAC_EXT &&
249 !is_zero_ether_addr(fs->m_ext.h_dest)) {
250 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
251 outer_headers_c, dmac_47_16),
252 fs->m_ext.h_dest);
253 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
254 outer_headers_v, dmac_47_16),
255 fs->h_ext.h_dest);
256 }
257
258 return 0;
259}
260
261static void add_rule_to_list(struct mlx5e_priv *priv,
262 struct mlx5e_ethtool_rule *rule)
263{
264 struct mlx5e_ethtool_rule *iter;
265 struct list_head *head = &priv->fs.ethtool.rules;
266
267 list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
268 if (iter->flow_spec.location > rule->flow_spec.location)
269 break;
270 head = &iter->list;
271 }
272 priv->fs.ethtool.tot_num_rules++;
273 list_add(&rule->list, head);
274}
275
276static bool outer_header_zero(u32 *match_criteria)
277{
278	int size = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
279 char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
280 outer_headers);
281
282 return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
283 outer_headers_c + 1,
284 size - 1);
285}
286
287static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv,
288 struct mlx5_flow_table *ft,
289 struct ethtool_rx_flow_spec *fs)
290{
291 struct mlx5_flow_destination *dst = NULL;
292 struct mlx5_flow_spec *spec;
293 struct mlx5_flow_rule *rule;
294 int err = 0;
295 u32 action;
296
297 spec = mlx5_vzalloc(sizeof(*spec));
298 if (!spec)
299 return ERR_PTR(-ENOMEM);
300 err = set_flow_attrs(spec->match_criteria, spec->match_value,
301 fs);
302 if (err)
303 goto free;
304
305 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
306 action = MLX5_FLOW_CONTEXT_ACTION_DROP;
307 } else {
308 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
309 if (!dst) {
310 err = -ENOMEM;
311 goto free;
312 }
313
314 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
315 dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn;
316 action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
317 }
318
319 spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
320 rule = mlx5_add_flow_rule(ft, spec, action,
321 MLX5_FS_DEFAULT_FLOW_TAG, dst);
322 if (IS_ERR(rule)) {
323 err = PTR_ERR(rule);
324 netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
325 __func__, err);
326 goto free;
327 }
328free:
329 kvfree(spec);
330 kfree(dst);
331 return err ? ERR_PTR(err) : rule;
332}
333
334static void del_ethtool_rule(struct mlx5e_priv *priv,
335 struct mlx5e_ethtool_rule *eth_rule)
336{
337 if (eth_rule->rule)
338 mlx5_del_flow_rule(eth_rule->rule);
339 list_del(&eth_rule->list);
340 priv->fs.ethtool.tot_num_rules--;
341 put_flow_table(eth_rule->eth_ft);
342 kfree(eth_rule);
343}
344
345static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
346 int location)
347{
348 struct mlx5e_ethtool_rule *iter;
349
350 list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
351 if (iter->flow_spec.location == location)
352 return iter;
353 }
354 return NULL;
355}
356
357static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
358 int location)
359{
360 struct mlx5e_ethtool_rule *eth_rule;
361
362 eth_rule = find_ethtool_rule(priv, location);
363 if (eth_rule)
364 del_ethtool_rule(priv, eth_rule);
365
366 eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
367 if (!eth_rule)
368 return ERR_PTR(-ENOMEM);
369
370 add_rule_to_list(priv, eth_rule);
371 return eth_rule;
372}
373
374#define MAX_NUM_OF_ETHTOOL_RULES BIT(10)
375
376#define all_ones(field) (field == (__force typeof(field))-1)
377#define all_zeros_or_all_ones(field) \
378 ((field) == 0 || (field) == (__force typeof(field))-1)
379
380static int validate_flow(struct mlx5e_priv *priv,
381 struct ethtool_rx_flow_spec *fs)
382{
383 struct ethtool_tcpip4_spec *l4_mask;
384 struct ethtool_usrip4_spec *l3_mask;
385 struct ethhdr *eth_mask;
386 int num_tuples = 0;
387
388 if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
389 return -EINVAL;
390
391 if (fs->ring_cookie >= priv->params.num_channels &&
392 fs->ring_cookie != RX_CLS_FLOW_DISC)
393 return -EINVAL;
394
395 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
396 case ETHER_FLOW:
397 eth_mask = &fs->m_u.ether_spec;
398 if (!is_zero_ether_addr(eth_mask->h_dest))
399 num_tuples++;
400 if (!is_zero_ether_addr(eth_mask->h_source))
401 num_tuples++;
402 if (eth_mask->h_proto)
403 num_tuples++;
404 break;
405 case TCP_V4_FLOW:
406 case UDP_V4_FLOW:
407 if (fs->m_u.tcp_ip4_spec.tos)
408 return -EINVAL;
409 l4_mask = &fs->m_u.tcp_ip4_spec;
410 if (l4_mask->ip4src) {
411 if (!all_ones(l4_mask->ip4src))
412 return -EINVAL;
413 num_tuples++;
414 }
415 if (l4_mask->ip4dst) {
416 if (!all_ones(l4_mask->ip4dst))
417 return -EINVAL;
418 num_tuples++;
419 }
420 if (l4_mask->psrc) {
421 if (!all_ones(l4_mask->psrc))
422 return -EINVAL;
423 num_tuples++;
424 }
425 if (l4_mask->pdst) {
426 if (!all_ones(l4_mask->pdst))
427 return -EINVAL;
428 num_tuples++;
429 }
430 /* Flow is TCP/UDP */
431 num_tuples++;
432 break;
433 case IP_USER_FLOW:
434 l3_mask = &fs->m_u.usr_ip4_spec;
435 if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
436 fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
437 return -EINVAL;
438 if (l3_mask->ip4src) {
439 if (!all_ones(l3_mask->ip4src))
440 return -EINVAL;
441 num_tuples++;
442 }
443 if (l3_mask->ip4dst) {
444 if (!all_ones(l3_mask->ip4dst))
445 return -EINVAL;
446 num_tuples++;
447 }
448 /* Flow is IPv4 */
449 num_tuples++;
450 break;
451 default:
452 return -EINVAL;
453 }
454 if ((fs->flow_type & FLOW_EXT)) {
455 if (fs->m_ext.vlan_etype ||
456 (fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK)))
457 return -EINVAL;
458
459 if (fs->m_ext.vlan_tci) {
460 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
461 return -EINVAL;
462 }
463 num_tuples++;
464 }
465
466 if (fs->flow_type & FLOW_MAC_EXT &&
467 !is_zero_ether_addr(fs->m_ext.h_dest))
468 num_tuples++;
469
470 return num_tuples;
471}
472
473int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
474 struct ethtool_rx_flow_spec *fs)
475{
476 struct mlx5e_ethtool_table *eth_ft;
477 struct mlx5e_ethtool_rule *eth_rule;
478 struct mlx5_flow_rule *rule;
479 int num_tuples;
480 int err;
481
482 num_tuples = validate_flow(priv, fs);
483 if (num_tuples <= 0) {
484 netdev_warn(priv->netdev, "%s: flow is not valid\n", __func__);
485 return -EINVAL;
486 }
487
488 eth_ft = get_flow_table(priv, fs, num_tuples);
489 if (IS_ERR(eth_ft))
490 return PTR_ERR(eth_ft);
491
492 eth_rule = get_ethtool_rule(priv, fs->location);
493 if (IS_ERR(eth_rule)) {
494 put_flow_table(eth_ft);
495 return PTR_ERR(eth_rule);
496 }
497
498 eth_rule->flow_spec = *fs;
499 eth_rule->eth_ft = eth_ft;
500 if (!eth_ft->ft) {
501 err = -EINVAL;
502 goto del_ethtool_rule;
503 }
504 rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs);
505 if (IS_ERR(rule)) {
506 err = PTR_ERR(rule);
507 goto del_ethtool_rule;
508 }
509
510 eth_rule->rule = rule;
511
512 return 0;
513
514del_ethtool_rule:
515 del_ethtool_rule(priv, eth_rule);
516
517 return err;
518}
519
520int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
521 int location)
522{
523 struct mlx5e_ethtool_rule *eth_rule;
524 int err = 0;
525
526 if (location >= MAX_NUM_OF_ETHTOOL_RULES)
527 return -ENOSPC;
528
529 eth_rule = find_ethtool_rule(priv, location);
530 if (!eth_rule) {
531 err = -ENOENT;
532 goto out;
533 }
534
535 del_ethtool_rule(priv, eth_rule);
536out:
537 return err;
538}
539
540int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
541 int location)
542{
543 struct mlx5e_ethtool_rule *eth_rule;
544
545 if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
546 return -EINVAL;
547
548 list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
549 if (eth_rule->flow_spec.location == location) {
550 info->fs = eth_rule->flow_spec;
551 return 0;
552 }
553 }
554
555 return -ENOENT;
556}
557
558int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
559 u32 *rule_locs)
560{
561 int location = 0;
562 int idx = 0;
563 int err = 0;
564
565 while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
566 err = mlx5e_ethtool_get_flow(priv, info, location);
567 if (!err)
568 rule_locs[idx++] = location;
569 location++;
570 }
571 return err;
572}
573
574void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
575{
576 struct mlx5e_ethtool_rule *iter;
577 struct mlx5e_ethtool_rule *temp;
578
579 list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
580 del_ethtool_rule(priv, iter);
581}
582
583void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
584{
585 INIT_LIST_HEAD(&priv->fs.ethtool.rules);
586}
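One detail in en_fs_ethtool.c worth calling out: outer_header_zero() tests whether the criteria buffer is all zeroes without an explicit loop, by checking the first byte and then memcmp()'ing the buffer against itself shifted by one byte. If buf[0] == 0 and every byte equals its successor, every byte must be zero (memcmp only reads, so the overlap is safe). A standalone demonstration of the trick:

	/* Sketch demonstrating the self-overlapping memcmp() used by
	 * outer_header_zero() above to test a buffer for all-zero bytes. */
	#include <stdio.h>
	#include <string.h>

	static int all_zero(const unsigned char *buf, size_t size)
	{
		/* buf[0] == 0 anchors the chain; memcmp(buf, buf + 1, size - 1)
		 * then requires buf[i] == buf[i + 1] for every i, so each byte
		 * must equal the zero first byte. */
		return buf[0] == 0 && !memcmp(buf, buf + 1, size - 1);
	}

	int main(void)
	{
		unsigned char a[16] = { 0 };
		unsigned char b[16] = { 0 };

		b[7] = 0xff;
		printf("a: %d, b: %d\n", all_zero(a, sizeof(a)), all_zero(b, sizeof(b)));
		return 0;
	}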
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 02a0f1796f7b..611ab550136e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,6 +39,13 @@
39#include "eswitch.h" 39#include "eswitch.h"
40#include "vxlan.h" 40#include "vxlan.h"
41 41
42enum {
43 MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000,
44 MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20,
45 MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
46 MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
47};
48
42struct mlx5e_rq_param { 49struct mlx5e_rq_param {
43 u32 rqc[MLX5_ST_SZ_DW(rqc)]; 50 u32 rqc[MLX5_ST_SZ_DW(rqc)];
44 struct mlx5_wq_param wq; 51 struct mlx5_wq_param wq;
@@ -76,10 +83,13 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv)
76 port_state = mlx5_query_vport_state(mdev, 83 port_state = mlx5_query_vport_state(mdev,
77 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0); 84 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
78 85
79 if (port_state == VPORT_STATE_UP) 86 if (port_state == VPORT_STATE_UP) {
87 netdev_info(priv->netdev, "Link up\n");
80 netif_carrier_on(priv->netdev); 88 netif_carrier_on(priv->netdev);
81 else 89 } else {
90 netdev_info(priv->netdev, "Link down\n");
82 netif_carrier_off(priv->netdev); 91 netif_carrier_off(priv->netdev);
92 }
83} 93}
84 94
85static void mlx5e_update_carrier_work(struct work_struct *work) 95static void mlx5e_update_carrier_work(struct work_struct *work)
@@ -93,6 +103,26 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
93 mutex_unlock(&priv->state_lock); 103 mutex_unlock(&priv->state_lock);
94} 104}
95 105
106static void mlx5e_tx_timeout_work(struct work_struct *work)
107{
108 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
109 tx_timeout_work);
110 int err;
111
112 rtnl_lock();
113 mutex_lock(&priv->state_lock);
114 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
115 goto unlock;
116 mlx5e_close_locked(priv->netdev);
117 err = mlx5e_open_locked(priv->netdev);
118 if (err)
119 netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
120 err);
121unlock:
122 mutex_unlock(&priv->state_lock);
123 rtnl_unlock();
124}
125
96static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) 126static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
97{ 127{
98 struct mlx5e_sw_stats *s = &priv->stats.sw; 128 struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -107,11 +137,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
107 137
108 s->rx_packets += rq_stats->packets; 138 s->rx_packets += rq_stats->packets;
109 s->rx_bytes += rq_stats->bytes; 139 s->rx_bytes += rq_stats->bytes;
110 s->lro_packets += rq_stats->lro_packets; 140 s->rx_lro_packets += rq_stats->lro_packets;
111 s->lro_bytes += rq_stats->lro_bytes; 141 s->rx_lro_bytes += rq_stats->lro_bytes;
112 s->rx_csum_none += rq_stats->csum_none; 142 s->rx_csum_none += rq_stats->csum_none;
113 s->rx_csum_sw += rq_stats->csum_sw; 143 s->rx_csum_complete += rq_stats->csum_complete;
114 s->rx_csum_inner += rq_stats->csum_inner; 144 s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
115 s->rx_wqe_err += rq_stats->wqe_err; 145 s->rx_wqe_err += rq_stats->wqe_err;
116 s->rx_mpwqe_filler += rq_stats->mpwqe_filler; 146 s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
117 s->rx_mpwqe_frag += rq_stats->mpwqe_frag; 147 s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
@@ -124,24 +154,23 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
124 154
125 s->tx_packets += sq_stats->packets; 155 s->tx_packets += sq_stats->packets;
126 s->tx_bytes += sq_stats->bytes; 156 s->tx_bytes += sq_stats->bytes;
127 s->tso_packets += sq_stats->tso_packets; 157 s->tx_tso_packets += sq_stats->tso_packets;
128 s->tso_bytes += sq_stats->tso_bytes; 158 s->tx_tso_bytes += sq_stats->tso_bytes;
129 s->tso_inner_packets += sq_stats->tso_inner_packets; 159 s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
130 s->tso_inner_bytes += sq_stats->tso_inner_bytes; 160 s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
131 s->tx_queue_stopped += sq_stats->stopped; 161 s->tx_queue_stopped += sq_stats->stopped;
132 s->tx_queue_wake += sq_stats->wake; 162 s->tx_queue_wake += sq_stats->wake;
133 s->tx_queue_dropped += sq_stats->dropped; 163 s->tx_queue_dropped += sq_stats->dropped;
134 s->tx_csum_inner += sq_stats->csum_offload_inner; 164 s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
135 tx_offload_none += sq_stats->csum_offload_none; 165 tx_offload_none += sq_stats->csum_none;
136 } 166 }
137 } 167 }
138 168
139 /* Update calculated offload counters */ 169 /* Update calculated offload counters */
140 s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner; 170 s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
141 s->rx_csum_good = s->rx_packets - s->rx_csum_none - 171 s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
142 s->rx_csum_sw;
143 172
144 s->link_down_events = MLX5_GET(ppcnt_reg, 173 s->link_down_events_phy = MLX5_GET(ppcnt_reg,
145 priv->stats.pport.phy_counters, 174 priv->stats.pport.phy_counters,
146 counter_set.phys_layer_cntrs.link_down_events); 175 counter_set.phys_layer_cntrs.link_down_events);
147} 176}
@@ -227,14 +256,14 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
227 mlx5e_update_sw_counters(priv); 256 mlx5e_update_sw_counters(priv);
228} 257}
229 258
230static void mlx5e_update_stats_work(struct work_struct *work) 259void mlx5e_update_stats_work(struct work_struct *work)
231{ 260{
232 struct delayed_work *dwork = to_delayed_work(work); 261 struct delayed_work *dwork = to_delayed_work(work);
233 struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv, 262 struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
234 update_stats_work); 263 update_stats_work);
235 mutex_lock(&priv->state_lock); 264 mutex_lock(&priv->state_lock);
236 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { 265 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
237 mlx5e_update_stats(priv); 266 priv->profile->update_stats(priv);
238 queue_delayed_work(priv->wq, dwork, 267 queue_delayed_work(priv->wq, dwork,
239 msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL)); 268 msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
240 } 269 }
@@ -246,7 +275,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
246{ 275{
247 struct mlx5e_priv *priv = vpriv; 276 struct mlx5e_priv *priv = vpriv;
248 277
249 if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state)) 278 if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
250 return; 279 return;
251 280
252 switch (event) { 281 switch (event) {
@@ -262,12 +291,12 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
262 291
263static void mlx5e_enable_async_events(struct mlx5e_priv *priv) 292static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
264{ 293{
265 set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); 294 set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
266} 295}
267 296
268static void mlx5e_disable_async_events(struct mlx5e_priv *priv) 297static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
269{ 298{
270 clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); 299 clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
271 synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC)); 300 synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
272} 301}
273 302
@@ -308,6 +337,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
308 } 337 }
309 rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq; 338 rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
310 rq->alloc_wqe = mlx5e_alloc_rx_mpwqe; 339 rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
340 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
311 341
312 rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz); 342 rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
313 rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides); 343 rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
@@ -323,6 +353,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
323 } 353 }
324 rq->handle_rx_cqe = mlx5e_handle_rx_cqe; 354 rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
325 rq->alloc_wqe = mlx5e_alloc_rx_wqe; 355 rq->alloc_wqe = mlx5e_alloc_rx_wqe;
356 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
326 357
327 rq->wqe_sz = (priv->params.lro_en) ? 358 rq->wqe_sz = (priv->params.lro_en) ?
328 priv->params.lro_wqe_sz : 359 priv->params.lro_wqe_sz :
@@ -534,12 +565,19 @@ err_destroy_rq:
534 565
535static void mlx5e_close_rq(struct mlx5e_rq *rq) 566static void mlx5e_close_rq(struct mlx5e_rq *rq)
536{ 567{
568 int tout = 0;
569 int err;
570
537 clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); 571 clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
538 napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ 572 napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
539 573
540 mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); 574 err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
541 while (!mlx5_wq_ll_is_empty(&rq->wq)) 575 while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
542 msleep(20); 576 tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
577 msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
578
579 if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
580 set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
543 581
544 /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */ 582 /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
545 napi_synchronize(&rq->channel->napi); 583 napi_synchronize(&rq->channel->napi);
@@ -547,6 +585,7 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
547 cancel_work_sync(&rq->am.work); 585 cancel_work_sync(&rq->am.work);
548 586
549 mlx5e_disable_rq(rq); 587 mlx5e_disable_rq(rq);
588 mlx5e_free_rx_descs(rq);
550 mlx5e_destroy_rq(rq); 589 mlx5e_destroy_rq(rq);
551} 590}
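
The mlx5e_close_rq() change replaces an unbounded "while (!empty) msleep(20)" wait with a bounded poll that latches MLX5E_RQ_STATE_FLUSH_TIMEOUT on failure, so a wedged queue can no longer hang close forever. A self-contained sketch of that bounded-flush pattern; the constants and helpers are illustrative stand-ins, not the driver's:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define FLUSH_MAX_ITER   8        /* stands in for MLX5_EN_QP_FLUSH_MAX_ITER */
#define FLUSH_SLEEP_USEC 20000    /* ~20 ms, like MLX5_EN_QP_FLUSH_MSLEEP_QUANT */

static int pending = 3;           /* fake count of in-flight WQEs */

static bool queue_is_empty(void)    { return pending == 0; }
static void hw_makes_progress(void) { if (pending) pending--; }

static bool flush_queue(void)
{
    int tout = 0;

    while (!queue_is_empty() && tout++ < FLUSH_MAX_ITER) {
        usleep(FLUSH_SLEEP_USEC);
        hw_makes_progress();      /* in the driver, the NIC drains the queue */
    }

    return queue_is_empty();      /* false -> caller latches a timeout flag */
}

int main(void)
{
    puts(flush_queue() ? "queue drained" : "flush timed out");
    return 0;
}
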
552 591
@@ -590,7 +629,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
590 void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq); 629 void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
591 int err; 630 int err;
592 631
593 err = mlx5_alloc_map_uar(mdev, &sq->uar, true); 632 err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
594 if (err) 633 if (err)
595 return err; 634 return err;
596 635
@@ -801,6 +840,9 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
801 840
802static void mlx5e_close_sq(struct mlx5e_sq *sq) 841static void mlx5e_close_sq(struct mlx5e_sq *sq)
803{ 842{
843 int tout = 0;
844 int err;
845
804 if (sq->txq) { 846 if (sq->txq) {
805 clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); 847 clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
806 /* prevent netif_tx_wake_queue */ 848 /* prevent netif_tx_wake_queue */
@@ -811,16 +853,24 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
811 if (mlx5e_sq_has_room_for(sq, 1)) 853 if (mlx5e_sq_has_room_for(sq, 1))
812 mlx5e_send_nop(sq, true); 854 mlx5e_send_nop(sq, true);
813 855
814 mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR, 856 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
815 false, 0); 857 MLX5_SQC_STATE_ERR, false, 0);
858 if (err)
859 set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
816 } 860 }
817 861
818 while (sq->cc != sq->pc) /* wait till sq is empty */ 862 /* wait till sq is empty, unless a TX timeout occurred on this SQ */
819 msleep(20); 863 while (sq->cc != sq->pc &&
864 !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
865 msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
866 if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
867 set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
868 }
820 869
821 /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */ 870 /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
822 napi_synchronize(&sq->channel->napi); 871 napi_synchronize(&sq->channel->napi);
823 872
873 mlx5e_free_tx_descs(sq);
824 mlx5e_disable_sq(sq); 874 mlx5e_disable_sq(sq);
825 mlx5e_destroy_sq(sq); 875 mlx5e_destroy_sq(sq);
826} 876}
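
The "sq->cc != sq->pc" test in mlx5e_close_sq() is the usual ring-buffer emptiness check: a producer counter advances on post, a consumer counter advances on completion, and the ring is empty exactly when they meet (wraparound is harmless with fixed-width unsigned counters). A minimal sketch with invented names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
    uint16_t cc;                      /* consumer counter: bumped on completion */
    uint16_t pc;                      /* producer counter: bumped on post */
};

static bool ring_empty(const struct ring *r) { return r->cc == r->pc; }
static void post(struct ring *r)             { r->pc++; }
static void complete(struct ring *r)         { r->cc++; }

int main(void)
{
    struct ring r = { 0, 0 };

    post(&r);
    post(&r);
    printf("empty after 2 posts? %d\n", ring_empty(&r));        /* 0 */
    complete(&r);
    complete(&r);
    printf("empty after 2 completions? %d\n", ring_empty(&r));  /* 1 */
    return 0;
}
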
@@ -859,7 +909,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
859 mcq->comp = mlx5e_completion_event; 909 mcq->comp = mlx5e_completion_event;
860 mcq->event = mlx5e_cq_error_event; 910 mcq->event = mlx5e_cq_error_event;
861 mcq->irqn = irqn; 911 mcq->irqn = irqn;
862 mcq->uar = &priv->cq_uar; 912 mcq->uar = &mdev->mlx5e_res.cq_uar;
863 913
864 for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { 914 for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
865 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); 915 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
@@ -1037,7 +1087,7 @@ static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
1037{ 1087{
1038 int i; 1088 int i;
1039 1089
1040 for (i = 0; i < MLX5E_MAX_NUM_TC; i++) 1090 for (i = 0; i < priv->profile->max_tc; i++)
1041 priv->channeltc_to_txq_map[ix][i] = 1091 priv->channeltc_to_txq_map[ix][i] =
1042 ix + i * priv->params.num_channels; 1092 ix + i * priv->params.num_channels;
1043} 1093}
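
The loop above fills channeltc_to_txq_map with "ix + i * num_channels", i.e. TX queues are laid out per-TC in blocks of num_channels. A quick sketch that prints the resulting layout for example sizes (4 channels, 2 TCs; the numbers are arbitrary):

#include <stdio.h>

int main(void)
{
    const int num_channels = 4, max_tc = 2;

    for (int ch = 0; ch < num_channels; ch++)
        for (int tc = 0; tc < max_tc; tc++)
            printf("channel %d, tc %d -> txq %d\n",
                   ch, tc, ch + tc * num_channels);
    return 0;
}
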
@@ -1137,7 +1187,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1137 c->cpu = cpu; 1187 c->cpu = cpu;
1138 c->pdev = &priv->mdev->pdev->dev; 1188 c->pdev = &priv->mdev->pdev->dev;
1139 c->netdev = priv->netdev; 1189 c->netdev = priv->netdev;
1140 c->mkey_be = cpu_to_be32(priv->mkey.key); 1190 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
1141 c->num_tc = priv->params.num_tc; 1191 c->num_tc = priv->params.num_tc;
1142 1192
1143 if (priv->params.rx_am_enabled) 1193 if (priv->params.rx_am_enabled)
@@ -1253,7 +1303,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
1253 MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); 1303 MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
1254 MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); 1304 MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
1255 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); 1305 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
1256 MLX5_SET(wq, wq, pd, priv->pdn); 1306 MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
1257 MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter); 1307 MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
1258 1308
1259 param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); 1309 param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
@@ -1278,7 +1328,7 @@ static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
1278 void *wq = MLX5_ADDR_OF(sqc, sqc, wq); 1328 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1279 1329
1280 MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); 1330 MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
1281 MLX5_SET(wq, wq, pd, priv->pdn); 1331 MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
1282 1332
1283 param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); 1333 param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
1284} 1334}
@@ -1300,7 +1350,7 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
1300{ 1350{
1301 void *cqc = param->cqc; 1351 void *cqc = param->cqc;
1302 1352
1303 MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index); 1353 MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index);
1304} 1354}
1305 1355
1306static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, 1356static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
@@ -1487,7 +1537,8 @@ static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
1487 MLX5_SET(rqtc, rqtc, rq_num[0], rqn); 1537 MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
1488} 1538}
1489 1539
1490static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn) 1540static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
1541 int ix, struct mlx5e_rqt *rqt)
1491{ 1542{
1492 struct mlx5_core_dev *mdev = priv->mdev; 1543 struct mlx5_core_dev *mdev = priv->mdev;
1493 void *rqtc; 1544 void *rqtc;
@@ -1510,34 +1561,36 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn)
1510 else 1561 else
1511 mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix); 1562 mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
1512 1563
1513 err = mlx5_core_create_rqt(mdev, in, inlen, rqtn); 1564 err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
1565 if (!err)
1566 rqt->enabled = true;
1514 1567
1515 kvfree(in); 1568 kvfree(in);
1516 return err; 1569 return err;
1517} 1570}
1518 1571
1519static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, u32 rqtn) 1572void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
1520{ 1573{
1521 mlx5_core_destroy_rqt(priv->mdev, rqtn); 1574 rqt->enabled = false;
1575 mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
1522} 1576}
1523 1577
1524static int mlx5e_create_rqts(struct mlx5e_priv *priv) 1578static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv)
1525{ 1579{
1526 int nch = mlx5e_get_max_num_channels(priv->mdev); 1580 struct mlx5e_rqt *rqt = &priv->indir_rqt;
1527 u32 *rqtn; 1581
1582 return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt);
1583}
1584
1585int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
1586{
1587 struct mlx5e_rqt *rqt;
1528 int err; 1588 int err;
1529 int ix; 1589 int ix;
1530 1590
1531 /* Indirect RQT */ 1591 for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
1532 rqtn = &priv->indir_rqtn; 1592 rqt = &priv->direct_tir[ix].rqt;
1533 err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqtn); 1593 err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqt);
1534 if (err)
1535 return err;
1536
1537 /* Direct RQTs */
1538 for (ix = 0; ix < nch; ix++) {
1539 rqtn = &priv->direct_tir[ix].rqtn;
1540 err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqtn);
1541 if (err) 1594 if (err)
1542 goto err_destroy_rqts; 1595 goto err_destroy_rqts;
1543 } 1596 }
@@ -1546,24 +1599,11 @@ static int mlx5e_create_rqts(struct mlx5e_priv *priv)
1546 1599
1547err_destroy_rqts: 1600err_destroy_rqts:
1548 for (ix--; ix >= 0; ix--) 1601 for (ix--; ix >= 0; ix--)
1549 mlx5e_destroy_rqt(priv, priv->direct_tir[ix].rqtn); 1602 mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
1550
1551 mlx5e_destroy_rqt(priv, priv->indir_rqtn);
1552 1603
1553 return err; 1604 return err;
1554} 1605}
1555 1606
1556static void mlx5e_destroy_rqts(struct mlx5e_priv *priv)
1557{
1558 int nch = mlx5e_get_max_num_channels(priv->mdev);
1559 int i;
1560
1561 for (i = 0; i < nch; i++)
1562 mlx5e_destroy_rqt(priv, priv->direct_tir[i].rqtn);
1563
1564 mlx5e_destroy_rqt(priv, priv->indir_rqtn);
1565}
1566
1567int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix) 1607int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
1568{ 1608{
1569 struct mlx5_core_dev *mdev = priv->mdev; 1609 struct mlx5_core_dev *mdev = priv->mdev;
@@ -1599,10 +1639,15 @@ static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
1599 u32 rqtn; 1639 u32 rqtn;
1600 int ix; 1640 int ix;
1601 1641
1602 rqtn = priv->indir_rqtn; 1642 if (priv->indir_rqt.enabled) {
1603 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); 1643 rqtn = priv->indir_rqt.rqtn;
1644 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
1645 }
1646
1604 for (ix = 0; ix < priv->params.num_channels; ix++) { 1647 for (ix = 0; ix < priv->params.num_channels; ix++) {
1605 rqtn = priv->direct_tir[ix].rqtn; 1648 if (!priv->direct_tir[ix].rqt.enabled)
1649 continue;
1650 rqtn = priv->direct_tir[ix].rqt.rqtn;
1606 mlx5e_redirect_rqt(priv, rqtn, 1, ix); 1651 mlx5e_redirect_rqt(priv, rqtn, 1, ix);
1607 } 1652 }
1608} 1653}
@@ -1662,13 +1707,13 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
1662 mlx5e_build_tir_ctx_lro(tirc, priv); 1707 mlx5e_build_tir_ctx_lro(tirc, priv);
1663 1708
1664 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { 1709 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
1665 err = mlx5_core_modify_tir(mdev, priv->indir_tirn[tt], in, 1710 err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
1666 inlen); 1711 inlen);
1667 if (err) 1712 if (err)
1668 goto free_in; 1713 goto free_in;
1669 } 1714 }
1670 1715
1671 for (ix = 0; ix < mlx5e_get_max_num_channels(mdev); ix++) { 1716 for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
1672 err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, 1717 err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
1673 in, inlen); 1718 in, inlen);
1674 if (err) 1719 if (err)
@@ -1681,40 +1726,6 @@ free_in:
1681 return err; 1726 return err;
1682} 1727}
1683 1728
1684static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
1685{
1686 void *in;
1687 int inlen;
1688 int err;
1689 int i;
1690
1691 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
1692 in = mlx5_vzalloc(inlen);
1693 if (!in)
1694 return -ENOMEM;
1695
1696 MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
1697
1698 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
1699 err = mlx5_core_modify_tir(priv->mdev, priv->indir_tirn[i], in,
1700 inlen);
1701 if (err)
1702 return err;
1703 }
1704
1705 for (i = 0; i < priv->params.num_channels; i++) {
1706 err = mlx5_core_modify_tir(priv->mdev,
1707 priv->direct_tir[i].tirn, in,
1708 inlen);
1709 if (err)
1710 return err;
1711 }
1712
1713 kvfree(in);
1714
1715 return 0;
1716}
1717
1718static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu) 1729static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
1719{ 1730{
1720 struct mlx5_core_dev *mdev = priv->mdev; 1731 struct mlx5_core_dev *mdev = priv->mdev;
@@ -1776,13 +1787,17 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
1776 1787
1777 netdev_set_num_tc(netdev, ntc); 1788 netdev_set_num_tc(netdev, ntc);
1778 1789
1790 /* Map netdev TCs to offset 0
1791 * We have our own UP to TXQ mapping for QoS
1792 */
1779 for (tc = 0; tc < ntc; tc++) 1793 for (tc = 0; tc < ntc; tc++)
1780 netdev_set_tc_queue(netdev, tc, nch, tc * nch); 1794 netdev_set_tc_queue(netdev, tc, nch, 0);
1781} 1795}
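
Per the new comment, every TC now advertises the same queue range [0, nch) to the stack because the driver resolves user priority to a TX queue itself. A small sketch of the advertised tc -> (count, offset) table; the sizes are arbitrary, and the comment marks what the old mapping computed:

#include <stdio.h>

#define NTC 3   /* number of traffic classes (arbitrary) */
#define NCH 4   /* number of channels (arbitrary) */

struct tc_queue { int count, offset; };

int main(void)
{
    struct tc_queue map[NTC];

    for (int tc = 0; tc < NTC; tc++) {
        map[tc].count  = NCH;
        map[tc].offset = 0;   /* the old code used tc * NCH here */
        printf("tc %d: count=%d offset=%d\n",
               tc, map[tc].count, map[tc].offset);
    }
    return 0;
}
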
1782 1796
1783int mlx5e_open_locked(struct net_device *netdev) 1797int mlx5e_open_locked(struct net_device *netdev)
1784{ 1798{
1785 struct mlx5e_priv *priv = netdev_priv(netdev); 1799 struct mlx5e_priv *priv = netdev_priv(netdev);
1800 struct mlx5_core_dev *mdev = priv->mdev;
1786 int num_txqs; 1801 int num_txqs;
1787 int err; 1802 int err;
1788 1803
@@ -1805,7 +1820,7 @@ int mlx5e_open_locked(struct net_device *netdev)
1805 goto err_clear_state_opened_flag; 1820 goto err_clear_state_opened_flag;
1806 } 1821 }
1807 1822
1808 err = mlx5e_refresh_tirs_self_loopback_enable(priv); 1823 err = mlx5e_refresh_tirs_self_loopback_enable(priv->mdev);
1809 if (err) { 1824 if (err) {
1810 netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n", 1825 netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
1811 __func__, err); 1826 __func__, err);
@@ -1818,9 +1833,14 @@ int mlx5e_open_locked(struct net_device *netdev)
1818#ifdef CONFIG_RFS_ACCEL 1833#ifdef CONFIG_RFS_ACCEL
1819 priv->netdev->rx_cpu_rmap = priv->mdev->rmap; 1834 priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
1820#endif 1835#endif
1836 if (priv->profile->update_stats)
1837 queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
1821 1838
1822 queue_delayed_work(priv->wq, &priv->update_stats_work, 0); 1839 if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
1823 1840 err = mlx5e_add_sqs_fwd_rules(priv);
1841 if (err)
1842 goto err_close_channels;
1843 }
1824 return 0; 1844 return 0;
1825 1845
1826err_close_channels: 1846err_close_channels:
@@ -1830,7 +1850,7 @@ err_clear_state_opened_flag:
1830 return err; 1850 return err;
1831} 1851}
1832 1852
1833static int mlx5e_open(struct net_device *netdev) 1853int mlx5e_open(struct net_device *netdev)
1834{ 1854{
1835 struct mlx5e_priv *priv = netdev_priv(netdev); 1855 struct mlx5e_priv *priv = netdev_priv(netdev);
1836 int err; 1856 int err;
@@ -1845,6 +1865,7 @@ static int mlx5e_open(struct net_device *netdev)
1845int mlx5e_close_locked(struct net_device *netdev) 1865int mlx5e_close_locked(struct net_device *netdev)
1846{ 1866{
1847 struct mlx5e_priv *priv = netdev_priv(netdev); 1867 struct mlx5e_priv *priv = netdev_priv(netdev);
1868 struct mlx5_core_dev *mdev = priv->mdev;
1848 1869
1849 /* May already be CLOSED in case a previous configuration operation 1870 /* May already be CLOSED in case a previous configuration operation
1850 * (e.g RX/TX queue size change) that involves close&open failed. 1871 * (e.g RX/TX queue size change) that involves close&open failed.
@@ -1854,6 +1875,9 @@ int mlx5e_close_locked(struct net_device *netdev)
1854 1875
1855 clear_bit(MLX5E_STATE_OPENED, &priv->state); 1876 clear_bit(MLX5E_STATE_OPENED, &priv->state);
1856 1877
1878 if (MLX5_CAP_GEN(mdev, vport_group_manager))
1879 mlx5e_remove_sqs_fwd_rules(priv);
1880
1857 mlx5e_timestamp_cleanup(priv); 1881 mlx5e_timestamp_cleanup(priv);
1858 netif_carrier_off(priv->netdev); 1882 netif_carrier_off(priv->netdev);
1859 mlx5e_redirect_rqts(priv); 1883 mlx5e_redirect_rqts(priv);
@@ -1862,7 +1886,7 @@ int mlx5e_close_locked(struct net_device *netdev)
1862 return 0; 1886 return 0;
1863} 1887}
1864 1888
1865static int mlx5e_close(struct net_device *netdev) 1889int mlx5e_close(struct net_device *netdev)
1866{ 1890{
1867 struct mlx5e_priv *priv = netdev_priv(netdev); 1891 struct mlx5e_priv *priv = netdev_priv(netdev);
1868 int err; 1892 int err;
@@ -1921,7 +1945,7 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
1921 mcq->comp = mlx5e_completion_event; 1945 mcq->comp = mlx5e_completion_event;
1922 mcq->event = mlx5e_cq_error_event; 1946 mcq->event = mlx5e_cq_error_event;
1923 mcq->irqn = irqn; 1947 mcq->irqn = irqn;
1924 mcq->uar = &priv->cq_uar; 1948 mcq->uar = &mdev->mlx5e_res.cq_uar;
1925 1949
1926 cq->priv = priv; 1950 cq->priv = priv;
1927 1951
@@ -1987,7 +2011,7 @@ static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
1987 memset(in, 0, sizeof(in)); 2011 memset(in, 0, sizeof(in));
1988 2012
1989 MLX5_SET(tisc, tisc, prio, tc << 1); 2013 MLX5_SET(tisc, tisc, prio, tc << 1);
1990 MLX5_SET(tisc, tisc, transport_domain, priv->tdn); 2014 MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
1991 2015
1992 return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); 2016 return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
1993} 2017}
@@ -1997,12 +2021,12 @@ static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
1997 mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); 2021 mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
1998} 2022}
1999 2023
2000static int mlx5e_create_tises(struct mlx5e_priv *priv) 2024int mlx5e_create_tises(struct mlx5e_priv *priv)
2001{ 2025{
2002 int err; 2026 int err;
2003 int tc; 2027 int tc;
2004 2028
2005 for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) { 2029 for (tc = 0; tc < priv->profile->max_tc; tc++) {
2006 err = mlx5e_create_tis(priv, tc); 2030 err = mlx5e_create_tis(priv, tc);
2007 if (err) 2031 if (err)
2008 goto err_close_tises; 2032 goto err_close_tises;
@@ -2017,11 +2041,11 @@ err_close_tises:
2017 return err; 2041 return err;
2018} 2042}
2019 2043
2020static void mlx5e_destroy_tises(struct mlx5e_priv *priv) 2044void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
2021{ 2045{
2022 int tc; 2046 int tc;
2023 2047
2024 for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) 2048 for (tc = 0; tc < priv->profile->max_tc; tc++)
2025 mlx5e_destroy_tis(priv, tc); 2049 mlx5e_destroy_tis(priv, tc);
2026} 2050}
2027 2051
@@ -2030,7 +2054,7 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
2030{ 2054{
2031 void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); 2055 void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2032 2056
2033 MLX5_SET(tirc, tirc, transport_domain, priv->tdn); 2057 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2034 2058
2035#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ 2059#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
2036 MLX5_HASH_FIELD_SEL_DST_IP) 2060 MLX5_HASH_FIELD_SEL_DST_IP)
@@ -2047,7 +2071,7 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
2047 mlx5e_build_tir_ctx_lro(tirc, priv); 2071 mlx5e_build_tir_ctx_lro(tirc, priv);
2048 2072
2049 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); 2073 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2050 MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqtn); 2074 MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
2051 mlx5e_build_tir_ctx_hash(tirc, priv); 2075 mlx5e_build_tir_ctx_hash(tirc, priv);
2052 2076
2053 switch (tt) { 2077 switch (tt) {
@@ -2137,7 +2161,7 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
2137static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, 2161static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
2138 u32 rqtn) 2162 u32 rqtn)
2139{ 2163{
2140 MLX5_SET(tirc, tirc, transport_domain, priv->tdn); 2164 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2141 2165
2142 mlx5e_build_tir_ctx_lro(tirc, priv); 2166 mlx5e_build_tir_ctx_lro(tirc, priv);
2143 2167
@@ -2146,15 +2170,13 @@ static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
2146 MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); 2170 MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
2147} 2171}
2148 2172
2149static int mlx5e_create_tirs(struct mlx5e_priv *priv) 2173static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
2150{ 2174{
2151 int nch = mlx5e_get_max_num_channels(priv->mdev); 2175 struct mlx5e_tir *tir;
2152 void *tirc; 2176 void *tirc;
2153 int inlen; 2177 int inlen;
2154 u32 *tirn;
2155 int err; 2178 int err;
2156 u32 *in; 2179 u32 *in;
2157 int ix;
2158 int tt; 2180 int tt;
2159 2181
2160 inlen = MLX5_ST_SZ_BYTES(create_tir_in); 2182 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
@@ -2162,25 +2184,51 @@ static int mlx5e_create_tirs(struct mlx5e_priv *priv)
2162 if (!in) 2184 if (!in)
2163 return -ENOMEM; 2185 return -ENOMEM;
2164 2186
2165 /* indirect tirs */
2166 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { 2187 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2167 memset(in, 0, inlen); 2188 memset(in, 0, inlen);
2168 tirn = &priv->indir_tirn[tt]; 2189 tir = &priv->indir_tir[tt];
2169 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); 2190 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
2170 mlx5e_build_indir_tir_ctx(priv, tirc, tt); 2191 mlx5e_build_indir_tir_ctx(priv, tirc, tt);
2171 err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn); 2192 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
2172 if (err) 2193 if (err)
2173 goto err_destroy_tirs; 2194 goto err_destroy_tirs;
2174 } 2195 }
2175 2196
2176 /* direct tirs */ 2197 kvfree(in);
2198
2199 return 0;
2200
2201err_destroy_tirs:
2202 for (tt--; tt >= 0; tt--)
2203 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
2204
2205 kvfree(in);
2206
2207 return err;
2208}
2209
2210int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
2211{
2212 int nch = priv->profile->max_nch(priv->mdev);
2213 struct mlx5e_tir *tir;
2214 void *tirc;
2215 int inlen;
2216 int err;
2217 u32 *in;
2218 int ix;
2219
2220 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2221 in = mlx5_vzalloc(inlen);
2222 if (!in)
2223 return -ENOMEM;
2224
2177 for (ix = 0; ix < nch; ix++) { 2225 for (ix = 0; ix < nch; ix++) {
2178 memset(in, 0, inlen); 2226 memset(in, 0, inlen);
2179 tirn = &priv->direct_tir[ix].tirn; 2227 tir = &priv->direct_tir[ix];
2180 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); 2228 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
2181 mlx5e_build_direct_tir_ctx(priv, tirc, 2229 mlx5e_build_direct_tir_ctx(priv, tirc,
2182 priv->direct_tir[ix].rqtn); 2230 priv->direct_tir[ix].rqt.rqtn);
2183 err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn); 2231 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
2184 if (err) 2232 if (err)
2185 goto err_destroy_ch_tirs; 2233 goto err_destroy_ch_tirs;
2186 } 2234 }
@@ -2191,27 +2239,28 @@ static int mlx5e_create_tirs(struct mlx5e_priv *priv)
2191 2239
2192err_destroy_ch_tirs: 2240err_destroy_ch_tirs:
2193 for (ix--; ix >= 0; ix--) 2241 for (ix--; ix >= 0; ix--)
2194 mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[ix].tirn); 2242 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
2195
2196err_destroy_tirs:
2197 for (tt--; tt >= 0; tt--)
2198 mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[tt]);
2199 2243
2200 kvfree(in); 2244 kvfree(in);
2201 2245
2202 return err; 2246 return err;
2203} 2247}
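
Both error paths above unwind with "for (tt--; tt >= 0; tt--)" / "for (ix--; ix >= 0; ix--)": only the entries created before the failure get destroyed, newest first. A self-contained sketch of that partial-rollback idiom (the failure point is simulated):

#include <stdio.h>

#define N 5

static int create_one(int i)
{
    printf("create %d\n", i);
    return i == 3 ? -1 : 0;   /* simulate a failure on the fourth entry */
}

static void destroy_one(int i) { printf("destroy %d\n", i); }

static int create_all(void)
{
    int i, err = 0;

    for (i = 0; i < N; i++) {
        err = create_one(i);
        if (err)
            goto err_rollback;
    }
    return 0;

err_rollback:
    for (i--; i >= 0; i--)    /* undoes 2, 1, 0; entry 3 was never created */
        destroy_one(i);
    return err;
}

int main(void) { return create_all() ? 1 : 0; }
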
2204 2248
2205static void mlx5e_destroy_tirs(struct mlx5e_priv *priv) 2249static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
2206{ 2250{
2207 int nch = mlx5e_get_max_num_channels(priv->mdev);
2208 int i; 2251 int i;
2209 2252
2210 for (i = 0; i < nch; i++)
2211 mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[i].tirn);
2212
2213 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) 2253 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
2214 mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[i]); 2254 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
2255}
2256
2257void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
2258{
2259 int nch = priv->profile->max_nch(priv->mdev);
2260 int i;
2261
2262 for (i = 0; i < nch; i++)
2263 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
2215} 2264}
2216 2265
2217int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd) 2266int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
@@ -2285,7 +2334,7 @@ mqprio:
2285 return mlx5e_setup_tc(dev, tc->tc); 2334 return mlx5e_setup_tc(dev, tc->tc);
2286} 2335}
2287 2336
2288static struct rtnl_link_stats64 * 2337struct rtnl_link_stats64 *
2289mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) 2338mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
2290{ 2339{
2291 struct mlx5e_priv *priv = netdev_priv(dev); 2340 struct mlx5e_priv *priv = netdev_priv(dev);
@@ -2714,6 +2763,29 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
2714 return features; 2763 return features;
2715} 2764}
2716 2765
2766static void mlx5e_tx_timeout(struct net_device *dev)
2767{
2768 struct mlx5e_priv *priv = netdev_priv(dev);
2769 bool sched_work = false;
2770 int i;
2771
2772 netdev_err(dev, "TX timeout detected\n");
2773
2774 for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
2775 struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
2776
2777 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
2778 continue;
2779 sched_work = true;
2780 set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
2781 netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
2782 i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
2783 }
2784
2785 if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
2786 schedule_work(&priv->tx_timeout_work);
2787}
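
mlx5e_tx_timeout() above scans every TX queue, latches MLX5E_SQ_STATE_TX_TIMEOUT on the stopped ones, and schedules the recovery work only if something was actually stuck and the device is open. A userspace sketch of that scan-and-schedule shape; all names are invented:

#include <stdbool.h>
#include <stdio.h>

#define NQUEUES 4

static bool queue_stopped[NQUEUES]   = { false, true, false, true };
static bool queue_timed_out[NQUEUES];
static bool device_open = true;

static void schedule_recovery(void) { puts("recovery work scheduled"); }

static void tx_timeout(void)
{
    bool sched_work = false;

    for (int i = 0; i < NQUEUES; i++) {
        if (!queue_stopped[i])
            continue;               /* queue is still making progress */
        queue_timed_out[i] = true;  /* latched like MLX5E_SQ_STATE_TX_TIMEOUT */
        sched_work = true;
        printf("TX timeout on queue %d\n", i);
    }

    if (sched_work && device_open)
        schedule_recovery();
}

int main(void) { tx_timeout(); return 0; }
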
2788
2717static const struct net_device_ops mlx5e_netdev_ops_basic = { 2789static const struct net_device_ops mlx5e_netdev_ops_basic = {
2718 .ndo_open = mlx5e_open, 2790 .ndo_open = mlx5e_open,
2719 .ndo_stop = mlx5e_close, 2791 .ndo_stop = mlx5e_close,
@@ -2732,6 +2804,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
2732#ifdef CONFIG_RFS_ACCEL 2804#ifdef CONFIG_RFS_ACCEL
2733 .ndo_rx_flow_steer = mlx5e_rx_flow_steer, 2805 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
2734#endif 2806#endif
2807 .ndo_tx_timeout = mlx5e_tx_timeout,
2735}; 2808};
2736 2809
2737static const struct net_device_ops mlx5e_netdev_ops_sriov = { 2810static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@@ -2762,6 +2835,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
2762 .ndo_get_vf_config = mlx5e_get_vf_config, 2835 .ndo_get_vf_config = mlx5e_get_vf_config,
2763 .ndo_set_vf_link_state = mlx5e_set_vf_link_state, 2836 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
2764 .ndo_get_vf_stats = mlx5e_get_vf_stats, 2837 .ndo_get_vf_stats = mlx5e_get_vf_stats,
2838 .ndo_tx_timeout = mlx5e_tx_timeout,
2765}; 2839};
2766 2840
2767static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) 2841static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2893,9 +2967,10 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
2893 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; 2967 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
2894} 2968}
2895 2969
2896static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, 2970static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
2897 struct net_device *netdev, 2971 struct net_device *netdev,
2898 int num_channels) 2972 const struct mlx5e_profile *profile,
2973 void *ppriv)
2899{ 2974{
2900 struct mlx5e_priv *priv = netdev_priv(netdev); 2975 struct mlx5e_priv *priv = netdev_priv(netdev);
2901 u32 link_speed = 0; 2976 u32 link_speed = 0;
@@ -2964,7 +3039,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
2964 sizeof(priv->params.toeplitz_hash_key)); 3039 sizeof(priv->params.toeplitz_hash_key));
2965 3040
2966 mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt, 3041 mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
2967 MLX5E_INDIR_RQT_SIZE, num_channels); 3042 MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
2968 3043
2969 priv->params.lro_wqe_sz = 3044 priv->params.lro_wqe_sz =
2970 MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; 3045 MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
@@ -2975,7 +3050,9 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
2975 3050
2976 priv->mdev = mdev; 3051 priv->mdev = mdev;
2977 priv->netdev = netdev; 3052 priv->netdev = netdev;
2978 priv->params.num_channels = num_channels; 3053 priv->params.num_channels = profile->max_nch(mdev);
3054 priv->profile = profile;
3055 priv->ppriv = ppriv;
2979 3056
2980#ifdef CONFIG_MLX5_CORE_EN_DCB 3057#ifdef CONFIG_MLX5_CORE_EN_DCB
2981 mlx5e_ets_init(priv); 3058 mlx5e_ets_init(priv);
@@ -2985,6 +3062,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
2985 3062
2986 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); 3063 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
2987 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); 3064 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
3065 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
2988 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); 3066 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
2989} 3067}
2990 3068
@@ -3000,7 +3078,11 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
3000 } 3078 }
3001} 3079}
3002 3080
3003static void mlx5e_build_netdev(struct net_device *netdev) 3081static const struct switchdev_ops mlx5e_switchdev_ops = {
3082 .switchdev_port_attr_get = mlx5e_attr_get,
3083};
3084
3085static void mlx5e_build_nic_netdev(struct net_device *netdev)
3004{ 3086{
3005 struct mlx5e_priv *priv = netdev_priv(netdev); 3087 struct mlx5e_priv *priv = netdev_priv(netdev);
3006 struct mlx5_core_dev *mdev = priv->mdev; 3088 struct mlx5_core_dev *mdev = priv->mdev;
@@ -3081,31 +3163,11 @@ static void mlx5e_build_netdev(struct net_device *netdev)
3081 netdev->priv_flags |= IFF_UNICAST_FLT; 3163 netdev->priv_flags |= IFF_UNICAST_FLT;
3082 3164
3083 mlx5e_set_netdev_dev_addr(netdev); 3165 mlx5e_set_netdev_dev_addr(netdev);
3084}
3085 3166
3086static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, 3167#ifdef CONFIG_NET_SWITCHDEV
3087 struct mlx5_core_mkey *mkey) 3168 if (MLX5_CAP_GEN(mdev, vport_group_manager))
3088{ 3169 netdev->switchdev_ops = &mlx5e_switchdev_ops;
3089 struct mlx5_core_dev *mdev = priv->mdev; 3170#endif
3090 struct mlx5_create_mkey_mbox_in *in;
3091 int err;
3092
3093 in = mlx5_vzalloc(sizeof(*in));
3094 if (!in)
3095 return -ENOMEM;
3096
3097 in->seg.flags = MLX5_PERM_LOCAL_WRITE |
3098 MLX5_PERM_LOCAL_READ |
3099 MLX5_ACCESS_MODE_PA;
3100 in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
3101 in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
3102
3103 err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL,
3104 NULL);
3105
3106 kvfree(in);
3107
3108 return err;
3109} 3171}
3110 3172
3111static void mlx5e_create_q_counter(struct mlx5e_priv *priv) 3173static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
@@ -3135,7 +3197,7 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
3135 struct mlx5_mkey_seg *mkc; 3197 struct mlx5_mkey_seg *mkc;
3136 int inlen = sizeof(*in); 3198 int inlen = sizeof(*in);
3137 u64 npages = 3199 u64 npages =
3138 mlx5e_get_max_num_channels(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS; 3200 priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
3139 int err; 3201 int err;
3140 3202
3141 in = mlx5_vzalloc(inlen); 3203 in = mlx5_vzalloc(inlen);
@@ -3150,7 +3212,7 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
3150 MLX5_ACCESS_MODE_MTT; 3212 MLX5_ACCESS_MODE_MTT;
3151 3213
3152 mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); 3214 mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
3153 mkc->flags_pd = cpu_to_be32(priv->pdn); 3215 mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn);
3154 mkc->len = cpu_to_be64(npages << PAGE_SHIFT); 3216 mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
3155 mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages)); 3217 mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
3156 mkc->log2_page_size = PAGE_SHIFT; 3218 mkc->log2_page_size = PAGE_SHIFT;
@@ -3163,160 +3225,233 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
3163 return err; 3225 return err;
3164} 3226}
3165 3227
3166static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) 3228static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
3229 struct net_device *netdev,
3230 const struct mlx5e_profile *profile,
3231 void *ppriv)
3167{ 3232{
3168 struct net_device *netdev; 3233 struct mlx5e_priv *priv = netdev_priv(netdev);
3169 struct mlx5e_priv *priv;
3170 int nch = mlx5e_get_max_num_channels(mdev);
3171 int err;
3172
3173 if (mlx5e_check_required_hca_cap(mdev))
3174 return NULL;
3175 3234
3176 netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), 3235 mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
3177 nch * MLX5E_MAX_NUM_TC, 3236 mlx5e_build_nic_netdev(netdev);
3178 nch); 3237 mlx5e_vxlan_init(priv);
3179 if (!netdev) { 3238}
3180 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
3181 return NULL;
3182 }
3183 3239
3184 mlx5e_build_netdev_priv(mdev, netdev, nch); 3240static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
3185 mlx5e_build_netdev(netdev); 3241{
3242 struct mlx5_core_dev *mdev = priv->mdev;
3243 struct mlx5_eswitch *esw = mdev->priv.eswitch;
3186 3244
3187 netif_carrier_off(netdev); 3245 mlx5e_vxlan_cleanup(priv);
3188 3246
3189 priv = netdev_priv(netdev); 3247 if (MLX5_CAP_GEN(mdev, vport_group_manager))
3248 mlx5_eswitch_unregister_vport_rep(esw, 0);
3249}
3190 3250
3191 priv->wq = create_singlethread_workqueue("mlx5e"); 3251static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
3192 if (!priv->wq) 3252{
3193 goto err_free_netdev; 3253 struct mlx5_core_dev *mdev = priv->mdev;
3254 int err;
3255 int i;
3194 3256
3195 err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false); 3257 err = mlx5e_create_indirect_rqts(priv);
3196 if (err) { 3258 if (err) {
3197 mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); 3259 mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err);
3198 goto err_destroy_wq; 3260 return err;
3199 } 3261 }
3200 3262
3201 err = mlx5_core_alloc_pd(mdev, &priv->pdn); 3263 err = mlx5e_create_direct_rqts(priv);
3202 if (err) { 3264 if (err) {
3203 mlx5_core_err(mdev, "alloc pd failed, %d\n", err); 3265 mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
3204 goto err_unmap_free_uar; 3266 goto err_destroy_indirect_rqts;
3205 } 3267 }
3206 3268
3207 err = mlx5_core_alloc_transport_domain(mdev, &priv->tdn); 3269 err = mlx5e_create_indirect_tirs(priv);
3208 if (err) { 3270 if (err) {
3209 mlx5_core_err(mdev, "alloc td failed, %d\n", err); 3271 mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err);
3210 goto err_dealloc_pd; 3272 goto err_destroy_direct_rqts;
3211 } 3273 }
3212 3274
3213 err = mlx5e_create_mkey(priv, priv->pdn, &priv->mkey); 3275 err = mlx5e_create_direct_tirs(priv);
3214 if (err) { 3276 if (err) {
3215 mlx5_core_err(mdev, "create mkey failed, %d\n", err); 3277 mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
3216 goto err_dealloc_transport_domain; 3278 goto err_destroy_indirect_tirs;
3217 } 3279 }
3218 3280
3219 err = mlx5e_create_umr_mkey(priv); 3281 err = mlx5e_create_flow_steering(priv);
3220 if (err) { 3282 if (err) {
3221 mlx5_core_err(mdev, "create umr mkey failed, %d\n", err); 3283 mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
3222 goto err_destroy_mkey; 3284 goto err_destroy_direct_tirs;
3223 } 3285 }
3224 3286
3287 err = mlx5e_tc_init(priv);
3288 if (err)
3289 goto err_destroy_flow_steering;
3290
3291 return 0;
3292
3293err_destroy_flow_steering:
3294 mlx5e_destroy_flow_steering(priv);
3295err_destroy_direct_tirs:
3296 mlx5e_destroy_direct_tirs(priv);
3297err_destroy_indirect_tirs:
3298 mlx5e_destroy_indirect_tirs(priv);
3299err_destroy_direct_rqts:
3300 for (i = 0; i < priv->profile->max_nch(mdev); i++)
3301 mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
3302err_destroy_indirect_rqts:
3303 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
3304 return err;
3305}
3306
3307static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
3308{
3309 int i;
3310
3311 mlx5e_tc_cleanup(priv);
3312 mlx5e_destroy_flow_steering(priv);
3313 mlx5e_destroy_direct_tirs(priv);
3314 mlx5e_destroy_indirect_tirs(priv);
3315 for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
3316 mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
3317 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
3318}
3319
3320static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
3321{
3322 int err;
3323
3225 err = mlx5e_create_tises(priv); 3324 err = mlx5e_create_tises(priv);
3226 if (err) { 3325 if (err) {
3227 mlx5_core_warn(mdev, "create tises failed, %d\n", err); 3326 mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
3228 goto err_destroy_umr_mkey; 3327 return err;
3229 } 3328 }
3230 3329
3231 err = mlx5e_open_drop_rq(priv); 3330#ifdef CONFIG_MLX5_CORE_EN_DCB
3232 if (err) { 3331 mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
3233 mlx5_core_err(mdev, "open drop rq failed, %d\n", err); 3332#endif
3234 goto err_destroy_tises; 3333 return 0;
3334}
3335
3336static void mlx5e_nic_enable(struct mlx5e_priv *priv)
3337{
3338 struct net_device *netdev = priv->netdev;
3339 struct mlx5_core_dev *mdev = priv->mdev;
3340 struct mlx5_eswitch *esw = mdev->priv.eswitch;
3341 struct mlx5_eswitch_rep rep;
3342
3343 if (mlx5e_vxlan_allowed(mdev)) {
3344 rtnl_lock();
3345 udp_tunnel_get_rx_info(netdev);
3346 rtnl_unlock();
3235 } 3347 }
3236 3348
3237 err = mlx5e_create_rqts(priv); 3349 mlx5e_enable_async_events(priv);
3238 if (err) { 3350 queue_work(priv->wq, &priv->set_rx_mode_work);
3239 mlx5_core_warn(mdev, "create rqts failed, %d\n", err); 3351
3240 goto err_close_drop_rq; 3352 if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
3353 rep.load = mlx5e_nic_rep_load;
3354 rep.unload = mlx5e_nic_rep_unload;
3355 rep.vport = 0;
3356 rep.priv_data = priv;
3357 mlx5_eswitch_register_vport_rep(esw, &rep);
3241 } 3358 }
3359}
3242 3360
3243 err = mlx5e_create_tirs(priv); 3361static void mlx5e_nic_disable(struct mlx5e_priv *priv)
3244 if (err) { 3362{
3245 mlx5_core_warn(mdev, "create tirs failed, %d\n", err); 3363 queue_work(priv->wq, &priv->set_rx_mode_work);
3246 goto err_destroy_rqts; 3364 mlx5e_disable_async_events(priv);
3365}
3366
3367static const struct mlx5e_profile mlx5e_nic_profile = {
3368 .init = mlx5e_nic_init,
3369 .cleanup = mlx5e_nic_cleanup,
3370 .init_rx = mlx5e_init_nic_rx,
3371 .cleanup_rx = mlx5e_cleanup_nic_rx,
3372 .init_tx = mlx5e_init_nic_tx,
3373 .cleanup_tx = mlx5e_cleanup_nic_tx,
3374 .enable = mlx5e_nic_enable,
3375 .disable = mlx5e_nic_disable,
3376 .update_stats = mlx5e_update_stats,
3377 .max_nch = mlx5e_get_max_num_channels,
3378 .max_tc = MLX5E_MAX_NUM_TC,
3379};
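
mlx5e_profile is a classic ops table: mlx5e_create_netdev() drives one common skeleton and dispatches through init_tx/init_rx/enable pointers, which is what lets the NIC and the new VF-representor flavors share the lifecycle (note that enable/disable are optional and NULL-checked). A compact userspace sketch of the same idea, with invented names:

#include <stdio.h>

struct profile {
    int  (*init_tx)(void);
    int  (*init_rx)(void);
    void (*enable)(void);          /* optional, may be NULL */
};

static int  nic_init_tx(void) { puts("nic: init tx"); return 0; }
static int  nic_init_rx(void) { puts("nic: init rx"); return 0; }
static void nic_enable(void)  { puts("nic: enable"); }

static const struct profile nic_profile = {
    .init_tx = nic_init_tx,
    .init_rx = nic_init_rx,
    .enable  = nic_enable,
};

static int create_device(const struct profile *p)
{
    if (p->init_tx())
        return -1;
    if (p->init_rx())
        return -1;
    if (p->enable)                 /* absent in the representor profile */
        p->enable();
    return 0;
}

int main(void) { return create_device(&nic_profile) ? 1 : 0; }
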
3380
3381void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
3382 const struct mlx5e_profile *profile, void *ppriv)
3383{
3384 struct net_device *netdev;
3385 struct mlx5e_priv *priv;
3386 int nch = profile->max_nch(mdev);
3387 int err;
3388
3389 netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
3390 nch * profile->max_tc,
3391 nch);
3392 if (!netdev) {
3393 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
3394 return NULL;
3247 } 3395 }
3248 3396
3249 err = mlx5e_create_flow_steering(priv); 3397 profile->init(mdev, netdev, profile, ppriv);
3398
3399 netif_carrier_off(netdev);
3400
3401 priv = netdev_priv(netdev);
3402
3403 priv->wq = create_singlethread_workqueue("mlx5e");
3404 if (!priv->wq)
3405 goto err_free_netdev;
3406
3407 err = mlx5e_create_umr_mkey(priv);
3250 if (err) { 3408 if (err) {
3251 mlx5_core_warn(mdev, "create flow steering failed, %d\n", err); 3409 mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
3252 goto err_destroy_tirs; 3410 goto err_destroy_wq;
3253 } 3411 }
3254 3412
3255 mlx5e_create_q_counter(priv); 3413 err = profile->init_tx(priv);
3256 3414 if (err)
3257 mlx5e_init_l2_addr(priv); 3415 goto err_destroy_umr_mkey;
3258 3416
3259 mlx5e_vxlan_init(priv); 3417 err = mlx5e_open_drop_rq(priv);
3418 if (err) {
3419 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
3420 goto err_cleanup_tx;
3421 }
3260 3422
3261 err = mlx5e_tc_init(priv); 3423 err = profile->init_rx(priv);
3262 if (err) 3424 if (err)
3263 goto err_dealloc_q_counters; 3425 goto err_close_drop_rq;
3264 3426
3265#ifdef CONFIG_MLX5_CORE_EN_DCB 3427 mlx5e_create_q_counter(priv);
3266 mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets); 3428
3267#endif 3429 mlx5e_init_l2_addr(priv);
3268 3430
3269 err = register_netdev(netdev); 3431 err = register_netdev(netdev);
3270 if (err) { 3432 if (err) {
3271 mlx5_core_err(mdev, "register_netdev failed, %d\n", err); 3433 mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
3272 goto err_tc_cleanup; 3434 goto err_dealloc_q_counters;
3273 }
3274
3275 if (mlx5e_vxlan_allowed(mdev)) {
3276 rtnl_lock();
3277 udp_tunnel_get_rx_info(netdev);
3278 rtnl_unlock();
3279 } 3435 }
3280 3436
3281 mlx5e_enable_async_events(priv); 3437 if (profile->enable)
3282 queue_work(priv->wq, &priv->set_rx_mode_work); 3438 profile->enable(priv);
3283 3439
3284 return priv; 3440 return priv;
3285 3441
3286err_tc_cleanup:
3287 mlx5e_tc_cleanup(priv);
3288
3289err_dealloc_q_counters: 3442err_dealloc_q_counters:
3290 mlx5e_destroy_q_counter(priv); 3443 mlx5e_destroy_q_counter(priv);
3291 mlx5e_destroy_flow_steering(priv); 3444 profile->cleanup_rx(priv);
3292
3293err_destroy_tirs:
3294 mlx5e_destroy_tirs(priv);
3295
3296err_destroy_rqts:
3297 mlx5e_destroy_rqts(priv);
3298 3445
3299err_close_drop_rq: 3446err_close_drop_rq:
3300 mlx5e_close_drop_rq(priv); 3447 mlx5e_close_drop_rq(priv);
3301 3448
3302err_destroy_tises: 3449err_cleanup_tx:
3303 mlx5e_destroy_tises(priv); 3450 profile->cleanup_tx(priv);
3304 3451
3305err_destroy_umr_mkey: 3452err_destroy_umr_mkey:
3306 mlx5_core_destroy_mkey(mdev, &priv->umr_mkey); 3453 mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
3307 3454
3308err_destroy_mkey:
3309 mlx5_core_destroy_mkey(mdev, &priv->mkey);
3310
3311err_dealloc_transport_domain:
3312 mlx5_core_dealloc_transport_domain(mdev, priv->tdn);
3313
3314err_dealloc_pd:
3315 mlx5_core_dealloc_pd(mdev, priv->pdn);
3316
3317err_unmap_free_uar:
3318 mlx5_unmap_free_uar(mdev, &priv->cq_uar);
3319
3320err_destroy_wq: 3455err_destroy_wq:
3321 destroy_workqueue(priv->wq); 3456 destroy_workqueue(priv->wq);
3322 3457
@@ -3326,15 +3461,59 @@ err_free_netdev:
3326 return NULL; 3461 return NULL;
3327} 3462}
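
The rewritten create path keeps the kernel's goto-unwind idiom: each acquired resource adds a label to the tail, and a failure jumps to the label that releases everything obtained so far, in reverse order. A minimal standalone sketch (the failure is simulated at the third step):

#include <stdio.h>

static int step(const char *name, int fail)
{
    printf("acquire %s\n", name);
    return fail ? -1 : 0;
}

static int create(void)
{
    if (step("workqueue", 0))
        goto err;
    if (step("umr mkey", 0))
        goto err_destroy_wq;
    if (step("tx resources", 1))   /* simulated failure */
        goto err_destroy_umr_mkey;

    return 0;

err_destroy_umr_mkey:
    puts("release umr mkey");
err_destroy_wq:
    puts("release workqueue");
err:
    return -1;
}

int main(void) { return create() ? 1 : 0; }
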
3328 3463
3329static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) 3464static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
3330{ 3465{
3331 struct mlx5e_priv *priv = vpriv; 3466 struct mlx5_eswitch *esw = mdev->priv.eswitch;
3467 int total_vfs = MLX5_TOTAL_VPORTS(mdev);
3468 int vport;
3469
3470 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
3471 return;
3472
3473 for (vport = 1; vport < total_vfs; vport++) {
3474 struct mlx5_eswitch_rep rep;
3475
3476 rep.load = mlx5e_vport_rep_load;
3477 rep.unload = mlx5e_vport_rep_unload;
3478 rep.vport = vport;
3479 mlx5_eswitch_register_vport_rep(esw, &rep);
3480 }
3481}
3482
3483static void *mlx5e_add(struct mlx5_core_dev *mdev)
3484{
3485 struct mlx5_eswitch *esw = mdev->priv.eswitch;
3486 void *ppriv = NULL;
3487 void *ret;
3488
3489 if (mlx5e_check_required_hca_cap(mdev))
3490 return NULL;
3491
3492 if (mlx5e_create_mdev_resources(mdev))
3493 return NULL;
3494
3495 mlx5e_register_vport_rep(mdev);
3496
3497 if (MLX5_CAP_GEN(mdev, vport_group_manager))
3498 ppriv = &esw->offloads.vport_reps[0];
3499
3500 ret = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
3501 if (!ret) {
3502 mlx5e_destroy_mdev_resources(mdev);
3503 return NULL;
3504 }
3505 return ret;
3506}
3507
3508void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
3509{
3510 const struct mlx5e_profile *profile = priv->profile;
3332 struct net_device *netdev = priv->netdev; 3511 struct net_device *netdev = priv->netdev;
3333 3512
3334 set_bit(MLX5E_STATE_DESTROYING, &priv->state); 3513 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
3514 if (profile->disable)
3515 profile->disable(priv);
3335 3516
3336 queue_work(priv->wq, &priv->set_rx_mode_work);
3337 mlx5e_disable_async_events(priv);
3338 flush_workqueue(priv->wq); 3517 flush_workqueue(priv->wq);
3339 if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { 3518 if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
3340 netif_device_detach(netdev); 3519 netif_device_detach(netdev);
@@ -3343,26 +3522,35 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
3343 unregister_netdev(netdev); 3522 unregister_netdev(netdev);
3344 } 3523 }
3345 3524
3346 mlx5e_tc_cleanup(priv);
3347 mlx5e_vxlan_cleanup(priv);
3348 mlx5e_destroy_q_counter(priv); 3525 mlx5e_destroy_q_counter(priv);
3349 mlx5e_destroy_flow_steering(priv); 3526 profile->cleanup_rx(priv);
3350 mlx5e_destroy_tirs(priv);
3351 mlx5e_destroy_rqts(priv);
3352 mlx5e_close_drop_rq(priv); 3527 mlx5e_close_drop_rq(priv);
3353 mlx5e_destroy_tises(priv); 3528 profile->cleanup_tx(priv);
3354 mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey); 3529 mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
3355 mlx5_core_destroy_mkey(priv->mdev, &priv->mkey);
3356 mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
3357 mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
3358 mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
3359 cancel_delayed_work_sync(&priv->update_stats_work); 3530 cancel_delayed_work_sync(&priv->update_stats_work);
3360 destroy_workqueue(priv->wq); 3531 destroy_workqueue(priv->wq);
3532 if (profile->cleanup)
3533 profile->cleanup(priv);
3361 3534
3362 if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) 3535 if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
3363 free_netdev(netdev); 3536 free_netdev(netdev);
3364} 3537}
3365 3538
3539static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
3540{
3541 struct mlx5_eswitch *esw = mdev->priv.eswitch;
3542 int total_vfs = MLX5_TOTAL_VPORTS(mdev);
3543 struct mlx5e_priv *priv = vpriv;
3544 int vport;
3545
3546 mlx5e_destroy_netdev(mdev, priv);
3547
3548 for (vport = 1; vport < total_vfs; vport++)
3549 mlx5_eswitch_unregister_vport_rep(esw, vport);
3550
3551 mlx5e_destroy_mdev_resources(mdev);
3552}
3553
3366static void *mlx5e_get_netdev(void *vpriv) 3554static void *mlx5e_get_netdev(void *vpriv)
3367{ 3555{
3368 struct mlx5e_priv *priv = vpriv; 3556 struct mlx5e_priv *priv = vpriv;
@@ -3371,8 +3559,8 @@ static void *mlx5e_get_netdev(void *vpriv)
3371} 3559}
3372 3560
3373static struct mlx5_interface mlx5e_interface = { 3561static struct mlx5_interface mlx5e_interface = {
3374 .add = mlx5e_create_netdev, 3562 .add = mlx5e_add,
3375 .remove = mlx5e_destroy_netdev, 3563 .remove = mlx5e_remove,
3376 .event = mlx5e_async_event, 3564 .event = mlx5e_async_event,
3377 .protocol = MLX5_INTERFACE_PROTOCOL_ETH, 3565 .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
3378 .get_dev = mlx5e_get_netdev, 3566 .get_dev = mlx5e_get_netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
new file mode 100644
index 000000000000..5ef02f02a1d5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -0,0 +1,394 @@
1/*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <generated/utsrelease.h>
34#include <linux/mlx5/fs.h>
35#include <net/switchdev.h>
36
37#include "eswitch.h"
38#include "en.h"
39
40static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
41
42static void mlx5e_rep_get_drvinfo(struct net_device *dev,
43 struct ethtool_drvinfo *drvinfo)
44{
45 strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
46 sizeof(drvinfo->driver));
47 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
48}
49
50static const struct counter_desc sw_rep_stats_desc[] = {
51 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
52 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
53 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
54 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
55};
56
57#define NUM_VPORT_REP_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
58
59static void mlx5e_rep_get_strings(struct net_device *dev,
60 u32 stringset, uint8_t *data)
61{
62 int i;
63
64 switch (stringset) {
65 case ETH_SS_STATS:
66 for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
67 strcpy(data + (i * ETH_GSTRING_LEN),
68 sw_rep_stats_desc[i].format);
69 break;
70 }
71}
72
73static void mlx5e_update_sw_rep_counters(struct mlx5e_priv *priv)
74{
75 struct mlx5e_sw_stats *s = &priv->stats.sw;
76 struct mlx5e_rq_stats *rq_stats;
77 struct mlx5e_sq_stats *sq_stats;
78 int i, j;
79
80 memset(s, 0, sizeof(*s));
81 for (i = 0; i < priv->params.num_channels; i++) {
82 rq_stats = &priv->channel[i]->rq.stats;
83
84 s->rx_packets += rq_stats->packets;
85 s->rx_bytes += rq_stats->bytes;
86
87 for (j = 0; j < priv->params.num_tc; j++) {
88 sq_stats = &priv->channel[i]->sq[j].stats;
89
90 s->tx_packets += sq_stats->packets;
91 s->tx_bytes += sq_stats->bytes;
92 }
93 }
94}
95
96static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
97 struct ethtool_stats *stats, u64 *data)
98{
99 struct mlx5e_priv *priv = netdev_priv(dev);
100 int i;
101
102 if (!data)
103 return;
104
105 mutex_lock(&priv->state_lock);
106 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
107 mlx5e_update_sw_rep_counters(priv);
108 mutex_unlock(&priv->state_lock);
109
110 for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
111 data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
112 sw_rep_stats_desc, i);
113}
114
115static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
116{
117 switch (sset) {
118 case ETH_SS_STATS:
119 return NUM_VPORT_REP_COUNTERS;
120 default:
121 return -EOPNOTSUPP;
122 }
123}
124
125static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
126 .get_drvinfo = mlx5e_rep_get_drvinfo,
127 .get_link = ethtool_op_get_link,
128 .get_strings = mlx5e_rep_get_strings,
129 .get_sset_count = mlx5e_rep_get_sset_count,
130 .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
131};
132
133int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
134{
135 struct mlx5e_priv *priv = netdev_priv(dev);
136 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
137 u8 mac[ETH_ALEN];
138
139 if (esw->mode == SRIOV_NONE)
140 return -EOPNOTSUPP;
141
142 switch (attr->id) {
143 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
144 mlx5_query_nic_vport_mac_address(priv->mdev, 0, mac);
145 attr->u.ppid.id_len = ETH_ALEN;
146 memcpy(&attr->u.ppid.id, &mac, ETH_ALEN);
147 break;
148 default:
149 return -EOPNOTSUPP;
150 }
151
152 return 0;
153}
154
155int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
156
157{
158 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
159 struct mlx5_eswitch_rep *rep = priv->ppriv;
160 struct mlx5e_channel *c;
161 int n, tc, err, num_sqs = 0;
162 u16 *sqs;
163
164 sqs = kcalloc(priv->params.num_channels * priv->params.num_tc, sizeof(u16), GFP_KERNEL);
165 if (!sqs)
166 return -ENOMEM;
167
168 for (n = 0; n < priv->params.num_channels; n++) {
169 c = priv->channel[n];
170 for (tc = 0; tc < c->num_tc; tc++)
171 sqs[num_sqs++] = c->sq[tc].sqn;
172 }
173
174 err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
175
176 kfree(sqs);
177 return err;
178}
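
mlx5e_add_sqs_fwd_rules() above flattens all SQ numbers into one kcalloc'd array (num_channels * num_tc entries, with the multiplication overflow-checked by kcalloc) before handing them to the e-switch. A userspace analogue using calloc; the SQ numbers here are fake:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const int num_channels = 2, num_tc = 3;
    uint16_t *sqs = calloc((size_t)num_channels * num_tc, sizeof(*sqs));
    int num_sqs = 0;

    if (!sqs)
        return 1;

    for (int n = 0; n < num_channels; n++)
        for (int tc = 0; tc < num_tc; tc++)
            sqs[num_sqs++] = (uint16_t)(n * 100 + tc);   /* fake SQ numbers */

    printf("collected %d sq numbers, first is %u\n", num_sqs, sqs[0]);
    free(sqs);
    return 0;
}
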
179
180int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
181{
182 struct mlx5e_priv *priv = rep->priv_data;
183
184 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
185 return mlx5e_add_sqs_fwd_rules(priv);
186 return 0;
187}
188
189void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
190{
191 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
192 struct mlx5_eswitch_rep *rep = priv->ppriv;
193
194 mlx5_eswitch_sqs2vport_stop(esw, rep);
195}
196
197void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
198 struct mlx5_eswitch_rep *rep)
199{
200 struct mlx5e_priv *priv = rep->priv_data;
201
202 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
203 mlx5e_remove_sqs_fwd_rules(priv);
204}
205
206static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
207 char *buf, size_t len)
208{
209 struct mlx5e_priv *priv = netdev_priv(dev);
210 struct mlx5_eswitch_rep *rep = priv->ppriv;
211 int ret;
212
213 ret = snprintf(buf, len, "%d", rep->vport - 1);
214 if (ret >= len)
215 return -EOPNOTSUPP;
216
217 return 0;
218}
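
The "ret >= len" test in mlx5e_rep_get_phys_port_name() relies on snprintf() returning the length the formatted string would have had, so truncation is detectable without writing past the buffer. A standalone sketch of the same check (function name invented):

#include <stdio.h>

static int format_port_name(char *buf, size_t len, int vport)
{
    int ret = snprintf(buf, len, "%d", vport - 1);

    if (ret < 0 || (size_t)ret >= len)
        return -1;                 /* would not fit: report unsupported */
    return 0;
}

int main(void)
{
    char small[2];

    puts(format_port_name(small, sizeof(small), 5)  ? "error" : small);  /* "4" fits */
    puts(format_port_name(small, sizeof(small), 42) ? "error" : small);  /* "41" truncated */
    return 0;
}
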
219
220static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
221 .switchdev_port_attr_get = mlx5e_attr_get,
222};
223
224static const struct net_device_ops mlx5e_netdev_ops_rep = {
225 .ndo_open = mlx5e_open,
226 .ndo_stop = mlx5e_close,
227 .ndo_start_xmit = mlx5e_xmit,
228 .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
229 .ndo_get_stats64 = mlx5e_get_stats,
230};
231
232static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev,
233 struct net_device *netdev,
234 const struct mlx5e_profile *profile,
235 void *ppriv)
236{
237 struct mlx5e_priv *priv = netdev_priv(netdev);
238 u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
239 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
240 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
241
242 priv->params.log_sq_size =
243 MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
244 priv->params.rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
245 priv->params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
246
247 priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
248 BIT(priv->params.log_rq_size));
249
250 priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
251 mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
252
253 priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
254 priv->params.num_tc = 1;
255
256 priv->params.lro_wqe_sz =
257 MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
258
259 priv->mdev = mdev;
260 priv->netdev = netdev;
261 priv->params.num_channels = profile->max_nch(mdev);
262 priv->profile = profile;
263 priv->ppriv = ppriv;
264
265 mutex_init(&priv->state_lock);
266
267 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
268}
269
270static void mlx5e_build_rep_netdev(struct net_device *netdev)
271{
272 netdev->netdev_ops = &mlx5e_netdev_ops_rep;
273
274 netdev->watchdog_timeo = 15 * HZ;
275
276 netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
277
278#ifdef CONFIG_NET_SWITCHDEV
279 netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
280#endif
281
282 netdev->features |= NETIF_F_VLAN_CHALLENGED;
283
284 eth_hw_addr_random(netdev);
285}
286
287static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
288 struct net_device *netdev,
289 const struct mlx5e_profile *profile,
290 void *ppriv)
291{
292 mlx5e_build_rep_netdev_priv(mdev, netdev, profile, ppriv);
293 mlx5e_build_rep_netdev(netdev);
294}
295
296static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
297{
298 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
299 struct mlx5_eswitch_rep *rep = priv->ppriv;
300 struct mlx5_core_dev *mdev = priv->mdev;
301 struct mlx5_flow_rule *flow_rule;
302 int err;
303 int i;
304
305 err = mlx5e_create_direct_rqts(priv);
306 if (err) {
307 mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
308 return err;
309 }
310
311 err = mlx5e_create_direct_tirs(priv);
312 if (err) {
313 mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
314 goto err_destroy_direct_rqts;
315 }
316
317 flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
318 rep->vport,
319 priv->direct_tir[0].tirn);
320 if (IS_ERR(flow_rule)) {
321 err = PTR_ERR(flow_rule);
322 goto err_destroy_direct_tirs;
323 }
324 rep->vport_rx_rule = flow_rule;
325
326 return 0;
327
328err_destroy_direct_tirs:
329 mlx5e_destroy_direct_tirs(priv);
330err_destroy_direct_rqts:
331 for (i = 0; i < priv->params.num_channels; i++)
332 mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
333 return err;
334}
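The function above follows the kernel's goto-unwind idiom: resources are acquired in order and, on failure, released in reverse order starting from the label that matches the last successful step. A minimal self-contained sketch of the same shape (all names hypothetical, plain user-space C rather than mlx5 code):

    #include <stdio.h>

    struct example { int a_ready, b_ready; };

    static int setup_a(struct example *ex)     { ex->a_ready = 1; return 0; }
    static int setup_b(struct example *ex)     { ex->b_ready = 1; return 0; }
    static void teardown_a(struct example *ex) { ex->a_ready = 0; }

    /* Acquire in order, unwind in reverse on failure. */
    static int example_init(struct example *ex)
    {
            int err;

            err = setup_a(ex);
            if (err)
                    return err;             /* nothing to undo yet */

            err = setup_b(ex);
            if (err)
                    goto err_teardown_a;    /* undo only what succeeded */

            return 0;

    err_teardown_a:
            teardown_a(ex);
            return err;
    }

    int main(void)
    {
            struct example ex = { 0, 0 };
            printf("init: %d\n", example_init(&ex));
            return 0;
    }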
335
336static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
337{
338 struct mlx5_eswitch_rep *rep = priv->ppriv;
339 int i;
340
341 mlx5_del_flow_rule(rep->vport_rx_rule);
342 mlx5e_destroy_direct_tirs(priv);
343 for (i = 0; i < priv->params.num_channels; i++)
344 mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
345}
346
347static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
348{
349 int err;
350
351 err = mlx5e_create_tises(priv);
352 if (err) {
353 mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
354 return err;
355 }
356 return 0;
357}
358
359static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev)
360{
361#define MLX5E_PORT_REPRESENTOR_NCH 1
362 return MLX5E_PORT_REPRESENTOR_NCH;
363}
364
365static struct mlx5e_profile mlx5e_rep_profile = {
366 .init = mlx5e_init_rep,
367 .init_rx = mlx5e_init_rep_rx,
368 .cleanup_rx = mlx5e_cleanup_rep_rx,
369 .init_tx = mlx5e_init_rep_tx,
370 .cleanup_tx = mlx5e_cleanup_nic_tx,
371 .update_stats = mlx5e_update_sw_rep_counters,
372 .max_nch = mlx5e_get_rep_max_num_channels,
373 .max_tc = 1,
374};
375
376int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
377 struct mlx5_eswitch_rep *rep)
378{
379 rep->priv_data = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep);
380 if (!rep->priv_data) {
381 pr_warn("Failed to create representor for vport %d\n",
382 rep->vport);
383 return -EINVAL;
384 }
385 return 0;
386}
387
388void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
389 struct mlx5_eswitch_rep *rep)
390{
391 struct mlx5e_priv *priv = rep->priv_data;
392
393 mlx5e_destroy_netdev(esw->dev, priv);
394}
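mlx5e_rep_profile above is an ops table: mlx5e_create_netdev() drives a generic bring-up and calls back into whichever profile it was handed, so the representor and the regular NIC netdev share one lifecycle. A hedged sketch of that dispatch pattern (layout and names illustrative, not the real mlx5e_profile):

    #include <stdio.h>

    struct sketch_profile {
            int  (*init_rx)(void *priv);
            void (*cleanup_rx)(void *priv);
    };

    static int rep_init_rx(void *priv)     { (void)priv; return 0; }
    static void rep_cleanup_rx(void *priv) { (void)priv; }

    static const struct sketch_profile rep_profile = {
            .init_rx    = rep_init_rx,
            .cleanup_rx = rep_cleanup_rx,
    };

    /* Generic create/destroy paths; flavor-specific work lives in hooks. */
    static int sketch_create(const struct sketch_profile *p, void *priv)
    {
            return p->init_rx(priv);
    }

    static void sketch_destroy(const struct sketch_profile *p, void *priv)
    {
            p->cleanup_rx(priv);
    }

    int main(void)
    {
            int err = sketch_create(&rep_profile, NULL);

            if (!err)
                    sketch_destroy(&rep_profile, NULL);
            return err;
    }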
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index bd947704b59c..9f2a16a507e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -212,6 +212,20 @@ err_free_skb:
212 return -ENOMEM; 212 return -ENOMEM;
213} 213}
214 214
215void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
216{
217 struct sk_buff *skb = rq->skb[ix];
218
219 if (skb) {
220 rq->skb[ix] = NULL;
221 dma_unmap_single(rq->pdev,
222 *((dma_addr_t *)skb->cb),
223 rq->wqe_sz,
224 DMA_FROM_DEVICE);
225 dev_kfree_skb(skb);
226 }
227}
228
215static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq) 229static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
216{ 230{
217 return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER; 231 return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
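mlx5e_dealloc_rx_wqe() above depends on the allocation side having stashed the DMA address in skb->cb when the buffer was mapped. A hedged sketch of that pairing; sketch_alloc_rx_wqe() is illustrative rather than an mlx5 function, but the dma_map_single()/skb->cb handshake mirrors what the driver does:

    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Map an RX buffer and remember its DMA address in skb->cb so the
     * teardown path can unmap it without extra bookkeeping. */
    static int sketch_alloc_rx_wqe(struct device *dev, struct sk_buff **slot,
                                   struct sk_buff *skb, unsigned int wqe_sz)
    {
            dma_addr_t addr;

            addr = dma_map_single(dev, skb->data, wqe_sz, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, addr))
                    return -ENOMEM;

            *((dma_addr_t *)skb->cb) = addr;   /* read back by dealloc */
            *slot = skb;
            return 0;
    }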
@@ -574,6 +588,30 @@ int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
574 return 0; 588 return 0;
575} 589}
576 590
591void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
592{
593 struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
594
595 wi->free_wqe(rq, wi);
596}
597
598void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
599{
600 struct mlx5_wq_ll *wq = &rq->wq;
601 struct mlx5e_rx_wqe *wqe;
602 __be16 wqe_ix_be;
603 u16 wqe_ix;
604
605 while (!mlx5_wq_ll_is_empty(wq)) {
606 wqe_ix_be = *wq->tail_next;
607 wqe_ix = be16_to_cpu(wqe_ix_be);
608 wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
609 rq->dealloc_wqe(rq, wqe_ix);
610 mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
611 &wqe->next.next_wqe_index);
612 }
613}
614
577#define RQ_CANNOT_POST(rq) \ 615#define RQ_CANNOT_POST(rq) \
578 (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \ 616 (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
579 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) 617 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
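mlx5e_free_rx_descs() above drains the linked-list work queue by releasing the tail entry and then popping it, until the queue reports empty; the tail index is stored big-endian and converted with be16_to_cpu() before use. The same drain shape on a toy ring (types and the count field are illustrative, user-space C):

    #include <stdint.h>

    struct toy_wqe { uint16_t next_ix; };

    struct toy_wq {
            struct toy_wqe entries[8];
            uint16_t tail_ix;
            int count;              /* stands in for mlx5_wq_ll_is_empty() */
    };

    static void toy_dealloc(struct toy_wq *wq, uint16_t ix)
    {
            (void)wq; (void)ix;     /* driver unmaps and frees here */
    }

    static void toy_free_descs(struct toy_wq *wq)
    {
            while (wq->count) {
                    uint16_t ix = wq->tail_ix;

                    toy_dealloc(wq, ix);
                    wq->tail_ix = wq->entries[ix].next_ix;  /* pop tail */
                    wq->count--;
            }
    }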
@@ -689,7 +727,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
689 if (is_first_ethertype_ip(skb)) { 727 if (is_first_ethertype_ip(skb)) {
690 skb->ip_summed = CHECKSUM_COMPLETE; 728 skb->ip_summed = CHECKSUM_COMPLETE;
691 skb->csum = csum_unfold((__force __sum16)cqe->check_sum); 729 skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
692 rq->stats.csum_sw++; 730 rq->stats.csum_complete++;
693 return; 731 return;
694 } 732 }
695 733
@@ -699,7 +737,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
699 if (cqe_is_tunneled(cqe)) { 737 if (cqe_is_tunneled(cqe)) {
700 skb->csum_level = 1; 738 skb->csum_level = 1;
701 skb->encapsulation = 1; 739 skb->encapsulation = 1;
702 rq->stats.csum_inner++; 740 rq->stats.csum_unnecessary_inner++;
703 } 741 }
704 return; 742 return;
705 } 743 }
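The csum_sw and csum_inner renames above tie each counter name to the skb->ip_summed state it accounts for, which makes ethtool output self-describing. An illustrative mapping (this helper is a sketch, not driver code):

    #include <linux/skbuff.h>

    static const char *sketch_csum_counter(const struct sk_buff *skb,
                                           bool inner)
    {
            switch (skb->ip_summed) {
            case CHECKSUM_COMPLETE:    /* HW supplied a full packet sum */
                    return "rx_csum_complete";
            case CHECKSUM_UNNECESSARY: /* HW already validated it */
                    return inner ? "rx_csum_unnecessary_inner"
                                 : "rx_csum_unnecessary";
            default:                   /* stack must checksum in SW */
                    return "rx_csum_none";
            }
    }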
@@ -878,6 +916,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
878 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); 916 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
879 int work_done = 0; 917 int work_done = 0;
880 918
919 if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
920 return 0;
921
881 if (cq->decmprs_left) 922 if (cq->decmprs_left)
882 work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget); 923 work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
883 924
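The new early return in mlx5e_poll_rx_cq() is a lock-free handshake: the timeout/recovery path sets a state bit, and the hot NAPI poll merely observes it and backs off. A sketch of the guard pattern (bit index and function are illustrative):

    #include <linux/bitops.h>
    #include <linux/compiler.h>

    #define SKETCH_STATE_FLUSH  0   /* bit index, illustrative */

    static int sketch_poll(unsigned long *state, int budget)
    {
            if (unlikely(test_bit(SKETCH_STATE_FLUSH, state)))
                    return 0;   /* recovery owns the queue: report no work */

            /* ... normal CQE processing up to 'budget' goes here ... */
            return budget;
    }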
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 83bc32b25849..7b9d8a989b52 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -42,9 +42,11 @@
42 be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset)) 42 be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
43 43
44#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld) 44#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
45#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
46#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
45 47
46struct counter_desc { 48struct counter_desc {
47 char name[ETH_GSTRING_LEN]; 49 char format[ETH_GSTRING_LEN];
48 int offset; /* Byte offset */ 50 int offset; /* Byte offset */
49}; 51};
50 52
@@ -53,18 +55,18 @@ struct mlx5e_sw_stats {
53 u64 rx_bytes; 55 u64 rx_bytes;
54 u64 tx_packets; 56 u64 tx_packets;
55 u64 tx_bytes; 57 u64 tx_bytes;
56 u64 tso_packets; 58 u64 tx_tso_packets;
57 u64 tso_bytes; 59 u64 tx_tso_bytes;
58 u64 tso_inner_packets; 60 u64 tx_tso_inner_packets;
59 u64 tso_inner_bytes; 61 u64 tx_tso_inner_bytes;
60 u64 lro_packets; 62 u64 rx_lro_packets;
61 u64 lro_bytes; 63 u64 rx_lro_bytes;
62 u64 rx_csum_good; 64 u64 rx_csum_unnecessary;
63 u64 rx_csum_none; 65 u64 rx_csum_none;
64 u64 rx_csum_sw; 66 u64 rx_csum_complete;
65 u64 rx_csum_inner; 67 u64 rx_csum_unnecessary_inner;
66 u64 tx_csum_offload; 68 u64 tx_csum_partial;
67 u64 tx_csum_inner; 69 u64 tx_csum_partial_inner;
68 u64 tx_queue_stopped; 70 u64 tx_queue_stopped;
69 u64 tx_queue_wake; 71 u64 tx_queue_wake;
70 u64 tx_queue_dropped; 72 u64 tx_queue_dropped;
@@ -76,7 +78,7 @@ struct mlx5e_sw_stats {
76 u64 rx_cqe_compress_pkts; 78 u64 rx_cqe_compress_pkts;
77 79
78 /* Special handling counters */ 80 /* Special handling counters */
79 u64 link_down_events; 81 u64 link_down_events_phy;
80}; 82};
81 83
82static const struct counter_desc sw_stats_desc[] = { 84static const struct counter_desc sw_stats_desc[] = {
@@ -84,18 +86,18 @@ static const struct counter_desc sw_stats_desc[] = {
84 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) }, 86 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
85 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) }, 87 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
86 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) }, 88 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
87 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_packets) }, 89 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
88 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_bytes) }, 90 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
89 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_packets) }, 91 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
90 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_bytes) }, 92 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
91 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_packets) }, 93 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
92 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_bytes) }, 94 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
93 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_good) }, 95 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
94 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, 96 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
95 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_sw) }, 97 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
96 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_inner) }, 98 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
97 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_offload) }, 99 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
98 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_inner) }, 100 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
99 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, 101 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
100 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, 102 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
101 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, 103 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
@@ -105,7 +107,7 @@ static const struct counter_desc sw_stats_desc[] = {
105 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, 107 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
106 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) }, 108 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
107 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) }, 109 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
108 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events) }, 110 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
109}; 111};
110 112
111struct mlx5e_qcounter_stats { 113struct mlx5e_qcounter_stats {
@@ -125,12 +127,6 @@ struct mlx5e_vport_stats {
125}; 127};
126 128
127static const struct counter_desc vport_stats_desc[] = { 129static const struct counter_desc vport_stats_desc[] = {
128 { "rx_vport_error_packets",
129 VPORT_COUNTER_OFF(received_errors.packets) },
130 { "rx_vport_error_bytes", VPORT_COUNTER_OFF(received_errors.octets) },
131 { "tx_vport_error_packets",
132 VPORT_COUNTER_OFF(transmit_errors.packets) },
133 { "tx_vport_error_bytes", VPORT_COUNTER_OFF(transmit_errors.octets) },
134 { "rx_vport_unicast_packets", 130 { "rx_vport_unicast_packets",
135 VPORT_COUNTER_OFF(received_eth_unicast.packets) }, 131 VPORT_COUNTER_OFF(received_eth_unicast.packets) },
136 { "rx_vport_unicast_bytes", 132 { "rx_vport_unicast_bytes",
@@ -155,6 +151,22 @@ static const struct counter_desc vport_stats_desc[] = {
155 VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) }, 151 VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
156 { "tx_vport_broadcast_bytes", 152 { "tx_vport_broadcast_bytes",
157 VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) }, 153 VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
154 { "rx_vport_rdma_unicast_packets",
155 VPORT_COUNTER_OFF(received_ib_unicast.packets) },
156 { "rx_vport_rdma_unicast_bytes",
157 VPORT_COUNTER_OFF(received_ib_unicast.octets) },
158 { "tx_vport_rdma_unicast_packets",
159 VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
160 { "tx_vport_rdma_unicast_bytes",
161 VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
162 { "rx_vport_rdma_multicast_packets",
163 VPORT_COUNTER_OFF(received_ib_multicast.packets) },
164 { "rx_vport_rdma_multicast_bytes",
165 VPORT_COUNTER_OFF(received_ib_multicast.octets) },
166 { "tx_vport_rdma_multicast_packets",
167 VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
168 { "tx_vport_rdma_multicast_bytes",
169 VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
158}; 170};
159 171
160#define PPORT_802_3_OFF(c) \ 172#define PPORT_802_3_OFF(c) \
@@ -192,94 +204,69 @@ struct mlx5e_pport_stats {
192}; 204};
193 205
194static const struct counter_desc pport_802_3_stats_desc[] = { 206static const struct counter_desc pport_802_3_stats_desc[] = {
195 { "frames_tx", PPORT_802_3_OFF(a_frames_transmitted_ok) }, 207 { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
196 { "frames_rx", PPORT_802_3_OFF(a_frames_received_ok) }, 208 { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
197 { "check_seq_err", PPORT_802_3_OFF(a_frame_check_sequence_errors) }, 209 { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
198 { "alignment_err", PPORT_802_3_OFF(a_alignment_errors) }, 210 { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
199 { "octets_tx", PPORT_802_3_OFF(a_octets_transmitted_ok) }, 211 { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
200 { "octets_received", PPORT_802_3_OFF(a_octets_received_ok) }, 212 { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
201 { "multicast_xmitted", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) }, 213 { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
202 { "broadcast_xmitted", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) }, 214 { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
203 { "multicast_rx", PPORT_802_3_OFF(a_multicast_frames_received_ok) }, 215 { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
204 { "broadcast_rx", PPORT_802_3_OFF(a_broadcast_frames_received_ok) }, 216 { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
205 { "in_range_len_errors", PPORT_802_3_OFF(a_in_range_length_errors) }, 217 { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
206 { "out_of_range_len", PPORT_802_3_OFF(a_out_of_range_length_field) }, 218 { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
207 { "too_long_errors", PPORT_802_3_OFF(a_frame_too_long_errors) }, 219 { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
208 { "symbol_err", PPORT_802_3_OFF(a_symbol_error_during_carrier) }, 220 { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
209 { "mac_control_tx", PPORT_802_3_OFF(a_mac_control_frames_transmitted) }, 221 { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
210 { "mac_control_rx", PPORT_802_3_OFF(a_mac_control_frames_received) }, 222 { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
211 { "unsupported_op_rx", 223 { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
212 PPORT_802_3_OFF(a_unsupported_opcodes_received) }, 224 { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
213 { "pause_ctrl_rx", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
214 { "pause_ctrl_tx",
215 PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
216}; 225};
217 226
218static const struct counter_desc pport_2863_stats_desc[] = { 227static const struct counter_desc pport_2863_stats_desc[] = {
219 { "in_octets", PPORT_2863_OFF(if_in_octets) }, 228 { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
220 { "in_ucast_pkts", PPORT_2863_OFF(if_in_ucast_pkts) }, 229 { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
221 { "in_discards", PPORT_2863_OFF(if_in_discards) }, 230 { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
222 { "in_errors", PPORT_2863_OFF(if_in_errors) },
223 { "in_unknown_protos", PPORT_2863_OFF(if_in_unknown_protos) },
224 { "out_octets", PPORT_2863_OFF(if_out_octets) },
225 { "out_ucast_pkts", PPORT_2863_OFF(if_out_ucast_pkts) },
226 { "out_discards", PPORT_2863_OFF(if_out_discards) },
227 { "out_errors", PPORT_2863_OFF(if_out_errors) },
228 { "in_multicast_pkts", PPORT_2863_OFF(if_in_multicast_pkts) },
229 { "in_broadcast_pkts", PPORT_2863_OFF(if_in_broadcast_pkts) },
230 { "out_multicast_pkts", PPORT_2863_OFF(if_out_multicast_pkts) },
231 { "out_broadcast_pkts", PPORT_2863_OFF(if_out_broadcast_pkts) },
232}; 231};
233 232
234static const struct counter_desc pport_2819_stats_desc[] = { 233static const struct counter_desc pport_2819_stats_desc[] = {
235 { "drop_events", PPORT_2819_OFF(ether_stats_drop_events) }, 234 { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
236 { "octets", PPORT_2819_OFF(ether_stats_octets) }, 235 { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
237 { "pkts", PPORT_2819_OFF(ether_stats_pkts) }, 236 { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
238 { "broadcast_pkts", PPORT_2819_OFF(ether_stats_broadcast_pkts) }, 237 { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
239 { "multicast_pkts", PPORT_2819_OFF(ether_stats_multicast_pkts) }, 238 { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
240 { "crc_align_errors", PPORT_2819_OFF(ether_stats_crc_align_errors) }, 239 { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
241 { "undersize_pkts", PPORT_2819_OFF(ether_stats_undersize_pkts) }, 240 { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
242 { "oversize_pkts", PPORT_2819_OFF(ether_stats_oversize_pkts) }, 241 { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
243 { "fragments", PPORT_2819_OFF(ether_stats_fragments) }, 242 { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
244 { "jabbers", PPORT_2819_OFF(ether_stats_jabbers) }, 243 { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
245 { "collisions", PPORT_2819_OFF(ether_stats_collisions) }, 244 { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
246 { "p64octets", PPORT_2819_OFF(ether_stats_pkts64octets) }, 245 { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
247 { "p65to127octets", PPORT_2819_OFF(ether_stats_pkts65to127octets) }, 246 { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
248 { "p128to255octets", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
249 { "p256to511octets", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
250 { "p512to1023octets", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
251 { "p1024to1518octets",
252 PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
253 { "p1519to2047octets",
254 PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
255 { "p2048to4095octets",
256 PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
257 { "p4096to8191octets",
258 PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
259 { "p8192to10239octets",
260 PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
261}; 247};
262 248
263static const struct counter_desc pport_per_prio_traffic_stats_desc[] = { 249static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
264 { "rx_octets", PPORT_PER_PRIO_OFF(rx_octets) }, 250 { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
265 { "rx_frames", PPORT_PER_PRIO_OFF(rx_frames) }, 251 { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
266 { "tx_octets", PPORT_PER_PRIO_OFF(tx_octets) }, 252 { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
267 { "tx_frames", PPORT_PER_PRIO_OFF(tx_frames) }, 253 { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
268}; 254};
269 255
270static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { 256static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
271 { "rx_pause", PPORT_PER_PRIO_OFF(rx_pause) }, 257 /* %s is "global" or "prio{i}" */
272 { "rx_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) }, 258 { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
273 { "tx_pause", PPORT_PER_PRIO_OFF(tx_pause) }, 259 { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
274 { "tx_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) }, 260 { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
275 { "rx_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, 261 { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
262 { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
276}; 263};
277 264
278struct mlx5e_rq_stats { 265struct mlx5e_rq_stats {
279 u64 packets; 266 u64 packets;
280 u64 bytes; 267 u64 bytes;
281 u64 csum_sw; 268 u64 csum_complete;
282 u64 csum_inner; 269 u64 csum_unnecessary_inner;
283 u64 csum_none; 270 u64 csum_none;
284 u64 lro_packets; 271 u64 lro_packets;
285 u64 lro_bytes; 272 u64 lro_bytes;
@@ -292,19 +279,19 @@ struct mlx5e_rq_stats {
292}; 279};
293 280
294static const struct counter_desc rq_stats_desc[] = { 281static const struct counter_desc rq_stats_desc[] = {
295 { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, packets) }, 282 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
296 { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, bytes) }, 283 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
297 { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_sw) }, 284 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
298 { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_inner) }, 285 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
299 { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) }, 286 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
300 { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_packets) }, 287 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
301 { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_bytes) }, 288 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
302 { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, wqe_err) }, 289 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
303 { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_filler) }, 290 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
304 { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_frag) }, 291 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
305 { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, 292 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
306 { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, 293 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
307 { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, 294 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
308}; 295};
309 296
310struct mlx5e_sq_stats { 297struct mlx5e_sq_stats {
@@ -315,28 +302,28 @@ struct mlx5e_sq_stats {
315 u64 tso_bytes; 302 u64 tso_bytes;
316 u64 tso_inner_packets; 303 u64 tso_inner_packets;
317 u64 tso_inner_bytes; 304 u64 tso_inner_bytes;
318 u64 csum_offload_inner; 305 u64 csum_partial_inner;
319 u64 nop; 306 u64 nop;
320 /* less likely accessed in data path */ 307 /* less likely accessed in data path */
321 u64 csum_offload_none; 308 u64 csum_none;
322 u64 stopped; 309 u64 stopped;
323 u64 wake; 310 u64 wake;
324 u64 dropped; 311 u64 dropped;
325}; 312};
326 313
327static const struct counter_desc sq_stats_desc[] = { 314static const struct counter_desc sq_stats_desc[] = {
328 { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, packets) }, 315 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
329 { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, bytes) }, 316 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
330 { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_packets) }, 317 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
331 { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_bytes) }, 318 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
332 { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_packets) }, 319 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
333 { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_bytes) }, 320 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
334 { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_inner) }, 321 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
335 { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, nop) }, 322 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
336 { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_none) }, 323 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
337 { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, stopped) }, 324 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
338 { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, wake) }, 325 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
339 { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, dropped) }, 326 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
340}; 327};
341 328
342#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc) 329#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
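Renaming counter_desc.name to .format signals the new contract: per-ring entries now carry printf templates such as "rx%d_packets", and the ethtool strings callback is expected to expand them once per ring. A runnable sketch of that expansion (buffer handling is illustrative; ETH_GSTRING_LEN is 32 in the kernel):

    #include <stdio.h>

    #define ETH_GSTRING_LEN 32      /* matches the kernel's value */

    struct counter_desc { const char *format; int offset; };

    /* Two illustrative per-ring templates in the new "rx%d_" style */
    static const struct counter_desc rq_desc[] = {
            { "rx%d_packets", 0 },
            { "rx%d_bytes",   8 },
    };

    static void fill_strings(char *data, int num_rings)
    {
            int i, j, idx = 0;

            for (i = 0; i < num_rings; i++)
                    for (j = 0; j < 2; j++)
                            snprintf(data + (idx++) * ETH_GSTRING_LEN,
                                     ETH_GSTRING_LEN, rq_desc[j].format, i);
    }

    int main(void)
    {
            char data[4 * ETH_GSTRING_LEN];

            fill_strings(data, 2);
            /* prints "rx0_packets rx1_bytes" */
            printf("%s %s\n", data, data + 3 * ETH_GSTRING_LEN);
            return 0;
    }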
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 704c3d30493e..3261e8b1286e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -50,7 +50,7 @@ struct mlx5e_tc_flow {
50#define MLX5E_TC_TABLE_NUM_GROUPS 4 50#define MLX5E_TC_TABLE_NUM_GROUPS 4
51 51
52static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, 52static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
53 u32 *match_c, u32 *match_v, 53 struct mlx5_flow_spec *spec,
54 u32 action, u32 flow_tag) 54 u32 action, u32 flow_tag)
55{ 55{
56 struct mlx5_core_dev *dev = priv->mdev; 56 struct mlx5_core_dev *dev = priv->mdev;
@@ -88,8 +88,8 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
88 table_created = true; 88 table_created = true;
89 } 89 }
90 90
91 rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS, 91 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
92 match_c, match_v, 92 rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
93 action, flow_tag, 93 action, flow_tag,
94 &dest); 94 &dest);
95 95
@@ -126,12 +126,13 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
126 } 126 }
127} 127}
128 128
129static int parse_cls_flower(struct mlx5e_priv *priv, 129static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
130 u32 *match_c, u32 *match_v,
131 struct tc_cls_flower_offload *f) 130 struct tc_cls_flower_offload *f)
132{ 131{
133 void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers); 132 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
134 void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers); 133 outer_headers);
134 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
135 outer_headers);
135 u16 addr_type = 0; 136 u16 addr_type = 0;
136 u8 ip_proto = 0; 137 u8 ip_proto = 0;
137 138
@@ -342,12 +343,11 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
342 struct tc_cls_flower_offload *f) 343 struct tc_cls_flower_offload *f)
343{ 344{
344 struct mlx5e_tc_table *tc = &priv->fs.tc; 345 struct mlx5e_tc_table *tc = &priv->fs.tc;
345 u32 *match_c;
346 u32 *match_v;
347 int err = 0; 346 int err = 0;
348 u32 flow_tag; 347 u32 flow_tag;
349 u32 action; 348 u32 action;
350 struct mlx5e_tc_flow *flow; 349 struct mlx5e_tc_flow *flow;
350 struct mlx5_flow_spec *spec;
351 struct mlx5_flow_rule *old = NULL; 351 struct mlx5_flow_rule *old = NULL;
352 352
353 flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, 353 flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
@@ -357,16 +357,15 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
357 else 357 else
358 flow = kzalloc(sizeof(*flow), GFP_KERNEL); 358 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
359 359
360 match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); 360 spec = mlx5_vzalloc(sizeof(*spec));
361 match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); 361 if (!spec || !flow) {
362 if (!match_c || !match_v || !flow) {
363 err = -ENOMEM; 362 err = -ENOMEM;
364 goto err_free; 363 goto err_free;
365 } 364 }
366 365
367 flow->cookie = f->cookie; 366 flow->cookie = f->cookie;
368 367
369 err = parse_cls_flower(priv, match_c, match_v, f); 368 err = parse_cls_flower(priv, spec, f);
370 if (err < 0) 369 if (err < 0)
371 goto err_free; 370 goto err_free;
372 371
@@ -379,8 +378,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
379 if (err) 378 if (err)
380 goto err_free; 379 goto err_free;
381 380
382 flow->rule = mlx5e_tc_add_flow(priv, match_c, match_v, action, 381 flow->rule = mlx5e_tc_add_flow(priv, spec, action, flow_tag);
383 flow_tag);
384 if (IS_ERR(flow->rule)) { 382 if (IS_ERR(flow->rule)) {
385 err = PTR_ERR(flow->rule); 383 err = PTR_ERR(flow->rule);
386 goto err_hash_del; 384 goto err_hash_del;
@@ -398,8 +396,7 @@ err_free:
398 if (!old) 396 if (!old)
399 kfree(flow); 397 kfree(flow);
400out: 398out:
401 kfree(match_c); 399 kvfree(spec);
402 kfree(match_v);
403 return err; 400 return err;
404} 401}
405 402
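The TC rework above replaces the separate match_c/match_v buffers with a single mlx5_flow_spec that carries criteria, values, and the criteria_enable flags together, allocated with mlx5_vzalloc() and released with kvfree() on every exit path. A hedged sketch of the calling pattern (struct layout abbreviated, not the real mlx5 header):

    #include <linux/vmalloc.h>
    #include <linux/mm.h>
    #include <linux/errno.h>

    struct sketch_flow_spec {
            u8  match_criteria_enable;
            u32 match_criteria[128];  /* mask: which header bits to match */
            u32 match_value[128];     /* the values they must equal */
    };

    static int sketch_add_rule(void)
    {
            struct sketch_flow_spec *spec;

            spec = vzalloc(sizeof(*spec));  /* driver uses mlx5_vzalloc() */
            if (!spec)
                    return -ENOMEM;

            /* ... fill criteria/value, set match_criteria_enable,
             * hand the whole spec to the add-rule call ... */

            kvfree(spec);   /* frees vmalloc'd or kmalloc'd memory alike */
            return 0;
    }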
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index b000ddc29553..5740b465ef84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -110,8 +110,20 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
110{ 110{
111 struct mlx5e_priv *priv = netdev_priv(dev); 111 struct mlx5e_priv *priv = netdev_priv(dev);
112 int channel_ix = fallback(dev, skb); 112 int channel_ix = fallback(dev, skb);
113 int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ? 113 int up = 0;
114 skb->vlan_tci >> VLAN_PRIO_SHIFT : 0; 114
115 if (!netdev_get_num_tc(dev))
116 return channel_ix;
117
118 if (skb_vlan_tag_present(skb))
119 up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
120
121 /* channel_ix can be larger than num_channels since
122 * dev->num_real_tx_queues = num_channels * num_tc
123 */
124 if (channel_ix >= priv->params.num_channels)
125 channel_ix = reciprocal_scale(channel_ix,
126 priv->params.num_channels);
115 127
116 return priv->channeltc_to_txq_map[channel_ix][up]; 128 return priv->channeltc_to_txq_map[channel_ix][up];
117} 129}
@@ -123,7 +135,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
123 * headers and occur before the data gather. 135 * headers and occur before the data gather.
124 * Therefore these headers must be copied into the WQE 136 * Therefore these headers must be copied into the WQE
125 */ 137 */
126#define MLX5E_MIN_INLINE ETH_HLEN 138#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
127 139
128 if (bf) { 140 if (bf) {
129 u16 ihs = skb_headlen(skb); 141 u16 ihs = skb_headlen(skb);
@@ -135,7 +147,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
135 return skb_headlen(skb); 147 return skb_headlen(skb);
136 } 148 }
137 149
138 return MLX5E_MIN_INLINE; 150 return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
139} 151}
140 152
141static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, 153static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
@@ -192,12 +204,12 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
192 if (skb->encapsulation) { 204 if (skb->encapsulation) {
193 eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM | 205 eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
194 MLX5_ETH_WQE_L4_INNER_CSUM; 206 MLX5_ETH_WQE_L4_INNER_CSUM;
195 sq->stats.csum_offload_inner++; 207 sq->stats.csum_partial_inner++;
196 } else { 208 } else {
197 eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; 209 eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
198 } 210 }
199 } else 211 } else
200 sq->stats.csum_offload_none++; 212 sq->stats.csum_none++;
201 213
202 if (sq->cc != sq->prev_cc) { 214 if (sq->cc != sq->prev_cc) {
203 sq->prev_cc = sq->cc; 215 sq->prev_cc = sq->cc;
@@ -341,6 +353,35 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
341 return mlx5e_sq_xmit(sq, skb); 353 return mlx5e_sq_xmit(sq, skb);
342} 354}
343 355
356void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
357{
358 struct mlx5e_tx_wqe_info *wi;
359 struct sk_buff *skb;
360 u16 ci;
361 int i;
362
363 while (sq->cc != sq->pc) {
364 ci = sq->cc & sq->wq.sz_m1;
365 skb = sq->skb[ci];
366 wi = &sq->wqe_info[ci];
367
368 if (!skb) { /* nop */
369 sq->cc++;
370 continue;
371 }
372
373 for (i = 0; i < wi->num_dma; i++) {
374 struct mlx5e_sq_dma *dma =
375 mlx5e_dma_get(sq, sq->dma_fifo_cc++);
376
377 mlx5e_tx_dma_unmap(sq->pdev, dma);
378 }
379
380 dev_kfree_skb_any(skb);
381 sq->cc += wi->num_wqebbs;
382 }
383}
384
344bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) 385bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
345{ 386{
346 struct mlx5e_sq *sq; 387 struct mlx5e_sq *sq;
@@ -352,6 +393,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
352 393
353 sq = container_of(cq, struct mlx5e_sq, cq); 394 sq = container_of(cq, struct mlx5e_sq, cq);
354 395
396 if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
397 return false;
398
355 npkts = 0; 399 npkts = 0;
356 nbytes = 0; 400 nbytes = 0;
357 401
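reciprocal_scale(), used above to clamp the stack's queue pick into the active channel range, avoids a modulo on the hot path by mapping val into [0, ep_ro) with one multiply and shift. Its definition below matches include/linux/kernel.h; the demo harness around it is ours:

    #include <stdint.h>
    #include <stdio.h>

    static inline uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
    {
            return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
    }

    int main(void)
    {
            /* e.g. clamp a queue index picked by the stack to 4 channels */
            printf("%u\n", reciprocal_scale(0x9000000au, 4));  /* 0..3 */
            return 0;
    }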
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index aebbd6ccb9fe..f6d667797ee1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -40,17 +40,6 @@
40 40
41#define UPLINK_VPORT 0xFFFF 41#define UPLINK_VPORT 0xFFFF
42 42
43#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
44
45#define esw_info(dev, format, ...) \
46 pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
47
48#define esw_warn(dev, format, ...) \
49 pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
50
51#define esw_debug(dev, format, ...) \
52 mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
53
54enum { 43enum {
55 MLX5_ACTION_NONE = 0, 44 MLX5_ACTION_NONE = 0,
56 MLX5_ACTION_ADD = 1, 45 MLX5_ACTION_ADD = 1,
@@ -92,6 +81,9 @@ enum {
92 MC_ADDR_CHANGE | \ 81 MC_ADDR_CHANGE | \
93 PROMISC_CHANGE) 82 PROMISC_CHANGE)
94 83
84int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
85void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
86
95static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, 87static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
96 u32 events_mask) 88 u32 events_mask)
97{ 89{
@@ -337,25 +329,23 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
337 MLX5_MATCH_OUTER_HEADERS); 329 MLX5_MATCH_OUTER_HEADERS);
338 struct mlx5_flow_rule *flow_rule = NULL; 330 struct mlx5_flow_rule *flow_rule = NULL;
339 struct mlx5_flow_destination dest; 331 struct mlx5_flow_destination dest;
332 struct mlx5_flow_spec *spec;
340 void *mv_misc = NULL; 333 void *mv_misc = NULL;
341 void *mc_misc = NULL; 334 void *mc_misc = NULL;
342 u8 *dmac_v = NULL; 335 u8 *dmac_v = NULL;
343 u8 *dmac_c = NULL; 336 u8 *dmac_c = NULL;
344 u32 *match_v;
345 u32 *match_c;
346 337
347 if (rx_rule) 338 if (rx_rule)
348 match_header |= MLX5_MATCH_MISC_PARAMETERS; 339 match_header |= MLX5_MATCH_MISC_PARAMETERS;
349 match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); 340
350 match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); 341 spec = mlx5_vzalloc(sizeof(*spec));
351 if (!match_v || !match_c) { 342 if (!spec) {
352 pr_warn("FDB: Failed to alloc match parameters\n"); 343 pr_warn("FDB: Failed to alloc match parameters\n");
353 goto out; 344 return NULL;
354 } 345 }
355 346 dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
356 dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
357 outer_headers.dmac_47_16); 347 outer_headers.dmac_47_16);
358 dmac_c = MLX5_ADDR_OF(fte_match_param, match_c, 348 dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
359 outer_headers.dmac_47_16); 349 outer_headers.dmac_47_16);
360 350
361 if (match_header & MLX5_MATCH_OUTER_HEADERS) { 351 if (match_header & MLX5_MATCH_OUTER_HEADERS) {
@@ -364,8 +354,10 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
364 } 354 }
365 355
366 if (match_header & MLX5_MATCH_MISC_PARAMETERS) { 356 if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
367 mv_misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters); 357 mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
368 mc_misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters); 358 misc_parameters);
359 mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
360 misc_parameters);
369 MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT); 361 MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
370 MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port); 362 MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
371 } 363 }
@@ -376,11 +368,9 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
376 esw_debug(esw->dev, 368 esw_debug(esw->dev,
377 "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n", 369 "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
378 dmac_v, dmac_c, vport); 370 dmac_v, dmac_c, vport);
371 spec->match_criteria_enable = match_header;
379 flow_rule = 372 flow_rule =
380 mlx5_add_flow_rule(esw->fdb_table.fdb, 373 mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
381 match_header,
382 match_c,
383 match_v,
384 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 374 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
385 0, &dest); 375 0, &dest);
386 if (IS_ERR(flow_rule)) { 376 if (IS_ERR(flow_rule)) {
@@ -389,9 +379,8 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
389 dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); 379 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
390 flow_rule = NULL; 380 flow_rule = NULL;
391 } 381 }
392out: 382
393 kfree(match_v); 383 kvfree(spec);
394 kfree(match_c);
395 return flow_rule; 384 return flow_rule;
396} 385}
397 386
@@ -428,7 +417,7 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
428 return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v); 417 return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
429} 418}
430 419
431static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) 420static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
432{ 421{
433 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); 422 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
434 struct mlx5_core_dev *dev = esw->dev; 423 struct mlx5_core_dev *dev = esw->dev;
@@ -479,7 +468,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
479 esw_warn(dev, "Failed to create flow group err(%d)\n", err); 468 esw_warn(dev, "Failed to create flow group err(%d)\n", err);
480 goto out; 469 goto out;
481 } 470 }
482 esw->fdb_table.addr_grp = g; 471 esw->fdb_table.legacy.addr_grp = g;
483 472
484 /* Allmulti group : One rule that forwards any mcast traffic */ 473 /* Allmulti group : One rule that forwards any mcast traffic */
485 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 474 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
@@ -494,7 +483,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
494 esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); 483 esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
495 goto out; 484 goto out;
496 } 485 }
497 esw->fdb_table.allmulti_grp = g; 486 esw->fdb_table.legacy.allmulti_grp = g;
498 487
499 /* Promiscuous group : 488 /* Promiscuous group :
500 * One rule that forward all unmatched traffic from previous groups 489 * One rule that forward all unmatched traffic from previous groups
@@ -511,17 +500,17 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
511 esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); 500 esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
512 goto out; 501 goto out;
513 } 502 }
514 esw->fdb_table.promisc_grp = g; 503 esw->fdb_table.legacy.promisc_grp = g;
515 504
516out: 505out:
517 if (err) { 506 if (err) {
518 if (!IS_ERR_OR_NULL(esw->fdb_table.allmulti_grp)) { 507 if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) {
519 mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp); 508 mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
520 esw->fdb_table.allmulti_grp = NULL; 509 esw->fdb_table.legacy.allmulti_grp = NULL;
521 } 510 }
522 if (!IS_ERR_OR_NULL(esw->fdb_table.addr_grp)) { 511 if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) {
523 mlx5_destroy_flow_group(esw->fdb_table.addr_grp); 512 mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
524 esw->fdb_table.addr_grp = NULL; 513 esw->fdb_table.legacy.addr_grp = NULL;
525 } 514 }
526 if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) { 515 if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
527 mlx5_destroy_flow_table(esw->fdb_table.fdb); 516 mlx5_destroy_flow_table(esw->fdb_table.fdb);
@@ -533,20 +522,20 @@ out:
533 return err; 522 return err;
534} 523}
535 524
536static void esw_destroy_fdb_table(struct mlx5_eswitch *esw) 525static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
537{ 526{
538 if (!esw->fdb_table.fdb) 527 if (!esw->fdb_table.fdb)
539 return; 528 return;
540 529
541 esw_debug(esw->dev, "Destroy FDB Table\n"); 530 esw_debug(esw->dev, "Destroy FDB Table\n");
542 mlx5_destroy_flow_group(esw->fdb_table.promisc_grp); 531 mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
543 mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp); 532 mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
544 mlx5_destroy_flow_group(esw->fdb_table.addr_grp); 533 mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
545 mlx5_destroy_flow_table(esw->fdb_table.fdb); 534 mlx5_destroy_flow_table(esw->fdb_table.fdb);
546 esw->fdb_table.fdb = NULL; 535 esw->fdb_table.fdb = NULL;
547 esw->fdb_table.addr_grp = NULL; 536 esw->fdb_table.legacy.addr_grp = NULL;
548 esw->fdb_table.allmulti_grp = NULL; 537 esw->fdb_table.legacy.allmulti_grp = NULL;
549 esw->fdb_table.promisc_grp = NULL; 538 esw->fdb_table.legacy.promisc_grp = NULL;
550} 539}
551 540
552/* E-Switch vport UC/MC lists management */ 541/* E-Switch vport UC/MC lists management */
@@ -578,7 +567,8 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
578 if (err) 567 if (err)
579 goto abort; 568 goto abort;
580 569
581 if (esw->fdb_table.fdb) /* SRIOV is enabled: Forward UC MAC to vport */ 570 /* SRIOV is enabled: Forward UC MAC to vport */
571 if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY)
582 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); 572 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
583 573
584 esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n", 574 esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
@@ -1300,9 +1290,8 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
1300static int esw_vport_ingress_config(struct mlx5_eswitch *esw, 1290static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1301 struct mlx5_vport *vport) 1291 struct mlx5_vport *vport)
1302{ 1292{
1293 struct mlx5_flow_spec *spec;
1303 u8 smac[ETH_ALEN]; 1294 u8 smac[ETH_ALEN];
1304 u32 *match_v;
1305 u32 *match_c;
1306 int err = 0; 1295 int err = 0;
1307 u8 *smac_v; 1296 u8 *smac_v;
1308 1297
@@ -1336,9 +1325,8 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1336 "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n", 1325 "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
1337 vport->vport, vport->vlan, vport->qos); 1326 vport->vport, vport->vlan, vport->qos);
1338 1327
1339 match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); 1328 spec = mlx5_vzalloc(sizeof(*spec));
1340 match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); 1329 if (!spec) {
1341 if (!match_v || !match_c) {
1342 err = -ENOMEM; 1330 err = -ENOMEM;
1343 esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n", 1331 esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
1344 vport->vport, err); 1332 vport->vport, err);
@@ -1346,22 +1334,20 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1346 } 1334 }
1347 1335
1348 if (vport->vlan || vport->qos) 1336 if (vport->vlan || vport->qos)
1349 MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag); 1337 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
1350 1338
1351 if (vport->spoofchk) { 1339 if (vport->spoofchk) {
1352 MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_47_16); 1340 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
1353 MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_15_0); 1341 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
1354 smac_v = MLX5_ADDR_OF(fte_match_param, 1342 smac_v = MLX5_ADDR_OF(fte_match_param,
1355 match_v, 1343 spec->match_value,
1356 outer_headers.smac_47_16); 1344 outer_headers.smac_47_16);
1357 ether_addr_copy(smac_v, smac); 1345 ether_addr_copy(smac_v, smac);
1358 } 1346 }
1359 1347
1348 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1360 vport->ingress.allow_rule = 1349 vport->ingress.allow_rule =
1361 mlx5_add_flow_rule(vport->ingress.acl, 1350 mlx5_add_flow_rule(vport->ingress.acl, spec,
1362 MLX5_MATCH_OUTER_HEADERS,
1363 match_c,
1364 match_v,
1365 MLX5_FLOW_CONTEXT_ACTION_ALLOW, 1351 MLX5_FLOW_CONTEXT_ACTION_ALLOW,
1366 0, NULL); 1352 0, NULL);
1367 if (IS_ERR(vport->ingress.allow_rule)) { 1353 if (IS_ERR(vport->ingress.allow_rule)) {
@@ -1372,13 +1358,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1372 goto out; 1358 goto out;
1373 } 1359 }
1374 1360
1375 memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param)); 1361 memset(spec, 0, sizeof(*spec));
1376 memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
1377 vport->ingress.drop_rule = 1362 vport->ingress.drop_rule =
1378 mlx5_add_flow_rule(vport->ingress.acl, 1363 mlx5_add_flow_rule(vport->ingress.acl, spec,
1379 0,
1380 match_c,
1381 match_v,
1382 MLX5_FLOW_CONTEXT_ACTION_DROP, 1364 MLX5_FLOW_CONTEXT_ACTION_DROP,
1383 0, NULL); 1365 0, NULL);
1384 if (IS_ERR(vport->ingress.drop_rule)) { 1366 if (IS_ERR(vport->ingress.drop_rule)) {
@@ -1392,17 +1374,14 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1392out: 1374out:
1393 if (err) 1375 if (err)
1394 esw_vport_cleanup_ingress_rules(esw, vport); 1376 esw_vport_cleanup_ingress_rules(esw, vport);
1395 1377 kvfree(spec);
1396 kfree(match_v);
1397 kfree(match_c);
1398 return err; 1378 return err;
1399} 1379}
1400 1380
1401static int esw_vport_egress_config(struct mlx5_eswitch *esw, 1381static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1402 struct mlx5_vport *vport) 1382 struct mlx5_vport *vport)
1403{ 1383{
1404 u32 *match_v; 1384 struct mlx5_flow_spec *spec;
1405 u32 *match_c;
1406 int err = 0; 1385 int err = 0;
1407 1386
1408 esw_vport_cleanup_egress_rules(esw, vport); 1387 esw_vport_cleanup_egress_rules(esw, vport);
@@ -1418,9 +1397,8 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1418 "vport[%d] configure egress rules, vlan(%d) qos(%d)\n", 1397 "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
1419 vport->vport, vport->vlan, vport->qos); 1398 vport->vport, vport->vlan, vport->qos);
1420 1399
1421 match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); 1400 spec = mlx5_vzalloc(sizeof(*spec));
1422 match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); 1401 if (!spec) {
1423 if (!match_v || !match_c) {
1424 err = -ENOMEM; 1402 err = -ENOMEM;
1425 esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n", 1403 esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
1426 vport->vport, err); 1404 vport->vport, err);
@@ -1428,16 +1406,14 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1428 } 1406 }
1429 1407
1430 /* Allowed vlan rule */ 1408 /* Allowed vlan rule */
1431 MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag); 1409 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
1432 MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag); 1410 MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
1433 MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid); 1411 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
1434 MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan); 1412 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->vlan);
1435 1413
1414 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1436 vport->egress.allowed_vlan = 1415 vport->egress.allowed_vlan =
1437 mlx5_add_flow_rule(vport->egress.acl, 1416 mlx5_add_flow_rule(vport->egress.acl, spec,
1438 MLX5_MATCH_OUTER_HEADERS,
1439 match_c,
1440 match_v,
1441 MLX5_FLOW_CONTEXT_ACTION_ALLOW, 1417 MLX5_FLOW_CONTEXT_ACTION_ALLOW,
1442 0, NULL); 1418 0, NULL);
1443 if (IS_ERR(vport->egress.allowed_vlan)) { 1419 if (IS_ERR(vport->egress.allowed_vlan)) {
@@ -1449,13 +1425,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1449 } 1425 }
1450 1426
1451 /* Drop others rule (star rule) */ 1427 /* Drop others rule (star rule) */
1452 memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param)); 1428 memset(spec, 0, sizeof(*spec));
1453 memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
1454 vport->egress.drop_rule = 1429 vport->egress.drop_rule =
1455 mlx5_add_flow_rule(vport->egress.acl, 1430 mlx5_add_flow_rule(vport->egress.acl, spec,
1456 0,
1457 match_c,
1458 match_v,
1459 MLX5_FLOW_CONTEXT_ACTION_DROP, 1431 MLX5_FLOW_CONTEXT_ACTION_DROP,
1460 0, NULL); 1432 0, NULL);
1461 if (IS_ERR(vport->egress.drop_rule)) { 1433 if (IS_ERR(vport->egress.drop_rule)) {
@@ -1465,8 +1437,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1465 vport->egress.drop_rule = NULL; 1437 vport->egress.drop_rule = NULL;
1466 } 1438 }
1467out: 1439out:
1468 kfree(match_v); 1440 kvfree(spec);
1469 kfree(match_c);
1470 return err; 1441 return err;
1471} 1442}
1472 1443
@@ -1540,10 +1511,10 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
1540} 1511}
1541 1512
1542/* Public E-Switch API */ 1513/* Public E-Switch API */
1543int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs) 1514int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
1544{ 1515{
1545 int err; 1516 int err;
1546 int i; 1517 int i, enabled_events;
1547 1518
1548 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || 1519 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1549 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 1520 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
@@ -1561,16 +1532,20 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
1561 if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support)) 1532 if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
1562 esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n"); 1533 esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");
1563 1534
1564 esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs); 1535 esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
1565 1536 esw->mode = mode;
1566 esw_disable_vport(esw, 0); 1537 esw_disable_vport(esw, 0);
1567 1538
1568 err = esw_create_fdb_table(esw, nvfs + 1); 1539 if (mode == SRIOV_LEGACY)
1540 err = esw_create_legacy_fdb_table(esw, nvfs + 1);
1541 else
1542 err = esw_offloads_init(esw, nvfs + 1);
1569 if (err) 1543 if (err)
1570 goto abort; 1544 goto abort;
1571 1545
1546 enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE;
1572 for (i = 0; i <= nvfs; i++) 1547 for (i = 0; i <= nvfs; i++)
1573 esw_enable_vport(esw, i, SRIOV_VPORT_EVENTS); 1548 esw_enable_vport(esw, i, enabled_events);
1574 1549
1575 esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n", 1550 esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
1576 esw->enabled_vports); 1551 esw->enabled_vports);
@@ -1584,16 +1559,18 @@ abort:
1584void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) 1559void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
1585{ 1560{
1586 struct esw_mc_addr *mc_promisc; 1561 struct esw_mc_addr *mc_promisc;
1562 int nvports;
1587 int i; 1563 int i;
1588 1564
1589 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || 1565 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1590 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 1566 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1591 return; 1567 return;
1592 1568
1593 esw_info(esw->dev, "disable SRIOV: active vports(%d)\n", 1569 esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
1594 esw->enabled_vports); 1570 esw->enabled_vports, esw->mode);
1595 1571
1596 mc_promisc = esw->mc_promisc; 1572 mc_promisc = esw->mc_promisc;
1573 nvports = esw->enabled_vports;
1597 1574
1598 for (i = 0; i < esw->total_vports; i++) 1575 for (i = 0; i < esw->total_vports; i++)
1599 esw_disable_vport(esw, i); 1576 esw_disable_vport(esw, i);
@@ -1601,8 +1578,12 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
1601 if (mc_promisc && mc_promisc->uplink_rule) 1578 if (mc_promisc && mc_promisc->uplink_rule)
1602 mlx5_del_flow_rule(mc_promisc->uplink_rule); 1579 mlx5_del_flow_rule(mc_promisc->uplink_rule);
1603 1580
1604 esw_destroy_fdb_table(esw); 1581 if (esw->mode == SRIOV_LEGACY)
1582 esw_destroy_legacy_fdb_table(esw);
1583 else if (esw->mode == SRIOV_OFFLOADS)
1584 esw_offloads_cleanup(esw, nvports);
1605 1585
1586 esw->mode = SRIOV_NONE;
1606 /* VPORT 0 (PF) must be enabled back with non-sriov configuration */ 1587 /* VPORT 0 (PF) must be enabled back with non-sriov configuration */
1607 esw_enable_vport(esw, 0, UC_ADDR_CHANGE); 1588 esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
1608} 1589}
@@ -1660,6 +1641,14 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
1660 goto abort; 1641 goto abort;
1661 } 1642 }
1662 1643
1644 esw->offloads.vport_reps =
1645 kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep),
1646 GFP_KERNEL);
1647 if (!esw->offloads.vport_reps) {
1648 err = -ENOMEM;
1649 goto abort;
1650 }
1651
1663 mutex_init(&esw->state_lock); 1652 mutex_init(&esw->state_lock);
1664 1653
1665 for (vport_num = 0; vport_num < total_vports; vport_num++) { 1654 for (vport_num = 0; vport_num < total_vports; vport_num++) {
@@ -1673,6 +1662,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
1673 1662
1674 esw->total_vports = total_vports; 1663 esw->total_vports = total_vports;
1675 esw->enabled_vports = 0; 1664 esw->enabled_vports = 0;
1665 esw->mode = SRIOV_NONE;
1676 1666
1677 dev->priv.eswitch = esw; 1667 dev->priv.eswitch = esw;
1678 esw_enable_vport(esw, 0, UC_ADDR_CHANGE); 1668 esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
@@ -1683,6 +1673,7 @@ abort:
1683 destroy_workqueue(esw->work_queue); 1673 destroy_workqueue(esw->work_queue);
1684 kfree(esw->l2_table.bitmap); 1674 kfree(esw->l2_table.bitmap);
1685 kfree(esw->vports); 1675 kfree(esw->vports);
1676 kfree(esw->offloads.vport_reps);
1686 kfree(esw); 1677 kfree(esw);
1687 return err; 1678 return err;
1688} 1679}
@@ -1700,6 +1691,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
1700 destroy_workqueue(esw->work_queue); 1691 destroy_workqueue(esw->work_queue);
1701 kfree(esw->l2_table.bitmap); 1692 kfree(esw->l2_table.bitmap);
1702 kfree(esw->mc_promisc); 1693 kfree(esw->mc_promisc);
1694 kfree(esw->offloads.vport_reps);
1703 kfree(esw->vports); 1695 kfree(esw->vports);
1704 kfree(esw); 1696 kfree(esw);
1705} 1697}
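The enable/disable paths above now branch once on esw->mode and keep everything else common: legacy mode builds the L2 FDB and arms all vport events, while offloads mode initializes representor state and only tracks unicast address changes. A compilable sketch of that dispatch shape (helpers and event masks are stubs, not mlx5 symbols):

    #include <stdio.h>

    enum { MODE_NONE, MODE_LEGACY, MODE_OFFLOADS };
    enum { UC_EVENTS = 1, ALL_VPORT_EVENTS = 7 };   /* stub masks */

    static int create_legacy_fdb(int nv) { (void)nv; return 0; }
    static int init_offloads(int nv)     { (void)nv; return 0; }
    static void arm_events(int ev)       { printf("events=%d\n", ev); }

    static int sketch_enable(int mode, int nvfs)
    {
            int err = (mode == MODE_LEGACY) ?
                      create_legacy_fdb(nvfs + 1) :
                      init_offloads(nvfs + 1);

            if (err)
                    return err;

            arm_events(mode == MODE_LEGACY ? ALL_VPORT_EVENTS : UC_EVENTS);
            return 0;
    }

    int main(void)
    {
            return sketch_enable(MODE_LEGACY, 2);
    }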
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index fd6800256d4a..7b45e6a6efb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -35,6 +35,7 @@
35 35
36#include <linux/if_ether.h> 36#include <linux/if_ether.h>
37#include <linux/if_link.h> 37#include <linux/if_link.h>
38#include <net/devlink.h>
38#include <linux/mlx5/device.h> 39#include <linux/mlx5/device.h>
39 40
40#define MLX5_MAX_UC_PER_VPORT(dev) \ 41#define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -46,6 +47,8 @@
46#define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE)) 47#define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE))
47#define MLX5_L2_ADDR_HASH(addr) (addr[5]) 48#define MLX5_L2_ADDR_HASH(addr) (addr[5])
48 49
50#define FDB_UPLINK_VPORT 0xffff
51
49/* L2 -mac address based- hash helpers */ 52/* L2 -mac address based- hash helpers */
50struct l2addr_node { 53struct l2addr_node {
51 struct hlist_node hlist; 54 struct hlist_node hlist;
@@ -134,9 +137,48 @@ struct mlx5_l2_table {
134 137
135struct mlx5_eswitch_fdb { 138struct mlx5_eswitch_fdb {
136 void *fdb; 139 void *fdb;
137 struct mlx5_flow_group *addr_grp; 140 union {
138 struct mlx5_flow_group *allmulti_grp; 141 struct legacy_fdb {
139 struct mlx5_flow_group *promisc_grp; 142 struct mlx5_flow_group *addr_grp;
143 struct mlx5_flow_group *allmulti_grp;
144 struct mlx5_flow_group *promisc_grp;
145 } legacy;
146
147 struct offloads_fdb {
148 struct mlx5_flow_group *send_to_vport_grp;
149 struct mlx5_flow_group *miss_grp;
150 struct mlx5_flow_rule *miss_rule;
151 } offloads;
152 };
153};
154
155enum {
156 SRIOV_NONE,
157 SRIOV_LEGACY,
158 SRIOV_OFFLOADS
159};
160
161struct mlx5_esw_sq {
162 struct mlx5_flow_rule *send_to_vport_rule;
163 struct list_head list;
164};
165
166struct mlx5_eswitch_rep {
167 int (*load)(struct mlx5_eswitch *esw,
168 struct mlx5_eswitch_rep *rep);
169 void (*unload)(struct mlx5_eswitch *esw,
170 struct mlx5_eswitch_rep *rep);
171 u16 vport;
172 struct mlx5_flow_rule *vport_rx_rule;
173 void *priv_data;
174 struct list_head vport_sqs_list;
175 bool valid;
176};
177
178struct mlx5_esw_offload {
179 struct mlx5_flow_table *ft_offloads;
180 struct mlx5_flow_group *vport_rx_group;
181 struct mlx5_eswitch_rep *vport_reps;
140}; 182};
141 183
142struct mlx5_eswitch { 184struct mlx5_eswitch {
@@ -153,13 +195,15 @@ struct mlx5_eswitch {
153 */ 195 */
154 struct mutex state_lock; 196 struct mutex state_lock;
155 struct esw_mc_addr *mc_promisc; 197 struct esw_mc_addr *mc_promisc;
198 struct mlx5_esw_offload offloads;
199 int mode;
156}; 200};
157 201
158/* E-Switch API */ 202/* E-Switch API */
159int mlx5_eswitch_init(struct mlx5_core_dev *dev); 203int mlx5_eswitch_init(struct mlx5_core_dev *dev);
160void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw); 204void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
161void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe); 205void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe);
162int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs); 206int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
163void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw); 207void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
164int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, 208int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
165 int vport, u8 mac[ETH_ALEN]); 209 int vport, u8 mac[ETH_ALEN]);
@@ -177,4 +221,30 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
177 int vport, 221 int vport,
178 struct ifla_vf_stats *vf_stats); 222 struct ifla_vf_stats *vf_stats);
179 223
224struct mlx5_flow_rule *
225mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
226
227int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
228 struct mlx5_eswitch_rep *rep,
229 u16 *sqns_array, int sqns_num);
230void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
231 struct mlx5_eswitch_rep *rep);
232
233int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
234int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
235void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
236 struct mlx5_eswitch_rep *rep);
237void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
238 int vport);
239
240#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
241
242#define esw_info(dev, format, ...) \
243 pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
244
245#define esw_warn(dev, format, ...) \
246 pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
247
248#define esw_debug(dev, format, ...) \
249 mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
180#endif /* __MLX5_ESWITCH_H__ */ 250#endif /* __MLX5_ESWITCH_H__ */
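
The header additions above expose a per-vport representor object with load/unload callbacks; esw_offloads_init() later invokes load() for every registered, valid representor. A minimal hypothetical sketch of a consumer registering representors (the example_* names are invented; only the struct layout and registration call come from this patch):

    /* Hedged sketch of representor registration. */
    static int example_rep_load(struct mlx5_eswitch *esw,
    			    struct mlx5_eswitch_rep *rep)
    {
    	/* Typically: create the per-vport netdev and install its RX
    	 * steering via mlx5_eswitch_create_vport_rx_rule().
    	 */
    	return 0;
    }

    static void example_rep_unload(struct mlx5_eswitch *esw,
    			       struct mlx5_eswitch_rep *rep)
    {
    	/* Undo whatever example_rep_load() set up. */
    }

    static void example_register_reps(struct mlx5_eswitch *esw, int nvports)
    {
    	struct mlx5_eswitch_rep rep = {};
    	int vport;

    	for (vport = 0; vport < nvports; vport++) {
    		rep.vport = vport;
    		rep.load = example_rep_load;
    		rep.unload = example_rep_unload;
    		/* The eswitch copies the struct, so it can be reused. */
    		mlx5_eswitch_register_vport_rep(esw, &rep);
    	}
    }
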
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
new file mode 100644
index 000000000000..1842dfb4636b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -0,0 +1,561 @@
1/*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/etherdevice.h>
34#include <linux/mlx5/driver.h>
35#include <linux/mlx5/mlx5_ifc.h>
36#include <linux/mlx5/vport.h>
37#include <linux/mlx5/fs.h>
38#include "mlx5_core.h"
39#include "eswitch.h"
40
41static struct mlx5_flow_rule *
42mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
43{
44 struct mlx5_flow_destination dest;
45 struct mlx5_flow_rule *flow_rule;
46 struct mlx5_flow_spec *spec;
47 void *misc;
48
49 spec = mlx5_vzalloc(sizeof(*spec));
50 if (!spec) {
51 esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
52 flow_rule = ERR_PTR(-ENOMEM);
53 goto out;
54 }
55
56 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
57 MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
58 MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */
59
60 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
61 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
62 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
63
64 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
65 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
66 dest.vport_num = vport;
67
68 flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
69 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
70 0, &dest);
71 if (IS_ERR(flow_rule))
72 esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
73out:
74 kvfree(spec);
75 return flow_rule;
76}
77
78void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
79 struct mlx5_eswitch_rep *rep)
80{
81 struct mlx5_esw_sq *esw_sq, *tmp;
82
83 if (esw->mode != SRIOV_OFFLOADS)
84 return;
85
86 list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
87 mlx5_del_flow_rule(esw_sq->send_to_vport_rule);
88 list_del(&esw_sq->list);
89 kfree(esw_sq);
90 }
91}
92
93int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
94 struct mlx5_eswitch_rep *rep,
95 u16 *sqns_array, int sqns_num)
96{
97 struct mlx5_flow_rule *flow_rule;
98 struct mlx5_esw_sq *esw_sq;
99 int vport;
100 int err;
101 int i;
102
103 if (esw->mode != SRIOV_OFFLOADS)
104 return 0;
105
106 vport = rep->vport == 0 ?
107 FDB_UPLINK_VPORT : rep->vport;
108
109 for (i = 0; i < sqns_num; i++) {
110 esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
111 if (!esw_sq) {
112 err = -ENOMEM;
113 goto out_err;
114 }
115
116 /* Add re-inject rule to the PF/representor sqs */
117 flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
118 vport,
119 sqns_array[i]);
120 if (IS_ERR(flow_rule)) {
121 err = PTR_ERR(flow_rule);
122 kfree(esw_sq);
123 goto out_err;
124 }
125 esw_sq->send_to_vport_rule = flow_rule;
126 list_add(&esw_sq->list, &rep->vport_sqs_list);
127 }
128 return 0;
129
130out_err:
131 mlx5_eswitch_sqs2vport_stop(esw, rep);
132 return err;
133}
134
135static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
136{
137 struct mlx5_flow_destination dest;
138 struct mlx5_flow_rule *flow_rule = NULL;
139 struct mlx5_flow_spec *spec;
140 int err = 0;
141
142 spec = mlx5_vzalloc(sizeof(*spec));
143 if (!spec) {
144 esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
145 err = -ENOMEM;
146 goto out;
147 }
148
149 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
150 dest.vport_num = 0;
151
152 flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
153 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
154 0, &dest);
155 if (IS_ERR(flow_rule)) {
156 err = PTR_ERR(flow_rule);
157 esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
158 goto out;
159 }
160
161 esw->fdb_table.offloads.miss_rule = flow_rule;
162out:
163 kvfree(spec);
164 return err;
165}
166
167#define MAX_PF_SQ 256
168
169static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
170{
171 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
172 struct mlx5_core_dev *dev = esw->dev;
173 struct mlx5_flow_namespace *root_ns;
174 struct mlx5_flow_table *fdb = NULL;
175 struct mlx5_flow_group *g;
176 u32 *flow_group_in;
177 void *match_criteria;
178 int table_size, ix, err = 0;
179
180 flow_group_in = mlx5_vzalloc(inlen);
181 if (!flow_group_in)
182 return -ENOMEM;
183
184 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
185 if (!root_ns) {
186 esw_warn(dev, "Failed to get FDB flow namespace\n");
187 goto ns_err;
188 }
189
190 esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
191 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
192
193 table_size = nvports + MAX_PF_SQ + 1;
194 fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
195 if (IS_ERR(fdb)) {
196 err = PTR_ERR(fdb);
197 esw_warn(dev, "Failed to create FDB Table err %d\n", err);
198 goto fdb_err;
199 }
200 esw->fdb_table.fdb = fdb;
201
202 /* create send-to-vport group */
203 memset(flow_group_in, 0, inlen);
204 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
205 MLX5_MATCH_MISC_PARAMETERS);
206
207 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
208
209 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
210 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
211
212 ix = nvports + MAX_PF_SQ;
213 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
214 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
215
216 g = mlx5_create_flow_group(fdb, flow_group_in);
217 if (IS_ERR(g)) {
218 err = PTR_ERR(g);
219 esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
220 goto send_vport_err;
221 }
222 esw->fdb_table.offloads.send_to_vport_grp = g;
223
224 /* create miss group */
225 memset(flow_group_in, 0, inlen);
226 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);
227
228 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
229 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);
230
231 g = mlx5_create_flow_group(fdb, flow_group_in);
232 if (IS_ERR(g)) {
233 err = PTR_ERR(g);
234 esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
235 goto miss_err;
236 }
237 esw->fdb_table.offloads.miss_grp = g;
238
239 err = esw_add_fdb_miss_rule(esw);
240 if (err)
241 goto miss_rule_err;
242
243 return 0;
244
245miss_rule_err:
246 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
247miss_err:
248 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
249send_vport_err:
250 mlx5_destroy_flow_table(fdb);
251fdb_err:
252ns_err:
253 kvfree(flow_group_in);
254 return err;
255}
256
257static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
258{
259 if (!esw->fdb_table.fdb)
260 return;
261
262 esw_debug(esw->dev, "Destroy offloads FDB Table\n");
263 mlx5_del_flow_rule(esw->fdb_table.offloads.miss_rule);
264 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
265 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
266
267 mlx5_destroy_flow_table(esw->fdb_table.fdb);
268}
269
270static int esw_create_offloads_table(struct mlx5_eswitch *esw)
271{
272 struct mlx5_flow_namespace *ns;
273 struct mlx5_flow_table *ft_offloads;
274 struct mlx5_core_dev *dev = esw->dev;
275 int err = 0;
276
277 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
278 if (!ns) {
279 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
280 return -ENOMEM;
281 }
282
283 ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0);
284 if (IS_ERR(ft_offloads)) {
285 err = PTR_ERR(ft_offloads);
286 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
287 return err;
288 }
289
290 esw->offloads.ft_offloads = ft_offloads;
291 return 0;
292}
293
294static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
295{
296 struct mlx5_esw_offload *offloads = &esw->offloads;
297
298 mlx5_destroy_flow_table(offloads->ft_offloads);
299}
300
301static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
302{
303 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
304 struct mlx5_flow_group *g;
305 struct mlx5_priv *priv = &esw->dev->priv;
306 u32 *flow_group_in;
307 void *match_criteria, *misc;
308 int err = 0;
309 int nvports = priv->sriov.num_vfs + 2;
310
311 flow_group_in = mlx5_vzalloc(inlen);
312 if (!flow_group_in)
313 return -ENOMEM;
314
315 /* create vport rx group */
316 memset(flow_group_in, 0, inlen);
317 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
318 MLX5_MATCH_MISC_PARAMETERS);
319
320 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
321 misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
322 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
323
324 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
325 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
326
327 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
328
329 if (IS_ERR(g)) {
330 err = PTR_ERR(g);
331 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
332 goto out;
333 }
334
335 esw->offloads.vport_rx_group = g;
336out:
337 kfree(flow_group_in);
338 return err;
339}
340
341static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
342{
343 mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
344}
345
346struct mlx5_flow_rule *
347mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
348{
349 struct mlx5_flow_destination dest;
350 struct mlx5_flow_rule *flow_rule;
351 struct mlx5_flow_spec *spec;
352 void *misc;
353
354 spec = mlx5_vzalloc(sizeof(*spec));
355 if (!spec) {
356 esw_warn(esw->dev, "Failed to alloc match parameters\n");
357 flow_rule = ERR_PTR(-ENOMEM);
358 goto out;
359 }
360
361 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
362 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
363
364 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
365 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
366
367 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
368 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
369 dest.tir_num = tirn;
370
371 flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, spec,
372 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
373 0, &dest);
374 if (IS_ERR(flow_rule)) {
375 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
376 goto out;
377 }
378
379out:
380 kvfree(spec);
381 return flow_rule;
382}
383
384static int esw_offloads_start(struct mlx5_eswitch *esw)
385{
386 int err, num_vfs = esw->dev->priv.sriov.num_vfs;
387
388 if (esw->mode != SRIOV_LEGACY) {
389 esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
390 return -EINVAL;
391 }
392
393 mlx5_eswitch_disable_sriov(esw);
394 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
395 if (err)
396 esw_warn(esw->dev, "Failed set eswitch to offloads, err %d\n", err);
397 return err;
398}
399
400int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
401{
402 struct mlx5_eswitch_rep *rep;
403 int vport;
404 int err;
405
406 err = esw_create_offloads_fdb_table(esw, nvports);
407 if (err)
408 return err;
409
410 err = esw_create_offloads_table(esw);
411 if (err)
412 goto create_ft_err;
413
414 err = esw_create_vport_rx_group(esw);
415 if (err)
416 goto create_fg_err;
417
418 for (vport = 0; vport < nvports; vport++) {
419 rep = &esw->offloads.vport_reps[vport];
420 if (!rep->valid)
421 continue;
422
423 err = rep->load(esw, rep);
424 if (err)
425 goto err_reps;
426 }
427 return 0;
428
429err_reps:
430 for (vport--; vport >= 0; vport--) {
431 rep = &esw->offloads.vport_reps[vport];
432 if (!rep->valid)
433 continue;
434 rep->unload(esw, rep);
435 }
436 esw_destroy_vport_rx_group(esw);
437
438create_fg_err:
439 esw_destroy_offloads_table(esw);
440
441create_ft_err:
442 esw_destroy_offloads_fdb_table(esw);
443 return err;
444}
445
446static int esw_offloads_stop(struct mlx5_eswitch *esw)
447{
448 int err, num_vfs = esw->dev->priv.sriov.num_vfs;
449
450 mlx5_eswitch_disable_sriov(esw);
451 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
452 if (err)
453 esw_warn(esw->dev, "Failed set eswitch legacy mode. err %d\n", err);
454
455 return err;
456}
457
458void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
459{
460 struct mlx5_eswitch_rep *rep;
461 int vport;
462
463 for (vport = 0; vport < nvports; vport++) {
464 rep = &esw->offloads.vport_reps[vport];
465 if (!rep->valid)
466 continue;
467 rep->unload(esw, rep);
468 }
469
470 esw_destroy_vport_rx_group(esw);
471 esw_destroy_offloads_table(esw);
472 esw_destroy_offloads_fdb_table(esw);
473}
474
475static int mlx5_esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
476{
477 switch (mode) {
478 case DEVLINK_ESWITCH_MODE_LEGACY:
479 *mlx5_mode = SRIOV_LEGACY;
480 break;
481 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
482 *mlx5_mode = SRIOV_OFFLOADS;
483 break;
484 default:
485 return -EINVAL;
486 }
487
488 return 0;
489}
490
491int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
492{
493 struct mlx5_core_dev *dev;
494 u16 cur_mlx5_mode, mlx5_mode = 0;
495
496 dev = devlink_priv(devlink);
497
498 if (!MLX5_CAP_GEN(dev, vport_group_manager))
499 return -EOPNOTSUPP;
500
501 cur_mlx5_mode = dev->priv.eswitch->mode;
502
503 if (cur_mlx5_mode == SRIOV_NONE)
504 return -EOPNOTSUPP;
505
506 if (mlx5_esw_mode_from_devlink(mode, &mlx5_mode))
507 return -EINVAL;
508
509 if (cur_mlx5_mode == mlx5_mode)
510 return 0;
511
512 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
513 return esw_offloads_start(dev->priv.eswitch);
514 else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
515 return esw_offloads_stop(dev->priv.eswitch);
516 else
517 return -EINVAL;
518}
519
520int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
521{
522 struct mlx5_core_dev *dev;
523
524 dev = devlink_priv(devlink);
525
526 if (!MLX5_CAP_GEN(dev, vport_group_manager))
527 return -EOPNOTSUPP;
528
529 if (dev->priv.eswitch->mode == SRIOV_NONE)
530 return -EOPNOTSUPP;
531
532 *mode = dev->priv.eswitch->mode;
533
534 return 0;
535}
536
537void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
538 struct mlx5_eswitch_rep *rep)
539{
540 struct mlx5_esw_offload *offloads = &esw->offloads;
541
542 memcpy(&offloads->vport_reps[rep->vport], rep,
543 sizeof(struct mlx5_eswitch_rep));
544
545 INIT_LIST_HEAD(&offloads->vport_reps[rep->vport].vport_sqs_list);
546 offloads->vport_reps[rep->vport].valid = true;
547}
548
549void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
550 int vport)
551{
552 struct mlx5_esw_offload *offloads = &esw->offloads;
553 struct mlx5_eswitch_rep *rep;
554
555 rep = &offloads->vport_reps[vport];
556
557 if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport].enabled)
558 rep->unload(esw, rep);
559
560 offloads->vport_reps[vport].valid = false;
561}
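
The send-queue mapping in this new file is meant to bracket a representor's datapath lifetime: start when its queues come up, stop when they go down. A hedged usage sketch with invented names and illustrative SQ numbers:

    /* Hedged sketch of the sqs2vport start/stop pairing. */
    static int example_rep_open(struct mlx5_eswitch *esw,
    			    struct mlx5_eswitch_rep *rep)
    {
    	u16 sqns[] = { 0x10, 0x11 };	/* illustrative SQ numbers */

    	/* Traffic sent from these SQs is forwarded to rep->vport
    	 * (mapped to the uplink when rep->vport is 0).
    	 */
    	return mlx5_eswitch_sqs2vport_start(esw, rep, sqns,
    					    ARRAY_SIZE(sqns));
    }

    static void example_rep_close(struct mlx5_eswitch *esw,
    			      struct mlx5_eswitch_rep *rep)
    {
    	/* Deletes every send-to-vport rule added at open time. */
    	mlx5_eswitch_sqs2vport_stop(esw, rep);
    }
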
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index e912a3d2505e..b0a130479085 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -67,13 +67,21 @@
67#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \ 67#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
68 .caps = (long[]) {__VA_ARGS__} } 68 .caps = (long[]) {__VA_ARGS__} }
69 69
70#define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
71 FS_CAP(flow_table_properties_nic_receive.modify_root), \
72 FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
73 FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
74
70#define LEFTOVERS_NUM_LEVELS 1 75#define LEFTOVERS_NUM_LEVELS 1
71#define LEFTOVERS_NUM_PRIOS 1 76#define LEFTOVERS_NUM_PRIOS 1
72 77
73#define BY_PASS_PRIO_NUM_LEVELS 1 78#define BY_PASS_PRIO_NUM_LEVELS 1
74#define BY_PASS_MIN_LEVEL (KERNEL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ 79#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
75 LEFTOVERS_NUM_PRIOS) 80 LEFTOVERS_NUM_PRIOS)
76 81
82#define ETHTOOL_PRIO_NUM_LEVELS 1
83#define ETHTOOL_NUM_PRIOS 10
84#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
77/* Vlan, mac, ttc, aRFS */ 85/* Vlan, mac, ttc, aRFS */
78#define KERNEL_NIC_PRIO_NUM_LEVELS 4 86#define KERNEL_NIC_PRIO_NUM_LEVELS 4
79#define KERNEL_NIC_NUM_PRIOS 1 87#define KERNEL_NIC_NUM_PRIOS 1
@@ -83,6 +91,11 @@
83#define ANCHOR_NUM_LEVELS 1 91#define ANCHOR_NUM_LEVELS 1
84#define ANCHOR_NUM_PRIOS 1 92#define ANCHOR_NUM_PRIOS 1
85#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1) 93#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
94
95#define OFFLOADS_MAX_FT 1
96#define OFFLOADS_NUM_PRIOS 1
97#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
98
86struct node_caps { 99struct node_caps {
87 size_t arr_sz; 100 size_t arr_sz;
88 long *caps; 101 long *caps;
@@ -98,24 +111,24 @@ static struct init_tree_node {
98 int num_levels; 111 int num_levels;
99} root_fs = { 112} root_fs = {
100 .type = FS_TYPE_NAMESPACE, 113 .type = FS_TYPE_NAMESPACE,
101 .ar_size = 4, 114 .ar_size = 6,
102 .children = (struct init_tree_node[]) { 115 .children = (struct init_tree_node[]) {
103 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, 116 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
104 FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), 117 FS_CHAINING_CAPS,
105 FS_CAP(flow_table_properties_nic_receive.modify_root),
106 FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
107 FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
108 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, 118 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
109 BY_PASS_PRIO_NUM_LEVELS))), 119 BY_PASS_PRIO_NUM_LEVELS))),
120 ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
121 ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
122 ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
123 FS_CHAINING_CAPS,
124 ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
125 ETHTOOL_PRIO_NUM_LEVELS))),
110 ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {}, 126 ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
111 ADD_NS(ADD_MULTIPLE_PRIO(1, 1), 127 ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
112 ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS, 128 ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
113 KERNEL_NIC_PRIO_NUM_LEVELS))), 129 KERNEL_NIC_PRIO_NUM_LEVELS))),
114 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, 130 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
115 FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), 131 FS_CHAINING_CAPS,
116 FS_CAP(flow_table_properties_nic_receive.modify_root),
117 FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
118 FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
119 ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))), 132 ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
120 ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {}, 133 ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
121 ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))), 134 ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
@@ -1152,9 +1165,7 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
1152 1165
1153static struct mlx5_flow_rule * 1166static struct mlx5_flow_rule *
1154_mlx5_add_flow_rule(struct mlx5_flow_table *ft, 1167_mlx5_add_flow_rule(struct mlx5_flow_table *ft,
1155 u8 match_criteria_enable, 1168 struct mlx5_flow_spec *spec,
1156 u32 *match_criteria,
1157 u32 *match_value,
1158 u32 action, 1169 u32 action,
1159 u32 flow_tag, 1170 u32 flow_tag,
1160 struct mlx5_flow_destination *dest) 1171 struct mlx5_flow_destination *dest)
@@ -1168,22 +1179,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
1168 nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT); 1179 nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
1169 fs_for_each_fg(g, ft) 1180 fs_for_each_fg(g, ft)
1170 if (compare_match_criteria(g->mask.match_criteria_enable, 1181 if (compare_match_criteria(g->mask.match_criteria_enable,
1171 match_criteria_enable, 1182 spec->match_criteria_enable,
1172 g->mask.match_criteria, 1183 g->mask.match_criteria,
1173 match_criteria)) { 1184 spec->match_criteria)) {
1174 rule = add_rule_fg(g, match_value, 1185 rule = add_rule_fg(g, spec->match_value,
1175 action, flow_tag, dest); 1186 action, flow_tag, dest);
1176 if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC) 1187 if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC)
1177 goto unlock; 1188 goto unlock;
1178 } 1189 }
1179 1190
1180 g = create_autogroup(ft, match_criteria_enable, match_criteria); 1191 g = create_autogroup(ft, spec->match_criteria_enable,
1192 spec->match_criteria);
1181 if (IS_ERR(g)) { 1193 if (IS_ERR(g)) {
1182 rule = (void *)g; 1194 rule = (void *)g;
1183 goto unlock; 1195 goto unlock;
1184 } 1196 }
1185 1197
1186 rule = add_rule_fg(g, match_value, 1198 rule = add_rule_fg(g, spec->match_value,
1187 action, flow_tag, dest); 1199 action, flow_tag, dest);
1188 if (IS_ERR(rule)) { 1200 if (IS_ERR(rule)) {
1189 /* Remove assumes refcount > 0 and autogroup creates a group 1201 /* Remove assumes refcount > 0 and autogroup creates a group
@@ -1207,9 +1219,7 @@ static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
1207 1219
1208struct mlx5_flow_rule * 1220struct mlx5_flow_rule *
1209mlx5_add_flow_rule(struct mlx5_flow_table *ft, 1221mlx5_add_flow_rule(struct mlx5_flow_table *ft,
1210 u8 match_criteria_enable, 1222 struct mlx5_flow_spec *spec,
1211 u32 *match_criteria,
1212 u32 *match_value,
1213 u32 action, 1223 u32 action,
1214 u32 flow_tag, 1224 u32 flow_tag,
1215 struct mlx5_flow_destination *dest) 1225 struct mlx5_flow_destination *dest)
@@ -1240,8 +1250,7 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
1240 } 1250 }
1241 } 1251 }
1242 1252
1243 rule = _mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria, 1253 rule = _mlx5_add_flow_rule(ft, spec, action, flow_tag, dest);
1244 match_value, action, flow_tag, dest);
1245 1254
1246 if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { 1255 if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
1247 if (!IS_ERR_OR_NULL(rule) && 1256 if (!IS_ERR_OR_NULL(rule) &&
@@ -1359,40 +1368,47 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
1359struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, 1368struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
1360 enum mlx5_flow_namespace_type type) 1369 enum mlx5_flow_namespace_type type)
1361{ 1370{
1362 struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; 1371 struct mlx5_flow_steering *steering = dev->priv.steering;
1372 struct mlx5_flow_root_namespace *root_ns;
1363 int prio; 1373 int prio;
1364 struct fs_prio *fs_prio; 1374 struct fs_prio *fs_prio;
1365 struct mlx5_flow_namespace *ns; 1375 struct mlx5_flow_namespace *ns;
1366 1376
1367 if (!root_ns) 1377 if (!steering)
1368 return NULL; 1378 return NULL;
1369 1379
1370 switch (type) { 1380 switch (type) {
1371 case MLX5_FLOW_NAMESPACE_BYPASS: 1381 case MLX5_FLOW_NAMESPACE_BYPASS:
1382 case MLX5_FLOW_NAMESPACE_OFFLOADS:
1383 case MLX5_FLOW_NAMESPACE_ETHTOOL:
1372 case MLX5_FLOW_NAMESPACE_KERNEL: 1384 case MLX5_FLOW_NAMESPACE_KERNEL:
1373 case MLX5_FLOW_NAMESPACE_LEFTOVERS: 1385 case MLX5_FLOW_NAMESPACE_LEFTOVERS:
1374 case MLX5_FLOW_NAMESPACE_ANCHOR: 1386 case MLX5_FLOW_NAMESPACE_ANCHOR:
1375 prio = type; 1387 prio = type;
1376 break; 1388 break;
1377 case MLX5_FLOW_NAMESPACE_FDB: 1389 case MLX5_FLOW_NAMESPACE_FDB:
1378 if (dev->priv.fdb_root_ns) 1390 if (steering->fdb_root_ns)
1379 return &dev->priv.fdb_root_ns->ns; 1391 return &steering->fdb_root_ns->ns;
1380 else 1392 else
1381 return NULL; 1393 return NULL;
1382 case MLX5_FLOW_NAMESPACE_ESW_EGRESS: 1394 case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
1383 if (dev->priv.esw_egress_root_ns) 1395 if (steering->esw_egress_root_ns)
1384 return &dev->priv.esw_egress_root_ns->ns; 1396 return &steering->esw_egress_root_ns->ns;
1385 else 1397 else
1386 return NULL; 1398 return NULL;
1387 case MLX5_FLOW_NAMESPACE_ESW_INGRESS: 1399 case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
1388 if (dev->priv.esw_ingress_root_ns) 1400 if (steering->esw_ingress_root_ns)
1389 return &dev->priv.esw_ingress_root_ns->ns; 1401 return &steering->esw_ingress_root_ns->ns;
1390 else 1402 else
1391 return NULL; 1403 return NULL;
1392 default: 1404 default:
1393 return NULL; 1405 return NULL;
1394 } 1406 }
1395 1407
1408 root_ns = steering->root_ns;
1409 if (!root_ns)
1410 return NULL;
1411
1396 fs_prio = find_prio(&root_ns->ns, prio); 1412 fs_prio = find_prio(&root_ns->ns, prio);
1397 if (!fs_prio) 1413 if (!fs_prio)
1398 return NULL; 1414 return NULL;
@@ -1478,13 +1494,13 @@ static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
1478 return true; 1494 return true;
1479} 1495}
1480 1496
1481static int init_root_tree_recursive(struct mlx5_core_dev *dev, 1497static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
1482 struct init_tree_node *init_node, 1498 struct init_tree_node *init_node,
1483 struct fs_node *fs_parent_node, 1499 struct fs_node *fs_parent_node,
1484 struct init_tree_node *init_parent_node, 1500 struct init_tree_node *init_parent_node,
1485 int prio) 1501 int prio)
1486{ 1502{
1487 int max_ft_level = MLX5_CAP_FLOWTABLE(dev, 1503 int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
1488 flow_table_properties_nic_receive. 1504 flow_table_properties_nic_receive.
1489 max_ft_level); 1505 max_ft_level);
1490 struct mlx5_flow_namespace *fs_ns; 1506 struct mlx5_flow_namespace *fs_ns;
@@ -1495,7 +1511,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
1495 1511
1496 if (init_node->type == FS_TYPE_PRIO) { 1512 if (init_node->type == FS_TYPE_PRIO) {
1497 if ((init_node->min_ft_level > max_ft_level) || 1513 if ((init_node->min_ft_level > max_ft_level) ||
1498 !has_required_caps(dev, &init_node->caps)) 1514 !has_required_caps(steering->dev, &init_node->caps))
1499 return 0; 1515 return 0;
1500 1516
1501 fs_get_obj(fs_ns, fs_parent_node); 1517 fs_get_obj(fs_ns, fs_parent_node);
@@ -1516,7 +1532,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
1516 } 1532 }
1517 prio = 0; 1533 prio = 0;
1518 for (i = 0; i < init_node->ar_size; i++) { 1534 for (i = 0; i < init_node->ar_size; i++) {
1519 err = init_root_tree_recursive(dev, &init_node->children[i], 1535 err = init_root_tree_recursive(steering, &init_node->children[i],
1520 base, init_node, prio); 1536 base, init_node, prio);
1521 if (err) 1537 if (err)
1522 return err; 1538 return err;
@@ -1529,7 +1545,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
1529 return 0; 1545 return 0;
1530} 1546}
1531 1547
1532static int init_root_tree(struct mlx5_core_dev *dev, 1548static int init_root_tree(struct mlx5_flow_steering *steering,
1533 struct init_tree_node *init_node, 1549 struct init_tree_node *init_node,
1534 struct fs_node *fs_parent_node) 1550 struct fs_node *fs_parent_node)
1535{ 1551{
@@ -1539,7 +1555,7 @@ static int init_root_tree(struct mlx5_core_dev *dev,
1539 1555
1540 fs_get_obj(fs_ns, fs_parent_node); 1556 fs_get_obj(fs_ns, fs_parent_node);
1541 for (i = 0; i < init_node->ar_size; i++) { 1557 for (i = 0; i < init_node->ar_size; i++) {
1542 err = init_root_tree_recursive(dev, &init_node->children[i], 1558 err = init_root_tree_recursive(steering, &init_node->children[i],
1543 &fs_ns->node, 1559 &fs_ns->node,
1544 init_node, i); 1560 init_node, i);
1545 if (err) 1561 if (err)
@@ -1548,7 +1564,7 @@ static int init_root_tree(struct mlx5_core_dev *dev,
1548 return 0; 1564 return 0;
1549} 1565}
1550 1566
1551static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev, 1567static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering *steering,
1552 enum fs_flow_table_type 1568 enum fs_flow_table_type
1553 table_type) 1569 table_type)
1554{ 1570{
@@ -1560,7 +1576,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev
1560 if (!root_ns) 1576 if (!root_ns)
1561 return NULL; 1577 return NULL;
1562 1578
1563 root_ns->dev = dev; 1579 root_ns->dev = steering->dev;
1564 root_ns->table_type = table_type; 1580 root_ns->table_type = table_type;
1565 1581
1566 ns = &root_ns->ns; 1582 ns = &root_ns->ns;
@@ -1615,212 +1631,126 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
1615#define ANCHOR_PRIO 0 1631#define ANCHOR_PRIO 0
1616#define ANCHOR_SIZE 1 1632#define ANCHOR_SIZE 1
1617#define ANCHOR_LEVEL 0 1633#define ANCHOR_LEVEL 0
1618static int create_anchor_flow_table(struct mlx5_core_dev 1634static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
1619 *dev)
1620{ 1635{
1621 struct mlx5_flow_namespace *ns = NULL; 1636 struct mlx5_flow_namespace *ns = NULL;
1622 struct mlx5_flow_table *ft; 1637 struct mlx5_flow_table *ft;
1623 1638
1624 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ANCHOR); 1639 ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
1625 if (!ns) 1640 if (!ns)
1626 return -EINVAL; 1641 return -EINVAL;
1627 ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL); 1642 ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL);
1628 if (IS_ERR(ft)) { 1643 if (IS_ERR(ft)) {
1629 mlx5_core_err(dev, "Failed to create last anchor flow table"); 1644 mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
1630 return PTR_ERR(ft); 1645 return PTR_ERR(ft);
1631 } 1646 }
1632 return 0; 1647 return 0;
1633} 1648}
1634 1649
1635static int init_root_ns(struct mlx5_core_dev *dev) 1650static int init_root_ns(struct mlx5_flow_steering *steering)
1636{ 1651{
1637 1652
1638 dev->priv.root_ns = create_root_ns(dev, FS_FT_NIC_RX); 1653 steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
1639 if (IS_ERR_OR_NULL(dev->priv.root_ns)) 1654 if (IS_ERR_OR_NULL(steering->root_ns))
1640 goto cleanup; 1655 goto cleanup;
1641 1656
1642 if (init_root_tree(dev, &root_fs, &dev->priv.root_ns->ns.node)) 1657 if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
1643 goto cleanup; 1658 goto cleanup;
1644 1659
1645 set_prio_attrs(dev->priv.root_ns); 1660 set_prio_attrs(steering->root_ns);
1646 1661
1647 if (create_anchor_flow_table(dev)) 1662 if (create_anchor_flow_table(steering))
1648 goto cleanup; 1663 goto cleanup;
1649 1664
1650 return 0; 1665 return 0;
1651 1666
1652cleanup: 1667cleanup:
1653 mlx5_cleanup_fs(dev); 1668 mlx5_cleanup_fs(steering->dev);
1654 return -ENOMEM; 1669 return -ENOMEM;
1655} 1670}
1656 1671
1657static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev, 1672static void clean_tree(struct fs_node *node)
1658 struct mlx5_flow_root_namespace *root_ns)
1659{ 1673{
1660 struct fs_node *prio; 1674 if (node) {
1661 1675 struct fs_node *iter;
1662 if (!root_ns) 1676 struct fs_node *temp;
1663 return;
1664 1677
1665 if (!list_empty(&root_ns->ns.node.children)) { 1678 list_for_each_entry_safe(iter, temp, &node->children, list)
1666 prio = list_first_entry(&root_ns->ns.node.children, 1679 clean_tree(iter);
1667 struct fs_node, 1680 tree_remove_node(node);
1668 list);
1669 if (tree_remove_node(prio))
1670 mlx5_core_warn(dev,
1671 "Flow steering priority wasn't destroyed, refcount > 1\n");
1672 } 1681 }
1673 if (tree_remove_node(&root_ns->ns.node))
1674 mlx5_core_warn(dev,
1675 "Flow steering namespace wasn't destroyed, refcount > 1\n");
1676 root_ns = NULL;
1677}
1678
1679static void destroy_flow_tables(struct fs_prio *prio)
1680{
1681 struct mlx5_flow_table *iter;
1682 struct mlx5_flow_table *tmp;
1683
1684 fs_for_each_ft_safe(iter, tmp, prio)
1685 mlx5_destroy_flow_table(iter);
1686} 1682}
1687 1683
1688static void cleanup_root_ns(struct mlx5_core_dev *dev) 1684static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
1689{ 1685{
1690 struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
1691 struct fs_prio *iter_prio;
1692
1693 if (!MLX5_CAP_GEN(dev, nic_flow_table))
1694 return;
1695
1696 if (!root_ns) 1686 if (!root_ns)
1697 return; 1687 return;
1698 1688
1699 /* stage 1 */ 1689 clean_tree(&root_ns->ns.node);
1700 fs_for_each_prio(iter_prio, &root_ns->ns) {
1701 struct fs_node *node;
1702 struct mlx5_flow_namespace *iter_ns;
1703
1704 fs_for_each_ns_or_ft(node, iter_prio) {
1705 if (node->type == FS_TYPE_FLOW_TABLE)
1706 continue;
1707 fs_get_obj(iter_ns, node);
1708 while (!list_empty(&iter_ns->node.children)) {
1709 struct fs_prio *obj_iter_prio2;
1710 struct fs_node *iter_prio2 =
1711 list_first_entry(&iter_ns->node.children,
1712 struct fs_node,
1713 list);
1714
1715 fs_get_obj(obj_iter_prio2, iter_prio2);
1716 destroy_flow_tables(obj_iter_prio2);
1717 if (tree_remove_node(iter_prio2)) {
1718 mlx5_core_warn(dev,
1719 "Priority %d wasn't destroyed, refcount > 1\n",
1720 obj_iter_prio2->prio);
1721 return;
1722 }
1723 }
1724 }
1725 }
1726
1727 /* stage 2 */
1728 fs_for_each_prio(iter_prio, &root_ns->ns) {
1729 while (!list_empty(&iter_prio->node.children)) {
1730 struct fs_node *iter_ns =
1731 list_first_entry(&iter_prio->node.children,
1732 struct fs_node,
1733 list);
1734 if (tree_remove_node(iter_ns)) {
1735 mlx5_core_warn(dev,
1736 "Namespace wasn't destroyed, refcount > 1\n");
1737 return;
1738 }
1739 }
1740 }
1741
1742 /* stage 3 */
1743 while (!list_empty(&root_ns->ns.node.children)) {
1744 struct fs_prio *obj_prio_node;
1745 struct fs_node *prio_node =
1746 list_first_entry(&root_ns->ns.node.children,
1747 struct fs_node,
1748 list);
1749
1750 fs_get_obj(obj_prio_node, prio_node);
1751 if (tree_remove_node(prio_node)) {
1752 mlx5_core_warn(dev,
1753 "Priority %d wasn't destroyed, refcount > 1\n",
1754 obj_prio_node->prio);
1755 return;
1756 }
1757 }
1758
1759 if (tree_remove_node(&root_ns->ns.node)) {
1760 mlx5_core_warn(dev,
1761 "root namespace wasn't destroyed, refcount > 1\n");
1762 return;
1763 }
1764
1765 dev->priv.root_ns = NULL;
1766} 1690}
1767 1691
1768void mlx5_cleanup_fs(struct mlx5_core_dev *dev) 1692void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
1769{ 1693{
1694 struct mlx5_flow_steering *steering = dev->priv.steering;
1695
1770 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 1696 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1771 return; 1697 return;
1772 1698
1773 cleanup_root_ns(dev); 1699 cleanup_root_ns(steering->root_ns);
1774 cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); 1700 cleanup_root_ns(steering->esw_egress_root_ns);
1775 cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns); 1701 cleanup_root_ns(steering->esw_ingress_root_ns);
1776 cleanup_single_prio_root_ns(dev, dev->priv.esw_ingress_root_ns); 1702 cleanup_root_ns(steering->fdb_root_ns);
1777 mlx5_cleanup_fc_stats(dev); 1703 mlx5_cleanup_fc_stats(dev);
1704 kfree(steering);
1778} 1705}
1779 1706
1780static int init_fdb_root_ns(struct mlx5_core_dev *dev) 1707static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
1781{ 1708{
1782 struct fs_prio *prio; 1709 struct fs_prio *prio;
1783 1710
1784 dev->priv.fdb_root_ns = create_root_ns(dev, FS_FT_FDB); 1711 steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
1785 if (!dev->priv.fdb_root_ns) 1712 if (!steering->fdb_root_ns)
1786 return -ENOMEM; 1713 return -ENOMEM;
1787 1714
1788 /* Create single prio */ 1715 /* Create single prio */
1789 prio = fs_create_prio(&dev->priv.fdb_root_ns->ns, 0, 1); 1716 prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1);
1790 if (IS_ERR(prio)) { 1717 if (IS_ERR(prio)) {
1791 cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); 1718 cleanup_root_ns(steering->fdb_root_ns);
1719 steering->fdb_root_ns = NULL;
1792 return PTR_ERR(prio); 1720 return PTR_ERR(prio);
1793 } else { 1721 } else {
1794 return 0; 1722 return 0;
1795 } 1723 }
1796} 1724}
1797 1725
1798static int init_egress_acl_root_ns(struct mlx5_core_dev *dev) 1726static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
1799{ 1727{
1800 struct fs_prio *prio; 1728 struct fs_prio *prio;
1801 1729
1802 dev->priv.esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL); 1730 steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
1803 if (!dev->priv.esw_egress_root_ns) 1731 if (!steering->esw_egress_root_ns)
1804 return -ENOMEM; 1732 return -ENOMEM;
1805 1733
1806 /* create 1 prio*/ 1734 /* create 1 prio*/
1807 prio = fs_create_prio(&dev->priv.esw_egress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev)); 1735 prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
1736 MLX5_TOTAL_VPORTS(steering->dev));
1808 if (IS_ERR(prio)) 1737 if (IS_ERR(prio))
1809 return PTR_ERR(prio); 1738 return PTR_ERR(prio);
1810 else 1739 else
1811 return 0; 1740 return 0;
1812} 1741}
1813 1742
1814static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev) 1743static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
1815{ 1744{
1816 struct fs_prio *prio; 1745 struct fs_prio *prio;
1817 1746
1818 dev->priv.esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL); 1747 steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
1819 if (!dev->priv.esw_ingress_root_ns) 1748 if (!steering->esw_ingress_root_ns)
1820 return -ENOMEM; 1749 return -ENOMEM;
1821 1750
1822 /* create 1 prio*/ 1751 /* create 1 prio*/
1823 prio = fs_create_prio(&dev->priv.esw_ingress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev)); 1752 prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
1753 MLX5_TOTAL_VPORTS(steering->dev));
1824 if (IS_ERR(prio)) 1754 if (IS_ERR(prio))
1825 return PTR_ERR(prio); 1755 return PTR_ERR(prio);
1826 else 1756 else
@@ -1829,6 +1759,7 @@ static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev)
1829 1759
1830int mlx5_init_fs(struct mlx5_core_dev *dev) 1760int mlx5_init_fs(struct mlx5_core_dev *dev)
1831{ 1761{
1762 struct mlx5_flow_steering *steering;
1832 int err = 0; 1763 int err = 0;
1833 1764
1834 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 1765 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
@@ -1838,26 +1769,32 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
1838 if (err) 1769 if (err)
1839 return err; 1770 return err;
1840 1771
1772 steering = kzalloc(sizeof(*steering), GFP_KERNEL);
1773 if (!steering)
1774 return -ENOMEM;
1775 steering->dev = dev;
1776 dev->priv.steering = steering;
1777
1841 if (MLX5_CAP_GEN(dev, nic_flow_table) && 1778 if (MLX5_CAP_GEN(dev, nic_flow_table) &&
1842 MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) { 1779 MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
1843 err = init_root_ns(dev); 1780 err = init_root_ns(steering);
1844 if (err) 1781 if (err)
1845 goto err; 1782 goto err;
1846 } 1783 }
1847 1784
1848 if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { 1785 if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
1849 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) { 1786 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
1850 err = init_fdb_root_ns(dev); 1787 err = init_fdb_root_ns(steering);
1851 if (err) 1788 if (err)
1852 goto err; 1789 goto err;
1853 } 1790 }
1854 if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { 1791 if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
1855 err = init_egress_acl_root_ns(dev); 1792 err = init_egress_acl_root_ns(steering);
1856 if (err) 1793 if (err)
1857 goto err; 1794 goto err;
1858 } 1795 }
1859 if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { 1796 if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
1860 err = init_ingress_acl_root_ns(dev); 1797 err = init_ingress_acl_root_ns(steering);
1861 if (err) 1798 if (err)
1862 goto err; 1799 goto err;
1863 } 1800 }
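
After the conversion above, callers of mlx5_add_flow_rule() pass a single struct mlx5_flow_spec carrying match_criteria_enable, match_criteria and match_value together instead of three separate arguments. A hypothetical caller on the new signature (the IPv4 ethertype match is purely illustrative):

    /* Hedged sketch of the spec-based rule insertion API. */
    static struct mlx5_flow_rule *
    example_add_rule(struct mlx5_flow_table *ft,
    		 struct mlx5_flow_destination *dest)
    {
    	struct mlx5_flow_rule *rule;
    	struct mlx5_flow_spec *spec;

    	spec = mlx5_vzalloc(sizeof(*spec));
    	if (!spec)
    		return ERR_PTR(-ENOMEM);

    	/* Criteria-enable bits, mask and value now travel together. */
    	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
    	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
    			 outer_headers.ethertype);
    	MLX5_SET(fte_match_param, spec->match_value,
    		 outer_headers.ethertype, ETH_P_IP);

    	rule = mlx5_add_flow_rule(ft, spec,
    				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
    				  0, dest);
    	kvfree(spec);
    	return rule;
    }
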
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index aa41a7314691..d7ba91a1eea2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -55,6 +55,14 @@ enum fs_fte_status {
55 FS_FTE_STATUS_EXISTING = 1UL << 0, 55 FS_FTE_STATUS_EXISTING = 1UL << 0,
56}; 56};
57 57
58struct mlx5_flow_steering {
59 struct mlx5_core_dev *dev;
60 struct mlx5_flow_root_namespace *root_ns;
61 struct mlx5_flow_root_namespace *fdb_root_ns;
62 struct mlx5_flow_root_namespace *esw_egress_root_ns;
63 struct mlx5_flow_root_namespace *esw_ingress_root_ns;
64};
65
58struct fs_node { 66struct fs_node {
59 struct list_head list; 67 struct list_head list;
60 struct list_head children; 68 struct list_head children;
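
With the container above, all four root namespaces hang off dev->priv.steering rather than individual mlx5_priv fields, so lookups dereference the container first and treat a missing namespace uniformly. A small sketch of the resulting access pattern; the helper name is invented:

    /* Hedged sketch: resolve the FDB namespace via the new container. */
    static struct mlx5_flow_namespace *
    example_fdb_namespace(struct mlx5_core_dev *dev)
    {
    	struct mlx5_flow_steering *steering = dev->priv.steering;

    	if (!steering || !steering->fdb_root_ns)
    		return NULL;
    	return &steering->fdb_root_ns->ns;
    }
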
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 42d16b9458e4..96a59463ae65 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -108,15 +108,21 @@ static int in_fatal(struct mlx5_core_dev *dev)
108 108
109void mlx5_enter_error_state(struct mlx5_core_dev *dev) 109void mlx5_enter_error_state(struct mlx5_core_dev *dev)
110{ 110{
111 mutex_lock(&dev->intf_state_mutex);
111 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) 112 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
112 return; 113 goto unlock;
113 114
114 mlx5_core_err(dev, "start\n"); 115 mlx5_core_err(dev, "start\n");
115 if (pci_channel_offline(dev->pdev) || in_fatal(dev)) 116 if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
116 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; 117 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
118 trigger_cmd_completions(dev);
119 }
117 120
118 mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0); 121 mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
119 mlx5_core_err(dev, "end\n"); 122 mlx5_core_err(dev, "end\n");
123
124unlock:
125 mutex_unlock(&dev->intf_state_mutex);
120} 126}
121 127
122static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) 128static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
@@ -245,7 +251,6 @@ static void poll_health(unsigned long data)
245 u32 count; 251 u32 count;
246 252
247 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 253 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
248 trigger_cmd_completions(dev);
249 mod_timer(&health->timer, get_next_poll_jiffies()); 254 mod_timer(&health->timer, get_next_poll_jiffies());
250 return; 255 return;
251 } 256 }
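
The health.c hunks serialize entry into the internal-error state under intf_state_mutex and move the command-completion flush to that single point, instead of re-triggering it from every poll-timer tick. Distilled into a hedged, simplified sketch (the real function also checks pci_channel_offline()/in_fatal() and always raises the SYS_ERROR event):

    /* Hedged sketch: the state test and the one-shot flush share one
     * mutex, so racing callers cannot flush command completions twice.
     */
    static void example_enter_error(struct mlx5_core_dev *dev)
    {
    	mutex_lock(&dev->intf_state_mutex);
    	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
    		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
    		trigger_cmd_completions(dev);	/* flush exactly once */
    	}
    	mutex_unlock(&dev->intf_state_mutex);
    }
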
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 08cae3485960..4f491d43e77d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -51,6 +51,7 @@
51#ifdef CONFIG_RFS_ACCEL 51#ifdef CONFIG_RFS_ACCEL
52#include <linux/cpu_rmap.h> 52#include <linux/cpu_rmap.h>
53#endif 53#endif
54#include <net/devlink.h>
54#include "mlx5_core.h" 55#include "mlx5_core.h"
55#include "fs_core.h" 56#include "fs_core.h"
56#ifdef CONFIG_MLX5_CORE_EN 57#ifdef CONFIG_MLX5_CORE_EN
@@ -1315,19 +1316,28 @@ struct mlx5_core_event_handler {
1315 void *data); 1316 void *data);
1316}; 1317};
1317 1318
1319static const struct devlink_ops mlx5_devlink_ops = {
1320#ifdef CONFIG_MLX5_CORE_EN
1321 .eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
1322 .eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
1323#endif
1324};
1318 1325
1319static int init_one(struct pci_dev *pdev, 1326static int init_one(struct pci_dev *pdev,
1320 const struct pci_device_id *id) 1327 const struct pci_device_id *id)
1321{ 1328{
1322 struct mlx5_core_dev *dev; 1329 struct mlx5_core_dev *dev;
1330 struct devlink *devlink;
1323 struct mlx5_priv *priv; 1331 struct mlx5_priv *priv;
1324 int err; 1332 int err;
1325 1333
1326 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1334 devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
1327 if (!dev) { 1335 if (!devlink) {
1328 dev_err(&pdev->dev, "kzalloc failed\n"); 1336 dev_err(&pdev->dev, "kzalloc failed\n");
1329 return -ENOMEM; 1337 return -ENOMEM;
1330 } 1338 }
1339
1340 dev = devlink_priv(devlink);
1331 priv = &dev->priv; 1341 priv = &dev->priv;
1332 priv->pci_dev_data = id->driver_data; 1342 priv->pci_dev_data = id->driver_data;
1333 1343
@@ -1364,15 +1374,21 @@ static int init_one(struct pci_dev *pdev,
1364 goto clean_health; 1374 goto clean_health;
1365 } 1375 }
1366 1376
1377 err = devlink_register(devlink, &pdev->dev);
1378 if (err)
1379 goto clean_load;
1380
1367 return 0; 1381 return 0;
1368 1382
1383clean_load:
1384 mlx5_unload_one(dev, priv);
1369clean_health: 1385clean_health:
1370 mlx5_health_cleanup(dev); 1386 mlx5_health_cleanup(dev);
1371close_pci: 1387close_pci:
1372 mlx5_pci_close(dev, priv); 1388 mlx5_pci_close(dev, priv);
1373clean_dev: 1389clean_dev:
1374 pci_set_drvdata(pdev, NULL); 1390 pci_set_drvdata(pdev, NULL);
1375 kfree(dev); 1391 devlink_free(devlink);
1376 1392
1377 return err; 1393 return err;
1378} 1394}
@@ -1380,8 +1396,10 @@ clean_dev:
1380static void remove_one(struct pci_dev *pdev) 1396static void remove_one(struct pci_dev *pdev)
1381{ 1397{
1382 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1398 struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
1399 struct devlink *devlink = priv_to_devlink(dev);
1383 struct mlx5_priv *priv = &dev->priv; 1400 struct mlx5_priv *priv = &dev->priv;
1384 1401
1402 devlink_unregister(devlink);
1385 if (mlx5_unload_one(dev, priv)) { 1403 if (mlx5_unload_one(dev, priv)) {
1386 dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n"); 1404 dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
1387 mlx5_health_cleanup(dev); 1405 mlx5_health_cleanup(dev);
@@ -1390,7 +1408,7 @@ static void remove_one(struct pci_dev *pdev)
1390 mlx5_health_cleanup(dev); 1408 mlx5_health_cleanup(dev);
1391 mlx5_pci_close(dev, priv); 1409 mlx5_pci_close(dev, priv);
1392 pci_set_drvdata(pdev, NULL); 1410 pci_set_drvdata(pdev, NULL);
1393 kfree(dev); 1411 devlink_free(devlink);
1394} 1412}
1395 1413
1396static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, 1414static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
@@ -1432,46 +1450,31 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
1432 mlx5_pci_err_detected(dev->pdev, 0); 1450 mlx5_pci_err_detected(dev->pdev, 0);
1433} 1451}
1434 1452
1435/* wait for the device to show vital signs. For now we check 1453/* wait for the device to show vital signs by waiting
1436 * that we can read the device ID and that the health buffer 1454 * for the health counter to start counting.
1437 * shows a non zero value which is different than 0xffffffff
1438 */ 1455 */
1439static void wait_vital(struct pci_dev *pdev) 1456static int wait_vital(struct pci_dev *pdev)
1440{ 1457{
1441 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1458 struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
1442 struct mlx5_core_health *health = &dev->priv.health; 1459 struct mlx5_core_health *health = &dev->priv.health;
1443 const int niter = 100; 1460 const int niter = 100;
1461 u32 last_count = 0;
1444 u32 count; 1462 u32 count;
1445 u16 did;
1446 int i; 1463 int i;
1447 1464
1448 /* Wait for firmware to be ready after reset */
1449 msleep(1000);
1450 for (i = 0; i < niter; i++) {
1451 if (pci_read_config_word(pdev, 2, &did)) {
1452 dev_warn(&pdev->dev, "failed reading config word\n");
1453 break;
1454 }
1455 if (did == pdev->device) {
1456 dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
1457 break;
1458 }
1459 msleep(50);
1460 }
1461 if (i == niter)
1462 dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
1463
1464 for (i = 0; i < niter; i++) { 1465 for (i = 0; i < niter; i++) {
1465 count = ioread32be(health->health_counter); 1466 count = ioread32be(health->health_counter);
1466 if (count && count != 0xffffffff) { 1467 if (count && count != 0xffffffff) {
1467 dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i); 1468 if (last_count && last_count != count) {
1468 break; 1469 dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
1470 return 0;
1471 }
1472 last_count = count;
1469 } 1473 }
1470 msleep(50); 1474 msleep(50);
1471 } 1475 }
1472 1476
1473 if (i == niter) 1477 return -ETIMEDOUT;
1474 dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
1475} 1478}
1476 1479
1477static void mlx5_pci_resume(struct pci_dev *pdev) 1480static void mlx5_pci_resume(struct pci_dev *pdev)
@@ -1483,7 +1486,11 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
1483 dev_info(&pdev->dev, "%s was called\n", __func__); 1486 dev_info(&pdev->dev, "%s was called\n", __func__);
1484 1487
1485 pci_save_state(pdev); 1488 pci_save_state(pdev);
1486 wait_vital(pdev); 1489 err = wait_vital(pdev);
1490 if (err) {
1491 dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
1492 return;
1493 }
1487 1494
1488 err = mlx5_load_one(dev, priv); 1495 err = mlx5_load_one(dev, priv);
1489 if (err) 1496 if (err)
@@ -1518,8 +1525,9 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
1518 { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */ 1525 { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */
1519 { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */ 1526 { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
1520 { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */ 1527 { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */
1521 { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */ 1528 { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5, PCIe 3.0 */
1522 { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */ 1529 { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
1530 { PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5, PCIe 4.0 */
1523 { 0, } 1531 { 0, }
1524}; 1532};
1525 1533
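
From this point on, the mlx5_core_dev is embedded in a devlink instance, which is what makes the eswitch_mode_get/set ops reachable from userspace. A condensed, hypothetical view of the probe-side lifecycle (error handling reduced to the devlink steps; the real init_one() also does PCI setup, health init and mlx5_load_one()):

    /* Hedged sketch: devlink owns the driver private data. */
    static int example_probe(struct pci_dev *pdev)
    {
    	struct mlx5_core_dev *dev;
    	struct devlink *devlink;
    	int err;

    	devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
    	if (!devlink)
    		return -ENOMEM;
    	dev = devlink_priv(devlink);
    	dev->pdev = pdev;

    	/* ... PCI bring-up and mlx5_load_one() as in init_one() ... */

    	err = devlink_register(devlink, &pdev->dev);
    	if (err)
    		devlink_free(devlink);	/* unwind the allocation */
    	return err;
    }
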
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 9eeee0545f1c..32dea3524cee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -345,7 +345,6 @@ retry:
345 func_id, npages, err); 345 func_id, npages, err);
346 goto out_4k; 346 goto out_4k;
347 } 347 }
348 dev->priv.fw_pages += npages;
349 348
350 err = mlx5_cmd_status_to_err(&out.hdr); 349 err = mlx5_cmd_status_to_err(&out.hdr);
351 if (err) { 350 if (err) {
@@ -373,6 +372,33 @@ out_free:
373 return err; 372 return err;
374} 373}
375 374
375static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
376 struct mlx5_manage_pages_inbox *in, int in_size,
377 struct mlx5_manage_pages_outbox *out, int out_size)
378{
379 struct fw_page *fwp;
380 struct rb_node *p;
381 u32 npages;
382 u32 i = 0;
383
384 if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
385 return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
386 (u32 *)out, out_size);
387
388 npages = be32_to_cpu(in->num_entries);
389
390 p = rb_first(&dev->priv.page_root);
391 while (p && i < npages) {
392 fwp = rb_entry(p, struct fw_page, rb_node);
393 out->pas[i] = cpu_to_be64(fwp->addr);
394 p = rb_next(p);
395 i++;
396 }
397
398 out->num_entries = cpu_to_be32(i);
399 return 0;
400}
401
376static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, 402static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
377 int *nclaimed) 403 int *nclaimed)
378{ 404{
@@ -398,15 +424,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
398 in.func_id = cpu_to_be16(func_id); 424 in.func_id = cpu_to_be16(func_id);
399 in.num_entries = cpu_to_be32(npages); 425 in.num_entries = cpu_to_be32(npages);
400 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); 426 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
401 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); 427 err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
402 if (err) { 428 if (err) {
403 mlx5_core_err(dev, "failed reclaiming pages\n"); 429 mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
404 goto out_free;
405 }
406 dev->priv.fw_pages -= npages;
407
408 if (out->hdr.status) {
409 err = mlx5_cmd_status_to_err(&out->hdr);
410 goto out_free; 430 goto out_free;
411 } 431 }
412 432
@@ -417,13 +437,15 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
417 err = -EINVAL; 437 err = -EINVAL;
418 goto out_free; 438 goto out_free;
419 } 439 }
420 if (nclaimed)
421 *nclaimed = num_claimed;
422 440
423 for (i = 0; i < num_claimed; i++) { 441 for (i = 0; i < num_claimed; i++) {
424 addr = be64_to_cpu(out->pas[i]); 442 addr = be64_to_cpu(out->pas[i]);
425 free_4k(dev, addr); 443 free_4k(dev, addr);
426 } 444 }
445
446 if (nclaimed)
447 *nclaimed = num_claimed;
448
427 dev->priv.fw_pages -= num_claimed; 449 dev->priv.fw_pages -= num_claimed;
428 if (func_id) 450 if (func_id)
429 dev->priv.vfs_pages -= num_claimed; 451 dev->priv.vfs_pages -= num_claimed;
@@ -514,14 +536,10 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
514 p = rb_first(&dev->priv.page_root); 536 p = rb_first(&dev->priv.page_root);
515 if (p) { 537 if (p) {
516 fwp = rb_entry(p, struct fw_page, rb_node); 538 fwp = rb_entry(p, struct fw_page, rb_node);
517 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 539 err = reclaim_pages(dev, fwp->func_id,
518 free_4k(dev, fwp->addr); 540 optimal_reclaimed_pages(),
519 nclaimed = 1; 541 &nclaimed);
520 } else { 542
521 err = reclaim_pages(dev, fwp->func_id,
522 optimal_reclaimed_pages(),
523 &nclaimed);
524 }
525 if (err) { 543 if (err) {
526 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", 544 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
527 err); 545 err);
@@ -536,6 +554,13 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
536 } 554 }
537 } while (p); 555 } while (p);
538 556
557 WARN(dev->priv.fw_pages,
558 "FW pages counter is %d after reclaiming all pages\n",
559 dev->priv.fw_pages);
560 WARN(dev->priv.vfs_pages,
561 "VFs FW pages counter is %d after reclaiming all pages\n",
562 dev->priv.vfs_pages);
563
539 return 0; 564 return 0;
540} 565}
541 566
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index d6a3f412ba9f..b380a6bc1f85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -167,7 +167,7 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
167 167
168 mlx5_core_init_vfs(dev, num_vfs); 168 mlx5_core_init_vfs(dev, num_vfs);
169#ifdef CONFIG_MLX5_CORE_EN 169#ifdef CONFIG_MLX5_CORE_EN
170 mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs); 170 mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
171#endif 171#endif
172 172
173 return num_vfs; 173 return num_vfs;
@@ -209,7 +209,8 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev)
209 mlx5_core_init_vfs(dev, cur_vfs); 209 mlx5_core_init_vfs(dev, cur_vfs);
210#ifdef CONFIG_MLX5_CORE_EN 210#ifdef CONFIG_MLX5_CORE_EN
211 if (cur_vfs) 211 if (cur_vfs)
212 mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs); 212 mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs,
213 SRIOV_LEGACY);
213#endif 214#endif
214 215
215 enable_vfs(dev, cur_vfs); 216 enable_vfs(dev, cur_vfs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index daf44cd4c566..91846dfcbe9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -513,7 +513,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
513{ 513{
514 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); 514 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
515 void *nic_vport_context; 515 void *nic_vport_context;
516 u8 *guid;
517 void *in; 516 void *in;
518 int err; 517 int err;
519 518
@@ -535,8 +534,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
535 534
536 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in, 535 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
537 in, nic_vport_context); 536 in, nic_vport_context);
538 guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context,
539 node_guid);
540 MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid); 537 MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
541 538
542 err = mlx5_modify_nic_vport_context(mdev, in, inlen); 539 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index f2fd1ef16da7..05de77267d58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -105,6 +105,9 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
105 struct mlx5e_vxlan *vxlan; 105 struct mlx5e_vxlan *vxlan;
106 int err; 106 int err;
107 107
108 if (mlx5e_vxlan_lookup_port(priv, port))
109 goto free_work;
110
108 if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port)) 111 if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
109 goto free_work; 112 goto free_work;
110 113
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index ce21ee5b2357..821a087c7ae2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -75,14 +75,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
75 75
76 err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); 76 err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
77 if (err) { 77 if (err) {
78 mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); 78 mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
79 return err; 79 return err;
80 } 80 }
81 81
82 err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq), 82 err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
83 &wq_ctrl->buf, param->buf_numa_node); 83 &wq_ctrl->buf, param->buf_numa_node);
84 if (err) { 84 if (err) {
85 mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); 85 mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
86 goto err_db_free; 86 goto err_db_free;
87 } 87 }
88 88
@@ -111,14 +111,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
111 111
112 err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); 112 err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
113 if (err) { 113 if (err) {
114 mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); 114 mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
115 return err; 115 return err;
116 } 116 }
117 117
118 err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq), 118 err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
119 &wq_ctrl->buf, param->buf_numa_node); 119 &wq_ctrl->buf, param->buf_numa_node);
120 if (err) { 120 if (err) {
121 mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); 121 mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
122 goto err_db_free; 122 goto err_db_free;
123 } 123 }
124 124
@@ -148,13 +148,14 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
148 148
149 err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); 149 err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
150 if (err) { 150 if (err) {
151 mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); 151 mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
152 return err; 152 return err;
153 } 153 }
154 154
155 err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf); 155 err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
156 &wq_ctrl->buf, param->buf_numa_node);
156 if (err) { 157 if (err) {
157 mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); 158 mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
158 goto err_db_free; 159 goto err_db_free;
159 } 160 }
160 161
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 9b5ebf84c051..d20ae1838a64 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -7,5 +7,6 @@ obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o
7mlxsw_switchx2-objs := switchx2.o 7mlxsw_switchx2-objs := switchx2.o
8obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o 8obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
9mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ 9mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
10 spectrum_switchdev.o 10 spectrum_switchdev.o spectrum_router.o \
11 spectrum_kvdl.o
11mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o 12mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index cd63b8263688..f9cd6e3f7709 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -607,6 +607,24 @@ MLXSW_ITEM32(cmd_mbox, config_profile,
607 */ 607 */
608MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1); 608MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
609 609
610/* cmd_mbox_config_set_kvd_linear_size
611 * Capability bit. Setting a bit to 1 configures the profile
612 * according to the mailbox contents.
613 */
614MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_linear_size, 0x0C, 24, 1);
615
616/* cmd_mbox_config_set_kvd_hash_single_size
617 * Capability bit. Setting a bit to 1 configures the profile
618 * according to the mailbox contents.
619 */
620MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1);
621
622/* cmd_mbox_config_set_kvd_hash_double_size
623 * Capability bit. Setting a bit to 1 configures the profile
624 * according to the mailbox contents.
625 */
626MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1);
627
610/* cmd_mbox_config_profile_max_vepa_channels 628/* cmd_mbox_config_profile_max_vepa_channels
611 * Maximum number of VEPA channels per port (0 through 16) 629 * Maximum number of VEPA channels per port (0 through 16)
612 * 0 - multi-channel VEPA is disabled 630 * 0 - multi-channel VEPA is disabled
@@ -733,6 +751,31 @@ MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
733 */ 751 */
734MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1); 752MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
735 753
754/* cmd_mbox_config_kvd_linear_size
755 * KVD Linear Size
756 * Valid for Spectrum only
757 * Allowed values are 128*N where N=0 or higher
758 */
759MLXSW_ITEM32(cmd_mbox, config_profile, kvd_linear_size, 0x54, 0, 24);
760
761/* cmd_mbox_config_kvd_hash_single_size
762 * KVD Hash single-entries size
763 * Valid for Spectrum only
764 * Allowed values are 128*N where N=0 or higher
 765 * Must be greater than or equal to cap_min_kvd_hash_single_size
 766 * Must be smaller than or equal to cap_kvd_size - kvd_linear_size
767 */
768MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_single_size, 0x58, 0, 24);
769
770/* cmd_mbox_config_kvd_hash_double_size
771 * KVD Hash double-entries size (units of single-size entries)
772 * Valid for Spectrum only
773 * Allowed values are 128*N where N=0 or higher
 774 * Must be either 0 or greater than or equal to cap_min_kvd_hash_double_size
 775 * Must be smaller than or equal to cap_kvd_size - kvd_linear_size
776 */
777MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_double_size, 0x5C, 0, 24);
778
736/* cmd_mbox_config_profile_swid_config_mask 779/* cmd_mbox_config_profile_swid_config_mask
737 * Modify Switch Partition Configuration mask. When set, the configu- 780 * Modify Switch Partition Configuration mask. When set, the configu-
738 * ration value for the Switch Partition are taken from the mailbox. 781 * ration value for the Switch Partition are taken from the mailbox.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 436bc49df6ab..2fe385cce203 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -190,7 +190,8 @@ struct mlxsw_config_profile {
190 used_max_ib_mc:1, 190 used_max_ib_mc:1,
191 used_max_pkey:1, 191 used_max_pkey:1,
192 used_ar_sec:1, 192 used_ar_sec:1,
193 used_adaptive_routing_group_cap:1; 193 used_adaptive_routing_group_cap:1,
194 used_kvd_sizes:1;
194 u8 max_vepa_channels; 195 u8 max_vepa_channels;
195 u16 max_lag; 196 u16 max_lag;
196 u16 max_port_per_lag; 197 u16 max_port_per_lag;
@@ -211,6 +212,9 @@ struct mlxsw_config_profile {
211 u8 ar_sec; 212 u8 ar_sec;
212 u16 adaptive_routing_group_cap; 213 u16 adaptive_routing_group_cap;
213 u8 arn; 214 u8 arn;
215 u32 kvd_linear_size;
216 u32 kvd_hash_single_size;
217 u32 kvd_hash_double_size;
214 struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT]; 218 struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
215}; 219};
216 220
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 7f4173c8eda3..ddbc9f22278d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1255,6 +1255,20 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
1255 mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set( 1255 mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
1256 mbox, profile->adaptive_routing_group_cap); 1256 mbox, profile->adaptive_routing_group_cap);
1257 } 1257 }
1258 if (profile->used_kvd_sizes) {
1259 mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(
1260 mbox, 1);
1261 mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(
1262 mbox, profile->kvd_linear_size);
1263 mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(
1264 mbox, 1);
1265 mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(
1266 mbox, profile->kvd_hash_single_size);
1267 mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
1268 mbox, 1);
1269 mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(
1270 mbox, profile->kvd_hash_double_size);
1271 }
1258 1272
1259 for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++) 1273 for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
1260 mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i, 1274 mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
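
On the driver side, a Spectrum profile would opt in through the new used_kvd_sizes bit. A hedged sketch with made-up partition sizes (multiples of 128 as the cmd.h field descriptions require, and still subject to the cap_* limits noted there):

static const struct mlxsw_config_profile example_profile = {
	.used_kvd_sizes		= 1,
	.kvd_linear_size	= 64 * 1024,	/* 128-aligned, example only */
	.kvd_hash_single_size	= 128 * 1024,	/* >= cap_min_kvd_hash_single_size */
	.kvd_hash_double_size	= 32 * 1024,	/* 0 or >= cap_min_kvd_hash_double_size */
};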
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 1977e7a5c530..0cc148566677 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1,9 +1,10 @@
1/* 1/*
2 * drivers/net/ethernet/mellanox/mlxsw/reg.h 2 * drivers/net/ethernet/mellanox/mlxsw/reg.h
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> 4 * Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com>
5 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> 5 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
6 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> 6 * Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com>
7 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
7 * 8 *
8 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met: 10 * modification, are permitted provided that the following conditions are met:
@@ -386,7 +387,9 @@ enum mlxsw_reg_sfd_rec_action {
386 /* forward and trap, trap_id is FDB_TRAP */ 387 /* forward and trap, trap_id is FDB_TRAP */
387 MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1, 388 MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1,
388 /* trap and do not forward, trap_id is FDB_TRAP */ 389 /* trap and do not forward, trap_id is FDB_TRAP */
389 MLXSW_REG_SFD_REC_ACTION_TRAP = 3, 390 MLXSW_REG_SFD_REC_ACTION_TRAP = 2,
391 /* forward to IP router */
392 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER = 3,
390 MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15, 393 MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15,
391}; 394};
392 395
@@ -3186,6 +3189,1183 @@ static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id)
3186 mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT); 3189 mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
3187} 3190}
3188 3191
3192/* RGCR - Router General Configuration Register
3193 * --------------------------------------------
3194 * The register is used for setting up the router configuration.
3195 */
3196#define MLXSW_REG_RGCR_ID 0x8001
3197#define MLXSW_REG_RGCR_LEN 0x28
3198
3199static const struct mlxsw_reg_info mlxsw_reg_rgcr = {
3200 .id = MLXSW_REG_RGCR_ID,
3201 .len = MLXSW_REG_RGCR_LEN,
3202};
3203
3204/* reg_rgcr_ipv4_en
3205 * IPv4 router enable.
3206 * Access: RW
3207 */
3208MLXSW_ITEM32(reg, rgcr, ipv4_en, 0x00, 31, 1);
3209
3210/* reg_rgcr_ipv6_en
3211 * IPv6 router enable.
3212 * Access: RW
3213 */
3214MLXSW_ITEM32(reg, rgcr, ipv6_en, 0x00, 30, 1);
3215
3216/* reg_rgcr_max_router_interfaces
3217 * Defines the maximum number of active router interfaces for all virtual
3218 * routers.
3219 * Access: RW
3220 */
3221MLXSW_ITEM32(reg, rgcr, max_router_interfaces, 0x10, 0, 16);
3222
3223/* reg_rgcr_usp
3224 * Update switch priority and packet color.
3225 * 0 - Preserve the value of Switch Priority and packet color.
3226 * 1 - Recalculate the value of Switch Priority and packet color.
3227 * Access: RW
3228 *
3229 * Note: Not supported by SwitchX and SwitchX-2.
3230 */
3231MLXSW_ITEM32(reg, rgcr, usp, 0x18, 20, 1);
3232
3233/* reg_rgcr_pcp_rw
3234 * Indicates how to handle the pcp_rewrite_en value:
3235 * 0 - Preserve the value of pcp_rewrite_en.
3236 * 2 - Disable PCP rewrite.
3237 * 3 - Enable PCP rewrite.
3238 * Access: RW
3239 *
3240 * Note: Not supported by SwitchX and SwitchX-2.
3241 */
3242MLXSW_ITEM32(reg, rgcr, pcp_rw, 0x18, 16, 2);
3243
3244/* reg_rgcr_activity_dis
3245 * Activity disable:
3246 * 0 - Activity will be set when an entry is hit (default).
3247 * 1 - Activity will not be set when an entry is hit.
3248 *
3249 * Bit 0 - Disable activity bit in Router Algorithmic LPM Unicast Entry
3250 * (RALUE).
3251 * Bit 1 - Disable activity bit in Router Algorithmic LPM Unicast Host
3252 * Entry (RAUHT).
3253 * Bits 2:7 are reserved.
3254 * Access: RW
3255 *
3256 * Note: Not supported by SwitchX, SwitchX-2 and Switch-IB.
3257 */
3258MLXSW_ITEM32(reg, rgcr, activity_dis, 0x20, 0, 8);
3259
3260static inline void mlxsw_reg_rgcr_pack(char *payload, bool ipv4_en)
3261{
3262 MLXSW_REG_ZERO(rgcr, payload);
3263 mlxsw_reg_rgcr_ipv4_en_set(payload, ipv4_en);
3264}
3265
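
A usage sketch (not part of this patch): enabling the IPv4 router once at init, using the mlxsw_reg_write() helper and MLXSW_REG() macro from the mlxsw core; the RIF budget of 8 is an arbitrary example:

static int example_router_enable(struct mlxsw_core *mlxsw_core)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, true);	/* ipv4_en = 1 */
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, 8);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(rgcr), rgcr_pl);
}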
3266/* RITR - Router Interface Table Register
3267 * --------------------------------------
3268 * The register is used to configure the router interface table.
3269 */
3270#define MLXSW_REG_RITR_ID 0x8002
3271#define MLXSW_REG_RITR_LEN 0x40
3272
3273static const struct mlxsw_reg_info mlxsw_reg_ritr = {
3274 .id = MLXSW_REG_RITR_ID,
3275 .len = MLXSW_REG_RITR_LEN,
3276};
3277
3278/* reg_ritr_enable
3279 * Enables routing on the router interface.
3280 * Access: RW
3281 */
3282MLXSW_ITEM32(reg, ritr, enable, 0x00, 31, 1);
3283
3284/* reg_ritr_ipv4
3285 * IPv4 routing enable. Enables routing of IPv4 traffic on the router
3286 * interface.
3287 * Access: RW
3288 */
3289MLXSW_ITEM32(reg, ritr, ipv4, 0x00, 29, 1);
3290
3291/* reg_ritr_ipv6
3292 * IPv6 routing enable. Enables routing of IPv6 traffic on the router
3293 * interface.
3294 * Access: RW
3295 */
3296MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1);
3297
3298enum mlxsw_reg_ritr_if_type {
3299 MLXSW_REG_RITR_VLAN_IF,
3300 MLXSW_REG_RITR_FID_IF,
3301 MLXSW_REG_RITR_SP_IF,
3302};
3303
3304/* reg_ritr_type
3305 * Router interface type.
3306 * 0 - VLAN interface.
3307 * 1 - FID interface.
3308 * 2 - Sub-port interface.
3309 * Access: RW
3310 */
3311MLXSW_ITEM32(reg, ritr, type, 0x00, 23, 3);
3312
3313enum {
3314 MLXSW_REG_RITR_RIF_CREATE,
3315 MLXSW_REG_RITR_RIF_DEL,
3316};
3317
3318/* reg_ritr_op
3319 * Opcode:
3320 * 0 - Create or edit RIF.
3321 * 1 - Delete RIF.
3322 * Reserved for SwitchX-2. For Spectrum, editing of interface properties
3323 * is not supported. An interface must be deleted and re-created in order
3324 * to update properties.
3325 * Access: WO
3326 */
3327MLXSW_ITEM32(reg, ritr, op, 0x00, 20, 2);
3328
3329/* reg_ritr_rif
3330 * Router interface index. A pointer to the Router Interface Table.
3331 * Access: Index
3332 */
3333MLXSW_ITEM32(reg, ritr, rif, 0x00, 0, 16);
3334
3335/* reg_ritr_ipv4_fe
3336 * IPv4 Forwarding Enable.
3337 * Enables routing of IPv4 traffic on the router interface. When disabled,
3338 * forwarding is blocked but local traffic (traps and IP2ME) will be enabled.
3339 * Not supported in SwitchX-2.
3340 * Access: RW
3341 */
3342MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1);
3343
3344/* reg_ritr_ipv6_fe
3345 * IPv6 Forwarding Enable.
3346 * Enables routing of IPv6 traffic on the router interface. When disabled,
3347 * forwarding is blocked but local traffic (traps and IP2ME) will be enabled.
3348 * Not supported in SwitchX-2.
3349 * Access: RW
3350 */
3351MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
3352
3353/* reg_ritr_virtual_router
3354 * Virtual router ID associated with the router interface.
3355 * Access: RW
3356 */
3357MLXSW_ITEM32(reg, ritr, virtual_router, 0x04, 0, 16);
3358
3359/* reg_ritr_mtu
3360 * Router interface MTU.
3361 * Access: RW
3362 */
3363MLXSW_ITEM32(reg, ritr, mtu, 0x34, 0, 16);
3364
3365/* reg_ritr_if_swid
3366 * Switch partition ID.
3367 * Access: RW
3368 */
3369MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8);
3370
3371/* reg_ritr_if_mac
3372 * Router interface MAC address.
3373 * In Spectrum, all MAC addresses must have the same 38 MSBits.
3374 * Access: RW
3375 */
3376MLXSW_ITEM_BUF(reg, ritr, if_mac, 0x12, 6);
3377
3378/* VLAN Interface */
3379
3380/* reg_ritr_vlan_if_vid
3381 * VLAN ID.
3382 * Access: RW
3383 */
3384MLXSW_ITEM32(reg, ritr, vlan_if_vid, 0x08, 0, 12);
3385
3386/* FID Interface */
3387
3388/* reg_ritr_fid_if_fid
3389 * Filtering ID. Used to connect a bridge to the router. Only FIDs from
3390 * the vFID range are supported.
3391 * Access: RW
3392 */
3393MLXSW_ITEM32(reg, ritr, fid_if_fid, 0x08, 0, 16);
3394
3395static inline void mlxsw_reg_ritr_fid_set(char *payload,
3396 enum mlxsw_reg_ritr_if_type rif_type,
3397 u16 fid)
3398{
3399 if (rif_type == MLXSW_REG_RITR_FID_IF)
3400 mlxsw_reg_ritr_fid_if_fid_set(payload, fid);
3401 else
3402 mlxsw_reg_ritr_vlan_if_vid_set(payload, fid);
3403}
3404
3405/* Sub-port Interface */
3406
3407/* reg_ritr_sp_if_lag
3408 * LAG indication. When this bit is set the system_port field holds the
3409 * LAG identifier.
3410 * Access: RW
3411 */
3412MLXSW_ITEM32(reg, ritr, sp_if_lag, 0x08, 24, 1);
3413
3414/* reg_ritr_sp_if_system_port
3415 * Port unique identifier. When lag bit is set, this field holds the
3416 * lag_id in bits 0:9.
3417 * Access: RW
3418 */
3419MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16);
3420
3421/* reg_ritr_sp_if_vid
3422 * VLAN ID.
3423 * Access: RW
3424 */
3425MLXSW_ITEM32(reg, ritr, sp_if_vid, 0x18, 0, 12);
3426
3427static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
3428{
3429 MLXSW_REG_ZERO(ritr, payload);
3430 mlxsw_reg_ritr_rif_set(payload, rif);
3431}
3432
3433static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
3434 u16 system_port, u16 vid)
3435{
3436 mlxsw_reg_ritr_sp_if_lag_set(payload, lag);
3437 mlxsw_reg_ritr_sp_if_system_port_set(payload, system_port);
3438 mlxsw_reg_ritr_sp_if_vid_set(payload, vid);
3439}
3440
3441static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
3442 enum mlxsw_reg_ritr_if_type type,
3443 u16 rif, u16 mtu, const char *mac)
3444{
3445 bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL;
3446
3447 MLXSW_REG_ZERO(ritr, payload);
3448 mlxsw_reg_ritr_enable_set(payload, enable);
3449 mlxsw_reg_ritr_ipv4_set(payload, 1);
3450 mlxsw_reg_ritr_type_set(payload, type);
3451 mlxsw_reg_ritr_op_set(payload, op);
3452 mlxsw_reg_ritr_rif_set(payload, rif);
3453 mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
3454 mlxsw_reg_ritr_mtu_set(payload, mtu);
3455 mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
3456}
3457
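
A hedged usage sketch: creating a VLAN RIF. The RIF index, VID, MTU and MAC are example values; a real caller would allocate the RIF index and take the MAC from the netdev:

static int example_rif_create(struct mlxsw_core *mlxsw_core, const char *mac)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, true, MLXSW_REG_RITR_VLAN_IF,
			    1 /* rif */, 1500 /* mtu */, mac);
	mlxsw_reg_ritr_vlan_if_vid_set(ritr_pl, 100);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ritr), ritr_pl);
}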
3458/* RATR - Router Adjacency Table Register
3459 * --------------------------------------
3460 * The RATR register is used to configure the Router Adjacency (next-hop)
3461 * Table.
3462 */
3463#define MLXSW_REG_RATR_ID 0x8008
3464#define MLXSW_REG_RATR_LEN 0x2C
3465
3466static const struct mlxsw_reg_info mlxsw_reg_ratr = {
3467 .id = MLXSW_REG_RATR_ID,
3468 .len = MLXSW_REG_RATR_LEN,
3469};
3470
3471enum mlxsw_reg_ratr_op {
3472 /* Read */
3473 MLXSW_REG_RATR_OP_QUERY_READ = 0,
3474 /* Read and clear activity */
3475 MLXSW_REG_RATR_OP_QUERY_READ_CLEAR = 2,
3476 /* Write Adjacency entry */
3477 MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY = 1,
3478 /* Write Adjacency entry only if the activity is cleared.
3479 * The write may not succeed if the activity is set. There is no
3480 * direct feedback if the write has succeeded or not; however,
3481 * the get will reveal the actual entry (SW can compare the get
3482 * response to the set command).
3483 */
3484 MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY = 3,
3485};
3486
3487/* reg_ratr_op
3488 * Note that Write operation may also be used for updating
3489 * counter_set_type and counter_index. In this case all other
3490 * fields must not be updated.
3491 * Access: OP
3492 */
3493MLXSW_ITEM32(reg, ratr, op, 0x00, 28, 4);
3494
3495/* reg_ratr_v
3496 * Valid bit. Indicates if the adjacency entry is valid.
3497 * Note: the device may need some time before reusing an invalidated
3498 * entry. During this time the entry cannot be reused. It is
3499 * recommended to use another entry before reusing an invalidated
3500 * entry (e.g. software can put it at the end of the list for
3501 * reusing). Trying to access an invalidated entry not yet cleared
3502 * by the device results in a failure indicating "Try Again" status.
3503 * When valid is '0', egress_router_interface, trap_action,
3504 * adjacency_parameters and counters are reserved.
3505 * Access: RW
3506 */
3507MLXSW_ITEM32(reg, ratr, v, 0x00, 24, 1);
3508
3509/* reg_ratr_a
3510 * Activity. Set for new entries. Set if a packet lookup has hit on
3511 * the specific entry. To clear the a bit, use "clear activity".
3512 * Access: RO
3513 */
3514MLXSW_ITEM32(reg, ratr, a, 0x00, 16, 1);
3515
3516/* reg_ratr_adjacency_index_low
3517 * Bits 15:0 of index into the adjacency table.
3518 * For SwitchX and SwitchX-2, the adjacency table is linear and
3519 * used for adjacency entries only.
3520 * For Spectrum, the index is to the KVD linear.
3521 * Access: Index
3522 */
3523MLXSW_ITEM32(reg, ratr, adjacency_index_low, 0x04, 0, 16);
3524
3525/* reg_ratr_egress_router_interface
3526 * Range is 0 .. cap_max_router_interfaces - 1
3527 * Access: RW
3528 */
3529MLXSW_ITEM32(reg, ratr, egress_router_interface, 0x08, 0, 16);
3530
3531enum mlxsw_reg_ratr_trap_action {
3532 MLXSW_REG_RATR_TRAP_ACTION_NOP,
3533 MLXSW_REG_RATR_TRAP_ACTION_TRAP,
3534 MLXSW_REG_RATR_TRAP_ACTION_MIRROR_TO_CPU,
3535 MLXSW_REG_RATR_TRAP_ACTION_MIRROR,
3536 MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS,
3537};
3538
3539/* reg_ratr_trap_action
3540 * see mlxsw_reg_ratr_trap_action
3541 * Access: RW
3542 */
3543MLXSW_ITEM32(reg, ratr, trap_action, 0x0C, 28, 4);
3544
3545enum mlxsw_reg_ratr_trap_id {
3546 MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS0 = 0,
3547 MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS1 = 1,
3548};
3549
3550/* reg_ratr_adjacency_index_high
3551 * Bits 23:16 of the adjacency_index.
3552 * Access: Index
3553 */
3554MLXSW_ITEM32(reg, ratr, adjacency_index_high, 0x0C, 16, 8);
3555
3556/* reg_ratr_trap_id
3557 * Trap ID to be reported to CPU.
3558 * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1.
3559 * For trap_action of NOP, MIRROR and DISCARD_ERROR
3560 * Access: RW
3561 */
3562MLXSW_ITEM32(reg, ratr, trap_id, 0x0C, 0, 8);
3563
3564/* reg_ratr_eth_destination_mac
3565 * MAC address of the destination next-hop.
3566 * Access: RW
3567 */
3568MLXSW_ITEM_BUF(reg, ratr, eth_destination_mac, 0x12, 6);
3569
3570static inline void
3571mlxsw_reg_ratr_pack(char *payload,
3572 enum mlxsw_reg_ratr_op op, bool valid,
3573 u32 adjacency_index, u16 egress_rif)
3574{
3575 MLXSW_REG_ZERO(ratr, payload);
3576 mlxsw_reg_ratr_op_set(payload, op);
3577 mlxsw_reg_ratr_v_set(payload, valid);
3578 mlxsw_reg_ratr_adjacency_index_low_set(payload, adjacency_index);
3579 mlxsw_reg_ratr_adjacency_index_high_set(payload, adjacency_index >> 16);
3580 mlxsw_reg_ratr_egress_router_interface_set(payload, egress_rif);
3581}
3582
3583static inline void mlxsw_reg_ratr_eth_entry_pack(char *payload,
3584 const char *dest_mac)
3585{
3586 mlxsw_reg_ratr_eth_destination_mac_memcpy_to(payload, dest_mac);
3587}
3588
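
A hedged usage sketch: writing one next-hop adjacency entry; the index, egress RIF and next-hop MAC come from the caller:

static int example_adjacency_write(struct mlxsw_core *mlxsw_core,
				   u32 adj_index, u16 erif,
				   const char *nh_mac)
{
	char ratr_pl[MLXSW_REG_RATR_LEN];

	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, adj_index, erif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, nh_mac);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ratr), ratr_pl);
}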
3589/* RALTA - Router Algorithmic LPM Tree Allocation Register
3590 * -------------------------------------------------------
3591 * RALTA is used to allocate the LPM trees of the SHSPM method.
3592 */
3593#define MLXSW_REG_RALTA_ID 0x8010
3594#define MLXSW_REG_RALTA_LEN 0x04
3595
3596static const struct mlxsw_reg_info mlxsw_reg_ralta = {
3597 .id = MLXSW_REG_RALTA_ID,
3598 .len = MLXSW_REG_RALTA_LEN,
3599};
3600
3601/* reg_ralta_op
3602 * opcode (valid for Write, must be 0 on Read)
3603 * 0 - allocate a tree
3604 * 1 - deallocate a tree
3605 * Access: OP
3606 */
3607MLXSW_ITEM32(reg, ralta, op, 0x00, 28, 2);
3608
3609enum mlxsw_reg_ralxx_protocol {
3610 MLXSW_REG_RALXX_PROTOCOL_IPV4,
3611 MLXSW_REG_RALXX_PROTOCOL_IPV6,
3612};
3613
3614/* reg_ralta_protocol
3615 * Protocol.
3616 * Deallocation opcode: Reserved.
3617 * Access: RW
3618 */
3619MLXSW_ITEM32(reg, ralta, protocol, 0x00, 24, 4);
3620
3621/* reg_ralta_tree_id
3622 * An identifier (numbered from 1..cap_shspm_max_trees-1) for the tree,
3623 * managed by software.
3624 * Note that tree_id 0 is allocated for a default-route tree.
3625 * Access: Index
3626 */
3627MLXSW_ITEM32(reg, ralta, tree_id, 0x00, 0, 8);
3628
3629static inline void mlxsw_reg_ralta_pack(char *payload, bool alloc,
3630 enum mlxsw_reg_ralxx_protocol protocol,
3631 u8 tree_id)
3632{
3633 MLXSW_REG_ZERO(ralta, payload);
3634 mlxsw_reg_ralta_op_set(payload, !alloc);
3635 mlxsw_reg_ralta_protocol_set(payload, protocol);
3636 mlxsw_reg_ralta_tree_id_set(payload, tree_id);
3637}
3638
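
A usage sketch, assuming tree_id 1 (tree_id 0 being reserved for the default-route tree as noted above):

static int example_lpm_tree_alloc(struct mlxsw_core *mlxsw_core)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true /* alloc */,
			     MLXSW_REG_RALXX_PROTOCOL_IPV4, 1 /* tree_id */);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ralta), ralta_pl);
}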
3639/* RALST - Router Algorithmic LPM Structure Tree Register
3640 * ------------------------------------------------------
3641 * RALST is used to set and query the structure of an LPM tree.
3642 * The tree must be organized as a sorted binary tree, where each node
3643 * is a bin tagged with the length of the prefixes the lookup will
3644 * refer to. Therefore, bin X refers to a set of entries with prefixes
3645 * of X bits to match against the destination address. Bin 0 indicates
3646 * the default action, taken when no prefix matches.
3647 */
3648#define MLXSW_REG_RALST_ID 0x8011
3649#define MLXSW_REG_RALST_LEN 0x104
3650
3651static const struct mlxsw_reg_info mlxsw_reg_ralst = {
3652 .id = MLXSW_REG_RALST_ID,
3653 .len = MLXSW_REG_RALST_LEN,
3654};
3655
3656/* reg_ralst_root_bin
3657 * The bin number of the root bin.
3658 * 0 < root_bin <= (length of IP address)
3659 * For a default-route tree configure 0xff
3660 * Access: RW
3661 */
3662MLXSW_ITEM32(reg, ralst, root_bin, 0x00, 16, 8);
3663
3664/* reg_ralst_tree_id
3665 * Tree identifier numbered from 1..(cap_shspm_max_trees-1).
3666 * Access: Index
3667 */
3668MLXSW_ITEM32(reg, ralst, tree_id, 0x00, 0, 8);
3669
3670#define MLXSW_REG_RALST_BIN_NO_CHILD 0xff
3671#define MLXSW_REG_RALST_BIN_OFFSET 0x04
3672#define MLXSW_REG_RALST_BIN_COUNT 128
3673
3674/* reg_ralst_left_child_bin
3675 * Holding the children of the bin according to the stored tree's structure.
3676 * For trees composed of less than 4 blocks, the bins in excess are reserved.
3677 * Note that tree_id 0 is allocated for a default-route tree, bins are 0xff
3678 * Access: RW
3679 */
3680MLXSW_ITEM16_INDEXED(reg, ralst, left_child_bin, 0x04, 8, 8, 0x02, 0x00, false);
3681
3682/* reg_ralst_right_child_bin
3683 * Holding the children of the bin according to the stored tree's structure.
3684 * For trees composed of less than 4 blocks, the bins in excess are reserved.
3685 * Note that tree_id 0 is allocated for a default-route tree, bins are 0xff
3686 * Access: RW
3687 */
3688MLXSW_ITEM16_INDEXED(reg, ralst, right_child_bin, 0x04, 0, 8, 0x02, 0x00,
3689 false);
3690
3691static inline void mlxsw_reg_ralst_pack(char *payload, u8 root_bin, u8 tree_id)
3692{
3693 MLXSW_REG_ZERO(ralst, payload);
3694
3695 /* Initialize all bins to have no left or right child */
3696 memset(payload + MLXSW_REG_RALST_BIN_OFFSET,
3697 MLXSW_REG_RALST_BIN_NO_CHILD, MLXSW_REG_RALST_BIN_COUNT * 2);
3698
3699 mlxsw_reg_ralst_root_bin_set(payload, root_bin);
3700 mlxsw_reg_ralst_tree_id_set(payload, tree_id);
3701}
3702
3703static inline void mlxsw_reg_ralst_bin_pack(char *payload, u8 bin_number,
3704 u8 left_child_bin,
3705 u8 right_child_bin)
3706{
3707 int bin_index = bin_number - 1;
3708
3709 mlxsw_reg_ralst_left_child_bin_set(payload, bin_index, left_child_bin);
3710 mlxsw_reg_ralst_right_child_bin_set(payload, bin_index,
3711 right_child_bin);
3712}
3713
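
A hedged sketch describing a three-bin tree for tree_id 1: root bin 16 with child bins 8 and 24 as leaves. Which side holds the shorter prefixes is an assumption here; the register only requires the structure to be a sorted binary tree:

static int example_lpm_tree_struct(struct mlxsw_core *mlxsw_core)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];

	mlxsw_reg_ralst_pack(ralst_pl, 16 /* root_bin */, 1 /* tree_id */);
	mlxsw_reg_ralst_bin_pack(ralst_pl, 16, 8, 24);
	mlxsw_reg_ralst_bin_pack(ralst_pl, 8, MLXSW_REG_RALST_BIN_NO_CHILD,
				 MLXSW_REG_RALST_BIN_NO_CHILD);
	mlxsw_reg_ralst_bin_pack(ralst_pl, 24, MLXSW_REG_RALST_BIN_NO_CHILD,
				 MLXSW_REG_RALST_BIN_NO_CHILD);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ralst), ralst_pl);
}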
3714/* RALTB - Router Algorithmic LPM Tree Binding Register
3715 * ----------------------------------------------------
3716 * RALTB is used to bind virtual router and protocol to an allocated LPM tree.
3717 */
3718#define MLXSW_REG_RALTB_ID 0x8012
3719#define MLXSW_REG_RALTB_LEN 0x04
3720
3721static const struct mlxsw_reg_info mlxsw_reg_raltb = {
3722 .id = MLXSW_REG_RALTB_ID,
3723 .len = MLXSW_REG_RALTB_LEN,
3724};
3725
3726/* reg_raltb_virtual_router
3727 * Virtual Router ID
3728 * Range is 0..cap_max_virtual_routers-1
3729 * Access: Index
3730 */
3731MLXSW_ITEM32(reg, raltb, virtual_router, 0x00, 16, 16);
3732
3733/* reg_raltb_protocol
3734 * Protocol.
3735 * Access: Index
3736 */
3737MLXSW_ITEM32(reg, raltb, protocol, 0x00, 12, 4);
3738
3739/* reg_raltb_tree_id
3740 * Tree to be used for the {virtual_router, protocol}
3741 * Tree identifier numbered from 1..(cap_shspm_max_trees-1).
3742 * By default, all Unicast IPv4 and IPv6 are bound to tree_id 0.
3743 * Access: RW
3744 */
3745MLXSW_ITEM32(reg, raltb, tree_id, 0x00, 0, 8);
3746
3747static inline void mlxsw_reg_raltb_pack(char *payload, u16 virtual_router,
3748 enum mlxsw_reg_ralxx_protocol protocol,
3749 u8 tree_id)
3750{
3751 MLXSW_REG_ZERO(raltb, payload);
3752 mlxsw_reg_raltb_virtual_router_set(payload, virtual_router);
3753 mlxsw_reg_raltb_protocol_set(payload, protocol);
3754 mlxsw_reg_raltb_tree_id_set(payload, tree_id);
3755}
3756
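
A usage sketch: binding virtual router 0, IPv4, to the tree allocated above:

static int example_lpm_tree_bind(struct mlxsw_core *mlxsw_core)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, 0 /* virtual_router */,
			     MLXSW_REG_RALXX_PROTOCOL_IPV4, 1 /* tree_id */);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(raltb), raltb_pl);
}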
3757/* RALUE - Router Algorithmic LPM Unicast Entry Register
3758 * -----------------------------------------------------
3759 * RALUE is used to configure and query LPM entries that serve
3760 * the Unicast protocols.
3761 */
3762#define MLXSW_REG_RALUE_ID 0x8013
3763#define MLXSW_REG_RALUE_LEN 0x38
3764
3765static const struct mlxsw_reg_info mlxsw_reg_ralue = {
3766 .id = MLXSW_REG_RALUE_ID,
3767 .len = MLXSW_REG_RALUE_LEN,
3768};
3769
3770/* reg_ralue_protocol
3771 * Protocol.
3772 * Access: Index
3773 */
3774MLXSW_ITEM32(reg, ralue, protocol, 0x00, 24, 4);
3775
3776enum mlxsw_reg_ralue_op {
3777 /* Read operation. If entry doesn't exist, the operation fails. */
3778 MLXSW_REG_RALUE_OP_QUERY_READ = 0,
3779 /* Clear on read operation. Used to read entry and
3780 * clear Activity bit.
3781 */
3782 MLXSW_REG_RALUE_OP_QUERY_CLEAR = 1,
3783 /* Write operation. Used to write a new entry to the table. All RW
3784 * fields are written for new entry. Activity bit is set
3785 * for new entries.
3786 */
3787 MLXSW_REG_RALUE_OP_WRITE_WRITE = 0,
3788 /* Update operation. Used to update an existing route entry and
3789 * only update the RW fields that are detailed in the field
3790 * op_u_mask. If entry doesn't exist, the operation fails.
3791 */
3792 MLXSW_REG_RALUE_OP_WRITE_UPDATE = 1,
3793 /* Clear activity. The Activity bit (the field a) is cleared
3794 * for the entry.
3795 */
3796 MLXSW_REG_RALUE_OP_WRITE_CLEAR = 2,
3797 /* Delete operation. Used to delete an existing entry. If entry
3798 * doesn't exist, the operation fails.
3799 */
3800 MLXSW_REG_RALUE_OP_WRITE_DELETE = 3,
3801};
3802
3803/* reg_ralue_op
3804 * Operation.
3805 * Access: OP
3806 */
3807MLXSW_ITEM32(reg, ralue, op, 0x00, 20, 3);
3808
3809/* reg_ralue_a
3810 * Activity. Set for new entries. Set if a packet lookup has hit on the
3811 * specific entry, only if the entry is a route. To clear the a bit, use
3812 * "clear activity" op.
3813 * Enabled by activity_dis in RGCR
3814 * Access: RO
3815 */
3816MLXSW_ITEM32(reg, ralue, a, 0x00, 16, 1);
3817
3818/* reg_ralue_virtual_router
3819 * Virtual Router ID
3820 * Range is 0..cap_max_virtual_routers-1
3821 * Access: Index
3822 */
3823MLXSW_ITEM32(reg, ralue, virtual_router, 0x04, 16, 16);
3824
3825#define MLXSW_REG_RALUE_OP_U_MASK_ENTRY_TYPE BIT(0)
3826#define MLXSW_REG_RALUE_OP_U_MASK_BMP_LEN BIT(1)
3827#define MLXSW_REG_RALUE_OP_U_MASK_ACTION BIT(2)
3828
3829/* reg_ralue_op_u_mask
3830 * Opcode update mask.
3831 * On read operation, this field is reserved.
3832 * This field is valid for the update opcode, otherwise reserved.
3833 * This field is a bitmask of the fields that should be updated.
3834 * Access: WO
3835 */
3836MLXSW_ITEM32(reg, ralue, op_u_mask, 0x04, 8, 3);
3837
3838/* reg_ralue_prefix_len
3839 * Number of bits in the prefix of the LPM route.
3840 * Note that for IPv6 prefixes, if prefix_len>64 the entry consumes
3841 * two entries in the physical HW table.
3842 * Access: Index
3843 */
3844MLXSW_ITEM32(reg, ralue, prefix_len, 0x08, 0, 8);
3845
3846/* reg_ralue_dip*
3847 * The prefix of the route or of the marker that the object of the LPM
3848 * is compared with. The most significant bits of the dip are the prefix.
3849 * The least significant bits must be '0' if the prefix_len is smaller
3850 * than 128 for IPv6 or smaller than 32 for IPv4.
3851 * IPv4 address uses bits dip[31:0] and bits dip[127:32] are reserved.
3852 * Access: Index
3853 */
3854MLXSW_ITEM32(reg, ralue, dip4, 0x18, 0, 32);
3855
3856enum mlxsw_reg_ralue_entry_type {
3857 MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_ENTRY = 1,
3858 MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY = 2,
3859 MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_AND_ROUTE_ENTRY = 3,
3860};
3861
3862/* reg_ralue_entry_type
3863 * Entry type.
3864 * Note - for Marker entries, the action_type and action fields are reserved.
3865 * Access: RW
3866 */
3867MLXSW_ITEM32(reg, ralue, entry_type, 0x1C, 30, 2);
3868
3869/* reg_ralue_bmp_len
3870 * The best match prefix length in the case that there is no match for
3871 * longer prefixes.
3872 * If (entry_type != MARKER_ENTRY), bmp_len must be equal to prefix_len
3873 * Note for any update operation with entry_type modification this
3874 * field must be set.
3875 * Access: RW
3876 */
3877MLXSW_ITEM32(reg, ralue, bmp_len, 0x1C, 16, 8);
3878
3879enum mlxsw_reg_ralue_action_type {
3880 MLXSW_REG_RALUE_ACTION_TYPE_REMOTE,
3881 MLXSW_REG_RALUE_ACTION_TYPE_LOCAL,
3882 MLXSW_REG_RALUE_ACTION_TYPE_IP2ME,
3883};
3884
3885/* reg_ralue_action_type
3886 * Action Type
3887 * Indicates how the IP address is connected.
3888 * It can be connected to a local subnet through local_erif or can be
3889 * on a remote subnet connected through a next-hop router,
3890 * or transmitted to the CPU.
3891 * Reserved when entry_type = MARKER_ENTRY
3892 * Access: RW
3893 */
3894MLXSW_ITEM32(reg, ralue, action_type, 0x1C, 0, 2);
3895
3896enum mlxsw_reg_ralue_trap_action {
3897 MLXSW_REG_RALUE_TRAP_ACTION_NOP,
3898 MLXSW_REG_RALUE_TRAP_ACTION_TRAP,
3899 MLXSW_REG_RALUE_TRAP_ACTION_MIRROR_TO_CPU,
3900 MLXSW_REG_RALUE_TRAP_ACTION_MIRROR,
3901 MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR,
3902};
3903
3904/* reg_ralue_trap_action
3905 * Trap action.
3906 * For IP2ME action, only NOP and MIRROR are possible.
3907 * Access: RW
3908 */
3909MLXSW_ITEM32(reg, ralue, trap_action, 0x20, 28, 4);
3910
3911/* reg_ralue_trap_id
3912 * Trap ID to be reported to CPU.
3913 * Trap ID is RTR_INGRESS0 or RTR_INGRESS1.
3914 * For trap_action of NOP, MIRROR and DISCARD_ERROR, trap_id is reserved.
3915 * Access: RW
3916 */
3917MLXSW_ITEM32(reg, ralue, trap_id, 0x20, 0, 9);
3918
3919/* reg_ralue_adjacency_index
3920 * Points to the first entry of the group-based ECMP.
3921 * Only relevant in case of REMOTE action.
3922 * Access: RW
3923 */
3924MLXSW_ITEM32(reg, ralue, adjacency_index, 0x24, 0, 24);
3925
3926/* reg_ralue_ecmp_size
3927 * Number of sequential entries starting
3928 * from the adjacency_index (the number of ECMPs).
3929 * The valid range is 1-64, 512, 1024, 2048 and 4096.
3930 * Reserved when trap_action is TRAP or DISCARD_ERROR.
3931 * Only relevant in case of REMOTE action.
3932 * Access: RW
3933 */
3934MLXSW_ITEM32(reg, ralue, ecmp_size, 0x28, 0, 13);
3935
3936/* reg_ralue_local_erif
3937 * Egress Router Interface.
3938 * Only relevant in case of LOCAL action.
3939 * Access: RW
3940 */
3941MLXSW_ITEM32(reg, ralue, local_erif, 0x24, 0, 16);
3942
3943/* reg_ralue_v
3944 * Valid bit for the tunnel_ptr field.
3945 * If valid = 0 then trap to CPU as IP2ME trap ID.
3946 * If valid = 1 and the packet format allows NVE or IPinIP tunnel
3947 * decapsulation then tunnel decapsulation is done.
3948 * If valid = 1 and packet format does not allow NVE or IPinIP tunnel
3949 * decapsulation then trap as IP2ME trap ID.
3950 * Only relevant in case of IP2ME action.
3951 * Access: RW
3952 */
3953MLXSW_ITEM32(reg, ralue, v, 0x24, 31, 1);
3954
3955/* reg_ralue_tunnel_ptr
3956 * Tunnel Pointer for NVE or IPinIP tunnel decapsulation.
3957 * For Spectrum, pointer to KVD Linear.
3958 * Only relevant in case of IP2ME action.
3959 * Access: RW
3960 */
3961MLXSW_ITEM32(reg, ralue, tunnel_ptr, 0x24, 0, 24);
3962
3963static inline void mlxsw_reg_ralue_pack(char *payload,
3964 enum mlxsw_reg_ralxx_protocol protocol,
3965 enum mlxsw_reg_ralue_op op,
3966 u16 virtual_router, u8 prefix_len)
3967{
3968 MLXSW_REG_ZERO(ralue, payload);
3969 mlxsw_reg_ralue_protocol_set(payload, protocol);
3970 mlxsw_reg_ralue_op_set(payload, op);
3971 mlxsw_reg_ralue_virtual_router_set(payload, virtual_router);
3972 mlxsw_reg_ralue_prefix_len_set(payload, prefix_len);
3973 mlxsw_reg_ralue_entry_type_set(payload,
3974 MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY);
3975 mlxsw_reg_ralue_bmp_len_set(payload, prefix_len);
3976}
3976
3977static inline void mlxsw_reg_ralue_pack4(char *payload,
3978 enum mlxsw_reg_ralxx_protocol protocol,
3979 enum mlxsw_reg_ralue_op op,
3980 u16 virtual_router, u8 prefix_len,
3981 u32 dip)
3982{
3983 mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
3984 mlxsw_reg_ralue_dip4_set(payload, dip);
3985}
3986
3987static inline void
3988mlxsw_reg_ralue_act_remote_pack(char *payload,
3989 enum mlxsw_reg_ralue_trap_action trap_action,
3990 u16 trap_id, u32 adjacency_index, u16 ecmp_size)
3991{
3992 mlxsw_reg_ralue_action_type_set(payload,
3993 MLXSW_REG_RALUE_ACTION_TYPE_REMOTE);
3994 mlxsw_reg_ralue_trap_action_set(payload, trap_action);
3995 mlxsw_reg_ralue_trap_id_set(payload, trap_id);
3996 mlxsw_reg_ralue_adjacency_index_set(payload, adjacency_index);
3997 mlxsw_reg_ralue_ecmp_size_set(payload, ecmp_size);
3998}
3999
4000static inline void
4001mlxsw_reg_ralue_act_local_pack(char *payload,
4002 enum mlxsw_reg_ralue_trap_action trap_action,
4003 u16 trap_id, u16 local_erif)
4004{
4005 mlxsw_reg_ralue_action_type_set(payload,
4006 MLXSW_REG_RALUE_ACTION_TYPE_LOCAL);
4007 mlxsw_reg_ralue_trap_action_set(payload, trap_action);
4008 mlxsw_reg_ralue_trap_id_set(payload, trap_id);
4009 mlxsw_reg_ralue_local_erif_set(payload, local_erif);
4010}
4011
4012static inline void
4013mlxsw_reg_ralue_act_ip2me_pack(char *payload)
4014{
4015 mlxsw_reg_ralue_action_type_set(payload,
4016 MLXSW_REG_RALUE_ACTION_TYPE_IP2ME);
4017}
4018
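
A hedged usage sketch: installing 192.168.0.0/24 in virtual router 0 as a local route out of RIF 1; the address and indices are example values:

static int example_route_write(struct mlxsw_core *mlxsw_core)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];

	mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			      MLXSW_REG_RALUE_OP_WRITE_WRITE,
			      0 /* virtual_router */, 24,
			      0xc0a80000 /* 192.168.0.0 */);
	mlxsw_reg_ralue_act_local_pack(ralue_pl,
				       MLXSW_REG_RALUE_TRAP_ACTION_NOP,
				       0 /* trap_id */, 1 /* local_erif */);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ralue), ralue_pl);
}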
4019/* RAUHT - Router Algorithmic LPM Unicast Host Table Register
4020 * ----------------------------------------------------------
4021 * The RAUHT register is used to configure and query the Unicast Host table in
4022 * devices that implement the Algorithmic LPM.
4023 */
4024#define MLXSW_REG_RAUHT_ID 0x8014
4025#define MLXSW_REG_RAUHT_LEN 0x74
4026
4027static const struct mlxsw_reg_info mlxsw_reg_rauht = {
4028 .id = MLXSW_REG_RAUHT_ID,
4029 .len = MLXSW_REG_RAUHT_LEN,
4030};
4031
4032enum mlxsw_reg_rauht_type {
4033 MLXSW_REG_RAUHT_TYPE_IPV4,
4034 MLXSW_REG_RAUHT_TYPE_IPV6,
4035};
4036
4037/* reg_rauht_type
4038 * Access: Index
4039 */
4040MLXSW_ITEM32(reg, rauht, type, 0x00, 24, 2);
4041
4042enum mlxsw_reg_rauht_op {
4043 MLXSW_REG_RAUHT_OP_QUERY_READ = 0,
4044 /* Read operation */
4045 MLXSW_REG_RAUHT_OP_QUERY_CLEAR_ON_READ = 1,
4046 /* Clear on read operation. Used to read entry and clear
4047 * activity bit.
4048 */
4049 MLXSW_REG_RAUHT_OP_WRITE_ADD = 0,
4050 /* Add. Used to write a new entry to the table. All R/W fields are
4051 * relevant for new entry. Activity bit is set for new entries.
4052 */
4053 MLXSW_REG_RAUHT_OP_WRITE_UPDATE = 1,
4054 /* Update action. Used to update an existing route entry and
4055 * only update the following fields:
4056 * trap_action, trap_id, mac, counter_set_type, counter_index
4057 */
4058 MLXSW_REG_RAUHT_OP_WRITE_CLEAR_ACTIVITY = 2,
4059 /* Clear activity. The activity bit is cleared for the entry. */
4060 MLXSW_REG_RAUHT_OP_WRITE_DELETE = 3,
4061 /* Delete entry */
4062 MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL = 4,
4063 /* Delete all host entries on a RIF. In this command, dip
4064 * field is reserved.
4065 */
4066};
4067
4068/* reg_rauht_op
4069 * Access: OP
4070 */
4071MLXSW_ITEM32(reg, rauht, op, 0x00, 20, 3);
4072
4073/* reg_rauht_a
4074 * Activity. Set for new entries. Set if a packet lookup has hit on
4075 * the specific entry.
4076 * To clear the a bit, use "clear activity" op.
4077 * Enabled by activity_dis in RGCR
4078 * Access: RO
4079 */
4080MLXSW_ITEM32(reg, rauht, a, 0x00, 16, 1);
4081
4082/* reg_rauht_rif
4083 * Router Interface
4084 * Access: Index
4085 */
4086MLXSW_ITEM32(reg, rauht, rif, 0x00, 0, 16);
4087
4088/* reg_rauht_dip*
4089 * Destination address.
4090 * Access: Index
4091 */
4092MLXSW_ITEM32(reg, rauht, dip4, 0x1C, 0x0, 32);
4093
4094enum mlxsw_reg_rauht_trap_action {
4095 MLXSW_REG_RAUHT_TRAP_ACTION_NOP,
4096 MLXSW_REG_RAUHT_TRAP_ACTION_TRAP,
4097 MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR_TO_CPU,
4098 MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR,
4099 MLXSW_REG_RAUHT_TRAP_ACTION_DISCARD_ERRORS,
4100};
4101
4102/* reg_rauht_trap_action
4103 * Access: RW
4104 */
4105MLXSW_ITEM32(reg, rauht, trap_action, 0x60, 28, 4);
4106
4107enum mlxsw_reg_rauht_trap_id {
4108 MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS0,
4109 MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS1,
4110};
4111
4112/* reg_rauht_trap_id
4113 * Trap ID to be reported to CPU.
4114 * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1.
4115 * For trap_action of NOP, MIRROR and DISCARD_ERROR,
4116 * trap_id is reserved.
4117 * Access: RW
4118 */
4119MLXSW_ITEM32(reg, rauht, trap_id, 0x60, 0, 9);
4120
4121/* reg_rauht_counter_set_type
4122 * Counter set type for flow counters
4123 * Access: RW
4124 */
4125MLXSW_ITEM32(reg, rauht, counter_set_type, 0x68, 24, 8);
4126
4127/* reg_rauht_counter_index
4128 * Counter index for flow counters
4129 * Access: RW
4130 */
4131MLXSW_ITEM32(reg, rauht, counter_index, 0x68, 0, 24);
4132
4133/* reg_rauht_mac
4134 * MAC address.
4135 * Access: RW
4136 */
4137MLXSW_ITEM_BUF(reg, rauht, mac, 0x6E, 6);
4138
4139static inline void mlxsw_reg_rauht_pack(char *payload,
4140 enum mlxsw_reg_rauht_op op, u16 rif,
4141 const char *mac)
4142{
4143 MLXSW_REG_ZERO(rauht, payload);
4144 mlxsw_reg_rauht_op_set(payload, op);
4145 mlxsw_reg_rauht_rif_set(payload, rif);
4146 mlxsw_reg_rauht_mac_memcpy_to(payload, mac);
4147}
4148
4149static inline void mlxsw_reg_rauht_pack4(char *payload,
4150 enum mlxsw_reg_rauht_op op, u16 rif,
4151 const char *mac, u32 dip)
4152{
4153 mlxsw_reg_rauht_pack(payload, op, rif, mac);
4154 mlxsw_reg_rauht_dip4_set(payload, dip);
4155}
4156
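
A usage sketch: adding one IPv4 host entry (neighbour) on RIF 1; the destination IP is an example value:

static int example_neigh_add(struct mlxsw_core *mlxsw_core, const char *mac)
{
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD,
			      1 /* rif */, mac,
			      0xc0a80001 /* 192.168.0.1 */);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(rauht), rauht_pl);
}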
4157/* RALEU - Router Algorithmic LPM ECMP Update Register
4158 * ---------------------------------------------------
4159 * The register enables updating the ECMP section in the action for multiple
4160 * LPM Unicast entries in a single operation. The update is executed to
4161 * all entries of a {virtual router, protocol} tuple using the same ECMP group.
4162 */
4163#define MLXSW_REG_RALEU_ID 0x8015
4164#define MLXSW_REG_RALEU_LEN 0x28
4165
4166static const struct mlxsw_reg_info mlxsw_reg_raleu = {
4167 .id = MLXSW_REG_RALEU_ID,
4168 .len = MLXSW_REG_RALEU_LEN,
4169};
4170
4171/* reg_raleu_protocol
4172 * Protocol.
4173 * Access: Index
4174 */
4175MLXSW_ITEM32(reg, raleu, protocol, 0x00, 24, 4);
4176
4177/* reg_raleu_virtual_router
4178 * Virtual Router ID
4179 * Range is 0..cap_max_virtual_routers-1
4180 * Access: Index
4181 */
4182MLXSW_ITEM32(reg, raleu, virtual_router, 0x00, 0, 16);
4183
4184/* reg_raleu_adjacency_index
4185 * Adjacency Index used for matching on the existing entries.
4186 * Access: Index
4187 */
4188MLXSW_ITEM32(reg, raleu, adjacency_index, 0x10, 0, 24);
4189
4190/* reg_raleu_ecmp_size
4191 * ECMP Size used for matching on the existing entries.
4192 * Access: Index
4193 */
4194MLXSW_ITEM32(reg, raleu, ecmp_size, 0x14, 0, 13);
4195
4196/* reg_raleu_new_adjacency_index
4197 * New Adjacency Index.
4198 * Access: WO
4199 */
4200MLXSW_ITEM32(reg, raleu, new_adjacency_index, 0x20, 0, 24);
4201
4202/* reg_raleu_new_ecmp_size
4203 * New ECMP Size.
4204 * Access: WO
4205 */
4206MLXSW_ITEM32(reg, raleu, new_ecmp_size, 0x24, 0, 13);
4207
4208static inline void mlxsw_reg_raleu_pack(char *payload,
4209 enum mlxsw_reg_ralxx_protocol protocol,
4210 u16 virtual_router,
4211 u32 adjacency_index, u16 ecmp_size,
4212 u32 new_adjacency_index,
4213 u16 new_ecmp_size)
4214{
4215 MLXSW_REG_ZERO(raleu, payload);
4216 mlxsw_reg_raleu_protocol_set(payload, protocol);
4217 mlxsw_reg_raleu_virtual_router_set(payload, virtual_router);
4218 mlxsw_reg_raleu_adjacency_index_set(payload, adjacency_index);
4219 mlxsw_reg_raleu_ecmp_size_set(payload, ecmp_size);
4220 mlxsw_reg_raleu_new_adjacency_index_set(payload, new_adjacency_index);
4221 mlxsw_reg_raleu_new_ecmp_size_set(payload, new_ecmp_size);
4222}
4223
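
A usage sketch: repointing every route of a {virtual router, protocol} tuple that currently uses one ECMP group to another, in a single operation:

static int example_ecmp_repoint(struct mlxsw_core *mlxsw_core, u16 vr,
				u32 old_adj, u16 old_size,
				u32 new_adj, u16 new_size)
{
	char raleu_pl[MLXSW_REG_RALEU_LEN];

	mlxsw_reg_raleu_pack(raleu_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4, vr,
			     old_adj, old_size, new_adj, new_size);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(raleu), raleu_pl);
}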
4224/* RAUHTD - Router Algorithmic LPM Unicast Host Table Dump Register
4225 * ----------------------------------------------------------------
4226 * The RAUHTD register allows dumping entries from the Router Unicast Host
4227 * Table. For a given session an entry is dumped no more than one time. The
4228 * first RAUHTD access after reset is a new session. A session ends when the
4229 * num_rec response is smaller than the num_rec request, or, for IPv4,
4230 * when the num_entries is smaller than 4. The clear activity affects the
4231 * current session, or the last session if a new session has not started.
4232 */
4233#define MLXSW_REG_RAUHTD_ID 0x8018
4234#define MLXSW_REG_RAUHTD_BASE_LEN 0x20
4235#define MLXSW_REG_RAUHTD_REC_LEN 0x20
4236#define MLXSW_REG_RAUHTD_REC_MAX_NUM 32
4237#define MLXSW_REG_RAUHTD_LEN (MLXSW_REG_RAUHTD_BASE_LEN + \
4238 MLXSW_REG_RAUHTD_REC_MAX_NUM * MLXSW_REG_RAUHTD_REC_LEN)
4239#define MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC 4
4240
4241static const struct mlxsw_reg_info mlxsw_reg_rauhtd = {
4242 .id = MLXSW_REG_RAUHTD_ID,
4243 .len = MLXSW_REG_RAUHTD_LEN,
4244};
4245
4246#define MLXSW_REG_RAUHTD_FILTER_A BIT(0)
4247#define MLXSW_REG_RAUHTD_FILTER_RIF BIT(3)
4248
4249/* reg_rauhtd_filter_fields
4250 * if a bit is '0' then the relevant field is ignored and dump is done
4251 * regardless of the field value
4252 * Bit0 - filter by activity: entry_a
4253 * Bit3 - filter by entry rif: entry_rif
4254 * Access: Index
4255 */
4256MLXSW_ITEM32(reg, rauhtd, filter_fields, 0x00, 0, 8);
4257
4258enum mlxsw_reg_rauhtd_op {
4259 MLXSW_REG_RAUHTD_OP_DUMP,
4260 MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR,
4261};
4262
4263/* reg_rauhtd_op
4264 * Access: OP
4265 */
4266MLXSW_ITEM32(reg, rauhtd, op, 0x04, 24, 2);
4267
4268/* reg_rauhtd_num_rec
4269 * At request: number of records requested
4270 * At response: number of records dumped
4271 * For IPv4, each record has 4 entries at request and up to 4 entries
4272 * at response
4273 * Range is 0..MLXSW_REG_RAUHTD_REC_MAX_NUM
4274 * Access: Index
4275 */
4276MLXSW_ITEM32(reg, rauhtd, num_rec, 0x04, 0, 8);
4277
4278/* reg_rauhtd_entry_a
4279 * Dump only if activity has value of entry_a
4280 * Reserved if filter_fields bit0 is '0'
4281 * Access: Index
4282 */
4283MLXSW_ITEM32(reg, rauhtd, entry_a, 0x08, 16, 1);
4284
4285enum mlxsw_reg_rauhtd_type {
4286 MLXSW_REG_RAUHTD_TYPE_IPV4,
4287 MLXSW_REG_RAUHTD_TYPE_IPV6,
4288};
4289
4290/* reg_rauhtd_type
4291 * Dump only if record type is:
4292 * 0 - IPv4
4293 * 1 - IPv6
4294 * Access: Index
4295 */
4296MLXSW_ITEM32(reg, rauhtd, type, 0x08, 0, 4);
4297
4298/* reg_rauhtd_entry_rif
4299 * Dump only if RIF has value of entry_rif
4300 * Reserved if filter_fields bit3 is '0'
4301 * Access: Index
4302 */
4303MLXSW_ITEM32(reg, rauhtd, entry_rif, 0x0C, 0, 16);
4304
4305static inline void mlxsw_reg_rauhtd_pack(char *payload,
4306 enum mlxsw_reg_rauhtd_type type)
4307{
4308 MLXSW_REG_ZERO(rauhtd, payload);
4309 mlxsw_reg_rauhtd_filter_fields_set(payload, MLXSW_REG_RAUHTD_FILTER_A);
4310 mlxsw_reg_rauhtd_op_set(payload, MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR);
4311 mlxsw_reg_rauhtd_num_rec_set(payload, MLXSW_REG_RAUHTD_REC_MAX_NUM);
4312 mlxsw_reg_rauhtd_entry_a_set(payload, 1);
4313 mlxsw_reg_rauhtd_type_set(payload, type);
4314}
4315
4316/* reg_rauhtd_ipv4_rec_num_entries
4317 * Number of valid entries in this record:
4318 * 0 - 1 valid entry
4319 * 1 - 2 valid entries
4320 * 2 - 3 valid entries
4321 * 3 - 4 valid entries
4322 * Access: RO
4323 */
4324MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_rec_num_entries,
4325 MLXSW_REG_RAUHTD_BASE_LEN, 28, 2,
4326 MLXSW_REG_RAUHTD_REC_LEN, 0x00, false);
4327
4328/* reg_rauhtd_rec_type
4329 * Record type.
4330 * 0 - IPv4
4331 * 1 - IPv6
4332 * Access: RO
4333 */
4334MLXSW_ITEM32_INDEXED(reg, rauhtd, rec_type, MLXSW_REG_RAUHTD_BASE_LEN, 24, 2,
4335 MLXSW_REG_RAUHTD_REC_LEN, 0x00, false);
4336
4337#define MLXSW_REG_RAUHTD_IPV4_ENT_LEN 0x8
4338
4339/* reg_rauhtd_ipv4_ent_a
4340 * Activity. Set for new entries. Set if a packet lookup has hit on the
4341 * specific entry.
4342 * Access: RO
4343 */
4344MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_a, MLXSW_REG_RAUHTD_BASE_LEN, 16, 1,
4345 MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false);
4346
4347/* reg_rauhtd_ipv4_ent_rif
4348 * Router interface.
4349 * Access: RO
4350 */
4351MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_rif, MLXSW_REG_RAUHTD_BASE_LEN, 0,
4352 16, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false);
4353
4354/* reg_rauhtd_ipv4_ent_dip
4355 * Destination IPv4 address.
4356 * Access: RO
4357 */
4358MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_dip, MLXSW_REG_RAUHTD_BASE_LEN, 0,
4359 32, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x04, false);
4360
4361static inline void mlxsw_reg_rauhtd_ent_ipv4_unpack(char *payload,
4362 int ent_index, u16 *p_rif,
4363 u32 *p_dip)
4364{
4365 *p_rif = mlxsw_reg_rauhtd_ipv4_ent_rif_get(payload, ent_index);
4366 *p_dip = mlxsw_reg_rauhtd_ipv4_ent_dip_get(payload, ent_index);
4367}
4368
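
A hedged sketch of one dump-and-clear pass over IPv4 host entries, using the mlxsw_reg_query() helper from the mlxsw core; a full implementation would honor the per-record num_entries field instead of assuming fully populated records:

static int example_neigh_activity_dump(struct mlxsw_core *mlxsw_core)
{
	char *rauhtd_pl;
	int i, num_rec, err;

	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
	if (!rauhtd_pl)
		return -ENOMEM;

	mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(rauhtd), rauhtd_pl);
	if (err)
		goto out;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	for (i = 0; i < num_rec * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC; i++) {
		u16 rif;
		u32 dip;

		/* assumes fully populated IPv4 records; see num_entries */
		mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, i, &rif, &dip);
		/* mark neighbour (rif, dip) as recently active here */
	}
out:
	kfree(rauhtd_pl);
	return err;
}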
3189/* MFCR - Management Fan Control Register 4369/* MFCR - Management Fan Control Register
3190 * -------------------------------------- 4370 * --------------------------------------
3191 * This register controls the settings of the Fan Speed PWM mechanism. 4371 * This register controls the settings of the Fan Speed PWM mechanism.
@@ -3924,6 +5104,26 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
3924 return "HTGT"; 5104 return "HTGT";
3925 case MLXSW_REG_HPKT_ID: 5105 case MLXSW_REG_HPKT_ID:
3926 return "HPKT"; 5106 return "HPKT";
5107 case MLXSW_REG_RGCR_ID:
5108 return "RGCR";
5109 case MLXSW_REG_RITR_ID:
5110 return "RITR";
5111 case MLXSW_REG_RATR_ID:
5112 return "RATR";
5113 case MLXSW_REG_RALTA_ID:
5114 return "RALTA";
5115 case MLXSW_REG_RALST_ID:
5116 return "RALST";
5117 case MLXSW_REG_RALTB_ID:
5118 return "RALTB";
5119 case MLXSW_REG_RALUE_ID:
5120 return "RALUE";
5121 case MLXSW_REG_RAUHT_ID:
5122 return "RAUHT";
5123 case MLXSW_REG_RALEU_ID:
5124 return "RALEU";
5125 case MLXSW_REG_RAUHTD_ID:
5126 return "RAUHTD";
3927 case MLXSW_REG_MFCR_ID: 5127 case MLXSW_REG_MFCR_ID:
3928 return "MFCR"; 5128 return "MFCR";
3929 case MLXSW_REG_MFSC_ID: 5129 case MLXSW_REG_MFSC_ID:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index d23948b88962..c812513e079d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -51,6 +51,7 @@
51#include <linux/list.h> 51#include <linux/list.h>
52#include <linux/notifier.h> 52#include <linux/notifier.h>
53#include <linux/dcbnl.h> 53#include <linux/dcbnl.h>
54#include <linux/inetdevice.h>
54#include <net/switchdev.h> 55#include <net/switchdev.h>
55#include <generated/utsrelease.h> 56#include <generated/utsrelease.h>
56 57
@@ -210,23 +211,6 @@ static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
210 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); 211 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
211} 212}
212 213
213static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
214 u16 vid, enum mlxsw_reg_spms_state state)
215{
216 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
217 char *spms_pl;
218 int err;
219
220 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
221 if (!spms_pl)
222 return -ENOMEM;
223 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
224 mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
225 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
226 kfree(spms_pl);
227 return err;
228}
229
230static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) 214static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
231{ 215{
232 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 216 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -409,7 +393,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
409 } 393 }
410 394
411 mlxsw_sp_txhdr_construct(skb, &tx_info); 395 mlxsw_sp_txhdr_construct(skb, &tx_info);
 412 len = skb->len; 396 /* The TX header is consumed by the HW on the way out, so its bytes
 397 * should not be counted as sent.
398 */
399 len = skb->len - MLXSW_TXHDR_LEN;
400
413 /* Due to a race we might fail here because of a full queue. In that 401 /* Due to a race we might fail here because of a full queue. In that
414 * unlikely case we simply drop the packet. 402 * unlikely case we simply drop the packet.
415 */ 403 */
@@ -633,87 +621,6 @@ static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
633 return 0; 621 return 0;
634} 622}
635 623
636static struct mlxsw_sp_fid *
637mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
638{
639 struct mlxsw_sp_fid *f;
640
641 list_for_each_entry(f, &mlxsw_sp->port_vfids.list, list) {
642 if (f->vid == vid)
643 return f;
644 }
645
646 return NULL;
647}
648
649static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
650{
651 return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
652 MLXSW_SP_VFID_PORT_MAX);
653}
654
655static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
656{
657 char sfmr_pl[MLXSW_REG_SFMR_LEN];
658
659 mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
660 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
661}
662
663static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
664
665static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
666 u16 vid)
667{
668 struct device *dev = mlxsw_sp->bus_info->dev;
669 struct mlxsw_sp_fid *f;
670 u16 vfid, fid;
671 int err;
672
673 vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
674 if (vfid == MLXSW_SP_VFID_PORT_MAX) {
675 dev_err(dev, "No available vFIDs\n");
676 return ERR_PTR(-ERANGE);
677 }
678
679 fid = mlxsw_sp_vfid_to_fid(vfid);
680 err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
681 if (err) {
682 dev_err(dev, "Failed to create FID=%d\n", fid);
683 return ERR_PTR(err);
684 }
685
686 f = kzalloc(sizeof(*f), GFP_KERNEL);
687 if (!f)
688 goto err_allocate_vfid;
689
690 f->leave = mlxsw_sp_vport_vfid_leave;
691 f->fid = fid;
692 f->vid = vid;
693
694 list_add(&f->list, &mlxsw_sp->port_vfids.list);
695 set_bit(vfid, mlxsw_sp->port_vfids.mapped);
696
697 return f;
698
699err_allocate_vfid:
700 mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
701 return ERR_PTR(-ENOMEM);
702}
703
704static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
705 struct mlxsw_sp_fid *f)
706{
707 u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
708
709 clear_bit(vfid, mlxsw_sp->port_vfids.mapped);
710 list_del(&f->list);
711
712 mlxsw_sp_vfid_op(mlxsw_sp, f->fid, false);
713
714 kfree(f);
715}
716
717static struct mlxsw_sp_port * 624static struct mlxsw_sp_port *
718mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 625mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
719{ 626{
@@ -746,72 +653,12 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
746 kfree(mlxsw_sp_vport); 653 kfree(mlxsw_sp_vport);
747} 654}
748 655
749static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
750 bool valid)
751{
752 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
753 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
754
755 return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
756 vid);
757}
758
759static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport)
760{
761 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
762 struct mlxsw_sp_fid *f;
763 int err;
764
765 f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, vid);
766 if (!f) {
767 f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, vid);
768 if (IS_ERR(f))
769 return PTR_ERR(f);
770 }
771
772 if (!f->ref_count) {
773 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
774 if (err)
775 goto err_vport_flood_set;
776 }
777
778 err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
779 if (err)
780 goto err_vport_fid_map;
781
782 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
783 f->ref_count++;
784
785 return 0;
786
787err_vport_fid_map:
788 if (!f->ref_count)
789 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
790err_vport_flood_set:
791 if (!f->ref_count)
792 mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
793 return err;
794}
795
796static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
797{
798 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
799
800 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
801
802 mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
803
804 if (--f->ref_count == 0) {
805 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
806 mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
807 }
808}
809
810int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, 656int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
811 u16 vid) 657 u16 vid)
812{ 658{
813 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 659 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
814 struct mlxsw_sp_port *mlxsw_sp_vport; 660 struct mlxsw_sp_port *mlxsw_sp_vport;
661 bool untagged = vid == 1;
815 int err; 662 int err;
816 663
817 /* VLAN 0 is added to HW filter when device goes up, but it is 664 /* VLAN 0 is added to HW filter when device goes up, but it is
@@ -843,41 +690,24 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
843 } 690 }
844 } 691 }
845 692
846 err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);
847 if (err) {
848 netdev_err(dev, "Failed to join vFID\n");
849 goto err_vport_vfid_join;
850 }
851
852 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); 693 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
853 if (err) { 694 if (err) {
854 netdev_err(dev, "Failed to disable learning for VID=%d\n", vid); 695 netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
855 goto err_port_vid_learning_set; 696 goto err_port_vid_learning_set;
856 } 697 }
857 698
858 err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false); 699 err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
859 if (err) { 700 if (err) {
860 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", 701 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
861 vid); 702 vid);
862 goto err_port_add_vid; 703 goto err_port_add_vid;
863 } 704 }
864 705
865 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
866 MLXSW_REG_SPMS_STATE_FORWARDING);
867 if (err) {
868 netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
869 goto err_port_stp_state_set;
870 }
871
872 return 0; 706 return 0;
873 707
874err_port_stp_state_set:
875 mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
876err_port_add_vid: 708err_port_add_vid:
877 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); 709 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
878err_port_vid_learning_set: 710err_port_vid_learning_set:
879 mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
880err_vport_vfid_join:
881 if (list_is_singular(&mlxsw_sp_port->vports_list)) 711 if (list_is_singular(&mlxsw_sp_port->vports_list))
882 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); 712 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
883err_port_vp_mode_trans: 713err_port_vp_mode_trans:
@@ -885,8 +715,8 @@ err_port_vp_mode_trans:
885 return err; 715 return err;
886} 716}
887 717
888int mlxsw_sp_port_kill_vid(struct net_device *dev, 718static int mlxsw_sp_port_kill_vid(struct net_device *dev,
889 __be16 __always_unused proto, u16 vid) 719 __be16 __always_unused proto, u16 vid)
890{ 720{
891 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 721 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
892 struct mlxsw_sp_port *mlxsw_sp_vport; 722 struct mlxsw_sp_port *mlxsw_sp_vport;
@@ -905,13 +735,6 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
905 return 0; 735 return 0;
906 } 736 }
907 737
908 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
909 MLXSW_REG_SPMS_STATE_DISCARDING);
910 if (err) {
911 netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
912 return err;
913 }
914
915 err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); 738 err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
916 if (err) { 739 if (err) {
917 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", 740 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
@@ -980,6 +803,8 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
980 .ndo_get_stats64 = mlxsw_sp_port_get_stats64, 803 .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
981 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, 804 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
982 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, 805 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
806 .ndo_neigh_construct = mlxsw_sp_router_neigh_construct,
807 .ndo_neigh_destroy = mlxsw_sp_router_neigh_destroy,
983 .ndo_fdb_add = switchdev_port_fdb_add, 808 .ndo_fdb_add = switchdev_port_fdb_add,
984 .ndo_fdb_del = switchdev_port_fdb_del, 809 .ndo_fdb_del = switchdev_port_fdb_del,
985 .ndo_fdb_dump = switchdev_port_fdb_dump, 810 .ndo_fdb_dump = switchdev_port_fdb_dump,
@@ -1840,23 +1665,6 @@ err_port_active_vlans_alloc:
1840 return err; 1665 return err;
1841} 1666}
1842 1667
1843static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
1844{
1845 struct net_device *dev = mlxsw_sp_port->dev;
1846 struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;
1847
1848 list_for_each_entry_safe(mlxsw_sp_vport, tmp,
1849 &mlxsw_sp_port->vports_list, vport.list) {
1850 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
1851
1852 /* vPorts created for VLAN devices should already be gone
1853 * by now, since we unregistered the port netdev.
1854 */
1855 WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
1856 mlxsw_sp_port_kill_vid(dev, 0, vid);
1857 }
1858}
1859
1860static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) 1668static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1861{ 1669{
1862 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; 1670 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
@@ -1867,13 +1675,14 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1867 mlxsw_core_port_fini(&mlxsw_sp_port->core_port); 1675 mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
1868 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ 1676 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
1869 mlxsw_sp_port_dcb_fini(mlxsw_sp_port); 1677 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
1870 mlxsw_sp_port_vports_fini(mlxsw_sp_port); 1678 mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
1871 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); 1679 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
1872 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); 1680 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
1873 mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); 1681 mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
1874 free_percpu(mlxsw_sp_port->pcpu_stats); 1682 free_percpu(mlxsw_sp_port->pcpu_stats);
1875 kfree(mlxsw_sp_port->untagged_vlans); 1683 kfree(mlxsw_sp_port->untagged_vlans);
1876 kfree(mlxsw_sp_port->active_vlans); 1684 kfree(mlxsw_sp_port->active_vlans);
1685 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
1877 free_netdev(mlxsw_sp_port->dev); 1686 free_netdev(mlxsw_sp_port->dev);
1878} 1687}
1879 1688
@@ -2110,11 +1919,8 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2110 1919
2111 local_port = mlxsw_reg_pude_local_port_get(pude_pl); 1920 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2112 mlxsw_sp_port = mlxsw_sp->ports[local_port]; 1921 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2113 if (!mlxsw_sp_port) { 1922 if (!mlxsw_sp_port)
2114 dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
2115 local_port);
2116 return; 1923 return;
2117 }
2118 1924
2119 status = mlxsw_reg_pude_oper_status_get(pude_pl); 1925 status = mlxsw_reg_pude_oper_status_get(pude_pl);
2120 if (status == MLXSW_PORT_OPER_STATUS_UP) { 1926 if (status == MLXSW_PORT_OPER_STATUS_UP) {
@@ -2269,6 +2075,31 @@ static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
2269 .local_port = MLXSW_PORT_DONT_CARE, 2075 .local_port = MLXSW_PORT_DONT_CARE,
2270 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT, 2076 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
2271 }, 2077 },
2078 {
2079 .func = mlxsw_sp_rx_listener_func,
2080 .local_port = MLXSW_PORT_DONT_CARE,
2081 .trap_id = MLXSW_TRAP_ID_ARPBC,
2082 },
2083 {
2084 .func = mlxsw_sp_rx_listener_func,
2085 .local_port = MLXSW_PORT_DONT_CARE,
2086 .trap_id = MLXSW_TRAP_ID_ARPUC,
2087 },
2088 {
2089 .func = mlxsw_sp_rx_listener_func,
2090 .local_port = MLXSW_PORT_DONT_CARE,
2091 .trap_id = MLXSW_TRAP_ID_IP2ME,
2092 },
2093 {
2094 .func = mlxsw_sp_rx_listener_func,
2095 .local_port = MLXSW_PORT_DONT_CARE,
2096 .trap_id = MLXSW_TRAP_ID_RTR_INGRESS0,
2097 },
2098 {
2099 .func = mlxsw_sp_rx_listener_func,
2100 .local_port = MLXSW_PORT_DONT_CARE,
2101 .trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4,
2102 },
2272}; 2103};
2273 2104
2274static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) 2105static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
@@ -2309,7 +2140,7 @@ err_rx_trap_set:
2309 mlxsw_sp); 2140 mlxsw_sp);
2310err_rx_listener_register: 2141err_rx_listener_register:
2311 for (i--; i >= 0; i--) { 2142 for (i--; i >= 0; i--) {
2312 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, 2143 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
2313 mlxsw_sp_rx_listener[i].trap_id); 2144 mlxsw_sp_rx_listener[i].trap_id);
2314 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 2145 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2315 2146
@@ -2326,7 +2157,7 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2326 int i; 2157 int i;
2327 2158
2328 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) { 2159 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2329 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, 2160 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
2330 mlxsw_sp_rx_listener[i].trap_id); 2161 mlxsw_sp_rx_listener[i].trap_id);
2331 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); 2162 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2332 2163
@@ -2406,8 +2237,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
2406 mlxsw_sp->core = mlxsw_core; 2237 mlxsw_sp->core = mlxsw_core;
2407 mlxsw_sp->bus_info = mlxsw_bus_info; 2238 mlxsw_sp->bus_info = mlxsw_bus_info;
2408 INIT_LIST_HEAD(&mlxsw_sp->fids); 2239 INIT_LIST_HEAD(&mlxsw_sp->fids);
2409 INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list); 2240 INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
2410 INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
2411 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list); 2241 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
2412 2242
2413 err = mlxsw_sp_base_mac_get(mlxsw_sp); 2243 err = mlxsw_sp_base_mac_get(mlxsw_sp);
@@ -2416,16 +2246,10 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
2416 return err; 2246 return err;
2417 } 2247 }
2418 2248
2419 err = mlxsw_sp_ports_create(mlxsw_sp);
2420 if (err) {
2421 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
2422 return err;
2423 }
2424
2425 err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE); 2249 err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2426 if (err) { 2250 if (err) {
2427 dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n"); 2251 dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
2428 goto err_event_register; 2252 return err;
2429 } 2253 }
2430 2254
2431 err = mlxsw_sp_traps_init(mlxsw_sp); 2255 err = mlxsw_sp_traps_init(mlxsw_sp);
@@ -2458,8 +2282,24 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
2458 goto err_switchdev_init; 2282 goto err_switchdev_init;
2459 } 2283 }
2460 2284
2285 err = mlxsw_sp_router_init(mlxsw_sp);
2286 if (err) {
2287 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
2288 goto err_router_init;
2289 }
2290
2291 err = mlxsw_sp_ports_create(mlxsw_sp);
2292 if (err) {
2293 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
2294 goto err_ports_create;
2295 }
2296
2461 return 0; 2297 return 0;
2462 2298
2299err_ports_create:
2300 mlxsw_sp_router_fini(mlxsw_sp);
2301err_router_init:
2302 mlxsw_sp_switchdev_fini(mlxsw_sp);
2463err_switchdev_init: 2303err_switchdev_init:
2464err_lag_init: 2304err_lag_init:
2465 mlxsw_sp_buffers_fini(mlxsw_sp); 2305 mlxsw_sp_buffers_fini(mlxsw_sp);
@@ -2468,21 +2308,24 @@ err_flood_init:
2468 mlxsw_sp_traps_fini(mlxsw_sp); 2308 mlxsw_sp_traps_fini(mlxsw_sp);
2469err_rx_listener_register: 2309err_rx_listener_register:
2470 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); 2310 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2471err_event_register:
2472 mlxsw_sp_ports_remove(mlxsw_sp);
2473 return err; 2311 return err;
2474} 2312}
2475 2313
2476static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) 2314static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
2477{ 2315{
2478 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); 2316 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2317 int i;
2479 2318
2319 mlxsw_sp_ports_remove(mlxsw_sp);
2320 mlxsw_sp_router_fini(mlxsw_sp);
2480 mlxsw_sp_switchdev_fini(mlxsw_sp); 2321 mlxsw_sp_switchdev_fini(mlxsw_sp);
2481 mlxsw_sp_buffers_fini(mlxsw_sp); 2322 mlxsw_sp_buffers_fini(mlxsw_sp);
2482 mlxsw_sp_traps_fini(mlxsw_sp); 2323 mlxsw_sp_traps_fini(mlxsw_sp);
2483 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); 2324 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2484 mlxsw_sp_ports_remove(mlxsw_sp); 2325 WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
2485 WARN_ON(!list_empty(&mlxsw_sp->fids)); 2326 WARN_ON(!list_empty(&mlxsw_sp->fids));
2327 for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
2328 WARN_ON_ONCE(mlxsw_sp->rifs[i]);
2486} 2329}
2487 2330
2488static struct mlxsw_config_profile mlxsw_sp_config_profile = { 2331static struct mlxsw_config_profile mlxsw_sp_config_profile = {
@@ -2513,6 +2356,10 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
2513 .max_ib_mc = 0, 2356 .max_ib_mc = 0,
2514 .used_max_pkey = 1, 2357 .used_max_pkey = 1,
2515 .max_pkey = 0, 2358 .max_pkey = 0,
2359 .used_kvd_sizes = 1,
2360 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
2361 .kvd_hash_single_size = MLXSW_SP_KVD_HASH_SINGLE_SIZE,
2362 .kvd_hash_double_size = MLXSW_SP_KVD_HASH_DOUBLE_SIZE,
2516 .swid_config = { 2363 .swid_config = {
2517 { 2364 {
2518 .used_type = 1, 2365 .used_type = 1,
@@ -2544,6 +2391,559 @@ static struct mlxsw_driver mlxsw_sp_driver = {
2544 .profile = &mlxsw_sp_config_profile, 2391 .profile = &mlxsw_sp_config_profile,
2545}; 2392};
2546 2393
2394static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
2395{
2396 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
2397}
2398
2399static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
2400{
2401 struct net_device *lower_dev;
2402 struct list_head *iter;
2403
2404 if (mlxsw_sp_port_dev_check(dev))
2405 return netdev_priv(dev);
2406
2407 netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
2408 if (mlxsw_sp_port_dev_check(lower_dev))
2409 return netdev_priv(lower_dev);
2410 }
2411 return NULL;
2412}
2413
2414static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
2415{
2416 struct mlxsw_sp_port *mlxsw_sp_port;
2417
2418 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
2419 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
2420}
2421
2422static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
2423{
2424 struct net_device *lower_dev;
2425 struct list_head *iter;
2426
2427 if (mlxsw_sp_port_dev_check(dev))
2428 return netdev_priv(dev);
2429
2430 netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) {
2431 if (mlxsw_sp_port_dev_check(lower_dev))
2432 return netdev_priv(lower_dev);
2433 }
2434 return NULL;
2435}
2436
2437struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
2438{
2439 struct mlxsw_sp_port *mlxsw_sp_port;
2440
2441 rcu_read_lock();
2442 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
2443 if (mlxsw_sp_port)
2444 dev_hold(mlxsw_sp_port->dev);
2445 rcu_read_unlock();
2446 return mlxsw_sp_port;
2447}
2448
2449void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
2450{
2451 dev_put(mlxsw_sp_port->dev);
2452}
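
The hold/put pair above resolves an arbitrary upper netdev (e.g. a LAG or VLAN device) to the mlxsw port beneath it and pins the port's netdev so the caller can leave RCU context. A minimal usage sketch; the caller function is hypothetical:

static int mlxsw_sp_handle_dev(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(dev);
	if (!mlxsw_sp_port)
		return 0;	/* no mlxsw port below this netdev */

	/* The reference keeps mlxsw_sp_port->dev from being freed,
	 * so sleeping operations on the port are safe here.
	 */

	mlxsw_sp_port_dev_put(mlxsw_sp_port);
	return 0;
}
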
2453
2454static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
2455 unsigned long event)
2456{
2457 switch (event) {
2458 case NETDEV_UP:
2459 if (!r)
2460 return true;
2461 r->ref_count++;
2462 return false;
2463 case NETDEV_DOWN:
2464 if (r && --r->ref_count == 0)
2465 return true;
2466 /* It is possible we already removed the RIF ourselves
2467 * if it was assigned to a netdev that is now a bridge
2468 * or LAG slave.
2469 */
2470 return false;
2471 }
2472
2473 return false;
2474}
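
Since the helper both tests the event and adjusts the reference count, a short worked trace may help. This assumes two IPv4 addresses are added to the same netdev and later removed; note that mlxsw_sp_rif_alloc() below creates RIFs with ref_count = 1.

/* Worked trace, r starting out NULL:
 *
 *   event         r on entry   returns   effect
 *   NETDEV_UP     NULL         true      caller creates the RIF (ref_count = 1)
 *   NETDEV_UP     valid        false     ref_count bumped to 2
 *   NETDEV_DOWN   valid        false     ref_count drops back to 1
 *   NETDEV_DOWN   valid        true      ref_count hit 0, caller destroys the RIF
 */
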
2475
2476static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
2477{
2478 int i;
2479
2480 for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
2481 if (!mlxsw_sp->rifs[i])
2482 return i;
2483
2484 return MLXSW_SP_RIF_MAX;
2485}
2486
2487static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
2488 bool *p_lagged, u16 *p_system_port)
2489{
2490 u8 local_port = mlxsw_sp_vport->local_port;
2491
2492 *p_lagged = mlxsw_sp_vport->lagged;
2493 *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
2494}
2495
2496static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
2497 struct net_device *l3_dev, u16 rif,
2498 bool create)
2499{
2500 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2501 bool lagged = mlxsw_sp_vport->lagged;
2502 char ritr_pl[MLXSW_REG_RITR_LEN];
2503 u16 system_port;
2504
2505 mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
2506 l3_dev->mtu, l3_dev->dev_addr);
2507
2508 mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
2509 mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
2510 mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
2511
2512 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2513}
2514
2515static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
2516
2517static struct mlxsw_sp_fid *
2518mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
2519{
2520 struct mlxsw_sp_fid *f;
2521
2522 f = kzalloc(sizeof(*f), GFP_KERNEL);
2523 if (!f)
2524 return NULL;
2525
2526 f->leave = mlxsw_sp_vport_rif_sp_leave;
2527 f->ref_count = 0;
2528 f->dev = l3_dev;
2529 f->fid = fid;
2530
2531 return f;
2532}
2533
2534static struct mlxsw_sp_rif *
2535mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
2536{
2537 struct mlxsw_sp_rif *r;
2538
2539 r = kzalloc(sizeof(*r), GFP_KERNEL);
2540 if (!r)
2541 return NULL;
2542
2543 ether_addr_copy(r->addr, l3_dev->dev_addr);
2544 r->mtu = l3_dev->mtu;
2545 r->ref_count = 1;
2546 r->dev = l3_dev;
2547 r->rif = rif;
2548 r->f = f;
2549
2550 return r;
2551}
2552
2553static struct mlxsw_sp_rif *
2554mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
2555 struct net_device *l3_dev)
2556{
2557 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2558 struct mlxsw_sp_fid *f;
2559 struct mlxsw_sp_rif *r;
2560 u16 fid, rif;
2561 int err;
2562
2563 rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
2564 if (rif == MLXSW_SP_RIF_MAX)
2565 return ERR_PTR(-ERANGE);
2566
2567 err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
2568 if (err)
2569 return ERR_PTR(err);
2570
2571 fid = mlxsw_sp_rif_sp_to_fid(rif);
2572 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
2573 if (err)
2574 goto err_rif_fdb_op;
2575
2576 f = mlxsw_sp_rfid_alloc(fid, l3_dev);
2577 if (!f) {
2578 err = -ENOMEM;
2579 goto err_rfid_alloc;
2580 }
2581
2582 r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
2583 if (!r) {
2584 err = -ENOMEM;
2585 goto err_rif_alloc;
2586 }
2587
2588 f->r = r;
2589 mlxsw_sp->rifs[rif] = r;
2590
2591 return r;
2592
2593err_rif_alloc:
2594 kfree(f);
2595err_rfid_alloc:
2596 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
2597err_rif_fdb_op:
2598 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
2599 return ERR_PTR(err);
2600}
2601
2602static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
2603 struct mlxsw_sp_rif *r)
2604{
2605 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2606 struct net_device *l3_dev = r->dev;
2607 struct mlxsw_sp_fid *f = r->f;
2608 u16 fid = f->fid;
2609 u16 rif = r->rif;
2610
2611 mlxsw_sp->rifs[rif] = NULL;
2612 f->r = NULL;
2613
2614 kfree(r);
2615
2616 kfree(f);
2617
2618 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
2619
2620 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
2621}
2622
2623static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
2624 struct net_device *l3_dev)
2625{
2626 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2627 struct mlxsw_sp_rif *r;
2628
2629 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
2630 if (!r) {
2631 r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
2632 if (IS_ERR(r))
2633 return PTR_ERR(r);
2634 }
2635
2636 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
2637 r->f->ref_count++;
2638
2639 netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
2640
2641 return 0;
2642}
2643
2644static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
2645{
2646 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
2647
2648 netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
2649
2650 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
2651 if (--f->ref_count == 0)
2652 mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
2653}
2654
2655static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
2656 struct net_device *port_dev,
2657 unsigned long event, u16 vid)
2658{
2659 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
2660 struct mlxsw_sp_port *mlxsw_sp_vport;
2661
2662 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2663 if (WARN_ON(!mlxsw_sp_vport))
2664 return -EINVAL;
2665
2666 switch (event) {
2667 case NETDEV_UP:
2668 return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
2669 case NETDEV_DOWN:
2670 mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
2671 break;
2672 }
2673
2674 return 0;
2675}
2676
2677static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
2678 unsigned long event)
2679{
2680 if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
2681 return 0;
2682
2683 return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
2684}
2685
2686static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
2687 struct net_device *lag_dev,
2688 unsigned long event, u16 vid)
2689{
2690 struct net_device *port_dev;
2691 struct list_head *iter;
2692 int err;
2693
2694 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
2695 if (mlxsw_sp_port_dev_check(port_dev)) {
2696 err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
2697 event, vid);
2698 if (err)
2699 return err;
2700 }
2701 }
2702
2703 return 0;
2704}
2705
2706static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
2707 unsigned long event)
2708{
2709 if (netif_is_bridge_port(lag_dev))
2710 return 0;
2711
2712 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
2713}
2714
2715static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
2716 struct net_device *l3_dev)
2717{
2718 u16 fid;
2719
2720 if (is_vlan_dev(l3_dev))
2721 fid = vlan_dev_vlan_id(l3_dev);
2722 else if (mlxsw_sp->master_bridge.dev == l3_dev)
2723 fid = 1;
2724 else
2725 return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
2726
2727 return mlxsw_sp_fid_find(mlxsw_sp, fid);
2728}
2729
2730static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
2731{
2732 if (mlxsw_sp_fid_is_vfid(fid))
2733 return MLXSW_REG_RITR_FID_IF;
2734 else
2735 return MLXSW_REG_RITR_VLAN_IF;
2736}
2737
2738static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
2739 struct net_device *l3_dev,
2740 u16 fid, u16 rif,
2741 bool create)
2742{
2743 enum mlxsw_reg_ritr_if_type rif_type;
2744 char ritr_pl[MLXSW_REG_RITR_LEN];
2745
2746 rif_type = mlxsw_sp_rif_type_get(fid);
2747 mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
2748 l3_dev->dev_addr);
2749 mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
2750
2751 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2752}
2753
2754static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
2755 struct net_device *l3_dev,
2756 struct mlxsw_sp_fid *f)
2757{
2758 struct mlxsw_sp_rif *r;
2759 u16 rif;
2760 int err;
2761
2762 rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
2763 if (rif == MLXSW_SP_RIF_MAX)
2764 return -ERANGE;
2765
2766 err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
2767 if (err)
2768 return err;
2769
2770 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
2771 if (err)
2772 goto err_rif_fdb_op;
2773
2774 r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
2775 if (!r) {
2776 err = -ENOMEM;
2777 goto err_rif_alloc;
2778 }
2779
2780 f->r = r;
2781 mlxsw_sp->rifs[rif] = r;
2782
2783 netdev_dbg(l3_dev, "RIF=%d created\n", rif);
2784
2785 return 0;
2786
2787err_rif_alloc:
2788 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
2789err_rif_fdb_op:
2790 mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
2791 return err;
2792}
2793
2794void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
2795 struct mlxsw_sp_rif *r)
2796{
2797 struct net_device *l3_dev = r->dev;
2798 struct mlxsw_sp_fid *f = r->f;
2799 u16 rif = r->rif;
2800
2801 mlxsw_sp->rifs[rif] = NULL;
2802 f->r = NULL;
2803
2804 kfree(r);
2805
2806 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
2807
2808 mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
2809
2810 netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
2811}
2812
2813static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
2814 struct net_device *br_dev,
2815 unsigned long event)
2816{
2817 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
2818 struct mlxsw_sp_fid *f;
2819
2820 /* FID can either be an actual FID if the L3 device is the
2821 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
2822 * L3 device is a VLAN-unaware bridge and we get a vFID.
2823 */
2824 f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
2825 if (WARN_ON(!f))
2826 return -EINVAL;
2827
2828 switch (event) {
2829 case NETDEV_UP:
2830 return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
2831 case NETDEV_DOWN:
2832 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
2833 break;
2834 }
2835
2836 return 0;
2837}
2838
2839static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
2840 unsigned long event)
2841{
2842 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
2843 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
2844 u16 vid = vlan_dev_vlan_id(vlan_dev);
2845
2846 if (mlxsw_sp_port_dev_check(real_dev))
2847 return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
2848 vid);
2849 else if (netif_is_lag_master(real_dev))
2850 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
2851 vid);
2852 else if (netif_is_bridge_master(real_dev) &&
2853 mlxsw_sp->master_bridge.dev == real_dev)
2854 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
2855 event);
2856
2857 return 0;
2858}
2859
2860static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
2861 unsigned long event, void *ptr)
2862{
2863 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
2864 struct net_device *dev = ifa->ifa_dev->dev;
2865 struct mlxsw_sp *mlxsw_sp;
2866 struct mlxsw_sp_rif *r;
2867 int err = 0;
2868
2869 mlxsw_sp = mlxsw_sp_lower_get(dev);
2870 if (!mlxsw_sp)
2871 goto out;
2872
2873 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
2874 if (!mlxsw_sp_rif_should_config(r, event))
2875 goto out;
2876
2877 if (mlxsw_sp_port_dev_check(dev))
2878 err = mlxsw_sp_inetaddr_port_event(dev, event);
2879 else if (netif_is_lag_master(dev))
2880 err = mlxsw_sp_inetaddr_lag_event(dev, event);
2881 else if (netif_is_bridge_master(dev))
2882 err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
2883 else if (is_vlan_dev(dev))
2884 err = mlxsw_sp_inetaddr_vlan_event(dev, event);
2885
2886out:
2887 return notifier_from_errno(err);
2888}
2889
2890static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
2891 const char *mac, int mtu)
2892{
2893 char ritr_pl[MLXSW_REG_RITR_LEN];
2894 int err;
2895
2896 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
2897 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2898 if (err)
2899 return err;
2900
2901 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
2902 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
2903 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
2904 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2905}
2906
2907static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
2908{
2909 struct mlxsw_sp *mlxsw_sp;
2910 struct mlxsw_sp_rif *r;
2911 int err;
2912
2913 mlxsw_sp = mlxsw_sp_lower_get(dev);
2914 if (!mlxsw_sp)
2915 return 0;
2916
2917 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
2918 if (!r)
2919 return 0;
2920
2921 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
2922 if (err)
2923 return err;
2924
2925 err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
2926 if (err)
2927 goto err_rif_edit;
2928
2929 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
2930 if (err)
2931 goto err_rif_fdb_op;
2932
2933 ether_addr_copy(r->addr, dev->dev_addr);
2934 r->mtu = dev->mtu;
2935
2936 netdev_dbg(dev, "Updated RIF=%d\n", r->rif);
2937
2938 return 0;
2939
2940err_rif_fdb_op:
2941 mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
2942err_rif_edit:
2943 mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
2944 return err;
2945}
2946
2547static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port, 2947static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
2548 u16 fid) 2948 u16 fid)
2549{ 2949{
@@ -2624,9 +3024,15 @@ int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
2624 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid); 3024 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
2625} 3025}
2626 3026
2627static bool mlxsw_sp_port_dev_check(const struct net_device *dev) 3027static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
2628{ 3028{
2629 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; 3029 struct mlxsw_sp_fid *f, *tmp;
3030
3031 list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
3032 if (--f->ref_count == 0)
3033 mlxsw_sp_fid_destroy(mlxsw_sp, f);
3034 else
3035 WARN_ON_ONCE(1);
2630} 3036}
2631 3037
2632static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, 3038static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
@@ -2645,8 +3051,15 @@ static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
2645 3051
2646static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp) 3052static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
2647{ 3053{
2648 if (--mlxsw_sp->master_bridge.ref_count == 0) 3054 if (--mlxsw_sp->master_bridge.ref_count == 0) {
2649 mlxsw_sp->master_bridge.dev = NULL; 3055 mlxsw_sp->master_bridge.dev = NULL;
3056 /* It's possible upper VLAN devices are still holding
3057 * references to underlying FIDs. Drop the reference
3058 * and release the resources if it was the last one.
3059 * If it wasn't, then something bad happened.
3060 */
3061 mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
3062 }
2650} 3063}
2651 3064
2652static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, 3065static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -2806,6 +3219,45 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2806 return -EBUSY; 3219 return -EBUSY;
2807} 3220}
2808 3221
3222static void
3223mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3224 u16 lag_id)
3225{
3226 struct mlxsw_sp_port *mlxsw_sp_vport;
3227 struct mlxsw_sp_fid *f;
3228
3229 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3230 if (WARN_ON(!mlxsw_sp_vport))
3231 return;
3232
 3233 /* If the vPort is assigned a RIF, then leave it, since the RIF is
 3234 * no longer valid.
3235 */
3236 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3237 if (f)
3238 f->leave(mlxsw_sp_vport);
3239
3240 mlxsw_sp_vport->lag_id = lag_id;
3241 mlxsw_sp_vport->lagged = 1;
3242}
3243
3244static void
3245mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3246{
3247 struct mlxsw_sp_port *mlxsw_sp_vport;
3248 struct mlxsw_sp_fid *f;
3249
3250 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3251 if (WARN_ON(!mlxsw_sp_vport))
3252 return;
3253
3254 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3255 if (f)
3256 f->leave(mlxsw_sp_vport);
3257
3258 mlxsw_sp_vport->lagged = 0;
3259}
3260
2809static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, 3261static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
2810 struct net_device *lag_dev) 3262 struct net_device *lag_dev)
2811{ 3263{
@@ -2841,6 +3293,9 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
2841 mlxsw_sp_port->lag_id = lag_id; 3293 mlxsw_sp_port->lag_id = lag_id;
2842 mlxsw_sp_port->lagged = 1; 3294 mlxsw_sp_port->lagged = 1;
2843 lag->ref_count++; 3295 lag->ref_count++;
3296
3297 mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);
3298
2844 return 0; 3299 return 0;
2845 3300
2846err_col_port_enable: 3301err_col_port_enable:
@@ -2878,6 +3333,8 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2878 mlxsw_sp_port->local_port); 3333 mlxsw_sp_port->local_port);
2879 mlxsw_sp_port->lagged = 0; 3334 mlxsw_sp_port->lagged = 0;
2880 lag->ref_count--; 3335 lag->ref_count--;
3336
3337 mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
2881} 3338}
2882 3339
2883static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, 3340static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -3071,47 +3528,97 @@ static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
3071 return 0; 3528 return 0;
3072} 3529}
3073 3530
3074static struct mlxsw_sp_fid * 3531static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
3075mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp, 3532 struct net_device *vlan_dev)
3076 const struct net_device *br_dev)
3077{ 3533{
3534 u16 fid = vlan_dev_vlan_id(vlan_dev);
3078 struct mlxsw_sp_fid *f; 3535 struct mlxsw_sp_fid *f;
3079 3536
3080 list_for_each_entry(f, &mlxsw_sp->br_vfids.list, list) { 3537 f = mlxsw_sp_fid_find(mlxsw_sp, fid);
3081 if (f->dev == br_dev) 3538 if (!f) {
3082 return f; 3539 f = mlxsw_sp_fid_create(mlxsw_sp, fid);
3540 if (IS_ERR(f))
3541 return PTR_ERR(f);
3083 } 3542 }
3084 3543
3085 return NULL; 3544 f->ref_count++;
3545
3546 return 0;
3547}
3548
3549static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
3550 struct net_device *vlan_dev)
3551{
3552 u16 fid = vlan_dev_vlan_id(vlan_dev);
3553 struct mlxsw_sp_fid *f;
3554
3555 f = mlxsw_sp_fid_find(mlxsw_sp, fid);
3556 if (f && f->r)
3557 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
3558 if (f && --f->ref_count == 0)
3559 mlxsw_sp_fid_destroy(mlxsw_sp, f);
3086} 3560}
3087 3561
3088static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid) 3562static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
3563 unsigned long event, void *ptr)
3089{ 3564{
3090 return vfid - MLXSW_SP_VFID_PORT_MAX; 3565 struct netdev_notifier_changeupper_info *info;
3566 struct net_device *upper_dev;
3567 struct mlxsw_sp *mlxsw_sp;
3568 int err;
3569
3570 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3571 if (!mlxsw_sp)
3572 return 0;
3573 if (br_dev != mlxsw_sp->master_bridge.dev)
3574 return 0;
3575
3576 info = ptr;
3577
3578 switch (event) {
3579 case NETDEV_CHANGEUPPER:
3580 upper_dev = info->upper_dev;
3581 if (!is_vlan_dev(upper_dev))
3582 break;
3583 if (info->linking) {
3584 err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
3585 upper_dev);
3586 if (err)
3587 return err;
3588 } else {
3589 mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
3590 }
3591 break;
3592 }
3593
3594 return 0;
3091} 3595}
3092 3596
3093static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid) 3597static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
3094{ 3598{
3095 return MLXSW_SP_VFID_PORT_MAX + br_vfid; 3599 return find_first_zero_bit(mlxsw_sp->vfids.mapped,
3600 MLXSW_SP_VFID_MAX);
3096} 3601}
3097 3602
3098static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp) 3603static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
3099{ 3604{
3100 return find_first_zero_bit(mlxsw_sp->br_vfids.mapped, 3605 char sfmr_pl[MLXSW_REG_SFMR_LEN];
3101 MLXSW_SP_VFID_BR_MAX); 3606
3607 mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
3608 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
3102} 3609}
3103 3610
3104static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport); 3611static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
3105 3612
3106static struct mlxsw_sp_fid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp, 3613static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
3107 struct net_device *br_dev) 3614 struct net_device *br_dev)
3108{ 3615{
3109 struct device *dev = mlxsw_sp->bus_info->dev; 3616 struct device *dev = mlxsw_sp->bus_info->dev;
3110 struct mlxsw_sp_fid *f; 3617 struct mlxsw_sp_fid *f;
3111 u16 vfid, fid; 3618 u16 vfid, fid;
3112 int err; 3619 int err;
3113 3620
3114 vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp)); 3621 vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
3115 if (vfid == MLXSW_SP_VFID_MAX) { 3622 if (vfid == MLXSW_SP_VFID_MAX) {
3116 dev_err(dev, "No available vFIDs\n"); 3623 dev_err(dev, "No available vFIDs\n");
3117 return ERR_PTR(-ERANGE); 3624 return ERR_PTR(-ERANGE);
@@ -3128,12 +3635,12 @@ static struct mlxsw_sp_fid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
3128 if (!f) 3635 if (!f)
3129 goto err_allocate_vfid; 3636 goto err_allocate_vfid;
3130 3637
3131 f->leave = mlxsw_sp_vport_br_vfid_leave; 3638 f->leave = mlxsw_sp_vport_vfid_leave;
3132 f->fid = fid; 3639 f->fid = fid;
3133 f->dev = br_dev; 3640 f->dev = br_dev;
3134 3641
3135 list_add(&f->list, &mlxsw_sp->br_vfids.list); 3642 list_add(&f->list, &mlxsw_sp->vfids.list);
3136 set_bit(mlxsw_sp_vfid_to_br_vfid(vfid), mlxsw_sp->br_vfids.mapped); 3643 set_bit(vfid, mlxsw_sp->vfids.mapped);
3137 3644
3138 return f; 3645 return f;
3139 3646
@@ -3142,29 +3649,42 @@ err_allocate_vfid:
3142 return ERR_PTR(-ENOMEM); 3649 return ERR_PTR(-ENOMEM);
3143} 3650}
3144 3651
3145static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp, 3652static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
3146 struct mlxsw_sp_fid *f) 3653 struct mlxsw_sp_fid *f)
3147{ 3654{
3148 u16 vfid = mlxsw_sp_fid_to_vfid(f->fid); 3655 u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
3149 u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid); 3656 u16 fid = f->fid;
3150 3657
3151 clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped); 3658 clear_bit(vfid, mlxsw_sp->vfids.mapped);
3152 list_del(&f->list); 3659 list_del(&f->list);
3153 3660
3154 mlxsw_sp_vfid_op(mlxsw_sp, f->fid, false); 3661 if (f->r)
3662 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
3155 3663
3156 kfree(f); 3664 kfree(f);
3665
3666 mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
3157} 3667}
3158 3668
3159static int mlxsw_sp_vport_br_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport, 3669static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
3160 struct net_device *br_dev) 3670 bool valid)
3671{
3672 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
3673 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3674
3675 return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
3676 vid);
3677}
3678
3679static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
3680 struct net_device *br_dev)
3161{ 3681{
3162 struct mlxsw_sp_fid *f; 3682 struct mlxsw_sp_fid *f;
3163 int err; 3683 int err;
3164 3684
3165 f = mlxsw_sp_br_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev); 3685 f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
3166 if (!f) { 3686 if (!f) {
3167 f = mlxsw_sp_br_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev); 3687 f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
3168 if (IS_ERR(f)) 3688 if (IS_ERR(f))
3169 return PTR_ERR(f); 3689 return PTR_ERR(f);
3170 } 3690 }
@@ -3188,11 +3708,11 @@ err_vport_fid_map:
3188 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false); 3708 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
3189err_vport_flood_set: 3709err_vport_flood_set:
3190 if (!f->ref_count) 3710 if (!f->ref_count)
3191 mlxsw_sp_br_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f); 3711 mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
3192 return err; 3712 return err;
3193} 3713}
3194 3714
3195static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport) 3715static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
3196{ 3716{
3197 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); 3717 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3198 3718
@@ -3206,22 +3726,24 @@ static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
3206 3726
3207 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL); 3727 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
3208 if (--f->ref_count == 0) 3728 if (--f->ref_count == 0)
3209 mlxsw_sp_br_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f); 3729 mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
3210} 3730}
3211 3731
3212static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport, 3732static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
3213 struct net_device *br_dev) 3733 struct net_device *br_dev)
3214{ 3734{
3735 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3215 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); 3736 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3216 struct net_device *dev = mlxsw_sp_vport->dev; 3737 struct net_device *dev = mlxsw_sp_vport->dev;
3217 int err; 3738 int err;
3218 3739
3219 mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport); 3740 if (f && !WARN_ON(!f->leave))
3741 f->leave(mlxsw_sp_vport);
3220 3742
3221 err = mlxsw_sp_vport_br_vfid_join(mlxsw_sp_vport, br_dev); 3743 err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
3222 if (err) { 3744 if (err) {
3223 netdev_err(dev, "Failed to join vFID\n"); 3745 netdev_err(dev, "Failed to join vFID\n");
3224 goto err_vport_br_vfid_join; 3746 return err;
3225 } 3747 }
3226 3748
3227 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); 3749 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
@@ -3238,9 +3760,7 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
3238 return 0; 3760 return 0;
3239 3761
3240err_port_vid_learning_set: 3762err_port_vid_learning_set:
3241 mlxsw_sp_vport_br_vfid_leave(mlxsw_sp_vport); 3763 mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
3242err_vport_br_vfid_join:
3243 mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);
3244 return err; 3764 return err;
3245} 3765}
3246 3766
@@ -3250,12 +3770,7 @@ static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
3250 3770
3251 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); 3771 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3252 3772
3253 mlxsw_sp_vport_br_vfid_leave(mlxsw_sp_vport); 3773 mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
3254
3255 mlxsw_sp_vport_vfid_join(mlxsw_sp_vport);
3256
3257 mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
3258 MLXSW_REG_SPMS_STATE_FORWARDING);
3259 3774
3260 mlxsw_sp_vport->learning = 0; 3775 mlxsw_sp_vport->learning = 0;
3261 mlxsw_sp_vport->learning_sync = 0; 3776 mlxsw_sp_vport->learning_sync = 0;
@@ -3271,7 +3786,7 @@ mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
3271 3786
3272 list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, 3787 list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
3273 vport.list) { 3788 vport.list) {
3274 struct net_device *dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); 3789 struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
3275 3790
3276 if (dev && dev == br_dev) 3791 if (dev && dev == br_dev)
3277 return false; 3792 return false;
@@ -3365,10 +3880,14 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
3365 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3880 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3366 int err = 0; 3881 int err = 0;
3367 3882
3368 if (mlxsw_sp_port_dev_check(dev)) 3883 if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
3884 err = mlxsw_sp_netdevice_router_port_event(dev);
3885 else if (mlxsw_sp_port_dev_check(dev))
3369 err = mlxsw_sp_netdevice_port_event(dev, event, ptr); 3886 err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
3370 else if (netif_is_lag_master(dev)) 3887 else if (netif_is_lag_master(dev))
3371 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); 3888 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
3889 else if (netif_is_bridge_master(dev))
3890 err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
3372 else if (is_vlan_dev(dev)) 3891 else if (is_vlan_dev(dev))
3373 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); 3892 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
3374 3893
@@ -3379,11 +3898,17 @@ static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
3379 .notifier_call = mlxsw_sp_netdevice_event, 3898 .notifier_call = mlxsw_sp_netdevice_event,
3380}; 3899};
3381 3900
3901static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
3902 .notifier_call = mlxsw_sp_inetaddr_event,
3903 .priority = 10, /* Must be called before FIB notifier block */
3904};
3905
3382static int __init mlxsw_sp_module_init(void) 3906static int __init mlxsw_sp_module_init(void)
3383{ 3907{
3384 int err; 3908 int err;
3385 3909
3386 register_netdevice_notifier(&mlxsw_sp_netdevice_nb); 3910 register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3911 register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
3387 err = mlxsw_core_driver_register(&mlxsw_sp_driver); 3912 err = mlxsw_core_driver_register(&mlxsw_sp_driver);
3388 if (err) 3913 if (err)
3389 goto err_core_driver_register; 3914 goto err_core_driver_register;
@@ -3397,6 +3922,7 @@ err_core_driver_register:
3397static void __exit mlxsw_sp_module_exit(void) 3922static void __exit mlxsw_sp_module_exit(void)
3398{ 3923{
3399 mlxsw_core_driver_unregister(&mlxsw_sp_driver); 3924 mlxsw_core_driver_unregister(&mlxsw_sp_driver);
3925 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
3400 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); 3926 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3401} 3927}
3402 3928
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 36c9835ea20b..ef4ac8987a2a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -39,19 +39,22 @@
39 39
40#include <linux/types.h> 40#include <linux/types.h>
41#include <linux/netdevice.h> 41#include <linux/netdevice.h>
42#include <linux/rhashtable.h>
42#include <linux/bitops.h> 43#include <linux/bitops.h>
43#include <linux/if_vlan.h> 44#include <linux/if_vlan.h>
44#include <linux/list.h> 45#include <linux/list.h>
45#include <linux/dcbnl.h> 46#include <linux/dcbnl.h>
47#include <linux/in6.h>
46#include <net/switchdev.h> 48#include <net/switchdev.h>
47 49
48#include "port.h" 50#include "port.h"
49#include "core.h" 51#include "core.h"
50 52
51#define MLXSW_SP_VFID_BASE VLAN_N_VID 53#define MLXSW_SP_VFID_BASE VLAN_N_VID
52#define MLXSW_SP_VFID_PORT_MAX 512 /* Non-bridged VLAN interfaces */ 54#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */
53#define MLXSW_SP_VFID_BR_MAX 6144 /* Bridged VLAN interfaces */ 55
54#define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX) 56#define MLXSW_SP_RFID_BASE 15360
57#define MLXSW_SP_RIF_MAX 800
55 58
56#define MLXSW_SP_LAG_MAX 64 59#define MLXSW_SP_LAG_MAX 64
57#define MLXSW_SP_PORT_PER_LAG_MAX 16 60#define MLXSW_SP_PORT_PER_LAG_MAX 16
@@ -60,6 +63,12 @@
60 63
61#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4 64#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4
62 65
66#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
67#define MLXSW_SP_LPM_TREE_MAX 22
68#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)
69
70#define MLXSW_SP_VIRTUAL_ROUTER_MAX 256
71
63#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */ 72#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
64 73
65#define MLXSW_SP_BYTES_PER_CELL 96 74#define MLXSW_SP_BYTES_PER_CELL 96
@@ -67,6 +76,10 @@
67#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL) 76#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL)
68#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL) 77#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL)
69 78
79#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
80#define MLXSW_SP_KVD_HASH_SINGLE_SIZE 163840 /* entries */
81#define MLXSW_SP_KVD_HASH_DOUBLE_SIZE 32768 /* entries */
82
70/* Maximum delay buffer needed in case of PAUSE frames, in cells. 83/* Maximum delay buffer needed in case of PAUSE frames, in cells.
71 * Assumes 100m cable and maximum MTU. 84 * Assumes 100m cable and maximum MTU.
72 */ 85 */
@@ -92,8 +105,17 @@ struct mlxsw_sp_fid {
92 struct list_head list; 105 struct list_head list;
93 unsigned int ref_count; 106 unsigned int ref_count;
94 struct net_device *dev; 107 struct net_device *dev;
108 struct mlxsw_sp_rif *r;
95 u16 fid; 109 u16 fid;
96 u16 vid; 110};
111
112struct mlxsw_sp_rif {
113 struct net_device *dev;
114 unsigned int ref_count;
115 struct mlxsw_sp_fid *f;
116 unsigned char addr[ETH_ALEN];
117 int mtu;
118 u16 rif;
97}; 119};
98 120
99struct mlxsw_sp_mid { 121struct mlxsw_sp_mid {
@@ -116,7 +138,17 @@ static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
116 138
117static inline bool mlxsw_sp_fid_is_vfid(u16 fid) 139static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
118{ 140{
119 return fid >= MLXSW_SP_VFID_BASE; 141 return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
142}
143
144static inline bool mlxsw_sp_fid_is_rfid(u16 fid)
145{
146 return fid >= MLXSW_SP_RFID_BASE;
147}
148
149static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
150{
151 return MLXSW_SP_RFID_BASE + rif;
120} 152}
121 153
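
The three helpers above carve the 16-bit FID space into VLAN FIDs below VLAN_N_VID, vFIDs in [MLXSW_SP_VFID_BASE, MLXSW_SP_RFID_BASE) and rFIDs from MLXSW_SP_RFID_BASE upwards, one rFID per RIF. A minimal standalone C sketch of the same predicates (names and the VLAN_N_VID value of 4096 assumed from the kernel headers, not part of this patch):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define VLAN_N_VID   4096  /* as in linux/if_vlan.h */
#define SP_VFID_BASE VLAN_N_VID
#define SP_RFID_BASE 15360

static bool fid_is_vfid(uint16_t fid)
{
	return fid >= SP_VFID_BASE && fid < SP_RFID_BASE;
}

static bool fid_is_rfid(uint16_t fid)
{
	return fid >= SP_RFID_BASE;
}

static uint16_t rif_sp_to_fid(uint16_t rif)
{
	return SP_RFID_BASE + rif;
}

int main(void)
{
	assert(!fid_is_vfid(100));             /* plain VLAN FID */
	assert(fid_is_vfid(SP_VFID_BASE));     /* first vFID */
	assert(fid_is_rfid(rif_sp_to_fid(0))); /* rFIDs start at 15360 */
	return 0;
}
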
122struct mlxsw_sp_sb_pr { 154struct mlxsw_sp_sb_pr {
@@ -153,20 +185,60 @@ struct mlxsw_sp_sb {
153 } ports[MLXSW_PORT_MAX_PORTS]; 185 } ports[MLXSW_PORT_MAX_PORTS];
154}; 186};
155 187
156struct mlxsw_sp { 188#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
189
190struct mlxsw_sp_prefix_usage {
191 DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
192};
193
194enum mlxsw_sp_l3proto {
195 MLXSW_SP_L3_PROTO_IPV4,
196 MLXSW_SP_L3_PROTO_IPV6,
197};
198
199struct mlxsw_sp_lpm_tree {
200 u8 id; /* tree ID */
201 unsigned int ref_count;
202 enum mlxsw_sp_l3proto proto;
203 struct mlxsw_sp_prefix_usage prefix_usage;
204};
205
206struct mlxsw_sp_fib;
207
208struct mlxsw_sp_vr {
209 u16 id; /* virtual router ID */
210 bool used;
211 enum mlxsw_sp_l3proto proto;
212 u32 tb_id; /* kernel fib table id */
213 struct mlxsw_sp_lpm_tree *lpm_tree;
214 struct mlxsw_sp_fib *fib;
215};
216
217struct mlxsw_sp_router {
218 struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
219 struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX];
220 struct rhashtable neigh_ht;
157 struct { 221 struct {
158 struct list_head list; 222 struct delayed_work dw;
159 DECLARE_BITMAP(mapped, MLXSW_SP_VFID_PORT_MAX); 223 unsigned long interval; /* ms */
160 } port_vfids; 224 } neighs_update;
225 struct delayed_work nexthop_probe_dw;
226#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
227 struct list_head nexthop_group_list;
228 struct list_head nexthop_neighs_list;
229};
230
231struct mlxsw_sp {
161 struct { 232 struct {
162 struct list_head list; 233 struct list_head list;
163 DECLARE_BITMAP(mapped, MLXSW_SP_VFID_BR_MAX); 234 DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
164 } br_vfids; 235 } vfids;
165 struct { 236 struct {
166 struct list_head list; 237 struct list_head list;
167 DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX); 238 DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
168 } br_mids; 239 } br_mids;
169 struct list_head fids; /* VLAN-aware bridge FIDs */ 240 struct list_head fids; /* VLAN-aware bridge FIDs */
241 struct mlxsw_sp_rif *rifs[MLXSW_SP_RIF_MAX];
170 struct mlxsw_sp_port **ports; 242 struct mlxsw_sp_port **ports;
171 struct mlxsw_core *core; 243 struct mlxsw_core *core;
172 const struct mlxsw_bus_info *bus_info; 244 const struct mlxsw_bus_info *bus_info;
@@ -184,6 +256,10 @@ struct mlxsw_sp {
184 struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX]; 256 struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
185 u8 port_to_module[MLXSW_PORT_MAX_PORTS]; 257 u8 port_to_module[MLXSW_PORT_MAX_PORTS];
186 struct mlxsw_sp_sb sb; 258 struct mlxsw_sp_sb sb;
259 struct mlxsw_sp_router router;
260 struct {
261 DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
262 } kvdl;
187}; 263};
188 264
189static inline struct mlxsw_sp_upper * 265static inline struct mlxsw_sp_upper *
@@ -242,6 +318,9 @@ struct mlxsw_sp_port {
242 struct list_head vports_list; 318 struct list_head vports_list;
243}; 319};
244 320
321struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
322void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
323
245static inline bool 324static inline bool
246mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port) 325mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
247{ 326{
@@ -287,7 +366,7 @@ mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
287} 366}
288 367
289static inline struct net_device * 368static inline struct net_device *
290mlxsw_sp_vport_br_get(const struct mlxsw_sp_port *mlxsw_sp_vport) 369mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
291{ 370{
292 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); 371 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
293 372
@@ -325,6 +404,44 @@ mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
325 return NULL; 404 return NULL;
326} 405}
327 406
407static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
408 u16 fid)
409{
410 struct mlxsw_sp_fid *f;
411
412 list_for_each_entry(f, &mlxsw_sp->fids, list)
413 if (f->fid == fid)
414 return f;
415
416 return NULL;
417}
418
419static inline struct mlxsw_sp_fid *
420mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
421 const struct net_device *br_dev)
422{
423 struct mlxsw_sp_fid *f;
424
425 list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
426 if (f->dev == br_dev)
427 return f;
428
429 return NULL;
430}
431
432static inline struct mlxsw_sp_rif *
433mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
434 const struct net_device *dev)
435{
436 int i;
437
438 for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
439 if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
440 return mlxsw_sp->rifs[i];
441
442 return NULL;
443}
444
328enum mlxsw_sp_flood_table { 445enum mlxsw_sp_flood_table {
329 MLXSW_SP_FLOOD_TABLE_UC, 446 MLXSW_SP_FLOOD_TABLE_UC,
330 MLXSW_SP_FLOOD_TABLE_BM, 447 MLXSW_SP_FLOOD_TABLE_BM,
@@ -377,13 +494,17 @@ int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
377 u16 vid_end, bool is_member, bool untagged); 494 u16 vid_end, bool is_member, bool untagged);
378int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, 495int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
379 u16 vid); 496 u16 vid);
380int mlxsw_sp_port_kill_vid(struct net_device *dev,
381 __be16 __always_unused proto, u16 vid);
382int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, 497int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
383 bool set); 498 bool set);
384void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); 499void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
385int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); 500int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
386int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid); 501int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
502int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
503 bool adding);
504struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
505void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
506void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
507 struct mlxsw_sp_rif *r);
387int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, 508int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
388 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, 509 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
389 bool dwrr, u8 dwrr_weight); 510 bool dwrr, u8 dwrr_weight);
@@ -413,4 +534,19 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
413 534
414#endif 535#endif
415 536
537int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
538void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
539int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
540 const struct switchdev_obj_ipv4_fib *fib4,
541 struct switchdev_trans *trans);
542int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
543 const struct switchdev_obj_ipv4_fib *fib4);
544int mlxsw_sp_router_neigh_construct(struct net_device *dev,
545 struct neighbour *n);
546void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
547 struct neighbour *n);
548
549int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
550void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
551
416#endif 552#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
new file mode 100644
index 000000000000..ac321e8e5c1a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -0,0 +1,91 @@
1/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the names of the copyright holders nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * Alternatively, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") version 2 as published by the Free
20 * Software Foundation.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <linux/kernel.h>
36#include <linux/bitops.h>
37
38#include "spectrum.h"
39
40#define MLXSW_SP_KVDL_SINGLE_BASE 0
41#define MLXSW_SP_KVDL_SINGLE_SIZE 16384
42#define MLXSW_SP_KVDL_CHUNKS_BASE \
43 (MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE)
44#define MLXSW_SP_KVDL_CHUNKS_SIZE \
45 (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_CHUNKS_BASE)
46#define MLXSW_SP_CHUNK_MAX 32
47
48int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count)
49{
50 int entry_index;
51 int size;
52 int type_base;
53 int type_size;
54 int type_entries;
55
56 if (entry_count == 0 || entry_count > MLXSW_SP_CHUNK_MAX) {
57 return -EINVAL;
58 } else if (entry_count == 1) {
59 type_base = MLXSW_SP_KVDL_SINGLE_BASE;
60 type_size = MLXSW_SP_KVDL_SINGLE_SIZE;
61 type_entries = 1;
62 } else {
63 type_base = MLXSW_SP_KVDL_CHUNKS_BASE;
64 type_size = MLXSW_SP_KVDL_CHUNKS_SIZE;
65 type_entries = MLXSW_SP_CHUNK_MAX;
66 }
67
68 entry_index = type_base;
69 size = type_base + type_size;
70 for_each_clear_bit_from(entry_index, mlxsw_sp->kvdl.usage, size) {
71 int i;
72
73 for (i = 0; i < type_entries; i++)
74 set_bit(entry_index + i, mlxsw_sp->kvdl.usage);
75 return entry_index;
76 }
77 return -ENOBUFS;
78}
79
80void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index)
81{
82 int type_entries;
83 int i;
84
85 if (entry_index < MLXSW_SP_KVDL_CHUNKS_BASE)
86 type_entries = 1;
87 else
88 type_entries = MLXSW_SP_CHUNK_MAX;
89 for (i = 0; i < type_entries; i++)
90 clear_bit(entry_index + i, mlxsw_sp->kvdl.usage);
91}
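
The allocator above splits the KVD linear space into a singles region and a region handed out in 32-entry chunks, doing a first-fit scan over a usage bitmap. A minimal userspace sketch of the same logic, with a plain bool array standing in for the kernel bitmap API (all names here are hypothetical):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define KVD_LINEAR_SIZE 65536
#define SINGLE_BASE     0
#define SINGLE_SIZE     16384
#define CHUNKS_BASE     (SINGLE_BASE + SINGLE_SIZE)
#define CHUNK_MAX       32

static bool usage[KVD_LINEAR_SIZE]; /* stands in for kvdl.usage */

static int kvdl_alloc(unsigned int entry_count)
{
	int base, size, entries, i, j;

	if (entry_count == 0 || entry_count > CHUNK_MAX)
		return -EINVAL;
	if (entry_count == 1) {
		base = SINGLE_BASE;
		size = SINGLE_SIZE;
		entries = 1;
	} else {
		base = CHUNKS_BASE;
		size = KVD_LINEAR_SIZE - CHUNKS_BASE;
		entries = CHUNK_MAX;
	}
	/* first-fit scan for a clear index, then claim a whole region */
	for (i = base; i < base + size; i++) {
		if (usage[i])
			continue;
		for (j = 0; j < entries; j++)
			usage[i + j] = true;
		return i;
	}
	return -ENOBUFS;
}

static void kvdl_free(int entry_index)
{
	int entries = entry_index < CHUNKS_BASE ? 1 : CHUNK_MAX;
	int i;

	for (i = 0; i < entries; i++)
		usage[entry_index + i] = false;
}

int main(void)
{
	int single = kvdl_alloc(1); /* -> 0, from the singles region */
	int chunk = kvdl_alloc(8);  /* -> 16384, rounded up to a 32-entry chunk */

	printf("single=%d chunk=%d\n", single, chunk);
	kvdl_free(chunk);
	kvdl_free(single);
	return 0;
}
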
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
new file mode 100644
index 000000000000..e084ea5448ac
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -0,0 +1,1814 @@
1/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/types.h>
39#include <linux/rhashtable.h>
40#include <linux/bitops.h>
41#include <linux/in6.h>
42#include <linux/notifier.h>
43#include <net/netevent.h>
44#include <net/neighbour.h>
45#include <net/arp.h>
46
47#include "spectrum.h"
48#include "core.h"
49#include "reg.h"
50
51#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
52 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
53
54static bool
55mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
56 struct mlxsw_sp_prefix_usage *prefix_usage2)
57{
58 unsigned char prefix;
59
60 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
61 if (!test_bit(prefix, prefix_usage2->b))
62 return false;
63 }
64 return true;
65}
66
67static bool
68mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
69 struct mlxsw_sp_prefix_usage *prefix_usage2)
70{
71 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
72}
73
74static bool
75mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
76{
77 struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
78
79 return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
80}
81
82static void
83mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
84 struct mlxsw_sp_prefix_usage *prefix_usage2)
85{
86 memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
87}
88
89static void
90mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
91{
92 memset(prefix_usage, 0, sizeof(*prefix_usage));
93}
94
95static void
96mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
97 unsigned char prefix_len)
98{
99 set_bit(prefix_len, prefix_usage->b);
100}
101
102static void
103mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
104 unsigned char prefix_len)
105{
106 clear_bit(prefix_len, prefix_usage->b);
107}
108
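
A prefix-usage bitmap records which prefix lengths occur in a FIB; the subset and equality tests above are what later decide whether an existing LPM tree can serve a new requirement. A self-contained sketch of those operations over a uint64_t bitmap (hypothetical names):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PREFIX_COUNT 128 /* sizeof(struct in6_addr) * BITS_PER_BYTE */

struct prefix_usage {
	uint64_t b[PREFIX_COUNT / 64];
};

static void prefix_usage_set(struct prefix_usage *pu, unsigned char len)
{
	pu->b[len / 64] |= UINT64_C(1) << (len % 64);
}

static bool prefix_usage_subset(const struct prefix_usage *pu1,
				const struct prefix_usage *pu2)
{
	size_t i;

	/* pu1 is a subset of pu2 iff no bit of pu1 falls outside pu2 */
	for (i = 0; i < PREFIX_COUNT / 64; i++)
		if (pu1->b[i] & ~pu2->b[i])
			return false;
	return true;
}

static bool prefix_usage_eq(const struct prefix_usage *pu1,
			    const struct prefix_usage *pu2)
{
	return !memcmp(pu1, pu2, sizeof(*pu1));
}

int main(void)
{
	struct prefix_usage tree = { { 0 } }, req = { { 0 } };

	prefix_usage_set(&tree, 24);
	prefix_usage_set(&tree, 32);
	prefix_usage_set(&req, 24);
	printf("subset=%d eq=%d\n", prefix_usage_subset(&req, &tree),
	       prefix_usage_eq(&req, &tree)); /* subset=1 eq=0 */
	return 0;
}
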
109struct mlxsw_sp_fib_key {
110 unsigned char addr[sizeof(struct in6_addr)];
111 unsigned char prefix_len;
112};
113
114enum mlxsw_sp_fib_entry_type {
115 MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
116 MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
117 MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
118};
119
120struct mlxsw_sp_nexthop_group;
121
122struct mlxsw_sp_fib_entry {
123 struct rhash_head ht_node;
124 struct mlxsw_sp_fib_key key;
125 enum mlxsw_sp_fib_entry_type type;
126 u8 added:1;
127 u16 rif; /* used for action local */
128 struct mlxsw_sp_vr *vr;
129 struct list_head nexthop_group_node;
130 struct mlxsw_sp_nexthop_group *nh_group;
131};
132
133struct mlxsw_sp_fib {
134 struct rhashtable ht;
135 unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
136 struct mlxsw_sp_prefix_usage prefix_usage;
137};
138
139static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
140 .key_offset = offsetof(struct mlxsw_sp_fib_entry, key),
141 .head_offset = offsetof(struct mlxsw_sp_fib_entry, ht_node),
142 .key_len = sizeof(struct mlxsw_sp_fib_key),
143 .automatic_shrinking = true,
144};
145
146static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
147 struct mlxsw_sp_fib_entry *fib_entry)
148{
149 unsigned char prefix_len = fib_entry->key.prefix_len;
150 int err;
151
152 err = rhashtable_insert_fast(&fib->ht, &fib_entry->ht_node,
153 mlxsw_sp_fib_ht_params);
154 if (err)
155 return err;
156 if (fib->prefix_ref_count[prefix_len]++ == 0)
157 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
158 return 0;
159}
160
161static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
162 struct mlxsw_sp_fib_entry *fib_entry)
163{
164 unsigned char prefix_len = fib_entry->key.prefix_len;
165
166 if (--fib->prefix_ref_count[prefix_len] == 0)
167 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
168 rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node,
169 mlxsw_sp_fib_ht_params);
170}
171
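
The insert/remove pair above keeps per-prefix-length reference counts so that prefix_usage flips a bit on only for the first route of a length and clears it only when the last one goes away. A small sketch of that bookkeeping (hypothetical names):

#include <assert.h>
#include <stdbool.h>

#define PREFIX_COUNT 128

static unsigned long prefix_ref_count[PREFIX_COUNT];
static bool prefix_usage[PREFIX_COUNT]; /* one flag per prefix length */

static void fib_prefix_inc(unsigned char prefix_len)
{
	if (prefix_ref_count[prefix_len]++ == 0)
		prefix_usage[prefix_len] = true;
}

static void fib_prefix_dec(unsigned char prefix_len)
{
	if (--prefix_ref_count[prefix_len] == 0)
		prefix_usage[prefix_len] = false;
}

int main(void)
{
	fib_prefix_inc(24); /* first /24: bit goes on */
	fib_prefix_inc(24); /* second /24: refcount only */
	fib_prefix_dec(24);
	assert(prefix_usage[24]);  /* one /24 still present */
	fib_prefix_dec(24);
	assert(!prefix_usage[24]); /* last /24 gone: bit goes off */
	return 0;
}
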
172static struct mlxsw_sp_fib_entry *
173mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
174 size_t addr_len, unsigned char prefix_len)
175{
176 struct mlxsw_sp_fib_entry *fib_entry;
177
178 fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
179 if (!fib_entry)
180 return NULL;
181 memcpy(fib_entry->key.addr, addr, addr_len);
182 fib_entry->key.prefix_len = prefix_len;
183 return fib_entry;
184}
185
186static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
187{
188 kfree(fib_entry);
189}
190
191static struct mlxsw_sp_fib_entry *
192mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
193 size_t addr_len, unsigned char prefix_len)
194{
195 struct mlxsw_sp_fib_key key = {{ 0 } };
196
197 memcpy(key.addr, addr, addr_len);
198 key.prefix_len = prefix_len;
199 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
200}
201
202static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
203{
204 struct mlxsw_sp_fib *fib;
205 int err;
206
207 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
208 if (!fib)
209 return ERR_PTR(-ENOMEM);
210 err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
211 if (err)
212 goto err_rhashtable_init;
213 return fib;
214
215err_rhashtable_init:
216 kfree(fib);
217 return ERR_PTR(err);
218}
219
220static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
221{
222 rhashtable_destroy(&fib->ht);
223 kfree(fib);
224}
225
226static struct mlxsw_sp_lpm_tree *
227mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved)
228{
229	struct mlxsw_sp_lpm_tree *lpm_tree;
230 int i;
231
232 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
233 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
234 if (lpm_tree->ref_count == 0) {
235 if (one_reserved)
236 one_reserved = false;
237 else
238 return lpm_tree;
239 }
240 }
241 return NULL;
242}
243
244static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
245 struct mlxsw_sp_lpm_tree *lpm_tree)
246{
247 char ralta_pl[MLXSW_REG_RALTA_LEN];
248
249 mlxsw_reg_ralta_pack(ralta_pl, true, lpm_tree->proto, lpm_tree->id);
250 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
251}
252
253static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
254 struct mlxsw_sp_lpm_tree *lpm_tree)
255{
256 char ralta_pl[MLXSW_REG_RALTA_LEN];
257
258 mlxsw_reg_ralta_pack(ralta_pl, false, lpm_tree->proto, lpm_tree->id);
259 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
260}
261
262static int
263mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
264 struct mlxsw_sp_prefix_usage *prefix_usage,
265 struct mlxsw_sp_lpm_tree *lpm_tree)
266{
267 char ralst_pl[MLXSW_REG_RALST_LEN];
268 u8 root_bin = 0;
269 u8 prefix;
270 u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
271
272 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
273 root_bin = prefix;
274
275 mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
276 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
277 if (prefix == 0)
278 continue;
279 mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
280 MLXSW_REG_RALST_BIN_NO_CHILD);
281 last_prefix = prefix;
282 }
283 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
284}
285
286static struct mlxsw_sp_lpm_tree *
287mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
288 struct mlxsw_sp_prefix_usage *prefix_usage,
289 enum mlxsw_sp_l3proto proto, bool one_reserved)
290{
291 struct mlxsw_sp_lpm_tree *lpm_tree;
292 int err;
293
294 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
295 if (!lpm_tree)
296 return ERR_PTR(-EBUSY);
297 lpm_tree->proto = proto;
298 err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
299 if (err)
300 return ERR_PTR(err);
301
302 err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
303 lpm_tree);
304 if (err)
305 goto err_left_struct_set;
306 return lpm_tree;
307
308err_left_struct_set:
309 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
310 return ERR_PTR(err);
311}
312
313static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
314 struct mlxsw_sp_lpm_tree *lpm_tree)
315{
316 return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
317}
318
319static struct mlxsw_sp_lpm_tree *
320mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
321 struct mlxsw_sp_prefix_usage *prefix_usage,
322 enum mlxsw_sp_l3proto proto, bool one_reserved)
323{
324 struct mlxsw_sp_lpm_tree *lpm_tree;
325 int i;
326
327 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
328 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
329 if (lpm_tree->proto == proto &&
330 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
331 prefix_usage))
332 goto inc_ref_count;
333 }
334 lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
335 proto, one_reserved);
336 if (IS_ERR(lpm_tree))
337 return lpm_tree;
338
339inc_ref_count:
340 lpm_tree->ref_count++;
341 return lpm_tree;
342}
343
344static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
345 struct mlxsw_sp_lpm_tree *lpm_tree)
346{
347 if (--lpm_tree->ref_count == 0)
348 return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
349 return 0;
350}
351
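
mlxsw_sp_lpm_tree_get() is get-or-create with reference counting: a tree whose protocol and prefix usage match exactly is shared, otherwise an unused slot is programmed into hardware. A minimal sketch of the pattern with the RALTA/RALST writes stubbed out (hypothetical names):

#include <stdio.h>

#define TREE_COUNT 20

struct lpm_tree {
	unsigned int ref_count;
	unsigned int key; /* stands in for proto + prefix_usage */
};

static struct lpm_tree trees[TREE_COUNT];

static struct lpm_tree *lpm_tree_get(unsigned int key)
{
	struct lpm_tree *t;
	int i;

	for (i = 0; i < TREE_COUNT; i++) {
		t = &trees[i];
		if (t->ref_count && t->key == key)
			goto inc_ref_count; /* share the matching tree */
	}
	for (i = 0; i < TREE_COUNT; i++) {
		t = &trees[i];
		if (!t->ref_count) {
			t->key = key; /* here the driver programs RALTA/RALST */
			goto inc_ref_count;
		}
	}
	return NULL; /* -EBUSY in the driver */

inc_ref_count:
	t->ref_count++;
	return t;
}

static void lpm_tree_put(struct lpm_tree *t)
{
	if (--t->ref_count == 0) {
		/* here the driver frees the tree via RALTA */
	}
}

int main(void)
{
	struct lpm_tree *a = lpm_tree_get(24);
	struct lpm_tree *b = lpm_tree_get(24); /* shared, ref_count == 2 */

	printf("shared=%d\n", a == b);
	lpm_tree_put(b);
	lpm_tree_put(a);
	return 0;
}
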
352static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
353{
354 struct mlxsw_sp_lpm_tree *lpm_tree;
355 int i;
356
357 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
358 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
359 lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
360 }
361}
362
363static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
364{
365 struct mlxsw_sp_vr *vr;
366 int i;
367
368 for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
369 vr = &mlxsw_sp->router.vrs[i];
370 if (!vr->used)
371 return vr;
372 }
373 return NULL;
374}
375
376static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
377 struct mlxsw_sp_vr *vr)
378{
379 char raltb_pl[MLXSW_REG_RALTB_LEN];
380
381 mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, vr->lpm_tree->id);
382 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
383}
384
385static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
386 struct mlxsw_sp_vr *vr)
387{
388 char raltb_pl[MLXSW_REG_RALTB_LEN];
389
390	/* Bind to tree 0, which is the default */
391 mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, 0);
392 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
393}
394
395static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
396{
397	/* For our purposes, squash the main and local tables into one */
398 if (tb_id == RT_TABLE_LOCAL)
399 tb_id = RT_TABLE_MAIN;
400 return tb_id;
401}
402
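
Squashing RT_TABLE_LOCAL into RT_TABLE_MAIN makes routes from both kernel tables land in the same virtual router. A tiny sketch using the uapi table IDs (values assumed from linux/rtnetlink.h):

#include <assert.h>
#include <stdint.h>

#define RT_TABLE_MAIN  254 /* from linux/rtnetlink.h */
#define RT_TABLE_LOCAL 255

static uint32_t fix_tb_id(uint32_t tb_id)
{
	/* main and local share one virtual router */
	return tb_id == RT_TABLE_LOCAL ? RT_TABLE_MAIN : tb_id;
}

int main(void)
{
	assert(fix_tb_id(RT_TABLE_LOCAL) == RT_TABLE_MAIN);
	assert(fix_tb_id(RT_TABLE_MAIN) == RT_TABLE_MAIN);
	assert(fix_tb_id(100) == 100); /* user tables pass through */
	return 0;
}
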
403static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
404 u32 tb_id,
405 enum mlxsw_sp_l3proto proto)
406{
407 struct mlxsw_sp_vr *vr;
408 int i;
409
410 tb_id = mlxsw_sp_fix_tb_id(tb_id);
411 for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
412 vr = &mlxsw_sp->router.vrs[i];
413 if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
414 return vr;
415 }
416 return NULL;
417}
418
419static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
420 unsigned char prefix_len,
421 u32 tb_id,
422 enum mlxsw_sp_l3proto proto)
423{
424 struct mlxsw_sp_prefix_usage req_prefix_usage;
425 struct mlxsw_sp_lpm_tree *lpm_tree;
426 struct mlxsw_sp_vr *vr;
427 int err;
428
429 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
430 if (!vr)
431 return ERR_PTR(-EBUSY);
432 vr->fib = mlxsw_sp_fib_create();
433 if (IS_ERR(vr->fib))
434 return ERR_CAST(vr->fib);
435
436 vr->proto = proto;
437 vr->tb_id = tb_id;
438 mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
439 mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
440 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
441 proto, true);
442 if (IS_ERR(lpm_tree)) {
443 err = PTR_ERR(lpm_tree);
444 goto err_tree_get;
445 }
446 vr->lpm_tree = lpm_tree;
447 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
448 if (err)
449 goto err_tree_bind;
450
451 vr->used = true;
452 return vr;
453
454err_tree_bind:
455 mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
456err_tree_get:
457 mlxsw_sp_fib_destroy(vr->fib);
458
459 return ERR_PTR(err);
460}
461
462static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
463 struct mlxsw_sp_vr *vr)
464{
465 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
466 mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
467 mlxsw_sp_fib_destroy(vr->fib);
468 vr->used = false;
469}
470
471static int
472mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
473 struct mlxsw_sp_prefix_usage *req_prefix_usage)
474{
475 struct mlxsw_sp_lpm_tree *lpm_tree;
476
477 if (mlxsw_sp_prefix_usage_eq(req_prefix_usage,
478 &vr->lpm_tree->prefix_usage))
479 return 0;
480
481 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
482 vr->proto, false);
483 if (IS_ERR(lpm_tree)) {
484 /* We failed to get a tree according to the required
485		 * prefix usage. However, the current tree might still be good
486		 * for us if our requirement is a subset of the prefixes used
487 * in the tree.
488 */
489 if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
490 &vr->lpm_tree->prefix_usage))
491 return 0;
492 return PTR_ERR(lpm_tree);
493 }
494
495 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
496 mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
497 vr->lpm_tree = lpm_tree;
498 return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
499}
500
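
mlxsw_sp_vr_lpm_tree_check() has three outcomes: keep the current tree when the usage already matches (or when no new tree is available but the requirement is a subset of what the tree covers), replace it when a better tree can be allocated, or fail. A compressed sketch of that decision, with the inputs precomputed as booleans (hypothetical names):

#include <stdbool.h>
#include <stdio.h>

/* outcomes of re-checking a VR's LPM tree against required prefix usage */
enum tree_check { TREE_KEEP, TREE_REPLACE, TREE_FAIL };

static enum tree_check vr_tree_check(bool usage_equal, bool new_tree_ok,
				     bool required_is_subset)
{
	if (usage_equal)
		return TREE_KEEP; /* current tree already matches */
	if (!new_tree_ok)
		/* no free tree: the current one still works if it covers us */
		return required_is_subset ? TREE_KEEP : TREE_FAIL;
	return TREE_REPLACE;      /* unbind, put old tree, bind new one */
}

int main(void)
{
	printf("%d\n", vr_tree_check(false, false, true)); /* TREE_KEEP */
	return 0;
}
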
501static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
502 unsigned char prefix_len,
503 u32 tb_id,
504 enum mlxsw_sp_l3proto proto)
505{
506 struct mlxsw_sp_vr *vr;
507 int err;
508
509 tb_id = mlxsw_sp_fix_tb_id(tb_id);
510 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
511 if (!vr) {
512 vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
513 if (IS_ERR(vr))
514 return vr;
515 } else {
516 struct mlxsw_sp_prefix_usage req_prefix_usage;
517
518 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
519 &vr->fib->prefix_usage);
520 mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
521 /* Need to replace LPM tree in case new prefix is required. */
522 err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
523 &req_prefix_usage);
524 if (err)
525 return ERR_PTR(err);
526 }
527 return vr;
528}
529
530static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
531{
532	/* Destroy the virtual router entity if the associated FIB is empty,
533	 * allowing it to be reused for other tables in the future. Otherwise,
534	 * check whether some prefix usage disappeared and change the tree if
535	 * that is the case. Note that if a new, smaller tree cannot be
536	 * allocated, the original one is kept in use.
537 */
538 if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
539 mlxsw_sp_vr_destroy(mlxsw_sp, vr);
540 else
541 mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
542 &vr->fib->prefix_usage);
543}
544
545static void mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
546{
547 struct mlxsw_sp_vr *vr;
548 int i;
549
550 for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
551 vr = &mlxsw_sp->router.vrs[i];
552 vr->id = i;
553 }
554}
555
556struct mlxsw_sp_neigh_key {
557 unsigned char addr[sizeof(struct in6_addr)];
558 struct net_device *dev;
559};
560
561struct mlxsw_sp_neigh_entry {
562 struct rhash_head ht_node;
563 struct mlxsw_sp_neigh_key key;
564 u16 rif;
565 struct neighbour *n;
566 bool offloaded;
567 struct delayed_work dw;
568 struct mlxsw_sp_port *mlxsw_sp_port;
569 unsigned char ha[ETH_ALEN];
570 struct list_head nexthop_list; /* list of nexthops using
571 * this neigh entry
572 */
573 struct list_head nexthop_neighs_list_node;
574};
575
576static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
577 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
578 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
579 .key_len = sizeof(struct mlxsw_sp_neigh_key),
580};
581
582static int
583mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
584 struct mlxsw_sp_neigh_entry *neigh_entry)
585{
586 return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
587 &neigh_entry->ht_node,
588 mlxsw_sp_neigh_ht_params);
589}
590
591static void
592mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
593 struct mlxsw_sp_neigh_entry *neigh_entry)
594{
595 rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
596 &neigh_entry->ht_node,
597 mlxsw_sp_neigh_ht_params);
598}
599
600static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work);
601
602static struct mlxsw_sp_neigh_entry *
603mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len,
604 struct net_device *dev, u16 rif,
605 struct neighbour *n)
606{
607 struct mlxsw_sp_neigh_entry *neigh_entry;
608
609 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC);
610 if (!neigh_entry)
611 return NULL;
612 memcpy(neigh_entry->key.addr, addr, addr_len);
613 neigh_entry->key.dev = dev;
614 neigh_entry->rif = rif;
615 neigh_entry->n = n;
616 INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw);
617 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
618 return neigh_entry;
619}
620
621static void
622mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry)
623{
624 kfree(neigh_entry);
625}
626
627static struct mlxsw_sp_neigh_entry *
628mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr,
629 size_t addr_len, struct net_device *dev)
630{
631 struct mlxsw_sp_neigh_key key = {{ 0 } };
632
633 memcpy(key.addr, addr, addr_len);
634 key.dev = dev;
635 return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
636 &key, mlxsw_sp_neigh_ht_params);
637}
638
639int mlxsw_sp_router_neigh_construct(struct net_device *dev,
640 struct neighbour *n)
641{
642 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
643 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
644 struct mlxsw_sp_neigh_entry *neigh_entry;
645 struct mlxsw_sp_rif *r;
646 u32 dip;
647 int err;
648
649 if (n->tbl != &arp_tbl)
650 return 0;
651
652 dip = ntohl(*((__be32 *) n->primary_key));
653 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
654 n->dev);
655 if (neigh_entry) {
656 WARN_ON(neigh_entry->n != n);
657 return 0;
658 }
659
660 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
661 if (WARN_ON(!r))
662 return -EINVAL;
663
664 neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev,
665 r->rif, n);
666 if (!neigh_entry)
667 return -ENOMEM;
668 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
669 if (err)
670 goto err_neigh_entry_insert;
671 return 0;
672
673err_neigh_entry_insert:
674 mlxsw_sp_neigh_entry_destroy(neigh_entry);
675 return err;
676}
677
678void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
679 struct neighbour *n)
680{
681 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
682 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
683 struct mlxsw_sp_neigh_entry *neigh_entry;
684 u32 dip;
685
686 if (n->tbl != &arp_tbl)
687 return;
688
689 dip = ntohl(*((__be32 *) n->primary_key));
690 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
691 n->dev);
692 if (!neigh_entry)
693 return;
694 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
695 mlxsw_sp_neigh_entry_destroy(neigh_entry);
696}
697
698static void
699mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
700{
701 unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
702
703 mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval);
704}
705
706static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
707 char *rauhtd_pl,
708 int ent_index)
709{
710 struct net_device *dev;
711 struct neighbour *n;
712 __be32 dipn;
713 u32 dip;
714 u16 rif;
715
716 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
717
718 if (!mlxsw_sp->rifs[rif]) {
719 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
720 return;
721 }
722
723 dipn = htonl(dip);
724 dev = mlxsw_sp->rifs[rif]->dev;
725 n = neigh_lookup(&arp_tbl, &dipn, dev);
726 if (!n) {
727 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
728 &dip);
729 return;
730 }
731
732 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
733 neigh_event_send(n, NULL);
734 neigh_release(n);
735}
736
737static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
738 char *rauhtd_pl,
739 int rec_index)
740{
741 u8 num_entries;
742 int i;
743
744 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
745 rec_index);
746 /* Hardware starts counting at 0, so add 1. */
747 num_entries++;
748
749 /* Each record consists of several neighbour entries. */
750 for (i = 0; i < num_entries; i++) {
751 int ent_index;
752
753 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
754 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
755 ent_index);
756 }
757
758}
759
760static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
761 char *rauhtd_pl, int rec_index)
762{
763 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
764 case MLXSW_REG_RAUHTD_TYPE_IPV4:
765 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
766 rec_index);
767 break;
768 case MLXSW_REG_RAUHTD_TYPE_IPV6:
769 WARN_ON_ONCE(1);
770 break;
771 }
772}
773
774static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
775{
776 char *rauhtd_pl;
777 u8 num_rec;
778 int i, err;
779
780 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
781 if (!rauhtd_pl)
782 return -ENOMEM;
783
784 /* Make sure the neighbour's netdev isn't removed in the
785 * process.
786 */
787 rtnl_lock();
788 do {
789 mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
790 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
791 rauhtd_pl);
792 if (err) {
793			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
794 break;
795 }
796 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
797 for (i = 0; i < num_rec; i++)
798 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
799 i);
800 } while (num_rec);
801 rtnl_unlock();
802
803 kfree(rauhtd_pl);
804 return err;
805}
806
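
A RAUHTD dump returns records that each pack several IPv4 entries, and the code above flattens a (record, entry) pair into a single entry index. A sketch of that indexing (the per-record entry count of 4 is assumed here, standing in for MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC from reg.h):

#include <stdio.h>

#define IPV4_ENT_PER_REC 4 /* assumed value of MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC */

int main(void)
{
	int rec_index, i;

	for (rec_index = 0; rec_index < 2; rec_index++)
		for (i = 0; i < IPV4_ENT_PER_REC; i++)
			printf("rec %d ent %d -> flat index %d\n",
			       rec_index, i,
			       rec_index * IPV4_ENT_PER_REC + i);
	return 0;
}
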
807static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
808{
809 struct mlxsw_sp_neigh_entry *neigh_entry;
810
811	/* Take RTNL mutex here to prevent the lists from changing */
812 rtnl_lock();
813 list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
814 nexthop_neighs_list_node) {
815		/* If this neigh has nexthops, make the kernel think it is active
816		 * regardless of the traffic.
817 */
818 if (!list_empty(&neigh_entry->nexthop_list))
819 neigh_event_send(neigh_entry->n, NULL);
820 }
821 rtnl_unlock();
822}
823
824static void
825mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
826{
827 unsigned long interval = mlxsw_sp->router.neighs_update.interval;
828
829 mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw,
830 msecs_to_jiffies(interval));
831}
832
833static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
834{
835 struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
836 router.neighs_update.dw.work);
837 int err;
838
839 err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp);
840 if (err)
841		dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
842
843 mlxsw_sp_router_neighs_update_nh(mlxsw_sp);
844
845 mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp);
846}
847
848static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
849{
850 struct mlxsw_sp_neigh_entry *neigh_entry;
851 struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
852 router.nexthop_probe_dw.work);
853
854	/* Iterate over nexthop neighbours, find those that are unresolved and
855	 * send ARP on them. This solves the chicken-and-egg problem where a
856	 * nexthop is not offloaded until its neighbour is resolved, but the
857	 * neighbour is never resolved if traffic already flows in HW via a
858	 * different nexthop.
859 *
860 * Take RTNL mutex here to prevent lists from changes.
861 */
862 rtnl_lock();
863 list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
864 nexthop_neighs_list_node) {
865 if (!(neigh_entry->n->nud_state & NUD_VALID) &&
866 !list_empty(&neigh_entry->nexthop_list))
867 neigh_event_send(neigh_entry->n, NULL);
868 }
869 rtnl_unlock();
870
871 mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
872 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
873}
874
875static void
876mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
877 struct mlxsw_sp_neigh_entry *neigh_entry,
878 bool removing);
879
880static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
881{
882 struct mlxsw_sp_neigh_entry *neigh_entry =
883 container_of(work, struct mlxsw_sp_neigh_entry, dw.work);
884 struct neighbour *n = neigh_entry->n;
885 struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port;
886 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
887 char rauht_pl[MLXSW_REG_RAUHT_LEN];
888 struct net_device *dev;
889 bool entry_connected;
890 u8 nud_state;
891 bool updating;
892 bool removing;
893 bool adding;
894 u32 dip;
895 int err;
896
897 read_lock_bh(&n->lock);
898 dip = ntohl(*((__be32 *) n->primary_key));
899 memcpy(neigh_entry->ha, n->ha, sizeof(neigh_entry->ha));
900 nud_state = n->nud_state;
901 dev = n->dev;
902 read_unlock_bh(&n->lock);
903
904 entry_connected = nud_state & NUD_VALID;
905 adding = (!neigh_entry->offloaded) && entry_connected;
906 updating = neigh_entry->offloaded && entry_connected;
907 removing = neigh_entry->offloaded && !entry_connected;
908
909 if (adding || updating) {
910 mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD,
911 neigh_entry->rif,
912 neigh_entry->ha, dip);
913 err = mlxsw_reg_write(mlxsw_sp->core,
914 MLXSW_REG(rauht), rauht_pl);
915 if (err) {
916 netdev_err(dev, "Could not add neigh %pI4h\n", &dip);
917 neigh_entry->offloaded = false;
918 } else {
919 neigh_entry->offloaded = true;
920 }
921 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, false);
922 } else if (removing) {
923 mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE,
924 neigh_entry->rif,
925 neigh_entry->ha, dip);
926 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht),
927 rauht_pl);
928 if (err) {
929 netdev_err(dev, "Could not delete neigh %pI4h\n", &dip);
930 neigh_entry->offloaded = true;
931 } else {
932 neigh_entry->offloaded = false;
933 }
934 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, true);
935 }
936
937 neigh_release(n);
938 mlxsw_sp_port_dev_put(mlxsw_sp_port);
939}
940
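
The delayed work above reduces each neighbour to one of three operations from two bits of state: valid entries are (re)written, offloaded-but-invalid entries are deleted, and the rest are left alone. A sketch of that decision table (hypothetical names):

#include <stdbool.h>
#include <stdio.h>

enum rauht_op { RAUHT_NONE, RAUHT_ADD, RAUHT_DELETE };

/* mirrors the adding/updating/removing flags in the work function */
static enum rauht_op neigh_op(bool offloaded, bool entry_connected)
{
	if (entry_connected)
		return RAUHT_ADD;    /* add or update: same WRITE_ADD op */
	if (offloaded)
		return RAUHT_DELETE; /* was in HW, neigh no longer valid */
	return RAUHT_NONE;           /* nothing in HW, nothing to remove */
}

int main(void)
{
	printf("%d %d %d\n",
	       neigh_op(false, true),  /* adding */
	       neigh_op(true, true),   /* updating */
	       neigh_op(true, false)); /* removing */
	return 0;
}
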
941static int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
942 unsigned long event, void *ptr)
943{
944 struct mlxsw_sp_neigh_entry *neigh_entry;
945 struct mlxsw_sp_port *mlxsw_sp_port;
946 struct mlxsw_sp *mlxsw_sp;
947 unsigned long interval;
948 struct net_device *dev;
949 struct neigh_parms *p;
950 struct neighbour *n;
951 u32 dip;
952
953 switch (event) {
954 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
955 p = ptr;
956
957 /* We don't care about changes in the default table. */
958 if (!p->dev || p->tbl != &arp_tbl)
959 return NOTIFY_DONE;
960
961 /* We are in atomic context and can't take RTNL mutex,
962 * so use RCU variant to walk the device chain.
963 */
964 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
965 if (!mlxsw_sp_port)
966 return NOTIFY_DONE;
967
968 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
969 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
970 mlxsw_sp->router.neighs_update.interval = interval;
971
972 mlxsw_sp_port_dev_put(mlxsw_sp_port);
973 break;
974 case NETEVENT_NEIGH_UPDATE:
975 n = ptr;
976 dev = n->dev;
977
978 if (n->tbl != &arp_tbl)
979 return NOTIFY_DONE;
980
981 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(dev);
982 if (!mlxsw_sp_port)
983 return NOTIFY_DONE;
984
985 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
986 dip = ntohl(*((__be32 *) n->primary_key));
987 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp,
988 &dip,
989 sizeof(__be32),
990 dev);
991 if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) {
992 mlxsw_sp_port_dev_put(mlxsw_sp_port);
993 return NOTIFY_DONE;
994 }
995 neigh_entry->mlxsw_sp_port = mlxsw_sp_port;
996
997 /* Take a reference to ensure the neighbour won't be
998 * destructed until we drop the reference in delayed
999 * work.
1000 */
1001 neigh_clone(n);
1002 if (!mlxsw_core_schedule_dw(&neigh_entry->dw, 0)) {
1003 neigh_release(n);
1004 mlxsw_sp_port_dev_put(mlxsw_sp_port);
1005 }
1006 break;
1007 }
1008
1009 return NOTIFY_DONE;
1010}
1011
1012static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
1013 .notifier_call = mlxsw_sp_router_netevent_event,
1014};
1015
1016static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
1017{
1018 int err;
1019
1020 err = rhashtable_init(&mlxsw_sp->router.neigh_ht,
1021 &mlxsw_sp_neigh_ht_params);
1022 if (err)
1023 return err;
1024
1025 /* Initialize the polling interval according to the default
1026 * table.
1027 */
1028 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
1029
1030 err = register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
1031 if (err)
1032 goto err_register_netevent_notifier;
1033
1034 /* Create the delayed works for the activity_update */
1035 INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
1036 mlxsw_sp_router_neighs_update_work);
1037 INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw,
1038 mlxsw_sp_router_probe_unresolved_nexthops);
1039 mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
1040 mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
1041 return 0;
1042
1043err_register_netevent_notifier:
1044 rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
1045 return err;
1046}
1047
1048static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
1049{
1050 cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
1051 cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
1052 unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
1053 rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
1054}
1055
1056struct mlxsw_sp_nexthop {
1057 struct list_head neigh_list_node; /* member of neigh entry list */
1058 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
1059 * this belongs to
1060 */
1061	u8 should_offload:1, /* set indicates this neigh is connected and
1062			      * should be put into the KVD linear area of this group.
1063			      */
1064	   offloaded:1, /* set in case the neigh is actually put into
1065			 * the KVD linear area of this group.
1066			 */
1067 update:1; /* set indicates that MAC of this neigh should be
1068 * updated in HW
1069 */
1070 struct mlxsw_sp_neigh_entry *neigh_entry;
1071};
1072
1073struct mlxsw_sp_nexthop_group {
1074 struct list_head list; /* node in mlxsw->router.nexthop_group_list */
1075 struct list_head fib_list; /* list of fib entries that use this group */
1076 u8 adj_index_valid:1;
1077 u32 adj_index;
1078 u16 ecmp_size;
1079 u16 count;
1080 struct mlxsw_sp_nexthop nexthops[0];
1081};
1082
1083static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
1084 struct mlxsw_sp_vr *vr,
1085 u32 adj_index, u16 ecmp_size,
1086 u32 new_adj_index,
1087 u16 new_ecmp_size)
1088{
1089 char raleu_pl[MLXSW_REG_RALEU_LEN];
1090
1091 mlxsw_reg_raleu_pack(raleu_pl, vr->proto, vr->id,
1092 adj_index, ecmp_size,
1093 new_adj_index, new_ecmp_size);
1094 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
1095}
1096
1097static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
1098 struct mlxsw_sp_nexthop_group *nh_grp,
1099 u32 old_adj_index, u16 old_ecmp_size)
1100{
1101 struct mlxsw_sp_fib_entry *fib_entry;
1102 struct mlxsw_sp_vr *vr = NULL;
1103 int err;
1104
1105 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1106 if (vr == fib_entry->vr)
1107 continue;
1108 vr = fib_entry->vr;
1109 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr,
1110 old_adj_index,
1111 old_ecmp_size,
1112 nh_grp->adj_index,
1113 nh_grp->ecmp_size);
1114 if (err)
1115 return err;
1116 }
1117 return 0;
1118}
1119
1120static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
1121 struct mlxsw_sp_nexthop *nh)
1122{
1123 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
1124 char ratr_pl[MLXSW_REG_RATR_LEN];
1125
1126 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
1127 true, adj_index, neigh_entry->rif);
1128 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
1129 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
1130}
1131
1132static int
1133mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
1134 struct mlxsw_sp_nexthop_group *nh_grp)
1135{
1136 u32 adj_index = nh_grp->adj_index; /* base */
1137 struct mlxsw_sp_nexthop *nh;
1138 int i;
1139 int err;
1140
1141 for (i = 0; i < nh_grp->count; i++) {
1142 nh = &nh_grp->nexthops[i];
1143
1144 if (!nh->should_offload) {
1145 nh->offloaded = 0;
1146 continue;
1147 }
1148
1149 if (nh->update) {
1150 err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
1151 adj_index, nh);
1152 if (err)
1153 return err;
1154 nh->update = 0;
1155 nh->offloaded = 1;
1156 }
1157 adj_index++;
1158 }
1159 return 0;
1160}
1161
1162static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1163 struct mlxsw_sp_fib_entry *fib_entry);
1164
1165static int
1166mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
1167 struct mlxsw_sp_nexthop_group *nh_grp)
1168{
1169 struct mlxsw_sp_fib_entry *fib_entry;
1170 int err;
1171
1172 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1173 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1174 if (err)
1175 return err;
1176 }
1177 return 0;
1178}
1179
1180static void
1181mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
1182 struct mlxsw_sp_nexthop_group *nh_grp)
1183{
1184 struct mlxsw_sp_nexthop *nh;
1185 bool offload_change = false;
1186 u32 adj_index;
1187 u16 ecmp_size = 0;
1188 bool old_adj_index_valid;
1189 u32 old_adj_index;
1190 u16 old_ecmp_size;
1191 int ret;
1192 int i;
1193 int err;
1194
1195 for (i = 0; i < nh_grp->count; i++) {
1196 nh = &nh_grp->nexthops[i];
1197
1198 if (nh->should_offload ^ nh->offloaded) {
1199 offload_change = true;
1200 if (nh->should_offload)
1201 nh->update = 1;
1202 }
1203 if (nh->should_offload)
1204 ecmp_size++;
1205 }
1206 if (!offload_change) {
1207 /* Nothing was added or removed, so no need to reallocate. Just
1208 * update MAC on existing adjacency indexes.
1209 */
1210 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
1211 if (err) {
1212 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1213 goto set_trap;
1214 }
1215 return;
1216 }
1217 if (!ecmp_size)
1218 /* No neigh of this group is connected so we just set
1219		 * the trap and let everything flow through the kernel.
1220 */
1221 goto set_trap;
1222
1223 ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
1224 if (ret < 0) {
1225 /* We ran out of KVD linear space, just set the
1226		 * trap and let everything flow through the kernel.
1227 */
1228 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
1229 goto set_trap;
1230 }
1231 adj_index = ret;
1232 old_adj_index_valid = nh_grp->adj_index_valid;
1233 old_adj_index = nh_grp->adj_index;
1234 old_ecmp_size = nh_grp->ecmp_size;
1235 nh_grp->adj_index_valid = 1;
1236 nh_grp->adj_index = adj_index;
1237 nh_grp->ecmp_size = ecmp_size;
1238 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
1239 if (err) {
1240 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1241 goto set_trap;
1242 }
1243
1244 if (!old_adj_index_valid) {
1245 /* The trap was set for fib entries, so we have to call
1246 * fib entry update to unset it and use adjacency index.
1247 */
1248 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1249 if (err) {
1250 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
1251 goto set_trap;
1252 }
1253 return;
1254 }
1255
1256 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
1257 old_adj_index, old_ecmp_size);
1258 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
1259 if (err) {
1260 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
1261 goto set_trap;
1262 }
1263 return;
1264
1265set_trap:
1266 old_adj_index_valid = nh_grp->adj_index_valid;
1267 nh_grp->adj_index_valid = 0;
1268 for (i = 0; i < nh_grp->count; i++) {
1269 nh = &nh_grp->nexthops[i];
1270 nh->offloaded = 0;
1271 }
1272 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1273 if (err)
1274 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
1275 if (old_adj_index_valid)
1276 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
1277}
1278
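
The refresh above is make-before-break: size the ECMP group to the connected nexthops, allocate a new KVD linear block, write the MACs, repoint the routes (directly for new groups, via RALEU mass-update otherwise) and only then free the old block; any failure degrades to trapping traffic to the CPU. A compressed sketch of the control flow with the driver calls stubbed and error handling simplified (hypothetical names):

#include <stdbool.h>
#include <stdio.h>

/* stubs standing in for the driver calls; always succeed here */
static int kvdl_alloc(int n) { static int next; int r = next; next += n; return r; }
static void kvdl_free(int index) { (void)index; }
static int group_mac_update(void) { return 0; }
static int fib_entries_update(void) { return 0; }
static int mass_update(int old_index) { (void)old_index; return 0; }

struct nh_group {
	bool adj_index_valid;
	int adj_index;
	int ecmp_size;
};

static void group_refresh(struct nh_group *grp, int connected)
{
	bool old_valid = grp->adj_index_valid;
	int old_index = grp->adj_index;

	if (!connected)
		goto set_trap; /* nothing resolved: let the kernel route */
	grp->adj_index = kvdl_alloc(connected);
	grp->ecmp_size = connected;
	grp->adj_index_valid = true;
	if (group_mac_update())
		goto set_trap;
	if (!old_valid) {
		if (fib_entries_update()) /* switch routes off the trap */
			goto set_trap;
		return;
	}
	mass_update(old_index); /* repoint routes, then drop old block */
	kvdl_free(old_index);
	return;

set_trap:
	grp->adj_index_valid = false;
	fib_entries_update(); /* point routes back at the CPU trap */
	if (old_valid)
		kvdl_free(old_index);
}

int main(void)
{
	struct nh_group grp = { 0 };

	group_refresh(&grp, 2);
	printf("adj_index=%d ecmp_size=%d\n", grp.adj_index, grp.ecmp_size);
	return 0;
}
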
1279static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
1280 bool removing)
1281{
1282 if (!removing && !nh->should_offload)
1283 nh->should_offload = 1;
1284 else if (removing && nh->offloaded)
1285 nh->should_offload = 0;
1286 nh->update = 1;
1287}
1288
1289static void
1290mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1291 struct mlxsw_sp_neigh_entry *neigh_entry,
1292 bool removing)
1293{
1294 struct mlxsw_sp_nexthop *nh;
1295
1296	/* Take RTNL mutex here to prevent the lists from changing */
1297 rtnl_lock();
1298 list_for_each_entry(nh, &neigh_entry->nexthop_list,
1299 neigh_list_node) {
1300 __mlxsw_sp_nexthop_neigh_update(nh, removing);
1301 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1302 }
1303 rtnl_unlock();
1304}
1305
1306static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1307 struct mlxsw_sp_nexthop_group *nh_grp,
1308 struct mlxsw_sp_nexthop *nh,
1309 struct fib_nh *fib_nh)
1310{
1311 struct mlxsw_sp_neigh_entry *neigh_entry;
1312 u32 gwip = ntohl(fib_nh->nh_gw);
1313 struct net_device *dev = fib_nh->nh_dev;
1314 struct neighbour *n;
1315 u8 nud_state;
1316
1317 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
1318 sizeof(gwip), dev);
1319 if (!neigh_entry) {
1320 __be32 gwipn = htonl(gwip);
1321
1322 n = neigh_create(&arp_tbl, &gwipn, dev);
1323 if (IS_ERR(n))
1324 return PTR_ERR(n);
1325 neigh_event_send(n, NULL);
1326 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
1327 sizeof(gwip), dev);
1328 if (!neigh_entry) {
1329 neigh_release(n);
1330 return -EINVAL;
1331 }
1332 } else {
1333		/* Take a reference on the neigh here, ensuring that it is not
1334		 * destructed before the nexthop entry is finished. The other
1335		 * branch takes its reference in neigh_create().
1336 */
1337 n = neigh_entry->n;
1338 neigh_clone(n);
1339 }
1340
1341 /* If that is the first nexthop connected to that neigh, add to
1342 * nexthop_neighs_list
1343 */
1344 if (list_empty(&neigh_entry->nexthop_list))
1345 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
1346 &mlxsw_sp->router.nexthop_neighs_list);
1347
1348 nh->nh_grp = nh_grp;
1349 nh->neigh_entry = neigh_entry;
1350 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
1351 read_lock_bh(&n->lock);
1352 nud_state = n->nud_state;
1353 read_unlock_bh(&n->lock);
1354 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID));
1355
1356 return 0;
1357}
1358
1359static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
1360 struct mlxsw_sp_nexthop *nh)
1361{
1362 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
1363
1364 list_del(&nh->neigh_list_node);
1365
1366 /* If that is the last nexthop connected to that neigh, remove from
1367 * nexthop_neighs_list
1368 */
1369 if (list_empty(&nh->neigh_entry->nexthop_list))
1370 list_del(&nh->neigh_entry->nexthop_neighs_list_node);
1371
1372 neigh_release(neigh_entry->n);
1373}
1374
1375static struct mlxsw_sp_nexthop_group *
1376mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
1377{
1378 struct mlxsw_sp_nexthop_group *nh_grp;
1379 struct mlxsw_sp_nexthop *nh;
1380 struct fib_nh *fib_nh;
1381 size_t alloc_size;
1382 int i;
1383 int err;
1384
1385 alloc_size = sizeof(*nh_grp) +
1386 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
1387 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
1388 if (!nh_grp)
1389 return ERR_PTR(-ENOMEM);
1390 INIT_LIST_HEAD(&nh_grp->fib_list);
1391 nh_grp->count = fi->fib_nhs;
1392 for (i = 0; i < nh_grp->count; i++) {
1393 nh = &nh_grp->nexthops[i];
1394 fib_nh = &fi->fib_nh[i];
1395 err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
1396 if (err)
1397 goto err_nexthop_init;
1398 }
1399 list_add_tail(&nh_grp->list, &mlxsw_sp->router.nexthop_group_list);
1400 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
1401 return nh_grp;
1402
1403err_nexthop_init:
1404 for (i--; i >= 0; i--)
1405		mlxsw_sp_nexthop_fini(mlxsw_sp, &nh_grp->nexthops[i]);
1406 kfree(nh_grp);
1407 return ERR_PTR(err);
1408}
1409
1410static void
1411mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
1412 struct mlxsw_sp_nexthop_group *nh_grp)
1413{
1414 struct mlxsw_sp_nexthop *nh;
1415 int i;
1416
1417 list_del(&nh_grp->list);
1418 for (i = 0; i < nh_grp->count; i++) {
1419 nh = &nh_grp->nexthops[i];
1420 mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
1421 }
1422 kfree(nh_grp);
1423}
1424
1425static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh,
1426 struct fib_info *fi)
1427{
1428 int i;
1429
1430 for (i = 0; i < fi->fib_nhs; i++) {
1431 struct fib_nh *fib_nh = &fi->fib_nh[i];
1432 u32 gwip = ntohl(fib_nh->nh_gw);
1433
1434 if (memcmp(nh->neigh_entry->key.addr,
1435 &gwip, sizeof(u32)) == 0 &&
1436 nh->neigh_entry->key.dev == fib_nh->nh_dev)
1437 return true;
1438 }
1439 return false;
1440}
1441
1442static bool mlxsw_sp_nexthop_group_match(struct mlxsw_sp_nexthop_group *nh_grp,
1443 struct fib_info *fi)
1444{
1445 int i;
1446
1447 if (nh_grp->count != fi->fib_nhs)
1448 return false;
1449 for (i = 0; i < nh_grp->count; i++) {
1450 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
1451
1452 if (!mlxsw_sp_nexthop_match(nh, fi))
1453 return false;
1454 }
1455 return true;
1456}
1457
1458static struct mlxsw_sp_nexthop_group *
1459mlxsw_sp_nexthop_group_find(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
1460{
1461 struct mlxsw_sp_nexthop_group *nh_grp;
1462
1463 list_for_each_entry(nh_grp, &mlxsw_sp->router.nexthop_group_list,
1464 list) {
1465 if (mlxsw_sp_nexthop_group_match(nh_grp, fi))
1466 return nh_grp;
1467 }
1468 return NULL;
1469}
1470
1471static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
1472 struct mlxsw_sp_fib_entry *fib_entry,
1473 struct fib_info *fi)
1474{
1475 struct mlxsw_sp_nexthop_group *nh_grp;
1476
1477 nh_grp = mlxsw_sp_nexthop_group_find(mlxsw_sp, fi);
1478 if (!nh_grp) {
1479 nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
1480 if (IS_ERR(nh_grp))
1481 return PTR_ERR(nh_grp);
1482 }
1483 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
1484 fib_entry->nh_group = nh_grp;
1485 return 0;
1486}
1487
1488static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
1489 struct mlxsw_sp_fib_entry *fib_entry)
1490{
1491 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
1492
1493 list_del(&fib_entry->nexthop_group_node);
1494 if (!list_empty(&nh_grp->fib_list))
1495 return;
1496 mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
1497}

static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, MLXSW_SP_RIF_MAX);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
	INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		return err;
	mlxsw_sp_lpm_init(mlxsw_sp);
	mlxsw_sp_vrs_init(mlxsw_sp);
	return mlxsw_sp_neigh_init(mlxsw_sp);
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_neigh_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
}

static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->key.addr;
	struct mlxsw_sp_vr *vr = fib_entry->vr;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;
	/* In case the nexthop group adjacency index is valid, use it
	 * with the provided ECMP size. Otherwise, set up a trap and pass
	 * the traffic to the kernel.
	 */
	if (fib_entry->nh_group->adj_index_valid) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = fib_entry->nh_group->adj_index;
		ecmp_size = fib_entry->nh_group->ecmp_size;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
			      fib_entry->key.prefix_len, *p_dip);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->key.addr;
	struct mlxsw_sp_vr *vr = fib_entry->vr;

	mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
			      fib_entry->key.prefix_len, *p_dip);
	mlxsw_reg_ralue_act_local_pack(ralue_pl,
				       MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
				       fib_entry->rif);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->key.addr;
	struct mlxsw_sp_vr *vr = fib_entry->vr;

	mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
			      fib_entry->key.prefix_len, *p_dip);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry,
				  enum mlxsw_reg_ralue_op op)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
		return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
	}
	return -EINVAL;
}

static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_reg_ralue_op op)
{
	switch (fib_entry->vr->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_L3_PROTO_IPV6:
		return -EINVAL;
	}
	return -EINVAL;
}

static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	enum mlxsw_reg_ralue_op op;

	op = !fib_entry->added ? MLXSW_REG_RALUE_OP_WRITE_WRITE :
				 MLXSW_REG_RALUE_OP_WRITE_UPDATE;
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
}

static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
}

struct mlxsw_sp_router_fib4_add_info {
	struct switchdev_trans_item tritem;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_fib_entry *fib_entry;
};

static void mlxsw_sp_router_fib4_add_info_destroy(const void *data)
{
	const struct mlxsw_sp_router_fib4_add_info *info = data;
	struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry;
	struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp;
	struct mlxsw_sp_vr *vr = fib_entry->vr;

	/* Grab the VR before the entry is freed to avoid a use-after-free. */
	mlxsw_sp_fib_entry_destroy(fib_entry);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	kfree(info);
}

static int
mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp,
				const struct switchdev_obj_ipv4_fib *fib4,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	struct fib_info *fi = fib4->fi;

	if (fib4->type == RTN_LOCAL || fib4->type == RTN_BROADCAST) {
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	}
	if (fib4->type != RTN_UNICAST)
		return -EINVAL;

	if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
		struct mlxsw_sp_rif *r;

		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fi->fib_dev);
		if (!r)
			return -EINVAL;
		fib_entry->rif = r->rif;
		return 0;
	}
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
	return mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
}

static void
mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_REMOTE)
		return;
	mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
}

static int
mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_ipv4_fib *fib4,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_router_fib4_add_info *info;
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
			     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
					      sizeof(fib4->dst), fib4->dst_len);
	if (!fib_entry) {
		err = -ENOMEM;
		goto err_fib_entry_create;
	}
	fib_entry->vr = vr;

	err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
	if (err)
		goto err_fib4_entry_init;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto err_alloc_info;
	}
	info->mlxsw_sp = mlxsw_sp;
	info->fib_entry = fib_entry;
	switchdev_trans_item_enqueue(trans, info,
				     mlxsw_sp_router_fib4_add_info_destroy,
				     &info->tritem);
	return 0;

err_alloc_info:
	mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
err_fib4_entry_init:
	mlxsw_sp_fib_entry_destroy(fib_entry);
err_fib_entry_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return err;
}

static int
mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
				const struct switchdev_obj_ipv4_fib *fib4,
				struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_router_fib4_add_info *info;
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_vr *vr;
	int err;

	info = switchdev_trans_item_dequeue(trans);
	fib_entry = info->fib_entry;
	kfree(info);

	vr = fib_entry->vr;
	err = mlxsw_sp_fib_entry_insert(fib_entry->vr->fib, fib_entry);
	if (err)
		goto err_fib_entry_insert;
	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
	if (err)
		goto err_fib_entry_add;
	return 0;

err_fib_entry_add:
	mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
err_fib_entry_insert:
	mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
	mlxsw_sp_fib_entry_destroy(fib_entry);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return err;
}

int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_ipv4_fib *fib4,
			     struct switchdev_trans *trans)
{
	if (switchdev_trans_ph_prepare(trans))
		return mlxsw_sp_router_fib4_add_prepare(mlxsw_sp_port,
							fib4, trans);
	return mlxsw_sp_router_fib4_add_commit(mlxsw_sp_port,
					       fib4, trans);
}
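
mlxsw_sp_router_fib4_add() splits the work along the switchdev transaction
phases: prepare performs every allocation that may fail and parks the result
on the transaction queue, while commit dequeues it and applies it. A reduced
sketch of that two-phase shape, assuming the switchdev_trans_item_* helpers
used above; the state struct and the hardware-apply step are hypothetical:

struct my_state {
	struct switchdev_trans_item tritem;
	/* ...whatever the commit phase needs... */
};

static int foo_obj_add(const void *obj, struct switchdev_trans *trans)
{
	struct my_state *state;

	if (switchdev_trans_ph_prepare(trans)) {
		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return -ENOMEM;		/* only prepare may fail */
		switchdev_trans_item_enqueue(trans, state, kfree,
					     &state->tritem);
		return 0;
	}

	state = switchdev_trans_item_dequeue(trans);
	/* apply state to hardware here; this phase must not fail */
	kfree(state);
	return 0;
}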

int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_ipv4_fib *fib4)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
	if (!vr) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to find virtual router for FIB4 entry being removed.\n");
		return -ENOENT;
	}
	fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
					      sizeof(fib4->dst), fib4->dst_len);
	if (!fib_entry) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
		/* vr is a valid pointer here, so PTR_ERR(vr) would not be
		 * an error code; return -ENOENT explicitly.
		 */
		return -ENOENT;
	}
	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
	mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
	mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
	mlxsw_sp_fib_entry_destroy(fib_entry);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index a0c7376ee517..a1ad5e6bdfa8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -166,11 +166,6 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
 	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
 }
 
-static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
-{
-	return vfid >= MLXSW_SP_VFID_PORT_MAX;
-}
-
 static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
 				     u16 idx_begin, u16 idx_end, bool set,
 				     bool only_uc)
@@ -182,15 +177,10 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
 	char *sftr_pl;
 	int err;
 
-	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
+	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
 		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
-		if (mlxsw_sp_vfid_is_vport_br(idx_begin))
-			local_port = mlxsw_sp_port->local_port;
-		else
-			local_port = MLXSW_PORT_CPU_PORT;
-	} else {
+	else
 		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
-	}
 
 	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
 	if (!sftr_pl)
@@ -384,18 +374,6 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
 	return err;
 }
 
-static struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
-					      u16 fid)
-{
-	struct mlxsw_sp_fid *f;
-
-	list_for_each_entry(f, &mlxsw_sp->fids, list)
-		if (f->fid == fid)
-			return f;
-
-	return NULL;
-}
-
 static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
 {
 	char sfmr_pl[MLXSW_REG_SFMR_LEN];
@@ -426,8 +404,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
 	return f;
 }
 
-static struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp,
-						u16 fid)
+struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
 {
 	struct mlxsw_sp_fid *f;
 	int err;
@@ -462,13 +439,15 @@ err_fid_map:
 	return ERR_PTR(err);
 }
 
-static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp,
-				 struct mlxsw_sp_fid *f)
+void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
 {
 	u16 fid = f->fid;
 
 	list_del(&f->list);
 
+	if (f->r)
+		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+
 	kfree(f);
 
 	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
@@ -633,25 +612,6 @@ err_port_allow_untagged_set:
 	return err;
 }
 
-static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
-				  u16 vid_end)
-{
-	u16 vid;
-	int err;
-
-	for (vid = vid_begin; vid <= vid_end; vid++) {
-		err = mlxsw_sp_port_add_vid(dev, 0, vid);
-		if (err)
-			goto err_port_add_vid;
-	}
-	return 0;
-
-err_port_add_vid:
-	for (vid--; vid >= vid_begin; vid--)
-		mlxsw_sp_port_kill_vid(dev, 0, vid);
-	return err;
-}
-
 static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
 				     u16 vid_begin, u16 vid_end, bool is_member,
 				     bool untagged)
@@ -681,12 +641,8 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
 	u16 vid, old_pvid;
 	int err;
 
-	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
-	 * not bridged, then packets ingressing through the port with
-	 * the specified VIDs will be directed to CPU.
-	 */
 	if (!mlxsw_sp_port->bridged)
-		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);
+		return -EINVAL;
 
 	err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
 	if (err) {
@@ -776,9 +732,10 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
 			MLXSW_REG_SFD_OP_WRITE_REMOVE;
 }
 
-static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-				   const char *mac, u16 fid, bool adding,
-				   bool dynamic)
+static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+				     const char *mac, u16 fid, bool adding,
+				     enum mlxsw_reg_sfd_rec_action action,
+				     bool dynamic)
 {
 	char *sfd_pl;
 	int err;
@@ -789,14 +746,29 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 
 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
 	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
-			      mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
-			      local_port);
+			      mac, fid, action, local_port);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
 	kfree(sfd_pl);
 
 	return err;
 }
 
+static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+				   const char *mac, u16 fid, bool adding,
+				   bool dynamic)
+{
+	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
+					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
+}
+
+int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
+			bool adding)
+{
+	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
+					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
+					 false);
+}
+
 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
 				       const char *mac, u16 fid, u16 lag_vid,
 				       bool adding, bool dynamic)
@@ -1001,6 +973,11 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
 					      SWITCHDEV_OBJ_PORT_VLAN(obj),
 					      trans);
 		break;
+	case SWITCHDEV_OBJ_ID_IPV4_FIB:
+		err = mlxsw_sp_router_fib4_add(mlxsw_sp_port,
+					       SWITCHDEV_OBJ_IPV4_FIB(obj),
+					       trans);
+		break;
 	case SWITCHDEV_OBJ_ID_PORT_FDB:
 		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
 						   SWITCHDEV_OBJ_PORT_FDB(obj),
@@ -1019,21 +996,6 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
 	return err;
 }
 
-static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
-				   u16 vid_end)
-{
-	u16 vid;
-	int err;
-
-	for (vid = vid_begin; vid <= vid_end; vid++) {
-		err = mlxsw_sp_port_kill_vid(dev, 0, vid);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
 static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
 				     u16 vid_begin, u16 vid_end, bool init)
@@ -1041,12 +1003,8 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
 	u16 vid, pvid;
 	int err;
 
-	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
-	 * not bridged, then prevent packets ingressing through the
-	 * port with the specified VIDs from being trapped to CPU.
-	 */
 	if (!init && !mlxsw_sp_port->bridged)
-		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);
+		return -EINVAL;
 
 	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
 					false, false);
@@ -1165,6 +1123,10 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
 		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
 					      SWITCHDEV_OBJ_PORT_VLAN(obj));
 		break;
+	case SWITCHDEV_OBJ_ID_IPV4_FIB:
+		err = mlxsw_sp_router_fib4_del(mlxsw_sp_port,
+					       SWITCHDEV_OBJ_IPV4_FIB(obj));
+		break;
 	case SWITCHDEV_OBJ_ID_PORT_FDB:
 		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
 						   SWITCHDEV_OBJ_PORT_FDB(obj));
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 3842eab9449a..25f658b3849a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -316,7 +316,10 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
 		}
 	}
 	mlxsw_sx_txhdr_construct(skb, &tx_info);
-	len = skb->len;
+	/* TX header is consumed by HW on the way so we shouldn't count its
+	 * bytes as being sent.
+	 */
+	len = skb->len - MLXSW_TXHDR_LEN;
 	/* Due to a race we might fail here because of a full queue. In that
 	 * unlikely case we simply drop the packet.
 	 */
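
The corrected byte count also has to be sampled into a local before the skb is
handed to the hardware, since the skb may already be consumed by the time the
statistics are updated. A sketch of that accounting rule (the FOO_* names and
the enqueue helper are hypothetical):

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len - FOO_TXHDR_LEN;	/* HW strips the TX header */

	if (foo_hw_queue(skb))			/* hypothetical enqueue */
		return NETDEV_TX_BUSY;

	/* Not skb->len here: the skb belongs to the device now and may
	 * already be freed.
	 */
	dev->stats.tx_bytes += len;
	dev->stats.tx_packets++;
	return NETDEV_TX_OK;
}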
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 53a9550be75e..470d7696e9fe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -54,6 +54,11 @@ enum {
 	MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32,
 	MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
 	MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
+	MLXSW_TRAP_ID_ARPBC = 0x50,
+	MLXSW_TRAP_ID_ARPUC = 0x51,
+	MLXSW_TRAP_ID_IP2ME = 0x5F,
+	MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
+	MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90,
 
 	MLXSW_TRAP_ID_MAX = 0x1FF
 };
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 7066954c39d6..0a26b11ca8f6 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1151,7 +1151,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
 		enc28j60_phy_read(priv, PHIR);
 	}
 	/* TX complete handler */
-	if ((intflags & EIR_TXIF) != 0) {
+	if (((intflags & EIR_TXIF) != 0) &&
+	    ((intflags & EIR_TXERIF) == 0)) {
 		bool err = false;
 		loop++;
 		if (netif_msg_intr(priv))
@@ -1203,7 +1204,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
 			enc28j60_tx_clear(ndev, true);
 		} else
 			enc28j60_tx_clear(ndev, true);
-		locked_reg_bfclr(priv, EIR, EIR_TXERIF);
+		locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF);
 	}
 	/* RX Error handler */
 	if ((intflags & EIR_RXERIF) != 0) {
@@ -1238,6 +1239,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
  */
 static void enc28j60_hw_tx(struct enc28j60_net *priv)
 {
+	BUG_ON(!priv->tx_skb);
+
 	if (netif_msg_tx_queued(priv))
 		printk(KERN_DEBUG DRV_NAME
 			": Tx Packet Len:%d\n", priv->tx_skb->len);
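
The net effect of the first two hunks is that TXIF only counts as a clean
completion when TXERIF is clear, and both latch bits are acked together, so an
errored frame is handled exactly once, by the error path. A compilable
reduction of that classification (the bit values are illustrative stand-ins
for the driver's register definitions):

#include <stdint.h>

#define EIR_TXERIF	0x02	/* illustrative bit positions */
#define EIR_TXIF	0x08

enum tx_event { TX_NONE, TX_DONE, TX_ERROR };

static enum tx_event classify_tx_irq(uint8_t intflags)
{
	if (intflags & EIR_TXERIF)	/* error wins even if TXIF is set */
		return TX_ERROR;
	if (intflags & EIR_TXIF)
		return TX_DONE;
	return TX_NONE;
}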
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index e744acc18ef4..690635660195 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -63,7 +63,7 @@
 #define NFP_NET_POLL_TIMEOUT	5
 
 /* Bar allocation */
-#define NFP_NET_CRTL_BAR	0
+#define NFP_NET_CTRL_BAR	0
 #define NFP_NET_Q0_BAR		2
 #define NFP_NET_Q1_BAR		4	/* OBSOLETE */
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 2195ed3053da..1e74b911accb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1845,13 +1845,14 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
 }
 
 /**
- * nfp_net_write_mac_addr() - Write mac address to device registers
+ * nfp_net_write_mac_addr() - Write mac address to the device control BAR
  * @nn:      NFP Net device to reconfigure
- * @mac:     Six-byte MAC address to be written
  *
- * We do a bit of byte swapping dance because firmware is LE.
+ * Writes the MAC address from the netdev to the device control BAR.  Does not
+ * perform the required reconfig.  We do a bit of byte swapping dance because
+ * firmware is LE.
  */
-static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac)
+static void nfp_net_write_mac_addr(struct nfp_net *nn)
 {
 	nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
 		  get_unaligned_be32(nn->netdev->dev_addr));
@@ -1952,7 +1953,7 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
 	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ?
 		  0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1);
 
-	nfp_net_write_mac_addr(nn, nn->netdev->dev_addr);
+	nfp_net_write_mac_addr(nn);
 
 	nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
 	nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz);
@@ -2015,7 +2016,7 @@ static void nfp_net_open_stack(struct nfp_net *nn)
 
 	netif_tx_wake_all_queues(nn->netdev);
 
-	enable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+	enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
 	nfp_net_read_link_status(nn);
 }
 
@@ -2044,7 +2045,7 @@ static int nfp_net_netdev_open(struct net_device *netdev)
 			   NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
 	if (err)
 		goto err_free_exn;
-	disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
 
 	nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
 			       GFP_KERNEL);
@@ -2133,7 +2134,7 @@ static void nfp_net_close_stack(struct nfp_net *nn)
 {
 	unsigned int r;
 
-	disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
 	netif_carrier_off(nn->netdev);
 	nn->link_up = false;
 
@@ -2739,7 +2740,7 @@ int nfp_net_netdev_init(struct net_device *netdev)
 	nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
 	nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
 
-	nfp_net_write_mac_addr(nn, nn->netdev->dev_addr);
+	nfp_net_write_mac_addr(nn);
 
 	/* Set default MTU and Freelist buffer size */
 	if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
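
With the @mac parameter gone, the helper reads the address straight from the
netdev and pushes it out as one big-endian 32-bit word plus a 16-bit
remainder. A standalone model of that packing, equivalent to what
get_unaligned_be32()/get_unaligned_be16() produce:

#include <stdint.h>
#include <stdio.h>

static uint32_t be32_load(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

static uint16_t be16_load(const uint8_t *p)
{
	return ((uint16_t)p[0] << 8) | p[1];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x15, 0x4d, 0x12, 0x34, 0x56 };

	/* The driver writes these two values to the MACADDR config words. */
	printf("%08x %04x\n", be32_load(mac), be16_load(mac + 4));
	return 0;
}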
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index ccfef1f17627..7d7933d00b8f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -605,6 +605,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
 
 static const struct ethtool_ops nfp_net_ethtool_ops = {
 	.get_drvinfo		= nfp_net_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
 	.get_ringparam		= nfp_net_get_ringparam,
 	.set_ringparam		= nfp_net_set_ringparam,
 	.get_strings		= nfp_net_get_strings,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index e2b22b8a20f1..37abef016a0a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -124,11 +124,11 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
 	 * first NFP_NET_CFG_BAR_SZ of the BAR.  This keeps the code
 	 * identical for PF and VF drivers.
 	 */
-	ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CRTL_BAR),
+	ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
 				   NFP_NET_CFG_BAR_SZ);
 	if (!ctrl_bar) {
 		dev_err(&pdev->dev,
-			"Failed to map resource %d\n", NFP_NET_CRTL_BAR);
+			"Failed to map resource %d\n", NFP_NET_CTRL_BAR);
 		err = -EIO;
 		goto err_pci_regions;
 	}
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index b1ce7aaa8f8b..4d4ecba0aad9 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -425,7 +425,6 @@ struct netdata_local {
 	unsigned int		last_tx_idx;
 	unsigned int		num_used_tx_buffs;
 	struct mii_bus		*mii_bus;
-	struct phy_device	*phy_dev;
 	struct clk		*clk;
 	dma_addr_t		dma_buff_base_p;
 	void			*dma_buff_base_v;
@@ -750,7 +749,7 @@ static int lpc_mdio_reset(struct mii_bus *bus)
 static void lpc_handle_link_change(struct net_device *ndev)
 {
 	struct netdata_local *pldat = netdev_priv(ndev);
-	struct phy_device *phydev = pldat->phy_dev;
+	struct phy_device *phydev = ndev->phydev;
 	unsigned long flags;
 
 	bool status_change = false;
@@ -814,7 +813,6 @@ static int lpc_mii_probe(struct net_device *ndev)
 	pldat->link = 0;
 	pldat->speed = 0;
 	pldat->duplex = -1;
-	pldat->phy_dev = phydev;
 
 	phy_attached_info(phydev);
 
@@ -1048,8 +1046,8 @@ static int lpc_eth_close(struct net_device *ndev)
 	napi_disable(&pldat->napi);
 	netif_stop_queue(ndev);
 
-	if (pldat->phy_dev)
-		phy_stop(pldat->phy_dev);
+	if (ndev->phydev)
+		phy_stop(ndev->phydev);
 
 	spin_lock_irqsave(&pldat->lock, flags);
 	__lpc_eth_reset(pldat);
@@ -1185,8 +1183,7 @@ static void lpc_eth_set_multicast_list(struct net_device *ndev)
 
 static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
 {
-	struct netdata_local *pldat = netdev_priv(ndev);
-	struct phy_device *phydev = pldat->phy_dev;
+	struct phy_device *phydev = ndev->phydev;
 
 	if (!netif_running(ndev))
 		return -EINVAL;
@@ -1207,14 +1204,14 @@ static int lpc_eth_open(struct net_device *ndev)
 	__lpc_eth_clock_enable(pldat, true);
 
 	/* Suspended PHY makes LPC ethernet core block, so resume now */
-	phy_resume(pldat->phy_dev);
+	phy_resume(ndev->phydev);
 
 	/* Reset and initialize */
 	__lpc_eth_reset(pldat);
 	__lpc_eth_init(pldat);
 
 	/* schedule a link state check */
-	phy_start(pldat->phy_dev);
+	phy_start(ndev->phydev);
 	netif_start_queue(ndev);
 	napi_enable(&pldat->napi);
 
@@ -1247,37 +1244,13 @@ static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level)
 	pldat->msg_enable = level;
 }
 
-static int lpc_eth_ethtool_getsettings(struct net_device *ndev,
-				       struct ethtool_cmd *cmd)
-{
-	struct netdata_local *pldat = netdev_priv(ndev);
-	struct phy_device *phydev = pldat->phy_dev;
-
-	if (!phydev)
-		return -EOPNOTSUPP;
-
-	return phy_ethtool_gset(phydev, cmd);
-}
-
-static int lpc_eth_ethtool_setsettings(struct net_device *ndev,
-				       struct ethtool_cmd *cmd)
-{
-	struct netdata_local *pldat = netdev_priv(ndev);
-	struct phy_device *phydev = pldat->phy_dev;
-
-	if (!phydev)
-		return -EOPNOTSUPP;
-
-	return phy_ethtool_sset(phydev, cmd);
-}
-
 static const struct ethtool_ops lpc_eth_ethtool_ops = {
 	.get_drvinfo	= lpc_eth_ethtool_getdrvinfo,
-	.get_settings	= lpc_eth_ethtool_getsettings,
-	.set_settings	= lpc_eth_ethtool_setsettings,
 	.get_msglevel	= lpc_eth_ethtool_getmsglevel,
 	.set_msglevel	= lpc_eth_ethtool_setmsglevel,
 	.get_link	= ethtool_op_get_link,
+	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
 
 static const struct net_device_ops lpc_netdev_ops = {
@@ -1460,7 +1433,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
 	netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
 		    res->start, ndev->irq);
 
-	phydev = pldat->phy_dev;
+	phydev = ndev->phydev;
 
 	device_init_wakeup(&pdev->dev, 1);
 	device_set_wakeup_enable(&pdev->dev, 0);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 9a63df1184f1..35e53771533f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -489,8 +489,8 @@ struct qed_dev {
 
 	u32				int_mode;
 	enum qed_coalescing_mode	int_coalescing_mode;
-	u8				rx_coalesce_usecs;
-	u8				tx_coalesce_usecs;
+	u16				rx_coalesce_usecs;
+	u16				tx_coalesce_usecs;
 
 	/* Start Bar offset of first hwfn */
 	void __iomem			*regview;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index d121a8bf6b20..a12c6caa6c66 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -72,6 +72,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->mtu = cpu_to_le16(p_params->mtu);
 	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
 	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
+	p_ramrod->untagged = p_params->only_untagged;
 
 	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
 	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
@@ -247,10 +248,6 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
 	SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
 		  !!(accept_filter & QED_ACCEPT_NONE));
 
-	SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
-		  (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
-		   !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
-
 	SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
 		  !!(accept_filter & QED_ACCEPT_NONE));
 
@@ -1756,7 +1753,8 @@ static int qed_start_vport(struct qed_dev *cdev,
 			   start.vport_id, start.mtu);
 	}
 
-	qed_reset_vport_stats(cdev);
+	if (params->clear_stats)
+		qed_reset_vport_stats(cdev);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index e32ee57cdfee..1f13abb5c316 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1088,6 +1088,7 @@ static int qed_get_port_type(u32 media_type)
 	case MEDIA_SFPP_10G_FIBER:
 	case MEDIA_SFP_1G_FIBER:
 	case MEDIA_XFP_FIBER:
+	case MEDIA_MODULE_FIBER:
 	case MEDIA_KR:
 		port_type = PORT_FIBRE;
 		break;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index ad9bf5c85c3f..97ffeae262bb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -213,19 +213,15 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
 	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
 		  DQ_XCM_CORE_SPQ_PROD_CMD);
 	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
-
-	/* validate producer is up to-date */
-	rmb();
-
 	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
 
-	/* do not reorder */
-	barrier();
+	/* make sure the SPQE is updated before the doorbell */
+	wmb();
 
 	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
 
 	/* make sure doorbell is rung */
-	mmiowb();
+	wmb();
 
 	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
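
The hunk above replaces a misplaced rmb() and a compiler-only barrier() with
wmb(), the barrier that actually orders the SPQ element store against the
doorbell write. The generic shape of that rule, assuming the kernel MMIO
primitives and a hypothetical descriptor layout:

static void publish_and_ring(u32 *desc_owner, u32 prod, void __iomem *db)
{
	*desc_owner = 1;	/* hand the descriptor to the device */

	/* Order the descriptor store before the doorbell MMIO write so
	 * the device never fetches stale data after seeing the doorbell.
	 */
	wmb();

	writel(prod, db);
}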
@@ -620,7 +616,9 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
 
 	*p_en2 = *p_ent;
 
-	kfree(p_ent);
+	/* EBLOCK is responsible for freeing the allocated p_ent */
+	if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
+		kfree(p_ent);
 
 	p_ent = p_en2;
 }
@@ -755,6 +753,15 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
 		 * Thus, after gaining the answer perform the cleanup here.
 		 */
 		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+
+		if (p_ent->queue == &p_spq->unlimited_pending) {
+			/* This is an allocated p_ent which does not need to
+			 * be returned to the pool.
+			 */
+			kfree(p_ent);
+			return rc;
+		}
+
 		if (rc)
 			goto spq_post_fail2;
 
@@ -850,8 +857,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
 		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
 					fw_return_code);
 
-	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
-		/* EBLOCK is responsible for freeing its own entry */
+	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
+	    (found->queue == &p_spq->unlimited_pending))
+		/* EBLOCK is responsible for returning its own entry into the
+		 * free list, unless it originally added the entry into the
+		 * unlimited pending list.
+		 */
 		qed_spq_return_entry(p_hwfn, found);
 
 	/* Attempt to post pending requests */
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 1441c8f6d414..02b06d4e40ae 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -24,7 +24,7 @@
 #include <linux/qed/qed_eth_if.h>
 
 #define QEDE_MAJOR_VERSION		8
-#define QEDE_MINOR_VERSION		7
+#define QEDE_MINOR_VERSION		10
 #define QEDE_REVISION_VERSION		1
 #define QEDE_ENGINEERING_VERSION	20
 #define DRV_MODULE_VERSION	__stringify(QEDE_MAJOR_VERSION) "."	\
@@ -143,6 +143,8 @@ struct qede_dev {
 	struct mutex			qede_lock;
 	u32				state; /* Protected by qede_lock */
 	u16				rx_buf_size;
+	u32				rx_copybreak;
+
 	/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
 #define ETH_OVERHEAD			(ETH_HLEN + 8 + 8)
 	/* Max supported alignment is 256 (8 shift)
@@ -235,6 +237,7 @@ struct qede_rx_queue {
 
 	u64			rx_hw_errors;
 	u64			rx_alloc_errors;
+	u64			rx_ip_frags;
 };
 
 union db_prod {
@@ -332,6 +335,7 @@ void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
 #define NUM_TX_BDS_MIN		128
 #define NUM_TX_BDS_DEF		NUM_TX_BDS_MAX
 
+#define QEDE_MIN_PKT_LEN	64
 #define QEDE_RX_HDR_SIZE	256
 #define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
 
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 6228482bf7f0..f8492cac9290 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -37,6 +37,7 @@ static const struct {
 } qede_rqstats_arr[] = {
 	QEDE_RQSTAT(rx_hw_errors),
 	QEDE_RQSTAT(rx_alloc_errors),
+	QEDE_RQSTAT(rx_ip_frags),
 };
 
 #define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
@@ -430,11 +431,13 @@ static int qede_get_coalesce(struct net_device *dev,
 			     struct ethtool_coalesce *coal)
 {
 	struct qede_dev *edev = netdev_priv(dev);
+	u16 rxc, txc;
 
 	memset(coal, 0, sizeof(struct ethtool_coalesce));
-	edev->ops->common->get_coalesce(edev->cdev,
-					(u16 *)&coal->rx_coalesce_usecs,
-					(u16 *)&coal->tx_coalesce_usecs);
+	edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc);
+
+	coal->rx_coalesce_usecs = rxc;
+	coal->tx_coalesce_usecs = txc;
 
 	return 0;
 }
@@ -1182,6 +1185,48 @@ static void qede_self_test(struct net_device *dev,
 	}
 }
 
+static int qede_set_tunable(struct net_device *dev,
+			    const struct ethtool_tunable *tuna,
+			    const void *data)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	u32 val;
+
+	switch (tuna->id) {
+	case ETHTOOL_RX_COPYBREAK:
+		val = *(u32 *)data;
+		if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) {
+			DP_VERBOSE(edev, QED_MSG_DEBUG,
+				   "Invalid rx copy break value, range is [%u, %u]",
+				   QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE);
+			return -EINVAL;
+		}
+
+		edev->rx_copybreak = *(u32 *)data;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int qede_get_tunable(struct net_device *dev,
+			    const struct ethtool_tunable *tuna, void *data)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	switch (tuna->id) {
+	case ETHTOOL_RX_COPYBREAK:
+		*(u32 *)data = edev->rx_copybreak;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
 static const struct ethtool_ops qede_ethtool_ops = {
 	.get_settings = qede_get_settings,
 	.set_settings = qede_set_settings,
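
Once these handlers are wired into the ethtool_ops below, the copybreak
threshold should be reachable from userspace through the standard tunable
interface, e.g. (assuming an ethtool binary built with tunable support):

	ethtool --set-tunable eth0 rx-copybreak 128
	ethtool --get-tunable eth0 rx-copybreak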
@@ -1210,6 +1255,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
 	.get_channels = qede_get_channels,
 	.set_channels = qede_set_channels,
 	.self_test = qede_self_test,
+	.get_tunable = qede_get_tunable,
+	.set_tunable = qede_set_tunable,
 };
 
 static const struct ethtool_ops qede_vf_ethtool_ops = {
@@ -1232,6 +1279,8 @@ static const struct ethtool_ops qede_vf_ethtool_ops = {
 	.set_rxfh = qede_set_rxfh,
 	.get_channels = qede_get_channels,
 	.set_channels = qede_set_channels,
+	.get_tunable = qede_get_tunable,
+	.set_tunable = qede_set_tunable,
 };
 
 void qede_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 2972742c6adb..91e7bb0b85c8 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -485,6 +485,24 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
 }
 #endif
 
+static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
+{
+	/* wmb makes sure that the BDs data is updated before updating the
+	 * producer, otherwise FW may read old data from the BDs.
+	 */
+	wmb();
+	barrier();
+	writel(txq->tx_db.raw, txq->doorbell_addr);
+
+	/* mmiowb is needed to synchronize doorbell writes from more than one
+	 * processor. It guarantees that the write arrives to the device before
+	 * the queue lock is released and another start_xmit is called (possibly
+	 * on another CPU). Without this barrier, the next doorbell can bypass
+	 * this doorbell. This is applicable to IA64/Altix systems.
+	 */
+	mmiowb();
+}
+
 /* Main transmit function */
 static
 netdev_tx_t qede_start_xmit(struct sk_buff *skb,
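
Factoring the doorbell into qede_update_tx_producer() is what lets the later
hunk skip it while skb->xmit_more says more packets are coming, ringing once
per burst; the error paths still ring so BDs already published to the ring get
consumed. A reduced sketch of the batched shape (post_descriptors() is
hypothetical; skb->xmit_more is the flag this kernel generation uses):

static netdev_tx_t batched_xmit(struct sk_buff *skb,
				struct qede_tx_queue *txq,
				struct netdev_queue *netdev_txq)
{
	post_descriptors(skb, txq);	/* hypothetical BD setup */

	/* Ring once per burst: defer the doorbell while the stack says
	 * more packets follow, unless the queue just stopped.
	 */
	if (!skb->xmit_more || netif_tx_queue_stopped(netdev_txq))
		qede_update_tx_producer(txq);

	return NETDEV_TX_OK;
}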
@@ -543,6 +561,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
 		DP_NOTICE(edev, "SKB mapping failed\n");
 		qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+		qede_update_tx_producer(txq);
 		return NETDEV_TX_OK;
 	}
 	nbd++;
@@ -657,6 +676,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 	if (rc) {
 		qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
 					data_split);
+		qede_update_tx_producer(txq);
 		return NETDEV_TX_OK;
 	}
 
@@ -681,6 +701,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 	if (rc) {
 		qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
 					data_split);
+		qede_update_tx_producer(txq);
 		return NETDEV_TX_OK;
 	}
 }
@@ -701,20 +722,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 	txq->tx_db.data.bd_prod =
 		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
 
-	/* wmb makes sure that the BDs data is updated before updating the
-	 * producer, otherwise FW may read old data from the BDs.
-	 */
-	wmb();
-	barrier();
-	writel(txq->tx_db.raw, txq->doorbell_addr);
-
-	/* mmiowb is needed to synchronize doorbell writes from more than one
-	 * processor. It guarantees that the write arrives to the device before
-	 * the queue lock is released and another start_xmit is called (possibly
-	 * on another CPU). Without this barrier, the next doorbell can bypass
-	 * this doorbell. This is applicable to IA64/Altix systems.
-	 */
-	mmiowb();
+	if (!skb->xmit_more || netif_tx_queue_stopped(netdev_txq))
+		qede_update_tx_producer(txq);
 
 	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
 		      < (MAX_SKB_FRAGS + 1))) {
@@ -1348,6 +1357,20 @@ static u8 qede_check_csum(u16 flag)
 	return qede_check_tunn_csum(flag);
 }
 
+static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
+				      u16 flag)
+{
+	u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
+
+	if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
+			     ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
+	    (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+		     PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
+		return true;
+
+	return false;
+}
+
 static int qede_rx_int(struct qede_fastpath *fp, int budget)
 {
 	struct qede_dev *edev = fp->edev;
@@ -1426,6 +1449,12 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 
 		csum_flag = qede_check_csum(parse_flag);
 		if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+			if (qede_pkt_is_ip_fragmented(&cqe->fast_path_regular,
+						      parse_flag)) {
+				rxq->rx_ip_frags++;
+				goto alloc_skb;
+			}
+
 			DP_NOTICE(edev,
 				  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
 				  sw_comp_cons, parse_flag);
@@ -1434,6 +1463,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 			goto next_cqe;
 		}
 
+alloc_skb:
 		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
 		if (unlikely(!skb)) {
 			DP_NOTICE(edev,
@@ -1444,7 +1474,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 		}
 
 		/* Copy data into SKB */
-		if (len + pad <= QEDE_RX_HDR_SIZE) {
+		if (len + pad <= edev->rx_copybreak) {
 			memcpy(skb_put(skb, len),
 			       page_address(data) + pad +
 			       sw_rx_data->page_offset, len);
@@ -1576,56 +1606,49 @@ next_cqe: /* don't consume bd rx buffer */
 
 static int qede_poll(struct napi_struct *napi, int budget)
 {
-	int work_done = 0;
 	struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
 						napi);
 	struct qede_dev *edev = fp->edev;
+	int rx_work_done = 0;
+	u8 tc;
 
-	while (1) {
-		u8 tc;
-
-		for (tc = 0; tc < edev->num_tc; tc++)
-			if (qede_txq_has_work(&fp->txqs[tc]))
-				qede_tx_int(edev, &fp->txqs[tc]);
-
-		if (qede_has_rx_work(fp->rxq)) {
-			work_done += qede_rx_int(fp, budget - work_done);
-
-			/* must not complete if we consumed full budget */
-			if (work_done >= budget)
-				break;
-		}
+	for (tc = 0; tc < edev->num_tc; tc++)
+		if (qede_txq_has_work(&fp->txqs[tc]))
+			qede_tx_int(edev, &fp->txqs[tc]);
+
+	rx_work_done = qede_has_rx_work(fp->rxq) ?
+			qede_rx_int(fp, budget) : 0;
+	if (rx_work_done < budget) {
+		qed_sb_update_sb_idx(fp->sb_info);
+		/* *_has_*_work() reads the status block,
+		 * thus we need to ensure that status block indices
+		 * have been actually read (qed_sb_update_sb_idx)
+		 * prior to this check (*_has_*_work) so that
+		 * we won't write the "newer" value of the status block
+		 * to HW (if there was a DMA right after
+		 * qede_has_rx_work and if there is no rmb, the memory
+		 * reading (qed_sb_update_sb_idx) may be postponed
+		 * to right before *_ack_sb). In this case there
+		 * will never be another interrupt until there is
+		 * another update of the status block, while there
+		 * is still unhandled work.
+		 */
+		rmb();
 
 		/* Fall out from the NAPI loop if needed */
-		if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
-			qed_sb_update_sb_idx(fp->sb_info);
-			/* *_has_*_work() reads the status block,
-			 * thus we need to ensure that status block indices
-			 * have been actually read (qed_sb_update_sb_idx)
-			 * prior to this check (*_has_*_work) so that
-			 * we won't write the "newer" value of the status block
-			 * to HW (if there was a DMA right after
-			 * qede_has_rx_work and if there is no rmb, the memory
-			 * reading (qed_sb_update_sb_idx) may be postponed
-			 * to right before *_ack_sb). In this case there
-			 * will never be another interrupt until there is
-			 * another update of the status block, while there
-			 * is still unhandled work.
-			 */
-			rmb();
-
-			if (!(qede_has_rx_work(fp->rxq) ||
-			      qede_has_tx_work(fp))) {
-				napi_complete(napi);
-				/* Update and reenable interrupts */
-				qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
-					   1 /*update*/);
-				break;
-			}
+		if (!(qede_has_rx_work(fp->rxq) ||
+		      qede_has_tx_work(fp))) {
+			napi_complete(napi);
+
+			/* Update and reenable interrupts */
+			qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
+				   1 /*update*/);
+		} else {
+			rx_work_done = budget;
 		}
 	}
 
-	return work_done;
+	return rx_work_done;
 }
 
 static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
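
The rewrite above lands on the canonical NAPI contract: do at most budget
units of RX work, and only when the budget was not exhausted re-check for work
(after the rmb()) and either complete NAPI and re-enable interrupts, or report
the full budget so the core keeps polling. Its skeleton, with the device
specifics stubbed out as hypothetical helpers:

static int napi_poll_shape(struct napi_struct *napi, int budget)
{
	int done = do_rx(budget);	/* never exceeds budget */

	if (done < budget) {
		if (more_work_pending())	/* re-check after rmb() */
			return budget;		/* stay scheduled */
		napi_complete(napi);
		irq_enable();			/* device IRQs back on */
	}
	return done;
}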
@@ -2496,6 +2519,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 
 	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
 	mutex_init(&edev->qede_lock);
+	edev->rx_copybreak = QEDE_RX_HDR_SIZE;
 
 	DP_INFO(edev, "Ending successfully qede probe\n");
 
@@ -3222,7 +3246,7 @@ static int qede_stop_queues(struct qede_dev *edev)
 	return rc;
 }
 
-static int qede_start_queues(struct qede_dev *edev)
+static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 {
 	int rc, tc, i;
 	int vlan_removal_en = 1;
@@ -3453,6 +3477,7 @@ out:
 
 enum qede_load_mode {
 	QEDE_LOAD_NORMAL,
+	QEDE_LOAD_RELOAD,
 };
 
 static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
@@ -3491,7 +3516,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
 		goto err3;
 	DP_INFO(edev, "Setup IRQs succeeded\n");
 
-	rc = qede_start_queues(edev);
+	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
 	if (rc)
 		goto err4;
 	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
@@ -3546,7 +3571,7 @@ void qede_reload(struct qede_dev *edev,
 	if (func)
 		func(edev, args);
 
-	qede_load(edev, QEDE_LOAD_NORMAL);
+	qede_load(edev, QEDE_LOAD_RELOAD);
 
 	mutex_lock(&edev->qede_lock);
 	qede_config_rx_mode(edev->ndev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 7bd6f25b4625..87c642d3b075 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -772,6 +772,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
772 tx_ring->tx_stats.tx_bytes += skb->len; 772 tx_ring->tx_stats.tx_bytes += skb->len;
773 tx_ring->tx_stats.xmit_called++; 773 tx_ring->tx_stats.xmit_called++;
774 774
775 /* Ensure writes are complete before HW fetches Tx descriptors */
776 wmb();
775 qlcnic_update_cmd_producer(tx_ring); 777 qlcnic_update_cmd_producer(tx_ring);
776 778
777 return NETDEV_TX_OK; 779 return NETDEV_TX_OK;
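The wmb() added above enforces the standard publish-then-doorbell ordering: every descriptor and buffer store must be globally visible before the producer update that lets the NIC start fetching. Reduced to its skeleton (my_ring and its doorbell are made-up stand-ins):

#include <asm/barrier.h>
#include <linux/io.h>

struct my_ring { void __iomem *doorbell; };	/* hypothetical */

static void my_tx_publish(struct my_ring *ring, u32 producer)
{
	/* Descriptor writes must complete before the device can observe
	 * the new producer index; the doorbell write below may trigger a
	 * fetch immediately.
	 */
	wmb();
	writel(producer, ring->doorbell);
}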
@@ -2220,7 +2222,7 @@ void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
2220 if (!opcode) 2222 if (!opcode)
2221 return; 2223 return;
2222 2224
2223 ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0])); 2225 ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
2224 qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data); 2226 qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
2225 desc = &sds_ring->desc_head[consumer]; 2227 desc = &sds_ring->desc_head[consumer];
2226 desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM); 2228 desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 6b541e57c96a..cb29ee24cf1b 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -4,7 +4,7 @@
4 * Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw> 4 * Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw>
5 * Copyright (C) 2007 5 * Copyright (C) 2007
6 * Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us> 6 * Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
7 * Copyright (C) 2007-2012 Florian Fainelli <florian@openwrt.org> 7 * Copyright (C) 2007-2012 Florian Fainelli <f.fainelli@gmail.com>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
@@ -48,8 +48,8 @@
48#include <asm/processor.h> 48#include <asm/processor.h>
49 49
50#define DRV_NAME "r6040" 50#define DRV_NAME "r6040"
51#define DRV_VERSION "0.28" 51#define DRV_VERSION "0.29"
52#define DRV_RELDATE "07Oct2011" 52#define DRV_RELDATE "04Jul2016"
53 53
54/* Time in jiffies before concluding the transmitter is hung. */ 54/* Time in jiffies before concluding the transmitter is hung. */
55#define TX_TIMEOUT (6000 * HZ / 1000) 55#define TX_TIMEOUT (6000 * HZ / 1000)
@@ -162,7 +162,7 @@
162 162
163MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>," 163MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>,"
164 "Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>," 164 "Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>,"
165 "Florian Fainelli <florian@openwrt.org>"); 165 "Florian Fainelli <f.fainelli@gmail.com>");
166MODULE_LICENSE("GPL"); 166MODULE_LICENSE("GPL");
167MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver"); 167MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver");
168MODULE_VERSION(DRV_VERSION " " DRV_RELDATE); 168MODULE_VERSION(DRV_VERSION " " DRV_RELDATE);
@@ -200,7 +200,6 @@ struct r6040_private {
200 struct mii_bus *mii_bus; 200 struct mii_bus *mii_bus;
201 struct napi_struct napi; 201 struct napi_struct napi;
202 void __iomem *base; 202 void __iomem *base;
203 struct phy_device *phydev;
204 int old_link; 203 int old_link;
205 int old_duplex; 204 int old_duplex;
206}; 205};
@@ -474,7 +473,7 @@ static void r6040_down(struct net_device *dev)
474 iowrite16(adrp[1], ioaddr + MID_0M); 473 iowrite16(adrp[1], ioaddr + MID_0M);
475 iowrite16(adrp[2], ioaddr + MID_0H); 474 iowrite16(adrp[2], ioaddr + MID_0H);
476 475
477 phy_stop(lp->phydev); 476 phy_stop(dev->phydev);
478} 477}
479 478
480static int r6040_close(struct net_device *dev) 479static int r6040_close(struct net_device *dev)
@@ -515,12 +514,10 @@ static int r6040_close(struct net_device *dev)
515 514
516static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 515static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
517{ 516{
518 struct r6040_private *lp = netdev_priv(dev); 517 if (!dev->phydev)
519
520 if (!lp->phydev)
521 return -EINVAL; 518 return -EINVAL;
522 519
523 return phy_mii_ioctl(lp->phydev, rq, cmd); 520 return phy_mii_ioctl(dev->phydev, rq, cmd);
524} 521}
525 522
526static int r6040_rx(struct net_device *dev, int limit) 523static int r6040_rx(struct net_device *dev, int limit)
@@ -617,10 +614,15 @@ static void r6040_tx(struct net_device *dev)
617 if (descptr->status & DSC_OWNER_MAC) 614 if (descptr->status & DSC_OWNER_MAC)
618 break; /* Not complete */ 615 break; /* Not complete */
619 skb_ptr = descptr->skb_ptr; 616 skb_ptr = descptr->skb_ptr;
617
618 /* Statistic Counter */
619 dev->stats.tx_packets++;
620 dev->stats.tx_bytes += skb_ptr->len;
621
620 pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf), 622 pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
621 skb_ptr->len, PCI_DMA_TODEVICE); 623 skb_ptr->len, PCI_DMA_TODEVICE);
622 /* Free buffer */ 624 /* Free buffer */
623 dev_kfree_skb_irq(skb_ptr); 625 dev_kfree_skb(skb_ptr);
624 descptr->skb_ptr = NULL; 626 descptr->skb_ptr = NULL;
625 /* To next descriptor */ 627 /* To next descriptor */
626 descptr = descptr->vndescp; 628 descptr = descptr->vndescp;
@@ -641,12 +643,15 @@ static int r6040_poll(struct napi_struct *napi, int budget)
641 void __iomem *ioaddr = priv->base; 643 void __iomem *ioaddr = priv->base;
642 int work_done; 644 int work_done;
643 645
646 r6040_tx(dev);
647
644 work_done = r6040_rx(dev, budget); 648 work_done = r6040_rx(dev, budget);
645 649
646 if (work_done < budget) { 650 if (work_done < budget) {
647 napi_complete(napi); 651 napi_complete_done(napi, work_done);
648 /* Enable RX interrupt */ 652 /* Enable RX/TX interrupt */
649 iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER); 653 iowrite16(ioread16(ioaddr + MIER) | RX_INTS | TX_INTS,
654 ioaddr + MIER);
650 } 655 }
651 return work_done; 656 return work_done;
652} 657}
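Reaping TX completions from the poll routine (instead of the hard interrupt) keeps all datapath work under NAPI, and napi_complete_done() reports the exact Rx work consumed. The overall shape of such a handler, with my_* placeholders rather than r6040's real symbols:

#include <linux/netdevice.h>

struct my_priv { struct napi_struct napi; /* ... */ };	/* hypothetical */
void my_tx_clean(struct my_priv *priv);
int my_rx(struct my_priv *priv, int budget);
void my_unmask_rx_tx_irqs(struct my_priv *priv);

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done;

	my_tx_clean(priv);			/* TX reap is not budgeted */
	work_done = my_rx(priv, budget);	/* RX is bounded by budget */

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		my_unmask_rx_tx_irqs(priv);	/* polling ends; IRQs resume */
	}

	return work_done;
}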
@@ -673,7 +678,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
673 } 678 }
674 679
675 /* RX interrupt request */ 680 /* RX interrupt request */
676 if (status & RX_INTS) { 681 if (status & (RX_INTS | TX_INTS)) {
677 if (status & RX_NO_DESC) { 682 if (status & RX_NO_DESC) {
678 /* RX descriptor unavailable */ 683 /* RX descriptor unavailable */
679 dev->stats.rx_dropped++; 684 dev->stats.rx_dropped++;
@@ -684,15 +689,11 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
684 689
685 if (likely(napi_schedule_prep(&lp->napi))) { 690 if (likely(napi_schedule_prep(&lp->napi))) {
686 /* Mask off RX interrupt */ 691 /* Mask off RX interrupt */
687 misr &= ~RX_INTS; 692 misr &= ~(RX_INTS | TX_INTS);
688 __napi_schedule(&lp->napi); 693 __napi_schedule_irqoff(&lp->napi);
689 } 694 }
690 } 695 }
691 696
692 /* TX interrupt request */
693 if (status & TX_INTS)
694 r6040_tx(dev);
695
696 /* Restore RDC MAC interrupt */ 697 /* Restore RDC MAC interrupt */
697 iowrite16(misr, ioaddr + MIER); 698 iowrite16(misr, ioaddr + MIER);
698 699
@@ -732,7 +733,7 @@ static int r6040_up(struct net_device *dev)
732 /* Initialize all MAC registers */ 733 /* Initialize all MAC registers */
733 r6040_init_mac_regs(dev); 734 r6040_init_mac_regs(dev);
734 735
735 phy_start(lp->phydev); 736 phy_start(dev->phydev);
736 737
737 return 0; 738 return 0;
738} 739}
@@ -813,6 +814,9 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
813 void __iomem *ioaddr = lp->base; 814 void __iomem *ioaddr = lp->base;
814 unsigned long flags; 815 unsigned long flags;
815 816
817 if (skb_put_padto(skb, ETH_ZLEN) < 0)
818 return NETDEV_TX_OK;
819
816 /* Critical Section */ 820 /* Critical Section */
817 spin_lock_irqsave(&lp->lock, flags); 821 spin_lock_irqsave(&lp->lock, flags);
818 822
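skb_put_padto() zero-pads runt frames up to the requested length and frees the skb if the pad fails, so the caller only needs to return NETDEV_TX_OK on error; this is also why the manual ETH_ZLEN clamp disappears from the descriptor setup in the next hunk. Sketch of the resulting entry point (my_hw_queue is hypothetical):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

netdev_tx_t my_hw_queue(struct net_device *dev, struct sk_buff *skb); /* assumed */

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* On failure the skb has already been freed by skb_put_padto(). */
	if (skb_put_padto(skb, ETH_ZLEN) < 0)
		return NETDEV_TX_OK;

	/* From here skb->len >= ETH_ZLEN, so descriptors can use skb->len. */
	return my_hw_queue(dev, skb);
}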
@@ -824,17 +828,10 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
824 return NETDEV_TX_BUSY; 828 return NETDEV_TX_BUSY;
825 } 829 }
826 830
827 /* Statistic Counter */
828 dev->stats.tx_packets++;
829 dev->stats.tx_bytes += skb->len;
830 /* Set TX descriptor & Transmit it */ 831 /* Set TX descriptor & Transmit it */
831 lp->tx_free_desc--; 832 lp->tx_free_desc--;
832 descptr = lp->tx_insert_ptr; 833 descptr = lp->tx_insert_ptr;
-	if (skb->len < ETH_ZLEN)
-		descptr->len = ETH_ZLEN;
-	else
-		descptr->len = skb->len;
-
+	descptr->len = skb->len;
838 descptr->skb_ptr = skb; 835 descptr->skb_ptr = skb;
839 descptr->buf = cpu_to_le32(pci_map_single(lp->pdev, 836 descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
840 skb->data, skb->len, PCI_DMA_TODEVICE)); 837 skb->data, skb->len, PCI_DMA_TODEVICE));
@@ -843,7 +840,8 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
843 skb_tx_timestamp(skb); 840 skb_tx_timestamp(skb);
844 841
845 /* Trigger the MAC to check the TX descriptor */ 842 /* Trigger the MAC to check the TX descriptor */
-	iowrite16(TM2TX, ioaddr + MTPR);
+	if (!skb->xmit_more || netif_queue_stopped(dev))
+		iowrite16(TM2TX, ioaddr + MTPR);
847 lp->tx_insert_ptr = descptr->vndescp; 845 lp->tx_insert_ptr = descptr->vndescp;
848 846
849 /* If no tx resource, stop */ 847 /* If no tx resource, stop */
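skb->xmit_more (the per-skb flag of this era) tells the driver the stack has more frames queued behind this one, so the uncached MMIO kick can be deferred to the last skb of a batch, as long as the queue is still awake to guarantee that final kick happens. The guard in isolation (dbell and kick_val are assumed device specifics):

#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void my_tx_kick(struct sk_buff *skb, struct net_device *dev,
		       void __iomem *dbell, u16 kick_val)
{
	/* Skip the expensive MMIO write while more frames are pending and
	 * the queue is still running; the last skb rings the doorbell.
	 */
	if (!skb->xmit_more || netif_queue_stopped(dev))
		iowrite16(kick_val, dbell);
}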
@@ -957,26 +955,12 @@ static void netdev_get_drvinfo(struct net_device *dev,
957 strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info)); 955 strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
958} 956}
959 957
960static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
961{
962 struct r6040_private *rp = netdev_priv(dev);
963
964 return phy_ethtool_gset(rp->phydev, cmd);
965}
966
967static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
968{
969 struct r6040_private *rp = netdev_priv(dev);
970
971 return phy_ethtool_sset(rp->phydev, cmd);
972}
973
974static const struct ethtool_ops netdev_ethtool_ops = { 958static const struct ethtool_ops netdev_ethtool_ops = {
975 .get_drvinfo = netdev_get_drvinfo, 959 .get_drvinfo = netdev_get_drvinfo,
976 .get_settings = netdev_get_settings,
977 .set_settings = netdev_set_settings,
978 .get_link = ethtool_op_get_link, 960 .get_link = ethtool_op_get_link,
979 .get_ts_info = ethtool_op_get_ts_info, 961 .get_ts_info = ethtool_op_get_ts_info,
962 .get_link_ksettings = phy_ethtool_get_link_ksettings,
963 .set_link_ksettings = phy_ethtool_set_link_ksettings,
980}; 964};
981 965
982static const struct net_device_ops r6040_netdev_ops = { 966static const struct net_device_ops r6040_netdev_ops = {
@@ -998,7 +982,7 @@ static const struct net_device_ops r6040_netdev_ops = {
998static void r6040_adjust_link(struct net_device *dev) 982static void r6040_adjust_link(struct net_device *dev)
999{ 983{
1000 struct r6040_private *lp = netdev_priv(dev); 984 struct r6040_private *lp = netdev_priv(dev);
1001 struct phy_device *phydev = lp->phydev; 985 struct phy_device *phydev = dev->phydev;
1002 int status_changed = 0; 986 int status_changed = 0;
1003 void __iomem *ioaddr = lp->base; 987 void __iomem *ioaddr = lp->base;
1004 988
@@ -1018,14 +1002,8 @@ static void r6040_adjust_link(struct net_device *dev)
1018 lp->old_duplex = phydev->duplex; 1002 lp->old_duplex = phydev->duplex;
1019 } 1003 }
1020 1004
1021 if (status_changed) { 1005 if (status_changed)
1022 pr_info("%s: link %s", dev->name, phydev->link ? 1006 phy_print_status(phydev);
1023 "UP" : "DOWN");
1024 if (phydev->link)
1025 pr_cont(" - %d/%s", phydev->speed,
1026 DUPLEX_FULL == phydev->duplex ? "full" : "half");
1027 pr_cont("\n");
1028 }
1029} 1007}
1030 1008
1031static int r6040_mii_probe(struct net_device *dev) 1009static int r6040_mii_probe(struct net_device *dev)
@@ -1057,7 +1035,6 @@ static int r6040_mii_probe(struct net_device *dev)
1057 | SUPPORTED_TP); 1035 | SUPPORTED_TP);
1058 1036
1059 phydev->advertising = phydev->supported; 1037 phydev->advertising = phydev->supported;
1060 lp->phydev = phydev;
1061 lp->old_link = 0; 1038 lp->old_link = 0;
1062 lp->old_duplex = -1; 1039 lp->old_duplex = -1;
1063 1040
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 28b775e5a9ad..f0b09b05ed3f 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1996,7 +1996,8 @@ static int rocker_port_change_proto_down(struct net_device *dev,
1996 return 0; 1996 return 0;
1997} 1997}
1998 1998
1999static void rocker_port_neigh_destroy(struct neighbour *n) 1999static void rocker_port_neigh_destroy(struct net_device *dev,
2000 struct neighbour *n)
2000{ 2001{
2001 struct rocker_port *rocker_port = netdev_priv(n->dev); 2002 struct rocker_port *rocker_port = netdev_priv(n->dev);
2002 int err; 2003 int err;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index 45019649bbbd..5cb51b609f02 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -475,7 +475,6 @@ struct sxgbe_priv_data {
475 int rxcsum_insertion; 475 int rxcsum_insertion;
476 spinlock_t stats_lock; /* lock for tx/rx statatics */ 476 spinlock_t stats_lock; /* lock for tx/rx statatics */
477 477
478 struct phy_device *phydev;
479 int oldlink; 478 int oldlink;
480 int speed; 479 int speed;
481 int oldduplex; 480 int oldduplex;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index c0981ae45874..542b67d436df 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -147,7 +147,7 @@ static int sxgbe_get_eee(struct net_device *dev,
147 edata->eee_active = priv->eee_active; 147 edata->eee_active = priv->eee_active;
148 edata->tx_lpi_timer = priv->tx_lpi_timer; 148 edata->tx_lpi_timer = priv->tx_lpi_timer;
149 149
150 return phy_ethtool_get_eee(priv->phydev, edata); 150 return phy_ethtool_get_eee(dev->phydev, edata);
151} 151}
152 152
153static int sxgbe_set_eee(struct net_device *dev, 153static int sxgbe_set_eee(struct net_device *dev,
@@ -172,7 +172,7 @@ static int sxgbe_set_eee(struct net_device *dev,
172 priv->tx_lpi_timer = edata->tx_lpi_timer; 172 priv->tx_lpi_timer = edata->tx_lpi_timer;
173 } 173 }
174 174
175 return phy_ethtool_set_eee(priv->phydev, edata); 175 return phy_ethtool_set_eee(dev->phydev, edata);
176} 176}
177 177
178static void sxgbe_getdrvinfo(struct net_device *dev, 178static void sxgbe_getdrvinfo(struct net_device *dev,
@@ -182,27 +182,6 @@ static void sxgbe_getdrvinfo(struct net_device *dev,
182 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 182 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
183} 183}
184 184
185static int sxgbe_getsettings(struct net_device *dev,
186 struct ethtool_cmd *cmd)
187{
188 struct sxgbe_priv_data *priv = netdev_priv(dev);
189
190 if (priv->phydev)
191 return phy_ethtool_gset(priv->phydev, cmd);
192
193 return -EOPNOTSUPP;
194}
195
196static int sxgbe_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
197{
198 struct sxgbe_priv_data *priv = netdev_priv(dev);
199
200 if (priv->phydev)
201 return phy_ethtool_sset(priv->phydev, cmd);
202
203 return -EOPNOTSUPP;
204}
205
206static u32 sxgbe_getmsglevel(struct net_device *dev) 185static u32 sxgbe_getmsglevel(struct net_device *dev)
207{ 186{
208 struct sxgbe_priv_data *priv = netdev_priv(dev); 187 struct sxgbe_priv_data *priv = netdev_priv(dev);
@@ -255,7 +234,7 @@ static void sxgbe_get_ethtool_stats(struct net_device *dev,
255 char *p; 234 char *p;
256 235
257 if (priv->eee_enabled) { 236 if (priv->eee_enabled) {
258 int val = phy_get_eee_err(priv->phydev); 237 int val = phy_get_eee_err(dev->phydev);
259 238
260 if (val) 239 if (val)
261 priv->xstats.eee_wakeup_error_n = val; 240 priv->xstats.eee_wakeup_error_n = val;
@@ -499,8 +478,6 @@ static int sxgbe_get_regs_len(struct net_device *dev)
499 478
500static const struct ethtool_ops sxgbe_ethtool_ops = { 479static const struct ethtool_ops sxgbe_ethtool_ops = {
501 .get_drvinfo = sxgbe_getdrvinfo, 480 .get_drvinfo = sxgbe_getdrvinfo,
502 .get_settings = sxgbe_getsettings,
503 .set_settings = sxgbe_setsettings,
504 .get_msglevel = sxgbe_getmsglevel, 481 .get_msglevel = sxgbe_getmsglevel,
505 .set_msglevel = sxgbe_setmsglevel, 482 .set_msglevel = sxgbe_setmsglevel,
506 .get_link = ethtool_op_get_link, 483 .get_link = ethtool_op_get_link,
@@ -516,6 +493,8 @@ static const struct ethtool_ops sxgbe_ethtool_ops = {
516 .get_regs_len = sxgbe_get_regs_len, 493 .get_regs_len = sxgbe_get_regs_len,
517 .get_eee = sxgbe_get_eee, 494 .get_eee = sxgbe_get_eee,
518 .set_eee = sxgbe_set_eee, 495 .set_eee = sxgbe_set_eee,
496 .get_link_ksettings = phy_ethtool_get_link_ksettings,
497 .set_link_ksettings = phy_ethtool_set_link_ksettings,
519}; 498};
520 499
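Once the PHY hangs off dev->phydev, the ethtool_cmd based get/set_settings pairs removed above can be deleted wholesale: the generic phylib helpers implement the new link_ksettings interface directly. The minimal resulting table looks like this (a sketch, not the full sxgbe ops):

#include <linux/ethtool.h>
#include <linux/phy.h>

static const struct ethtool_ops my_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	/* Both helpers operate on dev->phydev internally. */
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};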
521void sxgbe_set_ethtool_ops(struct net_device *netdev) 500void sxgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 413ea14ab91f..ea44a2456ce1 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -124,12 +124,13 @@ static void sxgbe_eee_ctrl_timer(unsigned long arg)
124 */ 124 */
125bool sxgbe_eee_init(struct sxgbe_priv_data * const priv) 125bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
126{ 126{
127 struct net_device *ndev = priv->dev;
127 bool ret = false; 128 bool ret = false;
128 129
129 /* MAC core supports the EEE feature. */ 130 /* MAC core supports the EEE feature. */
130 if (priv->hw_cap.eee) { 131 if (priv->hw_cap.eee) {
131 /* Check if the PHY supports EEE */ 132 /* Check if the PHY supports EEE */
132 if (phy_init_eee(priv->phydev, 1)) 133 if (phy_init_eee(ndev->phydev, 1))
133 return false; 134 return false;
134 135
135 priv->eee_active = 1; 136 priv->eee_active = 1;
@@ -152,12 +153,14 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
152 153
153static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv) 154static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
154{ 155{
156 struct net_device *ndev = priv->dev;
157
155 /* When the EEE has been already initialised we have to 158 /* When the EEE has been already initialised we have to
156 * modify the PLS bit in the LPI ctrl & status reg according 159 * modify the PLS bit in the LPI ctrl & status reg according
157 * to the PHY link status. For this reason. 160 * to the PHY link status. For this reason.
158 */ 161 */
159 if (priv->eee_enabled) 162 if (priv->eee_enabled)
160 priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link); 163 priv->hw->mac->set_eee_pls(priv->ioaddr, ndev->phydev->link);
161} 164}
162 165
163/** 166/**
@@ -203,7 +206,7 @@ static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
203static void sxgbe_adjust_link(struct net_device *dev) 206static void sxgbe_adjust_link(struct net_device *dev)
204{ 207{
205 struct sxgbe_priv_data *priv = netdev_priv(dev); 208 struct sxgbe_priv_data *priv = netdev_priv(dev);
206 struct phy_device *phydev = priv->phydev; 209 struct phy_device *phydev = dev->phydev;
207 u8 new_state = 0; 210 u8 new_state = 0;
208 u8 speed = 0xff; 211 u8 speed = 0xff;
209 212
@@ -306,9 +309,6 @@ static int sxgbe_init_phy(struct net_device *ndev)
306 netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n", 309 netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
307 __func__, phydev->phy_id, phydev->link); 310 __func__, phydev->phy_id, phydev->link);
308 311
309 /* save phy device in private structure */
310 priv->phydev = phydev;
311
312 return 0; 312 return 0;
313} 313}
314 314
@@ -1173,8 +1173,8 @@ static int sxgbe_open(struct net_device *dev)
1173 priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES); 1173 priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
1174 priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES); 1174 priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);
1175 1175
1176 if (priv->phydev) 1176 if (dev->phydev)
1177 phy_start(priv->phydev); 1177 phy_start(dev->phydev);
1178 1178
1179 /* initialise TX coalesce parameters */ 1179 /* initialise TX coalesce parameters */
1180 sxgbe_tx_init_coalesce(priv); 1180 sxgbe_tx_init_coalesce(priv);
@@ -1194,8 +1194,8 @@ static int sxgbe_open(struct net_device *dev)
1194 1194
1195init_error: 1195init_error:
1196 free_dma_desc_resources(priv); 1196 free_dma_desc_resources(priv);
1197 if (priv->phydev) 1197 if (dev->phydev)
1198 phy_disconnect(priv->phydev); 1198 phy_disconnect(dev->phydev);
1199phy_error: 1199phy_error:
1200 clk_disable_unprepare(priv->sxgbe_clk); 1200 clk_disable_unprepare(priv->sxgbe_clk);
1201 1201
@@ -1216,10 +1216,9 @@ static int sxgbe_release(struct net_device *dev)
1216 del_timer_sync(&priv->eee_ctrl_timer); 1216 del_timer_sync(&priv->eee_ctrl_timer);
1217 1217
1218 /* Stop and disconnect the PHY */ 1218 /* Stop and disconnect the PHY */
1219 if (priv->phydev) { 1219 if (dev->phydev) {
1220 phy_stop(priv->phydev); 1220 phy_stop(dev->phydev);
1221 phy_disconnect(priv->phydev); 1221 phy_disconnect(dev->phydev);
1222 priv->phydev = NULL;
1223 } 1222 }
1224 1223
1225 netif_tx_stop_all_queues(dev); 1224 netif_tx_stop_all_queues(dev);
@@ -1969,7 +1968,6 @@ static void sxgbe_poll_controller(struct net_device *dev)
1969 */ 1968 */
1970static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1969static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1971{ 1970{
1972 struct sxgbe_priv_data *priv = netdev_priv(dev);
1973 int ret = -EOPNOTSUPP; 1971 int ret = -EOPNOTSUPP;
1974 1972
1975 if (!netif_running(dev)) 1973 if (!netif_running(dev))
@@ -1979,9 +1977,9 @@ static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1979 case SIOCGMIIPHY: 1977 case SIOCGMIIPHY:
1980 case SIOCGMIIREG: 1978 case SIOCGMIIREG:
1981 case SIOCSMIIREG: 1979 case SIOCSMIIREG:
1982 if (!priv->phydev) 1980 if (!dev->phydev)
1983 return -EINVAL; 1981 return -EINVAL;
1984 ret = phy_mii_ioctl(priv->phydev, rq, cmd); 1982 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
1985 break; 1983 break;
1986 default: 1984 default:
1987 break; 1985 break;
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 133e9e35be9e..4c83739d158f 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -104,7 +104,8 @@ int efx_farch_test_registers(struct efx_nic *efx,
104 const struct efx_farch_register_test *regs, 104 const struct efx_farch_register_test *regs,
105 size_t n_regs) 105 size_t n_regs)
106{ 106{
107 unsigned address = 0, i, j; 107 unsigned address = 0;
108 int i, j;
108 efx_oword_t mask, imask, original, reg, buf; 109 efx_oword_t mask, imask, original, reg, buf;
109 110
110 for (i = 0; i < n_regs; ++i) { 111 for (i = 0; i < n_regs; ++i) {
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8af25563f627..ca3134540d2d 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -114,9 +114,7 @@ struct smsc911x_data {
114 /* spinlock to ensure register accesses are serialised */ 114 /* spinlock to ensure register accesses are serialised */
115 spinlock_t dev_lock; 115 spinlock_t dev_lock;
116 116
117 struct phy_device *phy_dev;
118 struct mii_bus *mii_bus; 117 struct mii_bus *mii_bus;
119 int phy_irq[PHY_MAX_ADDR];
120 unsigned int using_extphy; 118 unsigned int using_extphy;
121 int last_duplex; 119 int last_duplex;
122 int last_carrier; 120 int last_carrier;
@@ -834,7 +832,7 @@ static int smsc911x_phy_reset(struct smsc911x_data *pdata)
834static int smsc911x_phy_loopbacktest(struct net_device *dev) 832static int smsc911x_phy_loopbacktest(struct net_device *dev)
835{ 833{
836 struct smsc911x_data *pdata = netdev_priv(dev); 834 struct smsc911x_data *pdata = netdev_priv(dev);
837 struct phy_device *phy_dev = pdata->phy_dev; 835 struct phy_device *phy_dev = dev->phydev;
838 int result = -EIO; 836 int result = -EIO;
839 unsigned int i, val; 837 unsigned int i, val;
840 unsigned long flags; 838 unsigned long flags;
@@ -904,7 +902,8 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev)
904 902
905static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata) 903static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata)
906{ 904{
907 struct phy_device *phy_dev = pdata->phy_dev; 905 struct net_device *ndev = pdata->dev;
906 struct phy_device *phy_dev = ndev->phydev;
908 u32 afc = smsc911x_reg_read(pdata, AFC_CFG); 907 u32 afc = smsc911x_reg_read(pdata, AFC_CFG);
909 u32 flow; 908 u32 flow;
910 unsigned long flags; 909 unsigned long flags;
@@ -945,7 +944,7 @@ static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata)
945static void smsc911x_phy_adjust_link(struct net_device *dev) 944static void smsc911x_phy_adjust_link(struct net_device *dev)
946{ 945{
947 struct smsc911x_data *pdata = netdev_priv(dev); 946 struct smsc911x_data *pdata = netdev_priv(dev);
948 struct phy_device *phy_dev = pdata->phy_dev; 947 struct phy_device *phy_dev = dev->phydev;
949 unsigned long flags; 948 unsigned long flags;
950 int carrier; 949 int carrier;
951 950
@@ -1038,7 +1037,6 @@ static int smsc911x_mii_probe(struct net_device *dev)
1038 SUPPORTED_Asym_Pause); 1037 SUPPORTED_Asym_Pause);
1039 phydev->advertising = phydev->supported; 1038 phydev->advertising = phydev->supported;
1040 1039
1041 pdata->phy_dev = phydev;
1042 pdata->last_duplex = -1; 1040 pdata->last_duplex = -1;
1043 pdata->last_carrier = -1; 1041 pdata->last_carrier = -1;
1044 1042
@@ -1073,7 +1071,6 @@ static int smsc911x_mii_init(struct platform_device *pdev,
1073 pdata->mii_bus->priv = pdata; 1071 pdata->mii_bus->priv = pdata;
1074 pdata->mii_bus->read = smsc911x_mii_read; 1072 pdata->mii_bus->read = smsc911x_mii_read;
1075 pdata->mii_bus->write = smsc911x_mii_write; 1073 pdata->mii_bus->write = smsc911x_mii_write;
1076 memcpy(pdata->mii_bus->irq, pdata->phy_irq, sizeof(pdata->mii_bus));
1077 1074
1078 pdata->mii_bus->parent = &pdev->dev; 1075 pdata->mii_bus->parent = &pdev->dev;
1079 1076
@@ -1340,9 +1337,11 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
1340 1337
1341static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata) 1338static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
1342{ 1339{
1340 struct net_device *ndev = pdata->dev;
1341 struct phy_device *phy_dev = ndev->phydev;
1343 int rc = 0; 1342 int rc = 0;
1344 1343
1345 if (!pdata->phy_dev) 1344 if (!phy_dev)
1346 return rc; 1345 return rc;
1347 1346
1348 /* If the internal PHY is in General Power-Down mode, all, except the 1347 /* If the internal PHY is in General Power-Down mode, all, except the
@@ -1352,7 +1351,7 @@ static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
1352 * In that case, clear the bit 0.11, so the PHY powers up and we can 1351 * In that case, clear the bit 0.11, so the PHY powers up and we can
1353 * access to the phy registers. 1352 * access to the phy registers.
1354 */ 1353 */
1355 rc = phy_read(pdata->phy_dev, MII_BMCR); 1354 rc = phy_read(phy_dev, MII_BMCR);
1356 if (rc < 0) { 1355 if (rc < 0) {
1357 SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); 1356 SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
1358 return rc; 1357 return rc;
@@ -1362,7 +1361,7 @@ static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
1362 * disable the general power down-mode. 1361 * disable the general power down-mode.
1363 */ 1362 */
1364 if (rc & BMCR_PDOWN) { 1363 if (rc & BMCR_PDOWN) {
1365 rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN); 1364 rc = phy_write(phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
1366 if (rc < 0) { 1365 if (rc < 0) {
1367 SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); 1366 SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
1368 return rc; 1367 return rc;
@@ -1376,12 +1375,14 @@ static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
1376 1375
1377static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) 1376static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
1378{ 1377{
1378 struct net_device *ndev = pdata->dev;
1379 struct phy_device *phy_dev = ndev->phydev;
1379 int rc = 0; 1380 int rc = 0;
1380 1381
1381 if (!pdata->phy_dev) 1382 if (!phy_dev)
1382 return rc; 1383 return rc;
1383 1384
1384 rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS); 1385 rc = phy_read(phy_dev, MII_LAN83C185_CTRL_STATUS);
1385 1386
1386 if (rc < 0) { 1387 if (rc < 0) {
1387 SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); 1388 SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
@@ -1391,7 +1392,7 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
1391 /* Only disable if energy detect mode is already enabled */ 1392 /* Only disable if energy detect mode is already enabled */
1392 if (rc & MII_LAN83C185_EDPWRDOWN) { 1393 if (rc & MII_LAN83C185_EDPWRDOWN) {
1393 /* Disable energy detect mode for this SMSC Transceivers */ 1394 /* Disable energy detect mode for this SMSC Transceivers */
1394 rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, 1395 rc = phy_write(phy_dev, MII_LAN83C185_CTRL_STATUS,
1395 rc & (~MII_LAN83C185_EDPWRDOWN)); 1396 rc & (~MII_LAN83C185_EDPWRDOWN));
1396 1397
1397 if (rc < 0) { 1398 if (rc < 0) {
@@ -1407,12 +1408,14 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
1407 1408
1408static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata) 1409static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
1409{ 1410{
1411 struct net_device *ndev = pdata->dev;
1412 struct phy_device *phy_dev = ndev->phydev;
1410 int rc = 0; 1413 int rc = 0;
1411 1414
1412 if (!pdata->phy_dev) 1415 if (!phy_dev)
1413 return rc; 1416 return rc;
1414 1417
1415 rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS); 1418 rc = phy_read(phy_dev, MII_LAN83C185_CTRL_STATUS);
1416 1419
1417 if (rc < 0) { 1420 if (rc < 0) {
1418 SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); 1421 SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
@@ -1422,7 +1425,7 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
1422 /* Only enable if energy detect mode is already disabled */ 1425 /* Only enable if energy detect mode is already disabled */
1423 if (!(rc & MII_LAN83C185_EDPWRDOWN)) { 1426 if (!(rc & MII_LAN83C185_EDPWRDOWN)) {
1424 /* Enable energy detect mode for this SMSC Transceivers */ 1427 /* Enable energy detect mode for this SMSC Transceivers */
1425 rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, 1428 rc = phy_write(phy_dev, MII_LAN83C185_CTRL_STATUS,
1426 rc | MII_LAN83C185_EDPWRDOWN); 1429 rc | MII_LAN83C185_EDPWRDOWN);
1427 1430
1428 if (rc < 0) { 1431 if (rc < 0) {
@@ -1519,7 +1522,7 @@ static int smsc911x_open(struct net_device *dev)
1519 unsigned int intcfg; 1522 unsigned int intcfg;
1520 1523
1521 /* if the phy is not yet registered, retry later*/ 1524 /* if the phy is not yet registered, retry later*/
1522 if (!pdata->phy_dev) { 1525 if (!dev->phydev) {
1523 SMSC_WARN(pdata, hw, "phy_dev is NULL"); 1526 SMSC_WARN(pdata, hw, "phy_dev is NULL");
1524 return -EAGAIN; 1527 return -EAGAIN;
1525 } 1528 }
@@ -1610,7 +1613,7 @@ static int smsc911x_open(struct net_device *dev)
1610 pdata->last_carrier = -1; 1613 pdata->last_carrier = -1;
1611 1614
1612 /* Bring the PHY up */ 1615 /* Bring the PHY up */
1613 phy_start(pdata->phy_dev); 1616 phy_start(dev->phydev);
1614 1617
1615 temp = smsc911x_reg_read(pdata, HW_CFG); 1618 temp = smsc911x_reg_read(pdata, HW_CFG);
1616 /* Preserve TX FIFO size and external PHY configuration */ 1619 /* Preserve TX FIFO size and external PHY configuration */
@@ -1665,8 +1668,8 @@ static int smsc911x_stop(struct net_device *dev)
1665 smsc911x_tx_update_txcounters(dev); 1668 smsc911x_tx_update_txcounters(dev);
1666 1669
1667 /* Bring the PHY down */ 1670 /* Bring the PHY down */
1668 if (pdata->phy_dev) 1671 if (dev->phydev)
1669 phy_stop(pdata->phy_dev); 1672 phy_stop(dev->phydev);
1670 1673
1671 SMSC_TRACE(pdata, ifdown, "Interface stopped"); 1674 SMSC_TRACE(pdata, ifdown, "Interface stopped");
1672 return 0; 1675 return 0;
@@ -1906,30 +1909,10 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
1906/* Standard ioctls for mii-tool */ 1909/* Standard ioctls for mii-tool */
1907static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1910static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1908{ 1911{
1909 struct smsc911x_data *pdata = netdev_priv(dev); 1912 if (!netif_running(dev) || !dev->phydev)
1910
1911 if (!netif_running(dev) || !pdata->phy_dev)
1912 return -EINVAL; 1913 return -EINVAL;
1913 1914
1914 return phy_mii_ioctl(pdata->phy_dev, ifr, cmd); 1915 return phy_mii_ioctl(dev->phydev, ifr, cmd);
1915}
1916
1917static int
1918smsc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
1919{
1920 struct smsc911x_data *pdata = netdev_priv(dev);
1921
1922 cmd->maxtxpkt = 1;
1923 cmd->maxrxpkt = 1;
1924 return phy_ethtool_gset(pdata->phy_dev, cmd);
1925}
1926
1927static int
1928smsc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
1929{
1930 struct smsc911x_data *pdata = netdev_priv(dev);
1931
1932 return phy_ethtool_sset(pdata->phy_dev, cmd);
1933} 1916}
1934 1917
1935static void smsc911x_ethtool_getdrvinfo(struct net_device *dev, 1918static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
@@ -1943,9 +1926,7 @@ static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
1943 1926
1944static int smsc911x_ethtool_nwayreset(struct net_device *dev) 1927static int smsc911x_ethtool_nwayreset(struct net_device *dev)
1945{ 1928{
1946 struct smsc911x_data *pdata = netdev_priv(dev); 1929 return phy_start_aneg(dev->phydev);
1947
1948 return phy_start_aneg(pdata->phy_dev);
1949} 1930}
1950 1931
1951static u32 smsc911x_ethtool_getmsglevel(struct net_device *dev) 1932static u32 smsc911x_ethtool_getmsglevel(struct net_device *dev)
@@ -1971,7 +1952,7 @@ smsc911x_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
1971 void *buf) 1952 void *buf)
1972{ 1953{
1973 struct smsc911x_data *pdata = netdev_priv(dev); 1954 struct smsc911x_data *pdata = netdev_priv(dev);
1974 struct phy_device *phy_dev = pdata->phy_dev; 1955 struct phy_device *phy_dev = dev->phydev;
1975 unsigned long flags; 1956 unsigned long flags;
1976 unsigned int i; 1957 unsigned int i;
1977 unsigned int j = 0; 1958 unsigned int j = 0;
@@ -2117,8 +2098,6 @@ static int smsc911x_ethtool_set_eeprom(struct net_device *dev,
2117} 2098}
2118 2099
2119static const struct ethtool_ops smsc911x_ethtool_ops = { 2100static const struct ethtool_ops smsc911x_ethtool_ops = {
2120 .get_settings = smsc911x_ethtool_getsettings,
2121 .set_settings = smsc911x_ethtool_setsettings,
2122 .get_link = ethtool_op_get_link, 2101 .get_link = ethtool_op_get_link,
2123 .get_drvinfo = smsc911x_ethtool_getdrvinfo, 2102 .get_drvinfo = smsc911x_ethtool_getdrvinfo,
2124 .nway_reset = smsc911x_ethtool_nwayreset, 2103 .nway_reset = smsc911x_ethtool_nwayreset,
@@ -2130,6 +2109,8 @@ static const struct ethtool_ops smsc911x_ethtool_ops = {
2130 .get_eeprom = smsc911x_ethtool_get_eeprom, 2109 .get_eeprom = smsc911x_ethtool_get_eeprom,
2131 .set_eeprom = smsc911x_ethtool_set_eeprom, 2110 .set_eeprom = smsc911x_ethtool_set_eeprom,
2132 .get_ts_info = ethtool_op_get_ts_info, 2111 .get_ts_info = ethtool_op_get_ts_info,
2112 .get_link_ksettings = phy_ethtool_get_link_ksettings,
2113 .set_link_ksettings = phy_ethtool_set_link_ksettings,
2133}; 2114};
2134 2115
2135static const struct net_device_ops smsc911x_netdev_ops = { 2116static const struct net_device_ops smsc911x_netdev_ops = {
@@ -2310,12 +2291,11 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
2310 pdata = netdev_priv(dev); 2291 pdata = netdev_priv(dev);
2311 BUG_ON(!pdata); 2292 BUG_ON(!pdata);
2312 BUG_ON(!pdata->ioaddr); 2293 BUG_ON(!pdata->ioaddr);
2313 BUG_ON(!pdata->phy_dev); 2294 BUG_ON(!dev->phydev);
2314 2295
2315 SMSC_TRACE(pdata, ifdown, "Stopping driver"); 2296 SMSC_TRACE(pdata, ifdown, "Stopping driver");
2316 2297
2317 phy_disconnect(pdata->phy_dev); 2298 phy_disconnect(dev->phydev);
2318 pdata->phy_dev = NULL;
2319 mdiobus_unregister(pdata->mii_bus); 2299 mdiobus_unregister(pdata->mii_bus);
2320 mdiobus_free(pdata->mii_bus); 2300 mdiobus_free(pdata->mii_bus);
2321 2301
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index fc60368df2e7..2533b91f1421 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -232,6 +232,11 @@ struct stmmac_extra_stats {
232#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */ 232#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */
233#define DEFAULT_DMA_PBL 8 233#define DEFAULT_DMA_PBL 8
234 234
235/* PCS status and mask defines */
236#define PCS_ANE_IRQ BIT(2) /* PCS Auto-Negotiation */
237#define PCS_LINK_IRQ BIT(1) /* PCS Link */
238#define PCS_RGSMIIIS_IRQ BIT(0) /* RGMII or SMII Interrupt */
239
235/* Max/Min RI Watchdog Timer count value */ 240/* Max/Min RI Watchdog Timer count value */
236#define MAX_DMA_RIWT 0xff 241#define MAX_DMA_RIWT 0xff
237#define MIN_DMA_RIWT 0x20 242#define MIN_DMA_RIWT 0x20
@@ -272,9 +277,6 @@ enum dma_irq_status {
272#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2) 277#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
273#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3) 278#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3)
274 279
275#define CORE_PCS_ANE_COMPLETE (1 << 5)
276#define CORE_PCS_LINK_STATUS (1 << 6)
277#define CORE_RGMII_IRQ (1 << 7)
278#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8) 280#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8)
279 281
280/* Physical Coding Sublayer */ 282/* Physical Coding Sublayer */
@@ -469,9 +471,12 @@ struct stmmac_ops {
469 void (*reset_eee_mode)(struct mac_device_info *hw); 471 void (*reset_eee_mode)(struct mac_device_info *hw);
470 void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); 472 void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
471 void (*set_eee_pls)(struct mac_device_info *hw, int link); 473 void (*set_eee_pls)(struct mac_device_info *hw, int link);
472 void (*ctrl_ane)(struct mac_device_info *hw, bool restart);
473 void (*get_adv)(struct mac_device_info *hw, struct rgmii_adv *adv);
474 void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x); 474 void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x);
475 /* PCS calls */
476 void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
477 bool loopback);
478 void (*pcs_rane)(void __iomem *ioaddr, bool restart);
479 void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv);
475}; 480};
476 481
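The new pcs_* hooks take a bare ioaddr plus each core's register base rather than a mac_device_info, which is what allows dwmac1000 (PCS base 0xc0) and dwmac4 (PCS base 0xe0) to share one set of AN helpers in stmmac_pcs.h. A sketch of the delegation idea, using the restart bit both cores define as BIT(9); the real helper is dwmac_rane():

#include <linux/bitops.h>
#include <linux/io.h>

static inline void my_pcs_restart_ane(void __iomem *ioaddr, u32 pcs_base)
{
	u32 value = readl(ioaddr + pcs_base);	/* AN control sits at base + 0 */

	value |= BIT(9);			/* restart auto-negotiation */
	writel(value, ioaddr + pcs_base);
}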
477/* PTP and HW Timer helpers */ 482/* PTP and HW Timer helpers */
@@ -524,6 +529,9 @@ struct mac_device_info {
524 int unicast_filter_entries; 529 int unicast_filter_entries;
525 int mcast_bits_log2; 530 int mcast_bits_log2;
526 unsigned int rx_csum; 531 unsigned int rx_csum;
532 unsigned int pcs;
533 unsigned int pmt;
534 unsigned int ps;
527}; 535};
528 536
529struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, 537struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
@@ -546,6 +554,7 @@ void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
546void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable); 554void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable);
547 555
548void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); 556void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
557
549extern const struct stmmac_mode_ops ring_mode_ops; 558extern const struct stmmac_mode_ops ring_mode_ops;
550extern const struct stmmac_mode_ops chain_mode_ops; 559extern const struct stmmac_mode_ops chain_mode_ops;
551extern const struct stmmac_desc_ops dwmac4_desc_ops; 560extern const struct stmmac_desc_ops dwmac4_desc_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index b0593a4268ee..ff3e5ab39bd0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -38,19 +38,26 @@
38#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */ 38#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
39 39
40#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */ 40#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
-enum dwmac1000_irq_status {
-	lpiis_irq = 0x400,
-	time_stamp_irq = 0x0200,
-	mmc_rx_csum_offload_irq = 0x0080,
-	mmc_tx_irq = 0x0040,
-	mmc_rx_irq = 0x0020,
-	mmc_irq = 0x0010,
-	pmt_irq = 0x0008,
-	pcs_ane_irq = 0x0004,
-	pcs_link_irq = 0x0002,
-	rgmii_irq = 0x0001,
-};
-#define GMAC_INT_MASK		0x0000003c	/* interrupt mask register */
+#define GMAC_INT_STATUS_PMT	BIT(3)
+#define GMAC_INT_STATUS_MMCIS	BIT(4)
+#define GMAC_INT_STATUS_MMCRIS	BIT(5)
+#define GMAC_INT_STATUS_MMCTIS	BIT(6)
+#define GMAC_INT_STATUS_MMCCSUM	BIT(7)
+#define GMAC_INT_STATUS_TSTAMP	BIT(9)
+#define GMAC_INT_STATUS_LPIIS	BIT(10)
+
+/* interrupt mask register */
+#define GMAC_INT_MASK		0x0000003c
+#define GMAC_INT_DISABLE_RGMII		BIT(0)
+#define GMAC_INT_DISABLE_PCSLINK	BIT(1)
+#define GMAC_INT_DISABLE_PCSAN		BIT(2)
+#define GMAC_INT_DISABLE_PMT		BIT(3)
+#define GMAC_INT_DISABLE_TIMESTAMP	BIT(9)
+#define GMAC_INT_DISABLE_PCS	(GMAC_INT_DISABLE_RGMII | \
+				 GMAC_INT_DISABLE_PCSLINK | \
+				 GMAC_INT_DISABLE_PCSAN)
+#define GMAC_INT_DEFAULT_MASK	(GMAC_INT_DISABLE_TIMESTAMP | \
+				 GMAC_INT_DISABLE_PCS)
54 61
55/* PMT Control and Status */ 62/* PMT Control and Status */
56#define GMAC_PMT 0x0000002c 63#define GMAC_PMT 0x0000002c
@@ -90,42 +97,23 @@ enum power_event {
90 (reg * 8)) 97 (reg * 8))
91#define GMAC_MAX_PERFECT_ADDRESSES 1 98#define GMAC_MAX_PERFECT_ADDRESSES 1
92 99
-/* PCS registers (AN/TBI/SGMII/RGMII) offset */
-#define GMAC_AN_CTRL	0x000000c0	/* AN control */
-#define GMAC_AN_STATUS	0x000000c4	/* AN status */
-#define GMAC_ANE_ADV	0x000000c8	/* Auto-Neg. Advertisement */
-#define GMAC_ANE_LPA	0x000000cc	/* Auto-Neg. link partener ability */
-#define GMAC_ANE_EXP	0x000000d0	/* ANE expansion */
-#define GMAC_TBI	0x000000d4	/* TBI extend status */
-#define GMAC_S_R_GMII	0x000000d8	/* SGMII RGMII status */
-
-/* AN Configuration defines */
-#define GMAC_AN_CTRL_RAN	0x00000200	/* Restart Auto-Negotiation */
-#define GMAC_AN_CTRL_ANE	0x00001000	/* Auto-Negotiation Enable */
-#define GMAC_AN_CTRL_ELE	0x00004000	/* External Loopback Enable */
-#define GMAC_AN_CTRL_ECD	0x00010000	/* Enable Comma Detect */
-#define GMAC_AN_CTRL_LR		0x00020000	/* Lock to Reference */
-#define GMAC_AN_CTRL_SGMRAL	0x00040000	/* SGMII RAL Control */
-
-/* AN Status defines */
-#define GMAC_AN_STATUS_LS	0x00000004	/* Link Status 0:down 1:up */
-#define GMAC_AN_STATUS_ANA	0x00000008	/* Auto-Negotiation Ability */
-#define GMAC_AN_STATUS_ANC	0x00000020	/* Auto-Negotiation Complete */
-#define GMAC_AN_STATUS_ES	0x00000100	/* Extended Status */
-
-/* Register 54 (SGMII/RGMII status register) */
-#define GMAC_S_R_GMII_LINK		0x8
-#define GMAC_S_R_GMII_SPEED		0x5
-#define GMAC_S_R_GMII_SPEED_SHIFT	0x1
-#define GMAC_S_R_GMII_MODE		0x1
-#define GMAC_S_R_GMII_SPEED_125		2
-#define GMAC_S_R_GMII_SPEED_25		1
-
-/* Common ADV and LPA defines */
-#define GMAC_ANE_FD		(1 << 5)
-#define GMAC_ANE_HD		(1 << 6)
-#define GMAC_ANE_PSE		(3 << 7)
-#define GMAC_ANE_PSE_SHIFT	7
+#define GMAC_PCS_BASE	0x000000c0	/* PCS register base */
+#define GMAC_RGSMIIIS	0x000000d8	/* RGMII/SMII status */
+
+/* SGMII/RGMII status register */
+#define GMAC_RGSMIIIS_LNKMODE		BIT(0)
+#define GMAC_RGSMIIIS_SPEED		GENMASK(2, 1)
+#define GMAC_RGSMIIIS_SPEED_SHIFT	1
+#define GMAC_RGSMIIIS_LNKSTS		BIT(3)
+#define GMAC_RGSMIIIS_JABTO		BIT(4)
+#define GMAC_RGSMIIIS_FALSECARDET	BIT(5)
+#define GMAC_RGSMIIIS_SMIDRXS		BIT(16)
+/* LNKMOD */
+#define GMAC_RGSMIIIS_LNKMOD_MASK	0x1
+/* LNKSPEED */
+#define GMAC_RGSMIIIS_SPEED_125		0x2
+#define GMAC_RGSMIIIS_SPEED_25		0x1
+#define GMAC_RGSMIIIS_SPEED_2_5		0x0

130/* GMAC Configuration defines */ 118/* GMAC Configuration defines */
131#define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */ 119#define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index fb1eb578e34e..cbefe9e2207c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -30,22 +30,48 @@
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/ethtool.h> 31#include <linux/ethtool.h>
32#include <asm/io.h> 32#include <asm/io.h>
33#include "stmmac_pcs.h"
33#include "dwmac1000.h" 34#include "dwmac1000.h"
34 35
35static void dwmac1000_core_init(struct mac_device_info *hw, int mtu) 36static void dwmac1000_core_init(struct mac_device_info *hw, int mtu)
36{ 37{
37 void __iomem *ioaddr = hw->pcsr; 38 void __iomem *ioaddr = hw->pcsr;
38 u32 value = readl(ioaddr + GMAC_CONTROL); 39 u32 value = readl(ioaddr + GMAC_CONTROL);
40
41 /* Configure GMAC core */
39 value |= GMAC_CORE_INIT; 42 value |= GMAC_CORE_INIT;
43
40 if (mtu > 1500) 44 if (mtu > 1500)
41 value |= GMAC_CONTROL_2K; 45 value |= GMAC_CONTROL_2K;
42 if (mtu > 2000) 46 if (mtu > 2000)
43 value |= GMAC_CONTROL_JE; 47 value |= GMAC_CONTROL_JE;
44 48
49 if (hw->ps) {
50 value |= GMAC_CONTROL_TE;
51
52 if (hw->ps == SPEED_1000) {
53 value &= ~GMAC_CONTROL_PS;
54 } else {
55 value |= GMAC_CONTROL_PS;
56
57 if (hw->ps == SPEED_10)
58 value &= ~GMAC_CONTROL_FES;
59 else
60 value |= GMAC_CONTROL_FES;
61 }
62 }
63
45 writel(value, ioaddr + GMAC_CONTROL); 64 writel(value, ioaddr + GMAC_CONTROL);
46 65
47 /* Mask GMAC interrupts */ 66 /* Mask GMAC interrupts */
48 writel(0x207, ioaddr + GMAC_INT_MASK); 67 value = GMAC_INT_DEFAULT_MASK;
68
69 if (hw->pmt)
70 value &= ~GMAC_INT_DISABLE_PMT;
71 if (hw->pcs)
72 value &= ~GMAC_INT_DISABLE_PCS;
73
74 writel(value, ioaddr + GMAC_INT_MASK);
49 75
50#ifdef STMMAC_VLAN_TAG_USED 76#ifdef STMMAC_VLAN_TAG_USED
51 /* Tag detection without filtering */ 77 /* Tag detection without filtering */
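For reference, the named default mask written above reproduces the magic constant it replaces:

	GMAC_INT_DEFAULT_MASK = GMAC_INT_DISABLE_TIMESTAMP | GMAC_INT_DISABLE_PCS
	                      = BIT(9) | (BIT(0) | BIT(1) | BIT(2))
	                      = 0x200  |  0x007
	                      = 0x207

i.e. exactly the 0x207 previously hard-coded into GMAC_INT_MASK; the hw->pmt / hw->pcs conditionals then clear the corresponding disable bits only when those blocks are actually in use.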
@@ -241,6 +267,39 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode)
241 writel(pmt, ioaddr + GMAC_PMT); 267 writel(pmt, ioaddr + GMAC_PMT);
242} 268}
243 269
270/* RGMII or SMII interface */
271static void dwmac1000_rgsmii(void __iomem *ioaddr, struct stmmac_extra_stats *x)
272{
273 u32 status;
274
275 status = readl(ioaddr + GMAC_RGSMIIIS);
276 x->irq_rgmii_n++;
277
278 /* Check the link status */
279 if (status & GMAC_RGSMIIIS_LNKSTS) {
280 int speed_value;
281
282 x->pcs_link = 1;
283
284 speed_value = ((status & GMAC_RGSMIIIS_SPEED) >>
285 GMAC_RGSMIIIS_SPEED_SHIFT);
286 if (speed_value == GMAC_RGSMIIIS_SPEED_125)
287 x->pcs_speed = SPEED_1000;
288 else if (speed_value == GMAC_RGSMIIIS_SPEED_25)
289 x->pcs_speed = SPEED_100;
290 else
291 x->pcs_speed = SPEED_10;
292
293 x->pcs_duplex = (status & GMAC_RGSMIIIS_LNKMOD_MASK);
294
295 pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
296 x->pcs_duplex ? "Full" : "Half");
297 } else {
298 x->pcs_link = 0;
299 pr_info("Link is Down\n");
300 }
301}
302
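A worked decode of the status word may help; for a hypothetical GMAC_RGSMIIIS read of 0x0000000d:

	bit 3    (GMAC_RGSMIIIS_LNKSTS)      = 1    -> link up
	bits 2:1 (GMAC_RGSMIIIS_SPEED)       = 0b10 -> GMAC_RGSMIIIS_SPEED_125, i.e. SPEED_1000
	bit 0    (GMAC_RGSMIIIS_LNKMOD_MASK) = 1    -> full duplex

so dwmac1000_rgsmii() above would log "Link is Up - 1000/Full".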
244static int dwmac1000_irq_status(struct mac_device_info *hw, 303static int dwmac1000_irq_status(struct mac_device_info *hw,
245 struct stmmac_extra_stats *x) 304 struct stmmac_extra_stats *x)
246{ 305{
@@ -249,19 +308,20 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
249 int ret = 0; 308 int ret = 0;
250 309
251 /* Not used events (e.g. MMC interrupts) are not handled. */ 310 /* Not used events (e.g. MMC interrupts) are not handled. */
252 if ((intr_status & mmc_tx_irq)) 311 if ((intr_status & GMAC_INT_STATUS_MMCTIS))
253 x->mmc_tx_irq_n++; 312 x->mmc_tx_irq_n++;
254 if (unlikely(intr_status & mmc_rx_irq)) 313 if (unlikely(intr_status & GMAC_INT_STATUS_MMCRIS))
255 x->mmc_rx_irq_n++; 314 x->mmc_rx_irq_n++;
256 if (unlikely(intr_status & mmc_rx_csum_offload_irq)) 315 if (unlikely(intr_status & GMAC_INT_STATUS_MMCCSUM))
257 x->mmc_rx_csum_offload_irq_n++; 316 x->mmc_rx_csum_offload_irq_n++;
258 if (unlikely(intr_status & pmt_irq)) { 317 if (unlikely(intr_status & GMAC_INT_DISABLE_PMT)) {
259 /* clear the PMT bits 5 and 6 by reading the PMT status reg */ 318 /* clear the PMT bits 5 and 6 by reading the PMT status reg */
260 readl(ioaddr + GMAC_PMT); 319 readl(ioaddr + GMAC_PMT);
261 x->irq_receive_pmt_irq_n++; 320 x->irq_receive_pmt_irq_n++;
262 } 321 }
263 /* MAC trx/rx EEE LPI entry/exit interrupts */ 322
264 if (intr_status & lpiis_irq) { 323 /* MAC tx/rx EEE LPI entry/exit interrupts */
324 if (intr_status & GMAC_INT_STATUS_LPIIS) {
265 /* Clean LPI interrupt by reading the Reg 12 */ 325 /* Clean LPI interrupt by reading the Reg 12 */
266 ret = readl(ioaddr + LPI_CTRL_STATUS); 326 ret = readl(ioaddr + LPI_CTRL_STATUS);
267 327
@@ -275,36 +335,10 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
275 x->irq_rx_path_exit_lpi_mode_n++; 335 x->irq_rx_path_exit_lpi_mode_n++;
276 } 336 }
277 337
-	if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
-		readl(ioaddr + GMAC_AN_STATUS);
-		x->irq_pcs_ane_n++;
-	}
-	if (intr_status & rgmii_irq) {
-		u32 status = readl(ioaddr + GMAC_S_R_GMII);
-		x->irq_rgmii_n++;
-
-		/* Save and dump the link status. */
-		if (status & GMAC_S_R_GMII_LINK) {
-			int speed_value = (status & GMAC_S_R_GMII_SPEED) >>
-			    GMAC_S_R_GMII_SPEED_SHIFT;
-			x->pcs_duplex = (status & GMAC_S_R_GMII_MODE);
-
-			if (speed_value == GMAC_S_R_GMII_SPEED_125)
-				x->pcs_speed = SPEED_1000;
-			else if (speed_value == GMAC_S_R_GMII_SPEED_25)
-				x->pcs_speed = SPEED_100;
-			else
-				x->pcs_speed = SPEED_10;
-
-			x->pcs_link = 1;
-			pr_debug("%s: Link is Up - %d/%s\n", __func__,
-				 (int)x->pcs_speed,
-				 x->pcs_duplex ? "Full" : "Half");
-		} else {
-			x->pcs_link = 0;
-			pr_debug("%s: Link is Down\n", __func__);
-		}
-	}
+	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
+
+	if (intr_status & PCS_RGSMIIIS_IRQ)
+		dwmac1000_rgsmii(ioaddr, x);
308 342
309 return ret; 343 return ret;
310} 344}
@@ -363,38 +397,20 @@ static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
363 writel(value, ioaddr + LPI_TIMER_CTRL); 397 writel(value, ioaddr + LPI_TIMER_CTRL);
364} 398}
365 399
-static void dwmac1000_ctrl_ane(struct mac_device_info *hw, bool restart)
-{
-	void __iomem *ioaddr = hw->pcsr;
-	/* auto negotiation enable and External Loopback enable */
-	u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
-
-	if (restart)
-		value |= GMAC_AN_CTRL_RAN;
-
-	writel(value, ioaddr + GMAC_AN_CTRL);
-}
-
-static void dwmac1000_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv)
-{
-	void __iomem *ioaddr = hw->pcsr;
-	u32 value = readl(ioaddr + GMAC_ANE_ADV);
-
-	if (value & GMAC_ANE_FD)
-		adv->duplex = DUPLEX_FULL;
-	if (value & GMAC_ANE_HD)
-		adv->duplex |= DUPLEX_HALF;
-
-	adv->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
-
-	value = readl(ioaddr + GMAC_ANE_LPA);
-
-	if (value & GMAC_ANE_FD)
-		adv->lp_duplex = DUPLEX_FULL;
-	if (value & GMAC_ANE_HD)
-		adv->lp_duplex = DUPLEX_HALF;
-
-	adv->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
-}
+static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+			       bool loopback)
+{
+	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
+}
+
+static void dwmac1000_rane(void __iomem *ioaddr, bool restart)
+{
+	dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
+}
+
+static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
+{
+	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
+}
399 415
400static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) 416static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
@@ -485,9 +501,10 @@ static const struct stmmac_ops dwmac1000_ops = {
485 .reset_eee_mode = dwmac1000_reset_eee_mode, 501 .reset_eee_mode = dwmac1000_reset_eee_mode,
486 .set_eee_timer = dwmac1000_set_eee_timer, 502 .set_eee_timer = dwmac1000_set_eee_timer,
487 .set_eee_pls = dwmac1000_set_eee_pls, 503 .set_eee_pls = dwmac1000_set_eee_pls,
488 .ctrl_ane = dwmac1000_ctrl_ane,
489 .get_adv = dwmac1000_get_adv,
490 .debug = dwmac1000_debug, 504 .debug = dwmac1000_debug,
505 .pcs_ctrl_ane = dwmac1000_ctrl_ane,
506 .pcs_rane = dwmac1000_rane,
507 .pcs_get_adv_lp = dwmac1000_get_adv_lp,
491}; 508};
492 509
493struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, 510struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index bc50952a18e7..6f4f5ce25114 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -24,10 +24,8 @@
24#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4) 24#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
25#define GMAC_INT_STATUS 0x000000b0 25#define GMAC_INT_STATUS 0x000000b0
26#define GMAC_INT_EN 0x000000b4 26#define GMAC_INT_EN 0x000000b4
-#define GMAC_AN_CTRL			0x000000e0
-#define GMAC_AN_STATUS			0x000000e4
-#define GMAC_AN_ADV			0x000000e8
-#define GMAC_AN_LPA			0x000000ec
+#define GMAC_PCS_BASE			0x000000e0
+#define GMAC_PHYIF_CONTROL_STATUS	0x000000f8
31#define GMAC_PMT 0x000000c0 29#define GMAC_PMT 0x000000c0
32#define GMAC_VERSION 0x00000110 30#define GMAC_VERSION 0x00000110
33#define GMAC_DEBUG 0x00000114 31#define GMAC_DEBUG 0x00000114
@@ -54,9 +52,18 @@
54#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16 52#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16
55 53
56/* MAC Interrupt bitmap*/ 54/* MAC Interrupt bitmap*/
55#define GMAC_INT_RGSMIIS BIT(0)
56#define GMAC_INT_PCS_LINK BIT(1)
57#define GMAC_INT_PCS_ANE BIT(2)
58#define GMAC_INT_PCS_PHYIS BIT(3)
57#define GMAC_INT_PMT_EN BIT(4) 59#define GMAC_INT_PMT_EN BIT(4)
58#define GMAC_INT_LPI_EN BIT(5) 60#define GMAC_INT_LPI_EN BIT(5)
59 61
62#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \
63 GMAC_INT_PCS_ANE)
64
65#define GMAC_INT_DEFAULT_MASK GMAC_INT_PMT_EN
66
60enum dwmac4_irq_status { 67enum dwmac4_irq_status {
61 time_stamp_irq = 0x00001000, 68 time_stamp_irq = 0x00001000,
62 mmc_rx_csum_offload_irq = 0x00000800, 69 mmc_rx_csum_offload_irq = 0x00000800,
@@ -64,19 +71,8 @@ enum dwmac4_irq_status {
64 mmc_rx_irq = 0x00000200, 71 mmc_rx_irq = 0x00000200,
65 mmc_irq = 0x00000100, 72 mmc_irq = 0x00000100,
66 pmt_irq = 0x00000010, 73 pmt_irq = 0x00000010,
67 pcs_ane_irq = 0x00000004,
68 pcs_link_irq = 0x00000002,
69}; 74};
70 75
71/* MAC Auto-Neg bitmap*/
72#define GMAC_AN_CTRL_RAN BIT(9)
73#define GMAC_AN_CTRL_ANE BIT(12)
74#define GMAC_AN_CTRL_ELE BIT(14)
75#define GMAC_AN_FD BIT(5)
76#define GMAC_AN_HD BIT(6)
77#define GMAC_AN_PSE_MASK GENMASK(8, 7)
78#define GMAC_AN_PSE_SHIFT 7
79
80/* MAC PMT bitmap */
81enum power_event {
82 pointer_reset = 0x80000000,
@@ -250,6 +246,23 @@ enum power_event {
250#define MTL_DEBUG_RRCSTS_FLUSH 3
251#define MTL_DEBUG_RWCSTS BIT(0)
252
249/* SGMII/RGMII status register */
250#define GMAC_PHYIF_CTRLSTATUS_TC BIT(0)
251#define GMAC_PHYIF_CTRLSTATUS_LUD BIT(1)
252#define GMAC_PHYIF_CTRLSTATUS_SMIDRXS BIT(4)
253#define GMAC_PHYIF_CTRLSTATUS_LNKMOD BIT(16)
254#define GMAC_PHYIF_CTRLSTATUS_SPEED GENMASK(18, 17)
255#define GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT 17
256#define GMAC_PHYIF_CTRLSTATUS_LNKSTS BIT(19)
257#define GMAC_PHYIF_CTRLSTATUS_JABTO BIT(20)
258#define GMAC_PHYIF_CTRLSTATUS_FALSECARDET BIT(21)
259/* LNKMOD */
260#define GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK 0x1
261/* LNKSPEED */
262#define GMAC_PHYIF_CTRLSTATUS_SPEED_125 0x2
263#define GMAC_PHYIF_CTRLSTATUS_SPEED_25 0x1
264#define GMAC_PHYIF_CTRLSTATUS_SPEED_2_5 0x0
265
253extern const struct stmmac_dma_ops dwmac4_dma_ops;
254extern const struct stmmac_dma_ops dwmac410_dma_ops;
255#endif /* __DWMAC4_H__ */
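For readers decoding the new SGMII/RGMII status word above: the SPEED field encodes the interface clock, which maps onto line rate as in this small sketch (the helper name is illustrative, not part of the patch):

static int phyif_status_to_speed(u32 status)
{
	u32 speed = (status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
		    GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT;

	if (speed == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
		return 1000;	/* 125 MHz clock: 1000 Mb/s */
	else if (speed == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
		return 100;	/* 25 MHz clock: 100 Mb/s */
	return 10;		/* 2.5 MHz clock: 10 Mb/s */
}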
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 44da877d2483..df5580dcdfed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -17,6 +17,7 @@
17#include <linux/slab.h>
18#include <linux/ethtool.h>
19#include <linux/io.h>
20#include "stmmac_pcs.h"
20#include "dwmac4.h"
21
22static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
@@ -31,10 +32,31 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
31 if (mtu > 2000)
32 value |= GMAC_CONFIG_JE;
33
35 if (hw->ps) {
36 value |= GMAC_CONFIG_TE;
37
38 if (hw->ps == SPEED_1000) {
39 value &= ~GMAC_CONFIG_PS;
40 } else {
41 value |= GMAC_CONFIG_PS;
42
43 if (hw->ps == SPEED_10)
44 value &= ~GMAC_CONFIG_FES;
45 else
46 value |= GMAC_CONFIG_FES;
47 }
48 }
49
34 writel(value, ioaddr + GMAC_CONFIG);
35
36 /* Mask GMAC interrupts */
37 writel(GMAC_INT_PMT_EN, ioaddr + GMAC_INT_EN);
53 value = GMAC_INT_DEFAULT_MASK;
54 if (hw->pmt)
55 value |= GMAC_INT_PMT_EN;
56 if (hw->pcs)
57 value |= GMAC_PCS_IRQ_DEFAULT;
58
59 writel(value, ioaddr + GMAC_INT_EN);
38}
39
40static void dwmac4_dump_regs(struct mac_device_info *hw)
@@ -190,39 +212,53 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
190 }
191}
192
193static void dwmac4_ctrl_ane(struct mac_device_info *hw, bool restart)
215static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
216 bool loopback)
194{
195 void __iomem *ioaddr = hw->pcsr;
218 dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
196
219}
197 /* auto negotiation enable and External Loopback enable */
198 u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
199
200 if (restart)
201 value |= GMAC_AN_CTRL_RAN;
221static void dwmac4_rane(void __iomem *ioaddr, bool restart)
222{
223 dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
224}
202
203 writel(value, ioaddr + GMAC_AN_CTRL);
226static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
227{
228 dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
204}
205
206static void dwmac4_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv)
231/* RGMII or SMII interface */
232static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
207{
208 void __iomem *ioaddr = hw->pcsr;
234 u32 status;
209 u32 value = readl(ioaddr + GMAC_AN_ADV);
210
211 if (value & GMAC_AN_FD)
212 adv->duplex = DUPLEX_FULL;
213 if (value & GMAC_AN_HD)
214 adv->duplex |= DUPLEX_HALF;
236 status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
237 x->irq_rgmii_n++;
215
216 adv->pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT;
239 /* Check the link status */
240 if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
241 int speed_value;
217
218 value = readl(ioaddr + GMAC_AN_LPA);
243 x->pcs_link = 1;
244
245 speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
246 GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
247 if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
248 x->pcs_speed = SPEED_1000;
249 else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
250 x->pcs_speed = SPEED_100;
251 else
252 x->pcs_speed = SPEED_10;
219
220 if (value & GMAC_AN_FD)
221 adv->lp_duplex = DUPLEX_FULL;
222 if (value & GMAC_AN_HD)
223 adv->lp_duplex = DUPLEX_HALF;
254 x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);
224
225 adv->lp_pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT;
256 pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
257 x->pcs_duplex ? "Full" : "Half");
258 } else {
259 x->pcs_link = 0;
260 pr_info("Link is Down\n");
261 }
226}
227
228static int dwmac4_irq_status(struct mac_device_info *hw,
@@ -248,11 +284,6 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
248 x->irq_receive_pmt_irq_n++;
249 }
250
251 if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
252 readl(ioaddr + GMAC_AN_STATUS);
253 x->irq_pcs_ane_n++;
254 }
255
256 mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
257 /* Check MTL Interrupt: Currently only one queue is used: Q0. */
258 if (mtl_int_qx_status & MTL_INT_Q0) {
@@ -267,6 +298,10 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
267 }
268 }
269
301 dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
302 if (intr_status & PCS_RGSMIIIS_IRQ)
303 dwmac4_phystatus(ioaddr, x);
304
270 return ret;
271}
272
@@ -363,8 +398,9 @@ static const struct stmmac_ops dwmac4_ops = {
363 .pmt = dwmac4_pmt,
364 .set_umac_addr = dwmac4_set_umac_addr,
365 .get_umac_addr = dwmac4_get_umac_addr,
366 .ctrl_ane = dwmac4_ctrl_ane,
367 .get_adv = dwmac4_get_adv,
401 .pcs_ctrl_ane = dwmac4_ctrl_ane,
402 .pcs_rane = dwmac4_rane,
403 .pcs_get_adv_lp = dwmac4_get_adv_lp,
368 .debug = dwmac4_debug,
369 .set_filter = dwmac4_set_filter,
370};
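The renamed hooks are reached through struct stmmac_ops, and callers guard on both the detected PCS mode and the callback pointer. A minimal caller sketch, mirroring the ethtool path later in this diff (field names as in this series):

	/* enable and restart ANE; srgmi_ral is driven by the port speed */
	if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
		priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1, priv->hw->ps, 0);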
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 59ae6088cd22..8dc9056c1001 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -117,7 +117,6 @@ struct stmmac_priv {
117 int eee_enabled;
118 int eee_active;
119 int tx_lpi_timer;
120 int pcs;
121 unsigned int mode;
122 int extend_desc;
123 struct ptp_clock *ptp_clock;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index e2b98b01647e..1e06173fc9d7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -276,7 +276,8 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
276 struct phy_device *phy = priv->phydev;
277 int rc;
278
279 if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
279 if (priv->hw->pcs & STMMAC_PCS_RGMII ||
280 priv->hw->pcs & STMMAC_PCS_SGMII) {
280 struct rgmii_adv adv;
281
282 if (!priv->xstats.pcs_link) {
@@ -289,10 +290,10 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
289 ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed);
290
291 /* Get and convert ADV/LP_ADV from the HW AN registers */
292 if (!priv->hw->mac->get_adv)
293 if (!priv->hw->mac->pcs_get_adv_lp)
293 return -EOPNOTSUPP; /* should never happen indeed */
294
295 priv->hw->mac->get_adv(priv->hw, &adv);
296 priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv);
296
297 /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
298
@@ -361,7 +362,8 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
361 struct phy_device *phy = priv->phydev;
362 int rc;
363
364 if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
365 if (priv->hw->pcs & STMMAC_PCS_RGMII ||
366 priv->hw->pcs & STMMAC_PCS_SGMII) {
365 u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;
366
367 /* Only support ANE */
@@ -376,8 +378,11 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
376 ADVERTISED_10baseT_Full);
377
378 spin_lock(&priv->lock);
379 if (priv->hw->mac->ctrl_ane)
380 priv->hw->mac->ctrl_ane(priv->hw, 1);
381
382 if (priv->hw->mac->pcs_ctrl_ane)
383 priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1,
384 priv->hw->ps, 0);
385
381 spin_unlock(&priv->lock);
382
383 return 0;
@@ -452,11 +457,22 @@ stmmac_get_pauseparam(struct net_device *netdev,
452{
453 struct stmmac_priv *priv = netdev_priv(netdev);
454
455 if (priv->pcs) /* FIXME */
456 return;
457
458 pause->rx_pause = 0;
459 pause->tx_pause = 0;
462
463 if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
464 struct rgmii_adv adv_lp;
465
466 pause->autoneg = 1;
467 priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
468 if (!adv_lp.pause)
469 return;
470 } else {
471 if (!(priv->phydev->supported & SUPPORTED_Pause) ||
472 !(priv->phydev->supported & SUPPORTED_Asym_Pause))
473 return;
474 }
475
460 pause->autoneg = priv->phydev->autoneg;
461
462 if (priv->flow_ctrl & FLOW_RX)
@@ -473,10 +489,19 @@ stmmac_set_pauseparam(struct net_device *netdev,
473 struct stmmac_priv *priv = netdev_priv(netdev);
474 struct phy_device *phy = priv->phydev;
475 int new_pause = FLOW_OFF;
476 int ret = 0;
477
478 if (priv->pcs) /* FIXME */
479 return -EOPNOTSUPP;
493 if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
494 struct rgmii_adv adv_lp;
495
496 pause->autoneg = 1;
497 priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
498 if (!adv_lp.pause)
499 return -EOPNOTSUPP;
500 } else {
501 if (!(phy->supported & SUPPORTED_Pause) ||
502 !(phy->supported & SUPPORTED_Asym_Pause))
503 return -EOPNOTSUPP;
504 }
480
481 if (pause->rx_pause)
482 new_pause |= FLOW_RX;
@@ -488,11 +513,12 @@ stmmac_set_pauseparam(struct net_device *netdev,
488
489 if (phy->autoneg) {
490 if (netif_running(netdev))
491 ret = phy_start_aneg(phy);
492 } else
493 priv->hw->mac->flow_ctrl(priv->hw, phy->duplex,
494 priv->flow_ctrl, priv->pause);
495 return ret;
516 return phy_start_aneg(phy);
517 }
518
519 priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl,
520 priv->pause);
521 return 0;
496}
497
498static void stmmac_get_ethtool_stats(struct net_device *dev,
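With a PCS attached there is no phylib PHY to query, so pause parameters are derived from the link-partner ability word instead. Distilled from stmmac_get_pauseparam() above (sketch, same structures):

	struct rgmii_adv adv_lp;

	pause->autoneg = 1;
	priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
	if (!adv_lp.pause)
		return;	/* partner advertises no pause: rx/tx stay 0 */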
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a473c182c91d..c23ccabc2d8a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -285,8 +285,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
285 /* Using PCS we cannot dial with the phy registers at this stage
286 * so we do not support extra feature like EEE.
287 */
288 if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
289 (priv->pcs == STMMAC_PCS_RTBI))
288 if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
289 (priv->hw->pcs == STMMAC_PCS_TBI) ||
290 (priv->hw->pcs == STMMAC_PCS_RTBI))
290 goto out;
291
292 /* MAC core supports the EEE feature. */
@@ -799,10 +800,10 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
799 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
800 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
801 pr_debug("STMMAC: PCS RGMII support enable\n");
802 priv->pcs = STMMAC_PCS_RGMII;
803 priv->hw->pcs = STMMAC_PCS_RGMII;
803 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
804 pr_debug("STMMAC: PCS SGMII support enable\n");
805 priv->pcs = STMMAC_PCS_SGMII;
806 priv->hw->pcs = STMMAC_PCS_SGMII;
806 }
807 }
808}
@@ -1665,6 +1666,19 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1665 if (priv->plat->bus_setup)
1666 priv->plat->bus_setup(priv->ioaddr);
1667
1669 /* PS and related bits will be programmed according to the speed */
1670 if (priv->hw->pcs) {
1671 int speed = priv->plat->mac_port_sel_speed;
1672
1673 if ((speed == SPEED_10) || (speed == SPEED_100) ||
1674 (speed == SPEED_1000)) {
1675 priv->hw->ps = speed;
1676 } else {
1677 dev_warn(priv->device, "invalid port speed\n");
1678 priv->hw->ps = 0;
1679 }
1680 }
1681
1668 /* Initialize the MAC Core */
1669 priv->hw->mac->core_init(priv->hw, dev->mtu);
1670
@@ -1714,8 +1728,8 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1714 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
1715 }
1716
1717 if (priv->pcs && priv->hw->mac->ctrl_ane)
1718 priv->hw->mac->ctrl_ane(priv->hw, 0);
1731 if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
1732 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
1719
1720 /* set TX ring length */
1721 if (priv->hw->dma->set_tx_ring_len)
@@ -1748,8 +1762,9 @@ static int stmmac_open(struct net_device *dev)
1748
1749 stmmac_check_ether_addr(priv);
1750
1751 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
1752 priv->pcs != STMMAC_PCS_RTBI) {
1765 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
1766 priv->hw->pcs != STMMAC_PCS_TBI &&
1767 priv->hw->pcs != STMMAC_PCS_RTBI) {
1753 ret = stmmac_init_phy(dev);
1754 if (ret) {
1755 pr_err("%s: Cannot attach to PHY (error: %d)\n",
@@ -2804,11 +2819,19 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2804 priv->tx_path_in_lpi_mode = true;
2805 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
2806 priv->tx_path_in_lpi_mode = false;
2807 if (status & CORE_IRQ_MTL_RX_OVERFLOW)
2822 if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
2808 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2809 priv->rx_tail_addr,
2810 STMMAC_CHAN0);
2811 }
2827
2828 /* PCS link status */
2829 if (priv->hw->pcs) {
2830 if (priv->xstats.pcs_link)
2831 netif_carrier_on(dev);
2832 else
2833 netif_carrier_off(dev);
2834 }
2812 }
2813
2814 /* To handle DMA interrupts */
@@ -3130,6 +3153,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
3130 */
3131 priv->plat->enh_desc = priv->dma_cap.enh_desc;
3132 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
3156 priv->hw->pmt = priv->plat->pmt;
3133
3134 /* TXCOE doesn't work in thresh DMA mode */
3135 if (priv->plat->force_thresh_dma_mode)
@@ -3325,8 +3349,9 @@ int stmmac_dvr_probe(struct device *device,
3325
3326 stmmac_check_pcs_mode(priv);
3327
3328 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
3329 priv->pcs != STMMAC_PCS_RTBI) {
3352 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3353 priv->hw->pcs != STMMAC_PCS_TBI &&
3354 priv->hw->pcs != STMMAC_PCS_RTBI) {
3330 /* MDIO bus Registration */
3331 ret = stmmac_mdio_register(ndev);
3332 if (ret < 0) {
@@ -3376,8 +3401,9 @@ int stmmac_dvr_remove(struct device *dev)
3376 reset_control_assert(priv->stmmac_rst);
3377 clk_disable_unprepare(priv->pclk);
3378 clk_disable_unprepare(priv->stmmac_clk);
3379 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
3380 priv->pcs != STMMAC_PCS_RTBI)
3404 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3405 priv->hw->pcs != STMMAC_PCS_TBI &&
3406 priv->hw->pcs != STMMAC_PCS_RTBI)
3381 stmmac_mdio_unregister(ndev);
3382 free_netdev(ndev);
3383
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
new file mode 100644
index 000000000000..eba41c24b7a7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
@@ -0,0 +1,159 @@
1/*
2 * stmmac_pcs.h: Physical Coding Sublayer Header File
3 *
4 * Copyright (C) 2016 STMicroelectronics (R&D) Limited
5 * Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#ifndef __STMMAC_PCS_H__
14#define __STMMAC_PCS_H__
15
16#include <linux/slab.h>
17#include <linux/io.h>
18#include "common.h"
19
20/* PCS registers (AN/TBI/SGMII/RGMII) offsets */
21#define GMAC_AN_CTRL(x) (x) /* AN control */
22#define GMAC_AN_STATUS(x) (x + 0x4) /* AN status */
23#define GMAC_ANE_ADV(x) (x + 0x8) /* ANE Advertisement */
24#define GMAC_ANE_LPA(x) (x + 0xc) /* ANE link partner ability */
25#define GMAC_ANE_EXP(x) (x + 0x10) /* ANE expansion */
26#define GMAC_TBI(x) (x + 0x14) /* TBI extend status */
27
28/* AN Configuration defines */
29#define GMAC_AN_CTRL_RAN BIT(9) /* Restart Auto-Negotiation */
30#define GMAC_AN_CTRL_ANE BIT(12) /* Auto-Negotiation Enable */
31#define GMAC_AN_CTRL_ELE BIT(14) /* External Loopback Enable */
32#define GMAC_AN_CTRL_ECD BIT(16) /* Enable Comma Detect */
33#define GMAC_AN_CTRL_LR BIT(17) /* Lock to Reference */
34#define GMAC_AN_CTRL_SGMRAL BIT(18) /* SGMII RAL Control */
35
36/* AN Status defines */
37#define GMAC_AN_STATUS_LS BIT(2) /* Link Status 0:down 1:up */
38#define GMAC_AN_STATUS_ANA BIT(3) /* Auto-Negotiation Ability */
39#define GMAC_AN_STATUS_ANC BIT(5) /* Auto-Negotiation Complete */
40#define GMAC_AN_STATUS_ES BIT(8) /* Extended Status */
41
42/* ADV and LPA defines */
43#define GMAC_ANE_FD BIT(5)
44#define GMAC_ANE_HD BIT(6)
45#define GMAC_ANE_PSE GENMASK(8, 7)
46#define GMAC_ANE_PSE_SHIFT 7
47#define GMAC_ANE_RFE GENMASK(13, 12)
48#define GMAC_ANE_RFE_SHIFT 12
49#define GMAC_ANE_ACK BIT(14)
50
51/**
52 * dwmac_pcs_isr - TBI, RTBI, or SGMII PHY ISR
53 * @ioaddr: IO registers pointer
54 * @reg: Base address of the AN Control Register.
55 * @intr_status: GMAC core interrupt status
56 * @x: pointer to log these events as stats
57 * Description: it is the ISR for PCS events: Auto-Negotiation Completed and
58 * Link status.
59 */
60static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg,
61 unsigned int intr_status,
62 struct stmmac_extra_stats *x)
63{
64 u32 val = readl(ioaddr + GMAC_AN_STATUS(reg));
65
66 if (intr_status & PCS_ANE_IRQ) {
67 x->irq_pcs_ane_n++;
68 if (val & GMAC_AN_STATUS_ANC)
69 pr_info("stmmac_pcs: ANE process completed\n");
70 }
71
72 if (intr_status & PCS_LINK_IRQ) {
73 x->irq_pcs_link_n++;
74 if (val & GMAC_AN_STATUS_LS)
75 pr_info("stmmac_pcs: Link Up\n");
76 else
77 pr_info("stmmac_pcs: Link Down\n");
78 }
79}
80
81/**
82 * dwmac_rane - To restart ANE
83 * @ioaddr: IO registers pointer
84 * @reg: Base address of the AN Control Register.
85 * @restart: to restart ANE
86 * Description: this just restarts the Auto-Negotiation.
87 */
88static inline void dwmac_rane(void __iomem *ioaddr, u32 reg, bool restart)
89{
90 u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));
91
92 if (restart)
93 value |= GMAC_AN_CTRL_RAN;
94
95 writel(value, ioaddr + GMAC_AN_CTRL(reg));
96}
97
98/**
99 * dwmac_ctrl_ane - To program the AN Control Register.
100 * @ioaddr: IO registers pointer
101 * @reg: Base address of the AN Control Register.
102 * @ane: to enable the auto-negotiation
103 * @srgmi_ral: to manage MAC-2-MAC SGMII connections.
104 * @loopback: to cause the PHY to loopback tx data into rx path.
105 * Description: this is the main function to configure the AN control register
107 * and init the ANE, select loopback (usually for debugging purposes) and
107 * configure SGMII RAL.
108 */
109static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane,
110 bool srgmi_ral, bool loopback)
111{
112 u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));
113
114 /* Enable and restart the Auto-Negotiation */
115 if (ane)
116 value |= GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_RAN;
117
118 /* In case of MAC-2-MAC connection, block is configured to operate
119 * according to MAC conf register.
120 */
121 if (srgmi_ral)
122 value |= GMAC_AN_CTRL_SGMRAL;
123
124 if (loopback)
125 value |= GMAC_AN_CTRL_ELE;
126
127 writel(value, ioaddr + GMAC_AN_CTRL(reg));
128}
129
130/**
131 * dwmac_get_adv_lp - Get ADV and LP cap
132 * @ioaddr: IO registers pointer
133 * @reg: Base address of the AN Control Register.
134 * @adv_lp: structure to store the ADV and LP status
135 * Description: this is to expose the ANE advertisement and Link partner ability
136 * status to ethtool support.
137 */
138static inline void dwmac_get_adv_lp(void __iomem *ioaddr, u32 reg,
139 struct rgmii_adv *adv_lp)
140{
141 u32 value = readl(ioaddr + GMAC_ANE_ADV(reg));
142
143 if (value & GMAC_ANE_FD)
144 adv_lp->duplex = DUPLEX_FULL;
145 if (value & GMAC_ANE_HD)
146 adv_lp->duplex |= DUPLEX_HALF;
147
148 adv_lp->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
149
150 value = readl(ioaddr + GMAC_ANE_LPA(reg));
151
152 if (value & GMAC_ANE_FD)
153 adv_lp->lp_duplex = DUPLEX_FULL;
154 if (value & GMAC_ANE_HD)
155 adv_lp->lp_duplex = DUPLEX_HALF;
156
157 adv_lp->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
158}
159#endif /* __STMMAC_PCS_H__ */
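Since every helper takes the AN block's register offset, a MAC core adapts them to stmmac_ops with thin wrappers; the dwmac1000 and dwmac4 glue earlier in this diff reduces to calls of this shape (sketch; dwmacXXXX is a placeholder name):

static void dwmacXXXX_ctrl_ane(void __iomem *ioaddr, bool ane,
			       bool srgmi_ral, bool loopback)
{
	/* GMAC_PCS_BASE is the core-specific offset of the AN registers */
	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}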
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index a96714d34560..f7dfc0ae8e9c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -319,6 +319,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
319 pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
320 }
321
322 of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed);
323
322 plat->axi = stmmac_axi_setup(pdev);
323
324 return plat;
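The new optional property lands in plat->mac_port_sel_speed and is validated in stmmac_hw_setup(); only the three standard speeds are honoured. A recap of the check added earlier in this diff (sketch):

	int speed = priv->plat->mac_port_sel_speed;

	if (speed == SPEED_10 || speed == SPEED_100 || speed == SPEED_1000)
		priv->hw->ps = speed;	/* PS/FES programmed from this */
	else
		priv->hw->ps = 0;	/* absent or bogus: warn, disable PS */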
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 158213cd6cdd..c34111b390c7 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -46,7 +46,6 @@
46#include <linux/delay.h>
47#include <linux/dma-mapping.h>
48#include <linux/vmalloc.h>
49#include <linux/version.h>
50
51#include <linux/device.h>
52#include <linux/bitrev.h>
@@ -598,7 +597,6 @@ struct net_local {
598 struct work_struct txtimeout_reinit;
599
600 phy_interface_t phy_interface;
601 struct phy_device *phy_dev;
602 struct mii_bus *mii_bus;
603
604 unsigned int link;
@@ -816,7 +814,7 @@ static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg,
816static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
817{
818 struct net_local *lp = netdev_priv(ndev);
819 struct phy_device *phydev = lp->phy_dev;
817 struct phy_device *phydev = ndev->phydev;
820
821 if (!netif_running(ndev))
822 return -EINVAL;
@@ -850,6 +848,7 @@ static void dwceqos_link_down(struct net_local *lp)
850
851static void dwceqos_link_up(struct net_local *lp)
852{
851 struct net_device *ndev = lp->ndev;
853 u32 regval;
854 unsigned long flags;
855
@@ -860,7 +859,7 @@ static void dwceqos_link_up(struct net_local *lp)
860 dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
861 spin_unlock_irqrestore(&lp->hw_lock, flags);
862
863 lp->eee_active = !phy_init_eee(lp->phy_dev, 0);
862 lp->eee_active = !phy_init_eee(ndev->phydev, 0);
864
865 /* Check for changed EEE capability */
866 if (!lp->eee_active && lp->eee_enabled) {
@@ -876,7 +875,8 @@ static void dwceqos_link_up(struct net_local *lp)
876
877static void dwceqos_set_speed(struct net_local *lp)
878{
879 struct phy_device *phydev = lp->phy_dev;
878 struct net_device *ndev = lp->ndev;
879 struct phy_device *phydev = ndev->phydev;
880 u32 regval;
881
882 regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
@@ -903,7 +903,7 @@ static void dwceqos_set_speed(struct net_local *lp)
903static void dwceqos_adjust_link(struct net_device *ndev)
904{
905 struct net_local *lp = netdev_priv(ndev);
906 struct phy_device *phydev = lp->phy_dev;
906 struct phy_device *phydev = ndev->phydev;
907 int status_change = 0;
908
909 if (lp->phy_defer)
@@ -987,7 +987,6 @@ static int dwceqos_mii_probe(struct net_device *ndev)
987 lp->link = 0;
988 lp->speed = 0;
989 lp->duplex = DUPLEX_UNKNOWN;
990 lp->phy_dev = phydev;
991
992 return 0;
993}
@@ -1531,6 +1530,7 @@ static void dwceqos_configure_bus(struct net_local *lp)
1531
1532static void dwceqos_init_hw(struct net_local *lp)
1533{
1533 struct net_device *ndev = lp->ndev;
1534 u32 regval;
1535 u32 buswidth;
1536 u32 dma_skip;
@@ -1645,10 +1645,10 @@ static void dwceqos_init_hw(struct net_local *lp)
1645 regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
1646
1647 lp->phy_defer = false;
1648 mutex_lock(&lp->phy_dev->lock);
1649 phy_read_status(lp->phy_dev);
1648 mutex_lock(&ndev->phydev->lock);
1649 phy_read_status(ndev->phydev);
1650 dwceqos_adjust_link(lp->ndev);
1651 mutex_unlock(&lp->phy_dev->lock);
1651 mutex_unlock(&ndev->phydev->lock);
1652}
1653
1654static void dwceqos_tx_reclaim(unsigned long data)
@@ -1898,7 +1898,7 @@ static int dwceqos_open(struct net_device *ndev)
1898 * hence the unusual init order with phy_start first.
1899 */
1900 lp->phy_defer = true;
1901 phy_start(lp->phy_dev);
1901 phy_start(ndev->phydev);
1902 dwceqos_init_hw(lp);
1903 napi_enable(&lp->napi);
1904
@@ -1943,7 +1943,7 @@ static int dwceqos_stop(struct net_device *ndev)
1943
1944 dwceqos_drain_dma(lp);
1945 dwceqos_reset_hw(lp);
1946 phy_stop(lp->phy_dev);
1946 phy_stop(ndev->phydev);
1947
1948 dwceqos_descriptor_free(lp);
1949
@@ -2523,30 +2523,6 @@ dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
2523 return s;
2524}
2525
2526static int
2527dwceqos_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
2528{
2529 struct net_local *lp = netdev_priv(ndev);
2530 struct phy_device *phydev = lp->phy_dev;
2531
2532 if (!phydev)
2533 return -ENODEV;
2534
2535 return phy_ethtool_gset(phydev, ecmd);
2536}
2537
2538static int
2539dwceqos_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
2540{
2541 struct net_local *lp = netdev_priv(ndev);
2542 struct phy_device *phydev = lp->phy_dev;
2543
2544 if (!phydev)
2545 return -ENODEV;
2546
2547 return phy_ethtool_sset(phydev, ecmd);
2548}
2549
2550static void
2551dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
2552{
@@ -2574,17 +2550,17 @@ static int dwceqos_set_pauseparam(struct net_device *ndev,
2574
2575 lp->flowcontrol.autoneg = pp->autoneg;
2576 if (pp->autoneg) {
2577 lp->phy_dev->advertising |= ADVERTISED_Pause;
2578 lp->phy_dev->advertising |= ADVERTISED_Asym_Pause;
2553 ndev->phydev->advertising |= ADVERTISED_Pause;
2554 ndev->phydev->advertising |= ADVERTISED_Asym_Pause;
2579 } else {
2580 lp->phy_dev->advertising &= ~ADVERTISED_Pause;
2581 lp->phy_dev->advertising &= ~ADVERTISED_Asym_Pause;
2556 ndev->phydev->advertising &= ~ADVERTISED_Pause;
2557 ndev->phydev->advertising &= ~ADVERTISED_Asym_Pause;
2582 lp->flowcontrol.rx = pp->rx_pause;
2583 lp->flowcontrol.tx = pp->tx_pause;
2584 }
2585
2586 if (netif_running(ndev))
2587 ret = phy_start_aneg(lp->phy_dev);
2563 ret = phy_start_aneg(ndev->phydev);
2588
2589 return ret;
2590}
@@ -2705,7 +2681,7 @@ static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
2705 dwceqos_get_tx_lpi_state(regval));
2706 }
2707
2708 return phy_ethtool_get_eee(lp->phy_dev, edata);
2684 return phy_ethtool_get_eee(ndev->phydev, edata);
2709}
2710
2711static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
@@ -2747,7 +2723,7 @@ static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
2747 spin_unlock_irqrestore(&lp->hw_lock, flags);
2748 }
2749
2750 return phy_ethtool_set_eee(lp->phy_dev, edata);
2726 return phy_ethtool_set_eee(ndev->phydev, edata);
2751}
2752
2753static u32 dwceqos_get_msglevel(struct net_device *ndev)
@@ -2765,8 +2741,6 @@ static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
2765}
2766
2767static struct ethtool_ops dwceqos_ethtool_ops = {
2768 .get_settings = dwceqos_get_settings,
2769 .set_settings = dwceqos_set_settings,
2770 .get_drvinfo = dwceqos_get_drvinfo,
2771 .get_link = ethtool_op_get_link,
2772 .get_pauseparam = dwceqos_get_pauseparam,
@@ -2780,6 +2754,8 @@ static struct ethtool_ops dwceqos_ethtool_ops = {
2780 .set_eee = dwceqos_set_eee,
2781 .get_msglevel = dwceqos_get_msglevel,
2782 .set_msglevel = dwceqos_set_msglevel,
2757 .get_link_ksettings = phy_ethtool_get_link_ksettings,
2758 .set_link_ksettings = phy_ethtool_set_link_ksettings,
2783};
2784
2785static struct net_device_ops netdev_ops = {
@@ -2981,8 +2957,8 @@ static int dwceqos_remove(struct platform_device *pdev)
2981 if (ndev) {
2982 lp = netdev_priv(ndev);
2983
2984 if (lp->phy_dev)
2985 phy_disconnect(lp->phy_dev);
2960 if (ndev->phydev)
2961 phy_disconnect(ndev->phydev);
2986 mdiobus_unregister(lp->mii_bus);
2987 mdiobus_free(lp->mii_bus);
2988
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 9c924f15cd03..1a93a1f28433 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1243,6 +1243,7 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
1243 slave->phy = NULL;
1244 cpsw_ale_control_set(priv->ale, slave_port,
1245 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
1246 soft_reset_slave(slave);
1246}
1247
1248static int cpsw_ndo_open(struct net_device *ndev)
@@ -1251,7 +1252,11 @@ static int cpsw_ndo_open(struct net_device *ndev)
1251 int i, ret;
1252 u32 reg;
1253
1254 pm_runtime_get_sync(&priv->pdev->dev);
1255 ret = pm_runtime_get_sync(&priv->pdev->dev);
1256 if (ret < 0) {
1257 pm_runtime_put_noidle(&priv->pdev->dev);
1258 return ret;
1259 }
1255
1256 if (!cpsw_common_res_usage_state(priv))
1257 cpsw_intr_disable(priv);
@@ -1609,10 +1614,17 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
1609 struct sockaddr *addr = (struct sockaddr *)p;
1610 int flags = 0;
1611 u16 vid = 0;
1617 int ret;
1612
1613 if (!is_valid_ether_addr(addr->sa_data))
1614 return -EADDRNOTAVAIL;
1615
1622 ret = pm_runtime_get_sync(&priv->pdev->dev);
1623 if (ret < 0) {
1624 pm_runtime_put_noidle(&priv->pdev->dev);
1625 return ret;
1626 }
1627
1616 if (priv->data.dual_emac) {
1617 vid = priv->slaves[priv->emac_port].port_vlan;
1618 flags = ALE_VLAN;
@@ -1627,6 +1639,8 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
1627 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
1628 for_each_slave(priv, cpsw_set_slave_mac, priv);
1629
1642 pm_runtime_put(&priv->pdev->dev);
1643
1630 return 0;
1631}
1632
@@ -1691,10 +1705,17 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
1691 __be16 proto, u16 vid)
1692{
1693 struct cpsw_priv *priv = netdev_priv(ndev);
1708 int ret;
1694
1695 if (vid == priv->data.default_vlan)
1696 return 0;
1697
1713 ret = pm_runtime_get_sync(&priv->pdev->dev);
1714 if (ret < 0) {
1715 pm_runtime_put_noidle(&priv->pdev->dev);
1716 return ret;
1717 }
1718
1698 if (priv->data.dual_emac) {
1699 /* In dual EMAC, reserved VLAN id should not be used for
1700 * creating VLAN interfaces as this can break the dual
@@ -1709,7 +1730,10 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
1709 }
1710
1711 dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
1712 return cpsw_add_vlan_ale_entry(priv, vid);
1733 ret = cpsw_add_vlan_ale_entry(priv, vid);
1734
1735 pm_runtime_put(&priv->pdev->dev);
1736 return ret;
1713}
1714
1715static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
@@ -1721,6 +1745,12 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
1721 if (vid == priv->data.default_vlan)
1722 return 0;
1723
1748 ret = pm_runtime_get_sync(&priv->pdev->dev);
1749 if (ret < 0) {
1750 pm_runtime_put_noidle(&priv->pdev->dev);
1751 return ret;
1752 }
1753
1724 if (priv->data.dual_emac) {
1725 int i;
1726
@@ -1740,8 +1770,10 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
1740 if (ret != 0)
1741 return ret;
1742
1743 return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
1773 ret = cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
1744 0, ALE_VLAN, vid);
1775 pm_runtime_put(&priv->pdev->dev);
1776 return ret;
1745}
1746
1747static const struct net_device_ops cpsw_netdev_ops = {
@@ -1900,10 +1932,33 @@ static int cpsw_set_pauseparam(struct net_device *ndev,
1900 priv->tx_pause = pause->tx_pause ? true : false;
1901
1902 for_each_slave(priv, _cpsw_adjust_link, priv, &link);
1903
1904 return 0;
1905}
1906
1938static int cpsw_ethtool_op_begin(struct net_device *ndev)
1939{
1940 struct cpsw_priv *priv = netdev_priv(ndev);
1941 int ret;
1942
1943 ret = pm_runtime_get_sync(&priv->pdev->dev);
1944 if (ret < 0) {
1945 cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
1946 pm_runtime_put_noidle(&priv->pdev->dev);
1947 }
1948
1949 return ret;
1950}
1951
1952static void cpsw_ethtool_op_complete(struct net_device *ndev)
1953{
1954 struct cpsw_priv *priv = netdev_priv(ndev);
1955 int ret;
1956
1957 ret = pm_runtime_put(&priv->pdev->dev);
1958 if (ret < 0)
1959 cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
1960}
1961
1907static const struct ethtool_ops cpsw_ethtool_ops = {
1908 .get_drvinfo = cpsw_get_drvinfo,
1909 .get_msglevel = cpsw_get_msglevel,
@@ -1923,6 +1978,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
1923 .set_wol = cpsw_set_wol,
1924 .get_regs_len = cpsw_get_regs_len,
1925 .get_regs = cpsw_get_regs,
1981 .begin = cpsw_ethtool_op_begin,
1982 .complete = cpsw_ethtool_op_complete,
1926};
1927
1928static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
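The begin/complete pair brackets every ethtool operation with a runtime-PM reference; the same get_sync/put_noidle idiom is applied to the ndo callbacks above. The bare pattern, as a generic sketch rather than cpsw code:

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* get_sync raises the usage count even on failure */
		pm_runtime_put_noidle(dev);
		return ret;
	}
	/* ... access module registers ... */
	pm_runtime_put(dev);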
@@ -2311,7 +2368,11 @@ static int cpsw_probe(struct platform_device *pdev)
2311 /* Need to enable clocks with runtime PM api to access module
2312 * registers
2313 */
2314 pm_runtime_get_sync(&pdev->dev);
2371 ret = pm_runtime_get_sync(&pdev->dev);
2372 if (ret < 0) {
2373 pm_runtime_put_noidle(&pdev->dev);
2374 goto clean_runtime_disable_ret;
2375 }
2315 priv->version = readl(&priv->regs->id_ver);
2316 pm_runtime_put_sync(&pdev->dev);
2317
@@ -2495,8 +2556,6 @@ static int cpsw_probe(struct platform_device *pdev)
2495clean_ale_ret:
2496 cpsw_ale_destroy(priv->ale);
2497clean_dma_ret:
2498 cpdma_chan_destroy(priv->txch);
2499 cpdma_chan_destroy(priv->rxch);
2500 cpdma_ctlr_destroy(priv->dma);
2501clean_runtime_disable_ret:
2502 pm_runtime_disable(&pdev->dev);
@@ -2524,8 +2583,6 @@ static int cpsw_remove(struct platform_device *pdev)
2524 unregister_netdev(ndev);
2525
2526 cpsw_ale_destroy(priv->ale);
2527 cpdma_chan_destroy(priv->txch);
2528 cpdma_chan_destroy(priv->rxch);
2529 cpdma_ctlr_destroy(priv->dma);
2530 pm_runtime_disable(&pdev->dev);
2531 device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device);
@@ -2548,16 +2605,12 @@ static int cpsw_suspend(struct device *dev)
2548 for (i = 0; i < priv->data.slaves; i++) {
2549 if (netif_running(priv->slaves[i].ndev))
2550 cpsw_ndo_stop(priv->slaves[i].ndev);
2551 soft_reset_slave(priv->slaves + i);
2552 }
2553 } else {
2554 if (netif_running(ndev))
2555 cpsw_ndo_stop(ndev);
2556 for_each_slave(priv, soft_reset_slave);
2557 }
2558
2559 pm_runtime_put_sync(&pdev->dev);
2560
2561 /* Select sleep pin state */
2562 pinctrl_pm_select_sleep_state(&pdev->dev);
2563
@@ -2570,8 +2623,6 @@ static int cpsw_resume(struct device *dev)
2570 struct net_device *ndev = platform_get_drvdata(pdev);
2571 struct cpsw_priv *priv = netdev_priv(ndev);
2572
2573 pm_runtime_get_sync(&pdev->dev);
2574
2575 /* Select default pin state */
2576 pinctrl_pm_select_default_state(&pdev->dev);
2577
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index bcd9e455457e..1c653ca7c316 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -21,7 +21,7 @@
21#include <linux/dma-mapping.h>
22#include <linux/io.h>
23#include <linux/delay.h>
24
24#include <linux/genalloc.h>
25#include "davinci_cpdma.h"
26
27/* DMA Registers */
@@ -87,9 +87,8 @@ struct cpdma_desc_pool {
87 void *cpumap; /* dma_alloc map */
88 int desc_size, mem_size;
89 int num_desc, used_desc;
90 unsigned long *bitmap;
91 struct device *dev;
92 spinlock_t lock;
91 struct gen_pool *gen_pool;
93};
94
95enum cpdma_state {
@@ -117,6 +116,7 @@ struct cpdma_chan {
117 int chan_num;
118 spinlock_t lock;
119 int count;
119 u32 desc_num;
120 u32 mask;
121 cpdma_handler_fn handler;
122 enum dma_data_direction dir;
@@ -145,6 +145,19 @@ struct cpdma_chan {
145 (directed << CPDMA_TO_PORT_SHIFT)); \
146 } while (0)
147
148static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
149{
150 if (!pool)
151 return;
152
153 WARN_ON(pool->used_desc);
154 if (pool->cpumap)
155 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
156 pool->phys);
157 else
158 iounmap(pool->iomap);
159}
160
148/*
149 * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
150 * emac) have dedicated on-chip memory for these descriptors. Some other
@@ -155,24 +168,25 @@ static struct cpdma_desc_pool *
155cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
156 int size, int align)
157{
158 int bitmap_size;
159 struct cpdma_desc_pool *pool;
172 int ret;
160
161 pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
162 if (!pool)
163 goto fail;
176 goto gen_pool_create_fail;
164
165 spin_lock_init(&pool->lock);
166
167 pool->dev = dev;
168 pool->mem_size = size;
169 pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
170 pool->num_desc = size / pool->desc_size;
171
172 bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
173 pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
174 if (!pool->bitmap)
175 goto fail;
183 pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
184 "cpdma");
185 if (IS_ERR(pool->gen_pool)) {
186 dev_err(dev, "pool create failed %ld\n",
187 PTR_ERR(pool->gen_pool));
188 goto gen_pool_create_fail;
189 }
176
177 if (phys) {
178 pool->phys = phys;
@@ -185,24 +199,22 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
185 pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
186 }
187
188 if (pool->iomap)
189 return pool;
202 if (!pool->iomap)
203 goto gen_pool_create_fail;
190fail:
191 return NULL;
192}
193
194static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
195{
196 if (!pool)
197 return;
198
205 ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
206 pool->phys, pool->mem_size, -1);
207 if (ret < 0) {
208 dev_err(dev, "pool add failed %d\n", ret);
209 goto gen_pool_add_virt_fail;
199 WARN_ON(pool->used_desc);
200 if (pool->cpumap) {
201 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
202 pool->phys);
203 } else {
204 iounmap(pool->iomap);
205 }
211
212 return pool;
213
214gen_pool_add_virt_fail:
215 cpdma_desc_pool_destroy(pool);
216gen_pool_create_fail:
217 return NULL;
206}
207
208static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
@@ -220,47 +232,23 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
220}
221
222static struct cpdma_desc __iomem *
223cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
235cpdma_desc_alloc(struct cpdma_desc_pool *pool)
224{
225 unsigned long flags;
226 int index;
227 int desc_start;
228 int desc_end;
229 struct cpdma_desc __iomem *desc = NULL;
230
231 spin_lock_irqsave(&pool->lock, flags);
232
233 if (is_rx) {
239 desc = (struct cpdma_desc __iomem *)gen_pool_alloc(pool->gen_pool,
240 pool->desc_size);
241 if (desc)
234 desc_start = 0;
235 desc_end = pool->num_desc/2;
236 } else {
237 desc_start = pool->num_desc/2;
238 desc_end = pool->num_desc;
239 }
240
241 index = bitmap_find_next_zero_area(pool->bitmap,
242 desc_end, desc_start, num_desc, 0);
243 if (index < desc_end) {
244 bitmap_set(pool->bitmap, index, num_desc);
245 desc = pool->iomap + pool->desc_size * index;
246 pool->used_desc++;
247 }
248
249 spin_unlock_irqrestore(&pool->lock, flags);
250 return desc;
251}
252
253static void cpdma_desc_free(struct cpdma_desc_pool *pool,
254 struct cpdma_desc __iomem *desc, int num_desc)
255{
256 unsigned long flags, index;
250 gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
257
258 index = ((unsigned long)desc - (unsigned long)pool->iomap) /
259 pool->desc_size;
260 spin_lock_irqsave(&pool->lock, flags);
261 bitmap_clear(pool->bitmap, index, num_desc);
262 pool->used_desc--;
263 spin_unlock_irqrestore(&pool->lock, flags);
264}
265
266struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
@@ -516,6 +504,7 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
516 chan->state = CPDMA_STATE_IDLE;
517 chan->chan_num = chan_num;
518 chan->handler = handler;
507 chan->desc_num = ctlr->pool->num_desc / 2;
519
520 if (is_rx_chan(chan)) {
521 chan->hdp = ctlr->params.rxhdp + offset;
@@ -681,7 +670,13 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
681 goto unlock_ret;
682 }
683
684 desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
673 if (chan->count >= chan->desc_num) {
674 chan->stats.desc_alloc_fail++;
675 ret = -ENOMEM;
676 goto unlock_ret;
677 }
678
679 desc = cpdma_desc_alloc(ctlr->pool);
685 if (!desc) {
686 chan->stats.desc_alloc_fail++;
687 ret = -ENOMEM;
@@ -727,24 +722,16 @@ EXPORT_SYMBOL_GPL(cpdma_chan_submit);
727
728bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
729{
730 unsigned long flags;
731 int index;
732 bool ret;
733 struct cpdma_ctlr *ctlr = chan->ctlr;
734 struct cpdma_desc_pool *pool = ctlr->pool;
727 bool free_tx_desc;
728 unsigned long flags;
735
736 spin_lock_irqsave(&pool->lock, flags);
737
738 index = bitmap_find_next_zero_area(pool->bitmap,
739 pool->num_desc, pool->num_desc/2, 1, 0);
740
741 if (index < pool->num_desc)
742 ret = true;
743 else
744 ret = false;
745
746 spin_unlock_irqrestore(&pool->lock, flags);
747 return ret;
730 spin_lock_irqsave(&chan->lock, flags);
731 free_tx_desc = (chan->count < chan->desc_num) &&
732 gen_pool_avail(pool->gen_pool);
733 spin_unlock_irqrestore(&chan->lock, flags);
734 return free_tx_desc;
748}
749EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
750
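The hand-rolled bitmap allocator is replaced by genalloc, which serializes access internally; allocating and releasing a fixed-size descriptor each reduce to a single call. A generic sketch of the API used above (names illustrative):

static void *desc_try_alloc(struct gen_pool *gp, size_t desc_size)
{
	/* gen_pool_alloc() returns 0 when the pool is exhausted */
	return (void *)gen_pool_alloc(gp, desc_size);
}

static void desc_release(struct gen_pool *gp, void *desc, size_t desc_size)
{
	gen_pool_free(gp, (unsigned long)desc, desc_size);
}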
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index f56d66e6ec15..c6c54659f8d4 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -348,7 +348,6 @@ struct emac_priv {
348 u32 rx_addr_type;
349 const char *phy_id;
350 struct device_node *phy_node;
351 struct phy_device *phydev;
352 spinlock_t lock;
353 /*platform specific members*/
354 void (*int_enable) (void);
@@ -486,43 +485,6 @@ static void emac_get_drvinfo(struct net_device *ndev,
486}
487
488/**
489 * emac_get_settings - Get EMAC settings
490 * @ndev: The DaVinci EMAC network adapter
491 * @ecmd: ethtool command
492 *
493 * Executes ethtool get command
494 *
495 */
496static int emac_get_settings(struct net_device *ndev,
497 struct ethtool_cmd *ecmd)
498{
499 struct emac_priv *priv = netdev_priv(ndev);
500 if (priv->phydev)
501 return phy_ethtool_gset(priv->phydev, ecmd);
502 else
503 return -EOPNOTSUPP;
504
505}
506
507/**
508 * emac_set_settings - Set EMAC settings
509 * @ndev: The DaVinci EMAC network adapter
510 * @ecmd: ethtool command
511 *
512 * Executes ethtool set command
513 *
514 */
515static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
516{
517 struct emac_priv *priv = netdev_priv(ndev);
518 if (priv->phydev)
519 return phy_ethtool_sset(priv->phydev, ecmd);
520 else
521 return -EOPNOTSUPP;
522
523}
524
525/**
526 * emac_get_coalesce - Get interrupt coalesce settings for this device
527 * @ndev : The DaVinci EMAC network adapter
528 * @coal : ethtool coalesce settings structure
@@ -625,12 +587,12 @@ static int emac_set_coalesce(struct net_device *ndev,
625 */
626static const struct ethtool_ops ethtool_ops = {
627 .get_drvinfo = emac_get_drvinfo,
628 .get_settings = emac_get_settings,
629 .set_settings = emac_set_settings,
630 .get_link = ethtool_op_get_link,
631 .get_coalesce = emac_get_coalesce,
632 .set_coalesce = emac_set_coalesce,
633 .get_ts_info = ethtool_op_get_ts_info,
594 .get_link_ksettings = phy_ethtool_get_link_ksettings,
595 .set_link_ksettings = phy_ethtool_set_link_ksettings,
634};
635
636/**
@@ -651,8 +613,8 @@ static void emac_update_phystatus(struct emac_priv *priv)
651 mac_control = emac_read(EMAC_MACCONTROL);
652 cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ?
653 DUPLEX_FULL : DUPLEX_HALF;
654 if (priv->phydev)
655 new_duplex = priv->phydev->duplex;
616 if (ndev->phydev)
617 new_duplex = ndev->phydev->duplex;
656 else
657 new_duplex = DUPLEX_FULL;
658
@@ -1454,7 +1416,7 @@ static void emac_poll_controller(struct net_device *ndev)
1454static void emac_adjust_link(struct net_device *ndev) 1416static void emac_adjust_link(struct net_device *ndev)
1455{ 1417{
1456 struct emac_priv *priv = netdev_priv(ndev); 1418 struct emac_priv *priv = netdev_priv(ndev);
1457 struct phy_device *phydev = priv->phydev; 1419 struct phy_device *phydev = ndev->phydev;
1458 unsigned long flags; 1420 unsigned long flags;
1459 int new_state = 0; 1421 int new_state = 0;
1460 1422
@@ -1483,7 +1445,7 @@ static void emac_adjust_link(struct net_device *ndev)
1483 } 1445 }
1484 if (new_state) { 1446 if (new_state) {
1485 emac_update_phystatus(priv); 1447 emac_update_phystatus(priv);
1486 phy_print_status(priv->phydev); 1448 phy_print_status(ndev->phydev);
1487 } 1449 }
1488 1450
1489 spin_unlock_irqrestore(&priv->lock, flags); 1451 spin_unlock_irqrestore(&priv->lock, flags);
@@ -1505,15 +1467,13 @@ static void emac_adjust_link(struct net_device *ndev)
1505 */ 1467 */
1506static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd) 1468static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
1507{ 1469{
1508 struct emac_priv *priv = netdev_priv(ndev);
1509
1510 if (!(netif_running(ndev))) 1470 if (!(netif_running(ndev)))
1511 return -EINVAL; 1471 return -EINVAL;
1512 1472
1513 /* TODO: Add phy read and write and private statistics get feature */ 1473 /* TODO: Add phy read and write and private statistics get feature */
1514 1474
1515 if (priv->phydev) 1475 if (ndev->phydev)
1516 return phy_mii_ioctl(priv->phydev, ifrq, cmd); 1476 return phy_mii_ioctl(ndev->phydev, ifrq, cmd);
1517 else 1477 else
1518 return -EOPNOTSUPP; 1478 return -EOPNOTSUPP;
1519} 1479}
@@ -1542,6 +1502,7 @@ static int emac_dev_open(struct net_device *ndev)
1542 int res_num = 0, irq_num = 0; 1502 int res_num = 0, irq_num = 0;
1543 int i = 0; 1503 int i = 0;
1544 struct emac_priv *priv = netdev_priv(ndev); 1504 struct emac_priv *priv = netdev_priv(ndev);
1505 struct phy_device *phydev = NULL;
1545 1506
1546 ret = pm_runtime_get_sync(&priv->pdev->dev); 1507 ret = pm_runtime_get_sync(&priv->pdev->dev);
1547 if (ret < 0) { 1508 if (ret < 0) {
@@ -1607,12 +1568,10 @@ static int emac_dev_open(struct net_device *ndev)
1607 1568
1608 cpdma_ctlr_start(priv->dma); 1569 cpdma_ctlr_start(priv->dma);
1609 1570
1610 priv->phydev = NULL;
1611
1612 if (priv->phy_node) { 1571 if (priv->phy_node) {
1613 priv->phydev = of_phy_connect(ndev, priv->phy_node, 1572 phydev = of_phy_connect(ndev, priv->phy_node,
1614 &emac_adjust_link, 0, 0); 1573 &emac_adjust_link, 0, 0);
1615 if (!priv->phydev) { 1574 if (!phydev) {
1616 dev_err(emac_dev, "could not connect to phy %s\n", 1575 dev_err(emac_dev, "could not connect to phy %s\n",
1617 priv->phy_node->full_name); 1576 priv->phy_node->full_name);
1618 ret = -ENODEV; 1577 ret = -ENODEV;
@@ -1621,7 +1580,7 @@ static int emac_dev_open(struct net_device *ndev)
1621 } 1580 }
1622 1581
1623 /* use the first phy on the bus if pdata did not give us a phy id */ 1582 /* use the first phy on the bus if pdata did not give us a phy id */
1624 if (!priv->phydev && !priv->phy_id) { 1583 if (!phydev && !priv->phy_id) {
1625 struct device *phy; 1584 struct device *phy;
1626 1585
1627 phy = bus_find_device(&mdio_bus_type, NULL, NULL, 1586 phy = bus_find_device(&mdio_bus_type, NULL, NULL,
@@ -1630,16 +1589,15 @@ static int emac_dev_open(struct net_device *ndev)
1630 priv->phy_id = dev_name(phy); 1589 priv->phy_id = dev_name(phy);
1631 } 1590 }
1632 1591
1633 if (!priv->phydev && priv->phy_id && *priv->phy_id) { 1592 if (!phydev && priv->phy_id && *priv->phy_id) {
1634 priv->phydev = phy_connect(ndev, priv->phy_id, 1593 phydev = phy_connect(ndev, priv->phy_id,
1635 &emac_adjust_link, 1594 &emac_adjust_link,
1636 PHY_INTERFACE_MODE_MII); 1595 PHY_INTERFACE_MODE_MII);
1637 1596
1638 if (IS_ERR(priv->phydev)) { 1597 if (IS_ERR(phydev)) {
1639 dev_err(emac_dev, "could not connect to phy %s\n", 1598 dev_err(emac_dev, "could not connect to phy %s\n",
1640 priv->phy_id); 1599 priv->phy_id);
1641 ret = PTR_ERR(priv->phydev); 1600 ret = PTR_ERR(phydev);
1642 priv->phydev = NULL;
1643 goto err; 1601 goto err;
1644 } 1602 }
1645 1603
@@ -1647,10 +1605,10 @@ static int emac_dev_open(struct net_device *ndev)
1647 priv->speed = 0; 1605 priv->speed = 0;
1648 priv->duplex = ~0; 1606 priv->duplex = ~0;
1649 1607
1650 phy_attached_info(priv->phydev); 1608 phy_attached_info(phydev);
1651 } 1609 }
1652 1610
1653 if (!priv->phydev) { 1611 if (!phydev) {
1654 /* No PHY, fix the link, speed and duplex settings */ 1612 /* No PHY, fix the link, speed and duplex settings */
1655 dev_notice(emac_dev, "no phy, defaulting to 100/full\n"); 1613 dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
1656 priv->link = 1; 1614 priv->link = 1;
@@ -1665,8 +1623,8 @@ static int emac_dev_open(struct net_device *ndev)
1665 if (netif_msg_drv(priv)) 1623 if (netif_msg_drv(priv))
1666 dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name); 1624 dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name);
1667 1625
1668 if (priv->phydev) 1626 if (phydev)
1669 phy_start(priv->phydev); 1627 phy_start(phydev);
1670 1628
1671 return 0; 1629 return 0;
1672 1630
@@ -1717,8 +1675,8 @@ static int emac_dev_stop(struct net_device *ndev)
1717 cpdma_ctlr_stop(priv->dma); 1675 cpdma_ctlr_stop(priv->dma);
1718 emac_write(EMAC_SOFTRESET, 1); 1676 emac_write(EMAC_SOFTRESET, 1);
1719 1677
1720 if (priv->phydev) 1678 if (ndev->phydev)
1721 phy_disconnect(priv->phydev); 1679 phy_disconnect(ndev->phydev);
1722 1680
1723 /* Free IRQ */ 1681 /* Free IRQ */
1724 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) { 1682 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
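The davinci_emac hunks drop the driver-private phydev copy: phy_connect()/of_phy_connect() already record the attached PHY in net_device, so the driver reads ndev->phydev everywhere, and the legacy .get_settings/.set_settings ethtool hooks give way to the generic ksettings helpers. A sketch of the resulting wiring (hypothetical sketch_* names, not the exact davinci code):

```c
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static int sketch_ioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
{
	if (!netif_running(ndev))
		return -EINVAL;

	/* No private pointer needed: the PHY core tracks it in ndev. */
	if (ndev->phydev)
		return phy_mii_ioctl(ndev->phydev, ifrq, cmd);

	return -EOPNOTSUPP;
}

static const struct ethtool_ops sketch_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	/* Generic helpers operate on ndev->phydev directly. */
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
```

Keeping the in-progress PHY in a local until the connect succeeds also means the error paths never leave a stale pointer behind.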
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 4e7c9b9b042a..33df340db1f1 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -53,6 +53,10 @@
53 53
54#define DEF_OUT_FREQ 2200000 /* 2.2 MHz */ 54#define DEF_OUT_FREQ 2200000 /* 2.2 MHz */
55 55
56struct davinci_mdio_of_param {
57 int autosuspend_delay_ms;
58};
59
56struct davinci_mdio_regs { 60struct davinci_mdio_regs {
57 u32 version; 61 u32 version;
58 u32 control; 62 u32 control;
@@ -90,19 +94,19 @@ static const struct mdio_platform_data default_pdata = {
90struct davinci_mdio_data { 94struct davinci_mdio_data {
91 struct mdio_platform_data pdata; 95 struct mdio_platform_data pdata;
92 struct davinci_mdio_regs __iomem *regs; 96 struct davinci_mdio_regs __iomem *regs;
93 spinlock_t lock;
94 struct clk *clk; 97 struct clk *clk;
95 struct device *dev; 98 struct device *dev;
96 struct mii_bus *bus; 99 struct mii_bus *bus;
97 bool suspended; 100 bool active_in_suspend;
98 unsigned long access_time; /* jiffies */ 101 unsigned long access_time; /* jiffies */
99 /* Indicates that driver shouldn't modify phy_mask in case 102 /* Indicates that driver shouldn't modify phy_mask in case
100 * if MDIO bus is registered from DT. 103 * if MDIO bus is registered from DT.
101 */ 104 */
102 bool skip_scan; 105 bool skip_scan;
106 u32 clk_div;
103}; 107};
104 108
105static void __davinci_mdio_reset(struct davinci_mdio_data *data) 109static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
106{ 110{
107 u32 mdio_in, div, mdio_out_khz, access_time; 111 u32 mdio_in, div, mdio_out_khz, access_time;
108 112
@@ -111,9 +115,7 @@ static void __davinci_mdio_reset(struct davinci_mdio_data *data)
111 if (div > CONTROL_MAX_DIV) 115 if (div > CONTROL_MAX_DIV)
112 div = CONTROL_MAX_DIV; 116 div = CONTROL_MAX_DIV;
113 117
114 /* set enable and clock divider */ 118 data->clk_div = div;
115 __raw_writel(div | CONTROL_ENABLE, &data->regs->control);
116
117 /* 119 /*
118 * One mdio transaction consists of: 120 * One mdio transaction consists of:
119 * 32 bits of preamble 121 * 32 bits of preamble
@@ -134,12 +136,23 @@ static void __davinci_mdio_reset(struct davinci_mdio_data *data)
134 data->access_time = 1; 136 data->access_time = 1;
135} 137}
136 138
139static void davinci_mdio_enable(struct davinci_mdio_data *data)
140{
141 /* set enable and clock divider */
142 __raw_writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
143}
144
137static int davinci_mdio_reset(struct mii_bus *bus) 145static int davinci_mdio_reset(struct mii_bus *bus)
138{ 146{
139 struct davinci_mdio_data *data = bus->priv; 147 struct davinci_mdio_data *data = bus->priv;
140 u32 phy_mask, ver; 148 u32 phy_mask, ver;
149 int ret;
141 150
142 __davinci_mdio_reset(data); 151 ret = pm_runtime_get_sync(data->dev);
152 if (ret < 0) {
153 pm_runtime_put_noidle(data->dev);
154 return ret;
155 }
143 156
144 /* wait for scan logic to settle */ 157 /* wait for scan logic to settle */
145 msleep(PHY_MAX_ADDR * data->access_time); 158 msleep(PHY_MAX_ADDR * data->access_time);
@@ -150,7 +163,7 @@ static int davinci_mdio_reset(struct mii_bus *bus)
150 (ver >> 8) & 0xff, ver & 0xff); 163 (ver >> 8) & 0xff, ver & 0xff);
151 164
152 if (data->skip_scan) 165 if (data->skip_scan)
153 return 0; 166 goto done;
154 167
155 /* get phy mask from the alive register */ 168 /* get phy mask from the alive register */
156 phy_mask = __raw_readl(&data->regs->alive); 169 phy_mask = __raw_readl(&data->regs->alive);
@@ -165,6 +178,10 @@ static int davinci_mdio_reset(struct mii_bus *bus)
165 } 178 }
166 data->bus->phy_mask = phy_mask; 179 data->bus->phy_mask = phy_mask;
167 180
181done:
182 pm_runtime_mark_last_busy(data->dev);
183 pm_runtime_put_autosuspend(data->dev);
184
168 return 0; 185 return 0;
169} 186}
170 187
@@ -190,7 +207,7 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
190 * operation 207 * operation
191 */ 208 */
192 dev_warn(data->dev, "resetting idled controller\n"); 209 dev_warn(data->dev, "resetting idled controller\n");
193 __davinci_mdio_reset(data); 210 davinci_mdio_enable(data);
194 return -EAGAIN; 211 return -EAGAIN;
195 } 212 }
196 213
@@ -225,11 +242,10 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
225 if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) 242 if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
226 return -EINVAL; 243 return -EINVAL;
227 244
228 spin_lock(&data->lock); 245 ret = pm_runtime_get_sync(data->dev);
229 246 if (ret < 0) {
230 if (data->suspended) { 247 pm_runtime_put_noidle(data->dev);
231 spin_unlock(&data->lock); 248 return ret;
232 return -ENODEV;
233 } 249 }
234 250
235 reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) | 251 reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
@@ -255,8 +271,8 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
255 break; 271 break;
256 } 272 }
257 273
258 spin_unlock(&data->lock); 274 pm_runtime_mark_last_busy(data->dev);
259 275 pm_runtime_put_autosuspend(data->dev);
260 return ret; 276 return ret;
261} 277}
262 278
@@ -270,11 +286,10 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
270 if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) 286 if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
271 return -EINVAL; 287 return -EINVAL;
272 288
273 spin_lock(&data->lock); 289 ret = pm_runtime_get_sync(data->dev);
274 290 if (ret < 0) {
275 if (data->suspended) { 291 pm_runtime_put_noidle(data->dev);
276 spin_unlock(&data->lock); 292 return ret;
277 return -ENODEV;
278 } 293 }
279 294
280 reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) | 295 reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
@@ -295,9 +310,10 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
295 break; 310 break;
296 } 311 }
297 312
298 spin_unlock(&data->lock); 313 pm_runtime_mark_last_busy(data->dev);
314 pm_runtime_put_autosuspend(data->dev);
299 315
300 return 0; 316 return ret;
301} 317}
302 318
303#if IS_ENABLED(CONFIG_OF) 319#if IS_ENABLED(CONFIG_OF)
@@ -320,6 +336,19 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
320} 336}
321#endif 337#endif
322 338
339#if IS_ENABLED(CONFIG_OF)
340static const struct davinci_mdio_of_param of_cpsw_mdio_data = {
341 .autosuspend_delay_ms = 100,
342};
343
344static const struct of_device_id davinci_mdio_of_mtable[] = {
345 { .compatible = "ti,davinci_mdio", },
346 { .compatible = "ti,cpsw-mdio", .data = &of_cpsw_mdio_data},
347 { /* sentinel */ },
348};
349MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
350#endif
351
323static int davinci_mdio_probe(struct platform_device *pdev) 352static int davinci_mdio_probe(struct platform_device *pdev)
324{ 353{
325 struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev); 354 struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -328,6 +357,7 @@ static int davinci_mdio_probe(struct platform_device *pdev)
328 struct resource *res; 357 struct resource *res;
329 struct phy_device *phy; 358 struct phy_device *phy;
330 int ret, addr; 359 int ret, addr;
360 int autosuspend_delay_ms = -1;
331 361
332 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); 362 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
333 if (!data) 363 if (!data)
@@ -340,9 +370,22 @@ static int davinci_mdio_probe(struct platform_device *pdev)
340 } 370 }
341 371
342 if (dev->of_node) { 372 if (dev->of_node) {
343 if (davinci_mdio_probe_dt(&data->pdata, pdev)) 373 const struct of_device_id *of_id;
344 data->pdata = default_pdata; 374
375 ret = davinci_mdio_probe_dt(&data->pdata, pdev);
376 if (ret)
377 return ret;
345 snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); 378 snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
379
380 of_id = of_match_device(davinci_mdio_of_mtable, &pdev->dev);
381 if (of_id) {
382 const struct davinci_mdio_of_param *of_mdio_data;
383
384 of_mdio_data = of_id->data;
385 if (of_mdio_data)
386 autosuspend_delay_ms =
387 of_mdio_data->autosuspend_delay_ms;
388 }
346 } else { 389 } else {
347 data->pdata = pdata ? (*pdata) : default_pdata; 390 data->pdata = pdata ? (*pdata) : default_pdata;
348 snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x", 391 snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
@@ -356,26 +399,25 @@ static int davinci_mdio_probe(struct platform_device *pdev)
356 data->bus->parent = dev; 399 data->bus->parent = dev;
357 data->bus->priv = data; 400 data->bus->priv = data;
358 401
359 pm_runtime_enable(&pdev->dev);
360 pm_runtime_get_sync(&pdev->dev);
361 data->clk = devm_clk_get(dev, "fck"); 402 data->clk = devm_clk_get(dev, "fck");
362 if (IS_ERR(data->clk)) { 403 if (IS_ERR(data->clk)) {
363 dev_err(dev, "failed to get device clock\n"); 404 dev_err(dev, "failed to get device clock\n");
364 ret = PTR_ERR(data->clk); 405 return PTR_ERR(data->clk);
365 data->clk = NULL;
366 goto bail_out;
367 } 406 }
368 407
369 dev_set_drvdata(dev, data); 408 dev_set_drvdata(dev, data);
370 data->dev = dev; 409 data->dev = dev;
371 spin_lock_init(&data->lock);
372 410
373 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 411 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
374 data->regs = devm_ioremap_resource(dev, res); 412 data->regs = devm_ioremap_resource(dev, res);
375 if (IS_ERR(data->regs)) { 413 if (IS_ERR(data->regs))
376 ret = PTR_ERR(data->regs); 414 return PTR_ERR(data->regs);
377 goto bail_out; 415
378 } 416 davinci_mdio_init_clk(data);
417
418 pm_runtime_set_autosuspend_delay(&pdev->dev, autosuspend_delay_ms);
419 pm_runtime_use_autosuspend(&pdev->dev);
420 pm_runtime_enable(&pdev->dev);
379 421
380 /* register the mii bus 422 /* register the mii bus
381 * Create PHYs from DT only in case if PHY child nodes are explicitly 423 * Create PHYs from DT only in case if PHY child nodes are explicitly
@@ -404,9 +446,8 @@ static int davinci_mdio_probe(struct platform_device *pdev)
404 return 0; 446 return 0;
405 447
406bail_out: 448bail_out:
407 pm_runtime_put_sync(&pdev->dev); 449 pm_runtime_dont_use_autosuspend(&pdev->dev);
408 pm_runtime_disable(&pdev->dev); 450 pm_runtime_disable(&pdev->dev);
409
410 return ret; 451 return ret;
411} 452}
412 453
@@ -417,29 +458,47 @@ static int davinci_mdio_remove(struct platform_device *pdev)
417 if (data->bus) 458 if (data->bus)
418 mdiobus_unregister(data->bus); 459 mdiobus_unregister(data->bus);
419 460
420 pm_runtime_put_sync(&pdev->dev); 461 pm_runtime_dont_use_autosuspend(&pdev->dev);
421 pm_runtime_disable(&pdev->dev); 462 pm_runtime_disable(&pdev->dev);
422 463
423 return 0; 464 return 0;
424} 465}
425 466
426#ifdef CONFIG_PM_SLEEP 467#ifdef CONFIG_PM
427static int davinci_mdio_suspend(struct device *dev) 468static int davinci_mdio_runtime_suspend(struct device *dev)
428{ 469{
429 struct davinci_mdio_data *data = dev_get_drvdata(dev); 470 struct davinci_mdio_data *data = dev_get_drvdata(dev);
430 u32 ctrl; 471 u32 ctrl;
431 472
432 spin_lock(&data->lock);
433
434 /* shutdown the scan state machine */ 473 /* shutdown the scan state machine */
435 ctrl = __raw_readl(&data->regs->control); 474 ctrl = __raw_readl(&data->regs->control);
436 ctrl &= ~CONTROL_ENABLE; 475 ctrl &= ~CONTROL_ENABLE;
437 __raw_writel(ctrl, &data->regs->control); 476 __raw_writel(ctrl, &data->regs->control);
438 wait_for_idle(data); 477 wait_for_idle(data);
439 478
440 data->suspended = true; 479 return 0;
441 spin_unlock(&data->lock); 480}
442 pm_runtime_put_sync(data->dev); 481
482static int davinci_mdio_runtime_resume(struct device *dev)
483{
484 struct davinci_mdio_data *data = dev_get_drvdata(dev);
485
486 davinci_mdio_enable(data);
487 return 0;
488}
489#endif
490
491#ifdef CONFIG_PM_SLEEP
492static int davinci_mdio_suspend(struct device *dev)
493{
494 struct davinci_mdio_data *data = dev_get_drvdata(dev);
495 int ret = 0;
496
497 data->active_in_suspend = !pm_runtime_status_suspended(dev);
498 if (data->active_in_suspend)
499 ret = pm_runtime_force_suspend(dev);
500 if (ret < 0)
501 return ret;
443 502
444 /* Select sleep pin state */ 503 /* Select sleep pin state */
445 pinctrl_pm_select_sleep_state(dev); 504 pinctrl_pm_select_sleep_state(dev);
@@ -454,31 +513,19 @@ static int davinci_mdio_resume(struct device *dev)
454 /* Select default pin state */ 513 /* Select default pin state */
455 pinctrl_pm_select_default_state(dev); 514 pinctrl_pm_select_default_state(dev);
456 515
457 pm_runtime_get_sync(data->dev); 516 if (data->active_in_suspend)
458 517 pm_runtime_force_resume(dev);
459 spin_lock(&data->lock);
460 /* restart the scan state machine */
461 __davinci_mdio_reset(data);
462
463 data->suspended = false;
464 spin_unlock(&data->lock);
465 518
466 return 0; 519 return 0;
467} 520}
468#endif 521#endif
469 522
470static const struct dev_pm_ops davinci_mdio_pm_ops = { 523static const struct dev_pm_ops davinci_mdio_pm_ops = {
524 SET_RUNTIME_PM_OPS(davinci_mdio_runtime_suspend,
525 davinci_mdio_runtime_resume, NULL)
471 SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume) 526 SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
472}; 527};
473 528
474#if IS_ENABLED(CONFIG_OF)
475static const struct of_device_id davinci_mdio_of_mtable[] = {
476 { .compatible = "ti,davinci_mdio", },
477 { /* sentinel */ },
478};
479MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
480#endif
481
482static struct platform_driver davinci_mdio_driver = { 529static struct platform_driver davinci_mdio_driver = {
483 .driver = { 530 .driver = {
484 .name = "davinci_mdio", 531 .name = "davinci_mdio",
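The davinci_mdio rework trades the spinlock-plus-`suspended` flag for runtime PM with autosuspend: every bus access takes a runtime PM reference (resuming the controller on demand), the enable/disable work moves into the runtime callbacks, and the autosuspend delay optionally comes from the OF match data. A sketch of the access-side idiom, assuming the runtime suspend/resume callbacks toggle the hardware as in the hunks above:

```c
#include <linux/phy.h>
#include <linux/pm_runtime.h>

static int sketch_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
{
	struct device *dev = bus->parent;
	int ret;

	/* Resume the controller (or bail out) before touching registers. */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	ret = 0;	/* ... perform the USERACCESS transaction here ... */

	/* Arm the autosuspend timer instead of suspending immediately. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}
```

With this structure the system-sleep path only needs pm_runtime_force_suspend()/pm_runtime_force_resume(), guarded by whether the device was actually active when sleep began.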
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 0a15acc075b3..11213a38c795 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -462,7 +462,7 @@ static void tile_tx_timestamp(struct sk_buff *skb, int instance)
462 if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) { 462 if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
463 struct mpipe_data *md = &mpipe_data[instance]; 463 struct mpipe_data *md = &mpipe_data[instance];
464 struct skb_shared_hwtstamps shhwtstamps; 464 struct skb_shared_hwtstamps shhwtstamps;
465 struct timespec ts; 465 struct timespec64 ts;
466 466
467 shtx->tx_flags |= SKBTX_IN_PROGRESS; 467 shtx->tx_flags |= SKBTX_IN_PROGRESS;
468 gxio_mpipe_get_timestamp(&md->context, &ts); 468 gxio_mpipe_get_timestamp(&md->context, &ts);
@@ -886,9 +886,9 @@ static struct ptp_clock_info ptp_mpipe_caps = {
886/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */ 886/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
887static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md) 887static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
888{ 888{
889 struct timespec ts; 889 struct timespec64 ts;
890 890
891 getnstimeofday(&ts); 891 ktime_get_ts64(&ts);
892 gxio_mpipe_set_timestamp(&md->context, &ts); 892 gxio_mpipe_set_timestamp(&md->context, &ts);
893 893
894 mutex_init(&md->ptp_lock); 894 mutex_init(&md->ptp_lock);
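The tilegx hunks are a y2038 conversion: struct timespec (whose tv_sec overflows in 2038 on 32-bit builds) becomes struct timespec64, and the initial hardware-clock seeding uses ktime_get_ts64(). A minimal sketch of the seeding step, with the hardware write left as a comment:

```c
#include <linux/time64.h>
#include <linux/timekeeping.h>

static void sketch_seed_hw_clock(void)
{
	struct timespec64 ts;

	/* 64-bit seconds, safe past 2038 on 32-bit kernels too. */
	ktime_get_ts64(&ts);

	/* ... program ts.tv_sec / ts.tv_nsec into the PTP hardware ... */
}
```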
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 5138407941cf..7f127dc1b7ba 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -171,7 +171,6 @@ struct port {
171 struct npe *npe; 171 struct npe *npe;
172 struct net_device *netdev; 172 struct net_device *netdev;
173 struct napi_struct napi; 173 struct napi_struct napi;
174 struct phy_device *phydev;
175 struct eth_plat_info *plat; 174 struct eth_plat_info *plat;
176 buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS]; 175 buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
177 struct desc *desc_tab; /* coherent */ 176 struct desc *desc_tab; /* coherent */
@@ -562,7 +561,7 @@ static void ixp4xx_mdio_remove(void)
562static void ixp4xx_adjust_link(struct net_device *dev) 561static void ixp4xx_adjust_link(struct net_device *dev)
563{ 562{
564 struct port *port = netdev_priv(dev); 563 struct port *port = netdev_priv(dev);
565 struct phy_device *phydev = port->phydev; 564 struct phy_device *phydev = dev->phydev;
566 565
567 if (!phydev->link) { 566 if (!phydev->link) {
568 if (port->speed) { 567 if (port->speed) {
@@ -976,8 +975,6 @@ static void eth_set_mcast_list(struct net_device *dev)
976 975
977static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 976static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
978{ 977{
979 struct port *port = netdev_priv(dev);
980
981 if (!netif_running(dev)) 978 if (!netif_running(dev))
982 return -EINVAL; 979 return -EINVAL;
983 980
@@ -988,7 +985,7 @@ static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
988 return hwtstamp_get(dev, req); 985 return hwtstamp_get(dev, req);
989 } 986 }
990 987
991 return phy_mii_ioctl(port->phydev, req, cmd); 988 return phy_mii_ioctl(dev->phydev, req, cmd);
992} 989}
993 990
994/* ethtool support */ 991/* ethtool support */
@@ -1005,22 +1002,9 @@ static void ixp4xx_get_drvinfo(struct net_device *dev,
1005 strlcpy(info->bus_info, "internal", sizeof(info->bus_info)); 1002 strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
1006} 1003}
1007 1004
1008static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1009{
1010 struct port *port = netdev_priv(dev);
1011 return phy_ethtool_gset(port->phydev, cmd);
1012}
1013
1014static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1015{
1016 struct port *port = netdev_priv(dev);
1017 return phy_ethtool_sset(port->phydev, cmd);
1018}
1019
1020static int ixp4xx_nway_reset(struct net_device *dev) 1005static int ixp4xx_nway_reset(struct net_device *dev)
1021{ 1006{
1022 struct port *port = netdev_priv(dev); 1007 return phy_start_aneg(dev->phydev);
1023 return phy_start_aneg(port->phydev);
1024} 1008}
1025 1009
1026int ixp46x_phc_index = -1; 1010int ixp46x_phc_index = -1;
@@ -1054,11 +1038,11 @@ static int ixp4xx_get_ts_info(struct net_device *dev,
1054 1038
1055static const struct ethtool_ops ixp4xx_ethtool_ops = { 1039static const struct ethtool_ops ixp4xx_ethtool_ops = {
1056 .get_drvinfo = ixp4xx_get_drvinfo, 1040 .get_drvinfo = ixp4xx_get_drvinfo,
1057 .get_settings = ixp4xx_get_settings,
1058 .set_settings = ixp4xx_set_settings,
1059 .nway_reset = ixp4xx_nway_reset, 1041 .nway_reset = ixp4xx_nway_reset,
1060 .get_link = ethtool_op_get_link, 1042 .get_link = ethtool_op_get_link,
1061 .get_ts_info = ixp4xx_get_ts_info, 1043 .get_ts_info = ixp4xx_get_ts_info,
1044 .get_link_ksettings = phy_ethtool_get_link_ksettings,
1045 .set_link_ksettings = phy_ethtool_set_link_ksettings,
1062}; 1046};
1063 1047
1064 1048
@@ -1259,7 +1243,7 @@ static int eth_open(struct net_device *dev)
1259 } 1243 }
1260 1244
1261 port->speed = 0; /* force "link up" message */ 1245 port->speed = 0; /* force "link up" message */
1262 phy_start(port->phydev); 1246 phy_start(dev->phydev);
1263 1247
1264 for (i = 0; i < ETH_ALEN; i++) 1248 for (i = 0; i < ETH_ALEN; i++)
1265 __raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]); 1249 __raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
@@ -1380,7 +1364,7 @@ static int eth_close(struct net_device *dev)
1380 printk(KERN_CRIT "%s: unable to disable loopback\n", 1364 printk(KERN_CRIT "%s: unable to disable loopback\n",
1381 dev->name); 1365 dev->name);
1382 1366
1383 phy_stop(port->phydev); 1367 phy_stop(dev->phydev);
1384 1368
1385 if (!ports_open) 1369 if (!ports_open)
1386 qmgr_disable_irq(TXDONE_QUEUE); 1370 qmgr_disable_irq(TXDONE_QUEUE);
@@ -1405,6 +1389,7 @@ static int eth_init_one(struct platform_device *pdev)
1405 struct port *port; 1389 struct port *port;
1406 struct net_device *dev; 1390 struct net_device *dev;
1407 struct eth_plat_info *plat = dev_get_platdata(&pdev->dev); 1391 struct eth_plat_info *plat = dev_get_platdata(&pdev->dev);
1392 struct phy_device *phydev = NULL;
1408 u32 regs_phys; 1393 u32 regs_phys;
1409 char phy_id[MII_BUS_ID_SIZE + 3]; 1394 char phy_id[MII_BUS_ID_SIZE + 3];
1410 int err; 1395 int err;
@@ -1466,14 +1451,14 @@ static int eth_init_one(struct platform_device *pdev)
1466 1451
1467 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, 1452 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
1468 mdio_bus->id, plat->phy); 1453 mdio_bus->id, plat->phy);
1469 port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 1454 phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link,
1470 PHY_INTERFACE_MODE_MII); 1455 PHY_INTERFACE_MODE_MII);
1471 if (IS_ERR(port->phydev)) { 1456 if (IS_ERR(phydev)) {
1472 err = PTR_ERR(port->phydev); 1457 err = PTR_ERR(phydev);
1473 goto err_free_mem; 1458 goto err_free_mem;
1474 } 1459 }
1475 1460
1476 port->phydev->irq = PHY_POLL; 1461 phydev->irq = PHY_POLL;
1477 1462
1478 if ((err = register_netdev(dev))) 1463 if ((err = register_netdev(dev)))
1479 goto err_phy_dis; 1464 goto err_phy_dis;
@@ -1484,7 +1469,7 @@ static int eth_init_one(struct platform_device *pdev)
1484 return 0; 1469 return 0;
1485 1470
1486err_phy_dis: 1471err_phy_dis:
1487 phy_disconnect(port->phydev); 1472 phy_disconnect(phydev);
1488err_free_mem: 1473err_free_mem:
1489 npe_port_tab[NPE_ID(port->id)] = NULL; 1474 npe_port_tab[NPE_ID(port->id)] = NULL;
1490 release_resource(port->mem_res); 1475 release_resource(port->mem_res);
@@ -1498,10 +1483,11 @@ err_free:
1498static int eth_remove_one(struct platform_device *pdev) 1483static int eth_remove_one(struct platform_device *pdev)
1499{ 1484{
1500 struct net_device *dev = platform_get_drvdata(pdev); 1485 struct net_device *dev = platform_get_drvdata(pdev);
1486 struct phy_device *phydev = dev->phydev;
1501 struct port *port = netdev_priv(dev); 1487 struct port *port = netdev_priv(dev);
1502 1488
1503 unregister_netdev(dev); 1489 unregister_netdev(dev);
1504 phy_disconnect(port->phydev); 1490 phy_disconnect(phydev);
1505 npe_port_tab[NPE_ID(port->id)] = NULL; 1491 npe_port_tab[NPE_ID(port->id)] = NULL;
1506 npe_release(port->npe); 1492 npe_release(port->npe);
1507 release_resource(port->mem_res); 1493 release_resource(port->mem_res);
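ixp4xx_eth receives the same conversion as davinci_emac above: the port->phydev copy disappears in favour of dev->phydev, the per-driver get/set_settings pair is replaced by the generic ksettings helpers, and probe keeps the phy_connect() result in a local until it is known-good. A sketch of the connect/error idiom (sketch_* names are hypothetical):

```c
#include <linux/phy.h>

static int sketch_connect_phy(struct net_device *dev, const char *phy_id,
			      void (*adjust_link)(struct net_device *))
{
	struct phy_device *phydev;

	phydev = phy_connect(dev, phy_id, adjust_link,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);	/* nothing to unwind yet */

	phydev->irq = PHY_POLL;		/* poll rather than use a PHY IRQ */

	return 0;	/* dev->phydev has been set by the PHY core */
}
```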
diff --git a/drivers/net/fddi/skfp/Makefile b/drivers/net/fddi/skfp/Makefile
index b0be0234abf6..a957a1c7e5ba 100644
--- a/drivers/net/fddi/skfp/Makefile
+++ b/drivers/net/fddi/skfp/Makefile
@@ -17,4 +17,4 @@ skfp-objs := skfddi.o hwmtm.o fplustm.o smt.o cfm.o \
17# projects. To keep the source common for all those drivers (and 17# projects. To keep the source common for all those drivers (and
18# thus simplify fixes to it), please do not clean it up! 18# thus simplify fixes to it), please do not clean it up!
19 19
20ccflags-y := -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes 20ccflags-y := -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index aa61708bea69..5de892f3c0e0 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -922,8 +922,8 @@ tx_error:
922 dev->stats.collisions++; 922 dev->stats.collisions++;
923 else if (err == -ENETUNREACH) 923 else if (err == -ENETUNREACH)
924 dev->stats.tx_carrier_errors++; 924 dev->stats.tx_carrier_errors++;
925 else 925
926 dev->stats.tx_errors++; 926 dev->stats.tx_errors++;
927 return NETDEV_TX_OK; 927 return NETDEV_TX_OK;
928} 928}
929 929
@@ -1012,8 +1012,8 @@ tx_error:
1012 dev->stats.collisions++; 1012 dev->stats.collisions++;
1013 else if (err == -ENETUNREACH) 1013 else if (err == -ENETUNREACH)
1014 dev->stats.tx_carrier_errors++; 1014 dev->stats.tx_carrier_errors++;
1015 else 1015
1016 dev->stats.tx_errors++; 1016 dev->stats.tx_errors++;
1017 return NETDEV_TX_OK; 1017 return NETDEV_TX_OK;
1018} 1018}
1019#endif 1019#endif
@@ -1036,12 +1036,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
1036 1036
1037static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict) 1037static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
1038{ 1038{
1039 struct geneve_dev *geneve = netdev_priv(dev);
1039 /* The max_mtu calculation does not take account of GENEVE 1040 /* The max_mtu calculation does not take account of GENEVE
1040 * options, to avoid excluding potentially valid 1041 * options, to avoid excluding potentially valid
1041 * configurations. 1042 * configurations.
1042 */ 1043 */
1043 int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr) 1044 int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;
1044 - dev->hard_header_len; 1045
1046 if (geneve->remote.sa.sa_family == AF_INET6)
1047 max_mtu -= sizeof(struct ipv6hdr);
1048 else
1049 max_mtu -= sizeof(struct iphdr);
1045 1050
1046 if (new_mtu < 68) 1051 if (new_mtu < 68)
1047 return -EINVAL; 1052 return -EINVAL;
@@ -1463,6 +1468,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
1463{ 1468{
1464 struct nlattr *tb[IFLA_MAX + 1]; 1469 struct nlattr *tb[IFLA_MAX + 1];
1465 struct net_device *dev; 1470 struct net_device *dev;
1471 LIST_HEAD(list_kill);
1466 int err; 1472 int err;
1467 1473
1468 memset(tb, 0, sizeof(tb)); 1474 memset(tb, 0, sizeof(tb));
@@ -1474,8 +1480,10 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
1474 err = geneve_configure(net, dev, &geneve_remote_unspec, 1480 err = geneve_configure(net, dev, &geneve_remote_unspec,
1475 0, 0, 0, 0, htons(dst_port), true, 1481 0, 0, 0, 0, htons(dst_port), true,
1476 GENEVE_F_UDP_ZERO_CSUM6_RX); 1482 GENEVE_F_UDP_ZERO_CSUM6_RX);
1477 if (err) 1483 if (err) {
1478 goto err; 1484 free_netdev(dev);
1485 return ERR_PTR(err);
1486 }
1479 1487
1480 /* openvswitch users expect packet sizes to be unrestricted, 1488 /* openvswitch users expect packet sizes to be unrestricted,
1481 * so set the largest MTU we can. 1489 * so set the largest MTU we can.
@@ -1484,10 +1492,15 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
1484 if (err) 1492 if (err)
1485 goto err; 1493 goto err;
1486 1494
1495 err = rtnl_configure_link(dev, NULL);
1496 if (err < 0)
1497 goto err;
1498
1487 return dev; 1499 return dev;
1488 1500
1489 err: 1501 err:
1490 free_netdev(dev); 1502 geneve_dellink(dev, &list_kill);
1503 unregister_netdevice_many(&list_kill);
1491 return ERR_PTR(err); 1504 return ERR_PTR(err);
1492} 1505}
1493EXPORT_SYMBOL_GPL(geneve_dev_create_fb); 1506EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
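Three geneve fixes land here: tx_errors is now bumped on every failed transmit (not only when the error was neither a collision nor ENETUNREACH), __geneve_change_mtu() accounts for an IPv6 outer header when the remote is an AF_INET6 address, and the fb-device error path tears the registered device down with geneve_dellink() plus unregister_netdevice_many() instead of a bare free_netdev(). The MTU arithmetic, sketched with an assumed base header length:

```c
#include <linux/ip.h>
#include <linux/ipv6.h>

#define SKETCH_BASE_HLEN	16	/* assumed tunnel base header size */
#define SKETCH_IP_MAX_MTU	65535

static int sketch_max_mtu(int hard_header_len, bool remote_is_ipv6)
{
	int max_mtu = SKETCH_IP_MAX_MTU - SKETCH_BASE_HLEN - hard_header_len;

	/* The outer IP header size depends on the remote's family. */
	if (remote_is_ipv6)
		max_mtu -= sizeof(struct ipv6hdr);
	else
		max_mtu -= sizeof(struct iphdr);

	return max_mtu;
}
```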
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 6909c322de4e..20e09174ff62 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1128,6 +1128,39 @@ static inline void netvsc_receive_inband(struct hv_device *hdev,
1128 } 1128 }
1129} 1129}
1130 1130
1131static void netvsc_process_raw_pkt(struct hv_device *device,
1132 struct vmbus_channel *channel,
1133 struct netvsc_device *net_device,
1134 struct net_device *ndev,
1135 u64 request_id,
1136 struct vmpacket_descriptor *desc)
1137{
1138 struct nvsp_message *nvmsg;
1139
1140 nvmsg = (struct nvsp_message *)((unsigned long)
1141 desc + (desc->offset8 << 3));
1142
1143 switch (desc->type) {
1144 case VM_PKT_COMP:
1145 netvsc_send_completion(net_device, channel, device, desc);
1146 break;
1147
1148 case VM_PKT_DATA_USING_XFER_PAGES:
1149 netvsc_receive(net_device, channel, device, desc);
1150 break;
1151
1152 case VM_PKT_DATA_INBAND:
1153 netvsc_receive_inband(device, net_device, nvmsg);
1154 break;
1155
1156 default:
1157 netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
1158 desc->type, request_id);
1159 break;
1160 }
1161}
1162
1163
1131void netvsc_channel_cb(void *context) 1164void netvsc_channel_cb(void *context)
1132{ 1165{
1133 int ret; 1166 int ret;
@@ -1140,7 +1173,7 @@ void netvsc_channel_cb(void *context)
1140 unsigned char *buffer; 1173 unsigned char *buffer;
1141 int bufferlen = NETVSC_PACKET_SIZE; 1174 int bufferlen = NETVSC_PACKET_SIZE;
1142 struct net_device *ndev; 1175 struct net_device *ndev;
1143 struct nvsp_message *nvmsg; 1176 bool need_to_commit = false;
1144 1177
1145 if (channel->primary_channel != NULL) 1178 if (channel->primary_channel != NULL)
1146 device = channel->primary_channel->device_obj; 1179 device = channel->primary_channel->device_obj;
@@ -1154,39 +1187,36 @@ void netvsc_channel_cb(void *context)
1154 buffer = get_per_channel_state(channel); 1187 buffer = get_per_channel_state(channel);
1155 1188
1156 do { 1189 do {
1190 desc = get_next_pkt_raw(channel);
1191 if (desc != NULL) {
1192 netvsc_process_raw_pkt(device,
1193 channel,
1194 net_device,
1195 ndev,
1196 desc->trans_id,
1197 desc);
1198
1199 put_pkt_raw(channel, desc);
1200 need_to_commit = true;
1201 continue;
1202 }
1203 if (need_to_commit) {
1204 need_to_commit = false;
1205 commit_rd_index(channel);
1206 }
1207
1157 ret = vmbus_recvpacket_raw(channel, buffer, bufferlen, 1208 ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
1158 &bytes_recvd, &request_id); 1209 &bytes_recvd, &request_id);
1159 if (ret == 0) { 1210 if (ret == 0) {
1160 if (bytes_recvd > 0) { 1211 if (bytes_recvd > 0) {
1161 desc = (struct vmpacket_descriptor *)buffer; 1212 desc = (struct vmpacket_descriptor *)buffer;
1162 nvmsg = (struct nvsp_message *)((unsigned long) 1213 netvsc_process_raw_pkt(device,
1163 desc + (desc->offset8 << 3)); 1214 channel,
1164 switch (desc->type) { 1215 net_device,
1165 case VM_PKT_COMP: 1216 ndev,
1166 netvsc_send_completion(net_device, 1217 request_id,
1167 channel, 1218 desc);
1168 device, desc); 1219
1169 break;
1170
1171 case VM_PKT_DATA_USING_XFER_PAGES:
1172 netvsc_receive(net_device, channel,
1173 device, desc);
1174 break;
1175
1176 case VM_PKT_DATA_INBAND:
1177 netvsc_receive_inband(device,
1178 net_device,
1179 nvmsg);
1180 break;
1181
1182 default:
1183 netdev_err(ndev,
1184 "unhandled packet type %d, "
1185 "tid %llx len %d\n",
1186 desc->type, request_id,
1187 bytes_recvd);
1188 break;
1189 }
1190 1220
1191 } else { 1221 } else {
1192 /* 1222 /*
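The netvsc change factors packet dispatch into netvsc_process_raw_pkt() and adds a zero-copy fast path: descriptors are consumed in place with get_next_pkt_raw()/put_pkt_raw(), and the ring's read index is committed once per drained batch rather than per packet, before falling back to the copying vmbus_recvpacket_raw() path. A sketch of the drain-and-commit shape (the ring helpers are the hyperv ones used above; the process callback is hypothetical):

```c
#include <linux/hyperv.h>

static void sketch_drain_ring(struct vmbus_channel *channel,
			      void (*process)(struct vmpacket_descriptor *))
{
	struct vmpacket_descriptor *desc;
	bool need_to_commit = false;

	while ((desc = get_next_pkt_raw(channel)) != NULL) {
		process(desc);		/* dispatch on desc->type */
		put_pkt_raw(channel, desc);
		need_to_commit = true;	/* defer the read-index write */
	}

	if (need_to_commit)
		commit_rd_index(channel); /* one index update per batch */
}
```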
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 47ee2c840b55..8bcd78f94966 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -605,12 +605,41 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
605 dev_put(dev); 605 dev_put(dev);
606} 606}
607 607
608static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
609 unsigned char **iv,
610 struct scatterlist **sg)
611{
612 size_t size, iv_offset, sg_offset;
613 struct aead_request *req;
614 void *tmp;
615
616 size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
617 iv_offset = size;
618 size += GCM_AES_IV_LEN;
619
620 size = ALIGN(size, __alignof__(struct scatterlist));
621 sg_offset = size;
622 size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);
623
624 tmp = kmalloc(size, GFP_ATOMIC);
625 if (!tmp)
626 return NULL;
627
628 *iv = (unsigned char *)(tmp + iv_offset);
629 *sg = (struct scatterlist *)(tmp + sg_offset);
630 req = tmp;
631
632 aead_request_set_tfm(req, tfm);
633
634 return req;
635}
636
608static struct sk_buff *macsec_encrypt(struct sk_buff *skb, 637static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
609 struct net_device *dev) 638 struct net_device *dev)
610{ 639{
611 int ret; 640 int ret;
612 struct scatterlist sg[MAX_SKB_FRAGS + 1]; 641 struct scatterlist *sg;
613 unsigned char iv[GCM_AES_IV_LEN]; 642 unsigned char *iv;
614 struct ethhdr *eth; 643 struct ethhdr *eth;
615 struct macsec_eth_header *hh; 644 struct macsec_eth_header *hh;
616 size_t unprotected_len; 645 size_t unprotected_len;
@@ -668,8 +697,6 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
668 macsec_fill_sectag(hh, secy, pn); 697 macsec_fill_sectag(hh, secy, pn);
669 macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN); 698 macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
670 699
671 macsec_fill_iv(iv, secy->sci, pn);
672
673 skb_put(skb, secy->icv_len); 700 skb_put(skb, secy->icv_len);
674 701
675 if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) { 702 if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
@@ -684,13 +711,15 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
684 return ERR_PTR(-EINVAL); 711 return ERR_PTR(-EINVAL);
685 } 712 }
686 713
687 req = aead_request_alloc(tx_sa->key.tfm, GFP_ATOMIC); 714 req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
688 if (!req) { 715 if (!req) {
689 macsec_txsa_put(tx_sa); 716 macsec_txsa_put(tx_sa);
690 kfree_skb(skb); 717 kfree_skb(skb);
691 return ERR_PTR(-ENOMEM); 718 return ERR_PTR(-ENOMEM);
692 } 719 }
693 720
721 macsec_fill_iv(iv, secy->sci, pn);
722
694 sg_init_table(sg, MAX_SKB_FRAGS + 1); 723 sg_init_table(sg, MAX_SKB_FRAGS + 1);
695 skb_to_sgvec(skb, sg, 0, skb->len); 724 skb_to_sgvec(skb, sg, 0, skb->len);
696 725
@@ -861,7 +890,6 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
861out: 890out:
862 macsec_rxsa_put(rx_sa); 891 macsec_rxsa_put(rx_sa);
863 dev_put(dev); 892 dev_put(dev);
864 return;
865} 893}
866 894
867static struct sk_buff *macsec_decrypt(struct sk_buff *skb, 895static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
@@ -871,8 +899,8 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
871 struct macsec_secy *secy) 899 struct macsec_secy *secy)
872{ 900{
873 int ret; 901 int ret;
874 struct scatterlist sg[MAX_SKB_FRAGS + 1]; 902 struct scatterlist *sg;
875 unsigned char iv[GCM_AES_IV_LEN]; 903 unsigned char *iv;
876 struct aead_request *req; 904 struct aead_request *req;
877 struct macsec_eth_header *hdr; 905 struct macsec_eth_header *hdr;
878 u16 icv_len = secy->icv_len; 906 u16 icv_len = secy->icv_len;
@@ -882,7 +910,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
882 if (!skb) 910 if (!skb)
883 return ERR_PTR(-ENOMEM); 911 return ERR_PTR(-ENOMEM);
884 912
885 req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC); 913 req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
886 if (!req) { 914 if (!req) {
887 kfree_skb(skb); 915 kfree_skb(skb);
888 return ERR_PTR(-ENOMEM); 916 return ERR_PTR(-ENOMEM);
@@ -1234,7 +1262,7 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
1234 struct crypto_aead *tfm; 1262 struct crypto_aead *tfm;
1235 int ret; 1263 int ret;
1236 1264
1237 tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC); 1265 tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
1238 if (!tfm || IS_ERR(tfm)) 1266 if (!tfm || IS_ERR(tfm))
1239 return NULL; 1267 return NULL;
1240 1268
@@ -2612,6 +2640,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
2612 u64_stats_update_begin(&secy_stats->syncp); 2640 u64_stats_update_begin(&secy_stats->syncp);
2613 secy_stats->stats.OutPktsUntagged++; 2641 secy_stats->stats.OutPktsUntagged++;
2614 u64_stats_update_end(&secy_stats->syncp); 2642 u64_stats_update_end(&secy_stats->syncp);
2643 skb->dev = macsec->real_dev;
2615 len = skb->len; 2644 len = skb->len;
2616 ret = dev_queue_xmit(skb); 2645 ret = dev_queue_xmit(skb);
2617 count_tx(dev, ret, len); 2646 count_tx(dev, ret, len);
@@ -3361,6 +3390,7 @@ static void __exit macsec_exit(void)
3361 genl_unregister_family(&macsec_fam); 3390 genl_unregister_family(&macsec_fam);
3362 rtnl_link_unregister(&macsec_link_ops); 3391 rtnl_link_unregister(&macsec_link_ops);
3363 unregister_netdevice_notifier(&macsec_notifier); 3392 unregister_netdevice_notifier(&macsec_notifier);
3393 rcu_barrier();
3364} 3394}
3365 3395
3366module_init(macsec_init); 3396module_init(macsec_init);
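The key macsec change is macsec_alloc_req(): the IV and scatterlist move off the stack into a single heap allocation laid out around the aead_request, computed with explicit offsets and an ALIGN() for the scatterlist's alignment requirement, so one kmalloc()/kfree() covers all three objects. The carve-one-allocation idiom, as a standalone sketch with stand-in sizes:

```c
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct sketch_req { char ctx[64]; };	/* stand-in for the aead_request */

#define SKETCH_IV_LEN	12		/* e.g. the GCM IV length */
#define SKETCH_NFRAGS	18		/* e.g. MAX_SKB_FRAGS + 1 */

static struct sketch_req *sketch_alloc(unsigned char **iv,
				       struct scatterlist **sg)
{
	size_t size, iv_offset, sg_offset;
	void *tmp;

	size = sizeof(struct sketch_req);
	iv_offset = size;
	size += SKETCH_IV_LEN;

	/* Pad so the scatterlist lands on a properly aligned boundary. */
	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * SKETCH_NFRAGS;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = tmp + iv_offset;
	*sg = tmp + sg_offset;

	return tmp;	/* request sits first; one kfree() releases all */
}
```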
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 2afa61b51d41..91177a4a32ad 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -57,6 +57,7 @@
57 57
58/* PHY CTRL bits */ 58/* PHY CTRL bits */
59#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14 59#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
60#define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)
60 61
61/* RGMIIDCTL bits */ 62/* RGMIIDCTL bits */
62#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4 63#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
@@ -133,8 +134,8 @@ static int dp83867_of_init(struct phy_device *phydev)
133static int dp83867_config_init(struct phy_device *phydev) 134static int dp83867_config_init(struct phy_device *phydev)
134{ 135{
135 struct dp83867_private *dp83867; 136 struct dp83867_private *dp83867;
136 int ret; 137 int ret, val;
137 u16 val, delay; 138 u16 delay;
138 139
139 if (!phydev->priv) { 140 if (!phydev->priv) {
140 dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867), 141 dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
@@ -151,8 +152,12 @@ static int dp83867_config_init(struct phy_device *phydev)
151 } 152 }
152 153
153 if (phy_interface_is_rgmii(phydev)) { 154 if (phy_interface_is_rgmii(phydev)) {
154 ret = phy_write(phydev, MII_DP83867_PHYCTRL, 155 val = phy_read(phydev, MII_DP83867_PHYCTRL);
155 (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT)); 156 if (val < 0)
157 return val;
158 val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
159 val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
160 ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
156 if (ret) 161 if (ret)
157 return ret; 162 return ret;
158 } 163 }
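The dp83867 fix turns a blind phy_write() of PHYCTRL into a read-modify-write, so only the FIFO-depth field changes while strap-configured bits survive, and `val` becomes an int so a negative phy_read() error can be propagated. The idiom, with hypothetical register names:

```c
#include <linux/phy.h>

#define SKETCH_REG		0x10	/* hypothetical PHY register */
#define SKETCH_FIELD_SHIFT	14
#define SKETCH_FIELD_MASK	(3 << SKETCH_FIELD_SHIFT)

static int sketch_set_field(struct phy_device *phydev, u16 field)
{
	int val;

	val = phy_read(phydev, SKETCH_REG);
	if (val < 0)
		return val;		/* int keeps the error code intact */

	val &= ~SKETCH_FIELD_MASK;	/* clear only the field we own */
	val |= field << SKETCH_FIELD_SHIFT;

	return phy_write(phydev, SKETCH_REG, val);
}
```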
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index b376ada83598..c649c101bbab 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -24,6 +24,7 @@
24#include <linux/of.h> 24#include <linux/of.h>
25#include <linux/gpio.h> 25#include <linux/gpio.h>
26#include <linux/seqlock.h> 26#include <linux/seqlock.h>
27#include <linux/idr.h>
27 28
28#include "swphy.h" 29#include "swphy.h"
29 30
@@ -189,6 +190,8 @@ err_regs:
189} 190}
190EXPORT_SYMBOL_GPL(fixed_phy_add); 191EXPORT_SYMBOL_GPL(fixed_phy_add);
191 192
193static DEFINE_IDA(phy_fixed_ida);
194
192static void fixed_phy_del(int phy_addr) 195static void fixed_phy_del(int phy_addr)
193{ 196{
194 struct fixed_mdio_bus *fmb = &platform_fmb; 197 struct fixed_mdio_bus *fmb = &platform_fmb;
@@ -200,14 +203,12 @@ static void fixed_phy_del(int phy_addr)
200 if (gpio_is_valid(fp->link_gpio)) 203 if (gpio_is_valid(fp->link_gpio))
201 gpio_free(fp->link_gpio); 204 gpio_free(fp->link_gpio);
202 kfree(fp); 205 kfree(fp);
206 ida_simple_remove(&phy_fixed_ida, phy_addr);
203 return; 207 return;
204 } 208 }
205 } 209 }
206} 210}
207 211
208static int phy_fixed_addr;
209static DEFINE_SPINLOCK(phy_fixed_addr_lock);
210
211struct phy_device *fixed_phy_register(unsigned int irq, 212struct phy_device *fixed_phy_register(unsigned int irq,
212 struct fixed_phy_status *status, 213 struct fixed_phy_status *status,
213 int link_gpio, 214 int link_gpio,
@@ -222,17 +223,15 @@ struct phy_device *fixed_phy_register(unsigned int irq,
222 return ERR_PTR(-EPROBE_DEFER); 223 return ERR_PTR(-EPROBE_DEFER);
223 224
224 /* Get the next available PHY address, up to PHY_MAX_ADDR */ 225 /* Get the next available PHY address, up to PHY_MAX_ADDR */
225 spin_lock(&phy_fixed_addr_lock); 226 phy_addr = ida_simple_get(&phy_fixed_ida, 0, PHY_MAX_ADDR, GFP_KERNEL);
226 if (phy_fixed_addr == PHY_MAX_ADDR) { 227 if (phy_addr < 0)
227 spin_unlock(&phy_fixed_addr_lock); 228 return ERR_PTR(phy_addr);
228 return ERR_PTR(-ENOSPC);
229 }
230 phy_addr = phy_fixed_addr++;
231 spin_unlock(&phy_fixed_addr_lock);
232 229
233 ret = fixed_phy_add(irq, phy_addr, status, link_gpio); 230 ret = fixed_phy_add(irq, phy_addr, status, link_gpio);
234 if (ret < 0) 231 if (ret < 0) {
232 ida_simple_remove(&phy_fixed_ida, phy_addr);
235 return ERR_PTR(ret); 233 return ERR_PTR(ret);
234 }
236 235
237 phy = get_phy_device(fmb->mii_bus, phy_addr, false); 236 phy = get_phy_device(fmb->mii_bus, phy_addr, false);
238 if (IS_ERR(phy)) { 237 if (IS_ERR(phy)) {
@@ -337,6 +336,7 @@ static void __exit fixed_mdio_bus_exit(void)
337 list_del(&fp->node); 336 list_del(&fp->node);
338 kfree(fp); 337 kfree(fp);
339 } 338 }
339 ida_destroy(&phy_fixed_ida);
340} 340}
341module_exit(fixed_mdio_bus_exit); 341module_exit(fixed_mdio_bus_exit);
342 342
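fixed_phy address allocation moves from a forever-incrementing counter under a spinlock to an IDA, so addresses freed by fixed_phy_del() become reusable instead of exhausting PHY_MAX_ADDR after enough register/unregister cycles. The lifecycle, sketched:

```c
#include <linux/idr.h>

static DEFINE_IDA(sketch_addr_ida);

/* Smallest free address in [0, max_addr); may sleep with GFP_KERNEL. */
static int sketch_get_addr(int max_addr)
{
	return ida_simple_get(&sketch_addr_ida, 0, max_addr, GFP_KERNEL);
}

/* Returning the address makes it available to the next caller. */
static void sketch_put_addr(int addr)
{
	ida_simple_remove(&sketch_addr_ida, addr);
}
```

Note the register path also rolls the address back with ida_simple_remove() when fixed_phy_add() fails, mirroring the sketch's put.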
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 280e8795b463..ec2c1eee6405 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -285,6 +285,48 @@ static int marvell_config_aneg(struct phy_device *phydev)
285 return 0; 285 return 0;
286} 286}
287 287
288static int m88e1111_config_aneg(struct phy_device *phydev)
289{
290 int err;
291
292 /* The Marvell PHY has an errata which requires
293 * that certain registers get written in order
294 * to restart autonegotiation
295 */
296 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
297
298 err = marvell_set_polarity(phydev, phydev->mdix);
299 if (err < 0)
300 return err;
301
302 err = phy_write(phydev, MII_M1111_PHY_LED_CONTROL,
303 MII_M1111_PHY_LED_DIRECT);
304 if (err < 0)
305 return err;
306
307 err = genphy_config_aneg(phydev);
308 if (err < 0)
309 return err;
310
311 if (phydev->autoneg != AUTONEG_ENABLE) {
312 int bmcr;
313
314 /* A write to speed/duplex bits (that is performed by
315 * genphy_config_aneg() call above) must be followed by
316 * a software reset. Otherwise, the write has no effect.
317 */
318 bmcr = phy_read(phydev, MII_BMCR);
319 if (bmcr < 0)
320 return bmcr;
321
322 err = phy_write(phydev, MII_BMCR, bmcr | BMCR_RESET);
323 if (err < 0)
324 return err;
325 }
326
327 return 0;
328}
329
288#ifdef CONFIG_OF_MDIO 330#ifdef CONFIG_OF_MDIO
289/* 331/*
290 * Set and/or override some configuration registers based on the 332 * Set and/or override some configuration registers based on the
@@ -407,15 +449,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
407 if (err < 0) 449 if (err < 0)
408 return err; 450 return err;
409 451
410 oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE); 452 return genphy_config_aneg(phydev);
411
412 phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
413 phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
414 phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
415
416 err = genphy_config_aneg(phydev);
417
418 return err;
419} 453}
420 454
421static int m88e1318_config_aneg(struct phy_device *phydev) 455static int m88e1318_config_aneg(struct phy_device *phydev)
@@ -636,6 +670,28 @@ static int m88e1111_config_init(struct phy_device *phydev)
636 return phy_write(phydev, MII_BMCR, BMCR_RESET); 670 return phy_write(phydev, MII_BMCR, BMCR_RESET);
637} 671}
638 672
673static int m88e1121_config_init(struct phy_device *phydev)
674{
675 int err, oldpage;
676
677 oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
678
679 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
680 if (err < 0)
681 return err;
682
683 /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
684 err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL,
685 MII_88E1121_PHY_LED_DEF);
686 if (err < 0)
687 return err;
688
689 phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
690
691 /* Set marvell,reg-init configuration from device tree */
692 return marvell_config_init(phydev);
693}
694
639static int m88e1510_config_init(struct phy_device *phydev) 695static int m88e1510_config_init(struct phy_device *phydev)
640{ 696{
641 int err; 697 int err;
@@ -668,7 +724,7 @@ static int m88e1510_config_init(struct phy_device *phydev)
668 return err; 724 return err;
669 } 725 }
670 726
671 return marvell_config_init(phydev); 727 return m88e1121_config_init(phydev);
672} 728}
673 729
674static int m88e1118_config_aneg(struct phy_device *phydev) 730static int m88e1118_config_aneg(struct phy_device *phydev)
@@ -1161,7 +1217,7 @@ static struct phy_driver marvell_drivers[] = {
1161 .flags = PHY_HAS_INTERRUPT, 1217 .flags = PHY_HAS_INTERRUPT,
1162 .probe = marvell_probe, 1218 .probe = marvell_probe,
1163 .config_init = &m88e1111_config_init, 1219 .config_init = &m88e1111_config_init,
1164 .config_aneg = &marvell_config_aneg, 1220 .config_aneg = &m88e1111_config_aneg,
1165 .read_status = &marvell_read_status, 1221 .read_status = &marvell_read_status,
1166 .ack_interrupt = &marvell_ack_interrupt, 1222 .ack_interrupt = &marvell_ack_interrupt,
1167 .config_intr = &marvell_config_intr, 1223 .config_intr = &marvell_config_intr,
@@ -1196,7 +1252,7 @@ static struct phy_driver marvell_drivers[] = {
1196 .features = PHY_GBIT_FEATURES, 1252 .features = PHY_GBIT_FEATURES,
1197 .flags = PHY_HAS_INTERRUPT, 1253 .flags = PHY_HAS_INTERRUPT,
1198 .probe = marvell_probe, 1254 .probe = marvell_probe,
1199 .config_init = &marvell_config_init, 1255 .config_init = &m88e1121_config_init,
1200 .config_aneg = &m88e1121_config_aneg, 1256 .config_aneg = &m88e1121_config_aneg,
1201 .read_status = &marvell_read_status, 1257 .read_status = &marvell_read_status,
1202 .ack_interrupt = &marvell_ack_interrupt, 1258 .ack_interrupt = &marvell_ack_interrupt,
@@ -1215,7 +1271,7 @@ static struct phy_driver marvell_drivers[] = {
1215 .features = PHY_GBIT_FEATURES, 1271 .features = PHY_GBIT_FEATURES,
1216 .flags = PHY_HAS_INTERRUPT, 1272 .flags = PHY_HAS_INTERRUPT,
1217 .probe = marvell_probe, 1273 .probe = marvell_probe,
1218 .config_init = &marvell_config_init, 1274 .config_init = &m88e1121_config_init,
1219 .config_aneg = &m88e1318_config_aneg, 1275 .config_aneg = &m88e1318_config_aneg,
1220 .read_status = &marvell_read_status, 1276 .read_status = &marvell_read_status,
1221 .ack_interrupt = &marvell_ack_interrupt, 1277 .ack_interrupt = &marvell_ack_interrupt,
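Two related marvell fixes: the LED register setup moves out of m88e1121_config_aneg() into a new m88e1121_config_init() (one-time initialization does not belong in a hook that runs on every renegotiation), and the new m88e1111_config_aneg() follows any forced speed/duplex write with a BMCR software reset, without which the write does not take effect on this PHY. The reset-after-forced-mode idiom:

```c
#include <linux/mii.h>
#include <linux/phy.h>

static int sketch_force_then_reset(struct phy_device *phydev)
{
	int err, bmcr;

	err = genphy_config_aneg(phydev);	/* writes speed/duplex bits */
	if (err < 0)
		return err;

	if (phydev->autoneg != AUTONEG_ENABLE) {
		/* Forced-mode writes only latch after a soft reset. */
		bmcr = phy_read(phydev, MII_BMCR);
		if (bmcr < 0)
			return bmcr;

		err = phy_write(phydev, MII_BMCR, bmcr | BMCR_RESET);
		if (err < 0)
			return err;
	}

	return 0;
}
```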
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 2e21e9366f76..b62c4aaee40b 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -75,22 +75,13 @@ static int smsc_phy_reset(struct phy_device *phydev)
75 * in all capable mode before using it. 75 * in all capable mode before using it.
76 */ 76 */
77 if ((rc & MII_LAN83C185_MODE_MASK) == MII_LAN83C185_MODE_POWERDOWN) { 77 if ((rc & MII_LAN83C185_MODE_MASK) == MII_LAN83C185_MODE_POWERDOWN) {
78 int timeout = 50000; 78 /* set "all capable" mode */
79
80 /* set "all capable" mode and reset the phy */
81 rc |= MII_LAN83C185_MODE_ALL; 79 rc |= MII_LAN83C185_MODE_ALL;
82 phy_write(phydev, MII_LAN83C185_SPECIAL_MODES, rc); 80 phy_write(phydev, MII_LAN83C185_SPECIAL_MODES, rc);
83 phy_write(phydev, MII_BMCR, BMCR_RESET);
84
85 /* wait end of reset (max 500 ms) */
86 do {
87 udelay(10);
88 if (timeout-- == 0)
89 return -1;
90 rc = phy_read(phydev, MII_BMCR);
91 } while (rc & BMCR_RESET);
92 } 81 }
93 return 0; 82
83 /* reset the phy */
84 return genphy_soft_reset(phydev);
94} 85}
95 86
96static int lan911x_config_init(struct phy_device *phydev) 87static int lan911x_config_init(struct phy_device *phydev)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 0a1bb8387d96..a380649bf6b5 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1203,8 +1203,10 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
1203 goto err_dev_open; 1203 goto err_dev_open;
1204 } 1204 }
1205 1205
1206 netif_addr_lock_bh(dev);
1206 dev_uc_sync_multiple(port_dev, dev); 1207 dev_uc_sync_multiple(port_dev, dev);
1207 dev_mc_sync_multiple(port_dev, dev); 1208 dev_mc_sync_multiple(port_dev, dev);
1209 netif_addr_unlock_bh(dev);
1208 1210
1209 err = vlan_vids_add_by_dev(port_dev, dev); 1211 err = vlan_vids_add_by_dev(port_dev, dev);
1210 if (err) { 1212 if (err) {
@@ -2000,6 +2002,8 @@ static const struct net_device_ops team_netdev_ops = {
2000 .ndo_add_slave = team_add_slave, 2002 .ndo_add_slave = team_add_slave,
2001 .ndo_del_slave = team_del_slave, 2003 .ndo_del_slave = team_del_slave,
2002 .ndo_fix_features = team_fix_features, 2004 .ndo_fix_features = team_fix_features,
2005 .ndo_neigh_construct = netdev_default_l2upper_neigh_construct,
2006 .ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy,
2003 .ndo_change_carrier = team_change_carrier, 2007 .ndo_change_carrier = team_change_carrier,
2004 .ndo_bridge_setlink = switchdev_port_bridge_setlink, 2008 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
2005 .ndo_bridge_getlink = switchdev_port_bridge_getlink, 2009 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
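team_port_add() now holds the master's address-list lock across the unicast/multicast sync, since dev_uc_sync_multiple()/dev_mc_sync_multiple() walk and mutate those lists and expect netif_addr_lock to be held by the caller. The pattern, isolated:

```c
#include <linux/netdevice.h>

static void sketch_sync_port_addrs(struct net_device *port_dev,
				   struct net_device *master)
{
	/* BH-safe lock protecting the master's uc/mc address lists. */
	netif_addr_lock_bh(master);
	dev_uc_sync_multiple(port_dev, master);
	dev_mc_sync_multiple(port_dev, master);
	netif_addr_unlock_bh(master);
}
```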
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4884802e0af1..5eadb7a1ad7b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -71,6 +71,7 @@
71#include <net/sock.h> 71#include <net/sock.h>
72#include <linux/seq_file.h> 72#include <linux/seq_file.h>
73#include <linux/uio.h> 73#include <linux/uio.h>
74#include <linux/skb_array.h>
74 75
75#include <asm/uaccess.h> 76#include <asm/uaccess.h>
76 77
@@ -167,6 +168,7 @@ struct tun_file {
167 }; 168 };
168 struct list_head next; 169 struct list_head next;
169 struct tun_struct *detached; 170 struct tun_struct *detached;
171 struct skb_array tx_array;
170}; 172};
171 173
172struct tun_flow_entry { 174struct tun_flow_entry {
@@ -515,7 +517,11 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
515 517
516static void tun_queue_purge(struct tun_file *tfile) 518static void tun_queue_purge(struct tun_file *tfile)
517{ 519{
518 skb_queue_purge(&tfile->sk.sk_receive_queue); 520 struct sk_buff *skb;
521
522 while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
523 kfree_skb(skb);
524
519 skb_queue_purge(&tfile->sk.sk_error_queue); 525 skb_queue_purge(&tfile->sk.sk_error_queue);
520} 526}
521 527
@@ -560,6 +566,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
560 tun->dev->reg_state == NETREG_REGISTERED) 566 tun->dev->reg_state == NETREG_REGISTERED)
561 unregister_netdevice(tun->dev); 567 unregister_netdevice(tun->dev);
562 } 568 }
569 if (tun)
570 skb_array_cleanup(&tfile->tx_array);
563 sock_put(&tfile->sk); 571 sock_put(&tfile->sk);
564 } 572 }
565} 573}
@@ -613,6 +621,7 @@ static void tun_detach_all(struct net_device *dev)
613static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter) 621static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
614{ 622{
615 struct tun_file *tfile = file->private_data; 623 struct tun_file *tfile = file->private_data;
624 struct net_device *dev = tun->dev;
616 int err; 625 int err;
617 626
618 err = security_tun_dev_attach(tfile->socket.sk, tun->security); 627 err = security_tun_dev_attach(tfile->socket.sk, tun->security);
@@ -642,6 +651,13 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
642 if (!err) 651 if (!err)
643 goto out; 652 goto out;
644 } 653 }
654
655 if (!tfile->detached &&
656 skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
657 err = -ENOMEM;
658 goto out;
659 }
660
645 tfile->queue_index = tun->numqueues; 661 tfile->queue_index = tun->numqueues;
646 tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN; 662 tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
647 rcu_assign_pointer(tfile->tun, tun); 663 rcu_assign_pointer(tfile->tun, tun);
@@ -891,8 +907,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
891 907
892 nf_reset(skb); 908 nf_reset(skb);
893 909
894 /* Enqueue packet */ 910 if (skb_array_produce(&tfile->tx_array, skb))
895 skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb); 911 goto drop;
896 912
897 /* Notify and wake up reader process */ 913 /* Notify and wake up reader process */
898 if (tfile->flags & TUN_FASYNC) 914 if (tfile->flags & TUN_FASYNC)
@@ -1107,7 +1123,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
1107 1123
1108 poll_wait(file, sk_sleep(sk), wait); 1124 poll_wait(file, sk_sleep(sk), wait);
1109 1125
1110 if (!skb_queue_empty(&sk->sk_receive_queue)) 1126 if (!skb_array_empty(&tfile->tx_array))
1111 mask |= POLLIN | POLLRDNORM; 1127 mask |= POLLIN | POLLRDNORM;
1112 1128
1113 if (sock_writeable(sk) || 1129 if (sock_writeable(sk) ||
@@ -1426,22 +1442,63 @@ done:
1426 return total; 1442 return total;
1427} 1443}
1428 1444
1445static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
1446 int *err)
1447{
1448 DECLARE_WAITQUEUE(wait, current);
1449 struct sk_buff *skb = NULL;
1450 int error = 0;
1451
1452 skb = skb_array_consume(&tfile->tx_array);
1453 if (skb)
1454 goto out;
1455 if (noblock) {
1456 error = -EAGAIN;
1457 goto out;
1458 }
1459
1460 add_wait_queue(&tfile->wq.wait, &wait);
1461 current->state = TASK_INTERRUPTIBLE;
1462
1463 while (1) {
1464 skb = skb_array_consume(&tfile->tx_array);
1465 if (skb)
1466 break;
1467 if (signal_pending(current)) {
1468 error = -ERESTARTSYS;
1469 break;
1470 }
1471 if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
1472 error = -EFAULT;
1473 break;
1474 }
1475
1476 schedule();
1477 }
1478
1479 current->state = TASK_RUNNING;
1480 remove_wait_queue(&tfile->wq.wait, &wait);
1481
1482out:
1483 *err = error;
1484 return skb;
1485}
1486
1429static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, 1487static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1430 struct iov_iter *to, 1488 struct iov_iter *to,
1431 int noblock) 1489 int noblock)
1432{ 1490{
1433 struct sk_buff *skb; 1491 struct sk_buff *skb;
1434 ssize_t ret; 1492 ssize_t ret;
1435 int peeked, err, off = 0; 1493 int err;
1436 1494
1437 tun_debug(KERN_INFO, tun, "tun_do_read\n"); 1495 tun_debug(KERN_INFO, tun, "tun_do_read\n");
1438 1496
1439 if (!iov_iter_count(to)) 1497 if (!iov_iter_count(to))
1440 return 0; 1498 return 0;
1441 1499
1442 /* Read frames from queue */ 1500 /* Read frames from ring */
1443 skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0, 1501 skb = tun_ring_recv(tfile, noblock, &err);
1444 &peeked, &off, &err);
1445 if (!skb) 1502 if (!skb)
1446 return err; 1503 return err;
1447 1504
@@ -1574,8 +1631,25 @@ out:
1574 return ret; 1631 return ret;
1575} 1632}
1576 1633
1634static int tun_peek_len(struct socket *sock)
1635{
1636 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1637 struct tun_struct *tun;
1638 int ret = 0;
1639
1640 tun = __tun_get(tfile);
1641 if (!tun)
1642 return 0;
1643
1644 ret = skb_array_peek_len(&tfile->tx_array);
1645 tun_put(tun);
1646
1647 return ret;
1648}
1649
1577/* Ops structure to mimic raw sockets with tun */ 1650/* Ops structure to mimic raw sockets with tun */
1578static const struct proto_ops tun_socket_ops = { 1651static const struct proto_ops tun_socket_ops = {
1652 .peek_len = tun_peek_len,
1579 .sendmsg = tun_sendmsg, 1653 .sendmsg = tun_sendmsg,
1580 .recvmsg = tun_recvmsg, 1654 .recvmsg = tun_recvmsg,
1581}; 1655};
@@ -2397,6 +2471,53 @@ static const struct ethtool_ops tun_ethtool_ops = {
2397 .get_ts_info = ethtool_op_get_ts_info, 2471 .get_ts_info = ethtool_op_get_ts_info,
2398}; 2472};
2399 2473
2474static int tun_queue_resize(struct tun_struct *tun)
2475{
2476 struct net_device *dev = tun->dev;
2477 struct tun_file *tfile;
2478 struct skb_array **arrays;
2479 int n = tun->numqueues + tun->numdisabled;
2480 int ret, i;
2481
2482 arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL);
2483 if (!arrays)
2484 return -ENOMEM;
2485
2486 for (i = 0; i < tun->numqueues; i++) {
2487 tfile = rtnl_dereference(tun->tfiles[i]);
2488 arrays[i] = &tfile->tx_array;
2489 }
2490 list_for_each_entry(tfile, &tun->disabled, next)
2491 arrays[i++] = &tfile->tx_array;
2492
2493 ret = skb_array_resize_multiple(arrays, n,
2494 dev->tx_queue_len, GFP_KERNEL);
2495
2496 kfree(arrays);
2497 return ret;
2498}
2499
2500static int tun_device_event(struct notifier_block *unused,
2501 unsigned long event, void *ptr)
2502{
2503 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2504 struct tun_struct *tun = netdev_priv(dev);
2505
2506 switch (event) {
2507 case NETDEV_CHANGE_TX_QUEUE_LEN:
2508 if (tun_queue_resize(tun))
2509 return NOTIFY_BAD;
2510 break;
2511 default:
2512 break;
2513 }
2514
2515 return NOTIFY_DONE;
2516}
2517
2518static struct notifier_block tun_notifier_block __read_mostly = {
2519 .notifier_call = tun_device_event,
2520};
2400 2521
2401static int __init tun_init(void) 2522static int __init tun_init(void)
2402{ 2523{
@@ -2416,6 +2537,8 @@ static int __init tun_init(void)
2416 pr_err("Can't register misc device %d\n", TUN_MINOR); 2537 pr_err("Can't register misc device %d\n", TUN_MINOR);
2417 goto err_misc; 2538 goto err_misc;
2418 } 2539 }
2540
2541 register_netdevice_notifier(&tun_notifier_block);
2419 return 0; 2542 return 0;
2420err_misc: 2543err_misc:
2421 rtnl_link_unregister(&tun_link_ops); 2544 rtnl_link_unregister(&tun_link_ops);
@@ -2427,6 +2550,7 @@ static void tun_cleanup(void)
2427{ 2550{
2428 misc_deregister(&tun_miscdev); 2551 misc_deregister(&tun_miscdev);
2429 rtnl_link_unregister(&tun_link_ops); 2552 rtnl_link_unregister(&tun_link_ops);
2553 unregister_netdevice_notifier(&tun_notifier_block);
2430} 2554}
2431 2555
2432/* Get an underlying socket object from tun file. Returns error unless file is 2556/* Get an underlying socket object from tun file. Returns error unless file is
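
Note on the tun.c hunks above: they replace the socket receive queue with a fixed-size skb_array ring, and tun_ring_recv() shows the canonical blocking-consumer shape, namely try the fast path, then register as a waiter and re-check in a loop so a wakeup that races the first check is never lost. A minimal userspace analogy of that shape follows; it is a sketch only, using a mutex/condvar pair in place of the kernel's lockless ring and wait queue, and it omits the signal and RCV_SHUTDOWN exits that tun_ring_recv() must handle.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define RING_SIZE 64

static void *slots[RING_SIZE];
static unsigned int head, tail;               /* consume at head, produce at tail */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t nonempty = PTHREAD_COND_INITIALIZER;

/* Non-blocking pop, the analogue of skb_array_consume(). */
static void *ring_consume(void)
{
        void *item = NULL;

        pthread_mutex_lock(&lock);
        if (head != tail)
                item = slots[head++ % RING_SIZE];
        pthread_mutex_unlock(&lock);
        return item;
}

void ring_produce(void *item)
{
        pthread_mutex_lock(&lock);
        slots[tail++ % RING_SIZE] = item;     /* full-ring check omitted in this sketch */
        pthread_cond_signal(&nonempty);
        pthread_mutex_unlock(&lock);
}

/* The tun_ring_recv() shape: fast path first, then wait and re-check. */
void *blocking_consume(bool noblock)
{
        void *item = ring_consume();
        if (item || noblock)
                return item;                  /* the -EAGAIN case maps to NULL here */

        pthread_mutex_lock(&lock);
        while (head == tail)                  /* re-check under the lock, then sleep */
                pthread_cond_wait(&nonempty, &lock);
        item = slots[head++ % RING_SIZE];
        pthread_mutex_unlock(&lock);
        return item;
}

The kernel version uses TASK_INTERRUPTIBLE plus schedule() instead of a condvar, which is why the loop must also break out on signal_pending() and socket shutdown.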
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 53759c315b97..877c9516e781 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -854,6 +854,13 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
854 if (cdc_ncm_init(dev)) 854 if (cdc_ncm_init(dev))
855 goto error2; 855 goto error2;
856 856
 857 /* Some firmware versions need a pause here or they will silently fail
858 * to set up the interface properly. This value was decided
859 * empirically on a Sierra Wireless MC7455 running 02.08.02.00
860 * firmware.
861 */
862 usleep_range(10000, 20000);
863
857 /* configure data interface */ 864 /* configure data interface */
858 temp = usb_set_interface(dev->udev, iface_no, data_altsetting); 865 temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
859 if (temp) { 866 if (temp) {
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 11178f9dc838..b225bc27fbe2 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -31,7 +31,7 @@
31#define NETNEXT_VERSION "08" 31#define NETNEXT_VERSION "08"
32 32
33/* Information for net */ 33/* Information for net */
34#define NET_VERSION "3" 34#define NET_VERSION "5"
35 35
36#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION 36#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
37#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" 37#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -116,6 +116,7 @@
116#define USB_TX_DMA 0xd434 116#define USB_TX_DMA 0xd434
117#define USB_TOLERANCE 0xd490 117#define USB_TOLERANCE 0xd490
118#define USB_LPM_CTRL 0xd41a 118#define USB_LPM_CTRL 0xd41a
119#define USB_BMU_RESET 0xd4b0
119#define USB_UPS_CTRL 0xd800 120#define USB_UPS_CTRL 0xd800
120#define USB_MISC_0 0xd81a 121#define USB_MISC_0 0xd81a
121#define USB_POWER_CUT 0xd80a 122#define USB_POWER_CUT 0xd80a
@@ -338,6 +339,10 @@
338#define TEST_MODE_DISABLE 0x00000001 339#define TEST_MODE_DISABLE 0x00000001
339#define TX_SIZE_ADJUST1 0x00000100 340#define TX_SIZE_ADJUST1 0x00000100
340 341
342/* USB_BMU_RESET */
343#define BMU_RESET_EP_IN 0x01
344#define BMU_RESET_EP_OUT 0x02
345
341/* USB_UPS_CTRL */ 346/* USB_UPS_CTRL */
342#define POWER_CUT 0x0100 347#define POWER_CUT 0x0100
343 348
@@ -620,6 +625,7 @@ struct r8152 {
620 int (*eee_set)(struct r8152 *, struct ethtool_eee *); 625 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
621 bool (*in_nway)(struct r8152 *); 626 bool (*in_nway)(struct r8152 *);
622 void (*hw_phy_cfg)(struct r8152 *); 627 void (*hw_phy_cfg)(struct r8152 *);
628 void (*autosuspend_en)(struct r8152 *tp, bool enable);
623 } rtl_ops; 629 } rtl_ops;
624 630
625 int intr_interval; 631 int intr_interval;
@@ -2173,7 +2179,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
2173static void r8153_set_rx_early_size(struct r8152 *tp) 2179static void r8153_set_rx_early_size(struct r8152 *tp)
2174{ 2180{
2175 u32 mtu = tp->netdev->mtu; 2181 u32 mtu = tp->netdev->mtu;
2176 u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 4; 2182 u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
2177 2183
2178 ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data); 2184 ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
2179} 2185}
@@ -2407,9 +2413,6 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
2407 if (enable) { 2413 if (enable) {
2408 u32 ocp_data; 2414 u32 ocp_data;
2409 2415
2410 r8153_u1u2en(tp, false);
2411 r8153_u2p3en(tp, false);
2412
2413 __rtl_set_wol(tp, WAKE_ANY); 2416 __rtl_set_wol(tp, WAKE_ANY);
2414 2417
2415 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); 2418 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2420,7 +2423,28 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
2420 2423
2421 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); 2424 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
2422 } else { 2425 } else {
2426 u32 ocp_data;
2427
2423 __rtl_set_wol(tp, tp->saved_wolopts); 2428 __rtl_set_wol(tp, tp->saved_wolopts);
2429
2430 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
2431
2432 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
2433 ocp_data &= ~LINK_OFF_WAKE_EN;
2434 ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
2435
2436 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
2437 }
2438}
2439
2440static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
2441{
2442 rtl_runtime_suspend_enable(tp, enable);
2443
2444 if (enable) {
2445 r8153_u1u2en(tp, false);
2446 r8153_u2p3en(tp, false);
2447 } else {
2424 r8153_u2p3en(tp, true); 2448 r8153_u2p3en(tp, true);
2425 r8153_u1u2en(tp, true); 2449 r8153_u1u2en(tp, true);
2426 } 2450 }
@@ -2460,6 +2484,17 @@ static void r8153_teredo_off(struct r8152 *tp)
2460 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0); 2484 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
2461} 2485}
2462 2486
2487static void rtl_reset_bmu(struct r8152 *tp)
2488{
2489 u32 ocp_data;
2490
2491 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_RESET);
2492 ocp_data &= ~(BMU_RESET_EP_IN | BMU_RESET_EP_OUT);
2493 ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
2494 ocp_data |= BMU_RESET_EP_IN | BMU_RESET_EP_OUT;
2495 ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
2496}
2497
2463static void r8152_aldps_en(struct r8152 *tp, bool enable) 2498static void r8152_aldps_en(struct r8152 *tp, bool enable)
2464{ 2499{
2465 if (enable) { 2500 if (enable) {
@@ -2681,6 +2716,7 @@ static void r8153_first_init(struct r8152 *tp)
2681 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); 2716 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
2682 2717
2683 rtl8152_nic_reset(tp); 2718 rtl8152_nic_reset(tp);
2719 rtl_reset_bmu(tp);
2684 2720
2685 ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); 2721 ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
2686 ocp_data &= ~NOW_IS_OOB; 2722 ocp_data &= ~NOW_IS_OOB;
@@ -2742,6 +2778,7 @@ static void r8153_enter_oob(struct r8152 *tp)
2742 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); 2778 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
2743 2779
2744 rtl_disable(tp); 2780 rtl_disable(tp);
2781 rtl_reset_bmu(tp);
2745 2782
2746 for (i = 0; i < 1000; i++) { 2783 for (i = 0; i < 1000; i++) {
2747 ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); 2784 ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -2803,6 +2840,7 @@ static void rtl8153_disable(struct r8152 *tp)
2803{ 2840{
2804 r8153_aldps_en(tp, false); 2841 r8153_aldps_en(tp, false);
2805 rtl_disable(tp); 2842 rtl_disable(tp);
2843 rtl_reset_bmu(tp);
2806 r8153_aldps_en(tp, true); 2844 r8153_aldps_en(tp, true);
2807 usb_enable_lpm(tp->udev); 2845 usb_enable_lpm(tp->udev);
2808} 2846}
@@ -3400,15 +3438,11 @@ static void r8153_init(struct r8152 *tp)
3400 r8153_power_cut_en(tp, false); 3438 r8153_power_cut_en(tp, false);
3401 r8153_u1u2en(tp, true); 3439 r8153_u1u2en(tp, true);
3402 3440
3403 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO); 3441 /* MAC clock speed down */
3404 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO); 3442 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
3405 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 3443 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
3406 PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN | 3444 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
3407 U1U2_SPDWN_EN | L1_SPDWN_EN); 3445 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
3408 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
3409 PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
3410 TP100_SPDWN_EN | TP500_SPDWN_EN | TP1000_SPDWN_EN |
3411 EEE_SPDWN_EN);
3412 3446
3413 r8153_enable_eee(tp); 3447 r8153_enable_eee(tp);
3414 r8153_aldps_en(tp, true); 3448 r8153_aldps_en(tp, true);
@@ -3515,7 +3549,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
3515 napi_disable(&tp->napi); 3549 napi_disable(&tp->napi);
3516 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3550 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3517 rtl_stop_rx(tp); 3551 rtl_stop_rx(tp);
3518 rtl_runtime_suspend_enable(tp, true); 3552 tp->rtl_ops.autosuspend_en(tp, true);
3519 } else { 3553 } else {
3520 cancel_delayed_work_sync(&tp->schedule); 3554 cancel_delayed_work_sync(&tp->schedule);
3521 tp->rtl_ops.down(tp); 3555 tp->rtl_ops.down(tp);
@@ -3542,7 +3576,7 @@ static int rtl8152_resume(struct usb_interface *intf)
3542 3576
3543 if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) { 3577 if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
3544 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3578 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3545 rtl_runtime_suspend_enable(tp, false); 3579 tp->rtl_ops.autosuspend_en(tp, false);
3546 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3580 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3547 napi_disable(&tp->napi); 3581 napi_disable(&tp->napi);
3548 set_bit(WORK_ENABLE, &tp->flags); 3582 set_bit(WORK_ENABLE, &tp->flags);
@@ -3557,7 +3591,7 @@ static int rtl8152_resume(struct usb_interface *intf)
3557 usb_submit_urb(tp->intr_urb, GFP_KERNEL); 3591 usb_submit_urb(tp->intr_urb, GFP_KERNEL);
3558 } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3592 } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3559 if (tp->netdev->flags & IFF_UP) 3593 if (tp->netdev->flags & IFF_UP)
3560 rtl_runtime_suspend_enable(tp, false); 3594 tp->rtl_ops.autosuspend_en(tp, false);
3561 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3595 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3562 } 3596 }
3563 3597
@@ -4143,6 +4177,7 @@ static int rtl_ops_init(struct r8152 *tp)
4143 ops->eee_set = r8152_set_eee; 4177 ops->eee_set = r8152_set_eee;
4144 ops->in_nway = rtl8152_in_nway; 4178 ops->in_nway = rtl8152_in_nway;
4145 ops->hw_phy_cfg = r8152b_hw_phy_cfg; 4179 ops->hw_phy_cfg = r8152b_hw_phy_cfg;
4180 ops->autosuspend_en = rtl_runtime_suspend_enable;
4146 break; 4181 break;
4147 4182
4148 case RTL_VER_03: 4183 case RTL_VER_03:
@@ -4159,6 +4194,7 @@ static int rtl_ops_init(struct r8152 *tp)
4159 ops->eee_set = r8153_set_eee; 4194 ops->eee_set = r8153_set_eee;
4160 ops->in_nway = rtl8153_in_nway; 4195 ops->in_nway = rtl8153_in_nway;
4161 ops->hw_phy_cfg = r8153_hw_phy_cfg; 4196 ops->hw_phy_cfg = r8153_hw_phy_cfg;
4197 ops->autosuspend_en = rtl8153_runtime_enable;
4162 break; 4198 break;
4163 4199
4164 default: 4200 default:
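
The new rtl_reset_bmu() in the r8152.c hunks above is a plain read-modify-write pulse: clear the two endpoint reset bits, write the register back, then set them again, preserving the other bits across both writes. A self-contained sketch of the same pulse against a fake register file; the accessors below are stand-ins for the driver's ocp_read_byte()/ocp_write_byte(), which go over USB.

#include <stdint.h>

#define RESET_EP_IN  0x01   /* mirrors BMU_RESET_EP_IN */
#define RESET_EP_OUT 0x02   /* mirrors BMU_RESET_EP_OUT */

/* Fake register file standing in for the device's OCP space. */
static uint8_t regs[0x10000];

static uint8_t reg_read(uint16_t addr)             { return regs[addr]; }
static void    reg_write(uint16_t addr, uint8_t v) { regs[addr] = v; }

/* Pulse the reset bits: drop them, then raise them, leaving every
 * other bit in the register untouched. */
void pulse_bmu_reset(uint16_t addr)
{
        uint8_t v = reg_read(addr);

        v &= (uint8_t)~(RESET_EP_IN | RESET_EP_OUT);
        reg_write(addr, v);

        v |= RESET_EP_IN | RESET_EP_OUT;
        reg_write(addr, v);
}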
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 61ba46404937..6086a0163249 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -395,8 +395,11 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
395 dev->hard_mtu = net->mtu + net->hard_header_len; 395 dev->hard_mtu = net->mtu + net->hard_header_len;
396 if (dev->rx_urb_size == old_hard_mtu) { 396 if (dev->rx_urb_size == old_hard_mtu) {
397 dev->rx_urb_size = dev->hard_mtu; 397 dev->rx_urb_size = dev->hard_mtu;
398 if (dev->rx_urb_size > old_rx_urb_size) 398 if (dev->rx_urb_size > old_rx_urb_size) {
399 usbnet_pause_rx(dev);
399 usbnet_unlink_rx_urbs(dev); 400 usbnet_unlink_rx_urbs(dev);
401 usbnet_resume_rx(dev);
402 }
400 } 403 }
401 404
402 /* max qlen depend on hard_mtu and rx_urb_size */ 405 /* max qlen depend on hard_mtu and rx_urb_size */
@@ -1508,8 +1511,9 @@ static void usbnet_bh (unsigned long param)
1508 } else if (netif_running (dev->net) && 1511 } else if (netif_running (dev->net) &&
1509 netif_device_present (dev->net) && 1512 netif_device_present (dev->net) &&
1510 netif_carrier_ok(dev->net) && 1513 netif_carrier_ok(dev->net) &&
1511 !timer_pending (&dev->delay) && 1514 !timer_pending(&dev->delay) &&
1512 !test_bit (EVENT_RX_HALT, &dev->flags)) { 1515 !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
1516 !test_bit(EVENT_RX_HALT, &dev->flags)) {
1513 int temp = dev->rxq.qlen; 1517 int temp = dev->rxq.qlen;
1514 1518
1515 if (temp < RX_QLEN(dev)) { 1519 if (temp < RX_QLEN(dev)) {
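
The usbnet change brackets the URB unlink in a pause/resume pair so the bottom half cannot refill the RX queue with old-sized buffers while the resize is in flight; the added EVENT_RX_PAUSED test in usbnet_bh() is the other half of that handshake. A minimal model of the handshake follows; the names are hypothetical, and the real flag is a bit in dev->flags manipulated with set_bit()/clear_bit().

#include <stdbool.h>

static bool rx_paused;                 /* models the EVENT_RX_PAUSED bit */

static void refill_rx(void)  { /* would submit fresh RX buffers */ }
static void unlink_rx(void)  { /* would cancel in-flight RX buffers */ }

/* The MTU-change path: pause, tear down old buffers, resume. */
void change_buffer_size(void)
{
        rx_paused = true;              /* usbnet_pause_rx() */
        unlink_rx();                   /* buffers sized for the old MTU drain out */
        rx_paused = false;             /* usbnet_resume_rx(); refill uses the new size */
}

/* The bottom half only refills when not paused, mirroring the added
 * !test_bit(EVENT_RX_PAUSED, &dev->flags) condition. */
void bottom_half(void)
{
        if (!rx_paused)
                refill_rx();
}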
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index b3762822b653..1ce7420322ee 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -779,6 +779,25 @@ static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
779 return rc; 779 return rc;
780} 780}
781 781
782static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
783{
784 return 0;
785}
786
787static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
788 struct sk_buff *skb,
789 struct net_device *dev)
790{
791 struct net *net = dev_net(dev);
792
793 nf_reset(skb);
794
795 if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0)
796 skb = NULL; /* kfree_skb(skb) handled by nf code */
797
798 return skb;
799}
800
782#if IS_ENABLED(CONFIG_IPV6) 801#if IS_ENABLED(CONFIG_IPV6)
783/* neighbor handling is done with actual device; do not want 802/* neighbor handling is done with actual device; do not want
784 * to flip skb->dev for those ndisc packets. This really fails 803 * to flip skb->dev for those ndisc packets. This really fails
@@ -899,6 +918,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
899 if (need_strict) 918 if (need_strict)
900 vrf_ip6_input_dst(skb, vrf_dev, orig_iif); 919 vrf_ip6_input_dst(skb, vrf_dev, orig_iif);
901 920
921 skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
902out: 922out:
903 return skb; 923 return skb;
904} 924}
@@ -929,6 +949,7 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
929 dev_queue_xmit_nit(skb, vrf_dev); 949 dev_queue_xmit_nit(skb, vrf_dev);
930 skb_pull(skb, skb->mac_len); 950 skb_pull(skb, skb->mac_len);
931 951
952 skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
932out: 953out:
933 return skb; 954 return skb;
934} 955}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index abb9cd2df9e9..ae7455da1687 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2899,30 +2899,6 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2899 return 0; 2899 return 0;
2900} 2900}
2901 2901
2902struct net_device *vxlan_dev_create(struct net *net, const char *name,
2903 u8 name_assign_type, struct vxlan_config *conf)
2904{
2905 struct nlattr *tb[IFLA_MAX+1];
2906 struct net_device *dev;
2907 int err;
2908
2909 memset(&tb, 0, sizeof(tb));
2910
2911 dev = rtnl_create_link(net, name, name_assign_type,
2912 &vxlan_link_ops, tb);
2913 if (IS_ERR(dev))
2914 return dev;
2915
2916 err = vxlan_dev_configure(net, dev, conf);
2917 if (err < 0) {
2918 free_netdev(dev);
2919 return ERR_PTR(err);
2920 }
2921
2922 return dev;
2923}
2924EXPORT_SYMBOL_GPL(vxlan_dev_create);
2925
2926static int vxlan_newlink(struct net *src_net, struct net_device *dev, 2902static int vxlan_newlink(struct net *src_net, struct net_device *dev,
2927 struct nlattr *tb[], struct nlattr *data[]) 2903 struct nlattr *tb[], struct nlattr *data[])
2928{ 2904{
@@ -3215,6 +3191,40 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
3215 .get_link_net = vxlan_get_link_net, 3191 .get_link_net = vxlan_get_link_net,
3216}; 3192};
3217 3193
3194struct net_device *vxlan_dev_create(struct net *net, const char *name,
3195 u8 name_assign_type,
3196 struct vxlan_config *conf)
3197{
3198 struct nlattr *tb[IFLA_MAX + 1];
3199 struct net_device *dev;
3200 int err;
3201
3202 memset(&tb, 0, sizeof(tb));
3203
3204 dev = rtnl_create_link(net, name, name_assign_type,
3205 &vxlan_link_ops, tb);
3206 if (IS_ERR(dev))
3207 return dev;
3208
3209 err = vxlan_dev_configure(net, dev, conf);
3210 if (err < 0) {
3211 free_netdev(dev);
3212 return ERR_PTR(err);
3213 }
3214
3215 err = rtnl_configure_link(dev, NULL);
3216 if (err < 0) {
3217 LIST_HEAD(list_kill);
3218
3219 vxlan_dellink(dev, &list_kill);
3220 unregister_netdevice_many(&list_kill);
3221 return ERR_PTR(err);
3222 }
3223
3224 return dev;
3225}
3226EXPORT_SYMBOL_GPL(vxlan_dev_create);
3227
3218static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn, 3228static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
3219 struct net_device *dev) 3229 struct net_device *dev)
3220{ 3230{
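
The vxlan_dev_create() move is paired with a behavioural fix: once rtnl_configure_link() has run, the device is registered, so a failure can no longer be unwound with a bare free_netdev(); it needs the full dellink/unregister path. The general create-configure-publish ladder, each error branch undoing exactly what succeeded before it, is sketched below with generic stand-in names, not kernel API.

#include <errno.h>
#include <stddef.h>

struct dev { int registered; };

static struct dev *alloc_dev(void)   { static struct dev d; return &d; }
static int  configure(struct dev *d) { (void)d; return 0; }
static int  publish(struct dev *d)   { d->registered = 1; return 0; }
static void free_dev(struct dev *d)  { (void)d; }           /* free_netdev analogue */
static void unpublish(struct dev *d) { d->registered = 0; } /* dellink + unregister analogue */

struct dev *dev_create(int *err)
{
        struct dev *d = alloc_dev();
        if (!d) {
                *err = -ENOMEM;
                return NULL;
        }

        *err = configure(d);
        if (*err < 0) {
                free_dev(d);       /* nothing registered yet: a plain free suffices */
                return NULL;
        }

        *err = publish(d);
        if (*err < 0) {
                unpublish(d);      /* registered: must unwind through unregister */
                return NULL;
        }

        return d;
}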
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 9e314b791150..33ab3345d333 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -291,6 +291,17 @@ config FSL_UCC_HDLC
291 To compile this driver as a module, choose M here: the 291 To compile this driver as a module, choose M here: the
292 module will be called fsl_ucc_hdlc. 292 module will be called fsl_ucc_hdlc.
293 293
294config SLIC_DS26522
 295 tristate "SLIC Maxim DS26522 card support"
296 depends on SPI
297 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
298 help
 299 This module initializes and configures the SLIC Maxim DS26522
 300 card in T1 or E1 mode.
301
302 To compile this driver as a module, choose M here: the
303 module will be called slic_ds26522.
304
294config DSCC4_PCISYNC 305config DSCC4_PCISYNC
295 bool "Etinc PCISYNC features" 306 bool "Etinc PCISYNC features"
296 depends on DSCC4 307 depends on DSCC4
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 25fec40d4353..73c2326603fc 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_PCI200SYN) += pci200syn.o
33obj-$(CONFIG_PC300TOO) += pc300too.o 33obj-$(CONFIG_PC300TOO) += pc300too.o
34obj-$(CONFIG_IXP4XX_HSS) += ixp4xx_hss.o 34obj-$(CONFIG_IXP4XX_HSS) += ixp4xx_hss.o
35obj-$(CONFIG_FSL_UCC_HDLC) += fsl_ucc_hdlc.o 35obj-$(CONFIG_FSL_UCC_HDLC) += fsl_ucc_hdlc.o
36obj-$(CONFIG_SLIC_DS26522) += slic_ds26522.o
36 37
37clean-files := wanxlfw.inc 38clean-files := wanxlfw.inc
38$(obj)/wanxl.o: $(obj)/wanxlfw.inc 39$(obj)/wanxl.o: $(obj)/wanxlfw.inc
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
new file mode 100644
index 000000000000..d06a887a2352
--- /dev/null
+++ b/drivers/net/wan/slic_ds26522.c
@@ -0,0 +1,255 @@
1/*
2 * drivers/net/wan/slic_ds26522.c
3 *
4 * Copyright (C) 2016 Freescale Semiconductor, Inc.
5 *
 6 * Author: Zhao Qiang <qiang.zhao@nxp.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/bitrev.h>
15#include <linux/module.h>
16#include <linux/device.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/kthread.h>
20#include <linux/spi/spi.h>
21#include <linux/wait.h>
22#include <linux/param.h>
23#include <linux/delay.h>
24#include <linux/of.h>
25#include <linux/of_address.h>
26#include <linux/io.h>
27#include "slic_ds26522.h"
28
29#define DRV_NAME "ds26522"
30
31#define SLIC_TRANS_LEN 1
32#define SLIC_TWO_LEN 2
33#define SLIC_THREE_LEN 3
34
35static struct spi_device *g_spi;
36
37MODULE_LICENSE("GPL");
 38MODULE_AUTHOR("Zhao Qiang <B45475@freescale.com>");
39
 40/* The read/write address format is
41 * w/r|A13|A12|A11|A10|A9|A8|A7|A6|A5|A4|A3|A2|A1|A0|x
42 */
43static void slic_write(struct spi_device *spi, u16 addr,
44 u8 data)
45{
46 u8 temp[3];
47
48 addr = bitrev16(addr) >> 1;
49 data = bitrev8(data);
50 temp[0] = (u8)((addr >> 8) & 0x7f);
51 temp[1] = (u8)(addr & 0xfe);
52 temp[2] = data;
53
54 /* write spi addr and value */
55 spi_write(spi, &temp[0], SLIC_THREE_LEN);
56}
57
58static u8 slic_read(struct spi_device *spi, u16 addr)
59{
60 u8 temp[2];
61 u8 data;
62
63 addr = bitrev16(addr) >> 1;
64 temp[0] = (u8)(((addr >> 8) & 0x7f) | 0x80);
65 temp[1] = (u8)(addr & 0xfe);
66
67 spi_write_then_read(spi, &temp[0], SLIC_TWO_LEN, &data,
68 SLIC_TRANS_LEN);
69
70 data = bitrev8(data);
71 return data;
72}
73
74static bool get_slic_product_code(struct spi_device *spi)
75{
76 u8 device_id;
77
78 device_id = slic_read(spi, DS26522_IDR_ADDR);
79 if ((device_id & 0xf8) == 0x68)
80 return true;
81 else
82 return false;
83}
84
85static void ds26522_e1_spec_config(struct spi_device *spi)
86{
87 /* Receive E1 Mode, Framer Disabled */
88 slic_write(spi, DS26522_RMMR_ADDR, DS26522_RMMR_E1);
89
 90 /* Transmit E1 Mode, Framer Disabled */
91 slic_write(spi, DS26522_TMMR_ADDR, DS26522_TMMR_E1);
92
93 /* Receive E1 Mode Framer Enable */
94 slic_write(spi, DS26522_RMMR_ADDR,
95 slic_read(spi, DS26522_RMMR_ADDR) | DS26522_RMMR_FRM_EN);
96
97 /* Transmit E1 Mode Framer Enable */
98 slic_write(spi, DS26522_TMMR_ADDR,
99 slic_read(spi, DS26522_TMMR_ADDR) | DS26522_TMMR_FRM_EN);
100
 101 /* RCR1, receive E1 HDB3 & CCS */
102 slic_write(spi, DS26522_RCR1_ADDR,
103 DS26522_RCR1_E1_HDB3 | DS26522_RCR1_E1_CCS);
104
105 /* RSYSCLK=2.048MHz, RSYNC-Output */
106 slic_write(spi, DS26522_RIOCR_ADDR,
107 DS26522_RIOCR_2048KHZ | DS26522_RIOCR_RSIO_OUT);
108
 109 /* TCR1, transmit E1 B8ZS */
110 slic_write(spi, DS26522_TCR1_ADDR, DS26522_TCR1_TB8ZS);
111
112 /* TSYSCLK=2.048MHz, TSYNC-Output */
113 slic_write(spi, DS26522_TIOCR_ADDR,
114 DS26522_TIOCR_2048KHZ | DS26522_TIOCR_TSIO_OUT);
115
116 /* Set E1TAF */
117 slic_write(spi, DS26522_E1TAF_ADDR, DS26522_E1TAF_DEFAULT);
118
119 /* Set E1TNAF register */
120 slic_write(spi, DS26522_E1TNAF_ADDR, DS26522_E1TNAF_DEFAULT);
121
122 /* Receive E1 Mode Framer Enable & init Done */
123 slic_write(spi, DS26522_RMMR_ADDR, slic_read(spi, DS26522_RMMR_ADDR) |
124 DS26522_RMMR_INIT_DONE);
125
126 /* Transmit E1 Mode Framer Enable & init Done */
127 slic_write(spi, DS26522_TMMR_ADDR, slic_read(spi, DS26522_TMMR_ADDR) |
128 DS26522_TMMR_INIT_DONE);
129
130 /* Configure LIU E1 mode */
131 slic_write(spi, DS26522_LTRCR_ADDR, DS26522_LTRCR_E1);
132
 133 /* E1 Mode default 75 ohm w/Transmit Impedance Matching */
134 slic_write(spi, DS26522_LTITSR_ADDR,
135 DS26522_LTITSR_TLIS_75OHM | DS26522_LTITSR_LBOS_75OHM);
136
 137 /* E1 Mode default 75 ohm Long Haul w/Receive Impedance Matching */
138 slic_write(spi, DS26522_LRISMR_ADDR,
139 DS26522_LRISMR_75OHM | DS26522_LRISMR_MAX);
140
141 /* Enable Transmit output */
142 slic_write(spi, DS26522_LMCR_ADDR, DS26522_LMCR_TE);
143}
144
145static int slic_ds26522_init_configure(struct spi_device *spi)
146{
147 u16 addr;
148
149 /* set clock */
150 slic_write(spi, DS26522_GTCCR_ADDR, DS26522_GTCCR_BPREFSEL_REFCLKIN |
151 DS26522_GTCCR_BFREQSEL_2048KHZ |
152 DS26522_GTCCR_FREQSEL_2048KHZ);
153 slic_write(spi, DS26522_GTCR2_ADDR, DS26522_GTCR2_TSSYNCOUT);
154 slic_write(spi, DS26522_GFCR_ADDR, DS26522_GFCR_BPCLK_2048KHZ);
155
156 /* set gtcr */
157 slic_write(spi, DS26522_GTCR1_ADDR, DS26522_GTCR1);
158
159 /* Global LIU Software Reset Register */
160 slic_write(spi, DS26522_GLSRR_ADDR, DS26522_GLSRR_RESET);
161
162 /* Global Framer and BERT Software Reset Register */
163 slic_write(spi, DS26522_GFSRR_ADDR, DS26522_GFSRR_RESET);
164
165 usleep_range(100, 120);
166
167 slic_write(spi, DS26522_GLSRR_ADDR, DS26522_GLSRR_NORMAL);
168 slic_write(spi, DS26522_GFSRR_ADDR, DS26522_GFSRR_NORMAL);
169
 170 /* Perform RX/TX SRESET, reset receiver */
171 slic_write(spi, DS26522_RMMR_ADDR, DS26522_RMMR_SFTRST);
172
 173 /* Reset transceiver */
174 slic_write(spi, DS26522_TMMR_ADDR, DS26522_TMMR_SFTRST);
175
176 usleep_range(100, 120);
177
178 /* Zero all Framer Registers */
179 for (addr = DS26522_RF_ADDR_START; addr <= DS26522_RF_ADDR_END;
180 addr++)
181 slic_write(spi, addr, 0);
182
183 for (addr = DS26522_TF_ADDR_START; addr <= DS26522_TF_ADDR_END;
184 addr++)
185 slic_write(spi, addr, 0);
186
187 for (addr = DS26522_LIU_ADDR_START; addr <= DS26522_LIU_ADDR_END;
188 addr++)
189 slic_write(spi, addr, 0);
190
191 for (addr = DS26522_BERT_ADDR_START; addr <= DS26522_BERT_ADDR_END;
192 addr++)
193 slic_write(spi, addr, 0);
194
 195 /* set up the DS26522 for the E1 specification */
196 ds26522_e1_spec_config(spi);
197
198 slic_write(spi, DS26522_GTCR1_ADDR, 0x00);
199
200 return 0;
201}
202
203static int slic_ds26522_remove(struct spi_device *spi)
204{
205 pr_info("DS26522 module uninstalled\n");
206 return 0;
207}
208
209static int slic_ds26522_probe(struct spi_device *spi)
210{
211 int ret = 0;
212
213 g_spi = spi;
214 spi->bits_per_word = 8;
215
216 if (!get_slic_product_code(spi))
217 return ret;
218
219 ret = slic_ds26522_init_configure(spi);
220 if (ret == 0)
221 pr_info("DS26522 cs%d configurated\n", spi->chip_select);
222
223 return ret;
224}
225
226static const struct of_device_id slic_ds26522_match[] = {
227 {
228 .compatible = "maxim,ds26522",
229 },
230 {},
231};
232
233static struct spi_driver slic_ds26522_driver = {
234 .driver = {
235 .name = "ds26522",
236 .bus = &spi_bus_type,
237 .owner = THIS_MODULE,
238 .of_match_table = slic_ds26522_match,
239 },
240 .probe = slic_ds26522_probe,
241 .remove = slic_ds26522_remove,
242};
243
244static int __init slic_ds26522_init(void)
245{
246 return spi_register_driver(&slic_ds26522_driver);
247}
248
249static void __exit slic_ds26522_exit(void)
250{
251 spi_unregister_driver(&slic_ds26522_driver);
252}
253
254module_init(slic_ds26522_init);
255module_exit(slic_ds26522_exit);
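
The SPI frame format documented in slic_write() above (w/r|A13..A0|x) is produced by bit-reversing the 16-bit register address, shifting once, and splitting into two bytes with the read flag in the top bit. A standalone worked example of the same transform, printing the two address bytes of a read of the ID register; rev8/rev16 here are local stand-ins for the kernel's bitrev8()/bitrev16().

#include <stdint.h>
#include <stdio.h>

static uint8_t rev8(uint8_t b)
{
        uint8_t r = 0;
        int i;

        for (i = 0; i < 8; i++)
                r |= (uint8_t)(((b >> i) & 1) << (7 - i));
        return r;
}

static uint16_t rev16(uint16_t w)
{
        return (uint16_t)((rev8((uint8_t)(w & 0xff)) << 8) |
                          rev8((uint8_t)(w >> 8)));
}

int main(void)
{
        uint16_t addr = 0xf8;                 /* DS26522_IDR_ADDR */
        uint16_t wire = rev16(addr) >> 1;     /* same transform as slic_read() */
        uint8_t b0 = (uint8_t)(((wire >> 8) & 0x7f) | 0x80); /* 0x80 = read flag */
        uint8_t b1 = (uint8_t)(wire & 0xfe);                 /* low bit is the pad 'x' */

        printf("read frame: %02x %02x\n", b0, b1); /* prints: read frame: 8f 80 */
        return 0;
}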
diff --git a/drivers/net/wan/slic_ds26522.h b/drivers/net/wan/slic_ds26522.h
new file mode 100644
index 000000000000..22aa0ecbd9fd
--- /dev/null
+++ b/drivers/net/wan/slic_ds26522.h
@@ -0,0 +1,134 @@
1/*
2 * drivers/tdm/line_ctrl/slic_ds26522.h
3 *
4 * Copyright 2016 Freescale Semiconductor, Inc.
5 *
6 * Author: Zhao Qiang <B45475@freescale.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#define DS26522_RF_ADDR_START 0x00
15#define DS26522_RF_ADDR_END 0xef
16#define DS26522_GLB_ADDR_START 0xf0
17#define DS26522_GLB_ADDR_END 0xff
18#define DS26522_TF_ADDR_START 0x100
19#define DS26522_TF_ADDR_END 0x1ef
20#define DS26522_LIU_ADDR_START 0x1000
21#define DS26522_LIU_ADDR_END 0x101f
22#define DS26522_TEST_ADDR_START 0x1008
23#define DS26522_TEST_ADDR_END 0x101f
24#define DS26522_BERT_ADDR_START 0x1100
25#define DS26522_BERT_ADDR_END 0x110f
26
27#define DS26522_RMMR_ADDR 0x80
28#define DS26522_RCR1_ADDR 0x81
29#define DS26522_RCR3_ADDR 0x83
30#define DS26522_RIOCR_ADDR 0x84
31
32#define DS26522_GTCR1_ADDR 0xf0
33#define DS26522_GFCR_ADDR 0xf1
34#define DS26522_GTCR2_ADDR 0xf2
35#define DS26522_GTCCR_ADDR 0xf3
36#define DS26522_GLSRR_ADDR 0xf5
37#define DS26522_GFSRR_ADDR 0xf6
38#define DS26522_IDR_ADDR 0xf8
39
40#define DS26522_E1TAF_ADDR 0x164
41#define DS26522_E1TNAF_ADDR 0x165
42#define DS26522_TMMR_ADDR 0x180
43#define DS26522_TCR1_ADDR 0x181
44#define DS26522_TIOCR_ADDR 0x184
45
46#define DS26522_LTRCR_ADDR 0x1000
47#define DS26522_LTITSR_ADDR 0x1001
48#define DS26522_LMCR_ADDR 0x1002
49#define DS26522_LRISMR_ADDR 0x1007
50
51#define MAX_NUM_OF_CHANNELS 8
52#define PQ_MDS_8E1T1_BRD_REV 0x00
53#define PQ_MDS_8E1T1_PLD_REV 0x00
54
55#define DS26522_GTCCR_BPREFSEL_REFCLKIN 0xa0
56#define DS26522_GTCCR_BFREQSEL_1544KHZ 0x08
57#define DS26522_GTCCR_FREQSEL_1544KHZ 0x04
58#define DS26522_GTCCR_BFREQSEL_2048KHZ 0x00
59#define DS26522_GTCCR_FREQSEL_2048KHZ 0x00
60
61#define DS26522_GFCR_BPCLK_2048KHZ 0x00
62
63#define DS26522_GTCR2_TSSYNCOUT 0x02
64#define DS26522_GTCR1 0x00
65
66#define DS26522_GFSRR_RESET 0x01
67#define DS26522_GFSRR_NORMAL 0x00
68
69#define DS26522_GLSRR_RESET 0x01
70#define DS26522_GLSRR_NORMAL 0x00
71
72#define DS26522_RMMR_SFTRST 0x02
73#define DS26522_RMMR_FRM_EN 0x80
74#define DS26522_RMMR_INIT_DONE 0x40
75#define DS26522_RMMR_T1 0x00
76#define DS26522_RMMR_E1 0x01
77
78#define DS26522_E1TAF_DEFAULT 0x1b
79#define DS26522_E1TNAF_DEFAULT 0x40
80
81#define DS26522_TMMR_SFTRST 0x02
82#define DS26522_TMMR_FRM_EN 0x80
83#define DS26522_TMMR_INIT_DONE 0x40
84#define DS26522_TMMR_T1 0x00
85#define DS26522_TMMR_E1 0x01
86
87#define DS26522_RCR1_T1_SYNCT 0x80
88#define DS26522_RCR1_T1_RB8ZS 0x40
89#define DS26522_RCR1_T1_SYNCC 0x08
90
91#define DS26522_RCR1_E1_HDB3 0x40
92#define DS26522_RCR1_E1_CCS 0x20
93
94#define DS26522_RIOCR_1544KHZ 0x00
95#define DS26522_RIOCR_2048KHZ 0x10
96#define DS26522_RIOCR_RSIO_OUT 0x00
97
98#define DS26522_RCR3_FLB 0x01
99
100#define DS26522_TIOCR_1544KHZ 0x00
101#define DS26522_TIOCR_2048KHZ 0x10
102#define DS26522_TIOCR_TSIO_OUT 0x04
103
104#define DS26522_TCR1_TB8ZS 0x04
105
106#define DS26522_LTRCR_T1 0x02
107#define DS26522_LTRCR_E1 0x00
108
109#define DS26522_LTITSR_TLIS_75OHM 0x00
110#define DS26522_LTITSR_LBOS_75OHM 0x00
111#define DS26522_LTITSR_TLIS_100OHM 0x10
112#define DS26522_LTITSR_TLIS_0DB_CSU 0x00
113
114#define DS26522_LRISMR_75OHM 0x00
115#define DS26522_LRISMR_100OHM 0x10
116#define DS26522_LRISMR_MAX 0x03
117
118#define DS26522_LMCR_TE 0x01
119
120enum line_rate {
121 LINE_RATE_T1, /* T1 line rate (1.544 Mbps) */
122 LINE_RATE_E1 /* E1 line rate (2.048 Mbps) */
123};
124
125enum tdm_trans_mode {
126 NORMAL = 0,
127 FRAMER_LB
128};
129
130enum card_support_type {
131 LM_CARD = 0,
132 DS26522_CARD,
133 NO_CARD
134};
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index c6291c20f7ec..dfb3db0ee5d1 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1158,7 +1158,7 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
1158 } 1158 }
1159 1159
1160 ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "", 1160 ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
1161 ar->running_fw->fw_file.fw_features, 1161 fw_file->fw_features,
1162 sizeof(fw_file->fw_features)); 1162 sizeof(fw_file->fw_features));
1163 break; 1163 break;
1164 case ATH10K_FW_IE_FW_IMAGE: 1164 case ATH10K_FW_IE_FW_IMAGE:
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 3b35c7ab5680..80e645302b54 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1905,7 +1905,6 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
1905 return; 1905 return;
1906 } 1906 }
1907 } 1907 }
1908 ath10k_htt_rx_msdu_buff_replenish(htt);
1909} 1908}
1910 1909
1911static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar, 1910static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 3a170b1bf99b..ebc12c521fe0 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -708,10 +708,10 @@ static int ath10k_peer_create(struct ath10k *ar,
708 708
709 peer = ath10k_peer_find(ar, vdev_id, addr); 709 peer = ath10k_peer_find(ar, vdev_id, addr);
710 if (!peer) { 710 if (!peer) {
711 spin_unlock_bh(&ar->data_lock);
711 ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n", 712 ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
712 addr, vdev_id); 713 addr, vdev_id);
713 ath10k_wmi_peer_delete(ar, vdev_id, addr); 714 ath10k_wmi_peer_delete(ar, vdev_id, addr);
714 spin_unlock_bh(&ar->data_lock);
715 return -ENOENT; 715 return -ENOENT;
716 } 716 }
717 717
@@ -3858,12 +3858,16 @@ void __ath10k_scan_finish(struct ath10k *ar)
3858 break; 3858 break;
3859 case ATH10K_SCAN_RUNNING: 3859 case ATH10K_SCAN_RUNNING:
3860 case ATH10K_SCAN_ABORTING: 3860 case ATH10K_SCAN_ABORTING:
3861 if (!ar->scan.is_roc) 3861 if (!ar->scan.is_roc) {
3862 ieee80211_scan_completed(ar->hw, 3862 struct cfg80211_scan_info info = {
3863 (ar->scan.state == 3863 .aborted = (ar->scan.state ==
3864 ATH10K_SCAN_ABORTING)); 3864 ATH10K_SCAN_ABORTING),
3865 else if (ar->scan.roc_notify) 3865 };
3866
3867 ieee80211_scan_completed(ar->hw, &info);
3868 } else if (ar->scan.roc_notify) {
3866 ieee80211_remain_on_channel_expired(ar->hw); 3869 ieee80211_remain_on_channel_expired(ar->hw);
3870 }
3867 /* fall through */ 3871 /* fall through */
3868 case ATH10K_SCAN_STARTING: 3872 case ATH10K_SCAN_STARTING:
3869 ar->scan.state = ATH10K_SCAN_IDLE; 3873 ar->scan.state = ATH10K_SCAN_IDLE;
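
The scan-completion churn in this and the following drivers is a single API migration: cfg80211_scan_done() and ieee80211_scan_completed() now take a struct cfg80211_scan_info pointer instead of a bare aborted bool, so future result fields can be added without touching every driver again. A plain-C model of why the struct-pointer form scales better; the types here are local stand-ins, though the real struct did later grow fields such as the scan start time.

#include <stdbool.h>
#include <stdio.h>

struct scan_info {
        bool aborted;
        /* new members can be appended here without changing any call site */
};

static void scan_completed(const struct scan_info *info)
{
        printf("scan %s\n", info->aborted ? "aborted" : "completed");
}

int main(void)
{
        /* Designated initializer, matching the pattern in the diffs:
         * unnamed fields default to zero, so existing callers stay
         * correct as the struct grows. */
        struct scan_info info = { .aborted = false };

        scan_completed(&info);
        return 0;
}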
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 4e11ba06f089..ef5b40ef6d67 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -859,7 +859,11 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
859 struct ath6kl *ar = vif->ar; 859 struct ath6kl *ar = vif->ar;
860 860
861 if (vif->scan_req) { 861 if (vif->scan_req) {
862 cfg80211_scan_done(vif->scan_req, true); 862 struct cfg80211_scan_info info = {
863 .aborted = true,
864 };
865
866 cfg80211_scan_done(vif->scan_req, &info);
863 vif->scan_req = NULL; 867 vif->scan_req = NULL;
864 } 868 }
865 869
@@ -1069,6 +1073,9 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy,
1069void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted) 1073void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted)
1070{ 1074{
1071 struct ath6kl *ar = vif->ar; 1075 struct ath6kl *ar = vif->ar;
1076 struct cfg80211_scan_info info = {
1077 .aborted = aborted,
1078 };
1072 int i; 1079 int i;
1073 1080
1074 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status%s\n", __func__, 1081 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status%s\n", __func__,
@@ -1089,7 +1096,7 @@ void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted)
1089 } 1096 }
1090 1097
1091out: 1098out:
1092 cfg80211_scan_done(vif->scan_req, aborted); 1099 cfg80211_scan_done(vif->scan_req, &info);
1093 vif->scan_req = NULL; 1100 vif->scan_req = NULL;
1094} 1101}
1095 1102
@@ -3614,7 +3621,11 @@ void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready)
3614 } 3621 }
3615 3622
3616 if (vif->scan_req) { 3623 if (vif->scan_req) {
3617 cfg80211_scan_done(vif->scan_req, true); 3624 struct cfg80211_scan_info info = {
3625 .aborted = true,
3626 };
3627
3628 cfg80211_scan_done(vif->scan_req, &info);
3618 vif->scan_req = NULL; 3629 vif->scan_req = NULL;
3619 } 3630 }
3620 3631
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index e56bafcf5864..57e26a640477 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -960,6 +960,9 @@ void ath_roc_complete(struct ath_softc *sc, enum ath_roc_complete_reason reason)
960void ath_scan_complete(struct ath_softc *sc, bool abort) 960void ath_scan_complete(struct ath_softc *sc, bool abort)
961{ 961{
962 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 962 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
963 struct cfg80211_scan_info info = {
964 .aborted = abort,
965 };
963 966
964 if (abort) 967 if (abort)
965 ath_dbg(common, CHAN_CTX, "HW scan aborted\n"); 968 ath_dbg(common, CHAN_CTX, "HW scan aborted\n");
@@ -969,7 +972,7 @@ void ath_scan_complete(struct ath_softc *sc, bool abort)
969 sc->offchannel.scan_req = NULL; 972 sc->offchannel.scan_req = NULL;
970 sc->offchannel.scan_vif = NULL; 973 sc->offchannel.scan_vif = NULL;
971 sc->offchannel.state = ATH_OFFCHANNEL_IDLE; 974 sc->offchannel.state = ATH_OFFCHANNEL_IDLE;
972 ieee80211_scan_completed(sc->hw, abort); 975 ieee80211_scan_completed(sc->hw, &info);
973 clear_bit(ATH_OP_SCANNING, &common->op_flags); 976 clear_bit(ATH_OP_SCANNING, &common->op_flags);
974 spin_lock_bh(&sc->chan_lock); 977 spin_lock_bh(&sc->chan_lock);
975 if (test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) 978 if (test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 9272ca90632b..80ff69f99229 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -1122,12 +1122,12 @@ enum {
1122#define AR9300_NUM_GPIO 16 1122#define AR9300_NUM_GPIO 16
1123#define AR9330_NUM_GPIO 16 1123#define AR9330_NUM_GPIO 16
1124#define AR9340_NUM_GPIO 23 1124#define AR9340_NUM_GPIO 23
1125#define AR9462_NUM_GPIO 10 1125#define AR9462_NUM_GPIO 14
1126#define AR9485_NUM_GPIO 12 1126#define AR9485_NUM_GPIO 12
1127#define AR9531_NUM_GPIO 18 1127#define AR9531_NUM_GPIO 18
1128#define AR9550_NUM_GPIO 24 1128#define AR9550_NUM_GPIO 24
1129#define AR9561_NUM_GPIO 23 1129#define AR9561_NUM_GPIO 23
1130#define AR9565_NUM_GPIO 12 1130#define AR9565_NUM_GPIO 14
1131#define AR9580_NUM_GPIO 16 1131#define AR9580_NUM_GPIO 16
1132#define AR7010_NUM_GPIO 16 1132#define AR7010_NUM_GPIO 16
1133 1133
@@ -1139,12 +1139,12 @@ enum {
1139#define AR9300_GPIO_MASK 0x0000F4FF 1139#define AR9300_GPIO_MASK 0x0000F4FF
1140#define AR9330_GPIO_MASK 0x0000F4FF 1140#define AR9330_GPIO_MASK 0x0000F4FF
1141#define AR9340_GPIO_MASK 0x0000000F 1141#define AR9340_GPIO_MASK 0x0000000F
1142#define AR9462_GPIO_MASK 0x000003FF 1142#define AR9462_GPIO_MASK 0x00003FFF
1143#define AR9485_GPIO_MASK 0x00000FFF 1143#define AR9485_GPIO_MASK 0x00000FFF
1144#define AR9531_GPIO_MASK 0x0000000F 1144#define AR9531_GPIO_MASK 0x0000000F
1145#define AR9550_GPIO_MASK 0x0000000F 1145#define AR9550_GPIO_MASK 0x0000000F
1146#define AR9561_GPIO_MASK 0x0000000F 1146#define AR9561_GPIO_MASK 0x0000000F
1147#define AR9565_GPIO_MASK 0x00000FFF 1147#define AR9565_GPIO_MASK 0x00003FFF
1148#define AR9580_GPIO_MASK 0x0000F4FF 1148#define AR9580_GPIO_MASK 0x0000F4FF
1149#define AR7010_GPIO_MASK 0x0000FFFF 1149#define AR7010_GPIO_MASK 0x0000FFFF
1150 1150
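
For the two chips corrected above, the widened masks are simply the low NUM_GPIO bits set: 14 GPIOs gives 0x3FFF. A one-line model of that relation (note it does not hold for every entry in the table; AR9300's 0xF4FF, for instance, is deliberately non-contiguous):

#include <stdint.h>
#include <stdio.h>

/* Low-n-bits mask, the userspace cousin of the kernel's GENMASK(n - 1, 0). */
static uint32_t gpio_mask(unsigned int num_gpio)
{
        return (num_gpio >= 32) ? 0xffffffffu : ((1u << num_gpio) - 1);
}

int main(void)
{
        printf("AR9462/AR9565: 14 GPIOs -> 0x%08x\n", gpio_mask(14)); /* 0x00003fff */
        return 0;
}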
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 62bf9331bd7f..f0e1175fb76a 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1369,7 +1369,11 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
1369 mutex_lock(&wil->mutex); 1369 mutex_lock(&wil->mutex);
1370 started = wil_p2p_stop_discovery(wil); 1370 started = wil_p2p_stop_discovery(wil);
1371 if (started && wil->scan_request) { 1371 if (started && wil->scan_request) {
1372 cfg80211_scan_done(wil->scan_request, 1); 1372 struct cfg80211_scan_info info = {
1373 .aborted = true,
1374 };
1375
1376 cfg80211_scan_done(wil->scan_request, &info);
1373 wil->scan_request = NULL; 1377 wil->scan_request = NULL;
1374 wil->radio_wdev = wil->wdev; 1378 wil->radio_wdev = wil->wdev;
1375 } 1379 }
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 8e31d755bbee..4bc92e54984a 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -850,10 +850,14 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
850 mutex_unlock(&wil->wmi_mutex); 850 mutex_unlock(&wil->wmi_mutex);
851 851
852 if (wil->scan_request) { 852 if (wil->scan_request) {
853 struct cfg80211_scan_info info = {
854 .aborted = true,
855 };
856
853 wil_dbg_misc(wil, "Abort scan_request 0x%p\n", 857 wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
854 wil->scan_request); 858 wil->scan_request);
855 del_timer_sync(&wil->scan_timer); 859 del_timer_sync(&wil->scan_timer);
856 cfg80211_scan_done(wil->scan_request, true); 860 cfg80211_scan_done(wil->scan_request, &info);
857 wil->scan_request = NULL; 861 wil->scan_request = NULL;
858 } 862 }
859 863
@@ -1049,10 +1053,14 @@ int __wil_down(struct wil6210_priv *wil)
1049 (void)wil_p2p_stop_discovery(wil); 1053 (void)wil_p2p_stop_discovery(wil);
1050 1054
1051 if (wil->scan_request) { 1055 if (wil->scan_request) {
1056 struct cfg80211_scan_info info = {
1057 .aborted = true,
1058 };
1059
1052 wil_dbg_misc(wil, "Abort scan_request 0x%p\n", 1060 wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
1053 wil->scan_request); 1061 wil->scan_request);
1054 del_timer_sync(&wil->scan_timer); 1062 del_timer_sync(&wil->scan_timer);
1055 cfg80211_scan_done(wil->scan_request, true); 1063 cfg80211_scan_done(wil->scan_request, &info);
1056 wil->scan_request = NULL; 1064 wil->scan_request = NULL;
1057 } 1065 }
1058 1066
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
index 213b8259638c..e0f8aa0ebfac 100644
--- a/drivers/net/wireless/ath/wil6210/p2p.c
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -252,8 +252,12 @@ void wil_p2p_search_expired(struct work_struct *work)
252 mutex_unlock(&wil->mutex); 252 mutex_unlock(&wil->mutex);
253 253
254 if (started) { 254 if (started) {
255 struct cfg80211_scan_info info = {
256 .aborted = false,
257 };
258
255 mutex_lock(&wil->p2p_wdev_mutex); 259 mutex_lock(&wil->p2p_wdev_mutex);
256 cfg80211_scan_done(wil->scan_request, 0); 260 cfg80211_scan_done(wil->scan_request, &info);
257 wil->scan_request = NULL; 261 wil->scan_request = NULL;
258 wil->radio_wdev = wil->wdev; 262 wil->radio_wdev = wil->wdev;
259 mutex_unlock(&wil->p2p_wdev_mutex); 263 mutex_unlock(&wil->p2p_wdev_mutex);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index b80c5d850e1e..4d92541913c0 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -426,15 +426,17 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
426{ 426{
427 if (wil->scan_request) { 427 if (wil->scan_request) {
428 struct wmi_scan_complete_event *data = d; 428 struct wmi_scan_complete_event *data = d;
429 bool aborted = (data->status != WMI_SCAN_SUCCESS); 429 struct cfg80211_scan_info info = {
430 .aborted = (data->status != WMI_SCAN_SUCCESS),
431 };
430 432
431 wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status); 433 wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
432 wil_dbg_misc(wil, "Complete scan_request 0x%p aborted %d\n", 434 wil_dbg_misc(wil, "Complete scan_request 0x%p aborted %d\n",
433 wil->scan_request, aborted); 435 wil->scan_request, info.aborted);
434 436
435 del_timer_sync(&wil->scan_timer); 437 del_timer_sync(&wil->scan_timer);
436 mutex_lock(&wil->p2p_wdev_mutex); 438 mutex_lock(&wil->p2p_wdev_mutex);
437 cfg80211_scan_done(wil->scan_request, aborted); 439 cfg80211_scan_done(wil->scan_request, &info);
438 wil->radio_wdev = wil->wdev; 440 wil->radio_wdev = wil->wdev;
439 mutex_unlock(&wil->p2p_wdev_mutex); 441 mutex_unlock(&wil->p2p_wdev_mutex);
440 wil->scan_request = NULL; 442 wil->scan_request = NULL;
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 7c108047fb46..0e180677c7fc 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -1922,6 +1922,9 @@ static void at76_dwork_hw_scan(struct work_struct *work)
1922{ 1922{
1923 struct at76_priv *priv = container_of(work, struct at76_priv, 1923 struct at76_priv *priv = container_of(work, struct at76_priv,
1924 dwork_hw_scan.work); 1924 dwork_hw_scan.work);
1925 struct cfg80211_scan_info info = {
1926 .aborted = false,
1927 };
1925 int ret; 1928 int ret;
1926 1929
1927 if (priv->device_unplugged) 1930 if (priv->device_unplugged)
@@ -1948,7 +1951,7 @@ static void at76_dwork_hw_scan(struct work_struct *work)
1948 1951
1949 mutex_unlock(&priv->mtx); 1952 mutex_unlock(&priv->mtx);
1950 1953
1951 ieee80211_scan_completed(priv->hw, false); 1954 ieee80211_scan_completed(priv->hw, &info);
1952 1955
1953 ieee80211_wake_queues(priv->hw); 1956 ieee80211_wake_queues(priv->hw);
1954} 1957}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 264bd638a3d9..afe2b202040a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -775,9 +775,13 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
775 if (!aborted) 775 if (!aborted)
776 cfg80211_sched_scan_results(cfg_to_wiphy(cfg)); 776 cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
777 } else if (scan_request) { 777 } else if (scan_request) {
778 struct cfg80211_scan_info info = {
779 .aborted = aborted,
780 };
781
778 brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n", 782 brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
779 aborted ? "Aborted" : "Done"); 783 aborted ? "Aborted" : "Done");
780 cfg80211_scan_done(scan_request, aborted); 784 cfg80211_scan_done(scan_request, &info);
781 } 785 }
782 if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) 786 if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
783 brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n"); 787 brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n");
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index eb24b9241bb2..140b6ea8f7cc 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -1305,10 +1305,14 @@ il_send_scan_abort(struct il_priv *il)
1305static void 1305static void
1306il_complete_scan(struct il_priv *il, bool aborted) 1306il_complete_scan(struct il_priv *il, bool aborted)
1307{ 1307{
1308 struct cfg80211_scan_info info = {
1309 .aborted = aborted,
1310 };
1311
1308 /* check if scan was requested from mac80211 */ 1312 /* check if scan was requested from mac80211 */
1309 if (il->scan_request) { 1313 if (il->scan_request) {
1310 D_SCAN("Complete scan in mac80211\n"); 1314 D_SCAN("Complete scan in mac80211\n");
1311 ieee80211_scan_completed(il->hw, aborted); 1315 ieee80211_scan_completed(il->hw, &info);
1312 } 1316 }
1313 1317
1314 il->scan_vif = NULL; 1318 il->scan_vif = NULL;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
index d01766f16175..17e6a32384d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
@@ -94,10 +94,14 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
94 94
95static void iwl_complete_scan(struct iwl_priv *priv, bool aborted) 95static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
96{ 96{
97 struct cfg80211_scan_info info = {
98 .aborted = aborted,
99 };
100
97 /* check if scan was requested from mac80211 */ 101 /* check if scan was requested from mac80211 */
98 if (priv->scan_request) { 102 if (priv->scan_request) {
99 IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n"); 103 IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
100 ieee80211_scan_completed(priv->hw, aborted); 104 ieee80211_scan_completed(priv->hw, &info);
101 } 105 }
102 106
103 priv->scan_type = IWL_SCAN_NORMAL; 107 priv->scan_type = IWL_SCAN_NORMAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index e5f267b21316..18a8474b5760 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -3851,8 +3851,8 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
3851 if (idx != 0) 3851 if (idx != 0)
3852 return -ENOENT; 3852 return -ENOENT;
3853 3853
3854 if (fw_has_capa(&mvm->fw->ucode_capa, 3854 if (!fw_has_capa(&mvm->fw->ucode_capa,
3855 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) 3855 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3856 return -ENOENT; 3856 return -ENOENT;
3857 3857
3858 mutex_lock(&mvm->mutex); 3858 mutex_lock(&mvm->mutex);
@@ -3898,8 +3898,8 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
3898 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3898 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3899 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 3899 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3900 3900
3901 if (fw_has_capa(&mvm->fw->ucode_capa, 3901 if (!fw_has_capa(&mvm->fw->ucode_capa,
3902 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) 3902 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3903 return; 3903 return;
3904 3904
3905 /* if beacon filtering isn't on mac80211 does it anyway */ 3905 /* if beacon filtering isn't on mac80211 does it anyway */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index ac2c5718e454..2c61516d06ff 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -581,7 +581,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
581 struct iwl_rx_mpdu_desc *desc) 581 struct iwl_rx_mpdu_desc *desc)
582{ 582{
583 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 583 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
584 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 584 struct iwl_mvm_sta *mvm_sta;
585 struct iwl_mvm_baid_data *baid_data; 585 struct iwl_mvm_baid_data *baid_data;
586 struct iwl_mvm_reorder_buffer *buffer; 586 struct iwl_mvm_reorder_buffer *buffer;
587 struct sk_buff *tail; 587 struct sk_buff *tail;
@@ -604,6 +604,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
604 if (WARN_ON(IS_ERR_OR_NULL(sta))) 604 if (WARN_ON(IS_ERR_OR_NULL(sta)))
605 return false; 605 return false;
606 606
607 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
608
607 /* not a data packet */ 609 /* not a data packet */
608 if (!ieee80211_is_data_qos(hdr->frame_control) || 610 if (!ieee80211_is_data_qos(hdr->frame_control) ||
609 is_multicast_ether_addr(hdr->addr1)) 611 is_multicast_ether_addr(hdr->addr1))
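
The rxmq.c hunk above is a use-before-check fix: iwl_mvm_sta_from_mac80211(sta) was evaluated in the declaration, before the WARN_ON(IS_ERR_OR_NULL(sta)) guard ran. The fix declares the pointer first and derives it only after the guard. The same shape in miniature, with hypothetical types; the kernel helper is a container_of-style cast.

#include <stddef.h>

struct sta      { int id; };
struct priv_sta { struct sta base; int extra; };

/* Only legal once s is known to be a valid pointer. */
static struct priv_sta *priv_from(struct sta *s)
{
        return (struct priv_sta *)s;
}

int handle(struct sta *s)
{
        struct priv_sta *p;      /* declared, but not derived from s yet */

        if (s == NULL)
                return -1;       /* the WARN_ON(IS_ERR_OR_NULL(sta)) analogue */

        p = priv_from(s);        /* safe: the guard has already run */
        return p->base.id;
}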
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 6f609dd5c222..1cac10c5d818 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -391,13 +391,16 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
 		ieee80211_sched_scan_stopped(mvm->hw);
 		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
 	} else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
+		struct cfg80211_scan_info info = {
+			.aborted = aborted,
+		};
+
 		IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
 			       aborted ? "aborted" : "completed",
 			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
 
 		mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
-		ieee80211_scan_completed(mvm->hw,
-				scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
+		ieee80211_scan_completed(mvm->hw, &info);
 		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
 		del_timer(&mvm->scan_timer);
 	} else {
@@ -1222,7 +1225,7 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
 	return -EIO;
 }
 
-#define SCAN_TIMEOUT (16 * HZ)
+#define SCAN_TIMEOUT (20 * HZ)
 
 void iwl_mvm_scan_timeout(unsigned long data)
 {
@@ -1430,7 +1433,11 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
 
 	/* if the scan is already stopping, we don't need to notify mac80211 */
 	if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
-		ieee80211_scan_completed(mvm->hw, aborted);
+		struct cfg80211_scan_info info = {
+			.aborted = aborted,
+		};
+
+		ieee80211_scan_completed(mvm->hw, &info);
 		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
 		del_timer(&mvm->scan_timer);
 	} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
@@ -1564,7 +1571,11 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
 
 	uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
 	if (uid >= 0) {
-		ieee80211_scan_completed(mvm->hw, true);
+		struct cfg80211_scan_info info = {
+			.aborted = true,
+		};
+
+		ieee80211_scan_completed(mvm->hw, &info);
 		mvm->scan_uid_status[uid] = 0;
 	}
 	uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
@@ -1585,8 +1596,13 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
 			mvm->scan_uid_status[i] = 0;
 		}
 	} else {
-		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
-			ieee80211_scan_completed(mvm->hw, true);
+		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
+			struct cfg80211_scan_info info = {
+				.aborted = true,
+			};
+
+			ieee80211_scan_completed(mvm->hw, &info);
+		}
 
 		/* Sched scan will be restarted by mac80211 in
 		 * restart_hw, so do not report if FW is about to be
@@ -1629,8 +1645,13 @@ out:
 	 */
 		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
 		del_timer(&mvm->scan_timer);
-		if (notify)
-			ieee80211_scan_completed(mvm->hw, true);
+		if (notify) {
+			struct cfg80211_scan_info info = {
+				.aborted = true,
+			};
+
+			ieee80211_scan_completed(mvm->hw, &info);
+		}
 	} else if (notify) {
 		ieee80211_sched_scan_stopped(mvm->hw);
 		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
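
Every scan-completion hunk in this series follows the same mac80211/cfg80211 API change: the completion callbacks now take a struct cfg80211_scan_info instead of a bare "aborted" bool. A minimal sketch of the new calling convention (variable names assumed, not taken from any one driver):

	struct cfg80211_scan_info info = {
		.aborted = aborted,	/* the old bool argument, now a field */
	};

	ieee80211_scan_completed(hw, &info);	/* mac80211 drivers */
	cfg80211_scan_done(request, &info);	/* cfg80211 drivers */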
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index fea4d3437e2f..b23ab4a4504f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1852,12 +1852,18 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
 		    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
 		u8 sta_id = mvmvif->ap_sta_id;
 
+		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
+					    lockdep_is_held(&mvm->mutex));
+
 		/*
 		 * It is possible that the 'sta' parameter is NULL,
 		 * for example when a GTK is removed - the sta_id will then
 		 * be the AP ID, and no station was passed by mac80211.
 		 */
-		return iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+		if (IS_ERR_OR_NULL(sta))
+			return NULL;
+
+		return iwl_mvm_sta_from_mac80211(sta);
 	}
 
 	return NULL;
@@ -1955,6 +1961,14 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
 	struct ieee80211_key_seq seq;
 	const u8 *pn;
 
+	switch (keyconf->cipher) {
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
 	ieee80211_get_key_rx_seq(keyconf, 0, &seq);
 	pn = seq.aes_cmac.pn;
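
iwl_mvm_get_key_sta() now performs the firmware-id lookup itself; rcu_dereference_check() permits the access either inside an RCU read-side section or with mvm->mutex held, as asserted by the lockdep expression. The idiom, lifted from the hunk above:

	sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
				    lockdep_is_held(&mvm->mutex));
	if (IS_ERR_OR_NULL(sta))
		return NULL;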
diff --git a/drivers/net/wireless/intersil/orinoco/scan.c b/drivers/net/wireless/intersil/orinoco/scan.c
index d0ceb06c72d0..6d1d084854fb 100644
--- a/drivers/net/wireless/intersil/orinoco/scan.c
+++ b/drivers/net/wireless/intersil/orinoco/scan.c
@@ -237,7 +237,11 @@ void orinoco_add_hostscan_results(struct orinoco_private *priv,
 
 scan_abort:
 	if (priv->scan_request) {
-		cfg80211_scan_done(priv->scan_request, abort);
+		struct cfg80211_scan_info info = {
+			.aborted = abort,
+		};
+
+		cfg80211_scan_done(priv->scan_request, &info);
 		priv->scan_request = NULL;
 	}
 }
@@ -245,7 +249,11 @@ void orinoco_add_hostscan_results(struct orinoco_private *priv,
 void orinoco_scan_done(struct orinoco_private *priv, bool abort)
 {
 	if (priv->scan_request) {
-		cfg80211_scan_done(priv->scan_request, abort);
+		struct cfg80211_scan_info info = {
+			.aborted = abort,
+		};
+
+		cfg80211_scan_done(priv->scan_request, &info);
 		priv->scan_request = NULL;
 	}
 }
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index a1e28a4fd658..8c35ac838fce 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -41,8 +41,6 @@ MODULE_AUTHOR("Jouni Malinen");
 MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
 MODULE_LICENSE("GPL");
 
-static u32 wmediumd_portid;
-
 static int radios = 2;
 module_param(radios, int, 0444);
 MODULE_PARM_DESC(radios, "Number of simulated radios");
@@ -252,12 +250,13 @@ static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c)
 	cp->magic = 0;
 }
 
-static unsigned int hwsim_net_id;
+static int hwsim_net_id;
 
 static int hwsim_netgroup;
 
 struct hwsim_net {
 	int netgroup;
+	u32 wmediumd;
 };
 
 static inline int hwsim_net_get_netgroup(struct net *net)
@@ -274,6 +273,20 @@ static inline void hwsim_net_set_netgroup(struct net *net)
 	hwsim_net->netgroup = hwsim_netgroup++;
 }
 
+static inline u32 hwsim_net_get_wmediumd(struct net *net)
+{
+	struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id);
+
+	return hwsim_net->wmediumd;
+}
+
+static inline void hwsim_net_set_wmediumd(struct net *net, u32 portid)
+{
+	struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id);
+
+	hwsim_net->wmediumd = portid;
+}
+
 static struct class *hwsim_class;
 
 static struct net_device *hwsim_mon; /* global monitor netdev */
@@ -444,10 +457,6 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = {
 	{ .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) }
 };
 
-static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
-	{ .max = 8, .types = BIT(NL80211_IFTYPE_AP) },
-};
-
 static const struct ieee80211_iface_combination hwsim_if_comb[] = {
 	{
 		.limits = hwsim_if_limits,
@@ -455,18 +464,12 @@ static const struct ieee80211_iface_combination hwsim_if_comb[] = {
 		.n_limits = ARRAY_SIZE(hwsim_if_limits) - 1,
 		.max_interfaces = 2048,
 		.num_different_channels = 1,
-	},
-	{
-		.limits = hwsim_if_dfs_limits,
-		.n_limits = ARRAY_SIZE(hwsim_if_dfs_limits),
-		.max_interfaces = 8,
-		.num_different_channels = 1,
 		.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
 				       BIT(NL80211_CHAN_WIDTH_20) |
 				       BIT(NL80211_CHAN_WIDTH_40) |
 				       BIT(NL80211_CHAN_WIDTH_80) |
 				       BIT(NL80211_CHAN_WIDTH_160),
-	}
+	},
 };
 
 static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
@@ -475,18 +478,12 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
 		.n_limits = ARRAY_SIZE(hwsim_if_limits),
 		.max_interfaces = 2048,
 		.num_different_channels = 1,
-	},
-	{
-		.limits = hwsim_if_dfs_limits,
-		.n_limits = ARRAY_SIZE(hwsim_if_dfs_limits),
-		.max_interfaces = 8,
-		.num_different_channels = 1,
 		.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
 				       BIT(NL80211_CHAN_WIDTH_20) |
 				       BIT(NL80211_CHAN_WIDTH_40) |
 				       BIT(NL80211_CHAN_WIDTH_80) |
 				       BIT(NL80211_CHAN_WIDTH_160),
-	}
+	},
 };
 
 static spinlock_t hwsim_radio_lock;
492static spinlock_t hwsim_radio_lock; 489static spinlock_t hwsim_radio_lock;
@@ -552,6 +549,8 @@ struct mac80211_hwsim_data {
 
 	/* group shared by radios created in the same netns */
 	int netgroup;
+	/* wmediumd portid responsible for netgroup of this radio */
+	u32 wmediumd;
 
 	int power_level;
 
@@ -983,6 +982,29 @@ static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data,
 	return true;
 }
 
+static int hwsim_unicast_netgroup(struct mac80211_hwsim_data *data,
+				  struct sk_buff *skb, int portid)
+{
+	struct net *net;
+	bool found = false;
+	int res = -ENOENT;
+
+	rcu_read_lock();
+	for_each_net_rcu(net) {
+		if (data->netgroup == hwsim_net_get_netgroup(net)) {
+			res = genlmsg_unicast(net, skb, portid);
+			found = true;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	if (!found)
+		nlmsg_free(skb);
+
+	return res;
+}
+
 static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
 				       struct sk_buff *my_skb,
 				       int dst_portid)
@@ -1062,7 +1084,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
 		goto nla_put_failure;
 
 	genlmsg_end(skb, msg_head);
-	if (genlmsg_unicast(&init_net, skb, dst_portid))
+	if (hwsim_unicast_netgroup(data, skb, dst_portid))
 		goto err_free_txskb;
 
 	/* Enqueue the packet */
@@ -1355,7 +1377,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
 	mac80211_hwsim_monitor_rx(hw, skb, channel);
 
 	/* wmediumd mode check */
-	_portid = ACCESS_ONCE(wmediumd_portid);
+	_portid = ACCESS_ONCE(data->wmediumd);
 
 	if (_portid)
 		return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
@@ -1451,7 +1473,8 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
 				      struct sk_buff *skb,
 				      struct ieee80211_channel *chan)
 {
-	u32 _pid = ACCESS_ONCE(wmediumd_portid);
+	struct mac80211_hwsim_data *data = hw->priv;
+	u32 _pid = ACCESS_ONCE(data->wmediumd);
 
 	if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) {
 		struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
@@ -1918,8 +1941,12 @@ static void hw_scan_work(struct work_struct *work)
 
 	mutex_lock(&hwsim->mutex);
 	if (hwsim->scan_chan_idx >= req->n_channels) {
+		struct cfg80211_scan_info info = {
+			.aborted = false,
+		};
+
 		wiphy_debug(hwsim->hw->wiphy, "hw scan complete\n");
-		ieee80211_scan_completed(hwsim->hw, false);
+		ieee80211_scan_completed(hwsim->hw, &info);
 		hwsim->hw_scan_request = NULL;
 		hwsim->hw_scan_vif = NULL;
 		hwsim->tmp_chan = NULL;
@@ -2004,13 +2031,16 @@ static void mac80211_hwsim_cancel_hw_scan(struct ieee80211_hw *hw,
 					  struct ieee80211_vif *vif)
 {
 	struct mac80211_hwsim_data *hwsim = hw->priv;
+	struct cfg80211_scan_info info = {
+		.aborted = true,
+	};
 
 	wiphy_debug(hw->wiphy, "hwsim cancel_hw_scan\n");
 
 	cancel_delayed_work_sync(&hwsim->hw_scan);
 
 	mutex_lock(&hwsim->mutex);
-	ieee80211_scan_completed(hwsim->hw, true);
+	ieee80211_scan_completed(hwsim->hw, &info);
 	hwsim->tmp_chan = NULL;
 	hwsim->hw_scan_request = NULL;
 	hwsim->hw_scan_vif = NULL;
@@ -2448,13 +2478,14 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
 		hw->wiphy->max_scan_ssids = 255;
 		hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
 		hw->wiphy->max_remain_on_channel_duration = 1000;
-		/* For channels > 1 DFS is not allowed */
-		hw->wiphy->n_iface_combinations = 1;
 		hw->wiphy->iface_combinations = &data->if_combination;
 		if (param->p2p_device)
 			data->if_combination = hwsim_if_comb_p2p_dev[0];
 		else
 			data->if_combination = hwsim_if_comb[0];
+		hw->wiphy->n_iface_combinations = 1;
+		/* For channels > 1 DFS is not allowed */
+		data->if_combination.radar_detect_widths = 0;
 		data->if_combination.num_different_channels = data->channels;
 	} else if (param->p2p_device) {
 		hw->wiphy->iface_combinations = hwsim_if_comb_p2p_dev;
@@ -2796,6 +2827,20 @@ static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr)
 	return data;
 }
 
+static void hwsim_register_wmediumd(struct net *net, u32 portid)
+{
+	struct mac80211_hwsim_data *data;
+
+	hwsim_net_set_wmediumd(net, portid);
+
+	spin_lock_bh(&hwsim_radio_lock);
+	list_for_each_entry(data, &hwsim_radios, list) {
+		if (data->netgroup == hwsim_net_get_netgroup(net))
+			data->wmediumd = portid;
+	}
+	spin_unlock_bh(&hwsim_radio_lock);
+}
+
 static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
 					   struct genl_info *info)
 {
@@ -2811,9 +2856,6 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
 	int i;
 	bool found = false;
 
-	if (info->snd_portid != wmediumd_portid)
-		return -EINVAL;
-
 	if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
 	    !info->attrs[HWSIM_ATTR_FLAGS] ||
 	    !info->attrs[HWSIM_ATTR_COOKIE] ||
@@ -2829,6 +2871,12 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
 	if (!data2)
 		goto out;
 
+	if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup)
+		goto out;
+
+	if (info->snd_portid != data2->wmediumd)
+		goto out;
+
 	/* look for the skb matching the cookie passed back from user */
 	skb_queue_walk_safe(&data2->pending, skb, tmp) {
 		u64 skb_cookie;
@@ -2892,9 +2940,6 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
 	void *frame_data;
 	struct sk_buff *skb = NULL;
 
-	if (info->snd_portid != wmediumd_portid)
-		return -EINVAL;
-
 	if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] ||
 	    !info->attrs[HWSIM_ATTR_FRAME] ||
 	    !info->attrs[HWSIM_ATTR_RX_RATE] ||
@@ -2920,6 +2965,12 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
 	if (!data2)
 		goto out;
 
+	if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup)
+		goto out;
+
+	if (info->snd_portid != data2->wmediumd)
+		goto out;
+
 	/* check if radio is configured properly */
 
 	if (data2->idle || !data2->started)
@@ -2966,6 +3017,7 @@ out:
 static int hwsim_register_received_nl(struct sk_buff *skb_2,
 				      struct genl_info *info)
 {
+	struct net *net = genl_info_net(info);
 	struct mac80211_hwsim_data *data;
 	int chans = 1;
 
@@ -2982,10 +3034,10 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
 	if (chans > 1)
 		return -EOPNOTSUPP;
 
-	if (wmediumd_portid)
+	if (hwsim_net_get_wmediumd(net))
 		return -EBUSY;
 
-	wmediumd_portid = info->snd_portid;
+	hwsim_register_wmediumd(net, info->snd_portid);
 
 	printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, "
 	       "switching to wmediumd mode with pid %d\n", info->snd_portid);
@@ -3152,7 +3204,7 @@ static const struct genl_ops hwsim_ops[] = {
 		.cmd = HWSIM_CMD_REGISTER,
 		.policy = hwsim_genl_policy,
 		.doit = hwsim_register_received_nl,
-		.flags = GENL_ADMIN_PERM,
+		.flags = GENL_UNS_ADMIN_PERM,
 	},
 	{
 		.cmd = HWSIM_CMD_FRAME,
@@ -3218,10 +3270,10 @@ static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
 
 	remove_user_radios(notify->portid);
 
-	if (notify->portid == wmediumd_portid) {
+	if (notify->portid == hwsim_net_get_wmediumd(notify->net)) {
 		printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink"
 		       " socket, switching to perfect channel medium\n");
-		wmediumd_portid = 0;
+		hwsim_register_wmediumd(notify->net, 0);
 	}
 	return NOTIFY_DONE;
 
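
The hwsim rework above replaces the single global wmediumd_portid with per-network-namespace state, so each netns can register its own wmediumd and frames are only delivered within the matching netgroup. The storage uses the kernel's pernet generic-data mechanism; a rough sketch of that pattern (the ops registration is assumed, it is not shown in this diff):

	static int my_net_id;		/* slot index, filled in at registration */

	struct my_net {
		u32 wmediumd;		/* portid; 0 means not registered */
	};

	static struct pernet_operations my_net_ops = {
		.id   = &my_net_id,
		.size = sizeof(struct my_net),	/* allocated per namespace */
	};

	/* net_generic(net, my_net_id) then yields this namespace's slot. */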
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 776b44bfd93a..ea4802446618 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -796,10 +796,15 @@ void lbs_scan_done(struct lbs_private *priv)
 {
 	WARN_ON(!priv->scan_req);
 
-	if (priv->internal_scan)
+	if (priv->internal_scan) {
 		kfree(priv->scan_req);
-	else
-		cfg80211_scan_done(priv->scan_req, false);
+	} else {
+		struct cfg80211_scan_info info = {
+			.aborted = false,
+		};
+
+		cfg80211_scan_done(priv->scan_req, &info);
+	}
 
 	priv->scan_req = NULL;
 }
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 6bc2011d8609..e7a21443647e 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -1057,8 +1057,12 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
 		if (!priv)
 			continue;
 		if (priv->scan_request) {
+			struct cfg80211_scan_info info = {
+				.aborted = true,
+			};
+
 			mwifiex_dbg(adapter, WARN, "info: aborting scan\n");
-			cfg80211_scan_done(priv->scan_request, 1);
+			cfg80211_scan_done(priv->scan_request, &info);
 			priv->scan_request = NULL;
 		}
 	}
@@ -1112,8 +1116,12 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
 		if (!priv)
 			continue;
 		if (priv->scan_request) {
+			struct cfg80211_scan_info info = {
+				.aborted = true,
+			};
+
 			mwifiex_dbg(adapter, WARN, "info: aborting scan\n");
-			cfg80211_scan_done(priv->scan_request, 1);
+			cfg80211_scan_done(priv->scan_request, &info);
 			priv->scan_request = NULL;
 		}
 	}
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 0e280f879b58..db4925db39aa 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -697,9 +697,13 @@ mwifiex_close(struct net_device *dev)
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
 	if (priv->scan_request) {
+		struct cfg80211_scan_info info = {
+			.aborted = true,
+		};
+
 		mwifiex_dbg(priv->adapter, INFO,
 			    "aborting scan on ndo_stop\n");
-		cfg80211_scan_done(priv->scan_request, 1);
+		cfg80211_scan_done(priv->scan_request, &info);
 		priv->scan_request = NULL;
 		priv->scan_aborting = true;
 	}
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index bc5e52cebce1..fdd749110fcb 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1956,9 +1956,13 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
 		mwifiex_complete_scan(priv);
 
 		if (priv->scan_request) {
+			struct cfg80211_scan_info info = {
+				.aborted = false,
+			};
+
 			mwifiex_dbg(adapter, INFO,
 				    "info: notifying scan done\n");
-			cfg80211_scan_done(priv->scan_request, 0);
+			cfg80211_scan_done(priv->scan_request, &info);
 			priv->scan_request = NULL;
 		} else {
 			priv->scan_aborting = false;
@@ -1977,9 +1981,13 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
 
 		if (!adapter->active_scan_triggered) {
 			if (priv->scan_request) {
+				struct cfg80211_scan_info info = {
+					.aborted = true,
+				};
+
 				mwifiex_dbg(adapter, INFO,
 					    "info: aborting scan\n");
-				cfg80211_scan_done(priv->scan_request, 1);
+				cfg80211_scan_done(priv->scan_request, &info);
 				priv->scan_request = NULL;
 			} else {
 				priv->scan_aborting = false;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index fe19ace0d6a0..b04cf30f3959 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1149,7 +1149,7 @@ static void rtl8192eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
 
 	for (i = 0; i < retry; i++) {
 		path_b_ok = rtl8192eu_rx_iqk_path_b(priv);
-		if (path_a_ok == 0x03) {
+		if (path_b_ok == 0x03) {
 			val32 = rtl8xxxu_read32(priv,
 						REG_RX_POWER_BEFORE_IQK_B_2);
 			result[t][6] = (val32 >> 16) & 0x3ff;
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 569918c485b4..603c90470225 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2134,6 +2134,7 @@ static void rndis_get_scan_results(struct work_struct *work)
 	struct rndis_wlan_private *priv =
 		container_of(work, struct rndis_wlan_private, scan_work.work);
 	struct usbnet *usbdev = priv->usbdev;
+	struct cfg80211_scan_info info = {};
 	int ret;
 
 	netdev_dbg(usbdev->net, "get_scan_results\n");
@@ -2143,7 +2144,8 @@ static void rndis_get_scan_results(struct work_struct *work)
 
 	ret = rndis_check_bssid_list(usbdev, NULL, NULL);
 
-	cfg80211_scan_done(priv->scan_request, ret < 0);
+	info.aborted = ret < 0;
+	cfg80211_scan_done(priv->scan_request, &info);
 
 	priv->scan_request = NULL;
 }
@@ -3574,7 +3576,11 @@ static int rndis_wlan_stop(struct usbnet *usbdev)
 	flush_workqueue(priv->workqueue);
 
 	if (priv->scan_request) {
-		cfg80211_scan_done(priv->scan_request, true);
+		struct cfg80211_scan_info info = {
+			.aborted = true,
+		};
+
+		cfg80211_scan_done(priv->scan_request, &info);
 		priv->scan_request = NULL;
 	}
 
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index 983788156bb0..0a0ff7e31f5b 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -167,6 +167,10 @@ void cw1200_scan_work(struct work_struct *work)
 	}
 
 	if (!priv->scan.req || (priv->scan.curr == priv->scan.end)) {
+		struct cfg80211_scan_info info = {
+			.aborted = priv->scan.status ? 1 : 0,
+		};
+
 		if (priv->scan.output_power != priv->output_power)
 			wsm_set_output_power(priv, priv->output_power * 10);
 		if (priv->join_status == CW1200_JOIN_STATUS_STA &&
@@ -188,7 +192,7 @@ void cw1200_scan_work(struct work_struct *work)
 			cw1200_scan_restart_delayed(priv);
 		wsm_unlock_tx(priv);
 		mutex_unlock(&priv->conf_mutex);
-		ieee80211_scan_completed(priv->hw, priv->scan.status ? 1 : 0);
+		ieee80211_scan_completed(priv->hw, &info);
 		up(&priv->scan.lock);
 		return;
 	} else {
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index c98630394a1a..d0593bc1f1a9 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -36,7 +36,11 @@ static int wl1251_event_scan_complete(struct wl1251 *wl,
 			 mbox->scheduled_scan_channels);
 
 	if (wl->scanning) {
-		ieee80211_scan_completed(wl->hw, false);
+		struct cfg80211_scan_info info = {
+			.aborted = false,
+		};
+
+		ieee80211_scan_completed(wl->hw, &info);
 		wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan completed");
 		wl->scanning = false;
 		if (wl->hw->conf.flags & IEEE80211_CONF_IDLE)
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 56384a4e2a35..bbf7604889b7 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -448,7 +448,11 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
 	WARN_ON(wl->state != WL1251_STATE_ON);
 
 	if (wl->scanning) {
-		ieee80211_scan_completed(wl->hw, true);
+		struct cfg80211_scan_info info = {
+			.aborted = true,
+		};
+
+		ieee80211_scan_completed(wl->hw, &info);
 		wl->scanning = false;
 	}
 
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 10fd24c28ece..69267d592504 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -2615,6 +2615,10 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
 
 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
 	    wl->scan_wlvif == wlvif) {
+		struct cfg80211_scan_info info = {
+			.aborted = true,
+		};
+
 		/*
 		 * Rearm the tx watchdog just before idling scan. This
 		 * prevents just-finished scans from triggering the watchdog
@@ -2625,7 +2629,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
 		wl->scan_wlvif = NULL;
 		wl->scan.req = NULL;
-		ieee80211_scan_completed(wl->hw, true);
+		ieee80211_scan_completed(wl->hw, &info);
 	}
 
 	if (wl->sched_vif == wlvif)
@@ -3649,6 +3653,9 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
 {
 	struct wl1271 *wl = hw->priv;
 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+	struct cfg80211_scan_info info = {
+		.aborted = true,
+	};
 	int ret;
 
 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
@@ -3681,7 +3688,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
 	wl->scan_wlvif = NULL;
 	wl->scan.req = NULL;
-	ieee80211_scan_completed(wl->hw, true);
+	ieee80211_scan_completed(wl->hw, &info);
 
 out_sleep:
 	wl1271_ps_elp_sleep(wl);
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index 23343643207a..5612f5916b4e 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -36,6 +36,9 @@ void wl1271_scan_complete_work(struct work_struct *work)
 	struct delayed_work *dwork;
 	struct wl1271 *wl;
 	struct wl12xx_vif *wlvif;
+	struct cfg80211_scan_info info = {
+		.aborted = false,
+	};
 	int ret;
 
 	dwork = to_delayed_work(work);
@@ -82,7 +85,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
 
 	wlcore_cmd_regdomain_config_locked(wl);
 
-	ieee80211_scan_completed(wl->hw, false);
+	ieee80211_scan_completed(wl->hw, &info);
 
 out:
 	mutex_unlock(&wl->mutex);
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index f7718ec685fa..cea8350fbc7e 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -344,6 +344,8 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
 int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 {
 	u64 checksum, offset;
+	unsigned long align;
+	enum nd_pfn_mode mode;
 	struct nd_namespace_io *nsio;
 	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
@@ -386,22 +388,50 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 		return -ENXIO;
 	}
 
+	align = le32_to_cpu(pfn_sb->align);
+	offset = le64_to_cpu(pfn_sb->dataoff);
+	if (align == 0)
+		align = 1UL << ilog2(offset);
+	mode = le32_to_cpu(pfn_sb->mode);
+
 	if (!nd_pfn->uuid) {
-		/* from probe we allocate */
+		/*
+		 * When probing a namepace via nd_pfn_probe() the uuid
+		 * is NULL (see: nd_pfn_devinit()) we init settings from
+		 * pfn_sb
+		 */
 		nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
 		if (!nd_pfn->uuid)
 			return -ENOMEM;
+		nd_pfn->align = align;
+		nd_pfn->mode = mode;
 	} else {
-		/* from init we validate */
+		/*
+		 * When probing a pfn / dax instance we validate the
+		 * live settings against the pfn_sb
+		 */
 		if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
 			return -ENODEV;
+
+		/*
+		 * If the uuid validates, but other settings mismatch
+		 * return EINVAL because userspace has managed to change
+		 * the configuration without specifying new
+		 * identification.
+		 */
+		if (nd_pfn->align != align || nd_pfn->mode != mode) {
+			dev_err(&nd_pfn->dev,
+					"init failed, settings mismatch\n");
+			dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
+					nd_pfn->align, align, nd_pfn->mode,
+					mode);
+			return -EINVAL;
+		}
 	}
 
-	if (nd_pfn->align == 0)
-		nd_pfn->align = le32_to_cpu(pfn_sb->align);
-	if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
+	if (align > nvdimm_namespace_capacity(ndns)) {
 		dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
-			nd_pfn->align, nvdimm_namespace_capacity(ndns));
+				align, nvdimm_namespace_capacity(ndns));
 		return -EINVAL;
 	}
 
@@ -411,7 +441,6 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 	 * namespace has changed since the pfn superblock was
 	 * established.
 	 */
-	offset = le64_to_cpu(pfn_sb->dataoff);
 	nsio = to_nd_namespace_io(&ndns->dev);
 	if (offset >= resource_size(&nsio->res)) {
 		dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
@@ -419,10 +448,11 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 		return -EBUSY;
 	}
 
-	if ((nd_pfn->align && !IS_ALIGNED(offset, nd_pfn->align))
+	if ((align && !IS_ALIGNED(offset, align))
 			|| !IS_ALIGNED(offset, PAGE_SIZE)) {
-		dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled\n",
-				offset);
+		dev_err(&nd_pfn->dev,
+				"bad offset: %#llx dax disabled align: %#lx\n",
+				offset, align);
 		return -ENXIO;
 	}
 
@@ -502,7 +532,6 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
 	res->start += start_pad;
 	res->end -= end_trunc;
 
-	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
 	if (nd_pfn->mode == PFN_MODE_RAM) {
 		if (offset < SZ_8K)
 			return ERR_PTR(-EINVAL);
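
nd_pfn_validate() now reads align, offset, and mode from the on-media superblock once, then either adopts them (probe path, no uuid yet) or rejects any mismatch with the live configuration (init path). A condensed sketch of the flow above:

	align  = le32_to_cpu(pfn_sb->align);
	offset = le64_to_cpu(pfn_sb->dataoff);
	if (align == 0)			/* older superblock: infer from offset */
		align = 1UL << ilog2(offset);

	if (!nd_pfn->uuid) {		/* probe: adopt superblock settings */
		nd_pfn->align = align;
		nd_pfn->mode = mode;
	} else if (nd_pfn->align != align || nd_pfn->mode != mode) {
		return -EINVAL;		/* init: settings changed under us */
	}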
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 78dca3193ca4..befac5b19490 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1679,9 +1679,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 
 static void nvme_dev_unmap(struct nvme_dev *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+	int bars;
+
 	if (dev->bar)
 		iounmap(dev->bar);
-	pci_release_regions(to_pci_dev(dev->dev));
+
+	bars = pci_select_bars(pdev, IORESOURCE_MEM);
+	pci_release_selected_regions(pdev, bars);
 }
 
 static void nvme_pci_disable(struct nvme_dev *dev)
@@ -1924,7 +1929,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
 
 	return 0;
   release:
-	pci_release_regions(pdev);
+	pci_release_selected_regions(pdev, bars);
 	return -ENODEV;
 }
 
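
The nvme change narrows region ownership to the memory BARs: the probe side requests a BAR selection (as the nvme_dev_map() error path above implies), so teardown must release that same selection rather than calling pci_release_regions(). The paired idiom, sketched:

	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_request_selected_regions(pdev, bars, "nvme"))
		return -ENODEV;
	/* ... map and use the BARs ... */
	pci_release_selected_regions(pdev, bars);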
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index b3bec3aaa45d..bc07ad30c9bf 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -74,6 +74,7 @@ config OF_NET
 config OF_MDIO
 	def_tristate PHYLIB
 	depends on PHYLIB
+	select FIXED_PHY
 	help
 	  OpenFirmware MDIO bus (Ethernet PHY) accessors
 
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 14f2f8c7c260..33daffc4392c 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -395,7 +395,7 @@ static int unflatten_dt_nodes(const void *blob,
 			      struct device_node **nodepp)
 {
 	struct device_node *root;
-	int offset = 0, depth = 0;
+	int offset = 0, depth = 0, initial_depth = 0;
 #define FDT_MAX_DEPTH	64
 	unsigned int fpsizes[FDT_MAX_DEPTH];
 	struct device_node *nps[FDT_MAX_DEPTH];
@@ -405,11 +405,22 @@ static int unflatten_dt_nodes(const void *blob,
 	if (nodepp)
 		*nodepp = NULL;
 
+	/*
+	 * We're unflattening device sub-tree if @dad is valid. There are
+	 * possibly multiple nodes in the first level of depth. We need
+	 * set @depth to 1 to make fdt_next_node() happy as it bails
+	 * immediately when negative @depth is found. Otherwise, the device
+	 * nodes except the first one won't be unflattened successfully.
+	 */
+	if (dad)
+		depth = initial_depth = 1;
+
 	root = dad;
 	fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0;
 	nps[depth] = dad;
+
 	for (offset = 0;
-	     offset >= 0 && depth >= 0;
+	     offset >= 0 && depth >= initial_depth;
 	     offset = fdt_next_node(blob, offset, &depth)) {
 		if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
 			continue;
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index e7bfc175b8e1..6ec743faabe8 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -386,13 +386,13 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 EXPORT_SYMBOL_GPL(of_irq_to_resource);
 
 /**
- * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
+ * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number
  * @dev: pointer to device tree node
- * @index: zero-based index of the irq
- *
- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
- * is not yet created.
+ * @index: zero-based index of the IRQ
  *
+ * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or
+ * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case
+ * of any other failure.
  */
 int of_irq_get(struct device_node *dev, int index)
 {
@@ -413,12 +413,13 @@ int of_irq_get(struct device_node *dev, int index)
 EXPORT_SYMBOL_GPL(of_irq_get);
 
 /**
- * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number
+ * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number
  * @dev: pointer to device tree node
- * @name: irq name
+ * @name: IRQ name
  *
- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
- * is not yet created, or error code in case of any other failure.
+ * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or
+ * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case
+ * of any other failure.
  */
 int of_irq_get_byname(struct device_node *dev, const char *name)
 {
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index de68707a99c7..e2b50bc12f23 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -361,7 +361,6 @@ struct phy_device *of_phy_attach(struct net_device *dev,
 }
 EXPORT_SYMBOL(of_phy_attach);
 
-#if IS_ENABLED(CONFIG_FIXED_PHY)
 /*
  * of_phy_is_fixed_link() and of_phy_register_fixed_link() must
  * support two DT bindings:
@@ -451,4 +450,3 @@ int of_phy_register_fixed_link(struct device_node *np)
 	return -ENODEV;
 }
 EXPORT_SYMBOL(of_phy_register_fixed_link);
-#endif
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index ed01c0172e4a..216648233874 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -127,8 +127,15 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
 	}
 
 	/* Need adjust the alignment to satisfy the CMA requirement */
-	if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool"))
-		align = max(align, (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
+	if (IS_ENABLED(CONFIG_CMA)
+	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
+	    && of_get_flat_dt_prop(node, "reusable", NULL)
+	    && !of_get_flat_dt_prop(node, "no-map", NULL)) {
+		unsigned long order =
+			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
+
+		align = max(align, (phys_addr_t)PAGE_SIZE << order);
+	}
 
 	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
 	if (prop) {
diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c
index dfbab61a1b47..1fa3a3219c45 100644
--- a/drivers/pci/vc.c
+++ b/drivers/pci/vc.c
@@ -221,9 +221,9 @@ static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos,
 		else
 			pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
 					      *(u16 *)buf);
-		buf += 2;
+		buf += 4;
 	}
-	len += 2;
+	len += 4;
 
 	/*
 	 * If we have any Low Priority VCs and a VC Arbitration Table Offset
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 1b8304e1efaa..140436a046c0 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -1010,8 +1010,8 @@ int arm_pmu_device_probe(struct platform_device *pdev,
 		if (!ret)
 			ret = init_fn(pmu);
 	} else {
-		ret = probe_current_pmu(pmu, probe_table);
 		cpumask_setall(&pmu->supported_cpus);
+		ret = probe_current_pmu(pmu, probe_table);
 	}
 
 	if (ret) {
diff --git a/drivers/phy/phy-bcm-ns-usb2.c b/drivers/phy/phy-bcm-ns-usb2.c
index 95ab6b2a0de5..58dff80e9386 100644
--- a/drivers/phy/phy-bcm-ns-usb2.c
+++ b/drivers/phy/phy-bcm-ns-usb2.c
@@ -109,8 +109,8 @@ static int bcm_ns_usb2_probe(struct platform_device *pdev)
 	}
 
 	usb2->phy = devm_phy_create(dev, NULL, &ops);
-	if (IS_ERR(dev))
-		return PTR_ERR(dev);
+	if (IS_ERR(usb2->phy))
+		return PTR_ERR(usb2->phy);
 
 	phy_set_drvdata(usb2->phy, usb2);
 	platform_set_drvdata(pdev, usb2);
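
The bcm-ns-usb2 fix is the classic IS_ERR-on-the-wrong-pointer slip: devm_phy_create() reports failure through its own return value, so that is the pointer to test and unwrap:

	usb2->phy = devm_phy_create(dev, NULL, &ops);
	if (IS_ERR(usb2->phy))			/* not IS_ERR(dev) */
		return PTR_ERR(usb2->phy);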
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index cc093ebfda94..8b851f718123 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -233,8 +233,12 @@ static inline int __is_running(const struct exynos_mipi_phy_desc *data,
 				struct exynos_mipi_video_phy *state)
 {
 	u32 val;
+	int ret;
+
+	ret = regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
+	if (ret)
+		return 0;
 
-	regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
 	return val & data->resetn_val;
 }
 
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c
index 3acd2a1808df..213e2e15339c 100644
--- a/drivers/phy/phy-miphy28lp.c
+++ b/drivers/phy/phy-miphy28lp.c
@@ -1143,7 +1143,8 @@ static int miphy28lp_probe_resets(struct device_node *node,
 	struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
 	int err;
 
-	miphy_phy->miphy_rst = of_reset_control_get(node, "miphy-sw-rst");
+	miphy_phy->miphy_rst =
+		of_reset_control_get_shared(node, "miphy-sw-rst");
 
 	if (IS_ERR(miphy_phy->miphy_rst)) {
 		dev_err(miphy_dev->dev,
diff --git a/drivers/phy/phy-rcar-gen3-usb2.c b/drivers/phy/phy-rcar-gen3-usb2.c
index 76bb88f0700a..4be3f5dbbc9f 100644
--- a/drivers/phy/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/phy-rcar-gen3-usb2.c
@@ -144,12 +144,6 @@ static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch)
 	extcon_set_cable_state_(ch->extcon, EXTCON_USB, true);
 }
 
-static bool rcar_gen3_check_vbus(struct rcar_gen3_chan *ch)
-{
-	return !!(readl(ch->base + USB2_ADPCTRL) &
-		  USB2_ADPCTRL_OTGSESSVLD);
-}
-
 static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
 {
 	return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
@@ -157,13 +151,7 @@ static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
 
 static void rcar_gen3_device_recognition(struct rcar_gen3_chan *ch)
 {
-	bool is_host = true;
-
-	/* B-device? */
-	if (rcar_gen3_check_id(ch) && rcar_gen3_check_vbus(ch))
-		is_host = false;
-
-	if (is_host)
+	if (!rcar_gen3_check_id(ch))
 		rcar_gen3_init_for_host(ch);
 	else
 		rcar_gen3_init_for_peri(ch);
diff --git a/drivers/phy/phy-rockchip-dp.c b/drivers/phy/phy-rockchip-dp.c
index 793ecb6d87bc..8b267a746576 100644
--- a/drivers/phy/phy-rockchip-dp.c
+++ b/drivers/phy/phy-rockchip-dp.c
@@ -90,7 +90,7 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
 		return -ENODEV;
 
 	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
-	if (IS_ERR(dp))
+	if (!dp)
 		return -ENOMEM;
 
 	dp->dev = dev;
diff --git a/drivers/phy/phy-stih407-usb.c b/drivers/phy/phy-stih407-usb.c
index 1d5ae5f8ef69..b1f44ab669fb 100644
--- a/drivers/phy/phy-stih407-usb.c
+++ b/drivers/phy/phy-stih407-usb.c
@@ -105,13 +105,13 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev)
 	phy_dev->dev = dev;
 	dev_set_drvdata(dev, phy_dev);
 
-	phy_dev->rstc = devm_reset_control_get(dev, "global");
+	phy_dev->rstc = devm_reset_control_get_shared(dev, "global");
 	if (IS_ERR(phy_dev->rstc)) {
 		dev_err(dev, "failed to ctrl picoPHY reset\n");
 		return PTR_ERR(phy_dev->rstc);
 	}
 
-	phy_dev->rstport = devm_reset_control_get(dev, "port");
+	phy_dev->rstport = devm_reset_control_get_exclusive(dev, "port");
 	if (IS_ERR(phy_dev->rstport)) {
 		dev_err(dev, "failed to ctrl picoPHY reset\n");
 		return PTR_ERR(phy_dev->rstport);
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index bae54f7a1f48..de3101fbbf40 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -175,7 +175,7 @@ static void sun4i_usb_phy_write(struct sun4i_usb_phy *phy, u32 addr, u32 data,
 {
 	struct sun4i_usb_phy_data *phy_data = to_sun4i_usb_phy_data(phy);
 	u32 temp, usbc_bit = BIT(phy->index * 2);
-	void *phyctl = phy_data->base + phy_data->cfg->phyctl_offset;
+	void __iomem *phyctl = phy_data->base + phy_data->cfg->phyctl_offset;
 	int i;
 
 	mutex_lock(&phy_data->mutex);
@@ -514,9 +514,9 @@ static int sun4i_usb_phy_remove(struct platform_device *pdev)
 
 	if (data->vbus_power_nb_registered)
 		power_supply_unreg_notifier(&data->vbus_power_nb);
-	if (data->id_det_irq >= 0)
+	if (data->id_det_irq > 0)
 		devm_free_irq(dev, data->id_det_irq, data);
-	if (data->vbus_det_irq >= 0)
+	if (data->vbus_det_irq > 0)
 		devm_free_irq(dev, data->vbus_det_irq, data);
 
 	cancel_delayed_work_sync(&data->detect);
@@ -645,11 +645,11 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
 
 	data->id_det_irq = gpiod_to_irq(data->id_det_gpio);
 	data->vbus_det_irq = gpiod_to_irq(data->vbus_det_gpio);
-	if ((data->id_det_gpio && data->id_det_irq < 0) ||
-	    (data->vbus_det_gpio && data->vbus_det_irq < 0))
+	if ((data->id_det_gpio && data->id_det_irq <= 0) ||
+	    (data->vbus_det_gpio && data->vbus_det_irq <= 0))
 		data->phy0_poll = true;
 
-	if (data->id_det_irq >= 0) {
+	if (data->id_det_irq > 0) {
 		ret = devm_request_irq(dev, data->id_det_irq,
 				       sun4i_usb_phy0_id_vbus_det_irq,
 				       IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
@@ -660,7 +660,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
 		}
 	}
 
-	if (data->vbus_det_irq >= 0) {
+	if (data->vbus_det_irq > 0) {
 		ret = devm_request_irq(dev, data->vbus_det_irq,
 				       sun4i_usb_phy0_id_vbus_det_irq,
 				       IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
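
The sun4i changes encode the gpiod_to_irq() convention: only a strictly positive return is a usable Linux IRQ number, so zero or a negative errno now routes the driver to polling and is never handed to request/free. A sketch (the handler and flag arguments here are placeholders):

	int irq = gpiod_to_irq(gpio);

	if (irq <= 0)
		data->phy0_poll = true;		/* no usable IRQ: poll instead */
	else
		ret = devm_request_irq(dev, irq, handler, flags, name, data);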
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 0a477d24cf76..bf46844dc387 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -293,11 +293,18 @@ static int ti_pipe3_init(struct phy *x)
 		ret = ti_pipe3_dpll_wait_lock(phy);
 	}
 
-	/* Program the DPLL only if not locked */
+	/* SATA has issues if re-programmed when locked */
 	val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
-	if (!(val & PLL_LOCK))
-		if (ti_pipe3_dpll_program(phy))
-			return -EINVAL;
+	if ((val & PLL_LOCK) && of_device_is_compatible(phy->dev->of_node,
+							"ti,phy-pipe3-sata"))
+		return ret;
+
+	/* Program the DPLL */
+	ret = ti_pipe3_dpll_program(phy);
+	if (ret) {
+		ti_pipe3_disable_clocks(phy);
+		return -EINVAL;
+	}
 
 	return ret;
 }
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 6b6af6cba454..d9b10a39a2cf 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -463,7 +463,8 @@ static int twl4030_phy_power_on(struct phy *phy)
 	twl4030_usb_set_mode(twl, twl->usb_mode);
 	if (twl->usb_mode == T2_USB_MODE_ULPI)
 		twl4030_i2c_access(twl, 0);
-	schedule_delayed_work(&twl->id_workaround_work, 0);
+	twl->linkstat = MUSB_UNKNOWN;
+	schedule_delayed_work(&twl->id_workaround_work, HZ);
 
 	return 0;
 }
@@ -537,6 +538,7 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
 	struct twl4030_usb *twl = _twl;
 	enum musb_vbus_id_status status;
 	bool status_changed = false;
+	int err;
 
 	status = twl4030_usb_linkstat(twl);
 
@@ -567,7 +569,9 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
 			pm_runtime_mark_last_busy(twl->dev);
 			pm_runtime_put_autosuspend(twl->dev);
 		}
-		musb_mailbox(status);
+		err = musb_mailbox(status);
+		if (err)
+			twl->linkstat = MUSB_UNKNOWN;
 	}
 
 	/* don't schedule during sleep - irq works right then */
@@ -595,7 +599,8 @@ static int twl4030_phy_init(struct phy *phy)
 	struct twl4030_usb *twl = phy_get_drvdata(phy);
 
 	pm_runtime_get_sync(twl->dev);
-	schedule_delayed_work(&twl->id_workaround_work, 0);
+	twl->linkstat = MUSB_UNKNOWN;
+	schedule_delayed_work(&twl->id_workaround_work, HZ);
 	pm_runtime_mark_last_busy(twl->dev);
 	pm_runtime_put_autosuspend(twl->dev);
 
@@ -763,7 +768,8 @@ static int twl4030_usb_remove(struct platform_device *pdev)
 	if (cable_present(twl->linkstat))
 		pm_runtime_put_noidle(twl->dev);
 	pm_runtime_mark_last_busy(twl->dev);
-	pm_runtime_put_sync_suspend(twl->dev);
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
+	pm_runtime_put_sync(twl->dev);
 	pm_runtime_disable(twl->dev);
 
 	/* autogate 60MHz ULPI clock,
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index e4bc1151e04f..42a5c1dddfef 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -23,7 +23,7 @@ obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o
23obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o 23obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
24obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o 24obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
25obj-$(CONFIG_PINCTRL_SIRF) += sirf/ 25obj-$(CONFIG_PINCTRL_SIRF) += sirf/
26obj-$(CONFIG_PINCTRL_TEGRA) += tegra/ 26obj-$(CONFIG_ARCH_TEGRA) += tegra/
27obj-$(CONFIG_PINCTRL_TZ1090) += pinctrl-tz1090.o 27obj-$(CONFIG_PINCTRL_TZ1090) += pinctrl-tz1090.o
28obj-$(CONFIG_PINCTRL_TZ1090_PDC) += pinctrl-tz1090-pdc.o 28obj-$(CONFIG_PINCTRL_TZ1090_PDC) += pinctrl-tz1090-pdc.o
29obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o 29obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 47ccfcc8a647..eccb47480e1d 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -209,9 +209,9 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
209 pin_reg = &info->pin_regs[pin_id]; 209 pin_reg = &info->pin_regs[pin_id];
210 210
211 if (pin_reg->mux_reg == -1) { 211 if (pin_reg->mux_reg == -1) {
212 dev_err(ipctl->dev, "Pin(%s) does not support mux function\n", 212 dev_dbg(ipctl->dev, "Pin(%s) does not support mux function\n",
213 info->pins[pin_id].name); 213 info->pins[pin_id].name);
214 return -EINVAL; 214 continue;
215 } 215 }
216 216
217 if (info->flags & SHARE_MUX_CONF_REG) { 217 if (info->flags & SHARE_MUX_CONF_REG) {
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 677a811b3a6f..7abfd42e8ffd 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -401,9 +401,9 @@ static const struct byt_simple_func_mux byt_score_sata_mux[] = {
401static const unsigned int byt_score_plt_clk0_pins[] = { 96 }; 401static const unsigned int byt_score_plt_clk0_pins[] = { 96 };
402static const unsigned int byt_score_plt_clk1_pins[] = { 97 }; 402static const unsigned int byt_score_plt_clk1_pins[] = { 97 };
403static const unsigned int byt_score_plt_clk2_pins[] = { 98 }; 403static const unsigned int byt_score_plt_clk2_pins[] = { 98 };
404static const unsigned int byt_score_plt_clk4_pins[] = { 99 }; 404static const unsigned int byt_score_plt_clk3_pins[] = { 99 };
405static const unsigned int byt_score_plt_clk5_pins[] = { 100 }; 405static const unsigned int byt_score_plt_clk4_pins[] = { 100 };
406static const unsigned int byt_score_plt_clk3_pins[] = { 101 }; 406static const unsigned int byt_score_plt_clk5_pins[] = { 101 };
407static const struct byt_simple_func_mux byt_score_plt_clk_mux[] = { 407static const struct byt_simple_func_mux byt_score_plt_clk_mux[] = {
408 SIMPLE_FUNC("plt_clk", 1), 408 SIMPLE_FUNC("plt_clk", 1),
409}; 409};
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index cf9bafa10acf..bfdf720db270 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1580,6 +1580,9 @@ static inline void pcs_irq_set(struct pcs_soc_data *pcs_soc,
1580 else 1580 else
1581 mask &= ~soc_mask; 1581 mask &= ~soc_mask;
1582 pcs->write(mask, pcswi->reg); 1582 pcs->write(mask, pcswi->reg);
1583
1584 /* flush posted write */
1585 mask = pcs->read(pcswi->reg);
1583 raw_spin_unlock(&pcs->lock); 1586 raw_spin_unlock(&pcs->lock);
1584 } 1587 }
1585 1588
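
The pinctrl-single hunk reads the register straight back after writing it, still under the lock. On interconnects with posted writes the store can linger in a write buffer; a read from the same address forces it out to the device before the IRQ path proceeds. The generic shape, with a hypothetical register offset:

#include <linux/io.h>

#define MY_IRQ_EN_REG	0x10	/* hypothetical offset, for illustration */

/* Sketch: flush a posted MMIO write by reading the register back. */
static void irq_mask_write_flushed(void __iomem *base, u32 mask)
{
	writel(mask, base + MY_IRQ_EN_REG);
	(void)readl(base + MY_IRQ_EN_REG);	/* flush posted write */
}
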
diff --git a/drivers/pinctrl/tegra/Makefile b/drivers/pinctrl/tegra/Makefile
index a927379b6794..d9ea2be69cc4 100644
--- a/drivers/pinctrl/tegra/Makefile
+++ b/drivers/pinctrl/tegra/Makefile
@@ -1,4 +1,4 @@
1obj-y += pinctrl-tegra.o 1obj-$(CONFIG_PINCTRL_TEGRA) += pinctrl-tegra.o
2obj-$(CONFIG_PINCTRL_TEGRA20) += pinctrl-tegra20.o 2obj-$(CONFIG_PINCTRL_TEGRA20) += pinctrl-tegra20.o
3obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o 3obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o
4obj-$(CONFIG_PINCTRL_TEGRA114) += pinctrl-tegra114.o 4obj-$(CONFIG_PINCTRL_TEGRA114) += pinctrl-tegra114.o
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
index 6d8ee3b15872..8abd80dbcbed 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/platform/chrome/cros_ec_dev.c
@@ -151,13 +151,19 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
151 goto exit; 151 goto exit;
152 } 152 }
153 153
154 if (u_cmd.outsize != s_cmd->outsize ||
155 u_cmd.insize != s_cmd->insize) {
156 ret = -EINVAL;
157 goto exit;
158 }
159
154 s_cmd->command += ec->cmd_offset; 160 s_cmd->command += ec->cmd_offset;
155 ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd); 161 ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
156 /* Only copy data to userland if data was received. */ 162 /* Only copy data to userland if data was received. */
157 if (ret < 0) 163 if (ret < 0)
158 goto exit; 164 goto exit;
159 165
160 if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize)) 166 if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
161 ret = -EFAULT; 167 ret = -EFAULT;
162exit: 168exit:
163 kfree(s_cmd); 169 kfree(s_cmd);
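
The cros_ec ioctl fix is a classic double-fetch repair: the sizes are fetched once into u_cmd, fetched again as part of the full command, and the two copies compared, so a racing thread cannot grow the transfer after validation; the copy back out then trusts only the kernel-side s_cmd->insize. A condensed sketch of the pattern (struct layout and names are illustrative):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

struct my_cmd {
	u32 outsize;		/* bytes from user to device */
	u32 insize;		/* bytes from device to user */
	u8 data[];
};

static long my_ioctl_xcmd(void __user *arg)
{
	struct my_cmd hdr, *cmd;
	long ret = 0;

	if (copy_from_user(&hdr, arg, sizeof(hdr)))
		return -EFAULT;

	cmd = kmalloc(sizeof(*cmd) + max(hdr.outsize, hdr.insize),
		      GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	if (copy_from_user(cmd, arg, sizeof(*cmd) + hdr.outsize)) {
		ret = -EFAULT;
		goto exit;
	}

	/* Userspace may have rewritten the sizes between the two
	 * fetches: re-validate against the first copy. */
	if (hdr.outsize != cmd->outsize || hdr.insize != cmd->insize) {
		ret = -EINVAL;
		goto exit;
	}

	/* ... run the command, then copy back using cmd->insize only ... */
exit:
	kfree(cmd);
	return ret;
}
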
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index c06bb85c2839..3ec0025d19e7 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -103,7 +103,6 @@ config DELL_SMBIOS
103 103
104config DELL_LAPTOP 104config DELL_LAPTOP
105 tristate "Dell Laptop Extras" 105 tristate "Dell Laptop Extras"
106 depends on X86
107 depends on DELL_SMBIOS 106 depends on DELL_SMBIOS
108 depends on DMI 107 depends on DMI
109 depends on BACKLIGHT_CLASS_DEVICE 108 depends on BACKLIGHT_CLASS_DEVICE
@@ -505,7 +504,7 @@ config THINKPAD_ACPI_HOTKEY_POLL
505 504
506config SENSORS_HDAPS 505config SENSORS_HDAPS
507 tristate "Thinkpad Hard Drive Active Protection System (hdaps)" 506 tristate "Thinkpad Hard Drive Active Protection System (hdaps)"
508 depends on INPUT && X86 507 depends on INPUT
509 select INPUT_POLLDEV 508 select INPUT_POLLDEV
510 default n 509 default n
511 help 510 help
@@ -749,7 +748,7 @@ config TOSHIBA_WMI
749 748
750config ACPI_CMPC 749config ACPI_CMPC
751 tristate "CMPC Laptop Extras" 750 tristate "CMPC Laptop Extras"
752 depends on X86 && ACPI 751 depends on ACPI
753 depends on RFKILL || RFKILL=n 752 depends on RFKILL || RFKILL=n
754 select INPUT 753 select INPUT
755 select BACKLIGHT_CLASS_DEVICE 754 select BACKLIGHT_CLASS_DEVICE
@@ -848,7 +847,7 @@ config INTEL_IMR
848 847
849config INTEL_PMC_CORE 848config INTEL_PMC_CORE
850 bool "Intel PMC Core driver" 849 bool "Intel PMC Core driver"
851 depends on X86 && PCI 850 depends on PCI
852 ---help--- 851 ---help---
853 The Intel Platform Controller Hub for Intel Core SoCs provides access 852 The Intel Platform Controller Hub for Intel Core SoCs provides access
854 to Power Management Controller registers via a PCI interface. This 853 to Power Management Controller registers via a PCI interface. This
@@ -860,7 +859,7 @@ config INTEL_PMC_CORE
860 859
861config IBM_RTL 860config IBM_RTL
862 tristate "Device driver to enable PRTL support" 861 tristate "Device driver to enable PRTL support"
863 depends on X86 && PCI 862 depends on PCI
864 ---help--- 863 ---help---
865 Enable support for IBM Premium Real Time Mode (PRTM). 864 Enable support for IBM Premium Real Time Mode (PRTM).
866 This module will allow you the enter and exit PRTM in the BIOS via 865 This module will allow you the enter and exit PRTM in the BIOS via
@@ -894,7 +893,6 @@ config XO15_EBOOK
894 893
895config SAMSUNG_LAPTOP 894config SAMSUNG_LAPTOP
896 tristate "Samsung Laptop driver" 895 tristate "Samsung Laptop driver"
897 depends on X86
898 depends on RFKILL || RFKILL = n 896 depends on RFKILL || RFKILL = n
899 depends on ACPI_VIDEO || ACPI_VIDEO = n 897 depends on ACPI_VIDEO || ACPI_VIDEO = n
900 depends on BACKLIGHT_CLASS_DEVICE 898 depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 4a23fbc66b71..d1a091b93192 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -567,6 +567,7 @@ static void ideapad_sysfs_exit(struct ideapad_private *priv)
567static const struct key_entry ideapad_keymap[] = { 567static const struct key_entry ideapad_keymap[] = {
568 { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } }, 568 { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } },
569 { KE_KEY, 7, { KEY_CAMERA } }, 569 { KE_KEY, 7, { KEY_CAMERA } },
570 { KE_KEY, 8, { KEY_MICMUTE } },
570 { KE_KEY, 11, { KEY_F16 } }, 571 { KE_KEY, 11, { KEY_F16 } },
571 { KE_KEY, 13, { KEY_WLAN } }, 572 { KE_KEY, 13, { KEY_WLAN } },
572 { KE_KEY, 16, { KEY_PROG1 } }, 573 { KE_KEY, 16, { KEY_PROG1 } },
@@ -809,6 +810,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
809 break; 810 break;
810 case 13: 811 case 13:
811 case 11: 812 case 11:
813 case 8:
812 case 7: 814 case 7:
813 case 6: 815 case 6:
814 ideapad_input_report(priv, vpc_bit); 816 ideapad_input_report(priv, vpc_bit);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index c3bfa1fe95bf..b65ce7519411 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2043,6 +2043,7 @@ static int hotkey_autosleep_ack;
2043 2043
2044static u32 hotkey_orig_mask; /* events the BIOS had enabled */ 2044static u32 hotkey_orig_mask; /* events the BIOS had enabled */
2045static u32 hotkey_all_mask; /* all events supported in fw */ 2045static u32 hotkey_all_mask; /* all events supported in fw */
2046static u32 hotkey_adaptive_all_mask; /* all adaptive events supported in fw */
2046static u32 hotkey_reserved_mask; /* events better left disabled */ 2047static u32 hotkey_reserved_mask; /* events better left disabled */
2047static u32 hotkey_driver_mask; /* events needed by the driver */ 2048static u32 hotkey_driver_mask; /* events needed by the driver */
2048static u32 hotkey_user_mask; /* events visible to userspace */ 2049static u32 hotkey_user_mask; /* events visible to userspace */
@@ -2742,6 +2743,17 @@ static ssize_t hotkey_all_mask_show(struct device *dev,
2742 2743
2743static DEVICE_ATTR_RO(hotkey_all_mask); 2744static DEVICE_ATTR_RO(hotkey_all_mask);
2744 2745
2746/* sysfs hotkey all_mask ----------------------------------------------- */
2747static ssize_t hotkey_adaptive_all_mask_show(struct device *dev,
2748 struct device_attribute *attr,
2749 char *buf)
2750{
2751 return snprintf(buf, PAGE_SIZE, "0x%08x\n",
2752 hotkey_adaptive_all_mask | hotkey_source_mask);
2753}
2754
2755static DEVICE_ATTR_RO(hotkey_adaptive_all_mask);
2756
2745/* sysfs hotkey recommended_mask --------------------------------------- */ 2757/* sysfs hotkey recommended_mask --------------------------------------- */
2746static ssize_t hotkey_recommended_mask_show(struct device *dev, 2758static ssize_t hotkey_recommended_mask_show(struct device *dev,
2747 struct device_attribute *attr, 2759 struct device_attribute *attr,
@@ -2985,6 +2997,7 @@ static struct attribute *hotkey_attributes[] __initdata = {
2985 &dev_attr_wakeup_hotunplug_complete.attr, 2997 &dev_attr_wakeup_hotunplug_complete.attr,
2986 &dev_attr_hotkey_mask.attr, 2998 &dev_attr_hotkey_mask.attr,
2987 &dev_attr_hotkey_all_mask.attr, 2999 &dev_attr_hotkey_all_mask.attr,
3000 &dev_attr_hotkey_adaptive_all_mask.attr,
2988 &dev_attr_hotkey_recommended_mask.attr, 3001 &dev_attr_hotkey_recommended_mask.attr,
2989#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL 3002#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2990 &dev_attr_hotkey_source_mask.attr, 3003 &dev_attr_hotkey_source_mask.attr,
@@ -3321,20 +3334,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3321 if (!tp_features.hotkey) 3334 if (!tp_features.hotkey)
3322 return 1; 3335 return 1;
3323 3336
3324 /*
3325 * Check if we have an adaptive keyboard, like on the
3326 * Lenovo Carbon X1 2014 (2nd Gen).
3327 */
3328 if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
3329 if ((hkeyv >> 8) == 2) {
3330 tp_features.has_adaptive_kbd = true;
3331 res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
3332 &adaptive_kbd_attr_group);
3333 if (res)
3334 goto err_exit;
3335 }
3336 }
3337
3338 quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable, 3337 quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable,
3339 ARRAY_SIZE(tpacpi_hotkey_qtable)); 3338 ARRAY_SIZE(tpacpi_hotkey_qtable));
3340 3339
@@ -3357,30 +3356,70 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3357 A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking 3356 A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking
3358 for HKEY interface version 0x100 */ 3357 for HKEY interface version 0x100 */
3359 if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) { 3358 if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
3360 if ((hkeyv >> 8) != 1) { 3359 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
3361 pr_err("unknown version of the HKEY interface: 0x%x\n", 3360 "firmware HKEY interface version: 0x%x\n",
3362 hkeyv); 3361 hkeyv);
3363 pr_err("please report this to %s\n", TPACPI_MAIL); 3362
3364 } else { 3363 switch (hkeyv >> 8) {
3364 case 1:
3365 /* 3365 /*
3366 * MHKV 0x100 in A31, R40, R40e, 3366 * MHKV 0x100 in A31, R40, R40e,
3367 * T4x, X31, and later 3367 * T4x, X31, and later
3368 */ 3368 */
3369 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
3370 "firmware HKEY interface version: 0x%x\n",
3371 hkeyv);
3372 3369
3373 /* Paranoia check AND init hotkey_all_mask */ 3370 /* Paranoia check AND init hotkey_all_mask */
3374 if (!acpi_evalf(hkey_handle, &hotkey_all_mask, 3371 if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
3375 "MHKA", "qd")) { 3372 "MHKA", "qd")) {
3376 pr_err("missing MHKA handler, " 3373 pr_err("missing MHKA handler, please report this to %s\n",
3377 "please report this to %s\n",
3378 TPACPI_MAIL); 3374 TPACPI_MAIL);
3379 /* Fallback: pre-init for FN+F3,F4,F12 */ 3375 /* Fallback: pre-init for FN+F3,F4,F12 */
3380 hotkey_all_mask = 0x080cU; 3376 hotkey_all_mask = 0x080cU;
3381 } else { 3377 } else {
3382 tp_features.hotkey_mask = 1; 3378 tp_features.hotkey_mask = 1;
3383 } 3379 }
3380 break;
3381
3382 case 2:
3383 /*
3384 * MHKV 0x200 in X1, T460s, X260, T560, X1 Tablet (2016)
3385 */
3386
3387 /* Paranoia check AND init hotkey_all_mask */
3388 if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
3389 "MHKA", "dd", 1)) {
3390 pr_err("missing MHKA handler, please report this to %s\n",
3391 TPACPI_MAIL);
3392 /* Fallback: pre-init for FN+F3,F4,F12 */
3393 hotkey_all_mask = 0x080cU;
3394 } else {
3395 tp_features.hotkey_mask = 1;
3396 }
3397
3398 /*
3399 * Check if we have an adaptive keyboard, like on the
3400 * Lenovo Carbon X1 2014 (2nd Gen).
3401 */
3402 if (acpi_evalf(hkey_handle, &hotkey_adaptive_all_mask,
3403 "MHKA", "dd", 2)) {
3404 if (hotkey_adaptive_all_mask != 0) {
3405 tp_features.has_adaptive_kbd = true;
3406 res = sysfs_create_group(
3407 &tpacpi_pdev->dev.kobj,
3408 &adaptive_kbd_attr_group);
3409 if (res)
3410 goto err_exit;
3411 }
3412 } else {
3413 tp_features.has_adaptive_kbd = false;
3414 hotkey_adaptive_all_mask = 0x0U;
3415 }
3416 break;
3417
3418 default:
3419 pr_err("unknown version of the HKEY interface: 0x%x\n",
3420 hkeyv);
3421 pr_err("please report this to %s\n", TPACPI_MAIL);
3422 break;
3384 } 3423 }
3385 } 3424 }
3386 3425
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 456987c88baa..b13cd074c52a 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -565,11 +565,12 @@ static int power_supply_read_temp(struct thermal_zone_device *tzd,
565 565
566 WARN_ON(tzd == NULL); 566 WARN_ON(tzd == NULL);
567 psy = tzd->devdata; 567 psy = tzd->devdata;
568 ret = psy->desc->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val); 568 ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
569 if (ret)
570 return ret;
569 571
570 /* Convert tenths of degree Celsius to milli degree Celsius. */ 572 /* Convert tenths of degree Celsius to milli degree Celsius. */
571 if (!ret) 573 *temp = val.intval * 100;
572 *temp = val.intval * 100;
573 574
574 return ret; 575 return ret;
575} 576}
@@ -612,10 +613,12 @@ static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
612 int ret; 613 int ret;
613 614
614 psy = tcd->devdata; 615 psy = tcd->devdata;
615 ret = psy->desc->get_property(psy, 616 ret = power_supply_get_property(psy,
616 POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val); 617 POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
617 if (!ret) 618 if (ret)
618 *state = val.intval; 619 return ret;
620
621 *state = val.intval;
619 622
620 return ret; 623 return ret;
621} 624}
@@ -628,10 +631,12 @@ static int ps_get_cur_chrage_cntl_limit(struct thermal_cooling_device *tcd,
628 int ret; 631 int ret;
629 632
630 psy = tcd->devdata; 633 psy = tcd->devdata;
631 ret = psy->desc->get_property(psy, 634 ret = power_supply_get_property(psy,
632 POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val); 635 POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
633 if (!ret) 636 if (ret)
634 *state = val.intval; 637 return ret;
638
639 *state = val.intval;
635 640
636 return ret; 641 return ret;
637} 642}
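
The three power_supply_core hunks replace direct psy->desc->get_property() calls with the power_supply_get_property() wrapper, which also fails cleanly while the supply is unregistering, and straighten the error handling into early returns. The resulting callback shape, condensed:

#include <linux/power_supply.h>
#include <linux/thermal.h>

/* Sketch: thermal-zone callback reading a property via the core helper. */
static int my_read_temp(struct thermal_zone_device *tzd, int *temp)
{
	struct power_supply *psy = tzd->devdata;
	union power_supply_propval val;
	int ret;

	ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
	if (ret)
		return ret;

	*temp = val.intval * 100;	/* tenths of a degree C -> mC */
	return 0;
}
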
diff --git a/drivers/power/tps65217_charger.c b/drivers/power/tps65217_charger.c
index d9f56730c735..73dfae41def8 100644
--- a/drivers/power/tps65217_charger.c
+++ b/drivers/power/tps65217_charger.c
@@ -197,6 +197,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
197{ 197{
198 struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent); 198 struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
199 struct tps65217_charger *charger; 199 struct tps65217_charger *charger;
200 struct power_supply_config cfg = {};
200 int ret; 201 int ret;
201 202
202 dev_dbg(&pdev->dev, "%s\n", __func__); 203 dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -208,9 +209,12 @@ static int tps65217_charger_probe(struct platform_device *pdev)
208 charger->tps = tps; 209 charger->tps = tps;
209 charger->dev = &pdev->dev; 210 charger->dev = &pdev->dev;
210 211
212 cfg.of_node = pdev->dev.of_node;
213 cfg.drv_data = charger;
214
211 charger->ac = devm_power_supply_register(&pdev->dev, 215 charger->ac = devm_power_supply_register(&pdev->dev,
212 &tps65217_charger_desc, 216 &tps65217_charger_desc,
213 NULL); 217 &cfg);
214 if (IS_ERR(charger->ac)) { 218 if (IS_ERR(charger->ac)) {
215 dev_err(&pdev->dev, "failed: power supply register\n"); 219 dev_err(&pdev->dev, "failed: power supply register\n");
216 return PTR_ERR(charger->ac); 220 return PTR_ERR(charger->ac);
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index dba3843c53b8..ed337a8c34ab 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -457,7 +457,8 @@ int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state)
457{ 457{
458 int err; 458 int err;
459 459
460 if (!pwm) 460 if (!pwm || !state || !state->period ||
461 state->duty_cycle > state->period)
461 return -EINVAL; 462 return -EINVAL;
462 463
463 if (!memcmp(state, &pwm->state, sizeof(*state))) 464 if (!memcmp(state, &pwm->state, sizeof(*state)))
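
pwm_apply_state() now rejects a NULL state, a zero period and a duty cycle longer than the period before anything reaches the hardware (and the sysfs hunk further down starts propagating its return code). A caller-side sketch with arbitrary example numbers:

#include <linux/pwm.h>

/* Sketch: build a state that satisfies the new checks and apply it. */
static int set_half_duty(struct pwm_device *pwm, unsigned int period_ns)
{
	struct pwm_state state;

	pwm_get_state(pwm, &state);
	state.period = period_ns;
	state.duty_cycle = period_ns / 2;	/* never exceeds the period */
	state.enabled = true;

	return pwm_apply_state(pwm, &state);	/* -EINVAL on bad arguments */
}
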
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index f994c7eaf41c..14fc011faa32 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -272,7 +272,7 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
272 chip->chip.of_pwm_n_cells = 3; 272 chip->chip.of_pwm_n_cells = 3;
273 chip->chip.can_sleep = 1; 273 chip->chip.can_sleep = 1;
274 274
275 ret = pwmchip_add(&chip->chip); 275 ret = pwmchip_add_with_polarity(&chip->chip, PWM_POLARITY_INVERSED);
276 if (ret) { 276 if (ret) {
277 clk_disable_unprepare(hlcdc->periph_clk); 277 clk_disable_unprepare(hlcdc->periph_clk);
278 return ret; 278 return ret;
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index d98599249a05..01695d48dd54 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -152,7 +152,7 @@ static ssize_t enable_store(struct device *child,
152 goto unlock; 152 goto unlock;
153 } 153 }
154 154
155 pwm_apply_state(pwm, &state); 155 ret = pwm_apply_state(pwm, &state);
156 156
157unlock: 157unlock:
158 mutex_unlock(&export->lock); 158 mutex_unlock(&export->lock);
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 63cd5e68c864..3a6d0290c54c 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -296,7 +296,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
296 if (!sreg->sel && !strcmp(sreg->name, "vddpu")) 296 if (!sreg->sel && !strcmp(sreg->name, "vddpu"))
297 sreg->sel = 22; 297 sreg->sel = 22;
298 298
299 if (!sreg->sel) { 299 if (!sreg->bypass && !sreg->sel) {
300 dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n"); 300 dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n");
301 return -EINVAL; 301 return -EINVAL;
302 } 302 }
diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
index 321e804aeab0..a1b49a6d538f 100644
--- a/drivers/regulator/max77620-regulator.c
+++ b/drivers/regulator/max77620-regulator.c
@@ -123,6 +123,9 @@ static int max77620_regulator_set_fps_src(struct max77620_regulator *pmic,
123 unsigned int val; 123 unsigned int val;
124 int ret; 124 int ret;
125 125
126 if (!rinfo)
127 return 0;
128
126 switch (fps_src) { 129 switch (fps_src) {
127 case MAX77620_FPS_SRC_0: 130 case MAX77620_FPS_SRC_0:
128 case MAX77620_FPS_SRC_1: 131 case MAX77620_FPS_SRC_1:
@@ -171,6 +174,9 @@ static int max77620_regulator_set_fps_slots(struct max77620_regulator *pmic,
171 int pd = rpdata->active_fps_pd_slot; 174 int pd = rpdata->active_fps_pd_slot;
172 int ret = 0; 175 int ret = 0;
173 176
177 if (!rinfo)
178 return 0;
179
174 if (is_suspend) { 180 if (is_suspend) {
175 pu = rpdata->suspend_fps_pu_slot; 181 pu = rpdata->suspend_fps_pu_slot;
176 pd = rpdata->suspend_fps_pd_slot; 182 pd = rpdata->suspend_fps_pd_slot;
@@ -680,7 +686,6 @@ static struct max77620_regulator_info max77620_regs_info[MAX77620_NUM_REGS] = {
680 RAIL_SD(SD1, sd1, "in-sd1", SD1, 600000, 1550000, 12500, 0x22, SD1), 686 RAIL_SD(SD1, sd1, "in-sd1", SD1, 600000, 1550000, 12500, 0x22, SD1),
681 RAIL_SD(SD2, sd2, "in-sd2", SDX, 600000, 3787500, 12500, 0xFF, NONE), 687 RAIL_SD(SD2, sd2, "in-sd2", SDX, 600000, 3787500, 12500, 0xFF, NONE),
682 RAIL_SD(SD3, sd3, "in-sd3", SDX, 600000, 3787500, 12500, 0xFF, NONE), 688 RAIL_SD(SD3, sd3, "in-sd3", SDX, 600000, 3787500, 12500, 0xFF, NONE),
683 RAIL_SD(SD4, sd4, "in-sd4", SDX, 600000, 3787500, 12500, 0xFF, NONE),
684 689
685 RAIL_LDO(LDO0, ldo0, "in-ldo0-1", N, 800000, 2375000, 25000), 690 RAIL_LDO(LDO0, ldo0, "in-ldo0-1", N, 800000, 2375000, 25000),
686 RAIL_LDO(LDO1, ldo1, "in-ldo0-1", N, 800000, 2375000, 25000), 691 RAIL_LDO(LDO1, ldo1, "in-ldo0-1", N, 800000, 2375000, 25000),
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 56a17ec5b5ef..526bf23dcb49 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -140,6 +140,19 @@ static const struct regulator_ops rpm_smps_ldo_ops = {
140 .enable = rpm_reg_enable, 140 .enable = rpm_reg_enable,
141 .disable = rpm_reg_disable, 141 .disable = rpm_reg_disable,
142 .is_enabled = rpm_reg_is_enabled, 142 .is_enabled = rpm_reg_is_enabled,
143 .list_voltage = regulator_list_voltage_linear_range,
144
145 .get_voltage = rpm_reg_get_voltage,
146 .set_voltage = rpm_reg_set_voltage,
147
148 .set_load = rpm_reg_set_load,
149};
150
151static const struct regulator_ops rpm_smps_ldo_ops_fixed = {
152 .enable = rpm_reg_enable,
153 .disable = rpm_reg_disable,
154 .is_enabled = rpm_reg_is_enabled,
155 .list_voltage = regulator_list_voltage_linear_range,
143 156
144 .get_voltage = rpm_reg_get_voltage, 157 .get_voltage = rpm_reg_get_voltage,
145 .set_voltage = rpm_reg_set_voltage, 158 .set_voltage = rpm_reg_set_voltage,
@@ -247,7 +260,7 @@ static const struct regulator_desc pm8941_nldo = {
247static const struct regulator_desc pm8941_lnldo = { 260static const struct regulator_desc pm8941_lnldo = {
248 .fixed_uV = 1740000, 261 .fixed_uV = 1740000,
249 .n_voltages = 1, 262 .n_voltages = 1,
250 .ops = &rpm_smps_ldo_ops, 263 .ops = &rpm_smps_ldo_ops_fixed,
251}; 264};
252 265
253static const struct regulator_desc pm8941_switch = { 266static const struct regulator_desc pm8941_switch = {
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
index 572816e30095..c139890c1514 100644
--- a/drivers/regulator/tps51632-regulator.c
+++ b/drivers/regulator/tps51632-regulator.c
@@ -94,11 +94,14 @@ static int tps51632_dcdc_set_ramp_delay(struct regulator_dev *rdev,
94 int ramp_delay) 94 int ramp_delay)
95{ 95{
96 struct tps51632_chip *tps = rdev_get_drvdata(rdev); 96 struct tps51632_chip *tps = rdev_get_drvdata(rdev);
97 int bit = ramp_delay/6000; 97 int bit;
98 int ret; 98 int ret;
99 99
100 if (bit) 100 if (ramp_delay == 0)
101 bit--; 101 bit = 0;
102 else
103 bit = DIV_ROUND_UP(ramp_delay, 6000) - 1;
104
102 ret = regmap_write(tps->regmap, TPS51632_SLEW_REGS, BIT(bit)); 105 ret = regmap_write(tps->regmap, TPS51632_SLEW_REGS, BIT(bit));
103 if (ret < 0) 106 if (ret < 0)
104 dev_err(tps->dev, "SLEW reg write failed, err %d\n", ret); 107 dev_err(tps->dev, "SLEW reg write failed, err %d\n", ret);
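
The tps51632 fix replaces a divide-then-decrement that rounded the requested ramp delay down to the previous supported step with an explicit zero case plus DIV_ROUND_UP, so the selected step is always at or above the request. The mapping in isolation:

#include <linux/kernel.h>

/* Sketch: map a requested ramp delay to a one-hot slew-register bit
 * index; 6000 is the per-step size used by the hunk above. */
static unsigned int ramp_delay_to_bit(int ramp_delay)
{
	if (ramp_delay == 0)
		return 0;
	return DIV_ROUND_UP(ramp_delay, 6000) - 1;	/* round up */
}
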
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 9fd48de38a4c..7bc20c5188bc 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1045,6 +1045,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
1045 qeth_l2_set_offline(cgdev); 1045 qeth_l2_set_offline(cgdev);
1046 1046
1047 if (card->dev) { 1047 if (card->dev) {
1048 netif_napi_del(&card->napi);
1048 unregister_netdev(card->dev); 1049 unregister_netdev(card->dev);
1049 card->dev = NULL; 1050 card->dev = NULL;
1050 } 1051 }
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index bcd324e054a9..72934666fedf 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3167,6 +3167,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
3167 qeth_l3_set_offline(cgdev); 3167 qeth_l3_set_offline(cgdev);
3168 3168
3169 if (card->dev) { 3169 if (card->dev) {
3170 netif_napi_del(&card->napi);
3170 unregister_netdev(card->dev); 3171 unregister_netdev(card->dev);
3171 card->dev = NULL; 3172 card->dev = NULL;
3172 } 3173 }
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index d4c285688ce9..3ddc85e6efd6 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -1122,7 +1122,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
1122 } else { 1122 } else {
1123 struct scsi_cmnd *SCp; 1123 struct scsi_cmnd *SCp;
1124 1124
1125 SCp = scsi_host_find_tag(SDp->host, SCSI_NO_TAG); 1125 SCp = SDp->current_cmnd;
1126 if(unlikely(SCp == NULL)) { 1126 if(unlikely(SCp == NULL)) {
1127 sdev_printk(KERN_ERR, SDp, 1127 sdev_printk(KERN_ERR, SDp,
1128 "no saved request for untagged cmd\n"); 1128 "no saved request for untagged cmd\n");
@@ -1826,7 +1826,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
1826 slot->tag, slot); 1826 slot->tag, slot);
1827 } else { 1827 } else {
1828 slot->tag = SCSI_NO_TAG; 1828 slot->tag = SCSI_NO_TAG;
1829 /* must populate current_cmnd for scsi_host_find_tag to work */ 1829 /* save current command for reselection */
1830 SCp->device->current_cmnd = SCp; 1830 SCp->device->current_cmnd = SCp;
1831 } 1831 }
1832 /* sanity check: some of the commands generated by the mid-layer 1832 /* sanity check: some of the commands generated by the mid-layer
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 3408578b08d6..ff41c310c900 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -230,6 +230,7 @@ static struct {
230 {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 230 {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
231 {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, 231 {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
232 {"Promise", "", NULL, BLIST_SPARSELUN}, 232 {"Promise", "", NULL, BLIST_SPARSELUN},
233 {"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES},
233 {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, 234 {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
234 {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024}, 235 {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
235 {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, 236 {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index a8b610eaa0ca..106a6adbd6f1 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1128,7 +1128,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
1128 */ 1128 */
1129void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q) 1129void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
1130{ 1130{
1131 scmd->device->host->host_failed--;
1132 scmd->eh_eflags = 0; 1131 scmd->eh_eflags = 0;
1133 list_move_tail(&scmd->eh_entry, done_q); 1132 list_move_tail(&scmd->eh_entry, done_q);
1134} 1133}
@@ -2227,6 +2226,9 @@ int scsi_error_handler(void *data)
2227 else 2226 else
2228 scsi_unjam_host(shost); 2227 scsi_unjam_host(shost);
2229 2228
2229 /* All scmds have been handled */
2230 shost->host_failed = 0;
2231
2230 /* 2232 /*
2231 * Note - if the above fails completely, the action is to take 2233 * Note - if the above fails completely, the action is to take
2232 * individual devices offline and flush the queue of any 2234 * individual devices offline and flush the queue of any
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index f459dff30512..60bff78e9ead 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2867,10 +2867,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
2867 if (sdkp->opt_xfer_blocks && 2867 if (sdkp->opt_xfer_blocks &&
2868 sdkp->opt_xfer_blocks <= dev_max && 2868 sdkp->opt_xfer_blocks <= dev_max &&
2869 sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && 2869 sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
2870 sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE) 2870 logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
2871 rw_max = q->limits.io_opt = 2871 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
2872 sdkp->opt_xfer_blocks * sdp->sector_size; 2872 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
2873 else 2873 } else
2874 rw_max = BLK_DEF_MAX_SECTORS; 2874 rw_max = BLK_DEF_MAX_SECTORS;
2875 2875
2876 /* Combine with controller limits */ 2876 /* Combine with controller limits */
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 654630bb7d0e..765a6f1ac1b7 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -151,6 +151,11 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo
151 return blocks << (ilog2(sdev->sector_size) - 9); 151 return blocks << (ilog2(sdev->sector_size) - 9);
152} 152}
153 153
154static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
155{
156 return blocks * sdev->sector_size;
157}
158
154/* 159/*
155 * A DIF-capable target device can be formatted with different 160 * A DIF-capable target device can be formatted with different
156 * protection schemes. Currently 0 through 3 are defined: 161 * protection schemes. Currently 0 through 3 are defined:
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index cd89682065b9..1026e180eed7 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -578,7 +578,7 @@ static int rockchip_spi_transfer_one(
578 struct spi_device *spi, 578 struct spi_device *spi,
579 struct spi_transfer *xfer) 579 struct spi_transfer *xfer)
580{ 580{
581 int ret = 1; 581 int ret = 0;
582 struct rockchip_spi *rs = spi_master_get_devdata(master); 582 struct rockchip_spi *rs = spi_master_get_devdata(master);
583 583
584 WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) && 584 WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
@@ -627,6 +627,8 @@ static int rockchip_spi_transfer_one(
627 spi_enable_chip(rs, 1); 627 spi_enable_chip(rs, 1);
628 ret = rockchip_spi_prepare_dma(rs); 628 ret = rockchip_spi_prepare_dma(rs);
629 } 629 }
630 /* successful DMA prepare means the transfer is in progress */
631 ret = ret ? ret : 1;
630 } else { 632 } else {
631 spi_enable_chip(rs, 1); 633 spi_enable_chip(rs, 1);
632 ret = rockchip_spi_pio_transfer(rs); 634 ret = rockchip_spi_pio_transfer(rs);
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index 1ddd9e2309b6..cf007f3b83ec 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -173,13 +173,17 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
173{ 173{
174 struct sun4i_spi *sspi = spi_master_get_devdata(master); 174 struct sun4i_spi *sspi = spi_master_get_devdata(master);
175 unsigned int mclk_rate, div, timeout; 175 unsigned int mclk_rate, div, timeout;
176 unsigned int start, end, tx_time;
176 unsigned int tx_len = 0; 177 unsigned int tx_len = 0;
177 int ret = 0; 178 int ret = 0;
178 u32 reg; 179 u32 reg;
179 180
180 /* We don't support transfer larger than the FIFO */ 181 /* We don't support transfer larger than the FIFO */
181 if (tfr->len > SUN4I_FIFO_DEPTH) 182 if (tfr->len > SUN4I_FIFO_DEPTH)
182 return -EINVAL; 183 return -EMSGSIZE;
184
185 if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH)
186 return -EMSGSIZE;
183 187
184 reinit_completion(&sspi->done); 188 reinit_completion(&sspi->done);
185 sspi->tx_buf = tfr->tx_buf; 189 sspi->tx_buf = tfr->tx_buf;
@@ -269,8 +273,12 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
269 sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len)); 273 sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
270 sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len)); 274 sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
271 275
272 /* Fill the TX FIFO */ 276 /*
273 sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH); 277 * Fill the TX FIFO
278 * Filling the FIFO fully causes timeout for some reason
279 * at least on spi2 on A10s
280 */
281 sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
274 282
275 /* Enable the interrupts */ 283 /* Enable the interrupts */
276 sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC); 284 sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
@@ -279,9 +287,16 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
279 reg = sun4i_spi_read(sspi, SUN4I_CTL_REG); 287 reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
280 sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH); 288 sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
281 289
290 tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
291 start = jiffies;
282 timeout = wait_for_completion_timeout(&sspi->done, 292 timeout = wait_for_completion_timeout(&sspi->done,
283 msecs_to_jiffies(1000)); 293 msecs_to_jiffies(tx_time));
294 end = jiffies;
284 if (!timeout) { 295 if (!timeout) {
296 dev_warn(&master->dev,
297 "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
298 dev_name(&spi->dev), tfr->len, tfr->speed_hz,
299 jiffies_to_msecs(end - start), tx_time);
285 ret = -ETIMEDOUT; 300 ret = -ETIMEDOUT;
286 goto out; 301 goto out;
287 } 302 }
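
Both sunxi SPI hunks (this one and the sun6i copy below) replace the fixed one-second completion wait with one derived from the transfer itself: length times 8 bits times a 2x safety margin, divided by the clock rate in kHz, floored at 100 ms; the new warning also reports how long was actually waited. The computation in isolation (it assumes speed_hz is at least 1 kHz, as the driver does):

#include <linux/jiffies.h>
#include <linux/kernel.h>

/* Sketch: scale a completion timeout to the transfer instead of using
 * a fixed 1000 ms; 2x the theoretical wire time, never below 100 ms. */
static unsigned long xfer_timeout(unsigned int len, unsigned int speed_hz)
{
	unsigned int tx_time_ms = max(len * 8 * 2 / (speed_hz / 1000), 100U);

	return msecs_to_jiffies(tx_time_ms);
}
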
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index 42e2c4bd690a..7fce79a60608 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -160,6 +160,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
160{ 160{
161 struct sun6i_spi *sspi = spi_master_get_devdata(master); 161 struct sun6i_spi *sspi = spi_master_get_devdata(master);
162 unsigned int mclk_rate, div, timeout; 162 unsigned int mclk_rate, div, timeout;
163 unsigned int start, end, tx_time;
163 unsigned int tx_len = 0; 164 unsigned int tx_len = 0;
164 int ret = 0; 165 int ret = 0;
165 u32 reg; 166 u32 reg;
@@ -269,9 +270,16 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
269 reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG); 270 reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
270 sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH); 271 sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
271 272
273 tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
274 start = jiffies;
272 timeout = wait_for_completion_timeout(&sspi->done, 275 timeout = wait_for_completion_timeout(&sspi->done,
273 msecs_to_jiffies(1000)); 276 msecs_to_jiffies(tx_time));
277 end = jiffies;
274 if (!timeout) { 278 if (!timeout) {
279 dev_warn(&master->dev,
280 "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
281 dev_name(&spi->dev), tfr->len, tfr->speed_hz,
282 jiffies_to_msecs(end - start), tx_time);
275 ret = -ETIMEDOUT; 283 ret = -ETIMEDOUT;
276 goto out; 284 goto out;
277 } 285 }
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 443f664534e1..29ea8d2f9824 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -646,6 +646,13 @@ free_master:
646 646
647static int ti_qspi_remove(struct platform_device *pdev) 647static int ti_qspi_remove(struct platform_device *pdev)
648{ 648{
649 struct ti_qspi *qspi = platform_get_drvdata(pdev);
650 int rc;
651
652 rc = spi_master_suspend(qspi->master);
653 if (rc)
654 return rc;
655
649 pm_runtime_put_sync(&pdev->dev); 656 pm_runtime_put_sync(&pdev->dev);
650 pm_runtime_disable(&pdev->dev); 657 pm_runtime_disable(&pdev->dev);
651 658
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index a8f533af9eca..ec12181822e6 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -594,7 +594,7 @@ static ssize_t sca3000_read_frequency(struct device *dev,
594 goto error_ret_mut; 594 goto error_ret_mut;
595 ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL); 595 ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
596 mutex_unlock(&st->lock); 596 mutex_unlock(&st->lock);
597 if (ret) 597 if (ret < 0)
598 goto error_ret; 598 goto error_ret;
599 val = ret; 599 val = ret;
600 if (base_freq > 0) 600 if (base_freq > 0)
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index 825da0769936..9587fa86dc69 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -21,7 +21,7 @@ static int ad7606_spi_read_block(struct device *dev,
21{ 21{
22 struct spi_device *spi = to_spi_device(dev); 22 struct spi_device *spi = to_spi_device(dev);
23 int i, ret; 23 int i, ret;
24 unsigned short *data; 24 unsigned short *data = buf;
25 __be16 *bdata = buf; 25 __be16 *bdata = buf;
26 26
27 ret = spi_read(spi, buf, count * 2); 27 ret = spi_read(spi, buf, count * 2);
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 9f43976f4ef2..170ac980abcb 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -444,10 +444,10 @@ static ssize_t ad5933_store(struct device *dev,
444 st->settling_cycles = val; 444 st->settling_cycles = val;
445 445
446 /* 2x, 4x handling, see datasheet */ 446 /* 2x, 4x handling, see datasheet */
447 if (val > 511) 447 if (val > 1022)
448 val = (val >> 1) | (1 << 9);
449 else if (val > 1022)
450 val = (val >> 2) | (3 << 9); 448 val = (val >> 2) | (3 << 9);
449 else if (val > 511)
450 val = (val >> 1) | (1 << 9);
451 451
452 dat = cpu_to_be16(val); 452 dat = cpu_to_be16(val);
453 ret = ad5933_i2c_write(st->client, 453 ret = ad5933_i2c_write(st->client,
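
The ad5933 change is a pure ordering fix: with val > 511 tested first, the val > 1022 arm could never be reached, so the 4x encoding was dead code. Range checks in an if/else chain have to run from the largest threshold down:

#include <linux/types.h>

/* Sketch: encode settling cycles with the 2x/4x multiplier bits;
 * testing the larger threshold first keeps both arms reachable. */
static u16 encode_settling_cycles(u16 val)
{
	if (val > 1022)
		return (val >> 2) | (3 << 9);	/* 4x multiplier */
	else if (val > 511)
		return (val >> 1) | (1 << 9);	/* 2x multiplier */
	return val;
}
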
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index bbfee53cfcf5..845e49a52430 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -2521,12 +2521,13 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
2521 return 0; 2521 return 0;
2522 2522
2523 failed: 2523 failed:
2524 if (ni) 2524 if (ni) {
2525 lnet_ni_decref(ni); 2525 lnet_ni_decref(ni);
2526 rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
2527 rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
2528 }
2526 2529
2527 rej.ibr_version = version; 2530 rej.ibr_version = version;
2528 rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
2529 rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
2530 kiblnd_reject(cmid, &rej); 2531 kiblnd_reject(cmid, &rej);
2531 2532
2532 return -ECONNREFUSED; 2533 return -ECONNREFUSED;
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index c17870cddb5b..fbce1f7e68ca 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -102,7 +102,7 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
102 if (!efuseTbl) 102 if (!efuseTbl)
103 return; 103 return;
104 104
105 eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(*eFuseWord)); 105 eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
106 if (!eFuseWord) { 106 if (!eFuseWord) {
107 DBG_88E("%s: alloc eFuseWord fail!\n", __func__); 107 DBG_88E("%s: alloc eFuseWord fail!\n", __func__);
108 goto eFuseWord_failed; 108 goto eFuseWord_failed;
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 87ea3b844951..363f3a34ddce 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -2072,7 +2072,8 @@ void rtl8188eu_set_hal_ops(struct adapter *adapt)
2072{ 2072{
2073 struct hal_ops *halfunc = &adapt->HalFunc; 2073 struct hal_ops *halfunc = &adapt->HalFunc;
2074 2074
2075 adapt->HalData = kzalloc(sizeof(*adapt->HalData), GFP_KERNEL); 2075
2076 adapt->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
2076 if (!adapt->HalData) 2077 if (!adapt->HalData)
2077 DBG_88E("cant not alloc memory for HAL DATA\n"); 2078 DBG_88E("cant not alloc memory for HAL DATA\n");
2078 2079
diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
index 0da559d929bc..d0ba3778990e 100644
--- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
@@ -1256,10 +1256,15 @@ void rtw_cfg80211_indicate_scan_done(struct rtw_wdev_priv *pwdev_priv,
1256 DBG_8723A("%s with scan req\n", __func__); 1256 DBG_8723A("%s with scan req\n", __func__);
1257 1257
1258 if (pwdev_priv->scan_request->wiphy != 1258 if (pwdev_priv->scan_request->wiphy !=
1259 pwdev_priv->rtw_wdev->wiphy) 1259 pwdev_priv->rtw_wdev->wiphy) {
1260 DBG_8723A("error wiphy compare\n"); 1260 DBG_8723A("error wiphy compare\n");
1261 else 1261 } else {
1262 cfg80211_scan_done(pwdev_priv->scan_request, aborted); 1262 struct cfg80211_scan_info info = {
1263 .aborted = aborted,
1264 };
1265
1266 cfg80211_scan_done(pwdev_priv->scan_request, &info);
1267 }
1263 1268
1264 pwdev_priv->scan_request = NULL; 1269 pwdev_priv->scan_request = NULL;
1265 } else { 1270 } else {
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 51aff4ff7d7c..a0d8e22e575b 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -454,7 +454,11 @@ static void CfgScanResult(enum scan_event scan_event,
454 mutex_lock(&priv->scan_req_lock); 454 mutex_lock(&priv->scan_req_lock);
455 455
456 if (priv->pstrScanReq) { 456 if (priv->pstrScanReq) {
457 cfg80211_scan_done(priv->pstrScanReq, false); 457 struct cfg80211_scan_info info = {
458 .aborted = false,
459 };
460
461 cfg80211_scan_done(priv->pstrScanReq, &info);
458 priv->u32RcvdChCount = 0; 462 priv->u32RcvdChCount = 0;
459 priv->bCfgScanning = false; 463 priv->bCfgScanning = false;
460 priv->pstrScanReq = NULL; 464 priv->pstrScanReq = NULL;
@@ -464,10 +468,14 @@ static void CfgScanResult(enum scan_event scan_event,
464 mutex_lock(&priv->scan_req_lock); 468 mutex_lock(&priv->scan_req_lock);
465 469
466 if (priv->pstrScanReq) { 470 if (priv->pstrScanReq) {
471 struct cfg80211_scan_info info = {
472 .aborted = false,
473 };
474
467 update_scan_time(); 475 update_scan_time();
468 refresh_scan(priv, 1, false); 476 refresh_scan(priv, 1, false);
469 477
470 cfg80211_scan_done(priv->pstrScanReq, false); 478 cfg80211_scan_done(priv->pstrScanReq, &info);
471 priv->bCfgScanning = false; 479 priv->bCfgScanning = false;
472 priv->pstrScanReq = NULL; 480 priv->pstrScanReq = NULL;
473 } 481 }
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index a6e6fb9f42e1..f46dfe6b24e8 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -338,6 +338,8 @@ static int prism2_scan(struct wiphy *wiphy,
338 struct p80211msg_dot11req_scan msg1; 338 struct p80211msg_dot11req_scan msg1;
339 struct p80211msg_dot11req_scan_results msg2; 339 struct p80211msg_dot11req_scan_results msg2;
340 struct cfg80211_bss *bss; 340 struct cfg80211_bss *bss;
341 struct cfg80211_scan_info info = {};
342
341 int result; 343 int result;
342 int err = 0; 344 int err = 0;
343 int numbss = 0; 345 int numbss = 0;
@@ -440,7 +442,8 @@ static int prism2_scan(struct wiphy *wiphy,
440 err = prism2_result2err(msg2.resultcode.data); 442 err = prism2_result2err(msg2.resultcode.data);
441 443
442exit: 444exit:
443 cfg80211_scan_done(request, err ? 1 : 0); 445 info.aborted = !!(err);
446 cfg80211_scan_done(request, &info);
444 priv->scan_request = NULL; 447 priv->scan_request = NULL;
445 return err; 448 return err;
446} 449}
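
The three staging hunks above (rtl8723au, wilc1000, wlan-ng) all track the same core API change: cfg80211_scan_done() now takes a struct cfg80211_scan_info rather than a bare aborted flag. The call-site pattern they converge on:

#include <net/cfg80211.h>

/* Sketch: completing a scan request under the struct-based API. */
static void finish_scan(struct cfg80211_scan_request *req, bool aborted)
{
	struct cfg80211_scan_info info = {
		.aborted = aborted,
	};

	cfg80211_scan_done(req, &info);
}
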
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 6ceac4f2d4b2..5b4b47ed948b 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -857,14 +857,6 @@ __cpufreq_cooling_register(struct device_node *np,
857 goto free_power_table; 857 goto free_power_table;
858 } 858 }
859 859
860 snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
861 cpufreq_dev->id);
862
863 cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
864 &cpufreq_cooling_ops);
865 if (IS_ERR(cool_dev))
866 goto remove_idr;
867
868 /* Fill freq-table in descending order of frequencies */ 860 /* Fill freq-table in descending order of frequencies */
869 for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) { 861 for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
870 freq = find_next_max(table, freq); 862 freq = find_next_max(table, freq);
@@ -877,6 +869,14 @@ __cpufreq_cooling_register(struct device_node *np,
877 pr_debug("%s: freq:%u KHz\n", __func__, freq); 869 pr_debug("%s: freq:%u KHz\n", __func__, freq);
878 } 870 }
879 871
872 snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
873 cpufreq_dev->id);
874
875 cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
876 &cpufreq_cooling_ops);
877 if (IS_ERR(cool_dev))
878 goto remove_idr;
879
880 cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0]; 880 cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
881 cpufreq_dev->cool_dev = cool_dev; 881 cpufreq_dev->cool_dev = cool_dev;
882 882
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index f856c4544eea..51e0d32883ba 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -667,8 +667,11 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
667 fsi = tty->driver_data; 667 fsi = tty->driver_data;
668 else 668 else
669 fsi = tty->link->driver_data; 669 fsi = tty->link->driver_data;
670 devpts_kill_index(fsi, tty->index); 670
671 devpts_release(fsi); 671 if (fsi) {
672 devpts_kill_index(fsi, tty->index);
673 devpts_release(fsi);
674 }
672} 675}
673 676
674static const struct tty_operations ptm_unix98_ops = { 677static const struct tty_operations ptm_unix98_ops = {
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index dc125322f48f..5b0fe97c46ca 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -750,6 +750,7 @@ static void visual_init(struct vc_data *vc, int num, int init)
750 vc->vc_complement_mask = 0; 750 vc->vc_complement_mask = 0;
751 vc->vc_can_do_color = 0; 751 vc->vc_can_do_color = 0;
752 vc->vc_panic_force_write = false; 752 vc->vc_panic_force_write = false;
753 vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
753 vc->vc_sw->con_init(vc, init); 754 vc->vc_sw->con_init(vc, init);
754 if (!vc->vc_complement_mask) 755 if (!vc->vc_complement_mask)
755 vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800; 756 vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
index 9059b7dc185e..2f537bbdda09 100644
--- a/drivers/usb/common/usb-otg-fsm.c
+++ b/drivers/usb/common/usb-otg-fsm.c
@@ -21,6 +21,7 @@
21 * 675 Mass Ave, Cambridge, MA 02139, USA. 21 * 675 Mass Ave, Cambridge, MA 02139, USA.
22 */ 22 */
23 23
24#include <linux/module.h>
24#include <linux/kernel.h> 25#include <linux/kernel.h>
25#include <linux/types.h> 26#include <linux/types.h>
26#include <linux/mutex.h> 27#include <linux/mutex.h>
@@ -450,3 +451,4 @@ int otg_statemachine(struct otg_fsm *fsm)
450 return fsm->state_changed; 451 return fsm->state_changed;
451} 452}
452EXPORT_SYMBOL_GPL(otg_statemachine); 453EXPORT_SYMBOL_GPL(otg_statemachine);
454MODULE_LICENSE("GPL");
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 34b837ae1ed7..d2e3f655c26f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2598,26 +2598,23 @@ EXPORT_SYMBOL_GPL(usb_create_hcd);
2598 * Don't deallocate the bandwidth_mutex until the last shared usb_hcd is 2598 * Don't deallocate the bandwidth_mutex until the last shared usb_hcd is
2599 * deallocated. 2599 * deallocated.
2600 * 2600 *
2601 * Make sure to only deallocate the bandwidth_mutex when the primary HCD is 2601 * Make sure to deallocate the bandwidth_mutex only when the last HCD is
2602 * freed. When hcd_release() is called for either hcd in a peer set 2602 * freed. When hcd_release() is called for either hcd in a peer set,
2603 * invalidate the peer's ->shared_hcd and ->primary_hcd pointers to 2603 * invalidate the peer's ->shared_hcd and ->primary_hcd pointers.
2604 * block new peering attempts
2605 */ 2604 */
2606static void hcd_release(struct kref *kref) 2605static void hcd_release(struct kref *kref)
2607{ 2606{
2608 struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref); 2607 struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
2609 2608
2610 mutex_lock(&usb_port_peer_mutex); 2609 mutex_lock(&usb_port_peer_mutex);
2611 if (usb_hcd_is_primary_hcd(hcd)) {
2612 kfree(hcd->address0_mutex);
2613 kfree(hcd->bandwidth_mutex);
2614 }
2615 if (hcd->shared_hcd) { 2610 if (hcd->shared_hcd) {
2616 struct usb_hcd *peer = hcd->shared_hcd; 2611 struct usb_hcd *peer = hcd->shared_hcd;
2617 2612
2618 peer->shared_hcd = NULL; 2613 peer->shared_hcd = NULL;
2619 if (peer->primary_hcd == hcd) 2614 peer->primary_hcd = NULL;
2620 peer->primary_hcd = NULL; 2615 } else {
2616 kfree(hcd->address0_mutex);
2617 kfree(hcd->bandwidth_mutex);
2621 } 2618 }
2622 mutex_unlock(&usb_port_peer_mutex); 2619 mutex_unlock(&usb_port_peer_mutex);
2623 kfree(hcd); 2620 kfree(hcd);
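
The hcd_release() rework fixes an ownership bug: the mutexes were freed whenever the primary HCD was released, even if its shared peer was still alive and still dereferencing them. The new rule is the usual one for pairwise-shared state: whichever side is released last frees it. Schematically (types and names hypothetical, not from the USB core):

#include <linux/mutex.h>
#include <linux/slab.h>

struct peered {
	struct peered *peer;		/* the other half, or NULL */
	struct mutex *shared_lock;	/* allocated once per pair */
};

/* Sketch: the last one out frees the shared state. */
static void peered_release(struct peered *p)
{
	if (p->peer)
		p->peer->peer = NULL;	/* peer survives and now owns it */
	else
		kfree(p->shared_lock);	/* we are the last holder */
	kfree(p);
}
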
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 6dc810bce295..944a6dca0fcb 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = {
44 /* Creative SB Audigy 2 NX */ 44 /* Creative SB Audigy 2 NX */
45 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, 45 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
46 46
47 /* USB3503 */
48 { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
49
47 /* Microsoft Wireless Laser Mouse 6000 Receiver */ 50 /* Microsoft Wireless Laser Mouse 6000 Receiver */
48 { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME }, 51 { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
49 52
@@ -173,6 +176,10 @@ static const struct usb_device_id usb_quirk_list[] = {
173 /* MAYA44USB sound device */ 176 /* MAYA44USB sound device */
174 { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME }, 177 { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
175 178
179 /* ASUS Base Station(T100) */
180 { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
181 USB_QUIRK_IGNORE_REMOTE_WAKEUP },
182
176 /* Action Semiconductor flash disk */ 183 /* Action Semiconductor flash disk */
177 { USB_DEVICE(0x10d6, 0x2200), .driver_info = 184 { USB_DEVICE(0x10d6, 0x2200), .driver_info =
178 USB_QUIRK_STRING_FETCH_255 }, 185 USB_QUIRK_STRING_FETCH_255 },
@@ -188,26 +195,22 @@ static const struct usb_device_id usb_quirk_list[] = {
188 { USB_DEVICE(0x1908, 0x1315), .driver_info = 195 { USB_DEVICE(0x1908, 0x1315), .driver_info =
189 USB_QUIRK_HONOR_BNUMINTERFACES }, 196 USB_QUIRK_HONOR_BNUMINTERFACES },
190 197
191 /* INTEL VALUE SSD */
192 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
193
194 /* USB3503 */
195 { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
196
197 /* ASUS Base Station(T100) */
198 { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
199 USB_QUIRK_IGNORE_REMOTE_WAKEUP },
200
201 /* Protocol and OTG Electrical Test Device */ 198 /* Protocol and OTG Electrical Test Device */
202 { USB_DEVICE(0x1a0a, 0x0200), .driver_info = 199 { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
203 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, 200 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
204 201
202 /* Acer C120 LED Projector */
203 { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
204
205 /* Blackmagic Design Intensity Shuttle */ 205 /* Blackmagic Design Intensity Shuttle */
206 { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM }, 206 { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
207 207
208 /* Blackmagic Design UltraStudio SDI */ 208 /* Blackmagic Design UltraStudio SDI */
209 { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM }, 209 { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
210 210
211 /* INTEL VALUE SSD */
212 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
213
211 { } /* terminating entry must be last */ 214 { } /* terminating entry must be last */
212}; 215};
213 216
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 3c58d633ce80..dec0b21fc626 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -64,6 +64,17 @@
64 DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt), \ 64 DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt), \
65 dev_name(hsotg->dev), ##__VA_ARGS__) 65 dev_name(hsotg->dev), ##__VA_ARGS__)
66 66
67#ifdef CONFIG_MIPS
68/*
 69 * There are some MIPS machines that can run in either big-endian
 70 * or little-endian mode and that access the dwc2 registers without
 71 * a byteswap in either mode.
72 * Unlike other architectures, MIPS apparently does not require a
73 * barrier before the __raw_writel() to synchronize with DMA but does
74 * require the barrier after the __raw_writel() to serialize a set of
75 * writes. This set of operations was added specifically for MIPS and
76 * should only be used there.
77 */
67static inline u32 dwc2_readl(const void __iomem *addr) 78static inline u32 dwc2_readl(const void __iomem *addr)
68{ 79{
69 u32 value = __raw_readl(addr); 80 u32 value = __raw_readl(addr);
@@ -90,6 +101,22 @@ static inline void dwc2_writel(u32 value, void __iomem *addr)
90 pr_info("INFO:: wrote %08x to %p\n", value, addr); 101 pr_info("INFO:: wrote %08x to %p\n", value, addr);
91#endif 102#endif
92} 103}
104#else
 105/* Normal architectures just use readl/writel */
106static inline u32 dwc2_readl(const void __iomem *addr)
107{
108 return readl(addr);
109}
110
111static inline void dwc2_writel(u32 value, void __iomem *addr)
112{
113 writel(value, addr);
114
115#ifdef DWC2_LOG_WRITES
116 pr_info("info:: wrote %08x to %p\n", value, addr);
117#endif
118}
119#endif
93 120
94/* Maximum number of Endpoints/HostChannels */ 121/* Maximum number of Endpoints/HostChannels */
95#define MAX_EPS_CHANNELS 16 122#define MAX_EPS_CHANNELS 16
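
Taken together, the two halves of this hunk give dwc2 a per-architecture register accessor pair. The sketch below condenses the result; it assumes mb() as the barrier (the hunk only states that a barrier must follow __raw_writel() on MIPS) and omits the DWC2_LOG_WRITES logging:

	#include <linux/io.h>

	#ifdef CONFIG_MIPS
	static inline u32 dwc2_readl(const void __iomem *addr)
	{
		u32 value = __raw_readl(addr);	/* native endianness, no byteswap */

		mb();				/* assumed barrier; keeps IO ordered */
		return value;
	}

	static inline void dwc2_writel(u32 value, void __iomem *addr)
	{
		__raw_writel(value, addr);
		mb();				/* MIPS: barrier *after* the write */
	}
	#else
	static inline u32 dwc2_readl(const void __iomem *addr)
	{
		return readl(addr);		/* readl/writel imply the barriers */
	}

	static inline void dwc2_writel(u32 value, void __iomem *addr)
	{
		writel(value, addr);
	}
	#endif
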
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 4c5e3005e1dc..26cf09d0fe3c 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1018,7 +1018,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
1018 return 1; 1018 return 1;
1019} 1019}
1020 1020
1021static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value); 1021static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
1022 1022
1023/** 1023/**
1024 * get_ep_head - return the first request on the endpoint 1024 * get_ep_head - return the first request on the endpoint
@@ -1094,7 +1094,7 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
1094 case USB_ENDPOINT_HALT: 1094 case USB_ENDPOINT_HALT:
1095 halted = ep->halted; 1095 halted = ep->halted;
1096 1096
1097 dwc2_hsotg_ep_sethalt(&ep->ep, set); 1097 dwc2_hsotg_ep_sethalt(&ep->ep, set, true);
1098 1098
1099 ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0); 1099 ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
1100 if (ret) { 1100 if (ret) {
@@ -2948,8 +2948,13 @@ static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
2948 * dwc2_hsotg_ep_sethalt - set halt on a given endpoint 2948 * dwc2_hsotg_ep_sethalt - set halt on a given endpoint
2949 * @ep: The endpoint to set halt. 2949 * @ep: The endpoint to set halt.
2950 * @value: Set or unset the halt. 2950 * @value: Set or unset the halt.
2951 * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
2952 * the endpoint is busy processing requests.
2953 *
 2954 * We need to stall the endpoint immediately if the request comes from the
 2955 * SetFeature protocol command handler.
2951 */ 2956 */
2952static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value) 2957static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
2953{ 2958{
2954 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 2959 struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
2955 struct dwc2_hsotg *hs = hs_ep->parent; 2960 struct dwc2_hsotg *hs = hs_ep->parent;
@@ -2969,6 +2974,17 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value)
2969 return 0; 2974 return 0;
2970 } 2975 }
2971 2976
2977 if (hs_ep->isochronous) {
2978 dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
2979 return -EINVAL;
2980 }
2981
2982 if (!now && value && !list_empty(&hs_ep->queue)) {
2983 dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
2984 ep->name);
2985 return -EAGAIN;
2986 }
2987
2972 if (hs_ep->dir_in) { 2988 if (hs_ep->dir_in) {
2973 epreg = DIEPCTL(index); 2989 epreg = DIEPCTL(index);
2974 epctl = dwc2_readl(hs->regs + epreg); 2990 epctl = dwc2_readl(hs->regs + epreg);
@@ -3020,7 +3036,7 @@ static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
3020 int ret = 0; 3036 int ret = 0;
3021 3037
3022 spin_lock_irqsave(&hs->lock, flags); 3038 spin_lock_irqsave(&hs->lock, flags);
3023 ret = dwc2_hsotg_ep_sethalt(ep, value); 3039 ret = dwc2_hsotg_ep_sethalt(ep, value, false);
3024 spin_unlock_irqrestore(&hs->lock, flags); 3040 spin_unlock_irqrestore(&hs->lock, flags);
3025 3041
3026 return ret; 3042 return ret;
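
For function drivers, the visible change is the new failure mode: through the usb_ep_set_halt() path (which lands here with now = false) a bulk or interrupt endpoint with queued requests now reports -EAGAIN instead of stalling mid-transfer, and isochronous endpoints are rejected outright. A hypothetical caller would handle it like this (a sketch, not from the patch):

	ret = usb_ep_set_halt(ep);	/* reaches sethalt with now = false */
	if (ret == -EAGAIN) {
		/* requests still queued; retry once the queue drains */
	} else if (ret == -EINVAL) {
		/* isochronous endpoints cannot be halted */
	}
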
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 7ddf9449a063..654050684f4f 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -402,6 +402,7 @@
402#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f) 402#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
403#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F) 403#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F)
404#define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11) 404#define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11)
405#define DWC3_DEPCMD_CLEARPENDIN (1 << 11)
405#define DWC3_DEPCMD_CMDACT (1 << 10) 406#define DWC3_DEPCMD_CMDACT (1 << 10)
406#define DWC3_DEPCMD_CMDIOC (1 << 8) 407#define DWC3_DEPCMD_CMDIOC (1 << 8)
407 408
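
DWC3_DEPCMD_CLEARPENDIN deliberately reuses bit 11, already defined as DWC3_DEPCMD_HIPRI_FORCERM: the bit's meaning depends on which endpoint command it accompanies (HiPri/ForceRM for transfer commands, ClearPendIN for Clear Stall), so the overlap is harmless. A sketch of the guarded use, assuming the DWC3_REVISION_260A constant sits alongside the file's other revision defines:

	u32 cmd = DWC3_DEPCMD_CLEARSTALL;

	/* Bit 11 means ClearPendIN only when attached to a Clear Stall
	 * command, and only IN endpoints on >= 2.60a cores need it. */
	if (dep->direction && dwc->revision >= DWC3_REVISION_260A)
		cmd |= DWC3_DEPCMD_CLEARPENDIN;
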
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index dd5cb5577dca..2f1fb7e7aa54 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -128,12 +128,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
128 128
129 platform_set_drvdata(pdev, exynos); 129 platform_set_drvdata(pdev, exynos);
130 130
131 ret = dwc3_exynos_register_phys(exynos);
132 if (ret) {
133 dev_err(dev, "couldn't register PHYs\n");
134 return ret;
135 }
136
137 exynos->dev = dev; 131 exynos->dev = dev;
138 132
139 exynos->clk = devm_clk_get(dev, "usbdrd30"); 133 exynos->clk = devm_clk_get(dev, "usbdrd30");
@@ -183,20 +177,29 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
183 goto err3; 177 goto err3;
184 } 178 }
185 179
180 ret = dwc3_exynos_register_phys(exynos);
181 if (ret) {
182 dev_err(dev, "couldn't register PHYs\n");
183 goto err4;
184 }
185
186 if (node) { 186 if (node) {
187 ret = of_platform_populate(node, NULL, NULL, dev); 187 ret = of_platform_populate(node, NULL, NULL, dev);
188 if (ret) { 188 if (ret) {
189 dev_err(dev, "failed to add dwc3 core\n"); 189 dev_err(dev, "failed to add dwc3 core\n");
190 goto err4; 190 goto err5;
191 } 191 }
192 } else { 192 } else {
193 dev_err(dev, "no device node, failed to add dwc3 core\n"); 193 dev_err(dev, "no device node, failed to add dwc3 core\n");
194 ret = -ENODEV; 194 ret = -ENODEV;
195 goto err4; 195 goto err5;
196 } 196 }
197 197
198 return 0; 198 return 0;
199 199
200err5:
201 platform_device_unregister(exynos->usb2_phy);
202 platform_device_unregister(exynos->usb3_phy);
200err4: 203err4:
201 regulator_disable(exynos->vdd10); 204 regulator_disable(exynos->vdd10);
202err3: 205err3:
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 5c0adb9c6fb2..89a2f712fdfe 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -129,12 +129,18 @@ static int st_dwc3_drd_init(struct st_dwc3 *dwc3_data)
129 switch (dwc3_data->dr_mode) { 129 switch (dwc3_data->dr_mode) {
130 case USB_DR_MODE_PERIPHERAL: 130 case USB_DR_MODE_PERIPHERAL:
131 131
132 val &= ~(USB3_FORCE_VBUSVALID | USB3_DELAY_VBUSVALID 132 val &= ~(USB3_DELAY_VBUSVALID
133 | USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3) 133 | USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3)
134 | USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2 134 | USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2
135 | USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2); 135 | USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2);
136 136
137 val |= USB3_DEVICE_NOT_HOST; 137 /*
 138 * When USB3_PORT2_FORCE_VBUSVALID is '1' and
 139 * USB3_PORT2_DEVICE_NOT_HOST is '1', the VBUSVLDEXT2 input
 140 * of the pico PHY is forced to 1.
141 */
142
143 val |= USB3_DEVICE_NOT_HOST | USB3_FORCE_VBUSVALID;
138 break; 144 break;
139 145
140 case USB_DR_MODE_HOST: 146 case USB_DR_MODE_HOST:
@@ -227,7 +233,8 @@ static int st_dwc3_probe(struct platform_device *pdev)
227 dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n", 233 dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n",
228 dwc3_data->glue_base, dwc3_data->syscfg_reg_off); 234 dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
229 235
230 dwc3_data->rstc_pwrdn = devm_reset_control_get(dev, "powerdown"); 236 dwc3_data->rstc_pwrdn =
237 devm_reset_control_get_exclusive(dev, "powerdown");
231 if (IS_ERR(dwc3_data->rstc_pwrdn)) { 238 if (IS_ERR(dwc3_data->rstc_pwrdn)) {
232 dev_err(&pdev->dev, "could not get power controller\n"); 239 dev_err(&pdev->dev, "could not get power controller\n");
233 ret = PTR_ERR(dwc3_data->rstc_pwrdn); 240 ret = PTR_ERR(dwc3_data->rstc_pwrdn);
@@ -237,7 +244,8 @@ static int st_dwc3_probe(struct platform_device *pdev)
237 /* Manage PowerDown */ 244 /* Manage PowerDown */
238 reset_control_deassert(dwc3_data->rstc_pwrdn); 245 reset_control_deassert(dwc3_data->rstc_pwrdn);
239 246
240 dwc3_data->rstc_rst = devm_reset_control_get(dev, "softreset"); 247 dwc3_data->rstc_rst =
248 devm_reset_control_get_shared(dev, "softreset");
241 if (IS_ERR(dwc3_data->rstc_rst)) { 249 if (IS_ERR(dwc3_data->rstc_rst)) {
242 dev_err(&pdev->dev, "could not get reset controller\n"); 250 dev_err(&pdev->dev, "could not get reset controller\n");
243 ret = PTR_ERR(dwc3_data->rstc_rst); 251 ret = PTR_ERR(dwc3_data->rstc_rst);
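
The switch from plain devm_reset_control_get() to the explicit _exclusive/_shared variants encodes ownership: "powerdown" belongs to this glue alone, while "softreset" is shared with other IP blocks on the same SoC. A sketch of the contract, assuming the reset framework's refcounted semantics for shared lines:

	#include <linux/reset.h>

	/* Exclusive: we are the only consumer and may assert at will. */
	pwr = devm_reset_control_get_exclusive(dev, "powerdown");

	/* Shared: deassert is refcounted across consumers, and the line is
	 * only re-asserted once every consumer has asserted it again, so no
	 * single driver can reset the block out from under the others. */
	rst = devm_reset_control_get_shared(dev, "softreset");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_deassert(rst);	/* bring the block out of reset */
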
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 9a7d0bd15dc3..07248ff1be5c 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -347,6 +347,28 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
347 return ret; 347 return ret;
348} 348}
349 349
350static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
351{
352 struct dwc3 *dwc = dep->dwc;
353 struct dwc3_gadget_ep_cmd_params params;
354 u32 cmd = DWC3_DEPCMD_CLEARSTALL;
355
356 /*
357 * As of core revision 2.60a the recommended programming model
358 * is to set the ClearPendIN bit when issuing a Clear Stall EP
359 * command for IN endpoints. This is to prevent an issue where
360 * some (non-compliant) hosts may not send ACK TPs for pending
361 * IN transfers due to a mishandled error condition. Synopsys
362 * STAR 9000614252.
363 */
364 if (dep->direction && (dwc->revision >= DWC3_REVISION_260A))
365 cmd |= DWC3_DEPCMD_CLEARPENDIN;
366
367 memset(&params, 0, sizeof(params));
368
369 return dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
370}
371
350static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, 372static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
351 struct dwc3_trb *trb) 373 struct dwc3_trb *trb)
352{ 374{
@@ -1314,8 +1336,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1314 else 1336 else
1315 dep->flags |= DWC3_EP_STALL; 1337 dep->flags |= DWC3_EP_STALL;
1316 } else { 1338 } else {
1317 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1339 ret = dwc3_send_clear_stall_ep_cmd(dep);
1318 DWC3_DEPCMD_CLEARSTALL, &params);
1319 if (ret) 1340 if (ret)
1320 dev_err(dwc->dev, "failed to clear STALL on %s\n", 1341 dev_err(dwc->dev, "failed to clear STALL on %s\n",
1321 dep->name); 1342 dep->name);
@@ -2247,7 +2268,6 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2247 2268
2248 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { 2269 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2249 struct dwc3_ep *dep; 2270 struct dwc3_ep *dep;
2250 struct dwc3_gadget_ep_cmd_params params;
2251 int ret; 2271 int ret;
2252 2272
2253 dep = dwc->eps[epnum]; 2273 dep = dwc->eps[epnum];
@@ -2259,9 +2279,7 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2259 2279
2260 dep->flags &= ~DWC3_EP_STALL; 2280 dep->flags &= ~DWC3_EP_STALL;
2261 2281
2262 memset(&params, 0, sizeof(params)); 2282 ret = dwc3_send_clear_stall_ep_cmd(dep);
2263 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2264 DWC3_DEPCMD_CLEARSTALL, &params);
2265 WARN_ON_ONCE(ret); 2283 WARN_ON_ONCE(ret);
2266 } 2284 }
2267} 2285}
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index d67de0d22a2b..eb648485a58c 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1868,14 +1868,19 @@ unknown:
1868 } 1868 }
1869 break; 1869 break;
1870 } 1870 }
1871 req->length = value; 1871
1872 req->context = cdev; 1872 if (value >= 0) {
1873 req->zero = value < w_length; 1873 req->length = value;
1874 value = composite_ep0_queue(cdev, req, GFP_ATOMIC); 1874 req->context = cdev;
1875 if (value < 0) { 1875 req->zero = value < w_length;
1876 DBG(cdev, "ep_queue --> %d\n", value); 1876 value = composite_ep0_queue(cdev, req,
1877 req->status = 0; 1877 GFP_ATOMIC);
1878 composite_setup_complete(gadget->ep0, req); 1878 if (value < 0) {
1879 DBG(cdev, "ep_queue --> %d\n", value);
1880 req->status = 0;
1881 composite_setup_complete(gadget->ep0,
1882 req);
1883 }
1879 } 1884 }
1880 return value; 1885 return value;
1881 } 1886 }
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index b6f60ca8a035..70cf3477f951 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1401,6 +1401,7 @@ static const struct usb_gadget_driver configfs_driver_template = {
1401 .owner = THIS_MODULE, 1401 .owner = THIS_MODULE,
1402 .name = "configfs-gadget", 1402 .name = "configfs-gadget",
1403 }, 1403 },
1404 .match_existing_only = 1,
1404}; 1405};
1405 1406
1406static struct config_group *gadgets_make( 1407static struct config_group *gadgets_make(
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 73515d54e1cc..cc33d2667408 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2051,7 +2051,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2051 2051
2052 if (len < sizeof(*d) || 2052 if (len < sizeof(*d) ||
2053 d->bFirstInterfaceNumber >= ffs->interfaces_count || 2053 d->bFirstInterfaceNumber >= ffs->interfaces_count ||
2054 d->Reserved1) 2054 !d->Reserved1)
2055 return -EINVAL; 2055 return -EINVAL;
2056 for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) 2056 for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
2057 if (d->Reserved2[i]) 2057 if (d->Reserved2[i])
@@ -2729,6 +2729,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
2729 func->ffs->ss_descs_count; 2729 func->ffs->ss_descs_count;
2730 2730
2731 int fs_len, hs_len, ss_len, ret, i; 2731 int fs_len, hs_len, ss_len, ret, i;
2732 struct ffs_ep *eps_ptr;
2732 2733
2733 /* Make it a single chunk, less management later on */ 2734 /* Make it a single chunk, less management later on */
2734 vla_group(d); 2735 vla_group(d);
@@ -2777,12 +2778,9 @@ static int _ffs_func_bind(struct usb_configuration *c,
2777 ffs->raw_descs_length); 2778 ffs->raw_descs_length);
2778 2779
2779 memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz); 2780 memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
2780 for (ret = ffs->eps_count; ret; --ret) { 2781 eps_ptr = vla_ptr(vlabuf, d, eps);
2781 struct ffs_ep *ptr; 2782 for (i = 0; i < ffs->eps_count; i++)
2782 2783 eps_ptr[i].num = -1;
2783 ptr = vla_ptr(vlabuf, d, eps);
2784 ptr[ret].num = -1;
2785 }
2786 2784
2787 /* Save pointers 2785 /* Save pointers
2788 * d_eps == vlabuf, func->eps used to kfree vlabuf later 2786 * d_eps == vlabuf, func->eps used to kfree vlabuf later
@@ -2851,7 +2849,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
2851 goto error; 2849 goto error;
2852 2850
2853 func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table); 2851 func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
2854 if (c->cdev->use_os_string) 2852 if (c->cdev->use_os_string) {
2855 for (i = 0; i < ffs->interfaces_count; ++i) { 2853 for (i = 0; i < ffs->interfaces_count; ++i) {
2856 struct usb_os_desc *desc; 2854 struct usb_os_desc *desc;
2857 2855
@@ -2862,13 +2860,15 @@ static int _ffs_func_bind(struct usb_configuration *c,
2862 vla_ptr(vlabuf, d, ext_compat) + i * 16; 2860 vla_ptr(vlabuf, d, ext_compat) + i * 16;
2863 INIT_LIST_HEAD(&desc->ext_prop); 2861 INIT_LIST_HEAD(&desc->ext_prop);
2864 } 2862 }
2865 ret = ffs_do_os_descs(ffs->ms_os_descs_count, 2863 ret = ffs_do_os_descs(ffs->ms_os_descs_count,
2866 vla_ptr(vlabuf, d, raw_descs) + 2864 vla_ptr(vlabuf, d, raw_descs) +
2867 fs_len + hs_len + ss_len, 2865 fs_len + hs_len + ss_len,
2868 d_raw_descs__sz - fs_len - hs_len - ss_len, 2866 d_raw_descs__sz - fs_len - hs_len -
2869 __ffs_func_bind_do_os_desc, func); 2867 ss_len,
2870 if (unlikely(ret < 0)) 2868 __ffs_func_bind_do_os_desc, func);
2871 goto error; 2869 if (unlikely(ret < 0))
2870 goto error;
2871 }
2872 func->function.os_desc_n = 2872 func->function.os_desc_n =
2873 c->cdev->use_os_string ? ffs->interfaces_count : 0; 2873 c->cdev->use_os_string ? ffs->interfaces_count : 0;
2874 2874
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index c45104e3a64b..64706a789580 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -161,14 +161,6 @@ static struct usb_endpoint_descriptor hs_ep_out_desc = {
161 .wMaxPacketSize = cpu_to_le16(512) 161 .wMaxPacketSize = cpu_to_le16(512)
162}; 162};
163 163
164static struct usb_qualifier_descriptor dev_qualifier = {
165 .bLength = sizeof(dev_qualifier),
166 .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
167 .bcdUSB = cpu_to_le16(0x0200),
168 .bDeviceClass = USB_CLASS_PRINTER,
169 .bNumConfigurations = 1
170};
171
172static struct usb_descriptor_header *hs_printer_function[] = { 164static struct usb_descriptor_header *hs_printer_function[] = {
173 (struct usb_descriptor_header *) &intf_desc, 165 (struct usb_descriptor_header *) &intf_desc,
174 (struct usb_descriptor_header *) &hs_ep_in_desc, 166 (struct usb_descriptor_header *) &hs_ep_in_desc,
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 35fe3c80cfc0..197f73386fac 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1445,16 +1445,18 @@ static void usbg_drop_tpg(struct se_portal_group *se_tpg)
1445 for (i = 0; i < TPG_INSTANCES; ++i) 1445 for (i = 0; i < TPG_INSTANCES; ++i)
1446 if (tpg_instances[i].tpg == tpg) 1446 if (tpg_instances[i].tpg == tpg)
1447 break; 1447 break;
1448 if (i < TPG_INSTANCES) 1448 if (i < TPG_INSTANCES) {
1449 tpg_instances[i].tpg = NULL; 1449 tpg_instances[i].tpg = NULL;
1450 opts = container_of(tpg_instances[i].func_inst, 1450 opts = container_of(tpg_instances[i].func_inst,
1451 struct f_tcm_opts, func_inst); 1451 struct f_tcm_opts, func_inst);
1452 mutex_lock(&opts->dep_lock); 1452 mutex_lock(&opts->dep_lock);
1453 if (opts->has_dep) 1453 if (opts->has_dep)
1454 module_put(opts->dependent); 1454 module_put(opts->dependent);
1455 else 1455 else
1456 configfs_undepend_item_unlocked(&opts->func_inst.group.cg_item); 1456 configfs_undepend_item_unlocked(
1457 mutex_unlock(&opts->dep_lock); 1457 &opts->func_inst.group.cg_item);
1458 mutex_unlock(&opts->dep_lock);
1459 }
1458 mutex_unlock(&tpg_instances_lock); 1460 mutex_unlock(&tpg_instances_lock);
1459 1461
1460 kfree(tpg); 1462 kfree(tpg);
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 186d4b162524..cd214ec8a601 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -598,18 +598,6 @@ static struct usb_gadget_strings *fn_strings[] = {
598 NULL, 598 NULL,
599}; 599};
600 600
601static struct usb_qualifier_descriptor devqual_desc = {
602 .bLength = sizeof devqual_desc,
603 .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
604
605 .bcdUSB = cpu_to_le16(0x200),
606 .bDeviceClass = USB_CLASS_MISC,
607 .bDeviceSubClass = 0x02,
608 .bDeviceProtocol = 0x01,
609 .bNumConfigurations = 1,
610 .bRESERVED = 0,
611};
612
613static struct usb_interface_assoc_descriptor iad_desc = { 601static struct usb_interface_assoc_descriptor iad_desc = {
614 .bLength = sizeof iad_desc, 602 .bLength = sizeof iad_desc,
615 .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, 603 .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
@@ -1292,6 +1280,7 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
1292 1280
1293 if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) { 1281 if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
1294 struct cntrl_cur_lay3 c; 1282 struct cntrl_cur_lay3 c;
1283 memset(&c, 0, sizeof(struct cntrl_cur_lay3));
1295 1284
1296 if (entity_id == USB_IN_CLK_ID) 1285 if (entity_id == USB_IN_CLK_ID)
1297 c.dCUR = p_srate; 1286 c.dCUR = p_srate;
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
index d62683017cf3..990df221c629 100644
--- a/drivers/usb/gadget/function/storage_common.c
+++ b/drivers/usb/gadget/function/storage_common.c
@@ -83,9 +83,7 @@ EXPORT_SYMBOL_GPL(fsg_fs_function);
83 * USB 2.0 devices need to expose both high speed and full speed 83 * USB 2.0 devices need to expose both high speed and full speed
84 * descriptors, unless they only run at full speed. 84 * descriptors, unless they only run at full speed.
85 * 85 *
86 * That means alternate endpoint descriptors (bigger packets) 86 * That means alternate endpoint descriptors (bigger packets).
87 * and a "device qualifier" ... plus more construction options
88 * for the configuration descriptor.
89 */ 87 */
90struct usb_endpoint_descriptor fsg_hs_bulk_in_desc = { 88struct usb_endpoint_descriptor fsg_hs_bulk_in_desc = {
91 .bLength = USB_DT_ENDPOINT_SIZE, 89 .bLength = USB_DT_ENDPOINT_SIZE,
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index e64479f882a5..aa3707bdebb4 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -938,8 +938,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
938 struct usb_ep *ep = dev->gadget->ep0; 938 struct usb_ep *ep = dev->gadget->ep0;
939 struct usb_request *req = dev->req; 939 struct usb_request *req = dev->req;
940 940
941 if ((retval = setup_req (ep, req, 0)) == 0) 941 if ((retval = setup_req (ep, req, 0)) == 0) {
942 retval = usb_ep_queue (ep, req, GFP_ATOMIC); 942 spin_unlock_irq (&dev->lock);
943 retval = usb_ep_queue (ep, req, GFP_KERNEL);
944 spin_lock_irq (&dev->lock);
945 }
943 dev->state = STATE_DEV_CONNECTED; 946 dev->state = STATE_DEV_CONNECTED;
944 947
945 /* assume that was SET_CONFIGURATION */ 948 /* assume that was SET_CONFIGURATION */
@@ -1457,8 +1460,11 @@ delegate:
1457 w_length); 1460 w_length);
1458 if (value < 0) 1461 if (value < 0)
1459 break; 1462 break;
1463
1464 spin_unlock (&dev->lock);
1460 value = usb_ep_queue (gadget->ep0, dev->req, 1465 value = usb_ep_queue (gadget->ep0, dev->req,
1461 GFP_ATOMIC); 1466 GFP_KERNEL);
1467 spin_lock (&dev->lock);
1462 if (value < 0) { 1468 if (value < 0) {
1463 clean_req (gadget->ep0, dev->req); 1469 clean_req (gadget->ep0, dev->req);
1464 break; 1470 break;
@@ -1481,11 +1487,14 @@ delegate:
1481 if (value >= 0 && dev->state != STATE_DEV_SETUP) { 1487 if (value >= 0 && dev->state != STATE_DEV_SETUP) {
1482 req->length = value; 1488 req->length = value;
1483 req->zero = value < w_length; 1489 req->zero = value < w_length;
1484 value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC); 1490
1491 spin_unlock (&dev->lock);
1492 value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
1485 if (value < 0) { 1493 if (value < 0) {
1486 DBG (dev, "ep_queue --> %d\n", value); 1494 DBG (dev, "ep_queue --> %d\n", value);
1487 req->status = 0; 1495 req->status = 0;
1488 } 1496 }
1497 return value;
1489 } 1498 }
1490 1499
1491 /* device stalls when value < 0 */ 1500 /* device stalls when value < 0 */
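
The pattern in all three inode.c hunks is the classic trade for sleeping allocations: usb_ep_queue() with GFP_ATOMIC cannot sleep but may fail under memory pressure, so the lock is dropped around the call to permit GFP_KERNEL. The cost is that device state can change while unlocked, which is why state is re-derived after relocking. The shape of the pattern (a sketch):

	spin_unlock_irq(&dev->lock);
	retval = usb_ep_queue(ep, req, GFP_KERNEL);	/* may now sleep */
	spin_lock_irq(&dev->lock);

	/* Anything read before the unlock may be stale; re-check state. */
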
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index 6e8300d6a737..e1b2dcebdc2e 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -603,11 +603,15 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
603 } 603 }
604 } 604 }
605 605
606 list_add_tail(&driver->pending, &gadget_driver_pending_list); 606 if (!driver->match_existing_only) {
607 pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n", 607 list_add_tail(&driver->pending, &gadget_driver_pending_list);
608 driver->function); 608 pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
609 driver->function);
610 ret = 0;
611 }
612
609 mutex_unlock(&udc_lock); 613 mutex_unlock(&udc_lock);
610 return 0; 614 return ret;
611found: 615found:
612 ret = udc_bind_to_driver(udc, driver); 616 ret = udc_bind_to_driver(udc, driver);
613 mutex_unlock(&udc_lock); 617 mutex_unlock(&udc_lock);
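
With match_existing_only set (as configfs-gadget now does), usb_gadget_probe_driver() either binds to a free UDC immediately or fails with -ENODEV; without the flag, the driver is parked on gadget_driver_pending_list and bound later when a UDC registers. A condensed sketch of the resulting flow, with the surrounding lookup and the initial ret value reconstructed rather than quoted:

	int ret = -ENODEV;			/* assumed set earlier */

	mutex_lock(&udc_lock);
	list_for_each_entry(udc, &udc_list, list)	/* reconstructed */
		if (!udc->driver)
			goto found;

	if (!driver->match_existing_only) {
		/* No UDC yet: defer the bind until one registers. */
		list_add_tail(&driver->pending, &gadget_driver_pending_list);
		ret = 0;
	}
	mutex_unlock(&udc_lock);
	return ret;		/* -ENODEV when deferral is opted out */

found:
	ret = udc_bind_to_driver(udc, driver);
	mutex_unlock(&udc_lock);
	return ret;
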
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index ae1b6e69eb96..a962b89b65a6 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -368,6 +368,15 @@ static void ehci_shutdown(struct usb_hcd *hcd)
368{ 368{
369 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 369 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
370 370
 371 /*
 372 * Protect the system from crashing at system shutdown in cases where
 373 * the USB host has not yet been added by the OTG controller driver.
 374 * As ehci_setup() has not been done yet, do not access registers or
 375 * variables initialized in ehci_setup().
376 */
377 if (!ehci->sbrn)
378 return;
379
371 spin_lock_irq(&ehci->lock); 380 spin_lock_irq(&ehci->lock);
372 ehci->shutdown = true; 381 ehci->shutdown = true;
373 ehci->rh_state = EHCI_RH_STOPPING; 382 ehci->rh_state = EHCI_RH_STOPPING;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index ffc90295a95f..74f62d68f013 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -872,15 +872,23 @@ int ehci_hub_control(
872) { 872) {
873 struct ehci_hcd *ehci = hcd_to_ehci (hcd); 873 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
874 int ports = HCS_N_PORTS (ehci->hcs_params); 874 int ports = HCS_N_PORTS (ehci->hcs_params);
875 u32 __iomem *status_reg = &ehci->regs->port_status[ 875 u32 __iomem *status_reg, *hostpc_reg;
876 (wIndex & 0xff) - 1];
877 u32 __iomem *hostpc_reg = &ehci->regs->hostpc[(wIndex & 0xff) - 1];
878 u32 temp, temp1, status; 876 u32 temp, temp1, status;
879 unsigned long flags; 877 unsigned long flags;
880 int retval = 0; 878 int retval = 0;
881 unsigned selector; 879 unsigned selector;
882 880
883 /* 881 /*
882 * Avoid underflow while calculating (wIndex & 0xff) - 1.
883 * The compiler might deduce that wIndex can never be 0 and then
884 * optimize away the tests for !wIndex below.
885 */
886 temp = wIndex & 0xff;
887 temp -= (temp > 0);
888 status_reg = &ehci->regs->port_status[temp];
889 hostpc_reg = &ehci->regs->hostpc[temp];
890
891 /*
884 * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR. 892 * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
885 * HCS_INDICATOR may say we can change LEDs to off/amber/green. 893 * HCS_INDICATOR may say we can change LEDs to off/amber/green.
886 * (track current state ourselves) ... blink for diagnostics, 894 * (track current state ourselves) ... blink for diagnostics,
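
The branchless `temp -= (temp > 0)` maps wIndex 0 to array index 0 instead of letting the unsigned subtraction wrap to 0xffffffff, while leaving wIndex itself intact for the later `!wIndex` validity checks the comment mentions. A standalone illustration of the hazard (hypothetical user-space C, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned int wIndex = 0;	/* invalid port number */
		unsigned int idx = wIndex & 0xff;

		idx -= (idx > 0);		/* 0 -> 0, 1..255 -> 0..254 */
		printf("guarded index: %u\n", idx);	/* prints 0 */

		/* The old expression underflowed before any validity check: */
		printf("naive index:   %u\n", (wIndex & 0xff) - 1);	/* 4294967295 */
		return 0;
	}
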
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index d3afc89d00f5..2f8d3af811ce 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -179,22 +179,32 @@ static int ehci_msm_remove(struct platform_device *pdev)
179static int ehci_msm_pm_suspend(struct device *dev) 179static int ehci_msm_pm_suspend(struct device *dev)
180{ 180{
181 struct usb_hcd *hcd = dev_get_drvdata(dev); 181 struct usb_hcd *hcd = dev_get_drvdata(dev);
182 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
182 bool do_wakeup = device_may_wakeup(dev); 183 bool do_wakeup = device_may_wakeup(dev);
183 184
184 dev_dbg(dev, "ehci-msm PM suspend\n"); 185 dev_dbg(dev, "ehci-msm PM suspend\n");
185 186
186 return ehci_suspend(hcd, do_wakeup); 187 /* Only call ehci_suspend if ehci_setup has been done */
188 if (ehci->sbrn)
189 return ehci_suspend(hcd, do_wakeup);
190
191 return 0;
187} 192}
188 193
189static int ehci_msm_pm_resume(struct device *dev) 194static int ehci_msm_pm_resume(struct device *dev)
190{ 195{
191 struct usb_hcd *hcd = dev_get_drvdata(dev); 196 struct usb_hcd *hcd = dev_get_drvdata(dev);
197 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
192 198
193 dev_dbg(dev, "ehci-msm PM resume\n"); 199 dev_dbg(dev, "ehci-msm PM resume\n");
194 ehci_resume(hcd, false); 200
201 /* Only call ehci_resume if ehci_setup has been done */
202 if (ehci->sbrn)
203 ehci_resume(hcd, false);
195 204
196 return 0; 205 return 0;
197} 206}
207
198#else 208#else
199#define ehci_msm_pm_suspend NULL 209#define ehci_msm_pm_suspend NULL
200#define ehci_msm_pm_resume NULL 210#define ehci_msm_pm_resume NULL
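
Both the ehci-hcd shutdown hunk and these suspend/resume hunks lean on the same trick: ehci->sbrn is only written once ehci_setup() has run, so a zero value doubles as a "not initialized yet" flag on OTG platforms where the host side may never have been added. The guard pattern, under that assumption:

	struct ehci_hcd *ehci = hcd_to_ehci(hcd);

	/* sbrn is set during ehci_setup(); zero means setup never ran,
	 * so registers and ehci state are not safe to touch. */
	if (!ehci->sbrn)
		return 0;	/* or a plain return in void shutdown paths */
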
diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c
index a94ed677d937..be4a2788fc58 100644
--- a/drivers/usb/host/ehci-st.c
+++ b/drivers/usb/host/ehci-st.c
@@ -206,7 +206,8 @@ static int st_ehci_platform_probe(struct platform_device *dev)
206 priv->clk48 = NULL; 206 priv->clk48 = NULL;
207 } 207 }
208 208
209 priv->pwr = devm_reset_control_get_optional(&dev->dev, "power"); 209 priv->pwr =
210 devm_reset_control_get_optional_shared(&dev->dev, "power");
210 if (IS_ERR(priv->pwr)) { 211 if (IS_ERR(priv->pwr)) {
211 err = PTR_ERR(priv->pwr); 212 err = PTR_ERR(priv->pwr);
212 if (err == -EPROBE_DEFER) 213 if (err == -EPROBE_DEFER)
@@ -214,7 +215,8 @@ static int st_ehci_platform_probe(struct platform_device *dev)
214 priv->pwr = NULL; 215 priv->pwr = NULL;
215 } 216 }
216 217
217 priv->rst = devm_reset_control_get_optional(&dev->dev, "softreset"); 218 priv->rst =
219 devm_reset_control_get_optional_shared(&dev->dev, "softreset");
218 if (IS_ERR(priv->rst)) { 220 if (IS_ERR(priv->rst)) {
219 err = PTR_ERR(priv->rst); 221 err = PTR_ERR(priv->rst);
220 if (err == -EPROBE_DEFER) 222 if (err == -EPROBE_DEFER)
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 4031b372008e..9a3d7db5be57 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -81,15 +81,23 @@ static int tegra_reset_usb_controller(struct platform_device *pdev)
81 struct usb_hcd *hcd = platform_get_drvdata(pdev); 81 struct usb_hcd *hcd = platform_get_drvdata(pdev);
82 struct tegra_ehci_hcd *tegra = 82 struct tegra_ehci_hcd *tegra =
83 (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv; 83 (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
84 bool has_utmi_pad_registers = false;
84 85
85 phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0); 86 phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0);
86 if (!phy_np) 87 if (!phy_np)
87 return -ENOENT; 88 return -ENOENT;
88 89
90 if (of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers"))
91 has_utmi_pad_registers = true;
92
89 if (!usb1_reset_attempted) { 93 if (!usb1_reset_attempted) {
90 struct reset_control *usb1_reset; 94 struct reset_control *usb1_reset;
91 95
92 usb1_reset = of_reset_control_get(phy_np, "usb"); 96 if (!has_utmi_pad_registers)
97 usb1_reset = of_reset_control_get(phy_np, "utmi-pads");
98 else
99 usb1_reset = tegra->rst;
100
93 if (IS_ERR(usb1_reset)) { 101 if (IS_ERR(usb1_reset)) {
94 dev_warn(&pdev->dev, 102 dev_warn(&pdev->dev,
95 "can't get utmi-pads reset from the PHY\n"); 103 "can't get utmi-pads reset from the PHY\n");
@@ -99,13 +107,15 @@ static int tegra_reset_usb_controller(struct platform_device *pdev)
99 reset_control_assert(usb1_reset); 107 reset_control_assert(usb1_reset);
100 udelay(1); 108 udelay(1);
101 reset_control_deassert(usb1_reset); 109 reset_control_deassert(usb1_reset);
110
111 if (!has_utmi_pad_registers)
112 reset_control_put(usb1_reset);
102 } 113 }
103 114
104 reset_control_put(usb1_reset);
105 usb1_reset_attempted = true; 115 usb1_reset_attempted = true;
106 } 116 }
107 117
108 if (!of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers")) { 118 if (!has_utmi_pad_registers) {
109 reset_control_assert(tegra->rst); 119 reset_control_assert(tegra->rst);
110 udelay(1); 120 udelay(1);
111 reset_control_deassert(tegra->rst); 121 reset_control_deassert(tegra->rst);
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index d029bbe9eb36..641fed609911 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -183,7 +183,6 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
183{ 183{
184 int branch; 184 int branch;
185 185
186 ed->state = ED_OPER;
187 ed->ed_prev = NULL; 186 ed->ed_prev = NULL;
188 ed->ed_next = NULL; 187 ed->ed_next = NULL;
189 ed->hwNextED = 0; 188 ed->hwNextED = 0;
@@ -259,6 +258,8 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
259 /* the HC may not see the schedule updates yet, but if it does 258 /* the HC may not see the schedule updates yet, but if it does
260 * then they'll be properly ordered. 259 * then they'll be properly ordered.
261 */ 260 */
261
262 ed->state = ED_OPER;
262 return 0; 263 return 0;
263} 264}
264 265
diff --git a/drivers/usb/host/ohci-st.c b/drivers/usb/host/ohci-st.c
index acf2eb2a5676..02816a1515a1 100644
--- a/drivers/usb/host/ohci-st.c
+++ b/drivers/usb/host/ohci-st.c
@@ -188,13 +188,15 @@ static int st_ohci_platform_probe(struct platform_device *dev)
188 priv->clk48 = NULL; 188 priv->clk48 = NULL;
189 } 189 }
190 190
191 priv->pwr = devm_reset_control_get_optional(&dev->dev, "power"); 191 priv->pwr =
192 devm_reset_control_get_optional_shared(&dev->dev, "power");
192 if (IS_ERR(priv->pwr)) { 193 if (IS_ERR(priv->pwr)) {
193 err = PTR_ERR(priv->pwr); 194 err = PTR_ERR(priv->pwr);
194 goto err_put_clks; 195 goto err_put_clks;
195 } 196 }
196 197
197 priv->rst = devm_reset_control_get_optional(&dev->dev, "softreset"); 198 priv->rst =
199 devm_reset_control_get_optional_shared(&dev->dev, "softreset");
198 if (IS_ERR(priv->rst)) { 200 if (IS_ERR(priv->rst)) {
199 err = PTR_ERR(priv->rst); 201 err = PTR_ERR(priv->rst);
200 goto err_put_clks; 202 goto err_put_clks;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 48672fac7ff3..c10972fcc8e4 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -37,6 +37,7 @@
37/* Device for a quirk */ 37/* Device for a quirk */
38#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 38#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
39#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 39#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
40#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009 0x1009
40#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400 41#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400
41 42
42#define PCI_VENDOR_ID_ETRON 0x1b6f 43#define PCI_VENDOR_ID_ETRON 0x1b6f
@@ -114,6 +115,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
114 xhci->quirks |= XHCI_TRUST_TX_LENGTH; 115 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
115 } 116 }
116 117
118 if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
119 pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
120 xhci->quirks |= XHCI_BROKEN_STREAMS;
121
117 if (pdev->vendor == PCI_VENDOR_ID_NEC) 122 if (pdev->vendor == PCI_VENDOR_ID_NEC)
118 xhci->quirks |= XHCI_NEC_HOST; 123 xhci->quirks |= XHCI_NEC_HOST;
119 124
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 676ea458148b..1f3f981fe7f8 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -196,6 +196,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
196 ret = clk_prepare_enable(clk); 196 ret = clk_prepare_enable(clk);
197 if (ret) 197 if (ret)
198 goto put_hcd; 198 goto put_hcd;
199 } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
200 ret = -EPROBE_DEFER;
201 goto put_hcd;
199 } 202 }
200 203
201 xhci = hcd_to_xhci(hcd); 204 xhci = hcd_to_xhci(hcd);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 52deae4b7eac..d7d502578d79 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -290,6 +290,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
290 290
291 temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); 291 temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
292 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; 292 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
293
294 /*
 295 * Writing the CMD_RING_ABORT bit should cause a cmd completion event;
 296 * however, on some host hw the CMD_RING_RUNNING bit is correctly cleared
 297 * but the completion event is never sent. Use the cmd timeout timer to
 298 * handle those cases. Use twice the time to cover the bit polling retry.
299 */
300 mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
293 xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, 301 xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
294 &xhci->op_regs->cmd_ring); 302 &xhci->op_regs->cmd_ring);
295 303
@@ -314,6 +322,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
314 322
315 xhci_err(xhci, "Stopped the command ring failed, " 323 xhci_err(xhci, "Stopped the command ring failed, "
316 "maybe the host is dead\n"); 324 "maybe the host is dead\n");
325 del_timer(&xhci->cmd_timer);
317 xhci->xhc_state |= XHCI_STATE_DYING; 326 xhci->xhc_state |= XHCI_STATE_DYING;
318 xhci_quiesce(xhci); 327 xhci_quiesce(xhci);
319 xhci_halt(xhci); 328 xhci_halt(xhci);
@@ -1246,22 +1255,21 @@ void xhci_handle_command_timeout(unsigned long data)
1246 int ret; 1255 int ret;
1247 unsigned long flags; 1256 unsigned long flags;
1248 u64 hw_ring_state; 1257 u64 hw_ring_state;
1249 struct xhci_command *cur_cmd = NULL; 1258 bool second_timeout = false;
1250 xhci = (struct xhci_hcd *) data; 1259 xhci = (struct xhci_hcd *) data;
1251 1260
1252 /* mark this command to be cancelled */ 1261 /* mark this command to be cancelled */
1253 spin_lock_irqsave(&xhci->lock, flags); 1262 spin_lock_irqsave(&xhci->lock, flags);
1254 if (xhci->current_cmd) { 1263 if (xhci->current_cmd) {
1255 cur_cmd = xhci->current_cmd; 1264 if (xhci->current_cmd->status == COMP_CMD_ABORT)
1256 cur_cmd->status = COMP_CMD_ABORT; 1265 second_timeout = true;
1266 xhci->current_cmd->status = COMP_CMD_ABORT;
1257 } 1267 }
1258 1268
1259
1260 /* Make sure command ring is running before aborting it */ 1269 /* Make sure command ring is running before aborting it */
1261 hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); 1270 hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
1262 if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) && 1271 if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
1263 (hw_ring_state & CMD_RING_RUNNING)) { 1272 (hw_ring_state & CMD_RING_RUNNING)) {
1264
1265 spin_unlock_irqrestore(&xhci->lock, flags); 1273 spin_unlock_irqrestore(&xhci->lock, flags);
1266 xhci_dbg(xhci, "Command timeout\n"); 1274 xhci_dbg(xhci, "Command timeout\n");
1267 ret = xhci_abort_cmd_ring(xhci); 1275 ret = xhci_abort_cmd_ring(xhci);
@@ -1273,6 +1281,15 @@ void xhci_handle_command_timeout(unsigned long data)
1273 } 1281 }
1274 return; 1282 return;
1275 } 1283 }
1284
1285 /* command ring failed to restart, or host removed. Bail out */
1286 if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
1287 spin_unlock_irqrestore(&xhci->lock, flags);
1288 xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
1289 xhci_cleanup_command_queue(xhci);
1290 return;
1291 }
1292
1276 /* command timeout on stopped ring, ring can't be aborted */ 1293 /* command timeout on stopped ring, ring can't be aborted */
1277 xhci_dbg(xhci, "Command timeout on stopped ring\n"); 1294 xhci_dbg(xhci, "Command timeout on stopped ring\n");
1278 xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd); 1295 xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
@@ -2721,7 +2738,8 @@ hw_died:
2721 writel(irq_pending, &xhci->ir_set->irq_pending); 2738 writel(irq_pending, &xhci->ir_set->irq_pending);
2722 } 2739 }
2723 2740
2724 if (xhci->xhc_state & XHCI_STATE_DYING) { 2741 if (xhci->xhc_state & XHCI_STATE_DYING ||
2742 xhci->xhc_state & XHCI_STATE_HALTED) {
2725 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " 2743 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2726 "Shouldn't IRQs be disabled?\n"); 2744 "Shouldn't IRQs be disabled?\n");
2727 /* Clear the event handler busy flag (RW1C); 2745 /* Clear the event handler busy flag (RW1C);
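
Read together, the xhci-ring.c hunks form a small watchdog state machine: the timer is re-armed just before CMD_RING_ABORT is written, a first expiry marks the current command COMP_CMD_ABORT, and a second expiry with that mark still present means the ring never restarted, so the whole command queue is flushed. A condensed sketch of that flow (reconstructed, not verbatim):

	/* xhci_abort_cmd_ring(): arm the watchdog before requesting abort;
	 * twice the usual timeout covers the bit-polling retry. */
	mod_timer(&xhci->cmd_timer, jiffies + 2 * XHCI_CMD_DEFAULT_TIMEOUT);
	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);

	/* xhci_handle_command_timeout(): */
	if (xhci->current_cmd) {
		if (xhci->current_cmd->status == COMP_CMD_ABORT)
			second_timeout = true;	/* abort already tried once */
		xhci->current_cmd->status = COMP_CMD_ABORT;
	}

	/* ... ring-running check and abort attempt elided ... */

	if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
		xhci_cleanup_command_queue(xhci);	/* give up cleanly */
		return;
	}
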
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index fa7e1ef36cd9..f2f9518c53ab 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -685,20 +685,23 @@ void xhci_stop(struct usb_hcd *hcd)
685 u32 temp; 685 u32 temp;
686 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 686 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
687 687
688 if (xhci->xhc_state & XHCI_STATE_HALTED)
689 return;
690
691 mutex_lock(&xhci->mutex); 688 mutex_lock(&xhci->mutex);
692 spin_lock_irq(&xhci->lock);
693 xhci->xhc_state |= XHCI_STATE_HALTED;
694 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
695 689
696 /* Make sure the xHC is halted for a USB3 roothub 690 if (!(xhci->xhc_state & XHCI_STATE_HALTED)) {
697 * (xhci_stop() could be called as part of failed init). 691 spin_lock_irq(&xhci->lock);
698 */ 692
699 xhci_halt(xhci); 693 xhci->xhc_state |= XHCI_STATE_HALTED;
700 xhci_reset(xhci); 694 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
701 spin_unlock_irq(&xhci->lock); 695 xhci_halt(xhci);
696 xhci_reset(xhci);
697
698 spin_unlock_irq(&xhci->lock);
699 }
700
701 if (!usb_hcd_is_primary_hcd(hcd)) {
702 mutex_unlock(&xhci->mutex);
703 return;
704 }
702 705
703 xhci_cleanup_msix(xhci); 706 xhci_cleanup_msix(xhci);
704 707
@@ -4886,7 +4889,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4886 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); 4889 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
4887 xhci_print_registers(xhci); 4890 xhci_print_registers(xhci);
4888 4891
4889 xhci->quirks = quirks; 4892 xhci->quirks |= quirks;
4890 4893
4891 get_quirks(dev, xhci); 4894 get_quirks(dev, xhci);
4892 4895
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 39fd95833eb8..f824336def5c 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1090,29 +1090,6 @@ void musb_stop(struct musb *musb)
1090 musb_platform_try_idle(musb, 0); 1090 musb_platform_try_idle(musb, 0);
1091} 1091}
1092 1092
1093static void musb_shutdown(struct platform_device *pdev)
1094{
1095 struct musb *musb = dev_to_musb(&pdev->dev);
1096 unsigned long flags;
1097
1098 pm_runtime_get_sync(musb->controller);
1099
1100 musb_host_cleanup(musb);
1101 musb_gadget_cleanup(musb);
1102
1103 spin_lock_irqsave(&musb->lock, flags);
1104 musb_platform_disable(musb);
1105 musb_generic_disable(musb);
1106 spin_unlock_irqrestore(&musb->lock, flags);
1107
1108 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
1109 musb_platform_exit(musb);
1110
1111 pm_runtime_put(musb->controller);
1112 /* FIXME power down */
1113}
1114
1115
1116/*-------------------------------------------------------------------------*/ 1093/*-------------------------------------------------------------------------*/
1117 1094
1118/* 1095/*
@@ -1702,7 +1679,7 @@ EXPORT_SYMBOL_GPL(musb_dma_completion);
1702#define use_dma 0 1679#define use_dma 0
1703#endif 1680#endif
1704 1681
1705static void (*musb_phy_callback)(enum musb_vbus_id_status status); 1682static int (*musb_phy_callback)(enum musb_vbus_id_status status);
1706 1683
1707/* 1684/*
1708 * musb_mailbox - optional phy notifier function 1685 * musb_mailbox - optional phy notifier function
@@ -1711,11 +1688,12 @@ static void (*musb_phy_callback)(enum musb_vbus_id_status status);
1711 * Optionally gets called from the USB PHY. Note that the USB PHY must be 1688 * Optionally gets called from the USB PHY. Note that the USB PHY must be
1712 * disabled at the point the phy_callback is registered or unregistered. 1689 * disabled at the point the phy_callback is registered or unregistered.
1713 */ 1690 */
1714void musb_mailbox(enum musb_vbus_id_status status) 1691int musb_mailbox(enum musb_vbus_id_status status)
1715{ 1692{
1716 if (musb_phy_callback) 1693 if (musb_phy_callback)
1717 musb_phy_callback(status); 1694 return musb_phy_callback(status);
1718 1695
1696 return -ENODEV;
1719}; 1697};
1720EXPORT_SYMBOL_GPL(musb_mailbox); 1698EXPORT_SYMBOL_GPL(musb_mailbox);
1721 1699
@@ -2028,11 +2006,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2028 musb_readl = musb_default_readl; 2006 musb_readl = musb_default_readl;
2029 musb_writel = musb_default_writel; 2007 musb_writel = musb_default_writel;
2030 2008
2031 /* We need musb_read/write functions initialized for PM */
2032 pm_runtime_use_autosuspend(musb->controller);
2033 pm_runtime_set_autosuspend_delay(musb->controller, 200);
2034 pm_runtime_enable(musb->controller);
2035
2036 /* The musb_platform_init() call: 2009 /* The musb_platform_init() call:
2037 * - adjusts musb->mregs 2010 * - adjusts musb->mregs
2038 * - sets the musb->isr 2011 * - sets the musb->isr
@@ -2134,6 +2107,16 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2134 if (musb->ops->phy_callback) 2107 if (musb->ops->phy_callback)
2135 musb_phy_callback = musb->ops->phy_callback; 2108 musb_phy_callback = musb->ops->phy_callback;
2136 2109
2110 /*
2111 * We need musb_read/write functions initialized for PM.
2112 * Note that at least 2430 glue needs autosuspend delay
2113 * somewhere above 300 ms for the hardware to idle properly
2114 * after disconnecting the cable in host mode. Let's use
2115 * 500 ms for some margin.
2116 */
2117 pm_runtime_use_autosuspend(musb->controller);
2118 pm_runtime_set_autosuspend_delay(musb->controller, 500);
2119 pm_runtime_enable(musb->controller);
2137 pm_runtime_get_sync(musb->controller); 2120 pm_runtime_get_sync(musb->controller);
2138 2121
2139 status = usb_phy_init(musb->xceiv); 2122 status = usb_phy_init(musb->xceiv);
@@ -2237,13 +2220,8 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2237 if (status) 2220 if (status)
2238 goto fail5; 2221 goto fail5;
2239 2222
2240 pm_runtime_put(musb->controller); 2223 pm_runtime_mark_last_busy(musb->controller);
2241 2224 pm_runtime_put_autosuspend(musb->controller);
2242 /*
2243 * For why this is currently needed, see commit 3e43a0725637
2244 * ("usb: musb: core: add pm_runtime_irq_safe()")
2245 */
2246 pm_runtime_irq_safe(musb->controller);
2247 2225
2248 return 0; 2226 return 0;
2249 2227
@@ -2265,7 +2243,9 @@ fail2_5:
2265 usb_phy_shutdown(musb->xceiv); 2243 usb_phy_shutdown(musb->xceiv);
2266 2244
2267err_usb_phy_init: 2245err_usb_phy_init:
2246 pm_runtime_dont_use_autosuspend(musb->controller);
2268 pm_runtime_put_sync(musb->controller); 2247 pm_runtime_put_sync(musb->controller);
2248 pm_runtime_disable(musb->controller);
2269 2249
2270fail2: 2250fail2:
2271 if (musb->irq_wake) 2251 if (musb->irq_wake)
@@ -2273,7 +2253,6 @@ fail2:
2273 musb_platform_exit(musb); 2253 musb_platform_exit(musb);
2274 2254
2275fail1: 2255fail1:
2276 pm_runtime_disable(musb->controller);
2277 dev_err(musb->controller, 2256 dev_err(musb->controller,
2278 "musb_init_controller failed with status %d\n", status); 2257 "musb_init_controller failed with status %d\n", status);
2279 2258
@@ -2312,6 +2291,7 @@ static int musb_remove(struct platform_device *pdev)
2312{ 2291{
2313 struct device *dev = &pdev->dev; 2292 struct device *dev = &pdev->dev;
2314 struct musb *musb = dev_to_musb(dev); 2293 struct musb *musb = dev_to_musb(dev);
2294 unsigned long flags;
2315 2295
2316 /* this gets called on rmmod. 2296 /* this gets called on rmmod.
2317 * - Host mode: host may still be active 2297 * - Host mode: host may still be active
@@ -2319,17 +2299,26 @@ static int musb_remove(struct platform_device *pdev)
2319 * - OTG mode: both roles are deactivated (or never-activated) 2299 * - OTG mode: both roles are deactivated (or never-activated)
2320 */ 2300 */
2321 musb_exit_debugfs(musb); 2301 musb_exit_debugfs(musb);
2322 musb_shutdown(pdev);
2323 musb_phy_callback = NULL;
2324
2325 if (musb->dma_controller)
2326 musb_dma_controller_destroy(musb->dma_controller);
2327
2328 usb_phy_shutdown(musb->xceiv);
2329 2302
2330 cancel_work_sync(&musb->irq_work); 2303 cancel_work_sync(&musb->irq_work);
2331 cancel_delayed_work_sync(&musb->finish_resume_work); 2304 cancel_delayed_work_sync(&musb->finish_resume_work);
2332 cancel_delayed_work_sync(&musb->deassert_reset_work); 2305 cancel_delayed_work_sync(&musb->deassert_reset_work);
2306 pm_runtime_get_sync(musb->controller);
2307 musb_host_cleanup(musb);
2308 musb_gadget_cleanup(musb);
2309 spin_lock_irqsave(&musb->lock, flags);
2310 musb_platform_disable(musb);
2311 musb_generic_disable(musb);
2312 spin_unlock_irqrestore(&musb->lock, flags);
2313 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2314 pm_runtime_dont_use_autosuspend(musb->controller);
2315 pm_runtime_put_sync(musb->controller);
2316 pm_runtime_disable(musb->controller);
2317 musb_platform_exit(musb);
2318 musb_phy_callback = NULL;
2319 if (musb->dma_controller)
2320 musb_dma_controller_destroy(musb->dma_controller);
2321 usb_phy_shutdown(musb->xceiv);
2333 musb_free(musb); 2322 musb_free(musb);
2334 device_init_wakeup(dev, 0); 2323 device_init_wakeup(dev, 0);
2335 return 0; 2324 return 0;
@@ -2429,7 +2418,8 @@ static void musb_restore_context(struct musb *musb)
2429 musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe); 2418 musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
2430 musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe); 2419 musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
2431 musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe); 2420 musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
2432 musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl); 2421 if (musb->context.devctl & MUSB_DEVCTL_SESSION)
2422 musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
2433 2423
2434 for (i = 0; i < musb->config->num_eps; ++i) { 2424 for (i = 0; i < musb->config->num_eps; ++i) {
2435 struct musb_hw_ep *hw_ep; 2425 struct musb_hw_ep *hw_ep;
@@ -2612,7 +2602,6 @@ static struct platform_driver musb_driver = {
2612 }, 2602 },
2613 .probe = musb_probe, 2603 .probe = musb_probe,
2614 .remove = musb_remove, 2604 .remove = musb_remove,
2615 .shutdown = musb_shutdown,
2616}; 2605};
2617 2606
2618module_platform_driver(musb_driver); 2607module_platform_driver(musb_driver);
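
The musb_core.c changes replace the irq-safe runtime-PM setup with ordinary autosuspend: use_autosuspend/enable at probe, a matching dont_use_autosuspend/disable on every exit path, and autosuspend puts on the success paths. A minimal probe-shaped sketch of that pairing (example_hw_init() is hypothetical):

	#include <linux/pm_runtime.h>

	static int example_probe(struct device *dev)
	{
		int status;

		pm_runtime_use_autosuspend(dev);
		pm_runtime_set_autosuspend_delay(dev, 500);	/* 2430 glue wants >300 ms */
		pm_runtime_enable(dev);
		pm_runtime_get_sync(dev);

		status = example_hw_init(dev);		/* hypothetical init step */
		if (status)
			goto err;

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);	/* idles after the delay */
		return 0;

	err:
		pm_runtime_dont_use_autosuspend(dev);	/* undo in reverse order */
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
		return status;
	}
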
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index b6afe9e43305..b55a776b03eb 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -215,7 +215,7 @@ struct musb_platform_ops {
215 dma_addr_t *dma_addr, u32 *len); 215 dma_addr_t *dma_addr, u32 *len);
216 void (*pre_root_reset_end)(struct musb *musb); 216 void (*pre_root_reset_end)(struct musb *musb);
217 void (*post_root_reset_end)(struct musb *musb); 217 void (*post_root_reset_end)(struct musb *musb);
218 void (*phy_callback)(enum musb_vbus_id_status status); 218 int (*phy_callback)(enum musb_vbus_id_status status);
219}; 219};
220 220
221/* 221/*
@@ -312,6 +312,7 @@ struct musb {
312 struct work_struct irq_work; 312 struct work_struct irq_work;
313 struct delayed_work deassert_reset_work; 313 struct delayed_work deassert_reset_work;
314 struct delayed_work finish_resume_work; 314 struct delayed_work finish_resume_work;
315 struct delayed_work gadget_work;
315 u16 hwvers; 316 u16 hwvers;
316 317
317 u16 intrrxe; 318 u16 intrrxe;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 152865b36522..af2a3a7addf9 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1656,6 +1656,20 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1656 return usb_phy_set_power(musb->xceiv, mA); 1656 return usb_phy_set_power(musb->xceiv, mA);
1657} 1657}
1658 1658
1659static void musb_gadget_work(struct work_struct *work)
1660{
1661 struct musb *musb;
1662 unsigned long flags;
1663
1664 musb = container_of(work, struct musb, gadget_work.work);
1665 pm_runtime_get_sync(musb->controller);
1666 spin_lock_irqsave(&musb->lock, flags);
1667 musb_pullup(musb, musb->softconnect);
1668 spin_unlock_irqrestore(&musb->lock, flags);
1669 pm_runtime_mark_last_busy(musb->controller);
1670 pm_runtime_put_autosuspend(musb->controller);
1671}
1672
1659static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) 1673static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1660{ 1674{
1661 struct musb *musb = gadget_to_musb(gadget); 1675 struct musb *musb = gadget_to_musb(gadget);
@@ -1663,20 +1677,16 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1663 1677
1664 is_on = !!is_on; 1678 is_on = !!is_on;
1665 1679
1666 pm_runtime_get_sync(musb->controller);
1667
1668 /* NOTE: this assumes we are sensing vbus; we'd rather 1680 /* NOTE: this assumes we are sensing vbus; we'd rather
1669 * not pullup unless the B-session is active. 1681 * not pullup unless the B-session is active.
1670 */ 1682 */
1671 spin_lock_irqsave(&musb->lock, flags); 1683 spin_lock_irqsave(&musb->lock, flags);
1672 if (is_on != musb->softconnect) { 1684 if (is_on != musb->softconnect) {
1673 musb->softconnect = is_on; 1685 musb->softconnect = is_on;
1674 musb_pullup(musb, is_on); 1686 schedule_delayed_work(&musb->gadget_work, 0);
1675 } 1687 }
1676 spin_unlock_irqrestore(&musb->lock, flags); 1688 spin_unlock_irqrestore(&musb->lock, flags);
1677 1689
1678 pm_runtime_put(musb->controller);
1679
1680 return 0; 1690 return 0;
1681} 1691}
1682 1692
@@ -1845,7 +1855,7 @@ int musb_gadget_setup(struct musb *musb)
1845#elif IS_ENABLED(CONFIG_USB_MUSB_GADGET) 1855#elif IS_ENABLED(CONFIG_USB_MUSB_GADGET)
1846 musb->g.is_otg = 0; 1856 musb->g.is_otg = 0;
1847#endif 1857#endif
1848 1858 INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work);
1849 musb_g_init_endpoints(musb); 1859 musb_g_init_endpoints(musb);
1850 1860
1851 musb->is_active = 0; 1861 musb->is_active = 0;
@@ -1866,6 +1876,8 @@ void musb_gadget_cleanup(struct musb *musb)
1866{ 1876{
1867 if (musb->port_mode == MUSB_PORT_MODE_HOST) 1877 if (musb->port_mode == MUSB_PORT_MODE_HOST)
1868 return; 1878 return;
1879
1880 cancel_delayed_work_sync(&musb->gadget_work);
1869 usb_del_gadget_udc(&musb->g); 1881 usb_del_gadget_udc(&musb->g);
1870} 1882}
1871 1883
@@ -1914,8 +1926,8 @@ static int musb_gadget_start(struct usb_gadget *g,
1914 if (musb->xceiv->last_event == USB_EVENT_ID) 1926 if (musb->xceiv->last_event == USB_EVENT_ID)
1915 musb_platform_set_vbus(musb, 1); 1927 musb_platform_set_vbus(musb, 1);
1916 1928
1917 if (musb->xceiv->last_event == USB_EVENT_NONE) 1929 pm_runtime_mark_last_busy(musb->controller);
1918 pm_runtime_put(musb->controller); 1930 pm_runtime_put_autosuspend(musb->controller);
1919 1931
1920 return 0; 1932 return 0;
1921 1933
@@ -1934,8 +1946,7 @@ static int musb_gadget_stop(struct usb_gadget *g)
1934 struct musb *musb = gadget_to_musb(g); 1946 struct musb *musb = gadget_to_musb(g);
1935 unsigned long flags; 1947 unsigned long flags;
1936 1948
1937 if (musb->xceiv->last_event == USB_EVENT_NONE) 1949 pm_runtime_get_sync(musb->controller);
1938 pm_runtime_get_sync(musb->controller);
1939 1950
1940 /* 1951 /*
1941 * REVISIT always use otg_set_peripheral() here too; 1952 * REVISIT always use otg_set_peripheral() here too;
@@ -1963,7 +1974,8 @@ static int musb_gadget_stop(struct usb_gadget *g)
1963 * that currently misbehaves. 1974 * that currently misbehaves.
1964 */ 1975 */
1965 1976
1966 pm_runtime_put(musb->controller); 1977 pm_runtime_mark_last_busy(musb->controller);
1978 pm_runtime_put_autosuspend(musb->controller);
1967 1979
1968 return 0; 1980 return 0;
1969} 1981}
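
The reason for gadget_work is the companion musb_core.c change: with pm_runtime_irq_safe() gone, pm_runtime_get_sync() may sleep, yet musb_gadget_pullup() can be called in atomic context. Deferring the actual pullup to a work item resolves that, at the cost of the pullup happening slightly later. The pattern, as reconstructed from the hunks above:

	/* Atomic-context hook: record intent only, under the lock. */
	musb->softconnect = is_on;
	schedule_delayed_work(&musb->gadget_work, 0);

	/* Work item (process context): now free to sleep. */
	pm_runtime_get_sync(musb->controller);	/* may sleep without irq_safe */
	musb_pullup(musb, musb->softconnect);
	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);
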
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 2f8ad7f1f482..d227a71d85e1 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -434,7 +434,13 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
434 } 434 }
435 } 435 }
436 436
437 if (qh != NULL && qh->is_ready) { 437 /*
 438 * The pipe must be broken if the current urb->status is set, so don't
 439 * start the next urb.
440 * TODO: to minimize the risk of regression, only check urb->status
441 * for RX, until we have a test case to understand the behavior of TX.
442 */
443 if ((!status || !is_in) && qh && qh->is_ready) {
438 dev_dbg(musb->controller, "... next ep%d %cX urb %p\n", 444 dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
439 hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh)); 445 hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
440 musb_start_urb(musb, is_in, qh); 446 musb_start_urb(musb, is_in, qh);
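The new guard reads: start the next URB unless this was an RX transfer that finished with an error. Spelled out as an annotated restatement of the line above:

    /* status == 0            -> start next URB (either direction)
     * status != 0 && !is_in  -> TX error: still start next (see TODO)
     * status != 0 && is_in   -> RX error: pipe treated as broken, stop
     */
    if ((!status || !is_in) && qh && qh->is_ready)
            musb_start_urb(musb, is_in, qh);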
@@ -594,14 +600,13 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
594 musb_writew(ep->regs, MUSB_TXCSR, 0); 600 musb_writew(ep->regs, MUSB_TXCSR, 0);
595 601
596 /* scrub all previous state, clearing toggle */ 602 /* scrub all previous state, clearing toggle */
597 } else {
598 csr = musb_readw(ep->regs, MUSB_RXCSR);
599 if (csr & MUSB_RXCSR_RXPKTRDY)
600 WARNING("rx%d, packet/%d ready?\n", ep->epnum,
601 musb_readw(ep->regs, MUSB_RXCOUNT));
602
603 musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
604 } 603 }
604 csr = musb_readw(ep->regs, MUSB_RXCSR);
605 if (csr & MUSB_RXCSR_RXPKTRDY)
606 WARNING("rx%d, packet/%d ready?\n", ep->epnum,
607 musb_readw(ep->regs, MUSB_RXCOUNT));
608
609 musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
605 610
606 /* target addr and (for multipoint) hub addr/port */ 611 /* target addr and (for multipoint) hub addr/port */
607 if (musb->is_multipoint) { 612 if (musb->is_multipoint) {
@@ -627,7 +632,7 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
627 ep->rx_reinit = 0; 632 ep->rx_reinit = 0;
628} 633}
629 634
630static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma, 635static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
631 struct musb_hw_ep *hw_ep, struct musb_qh *qh, 636 struct musb_hw_ep *hw_ep, struct musb_qh *qh,
632 struct urb *urb, u32 offset, 637 struct urb *urb, u32 offset,
633 u32 *length, u8 *mode) 638 u32 *length, u8 *mode)
@@ -664,23 +669,18 @@ static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
664 } 669 }
665 channel->desired_mode = *mode; 670 channel->desired_mode = *mode;
666 musb_writew(epio, MUSB_TXCSR, csr); 671 musb_writew(epio, MUSB_TXCSR, csr);
667
668 return 0;
669} 672}
670 673
671static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma, 674static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
672 struct musb_hw_ep *hw_ep, 675 struct musb_hw_ep *hw_ep,
673 struct musb_qh *qh, 676 struct musb_qh *qh,
674 struct urb *urb, 677 struct urb *urb,
675 u32 offset, 678 u32 offset,
676 u32 *length, 679 u32 *length,
677 u8 *mode) 680 u8 *mode)
678{ 681{
679 struct dma_channel *channel = hw_ep->tx_channel; 682 struct dma_channel *channel = hw_ep->tx_channel;
680 683
681 if (!is_cppi_enabled(hw_ep->musb) && !tusb_dma_omap(hw_ep->musb))
682 return -ENODEV;
683
684 channel->actual_len = 0; 684 channel->actual_len = 0;
685 685
686 /* 686 /*
@@ -688,8 +688,6 @@ static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
688 * to identify the zero-length-final-packet case. 688 * to identify the zero-length-final-packet case.
689 */ 689 */
690 *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0; 690 *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
691
692 return 0;
693} 691}
694 692
695static bool musb_tx_dma_program(struct dma_controller *dma, 693static bool musb_tx_dma_program(struct dma_controller *dma,
@@ -699,15 +697,14 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
699 struct dma_channel *channel = hw_ep->tx_channel; 697 struct dma_channel *channel = hw_ep->tx_channel;
700 u16 pkt_size = qh->maxpacket; 698 u16 pkt_size = qh->maxpacket;
701 u8 mode; 699 u8 mode;
702 int res;
703 700
704 if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb)) 701 if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
705 res = musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, 702 musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
706 offset, &length, &mode); 703 &length, &mode);
704 else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
705 musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
706 &length, &mode);
707 else 707 else
708 res = musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb,
709 offset, &length, &mode);
710 if (res)
711 return false; 708 return false;
712 709
713 qh->segsize = length; 710 qh->segsize = length;
@@ -995,9 +992,15 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
995 if (is_in) { 992 if (is_in) {
996 dma = is_dma_capable() ? ep->rx_channel : NULL; 993 dma = is_dma_capable() ? ep->rx_channel : NULL;
997 994
998 /* clear nak timeout bit */ 995 /*
996 * Need to stop the transaction by clearing REQPKT first
997 * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED
998 * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2
999 */
999 rx_csr = musb_readw(epio, MUSB_RXCSR); 1000 rx_csr = musb_readw(epio, MUSB_RXCSR);
1000 rx_csr |= MUSB_RXCSR_H_WZC_BITS; 1001 rx_csr |= MUSB_RXCSR_H_WZC_BITS;
1002 rx_csr &= ~MUSB_RXCSR_H_REQPKT;
1003 musb_writew(epio, MUSB_RXCSR, rx_csr);
1001 rx_csr &= ~MUSB_RXCSR_DATAERROR; 1004 rx_csr &= ~MUSB_RXCSR_DATAERROR;
1002 musb_writew(epio, MUSB_RXCSR, rx_csr); 1005 musb_writew(epio, MUSB_RXCSR, rx_csr);
1003 1006
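Two details in the hunk above are easy to miss: the MUSB host RXCSR register contains write-zero-to-clear status bits, so MUSB_RXCSR_H_WZC_BITS is OR-ed in to avoid clearing unrelated status on write-back, and per the cited programmer's guide the IN transaction must be stopped (REQPKT cleared) in one write before the NAK-timeout bit is cleared in a second. The same sequence, annotated:

    u16 rx_csr = musb_readw(epio, MUSB_RXCSR);

    rx_csr |= MUSB_RXCSR_H_WZC_BITS;     /* keep write-zero-to-clear bits set */
    rx_csr &= ~MUSB_RXCSR_H_REQPKT;      /* 1) stop requesting IN packets */
    musb_writew(epio, MUSB_RXCSR, rx_csr);

    rx_csr &= ~MUSB_RXCSR_DATAERROR;     /* 2) then clear the NAK timeout */
    musb_writew(epio, MUSB_RXCSR, rx_csr);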
@@ -1551,7 +1554,7 @@ static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
1551 struct urb *urb, 1554 struct urb *urb,
1552 size_t len) 1555 size_t len)
1553{ 1556{
1554 struct dma_channel *channel = hw_ep->tx_channel; 1557 struct dma_channel *channel = hw_ep->rx_channel;
1555 void __iomem *epio = hw_ep->regs; 1558 void __iomem *epio = hw_ep->regs;
1556 dma_addr_t *buf; 1559 dma_addr_t *buf;
1557 u32 length, res; 1560 u32 length, res;
@@ -1870,6 +1873,9 @@ void musb_host_rx(struct musb *musb, u8 epnum)
1870 status = -EPROTO; 1873 status = -EPROTO;
1871 musb_writeb(epio, MUSB_RXINTERVAL, 0); 1874 musb_writeb(epio, MUSB_RXINTERVAL, 0);
1872 1875
1876 rx_csr &= ~MUSB_RXCSR_H_ERROR;
1877 musb_writew(epio, MUSB_RXCSR, rx_csr);
1878
1873 } else if (rx_csr & MUSB_RXCSR_DATAERROR) { 1879 } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
1874 1880
1875 if (USB_ENDPOINT_XFER_ISOC != qh->type) { 1881 if (USB_ENDPOINT_XFER_ISOC != qh->type) {
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index c84e0322c108..0b4cec940386 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -49,97 +49,14 @@ struct omap2430_glue {
49 enum musb_vbus_id_status status; 49 enum musb_vbus_id_status status;
50 struct work_struct omap_musb_mailbox_work; 50 struct work_struct omap_musb_mailbox_work;
51 struct device *control_otghs; 51 struct device *control_otghs;
52 bool cable_connected;
53 bool enabled;
54 bool powered;
52}; 55};
53#define glue_to_musb(g) platform_get_drvdata(g->musb) 56#define glue_to_musb(g) platform_get_drvdata(g->musb)
54 57
55static struct omap2430_glue *_glue; 58static struct omap2430_glue *_glue;
56 59
57static struct timer_list musb_idle_timer;
58
59static void musb_do_idle(unsigned long _musb)
60{
61 struct musb *musb = (void *)_musb;
62 unsigned long flags;
63 u8 power;
64 u8 devctl;
65
66 spin_lock_irqsave(&musb->lock, flags);
67
68 switch (musb->xceiv->otg->state) {
69 case OTG_STATE_A_WAIT_BCON:
70
71 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
72 if (devctl & MUSB_DEVCTL_BDEVICE) {
73 musb->xceiv->otg->state = OTG_STATE_B_IDLE;
74 MUSB_DEV_MODE(musb);
75 } else {
76 musb->xceiv->otg->state = OTG_STATE_A_IDLE;
77 MUSB_HST_MODE(musb);
78 }
79 break;
80 case OTG_STATE_A_SUSPEND:
81 /* finish RESUME signaling? */
82 if (musb->port1_status & MUSB_PORT_STAT_RESUME) {
83 power = musb_readb(musb->mregs, MUSB_POWER);
84 power &= ~MUSB_POWER_RESUME;
85 dev_dbg(musb->controller, "root port resume stopped, power %02x\n", power);
86 musb_writeb(musb->mregs, MUSB_POWER, power);
87 musb->is_active = 1;
88 musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
89 | MUSB_PORT_STAT_RESUME);
90 musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
91 usb_hcd_poll_rh_status(musb->hcd);
92 /* NOTE: it might really be A_WAIT_BCON ... */
93 musb->xceiv->otg->state = OTG_STATE_A_HOST;
94 }
95 break;
96 case OTG_STATE_A_HOST:
97 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
98 if (devctl & MUSB_DEVCTL_BDEVICE)
99 musb->xceiv->otg->state = OTG_STATE_B_IDLE;
100 else
101 musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
102 default:
103 break;
104 }
105 spin_unlock_irqrestore(&musb->lock, flags);
106}
107
108
109static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout)
110{
111 unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
112 static unsigned long last_timer;
113
114 if (timeout == 0)
115 timeout = default_timeout;
116
117 /* Never idle if active, or when VBUS timeout is not set as host */
118 if (musb->is_active || ((musb->a_wait_bcon == 0)
119 && (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON))) {
120 dev_dbg(musb->controller, "%s active, deleting timer\n",
121 usb_otg_state_string(musb->xceiv->otg->state));
122 del_timer(&musb_idle_timer);
123 last_timer = jiffies;
124 return;
125 }
126
127 if (time_after(last_timer, timeout)) {
128 if (!timer_pending(&musb_idle_timer))
129 last_timer = timeout;
130 else {
131 dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n");
132 return;
133 }
134 }
135 last_timer = timeout;
136
137 dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
138 usb_otg_state_string(musb->xceiv->otg->state),
139 (unsigned long)jiffies_to_msecs(timeout - jiffies));
140 mod_timer(&musb_idle_timer, timeout);
141}
142
143static void omap2430_musb_set_vbus(struct musb *musb, int is_on) 60static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
144{ 61{
145 struct usb_otg *otg = musb->xceiv->otg; 62 struct usb_otg *otg = musb->xceiv->otg;
@@ -205,16 +122,6 @@ static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
205 musb_readb(musb->mregs, MUSB_DEVCTL)); 122 musb_readb(musb->mregs, MUSB_DEVCTL));
206} 123}
207 124
208static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode)
209{
210 u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
211
212 devctl |= MUSB_DEVCTL_SESSION;
213 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
214
215 return 0;
216}
217
218static inline void omap2430_low_level_exit(struct musb *musb) 125static inline void omap2430_low_level_exit(struct musb *musb)
219{ 126{
220 u32 l; 127 u32 l;
@@ -234,22 +141,63 @@ static inline void omap2430_low_level_init(struct musb *musb)
234 musb_writel(musb->mregs, OTG_FORCESTDBY, l); 141 musb_writel(musb->mregs, OTG_FORCESTDBY, l);
235} 142}
236 143
237static void omap2430_musb_mailbox(enum musb_vbus_id_status status) 144/*
145 * We can get multiple cable events so we need to keep track
146 * of the power state. Only keep power enabled if USB cable is
147 * connected and a gadget is started.
148 */
149static void omap2430_set_power(struct musb *musb, bool enabled, bool cable)
150{
151 struct device *dev = musb->controller;
152 struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
153 bool power_up;
154 int res;
155
156 if (glue->enabled != enabled)
157 glue->enabled = enabled;
158
159 if (glue->cable_connected != cable)
160 glue->cable_connected = cable;
161
162 power_up = glue->enabled && glue->cable_connected;
163 if (power_up == glue->powered) {
164 dev_warn(musb->controller, "power state already %i\n",
165 power_up);
166 return;
167 }
168
169 glue->powered = power_up;
170
171 if (power_up) {
172 res = pm_runtime_get_sync(musb->controller);
173 if (res < 0) {
174 dev_err(musb->controller, "could not enable: %i", res);
175 glue->powered = false;
176 }
177 } else {
178 pm_runtime_mark_last_busy(musb->controller);
179 pm_runtime_put_autosuspend(musb->controller);
180 }
181}
182
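omap2430_set_power() reduces everything to a single latch, powered = enabled && cable_connected, so any number of repeated cable or gadget events changes the runtime-PM reference count at most once per transition. The call sites, as the later hunks in this file show:

    omap2430_set_power(musb, true, glue->cable_connected);    /* gadget enabled */
    omap2430_set_power(musb, false, glue->cable_connected);   /* gadget disabled */
    omap2430_set_power(musb, glue->enabled, cable_connected); /* cable event */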
183static int omap2430_musb_mailbox(enum musb_vbus_id_status status)
238{ 184{
239 struct omap2430_glue *glue = _glue; 185 struct omap2430_glue *glue = _glue;
240 186
241 if (!glue) { 187 if (!glue) {
242 pr_err("%s: musb core is not yet initialized\n", __func__); 188 pr_err("%s: musb core is not yet initialized\n", __func__);
243 return; 189 return -EPROBE_DEFER;
244 } 190 }
245 glue->status = status; 191 glue->status = status;
246 192
247 if (!glue_to_musb(glue)) { 193 if (!glue_to_musb(glue)) {
248 pr_err("%s: musb core is not yet ready\n", __func__); 194 pr_err("%s: musb core is not yet ready\n", __func__);
249 return; 195 return -EPROBE_DEFER;
250 } 196 }
251 197
252 schedule_work(&glue->omap_musb_mailbox_work); 198 schedule_work(&glue->omap_musb_mailbox_work);
199
200 return 0;
253} 201}
254 202
255static void omap_musb_set_mailbox(struct omap2430_glue *glue) 203static void omap_musb_set_mailbox(struct omap2430_glue *glue)
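Returning -EPROBE_DEFER from the mailbox while the core is not ready gives PHY drivers a usable contract: on failure, forget the delivered state and let a later event re-deliver it. The twl6030 hunks further down follow exactly this pattern:

    ret = musb_mailbox(status);
    if (ret)
            twl->linkstat = MUSB_UNKNOWN;   /* re-deliver on the next event */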
@@ -259,6 +207,13 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
259 struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev); 207 struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
260 struct omap_musb_board_data *data = pdata->board_data; 208 struct omap_musb_board_data *data = pdata->board_data;
261 struct usb_otg *otg = musb->xceiv->otg; 209 struct usb_otg *otg = musb->xceiv->otg;
210 bool cable_connected;
211
212 cable_connected = ((glue->status == MUSB_ID_GROUND) ||
213 (glue->status == MUSB_VBUS_VALID));
214
215 if (cable_connected)
216 omap2430_set_power(musb, glue->enabled, cable_connected);
262 217
263 switch (glue->status) { 218 switch (glue->status) {
264 case MUSB_ID_GROUND: 219 case MUSB_ID_GROUND:
@@ -268,7 +223,6 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
268 musb->xceiv->otg->state = OTG_STATE_A_IDLE; 223 musb->xceiv->otg->state = OTG_STATE_A_IDLE;
269 musb->xceiv->last_event = USB_EVENT_ID; 224 musb->xceiv->last_event = USB_EVENT_ID;
270 if (musb->gadget_driver) { 225 if (musb->gadget_driver) {
271 pm_runtime_get_sync(dev);
272 omap_control_usb_set_mode(glue->control_otghs, 226 omap_control_usb_set_mode(glue->control_otghs,
273 USB_MODE_HOST); 227 USB_MODE_HOST);
274 omap2430_musb_set_vbus(musb, 1); 228 omap2430_musb_set_vbus(musb, 1);
@@ -281,8 +235,6 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
281 otg->default_a = false; 235 otg->default_a = false;
282 musb->xceiv->otg->state = OTG_STATE_B_IDLE; 236 musb->xceiv->otg->state = OTG_STATE_B_IDLE;
283 musb->xceiv->last_event = USB_EVENT_VBUS; 237 musb->xceiv->last_event = USB_EVENT_VBUS;
284 if (musb->gadget_driver)
285 pm_runtime_get_sync(dev);
286 omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE); 238 omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE);
287 break; 239 break;
288 240
@@ -291,11 +243,8 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
291 dev_dbg(dev, "VBUS Disconnect\n"); 243 dev_dbg(dev, "VBUS Disconnect\n");
292 244
293 musb->xceiv->last_event = USB_EVENT_NONE; 245 musb->xceiv->last_event = USB_EVENT_NONE;
294 if (musb->gadget_driver) { 246 if (musb->gadget_driver)
295 omap2430_musb_set_vbus(musb, 0); 247 omap2430_musb_set_vbus(musb, 0);
296 pm_runtime_mark_last_busy(dev);
297 pm_runtime_put_autosuspend(dev);
298 }
299 248
300 if (data->interface_type == MUSB_INTERFACE_UTMI) 249 if (data->interface_type == MUSB_INTERFACE_UTMI)
301 otg_set_vbus(musb->xceiv->otg, 0); 250 otg_set_vbus(musb->xceiv->otg, 0);
@@ -307,6 +256,9 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
307 dev_dbg(dev, "ID float\n"); 256 dev_dbg(dev, "ID float\n");
308 } 257 }
309 258
259 if (!cable_connected)
260 omap2430_set_power(musb, glue->enabled, cable_connected);
261
310 atomic_notifier_call_chain(&musb->xceiv->notifier, 262 atomic_notifier_call_chain(&musb->xceiv->notifier,
311 musb->xceiv->last_event, NULL); 263 musb->xceiv->last_event, NULL);
312} 264}
@@ -316,13 +268,8 @@ static void omap_musb_mailbox_work(struct work_struct *mailbox_work)
316{ 268{
317 struct omap2430_glue *glue = container_of(mailbox_work, 269 struct omap2430_glue *glue = container_of(mailbox_work,
318 struct omap2430_glue, omap_musb_mailbox_work); 270 struct omap2430_glue, omap_musb_mailbox_work);
319 struct musb *musb = glue_to_musb(glue);
320 struct device *dev = musb->controller;
321 271
322 pm_runtime_get_sync(dev);
323 omap_musb_set_mailbox(glue); 272 omap_musb_set_mailbox(glue);
324 pm_runtime_mark_last_busy(dev);
325 pm_runtime_put_autosuspend(dev);
326} 273}
327 274
328static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci) 275static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci)
@@ -389,23 +336,7 @@ static int omap2430_musb_init(struct musb *musb)
389 return PTR_ERR(musb->phy); 336 return PTR_ERR(musb->phy);
390 } 337 }
391 musb->isr = omap2430_musb_interrupt; 338 musb->isr = omap2430_musb_interrupt;
392 339 phy_init(musb->phy);
393 /*
394 * Enable runtime PM for musb parent (this driver). We can't
395 * do it earlier as struct musb is not yet allocated and we
396 * need to touch the musb registers for runtime PM.
397 */
398 pm_runtime_enable(glue->dev);
399 status = pm_runtime_get_sync(glue->dev);
400 if (status < 0)
401 goto err1;
402
403 status = pm_runtime_get_sync(dev);
404 if (status < 0) {
405 dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
406 pm_runtime_put_sync(glue->dev);
407 goto err1;
408 }
409 340
410 l = musb_readl(musb->mregs, OTG_INTERFSEL); 341 l = musb_readl(musb->mregs, OTG_INTERFSEL);
411 342
@@ -427,20 +358,10 @@ static int omap2430_musb_init(struct musb *musb)
427 musb_readl(musb->mregs, OTG_INTERFSEL), 358 musb_readl(musb->mregs, OTG_INTERFSEL),
428 musb_readl(musb->mregs, OTG_SIMENABLE)); 359 musb_readl(musb->mregs, OTG_SIMENABLE));
429 360
430 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
431
432 if (glue->status != MUSB_UNKNOWN) 361 if (glue->status != MUSB_UNKNOWN)
433 omap_musb_set_mailbox(glue); 362 omap_musb_set_mailbox(glue);
434 363
435 phy_init(musb->phy);
436 phy_power_on(musb->phy);
437
438 pm_runtime_put_noidle(musb->controller);
439 pm_runtime_put_noidle(glue->dev);
440 return 0; 364 return 0;
441
442err1:
443 return status;
444} 365}
445 366
446static void omap2430_musb_enable(struct musb *musb) 367static void omap2430_musb_enable(struct musb *musb)
@@ -452,6 +373,11 @@ static void omap2430_musb_enable(struct musb *musb)
452 struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev); 373 struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
453 struct omap_musb_board_data *data = pdata->board_data; 374 struct omap_musb_board_data *data = pdata->board_data;
454 375
376 if (!WARN_ON(!musb->phy))
377 phy_power_on(musb->phy);
378
379 omap2430_set_power(musb, true, glue->cable_connected);
380
455 switch (glue->status) { 381 switch (glue->status) {
456 382
457 case MUSB_ID_GROUND: 383 case MUSB_ID_GROUND:
@@ -487,18 +413,25 @@ static void omap2430_musb_disable(struct musb *musb)
487 struct device *dev = musb->controller; 413 struct device *dev = musb->controller;
488 struct omap2430_glue *glue = dev_get_drvdata(dev->parent); 414 struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
489 415
416 if (!WARN_ON(!musb->phy))
417 phy_power_off(musb->phy);
418
490 if (glue->status != MUSB_UNKNOWN) 419 if (glue->status != MUSB_UNKNOWN)
491 omap_control_usb_set_mode(glue->control_otghs, 420 omap_control_usb_set_mode(glue->control_otghs,
492 USB_MODE_DISCONNECT); 421 USB_MODE_DISCONNECT);
422
423 omap2430_set_power(musb, false, glue->cable_connected);
493} 424}
494 425
495static int omap2430_musb_exit(struct musb *musb) 426static int omap2430_musb_exit(struct musb *musb)
496{ 427{
497 del_timer_sync(&musb_idle_timer); 428 struct device *dev = musb->controller;
429 struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
498 430
499 omap2430_low_level_exit(musb); 431 omap2430_low_level_exit(musb);
500 phy_power_off(musb->phy);
501 phy_exit(musb->phy); 432 phy_exit(musb->phy);
433 musb->phy = NULL;
434 cancel_work_sync(&glue->omap_musb_mailbox_work);
502 435
503 return 0; 436 return 0;
504} 437}
@@ -512,9 +445,6 @@ static const struct musb_platform_ops omap2430_ops = {
512 .init = omap2430_musb_init, 445 .init = omap2430_musb_init,
513 .exit = omap2430_musb_exit, 446 .exit = omap2430_musb_exit,
514 447
515 .set_mode = omap2430_musb_set_mode,
516 .try_idle = omap2430_musb_try_idle,
517
518 .set_vbus = omap2430_musb_set_vbus, 448 .set_vbus = omap2430_musb_set_vbus,
519 449
520 .enable = omap2430_musb_enable, 450 .enable = omap2430_musb_enable,
@@ -639,11 +569,9 @@ static int omap2430_probe(struct platform_device *pdev)
639 goto err2; 569 goto err2;
640 } 570 }
641 571
642 /* 572 pm_runtime_enable(glue->dev);
643 * Note that we cannot enable PM runtime yet for this 573 pm_runtime_use_autosuspend(glue->dev);
644 * driver as we need struct musb initialized first. 574 pm_runtime_set_autosuspend_delay(glue->dev, 500);
645 * See omap2430_musb_init above.
646 */
647 575
648 ret = platform_device_add(musb); 576 ret = platform_device_add(musb);
649 if (ret) { 577 if (ret) {
@@ -662,12 +590,14 @@ err0:
662 590
663static int omap2430_remove(struct platform_device *pdev) 591static int omap2430_remove(struct platform_device *pdev)
664{ 592{
665 struct omap2430_glue *glue = platform_get_drvdata(pdev); 593 struct omap2430_glue *glue = platform_get_drvdata(pdev);
594 struct musb *musb = glue_to_musb(glue);
666 595
667 pm_runtime_get_sync(glue->dev); 596 pm_runtime_get_sync(glue->dev);
668 cancel_work_sync(&glue->omap_musb_mailbox_work);
669 platform_device_unregister(glue->musb); 597 platform_device_unregister(glue->musb);
598 omap2430_set_power(musb, false, false);
670 pm_runtime_put_sync(glue->dev); 599 pm_runtime_put_sync(glue->dev);
600 pm_runtime_dont_use_autosuspend(glue->dev);
671 pm_runtime_disable(glue->dev); 601 pm_runtime_disable(glue->dev);
672 602
673 return 0; 603 return 0;
@@ -680,12 +610,13 @@ static int omap2430_runtime_suspend(struct device *dev)
680 struct omap2430_glue *glue = dev_get_drvdata(dev); 610 struct omap2430_glue *glue = dev_get_drvdata(dev);
681 struct musb *musb = glue_to_musb(glue); 611 struct musb *musb = glue_to_musb(glue);
682 612
683 if (musb) { 613 if (!musb)
684 musb->context.otg_interfsel = musb_readl(musb->mregs, 614 return 0;
685 OTG_INTERFSEL);
686 615
687 omap2430_low_level_exit(musb); 616 musb->context.otg_interfsel = musb_readl(musb->mregs,
688 } 617 OTG_INTERFSEL);
618
619 omap2430_low_level_exit(musb);
689 620
690 return 0; 621 return 0;
691} 622}
@@ -696,7 +627,7 @@ static int omap2430_runtime_resume(struct device *dev)
696 struct musb *musb = glue_to_musb(glue); 627 struct musb *musb = glue_to_musb(glue);
697 628
698 if (!musb) 629 if (!musb)
699 return -EPROBE_DEFER; 630 return 0;
700 631
701 omap2430_low_level_init(musb); 632 omap2430_low_level_init(musb);
702 musb_writel(musb->mregs, OTG_INTERFSEL, 633 musb_writel(musb->mregs, OTG_INTERFSEL,
@@ -738,18 +669,8 @@ static struct platform_driver omap2430_driver = {
738 }, 669 },
739}; 670};
740 671
672module_platform_driver(omap2430_driver);
673
741MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer"); 674MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer");
742MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>"); 675MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
743MODULE_LICENSE("GPL v2"); 676MODULE_LICENSE("GPL v2");
744
745static int __init omap2430_init(void)
746{
747 return platform_driver_register(&omap2430_driver);
748}
749subsys_initcall(omap2430_init);
750
751static void __exit omap2430_exit(void)
752{
753 platform_driver_unregister(&omap2430_driver);
754}
755module_exit(omap2430_exit);
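With probe deferral available there is no longer a reason to register this driver at subsys_initcall time, so the boilerplate collapses into module_platform_driver(). Note the ordering change for built-in kernels: the macro registers at module_init (device_initcall) level, later than subsys_initcall. It expands to approximately:

    /* module_platform_driver(omap2430_driver) is roughly: */
    static int __init omap2430_driver_init(void)
    {
            return platform_driver_register(&omap2430_driver);
    }
    module_init(omap2430_driver_init);

    static void __exit omap2430_driver_exit(void)
    {
            platform_driver_unregister(&omap2430_driver);
    }
    module_exit(omap2430_driver_exit);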
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index fdab4232cfbf..76500515dd8b 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -80,7 +80,8 @@ static struct musb *sunxi_musb;
80 80
81struct sunxi_glue { 81struct sunxi_glue {
82 struct device *dev; 82 struct device *dev;
83 struct platform_device *musb; 83 struct musb *musb;
84 struct platform_device *musb_pdev;
84 struct clk *clk; 85 struct clk *clk;
85 struct reset_control *rst; 86 struct reset_control *rst;
86 struct phy *phy; 87 struct phy *phy;
@@ -102,7 +103,7 @@ static void sunxi_musb_work(struct work_struct *work)
102 return; 103 return;
103 104
104 if (test_and_clear_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags)) { 105 if (test_and_clear_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags)) {
105 struct musb *musb = platform_get_drvdata(glue->musb); 106 struct musb *musb = glue->musb;
106 unsigned long flags; 107 unsigned long flags;
107 u8 devctl; 108 u8 devctl;
108 109
@@ -112,7 +113,7 @@ static void sunxi_musb_work(struct work_struct *work)
112 if (test_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags)) { 113 if (test_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags)) {
113 set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); 114 set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
114 musb->xceiv->otg->default_a = 1; 115 musb->xceiv->otg->default_a = 1;
115 musb->xceiv->otg->state = OTG_STATE_A_IDLE; 116 musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
116 MUSB_HST_MODE(musb); 117 MUSB_HST_MODE(musb);
117 devctl |= MUSB_DEVCTL_SESSION; 118 devctl |= MUSB_DEVCTL_SESSION;
118 } else { 119 } else {
@@ -145,10 +146,12 @@ static void sunxi_musb_set_vbus(struct musb *musb, int is_on)
145{ 146{
146 struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent); 147 struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
147 148
148 if (is_on) 149 if (is_on) {
149 set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); 150 set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
150 else 151 musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
152 } else {
151 clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); 153 clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
154 }
152 155
153 schedule_work(&glue->work); 156 schedule_work(&glue->work);
154} 157}
@@ -264,15 +267,6 @@ static int sunxi_musb_init(struct musb *musb)
264 if (ret) 267 if (ret)
265 goto error_unregister_notifier; 268 goto error_unregister_notifier;
266 269
267 if (musb->port_mode == MUSB_PORT_MODE_HOST) {
268 ret = phy_power_on(glue->phy);
269 if (ret)
270 goto error_phy_exit;
271 set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
272 /* Stop musb work from turning vbus off again */
273 set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
274 }
275
276 musb->isr = sunxi_musb_interrupt; 270 musb->isr = sunxi_musb_interrupt;
277 271
278 /* Stop the musb-core from doing runtime pm (not supported on sunxi) */ 272 /* Stop the musb-core from doing runtime pm (not supported on sunxi) */
@@ -280,8 +274,6 @@ static int sunxi_musb_init(struct musb *musb)
280 274
281 return 0; 275 return 0;
282 276
283error_phy_exit:
284 phy_exit(glue->phy);
285error_unregister_notifier: 277error_unregister_notifier:
286 if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE) 278 if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
287 extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST, 279 extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
@@ -323,10 +315,31 @@ static int sunxi_musb_exit(struct musb *musb)
323 return 0; 315 return 0;
324} 316}
325 317
318static int sunxi_set_mode(struct musb *musb, u8 mode)
319{
320 struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
321 int ret;
322
323 if (mode == MUSB_HOST) {
324 ret = phy_power_on(glue->phy);
325 if (ret)
326 return ret;
327
328 set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
329 /* Stop musb work from turning vbus off again */
330 set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
331 musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
332 }
333
334 return 0;
335}
336
326static void sunxi_musb_enable(struct musb *musb) 337static void sunxi_musb_enable(struct musb *musb)
327{ 338{
328 struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent); 339 struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
329 340
341 glue->musb = musb;
342
330 /* musb_core does not call us in a balanced manner */ 343 /* musb_core does not call us in a balanced manner */
331 if (test_and_set_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags)) 344 if (test_and_set_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags))
332 return; 345 return;
@@ -569,6 +582,7 @@ static const struct musb_platform_ops sunxi_musb_ops = {
569 .exit = sunxi_musb_exit, 582 .exit = sunxi_musb_exit,
570 .enable = sunxi_musb_enable, 583 .enable = sunxi_musb_enable,
571 .disable = sunxi_musb_disable, 584 .disable = sunxi_musb_disable,
585 .set_mode = sunxi_set_mode,
572 .fifo_offset = sunxi_musb_fifo_offset, 586 .fifo_offset = sunxi_musb_fifo_offset,
573 .ep_offset = sunxi_musb_ep_offset, 587 .ep_offset = sunxi_musb_ep_offset,
574 .busctl_offset = sunxi_musb_busctl_offset, 588 .busctl_offset = sunxi_musb_busctl_offset,
@@ -721,9 +735,9 @@ static int sunxi_musb_probe(struct platform_device *pdev)
721 pinfo.data = &pdata; 735 pinfo.data = &pdata;
722 pinfo.size_data = sizeof(pdata); 736 pinfo.size_data = sizeof(pdata);
723 737
724 glue->musb = platform_device_register_full(&pinfo); 738 glue->musb_pdev = platform_device_register_full(&pinfo);
725 if (IS_ERR(glue->musb)) { 739 if (IS_ERR(glue->musb_pdev)) {
726 ret = PTR_ERR(glue->musb); 740 ret = PTR_ERR(glue->musb_pdev);
727 dev_err(&pdev->dev, "Error registering musb dev: %d\n", ret); 741 dev_err(&pdev->dev, "Error registering musb dev: %d\n", ret);
728 goto err_unregister_usb_phy; 742 goto err_unregister_usb_phy;
729 } 743 }
@@ -740,7 +754,7 @@ static int sunxi_musb_remove(struct platform_device *pdev)
740 struct sunxi_glue *glue = platform_get_drvdata(pdev); 754 struct sunxi_glue *glue = platform_get_drvdata(pdev);
741 struct platform_device *usb_phy = glue->usb_phy; 755 struct platform_device *usb_phy = glue->usb_phy;
742 756
743 platform_device_unregister(glue->musb); /* Frees glue ! */ 757 platform_device_unregister(glue->musb_pdev);
744 usb_phy_generic_unregister(usb_phy); 758 usb_phy_generic_unregister(usb_phy);
745 759
746 return 0; 760 return 0;
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index 24e2b3cf1867..a72e8d670adc 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -97,6 +97,9 @@ struct twl6030_usb {
97 97
98 struct regulator *usb3v3; 98 struct regulator *usb3v3;
99 99
100 /* used to check initial cable status after probe */
101 struct delayed_work get_status_work;
102
100 /* used to set vbus, in atomic path */ 103 /* used to set vbus, in atomic path */
101 struct work_struct set_vbus_work; 104 struct work_struct set_vbus_work;
102 105
@@ -227,12 +230,16 @@ static irqreturn_t twl6030_usb_irq(int irq, void *_twl)
227 twl->asleep = 1; 230 twl->asleep = 1;
228 status = MUSB_VBUS_VALID; 231 status = MUSB_VBUS_VALID;
229 twl->linkstat = status; 232 twl->linkstat = status;
230 musb_mailbox(status); 233 ret = musb_mailbox(status);
234 if (ret)
235 twl->linkstat = MUSB_UNKNOWN;
231 } else { 236 } else {
232 if (twl->linkstat != MUSB_UNKNOWN) { 237 if (twl->linkstat != MUSB_UNKNOWN) {
233 status = MUSB_VBUS_OFF; 238 status = MUSB_VBUS_OFF;
234 twl->linkstat = status; 239 twl->linkstat = status;
235 musb_mailbox(status); 240 ret = musb_mailbox(status);
241 if (ret)
242 twl->linkstat = MUSB_UNKNOWN;
236 if (twl->asleep) { 243 if (twl->asleep) {
237 regulator_disable(twl->usb3v3); 244 regulator_disable(twl->usb3v3);
238 twl->asleep = 0; 245 twl->asleep = 0;
@@ -264,7 +271,9 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
264 twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET); 271 twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET);
265 status = MUSB_ID_GROUND; 272 status = MUSB_ID_GROUND;
266 twl->linkstat = status; 273 twl->linkstat = status;
267 musb_mailbox(status); 274 ret = musb_mailbox(status);
275 if (ret)
276 twl->linkstat = MUSB_UNKNOWN;
268 } else { 277 } else {
269 twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR); 278 twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR);
270 twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET); 279 twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
@@ -274,6 +283,15 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
274 return IRQ_HANDLED; 283 return IRQ_HANDLED;
275} 284}
276 285
286static void twl6030_status_work(struct work_struct *work)
287{
288 struct twl6030_usb *twl = container_of(work, struct twl6030_usb,
289 get_status_work.work);
290
291 twl6030_usb_irq(twl->irq2, twl);
292 twl6030_usbotg_irq(twl->irq1, twl);
293}
294
277static int twl6030_enable_irq(struct twl6030_usb *twl) 295static int twl6030_enable_irq(struct twl6030_usb *twl)
278{ 296{
279 twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET); 297 twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
@@ -284,8 +302,6 @@ static int twl6030_enable_irq(struct twl6030_usb *twl)
284 REG_INT_MSK_LINE_C); 302 REG_INT_MSK_LINE_C);
285 twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK, 303 twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK,
286 REG_INT_MSK_STS_C); 304 REG_INT_MSK_STS_C);
287 twl6030_usb_irq(twl->irq2, twl);
288 twl6030_usbotg_irq(twl->irq1, twl);
289 305
290 return 0; 306 return 0;
291} 307}
@@ -371,6 +387,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
371 dev_warn(&pdev->dev, "could not create sysfs file\n"); 387 dev_warn(&pdev->dev, "could not create sysfs file\n");
372 388
373 INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work); 389 INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work);
390 INIT_DELAYED_WORK(&twl->get_status_work, twl6030_status_work);
374 391
375 status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq, 392 status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq,
376 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT, 393 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
@@ -395,6 +412,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
395 412
396 twl->asleep = 0; 413 twl->asleep = 0;
397 twl6030_enable_irq(twl); 414 twl6030_enable_irq(twl);
415 schedule_delayed_work(&twl->get_status_work, HZ);
398 dev_info(&pdev->dev, "Initialized TWL6030 USB module\n"); 416 dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");
399 417
400 return 0; 418 return 0;
@@ -404,6 +422,7 @@ static int twl6030_usb_remove(struct platform_device *pdev)
404{ 422{
405 struct twl6030_usb *twl = platform_get_drvdata(pdev); 423 struct twl6030_usb *twl = platform_get_drvdata(pdev);
406 424
425 cancel_delayed_work(&twl->get_status_work);
407 twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK, 426 twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
408 REG_INT_MSK_LINE_C); 427 REG_INT_MSK_LINE_C);
409 twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK, 428 twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
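Initial cable detection moves out of twl6030_enable_irq() into a delayed work scheduled one second after probe, which simply replays both interrupt handlers; by then the musb core has normally registered, and if not, the mailbox's new error return leaves linkstat at MUSB_UNKNOWN so the state is re-delivered later. The worker from the hunk above, annotated:

    /* Replay both detection paths once probe has settled. */
    static void twl6030_status_work(struct work_struct *work)
    {
            struct twl6030_usb *twl = container_of(work, struct twl6030_usb,
                                                   get_status_work.work);

            twl6030_usb_irq(twl->irq2, twl);        /* VBUS detection path */
            twl6030_usbotg_irq(twl->irq1, twl);     /* ID-pin detection path */
    }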
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 2eddbe538cda..5608af4a369d 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -2007,6 +2007,7 @@ static void mos7720_release(struct usb_serial *serial)
2007 urblist_entry) 2007 urblist_entry)
2008 usb_unlink_urb(urbtrack->urb); 2008 usb_unlink_urb(urbtrack->urb);
2009 spin_unlock_irqrestore(&mos_parport->listlock, flags); 2009 spin_unlock_irqrestore(&mos_parport->listlock, flags);
2010 parport_del_port(mos_parport->pp);
2010 2011
2011 kref_put(&mos_parport->ref_count, destroy_mos_parport); 2012 kref_put(&mos_parport->ref_count, destroy_mos_parport);
2012 } 2013 }
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 4d49fce406e1..5ef014ba6ae8 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -836,6 +836,7 @@ static int uas_slave_configure(struct scsi_device *sdev)
836 if (devinfo->flags & US_FL_BROKEN_FUA) 836 if (devinfo->flags & US_FL_BROKEN_FUA)
837 sdev->broken_fua = 1; 837 sdev->broken_fua = 1;
838 838
839 scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
839 return 0; 840 return 0;
840} 841}
841 842
@@ -848,7 +849,6 @@ static struct scsi_host_template uas_host_template = {
848 .slave_configure = uas_slave_configure, 849 .slave_configure = uas_slave_configure,
849 .eh_abort_handler = uas_eh_abort_handler, 850 .eh_abort_handler = uas_eh_abort_handler,
850 .eh_bus_reset_handler = uas_eh_bus_reset_handler, 851 .eh_bus_reset_handler = uas_eh_bus_reset_handler,
851 .can_queue = MAX_CMNDS,
852 .this_id = -1, 852 .this_id = -1,
853 .sg_tablesize = SG_NONE, 853 .sg_tablesize = SG_NONE,
854 .skip_settle_delay = 1, 854 .skip_settle_delay = 1,
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index fca51105974e..2e0450bec1b1 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -941,7 +941,7 @@ static void vhci_stop(struct usb_hcd *hcd)
941 941
942static int vhci_get_frame_number(struct usb_hcd *hcd) 942static int vhci_get_frame_number(struct usb_hcd *hcd)
943{ 943{
944 pr_err("Not yet implemented\n"); 944 dev_err_ratelimited(&hcd->self.root_hub->dev, "Not yet implemented\n");
945 return 0; 945 return 0;
946} 946}
947 947
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 1d3e45f84549..e032ca397371 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -481,10 +481,14 @@ out:
481 481
482static int peek_head_len(struct sock *sk) 482static int peek_head_len(struct sock *sk)
483{ 483{
484 struct socket *sock = sk->sk_socket;
484 struct sk_buff *head; 485 struct sk_buff *head;
485 int len = 0; 486 int len = 0;
486 unsigned long flags; 487 unsigned long flags;
487 488
489 if (sock->ops->peek_len)
490 return sock->ops->peek_len(sock);
491
488 spin_lock_irqsave(&sk->sk_receive_queue.lock, flags); 492 spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
489 head = skb_peek(&sk->sk_receive_queue); 493 head = skb_peek(&sk->sk_receive_queue);
490 if (likely(head)) { 494 if (likely(head)) {
@@ -497,6 +501,16 @@ static int peek_head_len(struct sock *sk)
497 return len; 501 return len;
498} 502}
499 503
504static int sk_has_rx_data(struct sock *sk)
505{
506 struct socket *sock = sk->sk_socket;
507
508 if (sock->ops->peek_len)
509 return sock->ops->peek_len(sock);
510
511 return skb_queue_empty(&sk->sk_receive_queue);
512}
513
500static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) 514static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
501{ 515{
502 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; 516 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
@@ -513,7 +527,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
513 endtime = busy_clock() + vq->busyloop_timeout; 527 endtime = busy_clock() + vq->busyloop_timeout;
514 528
515 while (vhost_can_busy_poll(&net->dev, endtime) && 529 while (vhost_can_busy_poll(&net->dev, endtime) &&
516 skb_queue_empty(&sk->sk_receive_queue) && 530 !sk_has_rx_data(sk) &&
517 vhost_vq_avail_empty(&net->dev, vq)) 531 vhost_vq_avail_empty(&net->dev, vq))
518 cpu_relax_lowlatency(); 532 cpu_relax_lowlatency();
519 533
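tun and macvtap can hold received packets in a private ring rather than on sk_receive_queue, so skb_queue_empty() alone under-reports pending RX; where the protocol implements ->peek_len, both peek_head_len() and the busy-poll exit condition now ask it directly. A sketch of the dispatch (rx_data_pending is a hypothetical name, locking omitted, and the fallback is written with the polarity the name implies rather than copied from the hunk):

    #include <net/sock.h>

    /* Prefer the protocol's own notion of queued RX data when it
     * provides one; fall back to the generic socket queue. */
    static bool rx_data_pending(struct sock *sk)
    {
            struct socket *sock = sk->sk_socket;

            if (sock->ops->peek_len)                /* e.g. tun, macvtap */
                    return sock->ops->peek_len(sock) > 0;

            return !skb_queue_empty(&sk->sk_receive_queue);
    }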
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index b54f26c55dfd..b4b3e256491b 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -746,7 +746,7 @@ config ALIM7101_WDT
746 746
747config EBC_C384_WDT 747config EBC_C384_WDT
748 tristate "WinSystems EBC-C384 Watchdog Timer" 748 tristate "WinSystems EBC-C384 Watchdog Timer"
749 depends on X86 && ISA 749 depends on X86 && ISA_BUS_API
750 select WATCHDOG_CORE 750 select WATCHDOG_CORE
751 help 751 help
752 Enables watchdog timer support for the watchdog timer on the 752 Enables watchdog timer support for the watchdog timer on the
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index d46839f51e73..e4db19e88ab1 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -151,8 +151,6 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
151static void balloon_process(struct work_struct *work); 151static void balloon_process(struct work_struct *work);
152static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); 152static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
153 153
154static void release_memory_resource(struct resource *resource);
155
156/* When ballooning out (allocating memory to return to Xen) we don't really 154/* When ballooning out (allocating memory to return to Xen) we don't really
157 want the kernel to try too hard since that can trigger the oom killer. */ 155 want the kernel to try too hard since that can trigger the oom killer. */
158#define GFP_BALLOON \ 156#define GFP_BALLOON \
@@ -248,6 +246,19 @@ static enum bp_state update_schedule(enum bp_state state)
248} 246}
249 247
250#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG 248#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
249static void release_memory_resource(struct resource *resource)
250{
251 if (!resource)
252 return;
253
254 /*
255 * No need to reset region to identity mapped since we now
256 * know that no I/O can be in this region
257 */
258 release_resource(resource);
259 kfree(resource);
260}
261
251static struct resource *additional_memory_resource(phys_addr_t size) 262static struct resource *additional_memory_resource(phys_addr_t size)
252{ 263{
253 struct resource *res; 264 struct resource *res;
@@ -286,19 +297,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
286 return res; 297 return res;
287} 298}
288 299
289static void release_memory_resource(struct resource *resource)
290{
291 if (!resource)
292 return;
293
294 /*
295 * No need to reset region to identity mapped since we now
296 * know that no I/O can be in this region
297 */
298 release_resource(resource);
299 kfree(resource);
300}
301
302static enum bp_state reserve_additional_memory(void) 300static enum bp_state reserve_additional_memory(void)
303{ 301{
304 long credit; 302 long credit;
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 8e67336f8ddd..6a25533da237 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
183 field_start = OFFSET(cfg_entry); 183 field_start = OFFSET(cfg_entry);
184 field_end = OFFSET(cfg_entry) + field->size; 184 field_end = OFFSET(cfg_entry) + field->size;
185 185
186 if ((req_start >= field_start && req_start < field_end) 186 if (req_end > field_start && field_end > req_start) {
187 || (req_end > field_start && req_end <= field_end)) {
188 err = conf_space_read(dev, cfg_entry, field_start, 187 err = conf_space_read(dev, cfg_entry, field_start,
189 &tmp_val); 188 &tmp_val);
190 if (err) 189 if (err)
@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
230 field_start = OFFSET(cfg_entry); 229 field_start = OFFSET(cfg_entry);
231 field_end = OFFSET(cfg_entry) + field->size; 230 field_end = OFFSET(cfg_entry) + field->size;
232 231
233 if ((req_start >= field_start && req_start < field_end) 232 if (req_end > field_start && field_end > req_start) {
234 || (req_end > field_start && req_end <= field_end)) {
235 tmp_val = 0; 233 tmp_val = 0;
236 234
237 err = xen_pcibk_config_read(dev, field_start, 235 err = xen_pcibk_config_read(dev, field_start,
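The old condition checked only whether either endpoint of the request landed inside the field, which misses a request that strictly contains a field; the replacement is the canonical overlap test for half-open ranges: [req_start, req_end) and [field_start, field_end) intersect exactly when req_end > field_start && field_end > req_start. A self-contained check of the case the old test missed (overlaps is an illustrative helper, not from the patch):

    #include <assert.h>
    #include <stdint.h>

    /* Canonical overlap test for half-open ranges [a0,a1) and [b0,b1). */
    static int overlaps(uint32_t a0, uint32_t a1, uint32_t b0, uint32_t b1)
    {
            return a1 > b0 && b1 > a0;
    }

    int main(void)
    {
            /* An 8-byte request covering a 4-byte field at offset 2:
             * neither endpoint lies inside the field, yet they overlap. */
            assert(overlaps(0, 8, 2, 6));
            /* Adjacent ranges do not overlap. */
            assert(!overlaps(0, 2, 2, 6));
            return 0;
    }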
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index ad3d17d29c81..9ead1c2ff1dd 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -145,7 +145,7 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
145 /* A write to obtain the length must happen as a 32-bit write. 145 /* A write to obtain the length must happen as a 32-bit write.
146 * This does not (yet) support writing individual bytes 146 * This does not (yet) support writing individual bytes
147 */ 147 */
148 if (value == ~PCI_ROM_ADDRESS_ENABLE) 148 if ((value | ~PCI_ROM_ADDRESS_MASK) == ~0U)
149 bar->which = 1; 149 bar->which = 1;
150 else { 150 else {
151 u32 tmpval; 151 u32 tmpval;
@@ -225,38 +225,42 @@ static inline void read_dev_bar(struct pci_dev *dev,
225 (PCI_BASE_ADDRESS_SPACE_MEMORY | 225 (PCI_BASE_ADDRESS_SPACE_MEMORY |
226 PCI_BASE_ADDRESS_MEM_TYPE_64))) { 226 PCI_BASE_ADDRESS_MEM_TYPE_64))) {
227 bar_info->val = res[pos - 1].start >> 32; 227 bar_info->val = res[pos - 1].start >> 32;
228 bar_info->len_val = res[pos - 1].end >> 32; 228 bar_info->len_val = -resource_size(&res[pos - 1]) >> 32;
229 return; 229 return;
230 } 230 }
231 } 231 }
232 232
233 if (!res[pos].flags ||
234 (res[pos].flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET |
235 IORESOURCE_BUSY)))
236 return;
237
233 bar_info->val = res[pos].start | 238 bar_info->val = res[pos].start |
234 (res[pos].flags & PCI_REGION_FLAG_MASK); 239 (res[pos].flags & PCI_REGION_FLAG_MASK);
235 bar_info->len_val = resource_size(&res[pos]); 240 bar_info->len_val = -resource_size(&res[pos]) |
241 (res[pos].flags & PCI_REGION_FLAG_MASK);
236} 242}
237 243
238static void *bar_init(struct pci_dev *dev, int offset) 244static void *bar_init(struct pci_dev *dev, int offset)
239{ 245{
240 struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL); 246 struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
241 247
242 if (!bar) 248 if (!bar)
243 return ERR_PTR(-ENOMEM); 249 return ERR_PTR(-ENOMEM);
244 250
245 read_dev_bar(dev, bar, offset, ~0); 251 read_dev_bar(dev, bar, offset, ~0);
246 bar->which = 0;
247 252
248 return bar; 253 return bar;
249} 254}
250 255
251static void *rom_init(struct pci_dev *dev, int offset) 256static void *rom_init(struct pci_dev *dev, int offset)
252{ 257{
253 struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL); 258 struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
254 259
255 if (!bar) 260 if (!bar)
256 return ERR_PTR(-ENOMEM); 261 return ERR_PTR(-ENOMEM);
257 262
258 read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE); 263 read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
259 bar->which = 0;
260 264
261 return bar; 265 return bar;
262} 266}
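BAR sizing works by writing all-ones and reading back the base-address mask, which is ~(size - 1); in two's complement that equals -size, so storing -resource_size() (with the flag bits merged back in) reproduces what real hardware would answer, and the relaxed rom_write() check likewise accepts any sizing pattern whose address bits are all-ones regardless of the low enable bit. A worked example of the identity:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t size = 0x100000;               /* 1 MiB BAR */

            /* Two's complement identity: -size == ~(size - 1). */
            assert(-size == ~(size - 1));
            assert(-size == 0xfff00000);            /* mask the guest reads back */
            return 0;
    }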
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index b84c291ba1eb..d7b78d531e63 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -74,7 +74,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
74 v9fs_proto_dotu(v9ses)); 74 v9fs_proto_dotu(v9ses));
75 fid = file->private_data; 75 fid = file->private_data;
76 if (!fid) { 76 if (!fid) {
77 fid = v9fs_fid_clone(file->f_path.dentry); 77 fid = v9fs_fid_clone(file_dentry(file));
78 if (IS_ERR(fid)) 78 if (IS_ERR(fid))
79 return PTR_ERR(fid); 79 return PTR_ERR(fid);
80 80
@@ -100,7 +100,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
100 * because we want write after unlink usecase 100 * because we want write after unlink usecase
101 * to work. 101 * to work.
102 */ 102 */
103 fid = v9fs_writeback_fid(file->f_path.dentry); 103 fid = v9fs_writeback_fid(file_dentry(file));
104 if (IS_ERR(fid)) { 104 if (IS_ERR(fid)) {
105 err = PTR_ERR(fid); 105 err = PTR_ERR(fid);
106 mutex_unlock(&v9inode->v_mutex); 106 mutex_unlock(&v9inode->v_mutex);
@@ -516,7 +516,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
516 * because we want write after unlink usecase 516 * because we want write after unlink usecase
517 * to work. 517 * to work.
518 */ 518 */
519 fid = v9fs_writeback_fid(filp->f_path.dentry); 519 fid = v9fs_writeback_fid(file_dentry(filp));
520 if (IS_ERR(fid)) { 520 if (IS_ERR(fid)) {
521 retval = PTR_ERR(fid); 521 retval = PTR_ERR(fid);
522 mutex_unlock(&v9inode->v_mutex); 522 mutex_unlock(&v9inode->v_mutex);
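On overlayfs, file->f_path.dentry is the overlay dentry, while 9p's fid lookups need the dentry of the underlying file; file_dentry() returns the real dentry in that case and is a no-op otherwise. The pattern, sketched for a generic file op (example_open is a hypothetical name):

    #include <linux/fs.h>

    /* Sketch: resolve the filesystem's own dentry inside a file op. */
    static int example_open(struct inode *inode, struct file *file)
    {
            struct dentry *dentry = file_dentry(file); /* real dentry on overlayfs */

            /* ... use dentry, not file->f_path.dentry ... */
            return 0;
    }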
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index f0d268b97d19..a439548de785 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -70,9 +70,13 @@ struct autofs_info {
70}; 70};
71 71
72#define AUTOFS_INF_EXPIRING (1<<0) /* dentry in the process of expiring */ 72#define AUTOFS_INF_EXPIRING (1<<0) /* dentry in the process of expiring */
73#define AUTOFS_INF_NO_RCU (1<<1) /* the dentry is being considered 73#define AUTOFS_INF_WANT_EXPIRE (1<<1) /* the dentry is being considered
74 * for expiry, so RCU_walk is 74 * for expiry, so RCU_walk is
75 * not permitted 75 * not permitted. If it progresses to
76 * actual expiry attempt, the flag is
77 * not cleared when EXPIRING is set -
78 * in that case it gets cleared only
79 * when it comes to clearing EXPIRING.
76 */ 80 */
77#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */ 81#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */
78 82
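The renamed flag now covers the whole expire attempt rather than only the RCU-exclusion window, which is why the smp_mb()/clear pairs disappear from expire.c below. The lifecycle, compressed into one comment as a sketch of the protocol implied by the hunks that follow:

    /* AUTOFS_INF_WANT_EXPIRE protocol (sketch):
     *
     *   set WANT_EXPIRE; synchronize_rcu();   // push out RCU-walk lookups
     *   if (candidate still not busy) {
     *           set EXPIRING;                 // WANT_EXPIRE stays set
     *           ... expire proceeds ...
     *           clear EXPIRING | WANT_EXPIRE; // dropped together at the end
     *   } else {
     *           clear WANT_EXPIRE;            // busy after all, back off
     *   }
     */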
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 9510d8d2e9cd..b493909e7492 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -316,19 +316,17 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
316 if (ino->flags & AUTOFS_INF_PENDING) 316 if (ino->flags & AUTOFS_INF_PENDING)
317 goto out; 317 goto out;
318 if (!autofs4_direct_busy(mnt, root, timeout, do_now)) { 318 if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
319 ino->flags |= AUTOFS_INF_NO_RCU; 319 ino->flags |= AUTOFS_INF_WANT_EXPIRE;
320 spin_unlock(&sbi->fs_lock); 320 spin_unlock(&sbi->fs_lock);
321 synchronize_rcu(); 321 synchronize_rcu();
322 spin_lock(&sbi->fs_lock); 322 spin_lock(&sbi->fs_lock);
323 if (!autofs4_direct_busy(mnt, root, timeout, do_now)) { 323 if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
324 ino->flags |= AUTOFS_INF_EXPIRING; 324 ino->flags |= AUTOFS_INF_EXPIRING;
325 smp_mb();
326 ino->flags &= ~AUTOFS_INF_NO_RCU;
327 init_completion(&ino->expire_complete); 325 init_completion(&ino->expire_complete);
328 spin_unlock(&sbi->fs_lock); 326 spin_unlock(&sbi->fs_lock);
329 return root; 327 return root;
330 } 328 }
331 ino->flags &= ~AUTOFS_INF_NO_RCU; 329 ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
332 } 330 }
333out: 331out:
334 spin_unlock(&sbi->fs_lock); 332 spin_unlock(&sbi->fs_lock);
@@ -446,7 +444,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
446 while ((dentry = get_next_positive_subdir(dentry, root))) { 444 while ((dentry = get_next_positive_subdir(dentry, root))) {
447 spin_lock(&sbi->fs_lock); 445 spin_lock(&sbi->fs_lock);
448 ino = autofs4_dentry_ino(dentry); 446 ino = autofs4_dentry_ino(dentry);
449 if (ino->flags & AUTOFS_INF_NO_RCU) 447 if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
450 expired = NULL; 448 expired = NULL;
451 else 449 else
452 expired = should_expire(dentry, mnt, timeout, how); 450 expired = should_expire(dentry, mnt, timeout, how);
@@ -455,7 +453,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
455 continue; 453 continue;
456 } 454 }
457 ino = autofs4_dentry_ino(expired); 455 ino = autofs4_dentry_ino(expired);
458 ino->flags |= AUTOFS_INF_NO_RCU; 456 ino->flags |= AUTOFS_INF_WANT_EXPIRE;
459 spin_unlock(&sbi->fs_lock); 457 spin_unlock(&sbi->fs_lock);
460 synchronize_rcu(); 458 synchronize_rcu();
461 spin_lock(&sbi->fs_lock); 459 spin_lock(&sbi->fs_lock);
@@ -465,7 +463,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
465 goto found; 463 goto found;
466 } 464 }
467 465
468 ino->flags &= ~AUTOFS_INF_NO_RCU; 466 ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
469 if (expired != dentry) 467 if (expired != dentry)
470 dput(expired); 468 dput(expired);
471 spin_unlock(&sbi->fs_lock); 469 spin_unlock(&sbi->fs_lock);
@@ -475,17 +473,8 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
475found: 473found:
476 pr_debug("returning %p %pd\n", expired, expired); 474 pr_debug("returning %p %pd\n", expired, expired);
477 ino->flags |= AUTOFS_INF_EXPIRING; 475 ino->flags |= AUTOFS_INF_EXPIRING;
478 smp_mb();
479 ino->flags &= ~AUTOFS_INF_NO_RCU;
480 init_completion(&ino->expire_complete); 476 init_completion(&ino->expire_complete);
481 spin_unlock(&sbi->fs_lock); 477 spin_unlock(&sbi->fs_lock);
482 spin_lock(&sbi->lookup_lock);
483 spin_lock(&expired->d_parent->d_lock);
484 spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
485 list_move(&expired->d_parent->d_subdirs, &expired->d_child);
486 spin_unlock(&expired->d_lock);
487 spin_unlock(&expired->d_parent->d_lock);
488 spin_unlock(&sbi->lookup_lock);
489 return expired; 478 return expired;
490} 479}
491 480
@@ -496,7 +485,7 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
496 int status; 485 int status;
497 486
498 /* Block on any pending expire */ 487 /* Block on any pending expire */
499 if (!(ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU))) 488 if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
500 return 0; 489 return 0;
501 if (rcu_walk) 490 if (rcu_walk)
502 return -ECHILD; 491 return -ECHILD;
@@ -554,7 +543,7 @@ int autofs4_expire_run(struct super_block *sb,
554 ino = autofs4_dentry_ino(dentry); 543 ino = autofs4_dentry_ino(dentry);
555 /* avoid rapid-fire expire attempts if expiry fails */ 544 /* avoid rapid-fire expire attempts if expiry fails */
556 ino->last_used = now; 545 ino->last_used = now;
557 ino->flags &= ~AUTOFS_INF_EXPIRING; 546 ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
558 complete_all(&ino->expire_complete); 547 complete_all(&ino->expire_complete);
559 spin_unlock(&sbi->fs_lock); 548 spin_unlock(&sbi->fs_lock);
560 549
@@ -583,7 +572,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
583 spin_lock(&sbi->fs_lock); 572 spin_lock(&sbi->fs_lock);
584 /* avoid rapid-fire expire attempts if expiry fails */ 573 /* avoid rapid-fire expire attempts if expiry fails */
585 ino->last_used = now; 574 ino->last_used = now;
586 ino->flags &= ~AUTOFS_INF_EXPIRING; 575 ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
587 complete_all(&ino->expire_complete); 576 complete_all(&ino->expire_complete);
588 spin_unlock(&sbi->fs_lock); 577 spin_unlock(&sbi->fs_lock);
589 dput(dentry); 578 dput(dentry);
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 78bd80298528..3767f6641af1 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -458,7 +458,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
458 */ 458 */
459 struct inode *inode; 459 struct inode *inode;
460 460
461 if (ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU)) 461 if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
462 return 0; 462 return 0;
463 if (d_mountpoint(dentry)) 463 if (d_mountpoint(dentry))
464 return 0; 464 return 0;
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 0146d911f468..631f1554c87b 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -66,11 +66,12 @@ static int autofs4_write(struct autofs_sb_info *sbi,
66 set_fs(KERNEL_DS); 66 set_fs(KERNEL_DS);
67 67
68 mutex_lock(&sbi->pipe_mutex); 68 mutex_lock(&sbi->pipe_mutex);
69 wr = __vfs_write(file, data, bytes, &file->f_pos); 69 while (bytes) {
70 while (bytes && wr) { 70 wr = __vfs_write(file, data, bytes, &file->f_pos);
71 if (wr <= 0)
72 break;
71 data += wr; 73 data += wr;
72 bytes -= wr; 74 bytes -= wr;
73 wr = __vfs_write(file, data, bytes, &file->f_pos);
74 } 75 }
75 mutex_unlock(&sbi->pipe_mutex); 76 mutex_unlock(&sbi->pipe_mutex);
76 77
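The old loop primed wr before the test and only checked it for being non-zero, so a negative return from __vfs_write() made bytes -= wr grow the remaining count instead of shrinking it; the rewrite is the standard write-until-done-or-error shape. A userspace equivalent of the same loop (write_all is an illustrative name):

    #include <sys/types.h>
    #include <unistd.h>

    /* Write all of data, stopping on error or on no progress. */
    static ssize_t write_all(int fd, const char *data, size_t bytes)
    {
            size_t total = 0;

            while (bytes) {
                    ssize_t wr = write(fd, data, bytes);

                    if (wr <= 0)            /* error or no progress: stop */
                            return total ? (ssize_t)total : wr;
                    data += wr;
                    bytes -= (size_t)wr;
                    total += (size_t)wr;
            }
            return (ssize_t)total;
    }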
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index b677a6ea6001..7706c8dc5fa6 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -2645,7 +2645,7 @@ static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
2645 * This algorithm is recursive because the amount of used stack space 2645 * This algorithm is recursive because the amount of used stack space
2646 * is very small and the max recursion depth is limited. 2646 * is very small and the max recursion depth is limited.
2647 */ 2647 */
2648 indent_add = sprintf(buf, "%c-%llu(%s/%llu/%d)", 2648 indent_add = sprintf(buf, "%c-%llu(%s/%llu/%u)",
2649 btrfsic_get_block_type(state, block), 2649 btrfsic_get_block_type(state, block),
2650 block->logical_bytenr, block->dev_state->name, 2650 block->logical_bytenr, block->dev_state->name,
2651 block->dev_bytenr, block->mirror_num); 2651 block->dev_bytenr, block->mirror_num);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 427c36b430a6..a85cf7d23309 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1373,7 +1373,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1373 1373
1374 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) { 1374 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1375 BUG_ON(tm->slot != 0); 1375 BUG_ON(tm->slot != 0);
1376 eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start); 1376 eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start,
1377 eb->len);
1377 if (!eb_rewin) { 1378 if (!eb_rewin) {
1378 btrfs_tree_read_unlock_blocking(eb); 1379 btrfs_tree_read_unlock_blocking(eb);
1379 free_extent_buffer(eb); 1380 free_extent_buffer(eb);
@@ -1454,7 +1455,8 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
1454 } else if (old_root) { 1455 } else if (old_root) {
1455 btrfs_tree_read_unlock(eb_root); 1456 btrfs_tree_read_unlock(eb_root);
1456 free_extent_buffer(eb_root); 1457 free_extent_buffer(eb_root);
1457 eb = alloc_dummy_extent_buffer(root->fs_info, logical); 1458 eb = alloc_dummy_extent_buffer(root->fs_info, logical,
1459 root->nodesize);
1458 } else { 1460 } else {
1459 btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK); 1461 btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1460 eb = btrfs_clone_extent_buffer(eb_root); 1462 eb = btrfs_clone_extent_buffer(eb_root);
@@ -1552,6 +1554,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
 		       trans->transid, root->fs_info->generation);
 
 	if (!should_cow_block(trans, root, buf)) {
+		trans->dirty = true;
 		*cow_ret = buf;
 		return 0;
 	}
@@ -1783,10 +1786,12 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
 		if (!err) {
 			tmp = (struct btrfs_disk_key *)(kaddr + offset -
 							map_start);
-		} else {
+		} else if (err == 1) {
 			read_extent_buffer(eb, &unaligned,
 					   offset, sizeof(unaligned));
 			tmp = &unaligned;
+		} else {
+			return err;
 		}
 
 	} else {
@@ -2510,6 +2515,8 @@ read_block_for_search(struct btrfs_trans_handle *trans,
 		if (!btrfs_buffer_uptodate(tmp, 0, 0))
 			ret = -EIO;
 		free_extent_buffer(tmp);
+	} else {
+		ret = PTR_ERR(tmp);
 	}
 	return ret;
 }
@@ -2773,8 +2780,10 @@ again:
 			 * then we don't want to set the path blocking,
 			 * so we test it here
 			 */
-			if (!should_cow_block(trans, root, b))
+			if (!should_cow_block(trans, root, b)) {
+				trans->dirty = true;
 				goto cow_done;
+			}
 
 			/*
 			 * must have write locks on this node and the
@@ -2823,6 +2832,8 @@ cow_done:
 		}
 
 		ret = key_search(b, key, level, &prev_cmp, &slot);
+		if (ret < 0)
+			goto done;
 
 		if (level != 0) {
 			int dec = 0;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 101c3cfd3f7c..4274a7bfdaed 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2518,7 +2518,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root, unsigned long count);
 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
-				 unsigned long count, int wait);
+				 unsigned long count, u64 transid, int wait);
 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len);
 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root, u64 bytenr,
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 61561c2a3f96..d3aaabbfada0 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1606,15 +1606,23 @@ int btrfs_inode_delayed_dir_index_count(struct inode *inode)
 	return 0;
 }
 
-void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
-			     struct list_head *del_list)
+bool btrfs_readdir_get_delayed_items(struct inode *inode,
+				     struct list_head *ins_list,
+				     struct list_head *del_list)
 {
 	struct btrfs_delayed_node *delayed_node;
 	struct btrfs_delayed_item *item;
 
 	delayed_node = btrfs_get_delayed_node(inode);
 	if (!delayed_node)
-		return;
+		return false;
+
+	/*
+	 * We can only do one readdir with delayed items at a time because of
+	 * item->readdir_list.
+	 */
+	inode_unlock_shared(inode);
+	inode_lock(inode);
 
 	mutex_lock(&delayed_node->mutex);
 	item = __btrfs_first_delayed_insertion_item(delayed_node);
@@ -1641,10 +1649,13 @@ void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
 	 * requeue or dequeue this delayed node.
 	 */
 	atomic_dec(&delayed_node->refs);
+
+	return true;
 }
 
-void btrfs_put_delayed_items(struct list_head *ins_list,
-			     struct list_head *del_list)
+void btrfs_readdir_put_delayed_items(struct inode *inode,
+				     struct list_head *ins_list,
+				     struct list_head *del_list)
 {
 	struct btrfs_delayed_item *curr, *next;
 
@@ -1659,6 +1670,12 @@ void btrfs_put_delayed_items(struct list_head *ins_list,
 		if (atomic_dec_and_test(&curr->refs))
 			kfree(curr);
 	}
+
+	/*
+	 * The VFS is going to do up_read(), so we need to downgrade back to a
+	 * read lock.
+	 */
+	downgrade_write(&inode->i_rwsem);
 }
 
 int btrfs_should_delete_dir_index(struct list_head *del_list,
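The two hunks above implement a lock upgrade/downgrade around delayed-item
readdir: the caller arrives holding i_rwsem shared (iterate_shared), the get
helper trades it for the exclusive lock, and the put helper downgrades back
so the VFS's later up_read() stays balanced. A hedged userspace sketch of
that shape, with a pthread rwlock standing in for i_rwsem (pthreads has no
atomic downgrade, so this is only an approximation of downgrade_write()):

#include <pthread.h>

static pthread_rwlock_t i_rwsem_stand_in = PTHREAD_RWLOCK_INITIALIZER;

static void get_items(void)
{
	/* caller holds the read lock; trade it for the write lock
	 * (not atomic: another writer may slip in between) */
	pthread_rwlock_unlock(&i_rwsem_stand_in);
	pthread_rwlock_wrlock(&i_rwsem_stand_in);
}

static void put_items(void)
{
	/* approximate downgrade_write(): drop exclusive, retake shared */
	pthread_rwlock_unlock(&i_rwsem_stand_in);
	pthread_rwlock_rdlock(&i_rwsem_stand_in);
}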
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index 0167853c84ae..2495b3d4075f 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -137,10 +137,12 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
 void btrfs_destroy_delayed_inodes(struct btrfs_root *root);
 
 /* Used for readdir() */
-void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
-			     struct list_head *del_list);
-void btrfs_put_delayed_items(struct list_head *ins_list,
-			     struct list_head *del_list);
+bool btrfs_readdir_get_delayed_items(struct inode *inode,
+				     struct list_head *ins_list,
+				     struct list_head *del_list);
+void btrfs_readdir_put_delayed_items(struct inode *inode,
+				     struct list_head *ins_list,
+				     struct list_head *del_list);
 int btrfs_should_delete_dir_index(struct list_head *del_list,
 				  u64 index);
 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 6628fca9f4ed..60ce1190307b 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1098,7 +1098,7 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
 	struct inode *btree_inode = root->fs_info->btree_inode;
 
 	buf = btrfs_find_create_tree_block(root, bytenr);
-	if (!buf)
+	if (IS_ERR(buf))
 		return;
 	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
 				 buf, 0, WAIT_NONE, btree_get_extent, 0);
@@ -1114,7 +1114,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
 	int ret;
 
 	buf = btrfs_find_create_tree_block(root, bytenr);
-	if (!buf)
+	if (IS_ERR(buf))
 		return 0;
 
 	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
@@ -1147,7 +1147,8 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 						 u64 bytenr)
 {
 	if (btrfs_test_is_dummy_root(root))
-		return alloc_test_extent_buffer(root->fs_info, bytenr);
+		return alloc_test_extent_buffer(root->fs_info, bytenr,
+				root->nodesize);
 	return alloc_extent_buffer(root->fs_info, bytenr);
 }
 
@@ -1171,8 +1172,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
 	int ret;
 
 	buf = btrfs_find_create_tree_block(root, bytenr);
-	if (!buf)
-		return ERR_PTR(-ENOMEM);
+	if (IS_ERR(buf))
+		return buf;
 
 	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
 	if (ret) {
@@ -1314,14 +1315,16 @@ static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 /* Should only be used by the testing infrastructure */
-struct btrfs_root *btrfs_alloc_dummy_root(void)
+struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize)
 {
 	struct btrfs_root *root;
 
 	root = btrfs_alloc_root(NULL, GFP_KERNEL);
 	if (!root)
 		return ERR_PTR(-ENOMEM);
-	__setup_root(4096, 4096, 4096, root, NULL, 1);
+	/* We don't use the stripesize in selftest, set it as sectorsize */
+	__setup_root(nodesize, sectorsize, sectorsize, root, NULL,
+			BTRFS_ROOT_TREE_OBJECTID);
 	set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state);
 	root->alloc_bytenr = 0;
 
@@ -1803,6 +1806,13 @@ static int cleaner_kthread(void *arg)
 		if (btrfs_need_cleaner_sleep(root))
 			goto sleep;
 
+		/*
+		 * Do not do anything if we might cause open_ctree() to block
+		 * before we have finished mounting the filesystem.
+		 */
+		if (!root->fs_info->open)
+			goto sleep;
+
 		if (!mutex_trylock(&root->fs_info->cleaner_mutex))
 			goto sleep;
 
@@ -2517,7 +2527,6 @@ int open_ctree(struct super_block *sb,
 	int num_backups_tried = 0;
 	int backup_index = 0;
 	int max_active;
-	bool cleaner_mutex_locked = false;
 
 	tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
 	chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
@@ -2797,7 +2806,7 @@ int open_ctree(struct super_block *sb,
 
 	nodesize = btrfs_super_nodesize(disk_super);
 	sectorsize = btrfs_super_sectorsize(disk_super);
-	stripesize = btrfs_super_stripesize(disk_super);
+	stripesize = sectorsize;
 	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
 	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
 
@@ -2996,13 +3005,6 @@ retry_root_backup:
 		goto fail_sysfs;
 	}
 
-	/*
-	 * Hold the cleaner_mutex thread here so that we don't block
-	 * for a long time on btrfs_recover_relocation.  cleaner_kthread
-	 * will wait for us to finish mounting the filesystem.
-	 */
-	mutex_lock(&fs_info->cleaner_mutex);
-	cleaner_mutex_locked = true;
 	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
 					       "btrfs-cleaner");
 	if (IS_ERR(fs_info->cleaner_kthread))
@@ -3062,8 +3064,10 @@ retry_root_backup:
 		ret = btrfs_cleanup_fs_roots(fs_info);
 		if (ret)
 			goto fail_qgroup;
-		/* We locked cleaner_mutex before creating cleaner_kthread. */
+
+		mutex_lock(&fs_info->cleaner_mutex);
 		ret = btrfs_recover_relocation(tree_root);
+		mutex_unlock(&fs_info->cleaner_mutex);
 		if (ret < 0) {
 			btrfs_warn(fs_info, "failed to recover relocation: %d",
 					ret);
@@ -3071,8 +3075,6 @@ retry_root_backup:
 			goto fail_qgroup;
 		}
 	}
-	mutex_unlock(&fs_info->cleaner_mutex);
-	cleaner_mutex_locked = false;
 
 	location.objectid = BTRFS_FS_TREE_OBJECTID;
 	location.type = BTRFS_ROOT_ITEM_KEY;
@@ -3186,10 +3188,6 @@ fail_cleaner:
 	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
 
 fail_sysfs:
-	if (cleaner_mutex_locked) {
-		mutex_unlock(&fs_info->cleaner_mutex);
-		cleaner_mutex_locked = false;
-	}
 	btrfs_sysfs_remove_mounted(fs_info);
 
 fail_fsdev_sysfs:
@@ -4130,6 +4128,16 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
 	 * Hint to catch really bogus numbers, bitflips or so, more exact checks are
 	 * done later
 	 */
+	if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
+		btrfs_err(fs_info, "bytes_used is too small %llu",
+				btrfs_super_bytes_used(sb));
+		ret = -EINVAL;
+	}
+	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
+		btrfs_err(fs_info, "invalid stripesize %u",
+				btrfs_super_stripesize(sb));
+		ret = -EINVAL;
+	}
 	if (btrfs_super_num_devices(sb) > (1UL << 31))
 		printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
 				btrfs_super_num_devices(sb));
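The recurring "!buf" to "IS_ERR(buf)" edits in this file (and in
extent-tree.c below) follow from the tree-block allocator now returning an
ERR_PTR instead of NULL, so the errno travels inside the pointer itself.
A self-contained userspace sketch of that convention, with toy ERR_PTR
macros and a made-up find_create() in place of the btrfs allocator:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static int object;

static void *find_create(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);	/* errno encoded in the pointer */
	return &object;
}

int main(void)
{
	void *buf = find_create(1);

	if (IS_ERR(buf))	/* replaces the old NULL check */
		printf("error: %ld\n", PTR_ERR(buf));
	return 0;
}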
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 8e79d0070bcf..acba821499a9 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -90,7 +90,7 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
 void btrfs_free_fs_root(struct btrfs_root *root);
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-struct btrfs_root *btrfs_alloc_dummy_root(void);
+struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize);
 #endif
 
 /*
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 689d25ac6a68..82b912a293ab 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2835,6 +2835,7 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
 
 struct async_delayed_refs {
 	struct btrfs_root *root;
+	u64 transid;
 	int count;
 	int error;
 	int sync;
@@ -2850,6 +2851,10 @@ static void delayed_ref_async_start(struct btrfs_work *work)
 
 	async = container_of(work, struct async_delayed_refs, work);
 
+	/* if the commit is already started, we don't need to wait here */
+	if (btrfs_transaction_blocked(async->root->fs_info))
+		goto done;
+
 	trans = btrfs_join_transaction(async->root);
 	if (IS_ERR(trans)) {
 		async->error = PTR_ERR(trans);
@@ -2861,10 +2866,15 @@ static void delayed_ref_async_start(struct btrfs_work *work)
 	 * wait on delayed refs
 	 */
 	trans->sync = true;
+
+	/* Don't bother flushing if we got into a different transaction */
+	if (trans->transid > async->transid)
+		goto end;
+
 	ret = btrfs_run_delayed_refs(trans, async->root, async->count);
 	if (ret)
 		async->error = ret;
-
+end:
 	ret = btrfs_end_transaction(trans, async->root);
 	if (ret && !async->error)
 		async->error = ret;
@@ -2876,7 +2886,7 @@ done:
 }
 
 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
-				 unsigned long count, int wait)
+				 unsigned long count, u64 transid, int wait)
 {
 	struct async_delayed_refs *async;
 	int ret;
@@ -2888,6 +2898,7 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root,
 	async->root = root->fs_info->tree_root;
 	async->count = count;
 	async->error = 0;
+	async->transid = transid;
 	if (wait)
 		async->sync = 1;
 	else
@@ -8016,8 +8027,9 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	struct extent_buffer *buf;
 
 	buf = btrfs_find_create_tree_block(root, bytenr);
-	if (!buf)
-		return ERR_PTR(-ENOMEM);
+	if (IS_ERR(buf))
+		return buf;
+
 	btrfs_set_header_generation(buf, trans->transid);
 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
 	btrfs_tree_lock(buf);
@@ -8044,7 +8056,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
 			 buf->start + buf->len - 1, GFP_NOFS);
 	}
-	trans->blocks_used++;
+	trans->dirty = true;
 	/* this returns a buffer locked for blocking */
 	return buf;
 }
@@ -8659,8 +8671,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 	next = btrfs_find_tree_block(root->fs_info, bytenr);
 	if (!next) {
 		next = btrfs_find_create_tree_block(root, bytenr);
-		if (!next)
-			return -ENOMEM;
+		if (IS_ERR(next))
+			return PTR_ERR(next);
+
 		btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
 					       level - 1);
 		reada = 1;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 6e953de83f08..75533adef998 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4728,16 +4728,16 @@ err:
 }
 
 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
-						u64 start)
+						u64 start, u32 nodesize)
 {
 	unsigned long len;
 
 	if (!fs_info) {
 		/*
 		 * Called only from tests that don't always have a fs_info
-		 * available, but we know that nodesize is 4096
+		 * available
 		 */
-		len = 4096;
+		len = nodesize;
 	} else {
 		len = fs_info->tree_root->nodesize;
 	}
@@ -4833,7 +4833,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
-					       u64 start)
+					       u64 start, u32 nodesize)
 {
 	struct extent_buffer *eb, *exists = NULL;
 	int ret;
@@ -4841,7 +4841,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
 	eb = find_extent_buffer(fs_info, start);
 	if (eb)
 		return eb;
-	eb = alloc_dummy_extent_buffer(fs_info, start);
+	eb = alloc_dummy_extent_buffer(fs_info, start, nodesize);
 	if (!eb)
 		return NULL;
 	eb->fs_info = fs_info;
@@ -4892,18 +4892,25 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	int uptodate = 1;
 	int ret;
 
+	if (!IS_ALIGNED(start, fs_info->tree_root->sectorsize)) {
+		btrfs_err(fs_info, "bad tree block start %llu", start);
+		return ERR_PTR(-EINVAL);
+	}
+
 	eb = find_extent_buffer(fs_info, start);
 	if (eb)
 		return eb;
 
 	eb = __alloc_extent_buffer(fs_info, start, len);
 	if (!eb)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	for (i = 0; i < num_pages; i++, index++) {
 		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
-		if (!p)
+		if (!p) {
+			exists = ERR_PTR(-ENOMEM);
 			goto free_eb;
+		}
 
 		spin_lock(&mapping->private_lock);
 		if (PagePrivate(p)) {
@@ -4948,8 +4955,10 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 again:
 	ret = radix_tree_preload(GFP_NOFS);
-	if (ret)
+	if (ret) {
+		exists = ERR_PTR(ret);
 		goto free_eb;
+	}
 
 	spin_lock(&fs_info->buffer_lock);
 	ret = radix_tree_insert(&fs_info->buffer_radix,
@@ -5333,6 +5342,11 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
 	return ret;
 }
 
+/*
+ * return 0 if the item is found within a page.
+ * return 1 if the item spans two pages.
+ * return -EINVAL otherwise.
+ */
 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
 			      unsigned long min_len, char **map,
 			      unsigned long *map_start,
@@ -5347,7 +5361,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
 		PAGE_SHIFT;
 
 	if (i != end_i)
-		return -EINVAL;
+		return 1;
 
 	if (i == 0) {
 		offset = start_offset;
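The new comment and "return 1" above give map_private_extent_buffer() a
three-way contract that the generic_bin_search() hunk in ctree.c earlier in
this diff dispatches on: 0 means the item maps directly inside one page,
1 means it straddles a page boundary and must be copied out byte-wise, and
a negative value is a hard error to propagate. A standalone sketch of the
0/1 split with toy pages (map_item(), copy_item() and the sizes are made-up
stand-ins, not btrfs functions):

#include <stdio.h>

#define PG 8	/* toy page size */

static unsigned char pages[2][PG];	/* two fake pages */

/* 0 = item lies inside one page (*out set), 1 = it straddles pages */
static int map_item(unsigned int off, unsigned int len, unsigned char **out)
{
	if (off / PG != (off + len - 1) / PG)
		return 1;
	*out = &pages[off / PG][off % PG];
	return 0;
}

/* byte-wise copy that is allowed to cross the page boundary */
static void copy_item(unsigned int off, unsigned char *dst, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		dst[i] = pages[(off + i) / PG][(off + i) % PG];
}

int main(void)
{
	unsigned char unaligned[4], *item;
	unsigned int off = 6;	/* 4 bytes at offset 6 cross the boundary */

	if (map_item(off, sizeof(unaligned), &item) == 1) {
		copy_item(off, unaligned, sizeof(unaligned));
		item = unaligned;	/* fall back to the local copy */
	}
	printf("item[0] = %u\n", item[0]);
	return 0;
}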
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 1baf19c9b79d..c0c1c4fef6ce 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -348,7 +348,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
 						  u64 start, unsigned long len);
 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
-						u64 start);
+						u64 start, u32 nodesize);
 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
 					 u64 start);
@@ -468,5 +468,5 @@ noinline u64 find_lock_delalloc_range(struct inode *inode,
 			     u64 *end, u64 max_bytes);
 #endif
 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
-					       u64 start);
+					       u64 start, u32 nodesize);
 #endif
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index e0c9bd3fb02d..2234e88cf674 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1534,30 +1534,30 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		reserve_bytes = round_up(write_bytes + sector_offset,
 					 root->sectorsize);
 
-		if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
-					      BTRFS_INODE_PREALLOC)) &&
-		    check_can_nocow(inode, pos, &write_bytes) > 0) {
-			/*
-			 * For nodata cow case, no need to reserve
-			 * data space.
-			 */
-			only_release_metadata = true;
-			/*
-			 * our prealloc extent may be smaller than
-			 * write_bytes, so scale down.
-			 */
-			num_pages = DIV_ROUND_UP(write_bytes + offset,
-						 PAGE_SIZE);
-			reserve_bytes = round_up(write_bytes + sector_offset,
-						 root->sectorsize);
-			goto reserve_metadata;
-		}
-
 		ret = btrfs_check_data_free_space(inode, pos, write_bytes);
-		if (ret < 0)
-			break;
+		if (ret < 0) {
+			if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+						      BTRFS_INODE_PREALLOC)) &&
+			    check_can_nocow(inode, pos, &write_bytes) > 0) {
+				/*
+				 * For nodata cow case, no need to reserve
+				 * data space.
+				 */
+				only_release_metadata = true;
+				/*
+				 * our prealloc extent may be smaller than
+				 * write_bytes, so scale down.
+				 */
+				num_pages = DIV_ROUND_UP(write_bytes + offset,
+							 PAGE_SIZE);
+				reserve_bytes = round_up(write_bytes +
+							 sector_offset,
+							 root->sectorsize);
+			} else {
+				break;
+			}
+		}
 
-reserve_metadata:
 		ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
 		if (ret) {
 			if (!only_release_metadata)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index c6dc1183f542..69d270f6602c 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -29,7 +29,7 @@
 #include "inode-map.h"
 #include "volumes.h"
 
-#define BITS_PER_BITMAP		(PAGE_SIZE * 8)
+#define BITS_PER_BITMAP		(PAGE_SIZE * 8UL)
 #define MAX_CACHE_BYTES_PER_GIG	SZ_32K
 
 struct btrfs_trim_range {
@@ -1415,11 +1415,11 @@ static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
 					  u64 offset)
 {
 	u64 bitmap_start;
-	u32 bytes_per_bitmap;
+	u64 bytes_per_bitmap;
 
 	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
 	bitmap_start = offset - ctl->start;
-	bitmap_start = div_u64(bitmap_start, bytes_per_bitmap);
+	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
 	bitmap_start *= bytes_per_bitmap;
 	bitmap_start += ctl->start;
 
@@ -1638,10 +1638,10 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
 	u64 bitmap_bytes;
 	u64 extent_bytes;
 	u64 size = block_group->key.offset;
-	u32 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
-	u32 max_bitmaps = div_u64(size + bytes_per_bg - 1, bytes_per_bg);
+	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
+	u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
 
-	max_bitmaps = max_t(u32, max_bitmaps, 1);
+	max_bitmaps = max_t(u64, max_bitmaps, 1);
 
 	ASSERT(ctl->total_bitmaps <= max_bitmaps);
 
@@ -1660,7 +1660,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
 	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
 	 * we add more bitmaps.
 	 */
-	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_SIZE;
+	bitmap_bytes = (ctl->total_bitmaps + 1) * ctl->unit;
 
 	if (bitmap_bytes >= max_bytes) {
 		ctl->extents_thresh = 0;
@@ -3662,7 +3662,7 @@ have_info:
 		if (tmp->offset + tmp->bytes < offset)
 			break;
 		if (offset + bytes < tmp->offset) {
-			n = rb_prev(&info->offset_index);
+			n = rb_prev(&tmp->offset_index);
 			continue;
 		}
 		info = tmp;
@@ -3676,7 +3676,7 @@ have_info:
 		if (offset + bytes < tmp->offset)
 			break;
 		if (tmp->offset + tmp->bytes < offset) {
-			n = rb_next(&info->offset_index);
+			n = rb_next(&tmp->offset_index);
 			continue;
 		}
 		info = tmp;
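The u32-to-u64 and "8UL" changes above guard against 32-bit overflow once
the sectorsize can be 64K: BITS_PER_BITMAP then becomes 512Ki bits, and
multiplied by a 64K unit the product is 2^35, which truncates to zero in
32 bits. A standalone demo of that failure mode (values are assumed for
illustration, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t page_size = 65536;			/* e.g. 64K pages on ppc64 */
	uint64_t bits_per_bitmap = page_size * 8;	/* 524288 */
	uint64_t unit = 65536;				/* 64K sectorsize */

	uint32_t narrow = (uint32_t)(bits_per_bitmap * unit);
	uint64_t wide = bits_per_bitmap * unit;

	/* 2^35 = 34359738368 truncates to 0 in a u32 */
	printf("u32: %u  u64: %llu\n", narrow, (unsigned long long)wide);
	return 0;
}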
diff --git a/fs/btrfs/hash.c b/fs/btrfs/hash.c
index aae520b2aee5..a97fdc156a03 100644
--- a/fs/btrfs/hash.c
+++ b/fs/btrfs/hash.c
@@ -24,6 +24,11 @@ int __init btrfs_hash_init(void)
 	return PTR_ERR_OR_ZERO(tfm);
 }
 
+const char* btrfs_crc32c_impl(void)
+{
+	return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
+}
+
 void btrfs_hash_exit(void)
 {
 	crypto_free_shash(tfm);
diff --git a/fs/btrfs/hash.h b/fs/btrfs/hash.h
index 118a2316e5d3..c3a2ec554361 100644
--- a/fs/btrfs/hash.h
+++ b/fs/btrfs/hash.h
@@ -22,6 +22,7 @@
 int __init btrfs_hash_init(void);
 
 void btrfs_hash_exit(void);
+const char* btrfs_crc32c_impl(void);
 
 u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length);
 
27 28
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8b1212e8f7a8..4421954720b8 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3271,7 +3271,16 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
 	/* grab metadata reservation from transaction handle */
 	if (reserve) {
 		ret = btrfs_orphan_reserve_metadata(trans, inode);
-		BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
+		ASSERT(!ret);
+		if (ret) {
+			atomic_dec(&root->orphan_inodes);
+			clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
+				  &BTRFS_I(inode)->runtime_flags);
+			if (insert)
+				clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+					  &BTRFS_I(inode)->runtime_flags);
+			return ret;
+		}
 	}
 
 	/* insert an orphan item to track this unlinked/truncated file */
@@ -4549,6 +4558,7 @@ delete:
 			BUG_ON(ret);
 			if (btrfs_should_throttle_delayed_refs(trans, root))
 				btrfs_async_run_delayed_refs(root,
+					trans->transid,
 					trans->delayed_ref_updates * 2, 0);
 			if (be_nice) {
 				if (truncate_space_check(trans, root,
@@ -5748,6 +5758,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
 	int name_len;
 	int is_curr = 0;	/* ctx->pos points to the current index? */
 	bool emitted;
+	bool put = false;
 
 	/* FIXME, use a real flag for deciding about the key type */
 	if (root->fs_info->tree_root == root)
@@ -5765,7 +5776,8 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
 	if (key_type == BTRFS_DIR_INDEX_KEY) {
 		INIT_LIST_HEAD(&ins_list);
 		INIT_LIST_HEAD(&del_list);
-		btrfs_get_delayed_items(inode, &ins_list, &del_list);
+		put = btrfs_readdir_get_delayed_items(inode, &ins_list,
+						      &del_list);
 	}
 
 	key.type = key_type;
@@ -5912,8 +5924,8 @@ next:
 nopos:
 	ret = 0;
 err:
-	if (key_type == BTRFS_DIR_INDEX_KEY)
-		btrfs_put_delayed_items(&ins_list, &del_list);
+	if (put)
+		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
 	btrfs_free_path(path);
 	return ret;
 }
@@ -10525,7 +10537,7 @@ static const struct inode_operations btrfs_dir_ro_inode_operations = {
 static const struct file_operations btrfs_dir_file_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
-	.iterate	= btrfs_real_readdir,
+	.iterate_shared	= btrfs_real_readdir,
 	.unlocked_ioctl	= btrfs_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= btrfs_compat_ioctl,
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index e96634a725c3..aca8264f4a49 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -968,6 +968,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 	struct rb_node *prev = NULL;
 	struct btrfs_ordered_extent *test;
 	int ret = 1;
+	u64 orig_offset = offset;
 
 	spin_lock_irq(&tree->lock);
 	if (ordered) {
@@ -983,7 +984,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 
 	/* truncate file */
 	if (disk_i_size > i_size) {
-		BTRFS_I(inode)->disk_i_size = i_size;
+		BTRFS_I(inode)->disk_i_size = orig_offset;
 		ret = 0;
 		goto out;
 	}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 4e59a91a11e0..60e7179ed4b7 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -235,7 +235,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
 	trans->aborted = errno;
 	/* Nothing used. The other threads that have joined this
 	 * transaction may be able to continue. */
-	if (!trans->blocks_used && list_empty(&trans->new_bgs)) {
+	if (!trans->dirty && list_empty(&trans->new_bgs)) {
 		const char *errstr;
 
 		errstr = btrfs_decode_error(errno);
@@ -1807,6 +1807,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 		}
 	}
 	sb->s_flags &= ~MS_RDONLY;
+
+		fs_info->open = 1;
 	}
 out:
 	wake_up_process(fs_info->transaction_kthread);
@@ -2303,7 +2305,7 @@ static void btrfs_interface_exit(void)
 
 static void btrfs_print_mod_info(void)
 {
-	printk(KERN_INFO "Btrfs loaded"
+	printk(KERN_INFO "Btrfs loaded, crc32c=%s"
 #ifdef CONFIG_BTRFS_DEBUG
 			", debug=on"
 #endif
@@ -2313,33 +2315,48 @@ static void btrfs_print_mod_info(void)
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 			", integrity-checker=on"
 #endif
-			"\n");
+			"\n",
+			btrfs_crc32c_impl());
 }
 
 static int btrfs_run_sanity_tests(void)
 {
-	int ret;
-
+	int ret, i;
+	u32 sectorsize, nodesize;
+	u32 test_sectorsize[] = {
+		PAGE_SIZE,
+	};
 	ret = btrfs_init_test_fs();
 	if (ret)
 		return ret;
-
-	ret = btrfs_test_free_space_cache();
-	if (ret)
-		goto out;
-	ret = btrfs_test_extent_buffer_operations();
-	if (ret)
-		goto out;
-	ret = btrfs_test_extent_io();
-	if (ret)
-		goto out;
-	ret = btrfs_test_inodes();
-	if (ret)
-		goto out;
-	ret = btrfs_test_qgroups();
-	if (ret)
-		goto out;
-	ret = btrfs_test_free_space_tree();
+	for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) {
+		sectorsize = test_sectorsize[i];
+		for (nodesize = sectorsize;
+		     nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE;
+		     nodesize <<= 1) {
+			pr_info("BTRFS: selftest: sectorsize: %u nodesize: %u\n",
+				sectorsize, nodesize);
+			ret = btrfs_test_free_space_cache(sectorsize, nodesize);
+			if (ret)
+				goto out;
+			ret = btrfs_test_extent_buffer_operations(sectorsize,
+				nodesize);
+			if (ret)
+				goto out;
+			ret = btrfs_test_extent_io(sectorsize, nodesize);
+			if (ret)
+				goto out;
+			ret = btrfs_test_inodes(sectorsize, nodesize);
+			if (ret)
+				goto out;
+			ret = btrfs_test_qgroups(sectorsize, nodesize);
+			if (ret)
+				goto out;
+			ret = btrfs_test_free_space_tree(sectorsize, nodesize);
+			if (ret)
+				goto out;
+		}
+	}
 out:
 	btrfs_destroy_test_fs();
 	return ret;
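The rewritten btrfs_run_sanity_tests() above turns the flat test list into a
matrix: for each sectorsize (currently just PAGE_SIZE) every power-of-two
nodesize from sectorsize up to the 64K metadata blocksize cap is exercised.
A standalone sketch of the iteration shape (constants assumed; 65536 stands
in for BTRFS_MAX_METADATA_BLOCKSIZE in this tree):

#include <stdio.h>

#define MAX_METADATA_BLOCKSIZE 65536

int main(void)
{
	unsigned int sectorsize = 4096;	/* PAGE_SIZE on most arches */
	unsigned int nodesize;

	/* double nodesize each pass, mirroring the selftest loop */
	for (nodesize = sectorsize;
	     nodesize <= MAX_METADATA_BLOCKSIZE;
	     nodesize <<= 1)
		printf("selftest: sectorsize %u nodesize %u\n",
		       sectorsize, nodesize);
	return 0;
}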
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index f54bf450bad3..02223f3f78f4 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -68,7 +68,7 @@ int btrfs_init_test_fs(void)
 	if (IS_ERR(test_mnt)) {
 		printk(KERN_ERR "btrfs: cannot mount test file system\n");
 		unregister_filesystem(&test_type);
-		return ret;
+		return PTR_ERR(test_mnt);
 	}
 	return 0;
 }
@@ -175,7 +175,7 @@ void btrfs_free_dummy_root(struct btrfs_root *root)
 }
 
 struct btrfs_block_group_cache *
-btrfs_alloc_dummy_block_group(unsigned long length)
+btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize)
 {
 	struct btrfs_block_group_cache *cache;
 
@@ -192,8 +192,8 @@ btrfs_alloc_dummy_block_group(unsigned long length)
 	cache->key.objectid = 0;
 	cache->key.offset = length;
 	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
-	cache->sectorsize = 4096;
-	cache->full_stripe_len = 4096;
+	cache->sectorsize = sectorsize;
+	cache->full_stripe_len = sectorsize;
 
 	INIT_LIST_HEAD(&cache->list);
 	INIT_LIST_HEAD(&cache->cluster_list);
diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h
index 054b8c73c951..66fb6b701eb7 100644
--- a/fs/btrfs/tests/btrfs-tests.h
+++ b/fs/btrfs/tests/btrfs-tests.h
@@ -26,27 +26,28 @@
 struct btrfs_root;
 struct btrfs_trans_handle;
 
-int btrfs_test_free_space_cache(void);
-int btrfs_test_extent_buffer_operations(void);
-int btrfs_test_extent_io(void);
-int btrfs_test_inodes(void);
-int btrfs_test_qgroups(void);
-int btrfs_test_free_space_tree(void);
+int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize);
+int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize);
+int btrfs_test_extent_io(u32 sectorsize, u32 nodesize);
+int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
+int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
+int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
 int btrfs_init_test_fs(void);
 void btrfs_destroy_test_fs(void);
 struct inode *btrfs_new_test_inode(void);
 struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void);
 void btrfs_free_dummy_root(struct btrfs_root *root);
 struct btrfs_block_group_cache *
-btrfs_alloc_dummy_block_group(unsigned long length);
+btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize);
 void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache);
 void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans);
 #else
-static inline int btrfs_test_free_space_cache(void)
+static inline int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
 {
 	return 0;
 }
-static inline int btrfs_test_extent_buffer_operations(void)
+static inline int btrfs_test_extent_buffer_operations(u32 sectorsize,
+	u32 nodesize)
 {
 	return 0;
 }
@@ -57,19 +58,19 @@ static inline int btrfs_init_test_fs(void)
 static inline void btrfs_destroy_test_fs(void)
 {
 }
-static inline int btrfs_test_extent_io(void)
+static inline int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
 {
 	return 0;
 }
-static inline int btrfs_test_inodes(void)
+static inline int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
 {
 	return 0;
 }
-static inline int btrfs_test_qgroups(void)
+static inline int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
 {
 	return 0;
 }
-static inline int btrfs_test_free_space_tree(void)
+static inline int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
 {
 	return 0;
 }
diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c
index f51963a8f929..4f8cbd1ec5ee 100644
--- a/fs/btrfs/tests/extent-buffer-tests.c
+++ b/fs/btrfs/tests/extent-buffer-tests.c
@@ -22,7 +22,7 @@
 #include "../extent_io.h"
 #include "../disk-io.h"
 
-static int test_btrfs_split_item(void)
+static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
 {
 	struct btrfs_path *path;
 	struct btrfs_root *root;
@@ -40,7 +40,7 @@ static int test_btrfs_split_item(void)
 
 	test_msg("Running btrfs_split_item tests\n");
 
-	root = btrfs_alloc_dummy_root();
+	root = btrfs_alloc_dummy_root(sectorsize, nodesize);
 	if (IS_ERR(root)) {
 		test_msg("Could not allocate root\n");
 		return PTR_ERR(root);
@@ -53,7 +53,8 @@ static int test_btrfs_split_item(void)
 		return -ENOMEM;
 	}
 
-	path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, 4096);
+	path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, nodesize,
+							nodesize);
 	if (!eb) {
 		test_msg("Could not allocate dummy buffer\n");
 		ret = -ENOMEM;
@@ -222,8 +223,8 @@ out:
 	return ret;
 }
 
-int btrfs_test_extent_buffer_operations(void)
+int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize)
 {
-	test_msg("Running extent buffer operation tests");
-	return test_btrfs_split_item();
+	test_msg("Running extent buffer operation tests\n");
+	return test_btrfs_split_item(sectorsize, nodesize);
 }
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 55724607f79b..d19ab0317283 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/sizes.h>
 #include "btrfs-tests.h"
+#include "../ctree.h"
 #include "../extent_io.h"
 
 #define PROCESS_UNLOCK		(1 << 0)
@@ -65,7 +66,7 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
 	return count;
 }
 
-static int test_find_delalloc(void)
+static int test_find_delalloc(u32 sectorsize)
 {
 	struct inode *inode;
 	struct extent_io_tree tmp;
@@ -113,7 +114,7 @@ static int test_find_delalloc(void)
 	 * |--- delalloc ---|
 	 * |---  search  ---|
 	 */
-	set_extent_delalloc(&tmp, 0, 4095, NULL);
+	set_extent_delalloc(&tmp, 0, sectorsize - 1, NULL);
 	start = 0;
 	end = 0;
 	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -122,9 +123,9 @@ static int test_find_delalloc(void)
 		test_msg("Should have found at least one delalloc\n");
 		goto out_bits;
 	}
-	if (start != 0 || end != 4095) {
-		test_msg("Expected start 0 end 4095, got start %Lu end %Lu\n",
-			 start, end);
+	if (start != 0 || end != (sectorsize - 1)) {
+		test_msg("Expected start 0 end %u, got start %llu end %llu\n",
+			 sectorsize - 1, start, end);
 		goto out_bits;
 	}
 	unlock_extent(&tmp, start, end);
@@ -144,7 +145,7 @@ static int test_find_delalloc(void)
 		test_msg("Couldn't find the locked page\n");
 		goto out_bits;
 	}
-	set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL);
+	set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, NULL);
 	start = test_start;
 	end = 0;
 	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -172,7 +173,7 @@ static int test_find_delalloc(void)
 	 * |--- delalloc ---|
 	 * |--- search ---|
 	 */
-	test_start = max_bytes + 4096;
+	test_start = max_bytes + sectorsize;
 	locked_page = find_lock_page(inode->i_mapping, test_start >>
 				     PAGE_SHIFT);
 	if (!locked_page) {
@@ -272,6 +273,16 @@ out:
 	return ret;
 }
 
+/**
+ * test_bit_in_byte - Determine whether a bit is set in a byte
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline int test_bit_in_byte(int nr, const u8 *addr)
+{
+	return 1UL & (addr[nr / BITS_PER_BYTE] >> (nr & (BITS_PER_BYTE - 1)));
+}
+
 static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
 			     unsigned long len)
 {
@@ -298,25 +309,29 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
 		return -EINVAL;
 	}
 
-	bitmap_set(bitmap, (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
-		   sizeof(long) * BITS_PER_BYTE);
-	extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
-				 sizeof(long) * BITS_PER_BYTE);
-	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
-		test_msg("Setting straddling pages failed\n");
-		return -EINVAL;
-	}
+	/* Straddling pages test */
+	if (len > PAGE_SIZE) {
+		bitmap_set(bitmap,
+			(PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+			sizeof(long) * BITS_PER_BYTE);
+		extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
+					sizeof(long) * BITS_PER_BYTE);
+		if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+			test_msg("Setting straddling pages failed\n");
+			return -EINVAL;
+		}
 
-	bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
-	bitmap_clear(bitmap,
-		     (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
-		     sizeof(long) * BITS_PER_BYTE);
-	extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
-	extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
-				   sizeof(long) * BITS_PER_BYTE);
-	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
-		test_msg("Clearing straddling pages failed\n");
-		return -EINVAL;
+		bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
+		bitmap_clear(bitmap,
+			(PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+			sizeof(long) * BITS_PER_BYTE);
+		extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
+		extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
+					sizeof(long) * BITS_PER_BYTE);
+		if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+			test_msg("Clearing straddling pages failed\n");
+			return -EINVAL;
+		}
 	}
 
 	/*
@@ -333,7 +348,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
 	for (i = 0; i < len * BITS_PER_BYTE; i++) {
 		int bit, bit1;
 
-		bit = !!test_bit(i, bitmap);
+		bit = !!test_bit_in_byte(i, (u8 *)bitmap);
 		bit1 = !!extent_buffer_test_bit(eb, 0, i);
 		if (bit1 != bit) {
 			test_msg("Testing bit pattern failed\n");
@@ -351,15 +366,22 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
 	return 0;
 }
 
-static int test_eb_bitmaps(void)
+static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
 {
-	unsigned long len = PAGE_SIZE * 4;
+	unsigned long len;
 	unsigned long *bitmap;
 	struct extent_buffer *eb;
 	int ret;
 
 	test_msg("Running extent buffer bitmap tests\n");
 
+	/*
+	 * In ppc64, sectorsize can be 64K, thus 4 * 64K will be larger than
+	 * BTRFS_MAX_METADATA_BLOCKSIZE.
+	 */
+	len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE)
+		? sectorsize * 4 : sectorsize;
+
 	bitmap = kmalloc(len, GFP_KERNEL);
 	if (!bitmap) {
 		test_msg("Couldn't allocate test bitmap\n");
@@ -379,7 +401,7 @@ static int test_eb_bitmaps(void)
 
 	/* Do it over again with an extent buffer which isn't page-aligned. */
 	free_extent_buffer(eb);
-	eb = __alloc_dummy_extent_buffer(NULL, PAGE_SIZE / 2, len);
+	eb = __alloc_dummy_extent_buffer(NULL, nodesize / 2, len);
 	if (!eb) {
 		test_msg("Couldn't allocate test extent buffer\n");
 		kfree(bitmap);
@@ -393,17 +415,17 @@ out:
 	return ret;
 }
 
-int btrfs_test_extent_io(void)
+int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
 {
 	int ret;
 
 	test_msg("Running extent I/O tests\n");
 
-	ret = test_find_delalloc();
+	ret = test_find_delalloc(sectorsize);
 	if (ret)
 		goto out;
 
-	ret = test_eb_bitmaps();
+	ret = test_eb_bitmaps(sectorsize, nodesize);
 out:
 	test_msg("Extent I/O tests finished\n");
 	return ret;
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index 0eeb8f3d6b67..3956bb2ff84c 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -22,7 +22,7 @@
 #include "../disk-io.h"
 #include "../free-space-cache.h"
 
-#define BITS_PER_BITMAP		(PAGE_SIZE * 8)
+#define BITS_PER_BITMAP		(PAGE_SIZE * 8UL)
 
 /*
  * This test just does basic sanity checking, making sure we can add an extent
@@ -99,7 +99,8 @@ static int test_extents(struct btrfs_block_group_cache *cache)
 	return 0;
 }
 
-static int test_bitmaps(struct btrfs_block_group_cache *cache)
+static int test_bitmaps(struct btrfs_block_group_cache *cache,
+			u32 sectorsize)
 {
 	u64 next_bitmap_offset;
 	int ret;
@@ -139,7 +140,7 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache)
 	 * The first bitmap we have starts at offset 0 so the next one is just
 	 * at the end of the first bitmap.
 	 */
-	next_bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
+	next_bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
 
 	/* Test a bit straddling two bitmaps */
 	ret = test_add_free_space_entry(cache, next_bitmap_offset - SZ_2M,
@@ -167,9 +168,10 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache)
167} 168}
168 169
169/* This is the high grade jackassery */ 170/* This is the high grade jackassery */
170static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache) 171static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache,
172 u32 sectorsize)
171{ 173{
172 u64 bitmap_offset = (u64)(BITS_PER_BITMAP * 4096); 174 u64 bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
173 int ret; 175 int ret;
174 176
175 test_msg("Running bitmap and extent tests\n"); 177 test_msg("Running bitmap and extent tests\n");
@@ -401,7 +403,8 @@ static int check_cache_empty(struct btrfs_block_group_cache *cache)
401 * requests. 403 * requests.
402 */ 404 */
403static int 405static int
404test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) 406test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache,
407 u32 sectorsize)
405{ 408{
406 int ret; 409 int ret;
407 u64 offset; 410 u64 offset;
@@ -539,7 +542,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
539 * The goal is to test that the bitmap entry space stealing doesn't 542 * The goal is to test that the bitmap entry space stealing doesn't
540 * steal this space region. 543 * steal this space region.
541 */ 544 */
542 ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, 4096); 545 ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, sectorsize);
543 if (ret) { 546 if (ret) {
544 test_msg("Error adding free space: %d\n", ret); 547 test_msg("Error adding free space: %d\n", ret);
545 return ret; 548 return ret;
@@ -597,8 +600,8 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
597 return -ENOENT; 600 return -ENOENT;
598 } 601 }
599 602
600 if (cache->free_space_ctl->free_space != (SZ_1M + 4096)) { 603 if (cache->free_space_ctl->free_space != (SZ_1M + sectorsize)) {
601 test_msg("Cache free space is not 1Mb + 4Kb\n"); 604 test_msg("Cache free space is not 1Mb + %u\n", sectorsize);
602 return -EINVAL; 605 return -EINVAL;
603 } 606 }
604 607
@@ -611,22 +614,25 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
611 return -EINVAL; 614 return -EINVAL;
612 } 615 }
613 616
614 /* All that remains is a 4Kb free space region in a bitmap. Confirm. */ 617 /*
618 * All that remains is a sectorsize free space region in a bitmap.
619 * Confirm.
620 */
615 ret = check_num_extents_and_bitmaps(cache, 1, 1); 621 ret = check_num_extents_and_bitmaps(cache, 1, 1);
616 if (ret) 622 if (ret)
617 return ret; 623 return ret;
618 624
619 if (cache->free_space_ctl->free_space != 4096) { 625 if (cache->free_space_ctl->free_space != sectorsize) {
620 test_msg("Cache free space is not 4Kb\n"); 626 test_msg("Cache free space is not %u\n", sectorsize);
621 return -EINVAL; 627 return -EINVAL;
622 } 628 }
623 629
624 offset = btrfs_find_space_for_alloc(cache, 630 offset = btrfs_find_space_for_alloc(cache,
625 0, 4096, 0, 631 0, sectorsize, 0,
626 &max_extent_size); 632 &max_extent_size);
627 if (offset != (SZ_128M + SZ_16M)) { 633 if (offset != (SZ_128M + SZ_16M)) {
628 test_msg("Failed to allocate 4Kb from space cache, returned offset is: %llu\n", 634 test_msg("Failed to allocate %u, returned offset : %llu\n",
629 offset); 635 sectorsize, offset);
630 return -EINVAL; 636 return -EINVAL;
631 } 637 }
632 638
@@ -733,7 +739,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
733 * The goal is to test that the bitmap entry space stealing doesn't 739 * The goal is to test that the bitmap entry space stealing doesn't
734 * steal this space region. 740 * steal this space region.
735 */ 741 */
736 ret = btrfs_add_free_space(cache, SZ_32M, 8192); 742 ret = btrfs_add_free_space(cache, SZ_32M, 2 * sectorsize);
737 if (ret) { 743 if (ret) {
738 test_msg("Error adding free space: %d\n", ret); 744 test_msg("Error adding free space: %d\n", ret);
739 return ret; 745 return ret;
@@ -757,7 +763,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
757 763
758 /* 764 /*
759 * Confirm that our extent entry didn't steal all free space from the 765 * Confirm that our extent entry didn't steal all free space from the
760 * bitmap, because of the small 8Kb free space region. 766 * bitmap, because of the small 2 * sectorsize free space region.
761 */ 767 */
762 ret = check_num_extents_and_bitmaps(cache, 2, 1); 768 ret = check_num_extents_and_bitmaps(cache, 2, 1);
763 if (ret) 769 if (ret)
@@ -783,8 +789,8 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
783 return -ENOENT; 789 return -ENOENT;
784 } 790 }
785 791
786 if (cache->free_space_ctl->free_space != (SZ_1M + 8192)) { 792 if (cache->free_space_ctl->free_space != (SZ_1M + 2 * sectorsize)) {
787 test_msg("Cache free space is not 1Mb + 8Kb\n"); 793 test_msg("Cache free space is not 1Mb + %u\n", 2 * sectorsize);
788 return -EINVAL; 794 return -EINVAL;
789 } 795 }
790 796
@@ -796,21 +802,25 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
796 return -EINVAL; 802 return -EINVAL;
797 } 803 }
798 804
799 /* All that remains is a 8Kb free space region in a bitmap. Confirm. */ 805 /*
806 * All that remains is a 2 * sectorsize free space region
807 * in a bitmap. Confirm.
808 */
800 ret = check_num_extents_and_bitmaps(cache, 1, 1); 809 ret = check_num_extents_and_bitmaps(cache, 1, 1);
801 if (ret) 810 if (ret)
802 return ret; 811 return ret;
803 812
804 if (cache->free_space_ctl->free_space != 8192) { 813 if (cache->free_space_ctl->free_space != 2 * sectorsize) {
805 test_msg("Cache free space is not 8Kb\n"); 814 test_msg("Cache free space is not %u\n", 2 * sectorsize);
806 return -EINVAL; 815 return -EINVAL;
807 } 816 }
808 817
809 offset = btrfs_find_space_for_alloc(cache, 818 offset = btrfs_find_space_for_alloc(cache,
810 0, 8192, 0, 819 0, 2 * sectorsize, 0,
811 &max_extent_size); 820 &max_extent_size);
812 if (offset != SZ_32M) { 821 if (offset != SZ_32M) {
813 test_msg("Failed to allocate 8Kb from space cache, returned offset is: %llu\n", 822 test_msg("Failed to allocate %u, offset: %llu\n",
823 2 * sectorsize,
814 offset); 824 offset);
815 return -EINVAL; 825 return -EINVAL;
816 } 826 }
@@ -825,7 +835,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
825 return 0; 835 return 0;
826} 836}
827 837
828int btrfs_test_free_space_cache(void) 838int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
829{ 839{
830 struct btrfs_block_group_cache *cache; 840 struct btrfs_block_group_cache *cache;
831 struct btrfs_root *root = NULL; 841 struct btrfs_root *root = NULL;
@@ -833,13 +843,19 @@ int btrfs_test_free_space_cache(void)
833 843
834 test_msg("Running btrfs free space cache tests\n"); 844 test_msg("Running btrfs free space cache tests\n");
835 845
836 cache = btrfs_alloc_dummy_block_group(1024 * 1024 * 1024); 846 /*
847 * For ppc64 (with a 64k page size), bytes per bitmap might be
848 * larger than 1G. To keep the bitmap test usable on ppc64,
849 * allocate a dummy block group whose size crosses bitmap boundaries.
850 */
851 cache = btrfs_alloc_dummy_block_group(BITS_PER_BITMAP * sectorsize
852 + PAGE_SIZE, sectorsize);
837 if (!cache) { 853 if (!cache) {
838 test_msg("Couldn't run the tests\n"); 854 test_msg("Couldn't run the tests\n");
839 return 0; 855 return 0;
840 } 856 }
841 857
842 root = btrfs_alloc_dummy_root(); 858 root = btrfs_alloc_dummy_root(sectorsize, nodesize);
843 if (IS_ERR(root)) { 859 if (IS_ERR(root)) {
844 ret = PTR_ERR(root); 860 ret = PTR_ERR(root);
845 goto out; 861 goto out;
@@ -855,14 +871,14 @@ int btrfs_test_free_space_cache(void)
855 ret = test_extents(cache); 871 ret = test_extents(cache);
856 if (ret) 872 if (ret)
857 goto out; 873 goto out;
858 ret = test_bitmaps(cache); 874 ret = test_bitmaps(cache, sectorsize);
859 if (ret) 875 if (ret)
860 goto out; 876 goto out;
861 ret = test_bitmaps_and_extents(cache); 877 ret = test_bitmaps_and_extents(cache, sectorsize);
862 if (ret) 878 if (ret)
863 goto out; 879 goto out;
864 880
865 ret = test_steal_space_from_bitmap_to_extent(cache); 881 ret = test_steal_space_from_bitmap_to_extent(cache, sectorsize);
866out: 882out:
867 btrfs_free_dummy_block_group(cache); 883 btrfs_free_dummy_block_group(cache);
868 btrfs_free_dummy_root(root); 884 btrfs_free_dummy_root(root);
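
The 8UL suffix added to BITS_PER_BITMAP above is not cosmetic: next_bitmap_offset and the dummy block group size now multiply it by sectorsize, and with 64K pages and 64K sectors the product is 2^35, which wraps to zero if the expression is evaluated in 32-bit arithmetic before the u64 cast. A host-side sketch of the difference (values assumed for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t page_size  = 64 * 1024;   /* ppc64-style 64K page */
	uint32_t sectorsize = 64 * 1024;
	uint32_t bits = page_size * 8;     /* BITS_PER_BITMAP without UL */

	/* Multiply first, widen after: 2^19 * 2^16 wraps to 0 in 32 bits. */
	uint64_t wrapped = (uint64_t)(bits * sectorsize);
	/* Widen first, which is what the UL-typed macro achieves. */
	uint64_t correct = (uint64_t)bits * sectorsize;

	printf("wrapped = %llu, correct = %llu\n",
	       (unsigned long long)wrapped, (unsigned long long)correct);
	return 0;
}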
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
index 7cea4462acd5..aac507085ab0 100644
--- a/fs/btrfs/tests/free-space-tree-tests.c
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -16,6 +16,7 @@
16 * Boston, MA 021110-1307, USA. 16 * Boston, MA 021110-1307, USA.
17 */ 17 */
18 18
19#include <linux/types.h>
19#include "btrfs-tests.h" 20#include "btrfs-tests.h"
20#include "../ctree.h" 21#include "../ctree.h"
21#include "../disk-io.h" 22#include "../disk-io.h"
@@ -30,7 +31,7 @@ struct free_space_extent {
30 * The test cases align their operations to this in order to hit some of the 31 * The test cases align their operations to this in order to hit some of the
31 * edge cases in the bitmap code. 32 * edge cases in the bitmap code.
32 */ 33 */
33#define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * 4096) 34#define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * PAGE_SIZE)
34 35
35static int __check_free_space_extents(struct btrfs_trans_handle *trans, 36static int __check_free_space_extents(struct btrfs_trans_handle *trans,
36 struct btrfs_fs_info *fs_info, 37 struct btrfs_fs_info *fs_info,
@@ -439,7 +440,8 @@ typedef int (*test_func_t)(struct btrfs_trans_handle *,
439 struct btrfs_block_group_cache *, 440 struct btrfs_block_group_cache *,
440 struct btrfs_path *); 441 struct btrfs_path *);
441 442
442static int run_test(test_func_t test_func, int bitmaps) 443static int run_test(test_func_t test_func, int bitmaps,
444 u32 sectorsize, u32 nodesize)
443{ 445{
444 struct btrfs_root *root = NULL; 446 struct btrfs_root *root = NULL;
445 struct btrfs_block_group_cache *cache = NULL; 447 struct btrfs_block_group_cache *cache = NULL;
@@ -447,7 +449,7 @@ static int run_test(test_func_t test_func, int bitmaps)
447 struct btrfs_path *path = NULL; 449 struct btrfs_path *path = NULL;
448 int ret; 450 int ret;
449 451
450 root = btrfs_alloc_dummy_root(); 452 root = btrfs_alloc_dummy_root(sectorsize, nodesize);
451 if (IS_ERR(root)) { 453 if (IS_ERR(root)) {
452 test_msg("Couldn't allocate dummy root\n"); 454 test_msg("Couldn't allocate dummy root\n");
453 ret = PTR_ERR(root); 455 ret = PTR_ERR(root);
@@ -466,7 +468,8 @@ static int run_test(test_func_t test_func, int bitmaps)
466 root->fs_info->free_space_root = root; 468 root->fs_info->free_space_root = root;
467 root->fs_info->tree_root = root; 469 root->fs_info->tree_root = root;
468 470
469 root->node = alloc_test_extent_buffer(root->fs_info, 4096); 471 root->node = alloc_test_extent_buffer(root->fs_info,
472 nodesize, nodesize);
470 if (!root->node) { 473 if (!root->node) {
471 test_msg("Couldn't allocate dummy buffer\n"); 474 test_msg("Couldn't allocate dummy buffer\n");
472 ret = -ENOMEM; 475 ret = -ENOMEM;
@@ -474,9 +477,9 @@ static int run_test(test_func_t test_func, int bitmaps)
474 } 477 }
475 btrfs_set_header_level(root->node, 0); 478 btrfs_set_header_level(root->node, 0);
476 btrfs_set_header_nritems(root->node, 0); 479 btrfs_set_header_nritems(root->node, 0);
477 root->alloc_bytenr += 8192; 480 root->alloc_bytenr += 2 * nodesize;
478 481
479 cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE); 482 cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE, sectorsize);
480 if (!cache) { 483 if (!cache) {
481 test_msg("Couldn't allocate dummy block group cache\n"); 484 test_msg("Couldn't allocate dummy block group cache\n");
482 ret = -ENOMEM; 485 ret = -ENOMEM;
@@ -534,17 +537,18 @@ out:
534 return ret; 537 return ret;
535} 538}
536 539
537static int run_test_both_formats(test_func_t test_func) 540static int run_test_both_formats(test_func_t test_func,
541 u32 sectorsize, u32 nodesize)
538{ 542{
539 int ret; 543 int ret;
540 544
541 ret = run_test(test_func, 0); 545 ret = run_test(test_func, 0, sectorsize, nodesize);
542 if (ret) 546 if (ret)
543 return ret; 547 return ret;
544 return run_test(test_func, 1); 548 return run_test(test_func, 1, sectorsize, nodesize);
545} 549}
546 550
547int btrfs_test_free_space_tree(void) 551int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
548{ 552{
549 test_func_t tests[] = { 553 test_func_t tests[] = {
550 test_empty_block_group, 554 test_empty_block_group,
@@ -561,9 +565,11 @@ int btrfs_test_free_space_tree(void)
561 565
562 test_msg("Running free space tree tests\n"); 566 test_msg("Running free space tree tests\n");
563 for (i = 0; i < ARRAY_SIZE(tests); i++) { 567 for (i = 0; i < ARRAY_SIZE(tests); i++) {
564 int ret = run_test_both_formats(tests[i]); 568 int ret = run_test_both_formats(tests[i], sectorsize,
569 nodesize);
565 if (ret) { 570 if (ret) {
566 test_msg("%pf failed\n", tests[i]); 571 test_msg("%pf : sectorsize %u failed\n",
572 tests[i], sectorsize);
567 return ret; 573 return ret;
568 } 574 }
569 } 575 }
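
run_test_both_formats() above runs every case once per on-disk free-space representation (extent-based and bitmap-based), and btrfs_test_free_space_tree() now threads the geometry through as well, so each test effectively runs over a format x sectorsize matrix. A compact user-space sketch of that dispatch pattern (types and names illustrative):

#include <stdio.h>

typedef int (*test_func_t)(int bitmaps, unsigned sectorsize,
			   unsigned nodesize);

static int demo_test(int bitmaps, unsigned sectorsize, unsigned nodesize)
{
	printf("bitmaps=%d sectorsize=%u nodesize=%u\n",
	       bitmaps, sectorsize, nodesize);
	return 0;
}

static int run_both_formats(test_func_t fn, unsigned sectorsize,
			    unsigned nodesize)
{
	int ret = fn(0, sectorsize, nodesize);  /* extent representation */

	if (ret)
		return ret;
	return fn(1, sectorsize, nodesize);     /* bitmap representation */
}

int main(void)
{
	/* The two geometries this patch series cares about. */
	return run_both_formats(demo_test, 4096, 4096) ||
	       run_both_formats(demo_test, 64 * 1024, 64 * 1024);
}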
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 8a25fe8b7c45..29648c0a39f1 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -16,6 +16,7 @@
16 * Boston, MA 021110-1307, USA. 16 * Boston, MA 021110-1307, USA.
17 */ 17 */
18 18
19#include <linux/types.h>
19#include "btrfs-tests.h" 20#include "btrfs-tests.h"
20#include "../ctree.h" 21#include "../ctree.h"
21#include "../btrfs_inode.h" 22#include "../btrfs_inode.h"
@@ -86,19 +87,19 @@ static void insert_inode_item_key(struct btrfs_root *root)
86 * diagram of how the extents will look though this may not be possible we still 87 * diagram of how the extents will look though this may not be possible we still
87 * want to make sure everything acts normally (the last number is not inclusive) 88 * want to make sure everything acts normally (the last number is not inclusive)
88 * 89 *
89 * [0 - 5][5 - 6][6 - 10][10 - 4096][ 4096 - 8192 ][8192 - 12288] 90 * [0 - 5][5 - 6][ 6 - 4096 ][ 4096 - 4100][4100 - 8195][8195 - 12291]
90 * [hole ][inline][ hole ][ regular ][regular1 split][ hole ] 91 * [hole ][inline][hole but no extent][ hole ][ regular ][regular1 split]
91 * 92 *
92 * [ 12288 - 20480][20480 - 24576][ 24576 - 28672 ][28672 - 36864][36864 - 45056] 93 * [12291 - 16387][16387 - 24579][24579 - 28675][ 28675 - 32771][32771 - 36867 ]
93 * [regular1 split][ prealloc1 ][prealloc1 written][ prealloc1 ][ compressed ] 94 * [ hole ][regular1 split][ prealloc ][ prealloc1 ][prealloc1 written]
94 * 95 *
95 * [45056 - 49152][49152-53248][53248-61440][61440-65536][ 65536+81920 ] 96 * [36867 - 45059][45059 - 53251][53251 - 57347][57347 - 61443][61443- 69635]
96 * [ compressed1 ][ regular ][compressed1][ regular ][ hole but no extent] 97 * [ prealloc1 ][ compressed ][ compressed1 ][ regular ][ compressed1]
97 * 98 *
98 * [81920-86016] 99 * [69635-73731][ 73731 - 86019 ][86019-90115]
99 * [ regular ] 100 * [ regular ][ hole but no extent][ regular ]
100 */ 101 */
101static void setup_file_extents(struct btrfs_root *root) 102static void setup_file_extents(struct btrfs_root *root, u32 sectorsize)
102{ 103{
103 int slot = 0; 104 int slot = 0;
104 u64 disk_bytenr = SZ_1M; 105 u64 disk_bytenr = SZ_1M;
@@ -119,7 +120,7 @@ static void setup_file_extents(struct btrfs_root *root)
119 insert_extent(root, offset, 1, 1, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0, 120 insert_extent(root, offset, 1, 1, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0,
120 slot); 121 slot);
121 slot++; 122 slot++;
122 offset = 4096; 123 offset = sectorsize;
123 124
124 /* Now another hole */ 125 /* Now another hole */
125 insert_extent(root, offset, 4, 4, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0, 126 insert_extent(root, offset, 4, 4, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0,
@@ -128,99 +129,106 @@ static void setup_file_extents(struct btrfs_root *root)
128 offset += 4; 129 offset += 4;
129 130
130 /* Now for a regular extent */ 131 /* Now for a regular extent */
131 insert_extent(root, offset, 4095, 4095, 0, disk_bytenr, 4096, 132 insert_extent(root, offset, sectorsize - 1, sectorsize - 1, 0,
132 BTRFS_FILE_EXTENT_REG, 0, slot); 133 disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
133 slot++; 134 slot++;
134 disk_bytenr += 4096; 135 disk_bytenr += sectorsize;
135 offset += 4095; 136 offset += sectorsize - 1;
136 137
137 /* 138 /*
138 * Now for 3 extents that were split from a hole punch so we test 139 * Now for 3 extents that were split from a hole punch so we test
139 * offsets properly. 140 * offsets properly.
140 */ 141 */
141 insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384, 142 insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
142 BTRFS_FILE_EXTENT_REG, 0, slot); 143 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
143 slot++; 144 slot++;
144 offset += 4096; 145 offset += sectorsize;
145 insert_extent(root, offset, 4096, 4096, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 146 insert_extent(root, offset, sectorsize, sectorsize, 0, 0, 0,
146 0, slot); 147 BTRFS_FILE_EXTENT_REG, 0, slot);
147 slot++; 148 slot++;
148 offset += 4096; 149 offset += sectorsize;
149 insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384, 150 insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
151 2 * sectorsize, disk_bytenr, 4 * sectorsize,
150 BTRFS_FILE_EXTENT_REG, 0, slot); 152 BTRFS_FILE_EXTENT_REG, 0, slot);
151 slot++; 153 slot++;
152 offset += 8192; 154 offset += 2 * sectorsize;
153 disk_bytenr += 16384; 155 disk_bytenr += 4 * sectorsize;
154 156
155 /* Now for an unwritten prealloc extent */ 157 /* Now for an unwritten prealloc extent */
156 insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096, 158 insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
157 BTRFS_FILE_EXTENT_PREALLOC, 0, slot); 159 sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
158 slot++; 160 slot++;
159 offset += 4096; 161 offset += sectorsize;
160 162
161 /* 163 /*
162 * We want to jack up disk_bytenr a little more so the em stuff doesn't 164 * We want to jack up disk_bytenr a little more so the em stuff doesn't
163 * merge our records. 165 * merge our records.
164 */ 166 */
165 disk_bytenr += 8192; 167 disk_bytenr += 2 * sectorsize;
166 168
167 /* 169 /*
168 * Now for a partially written prealloc extent, basically the same as 170 * Now for a partially written prealloc extent, basically the same as
169 * the hole punch example above. Ram_bytes never changes when you mark 171 * the hole punch example above. Ram_bytes never changes when you mark
170 * extents written btw. 172 * extents written btw.
171 */ 173 */
172 insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384, 174 insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
173 BTRFS_FILE_EXTENT_PREALLOC, 0, slot); 175 4 * sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
174 slot++; 176 slot++;
175 offset += 4096; 177 offset += sectorsize;
176 insert_extent(root, offset, 4096, 16384, 4096, disk_bytenr, 16384, 178 insert_extent(root, offset, sectorsize, 4 * sectorsize, sectorsize,
177 BTRFS_FILE_EXTENT_REG, 0, slot); 179 disk_bytenr, 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0,
180 slot);
178 slot++; 181 slot++;
179 offset += 4096; 182 offset += sectorsize;
180 insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384, 183 insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
184 2 * sectorsize, disk_bytenr, 4 * sectorsize,
181 BTRFS_FILE_EXTENT_PREALLOC, 0, slot); 185 BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
182 slot++; 186 slot++;
183 offset += 8192; 187 offset += 2 * sectorsize;
184 disk_bytenr += 16384; 188 disk_bytenr += 4 * sectorsize;
185 189
186 /* Now a normal compressed extent */ 190 /* Now a normal compressed extent */
187 insert_extent(root, offset, 8192, 8192, 0, disk_bytenr, 4096, 191 insert_extent(root, offset, 2 * sectorsize, 2 * sectorsize, 0,
188 BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot); 192 disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG,
193 BTRFS_COMPRESS_ZLIB, slot);
189 slot++; 194 slot++;
190 offset += 8192; 195 offset += 2 * sectorsize;
191 /* No merges */ 196 /* No merges */
192 disk_bytenr += 8192; 197 disk_bytenr += 2 * sectorsize;
193 198
194 /* Now a split compressed extent */ 199 /* Now a split compressed extent */
195 insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 4096, 200 insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
196 BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot); 201 sectorsize, BTRFS_FILE_EXTENT_REG,
202 BTRFS_COMPRESS_ZLIB, slot);
197 slot++; 203 slot++;
198 offset += 4096; 204 offset += sectorsize;
199 insert_extent(root, offset, 4096, 4096, 0, disk_bytenr + 4096, 4096, 205 insert_extent(root, offset, sectorsize, sectorsize, 0,
206 disk_bytenr + sectorsize, sectorsize,
200 BTRFS_FILE_EXTENT_REG, 0, slot); 207 BTRFS_FILE_EXTENT_REG, 0, slot);
201 slot++; 208 slot++;
202 offset += 4096; 209 offset += sectorsize;
203 insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 4096, 210 insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
211 2 * sectorsize, disk_bytenr, sectorsize,
204 BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot); 212 BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
205 slot++; 213 slot++;
206 offset += 8192; 214 offset += 2 * sectorsize;
207 disk_bytenr += 8192; 215 disk_bytenr += 2 * sectorsize;
208 216
209 /* Now extents that have a hole but no hole extent */ 217 /* Now extents that have a hole but no hole extent */
210 insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096, 218 insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
211 BTRFS_FILE_EXTENT_REG, 0, slot); 219 sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
212 slot++; 220 slot++;
213 offset += 16384; 221 offset += 4 * sectorsize;
214 disk_bytenr += 4096; 222 disk_bytenr += sectorsize;
215 insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096, 223 insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
216 BTRFS_FILE_EXTENT_REG, 0, slot); 224 sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
217} 225}
218 226
219static unsigned long prealloc_only = 0; 227static unsigned long prealloc_only = 0;
220static unsigned long compressed_only = 0; 228static unsigned long compressed_only = 0;
221static unsigned long vacancy_only = 0; 229static unsigned long vacancy_only = 0;
222 230
223static noinline int test_btrfs_get_extent(void) 231static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
224{ 232{
225 struct inode *inode = NULL; 233 struct inode *inode = NULL;
226 struct btrfs_root *root = NULL; 234 struct btrfs_root *root = NULL;
@@ -240,7 +248,7 @@ static noinline int test_btrfs_get_extent(void)
240 BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; 248 BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
241 BTRFS_I(inode)->location.offset = 0; 249 BTRFS_I(inode)->location.offset = 0;
242 250
243 root = btrfs_alloc_dummy_root(); 251 root = btrfs_alloc_dummy_root(sectorsize, nodesize);
244 if (IS_ERR(root)) { 252 if (IS_ERR(root)) {
245 test_msg("Couldn't allocate root\n"); 253 test_msg("Couldn't allocate root\n");
246 goto out; 254 goto out;
@@ -256,7 +264,7 @@ static noinline int test_btrfs_get_extent(void)
256 goto out; 264 goto out;
257 } 265 }
258 266
259 root->node = alloc_dummy_extent_buffer(NULL, 4096); 267 root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
260 if (!root->node) { 268 if (!root->node) {
261 test_msg("Couldn't allocate dummy buffer\n"); 269 test_msg("Couldn't allocate dummy buffer\n");
262 goto out; 270 goto out;
@@ -273,7 +281,7 @@ static noinline int test_btrfs_get_extent(void)
273 281
274 /* First with no extents */ 282 /* First with no extents */
275 BTRFS_I(inode)->root = root; 283 BTRFS_I(inode)->root = root;
276 em = btrfs_get_extent(inode, NULL, 0, 0, 4096, 0); 284 em = btrfs_get_extent(inode, NULL, 0, 0, sectorsize, 0);
277 if (IS_ERR(em)) { 285 if (IS_ERR(em)) {
278 em = NULL; 286 em = NULL;
279 test_msg("Got an error when we shouldn't have\n"); 287 test_msg("Got an error when we shouldn't have\n");
@@ -295,7 +303,7 @@ static noinline int test_btrfs_get_extent(void)
295 * setup_file_extents, so if you change anything there you need to 303 * setup_file_extents, so if you change anything there you need to
296 * update the comment and update the expected values below. 304 * update the comment and update the expected values below.
297 */ 305 */
298 setup_file_extents(root); 306 setup_file_extents(root, sectorsize);
299 307
300 em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0); 308 em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0);
301 if (IS_ERR(em)) { 309 if (IS_ERR(em)) {
@@ -318,7 +326,7 @@ static noinline int test_btrfs_get_extent(void)
318 offset = em->start + em->len; 326 offset = em->start + em->len;
319 free_extent_map(em); 327 free_extent_map(em);
320 328
321 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 329 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
322 if (IS_ERR(em)) { 330 if (IS_ERR(em)) {
323 test_msg("Got an error when we shouldn't have\n"); 331 test_msg("Got an error when we shouldn't have\n");
324 goto out; 332 goto out;
@@ -327,7 +335,8 @@ static noinline int test_btrfs_get_extent(void)
327 test_msg("Expected an inline, got %llu\n", em->block_start); 335 test_msg("Expected an inline, got %llu\n", em->block_start);
328 goto out; 336 goto out;
329 } 337 }
330 if (em->start != offset || em->len != 4091) { 338
339 if (em->start != offset || em->len != (sectorsize - 5)) {
331 test_msg("Unexpected extent wanted start %llu len 1, got start " 340 test_msg("Unexpected extent wanted start %llu len 1, got start "
332 "%llu len %llu\n", offset, em->start, em->len); 341 "%llu len %llu\n", offset, em->start, em->len);
333 goto out; 342 goto out;
@@ -344,7 +353,7 @@ static noinline int test_btrfs_get_extent(void)
344 offset = em->start + em->len; 353 offset = em->start + em->len;
345 free_extent_map(em); 354 free_extent_map(em);
346 355
347 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 356 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
348 if (IS_ERR(em)) { 357 if (IS_ERR(em)) {
349 test_msg("Got an error when we shouldn't have\n"); 358 test_msg("Got an error when we shouldn't have\n");
350 goto out; 359 goto out;
@@ -366,7 +375,7 @@ static noinline int test_btrfs_get_extent(void)
366 free_extent_map(em); 375 free_extent_map(em);
367 376
368 /* Regular extent */ 377 /* Regular extent */
369 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 378 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
370 if (IS_ERR(em)) { 379 if (IS_ERR(em)) {
371 test_msg("Got an error when we shouldn't have\n"); 380 test_msg("Got an error when we shouldn't have\n");
372 goto out; 381 goto out;
@@ -375,7 +384,7 @@ static noinline int test_btrfs_get_extent(void)
375 test_msg("Expected a real extent, got %llu\n", em->block_start); 384 test_msg("Expected a real extent, got %llu\n", em->block_start);
376 goto out; 385 goto out;
377 } 386 }
378 if (em->start != offset || em->len != 4095) { 387 if (em->start != offset || em->len != sectorsize - 1) {
379 test_msg("Unexpected extent wanted start %llu len 4095, got " 388 test_msg("Unexpected extent wanted start %llu len 4095, got "
380 "start %llu len %llu\n", offset, em->start, em->len); 389 "start %llu len %llu\n", offset, em->start, em->len);
381 goto out; 390 goto out;
@@ -393,7 +402,7 @@ static noinline int test_btrfs_get_extent(void)
393 free_extent_map(em); 402 free_extent_map(em);
394 403
395 /* The next 3 are split extents */ 404 /* The next 3 are split extents */
396 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 405 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
397 if (IS_ERR(em)) { 406 if (IS_ERR(em)) {
398 test_msg("Got an error when we shouldn't have\n"); 407 test_msg("Got an error when we shouldn't have\n");
399 goto out; 408 goto out;
@@ -402,9 +411,10 @@ static noinline int test_btrfs_get_extent(void)
402 test_msg("Expected a real extent, got %llu\n", em->block_start); 411 test_msg("Expected a real extent, got %llu\n", em->block_start);
403 goto out; 412 goto out;
404 } 413 }
405 if (em->start != offset || em->len != 4096) { 414 if (em->start != offset || em->len != sectorsize) {
406 test_msg("Unexpected extent wanted start %llu len 4096, got " 415 test_msg("Unexpected extent start %llu len %u, "
407 "start %llu len %llu\n", offset, em->start, em->len); 416 "got start %llu len %llu\n",
417 offset, sectorsize, em->start, em->len);
408 goto out; 418 goto out;
409 } 419 }
410 if (em->flags != 0) { 420 if (em->flags != 0) {
@@ -421,7 +431,7 @@ static noinline int test_btrfs_get_extent(void)
421 offset = em->start + em->len; 431 offset = em->start + em->len;
422 free_extent_map(em); 432 free_extent_map(em);
423 433
424 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 434 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
425 if (IS_ERR(em)) { 435 if (IS_ERR(em)) {
426 test_msg("Got an error when we shouldn't have\n"); 436 test_msg("Got an error when we shouldn't have\n");
427 goto out; 437 goto out;
@@ -430,9 +440,10 @@ static noinline int test_btrfs_get_extent(void)
430 test_msg("Expected a hole, got %llu\n", em->block_start); 440 test_msg("Expected a hole, got %llu\n", em->block_start);
431 goto out; 441 goto out;
432 } 442 }
433 if (em->start != offset || em->len != 4096) { 443 if (em->start != offset || em->len != sectorsize) {
434 test_msg("Unexpected extent wanted start %llu len 4096, got " 444 test_msg("Unexpected extent wanted start %llu len %u, "
435 "start %llu len %llu\n", offset, em->start, em->len); 445 "got start %llu len %llu\n",
446 offset, sectorsize, em->start, em->len);
436 goto out; 447 goto out;
437 } 448 }
438 if (em->flags != 0) { 449 if (em->flags != 0) {
@@ -442,7 +453,7 @@ static noinline int test_btrfs_get_extent(void)
442 offset = em->start + em->len; 453 offset = em->start + em->len;
443 free_extent_map(em); 454 free_extent_map(em);
444 455
445 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 456 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
446 if (IS_ERR(em)) { 457 if (IS_ERR(em)) {
447 test_msg("Got an error when we shouldn't have\n"); 458 test_msg("Got an error when we shouldn't have\n");
448 goto out; 459 goto out;
@@ -451,9 +462,10 @@ static noinline int test_btrfs_get_extent(void)
451 test_msg("Expected a real extent, got %llu\n", em->block_start); 462 test_msg("Expected a real extent, got %llu\n", em->block_start);
452 goto out; 463 goto out;
453 } 464 }
454 if (em->start != offset || em->len != 8192) { 465 if (em->start != offset || em->len != 2 * sectorsize) {
455 test_msg("Unexpected extent wanted start %llu len 8192, got " 466 test_msg("Unexpected extent wanted start %llu len %u, "
456 "start %llu len %llu\n", offset, em->start, em->len); 467 "got start %llu len %llu\n",
468 offset, 2 * sectorsize, em->start, em->len);
457 goto out; 469 goto out;
458 } 470 }
459 if (em->flags != 0) { 471 if (em->flags != 0) {
@@ -475,7 +487,7 @@ static noinline int test_btrfs_get_extent(void)
475 free_extent_map(em); 487 free_extent_map(em);
476 488
477 /* Prealloc extent */ 489 /* Prealloc extent */
478 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 490 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
479 if (IS_ERR(em)) { 491 if (IS_ERR(em)) {
480 test_msg("Got an error when we shouldn't have\n"); 492 test_msg("Got an error when we shouldn't have\n");
481 goto out; 493 goto out;
@@ -484,9 +496,10 @@ static noinline int test_btrfs_get_extent(void)
484 test_msg("Expected a real extent, got %llu\n", em->block_start); 496 test_msg("Expected a real extent, got %llu\n", em->block_start);
485 goto out; 497 goto out;
486 } 498 }
487 if (em->start != offset || em->len != 4096) { 499 if (em->start != offset || em->len != sectorsize) {
488 test_msg("Unexpected extent wanted start %llu len 4096, got " 500 test_msg("Unexpected extent wanted start %llu len %u, "
489 "start %llu len %llu\n", offset, em->start, em->len); 501 "got start %llu len %llu\n",
502 offset, sectorsize, em->start, em->len);
490 goto out; 503 goto out;
491 } 504 }
492 if (em->flags != prealloc_only) { 505 if (em->flags != prealloc_only) {
@@ -503,7 +516,7 @@ static noinline int test_btrfs_get_extent(void)
503 free_extent_map(em); 516 free_extent_map(em);
504 517
505 /* The next 3 are a half written prealloc extent */ 518 /* The next 3 are a half written prealloc extent */
506 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 519 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
507 if (IS_ERR(em)) { 520 if (IS_ERR(em)) {
508 test_msg("Got an error when we shouldn't have\n"); 521 test_msg("Got an error when we shouldn't have\n");
509 goto out; 522 goto out;
@@ -512,9 +525,10 @@ static noinline int test_btrfs_get_extent(void)
512 test_msg("Expected a real extent, got %llu\n", em->block_start); 525 test_msg("Expected a real extent, got %llu\n", em->block_start);
513 goto out; 526 goto out;
514 } 527 }
515 if (em->start != offset || em->len != 4096) { 528 if (em->start != offset || em->len != sectorsize) {
516 test_msg("Unexpected extent wanted start %llu len 4096, got " 529 test_msg("Unexpected extent wanted start %llu len %u, "
517 "start %llu len %llu\n", offset, em->start, em->len); 530 "got start %llu len %llu\n",
531 offset, sectorsize, em->start, em->len);
518 goto out; 532 goto out;
519 } 533 }
520 if (em->flags != prealloc_only) { 534 if (em->flags != prealloc_only) {
@@ -532,7 +546,7 @@ static noinline int test_btrfs_get_extent(void)
532 offset = em->start + em->len; 546 offset = em->start + em->len;
533 free_extent_map(em); 547 free_extent_map(em);
534 548
535 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 549 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
536 if (IS_ERR(em)) { 550 if (IS_ERR(em)) {
537 test_msg("Got an error when we shouldn't have\n"); 551 test_msg("Got an error when we shouldn't have\n");
538 goto out; 552 goto out;
@@ -541,9 +555,10 @@ static noinline int test_btrfs_get_extent(void)
541 test_msg("Expected a real extent, got %llu\n", em->block_start); 555 test_msg("Expected a real extent, got %llu\n", em->block_start);
542 goto out; 556 goto out;
543 } 557 }
544 if (em->start != offset || em->len != 4096) { 558 if (em->start != offset || em->len != sectorsize) {
545 test_msg("Unexpected extent wanted start %llu len 4096, got " 559 test_msg("Unexpected extent wanted start %llu len %u, "
546 "start %llu len %llu\n", offset, em->start, em->len); 560 "got start %llu len %llu\n",
561 offset, sectorsize, em->start, em->len);
547 goto out; 562 goto out;
548 } 563 }
549 if (em->flags != 0) { 564 if (em->flags != 0) {
@@ -564,7 +579,7 @@ static noinline int test_btrfs_get_extent(void)
564 offset = em->start + em->len; 579 offset = em->start + em->len;
565 free_extent_map(em); 580 free_extent_map(em);
566 581
567 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 582 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
568 if (IS_ERR(em)) { 583 if (IS_ERR(em)) {
569 test_msg("Got an error when we shouldn't have\n"); 584 test_msg("Got an error when we shouldn't have\n");
570 goto out; 585 goto out;
@@ -573,9 +588,10 @@ static noinline int test_btrfs_get_extent(void)
573 test_msg("Expected a real extent, got %llu\n", em->block_start); 588 test_msg("Expected a real extent, got %llu\n", em->block_start);
574 goto out; 589 goto out;
575 } 590 }
576 if (em->start != offset || em->len != 8192) { 591 if (em->start != offset || em->len != 2 * sectorsize) {
577 test_msg("Unexpected extent wanted start %llu len 8192, got " 592 test_msg("Unexpected extent wanted start %llu len %u, "
578 "start %llu len %llu\n", offset, em->start, em->len); 593 "got start %llu len %llu\n",
594 offset, 2 * sectorsize, em->start, em->len);
579 goto out; 595 goto out;
580 } 596 }
581 if (em->flags != prealloc_only) { 597 if (em->flags != prealloc_only) {
@@ -598,7 +614,7 @@ static noinline int test_btrfs_get_extent(void)
598 free_extent_map(em); 614 free_extent_map(em);
599 615
600 /* Now for the compressed extent */ 616 /* Now for the compressed extent */
601 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 617 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
602 if (IS_ERR(em)) { 618 if (IS_ERR(em)) {
603 test_msg("Got an error when we shouldn't have\n"); 619 test_msg("Got an error when we shouldn't have\n");
604 goto out; 620 goto out;
@@ -607,9 +623,10 @@ static noinline int test_btrfs_get_extent(void)
607 test_msg("Expected a real extent, got %llu\n", em->block_start); 623 test_msg("Expected a real extent, got %llu\n", em->block_start);
608 goto out; 624 goto out;
609 } 625 }
610 if (em->start != offset || em->len != 8192) { 626 if (em->start != offset || em->len != 2 * sectorsize) {
611 test_msg("Unexpected extent wanted start %llu len 8192, got " 627 test_msg("Unexpected extent wanted start %llu len %u,"
612 "start %llu len %llu\n", offset, em->start, em->len); 628 "got start %llu len %llu\n",
629 offset, 2 * sectorsize, em->start, em->len);
613 goto out; 630 goto out;
614 } 631 }
615 if (em->flags != compressed_only) { 632 if (em->flags != compressed_only) {
@@ -631,7 +648,7 @@ static noinline int test_btrfs_get_extent(void)
631 free_extent_map(em); 648 free_extent_map(em);
632 649
633 /* Split compressed extent */ 650 /* Split compressed extent */
634 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 651 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
635 if (IS_ERR(em)) { 652 if (IS_ERR(em)) {
636 test_msg("Got an error when we shouldn't have\n"); 653 test_msg("Got an error when we shouldn't have\n");
637 goto out; 654 goto out;
@@ -640,9 +657,10 @@ static noinline int test_btrfs_get_extent(void)
640 test_msg("Expected a real extent, got %llu\n", em->block_start); 657 test_msg("Expected a real extent, got %llu\n", em->block_start);
641 goto out; 658 goto out;
642 } 659 }
643 if (em->start != offset || em->len != 4096) { 660 if (em->start != offset || em->len != sectorsize) {
644 test_msg("Unexpected extent wanted start %llu len 4096, got " 661 test_msg("Unexpected extent wanted start %llu len %u,"
645 "start %llu len %llu\n", offset, em->start, em->len); 662 "got start %llu len %llu\n",
663 offset, sectorsize, em->start, em->len);
646 goto out; 664 goto out;
647 } 665 }
648 if (em->flags != compressed_only) { 666 if (em->flags != compressed_only) {
@@ -665,7 +683,7 @@ static noinline int test_btrfs_get_extent(void)
665 offset = em->start + em->len; 683 offset = em->start + em->len;
666 free_extent_map(em); 684 free_extent_map(em);
667 685
668 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 686 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
669 if (IS_ERR(em)) { 687 if (IS_ERR(em)) {
670 test_msg("Got an error when we shouldn't have\n"); 688 test_msg("Got an error when we shouldn't have\n");
671 goto out; 689 goto out;
@@ -674,9 +692,10 @@ static noinline int test_btrfs_get_extent(void)
674 test_msg("Expected a real extent, got %llu\n", em->block_start); 692 test_msg("Expected a real extent, got %llu\n", em->block_start);
675 goto out; 693 goto out;
676 } 694 }
677 if (em->start != offset || em->len != 4096) { 695 if (em->start != offset || em->len != sectorsize) {
678 test_msg("Unexpected extent wanted start %llu len 4096, got " 696 test_msg("Unexpected extent wanted start %llu len %u, "
679 "start %llu len %llu\n", offset, em->start, em->len); 697 "got start %llu len %llu\n",
698 offset, sectorsize, em->start, em->len);
680 goto out; 699 goto out;
681 } 700 }
682 if (em->flags != 0) { 701 if (em->flags != 0) {
@@ -691,7 +710,7 @@ static noinline int test_btrfs_get_extent(void)
691 offset = em->start + em->len; 710 offset = em->start + em->len;
692 free_extent_map(em); 711 free_extent_map(em);
693 712
694 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 713 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
695 if (IS_ERR(em)) { 714 if (IS_ERR(em)) {
696 test_msg("Got an error when we shouldn't have\n"); 715 test_msg("Got an error when we shouldn't have\n");
697 goto out; 716 goto out;
@@ -701,9 +720,10 @@ static noinline int test_btrfs_get_extent(void)
701 disk_bytenr, em->block_start); 720 disk_bytenr, em->block_start);
702 goto out; 721 goto out;
703 } 722 }
704 if (em->start != offset || em->len != 8192) { 723 if (em->start != offset || em->len != 2 * sectorsize) {
705 test_msg("Unexpected extent wanted start %llu len 8192, got " 724 test_msg("Unexpected extent wanted start %llu len %u, "
706 "start %llu len %llu\n", offset, em->start, em->len); 725 "got start %llu len %llu\n",
726 offset, 2 * sectorsize, em->start, em->len);
707 goto out; 727 goto out;
708 } 728 }
709 if (em->flags != compressed_only) { 729 if (em->flags != compressed_only) {
@@ -725,7 +745,7 @@ static noinline int test_btrfs_get_extent(void)
725 free_extent_map(em); 745 free_extent_map(em);
726 746
727 /* A hole between regular extents but no hole extent */ 747 /* A hole between regular extents but no hole extent */
728 em = btrfs_get_extent(inode, NULL, 0, offset + 6, 4096, 0); 748 em = btrfs_get_extent(inode, NULL, 0, offset + 6, sectorsize, 0);
729 if (IS_ERR(em)) { 749 if (IS_ERR(em)) {
730 test_msg("Got an error when we shouldn't have\n"); 750 test_msg("Got an error when we shouldn't have\n");
731 goto out; 751 goto out;
@@ -734,9 +754,10 @@ static noinline int test_btrfs_get_extent(void)
734 test_msg("Expected a real extent, got %llu\n", em->block_start); 754 test_msg("Expected a real extent, got %llu\n", em->block_start);
735 goto out; 755 goto out;
736 } 756 }
737 if (em->start != offset || em->len != 4096) { 757 if (em->start != offset || em->len != sectorsize) {
738 test_msg("Unexpected extent wanted start %llu len 4096, got " 758 test_msg("Unexpected extent wanted start %llu len %u, "
739 "start %llu len %llu\n", offset, em->start, em->len); 759 "got start %llu len %llu\n",
760 offset, sectorsize, em->start, em->len);
740 goto out; 761 goto out;
741 } 762 }
742 if (em->flags != 0) { 763 if (em->flags != 0) {
@@ -765,9 +786,10 @@ static noinline int test_btrfs_get_extent(void)
765 * length of the actual hole; if this changes we'll have to change this 786 * length of the actual hole; if this changes we'll have to change this
766 * test. 787 * test.
767 */ 788 */
768 if (em->start != offset || em->len != 12288) { 789 if (em->start != offset || em->len != 3 * sectorsize) {
769 test_msg("Unexpected extent wanted start %llu len 12288, got " 790 test_msg("Unexpected extent wanted start %llu len %u, "
770 "start %llu len %llu\n", offset, em->start, em->len); 791 "got start %llu len %llu\n",
792 offset, 3 * sectorsize, em->start, em->len);
771 goto out; 793 goto out;
772 } 794 }
773 if (em->flags != vacancy_only) { 795 if (em->flags != vacancy_only) {
@@ -783,7 +805,7 @@ static noinline int test_btrfs_get_extent(void)
783 offset = em->start + em->len; 805 offset = em->start + em->len;
784 free_extent_map(em); 806 free_extent_map(em);
785 807
786 em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); 808 em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
787 if (IS_ERR(em)) { 809 if (IS_ERR(em)) {
788 test_msg("Got an error when we shouldn't have\n"); 810 test_msg("Got an error when we shouldn't have\n");
789 goto out; 811 goto out;
@@ -792,9 +814,10 @@ static noinline int test_btrfs_get_extent(void)
792 test_msg("Expected a real extent, got %llu\n", em->block_start); 814 test_msg("Expected a real extent, got %llu\n", em->block_start);
793 goto out; 815 goto out;
794 } 816 }
795 if (em->start != offset || em->len != 4096) { 817 if (em->start != offset || em->len != sectorsize) {
796 test_msg("Unexpected extent wanted start %llu len 4096, got " 818 test_msg("Unexpected extent wanted start %llu len %u,"
797 "start %llu len %llu\n", offset, em->start, em->len); 819 "got start %llu len %llu\n",
820 offset, sectorsize, em->start, em->len);
798 goto out; 821 goto out;
799 } 822 }
800 if (em->flags != 0) { 823 if (em->flags != 0) {
@@ -815,7 +838,7 @@ out:
815 return ret; 838 return ret;
816} 839}
817 840
818static int test_hole_first(void) 841static int test_hole_first(u32 sectorsize, u32 nodesize)
819{ 842{
820 struct inode *inode = NULL; 843 struct inode *inode = NULL;
821 struct btrfs_root *root = NULL; 844 struct btrfs_root *root = NULL;
@@ -832,7 +855,7 @@ static int test_hole_first(void)
832 BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; 855 BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
833 BTRFS_I(inode)->location.offset = 0; 856 BTRFS_I(inode)->location.offset = 0;
834 857
835 root = btrfs_alloc_dummy_root(); 858 root = btrfs_alloc_dummy_root(sectorsize, nodesize);
836 if (IS_ERR(root)) { 859 if (IS_ERR(root)) {
837 test_msg("Couldn't allocate root\n"); 860 test_msg("Couldn't allocate root\n");
838 goto out; 861 goto out;
@@ -844,7 +867,7 @@ static int test_hole_first(void)
844 goto out; 867 goto out;
845 } 868 }
846 869
847 root->node = alloc_dummy_extent_buffer(NULL, 4096); 870 root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
848 if (!root->node) { 871 if (!root->node) {
849 test_msg("Couldn't allocate dummy buffer\n"); 872 test_msg("Couldn't allocate dummy buffer\n");
850 goto out; 873 goto out;
@@ -861,9 +884,9 @@ static int test_hole_first(void)
861 * btrfs_get_extent. 884 * btrfs_get_extent.
862 */ 885 */
863 insert_inode_item_key(root); 886 insert_inode_item_key(root);
864 insert_extent(root, 4096, 4096, 4096, 0, 4096, 4096, 887 insert_extent(root, sectorsize, sectorsize, sectorsize, 0, sectorsize,
865 BTRFS_FILE_EXTENT_REG, 0, 1); 888 sectorsize, BTRFS_FILE_EXTENT_REG, 0, 1);
866 em = btrfs_get_extent(inode, NULL, 0, 0, 8192, 0); 889 em = btrfs_get_extent(inode, NULL, 0, 0, 2 * sectorsize, 0);
867 if (IS_ERR(em)) { 890 if (IS_ERR(em)) {
868 test_msg("Got an error when we shouldn't have\n"); 891 test_msg("Got an error when we shouldn't have\n");
869 goto out; 892 goto out;
@@ -872,9 +895,10 @@ static int test_hole_first(void)
872 test_msg("Expected a hole, got %llu\n", em->block_start); 895 test_msg("Expected a hole, got %llu\n", em->block_start);
873 goto out; 896 goto out;
874 } 897 }
875 if (em->start != 0 || em->len != 4096) { 898 if (em->start != 0 || em->len != sectorsize) {
876 test_msg("Unexpected extent wanted start 0 len 4096, got start " 899 test_msg("Unexpected extent wanted start 0 len %u, "
877 "%llu len %llu\n", em->start, em->len); 900 "got start %llu len %llu\n",
901 sectorsize, em->start, em->len);
878 goto out; 902 goto out;
879 } 903 }
880 if (em->flags != vacancy_only) { 904 if (em->flags != vacancy_only) {
@@ -884,18 +908,19 @@ static int test_hole_first(void)
884 } 908 }
885 free_extent_map(em); 909 free_extent_map(em);
886 910
887 em = btrfs_get_extent(inode, NULL, 0, 4096, 8192, 0); 911 em = btrfs_get_extent(inode, NULL, 0, sectorsize, 2 * sectorsize, 0);
888 if (IS_ERR(em)) { 912 if (IS_ERR(em)) {
889 test_msg("Got an error when we shouldn't have\n"); 913 test_msg("Got an error when we shouldn't have\n");
890 goto out; 914 goto out;
891 } 915 }
892 if (em->block_start != 4096) { 916 if (em->block_start != sectorsize) {
893 test_msg("Expected a real extent, got %llu\n", em->block_start); 917 test_msg("Expected a real extent, got %llu\n", em->block_start);
894 goto out; 918 goto out;
895 } 919 }
896 if (em->start != 4096 || em->len != 4096) { 920 if (em->start != sectorsize || em->len != sectorsize) {
897 test_msg("Unexpected extent wanted start 4096 len 4096, got " 921 test_msg("Unexpected extent wanted start %u len %u, "
898 "start %llu len %llu\n", em->start, em->len); 922 "got start %llu len %llu\n",
923 sectorsize, sectorsize, em->start, em->len);
899 goto out; 924 goto out;
900 } 925 }
901 if (em->flags != 0) { 926 if (em->flags != 0) {
@@ -912,7 +937,7 @@ out:
912 return ret; 937 return ret;
913} 938}
914 939
915static int test_extent_accounting(void) 940static int test_extent_accounting(u32 sectorsize, u32 nodesize)
916{ 941{
917 struct inode *inode = NULL; 942 struct inode *inode = NULL;
918 struct btrfs_root *root = NULL; 943 struct btrfs_root *root = NULL;
@@ -924,7 +949,7 @@ static int test_extent_accounting(void)
924 return ret; 949 return ret;
925 } 950 }
926 951
927 root = btrfs_alloc_dummy_root(); 952 root = btrfs_alloc_dummy_root(sectorsize, nodesize);
928 if (IS_ERR(root)) { 953 if (IS_ERR(root)) {
929 test_msg("Couldn't allocate root\n"); 954 test_msg("Couldn't allocate root\n");
930 goto out; 955 goto out;
@@ -954,10 +979,11 @@ static int test_extent_accounting(void)
954 goto out; 979 goto out;
955 } 980 }
956 981
957 /* [BTRFS_MAX_EXTENT_SIZE][4k] */ 982 /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
958 BTRFS_I(inode)->outstanding_extents++; 983 BTRFS_I(inode)->outstanding_extents++;
959 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE, 984 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
960 BTRFS_MAX_EXTENT_SIZE + 4095, NULL); 985 BTRFS_MAX_EXTENT_SIZE + sectorsize - 1,
986 NULL);
961 if (ret) { 987 if (ret) {
962 test_msg("btrfs_set_extent_delalloc returned %d\n", ret); 988 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
963 goto out; 989 goto out;
@@ -969,10 +995,10 @@ static int test_extent_accounting(void)
969 goto out; 995 goto out;
970 } 996 }
971 997
972 /* [BTRFS_MAX_EXTENT_SIZE/2][4K HOLE][the rest] */ 998 /* [BTRFS_MAX_EXTENT_SIZE/2][sectorsize HOLE][the rest] */
973 ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 999 ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
974 BTRFS_MAX_EXTENT_SIZE >> 1, 1000 BTRFS_MAX_EXTENT_SIZE >> 1,
975 (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095, 1001 (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
976 EXTENT_DELALLOC | EXTENT_DIRTY | 1002 EXTENT_DELALLOC | EXTENT_DIRTY |
977 EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0, 1003 EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0,
978 NULL, GFP_KERNEL); 1004 NULL, GFP_KERNEL);
@@ -987,10 +1013,11 @@ static int test_extent_accounting(void)
987 goto out; 1013 goto out;
988 } 1014 }
989 1015
990 /* [BTRFS_MAX_EXTENT_SIZE][4K] */ 1016 /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
991 BTRFS_I(inode)->outstanding_extents++; 1017 BTRFS_I(inode)->outstanding_extents++;
992 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1, 1018 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
993 (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095, 1019 (BTRFS_MAX_EXTENT_SIZE >> 1)
1020 + sectorsize - 1,
994 NULL); 1021 NULL);
995 if (ret) { 1022 if (ret) {
996 test_msg("btrfs_set_extent_delalloc returned %d\n", ret); 1023 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
@@ -1004,16 +1031,17 @@ static int test_extent_accounting(void)
1004 } 1031 }
1005 1032
1006 /* 1033 /*
1007 * [BTRFS_MAX_EXTENT_SIZE+4K][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4K] 1034 * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize HOLE][BTRFS_MAX_EXTENT_SIZE+sectorsize]
1008 * 1035 *
1009 * I'm artificially adding 2 to outstanding_extents because in the 1036 * I'm artificially adding 2 to outstanding_extents because in the
1010 * buffered IO case we'd add things up as we go, but I don't feel like 1037 * buffered IO case we'd add things up as we go, but I don't feel like
1011 * doing that here; this isn't the interesting case we want to test. 1038 * doing that here; this isn't the interesting case we want to test.
1012 */ 1039 */
1013 BTRFS_I(inode)->outstanding_extents += 2; 1040 BTRFS_I(inode)->outstanding_extents += 2;
1014 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE + 8192, 1041 ret = btrfs_set_extent_delalloc(inode,
1015 (BTRFS_MAX_EXTENT_SIZE << 1) + 12287, 1042 BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize,
1016 NULL); 1043 (BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1,
1044 NULL);
1017 if (ret) { 1045 if (ret) {
1018 test_msg("btrfs_set_extent_delalloc returned %d\n", ret); 1046 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
1019 goto out; 1047 goto out;
@@ -1025,10 +1053,13 @@ static int test_extent_accounting(void)
1025 goto out; 1053 goto out;
1026 } 1054 }
1027 1055
1028 /* [BTRFS_MAX_EXTENT_SIZE+4k][4k][BTRFS_MAX_EXTENT_SIZE+4k] */ 1056 /*
1057 * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize][BTRFS_MAX_EXTENT_SIZE+sectorsize]
1058 */
1029 BTRFS_I(inode)->outstanding_extents++; 1059 BTRFS_I(inode)->outstanding_extents++;
1030 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096, 1060 ret = btrfs_set_extent_delalloc(inode,
1031 BTRFS_MAX_EXTENT_SIZE+8191, NULL); 1061 BTRFS_MAX_EXTENT_SIZE + sectorsize,
1062 BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL);
1032 if (ret) { 1063 if (ret) {
1033 test_msg("btrfs_set_extent_delalloc returned %d\n", ret); 1064 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
1034 goto out; 1065 goto out;
@@ -1042,8 +1073,8 @@ static int test_extent_accounting(void)
1042 1073
1043 /* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */ 1074 /* [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize HOLE][BTRFS_MAX_EXTENT_SIZE+sectorsize] */
1044 ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 1075 ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
1045 BTRFS_MAX_EXTENT_SIZE+4096, 1076 BTRFS_MAX_EXTENT_SIZE + sectorsize,
1046 BTRFS_MAX_EXTENT_SIZE+8191, 1077 BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
1047 EXTENT_DIRTY | EXTENT_DELALLOC | 1078 EXTENT_DIRTY | EXTENT_DELALLOC |
1048 EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, 1079 EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
1049 NULL, GFP_KERNEL); 1080 NULL, GFP_KERNEL);
@@ -1063,8 +1094,9 @@ static int test_extent_accounting(void)
1063 * might fail and I'd rather satisfy my paranoia at this point. 1094 * might fail and I'd rather satisfy my paranoia at this point.
1064 */ 1095 */
1065 BTRFS_I(inode)->outstanding_extents++; 1096 BTRFS_I(inode)->outstanding_extents++;
1066 ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096, 1097 ret = btrfs_set_extent_delalloc(inode,
1067 BTRFS_MAX_EXTENT_SIZE+8191, NULL); 1098 BTRFS_MAX_EXTENT_SIZE + sectorsize,
1099 BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL);
1068 if (ret) { 1100 if (ret) {
1069 test_msg("btrfs_set_extent_delalloc returned %d\n", ret); 1101 test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
1070 goto out; 1102 goto out;
@@ -1103,7 +1135,7 @@ out:
1103 return ret; 1135 return ret;
1104} 1136}
1105 1137
1106int btrfs_test_inodes(void) 1138int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
1107{ 1139{
1108 int ret; 1140 int ret;
1109 1141
@@ -1112,13 +1144,13 @@ int btrfs_test_inodes(void)
1112 set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only); 1144 set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only);
1113 1145
1114 test_msg("Running btrfs_get_extent tests\n"); 1146 test_msg("Running btrfs_get_extent tests\n");
1115 ret = test_btrfs_get_extent(); 1147 ret = test_btrfs_get_extent(sectorsize, nodesize);
1116 if (ret) 1148 if (ret)
1117 return ret; 1149 return ret;
1118 test_msg("Running hole first btrfs_get_extent test\n"); 1150 test_msg("Running hole first btrfs_get_extent test\n");
1119 ret = test_hole_first(); 1151 ret = test_hole_first(sectorsize, nodesize);
1120 if (ret) 1152 if (ret)
1121 return ret; 1153 return ret;
1122 test_msg("Running outstanding_extents tests\n"); 1154 test_msg("Running outstanding_extents tests\n");
1123 return test_extent_accounting(); 1155 return test_extent_accounting(sectorsize, nodesize);
1124} 1156}
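
The rewritten diagram at the top of inode-tests.c follows mechanically from setup_file_extents() once the hardcoded 4096s become sectorsize multiples: each boundary is the running sum of the extent lengths. A small sketch that reproduces the first boundaries of that diagram for any geometry (the table below is transcribed from the comment; set s to 64 * 1024 for the ppc64 layout):

#include <stdio.h>

int main(void)
{
	unsigned long long s = 4096; /* sectorsize; try 64 * 1024 */
	/* The diagram resumes at the first sector boundary after the
	 * [0-5] hole and the [5-6] inline extent. */
	unsigned long long off = s;
	const struct { const char *what; unsigned long long len; } ext[] = {
		{ "hole",           4     },
		{ "regular",        s - 1 },
		{ "regular1 split", s     },
		{ "hole",           s     },
		{ "regular1 split", 2 * s },
		{ "prealloc",       s     },
	};
	unsigned i;

	for (i = 0; i < sizeof(ext) / sizeof(ext[0]); i++) {
		/* End offsets are exclusive, as the comment notes. */
		printf("[%llu - %llu] %s\n", off, off + ext[i].len,
		       ext[i].what);
		off += ext[i].len;
	}
	return 0;
}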
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 8aa4ded31326..57a12c0d680b 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -16,6 +16,7 @@
16 * Boston, MA 021110-1307, USA. 16 * Boston, MA 021110-1307, USA.
17 */ 17 */
18 18
19#include <linux/types.h>
19#include "btrfs-tests.h" 20#include "btrfs-tests.h"
20#include "../ctree.h" 21#include "../ctree.h"
21#include "../transaction.h" 22#include "../transaction.h"
@@ -216,7 +217,8 @@ static int remove_extent_ref(struct btrfs_root *root, u64 bytenr,
216 return ret; 217 return ret;
217} 218}
218 219
219static int test_no_shared_qgroup(struct btrfs_root *root) 220static int test_no_shared_qgroup(struct btrfs_root *root,
221 u32 sectorsize, u32 nodesize)
220{ 222{
221 struct btrfs_trans_handle trans; 223 struct btrfs_trans_handle trans;
222 struct btrfs_fs_info *fs_info = root->fs_info; 224 struct btrfs_fs_info *fs_info = root->fs_info;
@@ -227,7 +229,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
227 btrfs_init_dummy_trans(&trans); 229 btrfs_init_dummy_trans(&trans);
228 230
229 test_msg("Qgroup basic add\n"); 231 test_msg("Qgroup basic add\n");
230 ret = btrfs_create_qgroup(NULL, fs_info, 5); 232 ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FS_TREE_OBJECTID);
231 if (ret) { 233 if (ret) {
232 test_msg("Couldn't create a qgroup %d\n", ret); 234 test_msg("Couldn't create a qgroup %d\n", ret);
233 return ret; 235 return ret;
@@ -238,18 +240,19 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
238 * we can only call btrfs_qgroup_account_extent() directly to test 240 * we can only call btrfs_qgroup_account_extent() directly to test
239 * quota. 241 * quota.
240 */ 242 */
241 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); 243 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
242 if (ret) { 244 if (ret) {
243 ulist_free(old_roots); 245 ulist_free(old_roots);
244 test_msg("Couldn't find old roots: %d\n", ret); 246 test_msg("Couldn't find old roots: %d\n", ret);
245 return ret; 247 return ret;
246 } 248 }
247 249
248 ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5); 250 ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
251 BTRFS_FS_TREE_OBJECTID);
249 if (ret) 252 if (ret)
250 return ret; 253 return ret;
251 254
252 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); 255 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
253 if (ret) { 256 if (ret) {
254 ulist_free(old_roots); 257 ulist_free(old_roots);
255 ulist_free(new_roots); 258 ulist_free(new_roots);
@@ -257,32 +260,33 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
257 return ret; 260 return ret;
258 } 261 }
259 262
260 ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, 263 ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
261 old_roots, new_roots); 264 nodesize, old_roots, new_roots);
262 if (ret) { 265 if (ret) {
263 test_msg("Couldn't account space for a qgroup %d\n", ret); 266 test_msg("Couldn't account space for a qgroup %d\n", ret);
264 return ret; 267 return ret;
265 } 268 }
266 269
267 if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) { 270 if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
271 nodesize, nodesize)) {
268 test_msg("Qgroup counts didn't match expected values\n"); 272 test_msg("Qgroup counts didn't match expected values\n");
269 return -EINVAL; 273 return -EINVAL;
270 } 274 }
271 old_roots = NULL; 275 old_roots = NULL;
272 new_roots = NULL; 276 new_roots = NULL;
273 277
274 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); 278 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
275 if (ret) { 279 if (ret) {
276 ulist_free(old_roots); 280 ulist_free(old_roots);
277 test_msg("Couldn't find old roots: %d\n", ret); 281 test_msg("Couldn't find old roots: %d\n", ret);
278 return ret; 282 return ret;
279 } 283 }
280 284
281 ret = remove_extent_item(root, 4096, 4096); 285 ret = remove_extent_item(root, nodesize, nodesize);
282 if (ret) 286 if (ret)
283 return -EINVAL; 287 return -EINVAL;
284 288
285 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); 289 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
286 if (ret) { 290 if (ret) {
287 ulist_free(old_roots); 291 ulist_free(old_roots);
288 ulist_free(new_roots); 292 ulist_free(new_roots);
@@ -290,14 +294,14 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
290 return ret; 294 return ret;
291 } 295 }
292 296
293 ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, 297 ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
294 old_roots, new_roots); 298 nodesize, old_roots, new_roots);
295 if (ret) { 299 if (ret) {
296 test_msg("Couldn't account space for a qgroup %d\n", ret); 300 test_msg("Couldn't account space for a qgroup %d\n", ret);
297 return -EINVAL; 301 return -EINVAL;
298 } 302 }
299 303
300 if (btrfs_verify_qgroup_counts(fs_info, 5, 0, 0)) { 304 if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, 0, 0)) {
301 test_msg("Qgroup counts didn't match expected values\n"); 305 test_msg("Qgroup counts didn't match expected values\n");
302 return -EINVAL; 306 return -EINVAL;
303 } 307 }
@@ -310,7 +314,8 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
310 * right, also remove one of the roots and make sure the exclusive count is 314 * right, also remove one of the roots and make sure the exclusive count is
311 * adjusted properly. 315 * adjusted properly.
312 */ 316 */
313static int test_multiple_refs(struct btrfs_root *root) 317static int test_multiple_refs(struct btrfs_root *root,
318 u32 sectorsize, u32 nodesize)
314{ 319{
315 struct btrfs_trans_handle trans; 320 struct btrfs_trans_handle trans;
316 struct btrfs_fs_info *fs_info = root->fs_info; 321 struct btrfs_fs_info *fs_info = root->fs_info;
@@ -322,25 +327,29 @@ static int test_multiple_refs(struct btrfs_root *root)
322 327
323 test_msg("Qgroup multiple refs test\n"); 328 test_msg("Qgroup multiple refs test\n");
324 329
325 /* We have 5 created already from the previous test */ 330 /*
326 ret = btrfs_create_qgroup(NULL, fs_info, 256); 331 * We have BTRFS_FS_TREE_OBJECTID created already from the
332 * previous test.
333 */
334 ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FIRST_FREE_OBJECTID);
327 if (ret) { 335 if (ret) {
328 test_msg("Couldn't create a qgroup %d\n", ret); 336 test_msg("Couldn't create a qgroup %d\n", ret);
329 return ret; 337 return ret;
330 } 338 }
331 339
332 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); 340 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
333 if (ret) { 341 if (ret) {
334 ulist_free(old_roots); 342 ulist_free(old_roots);
335 test_msg("Couldn't find old roots: %d\n", ret); 343 test_msg("Couldn't find old roots: %d\n", ret);
336 return ret; 344 return ret;
337 } 345 }
338 346
339 ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5); 347 ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
348 BTRFS_FS_TREE_OBJECTID);
340 if (ret) 349 if (ret)
341 return ret; 350 return ret;
342 351
343 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); 352 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
344 if (ret) { 353 if (ret) {
345 ulist_free(old_roots); 354 ulist_free(old_roots);
346 ulist_free(new_roots); 355 ulist_free(new_roots);
@@ -348,30 +357,32 @@ static int test_multiple_refs(struct btrfs_root *root)
348 return ret; 357 return ret;
349 } 358 }
350 359
351 ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, 360 ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
352 old_roots, new_roots); 361 nodesize, old_roots, new_roots);
353 if (ret) { 362 if (ret) {
354 test_msg("Couldn't account space for a qgroup %d\n", ret); 363 test_msg("Couldn't account space for a qgroup %d\n", ret);
355 return ret; 364 return ret;
356 } 365 }
357 366
358 if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) { 367 if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
368 nodesize, nodesize)) {
359 test_msg("Qgroup counts didn't match expected values\n"); 369 test_msg("Qgroup counts didn't match expected values\n");
360 return -EINVAL; 370 return -EINVAL;
361 } 371 }
362 372
363 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); 373 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
364 if (ret) { 374 if (ret) {
365 ulist_free(old_roots); 375 ulist_free(old_roots);
366 test_msg("Couldn't find old roots: %d\n", ret); 376 test_msg("Couldn't find old roots: %d\n", ret);
367 return ret; 377 return ret;
368 } 378 }
369 379
370 ret = add_tree_ref(root, 4096, 4096, 0, 256); 380 ret = add_tree_ref(root, nodesize, nodesize, 0,
381 BTRFS_FIRST_FREE_OBJECTID);
371 if (ret) 382 if (ret)
372 return ret; 383 return ret;
373 384
374 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); 385 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
375 if (ret) { 386 if (ret) {
376 ulist_free(old_roots); 387 ulist_free(old_roots);
377 ulist_free(new_roots); 388 ulist_free(new_roots);
@@ -379,35 +390,38 @@ static int test_multiple_refs(struct btrfs_root *root)
379 return ret; 390 return ret;
380 } 391 }
381 392
382 ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, 393 ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
383 old_roots, new_roots); 394 nodesize, old_roots, new_roots);
384 if (ret) { 395 if (ret) {
385 test_msg("Couldn't account space for a qgroup %d\n", ret); 396 test_msg("Couldn't account space for a qgroup %d\n", ret);
386 return ret; 397 return ret;
387 } 398 }
388 399
389 if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 0)) { 400 if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
401 nodesize, 0)) {
390 test_msg("Qgroup counts didn't match expected values\n"); 402 test_msg("Qgroup counts didn't match expected values\n");
391 return -EINVAL; 403 return -EINVAL;
392 } 404 }
393 405
394 if (btrfs_verify_qgroup_counts(fs_info, 256, 4096, 0)) { 406 if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID,
407 nodesize, 0)) {
395 test_msg("Qgroup counts didn't match expected values\n"); 408 test_msg("Qgroup counts didn't match expected values\n");
396 return -EINVAL; 409 return -EINVAL;
397 } 410 }
398 411
399 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); 412 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
400 if (ret) { 413 if (ret) {
401 ulist_free(old_roots); 414 ulist_free(old_roots);
402 test_msg("Couldn't find old roots: %d\n", ret); 415 test_msg("Couldn't find old roots: %d\n", ret);
403 return ret; 416 return ret;
404 } 417 }
405 418
406 ret = remove_extent_ref(root, 4096, 4096, 0, 256); 419 ret = remove_extent_ref(root, nodesize, nodesize, 0,
420 BTRFS_FIRST_FREE_OBJECTID);
407 if (ret) 421 if (ret)
408 return ret; 422 return ret;
409 423
410 ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); 424 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
411 if (ret) { 425 if (ret) {
412 ulist_free(old_roots); 426 ulist_free(old_roots);
413 ulist_free(new_roots); 427 ulist_free(new_roots);
@@ -415,19 +429,21 @@ static int test_multiple_refs(struct btrfs_root *root)
415 return ret; 429 return ret;
416 } 430 }
417 431
418 ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, 432 ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
419 old_roots, new_roots); 433 nodesize, old_roots, new_roots);
420 if (ret) { 434 if (ret) {
421 test_msg("Couldn't account space for a qgroup %d\n", ret); 435 test_msg("Couldn't account space for a qgroup %d\n", ret);
422 return ret; 436 return ret;
423 } 437 }
424 438
425 if (btrfs_verify_qgroup_counts(fs_info, 256, 0, 0)) { 439 if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID,
440 0, 0)) {
426 test_msg("Qgroup counts didn't match expected values\n"); 441 test_msg("Qgroup counts didn't match expected values\n");
427 return -EINVAL; 442 return -EINVAL;
428 } 443 }
429 444
430 if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) { 445 if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
446 nodesize, nodesize)) {
431 test_msg("Qgroup counts didn't match expected values\n"); 447 test_msg("Qgroup counts didn't match expected values\n");
432 return -EINVAL; 448 return -EINVAL;
433 } 449 }
@@ -435,13 +451,13 @@ static int test_multiple_refs(struct btrfs_root *root)
435 return 0; 451 return 0;
436} 452}
437 453
438int btrfs_test_qgroups(void) 454int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
439{ 455{
440 struct btrfs_root *root; 456 struct btrfs_root *root;
441 struct btrfs_root *tmp_root; 457 struct btrfs_root *tmp_root;
442 int ret = 0; 458 int ret = 0;
443 459
444 root = btrfs_alloc_dummy_root(); 460 root = btrfs_alloc_dummy_root(sectorsize, nodesize);
445 if (IS_ERR(root)) { 461 if (IS_ERR(root)) {
446 test_msg("Couldn't allocate root\n"); 462 test_msg("Couldn't allocate root\n");
447 return PTR_ERR(root); 463 return PTR_ERR(root);
@@ -468,7 +484,8 @@ int btrfs_test_qgroups(void)
468 * Can't use bytenr 0, some things freak out 484 * Can't use bytenr 0, some things freak out
469 * *cough*backref walking code*cough* 485 * *cough*backref walking code*cough*
470 */ 486 */
471 root->node = alloc_test_extent_buffer(root->fs_info, 4096); 487 root->node = alloc_test_extent_buffer(root->fs_info, nodesize,
488 nodesize);
472 if (!root->node) { 489 if (!root->node) {
473 test_msg("Couldn't allocate dummy buffer\n"); 490 test_msg("Couldn't allocate dummy buffer\n");
474 ret = -ENOMEM; 491 ret = -ENOMEM;
@@ -476,16 +493,16 @@ int btrfs_test_qgroups(void)
476 } 493 }
477 btrfs_set_header_level(root->node, 0); 494 btrfs_set_header_level(root->node, 0);
478 btrfs_set_header_nritems(root->node, 0); 495 btrfs_set_header_nritems(root->node, 0);
479 root->alloc_bytenr += 8192; 496 root->alloc_bytenr += 2 * nodesize;
480 497
481 tmp_root = btrfs_alloc_dummy_root(); 498 tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize);
482 if (IS_ERR(tmp_root)) { 499 if (IS_ERR(tmp_root)) {
483 test_msg("Couldn't allocate a fs root\n"); 500 test_msg("Couldn't allocate a fs root\n");
484 ret = PTR_ERR(tmp_root); 501 ret = PTR_ERR(tmp_root);
485 goto out; 502 goto out;
486 } 503 }
487 504
488 tmp_root->root_key.objectid = 5; 505 tmp_root->root_key.objectid = BTRFS_FS_TREE_OBJECTID;
489 root->fs_info->fs_root = tmp_root; 506 root->fs_info->fs_root = tmp_root;
490 ret = btrfs_insert_fs_root(root->fs_info, tmp_root); 507 ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
491 if (ret) { 508 if (ret) {
@@ -493,14 +510,14 @@ int btrfs_test_qgroups(void)
493 goto out; 510 goto out;
494 } 511 }
495 512
496 tmp_root = btrfs_alloc_dummy_root(); 513 tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize);
497 if (IS_ERR(tmp_root)) { 514 if (IS_ERR(tmp_root)) {
498 test_msg("Couldn't allocate a fs root\n"); 515 test_msg("Couldn't allocate a fs root\n");
499 ret = PTR_ERR(tmp_root); 516 ret = PTR_ERR(tmp_root);
500 goto out; 517 goto out;
501 } 518 }
502 519
503 tmp_root->root_key.objectid = 256; 520 tmp_root->root_key.objectid = BTRFS_FIRST_FREE_OBJECTID;
504 ret = btrfs_insert_fs_root(root->fs_info, tmp_root); 521 ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
505 if (ret) { 522 if (ret) {
506 test_msg("Couldn't insert fs root %d\n", ret); 523 test_msg("Couldn't insert fs root %d\n", ret);
@@ -508,10 +525,10 @@ int btrfs_test_qgroups(void)
508 } 525 }
509 526
510 test_msg("Running qgroup tests\n"); 527 test_msg("Running qgroup tests\n");
511 ret = test_no_shared_qgroup(root); 528 ret = test_no_shared_qgroup(root, sectorsize, nodesize);
512 if (ret) 529 if (ret)
513 goto out; 530 goto out;
514 ret = test_multiple_refs(root); 531 ret = test_multiple_refs(root, sectorsize, nodesize);
515out: 532out:
516 btrfs_free_dummy_root(root); 533 btrfs_free_dummy_root(root);
517 return ret; 534 return ret;
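
Each qgroup test above repeats one rhythm: snapshot the set of roots
referencing an extent before a tree change, snapshot again after, feed both
sets to the accounting code, then verify the referenced/exclusive counters.
A hedged harness-level sketch of that flow, with every helper a stand-in for
the kernel function of similar name:

    #include <stdint.h>
    #include <stdio.h>

    static int find_all_roots(uint64_t bytenr, int *roots_out)
    {
        *roots_out = 1;     /* pretend one root references bytenr */
        return 0;
    }

    static int mutate_refs(uint64_t bytenr, uint64_t num_bytes)
    {
        (void)bytenr; (void)num_bytes;
        return 0;           /* e.g. insert_normal_tree_ref() */
    }

    static int account_extent(uint64_t bytenr, uint64_t num_bytes,
                              int old_roots, int new_roots)
    {
        printf("account %llu+%llu: %d -> %d roots\n",
               (unsigned long long)bytenr, (unsigned long long)num_bytes,
               old_roots, new_roots);
        return 0;
    }

    static int verify_counts(uint64_t expect_rfer, uint64_t expect_excl)
    {
        (void)expect_rfer; (void)expect_excl;
        return 0;           /* btrfs_verify_qgroup_counts() stand-in */
    }

    int main(void)
    {
        const uint64_t nodesize = 16384;    /* was a literal 4096 */
        int old_roots, new_roots;

        if (find_all_roots(nodesize, &old_roots))
            return 1;
        if (mutate_refs(nodesize, nodesize))
            return 1;
        if (find_all_roots(nodesize, &new_roots))
            return 1;
        if (account_extent(nodesize, nodesize, old_roots, new_roots))
            return 1;
        return verify_counts(nodesize, nodesize) ? 1 : 0;
    }
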
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index f6e24cb423ae..948aa186b353 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -818,6 +818,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
818{ 818{
819 struct btrfs_transaction *cur_trans = trans->transaction; 819 struct btrfs_transaction *cur_trans = trans->transaction;
820 struct btrfs_fs_info *info = root->fs_info; 820 struct btrfs_fs_info *info = root->fs_info;
821 u64 transid = trans->transid;
821 unsigned long cur = trans->delayed_ref_updates; 822 unsigned long cur = trans->delayed_ref_updates;
822 int lock = (trans->type != TRANS_JOIN_NOLOCK); 823 int lock = (trans->type != TRANS_JOIN_NOLOCK);
823 int err = 0; 824 int err = 0;
@@ -905,7 +906,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
905 906
906 kmem_cache_free(btrfs_trans_handle_cachep, trans); 907 kmem_cache_free(btrfs_trans_handle_cachep, trans);
907 if (must_run_delayed_refs) { 908 if (must_run_delayed_refs) {
908 btrfs_async_run_delayed_refs(root, cur, 909 btrfs_async_run_delayed_refs(root, cur, transid,
909 must_run_delayed_refs == 1); 910 must_run_delayed_refs == 1);
910 } 911 }
911 return err; 912 return err;
@@ -1311,11 +1312,6 @@ int btrfs_defrag_root(struct btrfs_root *root)
1311 return ret; 1312 return ret;
1312} 1313}
1313 1314
1314/* Bisesctability fixup, remove in 4.8 */
1315#ifndef btrfs_std_error
1316#define btrfs_std_error btrfs_handle_fs_error
1317#endif
1318
1319/* 1315/*
1320 * Do all special snapshot related qgroup dirty hack. 1316 * Do all special snapshot related qgroup dirty hack.
1321 * 1317 *
@@ -1385,7 +1381,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
1385 switch_commit_roots(trans->transaction, fs_info); 1381 switch_commit_roots(trans->transaction, fs_info);
1386 ret = btrfs_write_and_wait_transaction(trans, src); 1382 ret = btrfs_write_and_wait_transaction(trans, src);
1387 if (ret) 1383 if (ret)
1388 btrfs_std_error(fs_info, ret, 1384 btrfs_handle_fs_error(fs_info, ret,
1389 "Error while writing out transaction for qgroup"); 1385 "Error while writing out transaction for qgroup");
1390 1386
1391out: 1387out:
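
The __btrfs_end_transaction() hunk copies trans->transid into a local because
btrfs_async_run_delayed_refs() now runs after kmem_cache_free(trans); reading
the handle at that point would be a use-after-free. The pattern in plain C,
with invented types:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct handle {
        uint64_t transid;
        unsigned long delayed_ref_updates;
    };

    static void run_delayed_refs(unsigned long count, uint64_t transid)
    {
        printf("running %lu updates for transaction %llu\n",
               count, (unsigned long long)transid);
    }

    static void end_transaction(struct handle *trans)
    {
        /*
         * Copy out everything needed after the free; touching
         * trans->transid below free() would be a use-after-free.
         */
        uint64_t transid = trans->transid;
        unsigned long cur = trans->delayed_ref_updates;

        free(trans);
        run_delayed_refs(cur, transid);
    }

    int main(void)
    {
        struct handle *trans = malloc(sizeof(*trans));

        if (!trans)
            return 1;
        trans->transid = 42;
        trans->delayed_ref_updates = 8;
        end_transaction(trans);
        return 0;
    }
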
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 9fe0ec2bf0fe..c5abee4f01ad 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -110,7 +110,6 @@ struct btrfs_trans_handle {
110 u64 chunk_bytes_reserved; 110 u64 chunk_bytes_reserved;
111 unsigned long use_count; 111 unsigned long use_count;
112 unsigned long blocks_reserved; 112 unsigned long blocks_reserved;
113 unsigned long blocks_used;
114 unsigned long delayed_ref_updates; 113 unsigned long delayed_ref_updates;
115 struct btrfs_transaction *transaction; 114 struct btrfs_transaction *transaction;
116 struct btrfs_block_rsv *block_rsv; 115 struct btrfs_block_rsv *block_rsv;
@@ -121,6 +120,7 @@ struct btrfs_trans_handle {
121 bool can_flush_pending_bgs; 120 bool can_flush_pending_bgs;
122 bool reloc_reserved; 121 bool reloc_reserved;
123 bool sync; 122 bool sync;
123 bool dirty;
124 unsigned int type; 124 unsigned int type;
125 /* 125 /*
126 * this root is only needed to validate that the root passed to 126 * this root is only needed to validate that the root passed to
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index b7665af471d8..c05f69a8ec42 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2422,8 +2422,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2422 root_owner = btrfs_header_owner(parent); 2422 root_owner = btrfs_header_owner(parent);
2423 2423
2424 next = btrfs_find_create_tree_block(root, bytenr); 2424 next = btrfs_find_create_tree_block(root, bytenr);
2425 if (!next) 2425 if (IS_ERR(next))
2426 return -ENOMEM; 2426 return PTR_ERR(next);
2427 2427
2428 if (*level == 1) { 2428 if (*level == 1) {
2429 ret = wc->process_func(root, next, wc, ptr_gen); 2429 ret = wc->process_func(root, next, wc, ptr_gen);
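
walk_down_log_tree() changes because btrfs_find_create_tree_block() now
reports failure through an ERR_PTR-encoded pointer rather than NULL, so a
!next test would miss real errors. A userspace model of the convention (the
kernel's own helpers live in include/linux/err.h):

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        /* Errors live in the top 4095 values of the address space. */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *find_create_block(int fail)
    {
        static int block;   /* stand-in for a real extent buffer */

        /* Callers must now use IS_ERR(), not a NULL check. */
        return fail ? ERR_PTR(-ENOMEM) : (void *)&block;
    }

    int main(void)
    {
        void *next = find_create_block(1);

        if (IS_ERR(next)) {
            printf("allocation failed: %ld\n", PTR_ERR(next));
            return 1;
        }
        return 0;
    }
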
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index da9e0036a864..589f128173b1 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4241,6 +4241,7 @@ int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4241 if (IS_ERR(uuid_root)) { 4241 if (IS_ERR(uuid_root)) {
4242 ret = PTR_ERR(uuid_root); 4242 ret = PTR_ERR(uuid_root);
4243 btrfs_abort_transaction(trans, tree_root, ret); 4243 btrfs_abort_transaction(trans, tree_root, ret);
4244 btrfs_end_transaction(trans, tree_root);
4244 return ret; 4245 return ret;
4245 } 4246 }
4246 4247
@@ -4693,12 +4694,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4693 4694
4694 if (type & BTRFS_BLOCK_GROUP_RAID5) { 4695 if (type & BTRFS_BLOCK_GROUP_RAID5) {
4695 raid_stripe_len = find_raid56_stripe_len(ndevs - 1, 4696 raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4696 btrfs_super_stripesize(info->super_copy)); 4697 extent_root->stripesize);
4697 data_stripes = num_stripes - 1; 4698 data_stripes = num_stripes - 1;
4698 } 4699 }
4699 if (type & BTRFS_BLOCK_GROUP_RAID6) { 4700 if (type & BTRFS_BLOCK_GROUP_RAID6) {
4700 raid_stripe_len = find_raid56_stripe_len(ndevs - 2, 4701 raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4701 btrfs_super_stripesize(info->super_copy)); 4702 extent_root->stripesize);
4702 data_stripes = num_stripes - 2; 4703 data_stripes = num_stripes - 2;
4703 } 4704 }
4704 4705
@@ -6258,27 +6259,23 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6258 return dev; 6259 return dev;
6259} 6260}
6260 6261
6261static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, 6262/* Return -EIO if any error, otherwise return 0. */
6262 struct extent_buffer *leaf, 6263static int btrfs_check_chunk_valid(struct btrfs_root *root,
6263 struct btrfs_chunk *chunk) 6264 struct extent_buffer *leaf,
6265 struct btrfs_chunk *chunk, u64 logical)
6264{ 6266{
6265 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
6266 struct map_lookup *map;
6267 struct extent_map *em;
6268 u64 logical;
6269 u64 length; 6267 u64 length;
6270 u64 stripe_len; 6268 u64 stripe_len;
6271 u64 devid; 6269 u16 num_stripes;
6272 u8 uuid[BTRFS_UUID_SIZE]; 6270 u16 sub_stripes;
6273 int num_stripes; 6271 u64 type;
6274 int ret;
6275 int i;
6276 6272
6277 logical = key->offset;
6278 length = btrfs_chunk_length(leaf, chunk); 6273 length = btrfs_chunk_length(leaf, chunk);
6279 stripe_len = btrfs_chunk_stripe_len(leaf, chunk); 6274 stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6280 num_stripes = btrfs_chunk_num_stripes(leaf, chunk); 6275 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6281 /* Validation check */ 6276 sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6277 type = btrfs_chunk_type(leaf, chunk);
6278
6282 if (!num_stripes) { 6279 if (!num_stripes) {
6283 btrfs_err(root->fs_info, "invalid chunk num_stripes: %u", 6280 btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
6284 num_stripes); 6281 num_stripes);
@@ -6289,6 +6286,11 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
6289 "invalid chunk logical %llu", logical); 6286 "invalid chunk logical %llu", logical);
6290 return -EIO; 6287 return -EIO;
6291 } 6288 }
6289 if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) {
6290 btrfs_err(root->fs_info, "invalid chunk sectorsize %u",
6291 btrfs_chunk_sector_size(leaf, chunk));
6292 return -EIO;
6293 }
6292 if (!length || !IS_ALIGNED(length, root->sectorsize)) { 6294 if (!length || !IS_ALIGNED(length, root->sectorsize)) {
6293 btrfs_err(root->fs_info, 6295 btrfs_err(root->fs_info,
6294 "invalid chunk length %llu", length); 6296 "invalid chunk length %llu", length);
@@ -6300,13 +6302,54 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
6300 return -EIO; 6302 return -EIO;
6301 } 6303 }
6302 if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) & 6304 if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6303 btrfs_chunk_type(leaf, chunk)) { 6305 type) {
6304 btrfs_err(root->fs_info, "unrecognized chunk type: %llu", 6306 btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
6305 ~(BTRFS_BLOCK_GROUP_TYPE_MASK | 6307 ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
6306 BTRFS_BLOCK_GROUP_PROFILE_MASK) & 6308 BTRFS_BLOCK_GROUP_PROFILE_MASK) &
6307 btrfs_chunk_type(leaf, chunk)); 6309 btrfs_chunk_type(leaf, chunk));
6308 return -EIO; 6310 return -EIO;
6309 } 6311 }
6312 if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
6313 (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
6314 (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
6315 (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
6316 (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
6317 ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
6318 num_stripes != 1)) {
6319 btrfs_err(root->fs_info,
6320 "invalid num_stripes:sub_stripes %u:%u for profile %llu",
6321 num_stripes, sub_stripes,
6322 type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
6323 return -EIO;
6324 }
6325
6326 return 0;
6327}
6328
6329static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
6330 struct extent_buffer *leaf,
6331 struct btrfs_chunk *chunk)
6332{
6333 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
6334 struct map_lookup *map;
6335 struct extent_map *em;
6336 u64 logical;
6337 u64 length;
6338 u64 stripe_len;
6339 u64 devid;
6340 u8 uuid[BTRFS_UUID_SIZE];
6341 int num_stripes;
6342 int ret;
6343 int i;
6344
6345 logical = key->offset;
6346 length = btrfs_chunk_length(leaf, chunk);
6347 stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6348 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6349
6350 ret = btrfs_check_chunk_valid(root, leaf, chunk, logical);
6351 if (ret)
6352 return ret;
6310 6353
6311 read_lock(&map_tree->map_tree.lock); 6354 read_lock(&map_tree->map_tree.lock);
6312 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); 6355 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
@@ -6554,6 +6597,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
6554 u32 array_size; 6597 u32 array_size;
6555 u32 len = 0; 6598 u32 len = 0;
6556 u32 cur_offset; 6599 u32 cur_offset;
6600 u64 type;
6557 struct btrfs_key key; 6601 struct btrfs_key key;
6558 6602
6559 ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize); 6603 ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
@@ -6563,8 +6607,8 @@ int btrfs_read_sys_array(struct btrfs_root *root)
6563 * overallocate but we can keep it as-is, only the first page is used. 6607 * overallocate but we can keep it as-is, only the first page is used.
6564 */ 6608 */
6565 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET); 6609 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
6566 if (!sb) 6610 if (IS_ERR(sb))
6567 return -ENOMEM; 6611 return PTR_ERR(sb);
6568 set_extent_buffer_uptodate(sb); 6612 set_extent_buffer_uptodate(sb);
6569 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0); 6613 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6570 /* 6614 /*
@@ -6620,6 +6664,15 @@ int btrfs_read_sys_array(struct btrfs_root *root)
6620 break; 6664 break;
6621 } 6665 }
6622 6666
6667 type = btrfs_chunk_type(sb, chunk);
6668 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
6669 btrfs_err(root->fs_info,
6670 "invalid chunk type %llu in sys_array at offset %u",
6671 type, cur_offset);
6672 ret = -EIO;
6673 break;
6674 }
6675
6623 len = btrfs_chunk_item_size(num_stripes); 6676 len = btrfs_chunk_item_size(num_stripes);
6624 if (cur_offset + len > array_size) 6677 if (cur_offset + len > array_size)
6625 goto out_short_read; 6678 goto out_short_read;
@@ -6638,12 +6691,14 @@ int btrfs_read_sys_array(struct btrfs_root *root)
6638 sb_array_offset += len; 6691 sb_array_offset += len;
6639 cur_offset += len; 6692 cur_offset += len;
6640 } 6693 }
6694 clear_extent_buffer_uptodate(sb);
6641 free_extent_buffer_stale(sb); 6695 free_extent_buffer_stale(sb);
6642 return ret; 6696 return ret;
6643 6697
6644out_short_read: 6698out_short_read:
6645 printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n", 6699 printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
6646 len, cur_offset); 6700 len, cur_offset);
6701 clear_extent_buffer_uptodate(sb);
6647 free_extent_buffer_stale(sb); 6702 free_extent_buffer_stale(sb);
6648 return -EIO; 6703 return -EIO;
6649} 6704}
@@ -6656,6 +6711,7 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
6656 struct btrfs_key found_key; 6711 struct btrfs_key found_key;
6657 int ret; 6712 int ret;
6658 int slot; 6713 int slot;
6714 u64 total_dev = 0;
6659 6715
6660 root = root->fs_info->chunk_root; 6716 root = root->fs_info->chunk_root;
6661 6717
@@ -6697,6 +6753,7 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
6697 ret = read_one_dev(root, leaf, dev_item); 6753 ret = read_one_dev(root, leaf, dev_item);
6698 if (ret) 6754 if (ret)
6699 goto error; 6755 goto error;
6756 total_dev++;
6700 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { 6757 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
6701 struct btrfs_chunk *chunk; 6758 struct btrfs_chunk *chunk;
6702 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 6759 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
@@ -6706,6 +6763,28 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
6706 } 6763 }
6707 path->slots[0]++; 6764 path->slots[0]++;
6708 } 6765 }
6766
6767 /*
6768 * After loading chunk tree, we've got all device information,
6769 * do another round of validation checks.
6770 */
6771 if (total_dev != root->fs_info->fs_devices->total_devices) {
6772 btrfs_err(root->fs_info,
6773 "super_num_devices %llu mismatch with num_devices %llu found here",
6774 btrfs_super_num_devices(root->fs_info->super_copy),
6775 total_dev);
6776 ret = -EINVAL;
6777 goto error;
6778 }
6779 if (btrfs_super_total_bytes(root->fs_info->super_copy) <
6780 root->fs_info->fs_devices->total_rw_bytes) {
6781 btrfs_err(root->fs_info,
6782 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
6783 btrfs_super_total_bytes(root->fs_info->super_copy),
6784 root->fs_info->fs_devices->total_rw_bytes);
6785 ret = -EINVAL;
6786 goto error;
6787 }
6709 ret = 0; 6788 ret = 0;
6710error: 6789error:
6711 unlock_chunks(root); 6790 unlock_chunks(root);
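
btrfs_check_chunk_valid() pulls the scattered sanity checks of
read_one_chunk() into one place and adds per-profile stripe-count rules. The
rule set can be exercised standalone; the profile bits below are illustrative
placeholders, not the kernel's actual mask values:

    #include <stdint.h>
    #include <stdio.h>
    #include <errno.h>

    /* Illustrative profile bits; the kernel defines its own values. */
    #define GROUP_RAID1  (1u << 0)
    #define GROUP_DUP    (1u << 1)
    #define GROUP_RAID10 (1u << 2)
    #define GROUP_RAID5  (1u << 3)
    #define GROUP_RAID6  (1u << 4)
    #define PROFILE_MASK (GROUP_RAID1 | GROUP_DUP | GROUP_RAID10 | \
                          GROUP_RAID5 | GROUP_RAID6)

    static int check_chunk_stripes(uint64_t type, uint16_t num_stripes,
                                   uint16_t sub_stripes)
    {
        /* Same rule table the hunk above introduces. */
        if ((type & GROUP_RAID10 && sub_stripes != 2) ||
            (type & GROUP_RAID1 && num_stripes < 1) ||
            (type & GROUP_RAID5 && num_stripes < 2) ||
            (type & GROUP_RAID6 && num_stripes < 3) ||
            (type & GROUP_DUP && num_stripes > 2) ||
            ((type & PROFILE_MASK) == 0 && num_stripes != 1)) {
            fprintf(stderr,
                    "invalid num_stripes:sub_stripes %u:%u for profile %llu\n",
                    (unsigned)num_stripes, (unsigned)sub_stripes,
                    (unsigned long long)(type & PROFILE_MASK));
            return -EIO;
        }
        return 0;
    }

    int main(void)
    {
        /* A SINGLE chunk (no profile bit) must have exactly one stripe. */
        return check_chunk_stripes(0, 2, 0) ? 1 : 0;
    }
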
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 6e72c98162d5..1780218a48f0 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -95,10 +95,8 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
95 } 95 }
96 96
97 dentry = d_obtain_alias(inode); 97 dentry = d_obtain_alias(inode);
98 if (IS_ERR(dentry)) { 98 if (IS_ERR(dentry))
99 iput(inode);
100 return dentry; 99 return dentry;
101 }
102 err = ceph_init_dentry(dentry); 100 err = ceph_init_dentry(dentry);
103 if (err < 0) { 101 if (err < 0) {
104 dput(dentry); 102 dput(dentry);
@@ -167,10 +165,8 @@ static struct dentry *__get_parent(struct super_block *sb,
167 return ERR_PTR(-ENOENT); 165 return ERR_PTR(-ENOENT);
168 166
169 dentry = d_obtain_alias(inode); 167 dentry = d_obtain_alias(inode);
170 if (IS_ERR(dentry)) { 168 if (IS_ERR(dentry))
171 iput(inode);
172 return dentry; 169 return dentry;
173 }
174 err = ceph_init_dentry(dentry); 170 err = ceph_init_dentry(dentry);
175 if (err < 0) { 171 if (err < 0) {
176 dput(dentry); 172 dput(dentry);
@@ -210,7 +206,7 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
210 206
211 dout("fh_to_parent %llx\n", cfh->parent_ino); 207 dout("fh_to_parent %llx\n", cfh->parent_ino);
212 dentry = __get_parent(sb, NULL, cfh->ino); 208 dentry = __get_parent(sb, NULL, cfh->ino);
213 if (IS_ERR(dentry) && PTR_ERR(dentry) == -ENOENT) 209 if (unlikely(dentry == ERR_PTR(-ENOENT)))
214 dentry = __fh_to_dentry(sb, cfh->parent_ino); 210 dentry = __fh_to_dentry(sb, cfh->parent_ino);
215 return dentry; 211 return dentry;
216} 212}
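
The ceph/export.c fix removes an iput() because d_obtain_alias() already
drops the inode reference when it returns an error pointer; calling iput()
again in the caller was a double put. The general contract, modeled with a
toy refcount (obtain_alias() is a stand-in and returns NULL instead of an
error pointer):

    #include <stdio.h>

    struct inode { int count; };
    struct dentry;

    static void iput(struct inode *in)
    {
        if (--in->count == 0)
            printf("inode freed\n");
    }

    /*
     * Like d_obtain_alias(): consumes the inode reference in every
     * case, including failure.
     */
    static struct dentry *obtain_alias(struct inode *in, int fail)
    {
        if (fail) {
            iput(in);                       /* callee drops the ref */
            return NULL;                    /* models an ERR_PTR return */
        }
        return (struct dentry *)(void *)in; /* ref moves into the dentry */
    }

    int main(void)
    {
        struct inode in = { .count = 1 };
        struct dentry *d = obtain_alias(&in, 1);

        if (!d)
            return 1;   /* no iput() here: that would be a double put */
        return 0;
    }
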
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 5a53ac6b1e02..02b071bf3732 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -101,6 +101,12 @@ convert_sfm_char(const __u16 src_char, char *target)
101 case SFM_SLASH: 101 case SFM_SLASH:
102 *target = '\\'; 102 *target = '\\';
103 break; 103 break;
104 case SFM_SPACE:
105 *target = ' ';
106 break;
107 case SFM_PERIOD:
108 *target = '.';
109 break;
104 default: 110 default:
105 return false; 111 return false;
106 } 112 }
@@ -404,7 +410,7 @@ static __le16 convert_to_sfu_char(char src_char)
404 return dest_char; 410 return dest_char;
405} 411}
406 412
407static __le16 convert_to_sfm_char(char src_char) 413static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
408{ 414{
409 __le16 dest_char; 415 __le16 dest_char;
410 416
@@ -427,6 +433,18 @@ static __le16 convert_to_sfm_char(char src_char)
427 case '|': 433 case '|':
428 dest_char = cpu_to_le16(SFM_PIPE); 434 dest_char = cpu_to_le16(SFM_PIPE);
429 break; 435 break;
436 case '.':
437 if (end_of_string)
438 dest_char = cpu_to_le16(SFM_PERIOD);
439 else
440 dest_char = 0;
441 break;
442 case ' ':
443 if (end_of_string)
444 dest_char = cpu_to_le16(SFM_SPACE);
445 else
446 dest_char = 0;
447 break;
430 default: 448 default:
431 dest_char = 0; 449 dest_char = 0;
432 } 450 }
@@ -469,9 +487,16 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
469 /* see if we must remap this char */ 487 /* see if we must remap this char */
470 if (map_chars == SFU_MAP_UNI_RSVD) 488 if (map_chars == SFU_MAP_UNI_RSVD)
471 dst_char = convert_to_sfu_char(src_char); 489 dst_char = convert_to_sfu_char(src_char);
472 else if (map_chars == SFM_MAP_UNI_RSVD) 490 else if (map_chars == SFM_MAP_UNI_RSVD) {
473 dst_char = convert_to_sfm_char(src_char); 491 bool end_of_string;
474 else 492
493 if (i == srclen - 1)
494 end_of_string = true;
495 else
496 end_of_string = false;
497
498 dst_char = convert_to_sfm_char(src_char, end_of_string);
499 } else
475 dst_char = 0; 500 dst_char = 0;
476 /* 501 /*
477 * FIXME: We can not handle remapping backslash (UNI_SLASH) 502 * FIXME: We can not handle remapping backslash (UNI_SLASH)
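
The convert_to_sfm_char() change remaps '.' and ' ' only when they end the
name, since SMB servers strip trailing dots and spaces; interior occurrences
return 0, meaning no remap. A compilable sketch of the decision, using the
SFM code points added in the cifs_unicode.h hunk just below:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SFM_PERIOD 0xF028
    #define SFM_SPACE  0xF029

    /* Returns the SFM code point, or 0 when no remap is needed. */
    static uint16_t sfm_remap(char c, int end_of_string)
    {
        switch (c) {
        case '.':
            return end_of_string ? SFM_PERIOD : 0;
        case ' ':
            return end_of_string ? SFM_SPACE : 0;
        default:
            return 0;
        }
    }

    int main(void)
    {
        const char *name = "notes.";
        size_t len = strlen(name);

        for (size_t i = 0; i < len; i++) {
            uint16_t mapped = sfm_remap(name[i], i == len - 1);

            if (mapped)
                printf("pos %zu: '%c' -> U+%04X\n", i, name[i],
                       (unsigned)mapped);
        }
        return 0;
    }
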
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index bdc52cb9a676..479bc0a941f3 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -64,6 +64,8 @@
64#define SFM_LESSTHAN ((__u16) 0xF023) 64#define SFM_LESSTHAN ((__u16) 0xF023)
65#define SFM_PIPE ((__u16) 0xF027) 65#define SFM_PIPE ((__u16) 0xF027)
66#define SFM_SLASH ((__u16) 0xF026) 66#define SFM_SLASH ((__u16) 0xF026)
67#define SFM_PERIOD ((__u16) 0xF028)
68#define SFM_SPACE ((__u16) 0xF029)
67 69
68/* 70/*
69 * Mapping mechanism to use when one of the seven reserved characters is 71 * Mapping mechanism to use when one of the seven reserved characters is
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 5d8b7edf8a8f..5d841f39c4b7 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -87,6 +87,7 @@ extern mempool_t *cifs_req_poolp;
87extern mempool_t *cifs_mid_poolp; 87extern mempool_t *cifs_mid_poolp;
88 88
89struct workqueue_struct *cifsiod_wq; 89struct workqueue_struct *cifsiod_wq;
90__u32 cifs_lock_secret;
90 91
91/* 92/*
92 * Bumps refcount for cifs super block. 93 * Bumps refcount for cifs super block.
@@ -1266,6 +1267,8 @@ init_cifs(void)
1266 spin_lock_init(&cifs_file_list_lock); 1267 spin_lock_init(&cifs_file_list_lock);
1267 spin_lock_init(&GlobalMid_Lock); 1268 spin_lock_init(&GlobalMid_Lock);
1268 1269
1270 get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret));
1271
1269 if (cifs_max_pending < 2) { 1272 if (cifs_max_pending < 2) {
1270 cifs_max_pending = 2; 1273 cifs_max_pending = 2;
1271 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n"); 1274 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index bba106cdc43c..8f1d8c1e72be 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1619,6 +1619,7 @@ void cifs_oplock_break(struct work_struct *work);
1619 1619
1620extern const struct slow_work_ops cifs_oplock_break_ops; 1620extern const struct slow_work_ops cifs_oplock_break_ops;
1621extern struct workqueue_struct *cifsiod_wq; 1621extern struct workqueue_struct *cifsiod_wq;
1622extern __u32 cifs_lock_secret;
1622 1623
1623extern mempool_t *cifs_mid_poolp; 1624extern mempool_t *cifs_mid_poolp;
1624 1625
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 66736f57b5ab..7d2b15c06090 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -428,7 +428,9 @@ cifs_echo_request(struct work_struct *work)
428 * server->ops->need_neg() == true. Also, no need to ping if 428 * server->ops->need_neg() == true. Also, no need to ping if
429 * we got a response recently. 429 * we got a response recently.
430 */ 430 */
431 if (!server->ops->need_neg || server->ops->need_neg(server) || 431
432 if (server->tcpStatus == CifsNeedReconnect ||
433 server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
432 (server->ops->can_echo && !server->ops->can_echo(server)) || 434 (server->ops->can_echo && !server->ops->can_echo(server)) ||
433 time_before(jiffies, server->lstrp + echo_interval - HZ)) 435 time_before(jiffies, server->lstrp + echo_interval - HZ))
434 goto requeue_echo; 436 goto requeue_echo;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 9793ae0bcaa2..d4890b6dc22d 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1112,6 +1112,12 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1112 return rc; 1112 return rc;
1113} 1113}
1114 1114
1115static __u32
1116hash_lockowner(fl_owner_t owner)
1117{
1118 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1119}
1120
1115struct lock_to_push { 1121struct lock_to_push {
1116 struct list_head llist; 1122 struct list_head llist;
1117 __u64 offset; 1123 __u64 offset;
@@ -1178,7 +1184,7 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
1178 else 1184 else
1179 type = CIFS_WRLCK; 1185 type = CIFS_WRLCK;
1180 lck = list_entry(el, struct lock_to_push, llist); 1186 lck = list_entry(el, struct lock_to_push, llist);
1181 lck->pid = flock->fl_pid; 1187 lck->pid = hash_lockowner(flock->fl_owner);
1182 lck->netfid = cfile->fid.netfid; 1188 lck->netfid = cfile->fid.netfid;
1183 lck->length = length; 1189 lck->length = length;
1184 lck->type = type; 1190 lck->type = type;
@@ -1305,7 +1311,8 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
1305 posix_lock_type = CIFS_RDLCK; 1311 posix_lock_type = CIFS_RDLCK;
1306 else 1312 else
1307 posix_lock_type = CIFS_WRLCK; 1313 posix_lock_type = CIFS_WRLCK;
1308 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid, 1314 rc = CIFSSMBPosixLock(xid, tcon, netfid,
1315 hash_lockowner(flock->fl_owner),
1309 flock->fl_start, length, flock, 1316 flock->fl_start, length, flock,
1310 posix_lock_type, wait_flag); 1317 posix_lock_type, wait_flag);
1311 return rc; 1318 return rc;
@@ -1505,7 +1512,8 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
1505 posix_lock_type = CIFS_UNLCK; 1512 posix_lock_type = CIFS_UNLCK;
1506 1513
1507 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid, 1514 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1508 current->tgid, flock->fl_start, length, 1515 hash_lockowner(flock->fl_owner),
1516 flock->fl_start, length,
1509 NULL, posix_lock_type, wait_flag); 1517 NULL, posix_lock_type, wait_flag);
1510 goto out; 1518 goto out;
1511 } 1519 }
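
These file.c hunks stop putting raw pids or pointer-derived values on the
wire as POSIX lock owners; the owner is hashed with cifs_lock_secret, a
per-boot random value initialized in the cifsfs.c hunk above, so kernel
pointers never leak to the server. A userspace model (this hash32_ptr is a
simplified stand-in for the kernel helper of the same name):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    static uint32_t lock_secret;

    /* Simplified stand-in for the kernel's hash32_ptr(). */
    static uint32_t hash32_ptr(const void *ptr)
    {
        uint64_t v = (uint64_t)(uintptr_t)ptr;

        return (uint32_t)(v ^ (v >> 32));
    }

    static uint32_t hash_lockowner(const void *owner)
    {
        /* XOR with a boot-time secret so the raw pointer never leaks. */
        return lock_secret ^ hash32_ptr(owner);
    }

    int main(void)
    {
        int owner;  /* any stable per-owner address works */

        /* The kernel seeds this with get_random_bytes() at init. */
        srand((unsigned)time(NULL));
        lock_secret = (uint32_t)rand();

        printf("wire lock owner: %08x\n", (unsigned)hash_lockowner(&owner));
        return 0;
    }
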
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 848249fa120f..3079b38f0afb 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -133,6 +133,6 @@ typedef struct _AUTHENTICATE_MESSAGE {
133 133
134int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses); 134int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
135void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, struct cifs_ses *ses); 135void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, struct cifs_ses *ses);
136int build_ntlmssp_auth_blob(unsigned char *pbuffer, u16 *buflen, 136int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
137 struct cifs_ses *ses, 137 struct cifs_ses *ses,
138 const struct nls_table *nls_cp); 138 const struct nls_table *nls_cp);
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index af0ec2d5ad0e..538d9b55699a 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -364,19 +364,43 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
364 sec_blob->DomainName.MaximumLength = 0; 364 sec_blob->DomainName.MaximumLength = 0;
365} 365}
366 366
367/* We do not malloc the blob, it is passed in pbuffer, because its 367static int size_of_ntlmssp_blob(struct cifs_ses *ses)
368 maximum possible size is fixed and small, making this approach cleaner. 368{
369 This function returns the length of the data in the blob */ 369 int sz = sizeof(AUTHENTICATE_MESSAGE) + ses->auth_key.len
370int build_ntlmssp_auth_blob(unsigned char *pbuffer, 370 - CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
371
372 if (ses->domainName)
373 sz += 2 * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
374 else
375 sz += 2;
376
377 if (ses->user_name)
378 sz += 2 * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
379 else
380 sz += 2;
381
382 return sz;
383}
384
385int build_ntlmssp_auth_blob(unsigned char **pbuffer,
371 u16 *buflen, 386 u16 *buflen,
372 struct cifs_ses *ses, 387 struct cifs_ses *ses,
373 const struct nls_table *nls_cp) 388 const struct nls_table *nls_cp)
374{ 389{
375 int rc; 390 int rc;
376 AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer; 391 AUTHENTICATE_MESSAGE *sec_blob;
377 __u32 flags; 392 __u32 flags;
378 unsigned char *tmp; 393 unsigned char *tmp;
379 394
395 rc = setup_ntlmv2_rsp(ses, nls_cp);
396 if (rc) {
397 cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
398 *buflen = 0;
399 goto setup_ntlmv2_ret;
400 }
401 *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
402 sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
403
380 memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); 404 memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
381 sec_blob->MessageType = NtLmAuthenticate; 405 sec_blob->MessageType = NtLmAuthenticate;
382 406
@@ -391,7 +415,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
391 flags |= NTLMSSP_NEGOTIATE_KEY_XCH; 415 flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
392 } 416 }
393 417
394 tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE); 418 tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
395 sec_blob->NegotiateFlags = cpu_to_le32(flags); 419 sec_blob->NegotiateFlags = cpu_to_le32(flags);
396 420
397 sec_blob->LmChallengeResponse.BufferOffset = 421 sec_blob->LmChallengeResponse.BufferOffset =
@@ -399,13 +423,9 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
399 sec_blob->LmChallengeResponse.Length = 0; 423 sec_blob->LmChallengeResponse.Length = 0;
400 sec_blob->LmChallengeResponse.MaximumLength = 0; 424 sec_blob->LmChallengeResponse.MaximumLength = 0;
401 425
402 sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer); 426 sec_blob->NtChallengeResponse.BufferOffset =
427 cpu_to_le32(tmp - *pbuffer);
403 if (ses->user_name != NULL) { 428 if (ses->user_name != NULL) {
404 rc = setup_ntlmv2_rsp(ses, nls_cp);
405 if (rc) {
406 cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
407 goto setup_ntlmv2_ret;
408 }
409 memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE, 429 memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
410 ses->auth_key.len - CIFS_SESS_KEY_SIZE); 430 ses->auth_key.len - CIFS_SESS_KEY_SIZE);
411 tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE; 431 tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
@@ -423,23 +443,23 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
423 } 443 }
424 444
425 if (ses->domainName == NULL) { 445 if (ses->domainName == NULL) {
426 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); 446 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
427 sec_blob->DomainName.Length = 0; 447 sec_blob->DomainName.Length = 0;
428 sec_blob->DomainName.MaximumLength = 0; 448 sec_blob->DomainName.MaximumLength = 0;
429 tmp += 2; 449 tmp += 2;
430 } else { 450 } else {
431 int len; 451 int len;
432 len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName, 452 len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
433 CIFS_MAX_USERNAME_LEN, nls_cp); 453 CIFS_MAX_DOMAINNAME_LEN, nls_cp);
434 len *= 2; /* unicode is 2 bytes each */ 454 len *= 2; /* unicode is 2 bytes each */
435 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); 455 sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
436 sec_blob->DomainName.Length = cpu_to_le16(len); 456 sec_blob->DomainName.Length = cpu_to_le16(len);
437 sec_blob->DomainName.MaximumLength = cpu_to_le16(len); 457 sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
438 tmp += len; 458 tmp += len;
439 } 459 }
440 460
441 if (ses->user_name == NULL) { 461 if (ses->user_name == NULL) {
442 sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); 462 sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
443 sec_blob->UserName.Length = 0; 463 sec_blob->UserName.Length = 0;
444 sec_blob->UserName.MaximumLength = 0; 464 sec_blob->UserName.MaximumLength = 0;
445 tmp += 2; 465 tmp += 2;
@@ -448,13 +468,13 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
448 len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name, 468 len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
449 CIFS_MAX_USERNAME_LEN, nls_cp); 469 CIFS_MAX_USERNAME_LEN, nls_cp);
450 len *= 2; /* unicode is 2 bytes each */ 470 len *= 2; /* unicode is 2 bytes each */
451 sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); 471 sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
452 sec_blob->UserName.Length = cpu_to_le16(len); 472 sec_blob->UserName.Length = cpu_to_le16(len);
453 sec_blob->UserName.MaximumLength = cpu_to_le16(len); 473 sec_blob->UserName.MaximumLength = cpu_to_le16(len);
454 tmp += len; 474 tmp += len;
455 } 475 }
456 476
457 sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - pbuffer); 477 sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
458 sec_blob->WorkstationName.Length = 0; 478 sec_blob->WorkstationName.Length = 0;
459 sec_blob->WorkstationName.MaximumLength = 0; 479 sec_blob->WorkstationName.MaximumLength = 0;
460 tmp += 2; 480 tmp += 2;
@@ -463,19 +483,19 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
463 (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC)) 483 (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
464 && !calc_seckey(ses)) { 484 && !calc_seckey(ses)) {
465 memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE); 485 memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
466 sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer); 486 sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
467 sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE); 487 sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
468 sec_blob->SessionKey.MaximumLength = 488 sec_blob->SessionKey.MaximumLength =
469 cpu_to_le16(CIFS_CPHTXT_SIZE); 489 cpu_to_le16(CIFS_CPHTXT_SIZE);
470 tmp += CIFS_CPHTXT_SIZE; 490 tmp += CIFS_CPHTXT_SIZE;
471 } else { 491 } else {
472 sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer); 492 sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
473 sec_blob->SessionKey.Length = 0; 493 sec_blob->SessionKey.Length = 0;
474 sec_blob->SessionKey.MaximumLength = 0; 494 sec_blob->SessionKey.MaximumLength = 0;
475 } 495 }
476 496
497 *buflen = tmp - *pbuffer;
477setup_ntlmv2_ret: 498setup_ntlmv2_ret:
478 *buflen = tmp - pbuffer;
479 return rc; 499 return rc;
480} 500}
481 501
@@ -690,6 +710,8 @@ sess_auth_lanman(struct sess_data *sess_data)
690 rc = calc_lanman_hash(ses->password, ses->server->cryptkey, 710 rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
691 ses->server->sec_mode & SECMODE_PW_ENCRYPT ? 711 ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
692 true : false, lnm_session_key); 712 true : false, lnm_session_key);
713 if (rc)
714 goto out;
693 715
694 memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE); 716 memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
695 bcc_ptr += CIFS_AUTH_RESP_SIZE; 717 bcc_ptr += CIFS_AUTH_RESP_SIZE;
@@ -1266,7 +1288,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
1266 struct cifs_ses *ses = sess_data->ses; 1288 struct cifs_ses *ses = sess_data->ses;
1267 __u16 bytes_remaining; 1289 __u16 bytes_remaining;
1268 char *bcc_ptr; 1290 char *bcc_ptr;
1269 char *ntlmsspblob = NULL; 1291 unsigned char *ntlmsspblob = NULL;
1270 u16 blob_len; 1292 u16 blob_len;
1271 1293
1272 cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n"); 1294 cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n");
@@ -1279,19 +1301,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
1279 /* Build security blob before we assemble the request */ 1301 /* Build security blob before we assemble the request */
1280 pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; 1302 pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
1281 smb_buf = (struct smb_hdr *)pSMB; 1303 smb_buf = (struct smb_hdr *)pSMB;
1282 /* 1304 rc = build_ntlmssp_auth_blob(&ntlmsspblob,
1283 * 5 is an empirical value, large enough to hold
1284 * authenticate message plus max 10 of av paris,
1285 * domain, user, workstation names, flags, etc.
1286 */
1287 ntlmsspblob = kzalloc(5*sizeof(struct _AUTHENTICATE_MESSAGE),
1288 GFP_KERNEL);
1289 if (!ntlmsspblob) {
1290 rc = -ENOMEM;
1291 goto out;
1292 }
1293
1294 rc = build_ntlmssp_auth_blob(ntlmsspblob,
1295 &blob_len, ses, sess_data->nls_cp); 1305 &blob_len, ses, sess_data->nls_cp);
1296 if (rc) 1306 if (rc)
1297 goto out_free_ntlmsspblob; 1307 goto out_free_ntlmsspblob;
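
In sess.c the magic-sized caller buffer (5 * sizeof(AUTHENTICATE_MESSAGE))
is replaced by size_of_ntlmssp_blob(), which totals exactly what the message
will carry: the fixed header and key material, plus each name at two bytes
per character in UTF-16, or a 2-byte placeholder when absent. The
size-then-fill shape, with illustrative constants; note this sketch also
checks the allocation, as any caller of kmalloc must:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define HDR_SIZE  64    /* illustrative fixed header size */
    #define KEY_EXTRA 18    /* illustrative key/ciphertext overhead */

    static size_t blob_size(const char *domain, const char *user)
    {
        size_t sz = HDR_SIZE + KEY_EXTRA;

        /* Names go out as UTF-16: two bytes per character. */
        sz += domain ? 2 * strlen(domain) : 2;
        sz += user ? 2 * strlen(user) : 2;
        return sz;
    }

    static unsigned char *build_blob(const char *domain, const char *user,
                                     uint16_t *buflen)
    {
        size_t sz = blob_size(domain, user);
        unsigned char *buf = calloc(1, sz);

        if (!buf) {             /* allocation can fail; report it */
            *buflen = 0;
            return NULL;
        }
        /* fill header, names, and session key here */
        *buflen = (uint16_t)sz;
        return buf;
    }

    int main(void)
    {
        uint16_t len;
        unsigned char *blob = build_blob("WORKGROUP", "guest", &len);

        if (!blob)
            return 1;
        printf("blob is %u bytes\n", (unsigned)len);
        free(blob);
        return 0;
    }
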
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 8f38e33d365b..29e06db5f187 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -588,7 +588,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
588 u16 blob_length = 0; 588 u16 blob_length = 0;
589 struct key *spnego_key = NULL; 589 struct key *spnego_key = NULL;
590 char *security_blob = NULL; 590 char *security_blob = NULL;
591 char *ntlmssp_blob = NULL; 591 unsigned char *ntlmssp_blob = NULL;
592 bool use_spnego = false; /* else use raw ntlmssp */ 592 bool use_spnego = false; /* else use raw ntlmssp */
593 593
594 cifs_dbg(FYI, "Session Setup\n"); 594 cifs_dbg(FYI, "Session Setup\n");
@@ -713,13 +713,7 @@ ssetup_ntlmssp_authenticate:
713 iov[1].iov_len = blob_length; 713 iov[1].iov_len = blob_length;
714 } else if (phase == NtLmAuthenticate) { 714 } else if (phase == NtLmAuthenticate) {
715 req->hdr.SessionId = ses->Suid; 715 req->hdr.SessionId = ses->Suid;
716 ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500, 716 rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
717 GFP_KERNEL);
718 if (ntlmssp_blob == NULL) {
719 rc = -ENOMEM;
720 goto ssetup_exit;
721 }
722 rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
723 nls_cp); 717 nls_cp);
724 if (rc) { 718 if (rc) {
725 cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", 719 cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
@@ -1818,6 +1812,33 @@ SMB2_echo(struct TCP_Server_Info *server)
1818 1812
1819 cifs_dbg(FYI, "In echo request\n"); 1813 cifs_dbg(FYI, "In echo request\n");
1820 1814
1815 if (server->tcpStatus == CifsNeedNegotiate) {
1816 struct list_head *tmp, *tmp2;
1817 struct cifs_ses *ses;
1818 struct cifs_tcon *tcon;
1819
1820 cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
1821 spin_lock(&cifs_tcp_ses_lock);
1822 list_for_each(tmp, &server->smb_ses_list) {
1823 ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
1824 list_for_each(tmp2, &ses->tcon_list) {
1825 tcon = list_entry(tmp2, struct cifs_tcon,
1826 tcon_list);
1827 /* add check for persistent handle reconnect */
1828 if (tcon && tcon->need_reconnect) {
1829 spin_unlock(&cifs_tcp_ses_lock);
1830 rc = smb2_reconnect(SMB2_ECHO, tcon);
1831 spin_lock(&cifs_tcp_ses_lock);
1832 }
1833 }
1834 }
1835 spin_unlock(&cifs_tcp_ses_lock);
1836 }
1837
1838 /* if no session, renegotiate failed above */
1839 if (server->tcpStatus == CifsNeedNegotiate)
1840 return -EIO;
1841
1821 rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req); 1842 rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
1822 if (rc) 1843 if (rc)
1823 return rc; 1844 return rc;
diff --git a/fs/dax.c b/fs/dax.c
index 761495bf5eb9..e207f8f9b700 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -208,7 +208,12 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
208 dax.addr += first; 208 dax.addr += first;
209 size = map_len - first; 209 size = map_len - first;
210 } 210 }
211 max = min(pos + size, end); 211 /*
212 * pos + size is one past the last offset for IO,
213 * so pos + size can overflow loff_t at extreme offsets.
214 * Cast to u64 to catch this and get the true minimum.
215 */
216 max = min_t(u64, pos + size, end);
212 } 217 }
213 218
214 if (iov_iter_rw(iter) == WRITE) { 219 if (iov_iter_rw(iter) == WRITE) {
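
The dax.c comment says it compactly: pos + size is one past the last IO
offset, and as a signed loff_t that sum can wrap negative at extreme offsets,
making min() pick the wrapped value. Comparing as u64 restores the true
minimum. A self-contained demonstration; the wrap is modeled explicitly here,
since plain C signed overflow is undefined while the kernel builds with
-fno-strict-overflow:

    #include <stdint.h>
    #include <stdio.h>

    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        int64_t pos = INT64_MAX - 10;   /* near the top of loff_t */
        int64_t size = 100;             /* pos + size wraps negative */
        int64_t end = INT64_MAX;

        /* Deterministic model of the signed wraparound. */
        int64_t wrapped = (int64_t)((uint64_t)pos + (uint64_t)size);
        int64_t broken = wrapped < end ? wrapped : end;
        uint64_t fixed = min_t(uint64_t, (uint64_t)pos + (uint64_t)size, end);

        printf("signed min picks %lld (wrapped)\n", (long long)broken);
        printf("u64 min picks %llu (== end)\n", (unsigned long long)fixed);
        return 0;
    }
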
diff --git a/fs/dcache.c b/fs/dcache.c
index 817c243c1ff1..d6847d7b123d 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -507,6 +507,44 @@ void d_drop(struct dentry *dentry)
507} 507}
508EXPORT_SYMBOL(d_drop); 508EXPORT_SYMBOL(d_drop);
509 509
510static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
511{
512 struct dentry *next;
513 /*
514 * Inform d_walk() and shrink_dentry_list() that we are no longer
515 * attached to the dentry tree
516 */
517 dentry->d_flags |= DCACHE_DENTRY_KILLED;
518 if (unlikely(list_empty(&dentry->d_child)))
519 return;
520 __list_del_entry(&dentry->d_child);
521 /*
522 * Cursors can move around the list of children. While we'd been
523 * a normal list member, it didn't matter - ->d_child.next would've
524 * been updated. However, from now on it won't be and for the
525 * things like d_walk() it might end up with a nasty surprise.
526 * Normally d_walk() doesn't care about cursors moving around -
527 * ->d_lock on parent prevents that and since a cursor has no children
528 * of its own, we get through it without ever unlocking the parent.
529 * There is one exception, though - if we ascend from a child that
530 * gets killed as soon as we unlock it, the next sibling is found
531 * using the value left in its ->d_child.next. And if _that_
532 * pointed to a cursor, and cursor got moved (e.g. by lseek())
533 * before d_walk() regains parent->d_lock, we'll end up skipping
534 * everything the cursor had been moved past.
535 *
536 * Solution: make sure that the pointer left behind in ->d_child.next
537 * points to something that won't be moving around. I.e. skip the
538 * cursors.
539 */
540 while (dentry->d_child.next != &parent->d_subdirs) {
541 next = list_entry(dentry->d_child.next, struct dentry, d_child);
542 if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
543 break;
544 dentry->d_child.next = next->d_child.next;
545 }
546}
547
510static void __dentry_kill(struct dentry *dentry) 548static void __dentry_kill(struct dentry *dentry)
511{ 549{
512 struct dentry *parent = NULL; 550 struct dentry *parent = NULL;
@@ -532,12 +570,7 @@ static void __dentry_kill(struct dentry *dentry)
532 } 570 }
533 /* if it was on the hash then remove it */ 571 /* if it was on the hash then remove it */
534 __d_drop(dentry); 572 __d_drop(dentry);
535 __list_del_entry(&dentry->d_child); 573 dentry_unlist(dentry, parent);
536 /*
537 * Inform d_walk() that we are no longer attached to the
538 * dentry tree
539 */
540 dentry->d_flags |= DCACHE_DENTRY_KILLED;
541 if (parent) 574 if (parent)
542 spin_unlock(&parent->d_lock); 575 spin_unlock(&parent->d_lock);
543 dentry_iput(dentry); 576 dentry_iput(dentry);
@@ -1203,6 +1236,9 @@ resume:
1203 struct dentry *dentry = list_entry(tmp, struct dentry, d_child); 1236 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
1204 next = tmp->next; 1237 next = tmp->next;
1205 1238
1239 if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
1240 continue;
1241
1206 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 1242 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1207 1243
1208 ret = enter(data, dentry); 1244 ret = enter(data, dentry);
@@ -1651,6 +1687,16 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1651} 1687}
1652EXPORT_SYMBOL(d_alloc); 1688EXPORT_SYMBOL(d_alloc);
1653 1689
1690struct dentry *d_alloc_cursor(struct dentry * parent)
1691{
1692 struct dentry *dentry = __d_alloc(parent->d_sb, NULL);
1693 if (dentry) {
1694 dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
1695 dentry->d_parent = dget(parent);
1696 }
1697 return dentry;
1698}
1699
1654/** 1700/**
1655 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems) 1701 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1656 * @sb: the superblock 1702 * @sb: the superblock
@@ -2457,7 +2503,6 @@ retry:
2457 rcu_read_unlock(); 2503 rcu_read_unlock();
2458 goto retry; 2504 goto retry;
2459 } 2505 }
2460 rcu_read_unlock();
2461 /* 2506 /*
2462 * No changes for the parent since the beginning of d_lookup(). 2507 * No changes for the parent since the beginning of d_lookup().
2463 * Since all removals from the chain happen with hlist_bl_lock(), 2508 * Since all removals from the chain happen with hlist_bl_lock(),
@@ -2470,8 +2515,6 @@ retry:
2470 continue; 2515 continue;
2471 if (dentry->d_parent != parent) 2516 if (dentry->d_parent != parent)
2472 continue; 2517 continue;
2473 if (d_unhashed(dentry))
2474 continue;
2475 if (parent->d_flags & DCACHE_OP_COMPARE) { 2518 if (parent->d_flags & DCACHE_OP_COMPARE) {
2476 int tlen = dentry->d_name.len; 2519 int tlen = dentry->d_name.len;
2477 const char *tname = dentry->d_name.name; 2520 const char *tname = dentry->d_name.name;
@@ -2483,9 +2526,18 @@ retry:
2483 if (dentry_cmp(dentry, str, len)) 2526 if (dentry_cmp(dentry, str, len))
2484 continue; 2527 continue;
2485 } 2528 }
2486 dget(dentry);
2487 hlist_bl_unlock(b); 2529 hlist_bl_unlock(b);
2488 /* somebody is doing lookup for it right now; wait for it */ 2530 /* now we can try to grab a reference */
2531 if (!lockref_get_not_dead(&dentry->d_lockref)) {
2532 rcu_read_unlock();
2533 goto retry;
2534 }
2535
2536 rcu_read_unlock();
2537 /*
2538 * somebody is likely to be still doing lookup for it;
2539 * wait for them to finish
2540 */
2489 spin_lock(&dentry->d_lock); 2541 spin_lock(&dentry->d_lock);
2490 d_wait_lookup(dentry); 2542 d_wait_lookup(dentry);
2491 /* 2543 /*
@@ -2516,6 +2568,7 @@ retry:
2516 dput(new); 2568 dput(new);
2517 return dentry; 2569 return dentry;
2518 } 2570 }
2571 rcu_read_unlock();
2519 /* we can't take ->d_lock here; it's OK, though. */ 2572 /* we can't take ->d_lock here; it's OK, though. */
2520 new->d_flags |= DCACHE_PAR_LOOKUP; 2573 new->d_flags |= DCACHE_PAR_LOOKUP;
2521 new->d_wait = wq; 2574 new->d_wait = wq;
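
The d_alloc_parallel() hunks above replace the unconditional dget() with lockref_get_not_dead() under rcu_read_lock(), retrying the whole lookup when the candidate dentry is already dead. The shape of that take-a-reference-or-restart loop, modeled with C11 atomics (illustrative only; the real lockref pairs the count with a spinlock and marks death with a special count value):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct obj {
        atomic_int count;   /* < 0 means the object is dead */
    };

    /* Take a reference unless the object has already been killed. */
    static bool get_not_dead(struct obj *o)
    {
        int c = atomic_load_explicit(&o->count, memory_order_relaxed);
        while (c >= 0) {
            if (atomic_compare_exchange_weak(&o->count, &c, c + 1))
                return true;   /* reference taken */
        }
        return false;          /* dead: caller must redo the lookup */
    }

    int main(void)
    {
        struct obj live = { 1 }, dead = { -128 };
        printf("live: %d\n", get_not_dead(&live));  /* 1 */
        printf("dead: %d\n", get_not_dead(&dead));  /* 0 -> goto retry */
        return 0;
    }
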
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 9c1c9a01b7e5..592059f88e04 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -127,7 +127,6 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
127 r = real_fops->open(inode, filp); 127 r = real_fops->open(inode, filp);
128 128
129out: 129out:
130 fops_put(real_fops);
131 debugfs_use_file_finish(srcu_idx); 130 debugfs_use_file_finish(srcu_idx);
132 return r; 131 return r;
133} 132}
@@ -262,8 +261,10 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
262 261
263 if (real_fops->open) { 262 if (real_fops->open) {
264 r = real_fops->open(inode, filp); 263 r = real_fops->open(inode, filp);
265 264 if (r) {
266 if (filp->f_op != proxy_fops) { 265 replace_fops(filp, d_inode(dentry)->i_fop);
266 goto free_proxy;
267 } else if (filp->f_op != proxy_fops) {
267 /* No protection against file removal anymore. */ 268 /* No protection against file removal anymore. */
268 WARN(1, "debugfs file owner replaced proxy fops: %pd", 269 WARN(1, "debugfs file owner replaced proxy fops: %pd",
269 dentry); 270 dentry);
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
index 866bb18efefe..e818f5ac7a26 100644
--- a/fs/ecryptfs/kthread.c
+++ b/fs/ecryptfs/kthread.c
@@ -25,6 +25,7 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/wait.h> 26#include <linux/wait.h>
27#include <linux/mount.h> 27#include <linux/mount.h>
28#include <linux/file.h>
28#include "ecryptfs_kernel.h" 29#include "ecryptfs_kernel.h"
29 30
30struct ecryptfs_open_req { 31struct ecryptfs_open_req {
@@ -147,7 +148,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
147 flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR; 148 flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
148 (*lower_file) = dentry_open(&req.path, flags, cred); 149 (*lower_file) = dentry_open(&req.path, flags, cred);
149 if (!IS_ERR(*lower_file)) 150 if (!IS_ERR(*lower_file))
150 goto out; 151 goto have_file;
151 if ((flags & O_ACCMODE) == O_RDONLY) { 152 if ((flags & O_ACCMODE) == O_RDONLY) {
152 rc = PTR_ERR((*lower_file)); 153 rc = PTR_ERR((*lower_file));
153 goto out; 154 goto out;
@@ -165,8 +166,16 @@ int ecryptfs_privileged_open(struct file **lower_file,
165 mutex_unlock(&ecryptfs_kthread_ctl.mux); 166 mutex_unlock(&ecryptfs_kthread_ctl.mux);
166 wake_up(&ecryptfs_kthread_ctl.wait); 167 wake_up(&ecryptfs_kthread_ctl.wait);
167 wait_for_completion(&req.done); 168 wait_for_completion(&req.done);
168 if (IS_ERR(*lower_file)) 169 if (IS_ERR(*lower_file)) {
169 rc = PTR_ERR(*lower_file); 170 rc = PTR_ERR(*lower_file);
171 goto out;
172 }
173have_file:
174 if ((*lower_file)->f_op->mmap == NULL) {
175 fput(*lower_file);
176 *lower_file = NULL;
177 rc = -EMEDIUMTYPE;
178 }
170out: 179out:
171 return rc; 180 return rc;
172} 181}
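
The new have_file: path in ecryptfs_privileged_open() checks the lower file's f_op->mmap after the open succeeds by either route, and on a miss drops the file and fails with -EMEDIUMTYPE. Probing an ops table for a required capability after acquiring the resource, in a small sketch (struct and helper names invented; EMEDIUMTYPE is the Linux errno used above):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct file_ops {
        int (*mmap)(void);      /* NULL if the backend cannot mmap */
    };

    struct file {
        const struct file_ops *f_op;
    };

    static void put_file(struct file *f) { free(f); }

    static int open_checked(const struct file_ops *ops, struct file **out)
    {
        struct file *f = malloc(sizeof(*f));
        if (!f)
            return -ENOMEM;
        f->f_op = ops;
        if (!f->f_op->mmap) {   /* required capability missing */
            put_file(f);        /* drop what we just acquired */
            *out = NULL;
            return -EMEDIUMTYPE;
        }
        *out = f;
        return 0;
    }

    static int fake_mmap(void) { return 0; }

    int main(void)
    {
        static const struct file_ops good = { fake_mmap }, bad = { NULL };
        struct file *f;
        printf("good: %d\n", open_checked(&good, &f));   /* 0 */
        if (f)
            put_file(f);
        printf("bad:  %d\n", open_checked(&bad, &f));    /* -EMEDIUMTYPE */
        return 0;
    }
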
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index ccd4971cc6c1..264f07c7754e 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -341,8 +341,10 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
341 struct dentry *newent; 341 struct dentry *newent;
342 bool outarg_valid = true; 342 bool outarg_valid = true;
343 343
344 fuse_lock_inode(dir);
344 err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name, 345 err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
345 &outarg, &inode); 346 &outarg, &inode);
347 fuse_unlock_inode(dir);
346 if (err == -ENOENT) { 348 if (err == -ENOENT) {
347 outarg_valid = false; 349 outarg_valid = false;
348 err = 0; 350 err = 0;
@@ -1341,7 +1343,9 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx)
1341 fuse_read_fill(req, file, ctx->pos, PAGE_SIZE, 1343 fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
1342 FUSE_READDIR); 1344 FUSE_READDIR);
1343 } 1345 }
1346 fuse_lock_inode(inode);
1344 fuse_request_send(fc, req); 1347 fuse_request_send(fc, req);
1348 fuse_unlock_inode(inode);
1345 nbytes = req->out.args[0].size; 1349 nbytes = req->out.args[0].size;
1346 err = req->out.h.error; 1350 err = req->out.h.error;
1347 fuse_put_request(fc, req); 1351 fuse_put_request(fc, req);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index eddbe02c4028..929c383432b0 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -110,6 +110,9 @@ struct fuse_inode {
110 110
111 /** Miscellaneous bits describing inode state */ 111 /** Miscellaneous bits describing inode state */
112 unsigned long state; 112 unsigned long state;
113
114 /** Lock for serializing lookup and readdir for backward compatibility */
115 struct mutex mutex;
113}; 116};
114 117
115/** FUSE inode state bits */ 118/** FUSE inode state bits */
@@ -540,6 +543,9 @@ struct fuse_conn {
540 /** write-back cache policy (default is write-through) */ 543 /** write-back cache policy (default is write-through) */
541 unsigned writeback_cache:1; 544 unsigned writeback_cache:1;
542 545
546 /** allow parallel lookups and readdir (default is serialized) */
547 unsigned parallel_dirops:1;
548
543 /* 549 /*
544 * The following bitfields are only for optimization purposes 550 * The following bitfields are only for optimization purposes
545 * and hence races in setting them will not cause malfunction 551 * and hence races in setting them will not cause malfunction
@@ -956,4 +962,7 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
956 962
957void fuse_set_initialized(struct fuse_conn *fc); 963void fuse_set_initialized(struct fuse_conn *fc);
958 964
965void fuse_unlock_inode(struct inode *inode);
966void fuse_lock_inode(struct inode *inode);
967
959#endif /* _FS_FUSE_I_H */ 968#endif /* _FS_FUSE_I_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 1ce67668a8e1..9961d8432ce3 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -97,6 +97,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
97 INIT_LIST_HEAD(&fi->queued_writes); 97 INIT_LIST_HEAD(&fi->queued_writes);
98 INIT_LIST_HEAD(&fi->writepages); 98 INIT_LIST_HEAD(&fi->writepages);
99 init_waitqueue_head(&fi->page_waitq); 99 init_waitqueue_head(&fi->page_waitq);
100 mutex_init(&fi->mutex);
100 fi->forget = fuse_alloc_forget(); 101 fi->forget = fuse_alloc_forget();
101 if (!fi->forget) { 102 if (!fi->forget) {
102 kmem_cache_free(fuse_inode_cachep, inode); 103 kmem_cache_free(fuse_inode_cachep, inode);
@@ -117,6 +118,7 @@ static void fuse_destroy_inode(struct inode *inode)
117 struct fuse_inode *fi = get_fuse_inode(inode); 118 struct fuse_inode *fi = get_fuse_inode(inode);
118 BUG_ON(!list_empty(&fi->write_files)); 119 BUG_ON(!list_empty(&fi->write_files));
119 BUG_ON(!list_empty(&fi->queued_writes)); 120 BUG_ON(!list_empty(&fi->queued_writes));
121 mutex_destroy(&fi->mutex);
120 kfree(fi->forget); 122 kfree(fi->forget);
121 call_rcu(&inode->i_rcu, fuse_i_callback); 123 call_rcu(&inode->i_rcu, fuse_i_callback);
122} 124}
@@ -351,6 +353,18 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
351 return 0; 353 return 0;
352} 354}
353 355
356void fuse_lock_inode(struct inode *inode)
357{
358 if (!get_fuse_conn(inode)->parallel_dirops)
359 mutex_lock(&get_fuse_inode(inode)->mutex);
360}
361
362void fuse_unlock_inode(struct inode *inode)
363{
364 if (!get_fuse_conn(inode)->parallel_dirops)
365 mutex_unlock(&get_fuse_inode(inode)->mutex);
366}
367
354static void fuse_umount_begin(struct super_block *sb) 368static void fuse_umount_begin(struct super_block *sb)
355{ 369{
356 fuse_abort_conn(get_fuse_conn_super(sb)); 370 fuse_abort_conn(get_fuse_conn_super(sb));
@@ -898,6 +912,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
898 fc->async_dio = 1; 912 fc->async_dio = 1;
899 if (arg->flags & FUSE_WRITEBACK_CACHE) 913 if (arg->flags & FUSE_WRITEBACK_CACHE)
900 fc->writeback_cache = 1; 914 fc->writeback_cache = 1;
915 if (arg->flags & FUSE_PARALLEL_DIROPS)
916 fc->parallel_dirops = 1;
901 if (arg->time_gran && arg->time_gran <= 1000000000) 917 if (arg->time_gran && arg->time_gran <= 1000000000)
902 fc->sb->s_time_gran = arg->time_gran; 918 fc->sb->s_time_gran = arg->time_gran;
903 } else { 919 } else {
@@ -928,7 +944,8 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
928 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | 944 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
929 FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA | 945 FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
930 FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO | 946 FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
931 FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT; 947 FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
948 FUSE_PARALLEL_DIROPS;
932 req->in.h.opcode = FUSE_INIT; 949 req->in.h.opcode = FUSE_INIT;
933 req->in.numargs = 1; 950 req->in.numargs = 1;
934 req->in.args[0].size = sizeof(*arg); 951 req->in.args[0].size = sizeof(*arg);
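
fuse_lock_inode()/fuse_unlock_inode() take the new per-inode mutex only when the server did not negotiate FUSE_PARALLEL_DIROPS, so old servers keep the serialized lookup/readdir behavior while new ones run them in parallel. A pthread sketch of a capability-gated lock (names invented; as in FUSE, the flag must be settled before the first lock/unlock pair, here by the INIT reply):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct conn {
        bool parallel_dirops;   /* fixed once at init-reply time */
    };

    struct dir {
        struct conn *conn;
        pthread_mutex_t mutex;  /* serializes lookup vs readdir if needed */
    };

    static void dir_lock(struct dir *d)
    {
        if (!d->conn->parallel_dirops)
            pthread_mutex_lock(&d->mutex);
    }

    static void dir_unlock(struct dir *d)
    {
        if (!d->conn->parallel_dirops)
            pthread_mutex_unlock(&d->mutex);
    }

    int main(void)
    {
        struct conn legacy = { false };
        struct dir d = { &legacy, PTHREAD_MUTEX_INITIALIZER };

        dir_lock(&d);                   /* legacy server: really locks */
        printf("serialized section\n");
        dir_unlock(&d);

        legacy.parallel_dirops = true;  /* negotiated: becomes a no-op */
        dir_lock(&d);
        printf("parallel section\n");
        dir_unlock(&d);
        return 0;
    }
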
diff --git a/fs/internal.h b/fs/internal.h
index b71deeecea17..f57ced528cde 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -130,6 +130,7 @@ extern int invalidate_inodes(struct super_block *, bool);
130extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); 130extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
131extern int d_set_mounted(struct dentry *dentry); 131extern int d_set_mounted(struct dentry *dentry);
132extern long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc); 132extern long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc);
133extern struct dentry *d_alloc_cursor(struct dentry *);
133 134
134/* 135/*
135 * read_write.c 136 * read_write.c
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index b31852f76f46..e3ca4b4cac84 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2329,18 +2329,10 @@ void *jbd2_alloc(size_t size, gfp_t flags)
2329 2329
2330 BUG_ON(size & (size-1)); /* Must be a power of 2 */ 2330 BUG_ON(size & (size-1)); /* Must be a power of 2 */
2331 2331
2332 flags |= __GFP_REPEAT; 2332 if (size < PAGE_SIZE)
2333 if (size == PAGE_SIZE)
2334 ptr = (void *)__get_free_pages(flags, 0);
2335 else if (size > PAGE_SIZE) {
2336 int order = get_order(size);
2337
2338 if (order < 3)
2339 ptr = (void *)__get_free_pages(flags, order);
2340 else
2341 ptr = vmalloc(size);
2342 } else
2343 ptr = kmem_cache_alloc(get_slab(size), flags); 2333 ptr = kmem_cache_alloc(get_slab(size), flags);
2334 else
2335 ptr = (void *)__get_free_pages(flags, get_order(size));
2344 2336
2345 /* Check alignment; SLUB has gotten this wrong in the past, 2337 /* Check alignment; SLUB has gotten this wrong in the past,
2346 * and this can lead to user data corruption! */ 2338 * and this can lead to user data corruption! */
@@ -2351,20 +2343,10 @@ void *jbd2_alloc(size_t size, gfp_t flags)
2351 2343
2352void jbd2_free(void *ptr, size_t size) 2344void jbd2_free(void *ptr, size_t size)
2353{ 2345{
2354 if (size == PAGE_SIZE) { 2346 if (size < PAGE_SIZE)
2355 free_pages((unsigned long)ptr, 0); 2347 kmem_cache_free(get_slab(size), ptr);
2356 return; 2348 else
2357 } 2349 free_pages((unsigned long)ptr, get_order(size));
2358 if (size > PAGE_SIZE) {
2359 int order = get_order(size);
2360
2361 if (order < 3)
2362 free_pages((unsigned long)ptr, order);
2363 else
2364 vfree(ptr);
2365 return;
2366 }
2367 kmem_cache_free(get_slab(size), ptr);
2368}; 2350};
2369 2351
2370/* 2352/*
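
The jbd2_alloc()/jbd2_free() rewrite drops __GFP_REPEAT and the vmalloc fallback, leaving one size test: slab for sub-page sizes, page allocator (rounded up via get_order()) for everything else, with free mirroring exactly the same test. A user-space analogue of keeping the alloc and free class decisions in lockstep (4 KiB page assumed; malloc stands in for the slab):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL

    static long slab_live, page_live;   /* which class each buffer is in */

    static void *buf_alloc(size_t size)
    {
        if (size < PAGE_SIZE) {
            slab_live++;
            return malloc(size);                  /* "slab" class */
        }
        page_live++;
        return aligned_alloc(PAGE_SIZE, size);    /* whole-pages class */
    }

    static void buf_free(void *p, size_t size)
    {
        if (size < PAGE_SIZE)   /* must be the same test as buf_alloc() */
            slab_live--;
        else
            page_live--;
        free(p);
    }

    int main(void)
    {
        void *a = buf_alloc(512), *b = buf_alloc(2 * PAGE_SIZE);
        buf_free(a, 512);
        buf_free(b, 2 * PAGE_SIZE);
        printf("live: slab=%ld pages=%ld\n", slab_live, page_live); /* 0 0 */
        return 0;
    }
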
diff --git a/fs/libfs.c b/fs/libfs.c
index 3db2721144c2..74dc8b9e7f53 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -71,9 +71,7 @@ EXPORT_SYMBOL(simple_lookup);
71 71
72int dcache_dir_open(struct inode *inode, struct file *file) 72int dcache_dir_open(struct inode *inode, struct file *file)
73{ 73{
74 static struct qstr cursor_name = QSTR_INIT(".", 1); 74 file->private_data = d_alloc_cursor(file->f_path.dentry);
75
76 file->private_data = d_alloc(file->f_path.dentry, &cursor_name);
77 75
78 return file->private_data ? 0 : -ENOMEM; 76 return file->private_data ? 0 : -ENOMEM;
79} 77}
@@ -86,6 +84,61 @@ int dcache_dir_close(struct inode *inode, struct file *file)
86} 84}
87EXPORT_SYMBOL(dcache_dir_close); 85EXPORT_SYMBOL(dcache_dir_close);
88 86
87/* parent is locked at least shared */
88static struct dentry *next_positive(struct dentry *parent,
89 struct list_head *from,
90 int count)
91{
92 unsigned *seq = &parent->d_inode->i_dir_seq, n;
93 struct dentry *res;
94 struct list_head *p;
95 bool skipped;
96 int i;
97
98retry:
99 i = count;
100 skipped = false;
101 n = smp_load_acquire(seq) & ~1;
102 res = NULL;
103 rcu_read_lock();
104 for (p = from->next; p != &parent->d_subdirs; p = p->next) {
105 struct dentry *d = list_entry(p, struct dentry, d_child);
106 if (!simple_positive(d)) {
107 skipped = true;
108 } else if (!--i) {
109 res = d;
110 break;
111 }
112 }
113 rcu_read_unlock();
114 if (skipped) {
115 smp_rmb();
116 if (unlikely(*seq != n))
117 goto retry;
118 }
119 return res;
120}
121
122static void move_cursor(struct dentry *cursor, struct list_head *after)
123{
124 struct dentry *parent = cursor->d_parent;
125 unsigned n, *seq = &parent->d_inode->i_dir_seq;
126 spin_lock(&parent->d_lock);
127 for (;;) {
128 n = *seq;
129 if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
130 break;
131 cpu_relax();
132 }
133 __list_del(cursor->d_child.prev, cursor->d_child.next);
134 if (after)
135 list_add(&cursor->d_child, after);
136 else
137 list_add_tail(&cursor->d_child, &parent->d_subdirs);
138 smp_store_release(seq, n + 2);
139 spin_unlock(&parent->d_lock);
140}
141
89loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) 142loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
90{ 143{
91 struct dentry *dentry = file->f_path.dentry; 144 struct dentry *dentry = file->f_path.dentry;
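
next_positive() and move_cursor() above build a hand-rolled sequence counter on i_dir_seq: the writer flips it odd with cmpxchg while relinking the list and releases it back even (+2), while lockless readers take an even snapshot with an acquire load and retry if it moved. The same protocol in portable C11 atomics (a sketch of the idea, not the kernel primitives; the plain read of the shared data is tolerated here as it is in the kernel):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint seq;     /* even = stable, odd = write in progress */
    static int shared_value;    /* data protected by the sequence counter */

    static void writer_update(int v)
    {
        unsigned n;
        do {                    /* wait until even, then claim it: n -> n+1 */
            n = atomic_load_explicit(&seq, memory_order_relaxed);
        } while ((n & 1) || !atomic_compare_exchange_weak(&seq, &n, n + 1));

        shared_value = v;       /* mutate under the odd counter */
        atomic_store_explicit(&seq, n + 2, memory_order_release);
    }

    static int reader_read(void)
    {
        unsigned n;
        int v;
        do {
            n = atomic_load_explicit(&seq, memory_order_acquire) & ~1u;
            v = shared_value;   /* speculative read, validated below */
            atomic_thread_fence(memory_order_acquire);  /* like smp_rmb() */
        } while (atomic_load_explicit(&seq, memory_order_relaxed) != n);
        return v;
    }

    int main(void)
    {
        writer_update(42);
        printf("%d\n", reader_read());   /* 42 */
        return 0;
    }
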
@@ -101,25 +154,14 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
101 if (offset != file->f_pos) { 154 if (offset != file->f_pos) {
102 file->f_pos = offset; 155 file->f_pos = offset;
103 if (file->f_pos >= 2) { 156 if (file->f_pos >= 2) {
104 struct list_head *p;
105 struct dentry *cursor = file->private_data; 157 struct dentry *cursor = file->private_data;
158 struct dentry *to;
106 loff_t n = file->f_pos - 2; 159 loff_t n = file->f_pos - 2;
107 160
108 spin_lock(&dentry->d_lock); 161 inode_lock_shared(dentry->d_inode);
109 /* d_lock not required for cursor */ 162 to = next_positive(dentry, &dentry->d_subdirs, n);
110 list_del(&cursor->d_child); 163 move_cursor(cursor, to ? &to->d_child : NULL);
111 p = dentry->d_subdirs.next; 164 inode_unlock_shared(dentry->d_inode);
112 while (n && p != &dentry->d_subdirs) {
113 struct dentry *next;
114 next = list_entry(p, struct dentry, d_child);
115 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
116 if (simple_positive(next))
117 n--;
118 spin_unlock(&next->d_lock);
119 p = p->next;
120 }
121 list_add_tail(&cursor->d_child, p);
122 spin_unlock(&dentry->d_lock);
123 } 165 }
124 } 166 }
125 return offset; 167 return offset;
@@ -142,36 +184,25 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
142{ 184{
143 struct dentry *dentry = file->f_path.dentry; 185 struct dentry *dentry = file->f_path.dentry;
144 struct dentry *cursor = file->private_data; 186 struct dentry *cursor = file->private_data;
145 struct list_head *p, *q = &cursor->d_child; 187 struct list_head *p = &cursor->d_child;
188 struct dentry *next;
189 bool moved = false;
146 190
147 if (!dir_emit_dots(file, ctx)) 191 if (!dir_emit_dots(file, ctx))
148 return 0; 192 return 0;
149 spin_lock(&dentry->d_lock);
150 if (ctx->pos == 2)
151 list_move(q, &dentry->d_subdirs);
152 193
153 for (p = q->next; p != &dentry->d_subdirs; p = p->next) { 194 if (ctx->pos == 2)
154 struct dentry *next = list_entry(p, struct dentry, d_child); 195 p = &dentry->d_subdirs;
155 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); 196 while ((next = next_positive(dentry, p, 1)) != NULL) {
156 if (!simple_positive(next)) {
157 spin_unlock(&next->d_lock);
158 continue;
159 }
160
161 spin_unlock(&next->d_lock);
162 spin_unlock(&dentry->d_lock);
163 if (!dir_emit(ctx, next->d_name.name, next->d_name.len, 197 if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
164 d_inode(next)->i_ino, dt_type(d_inode(next)))) 198 d_inode(next)->i_ino, dt_type(d_inode(next))))
165 return 0; 199 break;
166 spin_lock(&dentry->d_lock); 200 moved = true;
167 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); 201 p = &next->d_child;
168 /* next is still alive */
169 list_move(q, p);
170 spin_unlock(&next->d_lock);
171 p = q;
172 ctx->pos++; 202 ctx->pos++;
173 } 203 }
174 spin_unlock(&dentry->d_lock); 204 if (moved)
205 move_cursor(cursor, p);
175 return 0; 206 return 0;
176} 207}
177EXPORT_SYMBOL(dcache_readdir); 208EXPORT_SYMBOL(dcache_readdir);
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 154a107cd376..fc4084ef4736 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -335,12 +335,17 @@ static struct notifier_block lockd_inet6addr_notifier = {
335}; 335};
336#endif 336#endif
337 337
338static void lockd_svc_exit_thread(void) 338static void lockd_unregister_notifiers(void)
339{ 339{
340 unregister_inetaddr_notifier(&lockd_inetaddr_notifier); 340 unregister_inetaddr_notifier(&lockd_inetaddr_notifier);
341#if IS_ENABLED(CONFIG_IPV6) 341#if IS_ENABLED(CONFIG_IPV6)
342 unregister_inet6addr_notifier(&lockd_inet6addr_notifier); 342 unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
343#endif 343#endif
344}
345
346static void lockd_svc_exit_thread(void)
347{
348 lockd_unregister_notifiers();
344 svc_exit_thread(nlmsvc_rqst); 349 svc_exit_thread(nlmsvc_rqst);
345} 350}
346 351
@@ -462,7 +467,7 @@ int lockd_up(struct net *net)
462 * Note: svc_serv structures have an initial use count of 1, 467 * Note: svc_serv structures have an initial use count of 1,
463 * so we exit through here on both success and failure. 468 * so we exit through here on both success and failure.
464 */ 469 */
465err_net: 470err_put:
466 svc_destroy(serv); 471 svc_destroy(serv);
467err_create: 472err_create:
468 mutex_unlock(&nlmsvc_mutex); 473 mutex_unlock(&nlmsvc_mutex);
@@ -470,7 +475,9 @@ err_create:
470 475
471err_start: 476err_start:
472 lockd_down_net(serv, net); 477 lockd_down_net(serv, net);
473 goto err_net; 478err_net:
479 lockd_unregister_notifiers();
480 goto err_put;
474} 481}
475EXPORT_SYMBOL_GPL(lockd_up); 482EXPORT_SYMBOL_GPL(lockd_up);
476 483
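
The lockd_up() fix adds an err_net: label so the failure path unregisters the address notifiers before falling through to the shared svc_destroy() cleanup. The underlying goto-unwind idiom, with teardown in reverse order of setup (hypothetical resource names):

    #include <stdio.h>

    static int register_notifiers(void)   { puts("register notifiers");   return 0; }
    static void unregister_notifiers(void){ puts("unregister notifiers"); }
    static int create_service(void)       { puts("create service");       return 0; }
    static void put_service(void)         { puts("put service");          }
    static int start_service(void)        { puts("start service");        return -1; }

    static int bring_up(void)
    {
        int err;

        err = register_notifiers();
        if (err)
            goto out;
        err = create_service();
        if (err)
            goto err_notifiers;
        err = start_service();
        if (err)
            goto err_service;
        return 0;

    err_service:            /* undo in reverse order of acquisition */
        put_service();
    err_notifiers:
        unregister_notifiers();
    out:
        return err;
    }

    int main(void)
    {
        printf("bring_up() = %d\n", bring_up());
        return 0;
    }
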
diff --git a/fs/locks.c b/fs/locks.c
index 7c5f91be9b65..ee1b15f6fc13 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1628,7 +1628,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
1628{ 1628{
1629 struct file_lock *fl, *my_fl = NULL, *lease; 1629 struct file_lock *fl, *my_fl = NULL, *lease;
1630 struct dentry *dentry = filp->f_path.dentry; 1630 struct dentry *dentry = filp->f_path.dentry;
1631 struct inode *inode = dentry->d_inode; 1631 struct inode *inode = file_inode(filp);
1632 struct file_lock_context *ctx; 1632 struct file_lock_context *ctx;
1633 bool is_deleg = (*flp)->fl_flags & FL_DELEG; 1633 bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1634 int error; 1634 int error;
diff --git a/fs/namespace.c b/fs/namespace.c
index a7ec92c051f5..419f746d851d 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1562,6 +1562,7 @@ void __detach_mounts(struct dentry *dentry)
1562 goto out_unlock; 1562 goto out_unlock;
1563 1563
1564 lock_mount_hash(); 1564 lock_mount_hash();
1565 event++;
1565 while (!hlist_empty(&mp->m_list)) { 1566 while (!hlist_empty(&mp->m_list)) {
1566 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); 1567 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
1567 if (mnt->mnt.mnt_flags & MNT_UMOUNT) { 1568 if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
@@ -3247,6 +3248,10 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
3247 if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC) 3248 if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC)
3248 mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC); 3249 mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC);
3249 3250
3251 /* Don't miss readonly hidden in the superblock flags */
3252 if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY)
3253 mnt_flags |= MNT_LOCK_READONLY;
3254
3250 /* Verify the mount flags are equal to or more permissive 3255 /* Verify the mount flags are equal to or more permissive
3251 * than the proposed new mount. 3256 * than the proposed new mount.
3252 */ 3257 */
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index aaf7bd0cbae2..d8015a03db4c 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -424,12 +424,17 @@ static int xdr_decode(nfs_readdir_descriptor_t *desc,
424static 424static
425int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry) 425int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
426{ 426{
427 struct inode *inode;
427 struct nfs_inode *nfsi; 428 struct nfs_inode *nfsi;
428 429
429 if (d_really_is_negative(dentry)) 430 if (d_really_is_negative(dentry))
430 return 0; 431 return 0;
431 432
432 nfsi = NFS_I(d_inode(dentry)); 433 inode = d_inode(dentry);
434 if (is_bad_inode(inode) || NFS_STALE(inode))
435 return 0;
436
437 nfsi = NFS_I(inode);
433 if (entry->fattr->fileid == nfsi->fileid) 438 if (entry->fattr->fileid == nfsi->fileid)
434 return 1; 439 return 1;
435 if (nfs_compare_fh(entry->fh, &nfsi->fh) == 0) 440 if (nfs_compare_fh(entry->fh, &nfsi->fh) == 0)
@@ -1363,7 +1368,6 @@ EXPORT_SYMBOL_GPL(nfs_dentry_operations);
1363struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags) 1368struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
1364{ 1369{
1365 struct dentry *res; 1370 struct dentry *res;
1366 struct dentry *parent;
1367 struct inode *inode = NULL; 1371 struct inode *inode = NULL;
1368 struct nfs_fh *fhandle = NULL; 1372 struct nfs_fh *fhandle = NULL;
1369 struct nfs_fattr *fattr = NULL; 1373 struct nfs_fattr *fattr = NULL;
@@ -1393,7 +1397,6 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
1393 if (IS_ERR(label)) 1397 if (IS_ERR(label))
1394 goto out; 1398 goto out;
1395 1399
1396 parent = dentry->d_parent;
1397 /* Protect against concurrent sillydeletes */ 1400 /* Protect against concurrent sillydeletes */
1398 trace_nfs_lookup_enter(dir, dentry, flags); 1401 trace_nfs_lookup_enter(dir, dentry, flags);
1399 error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label); 1402 error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
@@ -1536,9 +1539,9 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
1536 err = PTR_ERR(inode); 1539 err = PTR_ERR(inode);
1537 trace_nfs_atomic_open_exit(dir, ctx, open_flags, err); 1540 trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
1538 put_nfs_open_context(ctx); 1541 put_nfs_open_context(ctx);
1542 d_drop(dentry);
1539 switch (err) { 1543 switch (err) {
1540 case -ENOENT: 1544 case -ENOENT:
1541 d_drop(dentry);
1542 d_add(dentry, NULL); 1545 d_add(dentry, NULL);
1543 nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); 1546 nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
1544 break; 1547 break;
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 979b3c4dee6a..c7326c2af2c3 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -353,10 +353,12 @@ static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
353 353
354 result = wait_for_completion_killable(&dreq->completion); 354 result = wait_for_completion_killable(&dreq->completion);
355 355
356 if (!result) {
357 result = dreq->count;
358 WARN_ON_ONCE(dreq->count < 0);
359 }
356 if (!result) 360 if (!result)
357 result = dreq->error; 361 result = dreq->error;
358 if (!result)
359 result = dreq->count;
360 362
361out: 363out:
362 return (ssize_t) result; 364 return (ssize_t) result;
@@ -386,8 +388,10 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
386 388
387 if (dreq->iocb) { 389 if (dreq->iocb) {
388 long res = (long) dreq->error; 390 long res = (long) dreq->error;
389 if (!res) 391 if (dreq->count != 0) {
390 res = (long) dreq->count; 392 res = (long) dreq->count;
393 WARN_ON_ONCE(dreq->count < 0);
394 }
391 dreq->iocb->ki_complete(dreq->iocb, res, 0); 395 dreq->iocb->ki_complete(dreq->iocb, res, 0);
392 } 396 }
393 397
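
Both nfs_direct_wait() and nfs_direct_complete() are reordered so that a short transfer reports its byte count in preference to a stored error; only a transfer of zero bytes surfaces dreq->error. The precedence as a tiny worked example:

    #include <stdio.h>

    /* Report partial progress before a saved error, as the hunks above do. */
    static long direct_result(long count, long error)
    {
        if (count != 0)
            return count;    /* short read/write still returns bytes */
        return error;        /* nothing transferred: report the error */
    }

    int main(void)
    {
        printf("%ld\n", direct_result(4096, 0));    /* 4096 */
        printf("%ld\n", direct_result(2048, -5));   /* 2048: partial wins */
        printf("%ld\n", direct_result(0, -5));      /* -5 */
        return 0;
    }
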
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 52e7d6869e3b..dda689d7a8a7 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -282,6 +282,7 @@ nfs_init_locked(struct inode *inode, void *opaque)
282 struct nfs_fattr *fattr = desc->fattr; 282 struct nfs_fattr *fattr = desc->fattr;
283 283
284 set_nfs_fileid(inode, fattr->fileid); 284 set_nfs_fileid(inode, fattr->fileid);
285 inode->i_mode = fattr->mode;
285 nfs_copy_fh(NFS_FH(inode), desc->fh); 286 nfs_copy_fh(NFS_FH(inode), desc->fh);
286 return 0; 287 return 0;
287} 288}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index de97567795a5..ff416d0e24bc 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2882,12 +2882,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
2882 call_close |= is_wronly; 2882 call_close |= is_wronly;
2883 else if (is_wronly) 2883 else if (is_wronly)
2884 calldata->arg.fmode |= FMODE_WRITE; 2884 calldata->arg.fmode |= FMODE_WRITE;
2885 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
2886 call_close |= is_rdwr;
2885 } else if (is_rdwr) 2887 } else if (is_rdwr)
2886 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 2888 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
2887 2889
2888 if (calldata->arg.fmode == 0)
2889 call_close |= is_rdwr;
2890
2891 if (!nfs4_valid_open_stateid(state)) 2890 if (!nfs4_valid_open_stateid(state))
2892 call_close = 0; 2891 call_close = 0;
2893 spin_unlock(&state->owner->so_lock); 2892 spin_unlock(&state->owner->so_lock);
@@ -7924,8 +7923,8 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
7924 break; 7923 break;
7925 } 7924 }
7926 lo = NFS_I(inode)->layout; 7925 lo = NFS_I(inode)->layout;
7927 if (lo && nfs4_stateid_match(&lgp->args.stateid, 7926 if (lo && !test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) &&
7928 &lo->plh_stateid)) { 7927 nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
7929 LIST_HEAD(head); 7928 LIST_HEAD(head);
7930 7929
7931 /* 7930 /*
@@ -7936,10 +7935,10 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
7936 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL, 0); 7935 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL, 0);
7937 spin_unlock(&inode->i_lock); 7936 spin_unlock(&inode->i_lock);
7938 pnfs_free_lseg_list(&head); 7937 pnfs_free_lseg_list(&head);
7938 status = -EAGAIN;
7939 goto out;
7939 } else 7940 } else
7940 spin_unlock(&inode->i_lock); 7941 spin_unlock(&inode->i_lock);
7941 status = -EAGAIN;
7942 goto out;
7943 } 7942 }
7944 7943
7945 status = nfs4_handle_exception(server, status, exception); 7944 status = nfs4_handle_exception(server, status, exception);
@@ -8036,7 +8035,10 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
8036 .flags = RPC_TASK_ASYNC, 8035 .flags = RPC_TASK_ASYNC,
8037 }; 8036 };
8038 struct pnfs_layout_segment *lseg = NULL; 8037 struct pnfs_layout_segment *lseg = NULL;
8039 struct nfs4_exception exception = { .timeout = *timeout }; 8038 struct nfs4_exception exception = {
8039 .inode = inode,
8040 .timeout = *timeout,
8041 };
8040 int status = 0; 8042 int status = 0;
8041 8043
8042 dprintk("--> %s\n", __func__); 8044 dprintk("--> %s\n", __func__);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 9679f4749364..834b875900d6 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1488,9 +1488,9 @@ restart:
1488 } 1488 }
1489 spin_unlock(&state->state_lock); 1489 spin_unlock(&state->state_lock);
1490 } 1490 }
1491 nfs4_put_open_state(state);
1492 clear_bit(NFS_STATE_RECLAIM_NOGRACE, 1491 clear_bit(NFS_STATE_RECLAIM_NOGRACE,
1493 &state->flags); 1492 &state->flags);
1493 nfs4_put_open_state(state);
1494 spin_lock(&sp->so_lock); 1494 spin_lock(&sp->so_lock);
1495 goto restart; 1495 goto restart;
1496 } 1496 }
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 0c7e0d45a4de..0fbe734cc38c 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -361,8 +361,10 @@ pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
361 list_del_init(&lseg->pls_list); 361 list_del_init(&lseg->pls_list);
362 /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */ 362 /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
363 atomic_dec(&lo->plh_refcount); 363 atomic_dec(&lo->plh_refcount);
364 if (list_empty(&lo->plh_segs)) 364 if (list_empty(&lo->plh_segs)) {
365 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
365 clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags); 366 clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
367 }
366 rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq); 368 rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
367} 369}
368 370
@@ -1290,6 +1292,7 @@ alloc_init_layout_hdr(struct inode *ino,
1290 INIT_LIST_HEAD(&lo->plh_bulk_destroy); 1292 INIT_LIST_HEAD(&lo->plh_bulk_destroy);
1291 lo->plh_inode = ino; 1293 lo->plh_inode = ino;
1292 lo->plh_lc_cred = get_rpccred(ctx->cred); 1294 lo->plh_lc_cred = get_rpccred(ctx->cred);
1295 lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID;
1293 return lo; 1296 return lo;
1294} 1297}
1295 1298
@@ -1297,6 +1300,8 @@ static struct pnfs_layout_hdr *
1297pnfs_find_alloc_layout(struct inode *ino, 1300pnfs_find_alloc_layout(struct inode *ino,
1298 struct nfs_open_context *ctx, 1301 struct nfs_open_context *ctx,
1299 gfp_t gfp_flags) 1302 gfp_t gfp_flags)
1303 __releases(&ino->i_lock)
1304 __acquires(&ino->i_lock)
1300{ 1305{
1301 struct nfs_inode *nfsi = NFS_I(ino); 1306 struct nfs_inode *nfsi = NFS_I(ino);
1302 struct pnfs_layout_hdr *new = NULL; 1307 struct pnfs_layout_hdr *new = NULL;
@@ -1565,8 +1570,7 @@ lookup_again:
1565 * stateid, or it has been invalidated, then we must use the open 1570 * stateid, or it has been invalidated, then we must use the open
1566 * stateid. 1571 * stateid.
1567 */ 1572 */
1568 if (lo->plh_stateid.seqid == 0 || 1573 if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
1569 test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
1570 1574
1571 /* 1575 /*
1572 * The first layoutget for the file. Need to serialize per 1576 * The first layoutget for the file. Need to serialize per
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index 0dfc476da3e1..b38e3c0dc790 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -247,7 +247,11 @@ void pnfs_fetch_commit_bucket_list(struct list_head *pages,
247} 247}
248 248
249/* Helper function for pnfs_generic_commit_pagelist to catch an empty 249/* Helper function for pnfs_generic_commit_pagelist to catch an empty
250 * page list. This can happen when two commits race. */ 250 * page list. This can happen when two commits race.
251 *
252 * This must be called instead of nfs_init_commit - call one or the other, but
253 * not both!
254 */
251static bool 255static bool
252pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages, 256pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
253 struct nfs_commit_data *data, 257 struct nfs_commit_data *data,
@@ -256,7 +260,11 @@ pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
256 if (list_empty(pages)) { 260 if (list_empty(pages)) {
257 if (atomic_dec_and_test(&cinfo->mds->rpcs_out)) 261 if (atomic_dec_and_test(&cinfo->mds->rpcs_out))
258 wake_up_atomic_t(&cinfo->mds->rpcs_out); 262 wake_up_atomic_t(&cinfo->mds->rpcs_out);
259 nfs_commitdata_release(data); 263 /* don't call nfs_commitdata_release - it tries to put
264 * the open_context, which is only acquired in nfs_init_commit(),
265 * and nfs_init_commit() has not been called on @data */
266 WARN_ON_ONCE(data->context);
267 nfs_commit_free(data);
260 return true; 268 return true;
261 } 269 }
262 270
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 6776d7a7839e..572e5b3b06f1 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -367,13 +367,13 @@ readpage_async_filler(void *data, struct page *page)
367 nfs_list_remove_request(new); 367 nfs_list_remove_request(new);
368 nfs_readpage_release(new); 368 nfs_readpage_release(new);
369 error = desc->pgio->pg_error; 369 error = desc->pgio->pg_error;
370 goto out_unlock; 370 goto out;
371 } 371 }
372 return 0; 372 return 0;
373out_error: 373out_error:
374 error = PTR_ERR(new); 374 error = PTR_ERR(new);
375out_unlock:
376 unlock_page(page); 375 unlock_page(page);
376out:
377 return error; 377 return error;
378} 378}
379 379
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index e55b5242614d..31f3df193bdb 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -290,7 +290,7 @@ out_free_buf:
290 return error; 290 return error;
291} 291}
292 292
293#define NFSD_MDS_PR_KEY 0x0100000000000000 293#define NFSD_MDS_PR_KEY 0x0100000000000000ULL
294 294
295/* 295/*
296 * We use the client ID as a unique key for the reservations. 296 * We use the client ID as a unique key for the reservations.
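
The NFSD_MDS_PR_KEY fix appends ULL because the unsuffixed constant does not fit a 32-bit long, so 32-bit builds warn and the constant's type (and signedness) ends up compiler-chosen; the suffix pins it to unsigned long long on every target. A quick demonstration of the signedness difference:

    #include <stdio.h>

    #define PR_KEY_BAD  0x0100000000000000      /* signed type, target-dependent */
    #define PR_KEY_GOOD 0x0100000000000000ULL   /* always unsigned long long */

    int main(void)
    {
        printf("-bad  < 0 : %d\n", -PR_KEY_BAD  < 0);  /* 1: signed negate */
        printf("-good < 0 : %d\n", -PR_KEY_GOOD < 0);  /* 0: unsigned wraps */
        printf("good value: %llu\n", PR_KEY_GOOD);
        return 0;
    }
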
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
index 1580ea6fd64d..d08cd88155c7 100644
--- a/fs/nfsd/nfs2acl.c
+++ b/fs/nfsd/nfs2acl.c
@@ -104,22 +104,21 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
104 goto out; 104 goto out;
105 105
106 inode = d_inode(fh->fh_dentry); 106 inode = d_inode(fh->fh_dentry);
107 if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
108 error = -EOPNOTSUPP;
109 goto out_errno;
110 }
111 107
112 error = fh_want_write(fh); 108 error = fh_want_write(fh);
113 if (error) 109 if (error)
114 goto out_errno; 110 goto out_errno;
115 111
116 error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS); 112 fh_lock(fh);
113
114 error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
117 if (error) 115 if (error)
118 goto out_drop_write; 116 goto out_drop_lock;
119 error = inode->i_op->set_acl(inode, argp->acl_default, 117 error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
120 ACL_TYPE_DEFAULT);
121 if (error) 118 if (error)
122 goto out_drop_write; 119 goto out_drop_lock;
120
121 fh_unlock(fh);
123 122
124 fh_drop_write(fh); 123 fh_drop_write(fh);
125 124
@@ -131,7 +130,8 @@ out:
131 posix_acl_release(argp->acl_access); 130 posix_acl_release(argp->acl_access);
132 posix_acl_release(argp->acl_default); 131 posix_acl_release(argp->acl_default);
133 return nfserr; 132 return nfserr;
134out_drop_write: 133out_drop_lock:
134 fh_unlock(fh);
135 fh_drop_write(fh); 135 fh_drop_write(fh);
136out_errno: 136out_errno:
137 nfserr = nfserrno(error); 137 nfserr = nfserrno(error);
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
index 01df4cd7c753..0c890347cde3 100644
--- a/fs/nfsd/nfs3acl.c
+++ b/fs/nfsd/nfs3acl.c
@@ -95,22 +95,20 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp,
95 goto out; 95 goto out;
96 96
97 inode = d_inode(fh->fh_dentry); 97 inode = d_inode(fh->fh_dentry);
98 if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
99 error = -EOPNOTSUPP;
100 goto out_errno;
101 }
102 98
103 error = fh_want_write(fh); 99 error = fh_want_write(fh);
104 if (error) 100 if (error)
105 goto out_errno; 101 goto out_errno;
106 102
107 error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS); 103 fh_lock(fh);
104
105 error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
108 if (error) 106 if (error)
109 goto out_drop_write; 107 goto out_drop_lock;
110 error = inode->i_op->set_acl(inode, argp->acl_default, 108 error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
111 ACL_TYPE_DEFAULT);
112 109
113out_drop_write: 110out_drop_lock:
111 fh_unlock(fh);
114 fh_drop_write(fh); 112 fh_drop_write(fh);
115out_errno: 113out_errno:
116 nfserr = nfserrno(error); 114 nfserr = nfserrno(error);
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index 6adabd6049b7..71292a0d6f09 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -770,9 +770,6 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
770 dentry = fhp->fh_dentry; 770 dentry = fhp->fh_dentry;
771 inode = d_inode(dentry); 771 inode = d_inode(dentry);
772 772
773 if (!inode->i_op->set_acl || !IS_POSIXACL(inode))
774 return nfserr_attrnotsupp;
775
776 if (S_ISDIR(inode->i_mode)) 773 if (S_ISDIR(inode->i_mode))
777 flags = NFS4_ACL_DIR; 774 flags = NFS4_ACL_DIR;
778 775
@@ -782,16 +779,19 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
782 if (host_error < 0) 779 if (host_error < 0)
783 goto out_nfserr; 780 goto out_nfserr;
784 781
785 host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS); 782 fh_lock(fhp);
783
784 host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl);
786 if (host_error < 0) 785 if (host_error < 0)
787 goto out_release; 786 goto out_drop_lock;
788 787
789 if (S_ISDIR(inode->i_mode)) { 788 if (S_ISDIR(inode->i_mode)) {
790 host_error = inode->i_op->set_acl(inode, dpacl, 789 host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl);
791 ACL_TYPE_DEFAULT);
792 } 790 }
793 791
794out_release: 792out_drop_lock:
793 fh_unlock(fhp);
794
795 posix_acl_release(pacl); 795 posix_acl_release(pacl);
796 posix_acl_release(dpacl); 796 posix_acl_release(dpacl);
797out_nfserr: 797out_nfserr:
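
The three nfsd ACL setters above switch from calling ->set_acl directly to set_posix_acl(), which re-checks IS_POSIXACL and ->set_acl itself (see the fs/posix_acl.c hunk below), and they wrap the access+default pair in fh_lock()/fh_unlock() so the two updates cannot interleave with a concurrent setter. The locked two-step update in outline (a pthread mutex standing in for the filehandle lock):

    #include <pthread.h>
    #include <stdio.h>

    enum acl_type { ACL_ACCESS, ACL_DEFAULT };

    static pthread_mutex_t fh_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int set_acl(enum acl_type t, const char *acl)
    {
        printf("set %s acl: %s\n",
               t == ACL_ACCESS ? "access" : "default", acl);
        return 0;
    }

    static int set_both_acls(const char *access, const char *dflt)
    {
        int err;

        pthread_mutex_lock(&fh_mutex);      /* fh_lock(): pair is atomic */
        err = set_acl(ACL_ACCESS, access);
        if (err)
            goto out_drop_lock;
        err = set_acl(ACL_DEFAULT, dflt);
    out_drop_lock:
        pthread_mutex_unlock(&fh_mutex);    /* fh_unlock() on every path */
        return err;
    }

    int main(void)
    {
        return set_both_acls("rwx", "rx");
    }
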
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 7389cb1d7409..04c68d900324 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -710,22 +710,6 @@ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc
710 } 710 }
711} 711}
712 712
713static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
714{
715 struct rpc_xprt *xprt;
716
717 if (args->protocol != XPRT_TRANSPORT_BC_TCP)
718 return rpc_create(args);
719
720 xprt = args->bc_xprt->xpt_bc_xprt;
721 if (xprt) {
722 xprt_get(xprt);
723 return rpc_create_xprt(args, xprt);
724 }
725
726 return rpc_create(args);
727}
728
729static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses) 713static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
730{ 714{
731 int maxtime = max_cb_time(clp->net); 715 int maxtime = max_cb_time(clp->net);
@@ -768,7 +752,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
768 args.authflavor = ses->se_cb_sec.flavor; 752 args.authflavor = ses->se_cb_sec.flavor;
769 } 753 }
770 /* Create RPC client */ 754 /* Create RPC client */
771 client = create_backchannel_client(&args); 755 client = rpc_create(&args);
772 if (IS_ERR(client)) { 756 if (IS_ERR(client)) {
773 dprintk("NFSD: couldn't create callback client: %ld\n", 757 dprintk("NFSD: couldn't create callback client: %ld\n",
774 PTR_ERR(client)); 758 PTR_ERR(client));
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f5f82e145018..70d0b9b33031 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3480,12 +3480,17 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
3480} 3480}
3481 3481
3482static struct nfs4_ol_stateid * 3482static struct nfs4_ol_stateid *
3483init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, 3483init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
3484 struct nfsd4_open *open)
3485{ 3484{
3486 3485
3487 struct nfs4_openowner *oo = open->op_openowner; 3486 struct nfs4_openowner *oo = open->op_openowner;
3488 struct nfs4_ol_stateid *retstp = NULL; 3487 struct nfs4_ol_stateid *retstp = NULL;
3488 struct nfs4_ol_stateid *stp;
3489
3490 stp = open->op_stp;
3491 /* We are moving these outside of the spinlocks to avoid the warnings */
3492 mutex_init(&stp->st_mutex);
3493 mutex_lock(&stp->st_mutex);
3489 3494
3490 spin_lock(&oo->oo_owner.so_client->cl_lock); 3495 spin_lock(&oo->oo_owner.so_client->cl_lock);
3491 spin_lock(&fp->fi_lock); 3496 spin_lock(&fp->fi_lock);
@@ -3493,6 +3498,8 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
3493 retstp = nfsd4_find_existing_open(fp, open); 3498 retstp = nfsd4_find_existing_open(fp, open);
3494 if (retstp) 3499 if (retstp)
3495 goto out_unlock; 3500 goto out_unlock;
3501
3502 open->op_stp = NULL;
3496 atomic_inc(&stp->st_stid.sc_count); 3503 atomic_inc(&stp->st_stid.sc_count);
3497 stp->st_stid.sc_type = NFS4_OPEN_STID; 3504 stp->st_stid.sc_type = NFS4_OPEN_STID;
3498 INIT_LIST_HEAD(&stp->st_locks); 3505 INIT_LIST_HEAD(&stp->st_locks);
@@ -3502,14 +3509,19 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
3502 stp->st_access_bmap = 0; 3509 stp->st_access_bmap = 0;
3503 stp->st_deny_bmap = 0; 3510 stp->st_deny_bmap = 0;
3504 stp->st_openstp = NULL; 3511 stp->st_openstp = NULL;
3505 init_rwsem(&stp->st_rwsem);
3506 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); 3512 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
3507 list_add(&stp->st_perfile, &fp->fi_stateids); 3513 list_add(&stp->st_perfile, &fp->fi_stateids);
3508 3514
3509out_unlock: 3515out_unlock:
3510 spin_unlock(&fp->fi_lock); 3516 spin_unlock(&fp->fi_lock);
3511 spin_unlock(&oo->oo_owner.so_client->cl_lock); 3517 spin_unlock(&oo->oo_owner.so_client->cl_lock);
3512 return retstp; 3518 if (retstp) {
3519 mutex_lock(&retstp->st_mutex);
3520 /* To keep mutex tracking happy */
3521 mutex_unlock(&stp->st_mutex);
3522 stp = retstp;
3523 }
3524 return stp;
3513} 3525}
3514 3526
3515/* 3527/*
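
The init_open_stateid() rewrite initializes and locks the new stateid's mutex before entering the spinlocked section (sleeping lock operations under a spinlock trigger the warnings the comment mentions), and on losing the race it locks the winner's mutex, releases its own, and returns the winner locked. A simplified single-threaded pthread outline of that handoff:

    #include <pthread.h>
    #include <stdio.h>

    struct stateid {
        pthread_mutex_t mutex;
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER; /* "spinlock" */
    static struct stateid *existing;    /* set once a thread wins the race */

    static struct stateid *init_open_stateid(struct stateid *stp)
    {
        struct stateid *retstp;

        /* Sleepable work happens before the non-sleeping lock section. */
        pthread_mutex_init(&stp->mutex, NULL);
        pthread_mutex_lock(&stp->mutex);

        pthread_mutex_lock(&table_lock);
        retstp = existing;              /* nfsd4_find_existing_open() */
        if (!retstp)
            existing = stp;             /* we won: publish ours */
        pthread_mutex_unlock(&table_lock);

        if (retstp) {
            pthread_mutex_lock(&retstp->mutex);  /* caller expects it held */
            pthread_mutex_unlock(&stp->mutex);   /* keep tracking balanced */
            stp = retstp;
        }
        return stp;                     /* always returned locked */
    }

    int main(void)
    {
        struct stateid a, b;
        struct stateid *r1 = init_open_stateid(&a);
        printf("first call published a: %d\n", r1 == &a);
        pthread_mutex_unlock(&r1->mutex);

        struct stateid *r2 = init_open_stateid(&b);  /* loses the race */
        printf("second call got winner: %d\n", r2 == &a);
        pthread_mutex_unlock(&r2->mutex);
        return 0;
    }
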
@@ -4305,7 +4317,6 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
4305 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; 4317 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
4306 struct nfs4_file *fp = NULL; 4318 struct nfs4_file *fp = NULL;
4307 struct nfs4_ol_stateid *stp = NULL; 4319 struct nfs4_ol_stateid *stp = NULL;
4308 struct nfs4_ol_stateid *swapstp = NULL;
4309 struct nfs4_delegation *dp = NULL; 4320 struct nfs4_delegation *dp = NULL;
4310 __be32 status; 4321 __be32 status;
4311 4322
@@ -4335,32 +4346,28 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
4335 */ 4346 */
4336 if (stp) { 4347 if (stp) {
4337 /* Stateid was found, this is an OPEN upgrade */ 4348 /* Stateid was found, this is an OPEN upgrade */
4338 down_read(&stp->st_rwsem); 4349 mutex_lock(&stp->st_mutex);
4339 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); 4350 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
4340 if (status) { 4351 if (status) {
4341 up_read(&stp->st_rwsem); 4352 mutex_unlock(&stp->st_mutex);
4342 goto out; 4353 goto out;
4343 } 4354 }
4344 } else { 4355 } else {
4345 stp = open->op_stp; 4356 /* stp is returned locked. */
4346 open->op_stp = NULL; 4357 stp = init_open_stateid(fp, open);
4347 swapstp = init_open_stateid(stp, fp, open); 4358 /* See if we lost the race to some other thread */
4348 if (swapstp) { 4359 if (stp->st_access_bmap != 0) {
4349 nfs4_put_stid(&stp->st_stid);
4350 stp = swapstp;
4351 down_read(&stp->st_rwsem);
4352 status = nfs4_upgrade_open(rqstp, fp, current_fh, 4360 status = nfs4_upgrade_open(rqstp, fp, current_fh,
4353 stp, open); 4361 stp, open);
4354 if (status) { 4362 if (status) {
4355 up_read(&stp->st_rwsem); 4363 mutex_unlock(&stp->st_mutex);
4356 goto out; 4364 goto out;
4357 } 4365 }
4358 goto upgrade_out; 4366 goto upgrade_out;
4359 } 4367 }
4360 down_read(&stp->st_rwsem);
4361 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open); 4368 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
4362 if (status) { 4369 if (status) {
4363 up_read(&stp->st_rwsem); 4370 mutex_unlock(&stp->st_mutex);
4364 release_open_stateid(stp); 4371 release_open_stateid(stp);
4365 goto out; 4372 goto out;
4366 } 4373 }
@@ -4372,7 +4379,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
4372 } 4379 }
4373upgrade_out: 4380upgrade_out:
4374 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid); 4381 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
4375 up_read(&stp->st_rwsem); 4382 mutex_unlock(&stp->st_mutex);
4376 4383
4377 if (nfsd4_has_session(&resp->cstate)) { 4384 if (nfsd4_has_session(&resp->cstate)) {
4378 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { 4385 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
@@ -4977,12 +4984,12 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
4977 * revoked delegations are kept only for free_stateid. 4984 * revoked delegations are kept only for free_stateid.
4978 */ 4985 */
4979 return nfserr_bad_stateid; 4986 return nfserr_bad_stateid;
4980 down_write(&stp->st_rwsem); 4987 mutex_lock(&stp->st_mutex);
4981 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); 4988 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
4982 if (status == nfs_ok) 4989 if (status == nfs_ok)
4983 status = nfs4_check_fh(current_fh, &stp->st_stid); 4990 status = nfs4_check_fh(current_fh, &stp->st_stid);
4984 if (status != nfs_ok) 4991 if (status != nfs_ok)
4985 up_write(&stp->st_rwsem); 4992 mutex_unlock(&stp->st_mutex);
4986 return status; 4993 return status;
4987} 4994}
4988 4995
@@ -5030,7 +5037,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
5030 return status; 5037 return status;
5031 oo = openowner(stp->st_stateowner); 5038 oo = openowner(stp->st_stateowner);
5032 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { 5039 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5033 up_write(&stp->st_rwsem); 5040 mutex_unlock(&stp->st_mutex);
5034 nfs4_put_stid(&stp->st_stid); 5041 nfs4_put_stid(&stp->st_stid);
5035 return nfserr_bad_stateid; 5042 return nfserr_bad_stateid;
5036 } 5043 }
@@ -5062,12 +5069,12 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5062 oo = openowner(stp->st_stateowner); 5069 oo = openowner(stp->st_stateowner);
5063 status = nfserr_bad_stateid; 5070 status = nfserr_bad_stateid;
5064 if (oo->oo_flags & NFS4_OO_CONFIRMED) { 5071 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
5065 up_write(&stp->st_rwsem); 5072 mutex_unlock(&stp->st_mutex);
5066 goto put_stateid; 5073 goto put_stateid;
5067 } 5074 }
5068 oo->oo_flags |= NFS4_OO_CONFIRMED; 5075 oo->oo_flags |= NFS4_OO_CONFIRMED;
5069 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid); 5076 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
5070 up_write(&stp->st_rwsem); 5077 mutex_unlock(&stp->st_mutex);
5071 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n", 5078 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
5072 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid)); 5079 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
5073 5080
@@ -5143,7 +5150,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
5143 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid); 5150 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
5144 status = nfs_ok; 5151 status = nfs_ok;
5145put_stateid: 5152put_stateid:
5146 up_write(&stp->st_rwsem); 5153 mutex_unlock(&stp->st_mutex);
5147 nfs4_put_stid(&stp->st_stid); 5154 nfs4_put_stid(&stp->st_stid);
5148out: 5155out:
5149 nfsd4_bump_seqid(cstate, status); 5156 nfsd4_bump_seqid(cstate, status);
@@ -5196,7 +5203,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5196 if (status) 5203 if (status)
5197 goto out; 5204 goto out;
5198 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid); 5205 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
5199 up_write(&stp->st_rwsem); 5206 mutex_unlock(&stp->st_mutex);
5200 5207
5201 nfsd4_close_open_stateid(stp); 5208 nfsd4_close_open_stateid(stp);
5202 5209
@@ -5422,7 +5429,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
5422 stp->st_access_bmap = 0; 5429 stp->st_access_bmap = 0;
5423 stp->st_deny_bmap = open_stp->st_deny_bmap; 5430 stp->st_deny_bmap = open_stp->st_deny_bmap;
5424 stp->st_openstp = open_stp; 5431 stp->st_openstp = open_stp;
5425 init_rwsem(&stp->st_rwsem); 5432 mutex_init(&stp->st_mutex);
5426 list_add(&stp->st_locks, &open_stp->st_locks); 5433 list_add(&stp->st_locks, &open_stp->st_locks);
5427 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); 5434 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
5428 spin_lock(&fp->fi_lock); 5435 spin_lock(&fp->fi_lock);
@@ -5591,7 +5598,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5591 &open_stp, nn); 5598 &open_stp, nn);
5592 if (status) 5599 if (status)
5593 goto out; 5600 goto out;
5594 up_write(&open_stp->st_rwsem); 5601 mutex_unlock(&open_stp->st_mutex);
5595 open_sop = openowner(open_stp->st_stateowner); 5602 open_sop = openowner(open_stp->st_stateowner);
5596 status = nfserr_bad_stateid; 5603 status = nfserr_bad_stateid;
5597 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, 5604 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
@@ -5600,7 +5607,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5600 status = lookup_or_create_lock_state(cstate, open_stp, lock, 5607 status = lookup_or_create_lock_state(cstate, open_stp, lock,
5601 &lock_stp, &new); 5608 &lock_stp, &new);
5602 if (status == nfs_ok) 5609 if (status == nfs_ok)
5603 down_write(&lock_stp->st_rwsem); 5610 mutex_lock(&lock_stp->st_mutex);
5604 } else { 5611 } else {
5605 status = nfs4_preprocess_seqid_op(cstate, 5612 status = nfs4_preprocess_seqid_op(cstate,
5606 lock->lk_old_lock_seqid, 5613 lock->lk_old_lock_seqid,
@@ -5704,7 +5711,7 @@ out:
5704 seqid_mutating_err(ntohl(status))) 5711 seqid_mutating_err(ntohl(status)))
5705 lock_sop->lo_owner.so_seqid++; 5712 lock_sop->lo_owner.so_seqid++;
5706 5713
5707 up_write(&lock_stp->st_rwsem); 5714 mutex_unlock(&lock_stp->st_mutex);
5708 5715
5709 /* 5716 /*
5710 * If this is a new, never-before-used stateid, and we are 5717 * If this is a new, never-before-used stateid, and we are
@@ -5874,7 +5881,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5874fput: 5881fput:
5875 fput(filp); 5882 fput(filp);
5876put_stateid: 5883put_stateid:
5877 up_write(&stp->st_rwsem); 5884 mutex_unlock(&stp->st_mutex);
5878 nfs4_put_stid(&stp->st_stid); 5885 nfs4_put_stid(&stp->st_stid);
5879out: 5886out:
5880 nfsd4_bump_seqid(cstate, status); 5887 nfsd4_bump_seqid(cstate, status);
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 986e51e5ceac..64053eadeb81 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -535,7 +535,7 @@ struct nfs4_ol_stateid {
535 unsigned char st_access_bmap; 535 unsigned char st_access_bmap;
536 unsigned char st_deny_bmap; 536 unsigned char st_deny_bmap;
537 struct nfs4_ol_stateid *st_openstp; 537 struct nfs4_ol_stateid *st_openstp;
538 struct rw_semaphore st_rwsem; 538 struct mutex st_mutex;
539}; 539};
540 540
541static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s) 541static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 809bd2de7ad0..e9fd241b9a0a 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -439,7 +439,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
439 if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC) 439 if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
440 return 0; 440 return 0;
441 bytes = le16_to_cpu(sbp->s_bytes); 441 bytes = le16_to_cpu(sbp->s_bytes);
442 if (bytes > BLOCK_SIZE) 442 if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
443 return 0; 443 return 0;
444 crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp, 444 crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
445 sumoff); 445 sumoff);
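
The nilfs_valid_sb() change rejects an on-disk s_bytes smaller than sumoff + 4 as well as larger than a block; without the lower bound, the later crc32 length computation (bytes - sumoff - 4) underflows on a crafted image. Validating a length field against both bounds before using it in arithmetic (the SUMOFF value here is assumed for the sketch):

    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_SIZE 1024u
    #define SUMOFF     24u     /* offset of the checksum field (assumed) */

    static int valid_len(uint16_t bytes)
    {
        /* Too small: bytes - (SUMOFF + 4) would wrap around.
         * Too large: we would checksum past the superblock buffer. */
        if (bytes < SUMOFF + 4 || bytes > BLOCK_SIZE)
            return 0;
        return 1;
    }

    int main(void)
    {
        printf("%d %d %d\n", valid_len(8), valid_len(128), valid_len(4096));
        /* prints: 0 1 0 */
        return 0;
    }
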
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
index e27e6527912b..4342c7ee7d20 100644
--- a/fs/ocfs2/Makefile
+++ b/fs/ocfs2/Makefile
@@ -1,7 +1,5 @@
1ccflags-y := -Ifs/ocfs2 1ccflags-y := -Ifs/ocfs2
2 2
3ccflags-y += -DCATCH_BH_JBD_RACES
4
5obj-$(CONFIG_OCFS2_FS) += \ 3obj-$(CONFIG_OCFS2_FS) += \
6 ocfs2.o \ 4 ocfs2.o \
7 ocfs2_stackglue.o 5 ocfs2_stackglue.o
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index fe50ded1b4ce..498641eed2db 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -139,11 +139,16 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
139 139
140 lock_buffer(bh); 140 lock_buffer(bh);
141 if (buffer_jbd(bh)) { 141 if (buffer_jbd(bh)) {
142#ifdef CATCH_BH_JBD_RACES
142 mlog(ML_ERROR, 143 mlog(ML_ERROR,
143 "block %llu had the JBD bit set " 144 "block %llu had the JBD bit set "
144 "while I was in lock_buffer!", 145 "while I was in lock_buffer!",
145 (unsigned long long)bh->b_blocknr); 146 (unsigned long long)bh->b_blocknr);
146 BUG(); 147 BUG();
148#else
149 unlock_buffer(bh);
150 continue;
151#endif
147 } 152 }
148 153
149 clear_buffer_uptodate(bh); 154 clear_buffer_uptodate(bh);
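
With -DCATCH_BH_JBD_RACES dropped from the ocfs2 Makefile, hitting a JBD-owned buffer in ocfs2_read_blocks_sync() becomes unlock-and-continue in production, keeping the BUG() only behind the ifdef for debug builds. The strict/lenient compile-time toggle in miniature (macro name invented):

    #include <stdio.h>
    #include <stdlib.h>

    /* Build with -DCATCH_RACES to turn the soft skip into a hard stop. */
    static int handle_busy_buffer(int busy)
    {
        if (busy) {
    #ifdef CATCH_RACES
            fprintf(stderr, "buffer busy under lock!\n");
            abort();            /* debug builds: crash loudly */
    #else
            return 1;           /* production: skip and move on */
    #endif
        }
        return 0;
    }

    int main(void)
    {
        printf("skip=%d\n", handle_busy_buffer(1));
        return 0;
    }
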
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 22f0253a3567..c2a6b0894022 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -405,12 +405,21 @@ static int ovl_create_or_link(struct dentry *dentry, int mode, dev_t rdev,
405 err = ovl_create_upper(dentry, inode, &stat, link, hardlink); 405 err = ovl_create_upper(dentry, inode, &stat, link, hardlink);
406 } else { 406 } else {
407 const struct cred *old_cred; 407 const struct cred *old_cred;
408 struct cred *override_cred;
408 409
409 old_cred = ovl_override_creds(dentry->d_sb); 410 old_cred = ovl_override_creds(dentry->d_sb);
410 411
411 err = ovl_create_over_whiteout(dentry, inode, &stat, link, 412 err = -ENOMEM;
412 hardlink); 413 override_cred = prepare_creds();
414 if (override_cred) {
415 override_cred->fsuid = old_cred->fsuid;
416 override_cred->fsgid = old_cred->fsgid;
417 put_cred(override_creds(override_cred));
418 put_cred(override_cred);
413 419
420 err = ovl_create_over_whiteout(dentry, inode, &stat,
421 link, hardlink);
422 }
414 revert_creds(old_cred); 423 revert_creds(old_cred);
415 } 424 }
416 425
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 0ed7c4012437..c831c2e5f803 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -59,16 +59,37 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
59 if (err) 59 if (err)
60 goto out; 60 goto out;
61 61
62 if (attr->ia_valid & ATTR_SIZE) {
63 struct inode *realinode = d_inode(ovl_dentry_real(dentry));
64
65 err = -ETXTBSY;
66 if (atomic_read(&realinode->i_writecount) < 0)
67 goto out_drop_write;
68 }
69
62 err = ovl_copy_up(dentry); 70 err = ovl_copy_up(dentry);
63 if (!err) { 71 if (!err) {
72 struct inode *winode = NULL;
73
64 upperdentry = ovl_dentry_upper(dentry); 74 upperdentry = ovl_dentry_upper(dentry);
65 75
76 if (attr->ia_valid & ATTR_SIZE) {
77 winode = d_inode(upperdentry);
78 err = get_write_access(winode);
79 if (err)
80 goto out_drop_write;
81 }
82
66 inode_lock(upperdentry->d_inode); 83 inode_lock(upperdentry->d_inode);
67 err = notify_change(upperdentry, attr, NULL); 84 err = notify_change(upperdentry, attr, NULL);
68 if (!err) 85 if (!err)
69 ovl_copyattr(upperdentry->d_inode, dentry->d_inode); 86 ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
70 inode_unlock(upperdentry->d_inode); 87 inode_unlock(upperdentry->d_inode);
88
89 if (winode)
90 put_write_access(winode);
71 } 91 }
92out_drop_write:
72 ovl_drop_write(dentry); 93 ovl_drop_write(dentry);
73out: 94out:
74 return err; 95 return err;
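
ovl_setattr() now treats truncation like opening the upper inode for write: it peeks at i_writecount to fail fast with -ETXTBSY, then takes get_write_access() on the upper inode around notify_change() and drops it afterwards. The i_writecount convention (negative means deny-write users such as running executables) modeled with C11 atomics:

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* i_writecount convention: > 0 writers, < 0 deny-write users. */
    static atomic_int writecount;

    static int get_write_access(void)
    {
        int c = atomic_load_explicit(&writecount, memory_order_relaxed);
        while (c >= 0) {
            if (atomic_compare_exchange_weak(&writecount, &c, c + 1))
                return 0;
        }
        return -ETXTBSY;    /* mapped for execute: refuse the truncate */
    }

    static void put_write_access(void)
    {
        atomic_fetch_sub(&writecount, 1);
    }

    int main(void)
    {
        if (!get_write_access()) {
            puts("truncating upper inode");
            put_write_access();
        }
        atomic_store(&writecount, -1);           /* pretend: exec mapping */
        printf("now: %d\n", get_write_access()); /* -ETXTBSY */
        return 0;
    }
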
@@ -121,16 +142,18 @@ int ovl_permission(struct inode *inode, int mask)
121 142
122 err = vfs_getattr(&realpath, &stat); 143 err = vfs_getattr(&realpath, &stat);
123 if (err) 144 if (err)
124 return err; 145 goto out_dput;
125 146
147 err = -ESTALE;
126 if ((stat.mode ^ inode->i_mode) & S_IFMT) 148 if ((stat.mode ^ inode->i_mode) & S_IFMT)
127 return -ESTALE; 149 goto out_dput;
128 150
129 inode->i_mode = stat.mode; 151 inode->i_mode = stat.mode;
130 inode->i_uid = stat.uid; 152 inode->i_uid = stat.uid;
131 inode->i_gid = stat.gid; 153 inode->i_gid = stat.gid;
132 154
133 return generic_permission(inode, mask); 155 err = generic_permission(inode, mask);
156 goto out_dput;
134 } 157 }
135 158
136 /* Careful in RCU walk mode */ 159 /* Careful in RCU walk mode */
@@ -238,41 +261,27 @@ out:
238 return err; 261 return err;
239} 262}
240 263
241static bool ovl_need_xattr_filter(struct dentry *dentry,
242 enum ovl_path_type type)
243{
244 if ((type & (__OVL_PATH_PURE | __OVL_PATH_UPPER)) == __OVL_PATH_UPPER)
245 return S_ISDIR(dentry->d_inode->i_mode);
246 else
247 return false;
248}
249
250ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode, 264ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
251 const char *name, void *value, size_t size) 265 const char *name, void *value, size_t size)
252{ 266{
253 struct path realpath; 267 struct dentry *realdentry = ovl_dentry_real(dentry);
254 enum ovl_path_type type = ovl_path_real(dentry, &realpath);
255 268
256 if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name)) 269 if (ovl_is_private_xattr(name))
257 return -ENODATA; 270 return -ENODATA;
258 271
259 return vfs_getxattr(realpath.dentry, name, value, size); 272 return vfs_getxattr(realdentry, name, value, size);
260} 273}
261 274
262ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) 275ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
263{ 276{
264 struct path realpath; 277 struct dentry *realdentry = ovl_dentry_real(dentry);
265 enum ovl_path_type type = ovl_path_real(dentry, &realpath);
266 ssize_t res; 278 ssize_t res;
267 int off; 279 int off;
268 280
269 res = vfs_listxattr(realpath.dentry, list, size); 281 res = vfs_listxattr(realdentry, list, size);
270 if (res <= 0 || size == 0) 282 if (res <= 0 || size == 0)
271 return res; 283 return res;
272 284
273 if (!ovl_need_xattr_filter(dentry, type))
274 return res;
275
276 /* filter out private xattrs */ 285 /* filter out private xattrs */
277 for (off = 0; off < res;) { 286 for (off = 0; off < res;) {
278 char *s = list + off; 287 char *s = list + off;
@@ -302,7 +311,7 @@ int ovl_removexattr(struct dentry *dentry, const char *name)
302 goto out; 311 goto out;
303 312
304 err = -ENODATA; 313 err = -ENODATA;
305 if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name)) 314 if (ovl_is_private_xattr(name))
306 goto out_drop_write; 315 goto out_drop_write;
307 316
308 if (!OVL_TYPE_UPPER(type)) { 317 if (!OVL_TYPE_UPPER(type)) {
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index ce02f46029da..9a7693d5f8ff 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -1082,11 +1082,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
1082 if (err < 0) 1082 if (err < 0)
1083 goto out_put_workdir; 1083 goto out_put_workdir;
1084 1084
1085 if (!err) { 1085 /*
1086 pr_err("overlayfs: upper fs needs to support d_type.\n"); 1086 * We allowed this configuration and don't want to
1087 err = -EINVAL; 1087 * break users over kernel upgrade. So warn instead
1088 goto out_put_workdir; 1088 * of erroring out.
1089 } 1089 */
1090 if (!err)
1091 pr_warn("overlayfs: upper fs needs to support d_type.\n");
1090 } 1092 }
1091 } 1093 }
1092 1094
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 8a4a266beff3..edc452c2a563 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -820,39 +820,43 @@ posix_acl_xattr_get(const struct xattr_handler *handler,
820 return error; 820 return error;
821} 821}
822 822
823static int 823int
824posix_acl_xattr_set(const struct xattr_handler *handler, 824set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
825 struct dentry *unused, struct inode *inode,
826 const char *name, const void *value,
827 size_t size, int flags)
828{ 825{
829 struct posix_acl *acl = NULL;
830 int ret;
831
832 if (!IS_POSIXACL(inode)) 826 if (!IS_POSIXACL(inode))
833 return -EOPNOTSUPP; 827 return -EOPNOTSUPP;
834 if (!inode->i_op->set_acl) 828 if (!inode->i_op->set_acl)
835 return -EOPNOTSUPP; 829 return -EOPNOTSUPP;
836 830
837 if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode)) 831 if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
838 return value ? -EACCES : 0; 832 return acl ? -EACCES : 0;
839 if (!inode_owner_or_capable(inode)) 833 if (!inode_owner_or_capable(inode))
840 return -EPERM; 834 return -EPERM;
841 835
836 if (acl) {
837 int ret = posix_acl_valid(acl);
838 if (ret)
839 return ret;
840 }
841 return inode->i_op->set_acl(inode, acl, type);
842}
843EXPORT_SYMBOL(set_posix_acl);
844
845static int
846posix_acl_xattr_set(const struct xattr_handler *handler,
847 struct dentry *unused, struct inode *inode,
848 const char *name, const void *value,
849 size_t size, int flags)
850{
851 struct posix_acl *acl = NULL;
852 int ret;
853
842 if (value) { 854 if (value) {
843 acl = posix_acl_from_xattr(&init_user_ns, value, size); 855 acl = posix_acl_from_xattr(&init_user_ns, value, size);
844 if (IS_ERR(acl)) 856 if (IS_ERR(acl))
845 return PTR_ERR(acl); 857 return PTR_ERR(acl);
846
847 if (acl) {
848 ret = posix_acl_valid(acl);
849 if (ret)
850 goto out;
851 }
852 } 858 }
853 859 ret = set_posix_acl(inode, handler->flags, acl);
854 ret = inode->i_op->set_acl(inode, acl, handler->flags);
855out:
856 posix_acl_release(acl); 860 posix_acl_release(acl);
857 return ret; 861 return ret;
 858} 862}
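
The hunk above splits the checks out of the xattr handler and exports them as set_posix_acl(), so in-kernel callers (nfsd is the motivating user) can apply an ACL object directly instead of round-tripping through an xattr blob. A minimal sketch of such a caller, assuming an inode on a filesystem that implements ->set_acl():

    struct posix_acl *acl;
    int err;

    /* Build a trivial access ACL from the inode's mode bits. */
    acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
    if (IS_ERR(acl))
            return PTR_ERR(acl);

    /* set_posix_acl() checks permissions and ACL validity, then
     * hands the ACL to the filesystem's ->set_acl(). */
    err = set_posix_acl(inode, ACL_TYPE_ACCESS, acl);
    posix_acl_release(acl);
    return err;
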
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 55bc7d6c8aac..06702783bf40 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -121,6 +121,13 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
121 if (IS_ERR(sb)) 121 if (IS_ERR(sb))
122 return ERR_CAST(sb); 122 return ERR_CAST(sb);
123 123
124 /*
125 * procfs isn't actually a stacking filesystem; however, there is
126 * too much magic going on inside it to permit stacking things on
127 * top of it
128 */
129 sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
130
124 if (!proc_parse_options(options, ns)) { 131 if (!proc_parse_options(options, ns)) {
125 deactivate_locked_super(sb); 132 deactivate_locked_super(sb);
126 return ERR_PTR(-EINVAL); 133 return ERR_PTR(-EINVAL);
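
Pinning s_stack_depth at FILESYSTEM_MAX_STACK_DEPTH makes procfs unusable as a layer: a stacking filesystem computes its own depth as one more than its layers' and then rejects the mount. Roughly, in the style of overlayfs' mount path (paraphrased, not quoted):

    sb->s_stack_depth = max(upper_sb->s_stack_depth,
                            lower_sb->s_stack_depth) + 1;

    err = -EINVAL;
    if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
            pr_err("maximum fs stacking depth exceeded\n");
            goto out;
    }
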
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index b8f2d1e8c645..c72c16c5a60f 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1393,7 +1393,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1393 unsigned long safe_mask = 0; 1393 unsigned long safe_mask = 0;
1394 unsigned int commit_max_age = (unsigned int)-1; 1394 unsigned int commit_max_age = (unsigned int)-1;
1395 struct reiserfs_journal *journal = SB_JOURNAL(s); 1395 struct reiserfs_journal *journal = SB_JOURNAL(s);
1396 char *new_opts = kstrdup(arg, GFP_KERNEL); 1396 char *new_opts;
1397 int err; 1397 int err;
1398 char *qf_names[REISERFS_MAXQUOTAS]; 1398 char *qf_names[REISERFS_MAXQUOTAS];
1399 unsigned int qfmt = 0; 1399 unsigned int qfmt = 0;
@@ -1401,6 +1401,10 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1401 int i; 1401 int i;
1402#endif 1402#endif
1403 1403
1404 new_opts = kstrdup(arg, GFP_KERNEL);
1405 if (arg && !new_opts)
1406 return -ENOMEM;
1407
1404 sync_filesystem(s); 1408 sync_filesystem(s);
1405 reiserfs_write_lock(s); 1409 reiserfs_write_lock(s);
1406 1410
@@ -1546,7 +1550,8 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1546 } 1550 }
1547 1551
1548out_ok_unlocked: 1552out_ok_unlocked:
1549 replace_mount_options(s, new_opts); 1553 if (new_opts)
1554 replace_mount_options(s, new_opts);
1550 return 0; 1555 return 0;
1551 1556
1552out_err_unlock: 1557out_err_unlock:
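
The 'arg && !new_opts' guard reflects kstrdup()'s contract: a NULL source is passed through as NULL rather than treated as a failure, so a bare !new_opts test could not tell "remount with no options" apart from an allocation failure; the matching 'if (new_opts)' at out_ok_unlocked then keeps replace_mount_options() from discarding the existing options in the NULL case. Illustratively:

    new_opts = kstrdup(arg, GFP_KERNEL);    /* NULL arg -> NULL, not an error */
    if (arg && !new_opts)                   /* NULL despite a source: ENOMEM  */
            return -ENOMEM;
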
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 08316972ff93..7bbf420d1289 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -52,6 +52,7 @@
52#include "ubifs.h" 52#include "ubifs.h"
53#include <linux/mount.h> 53#include <linux/mount.h>
54#include <linux/slab.h> 54#include <linux/slab.h>
55#include <linux/migrate.h>
55 56
56static int read_block(struct inode *inode, void *addr, unsigned int block, 57static int read_block(struct inode *inode, void *addr, unsigned int block,
57 struct ubifs_data_node *dn) 58 struct ubifs_data_node *dn)
@@ -1452,6 +1453,26 @@ static int ubifs_set_page_dirty(struct page *page)
1452 return ret; 1453 return ret;
1453} 1454}
1454 1455
1456#ifdef CONFIG_MIGRATION
1457static int ubifs_migrate_page(struct address_space *mapping,
1458 struct page *newpage, struct page *page, enum migrate_mode mode)
1459{
1460 int rc;
1461
1462 rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
1463 if (rc != MIGRATEPAGE_SUCCESS)
1464 return rc;
1465
1466 if (PagePrivate(page)) {
1467 ClearPagePrivate(page);
1468 SetPagePrivate(newpage);
1469 }
1470
1471 migrate_page_copy(newpage, page);
1472 return MIGRATEPAGE_SUCCESS;
1473}
1474#endif
1475
1455static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags) 1476static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
1456{ 1477{
1457 /* 1478 /*
@@ -1591,6 +1612,9 @@ const struct address_space_operations ubifs_file_address_operations = {
1591 .write_end = ubifs_write_end, 1612 .write_end = ubifs_write_end,
1592 .invalidatepage = ubifs_invalidatepage, 1613 .invalidatepage = ubifs_invalidatepage,
1593 .set_page_dirty = ubifs_set_page_dirty, 1614 .set_page_dirty = ubifs_set_page_dirty,
1615#ifdef CONFIG_MIGRATION
1616 .migratepage = ubifs_migrate_page,
1617#endif
1594 .releasepage = ubifs_releasepage, 1618 .releasepage = ubifs_releasepage,
1595}; 1619};
1596 1620
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index 5f861ed287c3..888c364b2fe9 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -295,7 +295,8 @@ static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
295 map = &UDF_SB(sb)->s_partmaps[partition]; 295 map = &UDF_SB(sb)->s_partmaps[partition];
296 /* map to sparable/physical partition desc */ 296 /* map to sparable/physical partition desc */
297 phyblock = udf_get_pblock(sb, eloc.logicalBlockNum, 297 phyblock = udf_get_pblock(sb, eloc.logicalBlockNum,
298 map->s_partition_num, ext_offset + offset); 298 map->s_type_specific.s_metadata.s_phys_partition_ref,
299 ext_offset + offset);
299 } 300 }
300 301
301 brelse(epos.bh); 302 brelse(epos.bh);
@@ -317,14 +318,18 @@ uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block,
317 mdata = &map->s_type_specific.s_metadata; 318 mdata = &map->s_type_specific.s_metadata;
318 inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe; 319 inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe;
319 320
320 /* We shouldn't mount such media... */ 321 if (!inode)
321 BUG_ON(!inode); 322 return 0xFFFFFFFF;
323
322 retblk = udf_try_read_meta(inode, block, partition, offset); 324 retblk = udf_try_read_meta(inode, block, partition, offset);
323 if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) { 325 if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) {
324 udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n"); 326 udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n");
325 if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) { 327 if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) {
326 mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb, 328 mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
327 mdata->s_mirror_file_loc, map->s_partition_num); 329 mdata->s_mirror_file_loc,
330 mdata->s_phys_partition_ref);
331 if (IS_ERR(mdata->s_mirror_fe))
332 mdata->s_mirror_fe = NULL;
328 mdata->s_flags |= MF_MIRROR_FE_LOADED; 333 mdata->s_flags |= MF_MIRROR_FE_LOADED;
329 } 334 }
330 335
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 5e2c8c814e1b..4942549e7dc8 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -951,13 +951,13 @@ out2:
951} 951}
952 952
953struct inode *udf_find_metadata_inode_efe(struct super_block *sb, 953struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
954 u32 meta_file_loc, u32 partition_num) 954 u32 meta_file_loc, u32 partition_ref)
955{ 955{
956 struct kernel_lb_addr addr; 956 struct kernel_lb_addr addr;
957 struct inode *metadata_fe; 957 struct inode *metadata_fe;
958 958
959 addr.logicalBlockNum = meta_file_loc; 959 addr.logicalBlockNum = meta_file_loc;
960 addr.partitionReferenceNum = partition_num; 960 addr.partitionReferenceNum = partition_ref;
961 961
962 metadata_fe = udf_iget_special(sb, &addr); 962 metadata_fe = udf_iget_special(sb, &addr);
963 963
@@ -974,7 +974,8 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
974 return metadata_fe; 974 return metadata_fe;
975} 975}
976 976
977static int udf_load_metadata_files(struct super_block *sb, int partition) 977static int udf_load_metadata_files(struct super_block *sb, int partition,
978 int type1_index)
978{ 979{
979 struct udf_sb_info *sbi = UDF_SB(sb); 980 struct udf_sb_info *sbi = UDF_SB(sb);
980 struct udf_part_map *map; 981 struct udf_part_map *map;
@@ -984,20 +985,21 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
984 985
985 map = &sbi->s_partmaps[partition]; 986 map = &sbi->s_partmaps[partition];
986 mdata = &map->s_type_specific.s_metadata; 987 mdata = &map->s_type_specific.s_metadata;
988 mdata->s_phys_partition_ref = type1_index;
987 989
988 /* metadata address */ 990 /* metadata address */
989 udf_debug("Metadata file location: block = %d part = %d\n", 991 udf_debug("Metadata file location: block = %d part = %d\n",
990 mdata->s_meta_file_loc, map->s_partition_num); 992 mdata->s_meta_file_loc, mdata->s_phys_partition_ref);
991 993
992 fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc, 994 fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
993 map->s_partition_num); 995 mdata->s_phys_partition_ref);
994 if (IS_ERR(fe)) { 996 if (IS_ERR(fe)) {
995 /* mirror file entry */ 997 /* mirror file entry */
996 udf_debug("Mirror metadata file location: block = %d part = %d\n", 998 udf_debug("Mirror metadata file location: block = %d part = %d\n",
997 mdata->s_mirror_file_loc, map->s_partition_num); 999 mdata->s_mirror_file_loc, mdata->s_phys_partition_ref);
998 1000
999 fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc, 1001 fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
1000 map->s_partition_num); 1002 mdata->s_phys_partition_ref);
1001 1003
1002 if (IS_ERR(fe)) { 1004 if (IS_ERR(fe)) {
 1003 udf_err(sb, "Both metadata and mirror metadata inode efe cannot be found\n"); 1005 udf_err(sb, "Both metadata and mirror metadata inode efe cannot be found\n");
@@ -1015,7 +1017,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
1015 */ 1017 */
1016 if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) { 1018 if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
1017 addr.logicalBlockNum = mdata->s_bitmap_file_loc; 1019 addr.logicalBlockNum = mdata->s_bitmap_file_loc;
1018 addr.partitionReferenceNum = map->s_partition_num; 1020 addr.partitionReferenceNum = mdata->s_phys_partition_ref;
1019 1021
1020 udf_debug("Bitmap file location: block = %d part = %d\n", 1022 udf_debug("Bitmap file location: block = %d part = %d\n",
1021 addr.logicalBlockNum, addr.partitionReferenceNum); 1023 addr.logicalBlockNum, addr.partitionReferenceNum);
@@ -1283,7 +1285,7 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
1283 p = (struct partitionDesc *)bh->b_data; 1285 p = (struct partitionDesc *)bh->b_data;
1284 partitionNumber = le16_to_cpu(p->partitionNumber); 1286 partitionNumber = le16_to_cpu(p->partitionNumber);
1285 1287
1286 /* First scan for TYPE1, SPARABLE and METADATA partitions */ 1288 /* First scan for TYPE1 and SPARABLE partitions */
1287 for (i = 0; i < sbi->s_partitions; i++) { 1289 for (i = 0; i < sbi->s_partitions; i++) {
1288 map = &sbi->s_partmaps[i]; 1290 map = &sbi->s_partmaps[i];
1289 udf_debug("Searching map: (%d == %d)\n", 1291 udf_debug("Searching map: (%d == %d)\n",
@@ -1333,7 +1335,7 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
1333 goto out_bh; 1335 goto out_bh;
1334 1336
1335 if (map->s_partition_type == UDF_METADATA_MAP25) { 1337 if (map->s_partition_type == UDF_METADATA_MAP25) {
1336 ret = udf_load_metadata_files(sb, i); 1338 ret = udf_load_metadata_files(sb, i, type1_idx);
1337 if (ret < 0) { 1339 if (ret < 0) {
1338 udf_err(sb, "error loading MetaData partition map %d\n", 1340 udf_err(sb, "error loading MetaData partition map %d\n",
1339 i); 1341 i);
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 27b5335730c9..c13875d669c0 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -61,6 +61,11 @@ struct udf_meta_data {
61 __u32 s_bitmap_file_loc; 61 __u32 s_bitmap_file_loc;
62 __u32 s_alloc_unit_size; 62 __u32 s_alloc_unit_size;
63 __u16 s_align_unit_size; 63 __u16 s_align_unit_size;
64 /*
65 * Partition Reference Number of the associated physical / sparable
66 * partition
67 */
68 __u16 s_phys_partition_ref;
64 int s_flags; 69 int s_flags;
65 struct inode *s_metadata_fe; 70 struct inode *s_metadata_fe;
66 struct inode *s_mirror_fe; 71 struct inode *s_mirror_fe;
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 6bd05700d8c9..05f05f17a7c2 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -22,37 +22,33 @@
22#include <asm-generic/qspinlock_types.h> 22#include <asm-generic/qspinlock_types.h>
23 23
24/** 24/**
25 * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
26 * @lock : Pointer to queued spinlock structure
27 *
28 * There is a very slight possibility of live-lock if the lockers keep coming
29 * and the waiter is just unfortunate enough to not see any unlock state.
30 */
31#ifndef queued_spin_unlock_wait
32extern void queued_spin_unlock_wait(struct qspinlock *lock);
33#endif
34
35/**
25 * queued_spin_is_locked - is the spinlock locked? 36 * queued_spin_is_locked - is the spinlock locked?
26 * @lock: Pointer to queued spinlock structure 37 * @lock: Pointer to queued spinlock structure
27 * Return: 1 if it is locked, 0 otherwise 38 * Return: 1 if it is locked, 0 otherwise
28 */ 39 */
40#ifndef queued_spin_is_locked
29static __always_inline int queued_spin_is_locked(struct qspinlock *lock) 41static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
30{ 42{
31 /* 43 /*
32 * queued_spin_lock_slowpath() can ACQUIRE the lock before 44 * See queued_spin_unlock_wait().
33 * issuing the unordered store that sets _Q_LOCKED_VAL.
34 *
35 * See both smp_cond_acquire() sites for more detail.
36 *
37 * This however means that in code like:
38 *
39 * spin_lock(A) spin_lock(B)
40 * spin_unlock_wait(B) spin_is_locked(A)
41 * do_something() do_something()
42 *
43 * Both CPUs can end up running do_something() because the store
44 * setting _Q_LOCKED_VAL will pass through the loads in
45 * spin_unlock_wait() and/or spin_is_locked().
46 * 45 *
47 * Avoid this by issuing a full memory barrier between the spin_lock() 46 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
48 * and the loads in spin_unlock_wait() and spin_is_locked(). 47 * isn't immediately observable.
49 *
50 * Note that regular mutual exclusion doesn't care about this
51 * delayed store.
52 */ 48 */
53 smp_mb(); 49 return atomic_read(&lock->val);
54 return atomic_read(&lock->val) & _Q_LOCKED_MASK;
55} 50}
51#endif
56 52
57/** 53/**
58 * queued_spin_value_unlocked - is the spinlock structure unlocked? 54 * queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -122,21 +118,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
122} 118}
123#endif 119#endif
124 120
125/**
126 * queued_spin_unlock_wait - wait until current lock holder releases the lock
127 * @lock : Pointer to queued spinlock structure
128 *
129 * There is a very slight possibility of live-lock if the lockers keep coming
130 * and the waiter is just unfortunate enough to not see any unlock state.
131 */
132static inline void queued_spin_unlock_wait(struct qspinlock *lock)
133{
134 /* See queued_spin_is_locked() */
135 smp_mb();
136 while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
137 cpu_relax();
138}
139
140#ifndef virt_spin_lock 121#ifndef virt_spin_lock
141static __always_inline bool virt_spin_lock(struct qspinlock *lock) 122static __always_inline bool virt_spin_lock(struct qspinlock *lock)
142{ 123{
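
queued_spin_unlock_wait() moves from an inline loop to an out-of-line definition (with an arch override hook), since getting its memory ordering right is too subtle for a header one-liner. Its use is unchanged: wait for the current holder without ever taking the lock, e.g. in a teardown path (sketch; 'obj' is illustrative):

    /* Make sure no one still holds obj->lock before freeing obj;
     * spin_unlock_wait() maps to queued_spin_unlock_wait() here. */
    spin_unlock_wait(&obj->lock);
    kfree(obj);
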
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 9094599a1150..33466bfc6440 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -309,6 +309,7 @@
309 INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \ 309 INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
310 INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \ 310 INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
311 INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \ 311 INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
312 INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \
312 INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \ 313 INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
313 INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */ 314 INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
314 315
@@ -322,15 +323,12 @@
322 INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */ 323 INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
323 324
324#define INTEL_KBL_GT3_IDS(info) \ 325#define INTEL_KBL_GT3_IDS(info) \
326 INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \
325 INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \ 327 INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \
326 INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \ 328 INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */
327 INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */
328 329
329#define INTEL_KBL_GT4_IDS(info) \ 330#define INTEL_KBL_GT4_IDS(info) \
330 INTEL_VGA_DEVICE(0x5932, info), /* DT GT4 */ \ 331 INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
331 INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \
332 INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \
333 INTEL_VGA_DEVICE(0x593D, info) /* WKS GT4 */
334 332
335#define INTEL_KBL_IDS(info) \ 333#define INTEL_KBL_IDS(info) \
336 INTEL_KBL_GT1_IDS(info), \ 334 INTEL_KBL_GT1_IDS(info), \
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index fe389ac31489..92e7e97ca8ff 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -18,13 +18,13 @@
18#ifndef __ASM_ARM_KVM_PMU_H 18#ifndef __ASM_ARM_KVM_PMU_H
19#define __ASM_ARM_KVM_PMU_H 19#define __ASM_ARM_KVM_PMU_H
20 20
21#ifdef CONFIG_KVM_ARM_PMU
22
23#include <linux/perf_event.h> 21#include <linux/perf_event.h>
24#include <asm/perf_event.h> 22#include <asm/perf_event.h>
25 23
26#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) 24#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
27 25
26#ifdef CONFIG_KVM_ARM_PMU
27
28struct kvm_pmc { 28struct kvm_pmc {
29 u8 idx; /* index into the pmu->pmc array */ 29 u8 idx; /* index into the pmu->pmc array */
30 struct perf_event *perf_event; 30 struct perf_event *perf_event;
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 961a417d641e..e38e3fc13ea8 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -26,7 +26,6 @@
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <linux/ptrace.h> 27#include <linux/ptrace.h>
28#include <uapi/linux/audit.h> 28#include <uapi/linux/audit.h>
29#include <linux/tty.h>
30 29
31#define AUDIT_INO_UNSET ((unsigned long)-1) 30#define AUDIT_INO_UNSET ((unsigned long)-1)
32#define AUDIT_DEV_UNSET ((dev_t)-1) 31#define AUDIT_DEV_UNSET ((dev_t)-1)
@@ -348,23 +347,6 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
348 return tsk->sessionid; 347 return tsk->sessionid;
349} 348}
350 349
351static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
352{
353 struct tty_struct *tty = NULL;
354 unsigned long flags;
355
356 spin_lock_irqsave(&tsk->sighand->siglock, flags);
357 if (tsk->signal)
358 tty = tty_kref_get(tsk->signal->tty);
359 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
360 return tty;
361}
362
363static inline void audit_put_tty(struct tty_struct *tty)
364{
365 tty_kref_put(tty);
366}
367
368extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); 350extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
369extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); 351extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
370extern void __audit_bprm(struct linux_binprm *bprm); 352extern void __audit_bprm(struct linux_binprm *bprm);
@@ -522,12 +504,6 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
522{ 504{
523 return -1; 505 return -1;
524} 506}
525static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
526{
527 return NULL;
528}
529static inline void audit_put_tty(struct tty_struct *tty)
530{ }
531static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) 507static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
532{ } 508{ }
533static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, 509static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 9adfef694a25..b3336b4f5d04 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -115,6 +115,31 @@ enum bpf_access_type {
115 BPF_WRITE = 2 115 BPF_WRITE = 2
116}; 116};
117 117
118/* types of values stored in eBPF registers */
119enum bpf_reg_type {
120 NOT_INIT = 0, /* nothing was written into register */
121 UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */
122 PTR_TO_CTX, /* reg points to bpf_context */
123 CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
124 PTR_TO_MAP_VALUE, /* reg points to map element value */
125 PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
126 FRAME_PTR, /* reg == frame_pointer */
127 PTR_TO_STACK, /* reg == frame_pointer + imm */
128 CONST_IMM, /* constant integer value */
129
130 /* PTR_TO_PACKET represents:
131 * skb->data
132 * skb->data + imm
133 * skb->data + (u16) var
134 * skb->data + (u16) var + imm
135 * if (range > 0) then [ptr, ptr + range - off) is safe to access
136 * if (id > 0) means that some 'var' was added
 137 * if (off > 0) means that 'imm' was added
138 */
139 PTR_TO_PACKET,
140 PTR_TO_PACKET_END, /* skb->data + headlen */
141};
142
118struct bpf_prog; 143struct bpf_prog;
119 144
120struct bpf_verifier_ops { 145struct bpf_verifier_ops {
@@ -124,7 +149,8 @@ struct bpf_verifier_ops {
124 /* return true if 'size' wide access at offset 'off' within bpf_context 149 /* return true if 'size' wide access at offset 'off' within bpf_context
125 * with 'type' (read or write) is allowed 150 * with 'type' (read or write) is allowed
126 */ 151 */
127 bool (*is_valid_access)(int off, int size, enum bpf_access_type type); 152 bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
153 enum bpf_reg_type *reg_type);
128 154
129 u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg, 155 u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
130 int src_reg, int ctx_off, 156 int src_reg, int ctx_off,
@@ -192,9 +218,9 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
192void bpf_register_map_type(struct bpf_map_type_list *tl); 218void bpf_register_map_type(struct bpf_map_type_list *tl);
193 219
194struct bpf_prog *bpf_prog_get(u32 ufd); 220struct bpf_prog *bpf_prog_get(u32 ufd);
221struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
195struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog); 222struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
196void bpf_prog_put(struct bpf_prog *prog); 223void bpf_prog_put(struct bpf_prog *prog);
197void bpf_prog_put_rcu(struct bpf_prog *prog);
198 224
199struct bpf_map *bpf_map_get_with_uref(u32 ufd); 225struct bpf_map *bpf_map_get_with_uref(u32 ufd);
200struct bpf_map *__bpf_map_get(struct fd f); 226struct bpf_map *__bpf_map_get(struct fd f);
@@ -252,6 +278,12 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
252 return ERR_PTR(-EOPNOTSUPP); 278 return ERR_PTR(-EOPNOTSUPP);
253} 279}
254 280
281static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
282 enum bpf_prog_type type)
283{
284 return ERR_PTR(-EOPNOTSUPP);
285}
286
255static inline void bpf_prog_put(struct bpf_prog *prog) 287static inline void bpf_prog_put(struct bpf_prog *prog)
256{ 288{
257} 289}
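
The extra *reg_type out-parameter lets a program type's is_valid_access() tell the verifier what kind of value a context load produces, which is what makes the PTR_TO_PACKET/PTR_TO_PACKET_END range tracking above usable. A hedged sketch of such a callback (context fields chosen for illustration):

    static bool sample_is_valid_access(int off, int size,
                                       enum bpf_access_type type,
                                       enum bpf_reg_type *reg_type)
    {
            if (type != BPF_READ || size != sizeof(__u32))
                    return false;

            switch (off) {
            case offsetof(struct __sk_buff, data):
                    *reg_type = PTR_TO_PACKET;      /* range-checked later */
                    break;
            case offsetof(struct __sk_buff, data_end):
                    *reg_type = PTR_TO_PACKET_END;
                    break;
            /* other offsets read as plain scalars; the verifier's
             * UNKNOWN_VALUE default already covers them */
            }
            return true;
    }
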
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index a20320c666fd..984f73b719a9 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -87,6 +87,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
87 struct cgroup_subsys *ss); 87 struct cgroup_subsys *ss);
88 88
89struct cgroup *cgroup_get_from_path(const char *path); 89struct cgroup *cgroup_get_from_path(const char *path);
90struct cgroup *cgroup_get_from_fd(int fd);
90 91
91int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); 92int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
92int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); 93int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 484c8792da82..f53fa055021a 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -212,6 +212,7 @@ struct dentry_operations {
212#define DCACHE_OP_REAL 0x08000000 212#define DCACHE_OP_REAL 0x08000000
213 213
214#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */ 214#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
215#define DCACHE_DENTRY_CURSOR 0x20000000
215 216
216extern seqlock_t rename_lock; 217extern seqlock_t rename_lock;
217 218
@@ -575,5 +576,17 @@ static inline struct inode *vfs_select_inode(struct dentry *dentry,
575 return inode; 576 return inode;
576} 577}
577 578
579/**
580 * d_real_inode - Return the real inode
581 * @dentry: The dentry to query
582 *
583 * If dentry is on an union/overlay, then return the underlying, real inode.
584 * Otherwise return d_inode().
585 */
586static inline struct inode *d_real_inode(struct dentry *dentry)
587{
588 return d_backing_inode(d_real(dentry));
589}
590
578 591
579#endif /* __LINUX_DCACHE_H */ 592#endif /* __LINUX_DCACHE_H */
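
d_real_inode() packages the d_real() + d_backing_inode() dance for callers that must act on the inode actually holding the data when a path may cross an overlay. Sketch of a helper-style caller:

    static struct inode *data_inode_of(const struct path *path)
    {
            /* On overlayfs: the underlying upper/lower inode.
             * Everywhere else: plain d_inode(path->dentry). */
            return d_real_inode(path->dentry);
    }
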
diff --git a/include/linux/efi.h b/include/linux/efi.h
index c2db3ca22217..f196dd0b0f2f 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1005,7 +1005,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
1005/* Iterate through an efi_memory_map */ 1005/* Iterate through an efi_memory_map */
1006#define for_each_efi_memory_desc_in_map(m, md) \ 1006#define for_each_efi_memory_desc_in_map(m, md) \
1007 for ((md) = (m)->map; \ 1007 for ((md) = (m)->map; \
1008 (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \ 1008 ((void *)(md) + (m)->desc_size) <= (m)->map_end; \
1009 (md) = (void *)(md) + (m)->desc_size) 1009 (md) = (void *)(md) + (m)->desc_size)
1010 1010
1011/** 1011/**
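
The reworked bound compares byte addresses, so it stays correct when desc_size differs from sizeof(efi_memory_desc_t) and, crucially, when the map is empty or unset (map == map_end, possibly NULL): the old 'map_end - desc_size' could wrap below the start of the map and keep the comparison true. Iteration itself is unchanged; a sketch:

    static void dump_memmap(struct efi_memory_map *m)
    {
            efi_memory_desc_t *md;

            /* Never dereferences md unless a whole descriptor fits. */
            for_each_efi_memory_desc_in_map(m, md)
                    pr_info("EFI: 0x%llx, %llu pages\n",
                            md->phys_addr, md->num_pages);
    }
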
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 37ff4a6faa9a..6fec9e81bd70 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -374,6 +374,29 @@ static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2)
374} 374}
375 375
376/** 376/**
377 * ether_addr_equal_masked - Compare two Ethernet addresses with a mask
378 * @addr1: Pointer to a six-byte array containing the 1st Ethernet address
379 * @addr2: Pointer to a six-byte array containing the 2nd Ethernet address
380 * @mask: Pointer to a six-byte array containing the Ethernet address bitmask
381 *
 382 * Compare two Ethernet addresses with a mask; returns true if, for every bit 382 * Compare two Ethernet addresses with a mask; returns true if, for every bit
 383 * set in the bitmask, the equivalent bits in the Ethernet addresses are equal. 383 * set in the bitmask, the equivalent bits in the Ethernet addresses are equal.
384 * Using a mask with all bits set is a slower ether_addr_equal.
385 */
386static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
387 const u8 *mask)
388{
389 int i;
390
391 for (i = 0; i < ETH_ALEN; i++) {
392 if ((addr1[i] ^ addr2[i]) & mask[i])
393 return false;
394 }
395
396 return true;
397}
398
399/**
377 * is_etherdev_addr - Tell if given Ethernet address belongs to the device. 400 * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
378 * @dev: Pointer to a device structure 401 * @dev: Pointer to a device structure
379 * @addr: Pointer to a six-byte array containing the Ethernet address 402 * @addr: Pointer to a six-byte array containing the Ethernet address
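
A typical use for ether_addr_equal_masked() is filtering on only the significant octets of an address, such as a vendor OUI. Sketch (values and the handler are illustrative):

    static const u8 vendor_oui[ETH_ALEN] = { 0x00, 0x1b, 0x21, 0, 0, 0 };
    static const u8 oui_mask[ETH_ALEN]   = { 0xff, 0xff, 0xff, 0, 0, 0 };

    /* True when the first three octets of the destination match. */
    if (ether_addr_equal_masked(eth_hdr(skb)->h_dest, vendor_oui, oui_mask))
            handle_vendor_frame(skb);
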
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index b118744d3382..a80516fd65c8 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -19,6 +19,7 @@
19 19
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/if_ether.h> 21#include <linux/if_ether.h>
22#include <linux/etherdevice.h>
22#include <asm/byteorder.h> 23#include <asm/byteorder.h>
23#include <asm/unaligned.h> 24#include <asm/unaligned.h>
24 25
@@ -2464,7 +2465,7 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
2464 */ 2465 */
2465static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb) 2466static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb)
2466{ 2467{
2467 if (skb->len < 25) 2468 if (skb->len < IEEE80211_MIN_ACTION_SIZE)
2468 return false; 2469 return false;
2469 return _ieee80211_is_robust_mgmt_frame((void *)skb->data); 2470 return _ieee80211_is_robust_mgmt_frame((void *)skb->data);
2470} 2471}
@@ -2487,6 +2488,35 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
2487} 2488}
2488 2489
2489/** 2490/**
2491 * _ieee80211_is_group_privacy_action - check if frame is a group addressed
2492 * privacy action frame
2493 * @hdr: the frame
2494 */
2495static inline bool _ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr)
2496{
2497 struct ieee80211_mgmt *mgmt = (void *)hdr;
2498
2499 if (!ieee80211_is_action(hdr->frame_control) ||
2500 !is_multicast_ether_addr(hdr->addr1))
2501 return false;
2502
2503 return mgmt->u.action.category == WLAN_CATEGORY_MESH_ACTION ||
2504 mgmt->u.action.category == WLAN_CATEGORY_MULTIHOP_ACTION;
2505}
2506
2507/**
2508 * ieee80211_is_group_privacy_action - check if frame is a group addressed
2509 * privacy action frame
2510 * @skb: the skb containing the frame, length will be checked
2511 */
2512static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb)
2513{
2514 if (skb->len < IEEE80211_MIN_ACTION_SIZE)
2515 return false;
2516 return _ieee80211_is_group_privacy_action((void *)skb->data);
2517}
2518
2519/**
2490 * ieee80211_tu_to_usec - convert time units (TU) to microseconds 2520 * ieee80211_tu_to_usec - convert time units (TU) to microseconds
2491 * @tu: the TUs 2521 * @tu: the TUs
2492 */ 2522 */
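
The group-privacy helpers single out mesh/multihop group-addressed action frames, the one class of group-addressed management frames that is sent encrypted. A hedged receive-side sketch (the policy and the 'was_decrypted' flag are illustrative, not mac80211's exact code):

    /* Such frames are protected with the group key; an unencrypted
     * one is treated like any other unprotected robust frame. */
    if (ieee80211_is_group_privacy_action(skb) && !was_decrypted)
            return -EACCES;
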
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index d029ffac0d69..99403b19092f 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -223,6 +223,8 @@ struct st_sensor_settings {
223 * @get_irq_data_ready: Function to get the IRQ used for data ready signal. 223 * @get_irq_data_ready: Function to get the IRQ used for data ready signal.
224 * @tf: Transfer function structure used by I/O operations. 224 * @tf: Transfer function structure used by I/O operations.
225 * @tb: Transfer buffers and mutex used by I/O operations. 225 * @tb: Transfer buffers and mutex used by I/O operations.
 226 * @hw_irq_trigger: whether we're using the hardware interrupt on the sensor.
227 * @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
226 */ 228 */
227struct st_sensor_data { 229struct st_sensor_data {
228 struct device *dev; 230 struct device *dev;
@@ -247,6 +249,9 @@ struct st_sensor_data {
247 249
248 const struct st_sensor_transfer_function *tf; 250 const struct st_sensor_transfer_function *tf;
249 struct st_sensor_transfer_buffer tb; 251 struct st_sensor_transfer_buffer tb;
252
253 bool hw_irq_trigger;
254 s64 hw_timestamp;
250}; 255};
251 256
252#ifdef CONFIG_IIO_BUFFER 257#ifdef CONFIG_IIO_BUFFER
@@ -260,7 +265,8 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
260 const struct iio_trigger_ops *trigger_ops); 265 const struct iio_trigger_ops *trigger_ops);
261 266
262void st_sensors_deallocate_trigger(struct iio_dev *indio_dev); 267void st_sensors_deallocate_trigger(struct iio_dev *indio_dev);
263 268int st_sensors_validate_device(struct iio_trigger *trig,
269 struct iio_dev *indio_dev);
264#else 270#else
265static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev, 271static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
266 const struct iio_trigger_ops *trigger_ops) 272 const struct iio_trigger_ops *trigger_ops)
@@ -271,6 +277,7 @@ static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
271{ 277{
272 return; 278 return;
273} 279}
280#define st_sensors_validate_device NULL
274#endif 281#endif
275 282
276int st_sensors_init_sensor(struct iio_dev *indio_dev, 283int st_sensors_init_sensor(struct iio_dev *indio_dev,
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 7c27fa1030e8..feb04ea20f11 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -52,6 +52,12 @@ struct sock *inet_diag_find_one_icsk(struct net *net,
52 52
53int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk); 53int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
54 54
55void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
56
57int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
58 struct inet_diag_msg *r, int ext,
59 struct user_namespace *user_ns);
60
55extern int inet_diag_register(const struct inet_diag_handler *handler); 61extern int inet_diag_register(const struct inet_diag_handler *handler);
56extern void inet_diag_unregister(const struct inet_diag_handler *handler); 62extern void inet_diag_unregister(const struct inet_diag_handler *handler);
57#endif /* _INET_DIAG_H_ */ 63#endif /* _INET_DIAG_H_ */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index f2cb8d45513d..f8834f820ec2 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -190,7 +190,7 @@ extern struct task_group root_task_group;
190#define INIT_TASK(tsk) \ 190#define INIT_TASK(tsk) \
191{ \ 191{ \
192 .state = 0, \ 192 .state = 0, \
193 .stack = &init_thread_info, \ 193 .stack = init_stack, \
194 .usage = ATOMIC_INIT(2), \ 194 .usage = ATOMIC_INIT(2), \
195 .flags = PF_KTHREAD, \ 195 .flags = PF_KTHREAD, \
196 .prio = MAX_PRIO-20, \ 196 .prio = MAX_PRIO-20, \
diff --git a/include/linux/isa.h b/include/linux/isa.h
index 5ab85281230b..f2d0258414cf 100644
--- a/include/linux/isa.h
+++ b/include/linux/isa.h
@@ -6,6 +6,7 @@
6#define __LINUX_ISA_H 6#define __LINUX_ISA_H
7 7
8#include <linux/device.h> 8#include <linux/device.h>
9#include <linux/errno.h>
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10 11
11struct isa_driver { 12struct isa_driver {
@@ -22,13 +23,13 @@ struct isa_driver {
22 23
23#define to_isa_driver(x) container_of((x), struct isa_driver, driver) 24#define to_isa_driver(x) container_of((x), struct isa_driver, driver)
24 25
25#ifdef CONFIG_ISA 26#ifdef CONFIG_ISA_BUS_API
26int isa_register_driver(struct isa_driver *, unsigned int); 27int isa_register_driver(struct isa_driver *, unsigned int);
27void isa_unregister_driver(struct isa_driver *); 28void isa_unregister_driver(struct isa_driver *);
28#else 29#else
29static inline int isa_register_driver(struct isa_driver *d, unsigned int i) 30static inline int isa_register_driver(struct isa_driver *d, unsigned int i)
30{ 31{
31 return 0; 32 return -ENODEV;
32} 33}
33 34
34static inline void isa_unregister_driver(struct isa_driver *d) 35static inline void isa_unregister_driver(struct isa_driver *d)
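
Returning -ENODEV from the !CONFIG_ISA_BUS_API stub means a driver whose init simply propagates the result now fails to load, instead of sitting in memory "loaded" with no devices bound. E.g. (driver struct and card count hypothetical):

    static int __init sample_isa_init(void)
    {
            /* -ENODEV when the kernel has no ISA bus support. */
            return isa_register_driver(&sample_isa_driver, SAMPLE_NR_CARDS);
    }
    module_init(sample_isa_init);
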
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 0536524bb9eb..68904469fba1 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -117,13 +117,18 @@ struct module;
117 117
118#include <linux/atomic.h> 118#include <linux/atomic.h>
119 119
120#ifdef HAVE_JUMP_LABEL
121
120static inline int static_key_count(struct static_key *key) 122static inline int static_key_count(struct static_key *key)
121{ 123{
122 return atomic_read(&key->enabled); 124 /*
125 * -1 means the first static_key_slow_inc() is in progress.
126 * static_key_enabled() must return true, so return 1 here.
127 */
128 int n = atomic_read(&key->enabled);
129 return n >= 0 ? n : 1;
123} 130}
124 131
125#ifdef HAVE_JUMP_LABEL
126
127#define JUMP_TYPE_FALSE 0UL 132#define JUMP_TYPE_FALSE 0UL
128#define JUMP_TYPE_TRUE 1UL 133#define JUMP_TYPE_TRUE 1UL
129#define JUMP_TYPE_MASK 1UL 134#define JUMP_TYPE_MASK 1UL
@@ -162,6 +167,11 @@ extern void jump_label_apply_nops(struct module *mod);
162 167
163#else /* !HAVE_JUMP_LABEL */ 168#else /* !HAVE_JUMP_LABEL */
164 169
170static inline int static_key_count(struct static_key *key)
171{
172 return atomic_read(&key->enabled);
173}
174
165static __always_inline void jump_label_init(void) 175static __always_inline void jump_label_init(void)
166{ 176{
167 static_key_initialized = true; 177 static_key_initialized = true;
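
The clamp matters only in the HAVE_JUMP_LABEL build, where the first static_key_slow_inc() parks -1 in key->enabled while it patches call sites; a concurrent static_key_enabled() must already report the key as on. The window, sketched as a timeline:

    /*
     *  CPU0                                    CPU1
     *  static_key_slow_inc(&key)
     *    atomic_cmpxchg(&key->enabled, 0, -1)
     *    ... patching call sites ...           static_key_enabled(&key)
     *                                            static_key_count() sees -1,
     *                                            returns 1 -> "enabled"
     *    atomic_set(&key->enabled, 1)
     */
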
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 611927f5870d..ac4b3c46a84d 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -59,14 +59,13 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);
59 59
60void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); 60void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
61void kasan_kfree_large(const void *ptr); 61void kasan_kfree_large(const void *ptr);
62void kasan_kfree(void *ptr); 62void kasan_poison_kfree(void *ptr);
63void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, 63void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
64 gfp_t flags); 64 gfp_t flags);
65void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); 65void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
66 66
67void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); 67void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
68bool kasan_slab_free(struct kmem_cache *s, void *object); 68bool kasan_slab_free(struct kmem_cache *s, void *object);
69void kasan_poison_slab_free(struct kmem_cache *s, void *object);
70 69
71struct kasan_cache { 70struct kasan_cache {
72 int alloc_meta_offset; 71 int alloc_meta_offset;
@@ -76,6 +75,9 @@ struct kasan_cache {
76int kasan_module_alloc(void *addr, size_t size); 75int kasan_module_alloc(void *addr, size_t size);
77void kasan_free_shadow(const struct vm_struct *vm); 76void kasan_free_shadow(const struct vm_struct *vm);
78 77
78size_t ksize(const void *);
79static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
80
79#else /* CONFIG_KASAN */ 81#else /* CONFIG_KASAN */
80 82
81static inline void kasan_unpoison_shadow(const void *address, size_t size) {} 83static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
@@ -102,7 +104,7 @@ static inline void kasan_poison_object_data(struct kmem_cache *cache,
102 104
103static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} 105static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
104static inline void kasan_kfree_large(const void *ptr) {} 106static inline void kasan_kfree_large(const void *ptr) {}
105static inline void kasan_kfree(void *ptr) {} 107static inline void kasan_poison_kfree(void *ptr) {}
106static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, 108static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
107 size_t size, gfp_t flags) {} 109 size_t size, gfp_t flags) {}
108static inline void kasan_krealloc(const void *object, size_t new_size, 110static inline void kasan_krealloc(const void *object, size_t new_size,
@@ -114,11 +116,12 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
114{ 116{
115 return false; 117 return false;
116} 118}
117static inline void kasan_poison_slab_free(struct kmem_cache *s, void *object) {}
118 119
119static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } 120static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
120static inline void kasan_free_shadow(const struct vm_struct *vm) {} 121static inline void kasan_free_shadow(const struct vm_struct *vm) {}
121 122
123static inline void kasan_unpoison_slab(const void *ptr) { }
124
122#endif /* CONFIG_KASAN */ 125#endif /* CONFIG_KASAN */
123 126
124#endif /* LINUX_KASAN_H */ 127#endif /* LINUX_KASAN_H */
diff --git a/include/linux/leds.h b/include/linux/leds.h
index d2b13066e781..e5e7f2e80a54 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -42,15 +42,16 @@ struct led_classdev {
42#define LED_UNREGISTERING (1 << 1) 42#define LED_UNREGISTERING (1 << 1)
43 /* Upper 16 bits reflect control information */ 43 /* Upper 16 bits reflect control information */
44#define LED_CORE_SUSPENDRESUME (1 << 16) 44#define LED_CORE_SUSPENDRESUME (1 << 16)
45#define LED_BLINK_ONESHOT (1 << 17) 45#define LED_BLINK_SW (1 << 17)
46#define LED_BLINK_ONESHOT_STOP (1 << 18) 46#define LED_BLINK_ONESHOT (1 << 18)
47#define LED_BLINK_INVERT (1 << 19) 47#define LED_BLINK_ONESHOT_STOP (1 << 19)
48#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 20) 48#define LED_BLINK_INVERT (1 << 20)
49#define LED_BLINK_DISABLE (1 << 21) 49#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 21)
50#define LED_SYSFS_DISABLE (1 << 22) 50#define LED_BLINK_DISABLE (1 << 22)
51#define LED_DEV_CAP_FLASH (1 << 23) 51#define LED_SYSFS_DISABLE (1 << 23)
52#define LED_HW_PLUGGABLE (1 << 24) 52#define LED_DEV_CAP_FLASH (1 << 24)
53#define LED_PANIC_INDICATOR (1 << 25) 53#define LED_HW_PLUGGABLE (1 << 25)
54#define LED_PANIC_INDICATOR (1 << 26)
54 55
55 /* Set LED brightness level 56 /* Set LED brightness level
56 * Must not sleep. Use brightness_set_blocking for drivers 57 * Must not sleep. Use brightness_set_blocking for drivers
@@ -72,8 +73,8 @@ struct led_classdev {
72 * and if both are zero then a sensible default should be chosen. 73 * and if both are zero then a sensible default should be chosen.
73 * The call should adjust the timings in that case and if it can't 74 * The call should adjust the timings in that case and if it can't
74 * match the values specified exactly. 75 * match the values specified exactly.
75 * Deactivate blinking again when the brightness is set to a fixed 76 * Deactivate blinking again when the brightness is set to LED_OFF
76 * value via the brightness_set() callback. 77 * via the brightness_set() callback.
77 */ 78 */
78 int (*blink_set)(struct led_classdev *led_cdev, 79 int (*blink_set)(struct led_classdev *led_cdev,
79 unsigned long *delay_on, 80 unsigned long *delay_on,
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index c18a4c19d6fc..ce9230af09c2 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -171,7 +171,7 @@ static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg,
171static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg, 171static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg,
172 unsigned reg_cnt, unsigned char *val) 172 unsigned reg_cnt, unsigned char *val)
173{ 173{
174 int ret; 174 int ret = 0;
175 int i; 175 int i;
176 176
177 for (i = 0; i < reg_cnt; i++) { 177 for (i = 0; i < reg_cnt; i++) {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 4dbc1450bbe0..e6f6910278f3 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -466,6 +466,7 @@ enum {
466enum { 466enum {
467 MLX4_INTERFACE_STATE_UP = 1 << 0, 467 MLX4_INTERFACE_STATE_UP = 1 << 0,
468 MLX4_INTERFACE_STATE_DELETION = 1 << 1, 468 MLX4_INTERFACE_STATE_DELETION = 1 << 1,
469 MLX4_INTERFACE_STATE_SHUTDOWN = 1 << 2,
469}; 470};
470 471
471#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ 472#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 46260fdc5305..81e8396574f4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -550,14 +550,10 @@ struct mlx5_priv {
550 struct list_head ctx_list; 550 struct list_head ctx_list;
551 spinlock_t ctx_lock; 551 spinlock_t ctx_lock;
552 552
553 struct mlx5_flow_steering *steering;
553 struct mlx5_eswitch *eswitch; 554 struct mlx5_eswitch *eswitch;
554 struct mlx5_core_sriov sriov; 555 struct mlx5_core_sriov sriov;
555 unsigned long pci_dev_data; 556 unsigned long pci_dev_data;
556 struct mlx5_flow_root_namespace *root_ns;
557 struct mlx5_flow_root_namespace *fdb_root_ns;
558 struct mlx5_flow_root_namespace *esw_egress_root_ns;
559 struct mlx5_flow_root_namespace *esw_ingress_root_ns;
560
561 struct mlx5_fc_stats fc_stats; 557 struct mlx5_fc_stats fc_stats;
562 struct mlx5_rl_table rl_table; 558 struct mlx5_rl_table rl_table;
563}; 559};
@@ -578,6 +574,18 @@ enum mlx5_pci_status {
578 MLX5_PCI_STATUS_ENABLED, 574 MLX5_PCI_STATUS_ENABLED,
579}; 575};
580 576
577struct mlx5_td {
578 struct list_head tirs_list;
579 u32 tdn;
580};
581
582struct mlx5e_resources {
583 struct mlx5_uar cq_uar;
584 u32 pdn;
585 struct mlx5_td td;
586 struct mlx5_core_mkey mkey;
587};
588
581struct mlx5_core_dev { 589struct mlx5_core_dev {
582 struct pci_dev *pdev; 590 struct pci_dev *pdev;
583 /* sync pci state */ 591 /* sync pci state */
@@ -602,6 +610,7 @@ struct mlx5_core_dev {
602 struct mlx5_profile *profile; 610 struct mlx5_profile *profile;
603 atomic_t num_qps; 611 atomic_t num_qps;
604 u32 issi; 612 u32 issi;
613 struct mlx5e_resources mlx5e_res;
605#ifdef CONFIG_RFS_ACCEL 614#ifdef CONFIG_RFS_ACCEL
606 struct cpu_rmap *rmap; 615 struct cpu_rmap *rmap;
607#endif 616#endif
@@ -645,6 +654,7 @@ struct mlx5_cmd_work_ent {
645 void *uout; 654 void *uout;
646 int uout_size; 655 int uout_size;
647 mlx5_cmd_cbk_t callback; 656 mlx5_cmd_cbk_t callback;
657 struct delayed_work cb_timeout_work;
648 void *context; 658 void *context;
649 int idx; 659 int idx;
650 struct completion done; 660 struct completion done;
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 4b7a107d9c19..e036d6030867 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -54,6 +54,8 @@ static inline void build_leftovers_ft_param(int *priority,
54 54
55enum mlx5_flow_namespace_type { 55enum mlx5_flow_namespace_type {
56 MLX5_FLOW_NAMESPACE_BYPASS, 56 MLX5_FLOW_NAMESPACE_BYPASS,
57 MLX5_FLOW_NAMESPACE_OFFLOADS,
58 MLX5_FLOW_NAMESPACE_ETHTOOL,
57 MLX5_FLOW_NAMESPACE_KERNEL, 59 MLX5_FLOW_NAMESPACE_KERNEL,
58 MLX5_FLOW_NAMESPACE_LEFTOVERS, 60 MLX5_FLOW_NAMESPACE_LEFTOVERS,
59 MLX5_FLOW_NAMESPACE_ANCHOR, 61 MLX5_FLOW_NAMESPACE_ANCHOR,
@@ -67,6 +69,12 @@ struct mlx5_flow_group;
67struct mlx5_flow_rule; 69struct mlx5_flow_rule;
68struct mlx5_flow_namespace; 70struct mlx5_flow_namespace;
69 71
72struct mlx5_flow_spec {
73 u8 match_criteria_enable;
74 u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
75 u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
76};
77
70struct mlx5_flow_destination { 78struct mlx5_flow_destination {
71 enum mlx5_flow_destination_type type; 79 enum mlx5_flow_destination_type type;
72 union { 80 union {
@@ -115,9 +123,7 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
115 */ 123 */
116struct mlx5_flow_rule * 124struct mlx5_flow_rule *
117mlx5_add_flow_rule(struct mlx5_flow_table *ft, 125mlx5_add_flow_rule(struct mlx5_flow_table *ft,
118 u8 match_criteria_enable, 126 struct mlx5_flow_spec *spec,
119 u32 *match_criteria,
120 u32 *match_value,
121 u32 action, 127 u32 action,
122 u32 flow_tag, 128 u32 flow_tag,
123 struct mlx5_flow_destination *dest); 129 struct mlx5_flow_destination *dest);
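
Callers of mlx5_add_flow_rule() now fill a single struct mlx5_flow_spec instead of passing the criteria-enable bits and two u32 arrays separately. A hedged sketch of the new call shape ('ft', 'dest' and the matched field are illustrative):

    struct mlx5_flow_spec *spec;
    struct mlx5_flow_rule *rule;

    spec = kzalloc(sizeof(*spec), GFP_KERNEL);
    if (!spec)
            return -ENOMEM;

    spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
    MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                     outer_headers.ethertype);
    MLX5_SET(fte_match_param, spec->match_value,
             outer_headers.ethertype, ETH_P_IP);

    rule = mlx5_add_flow_rule(ft, spec,
                              MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                              MLX5_FS_DEFAULT_FLOW_TAG, &dest);
    kfree(spec);
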
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 266320feb160..ab310819ac36 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -172,6 +172,7 @@ enum {
172enum { 172enum {
173 MLX5_FENCE_MODE_NONE = 0 << 5, 173 MLX5_FENCE_MODE_NONE = 0 << 5,
174 MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5, 174 MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5,
175 MLX5_FENCE_MODE_FENCE = 2 << 5,
175 MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5, 176 MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5,
176 MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5, 177 MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
177}; 178};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5df5feb49575..ece042dfe23c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -602,7 +602,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
602} 602}
603 603
604void do_set_pte(struct vm_area_struct *vma, unsigned long address, 604void do_set_pte(struct vm_area_struct *vma, unsigned long address,
605 struct page *page, pte_t *pte, bool write, bool anon, bool old); 605 struct page *page, pte_t *pte, bool write, bool anon);
606#endif 606#endif
607 607
608/* 608/*
diff --git a/include/linux/net.h b/include/linux/net.h
index 9aa49a05fe38..b9f0ff4d489c 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -185,6 +185,7 @@ struct proto_ops {
185 ssize_t (*splice_read)(struct socket *sock, loff_t *ppos, 185 ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
186 struct pipe_inode_info *pipe, size_t len, unsigned int flags); 186 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
187 int (*set_peek_off)(struct sock *sk, int val); 187 int (*set_peek_off)(struct sock *sk, int val);
188 int (*peek_len)(struct socket *sock);
188}; 189};
189 190
190#define DECLARE_SOCKADDR(type, dst, src) \ 191#define DECLARE_SOCKADDR(type, dst, src) \
@@ -251,7 +252,8 @@ do { \
251 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ 252 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
252 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ 253 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
253 net_ratelimit()) \ 254 net_ratelimit()) \
254 __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \ 255 __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
256 ##__VA_ARGS__); \
255} while (0) 257} while (0)
256#elif defined(DEBUG) 258#elif defined(DEBUG)
257#define net_dbg_ratelimited(fmt, ...) \ 259#define net_dbg_ratelimited(fmt, ...) \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e84d9d23c2d5..49736a31acaa 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1209,8 +1209,10 @@ struct net_device_ops {
1209 netdev_features_t features); 1209 netdev_features_t features);
1210 int (*ndo_set_features)(struct net_device *dev, 1210 int (*ndo_set_features)(struct net_device *dev,
1211 netdev_features_t features); 1211 netdev_features_t features);
1212 int (*ndo_neigh_construct)(struct neighbour *n); 1212 int (*ndo_neigh_construct)(struct net_device *dev,
1213 void (*ndo_neigh_destroy)(struct neighbour *n); 1213 struct neighbour *n);
1214 void (*ndo_neigh_destroy)(struct net_device *dev,
1215 struct neighbour *n);
1214 1216
1215 int (*ndo_fdb_add)(struct ndmsg *ndm, 1217 int (*ndo_fdb_add)(struct ndmsg *ndm,
1216 struct nlattr *tb[], 1218 struct nlattr *tb[],
@@ -2237,6 +2239,7 @@ struct netdev_lag_lower_state_info {
2237#define NETDEV_PRECHANGEUPPER 0x001A 2239#define NETDEV_PRECHANGEUPPER 0x001A
2238#define NETDEV_CHANGELOWERSTATE 0x001B 2240#define NETDEV_CHANGELOWERSTATE 0x001B
2239#define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C 2241#define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C
2242#define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E
2240 2243
2241int register_netdevice_notifier(struct notifier_block *nb); 2244int register_netdevice_notifier(struct notifier_block *nb);
2242int unregister_netdevice_notifier(struct notifier_block *nb); 2245int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -3803,12 +3806,30 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
3803 3806
3804void *netdev_lower_get_next(struct net_device *dev, 3807void *netdev_lower_get_next(struct net_device *dev,
3805 struct list_head **iter); 3808 struct list_head **iter);
3809
3806#define netdev_for_each_lower_dev(dev, ldev, iter) \ 3810#define netdev_for_each_lower_dev(dev, ldev, iter) \
3807 for (iter = (dev)->adj_list.lower.next, \ 3811 for (iter = (dev)->adj_list.lower.next, \
3808 ldev = netdev_lower_get_next(dev, &(iter)); \ 3812 ldev = netdev_lower_get_next(dev, &(iter)); \
3809 ldev; \ 3813 ldev; \
3810 ldev = netdev_lower_get_next(dev, &(iter))) 3814 ldev = netdev_lower_get_next(dev, &(iter)))
3811 3815
3816struct net_device *netdev_all_lower_get_next(struct net_device *dev,
3817 struct list_head **iter);
3818struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
3819 struct list_head **iter);
3820
3821#define netdev_for_each_all_lower_dev(dev, ldev, iter) \
3822 for (iter = (dev)->all_adj_list.lower.next, \
3823 ldev = netdev_all_lower_get_next(dev, &(iter)); \
3824 ldev; \
3825 ldev = netdev_all_lower_get_next(dev, &(iter)))
3826
3827#define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
3828 for (iter = (dev)->all_adj_list.lower.next, \
3829 ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
3830 ldev; \
3831 ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
3832
3812void *netdev_adjacent_get_private(struct list_head *adj_list); 3833void *netdev_adjacent_get_private(struct list_head *adj_list);
3813void *netdev_lower_get_first_private_rcu(struct net_device *dev); 3834void *netdev_lower_get_first_private_rcu(struct net_device *dev);
3814struct net_device *netdev_master_upper_dev_get(struct net_device *dev); 3835struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
@@ -3824,6 +3845,10 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
3824 struct net_device *lower_dev); 3845 struct net_device *lower_dev);
3825void netdev_lower_state_changed(struct net_device *lower_dev, 3846void netdev_lower_state_changed(struct net_device *lower_dev,
3826 void *lower_state_info); 3847 void *lower_state_info);
3848int netdev_default_l2upper_neigh_construct(struct net_device *dev,
3849 struct neighbour *n);
3850void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
3851 struct neighbour *n);
3827 3852
3828/* RSS keys are 40 or 52 bytes long */ 3853/* RSS keys are 40 or 52 bytes long */
3829#define NETDEV_RSS_KEY_LEN 52 3854#define NETDEV_RSS_KEY_LEN 52
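Editor's sketch for the netdevice.h hunks above: ndo_neigh_construct/ndo_neigh_destroy now receive the net_device explicitly, and the new netdev_for_each_all_lower_dev{,_rcu} iterators walk all_adj_list (direct and indirect lower devices) instead of only the first level. A minimal, hedged example of the iterator; my_handle_lower() is a hypothetical callback, not part of this patch:

static void my_handle_lower(struct net_device *ldev);	/* hypothetical */

static void my_walk_all_lowers(struct net_device *dev)
{
	struct net_device *ldev;
	struct list_head *iter;

	/* Visits every lower device, including lowers-of-lowers;
	 * caller holds rtnl (use the _rcu variant under RCU). */
	netdev_for_each_all_lower_dev(dev, ldev, iter)
		my_handle_lower(ldev);
}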
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index dc4f58a3cdcc..e94e81ab2b58 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -6,6 +6,10 @@
6#include <linux/static_key.h> 6#include <linux/static_key.h>
7#include <uapi/linux/netfilter/x_tables.h> 7#include <uapi/linux/netfilter/x_tables.h>
8 8
9/* Test a struct->invflags and a boolean for inequality */
10#define NF_INVF(ptr, flag, boolean) \
11 ((boolean) ^ !!((ptr)->invflags & (flag)))
12
9/** 13/**
10 * struct xt_action_param - parameters for matches/targets 14 * struct xt_action_param - parameters for matches/targets
11 * 15 *
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 2ea517c7c6b9..984b2112c77b 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -115,8 +115,6 @@ extern unsigned int ebt_do_table(struct sk_buff *skb,
115 const struct nf_hook_state *state, 115 const struct nf_hook_state *state,
116 struct ebt_table *table); 116 struct ebt_table *table);
117 117
118/* Used in the kernel match() functions */
119#define FWINV(bool,invflg) ((bool) ^ !!(info->invflags & invflg))
120/* True if the hook mask denotes that the rule is in a base chain, 118/* True if the hook mask denotes that the rule is in a base chain,
121 * used in the check() functions */ 119 * used in the check() functions */
122#define BASE_CHAIN (par->hook_mask & (1 << NF_BR_NUMHOOKS)) 120#define BASE_CHAIN (par->hook_mask & (1 << NF_BR_NUMHOOKS))
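NF_INVF() in x_tables.h replaces the per-module FWINV() macro removed from ebtables.h above; the new form takes the struct pointer explicitly instead of capturing a local info variable. A hedged sketch of a match helper built on it, where MY_INV_FLAG is a placeholder inversion bit, not a name from this patch:

#define MY_INV_FLAG 0x01	/* hypothetical inversion bit */

static bool my_match(const struct ebt_entry *info, bool cond)
{
	/* Expands to: (cond) ^ !!(info->invflags & MY_INV_FLAG) */
	return NF_INVF(info, MY_INV_FLAG, cond);
}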
diff --git a/include/linux/of.h b/include/linux/of.h
index c7292e8ea080..74eb28cadbef 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -614,7 +614,7 @@ static inline struct device_node *of_parse_phandle(const struct device_node *np,
614 return NULL; 614 return NULL;
615} 615}
616 616
617static inline int of_parse_phandle_with_args(struct device_node *np, 617static inline int of_parse_phandle_with_args(const struct device_node *np,
618 const char *list_name, 618 const char *list_name,
619 const char *cells_name, 619 const char *cells_name,
620 int index, 620 int index,
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 6c8cb9aa4c00..4b04587d0441 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -25,6 +25,8 @@ struct phy_device *of_phy_attach(struct net_device *dev,
25 25
26extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); 26extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
27extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np); 27extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np);
28extern int of_phy_register_fixed_link(struct device_node *np);
29extern bool of_phy_is_fixed_link(struct device_node *np);
28 30
29#else /* CONFIG_OF */ 31#else /* CONFIG_OF */
30static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) 32static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
@@ -67,12 +69,6 @@ static inline int of_mdio_parse_addr(struct device *dev,
67{ 69{
68 return -ENOSYS; 70 return -ENOSYS;
69} 71}
70#endif /* CONFIG_OF */
71
72#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_FIXED_PHY)
73extern int of_phy_register_fixed_link(struct device_node *np);
74extern bool of_phy_is_fixed_link(struct device_node *np);
75#else
76static inline int of_phy_register_fixed_link(struct device_node *np) 72static inline int of_phy_register_fixed_link(struct device_node *np)
77{ 73{
78 return -ENOSYS; 74 return -ENOSYS;
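With this move, of_phy_is_fixed_link() and of_phy_register_fixed_link() are declared in the plain CONFIG_OF branch, and their -ENOSYS stubs fold into the existing #else. The usual consumer pattern in an ethernet driver's probe path looks roughly like the following hedged sketch (np is the device's OF node; cleanup is elided):

static int my_setup_phy(struct device_node *np)
{
	int err;

	if (of_phy_is_fixed_link(np)) {
		/* Creates the fixed PHY described by the
		 * 'fixed-link' property of this node. */
		err = of_phy_register_fixed_link(np);
		if (err)
			return err;
	}
	return 0;
}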
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index f6e9e85164e8..b969e9443962 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -8,7 +8,7 @@ struct pci_dev;
8struct of_phandle_args; 8struct of_phandle_args;
9struct device_node; 9struct device_node;
10 10
11#ifdef CONFIG_OF 11#ifdef CONFIG_OF_PCI
12int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq); 12int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq);
13struct device_node *of_pci_find_child_device(struct device_node *parent, 13struct device_node *of_pci_find_child_device(struct device_node *parent,
14 unsigned int devfn); 14 unsigned int devfn);
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index ad2f67054372..c201060e0c6d 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -31,6 +31,13 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
31int of_reserved_mem_device_init(struct device *dev); 31int of_reserved_mem_device_init(struct device *dev);
32void of_reserved_mem_device_release(struct device *dev); 32void of_reserved_mem_device_release(struct device *dev);
33 33
34int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
35 phys_addr_t align,
36 phys_addr_t start,
37 phys_addr_t end,
38 bool nomap,
39 phys_addr_t *res_base);
40
34void fdt_init_reserved_mem(void); 41void fdt_init_reserved_mem(void);
35void fdt_reserved_mem_save_node(unsigned long node, const char *uname, 42void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
36 phys_addr_t base, phys_addr_t size); 43 phys_addr_t base, phys_addr_t size);
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 562a65e8bcc0..2052011bf9fb 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -102,7 +102,7 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r)
102 */ 102 */
103static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) 103static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
104{ 104{
105 if (r->queue[r->producer]) 105 if (unlikely(!r->size) || r->queue[r->producer])
106 return -ENOSPC; 106 return -ENOSPC;
107 107
108 r->queue[r->producer++] = ptr; 108 r->queue[r->producer++] = ptr;
@@ -164,7 +164,9 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
164 */ 164 */
165static inline void *__ptr_ring_peek(struct ptr_ring *r) 165static inline void *__ptr_ring_peek(struct ptr_ring *r)
166{ 166{
167 return r->queue[r->consumer]; 167 if (likely(r->size))
168 return r->queue[r->consumer];
169 return NULL;
168} 170}
169 171
170/* Note: callers invoking this in a loop must use a compiler barrier, 172/* Note: callers invoking this in a loop must use a compiler barrier,
@@ -347,20 +349,14 @@ static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
347 return 0; 349 return 0;
348} 350}
349 351
350static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, 352static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
351 void (*destroy)(void *)) 353 int size, gfp_t gfp,
354 void (*destroy)(void *))
352{ 355{
353 unsigned long flags;
354 int producer = 0; 356 int producer = 0;
355 void **queue = __ptr_ring_init_queue_alloc(size, gfp);
356 void **old; 357 void **old;
357 void *ptr; 358 void *ptr;
358 359
359 if (!queue)
360 return -ENOMEM;
361
362 spin_lock_irqsave(&(r)->producer_lock, flags);
363
364 while ((ptr = ptr_ring_consume(r))) 360 while ((ptr = ptr_ring_consume(r)))
365 if (producer < size) 361 if (producer < size)
366 queue[producer++] = ptr; 362 queue[producer++] = ptr;
@@ -373,6 +369,23 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
373 old = r->queue; 369 old = r->queue;
374 r->queue = queue; 370 r->queue = queue;
375 371
372 return old;
373}
374
375static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
376 void (*destroy)(void *))
377{
378 unsigned long flags;
379 void **queue = __ptr_ring_init_queue_alloc(size, gfp);
380 void **old;
381
382 if (!queue)
383 return -ENOMEM;
384
385 spin_lock_irqsave(&(r)->producer_lock, flags);
386
387 old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy);
388
376 spin_unlock_irqrestore(&(r)->producer_lock, flags); 389 spin_unlock_irqrestore(&(r)->producer_lock, flags);
377 390
378 kfree(old); 391 kfree(old);
@@ -380,6 +393,48 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
380 return 0; 393 return 0;
381} 394}
382 395
396static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
397 int size,
398 gfp_t gfp, void (*destroy)(void *))
399{
400 unsigned long flags;
401 void ***queues;
402 int i;
403
404 queues = kmalloc(nrings * sizeof *queues, gfp);
405 if (!queues)
406 goto noqueues;
407
408 for (i = 0; i < nrings; ++i) {
409 queues[i] = __ptr_ring_init_queue_alloc(size, gfp);
410 if (!queues[i])
411 goto nomem;
412 }
413
414 for (i = 0; i < nrings; ++i) {
415 spin_lock_irqsave(&(rings[i])->producer_lock, flags);
416 queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
417 size, gfp, destroy);
418 spin_unlock_irqrestore(&(rings[i])->producer_lock, flags);
419 }
420
421 for (i = 0; i < nrings; ++i)
422 kfree(queues[i]);
423
424 kfree(queues);
425
426 return 0;
427
428nomem:
429 while (--i >= 0)
430 kfree(queues[i]);
431
432 kfree(queues);
433
434noqueues:
435 return -ENOMEM;
436}
437
383static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *)) 438static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
384{ 439{
385 void *ptr; 440 void *ptr;
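Three ptr_ring changes cooperate here: produce/peek now tolerate a zero-size ring, the resize path is split so __ptr_ring_swap_queue() copies entries under a caller-held producer lock, and ptr_ring_resize_multiple() batches that swap across several rings with one up-front allocation pass. A hedged usage sketch, assuming two rings already set up with ptr_ring_init() and entries allocated with kmalloc():

static void my_destroy(void *ptr)
{
	kfree(ptr);	/* called for entries that no longer fit */
}

static int my_grow_rings(struct ptr_ring *a, struct ptr_ring *b, int size)
{
	struct ptr_ring *rings[] = { a, b };

	/* Allocates all new queues first, then swaps each ring
	 * under its producer lock; returns -ENOMEM on failure. */
	return ptr_ring_resize_multiple(rings, 2, size,
					GFP_KERNEL, my_destroy);
}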
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 17018f3c066e..c038ae36b10e 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -235,6 +235,9 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
235 if (!pwm) 235 if (!pwm)
236 return -EINVAL; 236 return -EINVAL;
237 237
238 if (duty_ns < 0 || period_ns < 0)
239 return -EINVAL;
240
238 pwm_get_state(pwm, &state); 241 pwm_get_state(pwm, &state);
239 if (state.duty_cycle == duty_ns && state.period == period_ns) 242 if (state.duty_cycle == duty_ns && state.period == period_ns)
240 return 0; 243 return 0;
@@ -461,6 +464,8 @@ static inline bool pwm_can_sleep(struct pwm_device *pwm)
461 464
462static inline void pwm_apply_args(struct pwm_device *pwm) 465static inline void pwm_apply_args(struct pwm_device *pwm)
463{ 466{
467 struct pwm_state state = { };
468
464 /* 469 /*
465 * PWM users calling pwm_apply_args() expect to have a fresh config 470 * PWM users calling pwm_apply_args() expect to have a fresh config
466 * where the polarity and period are set according to pwm_args info. 471 * where the polarity and period are set according to pwm_args info.
@@ -473,18 +478,20 @@ static inline void pwm_apply_args(struct pwm_device *pwm)
473 * at startup (even if they are actually enabled), thus authorizing 478 * at startup (even if they are actually enabled), thus authorizing
474 * polarity setting. 479 * polarity setting.
475 * 480 *
476 * Instead of setting ->enabled to false, we call pwm_disable() 481 * To fulfill this requirement, we apply a new state which disables
477 * before pwm_set_polarity() to ensure that everything is configured 482 * the PWM device and sets the reference period and polarity config.
478 * as expected, and the PWM is really disabled when the user request
479 * it.
480 * 483 *
481 * Note that PWM users requiring a smooth handover between the 484 * Note that PWM users requiring a smooth handover between the
482 * bootloader and the kernel (like critical regulators controlled by 485 * bootloader and the kernel (like critical regulators controlled by
483 * PWM devices) will have to switch to the atomic API and avoid calling 486 * PWM devices) will have to switch to the atomic API and avoid calling
484 * pwm_apply_args(). 487 * pwm_apply_args().
485 */ 488 */
486 pwm_disable(pwm); 489
487 pwm_set_polarity(pwm, pwm->args.polarity); 490 state.enabled = false;
491 state.polarity = pwm->args.polarity;
492 state.period = pwm->args.period;
493
494 pwm_apply_state(pwm, &state);
488} 495}
489 496
490struct pwm_lookup { 497struct pwm_lookup {
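Per the pwm.h hunks above, pwm_config() now rejects negative duty/period values up front, and pwm_apply_args() builds a single pwm_state (disabled, with the reference period and polarity from pwm->args) and applies it atomically, replacing the old pwm_disable() + pwm_set_polarity() pair. A hedged sketch of the legacy consumer flow this helper serves; the NULL con_id passed to devm_pwm_get() is an assumption about the caller:

static int my_pwm_probe(struct device *dev)
{
	struct pwm_device *pwm = devm_pwm_get(dev, NULL);

	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	/* Reset to the reference config from pwm_args before the
	 * first pwm_config()/pwm_enable(); atomic-API users that
	 * need a smooth bootloader handover skip this call. */
	pwm_apply_args(pwm);
	return 0;
}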
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 71d523b4bc54..4475a9d8ae15 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -49,6 +49,7 @@ struct qed_start_vport_params {
49 bool drop_ttl0; 49 bool drop_ttl0;
50 u8 vport_id; 50 u8 vport_id;
51 u16 mtu; 51 u16 mtu;
52 bool clear_stats;
52}; 53};
53 54
54struct qed_stop_rxq_params { 55struct qed_stop_rxq_params {
diff --git a/include/linux/reset.h b/include/linux/reset.h
index ec0306ce7b92..45a4abeb6acb 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -84,8 +84,8 @@ static inline struct reset_control *__devm_reset_control_get(
84#endif /* CONFIG_RESET_CONTROLLER */ 84#endif /* CONFIG_RESET_CONTROLLER */
85 85
86/** 86/**
87 * reset_control_get - Lookup and obtain an exclusive reference to a 87 * reset_control_get_exclusive - Lookup and obtain an exclusive reference
88 * reset controller. 88 * to a reset controller.
89 * @dev: device to be reset by the controller 89 * @dev: device to be reset by the controller
90 * @id: reset line name 90 * @id: reset line name
91 * 91 *
@@ -98,8 +98,8 @@ static inline struct reset_control *__devm_reset_control_get(
98 * 98 *
99 * Use of id names is optional. 99 * Use of id names is optional.
100 */ 100 */
101static inline struct reset_control *__must_check reset_control_get( 101static inline struct reset_control *
102 struct device *dev, const char *id) 102__must_check reset_control_get_exclusive(struct device *dev, const char *id)
103{ 103{
104#ifndef CONFIG_RESET_CONTROLLER 104#ifndef CONFIG_RESET_CONTROLLER
105 WARN_ON(1); 105 WARN_ON(1);
@@ -107,12 +107,6 @@ static inline struct reset_control *__must_check reset_control_get(
107 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0); 107 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
108} 108}
109 109
110static inline struct reset_control *reset_control_get_optional(
111 struct device *dev, const char *id)
112{
113 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
114}
115
116/** 110/**
117 * reset_control_get_shared - Lookup and obtain a shared reference to a 111 * reset_control_get_shared - Lookup and obtain a shared reference to a
118 * reset controller. 112 * reset controller.
@@ -141,9 +135,21 @@ static inline struct reset_control *reset_control_get_shared(
141 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1); 135 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
142} 136}
143 137
138static inline struct reset_control *reset_control_get_optional_exclusive(
139 struct device *dev, const char *id)
140{
141 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
142}
143
144static inline struct reset_control *reset_control_get_optional_shared(
145 struct device *dev, const char *id)
146{
147 return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
148}
149
144/** 150/**
145 * of_reset_control_get - Lookup and obtain an exclusive reference to a 151 * of_reset_control_get_exclusive - Lookup and obtain an exclusive reference
146 * reset controller. 152 * to a reset controller.
147 * @node: device to be reset by the controller 153 * @node: device to be reset by the controller
148 * @id: reset line name 154 * @id: reset line name
149 * 155 *
@@ -151,15 +157,41 @@ static inline struct reset_control *reset_control_get_shared(
151 * 157 *
152 * Use of id names is optional. 158 * Use of id names is optional.
153 */ 159 */
154static inline struct reset_control *of_reset_control_get( 160static inline struct reset_control *of_reset_control_get_exclusive(
155 struct device_node *node, const char *id) 161 struct device_node *node, const char *id)
156{ 162{
157 return __of_reset_control_get(node, id, 0, 0); 163 return __of_reset_control_get(node, id, 0, 0);
158} 164}
159 165
160/** 166/**
161 * of_reset_control_get_by_index - Lookup and obtain an exclusive reference to 167 * of_reset_control_get_shared - Lookup and obtain a shared reference
162 * a reset controller by index. 168 * to a reset controller.
169 * @node: device to be reset by the controller
170 * @id: reset line name
171 *
172 * When a reset-control is shared, the behavior of reset_control_assert /
173 * deassert is changed, the reset-core will keep track of a deassert_count
174 * and only (re-)assert the reset after reset_control_assert has been called
175 * as many times as reset_control_deassert was called. Also see the remark
176 * about shared reset-controls in the reset_control_assert docs.
177 *
178 * Calling reset_control_assert without first calling reset_control_deassert
179 * is not allowed on a shared reset control. Calling reset_control_reset is
180 * also not allowed on a shared reset control.
181 * Returns a struct reset_control or IS_ERR() condition containing errno.
182 *
183 * Use of id names is optional.
184 */
185static inline struct reset_control *of_reset_control_get_shared(
186 struct device_node *node, const char *id)
187{
188 return __of_reset_control_get(node, id, 0, 1);
189}
190
191/**
192 * of_reset_control_get_exclusive_by_index - Lookup and obtain an exclusive
193 * reference to a reset controller
194 * by index.
163 * @node: device to be reset by the controller 195 * @node: device to be reset by the controller
164 * @index: index of the reset controller 196 * @index: index of the reset controller
165 * 197 *
@@ -167,49 +199,60 @@ static inline struct reset_control *of_reset_control_get(
167 * in whatever order. Returns a struct reset_control or IS_ERR() condition 199 * in whatever order. Returns a struct reset_control or IS_ERR() condition
168 * containing errno. 200 * containing errno.
169 */ 201 */
170static inline struct reset_control *of_reset_control_get_by_index( 202static inline struct reset_control *of_reset_control_get_exclusive_by_index(
171 struct device_node *node, int index) 203 struct device_node *node, int index)
172{ 204{
173 return __of_reset_control_get(node, NULL, index, 0); 205 return __of_reset_control_get(node, NULL, index, 0);
174} 206}
175 207
176/** 208/**
177 * devm_reset_control_get - resource managed reset_control_get() 209 * of_reset_control_get_shared_by_index - Lookup and obtain a shared
178 * @dev: device to be reset by the controller 210 * reference to a reset controller
179 * @id: reset line name 211 * by index.
212 * @node: device to be reset by the controller
213 * @index: index of the reset controller
214 *
215 * When a reset-control is shared, the behavior of reset_control_assert /
216 * deassert is changed, the reset-core will keep track of a deassert_count
217 * and only (re-)assert the reset after reset_control_assert has been called
218 * as many times as reset_control_deassert was called. Also see the remark
219 * about shared reset-controls in the reset_control_assert docs.
220 *
221 * Calling reset_control_assert without first calling reset_control_deassert
222 * is not allowed on a shared reset control. Calling reset_control_reset is
223 * also not allowed on a shared reset control.
224 * Returns a struct reset_control or IS_ERR() condition containing errno.
180 * 225 *
181 * Managed reset_control_get(). For reset controllers returned from this 226 * This is to be used to perform a list of resets for a device or power domain
182 * function, reset_control_put() is called automatically on driver detach. 227 * in whatever order. Returns a struct reset_control or IS_ERR() condition
183 * See reset_control_get() for more information. 228 * containing errno.
184 */ 229 */
185static inline struct reset_control *__must_check devm_reset_control_get( 230static inline struct reset_control *of_reset_control_get_shared_by_index(
186 struct device *dev, const char *id) 231 struct device_node *node, int index)
187{
188#ifndef CONFIG_RESET_CONTROLLER
189 WARN_ON(1);
190#endif
191 return __devm_reset_control_get(dev, id, 0, 0);
192}
193
194static inline struct reset_control *devm_reset_control_get_optional(
195 struct device *dev, const char *id)
196{ 232{
197 return __devm_reset_control_get(dev, id, 0, 0); 233 return __of_reset_control_get(node, NULL, index, 1);
198} 234}
199 235
200/** 236/**
201 * devm_reset_control_get_by_index - resource managed reset_control_get 237 * devm_reset_control_get_exclusive - resource managed
238 * reset_control_get_exclusive()
202 * @dev: device to be reset by the controller 239 * @dev: device to be reset by the controller
203 * @index: index of the reset controller 240 * @id: reset line name
204 * 241 *
205 * Managed reset_control_get(). For reset controllers returned from this 242 * Managed reset_control_get_exclusive(). For reset controllers returned
206 * function, reset_control_put() is called automatically on driver detach. 243 * from this function, reset_control_put() is called automatically on driver
207 * See reset_control_get() for more information. 244 * detach.
245 *
246 * See reset_control_get_exclusive() for more information.
208 */ 247 */
209static inline struct reset_control *devm_reset_control_get_by_index( 248static inline struct reset_control *
210 struct device *dev, int index) 249__must_check devm_reset_control_get_exclusive(struct device *dev,
250 const char *id)
211{ 251{
212 return __devm_reset_control_get(dev, NULL, index, 0); 252#ifndef CONFIG_RESET_CONTROLLER
253 WARN_ON(1);
254#endif
255 return __devm_reset_control_get(dev, id, 0, 0);
213} 256}
214 257
215/** 258/**
@@ -227,6 +270,36 @@ static inline struct reset_control *devm_reset_control_get_shared(
227 return __devm_reset_control_get(dev, id, 0, 1); 270 return __devm_reset_control_get(dev, id, 0, 1);
228} 271}
229 272
273static inline struct reset_control *devm_reset_control_get_optional_exclusive(
274 struct device *dev, const char *id)
275{
276 return __devm_reset_control_get(dev, id, 0, 0);
277}
278
279static inline struct reset_control *devm_reset_control_get_optional_shared(
280 struct device *dev, const char *id)
281{
282 return __devm_reset_control_get(dev, id, 0, 1);
283}
284
285/**
286 * devm_reset_control_get_exclusive_by_index - resource managed
287 * reset_control_get_exclusive()
288 * @dev: device to be reset by the controller
289 * @index: index of the reset controller
290 *
291 * Managed reset_control_get_exclusive(). For reset controllers returned from
292 * this function, reset_control_put() is called automatically on driver
293 * detach.
294 *
295 * See reset_control_get_exclusive() for more information.
296 */
297static inline struct reset_control *
298devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
299{
300 return __devm_reset_control_get(dev, NULL, index, 0);
301}
302
230/** 303/**
231 * devm_reset_control_get_shared_by_index - resource managed 304 * devm_reset_control_get_shared_by_index - resource managed
232 * reset_control_get_shared 305 * reset_control_get_shared
@@ -237,10 +310,60 @@ static inline struct reset_control *devm_reset_control_get_shared(
237 * this function, reset_control_put() is called automatically on driver detach. 310 * this function, reset_control_put() is called automatically on driver detach.
238 * See reset_control_get_shared() for more information. 311 * See reset_control_get_shared() for more information.
239 */ 312 */
240static inline struct reset_control *devm_reset_control_get_shared_by_index( 313static inline struct reset_control *
241 struct device *dev, int index) 314devm_reset_control_get_shared_by_index(struct device *dev, int index)
242{ 315{
243 return __devm_reset_control_get(dev, NULL, index, 1); 316 return __devm_reset_control_get(dev, NULL, index, 1);
244} 317}
245 318
319/*
320 * TEMPORARY calls to use during transition:
321 *
322 * of_reset_control_get() => of_reset_control_get_exclusive()
323 *
324 * These inline function calls will be removed once all consumers
325 * have been moved over to the new explicit API.
326 */
327static inline struct reset_control *reset_control_get(
328 struct device *dev, const char *id)
329{
330 return reset_control_get_exclusive(dev, id);
331}
332
333static inline struct reset_control *reset_control_get_optional(
334 struct device *dev, const char *id)
335{
336 return reset_control_get_optional_exclusive(dev, id);
337}
338
339static inline struct reset_control *of_reset_control_get(
340 struct device_node *node, const char *id)
341{
342 return of_reset_control_get_exclusive(node, id);
343}
344
345static inline struct reset_control *of_reset_control_get_by_index(
346 struct device_node *node, int index)
347{
348 return of_reset_control_get_exclusive_by_index(node, index);
349}
350
351static inline struct reset_control *devm_reset_control_get(
352 struct device *dev, const char *id)
353{
354 return devm_reset_control_get_exclusive(dev, id);
355}
356
357static inline struct reset_control *devm_reset_control_get_optional(
358 struct device *dev, const char *id)
359{
360 return devm_reset_control_get_optional_exclusive(dev, id);
361
362}
363
364static inline struct reset_control *devm_reset_control_get_by_index(
365 struct device *dev, int index)
366{
367 return devm_reset_control_get_exclusive_by_index(dev, index);
368}
246#endif 369#endif
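The reset.h rework makes exclusive vs. shared and mandatory vs. optional explicit in each function name, keeping the old names as temporary wrappers until consumers migrate. A hedged sketch of a consumer on the new API; the "bus" reset line name is an illustrative assumption, not from this patch:

static int my_deassert_bus(struct device *dev)
{
	struct reset_control *rst;

	/* Optional + shared lookup: the reset core refcounts
	 * deassert/assert across all users of a shared line. */
	rst = devm_reset_control_get_optional_shared(dev, "bus");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	return reset_control_deassert(rst);
}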
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6e42ada26345..253538f29ade 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3007,7 +3007,7 @@ static inline int object_is_on_stack(void *obj)
3007 return (obj >= stack) && (obj < (stack + THREAD_SIZE)); 3007 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
3008} 3008}
3009 3009
3010extern void thread_info_cache_init(void); 3010extern void thread_stack_cache_init(void);
3011 3011
3012#ifdef CONFIG_DEBUG_STACK_USAGE 3012#ifdef CONFIG_DEBUG_STACK_USAGE
3013static inline unsigned long stack_not_used(struct task_struct *p) 3013static inline unsigned long stack_not_used(struct task_struct *p)
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 7973a821ac58..ead97654c4e9 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -277,7 +277,10 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
277 277
278static inline int raw_read_seqcount_latch(seqcount_t *s) 278static inline int raw_read_seqcount_latch(seqcount_t *s)
279{ 279{
280 return lockless_dereference(s)->sequence; 280 int seq = READ_ONCE(s->sequence);
281 /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
282 smp_read_barrier_depends();
283 return seq;
281} 284}
282 285
283/** 286/**
@@ -331,7 +334,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
331 * unsigned seq, idx; 334 * unsigned seq, idx;
332 * 335 *
333 * do { 336 * do {
334 * seq = lockless_dereference(latch)->seq; 337 * seq = raw_read_seqcount_latch(&latch->seq);
335 * 338 *
336 * idx = seq & 0x01; 339 * idx = seq & 0x01;
337 * entry = data_query(latch->data[idx], ...); 340 * entry = data_query(latch->data[idx], ...);
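raw_read_seqcount_latch() now pairs READ_ONCE() with smp_read_barrier_depends() instead of applying lockless_dereference() to a non-pointer, and the latch documentation is updated to match. The documented read side then reads as below; data_query(), latch, and the trailing "..." are the documentation's own placeholders, kept as-is:

do {
	seq = raw_read_seqcount_latch(&latch->seq);
	idx = seq & 0x01;
	entry = data_query(latch->data[idx], ...);
	smp_rmb();
} while (seq != latch->seq.sequence);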
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index 678bfbf78ac4..f4dfade428f0 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -151,16 +151,25 @@ static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp)
151 return ptr_ring_init(&a->ring, size, gfp); 151 return ptr_ring_init(&a->ring, size, gfp);
152} 152}
153 153
154void __skb_array_destroy_skb(void *ptr) 154static void __skb_array_destroy_skb(void *ptr)
155{ 155{
156 kfree_skb(ptr); 156 kfree_skb(ptr);
157} 157}
158 158
159int skb_array_resize(struct skb_array *a, int size, gfp_t gfp) 159static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
160{ 160{
161 return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb); 161 return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
162} 162}
163 163
164static inline int skb_array_resize_multiple(struct skb_array **rings,
165 int nrings, int size, gfp_t gfp)
166{
167 BUILD_BUG_ON(offsetof(struct skb_array, ring));
168 return ptr_ring_resize_multiple((struct ptr_ring **)rings,
169 nrings, size, gfp,
170 __skb_array_destroy_skb);
171}
172
164static inline void skb_array_cleanup(struct skb_array *a) 173static inline void skb_array_cleanup(struct skb_array *a)
165{ 174{
166 ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb); 175 ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb);
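Besides making the header-local helpers static, this adds skb_array_resize_multiple(), layered on ptr_ring_resize_multiple(); the BUILD_BUG_ON proves struct skb_array starts with its ring, so the pointer cast is layout-safe. A hedged sketch for a driver resizing all of its queues at once; my_resize_queues() is an illustrative name:

static int my_resize_queues(struct skb_array **queues, int n, int size)
{
	/* Swaps every queue under its producer lock; skbs that no
	 * longer fit are freed via __skb_array_destroy_skb(). */
	return skb_array_resize_multiple(queues, n, size, GFP_KERNEL);
}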
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index dc0fca747c5e..6f0b3e0adc73 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -37,6 +37,7 @@
37#include <net/flow_dissector.h> 37#include <net/flow_dissector.h>
38#include <linux/splice.h> 38#include <linux/splice.h>
39#include <linux/in6.h> 39#include <linux/in6.h>
40#include <linux/if_packet.h>
40#include <net/flow.h> 41#include <net/flow.h>
41 42
42/* The interface for checksum offload between the stack and networking drivers 43/* The interface for checksum offload between the stack and networking drivers
@@ -881,6 +882,15 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
881 return (struct rtable *)skb_dst(skb); 882 return (struct rtable *)skb_dst(skb);
882} 883}
883 884
 885/* For mangling skb->pkt_type from the user space side, by applications
 886 * such as nft, tc, etc., we only allow a conservative subset of
 887 * possible pkt_types to be set.
 888 */
889static inline bool skb_pkt_type_ok(u32 ptype)
890{
891 return ptype <= PACKET_OTHERHOST;
892}
893
884void kfree_skb(struct sk_buff *skb); 894void kfree_skb(struct sk_buff *skb);
885void kfree_skb_list(struct sk_buff *segs); 895void kfree_skb_list(struct sk_buff *segs);
886void skb_tx_error(struct sk_buff *skb); 896void skb_tx_error(struct sk_buff *skb);
@@ -1069,6 +1079,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
1069} 1079}
1070 1080
1071void __skb_get_hash(struct sk_buff *skb); 1081void __skb_get_hash(struct sk_buff *skb);
1082u32 __skb_get_hash_symmetric(struct sk_buff *skb);
1072u32 skb_get_poff(const struct sk_buff *skb); 1083u32 skb_get_poff(const struct sk_buff *skb);
1073u32 __skb_get_poff(const struct sk_buff *skb, void *data, 1084u32 __skb_get_poff(const struct sk_buff *skb, void *data,
1074 const struct flow_keys *keys, int hlen); 1085 const struct flow_keys *keys, int hlen);
@@ -2877,6 +2888,25 @@ static inline void skb_postpush_rcsum(struct sk_buff *skb,
2877} 2888}
2878 2889
2879/** 2890/**
2891 * skb_push_rcsum - push skb and update receive checksum
2892 * @skb: buffer to update
2893 * @len: length of data pulled
2894 *
2895 * This function performs an skb_push on the packet and updates
2896 * the CHECKSUM_COMPLETE checksum. It should be used on
2897 * receive path processing instead of skb_push unless you know
2898 * that the checksum difference is zero (e.g., a valid IP header)
2899 * or you are setting ip_summed to CHECKSUM_NONE.
2900 */
2901static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
2902 unsigned int len)
2903{
2904 skb_push(skb, len);
2905 skb_postpush_rcsum(skb, skb->data, len);
2906 return skb->data;
2907}
2908
2909/**
2880 * pskb_trim_rcsum - trim received skb and update checksum 2910 * pskb_trim_rcsum - trim received skb and update checksum
2881 * @skb: buffer to trim 2911 * @skb: buffer to trim
2882 * @len: new length 2912 * @len: new length
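Two of the skbuff.h additions above are small guard/helper APIs: skb_pkt_type_ok() bounds user-supplied pkt_type values to the conservative set (<= PACKET_OTHERHOST), and skb_push_rcsum() mirrors skb_postpush_rcsum() for the push-then-fix-checksum pattern on the receive path. A hedged sketch of the guard in use; my_set_pkt_type() is an illustrative name only:

static int my_set_pkt_type(struct sk_buff *skb, u32 ptype)
{
	if (!skb_pkt_type_ok(ptype))
		return -EINVAL;	/* reject exotic pkt_types */

	skb->pkt_type = ptype;
	return 0;
}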
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index 4018b48f2b3b..a0596ca0e80a 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -36,6 +36,9 @@ enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
36{ 36{
37 switch (sk->sk_family) { 37 switch (sk->sk_family) {
38 case AF_INET: 38 case AF_INET:
39 if (sk->sk_type == SOCK_RAW)
40 return SKNLGRP_NONE;
41
39 switch (sk->sk_protocol) { 42 switch (sk->sk_protocol) {
40 case IPPROTO_TCP: 43 case IPPROTO_TCP:
41 return SKNLGRP_INET_TCP_DESTROY; 44 return SKNLGRP_INET_TCP_DESTROY;
@@ -45,6 +48,9 @@ enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
45 return SKNLGRP_NONE; 48 return SKNLGRP_NONE;
46 } 49 }
47 case AF_INET6: 50 case AF_INET6:
51 if (sk->sk_type == SOCK_RAW)
52 return SKNLGRP_NONE;
53
48 switch (sk->sk_protocol) { 54 switch (sk->sk_protocol) {
49 case IPPROTO_TCP: 55 case IPPROTO_TCP:
50 return SKNLGRP_INET6_TCP_DESTROY; 56 return SKNLGRP_INET6_TCP_DESTROY;
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 0507dbfbf63c..705840e0438f 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -141,5 +141,6 @@ struct plat_stmmacenet_data {
141 struct stmmac_axi *axi; 141 struct stmmac_axi *axi;
142 int has_gmac4; 142 int has_gmac4;
143 bool tso_en; 143 bool tso_en;
144 int mac_port_sel_speed;
144}; 145};
145#endif 146#endif
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 19c659d1c0f8..b6810c92b8bb 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -137,8 +137,6 @@ struct rpc_create_args {
137#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9) 137#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
138 138
139struct rpc_clnt *rpc_create(struct rpc_create_args *args); 139struct rpc_clnt *rpc_create(struct rpc_create_args *args);
140struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
141 struct rpc_xprt *xprt);
142struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, 140struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
143 const struct rpc_program *, u32); 141 const struct rpc_program *, u32);
144struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); 142struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index b7dabc4baafd..79ba50856707 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -84,6 +84,7 @@ struct svc_xprt {
84 84
85 struct net *xpt_net; 85 struct net *xpt_net;
86 struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */ 86 struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */
87 struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */
87}; 88};
88 89
89static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) 90static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 5aa3834619a8..5e3e1b63dbb3 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -297,6 +297,7 @@ struct xprt_create {
297 size_t addrlen; 297 size_t addrlen;
298 const char *servername; 298 const char *servername;
299 struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ 299 struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
300 struct rpc_xprt_switch *bc_xps;
300 unsigned int flags; 301 unsigned int flags;
301}; 302};
302 303
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index e45abe7db9a6..ee517bef0db0 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -335,6 +335,8 @@ struct thermal_genl_event {
335 * @get_trend: a pointer to a function that reads the sensor temperature trend. 335 * @get_trend: a pointer to a function that reads the sensor temperature trend.
336 * @set_emul_temp: a pointer to a function that sets sensor emulated 336 * @set_emul_temp: a pointer to a function that sets sensor emulated
337 * temperature. 337 * temperature.
338 * @set_trip_temp: a pointer to a function that sets the trip temperature on
339 * hardware.
338 */ 340 */
339struct thermal_zone_of_device_ops { 341struct thermal_zone_of_device_ops {
340 int (*get_temp)(void *, int *); 342 int (*get_temp)(void *, int *);
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
index 966889a20ea3..e479033bd782 100644
--- a/include/linux/usb/ehci_def.h
+++ b/include/linux/usb/ehci_def.h
@@ -180,11 +180,11 @@ struct ehci_regs {
180 * PORTSCx 180 * PORTSCx
181 */ 181 */
182 /* HOSTPC: offset 0x84 */ 182 /* HOSTPC: offset 0x84 */
183 u32 hostpc[1]; /* HOSTPC extension */ 183 u32 hostpc[0]; /* HOSTPC extension */
184#define HOSTPC_PHCD (1<<22) /* Phy clock disable */ 184#define HOSTPC_PHCD (1<<22) /* Phy clock disable */
185#define HOSTPC_PSPD (3<<25) /* Port speed detection */ 185#define HOSTPC_PSPD (3<<25) /* Port speed detection */
186 186
187 u32 reserved5[16]; 187 u32 reserved5[17];
188 188
189 /* USBMODE_EX: offset 0xc8 */ 189 /* USBMODE_EX: offset 0xc8 */
190 u32 usbmode_ex; /* USB Device mode extension */ 190 u32 usbmode_ex; /* USB Device mode extension */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 457651bf45b0..fefe8b06a63d 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -1034,6 +1034,8 @@ static inline int usb_gadget_activate(struct usb_gadget *gadget)
1034 * @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL, 1034 * @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL,
1035 * this driver will be bound to any available UDC. 1035 * this driver will be bound to any available UDC.
1036 * @pending: UDC core private data used for deferred probe of this driver. 1036 * @pending: UDC core private data used for deferred probe of this driver.
 1037 * @match_existing_only: If udc is not found, return an error and don't add this
 1038 * gadget driver to the list of pending drivers
1037 * 1039 *
1038 * Devices are disabled till a gadget driver successfully bind()s, which 1040 * Devices are disabled till a gadget driver successfully bind()s, which
1039 * means the driver will handle setup() requests needed to enumerate (and 1041 * means the driver will handle setup() requests needed to enumerate (and
@@ -1097,6 +1099,7 @@ struct usb_gadget_driver {
1097 1099
1098 char *udc_name; 1100 char *udc_name;
1099 struct list_head pending; 1101 struct list_head pending;
1102 unsigned match_existing_only:1;
1100}; 1103};
1101 1104
1102 1105
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 0b3da40a525e..d315c8907869 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -142,10 +142,11 @@ enum musb_vbus_id_status {
142}; 142};
143 143
144#if IS_ENABLED(CONFIG_USB_MUSB_HDRC) 144#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
145void musb_mailbox(enum musb_vbus_id_status status); 145int musb_mailbox(enum musb_vbus_id_status status);
146#else 146#else
147static inline void musb_mailbox(enum musb_vbus_id_status status) 147static inline int musb_mailbox(enum musb_vbus_id_status status)
148{ 148{
149 return 0;
149} 150}
150#endif 151#endif
151 152
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h
index 98a938aabdfb..7a8d6037a4bb 100644
--- a/include/media/v4l2-mc.h
+++ b/include/media/v4l2-mc.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * v4l2-mc.h - Media Controller V4L2 types and prototypes 2 * v4l2-mc.h - Media Controller V4L2 types and prototypes
3 * 3 *
4 * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com> 4 * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@kernel.org>
5 * Copyright (C) 2006-2010 Nokia Corporation 5 * Copyright (C) 2006-2010 Nokia Corporation
6 * Copyright (c) 2016 Intel Corporation. 6 * Copyright (c) 2016 Intel Corporation.
7 * 7 *
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 791800ddd6d9..6360c259da6d 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -34,6 +34,9 @@
34 34
35#define BOND_DEFAULT_MIIMON 100 35#define BOND_DEFAULT_MIIMON 100
36 36
37#ifndef __long_aligned
38#define __long_aligned __attribute__((aligned((sizeof(long)))))
39#endif
37/* 40/*
38 * Less bad way to call ioctl from within the kernel; this needs to be 41 * Less bad way to call ioctl from within the kernel; this needs to be
39 * done some other way to get the call out of interrupt context. 42 * done some other way to get the call out of interrupt context.
@@ -138,7 +141,9 @@ struct bond_params {
138 struct reciprocal_value reciprocal_packets_per_slave; 141 struct reciprocal_value reciprocal_packets_per_slave;
139 u16 ad_actor_sys_prio; 142 u16 ad_actor_sys_prio;
140 u16 ad_user_port_key; 143 u16 ad_user_port_key;
141 u8 ad_actor_system[ETH_ALEN]; 144
145 /* 2 bytes of padding : see ether_addr_equal_64bits() */
146 u8 ad_actor_system[ETH_ALEN + 2];
142}; 147};
143 148
144struct bond_parm_tbl { 149struct bond_parm_tbl {
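The two pad bytes after ad_actor_system exist so ether_addr_equal_64bits() can compare the address with a single 64-bit load without reading past the structure, and __long_aligned keeps such fields word-aligned. A hedged sketch of the comparison this layout enables (ether_addr_equal_64bits() comes from <linux/etherdevice.h>; my_actor_matches() is illustrative):

static bool my_actor_matches(const struct bond_params *params,
			     const u8 addr[ETH_ALEN + 2])
{
	/* Both buffers must carry 2 bytes of tail padding for the
	 * 64-bit fast-path load. */
	return ether_addr_equal_64bits(params->ad_actor_system, addr);
}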
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 7bbb00d8b2cd..9c23f4d33e06 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -330,6 +330,9 @@ struct ieee80211_supported_band {
330 * in a separate chapter. 330 * in a separate chapter.
331 */ 331 */
332 332
333#define VHT_MUMIMO_GROUPS_DATA_LEN (WLAN_MEMBERSHIP_LEN +\
334 WLAN_USER_POSITION_LEN)
335
333/** 336/**
334 * struct vif_params - describes virtual interface parameters 337 * struct vif_params - describes virtual interface parameters
335 * @use_4addr: use 4-address frames 338 * @use_4addr: use 4-address frames
@@ -339,10 +342,13 @@ struct ieee80211_supported_band {
339 * This feature is only fully supported by drivers that enable the 342 * This feature is only fully supported by drivers that enable the
340 * %NL80211_FEATURE_MAC_ON_CREATE flag. Others may support creating 343 * %NL80211_FEATURE_MAC_ON_CREATE flag. Others may support creating
341 * only p2p devices with specified MAC. 344 * only p2p devices with specified MAC.
 345 * @vht_mumimo_groups: MU-MIMO groupID, used for monitoring only
 346 * packets belonging to that MU-MIMO groupID.
342 */ 347 */
343struct vif_params { 348struct vif_params {
344 int use_4addr; 349 int use_4addr;
345 u8 macaddr[ETH_ALEN]; 350 u8 macaddr[ETH_ALEN];
351 u8 vht_mumimo_groups[VHT_MUMIMO_GROUPS_DATA_LEN];
346}; 352};
347 353
348/** 354/**
@@ -774,6 +780,7 @@ enum station_parameters_apply_mask {
774 * (bitmask of BIT(NL80211_STA_FLAG_...)) 780 * (bitmask of BIT(NL80211_STA_FLAG_...))
775 * @listen_interval: listen interval or -1 for no change 781 * @listen_interval: listen interval or -1 for no change
776 * @aid: AID or zero for no change 782 * @aid: AID or zero for no change
783 * @peer_aid: mesh peer AID or zero for no change
777 * @plink_action: plink action to take 784 * @plink_action: plink action to take
778 * @plink_state: set the peer link state for a station 785 * @plink_state: set the peer link state for a station
779 * @ht_capa: HT capabilities of station 786 * @ht_capa: HT capabilities of station
@@ -805,6 +812,7 @@ struct station_parameters {
805 u32 sta_modify_mask; 812 u32 sta_modify_mask;
806 int listen_interval; 813 int listen_interval;
807 u16 aid; 814 u16 aid;
815 u16 peer_aid;
808 u8 supported_rates_len; 816 u8 supported_rates_len;
809 u8 plink_action; 817 u8 plink_action;
810 u8 plink_state; 818 u8 plink_state;
@@ -1418,6 +1426,21 @@ struct cfg80211_ssid {
1418}; 1426};
1419 1427
1420/** 1428/**
1429 * struct cfg80211_scan_info - information about completed scan
1430 * @scan_start_tsf: scan start time in terms of the TSF of the BSS that the
1431 * wireless device that requested the scan is connected to. If this
1432 * information is not available, this field is left zero.
1433 * @tsf_bssid: the BSSID according to which %scan_start_tsf is set.
1434 * @aborted: set to true if the scan was aborted for any reason,
1435 * userspace will be notified of that
1436 */
1437struct cfg80211_scan_info {
1438 u64 scan_start_tsf;
1439 u8 tsf_bssid[ETH_ALEN] __aligned(2);
1440 bool aborted;
1441};
1442
1443/**
1421 * struct cfg80211_scan_request - scan request description 1444 * struct cfg80211_scan_request - scan request description
1422 * 1445 *
1423 * @ssids: SSIDs to scan for (active scan only) 1446 * @ssids: SSIDs to scan for (active scan only)
@@ -1427,12 +1450,17 @@ struct cfg80211_ssid {
1427 * @scan_width: channel width for scanning 1450 * @scan_width: channel width for scanning
1428 * @ie: optional information element(s) to add into Probe Request or %NULL 1451 * @ie: optional information element(s) to add into Probe Request or %NULL
1429 * @ie_len: length of ie in octets 1452 * @ie_len: length of ie in octets
1453 * @duration: how long to listen on each channel, in TUs. If
1454 * %duration_mandatory is not set, this is the maximum dwell time and
1455 * the actual dwell time may be shorter.
1456 * @duration_mandatory: if set, the scan duration must be as specified by the
1457 * %duration field.
1430 * @flags: bit field of flags controlling operation 1458 * @flags: bit field of flags controlling operation
1431 * @rates: bitmap of rates to advertise for each band 1459 * @rates: bitmap of rates to advertise for each band
1432 * @wiphy: the wiphy this was for 1460 * @wiphy: the wiphy this was for
1433 * @scan_start: time (in jiffies) when the scan started 1461 * @scan_start: time (in jiffies) when the scan started
1434 * @wdev: the wireless device to scan for 1462 * @wdev: the wireless device to scan for
1435 * @aborted: (internal) scan request was notified as aborted 1463 * @info: (internal) information about completed scan
1436 * @notified: (internal) scan request was notified as done or aborted 1464 * @notified: (internal) scan request was notified as done or aborted
1437 * @no_cck: used to send probe requests at non CCK rate in 2GHz band 1465 * @no_cck: used to send probe requests at non CCK rate in 2GHz band
1438 * @mac_addr: MAC address used with randomisation 1466 * @mac_addr: MAC address used with randomisation
@@ -1448,6 +1476,8 @@ struct cfg80211_scan_request {
1448 enum nl80211_bss_scan_width scan_width; 1476 enum nl80211_bss_scan_width scan_width;
1449 const u8 *ie; 1477 const u8 *ie;
1450 size_t ie_len; 1478 size_t ie_len;
1479 u16 duration;
1480 bool duration_mandatory;
1451 u32 flags; 1481 u32 flags;
1452 1482
1453 u32 rates[NUM_NL80211_BANDS]; 1483 u32 rates[NUM_NL80211_BANDS];
@@ -1461,7 +1491,8 @@ struct cfg80211_scan_request {
1461 /* internal */ 1491 /* internal */
1462 struct wiphy *wiphy; 1492 struct wiphy *wiphy;
1463 unsigned long scan_start; 1493 unsigned long scan_start;
1464 bool aborted, notified; 1494 struct cfg80211_scan_info info;
1495 bool notified;
1465 bool no_cck; 1496 bool no_cck;
1466 1497
1467 /* keep last */ 1498 /* keep last */
@@ -1594,12 +1625,19 @@ enum cfg80211_signal_type {
1594 * buffered on the device) and be accurate to about 10ms. 1625 * buffered on the device) and be accurate to about 10ms.
1595 * If the frame isn't buffered, just passing the return value of 1626 * If the frame isn't buffered, just passing the return value of
1596 * ktime_get_boot_ns() is likely appropriate. 1627 * ktime_get_boot_ns() is likely appropriate.
1628 * @parent_tsf: the time at the start of reception of the first octet of the
1629 * timestamp field of the frame. The time is the TSF of the BSS specified
1630 * by %parent_bssid.
1631 * @parent_bssid: the BSS according to which %parent_tsf is set. This is set to
1632 * the BSS that requested the scan in which the beacon/probe was received.
1597 */ 1633 */
1598struct cfg80211_inform_bss { 1634struct cfg80211_inform_bss {
1599 struct ieee80211_channel *chan; 1635 struct ieee80211_channel *chan;
1600 enum nl80211_bss_scan_width scan_width; 1636 enum nl80211_bss_scan_width scan_width;
1601 s32 signal; 1637 s32 signal;
1602 u64 boottime_ns; 1638 u64 boottime_ns;
1639 u64 parent_tsf;
1640 u8 parent_bssid[ETH_ALEN] __aligned(2);
1603}; 1641};
1604 1642
1605/** 1643/**
@@ -4061,10 +4099,10 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
4061 * cfg80211_scan_done - notify that scan finished 4099 * cfg80211_scan_done - notify that scan finished
4062 * 4100 *
4063 * @request: the corresponding scan request 4101 * @request: the corresponding scan request
4064 * @aborted: set to true if the scan was aborted for any reason, 4102 * @info: information about the completed scan
4065 * userspace will be notified of that
4066 */ 4103 */
4067void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted); 4104void cfg80211_scan_done(struct cfg80211_scan_request *request,
4105 struct cfg80211_scan_info *info);
4068 4106
4069/** 4107/**
4070 * cfg80211_sched_scan_results - notify that new scan results are available 4108 * cfg80211_sched_scan_results - notify that new scan results are available
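With the cfg80211.h and mac80211.h hunks above, cfg80211_scan_done() and ieee80211_scan_completed() take a struct cfg80211_scan_info instead of a bare aborted flag, so drivers can also report the scan-start TSF and the BSSID it is relative to. A hedged sketch of the minimal driver-side conversion (only .aborted must be filled):

static void my_scan_done(struct ieee80211_hw *hw, bool aborted)
{
	struct cfg80211_scan_info info = {
		.aborted = aborted,
		/* .scan_start_tsf / .tsf_bssid may be left zero
		 * when the hardware cannot report them. */
	};

	ieee80211_scan_completed(hw, &info);
}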
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 1d45b61cb320..c99ffe8cef3c 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -90,6 +90,9 @@ struct devlink_ops {
90 u16 tc_index, 90 u16 tc_index,
91 enum devlink_sb_pool_type pool_type, 91 enum devlink_sb_pool_type pool_type,
92 u32 *p_cur, u32 *p_max); 92 u32 *p_cur, u32 *p_max);
93
94 int (*eswitch_mode_get)(struct devlink *devlink, u16 *p_mode);
95 int (*eswitch_mode_set)(struct devlink *devlink, u16 mode);
93}; 96};
94 97
95static inline void *devlink_priv(struct devlink *devlink) 98static inline void *devlink_priv(struct devlink *devlink)
diff --git a/include/net/gre.h b/include/net/gre.h
index 5dce30a6abe3..7a54a31d1d4c 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -26,7 +26,7 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version);
26struct net_device *gretap_fb_dev_create(struct net *net, const char *name, 26struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
27 u8 name_assign_type); 27 u8 name_assign_type);
28int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, 28int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
29 bool *csum_err, __be16 proto); 29 bool *csum_err, __be16 proto, int nhs);
30 30
31static inline int gre_calc_hlen(__be16 o_flags) 31static inline int gre_calc_hlen(__be16 o_flags)
32{ 32{
diff --git a/include/net/ip.h b/include/net/ip.h
index 37165fba3741..08f36cd2b874 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -313,10 +313,9 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
313 return min(dst->dev->mtu, IP_MAX_MTU); 313 return min(dst->dev->mtu, IP_MAX_MTU);
314} 314}
315 315
316static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) 316static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
317 const struct sk_buff *skb)
317{ 318{
318 struct sock *sk = skb->sk;
319
320 if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) { 319 if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
321 bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED; 320 bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
322 321
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index a52009ffc19f..b4faadbb4e01 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -4697,9 +4697,10 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw);
4697 * any context, including hardirq context. 4697 * any context, including hardirq context.
4698 * 4698 *
4699 * @hw: the hardware that finished the scan 4699 * @hw: the hardware that finished the scan
4700 * @aborted: set to true if scan was aborted 4700 * @info: information about the completed scan
4701 */ 4701 */
4702void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted); 4702void ieee80211_scan_completed(struct ieee80211_hw *hw,
4703 struct cfg80211_scan_info *info);
4703 4704
4704/** 4705/**
4705 * ieee80211_sched_scan_results - got results from scheduled scan 4706 * ieee80211_sched_scan_results - got results from scheduled scan
diff --git a/include/net/netevent.h b/include/net/netevent.h
index d8bbb38584b6..f440df172b56 100644
--- a/include/net/netevent.h
+++ b/include/net/netevent.h
@@ -24,6 +24,7 @@ struct netevent_redirect {
24enum netevent_notif_type { 24enum netevent_notif_type {
25 NETEVENT_NEIGH_UPDATE = 1, /* arg is struct neighbour ptr */ 25 NETEVENT_NEIGH_UPDATE = 1, /* arg is struct neighbour ptr */
26 NETEVENT_REDIRECT, /* arg is struct netevent_redirect ptr */ 26 NETEVENT_REDIRECT, /* arg is struct netevent_redirect ptr */
27 NETEVENT_DELAY_PROBE_TIME_UPDATE, /* arg is struct neigh_parms ptr */
27}; 28};
28 29
29int register_netevent_notifier(struct notifier_block *nb); 30int register_netevent_notifier(struct notifier_block *nb);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index dd78bea227c8..5d3397f34583 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -85,6 +85,9 @@ struct nf_conn {
85 spinlock_t lock; 85 spinlock_t lock;
86 u16 cpu; 86 u16 cpu;
87 87
88#ifdef CONFIG_NF_CONNTRACK_ZONES
89 struct nf_conntrack_zone zone;
90#endif
88 /* XXX should I move this to the tail ? - Y.K */ 91 /* XXX should I move this to the tail ? - Y.K */
89 /* These are my tuples; original and reply */ 92 /* These are my tuples; original and reply */
90 struct nf_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX]; 93 struct nf_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX];
@@ -287,6 +290,7 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
287struct kernel_param; 290struct kernel_param;
288 291
289int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp); 292int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
293int nf_conntrack_hash_resize(unsigned int hashsize);
290extern unsigned int nf_conntrack_htable_size; 294extern unsigned int nf_conntrack_htable_size;
291extern unsigned int nf_conntrack_max; 295extern unsigned int nf_conntrack_max;
292 296
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 55d15049ab2f..b925395fa5ed 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -15,9 +15,6 @@ enum nf_ct_ext_id {
15#ifdef CONFIG_NF_CONNTRACK_EVENTS 15#ifdef CONFIG_NF_CONNTRACK_EVENTS
16 NF_CT_EXT_ECACHE, 16 NF_CT_EXT_ECACHE,
17#endif 17#endif
18#ifdef CONFIG_NF_CONNTRACK_ZONES
19 NF_CT_EXT_ZONE,
20#endif
21#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP 18#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
22 NF_CT_EXT_TSTAMP, 19 NF_CT_EXT_TSTAMP,
23#endif 20#endif
@@ -38,7 +35,6 @@ enum nf_ct_ext_id {
38#define NF_CT_EXT_SEQADJ_TYPE struct nf_conn_seqadj 35#define NF_CT_EXT_SEQADJ_TYPE struct nf_conn_seqadj
39#define NF_CT_EXT_ACCT_TYPE struct nf_conn_acct 36#define NF_CT_EXT_ACCT_TYPE struct nf_conn_acct
40#define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache 37#define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
41#define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone
42#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp 38#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp
43#define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout 39#define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout
44#define NF_CT_EXT_LABELS_TYPE struct nf_conn_labels 40#define NF_CT_EXT_LABELS_TYPE struct nf_conn_labels
diff --git a/include/net/netfilter/nf_conntrack_zones.h b/include/net/netfilter/nf_conntrack_zones.h
index 4e32512cef32..64a718b60839 100644
--- a/include/net/netfilter/nf_conntrack_zones.h
+++ b/include/net/netfilter/nf_conntrack_zones.h
@@ -9,12 +9,11 @@
9static inline const struct nf_conntrack_zone * 9static inline const struct nf_conntrack_zone *
10nf_ct_zone(const struct nf_conn *ct) 10nf_ct_zone(const struct nf_conn *ct)
11{ 11{
12 const struct nf_conntrack_zone *nf_ct_zone = NULL;
13
14#ifdef CONFIG_NF_CONNTRACK_ZONES 12#ifdef CONFIG_NF_CONNTRACK_ZONES
15 nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE); 13 return &ct->zone;
14#else
15 return &nf_ct_zone_dflt;
16#endif 16#endif
17 return nf_ct_zone ? nf_ct_zone : &nf_ct_zone_dflt;
18} 17}
19 18
20static inline const struct nf_conntrack_zone * 19static inline const struct nf_conntrack_zone *
@@ -31,32 +30,22 @@ static inline const struct nf_conntrack_zone *
31nf_ct_zone_tmpl(const struct nf_conn *tmpl, const struct sk_buff *skb, 30nf_ct_zone_tmpl(const struct nf_conn *tmpl, const struct sk_buff *skb,
32 struct nf_conntrack_zone *tmp) 31 struct nf_conntrack_zone *tmp)
33{ 32{
34 const struct nf_conntrack_zone *zone; 33#ifdef CONFIG_NF_CONNTRACK_ZONES
35
36 if (!tmpl) 34 if (!tmpl)
37 return &nf_ct_zone_dflt; 35 return &nf_ct_zone_dflt;
38 36
39 zone = nf_ct_zone(tmpl); 37 if (tmpl->zone.flags & NF_CT_FLAG_MARK)
40 if (zone->flags & NF_CT_FLAG_MARK) 38 return nf_ct_zone_init(tmp, skb->mark, tmpl->zone.dir, 0);
41 zone = nf_ct_zone_init(tmp, skb->mark, zone->dir, 0); 39#endif
42 40 return nf_ct_zone(tmpl);
43 return zone;
44} 41}
45 42
46static inline int nf_ct_zone_add(struct nf_conn *ct, gfp_t flags, 43static inline void nf_ct_zone_add(struct nf_conn *ct,
47 const struct nf_conntrack_zone *info) 44 const struct nf_conntrack_zone *zone)
48{ 45{
49#ifdef CONFIG_NF_CONNTRACK_ZONES 46#ifdef CONFIG_NF_CONNTRACK_ZONES
50 struct nf_conntrack_zone *nf_ct_zone; 47 ct->zone = *zone;
51
52 nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, flags);
53 if (!nf_ct_zone)
54 return -ENOMEM;
55
56 nf_ct_zone_init(nf_ct_zone, info->id, info->dir,
57 info->flags);
58#endif 48#endif
59 return 0;
60} 49}
61 50
62static inline bool nf_ct_zone_matches_dir(const struct nf_conntrack_zone *zone, 51static inline bool nf_ct_zone_matches_dir(const struct nf_conntrack_zone *zone,
@@ -68,22 +57,34 @@ static inline bool nf_ct_zone_matches_dir(const struct nf_conntrack_zone *zone,
68static inline u16 nf_ct_zone_id(const struct nf_conntrack_zone *zone, 57static inline u16 nf_ct_zone_id(const struct nf_conntrack_zone *zone,
69 enum ip_conntrack_dir dir) 58 enum ip_conntrack_dir dir)
70{ 59{
60#ifdef CONFIG_NF_CONNTRACK_ZONES
71 return nf_ct_zone_matches_dir(zone, dir) ? 61 return nf_ct_zone_matches_dir(zone, dir) ?
72 zone->id : NF_CT_DEFAULT_ZONE_ID; 62 zone->id : NF_CT_DEFAULT_ZONE_ID;
63#else
64 return NF_CT_DEFAULT_ZONE_ID;
65#endif
73} 66}
74 67
75static inline bool nf_ct_zone_equal(const struct nf_conn *a, 68static inline bool nf_ct_zone_equal(const struct nf_conn *a,
76 const struct nf_conntrack_zone *b, 69 const struct nf_conntrack_zone *b,
77 enum ip_conntrack_dir dir) 70 enum ip_conntrack_dir dir)
78{ 71{
72#ifdef CONFIG_NF_CONNTRACK_ZONES
79 return nf_ct_zone_id(nf_ct_zone(a), dir) == 73 return nf_ct_zone_id(nf_ct_zone(a), dir) ==
80 nf_ct_zone_id(b, dir); 74 nf_ct_zone_id(b, dir);
75#else
76 return true;
77#endif
81} 78}
82 79
83static inline bool nf_ct_zone_equal_any(const struct nf_conn *a, 80static inline bool nf_ct_zone_equal_any(const struct nf_conn *a,
84 const struct nf_conntrack_zone *b) 81 const struct nf_conntrack_zone *b)
85{ 82{
83#ifdef CONFIG_NF_CONNTRACK_ZONES
86 return nf_ct_zone(a)->id == b->id; 84 return nf_ct_zone(a)->id == b->id;
85#else
86 return true;
87#endif
87} 88}
88#endif /* IS_ENABLED(CONFIG_NF_CONNTRACK) */ 89#endif /* IS_ENABLED(CONFIG_NF_CONNTRACK) */
89#endif /* _NF_CONNTRACK_ZONES_H */ 90#endif /* _NF_CONNTRACK_ZONES_H */
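The zone hunks above drop the NF_CT_EXT_ZONE extension entirely: the zone becomes a member embedded in struct nf_conn, so nf_ct_zone() can no longer return NULL and the -ENOMEM path in nf_ct_zone_add() goes away. A minimal standalone sketch of the new access pattern, using simplified stand-in structs rather than the real nf_conn layout:

    /* Standalone sketch (not kernel code); the structs are simplified
     * stand-ins for the real definitions.
     */
    #include <stdio.h>

    struct nf_conntrack_zone {
            unsigned short id;
            unsigned char flags;
            unsigned char dir;
    };

    struct nf_conn {
            /* ... other conntrack state ... */
            struct nf_conntrack_zone zone;  /* new: embedded, not an extension */
    };

    /* New-style lookup: a plain member read. When zones are compiled out,
     * the kernel returns a shared nf_ct_zone_dflt instead.
     */
    static const struct nf_conntrack_zone *nf_ct_zone(const struct nf_conn *ct)
    {
            return &ct->zone;
    }

    int main(void)
    {
            struct nf_conn ct = { .zone = { .id = 42 } };

            printf("zone id: %u\n", nf_ct_zone(&ct)->id);
            return 0;
    }

Compare with the removed lines: the old code had to call nf_ct_ext_find() and fall back to &nf_ct_zone_dflt on a NULL result.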
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index 57639fca223a..83d855ba6af1 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -12,6 +12,9 @@
12#define NF_LOG_UID 0x08 /* Log UID owning local socket */ 12#define NF_LOG_UID 0x08 /* Log UID owning local socket */
13#define NF_LOG_MASK 0x0f 13#define NF_LOG_MASK 0x0f
14 14
15/* This flag indicates that the copy_len field in nf_loginfo is set */
16#define NF_LOG_F_COPY_LEN 0x1
17
15enum nf_log_type { 18enum nf_log_type {
16 NF_LOG_TYPE_LOG = 0, 19 NF_LOG_TYPE_LOG = 0,
17 NF_LOG_TYPE_ULOG, 20 NF_LOG_TYPE_ULOG,
@@ -22,9 +25,13 @@ struct nf_loginfo {
22 u_int8_t type; 25 u_int8_t type;
23 union { 26 union {
24 struct { 27 struct {
28 /* copy_len will be used iff you set
29 * NF_LOG_F_COPY_LEN in flags
30 */
25 u_int32_t copy_len; 31 u_int32_t copy_len;
26 u_int16_t group; 32 u_int16_t group;
27 u_int16_t qthreshold; 33 u_int16_t qthreshold;
34 u_int16_t flags;
28 } ulog; 35 } ulog;
29 struct { 36 struct {
30 u_int8_t level; 37 u_int8_t level;
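The new NF_LOG_F_COPY_LEN flag lets callers distinguish a zero copy_len (meaning "copy everything") from an intentionally small copy. A hedged sketch of filling the ulog arm after this change; the struct below mirrors the fields in the hunk instead of pulling in kernel headers:

    #include <linux/types.h>

    #define NF_LOG_F_COPY_LEN 0x1

    /* Stand-in for the ulog arm of the nf_loginfo union above. */
    struct nf_loginfo_ulog {
            __u32 copy_len;
            __u16 group;
            __u16 qthreshold;
            __u16 flags;
    };

    static void setup_ulog(struct nf_loginfo_ulog *li)
    {
            li->group = 1;
            li->qthreshold = 0;
            li->copy_len = 64;                 /* honoured only because of the flag */
            li->flags = NF_LOG_F_COPY_LEN;     /* marks copy_len as valid */
    }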
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 092235458691..30c1d9489ae2 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -167,6 +167,7 @@ struct nft_set_elem {
167 167
168struct nft_set; 168struct nft_set;
169struct nft_set_iter { 169struct nft_set_iter {
170 u8 genmask;
170 unsigned int count; 171 unsigned int count;
171 unsigned int skip; 172 unsigned int skip;
172 int err; 173 int err;
@@ -296,6 +297,7 @@ void nft_unregister_set(struct nft_set_ops *ops);
296 * @ops: set ops 297 * @ops: set ops
297 * @pnet: network namespace 298 * @pnet: network namespace
298 * @flags: set flags 299 * @flags: set flags
300 * @genmask: generation mask
299 * @klen: key length 301 * @klen: key length
300 * @dlen: data length 302 * @dlen: data length
301 * @data: private set data 303 * @data: private set data
@@ -317,7 +319,8 @@ struct nft_set {
317 /* runtime data below here */ 319 /* runtime data below here */
318 const struct nft_set_ops *ops ____cacheline_aligned; 320 const struct nft_set_ops *ops ____cacheline_aligned;
319 possible_net_t pnet; 321 possible_net_t pnet;
320 u16 flags; 322 u16 flags:14,
323 genmask:2;
321 u8 klen; 324 u8 klen;
322 u8 dlen; 325 u8 dlen;
323 unsigned char data[] 326 unsigned char data[]
@@ -335,9 +338,9 @@ static inline struct nft_set *nft_set_container_of(const void *priv)
335} 338}
336 339
337struct nft_set *nf_tables_set_lookup(const struct nft_table *table, 340struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
338 const struct nlattr *nla); 341 const struct nlattr *nla, u8 genmask);
339struct nft_set *nf_tables_set_lookup_byid(const struct net *net, 342struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
340 const struct nlattr *nla); 343 const struct nlattr *nla, u8 genmask);
341 344
342static inline unsigned long nft_set_gc_interval(const struct nft_set *set) 345static inline unsigned long nft_set_gc_interval(const struct nft_set *set)
343{ 346{
@@ -732,7 +735,6 @@ static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule)
732 735
733enum nft_chain_flags { 736enum nft_chain_flags {
734 NFT_BASE_CHAIN = 0x1, 737 NFT_BASE_CHAIN = 0x1,
735 NFT_CHAIN_INACTIVE = 0x2,
736}; 738};
737 739
738/** 740/**
@@ -754,7 +756,8 @@ struct nft_chain {
754 u64 handle; 756 u64 handle;
755 u32 use; 757 u32 use;
756 u16 level; 758 u16 level;
757 u8 flags; 759 u8 flags:6,
760 genmask:2;
758 char name[NFT_CHAIN_MAXNAMELEN]; 761 char name[NFT_CHAIN_MAXNAMELEN];
759}; 762};
760 763
@@ -796,7 +799,6 @@ struct nft_stats {
796}; 799};
797 800
798#define NFT_HOOK_OPS_MAX 2 801#define NFT_HOOK_OPS_MAX 2
799#define NFT_BASECHAIN_DISABLED (1 << 0)
800 802
801/** 803/**
802 * struct nft_base_chain - nf_tables base chain 804 * struct nft_base_chain - nf_tables base chain
@@ -838,6 +840,7 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
838 * @hgenerator: handle generator state 840 * @hgenerator: handle generator state
839 * @use: number of chain references to this table 841 * @use: number of chain references to this table
840 * @flags: table flag (see enum nft_table_flags) 842 * @flags: table flag (see enum nft_table_flags)
843 * @genmask: generation mask
841 * @name: name of the table 844 * @name: name of the table
842 */ 845 */
843struct nft_table { 846struct nft_table {
@@ -846,7 +849,8 @@ struct nft_table {
846 struct list_head sets; 849 struct list_head sets;
847 u64 hgenerator; 850 u64 hgenerator;
848 u32 use; 851 u32 use;
849 u16 flags; 852 u16 flags:14,
853 genmask:2;
850 char name[NFT_TABLE_MAXNAMELEN]; 854 char name[NFT_TABLE_MAXNAMELEN];
851}; 855};
852 856
@@ -970,6 +974,32 @@ static inline u8 nft_genmask_cur(const struct net *net)
970#define NFT_GENMASK_ANY ((1 << 0) | (1 << 1)) 974#define NFT_GENMASK_ANY ((1 << 0) | (1 << 1))
971 975
972/* 976/*
977 * Generic transaction helpers
978 */
979
980/* Check if this object is currently active. */
981#define nft_is_active(__net, __obj) \
982 (((__obj)->genmask & nft_genmask_cur(__net)) == 0)
983
984/* Check if this object is active in the next generation. */
985#define nft_is_active_next(__net, __obj) \
986 (((__obj)->genmask & nft_genmask_next(__net)) == 0)
987
988/* This object becomes active in the next generation. */
989#define nft_activate_next(__net, __obj) \
990 (__obj)->genmask = nft_genmask_cur(__net)
991
992/* This object becomes inactive in the next generation. */
993#define nft_deactivate_next(__net, __obj) \
994 (__obj)->genmask = nft_genmask_next(__net)
995
996/* After committing the ruleset, clear the stale generation bit. */
997#define nft_clear(__net, __obj) \
998 (__obj)->genmask &= ~nft_genmask_next(__net)
999#define nft_active_genmask(__obj, __genmask) \
1000 !((__obj)->genmask & __genmask)
1001
1002/*
973 * Set element transaction helpers 1003 * Set element transaction helpers
974 */ 1004 */
975 1005
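The transaction helpers above encode liveness in the new two-bit genmask fields, one bit per generation, where a set bit means "inactive in that generation". A toy, userspace-compilable model of the same scheme (not the kernel implementation) walking through add-then-commit:

    #include <stdio.h>

    static unsigned int gencursor;  /* current generation: 0 or 1 */

    static unsigned int genmask_cur(void)  { return 1u << gencursor; }
    static unsigned int genmask_next(void) { return 1u << (!gencursor); }

    struct obj { unsigned int genmask:2; };

    static int is_active(const struct obj *o)      { return !(o->genmask & genmask_cur()); }
    static int is_active_next(const struct obj *o) { return !(o->genmask & genmask_next()); }

    int main(void)
    {
            struct obj o;

            /* nft_activate_next(): newly added, visible only next generation */
            o.genmask = genmask_cur();
            printf("before commit: active=%d active_next=%d\n",
                   is_active(&o), is_active_next(&o));

            gencursor = !gencursor;        /* commit flips the generation */
            o.genmask &= ~genmask_next();  /* nft_clear(): drop the stale bit */
            printf("after commit:  active=%d active_next=%d\n",
                   is_active(&o), is_active_next(&o));
            return 0;
    }

This prints active=0 active_next=1 before the commit and active=1 active_next=1 after it, which is exactly the lifecycle nft_activate_next() and nft_clear() implement.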
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 006a7b81d758..4113916cc1bb 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -98,10 +98,11 @@ struct rtnl_link_ops {
98 const struct net_device *dev, 98 const struct net_device *dev,
99 const struct net_device *slave_dev); 99 const struct net_device *slave_dev);
100 struct net *(*get_link_net)(const struct net_device *dev); 100 struct net *(*get_link_net)(const struct net_device *dev);
101 size_t (*get_linkxstats_size)(const struct net_device *dev); 101 size_t (*get_linkxstats_size)(const struct net_device *dev,
102 int attr);
102 int (*fill_linkxstats)(struct sk_buff *skb, 103 int (*fill_linkxstats)(struct sk_buff *skb,
103 const struct net_device *dev, 104 const struct net_device *dev,
104 int *prividx); 105 int *prividx, int attr);
105}; 106};
106 107
107int __rtnl_link_register(struct rtnl_link_ops *ops); 108int __rtnl_link_register(struct rtnl_link_ops *ops);
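With the extra attr argument, a single rtnl_link_ops implementation can answer both IFLA_STATS_LINK_XSTATS and the new IFLA_STATS_LINK_XSTATS_SLAVE requests. A sketch of the dispatch a driver might do; the foo_* helpers are hypothetical stand-ins for real accounting code:

    #include <stddef.h>
    #include <linux/if_link.h>      /* IFLA_STATS_LINK_XSTATS[_SLAVE] */

    struct net_device;              /* opaque for this sketch */

    /* Hypothetical accounting helpers. */
    static size_t foo_dev_xstats_size(const struct net_device *dev)  { (void)dev; return 64; }
    static size_t foo_port_xstats_size(const struct net_device *dev) { (void)dev; return 32; }

    static size_t foo_get_linkxstats_size(const struct net_device *dev, int attr)
    {
            switch (attr) {
            case IFLA_STATS_LINK_XSTATS:            /* the device's own stats */
                    return foo_dev_xstats_size(dev);
            case IFLA_STATS_LINK_XSTATS_SLAVE:      /* stats a master keeps per port */
                    return foo_port_xstats_size(dev);
            default:
                    return 0;
            }
    }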
diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
index dc9a09aefb33..c55facd17b7e 100644
--- a/include/net/tc_act/tc_ife.h
+++ b/include/net/tc_act/tc_ife.h
@@ -36,7 +36,7 @@ struct tcf_meta_ops {
36 int (*encode)(struct sk_buff *, void *, struct tcf_meta_info *); 36 int (*encode)(struct sk_buff *, void *, struct tcf_meta_info *);
37 int (*decode)(struct sk_buff *, void *, u16 len); 37 int (*decode)(struct sk_buff *, void *, u16 len);
38 int (*get)(struct sk_buff *skb, struct tcf_meta_info *mi); 38 int (*get)(struct sk_buff *skb, struct tcf_meta_info *mi);
39 int (*alloc)(struct tcf_meta_info *, void *); 39 int (*alloc)(struct tcf_meta_info *, void *, gfp_t);
40 void (*release)(struct tcf_meta_info *); 40 void (*release)(struct tcf_meta_info *);
41 int (*validate)(void *val, int len); 41 int (*validate)(void *val, int len);
42 struct module *owner; 42 struct module *owner;
@@ -48,8 +48,8 @@ int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi);
48int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi); 48int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi);
49int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, 49int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
50 const void *dval); 50 const void *dval);
51int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval); 51int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
52int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval); 52int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
53int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi); 53int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
54int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi); 54int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);
55int ife_validate_meta_u32(void *val, int len); 55int ife_validate_meta_u32(void *val, int len);
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index b496d5ad7d42..d01a5d40cfb5 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -24,11 +24,11 @@
24 24
25struct tcf_skbedit { 25struct tcf_skbedit {
26 struct tcf_common common; 26 struct tcf_common common;
27 u32 flags; 27 u32 flags;
28 u32 priority; 28 u32 priority;
29 u32 mark; 29 u32 mark;
30 u16 queue_mapping; 30 u16 queue_mapping;
31 /* XXX: 16-bit pad here? */ 31 u16 ptype;
32}; 32};
33#define to_skbedit(a) \ 33#define to_skbedit(a) \
34 container_of(a->priv, struct tcf_skbedit, common) 34 container_of(a->priv, struct tcf_skbedit, common)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a79894b66726..c00e7d51bb18 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -589,7 +589,7 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
589 * On the other hand, for extremely large MSS devices, handling 589 * On the other hand, for extremely large MSS devices, handling
590 * smaller than MSS windows in this way does make sense. 590 * smaller than MSS windows in this way does make sense.
591 */ 591 */
592 if (tp->max_window >= 512) 592 if (tp->max_window > TCP_MSS_DEFAULT)
593 cutoff = (tp->max_window >> 1); 593 cutoff = (tp->max_window >> 1);
594 else 594 else
595 cutoff = tp->max_window; 595 cutoff = tp->max_window;
@@ -1384,7 +1384,7 @@ union tcp_md5sum_block {
1384/* - pool: digest algorithm, hash description and scratch buffer */ 1384/* - pool: digest algorithm, hash description and scratch buffer */
1385struct tcp_md5sig_pool { 1385struct tcp_md5sig_pool {
1386 struct ahash_request *md5_req; 1386 struct ahash_request *md5_req;
1387 union tcp_md5sum_block md5_blk; 1387 void *scratch;
1388}; 1388};
1389 1389
1390/* - functions */ 1390/* - functions */
@@ -1420,7 +1420,6 @@ static inline void tcp_put_md5sig_pool(void)
1420 local_bh_enable(); 1420 local_bh_enable();
1421} 1421}
1422 1422
1423int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
1424int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *, 1423int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1425 unsigned int header_len); 1424 unsigned int header_len);
1426int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, 1425int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 16274e2133cd..9c9a27d42aaa 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -203,7 +203,9 @@ struct rvt_driver_provided {
203 203
204 /* 204 /*
205 * Allocate a private queue pair data structure for driver specific 205 * Allocate a private queue pair data structure for driver specific
206 * information which is opaque to rdmavt. 206 * information which is opaque to rdmavt. Errors are returned via
207 * ERR_PTR(err). The driver is free to return NULL or a valid
208 * pointer.
207 */ 209 */
208 void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp, 210 void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
209 gfp_t gfp); 211 gfp_t gfp);
diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h
new file mode 100644
index 000000000000..0fbf6fd4711b
--- /dev/null
+++ b/include/uapi/linux/batman_adv.h
@@ -0,0 +1,114 @@
1/* Copyright (C) 2016 B.A.T.M.A.N. contributors:
2 *
3 * Matthias Schiffer
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _UAPI_LINUX_BATMAN_ADV_H_
19#define _UAPI_LINUX_BATMAN_ADV_H_
20
21#define BATADV_NL_NAME "batadv"
22
23#define BATADV_NL_MCAST_GROUP_TPMETER "tpmeter"
24
25/**
26 * enum batadv_nl_attrs - batman-adv netlink attributes
27 *
28 * @BATADV_ATTR_UNSPEC: unspecified attribute to catch errors
29 * @BATADV_ATTR_VERSION: batman-adv version string
30 * @BATADV_ATTR_ALGO_NAME: name of routing algorithm
31 * @BATADV_ATTR_MESH_IFINDEX: index of the batman-adv interface
32 * @BATADV_ATTR_MESH_IFNAME: name of the batman-adv interface
33 * @BATADV_ATTR_MESH_ADDRESS: mac address of the batman-adv interface
34 * @BATADV_ATTR_HARD_IFINDEX: index of the non-batman-adv interface
35 * @BATADV_ATTR_HARD_IFNAME: name of the non-batman-adv interface
36 * @BATADV_ATTR_HARD_ADDRESS: mac address of the non-batman-adv interface
37 * @BATADV_ATTR_ORIG_ADDRESS: originator mac address
38 * @BATADV_ATTR_TPMETER_RESULT: result of run (see batadv_tp_meter_status)
39 * @BATADV_ATTR_TPMETER_TEST_TIME: time (msec) the run took
40 * @BATADV_ATTR_TPMETER_BYTES: amount of acked bytes during run
41 * @BATADV_ATTR_TPMETER_COOKIE: session cookie to match tp_meter session
42 * @BATADV_ATTR_PAD: attribute used for padding for 64-bit alignment
43 * @__BATADV_ATTR_AFTER_LAST: internal use
44 * @NUM_BATADV_ATTR: total number of batadv_nl_attrs available
45 * @BATADV_ATTR_MAX: highest attribute number currently defined
46 */
47enum batadv_nl_attrs {
48 BATADV_ATTR_UNSPEC,
49 BATADV_ATTR_VERSION,
50 BATADV_ATTR_ALGO_NAME,
51 BATADV_ATTR_MESH_IFINDEX,
52 BATADV_ATTR_MESH_IFNAME,
53 BATADV_ATTR_MESH_ADDRESS,
54 BATADV_ATTR_HARD_IFINDEX,
55 BATADV_ATTR_HARD_IFNAME,
56 BATADV_ATTR_HARD_ADDRESS,
57 BATADV_ATTR_ORIG_ADDRESS,
58 BATADV_ATTR_TPMETER_RESULT,
59 BATADV_ATTR_TPMETER_TEST_TIME,
60 BATADV_ATTR_TPMETER_BYTES,
61 BATADV_ATTR_TPMETER_COOKIE,
62 BATADV_ATTR_PAD,
63 /* add attributes above here, update the policy in netlink.c */
64 __BATADV_ATTR_AFTER_LAST,
65 NUM_BATADV_ATTR = __BATADV_ATTR_AFTER_LAST,
66 BATADV_ATTR_MAX = __BATADV_ATTR_AFTER_LAST - 1
67};
68
69/**
70 * enum batadv_nl_commands - supported batman-adv netlink commands
71 *
72 * @BATADV_CMD_UNSPEC: unspecified command to catch errors
73 * @BATADV_CMD_GET_MESH_INFO: Query basic information about batman-adv device
74 * @BATADV_CMD_TP_METER: Start a tp meter session
75 * @BATADV_CMD_TP_METER_CANCEL: Cancel a tp meter session
76 * @__BATADV_CMD_AFTER_LAST: internal use
77 * @BATADV_CMD_MAX: highest used command number
78 */
79enum batadv_nl_commands {
80 BATADV_CMD_UNSPEC,
81 BATADV_CMD_GET_MESH_INFO,
82 BATADV_CMD_TP_METER,
83 BATADV_CMD_TP_METER_CANCEL,
84 /* add new commands above here */
85 __BATADV_CMD_AFTER_LAST,
86 BATADV_CMD_MAX = __BATADV_CMD_AFTER_LAST - 1
87};
88
89/**
90 * enum batadv_tp_meter_reason - reason of a tp meter test run stop
91 * @BATADV_TP_REASON_COMPLETE: sender finished tp run
92 * @BATADV_TP_REASON_CANCEL: sender was stopped during run
93 * @BATADV_TP_REASON_DST_UNREACHABLE: receiver could not be reached or didn't
94 * answer
95 * @BATADV_TP_REASON_RESEND_LIMIT: (unused) sender retry reached limit
96 * @BATADV_TP_REASON_ALREADY_ONGOING: test to or from the same node already
97 * ongoing
98 * @BATADV_TP_REASON_MEMORY_ERROR: test was stopped due to low memory
99 * @BATADV_TP_REASON_CANT_SEND: failed to send via outgoing interface
100 * @BATADV_TP_REASON_TOO_MANY: too many ongoing sessions
101 */
102enum batadv_tp_meter_reason {
103 BATADV_TP_REASON_COMPLETE = 3,
104 BATADV_TP_REASON_CANCEL = 4,
105 /* error status >= 128 */
106 BATADV_TP_REASON_DST_UNREACHABLE = 128,
107 BATADV_TP_REASON_RESEND_LIMIT = 129,
108 BATADV_TP_REASON_ALREADY_ONGOING = 130,
109 BATADV_TP_REASON_MEMORY_ERROR = 131,
110 BATADV_TP_REASON_CANT_SEND = 132,
111 BATADV_TP_REASON_TOO_MANY = 133,
112};
113
114#endif /* _UAPI_LINUX_BATMAN_ADV_H_ */
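For orientation, a hedged userspace sketch of talking to the new "batadv" generic-netlink family with libnl-3: resolve the family id, then request BATADV_CMD_GET_MESH_INFO for bat0. Error handling and reply parsing are omitted, and the build line is an assumption (pkg-config name libnl-genl-3.0):

    #include <net/if.h>
    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>
    #include <linux/batman_adv.h>

    int main(void)
    {
            struct nl_sock *sk = nl_socket_alloc();
            struct nl_msg *msg;
            int family;

            genl_connect(sk);
            family = genl_ctrl_resolve(sk, BATADV_NL_NAME);  /* "batadv" */

            msg = nlmsg_alloc();
            genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                        BATADV_CMD_GET_MESH_INFO, 1);
            nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX, if_nametoindex("bat0"));

            nl_send_auto(sk, msg);          /* reply parsing omitted */
            nlmsg_free(msg);
            nl_socket_free(sk);
            return 0;
    }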
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 406459b935a2..c14ca1cd6297 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -84,6 +84,7 @@ enum bpf_map_type {
84 BPF_MAP_TYPE_PERCPU_HASH, 84 BPF_MAP_TYPE_PERCPU_HASH,
85 BPF_MAP_TYPE_PERCPU_ARRAY, 85 BPF_MAP_TYPE_PERCPU_ARRAY,
86 BPF_MAP_TYPE_STACK_TRACE, 86 BPF_MAP_TYPE_STACK_TRACE,
87 BPF_MAP_TYPE_CGROUP_ARRAY,
87}; 88};
88 89
89enum bpf_prog_type { 90enum bpf_prog_type {
@@ -313,6 +314,49 @@ enum bpf_func_id {
313 */ 314 */
314 BPF_FUNC_skb_get_tunnel_opt, 315 BPF_FUNC_skb_get_tunnel_opt,
315 BPF_FUNC_skb_set_tunnel_opt, 316 BPF_FUNC_skb_set_tunnel_opt,
317
318 /**
319 * bpf_skb_change_proto(skb, proto, flags)
320 * Change protocol of the skb. Currently supported is
321 * v4 -> v6, v6 -> v4 transitions. The helper will also
 322 * resize the skb. The eBPF program is expected to fill in the
323 * new headers via skb_store_bytes and lX_csum_replace.
324 * @skb: pointer to skb
325 * @proto: new skb->protocol type
326 * @flags: reserved
327 * Return: 0 on success or negative error
328 */
329 BPF_FUNC_skb_change_proto,
330
331 /**
332 * bpf_skb_change_type(skb, type)
333 * Change packet type of skb.
334 * @skb: pointer to skb
335 * @type: new skb->pkt_type type
336 * Return: 0 on success or negative error
337 */
338 BPF_FUNC_skb_change_type,
339
340 /**
341 * bpf_skb_in_cgroup(skb, map, index) - Check cgroup2 membership of skb
342 * @skb: pointer to skb
343 * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
344 * @index: index of the cgroup in the bpf_map
345 * Return:
346 * == 0 skb failed the cgroup2 descendant test
347 * == 1 skb succeeded the cgroup2 descendant test
348 * < 0 error
349 */
350 BPF_FUNC_skb_in_cgroup,
351
352 /**
353 * bpf_get_hash_recalc(skb)
354 * Retrieve and possibly recalculate skb->hash.
355 * @skb: pointer to skb
356 * Return: hash
357 */
358 BPF_FUNC_get_hash_recalc,
359
316 __BPF_FUNC_MAX_ID, 360 __BPF_FUNC_MAX_ID,
317}; 361};
318 362
@@ -347,7 +391,7 @@ enum bpf_func_id {
347#define BPF_F_ZERO_CSUM_TX (1ULL << 1) 391#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
348#define BPF_F_DONT_FRAGMENT (1ULL << 2) 392#define BPF_F_DONT_FRAGMENT (1ULL << 2)
349 393
350/* BPF_FUNC_perf_event_output flags. */ 394/* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */
351#define BPF_F_INDEX_MASK 0xffffffffULL 395#define BPF_F_INDEX_MASK 0xffffffffULL
352#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK 396#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
353 397
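A sketch, in the samples/bpf idiom of the day, of how a restricted-C program binds and uses the new helpers; the function-pointer casts are the usual helper plumbing, the filter body is illustrative:

    #include <linux/bpf.h>

    static int (*bpf_skb_in_cgroup)(struct __sk_buff *skb, void *map, unsigned int index) =
            (void *) BPF_FUNC_skb_in_cgroup;
    static unsigned int (*bpf_get_hash_recalc)(struct __sk_buff *skb) =
            (void *) BPF_FUNC_get_hash_recalc;

    /* Pass traffic only when the skb belongs to the cgroup stored in slot 0
     * of a BPF_MAP_TYPE_CGROUP_ARRAY map (map definition not shown). */
    static int filter(struct __sk_buff *skb, void *cgroup_map)
    {
            bpf_get_hash_recalc(skb);       /* refresh skb->hash as a side effect */
            return bpf_skb_in_cgroup(skb, cgroup_map, 0) == 1;
    }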
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 23c6960e94a4..2bdd1e3e7007 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -118,7 +118,7 @@ struct btrfs_ioctl_vol_args_v2 {
118 }; 118 };
119 union { 119 union {
120 char name[BTRFS_SUBVOL_NAME_MAX + 1]; 120 char name[BTRFS_SUBVOL_NAME_MAX + 1];
121 u64 devid; 121 __u64 devid;
122 }; 122 };
123}; 123};
124 124
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index ba0073b26fa6..915bfa74458c 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -57,6 +57,8 @@ enum devlink_command {
57 DEVLINK_CMD_SB_OCC_SNAPSHOT, 57 DEVLINK_CMD_SB_OCC_SNAPSHOT,
58 DEVLINK_CMD_SB_OCC_MAX_CLEAR, 58 DEVLINK_CMD_SB_OCC_MAX_CLEAR,
59 59
60 DEVLINK_CMD_ESWITCH_MODE_GET,
61 DEVLINK_CMD_ESWITCH_MODE_SET,
60 /* add new commands above here */ 62 /* add new commands above here */
61 63
62 __DEVLINK_CMD_MAX, 64 __DEVLINK_CMD_MAX,
@@ -95,6 +97,11 @@ enum devlink_sb_threshold_type {
95 97
96#define DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX 20 98#define DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX 20
97 99
100enum devlink_eswitch_mode {
101 DEVLINK_ESWITCH_MODE_LEGACY,
102 DEVLINK_ESWITCH_MODE_SWITCHDEV,
103};
104
98enum devlink_attr { 105enum devlink_attr {
99 /* don't change the order or add anything between, this is ABI! */ 106 /* don't change the order or add anything between, this is ABI! */
100 DEVLINK_ATTR_UNSPEC, 107 DEVLINK_ATTR_UNSPEC,
@@ -125,6 +132,7 @@ enum devlink_attr {
125 DEVLINK_ATTR_SB_TC_INDEX, /* u16 */ 132 DEVLINK_ATTR_SB_TC_INDEX, /* u16 */
126 DEVLINK_ATTR_SB_OCC_CUR, /* u32 */ 133 DEVLINK_ATTR_SB_OCC_CUR, /* u32 */
127 DEVLINK_ATTR_SB_OCC_MAX, /* u32 */ 134 DEVLINK_ATTR_SB_OCC_MAX, /* u32 */
135 DEVLINK_ATTR_ESWITCH_MODE, /* u16 */
128 136
129 /* add new attributes above here, update the policy in devlink.c */ 137 /* add new attributes above here, update the policy in devlink.c */
130 138
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 5974fae54e12..27e17363263a 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -105,6 +105,9 @@
105 * 105 *
106 * 7.24 106 * 7.24
107 * - add FUSE_LSEEK for SEEK_HOLE and SEEK_DATA support 107 * - add FUSE_LSEEK for SEEK_HOLE and SEEK_DATA support
108 *
109 * 7.25
110 * - add FUSE_PARALLEL_DIROPS
108 */ 111 */
109 112
110#ifndef _LINUX_FUSE_H 113#ifndef _LINUX_FUSE_H
@@ -140,7 +143,7 @@
140#define FUSE_KERNEL_VERSION 7 143#define FUSE_KERNEL_VERSION 7
141 144
142/** Minor version number of this interface */ 145/** Minor version number of this interface */
143#define FUSE_KERNEL_MINOR_VERSION 24 146#define FUSE_KERNEL_MINOR_VERSION 25
144 147
145/** The node ID of the root inode */ 148/** The node ID of the root inode */
146#define FUSE_ROOT_ID 1 149#define FUSE_ROOT_ID 1
@@ -234,6 +237,7 @@ struct fuse_file_lock {
234 * FUSE_ASYNC_DIO: asynchronous direct I/O submission 237 * FUSE_ASYNC_DIO: asynchronous direct I/O submission
235 * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes 238 * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
236 * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens 239 * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
240 * FUSE_PARALLEL_DIROPS: allow parallel lookups and readdir
237 */ 241 */
238#define FUSE_ASYNC_READ (1 << 0) 242#define FUSE_ASYNC_READ (1 << 0)
239#define FUSE_POSIX_LOCKS (1 << 1) 243#define FUSE_POSIX_LOCKS (1 << 1)
@@ -253,6 +257,7 @@ struct fuse_file_lock {
253#define FUSE_ASYNC_DIO (1 << 15) 257#define FUSE_ASYNC_DIO (1 << 15)
254#define FUSE_WRITEBACK_CACHE (1 << 16) 258#define FUSE_WRITEBACK_CACHE (1 << 16)
255#define FUSE_NO_OPEN_SUPPORT (1 << 17) 259#define FUSE_NO_OPEN_SUPPORT (1 << 17)
260#define FUSE_PARALLEL_DIROPS (1 << 18)
256 261
257/** 262/**
258 * CUSE INIT request/reply flags 263 * CUSE INIT request/reply flags
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 397d503fdedb..8304fe6f0561 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -247,8 +247,34 @@ enum {
247enum { 247enum {
248 BRIDGE_XSTATS_UNSPEC, 248 BRIDGE_XSTATS_UNSPEC,
249 BRIDGE_XSTATS_VLAN, 249 BRIDGE_XSTATS_VLAN,
250 BRIDGE_XSTATS_MCAST,
251 BRIDGE_XSTATS_PAD,
250 __BRIDGE_XSTATS_MAX 252 __BRIDGE_XSTATS_MAX
251}; 253};
252#define BRIDGE_XSTATS_MAX (__BRIDGE_XSTATS_MAX - 1) 254#define BRIDGE_XSTATS_MAX (__BRIDGE_XSTATS_MAX - 1)
253 255
256enum {
257 BR_MCAST_DIR_RX,
258 BR_MCAST_DIR_TX,
259 BR_MCAST_DIR_SIZE
260};
261
262/* IGMP/MLD statistics */
263struct br_mcast_stats {
264 __u64 igmp_queries[BR_MCAST_DIR_SIZE];
265 __u64 igmp_leaves[BR_MCAST_DIR_SIZE];
266 __u64 igmp_v1reports[BR_MCAST_DIR_SIZE];
267 __u64 igmp_v2reports[BR_MCAST_DIR_SIZE];
268 __u64 igmp_v3reports[BR_MCAST_DIR_SIZE];
269 __u64 igmp_parse_errors;
270
271 __u64 mld_queries[BR_MCAST_DIR_SIZE];
272 __u64 mld_leaves[BR_MCAST_DIR_SIZE];
273 __u64 mld_v1reports[BR_MCAST_DIR_SIZE];
274 __u64 mld_v2reports[BR_MCAST_DIR_SIZE];
275 __u64 mld_parse_errors;
276
277 __u64 mcast_bytes[BR_MCAST_DIR_SIZE];
278 __u64 mcast_packets[BR_MCAST_DIR_SIZE];
279};
254#endif /* _UAPI_LINUX_IF_BRIDGE_H */ 280#endif /* _UAPI_LINUX_IF_BRIDGE_H */
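Since every counter in br_mcast_stats is split per direction, a consumer adds the BR_MCAST_DIR_RX and BR_MCAST_DIR_TX slots for a combined figure. A small standalone sketch, assuming uapi headers that already contain this hunk:

    #include <stdio.h>
    #include <string.h>
    #include <linux/if_bridge.h>

    static unsigned long long total(const __u64 v[BR_MCAST_DIR_SIZE])
    {
            return v[BR_MCAST_DIR_RX] + v[BR_MCAST_DIR_TX];
    }

    int main(void)
    {
            struct br_mcast_stats s;

            memset(&s, 0, sizeof(s));
            s.igmp_queries[BR_MCAST_DIR_RX] = 3;
            s.igmp_queries[BR_MCAST_DIR_TX] = 1;
            printf("igmp queries: %llu\n", total(s.igmp_queries));
            return 0;
    }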
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index bb36bd5675a7..4285ac31e865 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -273,6 +273,7 @@ enum {
273 IFLA_BR_VLAN_DEFAULT_PVID, 273 IFLA_BR_VLAN_DEFAULT_PVID,
274 IFLA_BR_PAD, 274 IFLA_BR_PAD,
275 IFLA_BR_VLAN_STATS_ENABLED, 275 IFLA_BR_VLAN_STATS_ENABLED,
276 IFLA_BR_MCAST_STATS_ENABLED,
276 __IFLA_BR_MAX, 277 __IFLA_BR_MAX,
277}; 278};
278 279
@@ -822,6 +823,7 @@ enum {
822 IFLA_STATS_UNSPEC, /* also used as 64bit pad attribute */ 823 IFLA_STATS_UNSPEC, /* also used as 64bit pad attribute */
823 IFLA_STATS_LINK_64, 824 IFLA_STATS_LINK_64,
824 IFLA_STATS_LINK_XSTATS, 825 IFLA_STATS_LINK_XSTATS,
826 IFLA_STATS_LINK_XSTATS_SLAVE,
825 __IFLA_STATS_MAX, 827 __IFLA_STATS_MAX,
826}; 828};
827 829
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index a16643705669..abbd1dc5d683 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -72,6 +72,7 @@ enum {
72 INET_DIAG_BC_AUTO, 72 INET_DIAG_BC_AUTO,
73 INET_DIAG_BC_S_COND, 73 INET_DIAG_BC_S_COND,
74 INET_DIAG_BC_D_COND, 74 INET_DIAG_BC_D_COND,
75 INET_DIAG_BC_DEV_COND, /* u32 ifindex */
75}; 76};
76 77
77struct inet_diag_hostcond { 78struct inet_diag_hostcond {
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 87cf351bab03..737fa32faad4 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -611,6 +611,37 @@
611#define KEY_KBDINPUTASSIST_ACCEPT 0x264 611#define KEY_KBDINPUTASSIST_ACCEPT 0x264
612#define KEY_KBDINPUTASSIST_CANCEL 0x265 612#define KEY_KBDINPUTASSIST_CANCEL 0x265
613 613
614/* Diagonal movement keys */
615#define KEY_RIGHT_UP 0x266
616#define KEY_RIGHT_DOWN 0x267
617#define KEY_LEFT_UP 0x268
618#define KEY_LEFT_DOWN 0x269
619
620#define KEY_ROOT_MENU 0x26a /* Show Device's Root Menu */
621/* Show Top Menu of the Media (e.g. DVD) */
622#define KEY_MEDIA_TOP_MENU 0x26b
623#define KEY_NUMERIC_11 0x26c
624#define KEY_NUMERIC_12 0x26d
625/*
626 * Toggle Audio Description: refers to an audio service that helps blind and
627 * visually impaired consumers understand the action in a program. Note: in
628 * some countries this is referred to as "Video Description".
629 */
630#define KEY_AUDIO_DESC 0x26e
631#define KEY_3D_MODE 0x26f
632#define KEY_NEXT_FAVORITE 0x270
633#define KEY_STOP_RECORD 0x271
634#define KEY_PAUSE_RECORD 0x272
635#define KEY_VOD 0x273 /* Video on Demand */
636#define KEY_UNMUTE 0x274
637#define KEY_FASTREVERSE 0x275
638#define KEY_SLOWREVERSE 0x276
639/*
640 * Control a data application associated with the currently viewed channel,
641 * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
642 */
 643#define KEY_DATA 0x277
644
614#define BTN_TRIGGER_HAPPY 0x2c0 645#define BTN_TRIGGER_HAPPY 0x2c0
615#define BTN_TRIGGER_HAPPY1 0x2c0 646#define BTN_TRIGGER_HAPPY1 0x2c0
616#define BTN_TRIGGER_HAPPY2 0x2c1 647#define BTN_TRIGGER_HAPPY2 0x2c1
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 01113841190d..c51494119817 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -247,6 +247,7 @@ struct input_mask {
247#define BUS_ATARI 0x1B 247#define BUS_ATARI 0x1B
248#define BUS_SPI 0x1C 248#define BUS_SPI 0x1C
249#define BUS_RMI 0x1D 249#define BUS_RMI 0x1D
250#define BUS_CEC 0x1E
250 251
251/* 252/*
252 * MT_TOOL types 253 * MT_TOOL types
diff --git a/include/uapi/linux/netfilter/Kbuild b/include/uapi/linux/netfilter/Kbuild
index 1d973d2ba417..cd26d7a0fd07 100644
--- a/include/uapi/linux/netfilter/Kbuild
+++ b/include/uapi/linux/netfilter/Kbuild
@@ -33,6 +33,7 @@ header-y += xt_NFLOG.h
33header-y += xt_NFQUEUE.h 33header-y += xt_NFQUEUE.h
34header-y += xt_RATEEST.h 34header-y += xt_RATEEST.h
35header-y += xt_SECMARK.h 35header-y += xt_SECMARK.h
36header-y += xt_SYNPROXY.h
36header-y += xt_TCPMSS.h 37header-y += xt_TCPMSS.h
37header-y += xt_TCPOPTSTRIP.h 38header-y += xt_TCPOPTSTRIP.h
38header-y += xt_TEE.h 39header-y += xt_TEE.h
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 6a4dbe04f09e..01751faccaf8 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -546,6 +546,10 @@ enum nft_cmp_attributes {
546}; 546};
547#define NFTA_CMP_MAX (__NFTA_CMP_MAX - 1) 547#define NFTA_CMP_MAX (__NFTA_CMP_MAX - 1)
548 548
549enum nft_lookup_flags {
550 NFT_LOOKUP_F_INV = (1 << 0),
551};
552
549/** 553/**
550 * enum nft_lookup_attributes - nf_tables set lookup expression netlink attributes 554 * enum nft_lookup_attributes - nf_tables set lookup expression netlink attributes
551 * 555 *
@@ -553,6 +557,7 @@ enum nft_cmp_attributes {
553 * @NFTA_LOOKUP_SREG: source register of the data to look for (NLA_U32: nft_registers) 557 * @NFTA_LOOKUP_SREG: source register of the data to look for (NLA_U32: nft_registers)
554 * @NFTA_LOOKUP_DREG: destination register (NLA_U32: nft_registers) 558 * @NFTA_LOOKUP_DREG: destination register (NLA_U32: nft_registers)
555 * @NFTA_LOOKUP_SET_ID: uniquely identifies a set in a transaction (NLA_U32) 559 * @NFTA_LOOKUP_SET_ID: uniquely identifies a set in a transaction (NLA_U32)
560 * @NFTA_LOOKUP_FLAGS: flags (NLA_U32: enum nft_lookup_flags)
556 */ 561 */
557enum nft_lookup_attributes { 562enum nft_lookup_attributes {
558 NFTA_LOOKUP_UNSPEC, 563 NFTA_LOOKUP_UNSPEC,
@@ -560,6 +565,7 @@ enum nft_lookup_attributes {
560 NFTA_LOOKUP_SREG, 565 NFTA_LOOKUP_SREG,
561 NFTA_LOOKUP_DREG, 566 NFTA_LOOKUP_DREG,
562 NFTA_LOOKUP_SET_ID, 567 NFTA_LOOKUP_SET_ID,
568 NFTA_LOOKUP_FLAGS,
563 __NFTA_LOOKUP_MAX 569 __NFTA_LOOKUP_MAX
564}; 570};
565#define NFTA_LOOKUP_MAX (__NFTA_LOOKUP_MAX - 1) 571#define NFTA_LOOKUP_MAX (__NFTA_LOOKUP_MAX - 1)
diff --git a/include/uapi/linux/netfilter/xt_NFLOG.h b/include/uapi/linux/netfilter/xt_NFLOG.h
index 87b58311ce6b..f33070730fc8 100644
--- a/include/uapi/linux/netfilter/xt_NFLOG.h
+++ b/include/uapi/linux/netfilter/xt_NFLOG.h
@@ -6,9 +6,13 @@
6#define XT_NFLOG_DEFAULT_GROUP 0x1 6#define XT_NFLOG_DEFAULT_GROUP 0x1
7#define XT_NFLOG_DEFAULT_THRESHOLD 0 7#define XT_NFLOG_DEFAULT_THRESHOLD 0
8 8
9#define XT_NFLOG_MASK 0x0 9#define XT_NFLOG_MASK 0x1
10
11/* This flag indicates that the 'len' field in xt_nflog_info is set */
12#define XT_NFLOG_F_COPY_LEN 0x1
10 13
11struct xt_nflog_info { 14struct xt_nflog_info {
15 /* 'len' will be used iff you set XT_NFLOG_F_COPY_LEN in flags */
12 __u32 len; 16 __u32 len;
13 __u16 group; 17 __u16 group;
14 __u16 threshold; 18 __u16 threshold;
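This mirrors the NF_LOG_F_COPY_LEN change on the nf_log side: userspace must set XT_NFLOG_F_COPY_LEN for 'len' to take effect. A sketch of the struct fill; the values are illustrative:

    #include <string.h>
    #include <linux/netfilter/xt_NFLOG.h>

    static void nflog_setup(struct xt_nflog_info *info)
    {
            memset(info, 0, sizeof(*info));
            info->group = XT_NFLOG_DEFAULT_GROUP;
            info->threshold = XT_NFLOG_DEFAULT_THRESHOLD;
            info->len = 128;                   /* copy at most 128 bytes per packet */
            info->flags = XT_NFLOG_F_COPY_LEN; /* without this, 'len' is ignored */
    }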
diff --git a/include/uapi/linux/netfilter/xt_SYNPROXY.h b/include/uapi/linux/netfilter/xt_SYNPROXY.h
index 2d59fbaa93c6..ca67e61d2a61 100644
--- a/include/uapi/linux/netfilter/xt_SYNPROXY.h
+++ b/include/uapi/linux/netfilter/xt_SYNPROXY.h
@@ -1,6 +1,8 @@
1#ifndef _XT_SYNPROXY_H 1#ifndef _XT_SYNPROXY_H
2#define _XT_SYNPROXY_H 2#define _XT_SYNPROXY_H
3 3
4#include <linux/types.h>
5
4#define XT_SYNPROXY_OPT_MSS 0x01 6#define XT_SYNPROXY_OPT_MSS 0x01
5#define XT_SYNPROXY_OPT_WSCALE 0x02 7#define XT_SYNPROXY_OPT_WSCALE 0x02
6#define XT_SYNPROXY_OPT_SACK_PERM 0x04 8#define XT_SYNPROXY_OPT_SACK_PERM 0x04
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 53c8278827a0..220694151434 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1829,6 +1829,44 @@ enum nl80211_commands {
1829 * %NL80211_ATTR_EXT_CAPA_MASK, to specify the extended capabilities per 1829 * %NL80211_ATTR_EXT_CAPA_MASK, to specify the extended capabilities per
1830 * interface type. 1830 * interface type.
1831 * 1831 *
1832 * @NL80211_ATTR_MU_MIMO_GROUP_DATA: array of 24 bytes that defines a MU-MIMO
1833 * groupID for monitor mode.
1834 * The first 8 bytes are a mask that defines the membership in each
1835 * group (there are 64 groups, group 0 and 63 are reserved),
 1836 * each bit represents a group: it is set to 1 if the device is a member
 1837 * of that group and 0 if it is not.
1838 * The remaining 16 bytes define the position in each group: 2 bits for
1839 * each group.
 1840 * (smaller group numbers are represented in the most significant bits,
 1841 * bigger group numbers in the least significant bits.)
1842 * This attribute is used only if all interfaces are in monitor mode.
1843 * Set this attribute in order to monitor packets using the given MU-MIMO
1844 * groupID data.
 1845 * To turn the feature off, set all bits of the groupID to zero.
1846 * @NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR: mac address for the sniffer to follow
1847 * when using MU-MIMO air sniffer.
 1848 * To turn the feature off, set an invalid mac address
1849 * (e.g. FF:FF:FF:FF:FF:FF)
1850 *
1851 * @NL80211_ATTR_SCAN_START_TIME_TSF: The time at which the scan was actually
1852 * started (u64). The time is the TSF of the BSS the interface that
1853 * requested the scan is connected to (if available, otherwise this
1854 * attribute must not be included).
1855 * @NL80211_ATTR_SCAN_START_TIME_TSF_BSSID: The BSS according to which
1856 * %NL80211_ATTR_SCAN_START_TIME_TSF is set.
1857 * @NL80211_ATTR_MEASUREMENT_DURATION: measurement duration in TUs (u16). If
1858 * %NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY is not set, this is the
1859 * maximum measurement duration allowed. This attribute is used with
1860 * measurement requests. It can also be used with %NL80211_CMD_TRIGGER_SCAN
1861 * if the scan is used for beacon report radio measurement.
1862 * @NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY: flag attribute that indicates
1863 * that the duration specified with %NL80211_ATTR_MEASUREMENT_DURATION is
1864 * mandatory. If this flag is not set, the duration is the maximum duration
1865 * and the actual measurement duration may be shorter.
1866 *
1867 * @NL80211_ATTR_MESH_PEER_AID: Association ID for the mesh peer (u16). This is
1868 * used to pull the stored data for mesh peer in power save state.
1869 *
1832 * @NUM_NL80211_ATTR: total number of nl80211_attrs available 1870 * @NUM_NL80211_ATTR: total number of nl80211_attrs available
1833 * @NL80211_ATTR_MAX: highest attribute number currently defined 1871 * @NL80211_ATTR_MAX: highest attribute number currently defined
1834 * @__NL80211_ATTR_AFTER_LAST: internal use 1872 * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2213,6 +2251,16 @@ enum nl80211_attrs {
2213 2251
2214 NL80211_ATTR_IFTYPE_EXT_CAPA, 2252 NL80211_ATTR_IFTYPE_EXT_CAPA,
2215 2253
2254 NL80211_ATTR_MU_MIMO_GROUP_DATA,
2255 NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR,
2256
2257 NL80211_ATTR_SCAN_START_TIME_TSF,
2258 NL80211_ATTR_SCAN_START_TIME_TSF_BSSID,
2259 NL80211_ATTR_MEASUREMENT_DURATION,
2260 NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY,
2261
2262 NL80211_ATTR_MESH_PEER_AID,
2263
2216 /* add attributes here, update the policy in nl80211.c */ 2264 /* add attributes here, update the policy in nl80211.c */
2217 2265
2218 __NL80211_ATTR_AFTER_LAST, 2266 __NL80211_ATTR_AFTER_LAST,
@@ -3474,6 +3522,12 @@ enum nl80211_bss_scan_width {
3474 * was last updated by a received frame. The value is expected to be 3522 * was last updated by a received frame. The value is expected to be
3475 * accurate to about 10ms. (u64, nanoseconds) 3523 * accurate to about 10ms. (u64, nanoseconds)
3476 * @NL80211_BSS_PAD: attribute used for padding for 64-bit alignment 3524 * @NL80211_BSS_PAD: attribute used for padding for 64-bit alignment
3525 * @NL80211_BSS_PARENT_TSF: the time at the start of reception of the first
3526 * octet of the timestamp field of the last beacon/probe received for
3527 * this BSS. The time is the TSF of the BSS specified by
3528 * @NL80211_BSS_PARENT_BSSID. (u64).
3529 * @NL80211_BSS_PARENT_BSSID: the BSS according to which @NL80211_BSS_PARENT_TSF
3530 * is set.
3477 * @__NL80211_BSS_AFTER_LAST: internal 3531 * @__NL80211_BSS_AFTER_LAST: internal
3478 * @NL80211_BSS_MAX: highest BSS attribute 3532 * @NL80211_BSS_MAX: highest BSS attribute
3479 */ 3533 */
@@ -3495,6 +3549,8 @@ enum nl80211_bss {
3495 NL80211_BSS_PRESP_DATA, 3549 NL80211_BSS_PRESP_DATA,
3496 NL80211_BSS_LAST_SEEN_BOOTTIME, 3550 NL80211_BSS_LAST_SEEN_BOOTTIME,
3497 NL80211_BSS_PAD, 3551 NL80211_BSS_PAD,
3552 NL80211_BSS_PARENT_TSF,
3553 NL80211_BSS_PARENT_BSSID,
3498 3554
3499 /* keep last */ 3555 /* keep last */
3500 __NL80211_BSS_AFTER_LAST, 3556 __NL80211_BSS_AFTER_LAST,
@@ -4479,6 +4535,22 @@ enum nl80211_feature_flags {
4479 * %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests, which will set 4535 * %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests, which will set
4480 * the ASSOC_REQ_USE_RRM flag in the association request even if 4536 * the ASSOC_REQ_USE_RRM flag in the association request even if
4481 * NL80211_FEATURE_QUIET is not advertized. 4537 * NL80211_FEATURE_QUIET is not advertized.
4538 * @NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER: This device supports MU-MIMO air
4539 * sniffer which means that it can be configured to hear packets from
4540 * certain groups which can be configured by the
4541 * %NL80211_ATTR_MU_MIMO_GROUP_DATA attribute,
4542 * or can be configured to follow a station by configuring the
4543 * %NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR attribute.
4544 * @NL80211_EXT_FEATURE_SCAN_START_TIME: This driver includes the actual
4545 * time the scan started in scan results event. The time is the TSF of
4546 * the BSS that the interface that requested the scan is connected to
4547 * (if available).
4548 * @NL80211_EXT_FEATURE_BSS_PARENT_TSF: Per BSS, this driver reports the
4549 * time the last beacon/probe was received. The time is the TSF of the
4550 * BSS that the interface that requested the scan is connected to
4551 * (if available).
4552 * @NL80211_EXT_FEATURE_SET_SCAN_DWELL: This driver supports configuration of
4553 * channel dwell time.
4482 * 4554 *
4483 * @NUM_NL80211_EXT_FEATURES: number of extended features. 4555 * @NUM_NL80211_EXT_FEATURES: number of extended features.
4484 * @MAX_NL80211_EXT_FEATURES: highest extended feature index. 4556 * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -4486,6 +4558,10 @@ enum nl80211_feature_flags {
4486enum nl80211_ext_feature_index { 4558enum nl80211_ext_feature_index {
4487 NL80211_EXT_FEATURE_VHT_IBSS, 4559 NL80211_EXT_FEATURE_VHT_IBSS,
4488 NL80211_EXT_FEATURE_RRM, 4560 NL80211_EXT_FEATURE_RRM,
4561 NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER,
4562 NL80211_EXT_FEATURE_SCAN_START_TIME,
4563 NL80211_EXT_FEATURE_BSS_PARENT_TSF,
4564 NL80211_EXT_FEATURE_SET_SCAN_DWELL,
4489 4565
4490 /* add new features before the definition below */ 4566 /* add new features before the definition below */
4491 NUM_NL80211_EXT_FEATURES, 4567 NUM_NL80211_EXT_FEATURES,
diff --git a/include/uapi/linux/tc_act/tc_skbedit.h b/include/uapi/linux/tc_act/tc_skbedit.h
index fecb5cc48c40..a4d00c608d8f 100644
--- a/include/uapi/linux/tc_act/tc_skbedit.h
+++ b/include/uapi/linux/tc_act/tc_skbedit.h
@@ -27,6 +27,7 @@
27#define SKBEDIT_F_PRIORITY 0x1 27#define SKBEDIT_F_PRIORITY 0x1
28#define SKBEDIT_F_QUEUE_MAPPING 0x2 28#define SKBEDIT_F_QUEUE_MAPPING 0x2
29#define SKBEDIT_F_MARK 0x4 29#define SKBEDIT_F_MARK 0x4
30#define SKBEDIT_F_PTYPE 0x8
30 31
31struct tc_skbedit { 32struct tc_skbedit {
32 tc_gen; 33 tc_gen;
@@ -40,6 +41,7 @@ enum {
40 TCA_SKBEDIT_QUEUE_MAPPING, 41 TCA_SKBEDIT_QUEUE_MAPPING,
41 TCA_SKBEDIT_MARK, 42 TCA_SKBEDIT_MARK,
42 TCA_SKBEDIT_PAD, 43 TCA_SKBEDIT_PAD,
44 TCA_SKBEDIT_PTYPE,
43 __TCA_SKBEDIT_MAX 45 __TCA_SKBEDIT_MAX
44}; 46};
45#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1) 47#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 53e8e3fe6b1b..482898fc433a 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -115,12 +115,22 @@ enum {
115#define TCP_CC_INFO 26 /* Get Congestion Control (optional) info */ 115#define TCP_CC_INFO 26 /* Get Congestion Control (optional) info */
116#define TCP_SAVE_SYN 27 /* Record SYN headers for new connections */ 116#define TCP_SAVE_SYN 27 /* Record SYN headers for new connections */
117#define TCP_SAVED_SYN 28 /* Get SYN headers recorded for connection */ 117#define TCP_SAVED_SYN 28 /* Get SYN headers recorded for connection */
118#define TCP_REPAIR_WINDOW 29 /* Get/set window parameters */
118 119
119struct tcp_repair_opt { 120struct tcp_repair_opt {
120 __u32 opt_code; 121 __u32 opt_code;
121 __u32 opt_val; 122 __u32 opt_val;
122}; 123};
123 124
125struct tcp_repair_window {
126 __u32 snd_wl1;
127 __u32 snd_wnd;
128 __u32 max_window;
129
130 __u32 rcv_wnd;
131 __u32 rcv_wup;
132};
133
124enum { 134enum {
125 TCP_NO_QUEUE, 135 TCP_NO_QUEUE,
126 TCP_RECV_QUEUE, 136 TCP_RECV_QUEUE,
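TCP_REPAIR_WINDOW rounds out checkpoint/restore: the same struct is read with getsockopt() during dump and written back with setsockopt() during restore. A hedged sketch, assuming uapi headers with this hunk and a socket already switched into TCP_REPAIR mode:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/tcp.h>          /* TCP_REPAIR_WINDOW, struct tcp_repair_window */

    static void checkpoint_window(int fd, struct tcp_repair_window *w)
    {
            socklen_t len = sizeof(*w);

            getsockopt(fd, IPPROTO_TCP, TCP_REPAIR_WINDOW, w, &len);
            printf("snd_wnd=%u rcv_wnd=%u\n", w->snd_wnd, w->rcv_wnd);
    }

    static void restore_window(int fd, const struct tcp_repair_window *w)
    {
            setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_WINDOW, w, sizeof(*w));
    }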
diff --git a/init/main.c b/init/main.c
index 4c17fda5c2ff..eae02aa03c9e 100644
--- a/init/main.c
+++ b/init/main.c
@@ -453,7 +453,7 @@ void __init __weak smp_setup_processor_id(void)
453} 453}
454 454
455# if THREAD_SIZE >= PAGE_SIZE 455# if THREAD_SIZE >= PAGE_SIZE
456void __init __weak thread_info_cache_init(void) 456void __init __weak thread_stack_cache_init(void)
457{ 457{
458} 458}
459#endif 459#endif
@@ -627,7 +627,7 @@ asmlinkage __visible void __init start_kernel(void)
627 /* Should be run before the first non-init thread is created */ 627 /* Should be run before the first non-init thread is created */
628 init_espfix_bsp(); 628 init_espfix_bsp();
629#endif 629#endif
630 thread_info_cache_init(); 630 thread_stack_cache_init();
631 cred_init(); 631 cred_init();
632 fork_init(); 632 fork_init();
633 proc_caches_init(); 633 proc_caches_init();
@@ -708,11 +708,13 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
708{ 708{
709 struct blacklist_entry *entry; 709 struct blacklist_entry *entry;
710 char fn_name[KSYM_SYMBOL_LEN]; 710 char fn_name[KSYM_SYMBOL_LEN];
711 unsigned long addr;
711 712
712 if (list_empty(&blacklisted_initcalls)) 713 if (list_empty(&blacklisted_initcalls))
713 return false; 714 return false;
714 715
715 sprint_symbol_no_offset(fn_name, (unsigned long)fn); 716 addr = (unsigned long) dereference_function_descriptor(fn);
717 sprint_symbol_no_offset(fn_name, addr);
716 718
717 list_for_each_entry(entry, &blacklisted_initcalls, next) { 719 list_for_each_entry(entry, &blacklisted_initcalls, next) {
718 if (!strcmp(fn_name, entry->buf)) { 720 if (!strcmp(fn_name, entry->buf)) {
diff --git a/kernel/audit.c b/kernel/audit.c
index 22bb4f24f071..8d528f9930da 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1883,6 +1883,23 @@ out_null:
1883 audit_log_format(ab, " exe=(null)"); 1883 audit_log_format(ab, " exe=(null)");
1884} 1884}
1885 1885
1886struct tty_struct *audit_get_tty(struct task_struct *tsk)
1887{
1888 struct tty_struct *tty = NULL;
1889 unsigned long flags;
1890
1891 spin_lock_irqsave(&tsk->sighand->siglock, flags);
1892 if (tsk->signal)
1893 tty = tty_kref_get(tsk->signal->tty);
1894 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
1895 return tty;
1896}
1897
1898void audit_put_tty(struct tty_struct *tty)
1899{
1900 tty_kref_put(tty);
1901}
1902
1886void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk) 1903void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
1887{ 1904{
1888 const struct cred *cred; 1905 const struct cred *cred;
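An illustrative kernel-context fragment of how callers pair the new helpers; example_log_tty() is hypothetical, the helper names come from the hunk above. audit_get_tty() may return NULL, and audit_put_tty() forwards to tty_kref_put(), which accepts NULL:

    static void example_log_tty(struct audit_buffer *ab, struct task_struct *tsk)
    {
            struct tty_struct *tty = audit_get_tty(tsk); /* kref taken under siglock */

            audit_log_format(ab, " tty=%s", tty ? tty_name(tty) : "(none)");
            audit_put_tty(tty);                          /* drop the reference */
    }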
diff --git a/kernel/audit.h b/kernel/audit.h
index cbbe6bb6496e..a492f4c4e710 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -23,6 +23,7 @@
23#include <linux/audit.h> 23#include <linux/audit.h>
24#include <linux/skbuff.h> 24#include <linux/skbuff.h>
25#include <uapi/linux/mqueue.h> 25#include <uapi/linux/mqueue.h>
26#include <linux/tty.h>
26 27
27/* AUDIT_NAMES is the number of slots we reserve in the audit_context 28/* AUDIT_NAMES is the number of slots we reserve in the audit_context
28 * for saving names from getname(). If we get more names we will allocate 29 * for saving names from getname(). If we get more names we will allocate
@@ -262,6 +263,9 @@ extern struct audit_entry *audit_dupe_rule(struct audit_krule *old);
262extern void audit_log_d_path_exe(struct audit_buffer *ab, 263extern void audit_log_d_path_exe(struct audit_buffer *ab,
263 struct mm_struct *mm); 264 struct mm_struct *mm);
264 265
266extern struct tty_struct *audit_get_tty(struct task_struct *tsk);
267extern void audit_put_tty(struct tty_struct *tty);
268
265/* audit watch functions */ 269/* audit watch functions */
266#ifdef CONFIG_AUDIT_WATCH 270#ifdef CONFIG_AUDIT_WATCH
267extern void audit_put_watch(struct audit_watch *watch); 271extern void audit_put_watch(struct audit_watch *watch);
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 62ab53d7619c..2672d105cffc 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -63,7 +63,6 @@
63#include <asm/unistd.h> 63#include <asm/unistd.h>
64#include <linux/security.h> 64#include <linux/security.h>
65#include <linux/list.h> 65#include <linux/list.h>
66#include <linux/tty.h>
67#include <linux/binfmts.h> 66#include <linux/binfmts.h>
68#include <linux/highmem.h> 67#include <linux/highmem.h>
69#include <linux/syscalls.h> 68#include <linux/syscalls.h>
@@ -1985,14 +1984,15 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
1985 if (!audit_enabled) 1984 if (!audit_enabled)
1986 return; 1985 return;
1987 1986
1987 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
1988 if (!ab)
1989 return;
1990
1988 uid = from_kuid(&init_user_ns, task_uid(current)); 1991 uid = from_kuid(&init_user_ns, task_uid(current));
1989 oldloginuid = from_kuid(&init_user_ns, koldloginuid); 1992 oldloginuid = from_kuid(&init_user_ns, koldloginuid);
1990 loginuid = from_kuid(&init_user_ns, kloginuid), 1993 loginuid = from_kuid(&init_user_ns, kloginuid),
1991 tty = audit_get_tty(current); 1994 tty = audit_get_tty(current);
1992 1995
1993 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
1994 if (!ab)
1995 return;
1996 audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid); 1996 audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid);
1997 audit_log_task_context(ab); 1997 audit_log_task_context(ab);
1998 audit_log_format(ab, " old-auid=%u auid=%u tty=%s old-ses=%u ses=%u res=%d", 1998 audit_log_format(ab, " old-auid=%u auid=%u tty=%s old-ses=%u ses=%u res=%d",
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 5af30732697b..db1a743e3db2 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -390,9 +390,7 @@ static void *prog_fd_array_get_ptr(struct bpf_map *map,
390 390
391static void prog_fd_array_put_ptr(void *ptr) 391static void prog_fd_array_put_ptr(void *ptr)
392{ 392{
393 struct bpf_prog *prog = ptr; 393 bpf_prog_put(ptr);
394
395 bpf_prog_put_rcu(prog);
396} 394}
397 395
398/* decrement refcnt of all bpf_progs that are stored in this map */ 396/* decrement refcnt of all bpf_progs that are stored in this map */
@@ -539,3 +537,46 @@ static int __init register_perf_event_array_map(void)
539 return 0; 537 return 0;
540} 538}
541late_initcall(register_perf_event_array_map); 539late_initcall(register_perf_event_array_map);
540
541#ifdef CONFIG_SOCK_CGROUP_DATA
542static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
543 struct file *map_file /* not used */,
544 int fd)
545{
546 return cgroup_get_from_fd(fd);
547}
548
549static void cgroup_fd_array_put_ptr(void *ptr)
550{
551 /* cgroup_put() frees the cgrp after an RCU grace period */
552 cgroup_put(ptr);
553}
554
555static void cgroup_fd_array_free(struct bpf_map *map)
556{
557 bpf_fd_array_map_clear(map);
558 fd_array_map_free(map);
559}
560
561static const struct bpf_map_ops cgroup_array_ops = {
562 .map_alloc = fd_array_map_alloc,
563 .map_free = cgroup_fd_array_free,
564 .map_get_next_key = array_map_get_next_key,
565 .map_lookup_elem = fd_array_map_lookup_elem,
566 .map_delete_elem = fd_array_map_delete_elem,
567 .map_fd_get_ptr = cgroup_fd_array_get_ptr,
568 .map_fd_put_ptr = cgroup_fd_array_put_ptr,
569};
570
571static struct bpf_map_type_list cgroup_array_type __read_mostly = {
572 .ops = &cgroup_array_ops,
573 .type = BPF_MAP_TYPE_CGROUP_ARRAY,
574};
575
576static int __init register_cgroup_array_map(void)
577{
578 bpf_register_map_type(&cgroup_array_type);
579 return 0;
580}
581late_initcall(register_cgroup_array_map);
582#endif
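From userspace the new map type is created and populated like the other fd-array maps; the value written is a cgroup2 directory fd, which cgroup_fd_array_get_ptr() resolves to a cgroup reference. A hedged sketch against the raw bpf(2) syscall; the cgroup path is made up:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    static __u64 ptr_to_u64(const void *p)
    {
            return (__u64)(unsigned long)p;
    }

    static int bpf_sys(int cmd, union bpf_attr *attr)
    {
            return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
    }

    int main(void)
    {
            union bpf_attr attr;
            __u32 key = 0;
            int map_fd, cg_fd;

            /* One-slot cgroup array: key and value are both 4 bytes. */
            memset(&attr, 0, sizeof(attr));
            attr.map_type = BPF_MAP_TYPE_CGROUP_ARRAY;
            attr.key_size = sizeof(__u32);
            attr.value_size = sizeof(__u32);
            attr.max_entries = 1;
            map_fd = bpf_sys(BPF_MAP_CREATE, &attr);

            cg_fd = open("/sys/fs/cgroup/unified/mygroup", O_RDONLY); /* illustrative path */

            memset(&attr, 0, sizeof(attr));
            attr.map_fd = map_fd;
            attr.key = ptr_to_u64(&key);
            attr.value = ptr_to_u64(&cg_fd);   /* the fd itself is the value */
            attr.flags = BPF_ANY;
            if (bpf_sys(BPF_MAP_UPDATE_ELEM, &attr) < 0)
                    perror("map update");

            close(cg_fd);
            return 0;
    }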
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index b94a36550591..d638062f66d6 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -719,14 +719,13 @@ select_insn:
719 719
720 if (unlikely(index >= array->map.max_entries)) 720 if (unlikely(index >= array->map.max_entries))
721 goto out; 721 goto out;
722
723 if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT)) 722 if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
724 goto out; 723 goto out;
725 724
726 tail_call_cnt++; 725 tail_call_cnt++;
727 726
728 prog = READ_ONCE(array->ptrs[index]); 727 prog = READ_ONCE(array->ptrs[index]);
729 if (unlikely(!prog)) 728 if (!prog)
730 goto out; 729 goto out;
731 730
732 /* ARG1 at this point is guaranteed to point to CTX from 731 /* ARG1 at this point is guaranteed to point to CTX from
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index ad7a0573f71b..1ea3afba1a4f 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -101,7 +101,7 @@ const struct bpf_func_proto bpf_get_prandom_u32_proto = {
101 101
102static u64 bpf_get_smp_processor_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) 102static u64 bpf_get_smp_processor_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
103{ 103{
104 return raw_smp_processor_id(); 104 return smp_processor_id();
105} 105}
106 106
107const struct bpf_func_proto bpf_get_smp_processor_id_proto = { 107const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index c23a4e9311b3..96d938a22050 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -393,7 +393,8 @@ static int map_update_elem(union bpf_attr *attr)
393 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 393 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
394 err = bpf_percpu_array_update(map, key, value, attr->flags); 394 err = bpf_percpu_array_update(map, key, value, attr->flags);
395 } else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || 395 } else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
396 map->map_type == BPF_MAP_TYPE_PROG_ARRAY) { 396 map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
397 map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
397 rcu_read_lock(); 398 rcu_read_lock();
398 err = bpf_fd_array_map_update_elem(map, f.file, key, value, 399 err = bpf_fd_array_map_update_elem(map, f.file, key, value,
399 attr->flags); 400 attr->flags);
@@ -623,7 +624,7 @@ static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
623 free_uid(user); 624 free_uid(user);
624} 625}
625 626
626static void __prog_put_common(struct rcu_head *rcu) 627static void __bpf_prog_put_rcu(struct rcu_head *rcu)
627{ 628{
628 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); 629 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
629 630
@@ -632,17 +633,10 @@ static void __prog_put_common(struct rcu_head *rcu)
632 bpf_prog_free(aux->prog); 633 bpf_prog_free(aux->prog);
633} 634}
634 635
635/* version of bpf_prog_put() that is called after a grace period */
636void bpf_prog_put_rcu(struct bpf_prog *prog)
637{
638 if (atomic_dec_and_test(&prog->aux->refcnt))
639 call_rcu(&prog->aux->rcu, __prog_put_common);
640}
641
642void bpf_prog_put(struct bpf_prog *prog) 636void bpf_prog_put(struct bpf_prog *prog)
643{ 637{
644 if (atomic_dec_and_test(&prog->aux->refcnt)) 638 if (atomic_dec_and_test(&prog->aux->refcnt))
645 __prog_put_common(&prog->aux->rcu); 639 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
646} 640}
647EXPORT_SYMBOL_GPL(bpf_prog_put); 641EXPORT_SYMBOL_GPL(bpf_prog_put);
648 642
@@ -650,7 +644,7 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
650{ 644{
651 struct bpf_prog *prog = filp->private_data; 645 struct bpf_prog *prog = filp->private_data;
652 646
653 bpf_prog_put_rcu(prog); 647 bpf_prog_put(prog);
654 return 0; 648 return 0;
655} 649}
656 650
@@ -664,7 +658,7 @@ int bpf_prog_new_fd(struct bpf_prog *prog)
664 O_RDWR | O_CLOEXEC); 658 O_RDWR | O_CLOEXEC);
665} 659}
666 660
667static struct bpf_prog *__bpf_prog_get(struct fd f) 661static struct bpf_prog *____bpf_prog_get(struct fd f)
668{ 662{
669 if (!f.file) 663 if (!f.file)
670 return ERR_PTR(-EBADF); 664 return ERR_PTR(-EBADF);
@@ -685,24 +679,35 @@ struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
685 return prog; 679 return prog;
686} 680}
687 681
688/* called by sockets/tracing/seccomp before attaching program to an event 682static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
689 * pairs with bpf_prog_put()
690 */
691struct bpf_prog *bpf_prog_get(u32 ufd)
692{ 683{
693 struct fd f = fdget(ufd); 684 struct fd f = fdget(ufd);
694 struct bpf_prog *prog; 685 struct bpf_prog *prog;
695 686
696 prog = __bpf_prog_get(f); 687 prog = ____bpf_prog_get(f);
697 if (IS_ERR(prog)) 688 if (IS_ERR(prog))
698 return prog; 689 return prog;
690 if (type && prog->type != *type) {
691 prog = ERR_PTR(-EINVAL);
692 goto out;
693 }
699 694
700 prog = bpf_prog_inc(prog); 695 prog = bpf_prog_inc(prog);
696out:
701 fdput(f); 697 fdput(f);
702
703 return prog; 698 return prog;
704} 699}
705EXPORT_SYMBOL_GPL(bpf_prog_get); 700
701struct bpf_prog *bpf_prog_get(u32 ufd)
702{
703 return __bpf_prog_get(ufd, NULL);
704}
705
706struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
707{
708 return __bpf_prog_get(ufd, &type);
709}
710EXPORT_SYMBOL_GPL(bpf_prog_get_type);
706 711
707/* last field in 'union bpf_attr' used by this command */ 712/* last field in 'union bpf_attr' used by this command */
708#define BPF_PROG_LOAD_LAST_FIELD kern_version 713#define BPF_PROG_LOAD_LAST_FIELD kern_version
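A sketch of the call pattern bpf_prog_get_type() enables at attach sites; example_attach() is hypothetical, kernel context assumed:

    static int example_attach(u32 prog_fd)
    {
            struct bpf_prog *prog;

            prog = bpf_prog_get_type(prog_fd, BPF_PROG_TYPE_SOCKET_FILTER);
            if (IS_ERR(prog))
                    return PTR_ERR(prog);   /* -EINVAL on a type mismatch */

            /* ... wire the program up ... */
            bpf_prog_put(prog);             /* pairs with the get above */
            return 0;
    }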
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 668e07903c8f..e206c2181412 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -126,31 +126,6 @@
126 * are set to NOT_INIT to indicate that they are no longer readable. 126 * are set to NOT_INIT to indicate that they are no longer readable.
127 */ 127 */
128 128
129/* types of values stored in eBPF registers */
130enum bpf_reg_type {
131 NOT_INIT = 0, /* nothing was written into register */
132 UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */
133 PTR_TO_CTX, /* reg points to bpf_context */
134 CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
135 PTR_TO_MAP_VALUE, /* reg points to map element value */
136 PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
137 FRAME_PTR, /* reg == frame_pointer */
138 PTR_TO_STACK, /* reg == frame_pointer + imm */
139 CONST_IMM, /* constant integer value */
140
141 /* PTR_TO_PACKET represents:
142 * skb->data
143 * skb->data + imm
144 * skb->data + (u16) var
145 * skb->data + (u16) var + imm
146 * if (range > 0) then [ptr, ptr + range - off) is safe to access
147 * if (id > 0) means that some 'var' was added
148 * if (off > 0) means that 'imm' was added
149 */
150 PTR_TO_PACKET,
151 PTR_TO_PACKET_END, /* skb->data + headlen */
152};
153
154struct reg_state { 129struct reg_state {
155 enum bpf_reg_type type; 130 enum bpf_reg_type type;
156 union { 131 union {
@@ -695,10 +670,10 @@ static int check_packet_access(struct verifier_env *env, u32 regno, int off,
695 670
696/* check access to 'struct bpf_context' fields */ 671/* check access to 'struct bpf_context' fields */
697static int check_ctx_access(struct verifier_env *env, int off, int size, 672static int check_ctx_access(struct verifier_env *env, int off, int size,
698 enum bpf_access_type t) 673 enum bpf_access_type t, enum bpf_reg_type *reg_type)
699{ 674{
700 if (env->prog->aux->ops->is_valid_access && 675 if (env->prog->aux->ops->is_valid_access &&
701 env->prog->aux->ops->is_valid_access(off, size, t)) { 676 env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) {
702 /* remember the offset of last byte accessed in ctx */ 677 /* remember the offset of last byte accessed in ctx */
703 if (env->prog->aux->max_ctx_offset < off + size) 678 if (env->prog->aux->max_ctx_offset < off + size)
704 env->prog->aux->max_ctx_offset = off + size; 679 env->prog->aux->max_ctx_offset = off + size;
@@ -798,21 +773,19 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
798 mark_reg_unknown_value(state->regs, value_regno); 773 mark_reg_unknown_value(state->regs, value_regno);
799 774
800 } else if (reg->type == PTR_TO_CTX) { 775 } else if (reg->type == PTR_TO_CTX) {
776 enum bpf_reg_type reg_type = UNKNOWN_VALUE;
777
801 if (t == BPF_WRITE && value_regno >= 0 && 778 if (t == BPF_WRITE && value_regno >= 0 &&
802 is_pointer_value(env, value_regno)) { 779 is_pointer_value(env, value_regno)) {
803 verbose("R%d leaks addr into ctx\n", value_regno); 780 verbose("R%d leaks addr into ctx\n", value_regno);
804 return -EACCES; 781 return -EACCES;
805 } 782 }
806 err = check_ctx_access(env, off, size, t); 783 err = check_ctx_access(env, off, size, t, &reg_type);
807 if (!err && t == BPF_READ && value_regno >= 0) { 784 if (!err && t == BPF_READ && value_regno >= 0) {
808 mark_reg_unknown_value(state->regs, value_regno); 785 mark_reg_unknown_value(state->regs, value_regno);
809 if (off == offsetof(struct __sk_buff, data) && 786 if (env->allow_ptr_leaks)
810 env->allow_ptr_leaks)
811 /* note that reg.[id|off|range] == 0 */ 787 /* note that reg.[id|off|range] == 0 */
812 state->regs[value_regno].type = PTR_TO_PACKET; 788 state->regs[value_regno].type = reg_type;
813 else if (off == offsetof(struct __sk_buff, data_end) &&
814 env->allow_ptr_leaks)
815 state->regs[value_regno].type = PTR_TO_PACKET_END;
816 } 789 }
817 790
818 } else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) { 791 } else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) {
@@ -1062,6 +1035,10 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
1062 if (func_id != BPF_FUNC_get_stackid) 1035 if (func_id != BPF_FUNC_get_stackid)
1063 goto error; 1036 goto error;
1064 break; 1037 break;
1038 case BPF_MAP_TYPE_CGROUP_ARRAY:
1039 if (func_id != BPF_FUNC_skb_in_cgroup)
1040 goto error;
1041 break;
1065 default: 1042 default:
1066 break; 1043 break;
1067 } 1044 }
@@ -1081,6 +1058,10 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id)
1081 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 1058 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
1082 goto error; 1059 goto error;
1083 break; 1060 break;
1061 case BPF_FUNC_skb_in_cgroup:
1062 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
1063 goto error;
1064 break;
1084 default: 1065 default:
1085 break; 1066 break;
1086 } 1067 }
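
With the reg_type out-parameter added to check_ctx_access(), the per-program-type is_valid_access callback now reports what kind of value a context load produces, replacing the verifier's hardcoded special-casing of the two __sk_buff offsets. A sketch of a callback under the new signature; the body is illustrative, assuming a socket-buffer-style context, and is not copied from any in-tree callback:

    static bool sample_is_valid_access(int off, int size,
                                       enum bpf_access_type type,
                                       enum bpf_reg_type *reg_type)
    {
            if (off < 0 || off >= sizeof(struct __sk_buff) || size != 4)
                    return false;

            if (off == offsetof(struct __sk_buff, data))
                    *reg_type = PTR_TO_PACKET;   /* load yields a packet pointer */
            else if (off == offsetof(struct __sk_buff, data_end))
                    *reg_type = PTR_TO_PACKET_END;
            /* other fields keep the UNKNOWN_VALUE default set by the caller */
            return true;
    }
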
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 86cb5c6e8932..50787cd61da2 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -62,6 +62,7 @@
62#include <linux/proc_ns.h> 62#include <linux/proc_ns.h>
63#include <linux/nsproxy.h> 63#include <linux/nsproxy.h>
64#include <linux/proc_ns.h> 64#include <linux/proc_ns.h>
65#include <linux/file.h>
65#include <net/sock.h> 66#include <net/sock.h>
66 67
67/* 68/*
@@ -837,6 +838,8 @@ static void put_css_set_locked(struct css_set *cset)
837 838
838static void put_css_set(struct css_set *cset) 839static void put_css_set(struct css_set *cset)
839{ 840{
841 unsigned long flags;
842
840 /* 843 /*
841 * Ensure that the refcount doesn't hit zero while any readers 844 * Ensure that the refcount doesn't hit zero while any readers
842 * can see it. Similar to atomic_dec_and_lock(), but for an 845 * can see it. Similar to atomic_dec_and_lock(), but for an
@@ -845,9 +848,9 @@ static void put_css_set(struct css_set *cset)
845 if (atomic_add_unless(&cset->refcount, -1, 1)) 848 if (atomic_add_unless(&cset->refcount, -1, 1))
846 return; 849 return;
847 850
848 spin_lock_bh(&css_set_lock); 851 spin_lock_irqsave(&css_set_lock, flags);
849 put_css_set_locked(cset); 852 put_css_set_locked(cset);
850 spin_unlock_bh(&css_set_lock); 853 spin_unlock_irqrestore(&css_set_lock, flags);
851} 854}
852 855
853/* 856/*
@@ -1070,11 +1073,11 @@ static struct css_set *find_css_set(struct css_set *old_cset,
1070 1073
1071 /* First see if we already have a cgroup group that matches 1074 /* First see if we already have a cgroup group that matches
1072 * the desired set */ 1075 * the desired set */
1073 spin_lock_bh(&css_set_lock); 1076 spin_lock_irq(&css_set_lock);
1074 cset = find_existing_css_set(old_cset, cgrp, template); 1077 cset = find_existing_css_set(old_cset, cgrp, template);
1075 if (cset) 1078 if (cset)
1076 get_css_set(cset); 1079 get_css_set(cset);
1077 spin_unlock_bh(&css_set_lock); 1080 spin_unlock_irq(&css_set_lock);
1078 1081
1079 if (cset) 1082 if (cset)
1080 return cset; 1083 return cset;
@@ -1102,7 +1105,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
1102 * find_existing_css_set() */ 1105 * find_existing_css_set() */
1103 memcpy(cset->subsys, template, sizeof(cset->subsys)); 1106 memcpy(cset->subsys, template, sizeof(cset->subsys));
1104 1107
1105 spin_lock_bh(&css_set_lock); 1108 spin_lock_irq(&css_set_lock);
1106 /* Add reference counts and links from the new css_set. */ 1109 /* Add reference counts and links from the new css_set. */
1107 list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) { 1110 list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
1108 struct cgroup *c = link->cgrp; 1111 struct cgroup *c = link->cgrp;
@@ -1128,7 +1131,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
1128 css_get(css); 1131 css_get(css);
1129 } 1132 }
1130 1133
1131 spin_unlock_bh(&css_set_lock); 1134 spin_unlock_irq(&css_set_lock);
1132 1135
1133 return cset; 1136 return cset;
1134} 1137}
@@ -1192,7 +1195,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
1192 * Release all the links from cset_links to this hierarchy's 1195 * Release all the links from cset_links to this hierarchy's
1193 * root cgroup 1196 * root cgroup
1194 */ 1197 */
1195 spin_lock_bh(&css_set_lock); 1198 spin_lock_irq(&css_set_lock);
1196 1199
1197 list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) { 1200 list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
1198 list_del(&link->cset_link); 1201 list_del(&link->cset_link);
@@ -1200,7 +1203,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
1200 kfree(link); 1203 kfree(link);
1201 } 1204 }
1202 1205
1203 spin_unlock_bh(&css_set_lock); 1206 spin_unlock_irq(&css_set_lock);
1204 1207
1205 if (!list_empty(&root->root_list)) { 1208 if (!list_empty(&root->root_list)) {
1206 list_del(&root->root_list); 1209 list_del(&root->root_list);
@@ -1600,11 +1603,11 @@ static int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
1600 ss->root = dst_root; 1603 ss->root = dst_root;
1601 css->cgroup = dcgrp; 1604 css->cgroup = dcgrp;
1602 1605
1603 spin_lock_bh(&css_set_lock); 1606 spin_lock_irq(&css_set_lock);
1604 hash_for_each(css_set_table, i, cset, hlist) 1607 hash_for_each(css_set_table, i, cset, hlist)
1605 list_move_tail(&cset->e_cset_node[ss->id], 1608 list_move_tail(&cset->e_cset_node[ss->id],
1606 &dcgrp->e_csets[ss->id]); 1609 &dcgrp->e_csets[ss->id]);
1607 spin_unlock_bh(&css_set_lock); 1610 spin_unlock_irq(&css_set_lock);
1608 1611
1609 /* default hierarchy doesn't enable controllers by default */ 1612 /* default hierarchy doesn't enable controllers by default */
1610 dst_root->subsys_mask |= 1 << ssid; 1613 dst_root->subsys_mask |= 1 << ssid;
@@ -1640,10 +1643,10 @@ static int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
1640 if (!buf) 1643 if (!buf)
1641 return -ENOMEM; 1644 return -ENOMEM;
1642 1645
1643 spin_lock_bh(&css_set_lock); 1646 spin_lock_irq(&css_set_lock);
1644 ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot); 1647 ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
1645 len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX); 1648 len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
1646 spin_unlock_bh(&css_set_lock); 1649 spin_unlock_irq(&css_set_lock);
1647 1650
1648 if (len >= PATH_MAX) 1651 if (len >= PATH_MAX)
1649 len = -ERANGE; 1652 len = -ERANGE;
@@ -1897,7 +1900,7 @@ static void cgroup_enable_task_cg_lists(void)
1897{ 1900{
1898 struct task_struct *p, *g; 1901 struct task_struct *p, *g;
1899 1902
1900 spin_lock_bh(&css_set_lock); 1903 spin_lock_irq(&css_set_lock);
1901 1904
1902 if (use_task_css_set_links) 1905 if (use_task_css_set_links)
1903 goto out_unlock; 1906 goto out_unlock;
@@ -1922,8 +1925,12 @@ static void cgroup_enable_task_cg_lists(void)
1922 * entry won't be deleted though the process has exited. 1925 * entry won't be deleted though the process has exited.
1923 * Do it while holding siglock so that we don't end up 1926 * Do it while holding siglock so that we don't end up
1924 * racing against cgroup_exit(). 1927 * racing against cgroup_exit().
1928 *
1929 * Interrupts were already disabled while acquiring
1930 * the css_set_lock, so we do not need to disable them
1931 * again when acquiring the sighand->siglock here.
1925 */ 1932 */
1926 spin_lock_irq(&p->sighand->siglock); 1933 spin_lock(&p->sighand->siglock);
1927 if (!(p->flags & PF_EXITING)) { 1934 if (!(p->flags & PF_EXITING)) {
1928 struct css_set *cset = task_css_set(p); 1935 struct css_set *cset = task_css_set(p);
1929 1936
@@ -1932,11 +1939,11 @@ static void cgroup_enable_task_cg_lists(void)
1932 list_add_tail(&p->cg_list, &cset->tasks); 1939 list_add_tail(&p->cg_list, &cset->tasks);
1933 get_css_set(cset); 1940 get_css_set(cset);
1934 } 1941 }
1935 spin_unlock_irq(&p->sighand->siglock); 1942 spin_unlock(&p->sighand->siglock);
1936 } while_each_thread(g, p); 1943 } while_each_thread(g, p);
1937 read_unlock(&tasklist_lock); 1944 read_unlock(&tasklist_lock);
1938out_unlock: 1945out_unlock:
1939 spin_unlock_bh(&css_set_lock); 1946 spin_unlock_irq(&css_set_lock);
1940} 1947}
1941 1948
1942static void init_cgroup_housekeeping(struct cgroup *cgrp) 1949static void init_cgroup_housekeeping(struct cgroup *cgrp)
@@ -2043,13 +2050,13 @@ static int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
2043 * Link the root cgroup in this hierarchy into all the css_set 2050 * Link the root cgroup in this hierarchy into all the css_set
2044 * objects. 2051 * objects.
2045 */ 2052 */
2046 spin_lock_bh(&css_set_lock); 2053 spin_lock_irq(&css_set_lock);
2047 hash_for_each(css_set_table, i, cset, hlist) { 2054 hash_for_each(css_set_table, i, cset, hlist) {
2048 link_css_set(&tmp_links, cset, root_cgrp); 2055 link_css_set(&tmp_links, cset, root_cgrp);
2049 if (css_set_populated(cset)) 2056 if (css_set_populated(cset))
2050 cgroup_update_populated(root_cgrp, true); 2057 cgroup_update_populated(root_cgrp, true);
2051 } 2058 }
2052 spin_unlock_bh(&css_set_lock); 2059 spin_unlock_irq(&css_set_lock);
2053 2060
2054 BUG_ON(!list_empty(&root_cgrp->self.children)); 2061 BUG_ON(!list_empty(&root_cgrp->self.children));
2055 BUG_ON(atomic_read(&root->nr_cgrps) != 1); 2062 BUG_ON(atomic_read(&root->nr_cgrps) != 1);
@@ -2256,11 +2263,11 @@ out_mount:
2256 struct cgroup *cgrp; 2263 struct cgroup *cgrp;
2257 2264
2258 mutex_lock(&cgroup_mutex); 2265 mutex_lock(&cgroup_mutex);
2259 spin_lock_bh(&css_set_lock); 2266 spin_lock_irq(&css_set_lock);
2260 2267
2261 cgrp = cset_cgroup_from_root(ns->root_cset, root); 2268 cgrp = cset_cgroup_from_root(ns->root_cset, root);
2262 2269
2263 spin_unlock_bh(&css_set_lock); 2270 spin_unlock_irq(&css_set_lock);
2264 mutex_unlock(&cgroup_mutex); 2271 mutex_unlock(&cgroup_mutex);
2265 2272
2266 nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb); 2273 nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb);
@@ -2337,11 +2344,11 @@ char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
2337 char *ret; 2344 char *ret;
2338 2345
2339 mutex_lock(&cgroup_mutex); 2346 mutex_lock(&cgroup_mutex);
2340 spin_lock_bh(&css_set_lock); 2347 spin_lock_irq(&css_set_lock);
2341 2348
2342 ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns); 2349 ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
2343 2350
2344 spin_unlock_bh(&css_set_lock); 2351 spin_unlock_irq(&css_set_lock);
2345 mutex_unlock(&cgroup_mutex); 2352 mutex_unlock(&cgroup_mutex);
2346 2353
2347 return ret; 2354 return ret;
@@ -2369,7 +2376,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
2369 char *path = NULL; 2376 char *path = NULL;
2370 2377
2371 mutex_lock(&cgroup_mutex); 2378 mutex_lock(&cgroup_mutex);
2372 spin_lock_bh(&css_set_lock); 2379 spin_lock_irq(&css_set_lock);
2373 2380
2374 root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id); 2381 root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
2375 2382
@@ -2382,7 +2389,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
2382 path = buf; 2389 path = buf;
2383 } 2390 }
2384 2391
2385 spin_unlock_bh(&css_set_lock); 2392 spin_unlock_irq(&css_set_lock);
2386 mutex_unlock(&cgroup_mutex); 2393 mutex_unlock(&cgroup_mutex);
2387 return path; 2394 return path;
2388} 2395}
@@ -2557,7 +2564,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
2557 * the new cgroup. There are no failure cases after here, so this 2564 * the new cgroup. There are no failure cases after here, so this
2558 * is the commit point. 2565 * is the commit point.
2559 */ 2566 */
2560 spin_lock_bh(&css_set_lock); 2567 spin_lock_irq(&css_set_lock);
2561 list_for_each_entry(cset, &tset->src_csets, mg_node) { 2568 list_for_each_entry(cset, &tset->src_csets, mg_node) {
2562 list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) { 2569 list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
2563 struct css_set *from_cset = task_css_set(task); 2570 struct css_set *from_cset = task_css_set(task);
@@ -2568,7 +2575,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
2568 put_css_set_locked(from_cset); 2575 put_css_set_locked(from_cset);
2569 } 2576 }
2570 } 2577 }
2571 spin_unlock_bh(&css_set_lock); 2578 spin_unlock_irq(&css_set_lock);
2572 2579
2573 /* 2580 /*
2574 * Migration is committed, all target tasks are now on dst_csets. 2581 * Migration is committed, all target tasks are now on dst_csets.
@@ -2597,13 +2604,13 @@ out_cancel_attach:
2597 } 2604 }
2598 } while_each_subsys_mask(); 2605 } while_each_subsys_mask();
2599out_release_tset: 2606out_release_tset:
2600 spin_lock_bh(&css_set_lock); 2607 spin_lock_irq(&css_set_lock);
2601 list_splice_init(&tset->dst_csets, &tset->src_csets); 2608 list_splice_init(&tset->dst_csets, &tset->src_csets);
2602 list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) { 2609 list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
2603 list_splice_tail_init(&cset->mg_tasks, &cset->tasks); 2610 list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
2604 list_del_init(&cset->mg_node); 2611 list_del_init(&cset->mg_node);
2605 } 2612 }
2606 spin_unlock_bh(&css_set_lock); 2613 spin_unlock_irq(&css_set_lock);
2607 return ret; 2614 return ret;
2608} 2615}
2609 2616
@@ -2634,7 +2641,7 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
2634 2641
2635 lockdep_assert_held(&cgroup_mutex); 2642 lockdep_assert_held(&cgroup_mutex);
2636 2643
2637 spin_lock_bh(&css_set_lock); 2644 spin_lock_irq(&css_set_lock);
2638 list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) { 2645 list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
2639 cset->mg_src_cgrp = NULL; 2646 cset->mg_src_cgrp = NULL;
2640 cset->mg_dst_cgrp = NULL; 2647 cset->mg_dst_cgrp = NULL;
@@ -2642,7 +2649,7 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
2642 list_del_init(&cset->mg_preload_node); 2649 list_del_init(&cset->mg_preload_node);
2643 put_css_set_locked(cset); 2650 put_css_set_locked(cset);
2644 } 2651 }
2645 spin_unlock_bh(&css_set_lock); 2652 spin_unlock_irq(&css_set_lock);
2646} 2653}
2647 2654
2648/** 2655/**
@@ -2783,7 +2790,7 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
2783 * already PF_EXITING could be freed from underneath us unless we 2790 * already PF_EXITING could be freed from underneath us unless we
2784 * take an rcu_read_lock. 2791 * take an rcu_read_lock.
2785 */ 2792 */
2786 spin_lock_bh(&css_set_lock); 2793 spin_lock_irq(&css_set_lock);
2787 rcu_read_lock(); 2794 rcu_read_lock();
2788 task = leader; 2795 task = leader;
2789 do { 2796 do {
@@ -2792,7 +2799,7 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
2792 break; 2799 break;
2793 } while_each_thread(leader, task); 2800 } while_each_thread(leader, task);
2794 rcu_read_unlock(); 2801 rcu_read_unlock();
2795 spin_unlock_bh(&css_set_lock); 2802 spin_unlock_irq(&css_set_lock);
2796 2803
2797 return cgroup_taskset_migrate(&tset, root); 2804 return cgroup_taskset_migrate(&tset, root);
2798} 2805}
@@ -2816,7 +2823,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
2816 return -EBUSY; 2823 return -EBUSY;
2817 2824
2818 /* look up all src csets */ 2825 /* look up all src csets */
2819 spin_lock_bh(&css_set_lock); 2826 spin_lock_irq(&css_set_lock);
2820 rcu_read_lock(); 2827 rcu_read_lock();
2821 task = leader; 2828 task = leader;
2822 do { 2829 do {
@@ -2826,7 +2833,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
2826 break; 2833 break;
2827 } while_each_thread(leader, task); 2834 } while_each_thread(leader, task);
2828 rcu_read_unlock(); 2835 rcu_read_unlock();
2829 spin_unlock_bh(&css_set_lock); 2836 spin_unlock_irq(&css_set_lock);
2830 2837
2831 /* prepare dst csets and commit */ 2838 /* prepare dst csets and commit */
2832 ret = cgroup_migrate_prepare_dst(&preloaded_csets); 2839 ret = cgroup_migrate_prepare_dst(&preloaded_csets);
@@ -2859,9 +2866,9 @@ static int cgroup_procs_write_permission(struct task_struct *task,
2859 struct cgroup *cgrp; 2866 struct cgroup *cgrp;
2860 struct inode *inode; 2867 struct inode *inode;
2861 2868
2862 spin_lock_bh(&css_set_lock); 2869 spin_lock_irq(&css_set_lock);
2863 cgrp = task_cgroup_from_root(task, &cgrp_dfl_root); 2870 cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
2864 spin_unlock_bh(&css_set_lock); 2871 spin_unlock_irq(&css_set_lock);
2865 2872
2866 while (!cgroup_is_descendant(dst_cgrp, cgrp)) 2873 while (!cgroup_is_descendant(dst_cgrp, cgrp))
2867 cgrp = cgroup_parent(cgrp); 2874 cgrp = cgroup_parent(cgrp);
@@ -2962,9 +2969,9 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
2962 if (root == &cgrp_dfl_root) 2969 if (root == &cgrp_dfl_root)
2963 continue; 2970 continue;
2964 2971
2965 spin_lock_bh(&css_set_lock); 2972 spin_lock_irq(&css_set_lock);
2966 from_cgrp = task_cgroup_from_root(from, root); 2973 from_cgrp = task_cgroup_from_root(from, root);
2967 spin_unlock_bh(&css_set_lock); 2974 spin_unlock_irq(&css_set_lock);
2968 2975
2969 retval = cgroup_attach_task(from_cgrp, tsk, false); 2976 retval = cgroup_attach_task(from_cgrp, tsk, false);
2970 if (retval) 2977 if (retval)
@@ -3080,7 +3087,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
3080 percpu_down_write(&cgroup_threadgroup_rwsem); 3087 percpu_down_write(&cgroup_threadgroup_rwsem);
3081 3088
3082 /* look up all csses currently attached to @cgrp's subtree */ 3089 /* look up all csses currently attached to @cgrp's subtree */
3083 spin_lock_bh(&css_set_lock); 3090 spin_lock_irq(&css_set_lock);
3084 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { 3091 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
3085 struct cgrp_cset_link *link; 3092 struct cgrp_cset_link *link;
3086 3093
@@ -3088,14 +3095,14 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
3088 cgroup_migrate_add_src(link->cset, dsct, 3095 cgroup_migrate_add_src(link->cset, dsct,
3089 &preloaded_csets); 3096 &preloaded_csets);
3090 } 3097 }
3091 spin_unlock_bh(&css_set_lock); 3098 spin_unlock_irq(&css_set_lock);
3092 3099
3093 /* NULL dst indicates self on default hierarchy */ 3100 /* NULL dst indicates self on default hierarchy */
3094 ret = cgroup_migrate_prepare_dst(&preloaded_csets); 3101 ret = cgroup_migrate_prepare_dst(&preloaded_csets);
3095 if (ret) 3102 if (ret)
3096 goto out_finish; 3103 goto out_finish;
3097 3104
3098 spin_lock_bh(&css_set_lock); 3105 spin_lock_irq(&css_set_lock);
3099 list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) { 3106 list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
3100 struct task_struct *task, *ntask; 3107 struct task_struct *task, *ntask;
3101 3108
@@ -3107,7 +3114,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
3107 list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list) 3114 list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
3108 cgroup_taskset_add(task, &tset); 3115 cgroup_taskset_add(task, &tset);
3109 } 3116 }
3110 spin_unlock_bh(&css_set_lock); 3117 spin_unlock_irq(&css_set_lock);
3111 3118
3112 ret = cgroup_taskset_migrate(&tset, cgrp->root); 3119 ret = cgroup_taskset_migrate(&tset, cgrp->root);
3113out_finish: 3120out_finish:
@@ -3908,10 +3915,10 @@ static int cgroup_task_count(const struct cgroup *cgrp)
3908 int count = 0; 3915 int count = 0;
3909 struct cgrp_cset_link *link; 3916 struct cgrp_cset_link *link;
3910 3917
3911 spin_lock_bh(&css_set_lock); 3918 spin_lock_irq(&css_set_lock);
3912 list_for_each_entry(link, &cgrp->cset_links, cset_link) 3919 list_for_each_entry(link, &cgrp->cset_links, cset_link)
3913 count += atomic_read(&link->cset->refcount); 3920 count += atomic_read(&link->cset->refcount);
3914 spin_unlock_bh(&css_set_lock); 3921 spin_unlock_irq(&css_set_lock);
3915 return count; 3922 return count;
3916} 3923}
3917 3924
@@ -4249,7 +4256,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
4249 4256
4250 memset(it, 0, sizeof(*it)); 4257 memset(it, 0, sizeof(*it));
4251 4258
4252 spin_lock_bh(&css_set_lock); 4259 spin_lock_irq(&css_set_lock);
4253 4260
4254 it->ss = css->ss; 4261 it->ss = css->ss;
4255 4262
@@ -4262,7 +4269,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
4262 4269
4263 css_task_iter_advance_css_set(it); 4270 css_task_iter_advance_css_set(it);
4264 4271
4265 spin_unlock_bh(&css_set_lock); 4272 spin_unlock_irq(&css_set_lock);
4266} 4273}
4267 4274
4268/** 4275/**
@@ -4280,7 +4287,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
4280 it->cur_task = NULL; 4287 it->cur_task = NULL;
4281 } 4288 }
4282 4289
4283 spin_lock_bh(&css_set_lock); 4290 spin_lock_irq(&css_set_lock);
4284 4291
4285 if (it->task_pos) { 4292 if (it->task_pos) {
4286 it->cur_task = list_entry(it->task_pos, struct task_struct, 4293 it->cur_task = list_entry(it->task_pos, struct task_struct,
@@ -4289,7 +4296,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
4289 css_task_iter_advance(it); 4296 css_task_iter_advance(it);
4290 } 4297 }
4291 4298
4292 spin_unlock_bh(&css_set_lock); 4299 spin_unlock_irq(&css_set_lock);
4293 4300
4294 return it->cur_task; 4301 return it->cur_task;
4295} 4302}
@@ -4303,10 +4310,10 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
4303void css_task_iter_end(struct css_task_iter *it) 4310void css_task_iter_end(struct css_task_iter *it)
4304{ 4311{
4305 if (it->cur_cset) { 4312 if (it->cur_cset) {
4306 spin_lock_bh(&css_set_lock); 4313 spin_lock_irq(&css_set_lock);
4307 list_del(&it->iters_node); 4314 list_del(&it->iters_node);
4308 put_css_set_locked(it->cur_cset); 4315 put_css_set_locked(it->cur_cset);
4309 spin_unlock_bh(&css_set_lock); 4316 spin_unlock_irq(&css_set_lock);
4310 } 4317 }
4311 4318
4312 if (it->cur_task) 4319 if (it->cur_task)
@@ -4338,10 +4345,10 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
4338 mutex_lock(&cgroup_mutex); 4345 mutex_lock(&cgroup_mutex);
4339 4346
4340 /* all tasks in @from are being moved, all csets are source */ 4347 /* all tasks in @from are being moved, all csets are source */
4341 spin_lock_bh(&css_set_lock); 4348 spin_lock_irq(&css_set_lock);
4342 list_for_each_entry(link, &from->cset_links, cset_link) 4349 list_for_each_entry(link, &from->cset_links, cset_link)
4343 cgroup_migrate_add_src(link->cset, to, &preloaded_csets); 4350 cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
4344 spin_unlock_bh(&css_set_lock); 4351 spin_unlock_irq(&css_set_lock);
4345 4352
4346 ret = cgroup_migrate_prepare_dst(&preloaded_csets); 4353 ret = cgroup_migrate_prepare_dst(&preloaded_csets);
4347 if (ret) 4354 if (ret)
@@ -5063,6 +5070,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
5063 memset(css, 0, sizeof(*css)); 5070 memset(css, 0, sizeof(*css));
5064 css->cgroup = cgrp; 5071 css->cgroup = cgrp;
5065 css->ss = ss; 5072 css->ss = ss;
5073 css->id = -1;
5066 INIT_LIST_HEAD(&css->sibling); 5074 INIT_LIST_HEAD(&css->sibling);
5067 INIT_LIST_HEAD(&css->children); 5075 INIT_LIST_HEAD(&css->children);
5068 css->serial_nr = css_serial_nr_next++; 5076 css->serial_nr = css_serial_nr_next++;
@@ -5150,7 +5158,7 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
5150 5158
5151 err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL); 5159 err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
5152 if (err < 0) 5160 if (err < 0)
5153 goto err_free_percpu_ref; 5161 goto err_free_css;
5154 css->id = err; 5162 css->id = err;
5155 5163
5156 /* @css is ready to be brought online now, make it visible */ 5164 /* @css is ready to be brought online now, make it visible */
@@ -5174,9 +5182,6 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
5174 5182
5175err_list_del: 5183err_list_del:
5176 list_del_rcu(&css->sibling); 5184 list_del_rcu(&css->sibling);
5177 cgroup_idr_remove(&ss->css_idr, css->id);
5178err_free_percpu_ref:
5179 percpu_ref_exit(&css->refcnt);
5180err_free_css: 5185err_free_css:
5181 call_rcu(&css->rcu_head, css_free_rcu_fn); 5186 call_rcu(&css->rcu_head, css_free_rcu_fn);
5182 return ERR_PTR(err); 5187 return ERR_PTR(err);
@@ -5451,10 +5456,10 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
5451 */ 5456 */
5452 cgrp->self.flags &= ~CSS_ONLINE; 5457 cgrp->self.flags &= ~CSS_ONLINE;
5453 5458
5454 spin_lock_bh(&css_set_lock); 5459 spin_lock_irq(&css_set_lock);
5455 list_for_each_entry(link, &cgrp->cset_links, cset_link) 5460 list_for_each_entry(link, &cgrp->cset_links, cset_link)
5456 link->cset->dead = true; 5461 link->cset->dead = true;
5457 spin_unlock_bh(&css_set_lock); 5462 spin_unlock_irq(&css_set_lock);
5458 5463
5459 /* initiate massacre of all css's */ 5464 /* initiate massacre of all css's */
5460 for_each_css(css, ssid, cgrp) 5465 for_each_css(css, ssid, cgrp)
@@ -5725,7 +5730,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
5725 goto out; 5730 goto out;
5726 5731
5727 mutex_lock(&cgroup_mutex); 5732 mutex_lock(&cgroup_mutex);
5728 spin_lock_bh(&css_set_lock); 5733 spin_lock_irq(&css_set_lock);
5729 5734
5730 for_each_root(root) { 5735 for_each_root(root) {
5731 struct cgroup_subsys *ss; 5736 struct cgroup_subsys *ss;
@@ -5778,7 +5783,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
5778 5783
5779 retval = 0; 5784 retval = 0;
5780out_unlock: 5785out_unlock:
5781 spin_unlock_bh(&css_set_lock); 5786 spin_unlock_irq(&css_set_lock);
5782 mutex_unlock(&cgroup_mutex); 5787 mutex_unlock(&cgroup_mutex);
5783 kfree(buf); 5788 kfree(buf);
5784out: 5789out:
@@ -5923,13 +5928,13 @@ void cgroup_post_fork(struct task_struct *child)
5923 if (use_task_css_set_links) { 5928 if (use_task_css_set_links) {
5924 struct css_set *cset; 5929 struct css_set *cset;
5925 5930
5926 spin_lock_bh(&css_set_lock); 5931 spin_lock_irq(&css_set_lock);
5927 cset = task_css_set(current); 5932 cset = task_css_set(current);
5928 if (list_empty(&child->cg_list)) { 5933 if (list_empty(&child->cg_list)) {
5929 get_css_set(cset); 5934 get_css_set(cset);
5930 css_set_move_task(child, NULL, cset, false); 5935 css_set_move_task(child, NULL, cset, false);
5931 } 5936 }
5932 spin_unlock_bh(&css_set_lock); 5937 spin_unlock_irq(&css_set_lock);
5933 } 5938 }
5934 5939
5935 /* 5940 /*
@@ -5974,9 +5979,9 @@ void cgroup_exit(struct task_struct *tsk)
5974 cset = task_css_set(tsk); 5979 cset = task_css_set(tsk);
5975 5980
5976 if (!list_empty(&tsk->cg_list)) { 5981 if (!list_empty(&tsk->cg_list)) {
5977 spin_lock_bh(&css_set_lock); 5982 spin_lock_irq(&css_set_lock);
5978 css_set_move_task(tsk, cset, NULL, false); 5983 css_set_move_task(tsk, cset, NULL, false);
5979 spin_unlock_bh(&css_set_lock); 5984 spin_unlock_irq(&css_set_lock);
5980 } else { 5985 } else {
5981 get_css_set(cset); 5986 get_css_set(cset);
5982 } 5987 }
@@ -6044,9 +6049,9 @@ static void cgroup_release_agent(struct work_struct *work)
6044 if (!pathbuf || !agentbuf) 6049 if (!pathbuf || !agentbuf)
6045 goto out; 6050 goto out;
6046 6051
6047 spin_lock_bh(&css_set_lock); 6052 spin_lock_irq(&css_set_lock);
6048 path = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns); 6053 path = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
6049 spin_unlock_bh(&css_set_lock); 6054 spin_unlock_irq(&css_set_lock);
6050 if (!path) 6055 if (!path)
6051 goto out; 6056 goto out;
6052 6057
@@ -6205,6 +6210,40 @@ struct cgroup *cgroup_get_from_path(const char *path)
6205} 6210}
6206EXPORT_SYMBOL_GPL(cgroup_get_from_path); 6211EXPORT_SYMBOL_GPL(cgroup_get_from_path);
6207 6212
6213/**
6214 * cgroup_get_from_fd - get a cgroup pointer from a fd
6215 * @fd: fd obtained by open(cgroup2_dir)
6216 *
6217 * Find the cgroup from a fd which should be obtained
6218 * by opening a cgroup directory. Returns a pointer to the
6219 * cgroup on success. ERR_PTR is returned if the cgroup
6220 * cannot be found.
6221 */
6222struct cgroup *cgroup_get_from_fd(int fd)
6223{
6224 struct cgroup_subsys_state *css;
6225 struct cgroup *cgrp;
6226 struct file *f;
6227
6228 f = fget_raw(fd);
6229 if (!f)
6230 return ERR_PTR(-EBADF);
6231
6232 css = css_tryget_online_from_dir(f->f_path.dentry, NULL);
6233 fput(f);
6234 if (IS_ERR(css))
6235 return ERR_CAST(css);
6236
6237 cgrp = css->cgroup;
6238 if (!cgroup_on_dfl(cgrp)) {
6239 cgroup_put(cgrp);
6240 return ERR_PTR(-EBADF);
6241 }
6242
6243 return cgrp;
6244}
6245EXPORT_SYMBOL_GPL(cgroup_get_from_fd);
6246
6208/* 6247/*
6209 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data 6248 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
6210 * definition in cgroup-defs.h. 6249 * definition in cgroup-defs.h.
@@ -6306,12 +6345,12 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
6306 return ERR_PTR(-EPERM); 6345 return ERR_PTR(-EPERM);
6307 6346
6308 mutex_lock(&cgroup_mutex); 6347 mutex_lock(&cgroup_mutex);
6309 spin_lock_bh(&css_set_lock); 6348 spin_lock_irq(&css_set_lock);
6310 6349
6311 cset = task_css_set(current); 6350 cset = task_css_set(current);
6312 get_css_set(cset); 6351 get_css_set(cset);
6313 6352
6314 spin_unlock_bh(&css_set_lock); 6353 spin_unlock_irq(&css_set_lock);
6315 mutex_unlock(&cgroup_mutex); 6354 mutex_unlock(&cgroup_mutex);
6316 6355
6317 new_ns = alloc_cgroup_ns(); 6356 new_ns = alloc_cgroup_ns();
@@ -6435,7 +6474,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
6435 if (!name_buf) 6474 if (!name_buf)
6436 return -ENOMEM; 6475 return -ENOMEM;
6437 6476
6438 spin_lock_bh(&css_set_lock); 6477 spin_lock_irq(&css_set_lock);
6439 rcu_read_lock(); 6478 rcu_read_lock();
6440 cset = rcu_dereference(current->cgroups); 6479 cset = rcu_dereference(current->cgroups);
6441 list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { 6480 list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
@@ -6446,7 +6485,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
6446 c->root->hierarchy_id, name_buf); 6485 c->root->hierarchy_id, name_buf);
6447 } 6486 }
6448 rcu_read_unlock(); 6487 rcu_read_unlock();
6449 spin_unlock_bh(&css_set_lock); 6488 spin_unlock_irq(&css_set_lock);
6450 kfree(name_buf); 6489 kfree(name_buf);
6451 return 0; 6490 return 0;
6452} 6491}
@@ -6457,7 +6496,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
6457 struct cgroup_subsys_state *css = seq_css(seq); 6496 struct cgroup_subsys_state *css = seq_css(seq);
6458 struct cgrp_cset_link *link; 6497 struct cgrp_cset_link *link;
6459 6498
6460 spin_lock_bh(&css_set_lock); 6499 spin_lock_irq(&css_set_lock);
6461 list_for_each_entry(link, &css->cgroup->cset_links, cset_link) { 6500 list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
6462 struct css_set *cset = link->cset; 6501 struct css_set *cset = link->cset;
6463 struct task_struct *task; 6502 struct task_struct *task;
@@ -6480,7 +6519,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
6480 overflow: 6519 overflow:
6481 seq_puts(seq, " ...\n"); 6520 seq_puts(seq, " ...\n");
6482 } 6521 }
6483 spin_unlock_bh(&css_set_lock); 6522 spin_unlock_irq(&css_set_lock);
6484 return 0; 6523 return 0;
6485} 6524}
6486 6525
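
All css_set_lock sites in cgroup.c above move from the _bh variants to the _irq family. The rule the conversion follows: paths known to run in process context with interrupts enabled can take the cheaper spin_lock_irq(), while put_css_set(), which may be reached with interrupts already disabled, has to save and restore the caller's IRQ state. A condensed sketch of the two patterns:

    /* known process context, IRQs enabled on entry */
    spin_lock_irq(&css_set_lock);
    /* ... walk cset/cgroup links ... */
    spin_unlock_irq(&css_set_lock);

    /* caller's IRQ state unknown: save it and restore it */
    unsigned long flags;

    spin_lock_irqsave(&css_set_lock, flags);
    put_css_set_locked(cset);
    spin_unlock_irqrestore(&css_set_lock, flags);
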
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 274450efea90..9c51ec3f0f44 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3862,10 +3862,8 @@ static void _free_event(struct perf_event *event)
3862 if (event->ctx) 3862 if (event->ctx)
3863 put_ctx(event->ctx); 3863 put_ctx(event->ctx);
3864 3864
3865 if (event->pmu) { 3865 exclusive_event_destroy(event);
3866 exclusive_event_destroy(event); 3866 module_put(event->pmu->module);
3867 module_put(event->pmu->module);
3868 }
3869 3867
3870 call_rcu(&event->rcu_head, free_event_rcu); 3868 call_rcu(&event->rcu_head, free_event_rcu);
3871} 3869}
diff --git a/kernel/fork.c b/kernel/fork.c
index 5c2c355aa97f..4a7ec0c6c88c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -148,18 +148,18 @@ static inline void free_task_struct(struct task_struct *tsk)
148} 148}
149#endif 149#endif
150 150
151void __weak arch_release_thread_info(struct thread_info *ti) 151void __weak arch_release_thread_stack(unsigned long *stack)
152{ 152{
153} 153}
154 154
155#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR 155#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
156 156
157/* 157/*
158 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a 158 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
159 * kmemcache based allocator. 159 * kmemcache based allocator.
160 */ 160 */
161# if THREAD_SIZE >= PAGE_SIZE 161# if THREAD_SIZE >= PAGE_SIZE
162static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, 162static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
163 int node) 163 int node)
164{ 164{
165 struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP, 165 struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
@@ -172,33 +172,33 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
172 return page ? page_address(page) : NULL; 172 return page ? page_address(page) : NULL;
173} 173}
174 174
175static inline void free_thread_info(struct thread_info *ti) 175static inline void free_thread_stack(unsigned long *stack)
176{ 176{
177 struct page *page = virt_to_page(ti); 177 struct page *page = virt_to_page(stack);
178 178
179 memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK, 179 memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
180 -(1 << THREAD_SIZE_ORDER)); 180 -(1 << THREAD_SIZE_ORDER));
181 __free_kmem_pages(page, THREAD_SIZE_ORDER); 181 __free_kmem_pages(page, THREAD_SIZE_ORDER);
182} 182}
183# else 183# else
184static struct kmem_cache *thread_info_cache; 184static struct kmem_cache *thread_stack_cache;
185 185
186static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, 186static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
187 int node) 187 int node)
188{ 188{
189 return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node); 189 return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
190} 190}
191 191
192static void free_thread_info(struct thread_info *ti) 192static void free_thread_stack(unsigned long *stack)
193{ 193{
194 kmem_cache_free(thread_info_cache, ti); 194 kmem_cache_free(thread_stack_cache, stack);
195} 195}
196 196
197void thread_info_cache_init(void) 197void thread_stack_cache_init(void)
198{ 198{
199 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, 199 thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
200 THREAD_SIZE, 0, NULL); 200 THREAD_SIZE, 0, NULL);
201 BUG_ON(thread_info_cache == NULL); 201 BUG_ON(thread_stack_cache == NULL);
202} 202}
203# endif 203# endif
204#endif 204#endif
@@ -221,9 +221,9 @@ struct kmem_cache *vm_area_cachep;
221/* SLAB cache for mm_struct structures (tsk->mm) */ 221/* SLAB cache for mm_struct structures (tsk->mm) */
222static struct kmem_cache *mm_cachep; 222static struct kmem_cache *mm_cachep;
223 223
224static void account_kernel_stack(struct thread_info *ti, int account) 224static void account_kernel_stack(unsigned long *stack, int account)
225{ 225{
226 struct zone *zone = page_zone(virt_to_page(ti)); 226 struct zone *zone = page_zone(virt_to_page(stack));
227 227
228 mod_zone_page_state(zone, NR_KERNEL_STACK, account); 228 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
229} 229}
@@ -231,8 +231,8 @@ static void account_kernel_stack(struct thread_info *ti, int account)
231void free_task(struct task_struct *tsk) 231void free_task(struct task_struct *tsk)
232{ 232{
233 account_kernel_stack(tsk->stack, -1); 233 account_kernel_stack(tsk->stack, -1);
234 arch_release_thread_info(tsk->stack); 234 arch_release_thread_stack(tsk->stack);
235 free_thread_info(tsk->stack); 235 free_thread_stack(tsk->stack);
236 rt_mutex_debug_task_free(tsk); 236 rt_mutex_debug_task_free(tsk);
237 ftrace_graph_exit_task(tsk); 237 ftrace_graph_exit_task(tsk);
238 put_seccomp_filter(tsk); 238 put_seccomp_filter(tsk);
@@ -343,7 +343,7 @@ void set_task_stack_end_magic(struct task_struct *tsk)
343static struct task_struct *dup_task_struct(struct task_struct *orig, int node) 343static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
344{ 344{
345 struct task_struct *tsk; 345 struct task_struct *tsk;
346 struct thread_info *ti; 346 unsigned long *stack;
347 int err; 347 int err;
348 348
349 if (node == NUMA_NO_NODE) 349 if (node == NUMA_NO_NODE)
@@ -352,15 +352,15 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
352 if (!tsk) 352 if (!tsk)
353 return NULL; 353 return NULL;
354 354
355 ti = alloc_thread_info_node(tsk, node); 355 stack = alloc_thread_stack_node(tsk, node);
356 if (!ti) 356 if (!stack)
357 goto free_tsk; 357 goto free_tsk;
358 358
359 err = arch_dup_task_struct(tsk, orig); 359 err = arch_dup_task_struct(tsk, orig);
360 if (err) 360 if (err)
361 goto free_ti; 361 goto free_stack;
362 362
363 tsk->stack = ti; 363 tsk->stack = stack;
364#ifdef CONFIG_SECCOMP 364#ifdef CONFIG_SECCOMP
365 /* 365 /*
366 * We must handle setting up seccomp filters once we're under 366 * We must handle setting up seccomp filters once we're under
@@ -392,14 +392,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
392 tsk->task_frag.page = NULL; 392 tsk->task_frag.page = NULL;
393 tsk->wake_q.next = NULL; 393 tsk->wake_q.next = NULL;
394 394
395 account_kernel_stack(ti, 1); 395 account_kernel_stack(stack, 1);
396 396
397 kcov_task_init(tsk); 397 kcov_task_init(tsk);
398 398
399 return tsk; 399 return tsk;
400 400
401free_ti: 401free_stack:
402 free_thread_info(ti); 402 free_thread_stack(stack);
403free_tsk: 403free_tsk:
404 free_task_struct(tsk); 404 free_task_struct(tsk);
405 return NULL; 405 return NULL;
diff --git a/kernel/futex.c b/kernel/futex.c
index ee25f5ba4aca..33664f70e2d2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -469,7 +469,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
469{ 469{
470 unsigned long address = (unsigned long)uaddr; 470 unsigned long address = (unsigned long)uaddr;
471 struct mm_struct *mm = current->mm; 471 struct mm_struct *mm = current->mm;
472 struct page *page; 472 struct page *page, *tail;
473 struct address_space *mapping; 473 struct address_space *mapping;
474 int err, ro = 0; 474 int err, ro = 0;
475 475
@@ -530,7 +530,15 @@ again:
530 * considered here and page lock forces unnecessary serialization 530 * considered here and page lock forces unnecessary serialization
531 * From this point on, mapping will be re-verified if necessary and 531 * From this point on, mapping will be re-verified if necessary and
532 * page lock will be acquired only if it is unavoidable 532 * page lock will be acquired only if it is unavoidable
533 */ 533 *
534 * Mapping checks require the head page for any compound page so the
535 * head page and mapping is looked up now. For anonymous pages, it
536 * does not matter if the page splits in the future as the key is
537 * based on the address. For filesystem-backed pages, the tail is
538 * required as the index of the page determines the key. For
539 * base pages, there is no tail page and tail == page.
540 */
541 tail = page;
534 page = compound_head(page); 542 page = compound_head(page);
535 mapping = READ_ONCE(page->mapping); 543 mapping = READ_ONCE(page->mapping);
536 544
@@ -654,7 +662,7 @@ again:
654 662
655 key->both.offset |= FUT_OFF_INODE; /* inode-based key */ 663 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
656 key->shared.inode = inode; 664 key->shared.inode = inode;
657 key->shared.pgoff = basepage_index(page); 665 key->shared.pgoff = basepage_index(tail);
658 rcu_read_unlock(); 666 rcu_read_unlock();
659 } 667 }
660 668
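
The futex fix keys file-backed futexes off the tail page the address actually faulted on instead of the compound head. A worked example with illustrative numbers: for a 2 MiB THP backing file offsets 0-511 (in 4 KiB units), a futex word inside the second 4 KiB subpage must hash to pgoff 1; basepage_index(tail) yields that, while the head page's index would collapse every futex inside the THP onto pgoff 0 and let unrelated waiters share a key. Condensed flow of the fixed derivation:

    tail = page;                        /* subpage the user address is in  */
    page = compound_head(page);         /* head page, needed for ->mapping */
    /* ... mapping/inode checks on the head ... */
    key->shared.inode = inode;
    key->shared.pgoff = basepage_index(tail);   /* per-subpage 4 KiB index */
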
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 05254eeb4b4e..4b353e0be121 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key);
58 58
59void static_key_slow_inc(struct static_key *key) 59void static_key_slow_inc(struct static_key *key)
60{ 60{
61 int v, v1;
62
61 STATIC_KEY_CHECK_USE(); 63 STATIC_KEY_CHECK_USE();
62 if (atomic_inc_not_zero(&key->enabled)) 64
63 return; 65 /*
66 * Careful if we get concurrent static_key_slow_inc() calls;
67 * later calls must wait for the first one to _finish_ the
68 * jump_label_update() process. At the same time, however,
69 * the jump_label_update() call below wants to see
70 * static_key_enabled(&key) for jumps to be updated properly.
71 *
72 * So give a special meaning to negative key->enabled: it sends
73 * static_key_slow_inc() down the slow path, and it is non-zero
74 * so it counts as "enabled" in jump_label_update(). Note that
75 * atomic_inc_unless_negative() checks >= 0, so roll our own.
76 */
77 for (v = atomic_read(&key->enabled); v > 0; v = v1) {
78 v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
79 if (likely(v1 == v))
80 return;
81 }
64 82
65 jump_label_lock(); 83 jump_label_lock();
66 if (atomic_inc_return(&key->enabled) == 1) 84 if (atomic_read(&key->enabled) == 0) {
85 atomic_set(&key->enabled, -1);
67 jump_label_update(key); 86 jump_label_update(key);
87 atomic_set(&key->enabled, 1);
88 } else {
89 atomic_inc(&key->enabled);
90 }
68 jump_label_unlock(); 91 jump_label_unlock();
69} 92}
70EXPORT_SYMBOL_GPL(static_key_slow_inc); 93EXPORT_SYMBOL_GPL(static_key_slow_inc);
@@ -72,6 +95,13 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
72static void __static_key_slow_dec(struct static_key *key, 95static void __static_key_slow_dec(struct static_key *key,
73 unsigned long rate_limit, struct delayed_work *work) 96 unsigned long rate_limit, struct delayed_work *work)
74{ 97{
98 /*
99 * The negative count check is valid even when a negative
100 * key->enabled is in use by static_key_slow_inc(); a
101 * __static_key_slow_dec() before the first static_key_slow_inc()
102 * returns is unbalanced, because all other static_key_slow_inc()
103 * instances block while the update is in progress.
104 */
75 if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) { 105 if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
76 WARN(atomic_read(&key->enabled) < 0, 106 WARN(atomic_read(&key->enabled) < 0,
77 "jump label: negative count!\n"); 107 "jump label: negative count!\n");
diff --git a/kernel/kcov.c b/kernel/kcov.c
index a02f2dddd1d7..8d44b3fea9d0 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -264,7 +264,12 @@ static const struct file_operations kcov_fops = {
264 264
265static int __init kcov_init(void) 265static int __init kcov_init(void)
266{ 266{
267 if (!debugfs_create_file("kcov", 0600, NULL, NULL, &kcov_fops)) { 267 /*
268 * The kcov debugfs file won't ever get removed and thus,
269 * there is no need to protect it against removal races. The
270 * use of debugfs_create_file_unsafe() is actually safe here.
271 */
272 if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
268 pr_err("failed to create kcov in debugfs\n"); 273 pr_err("failed to create kcov in debugfs\n");
269 return -ENOMEM; 274 return -ENOMEM;
270 } 275 }
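
For context, the debugfs file kcov.c creates is driven from user space roughly as below; the sketch follows the interface described in Documentation/kcov.txt and is an assumption about usage, not part of this patch (error handling elided):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>

    #define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
    #define KCOV_ENABLE     _IO('c', 100)
    #define COVER_SIZE      (64 << 10)

    int fd = open("/sys/kernel/debug/kcov", O_RDWR);
    ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
    unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                                PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    ioctl(fd, KCOV_ENABLE, 0);
    /* ... run the code under test; cover[0] counts the PCs recorded ... */
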
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 3ef3736002d8..9c951fade415 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
49} 49}
50 50
51void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, 51void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
52 struct thread_info *ti) 52 struct task_struct *task)
53{ 53{
54 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)); 54 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
55 55
56 /* Mark the current thread as blocked on the lock: */ 56 /* Mark the current thread as blocked on the lock: */
57 ti->task->blocked_on = waiter; 57 task->blocked_on = waiter;
58} 58}
59 59
60void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, 60void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61 struct thread_info *ti) 61 struct task_struct *task)
62{ 62{
63 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list)); 63 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64 DEBUG_LOCKS_WARN_ON(waiter->task != ti->task); 64 DEBUG_LOCKS_WARN_ON(waiter->task != task);
65 DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter); 65 DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
66 ti->task->blocked_on = NULL; 66 task->blocked_on = NULL;
67 67
68 list_del_init(&waiter->list); 68 list_del_init(&waiter->list);
69 waiter->task = NULL; 69 waiter->task = NULL;
diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
index 0799fd3e4cfa..d06ae3bb46c5 100644
--- a/kernel/locking/mutex-debug.h
+++ b/kernel/locking/mutex-debug.h
@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
20extern void debug_mutex_free_waiter(struct mutex_waiter *waiter); 20extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
21extern void debug_mutex_add_waiter(struct mutex *lock, 21extern void debug_mutex_add_waiter(struct mutex *lock,
22 struct mutex_waiter *waiter, 22 struct mutex_waiter *waiter,
23 struct thread_info *ti); 23 struct task_struct *task);
24extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, 24extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
25 struct thread_info *ti); 25 struct task_struct *task);
26extern void debug_mutex_unlock(struct mutex *lock); 26extern void debug_mutex_unlock(struct mutex *lock);
27extern void debug_mutex_init(struct mutex *lock, const char *name, 27extern void debug_mutex_init(struct mutex *lock, const char *name,
28 struct lock_class_key *key); 28 struct lock_class_key *key);
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index e364b424b019..a70b90db3909 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -486,9 +486,6 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
486 if (!hold_ctx) 486 if (!hold_ctx)
487 return 0; 487 return 0;
488 488
489 if (unlikely(ctx == hold_ctx))
490 return -EALREADY;
491
492 if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && 489 if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
493 (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { 490 (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
494#ifdef CONFIG_DEBUG_MUTEXES 491#ifdef CONFIG_DEBUG_MUTEXES
@@ -514,6 +511,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
514 unsigned long flags; 511 unsigned long flags;
515 int ret; 512 int ret;
516 513
514 if (use_ww_ctx) {
515 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
516 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
517 return -EALREADY;
518 }
519
517 preempt_disable(); 520 preempt_disable();
518 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); 521 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
519 522
@@ -534,7 +537,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
534 goto skip_wait; 537 goto skip_wait;
535 538
536 debug_mutex_lock_common(lock, &waiter); 539 debug_mutex_lock_common(lock, &waiter);
537 debug_mutex_add_waiter(lock, &waiter, task_thread_info(task)); 540 debug_mutex_add_waiter(lock, &waiter, task);
538 541
539 /* add waiting tasks to the end of the waitqueue (FIFO): */ 542 /* add waiting tasks to the end of the waitqueue (FIFO): */
540 list_add_tail(&waiter.list, &lock->wait_list); 543 list_add_tail(&waiter.list, &lock->wait_list);
@@ -581,7 +584,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
581 } 584 }
582 __set_task_state(task, TASK_RUNNING); 585 __set_task_state(task, TASK_RUNNING);
583 586
584 mutex_remove_waiter(lock, &waiter, current_thread_info()); 587 mutex_remove_waiter(lock, &waiter, task);
585 /* set it to 0 if there are no waiters left: */ 588 /* set it to 0 if there are no waiters left: */
586 if (likely(list_empty(&lock->wait_list))) 589 if (likely(list_empty(&lock->wait_list)))
587 atomic_set(&lock->count, 0); 590 atomic_set(&lock->count, 0);
@@ -602,7 +605,7 @@ skip_wait:
602 return 0; 605 return 0;
603 606
604err: 607err:
605 mutex_remove_waiter(lock, &waiter, task_thread_info(task)); 608 mutex_remove_waiter(lock, &waiter, task);
606 spin_unlock_mutex(&lock->wait_lock, flags); 609 spin_unlock_mutex(&lock->wait_lock, flags);
607 debug_mutex_free_waiter(&waiter); 610 debug_mutex_free_waiter(&waiter);
608 mutex_release(&lock->dep_map, 1, ip); 611 mutex_release(&lock->dep_map, 1, ip);
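
Moving the -EALREADY test out of __ww_mutex_lock_check_stamp() and into __mutex_lock_common() means a re-lock with the same acquire context now fails even when the mutex would have been taken without contention; the stamp check only ran on the wait path. A sketch of the case this covers, with hypothetical lock and class names:

    static DEFINE_WW_CLASS(my_ww_class);
    struct ww_mutex lock_a;                 /* ww_mutex_init()'d elsewhere */
    struct ww_acquire_ctx ctx;
    int err;

    ww_acquire_init(&ctx, &my_ww_class);
    err = ww_mutex_lock(&lock_a, &ctx);     /* 0: acquired                 */
    err = ww_mutex_lock(&lock_a, &ctx);     /* -EALREADY, even uncontended */
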
diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
index 5cda397607f2..a68bae5e852a 100644
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
@@ -13,7 +13,7 @@
13 do { spin_lock(lock); (void)(flags); } while (0) 13 do { spin_lock(lock); (void)(flags); } while (0)
14#define spin_unlock_mutex(lock, flags) \ 14#define spin_unlock_mutex(lock, flags) \
15 do { spin_unlock(lock); (void)(flags); } while (0) 15 do { spin_unlock(lock); (void)(flags); } while (0)
16#define mutex_remove_waiter(lock, waiter, ti) \ 16#define mutex_remove_waiter(lock, waiter, task) \
17 __list_del((waiter)->list.prev, (waiter)->list.next) 17 __list_del((waiter)->list.prev, (waiter)->list.next)
18 18
19#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 19#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index ce2f75e32ae1..5fc8c311b8fe 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -267,6 +267,66 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
267#define queued_spin_lock_slowpath native_queued_spin_lock_slowpath 267#define queued_spin_lock_slowpath native_queued_spin_lock_slowpath
268#endif 268#endif
269 269
270/*
271 * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
272 * issuing an _unordered_ store to set _Q_LOCKED_VAL.
273 *
274 * This means that the store can be delayed, but no later than the
275 * store-release from the unlock. This means that simply observing
276 * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
277 *
278 * There are two paths that can issue the unordered store:
279 *
280 * (1) clear_pending_set_locked(): *,1,0 -> *,0,1
281 *
282 * (2) set_locked(): t,0,0 -> t,0,1 ; t != 0
283 * atomic_cmpxchg_relaxed(): t,0,0 -> 0,0,1
284 *
285 * However, in both cases we have other !0 state we've set before to queue
286 * ourselves:
287 *
288 * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
289 * load is constrained by that ACQUIRE to not pass before that, and thus must
290 * observe the store.
291 *
292 * For (2) we have a more interesting scenario. We enqueue ourselves using
293 * xchg_tail(), which ends up being a RELEASE. This in itself is not
294 * sufficient; however, that is followed by an smp_cond_acquire() on the same
295 * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
296 * guarantees we must observe that store.
297 *
298 * Therefore both cases have other !0 state that is observable before the
299 * unordered locked byte store comes through. This means we can use that to
300 * wait for the lock store, and then wait for an unlock.
301 */
302#ifndef queued_spin_unlock_wait
303void queued_spin_unlock_wait(struct qspinlock *lock)
304{
305 u32 val;
306
307 for (;;) {
308 val = atomic_read(&lock->val);
309
310 if (!val) /* not locked, we're done */
311 goto done;
312
313 if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
314 break;
315
316 /* not locked, but pending, wait until we observe the lock */
317 cpu_relax();
318 }
319
320 /* any unlock is good */
321 while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
322 cpu_relax();
323
324done:
325 smp_rmb(); /* CTRL + RMB -> ACQUIRE */
326}
327EXPORT_SYMBOL(queued_spin_unlock_wait);
328#endif
329
270#endif /* _GEN_PV_LOCK_SLOWPATH */ 330#endif /* _GEN_PV_LOCK_SLOWPATH */
271 331
272/** 332/**
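
queued_spin_unlock_wait() is what backs spin_unlock_wait() on qspinlock configurations, and the ordering argument in the comment above is what makes the classic teardown handshake safe. A sketch of that pattern with hypothetical names:

    /* teardown side: stop new work, then wait out any current holder */
    obj->dead = 1;
    smp_mb();                       /* publish 'dead' before sampling lock */
    spin_unlock_wait(&obj->lock);   /* returns once a holder has released  */
    /* ... now tear down state a holder might have been touching ... */

    /* holder side */
    spin_lock(&obj->lock);
    if (!obj->dead)
            do_work(obj);           /* hypothetical */
    spin_unlock(&obj->lock);
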
diff --git a/kernel/power/process.c b/kernel/power/process.c
index df058bed53ce..0c2ee9761d57 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -146,6 +146,18 @@ int freeze_processes(void)
146 if (!error && !oom_killer_disable()) 146 if (!error && !oom_killer_disable())
147 error = -EBUSY; 147 error = -EBUSY;
148 148
149 /*
150 * There is a hard to fix race between oom_reaper kernel thread
151 * and oom_killer_disable. oom_reaper calls exit_oom_victim
152 * before the victim reaches exit_mm so try to freeze all the tasks
153 * again and catch such a left over task.
154 */
155 if (!error) {
156 pr_info("Double checking all user space processes after OOM killer disable... ");
157 error = try_to_freeze_tasks(true);
158 pr_cont("\n");
159 }
160
149 if (error) 161 if (error)
150 thaw_processes(); 162 thaw_processes();
151 return error; 163 return error;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7f2cae4620c7..51d7105f529a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1536,7 +1536,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
1536 for (;;) { 1536 for (;;) {
1537 /* Any allowed, online CPU? */ 1537 /* Any allowed, online CPU? */
1538 for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) { 1538 for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
1539 if (!cpu_active(dest_cpu)) 1539 if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
1540 continue;
1541 if (!cpu_online(dest_cpu))
1540 continue; 1542 continue;
1541 goto out; 1543 goto out;
1542 } 1544 }
@@ -2253,9 +2255,11 @@ int sysctl_numa_balancing(struct ctl_table *table, int write,
2253#endif 2255#endif
2254#endif 2256#endif
2255 2257
2258#ifdef CONFIG_SCHEDSTATS
2259
2256DEFINE_STATIC_KEY_FALSE(sched_schedstats); 2260DEFINE_STATIC_KEY_FALSE(sched_schedstats);
2261static bool __initdata __sched_schedstats = false;
2257 2262
2258#ifdef CONFIG_SCHEDSTATS
2259static void set_schedstats(bool enabled) 2263static void set_schedstats(bool enabled)
2260{ 2264{
2261 if (enabled) 2265 if (enabled)
@@ -2278,11 +2282,16 @@ static int __init setup_schedstats(char *str)
2278 if (!str) 2282 if (!str)
2279 goto out; 2283 goto out;
2280 2284
2285 /*
2286 * This code is called before jump labels have been set up, so we can't
2287 * change the static branch directly just yet. Instead set a temporary
2288 * variable so init_schedstats() can do it later.
2289 */
2281 if (!strcmp(str, "enable")) { 2290 if (!strcmp(str, "enable")) {
2282 set_schedstats(true); 2291 __sched_schedstats = true;
2283 ret = 1; 2292 ret = 1;
2284 } else if (!strcmp(str, "disable")) { 2293 } else if (!strcmp(str, "disable")) {
2285 set_schedstats(false); 2294 __sched_schedstats = false;
2286 ret = 1; 2295 ret = 1;
2287 } 2296 }
2288out: 2297out:
@@ -2293,6 +2302,11 @@ out:
2293} 2302}
2294__setup("schedstats=", setup_schedstats); 2303__setup("schedstats=", setup_schedstats);
2295 2304
2305static void __init init_schedstats(void)
2306{
2307 set_schedstats(__sched_schedstats);
2308}
2309
2296#ifdef CONFIG_PROC_SYSCTL 2310#ifdef CONFIG_PROC_SYSCTL
2297int sysctl_schedstats(struct ctl_table *table, int write, 2311int sysctl_schedstats(struct ctl_table *table, int write,
2298 void __user *buffer, size_t *lenp, loff_t *ppos) 2312 void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -2313,8 +2327,10 @@ int sysctl_schedstats(struct ctl_table *table, int write,
2313 set_schedstats(state); 2327 set_schedstats(state);
2314 return err; 2328 return err;
2315} 2329}
2316#endif 2330#endif /* CONFIG_PROC_SYSCTL */
2317#endif 2331#else /* !CONFIG_SCHEDSTATS */
2332static inline void init_schedstats(void) {}
2333#endif /* CONFIG_SCHEDSTATS */
2318 2334
2319/* 2335/*
2320 * fork()/clone()-time setup: 2336 * fork()/clone()-time setup:
@@ -2521,10 +2537,9 @@ void wake_up_new_task(struct task_struct *p)
2521 */ 2537 */
2522 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); 2538 set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
2523#endif 2539#endif
2524 /* Post initialize new task's util average when its cfs_rq is set */ 2540 rq = __task_rq_lock(p, &rf);
2525 post_init_entity_util_avg(&p->se); 2541 post_init_entity_util_avg(&p->se);
2526 2542
2527 rq = __task_rq_lock(p, &rf);
2528 activate_task(rq, p, 0); 2543 activate_task(rq, p, 0);
2529 p->on_rq = TASK_ON_RQ_QUEUED; 2544 p->on_rq = TASK_ON_RQ_QUEUED;
2530 trace_sched_wakeup_new(p); 2545 trace_sched_wakeup_new(p);
@@ -3156,7 +3171,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
3156static inline void schedule_debug(struct task_struct *prev) 3171static inline void schedule_debug(struct task_struct *prev)
3157{ 3172{
3158#ifdef CONFIG_SCHED_STACK_END_CHECK 3173#ifdef CONFIG_SCHED_STACK_END_CHECK
3159 BUG_ON(task_stack_end_corrupted(prev)); 3174 if (task_stack_end_corrupted(prev))
3175 panic("corrupted stack end detected inside scheduler\n");
3160#endif 3176#endif
3161 3177
3162 if (unlikely(in_atomic_preempt_off())) { 3178 if (unlikely(in_atomic_preempt_off())) {
@@ -5133,14 +5149,16 @@ void show_state_filter(unsigned long state_filter)
5133 /* 5149 /*
5134 * reset the NMI-timeout, listing all files on a slow 5150 * reset the NMI-timeout, listing all files on a slow
5135 * console might take a lot of time: 5151 * console might take a lot of time:
5152 * Also, reset softlockup watchdogs on all CPUs, because
5153 * another CPU might be blocked waiting for us to process
5154 * an IPI.
5136 */ 5155 */
5137 touch_nmi_watchdog(); 5156 touch_nmi_watchdog();
5157 touch_all_softlockup_watchdogs();
5138 if (!state_filter || (p->state & state_filter)) 5158 if (!state_filter || (p->state & state_filter))
5139 sched_show_task(p); 5159 sched_show_task(p);
5140 } 5160 }
5141 5161
5142 touch_all_softlockup_watchdogs();
5143
5144#ifdef CONFIG_SCHED_DEBUG 5162#ifdef CONFIG_SCHED_DEBUG
5145 if (!state_filter) 5163 if (!state_filter)
5146 sysrq_sched_debug_show(); 5164 sysrq_sched_debug_show();
@@ -7487,6 +7505,8 @@ void __init sched_init(void)
7487#endif 7505#endif
7488 init_sched_fair_class(); 7506 init_sched_fair_class();
7489 7507
7508 init_schedstats();
7509
7490 scheduler_running = 1; 7510 scheduler_running = 1;
7491} 7511}
7492 7512
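
The setup_schedstats()/init_schedstats() split above is an instance of a general boot-time pattern: __setup() handlers run before jump_label_init(), so a command-line option cannot flip a static key directly and has to park the request in an __initdata flag until an __init function applies it. Reduced to its bones, with made-up names:

    DEFINE_STATIC_KEY_FALSE(my_feature_key);
    static bool __initdata my_feature_requested;

    static int __init setup_my_feature(char *str)
    {
            /* too early for static_branch_enable(); only record the wish */
            if (str && !strcmp(str, "enable"))
                    my_feature_requested = true;
            return 1;
    }
    __setup("my_feature=", setup_my_feature);

    /* called from an __init function once jump labels are usable */
    static void __init init_my_feature(void)
    {
            if (my_feature_requested)
                    static_branch_enable(&my_feature_key);
    }
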
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index cf905f655ba1..0368c393a336 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -427,19 +427,12 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
427 SPLIT_NS(p->se.vruntime), 427 SPLIT_NS(p->se.vruntime),
428 (long long)(p->nvcsw + p->nivcsw), 428 (long long)(p->nvcsw + p->nivcsw),
429 p->prio); 429 p->prio);
430#ifdef CONFIG_SCHEDSTATS 430
431 if (schedstat_enabled()) {
432 SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
433 SPLIT_NS(p->se.statistics.wait_sum),
434 SPLIT_NS(p->se.sum_exec_runtime),
435 SPLIT_NS(p->se.statistics.sum_sleep_runtime));
436 }
437#else
438 SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld", 431 SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
439 0LL, 0L, 432 SPLIT_NS(schedstat_val(p, se.statistics.wait_sum)),
440 SPLIT_NS(p->se.sum_exec_runtime), 433 SPLIT_NS(p->se.sum_exec_runtime),
441 0LL, 0L); 434 SPLIT_NS(schedstat_val(p, se.statistics.sum_sleep_runtime)));
442#endif 435
443#ifdef CONFIG_NUMA_BALANCING 436#ifdef CONFIG_NUMA_BALANCING
444 SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p)); 437 SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
445#endif 438#endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 218f8e83db73..bdcbeea90c95 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2904,6 +2904,23 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
2904 } 2904 }
2905} 2905}
2906 2906
2907/*
2908 * Unsigned subtract and clamp on underflow.
2909 *
2910 * Explicitly do a load-store to ensure the intermediate value never hits
2911 * memory. This allows lockless observations without ever seeing the negative
2912 * values.
2913 */
2914#define sub_positive(_ptr, _val) do { \
2915 typeof(_ptr) ptr = (_ptr); \
2916 typeof(*ptr) val = (_val); \
2917 typeof(*ptr) res, var = READ_ONCE(*ptr); \
2918 res = var - val; \
2919 if (res > var) \
2920 res = 0; \
2921 WRITE_ONCE(*ptr, res); \
2922} while (0)
2923
2907/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */ 2924/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
2908static inline int 2925static inline int
2909update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq) 2926update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
@@ -2913,15 +2930,15 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
2913 2930
2914 if (atomic_long_read(&cfs_rq->removed_load_avg)) { 2931 if (atomic_long_read(&cfs_rq->removed_load_avg)) {
2915 s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0); 2932 s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
2916 sa->load_avg = max_t(long, sa->load_avg - r, 0); 2933 sub_positive(&sa->load_avg, r);
2917 sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0); 2934 sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
2918 removed_load = 1; 2935 removed_load = 1;
2919 } 2936 }
2920 2937
2921 if (atomic_long_read(&cfs_rq->removed_util_avg)) { 2938 if (atomic_long_read(&cfs_rq->removed_util_avg)) {
2922 long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0); 2939 long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
2923 sa->util_avg = max_t(long, sa->util_avg - r, 0); 2940 sub_positive(&sa->util_avg, r);
2924 sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0); 2941 sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
2925 removed_util = 1; 2942 removed_util = 1;
2926 } 2943 }
2927 2944
@@ -2994,10 +3011,10 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
2994 &se->avg, se->on_rq * scale_load_down(se->load.weight), 3011 &se->avg, se->on_rq * scale_load_down(se->load.weight),
2995 cfs_rq->curr == se, NULL); 3012 cfs_rq->curr == se, NULL);
2996 3013
2997 cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0); 3014 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
2998 cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0); 3015 sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
2999 cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0); 3016 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
3000 cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0); 3017 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
3001 3018
3002 cfs_rq_util_change(cfs_rq); 3019 cfs_rq_util_change(cfs_rq);
3003} 3020}
@@ -3246,7 +3263,7 @@ static inline void check_schedstat_required(void)
3246 trace_sched_stat_iowait_enabled() || 3263 trace_sched_stat_iowait_enabled() ||
3247 trace_sched_stat_blocked_enabled() || 3264 trace_sched_stat_blocked_enabled() ||
3248 trace_sched_stat_runtime_enabled()) { 3265 trace_sched_stat_runtime_enabled()) {
3249 pr_warn_once("Scheduler tracepoints stat_sleep, stat_iowait, " 3266 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
3250 "stat_blocked and stat_runtime require the " 3267 "stat_blocked and stat_runtime require the "
3251 "kernel parameter schedstats=enabled or " 3268 "kernel parameter schedstats=enabled or "
3252 "kernel.sched_schedstats=1\n"); 3269 "kernel.sched_schedstats=1\n");
@@ -4185,6 +4202,26 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
4185 if (!cfs_bandwidth_used()) 4202 if (!cfs_bandwidth_used())
4186 return; 4203 return;
4187 4204
4205 /* Synchronize hierarchical throttle counter: */
4206 if (unlikely(!cfs_rq->throttle_uptodate)) {
4207 struct rq *rq = rq_of(cfs_rq);
4208 struct cfs_rq *pcfs_rq;
4209 struct task_group *tg;
4210
4211 cfs_rq->throttle_uptodate = 1;
4212
4213 /* Get closest up-to-date node, because leaves go first: */
4214 for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
4215 pcfs_rq = tg->cfs_rq[cpu_of(rq)];
4216 if (pcfs_rq->throttle_uptodate)
4217 break;
4218 }
4219 if (tg) {
4220 cfs_rq->throttle_count = pcfs_rq->throttle_count;
4221 cfs_rq->throttled_clock_task = rq_clock_task(rq);
4222 }
4223 }
4224
4188 /* an active group must be handled by the update_curr()->put() path */ 4225 /* an active group must be handled by the update_curr()->put() path */
4189 if (!cfs_rq->runtime_enabled || cfs_rq->curr) 4226 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
4190 return; 4227 return;
@@ -4500,15 +4537,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4500 4537
4501 /* Don't dequeue parent if it has other entities besides us */ 4538 /* Don't dequeue parent if it has other entities besides us */
4502 if (cfs_rq->load.weight) { 4539 if (cfs_rq->load.weight) {
4540 /* Avoid re-evaluating load for this entity: */
4541 se = parent_entity(se);
4503 /* 4542 /*
4504 * Bias pick_next to pick a task from this cfs_rq, as 4543 * Bias pick_next to pick a task from this cfs_rq, as
4505 * p is sleeping when it is within its sched_slice. 4544 * p is sleeping when it is within its sched_slice.
4506 */ 4545 */
4507 if (task_sleep && parent_entity(se)) 4546 if (task_sleep && se && !throttled_hierarchy(cfs_rq))
4508 set_next_buddy(parent_entity(se)); 4547 set_next_buddy(se);
4509
4510 /* avoid re-evaluating load for this entity */
4511 se = parent_entity(se);
4512 break; 4548 break;
4513 } 4549 }
4514 flags |= DEQUEUE_SLEEP; 4550 flags |= DEQUEUE_SLEEP;
@@ -8496,8 +8532,9 @@ void free_fair_sched_group(struct task_group *tg)
8496 8532
8497int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) 8533int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8498{ 8534{
8499 struct cfs_rq *cfs_rq;
8500 struct sched_entity *se; 8535 struct sched_entity *se;
8536 struct cfs_rq *cfs_rq;
8537 struct rq *rq;
8501 int i; 8538 int i;
8502 8539
8503 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); 8540 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8512,6 +8549,8 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8512 init_cfs_bandwidth(tg_cfs_bandwidth(tg)); 8549 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
8513 8550
8514 for_each_possible_cpu(i) { 8551 for_each_possible_cpu(i) {
8552 rq = cpu_rq(i);
8553
8515 cfs_rq = kzalloc_node(sizeof(struct cfs_rq), 8554 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8516 GFP_KERNEL, cpu_to_node(i)); 8555 GFP_KERNEL, cpu_to_node(i));
8517 if (!cfs_rq) 8556 if (!cfs_rq)
@@ -8525,7 +8564,10 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8525 init_cfs_rq(cfs_rq); 8564 init_cfs_rq(cfs_rq);
8526 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); 8565 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
8527 init_entity_runnable_average(se); 8566 init_entity_runnable_average(se);
8567
8568 raw_spin_lock_irq(&rq->lock);
8528 post_init_entity_util_avg(se); 8569 post_init_entity_util_avg(se);
8570 raw_spin_unlock_irq(&rq->lock);
8529 } 8571 }
8530 8572
8531 return 1; 8573 return 1;
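
The sub_positive() helper added above is the heart of the fair.c changes. The max_t() expressions it replaces clamp only after computing x - r, and the compiler may let that wrapped intermediate hit the field in memory before the clamp, so a lockless reader of the load/util averages could briefly observe an absurdly large value. An illustration of the clamp itself, with assumed numbers:

    unsigned long avg = 100;        /* assumed value, illustration only */

    sub_positive(&avg, 40);         /* avg == 60 */
    sub_positive(&avg, 75);         /* 60 - 75 wraps; res > var, so avg == 0 */
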
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 72f1f3087b04..7cbeb92a1cb9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -437,7 +437,7 @@ struct cfs_rq {
437 437
438 u64 throttled_clock, throttled_clock_task; 438 u64 throttled_clock, throttled_clock_task;
439 u64 throttled_clock_task_time; 439 u64 throttled_clock_task_time;
440 int throttled, throttle_count; 440 int throttled, throttle_count, throttle_uptodate;
441 struct list_head throttled_list; 441 struct list_head throttled_list;
442#endif /* CONFIG_CFS_BANDWIDTH */ 442#endif /* CONFIG_CFS_BANDWIDTH */
443#endif /* CONFIG_FAIR_GROUP_SCHED */ 443#endif /* CONFIG_FAIR_GROUP_SCHED */
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 70b3b6a20fb0..78955cbea31c 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -33,6 +33,8 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
33# define schedstat_inc(rq, field) do { if (schedstat_enabled()) { (rq)->field++; } } while (0) 33# define schedstat_inc(rq, field) do { if (schedstat_enabled()) { (rq)->field++; } } while (0)
34# define schedstat_add(rq, field, amt) do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0) 34# define schedstat_add(rq, field, amt) do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0)
35# define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0) 35# define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0)
36# define schedstat_val(rq, field) ((schedstat_enabled()) ? (rq)->field : 0)
37
36#else /* !CONFIG_SCHEDSTATS */ 38#else /* !CONFIG_SCHEDSTATS */
37static inline void 39static inline void
38rq_sched_info_arrive(struct rq *rq, unsigned long long delta) 40rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
@@ -47,6 +49,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
47# define schedstat_inc(rq, field) do { } while (0) 49# define schedstat_inc(rq, field) do { } while (0)
48# define schedstat_add(rq, field, amt) do { } while (0) 50# define schedstat_add(rq, field, amt) do { } while (0)
49# define schedstat_set(var, val) do { } while (0) 51# define schedstat_set(var, val) do { } while (0)
52# define schedstat_val(rq, field) 0
50#endif 53#endif
51 54
52#ifdef CONFIG_SCHED_INFO 55#ifdef CONFIG_SCHED_INFO
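
The new schedstat_val() exists so call sites no longer need CONFIG_SCHEDSTATS #ifdef pairs, which is exactly the simplification visible in the print_task() hunk above. A hypothetical reader, for illustration:

    static u64 task_wait_sum(struct task_struct *p)
    {
            /* yields 0 when schedstats is compiled out or runtime-disabled */
            return schedstat_val(p, se.statistics.wait_sum);
    }
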
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 037ea6ea3cb2..19c5b4a5c3eb 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -188,24 +188,33 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
188 return &bpf_trace_printk_proto; 188 return &bpf_trace_printk_proto;
189} 189}
190 190
191static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5) 191static u64 bpf_perf_event_read(u64 r1, u64 flags, u64 r3, u64 r4, u64 r5)
192{ 192{
193 struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; 193 struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
194 struct bpf_array *array = container_of(map, struct bpf_array, map); 194 struct bpf_array *array = container_of(map, struct bpf_array, map);
195 unsigned int cpu = smp_processor_id();
196 u64 index = flags & BPF_F_INDEX_MASK;
195 struct bpf_event_entry *ee; 197 struct bpf_event_entry *ee;
196 struct perf_event *event; 198 struct perf_event *event;
197 199
200 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
201 return -EINVAL;
202 if (index == BPF_F_CURRENT_CPU)
203 index = cpu;
198 if (unlikely(index >= array->map.max_entries)) 204 if (unlikely(index >= array->map.max_entries))
199 return -E2BIG; 205 return -E2BIG;
200 206
201 ee = READ_ONCE(array->ptrs[index]); 207 ee = READ_ONCE(array->ptrs[index]);
202 if (unlikely(!ee)) 208 if (!ee)
203 return -ENOENT; 209 return -ENOENT;
204 210
205 event = ee->event; 211 event = ee->event;
212 if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
213 event->attr.type != PERF_TYPE_RAW))
214 return -EINVAL;
215
206 /* make sure event is local and doesn't have pmu::count */ 216 /* make sure event is local and doesn't have pmu::count */
207 if (event->oncpu != smp_processor_id() || 217 if (unlikely(event->oncpu != cpu || event->pmu->count))
208 event->pmu->count)
209 return -EINVAL; 218 return -EINVAL;
210 219
211 /* 220 /*
@@ -229,6 +238,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
229 struct pt_regs *regs = (struct pt_regs *) (long) r1; 238 struct pt_regs *regs = (struct pt_regs *) (long) r1;
230 struct bpf_map *map = (struct bpf_map *) (long) r2; 239 struct bpf_map *map = (struct bpf_map *) (long) r2;
231 struct bpf_array *array = container_of(map, struct bpf_array, map); 240 struct bpf_array *array = container_of(map, struct bpf_array, map);
241 unsigned int cpu = smp_processor_id();
232 u64 index = flags & BPF_F_INDEX_MASK; 242 u64 index = flags & BPF_F_INDEX_MASK;
233 void *data = (void *) (long) r4; 243 void *data = (void *) (long) r4;
234 struct perf_sample_data sample_data; 244 struct perf_sample_data sample_data;
@@ -242,12 +252,12 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
242 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) 252 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
243 return -EINVAL; 253 return -EINVAL;
244 if (index == BPF_F_CURRENT_CPU) 254 if (index == BPF_F_CURRENT_CPU)
245 index = raw_smp_processor_id(); 255 index = cpu;
246 if (unlikely(index >= array->map.max_entries)) 256 if (unlikely(index >= array->map.max_entries))
247 return -E2BIG; 257 return -E2BIG;
248 258
249 ee = READ_ONCE(array->ptrs[index]); 259 ee = READ_ONCE(array->ptrs[index]);
250 if (unlikely(!ee)) 260 if (!ee)
251 return -ENOENT; 261 return -ENOENT;
252 262
253 event = ee->event; 263 event = ee->event;
@@ -255,7 +265,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
255 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT)) 265 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
256 return -EINVAL; 266 return -EINVAL;
257 267
258 if (unlikely(event->oncpu != smp_processor_id())) 268 if (unlikely(event->oncpu != cpu))
259 return -EOPNOTSUPP; 269 return -EOPNOTSUPP;
260 270
261 perf_sample_data_init(&sample_data, 0, 0); 271 perf_sample_data_init(&sample_data, 0, 0);
@@ -347,20 +357,15 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
347} 357}
348 358
349/* bpf+kprobe programs can access fields of 'struct pt_regs' */ 359/* bpf+kprobe programs can access fields of 'struct pt_regs' */
350static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type) 360static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
361 enum bpf_reg_type *reg_type)
351{ 362{
352 /* check bounds */
353 if (off < 0 || off >= sizeof(struct pt_regs)) 363 if (off < 0 || off >= sizeof(struct pt_regs))
354 return false; 364 return false;
355
356 /* only read is allowed */
357 if (type != BPF_READ) 365 if (type != BPF_READ)
358 return false; 366 return false;
359
360 /* disallow misaligned access */
361 if (off % size != 0) 367 if (off % size != 0)
362 return false; 368 return false;
363
364 return true; 369 return true;
365} 370}
366 371
@@ -425,7 +430,8 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
425 } 430 }
426} 431}
427 432
428static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type) 433static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
434 enum bpf_reg_type *reg_type)
429{ 435{
430 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) 436 if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
431 return false; 437 return false;
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index f96f0383f6c6..ad1d6164e946 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -36,6 +36,10 @@ struct trace_bprintk_fmt {
36static inline struct trace_bprintk_fmt *lookup_format(const char *fmt) 36static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
37{ 37{
38 struct trace_bprintk_fmt *pos; 38 struct trace_bprintk_fmt *pos;
39
40 if (!fmt)
41 return ERR_PTR(-EINVAL);
42
39 list_for_each_entry(pos, &trace_bprintk_fmt_list, list) { 43 list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
40 if (!strcmp(pos->fmt, fmt)) 44 if (!strcmp(pos->fmt, fmt))
41 return pos; 45 return pos;
@@ -57,7 +61,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
57 for (iter = start; iter < end; iter++) { 61 for (iter = start; iter < end; iter++) {
58 struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter); 62 struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
59 if (tb_fmt) { 63 if (tb_fmt) {
60 *iter = tb_fmt->fmt; 64 if (!IS_ERR(tb_fmt))
65 *iter = tb_fmt->fmt;
61 continue; 66 continue;
62 } 67 }
63 68
diff --git a/mm/compaction.c b/mm/compaction.c
index 1427366ad673..79bfe0e06907 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -441,25 +441,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
441 441
442 /* Found a free page, break it into order-0 pages */ 442 /* Found a free page, break it into order-0 pages */
443 isolated = split_free_page(page); 443 isolated = split_free_page(page);
444 if (!isolated)
445 break;
446
444 total_isolated += isolated; 447 total_isolated += isolated;
448 cc->nr_freepages += isolated;
445 for (i = 0; i < isolated; i++) { 449 for (i = 0; i < isolated; i++) {
446 list_add(&page->lru, freelist); 450 list_add(&page->lru, freelist);
447 page++; 451 page++;
448 } 452 }
449 453 if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
450 /* If a page was split, advance to the end of it */ 454 blockpfn += isolated;
451 if (isolated) { 455 break;
452 cc->nr_freepages += isolated;
453 if (!strict &&
454 cc->nr_migratepages <= cc->nr_freepages) {
455 blockpfn += isolated;
456 break;
457 }
458
459 blockpfn += isolated - 1;
460 cursor += isolated - 1;
461 continue;
462 } 456 }
457 /* Advance to the end of split page */
458 blockpfn += isolated - 1;
459 cursor += isolated - 1;
460 continue;
463 461
464isolate_fail: 462isolate_fail:
465 if (strict) 463 if (strict)
@@ -469,6 +467,9 @@ isolate_fail:
469 467
470 } 468 }
471 469
470 if (locked)
471 spin_unlock_irqrestore(&cc->zone->lock, flags);
472
472 /* 473 /*
473 * There is a tiny chance that we have read bogus compound_order(), 474 * There is a tiny chance that we have read bogus compound_order(),
474 * so be careful to not go outside of the pageblock. 475 * so be careful to not go outside of the pageblock.
@@ -490,9 +491,6 @@ isolate_fail:
490 if (strict && blockpfn < end_pfn) 491 if (strict && blockpfn < end_pfn)
491 total_isolated = 0; 492 total_isolated = 0;
492 493
493 if (locked)
494 spin_unlock_irqrestore(&cc->zone->lock, flags);
495
496 /* Update the pageblock-skip if the whole pageblock was scanned */ 494 /* Update the pageblock-skip if the whole pageblock was scanned */
497 if (blockpfn == end_pfn) 495 if (blockpfn == end_pfn)
498 update_pageblock_skip(cc, valid_page, total_isolated, false); 496 update_pageblock_skip(cc, valid_page, total_isolated, false);
@@ -1011,6 +1009,7 @@ static void isolate_freepages(struct compact_control *cc)
1011 block_end_pfn = block_start_pfn, 1009 block_end_pfn = block_start_pfn,
1012 block_start_pfn -= pageblock_nr_pages, 1010 block_start_pfn -= pageblock_nr_pages,
1013 isolate_start_pfn = block_start_pfn) { 1011 isolate_start_pfn = block_start_pfn) {
1012 unsigned long isolated;
1014 1013
1015 /* 1014 /*
1016 * This can iterate a massively long zone without finding any 1015 * This can iterate a massively long zone without finding any
@@ -1035,8 +1034,12 @@ static void isolate_freepages(struct compact_control *cc)
1035 continue; 1034 continue;
1036 1035
1037 /* Found a block suitable for isolating free pages from. */ 1036 /* Found a block suitable for isolating free pages from. */
1038 isolate_freepages_block(cc, &isolate_start_pfn, 1037 isolated = isolate_freepages_block(cc, &isolate_start_pfn,
1039 block_end_pfn, freelist, false); 1038 block_end_pfn, freelist, false);
1039 /* If isolation failed early, do not continue needlessly */
1040 if (!isolated && isolate_start_pfn < block_end_pfn &&
1041 cc->nr_migratepages > cc->nr_freepages)
1042 break;
1040 1043
1041 /* 1044 /*
1042 * If we isolated enough freepages, or aborted due to async 1045 * If we isolated enough freepages, or aborted due to async
diff --git a/mm/filemap.c b/mm/filemap.c
index 00ae878b2a38..20f3b1f33f0e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2186,7 +2186,7 @@ repeat:
2186 if (file->f_ra.mmap_miss > 0) 2186 if (file->f_ra.mmap_miss > 0)
2187 file->f_ra.mmap_miss--; 2187 file->f_ra.mmap_miss--;
2188 addr = address + (page->index - vmf->pgoff) * PAGE_SIZE; 2188 addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
2189 do_set_pte(vma, addr, page, pte, false, false, true); 2189 do_set_pte(vma, addr, page, pte, false, false);
2190 unlock_page(page); 2190 unlock_page(page);
2191 goto next; 2191 goto next;
2192unlock: 2192unlock:
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 388c2bb9b55c..c1f3c0be150a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1030,6 +1030,7 @@ static void destroy_compound_gigantic_page(struct page *page,
1030 int nr_pages = 1 << order; 1030 int nr_pages = 1 << order;
1031 struct page *p = page + 1; 1031 struct page *p = page + 1;
1032 1032
1033 atomic_set(compound_mapcount_ptr(page), 0);
1033 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { 1034 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1034 clear_compound_head(p); 1035 clear_compound_head(p);
1035 set_page_refcounted(p); 1036 set_page_refcounted(p);
@@ -4228,7 +4229,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4228 if (saddr) { 4229 if (saddr) {
4229 spte = huge_pte_offset(svma->vm_mm, saddr); 4230 spte = huge_pte_offset(svma->vm_mm, saddr);
4230 if (spte) { 4231 if (spte) {
4231 mm_inc_nr_pmds(mm);
4232 get_page(virt_to_page(spte)); 4232 get_page(virt_to_page(spte));
4233 break; 4233 break;
4234 } 4234 }
@@ -4243,9 +4243,9 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4243 if (pud_none(*pud)) { 4243 if (pud_none(*pud)) {
4244 pud_populate(mm, pud, 4244 pud_populate(mm, pud,
4245 (pmd_t *)((unsigned long)spte & PAGE_MASK)); 4245 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4246 mm_inc_nr_pmds(mm);
4246 } else { 4247 } else {
4247 put_page(virt_to_page(spte)); 4248 put_page(virt_to_page(spte));
4248 mm_inc_nr_pmds(mm);
4249 } 4249 }
4250 spin_unlock(ptl); 4250 spin_unlock(ptl);
4251out: 4251out:
diff --git a/mm/internal.h b/mm/internal.h
index a37e5b6f9d25..2524ec880e24 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -24,7 +24,8 @@
24 */ 24 */
25#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\ 25#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
26 __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ 26 __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
27 __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC) 27 __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
28 __GFP_ATOMIC)
28 29
29/* The GFP flags allowed during early boot */ 30/* The GFP flags allowed during early boot */
30#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS)) 31#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 28439acda6ec..6845f9294696 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -508,7 +508,7 @@ void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
508 kasan_kmalloc(cache, object, cache->object_size, flags); 508 kasan_kmalloc(cache, object, cache->object_size, flags);
509} 509}
510 510
511void kasan_poison_slab_free(struct kmem_cache *cache, void *object) 511static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
512{ 512{
513 unsigned long size = cache->object_size; 513 unsigned long size = cache->object_size;
514 unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE); 514 unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
@@ -626,7 +626,7 @@ void kasan_krealloc(const void *object, size_t size, gfp_t flags)
626 kasan_kmalloc(page->slab_cache, object, size, flags); 626 kasan_kmalloc(page->slab_cache, object, size, flags);
627} 627}
628 628
629void kasan_kfree(void *ptr) 629void kasan_poison_kfree(void *ptr)
630{ 630{
631 struct page *page; 631 struct page *page;
632 632
@@ -636,7 +636,7 @@ void kasan_kfree(void *ptr)
636 kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page), 636 kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
637 KASAN_FREE_PAGE); 637 KASAN_FREE_PAGE);
638 else 638 else
639 kasan_slab_free(page->slab_cache, ptr); 639 kasan_poison_slab_free(page->slab_cache, ptr);
640} 640}
641 641
642void kasan_kfree_large(const void *ptr) 642void kasan_kfree_large(const void *ptr)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index e6429926e957..04320d3adbef 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -307,8 +307,10 @@ static void hex_dump_object(struct seq_file *seq,
307 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE); 307 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
308 308
309 seq_printf(seq, " hex dump (first %zu bytes):\n", len); 309 seq_printf(seq, " hex dump (first %zu bytes):\n", len);
310 kasan_disable_current();
310 seq_hex_dump(seq, " ", DUMP_PREFIX_NONE, HEX_ROW_SIZE, 311 seq_hex_dump(seq, " ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
311 HEX_GROUP_SIZE, ptr, len, HEX_ASCII); 312 HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
313 kasan_enable_current();
312} 314}
313 315
314/* 316/*
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 75e74408cc8f..ac8664db3823 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4203,7 +4203,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4203 return &memcg->css; 4203 return &memcg->css;
4204fail: 4204fail:
4205 mem_cgroup_free(memcg); 4205 mem_cgroup_free(memcg);
4206 return NULL; 4206 return ERR_PTR(-ENOMEM);
4207} 4207}
4208 4208
4209static int 4209static int
@@ -5544,6 +5544,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
5544 struct mem_cgroup *memcg; 5544 struct mem_cgroup *memcg;
5545 unsigned int nr_pages; 5545 unsigned int nr_pages;
5546 bool compound; 5546 bool compound;
5547 unsigned long flags;
5547 5548
5548 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 5549 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5549 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 5550 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
@@ -5574,10 +5575,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
5574 5575
5575 commit_charge(newpage, memcg, false); 5576 commit_charge(newpage, memcg, false);
5576 5577
5577 local_irq_disable(); 5578 local_irq_save(flags);
5578 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); 5579 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
5579 memcg_check_events(memcg, newpage); 5580 memcg_check_events(memcg, newpage);
5580 local_irq_enable(); 5581 local_irq_restore(flags);
5581} 5582}
5582 5583
5583DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 5584DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
diff --git a/mm/memory.c b/mm/memory.c
index 15322b73636b..cd1f29e4897e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2877,7 +2877,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
2877 * vm_ops->map_pages. 2877 * vm_ops->map_pages.
2878 */ 2878 */
2879void do_set_pte(struct vm_area_struct *vma, unsigned long address, 2879void do_set_pte(struct vm_area_struct *vma, unsigned long address,
2880 struct page *page, pte_t *pte, bool write, bool anon, bool old) 2880 struct page *page, pte_t *pte, bool write, bool anon)
2881{ 2881{
2882 pte_t entry; 2882 pte_t entry;
2883 2883
@@ -2885,8 +2885,6 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
2885 entry = mk_pte(page, vma->vm_page_prot); 2885 entry = mk_pte(page, vma->vm_page_prot);
2886 if (write) 2886 if (write)
2887 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2887 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2888 if (old)
2889 entry = pte_mkold(entry);
2890 if (anon) { 2888 if (anon) {
2891 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); 2889 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
2892 page_add_new_anon_rmap(page, vma, address, false); 2890 page_add_new_anon_rmap(page, vma, address, false);
@@ -2900,16 +2898,8 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
2900 update_mmu_cache(vma, address, pte); 2898 update_mmu_cache(vma, address, pte);
2901} 2899}
2902 2900
2903/*
2904 * If architecture emulates "accessed" or "young" bit without HW support,
2905 * there is no much gain with fault_around.
2906 */
2907static unsigned long fault_around_bytes __read_mostly = 2901static unsigned long fault_around_bytes __read_mostly =
2908#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
2909 PAGE_SIZE;
2910#else
2911 rounddown_pow_of_two(65536); 2902 rounddown_pow_of_two(65536);
2912#endif
2913 2903
2914#ifdef CONFIG_DEBUG_FS 2904#ifdef CONFIG_DEBUG_FS
2915static int fault_around_bytes_get(void *data, u64 *val) 2905static int fault_around_bytes_get(void *data, u64 *val)
@@ -3032,20 +3022,9 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3032 */ 3022 */
3033 if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) { 3023 if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
3034 pte = pte_offset_map_lock(mm, pmd, address, &ptl); 3024 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
3035 if (!pte_same(*pte, orig_pte))
3036 goto unlock_out;
3037 do_fault_around(vma, address, pte, pgoff, flags); 3025 do_fault_around(vma, address, pte, pgoff, flags);
3038 /* Check if the fault is handled by faultaround */ 3026 if (!pte_same(*pte, orig_pte))
3039 if (!pte_same(*pte, orig_pte)) {
3040 /*
3041 * Faultaround produce old pte, but the pte we've
3042 * handler fault for should be young.
3043 */
3044 pte_t entry = pte_mkyoung(*pte);
3045 if (ptep_set_access_flags(vma, address, pte, entry, 0))
3046 update_mmu_cache(vma, address, pte);
3047 goto unlock_out; 3027 goto unlock_out;
3048 }
3049 pte_unmap_unlock(pte, ptl); 3028 pte_unmap_unlock(pte, ptl);
3050 } 3029 }
3051 3030
@@ -3060,7 +3039,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3060 put_page(fault_page); 3039 put_page(fault_page);
3061 return ret; 3040 return ret;
3062 } 3041 }
3063 do_set_pte(vma, address, fault_page, pte, false, false, false); 3042 do_set_pte(vma, address, fault_page, pte, false, false);
3064 unlock_page(fault_page); 3043 unlock_page(fault_page);
3065unlock_out: 3044unlock_out:
3066 pte_unmap_unlock(pte, ptl); 3045 pte_unmap_unlock(pte, ptl);
@@ -3111,7 +3090,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3111 } 3090 }
3112 goto uncharge_out; 3091 goto uncharge_out;
3113 } 3092 }
3114 do_set_pte(vma, address, new_page, pte, true, true, false); 3093 do_set_pte(vma, address, new_page, pte, true, true);
3115 mem_cgroup_commit_charge(new_page, memcg, false, false); 3094 mem_cgroup_commit_charge(new_page, memcg, false, false);
3116 lru_cache_add_active_or_unevictable(new_page, vma); 3095 lru_cache_add_active_or_unevictable(new_page, vma);
3117 pte_unmap_unlock(pte, ptl); 3096 pte_unmap_unlock(pte, ptl);
@@ -3164,7 +3143,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3164 put_page(fault_page); 3143 put_page(fault_page);
3165 return ret; 3144 return ret;
3166 } 3145 }
3167 do_set_pte(vma, address, fault_page, pte, true, false, false); 3146 do_set_pte(vma, address, fault_page, pte, true, false);
3168 pte_unmap_unlock(pte, ptl); 3147 pte_unmap_unlock(pte, ptl);
3169 3148
3170 if (set_page_dirty(fault_page)) 3149 if (set_page_dirty(fault_page))
diff --git a/mm/mempool.c b/mm/mempool.c
index 9e075f829d0d..8f65464da5de 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -104,20 +104,16 @@ static inline void poison_element(mempool_t *pool, void *element)
104 104
105static void kasan_poison_element(mempool_t *pool, void *element) 105static void kasan_poison_element(mempool_t *pool, void *element)
106{ 106{
107 if (pool->alloc == mempool_alloc_slab) 107 if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
108 kasan_poison_slab_free(pool->pool_data, element); 108 kasan_poison_kfree(element);
109 if (pool->alloc == mempool_kmalloc)
110 kasan_kfree(element);
111 if (pool->alloc == mempool_alloc_pages) 109 if (pool->alloc == mempool_alloc_pages)
112 kasan_free_pages(element, (unsigned long)pool->pool_data); 110 kasan_free_pages(element, (unsigned long)pool->pool_data);
113} 111}
114 112
115static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags) 113static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
116{ 114{
117 if (pool->alloc == mempool_alloc_slab) 115 if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
118 kasan_slab_alloc(pool->pool_data, element, flags); 116 kasan_unpoison_slab(element);
119 if (pool->alloc == mempool_kmalloc)
120 kasan_krealloc(element, (size_t)pool->pool_data, flags);
121 if (pool->alloc == mempool_alloc_pages) 117 if (pool->alloc == mempool_alloc_pages)
122 kasan_alloc_pages(element, (unsigned long)pool->pool_data); 118 kasan_alloc_pages(element, (unsigned long)pool->pool_data);
123} 119}
diff --git a/mm/migrate.c b/mm/migrate.c
index 9baf41c877ff..bd3fdc202e8b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -431,6 +431,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
431 431
432 return MIGRATEPAGE_SUCCESS; 432 return MIGRATEPAGE_SUCCESS;
433} 433}
434EXPORT_SYMBOL(migrate_page_move_mapping);
434 435
435/* 436/*
436 * The expected number of remaining references is the same as that 437 * The expected number of remaining references is the same as that
@@ -586,6 +587,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
586 587
587 mem_cgroup_migrate(page, newpage); 588 mem_cgroup_migrate(page, newpage);
588} 589}
590EXPORT_SYMBOL(migrate_page_copy);
589 591
590/************************************************************ 592/************************************************************
591 * Migration functions 593 * Migration functions
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index acbc432d1a52..ddf74487f848 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -474,13 +474,8 @@ static bool __oom_reap_task(struct task_struct *tsk)
474 p = find_lock_task_mm(tsk); 474 p = find_lock_task_mm(tsk);
475 if (!p) 475 if (!p)
476 goto unlock_oom; 476 goto unlock_oom;
477
478 mm = p->mm; 477 mm = p->mm;
479 if (!atomic_inc_not_zero(&mm->mm_users)) { 478 atomic_inc(&mm->mm_users);
480 task_unlock(p);
481 goto unlock_oom;
482 }
483
484 task_unlock(p); 479 task_unlock(p);
485 480
486 if (!down_read_trylock(&mm->mmap_sem)) { 481 if (!down_read_trylock(&mm->mmap_sem)) {
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b9956fdee8f5..e2481949494c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -373,8 +373,9 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
373 struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc); 373 struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
374 unsigned long bytes = vm_dirty_bytes; 374 unsigned long bytes = vm_dirty_bytes;
375 unsigned long bg_bytes = dirty_background_bytes; 375 unsigned long bg_bytes = dirty_background_bytes;
376 unsigned long ratio = vm_dirty_ratio; 376 /* convert ratios to per-PAGE_SIZE for higher precision */
377 unsigned long bg_ratio = dirty_background_ratio; 377 unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
378 unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
378 unsigned long thresh; 379 unsigned long thresh;
379 unsigned long bg_thresh; 380 unsigned long bg_thresh;
380 struct task_struct *tsk; 381 struct task_struct *tsk;
@@ -386,26 +387,28 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
386 /* 387 /*
387 * The byte settings can't be applied directly to memcg 388 * The byte settings can't be applied directly to memcg
388 * domains. Convert them to ratios by scaling against 389 * domains. Convert them to ratios by scaling against
389 * globally available memory. 390 * globally available memory. As the ratios are in
391 * per-PAGE_SIZE, they can be obtained by dividing bytes by
392 * number of pages.
390 */ 393 */
391 if (bytes) 394 if (bytes)
392 ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 / 395 ratio = min(DIV_ROUND_UP(bytes, global_avail),
393 global_avail, 100UL); 396 PAGE_SIZE);
394 if (bg_bytes) 397 if (bg_bytes)
395 bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 / 398 bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
396 global_avail, 100UL); 399 PAGE_SIZE);
397 bytes = bg_bytes = 0; 400 bytes = bg_bytes = 0;
398 } 401 }
399 402
400 if (bytes) 403 if (bytes)
401 thresh = DIV_ROUND_UP(bytes, PAGE_SIZE); 404 thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
402 else 405 else
403 thresh = (ratio * available_memory) / 100; 406 thresh = (ratio * available_memory) / PAGE_SIZE;
404 407
405 if (bg_bytes) 408 if (bg_bytes)
406 bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE); 409 bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
407 else 410 else
408 bg_thresh = (bg_ratio * available_memory) / 100; 411 bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
409 412
410 if (bg_thresh >= thresh) 413 if (bg_thresh >= thresh)
411 bg_thresh = thresh / 2; 414 bg_thresh = thresh / 2;
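
The per-PAGE_SIZE conversion is easiest to see with numbers. A worked example, assuming PAGE_SIZE = 4096 and vm_dirty_ratio = 15:

    ratio  = 15 * 4096 / 100;               /* = 614 per-PAGE_SIZE units */
    thresh = 614 * available_memory / 4096; /* ~14.99% of memory */

    /*
     * The old bytes path rounded the derived ratio to whole-percent steps
     * (1/100); the new one rounds to 1/4096, so small dirty_bytes settings
     * no longer collapse onto the nearest percent.
     */
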
diff --git a/mm/page_owner.c b/mm/page_owner.c
index c6cda3e36212..fedeba88c9cb 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -207,13 +207,15 @@ void __dump_page_owner(struct page *page)
207 .nr_entries = page_ext->nr_entries, 207 .nr_entries = page_ext->nr_entries,
208 .entries = &page_ext->trace_entries[0], 208 .entries = &page_ext->trace_entries[0],
209 }; 209 };
210 gfp_t gfp_mask = page_ext->gfp_mask; 210 gfp_t gfp_mask;
211 int mt = gfpflags_to_migratetype(gfp_mask); 211 int mt;
212 212
213 if (unlikely(!page_ext)) { 213 if (unlikely(!page_ext)) {
214 pr_alert("There is no page extension available.\n"); 214 pr_alert("There is no page extension available.\n");
215 return; 215 return;
216 } 216 }
217 gfp_mask = page_ext->gfp_mask;
218 mt = gfpflags_to_migratetype(gfp_mask);
217 219
218 if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) { 220 if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
219 pr_alert("page_owner info is not active (free page?)\n"); 221 pr_alert("page_owner info is not active (free page?)\n");
diff --git a/mm/percpu.c b/mm/percpu.c
index 0c59684f1ff2..9903830aaebb 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -112,7 +112,7 @@ struct pcpu_chunk {
112 int map_used; /* # of map entries used before the sentry */ 112 int map_used; /* # of map entries used before the sentry */
113 int map_alloc; /* # of map entries allocated */ 113 int map_alloc; /* # of map entries allocated */
114 int *map; /* allocation map */ 114 int *map; /* allocation map */
115 struct work_struct map_extend_work;/* async ->map[] extension */ 115 struct list_head map_extend_list;/* on pcpu_map_extend_chunks */
116 116
117 void *data; /* chunk data */ 117 void *data; /* chunk data */
118 int first_free; /* no free below this */ 118 int first_free; /* no free below this */
@@ -162,10 +162,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
162static int pcpu_reserved_chunk_limit; 162static int pcpu_reserved_chunk_limit;
163 163
164static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */ 164static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
165static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */ 165static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
166 166
167static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */ 167static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
168 168
169/* chunks which need their map areas extended, protected by pcpu_lock */
170static LIST_HEAD(pcpu_map_extend_chunks);
171
169/* 172/*
170 * The number of empty populated pages, protected by pcpu_lock. The 173 * The number of empty populated pages, protected by pcpu_lock. The
171 * reserved chunk doesn't contribute to the count. 174 * reserved chunk doesn't contribute to the count.
@@ -395,13 +398,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
395{ 398{
396 int margin, new_alloc; 399 int margin, new_alloc;
397 400
401 lockdep_assert_held(&pcpu_lock);
402
398 if (is_atomic) { 403 if (is_atomic) {
399 margin = 3; 404 margin = 3;
400 405
401 if (chunk->map_alloc < 406 if (chunk->map_alloc <
402 chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW && 407 chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
403 pcpu_async_enabled) 408 if (list_empty(&chunk->map_extend_list)) {
404 schedule_work(&chunk->map_extend_work); 409 list_add_tail(&chunk->map_extend_list,
410 &pcpu_map_extend_chunks);
411 pcpu_schedule_balance_work();
412 }
413 }
405 } else { 414 } else {
406 margin = PCPU_ATOMIC_MAP_MARGIN_HIGH; 415 margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
407 } 416 }
@@ -435,6 +444,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
435 size_t old_size = 0, new_size = new_alloc * sizeof(new[0]); 444 size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
436 unsigned long flags; 445 unsigned long flags;
437 446
447 lockdep_assert_held(&pcpu_alloc_mutex);
448
438 new = pcpu_mem_zalloc(new_size); 449 new = pcpu_mem_zalloc(new_size);
439 if (!new) 450 if (!new)
440 return -ENOMEM; 451 return -ENOMEM;
@@ -467,20 +478,6 @@ out_unlock:
467 return 0; 478 return 0;
468} 479}
469 480
470static void pcpu_map_extend_workfn(struct work_struct *work)
471{
472 struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
473 map_extend_work);
474 int new_alloc;
475
476 spin_lock_irq(&pcpu_lock);
477 new_alloc = pcpu_need_to_extend(chunk, false);
478 spin_unlock_irq(&pcpu_lock);
479
480 if (new_alloc)
481 pcpu_extend_area_map(chunk, new_alloc);
482}
483
484/** 481/**
485 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area 482 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
486 * @chunk: chunk the candidate area belongs to 483 * @chunk: chunk the candidate area belongs to
@@ -740,7 +737,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
740 chunk->map_used = 1; 737 chunk->map_used = 1;
741 738
742 INIT_LIST_HEAD(&chunk->list); 739 INIT_LIST_HEAD(&chunk->list);
743 INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn); 740 INIT_LIST_HEAD(&chunk->map_extend_list);
744 chunk->free_size = pcpu_unit_size; 741 chunk->free_size = pcpu_unit_size;
745 chunk->contig_hint = pcpu_unit_size; 742 chunk->contig_hint = pcpu_unit_size;
746 743
@@ -895,6 +892,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
895 return NULL; 892 return NULL;
896 } 893 }
897 894
895 if (!is_atomic)
896 mutex_lock(&pcpu_alloc_mutex);
897
898 spin_lock_irqsave(&pcpu_lock, flags); 898 spin_lock_irqsave(&pcpu_lock, flags);
899 899
900 /* serve reserved allocations from the reserved chunk if available */ 900 /* serve reserved allocations from the reserved chunk if available */
@@ -967,12 +967,9 @@ restart:
967 if (is_atomic) 967 if (is_atomic)
968 goto fail; 968 goto fail;
969 969
970 mutex_lock(&pcpu_alloc_mutex);
971
972 if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) { 970 if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
973 chunk = pcpu_create_chunk(); 971 chunk = pcpu_create_chunk();
974 if (!chunk) { 972 if (!chunk) {
975 mutex_unlock(&pcpu_alloc_mutex);
976 err = "failed to allocate new chunk"; 973 err = "failed to allocate new chunk";
977 goto fail; 974 goto fail;
978 } 975 }
@@ -983,7 +980,6 @@ restart:
983 spin_lock_irqsave(&pcpu_lock, flags); 980 spin_lock_irqsave(&pcpu_lock, flags);
984 } 981 }
985 982
986 mutex_unlock(&pcpu_alloc_mutex);
987 goto restart; 983 goto restart;
988 984
989area_found: 985area_found:
@@ -993,8 +989,6 @@ area_found:
993 if (!is_atomic) { 989 if (!is_atomic) {
994 int page_start, page_end, rs, re; 990 int page_start, page_end, rs, re;
995 991
996 mutex_lock(&pcpu_alloc_mutex);
997
998 page_start = PFN_DOWN(off); 992 page_start = PFN_DOWN(off);
999 page_end = PFN_UP(off + size); 993 page_end = PFN_UP(off + size);
1000 994
@@ -1005,7 +999,6 @@ area_found:
1005 999
1006 spin_lock_irqsave(&pcpu_lock, flags); 1000 spin_lock_irqsave(&pcpu_lock, flags);
1007 if (ret) { 1001 if (ret) {
1008 mutex_unlock(&pcpu_alloc_mutex);
1009 pcpu_free_area(chunk, off, &occ_pages); 1002 pcpu_free_area(chunk, off, &occ_pages);
1010 err = "failed to populate"; 1003 err = "failed to populate";
1011 goto fail_unlock; 1004 goto fail_unlock;
@@ -1045,6 +1038,8 @@ fail:
1045 /* see the flag handling in pcpu_balance_workfn() */ 1038 /* see the flag handling in pcpu_balance_workfn() */
1046 pcpu_atomic_alloc_failed = true; 1039 pcpu_atomic_alloc_failed = true;
1047 pcpu_schedule_balance_work(); 1040 pcpu_schedule_balance_work();
1041 } else {
1042 mutex_unlock(&pcpu_alloc_mutex);
1048 } 1043 }
1049 return NULL; 1044 return NULL;
1050} 1045}
@@ -1129,6 +1124,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
1129 if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) 1124 if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1130 continue; 1125 continue;
1131 1126
1127 list_del_init(&chunk->map_extend_list);
1132 list_move(&chunk->list, &to_free); 1128 list_move(&chunk->list, &to_free);
1133 } 1129 }
1134 1130
@@ -1146,6 +1142,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
1146 pcpu_destroy_chunk(chunk); 1142 pcpu_destroy_chunk(chunk);
1147 } 1143 }
1148 1144
1145 /* service chunks which requested async area map extension */
1146 do {
1147 int new_alloc = 0;
1148
1149 spin_lock_irq(&pcpu_lock);
1150
1151 chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
1152 struct pcpu_chunk, map_extend_list);
1153 if (chunk) {
1154 list_del_init(&chunk->map_extend_list);
1155 new_alloc = pcpu_need_to_extend(chunk, false);
1156 }
1157
1158 spin_unlock_irq(&pcpu_lock);
1159
1160 if (new_alloc)
1161 pcpu_extend_area_map(chunk, new_alloc);
1162 } while (chunk);
1163
1149 /* 1164 /*
1150 * Ensure there are certain number of free populated pages for 1165 * Ensure there are certain number of free populated pages for
1151 * atomic allocs. Fill up from the most packed so that atomic 1166 * atomic allocs. Fill up from the most packed so that atomic
@@ -1644,7 +1659,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1644 */ 1659 */
1645 schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); 1660 schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1646 INIT_LIST_HEAD(&schunk->list); 1661 INIT_LIST_HEAD(&schunk->list);
1647 INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn); 1662 INIT_LIST_HEAD(&schunk->map_extend_list);
1648 schunk->base_addr = base_addr; 1663 schunk->base_addr = base_addr;
1649 schunk->map = smap; 1664 schunk->map = smap;
1650 schunk->map_alloc = ARRAY_SIZE(smap); 1665 schunk->map_alloc = ARRAY_SIZE(smap);
@@ -1673,7 +1688,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1673 if (dyn_size) { 1688 if (dyn_size) {
1674 dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); 1689 dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1675 INIT_LIST_HEAD(&dchunk->list); 1690 INIT_LIST_HEAD(&dchunk->list);
1676 INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn); 1691 INIT_LIST_HEAD(&dchunk->map_extend_list);
1677 dchunk->base_addr = base_addr; 1692 dchunk->base_addr = base_addr;
1678 dchunk->map = dmap; 1693 dchunk->map = dmap;
1679 dchunk->map_alloc = ARRAY_SIZE(dmap); 1694 dchunk->map_alloc = ARRAY_SIZE(dmap);
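
Structurally, the percpu rework trades one work item per chunk for a shared "queue under the spinlock, drain from a single worker" shape, which is what lets area-map extension stop taking locks from per-chunk work callbacks. That shape, reduced to its bones with hypothetical names:

    struct item {
            struct list_head node;          /* INIT_LIST_HEAD() at creation;
                                               empty means "not queued" */
    };

    static LIST_HEAD(pending);              /* protected by pending_lock */
    static DEFINE_SPINLOCK(pending_lock);
    static void drain_workfn(struct work_struct *work);
    static DECLARE_WORK(drain_work, drain_workfn);
    static void process(struct item *it);   /* hypothetical payload work */

    static void producer(struct item *it)
    {
            spin_lock_irq(&pending_lock);
            if (list_empty(&it->node))      /* queue each item at most once */
                    list_add_tail(&it->node, &pending);
            spin_unlock_irq(&pending_lock);
            schedule_work(&drain_work);
    }

    static void drain_workfn(struct work_struct *work)
    {
            struct item *it;

            do {
                    spin_lock_irq(&pending_lock);
                    it = list_first_entry_or_null(&pending, struct item, node);
                    if (it)
                            list_del_init(&it->node);
                    spin_unlock_irq(&pending_lock);
                    if (it)
                            process(it);
            } while (it);
    }
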
diff --git a/mm/shmem.c b/mm/shmem.c
index a36144909b28..24463b67b6ef 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2227,7 +2227,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2227 /* Remove the !PageUptodate pages we added */ 2227 /* Remove the !PageUptodate pages we added */
2228 shmem_undo_range(inode, 2228 shmem_undo_range(inode,
2229 (loff_t)start << PAGE_SHIFT, 2229 (loff_t)start << PAGE_SHIFT,
2230 (loff_t)index << PAGE_SHIFT, true); 2230 ((loff_t)index << PAGE_SHIFT) - 1, true);
2231 goto undone; 2231 goto undone;
2232 } 2232 }
2233 2233
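
The shmem one-liner is an inclusive/exclusive off-by-one: shmem_undo_range() takes lend as an inclusive byte offset. Assuming start = 2, index = 5 and 4K pages:

    /* old: lend = 5 << 12       = 20480 -> reaches byte 0 of page 5 and
     *      corrupts a page this fallocate never added                   */
    /* new: lend = (5 << 12) - 1 = 20479 -> the undo stops after page 4  */
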
diff --git a/mm/swap.c b/mm/swap.c
index 59f5fafa6e1f..90530ff8ed16 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -242,7 +242,7 @@ void rotate_reclaimable_page(struct page *page)
242 get_page(page); 242 get_page(page);
243 local_irq_save(flags); 243 local_irq_save(flags);
244 pvec = this_cpu_ptr(&lru_rotate_pvecs); 244 pvec = this_cpu_ptr(&lru_rotate_pvecs);
245 if (!pagevec_add(pvec, page)) 245 if (!pagevec_add(pvec, page) || PageCompound(page))
246 pagevec_move_tail(pvec); 246 pagevec_move_tail(pvec);
247 local_irq_restore(flags); 247 local_irq_restore(flags);
248 } 248 }
@@ -296,7 +296,7 @@ void activate_page(struct page *page)
296 struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); 296 struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
297 297
298 get_page(page); 298 get_page(page);
299 if (!pagevec_add(pvec, page)) 299 if (!pagevec_add(pvec, page) || PageCompound(page))
300 pagevec_lru_move_fn(pvec, __activate_page, NULL); 300 pagevec_lru_move_fn(pvec, __activate_page, NULL);
301 put_cpu_var(activate_page_pvecs); 301 put_cpu_var(activate_page_pvecs);
302 } 302 }
@@ -391,9 +391,8 @@ static void __lru_cache_add(struct page *page)
391 struct pagevec *pvec = &get_cpu_var(lru_add_pvec); 391 struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
392 392
393 get_page(page); 393 get_page(page);
394 if (!pagevec_space(pvec)) 394 if (!pagevec_add(pvec, page) || PageCompound(page))
395 __pagevec_lru_add(pvec); 395 __pagevec_lru_add(pvec);
396 pagevec_add(pvec, page);
397 put_cpu_var(lru_add_pvec); 396 put_cpu_var(lru_add_pvec);
398} 397}
399 398
@@ -628,7 +627,7 @@ void deactivate_file_page(struct page *page)
628 if (likely(get_page_unless_zero(page))) { 627 if (likely(get_page_unless_zero(page))) {
629 struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs); 628 struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
630 629
631 if (!pagevec_add(pvec, page)) 630 if (!pagevec_add(pvec, page) || PageCompound(page))
632 pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); 631 pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
633 put_cpu_var(lru_deactivate_file_pvecs); 632 put_cpu_var(lru_deactivate_file_pvecs);
634 } 633 }
@@ -648,7 +647,7 @@ void deactivate_page(struct page *page)
648 struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); 647 struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
649 648
650 get_page(page); 649 get_page(page);
651 if (!pagevec_add(pvec, page)) 650 if (!pagevec_add(pvec, page) || PageCompound(page))
652 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); 651 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
653 put_cpu_var(lru_deactivate_pvecs); 652 put_cpu_var(lru_deactivate_pvecs);
654 } 653 }
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 86ae75b77390..c8f422c90856 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -790,6 +790,8 @@ static const struct net_device_ops vlan_netdev_ops = {
790 .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup, 790 .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup,
791#endif 791#endif
792 .ndo_fix_features = vlan_dev_fix_features, 792 .ndo_fix_features = vlan_dev_fix_features,
793 .ndo_neigh_construct = netdev_default_l2upper_neigh_construct,
794 .ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy,
793 .ndo_fdb_add = switchdev_port_fdb_add, 795 .ndo_fdb_add = switchdev_port_fdb_add,
794 .ndo_fdb_del = switchdev_port_fdb_del, 796 .ndo_fdb_del = switchdev_port_fdb_del,
795 .ndo_fdb_dump = switchdev_port_fdb_dump, 797 .ndo_fdb_dump = switchdev_port_fdb_dump,
diff --git a/net/atm/clip.c b/net/atm/clip.c
index e07f551a863c..53b4ac09e7b7 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -286,7 +286,7 @@ static const struct neigh_ops clip_neigh_ops = {
286 .connected_output = neigh_direct_output, 286 .connected_output = neigh_direct_output,
287}; 287};
288 288
289static int clip_constructor(struct neighbour *neigh) 289static int clip_constructor(struct net_device *dev, struct neighbour *neigh)
290{ 290{
291 struct atmarp_entry *entry = neighbour_priv(neigh); 291 struct atmarp_entry *entry = neighbour_priv(neigh);
292 292
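This hunk, together with the vlan_dev.c hunk above it, reflects a constructor signature change: the neighbour constructor now receives the net_device explicitly, which is what lets stacked (L2 upper) devices such as VLANs delegate to their lower device through the new netdev_default_l2upper_neigh_construct helper. A compilable sketch of the new shape, with opaque stand-in types rather than the real kernel headers:

    struct net_device;   /* opaque here; real kernel types in the diff above */
    struct neighbour;

    /* the constructor now takes the device it is invoked on, not just the neigh */
    static int example_neigh_construct(struct net_device *dev,
                                       struct neighbour *neigh)
    {
        (void)dev;     /* an upper device can forward to its lower device */
        (void)neigh;
        return 0;
    }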
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index fbd0acf80b13..2fdebabbfacd 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -976,7 +976,8 @@ static int ax25_release(struct socket *sock)
976 release_sock(sk); 976 release_sock(sk);
977 ax25_disconnect(ax25, 0); 977 ax25_disconnect(ax25, 0);
978 lock_sock(sk); 978 lock_sock(sk);
979 ax25_destroy_socket(ax25); 979 if (!sock_flag(ax25->sk, SOCK_DESTROY))
980 ax25_destroy_socket(ax25);
980 break; 981 break;
981 982
982 case AX25_STATE_3: 983 case AX25_STATE_3:
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 951cd57bb07d..5237dff6941d 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -102,6 +102,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
102 switch (ax25->state) { 102 switch (ax25->state) {
103 103
104 case AX25_STATE_0: 104 case AX25_STATE_0:
105 case AX25_STATE_2:
105 /* Magic here: If we listen() and a new link dies before it 106 /* Magic here: If we listen() and a new link dies before it
106 is accepted() it isn't 'dead' so doesn't get removed. */ 107 is accepted() it isn't 'dead' so doesn't get removed. */
107 if (!sk || sock_flag(sk, SOCK_DESTROY) || 108 if (!sk || sock_flag(sk, SOCK_DESTROY) ||
@@ -111,6 +112,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
111 sock_hold(sk); 112 sock_hold(sk);
112 ax25_destroy_socket(ax25); 113 ax25_destroy_socket(ax25);
113 bh_unlock_sock(sk); 114 bh_unlock_sock(sk);
115 /* Ungrab socket and destroy it */
114 sock_put(sk); 116 sock_put(sk);
115 } else 117 } else
116 ax25_destroy_socket(ax25); 118 ax25_destroy_socket(ax25);
@@ -213,7 +215,8 @@ void ax25_ds_t1_timeout(ax25_cb *ax25)
213 case AX25_STATE_2: 215 case AX25_STATE_2:
214 if (ax25->n2count == ax25->n2) { 216 if (ax25->n2count == ax25->n2) {
215 ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); 217 ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
216 ax25_disconnect(ax25, ETIMEDOUT); 218 if (!sock_flag(ax25->sk, SOCK_DESTROY))
219 ax25_disconnect(ax25, ETIMEDOUT);
217 return; 220 return;
218 } else { 221 } else {
219 ax25->n2count++; 222 ax25->n2count++;
diff --git a/net/ax25/ax25_std_timer.c b/net/ax25/ax25_std_timer.c
index 004467c9e6e1..2c0d6ef66f9d 100644
--- a/net/ax25/ax25_std_timer.c
+++ b/net/ax25/ax25_std_timer.c
@@ -38,6 +38,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
38 38
39 switch (ax25->state) { 39 switch (ax25->state) {
40 case AX25_STATE_0: 40 case AX25_STATE_0:
41 case AX25_STATE_2:
41 /* Magic here: If we listen() and a new link dies before it 42 /* Magic here: If we listen() and a new link dies before it
42 is accepted() it isn't 'dead' so doesn't get removed. */ 43 is accepted() it isn't 'dead' so doesn't get removed. */
43 if (!sk || sock_flag(sk, SOCK_DESTROY) || 44 if (!sk || sock_flag(sk, SOCK_DESTROY) ||
@@ -47,6 +48,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
47 sock_hold(sk); 48 sock_hold(sk);
48 ax25_destroy_socket(ax25); 49 ax25_destroy_socket(ax25);
49 bh_unlock_sock(sk); 50 bh_unlock_sock(sk);
51 /* Ungrab socket and destroy it */
50 sock_put(sk); 52 sock_put(sk);
51 } else 53 } else
52 ax25_destroy_socket(ax25); 54 ax25_destroy_socket(ax25);
@@ -144,7 +146,8 @@ void ax25_std_t1timer_expiry(ax25_cb *ax25)
144 case AX25_STATE_2: 146 case AX25_STATE_2:
145 if (ax25->n2count == ax25->n2) { 147 if (ax25->n2count == ax25->n2) {
146 ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); 148 ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
147 ax25_disconnect(ax25, ETIMEDOUT); 149 if (!sock_flag(ax25->sk, SOCK_DESTROY))
150 ax25_disconnect(ax25, ETIMEDOUT);
148 return; 151 return;
149 } else { 152 } else {
150 ax25->n2count++; 153 ax25->n2count++;
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 3b78e8473a01..655a7d4c96e1 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -264,7 +264,8 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
264{ 264{
265 ax25_clear_queues(ax25); 265 ax25_clear_queues(ax25);
266 266
267 ax25_stop_heartbeat(ax25); 267 if (!sock_flag(ax25->sk, SOCK_DESTROY))
268 ax25_stop_heartbeat(ax25);
268 ax25_stop_t1timer(ax25); 269 ax25_stop_t1timer(ax25);
269 ax25_stop_t2timer(ax25); 270 ax25_stop_t2timer(ax25);
270 ax25_stop_t3timer(ax25); 271 ax25_stop_t3timer(ax25);
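The ax25 hunks all share one idea: once a socket is flagged SOCK_DESTROY, the timer and disconnect paths must not destroy the control block again or keep its heartbeat alive, because the release path owns the final teardown. A userspace model of that guard, with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    struct conn { bool destroy_pending; };

    static void destroy_conn(struct conn *c)
    {
        (void)c;
        puts("teardown");   /* must run exactly once */
    }

    static void timer_expiry(struct conn *c)
    {
        /* mirrors: if (!sock_flag(ax25->sk, SOCK_DESTROY)) ... */
        if (!c->destroy_pending)
            destroy_conn(c);
        /* else: the release path finishes the job, avoiding a double free */
    }

    int main(void)
    {
        struct conn c = { .destroy_pending = true };
        timer_expiry(&c);   /* prints nothing: no second teardown */
        return 0;
    }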
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index f66930ee3c0b..833bb145ba3c 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -66,7 +66,7 @@ config BATMAN_ADV_NC
66 66
67config BATMAN_ADV_MCAST 67config BATMAN_ADV_MCAST
68 bool "Multicast optimisation" 68 bool "Multicast optimisation"
69 depends on BATMAN_ADV 69 depends on BATMAN_ADV && INET && !(BRIDGE=m && BATMAN_ADV=y)
70 default n 70 default n
71 help 71 help
72 This option enables the multicast optimisation which aims to 72 This option enables the multicast optimisation which aims to
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 797cf2fc88c1..a83fc6c58d19 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -17,6 +17,7 @@
17# 17#
18 18
19obj-$(CONFIG_BATMAN_ADV) += batman-adv.o 19obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
20batman-adv-y += bat_algo.o
20batman-adv-y += bat_iv_ogm.o 21batman-adv-y += bat_iv_ogm.o
21batman-adv-$(CONFIG_BATMAN_ADV_BATMAN_V) += bat_v.o 22batman-adv-$(CONFIG_BATMAN_ADV_BATMAN_V) += bat_v.o
22batman-adv-$(CONFIG_BATMAN_ADV_BATMAN_V) += bat_v_elp.o 23batman-adv-$(CONFIG_BATMAN_ADV_BATMAN_V) += bat_v_elp.o
@@ -31,12 +32,16 @@ batman-adv-y += gateway_common.o
31batman-adv-y += hard-interface.o 32batman-adv-y += hard-interface.o
32batman-adv-y += hash.o 33batman-adv-y += hash.o
33batman-adv-y += icmp_socket.o 34batman-adv-y += icmp_socket.o
35batman-adv-$(CONFIG_BATMAN_ADV_DEBUG) += log.o
34batman-adv-y += main.o 36batman-adv-y += main.o
35batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o 37batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o
38batman-adv-y += netlink.o
36batman-adv-$(CONFIG_BATMAN_ADV_NC) += network-coding.o 39batman-adv-$(CONFIG_BATMAN_ADV_NC) += network-coding.o
37batman-adv-y += originator.o 40batman-adv-y += originator.o
38batman-adv-y += routing.o 41batman-adv-y += routing.o
39batman-adv-y += send.o 42batman-adv-y += send.o
40batman-adv-y += soft-interface.o 43batman-adv-y += soft-interface.o
41batman-adv-y += sysfs.o 44batman-adv-y += sysfs.o
45batman-adv-y += tp_meter.o
42batman-adv-y += translation-table.o 46batman-adv-y += translation-table.o
47batman-adv-y += tvlv.o
diff --git a/net/batman-adv/bat_algo.c b/net/batman-adv/bat_algo.c
new file mode 100644
index 000000000000..81dbbf569bd4
--- /dev/null
+++ b/net/batman-adv/bat_algo.c
@@ -0,0 +1,140 @@
1/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "main.h"
19
20#include <linux/errno.h>
21#include <linux/list.h>
22#include <linux/moduleparam.h>
23#include <linux/printk.h>
24#include <linux/seq_file.h>
25#include <linux/stddef.h>
26#include <linux/string.h>
27
28#include "bat_algo.h"
29
30char batadv_routing_algo[20] = "BATMAN_IV";
31static struct hlist_head batadv_algo_list;
32
33/**
34 * batadv_algo_init - Initialize batman-adv algorithm management data structures
35 */
36void batadv_algo_init(void)
37{
38 INIT_HLIST_HEAD(&batadv_algo_list);
39}
40
41static struct batadv_algo_ops *batadv_algo_get(char *name)
42{
43 struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
44
45 hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
46 if (strcmp(bat_algo_ops_tmp->name, name) != 0)
47 continue;
48
49 bat_algo_ops = bat_algo_ops_tmp;
50 break;
51 }
52
53 return bat_algo_ops;
54}
55
56int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
57{
58 struct batadv_algo_ops *bat_algo_ops_tmp;
59
60 bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
61 if (bat_algo_ops_tmp) {
62 pr_info("Trying to register already registered routing algorithm: %s\n",
63 bat_algo_ops->name);
64 return -EEXIST;
65 }
66
67 /* all algorithms must implement all ops (for now) */
68 if (!bat_algo_ops->iface.enable ||
69 !bat_algo_ops->iface.disable ||
70 !bat_algo_ops->iface.update_mac ||
71 !bat_algo_ops->iface.primary_set ||
72 !bat_algo_ops->neigh.cmp ||
73 !bat_algo_ops->neigh.is_similar_or_better) {
74 pr_info("Routing algo '%s' does not implement required ops\n",
75 bat_algo_ops->name);
76 return -EINVAL;
77 }
78
79 INIT_HLIST_NODE(&bat_algo_ops->list);
80 hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
81
82 return 0;
83}
84
85int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
86{
87 struct batadv_algo_ops *bat_algo_ops;
88
89 bat_algo_ops = batadv_algo_get(name);
90 if (!bat_algo_ops)
91 return -EINVAL;
92
93 bat_priv->algo_ops = bat_algo_ops;
94
95 return 0;
96}
97
98int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
99{
100 struct batadv_algo_ops *bat_algo_ops;
101
102 seq_puts(seq, "Available routing algorithms:\n");
103
104 hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
105 seq_printf(seq, " * %s\n", bat_algo_ops->name);
106 }
107
108 return 0;
109}
110
111static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
112{
113 struct batadv_algo_ops *bat_algo_ops;
114 char *algo_name = (char *)val;
115 size_t name_len = strlen(algo_name);
116
117 if (name_len > 0 && algo_name[name_len - 1] == '\n')
118 algo_name[name_len - 1] = '\0';
119
120 bat_algo_ops = batadv_algo_get(algo_name);
121 if (!bat_algo_ops) {
122 pr_err("Routing algorithm '%s' is not supported\n", algo_name);
123 return -EINVAL;
124 }
125
126 return param_set_copystring(algo_name, kp);
127}
128
129static const struct kernel_param_ops batadv_param_ops_ra = {
130 .set = batadv_param_set_ra,
131 .get = param_get_string,
132};
133
134static struct kparam_string batadv_param_string_ra = {
135 .maxlen = sizeof(batadv_routing_algo),
136 .string = batadv_routing_algo,
137};
138
139module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
140 0644);
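The new bat_algo.c centralises algorithm registration and exposes the choice as a module parameter; batadv_param_set_ra() strips a trailing newline and rejects names that were never registered. A hypothetical extra backend would hook in as below — none of the example_* symbols exist in the tree, and their handlers are assumed to be defined elsewhere with the prototypes batadv_algo_ops requires:

    /* hypothetical backend; shown only to illustrate the registration path */
    static struct batadv_algo_ops batadv_example __read_mostly = {
        .name = "BATMAN_EXAMPLE",
        .iface = {
            .enable      = example_iface_enable,
            .disable     = example_iface_disable,
            .update_mac  = example_iface_update_mac,
            .primary_set = example_primary_set,
        },
        .neigh = {
            .cmp                  = example_neigh_cmp,
            .is_similar_or_better = example_neigh_is_sob,
        },
    };

    static int __init batadv_example_init(void)
    {
        /* returns -EINVAL if any of the mandatory ops above were missing */
        return batadv_algo_register(&batadv_example);
    }

The default could then typically be overridden at load time with routing_algo=BATMAN_EXAMPLE, which batadv_param_set_ra() validates against the registered list.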
diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h
index 03dafd33d23b..860d773dd8fa 100644
--- a/net/batman-adv/bat_algo.h
+++ b/net/batman-adv/bat_algo.h
@@ -18,32 +18,18 @@
18#ifndef _NET_BATMAN_ADV_BAT_ALGO_H_ 18#ifndef _NET_BATMAN_ADV_BAT_ALGO_H_
19#define _NET_BATMAN_ADV_BAT_ALGO_H_ 19#define _NET_BATMAN_ADV_BAT_ALGO_H_
20 20
21struct batadv_priv; 21#include "main.h"
22 22
23int batadv_iv_init(void); 23#include <linux/types.h>
24 24
25#ifdef CONFIG_BATMAN_ADV_BATMAN_V 25struct seq_file;
26 26
27int batadv_v_init(void); 27extern char batadv_routing_algo[];
28int batadv_v_mesh_init(struct batadv_priv *bat_priv); 28extern struct list_head batadv_hardif_list;
29void batadv_v_mesh_free(struct batadv_priv *bat_priv);
30 29
31#else 30void batadv_algo_init(void);
32 31int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
33static inline int batadv_v_init(void) 32int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
34{ 33int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
35 return 0;
36}
37
38static inline int batadv_v_mesh_init(struct batadv_priv *bat_priv)
39{
40 return 0;
41}
42
43static inline void batadv_v_mesh_free(struct batadv_priv *bat_priv)
44{
45}
46
47#endif /* CONFIG_BATMAN_ADV_BATMAN_V */
48 34
49#endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */ 35#endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index ce2f203048d3..19b0abd6c640 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -15,7 +15,7 @@
15 * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include "bat_algo.h" 18#include "bat_iv_ogm.h"
19#include "main.h" 19#include "main.h"
20 20
21#include <linux/atomic.h> 21#include <linux/atomic.h>
@@ -30,8 +30,9 @@
30#include <linux/if_ether.h> 30#include <linux/if_ether.h>
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/jiffies.h> 32#include <linux/jiffies.h>
33#include <linux/list.h> 33#include <linux/kernel.h>
34#include <linux/kref.h> 34#include <linux/kref.h>
35#include <linux/list.h>
35#include <linux/lockdep.h> 36#include <linux/lockdep.h>
36#include <linux/netdevice.h> 37#include <linux/netdevice.h>
37#include <linux/pkt_sched.h> 38#include <linux/pkt_sched.h>
@@ -48,15 +49,20 @@
48#include <linux/types.h> 49#include <linux/types.h>
49#include <linux/workqueue.h> 50#include <linux/workqueue.h>
50 51
52#include "bat_algo.h"
51#include "bitarray.h" 53#include "bitarray.h"
52#include "hard-interface.h" 54#include "hard-interface.h"
53#include "hash.h" 55#include "hash.h"
56#include "log.h"
54#include "network-coding.h" 57#include "network-coding.h"
55#include "originator.h" 58#include "originator.h"
56#include "packet.h" 59#include "packet.h"
57#include "routing.h" 60#include "routing.h"
58#include "send.h" 61#include "send.h"
59#include "translation-table.h" 62#include "translation-table.h"
63#include "tvlv.h"
64
65static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work);
60 66
61/** 67/**
62 * enum batadv_dup_status - duplicate status 68 * enum batadv_dup_status - duplicate status
@@ -336,7 +342,8 @@ batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
336{ 342{
337 struct batadv_neigh_node *neigh_node; 343 struct batadv_neigh_node *neigh_node;
338 344
339 neigh_node = batadv_neigh_node_new(orig_node, hard_iface, neigh_addr); 345 neigh_node = batadv_neigh_node_get_or_create(orig_node,
346 hard_iface, neigh_addr);
340 if (!neigh_node) 347 if (!neigh_node)
341 goto out; 348 goto out;
342 349
@@ -730,7 +737,7 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
730 737
731 /* start timer for this packet */ 738 /* start timer for this packet */
732 INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work, 739 INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
733 batadv_send_outstanding_bat_ogm_packet); 740 batadv_iv_send_outstanding_bat_ogm_packet);
734 queue_delayed_work(batadv_event_workqueue, 741 queue_delayed_work(batadv_event_workqueue,
735 &forw_packet_aggr->delayed_work, 742 &forw_packet_aggr->delayed_work,
736 send_time - jiffies); 743 send_time - jiffies);
@@ -937,6 +944,19 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
937 u16 tvlv_len = 0; 944 u16 tvlv_len = 0;
938 unsigned long send_time; 945 unsigned long send_time;
939 946
947 if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
948 (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
949 return;
950
951 /* the interface gets activated here to avoid race conditions between
952 * the moment of activating the interface in
953 * hardif_activate_interface() where the originator mac is set and
954 * outdated packets (especially uninitialized mac addresses) in the
955 * packet queue
956 */
957 if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
958 hard_iface->if_status = BATADV_IF_ACTIVE;
959
940 primary_if = batadv_primary_if_get_selected(bat_priv); 960 primary_if = batadv_primary_if_get_selected(bat_priv);
941 961
942 if (hard_iface == primary_if) { 962 if (hard_iface == primary_if) {
@@ -1778,6 +1798,45 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
1778 batadv_orig_node_put(orig_node); 1798 batadv_orig_node_put(orig_node);
1779} 1799}
1780 1800
1801static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work)
1802{
1803 struct delayed_work *delayed_work;
1804 struct batadv_forw_packet *forw_packet;
1805 struct batadv_priv *bat_priv;
1806
1807 delayed_work = to_delayed_work(work);
1808 forw_packet = container_of(delayed_work, struct batadv_forw_packet,
1809 delayed_work);
1810 bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
1811 spin_lock_bh(&bat_priv->forw_bat_list_lock);
1812 hlist_del(&forw_packet->list);
1813 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
1814
1815 if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
1816 goto out;
1817
1818 batadv_iv_ogm_emit(forw_packet);
1819
1820 /* we have to have at least one packet in the queue to determine the
1821 * queues wake up time unless we are shutting down.
1822 *
1823 * only re-schedule if this is the "original" copy, e.g. the OGM of the
1824 * primary interface should only be rescheduled once per period, but
1825 * this function will be called for the forw_packet instances of the
1826 * other secondary interfaces as well.
1827 */
1828 if (forw_packet->own &&
1829 forw_packet->if_incoming == forw_packet->if_outgoing)
1830 batadv_iv_ogm_schedule(forw_packet->if_incoming);
1831
1832out:
1833 /* don't count own packet */
1834 if (!forw_packet->own)
1835 atomic_inc(&bat_priv->batman_queue_left);
1836
1837 batadv_forw_packet_free(forw_packet);
1838}
1839
1781static int batadv_iv_ogm_receive(struct sk_buff *skb, 1840static int batadv_iv_ogm_receive(struct sk_buff *skb,
1782 struct batadv_hard_iface *if_incoming) 1841 struct batadv_hard_iface *if_incoming)
1783{ 1842{
@@ -1794,7 +1853,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
1794 /* did we receive a B.A.T.M.A.N. IV OGM packet on an interface 1853 /* did we receive a B.A.T.M.A.N. IV OGM packet on an interface
1795 * that does not have B.A.T.M.A.N. IV enabled ? 1854 * that does not have B.A.T.M.A.N. IV enabled ?
1796 */ 1855 */
1797 if (bat_priv->bat_algo_ops->bat_ogm_emit != batadv_iv_ogm_emit) 1856 if (bat_priv->algo_ops->iface.enable != batadv_iv_ogm_iface_enable)
1798 return NET_RX_DROP; 1857 return NET_RX_DROP;
1799 1858
1800 batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX); 1859 batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
@@ -2052,21 +2111,32 @@ out:
2052 return ret; 2111 return ret;
2053} 2112}
2054 2113
2114static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
2115{
2116 /* begin scheduling originator messages on that interface */
2117 batadv_iv_ogm_schedule(hard_iface);
2118}
2119
2055static struct batadv_algo_ops batadv_batman_iv __read_mostly = { 2120static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
2056 .name = "BATMAN_IV", 2121 .name = "BATMAN_IV",
2057 .bat_iface_enable = batadv_iv_ogm_iface_enable, 2122 .iface = {
2058 .bat_iface_disable = batadv_iv_ogm_iface_disable, 2123 .activate = batadv_iv_iface_activate,
2059 .bat_iface_update_mac = batadv_iv_ogm_iface_update_mac, 2124 .enable = batadv_iv_ogm_iface_enable,
2060 .bat_primary_iface_set = batadv_iv_ogm_primary_iface_set, 2125 .disable = batadv_iv_ogm_iface_disable,
2061 .bat_ogm_schedule = batadv_iv_ogm_schedule, 2126 .update_mac = batadv_iv_ogm_iface_update_mac,
2062 .bat_ogm_emit = batadv_iv_ogm_emit, 2127 .primary_set = batadv_iv_ogm_primary_iface_set,
2063 .bat_neigh_cmp = batadv_iv_ogm_neigh_cmp, 2128 },
2064 .bat_neigh_is_similar_or_better = batadv_iv_ogm_neigh_is_sob, 2129 .neigh = {
2065 .bat_neigh_print = batadv_iv_neigh_print, 2130 .cmp = batadv_iv_ogm_neigh_cmp,
2066 .bat_orig_print = batadv_iv_ogm_orig_print, 2131 .is_similar_or_better = batadv_iv_ogm_neigh_is_sob,
2067 .bat_orig_free = batadv_iv_ogm_orig_free, 2132 .print = batadv_iv_neigh_print,
2068 .bat_orig_add_if = batadv_iv_ogm_orig_add_if, 2133 },
2069 .bat_orig_del_if = batadv_iv_ogm_orig_del_if, 2134 .orig = {
2135 .print = batadv_iv_ogm_orig_print,
2136 .free = batadv_iv_ogm_orig_free,
2137 .add_if = batadv_iv_ogm_orig_add_if,
2138 .del_if = batadv_iv_ogm_orig_del_if,
2139 },
2070}; 2140};
2071 2141
2072int __init batadv_iv_init(void) 2142int __init batadv_iv_init(void)
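Two things happen in the bat_iv_ogm.c diff: the flat bat_* callbacks are regrouped into iface/neigh/orig sub-tables, and with bat_ogm_emit gone from the shared ops the receive path now identifies the owning backend by comparing the iface.enable function pointer. A runnable userspace model of that pointer-comparison check, with made-up names:

    #include <stdbool.h>
    #include <stdio.h>

    struct ops { int (*enable)(void); };

    static int iv_enable(void) { return 0; }
    static int v_enable(void)  { return 0; }

    static bool is_iv(const struct ops *o)
    {
        /* mirrors: algo_ops->iface.enable != batadv_iv_ogm_iface_enable */
        return o->enable == iv_enable;
    }

    int main(void)
    {
        struct ops iv = { iv_enable }, v = { v_enable };
        printf("%d %d\n", is_iv(&iv), is_iv(&v));   /* prints: 1 0 */
        return 0;
    }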
diff --git a/net/batman-adv/bat_iv_ogm.h b/net/batman-adv/bat_iv_ogm.h
new file mode 100644
index 000000000000..b9f3550faaf7
--- /dev/null
+++ b/net/batman-adv/bat_iv_ogm.h
@@ -0,0 +1,25 @@
1/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef _BATMAN_ADV_BATADV_IV_OGM_H_
19#define _BATMAN_ADV_BATADV_IV_OGM_H_
20
21#include "main.h"
22
23int batadv_iv_init(void);
24
25#endif /* _BATMAN_ADV_BATADV_IV_OGM_H_ */
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 0a12e5cdd65d..0366cbf5e444 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -15,7 +15,7 @@
15 * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include "bat_algo.h" 18#include "bat_v.h"
19#include "main.h" 19#include "main.h"
20 20
21#include <linux/atomic.h> 21#include <linux/atomic.h>
@@ -31,6 +31,7 @@
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/workqueue.h> 32#include <linux/workqueue.h>
33 33
34#include "bat_algo.h"
34#include "bat_v_elp.h" 35#include "bat_v_elp.h"
35#include "bat_v_ogm.h" 36#include "bat_v_ogm.h"
36#include "hard-interface.h" 37#include "hard-interface.h"
@@ -70,11 +71,6 @@ static int batadv_v_iface_enable(struct batadv_hard_iface *hard_iface)
70 if (ret < 0) 71 if (ret < 0)
71 batadv_v_elp_iface_disable(hard_iface); 72 batadv_v_elp_iface_disable(hard_iface);
72 73
73 /* enable link throughput auto-detection by setting the throughput
74 * override to zero
75 */
76 atomic_set(&hard_iface->bat_v.throughput_override, 0);
77
78 return ret; 74 return ret;
79} 75}
80 76
@@ -119,14 +115,6 @@ batadv_v_hardif_neigh_init(struct batadv_hardif_neigh_node *hardif_neigh)
119 batadv_v_elp_throughput_metric_update); 115 batadv_v_elp_throughput_metric_update);
120} 116}
121 117
122static void batadv_v_ogm_schedule(struct batadv_hard_iface *hard_iface)
123{
124}
125
126static void batadv_v_ogm_emit(struct batadv_forw_packet *forw_packet)
127{
128}
129
130/** 118/**
131 * batadv_v_orig_print_neigh - print neighbors for the originator table 119 * batadv_v_orig_print_neigh - print neighbors for the originator table
132 * @orig_node: the orig_node for which the neighbors are printed 120 * @orig_node: the orig_node for which the neighbors are printed
@@ -334,21 +322,39 @@ err_ifinfo1:
334 322
335static struct batadv_algo_ops batadv_batman_v __read_mostly = { 323static struct batadv_algo_ops batadv_batman_v __read_mostly = {
336 .name = "BATMAN_V", 324 .name = "BATMAN_V",
337 .bat_iface_activate = batadv_v_iface_activate, 325 .iface = {
338 .bat_iface_enable = batadv_v_iface_enable, 326 .activate = batadv_v_iface_activate,
339 .bat_iface_disable = batadv_v_iface_disable, 327 .enable = batadv_v_iface_enable,
340 .bat_iface_update_mac = batadv_v_iface_update_mac, 328 .disable = batadv_v_iface_disable,
341 .bat_primary_iface_set = batadv_v_primary_iface_set, 329 .update_mac = batadv_v_iface_update_mac,
342 .bat_hardif_neigh_init = batadv_v_hardif_neigh_init, 330 .primary_set = batadv_v_primary_iface_set,
343 .bat_ogm_emit = batadv_v_ogm_emit, 331 },
344 .bat_ogm_schedule = batadv_v_ogm_schedule, 332 .neigh = {
345 .bat_orig_print = batadv_v_orig_print, 333 .hardif_init = batadv_v_hardif_neigh_init,
346 .bat_neigh_cmp = batadv_v_neigh_cmp, 334 .cmp = batadv_v_neigh_cmp,
347 .bat_neigh_is_similar_or_better = batadv_v_neigh_is_sob, 335 .is_similar_or_better = batadv_v_neigh_is_sob,
348 .bat_neigh_print = batadv_v_neigh_print, 336 .print = batadv_v_neigh_print,
337 },
338 .orig = {
339 .print = batadv_v_orig_print,
340 },
349}; 341};
350 342
351/** 343/**
344 * batadv_v_hardif_init - initialize the algorithm specific fields in the
345 * hard-interface object
346 * @hard_iface: the hard-interface to initialize
347 */
348void batadv_v_hardif_init(struct batadv_hard_iface *hard_iface)
349{
350 /* enable link throughput auto-detection by setting the throughput
351 * override to zero
352 */
353 atomic_set(&hard_iface->bat_v.throughput_override, 0);
354 atomic_set(&hard_iface->bat_v.elp_interval, 500);
355}
356
357/**
352 * batadv_v_mesh_init - initialize the B.A.T.M.A.N. V private resources for a 358 * batadv_v_mesh_init - initialize the B.A.T.M.A.N. V private resources for a
353 * mesh 359 * mesh
354 * @bat_priv: the object representing the mesh interface to initialise 360 * @bat_priv: the object representing the mesh interface to initialise
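Moving the throughput_override and elp_interval setup out of iface_enable and into the new batadv_v_hardif_init() means the tunables are initialised once when the hard-interface object is created, so a later disable/enable cycle no longer resets user-configured values. A toy model of that distinction, names illustrative:

    #include <stdbool.h>

    struct iface { int throughput_override; int elp_interval; bool enabled; };

    static void iface_create(struct iface *i)   /* runs once, at creation */
    {
        i->throughput_override = 0;   /* 0 = auto-detect, per the hunk above */
        i->elp_interval = 500;        /* default probing interval */
        i->enabled = false;
    }

    static void iface_enable(struct iface *i)   /* may run many times */
    {
        i->enabled = true;   /* no tunable resets here any more */
    }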
diff --git a/net/batman-adv/bat_v.h b/net/batman-adv/bat_v.h
new file mode 100644
index 000000000000..83b77639729e
--- /dev/null
+++ b/net/batman-adv/bat_v.h
@@ -0,0 +1,52 @@
1/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner, Linus Lüssing
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef _NET_BATMAN_ADV_BAT_V_H_
19#define _NET_BATMAN_ADV_BAT_V_H_
20
21#include "main.h"
22
23#ifdef CONFIG_BATMAN_ADV_BATMAN_V
24
25int batadv_v_init(void);
26void batadv_v_hardif_init(struct batadv_hard_iface *hardif);
27int batadv_v_mesh_init(struct batadv_priv *bat_priv);
28void batadv_v_mesh_free(struct batadv_priv *bat_priv);
29
30#else
31
32static inline int batadv_v_init(void)
33{
34 return 0;
35}
36
37static inline void batadv_v_hardif_init(struct batadv_hard_iface *hardif)
38{
39}
40
41static inline int batadv_v_mesh_init(struct batadv_priv *bat_priv)
42{
43 return 0;
44}
45
46static inline void batadv_v_mesh_free(struct batadv_priv *bat_priv)
47{
48}
49
50#endif /* CONFIG_BATMAN_ADV_BATMAN_V */
51
52#endif /* _NET_BATMAN_ADV_BAT_V_H_ */
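The new bat_v.h uses the usual compile-out idiom: when CONFIG_BATMAN_ADV_BATMAN_V is disabled, the declarations are replaced by no-op static inlines so call sites link cleanly and stay free of #ifdefs. The generic form of the pattern, with a placeholder config name:

    #ifdef CONFIG_EXAMPLE_FEATURE
    int example_feature_init(void);
    #else
    static inline int example_feature_init(void)
    {
        return 0;   /* success: nothing to set up when compiled out */
    }
    #endif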
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index df42eb1365a0..7d170010beb9 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -43,6 +43,7 @@
43#include "bat_algo.h" 43#include "bat_algo.h"
44#include "bat_v_ogm.h" 44#include "bat_v_ogm.h"
45#include "hard-interface.h" 45#include "hard-interface.h"
46#include "log.h"
46#include "originator.h" 47#include "originator.h"
47#include "packet.h" 48#include "packet.h"
48#include "routing.h" 49#include "routing.h"
@@ -344,7 +345,6 @@ int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
344 /* randomize initial seqno to avoid collision */ 345 /* randomize initial seqno to avoid collision */
345 get_random_bytes(&random_seqno, sizeof(random_seqno)); 346 get_random_bytes(&random_seqno, sizeof(random_seqno));
346 atomic_set(&hard_iface->bat_v.elp_seqno, random_seqno); 347 atomic_set(&hard_iface->bat_v.elp_seqno, random_seqno);
347 atomic_set(&hard_iface->bat_v.elp_interval, 500);
348 348
349 /* assume full-duplex by default */ 349 /* assume full-duplex by default */
350 hard_iface->bat_v.flags |= BATADV_FULL_DUPLEX; 350 hard_iface->bat_v.flags |= BATADV_FULL_DUPLEX;
@@ -443,7 +443,8 @@ static void batadv_v_elp_neigh_update(struct batadv_priv *bat_priv,
443 if (!orig_neigh) 443 if (!orig_neigh)
444 return; 444 return;
445 445
446 neigh = batadv_neigh_node_new(orig_neigh, if_incoming, neigh_addr); 446 neigh = batadv_neigh_node_get_or_create(orig_neigh,
447 if_incoming, neigh_addr);
447 if (!neigh) 448 if (!neigh)
448 goto orig_free; 449 goto orig_free;
449 450
@@ -503,7 +504,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb,
503 /* did we receive a B.A.T.M.A.N. V ELP packet on an interface 504 /* did we receive a B.A.T.M.A.N. V ELP packet on an interface
504 * that does not have B.A.T.M.A.N. V ELP enabled ? 505 * that does not have B.A.T.M.A.N. V ELP enabled ?
505 */ 506 */
506 if (strcmp(bat_priv->bat_algo_ops->name, "BATMAN_V") != 0) 507 if (strcmp(bat_priv->algo_ops->name, "BATMAN_V") != 0)
507 return NET_RX_DROP; 508 return NET_RX_DROP;
508 509
509 elp_packet = (struct batadv_elp_packet *)skb->data; 510 elp_packet = (struct batadv_elp_packet *)skb->data;
diff --git a/net/batman-adv/bat_v_elp.h b/net/batman-adv/bat_v_elp.h
index cc130b2d05e5..be17c0b1369e 100644
--- a/net/batman-adv/bat_v_elp.h
+++ b/net/batman-adv/bat_v_elp.h
@@ -15,11 +15,11 @@
15 * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include "main.h"
19
20#ifndef _NET_BATMAN_ADV_BAT_V_ELP_H_ 18#ifndef _NET_BATMAN_ADV_BAT_V_ELP_H_
21#define _NET_BATMAN_ADV_BAT_V_ELP_H_ 19#define _NET_BATMAN_ADV_BAT_V_ELP_H_
22 20
21#include "main.h"
22
23struct sk_buff; 23struct sk_buff;
24struct work_struct; 24struct work_struct;
25 25
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 473ebb9a0e73..6fbba4eb0617 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -39,13 +39,16 @@
39#include <linux/types.h> 39#include <linux/types.h>
40#include <linux/workqueue.h> 40#include <linux/workqueue.h>
41 41
42#include "bat_algo.h"
42#include "hard-interface.h" 43#include "hard-interface.h"
43#include "hash.h" 44#include "hash.h"
45#include "log.h"
44#include "originator.h" 46#include "originator.h"
45#include "packet.h" 47#include "packet.h"
46#include "routing.h" 48#include "routing.h"
47#include "send.h" 49#include "send.h"
48#include "translation-table.h" 50#include "translation-table.h"
51#include "tvlv.h"
49 52
50/** 53/**
51 * batadv_v_ogm_orig_get - retrieve and possibly create an originator node 54 * batadv_v_ogm_orig_get - retrieve and possibly create an originator node
@@ -683,8 +686,8 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
683 if (!orig_node) 686 if (!orig_node)
684 return; 687 return;
685 688
686 neigh_node = batadv_neigh_node_new(orig_node, if_incoming, 689 neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming,
687 ethhdr->h_source); 690 ethhdr->h_source);
688 if (!neigh_node) 691 if (!neigh_node)
689 goto out; 692 goto out;
690 693
@@ -751,7 +754,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
751 /* did we receive a OGM2 packet on an interface that does not have 754 /* did we receive a OGM2 packet on an interface that does not have
752 * B.A.T.M.A.N. V enabled ? 755 * B.A.T.M.A.N. V enabled ?
753 */ 756 */
754 if (strcmp(bat_priv->bat_algo_ops->name, "BATMAN_V") != 0) 757 if (strcmp(bat_priv->algo_ops->name, "BATMAN_V") != 0)
755 return NET_RX_DROP; 758 return NET_RX_DROP;
756 759
757 if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN)) 760 if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN))
diff --git a/net/batman-adv/bat_v_ogm.h b/net/batman-adv/bat_v_ogm.h
index d849c75ada0e..4c4d45caa422 100644
--- a/net/batman-adv/bat_v_ogm.h
+++ b/net/batman-adv/bat_v_ogm.h
@@ -18,10 +18,10 @@
18#ifndef _BATMAN_ADV_BATADV_V_OGM_H_ 18#ifndef _BATMAN_ADV_BATADV_V_OGM_H_
19#define _BATMAN_ADV_BATADV_V_OGM_H_ 19#define _BATMAN_ADV_BATADV_V_OGM_H_
20 20
21#include "main.h"
22
21#include <linux/types.h> 23#include <linux/types.h>
22 24
23struct batadv_hard_iface;
24struct batadv_priv;
25struct sk_buff; 25struct sk_buff;
26 26
27int batadv_v_ogm_init(struct batadv_priv *bat_priv); 27int batadv_v_ogm_init(struct batadv_priv *bat_priv);
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index a0c7913837a5..032271421a20 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -20,6 +20,8 @@
20 20
21#include <linux/bitmap.h> 21#include <linux/bitmap.h>
22 22
23#include "log.h"
24
23/* shift the packet array by n places. */ 25/* shift the packet array by n places. */
24static void batadv_bitmap_shift_left(unsigned long *seq_bits, s32 n) 26static void batadv_bitmap_shift_left(unsigned long *seq_bits, s32 n)
25{ 27{
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 748a9ead7ce5..e4f7494fb974 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -48,6 +48,7 @@
48 48
49#include "hard-interface.h" 49#include "hard-interface.h"
50#include "hash.h" 50#include "hash.h"
51#include "log.h"
51#include "originator.h" 52#include "originator.h"
52#include "packet.h" 53#include "packet.h"
53#include "sysfs.h" 54#include "sysfs.h"
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 952900466d88..1d68b6e63b96 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -18,245 +18,33 @@
18#include "debugfs.h" 18#include "debugfs.h"
19#include "main.h" 19#include "main.h"
20 20
21#include <linux/compiler.h>
22#include <linux/debugfs.h> 21#include <linux/debugfs.h>
23#include <linux/device.h> 22#include <linux/device.h>
24#include <linux/errno.h> 23#include <linux/errno.h>
25#include <linux/export.h> 24#include <linux/export.h>
26#include <linux/fcntl.h>
27#include <linux/fs.h> 25#include <linux/fs.h>
28#include <linux/jiffies.h>
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/netdevice.h> 26#include <linux/netdevice.h>
32#include <linux/poll.h>
33#include <linux/printk.h> 27#include <linux/printk.h>
34#include <linux/sched.h> /* for linux/wait.h */ 28#include <linux/sched.h> /* for linux/wait.h */
35#include <linux/seq_file.h> 29#include <linux/seq_file.h>
36#include <linux/slab.h>
37#include <linux/spinlock.h>
38#include <linux/stat.h> 30#include <linux/stat.h>
39#include <linux/stddef.h> 31#include <linux/stddef.h>
40#include <linux/stringify.h> 32#include <linux/stringify.h>
41#include <linux/sysfs.h> 33#include <linux/sysfs.h>
42#include <linux/types.h>
43#include <linux/uaccess.h>
44#include <linux/wait.h>
45#include <stdarg.h>
46 34
35#include "bat_algo.h"
47#include "bridge_loop_avoidance.h" 36#include "bridge_loop_avoidance.h"
48#include "distributed-arp-table.h" 37#include "distributed-arp-table.h"
49#include "gateway_client.h" 38#include "gateway_client.h"
50#include "icmp_socket.h" 39#include "icmp_socket.h"
40#include "log.h"
41#include "multicast.h"
51#include "network-coding.h" 42#include "network-coding.h"
52#include "originator.h" 43#include "originator.h"
53#include "translation-table.h" 44#include "translation-table.h"
54 45
55static struct dentry *batadv_debugfs; 46static struct dentry *batadv_debugfs;
56 47
57#ifdef CONFIG_BATMAN_ADV_DEBUG
58#define BATADV_LOG_BUFF_MASK (batadv_log_buff_len - 1)
59
60static const int batadv_log_buff_len = BATADV_LOG_BUF_LEN;
61
62static char *batadv_log_char_addr(struct batadv_priv_debug_log *debug_log,
63 size_t idx)
64{
65 return &debug_log->log_buff[idx & BATADV_LOG_BUFF_MASK];
66}
67
68static void batadv_emit_log_char(struct batadv_priv_debug_log *debug_log,
69 char c)
70{
71 char *char_addr;
72
73 char_addr = batadv_log_char_addr(debug_log, debug_log->log_end);
74 *char_addr = c;
75 debug_log->log_end++;
76
77 if (debug_log->log_end - debug_log->log_start > batadv_log_buff_len)
78 debug_log->log_start = debug_log->log_end - batadv_log_buff_len;
79}
80
81__printf(2, 3)
82static int batadv_fdebug_log(struct batadv_priv_debug_log *debug_log,
83 const char *fmt, ...)
84{
85 va_list args;
86 static char debug_log_buf[256];
87 char *p;
88
89 if (!debug_log)
90 return 0;
91
92 spin_lock_bh(&debug_log->lock);
93 va_start(args, fmt);
94 vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args);
95 va_end(args);
96
97 for (p = debug_log_buf; *p != 0; p++)
98 batadv_emit_log_char(debug_log, *p);
99
100 spin_unlock_bh(&debug_log->lock);
101
102 wake_up(&debug_log->queue_wait);
103
104 return 0;
105}
106
107int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
108{
109 va_list args;
110 char tmp_log_buf[256];
111
112 va_start(args, fmt);
113 vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
114 batadv_fdebug_log(bat_priv->debug_log, "[%10u] %s",
115 jiffies_to_msecs(jiffies), tmp_log_buf);
116 va_end(args);
117
118 return 0;
119}
120
121static int batadv_log_open(struct inode *inode, struct file *file)
122{
123 if (!try_module_get(THIS_MODULE))
124 return -EBUSY;
125
126 nonseekable_open(inode, file);
127 file->private_data = inode->i_private;
128 return 0;
129}
130
131static int batadv_log_release(struct inode *inode, struct file *file)
132{
133 module_put(THIS_MODULE);
134 return 0;
135}
136
137static bool batadv_log_empty(struct batadv_priv_debug_log *debug_log)
138{
139 return !(debug_log->log_start - debug_log->log_end);
140}
141
142static ssize_t batadv_log_read(struct file *file, char __user *buf,
143 size_t count, loff_t *ppos)
144{
145 struct batadv_priv *bat_priv = file->private_data;
146 struct batadv_priv_debug_log *debug_log = bat_priv->debug_log;
147 int error, i = 0;
148 char *char_addr;
149 char c;
150
151 if ((file->f_flags & O_NONBLOCK) && batadv_log_empty(debug_log))
152 return -EAGAIN;
153
154 if (!buf)
155 return -EINVAL;
156
157 if (count == 0)
158 return 0;
159
160 if (!access_ok(VERIFY_WRITE, buf, count))
161 return -EFAULT;
162
163 error = wait_event_interruptible(debug_log->queue_wait,
164 (!batadv_log_empty(debug_log)));
165
166 if (error)
167 return error;
168
169 spin_lock_bh(&debug_log->lock);
170
171 while ((!error) && (i < count) &&
172 (debug_log->log_start != debug_log->log_end)) {
173 char_addr = batadv_log_char_addr(debug_log,
174 debug_log->log_start);
175 c = *char_addr;
176
177 debug_log->log_start++;
178
179 spin_unlock_bh(&debug_log->lock);
180
181 error = __put_user(c, buf);
182
183 spin_lock_bh(&debug_log->lock);
184
185 buf++;
186 i++;
187 }
188
189 spin_unlock_bh(&debug_log->lock);
190
191 if (!error)
192 return i;
193
194 return error;
195}
196
197static unsigned int batadv_log_poll(struct file *file, poll_table *wait)
198{
199 struct batadv_priv *bat_priv = file->private_data;
200 struct batadv_priv_debug_log *debug_log = bat_priv->debug_log;
201
202 poll_wait(file, &debug_log->queue_wait, wait);
203
204 if (!batadv_log_empty(debug_log))
205 return POLLIN | POLLRDNORM;
206
207 return 0;
208}
209
210static const struct file_operations batadv_log_fops = {
211 .open = batadv_log_open,
212 .release = batadv_log_release,
213 .read = batadv_log_read,
214 .poll = batadv_log_poll,
215 .llseek = no_llseek,
216};
217
218static int batadv_debug_log_setup(struct batadv_priv *bat_priv)
219{
220 struct dentry *d;
221
222 if (!bat_priv->debug_dir)
223 goto err;
224
225 bat_priv->debug_log = kzalloc(sizeof(*bat_priv->debug_log), GFP_ATOMIC);
226 if (!bat_priv->debug_log)
227 goto err;
228
229 spin_lock_init(&bat_priv->debug_log->lock);
230 init_waitqueue_head(&bat_priv->debug_log->queue_wait);
231
232 d = debugfs_create_file("log", S_IFREG | S_IRUSR,
233 bat_priv->debug_dir, bat_priv,
234 &batadv_log_fops);
235 if (!d)
236 goto err;
237
238 return 0;
239
240err:
241 return -ENOMEM;
242}
243
244static void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
245{
246 kfree(bat_priv->debug_log);
247 bat_priv->debug_log = NULL;
248}
249#else /* CONFIG_BATMAN_ADV_DEBUG */
250static int batadv_debug_log_setup(struct batadv_priv *bat_priv)
251{
252 return 0;
253}
254
255static void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
256{
257}
258#endif
259
260static int batadv_algorithms_open(struct inode *inode, struct file *file) 48static int batadv_algorithms_open(struct inode *inode, struct file *file)
261{ 49{
262 return single_open(file, batadv_algo_seq_print_text, NULL); 50 return single_open(file, batadv_algo_seq_print_text, NULL);
@@ -363,6 +151,22 @@ static int batadv_nc_nodes_open(struct inode *inode, struct file *file)
363} 151}
364#endif 152#endif
365 153
154#ifdef CONFIG_BATMAN_ADV_MCAST
155/**
156 * batadv_mcast_flags_open - prepare file handler for reads from mcast_flags
157 * @inode: inode which was opened
158 * @file: file handle to be initialized
159 *
160 * Return: 0 on success or negative error number in case of failure
161 */
162static int batadv_mcast_flags_open(struct inode *inode, struct file *file)
163{
164 struct net_device *net_dev = (struct net_device *)inode->i_private;
165
166 return single_open(file, batadv_mcast_flags_seq_print_text, net_dev);
167}
168#endif
169
366#define BATADV_DEBUGINFO(_name, _mode, _open) \ 170#define BATADV_DEBUGINFO(_name, _mode, _open) \
367struct batadv_debuginfo batadv_debuginfo_##_name = { \ 171struct batadv_debuginfo batadv_debuginfo_##_name = { \
368 .attr = { \ 172 .attr = { \
@@ -407,6 +211,9 @@ static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
407#ifdef CONFIG_BATMAN_ADV_NC 211#ifdef CONFIG_BATMAN_ADV_NC
408static BATADV_DEBUGINFO(nc_nodes, S_IRUGO, batadv_nc_nodes_open); 212static BATADV_DEBUGINFO(nc_nodes, S_IRUGO, batadv_nc_nodes_open);
409#endif 213#endif
214#ifdef CONFIG_BATMAN_ADV_MCAST
215static BATADV_DEBUGINFO(mcast_flags, S_IRUGO, batadv_mcast_flags_open);
216#endif
410 217
411static struct batadv_debuginfo *batadv_mesh_debuginfos[] = { 218static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
412 &batadv_debuginfo_neighbors, 219 &batadv_debuginfo_neighbors,
@@ -424,6 +231,9 @@ static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
424#ifdef CONFIG_BATMAN_ADV_NC 231#ifdef CONFIG_BATMAN_ADV_NC
425 &batadv_debuginfo_nc_nodes, 232 &batadv_debuginfo_nc_nodes,
426#endif 233#endif
234#ifdef CONFIG_BATMAN_ADV_MCAST
235 &batadv_debuginfo_mcast_flags,
236#endif
427 NULL, 237 NULL,
428}; 238};
429 239
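With the ring-buffer log machinery removed here (it reappears as log.o in the Makefile hunk earlier), debugfs.c keeps only the table of read-only entries, and mcast_flags shows the wiring for a new one. A hypothetical additional per-mesh entry would follow the same shape — batadv_example_seq_print_text is assumed, not a real symbol:

    static int batadv_example_open(struct inode *inode, struct file *file)
    {
        /* i_private carries the soft interface, as for mcast_flags above */
        struct net_device *net_dev = (struct net_device *)inode->i_private;

        return single_open(file, batadv_example_seq_print_text, net_dev);
    }

    static BATADV_DEBUGINFO(example, S_IRUGO, batadv_example_open);

The entry would then also need to be listed in batadv_mesh_debuginfos[], behind its config guard where applicable.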
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 278800a99c69..fa7646532a13 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -45,9 +45,11 @@
45 45
46#include "hard-interface.h" 46#include "hard-interface.h"
47#include "hash.h" 47#include "hash.h"
48#include "log.h"
48#include "originator.h" 49#include "originator.h"
49#include "send.h" 50#include "send.h"
50#include "translation-table.h" 51#include "translation-table.h"
52#include "tvlv.h"
51 53
52static void batadv_dat_purge(struct work_struct *work); 54static void batadv_dat_purge(struct work_struct *work);
53 55
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 65536db1bff7..0934730fb7ff 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -27,7 +27,6 @@
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/lockdep.h> 28#include <linux/lockdep.h>
29#include <linux/netdevice.h> 29#include <linux/netdevice.h>
30#include <linux/pkt_sched.h>
31#include <linux/skbuff.h> 30#include <linux/skbuff.h>
32#include <linux/slab.h> 31#include <linux/slab.h>
33#include <linux/spinlock.h> 32#include <linux/spinlock.h>
@@ -414,7 +413,7 @@ static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
414 if (!skb_fragment) 413 if (!skb_fragment)
415 goto err; 414 goto err;
416 415
417 skb->priority = TC_PRIO_CONTROL; 416 skb_fragment->priority = skb->priority;
418 417
419 /* Eat the last mtu-bytes of the skb */ 418 /* Eat the last mtu-bytes of the skb */
420 skb_reserve(skb_fragment, header_size + ETH_HLEN); 419 skb_reserve(skb_fragment, header_size + ETH_HLEN);
@@ -434,11 +433,12 @@ err:
434 * @orig_node: final destination of the created fragments 433 * @orig_node: final destination of the created fragments
435 * @neigh_node: next-hop of the created fragments 434 * @neigh_node: next-hop of the created fragments
436 * 435 *
437 * Return: true on success, false otherwise. 436 * Return: the netdev tx status or -1 in case of error.
437 * When -1 is returned the skb is not consumed.
438 */ 438 */
439bool batadv_frag_send_packet(struct sk_buff *skb, 439int batadv_frag_send_packet(struct sk_buff *skb,
440 struct batadv_orig_node *orig_node, 440 struct batadv_orig_node *orig_node,
441 struct batadv_neigh_node *neigh_node) 441 struct batadv_neigh_node *neigh_node)
442{ 442{
443 struct batadv_priv *bat_priv; 443 struct batadv_priv *bat_priv;
444 struct batadv_hard_iface *primary_if = NULL; 444 struct batadv_hard_iface *primary_if = NULL;
@@ -447,7 +447,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
447 unsigned int mtu = neigh_node->if_incoming->net_dev->mtu; 447 unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
448 unsigned int header_size = sizeof(frag_header); 448 unsigned int header_size = sizeof(frag_header);
449 unsigned int max_fragment_size, max_packet_size; 449 unsigned int max_fragment_size, max_packet_size;
450 bool ret = false; 450 int ret = -1;
451 451
452 /* To avoid merge and refragmentation at next-hops we never send 452 /* To avoid merge and refragmentation at next-hops we never send
453 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE 453 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
@@ -458,12 +458,12 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
458 458
459 /* Don't even try to fragment, if we need more than 16 fragments */ 459 /* Don't even try to fragment, if we need more than 16 fragments */
460 if (skb->len > max_packet_size) 460 if (skb->len > max_packet_size)
461 goto out_err; 461 goto out;
462 462
463 bat_priv = orig_node->bat_priv; 463 bat_priv = orig_node->bat_priv;
464 primary_if = batadv_primary_if_get_selected(bat_priv); 464 primary_if = batadv_primary_if_get_selected(bat_priv);
465 if (!primary_if) 465 if (!primary_if)
466 goto out_err; 466 goto out;
467 467
468 /* Create one header to be copied to all fragments */ 468 /* Create one header to be copied to all fragments */
469 frag_header.packet_type = BATADV_UNICAST_FRAG; 469 frag_header.packet_type = BATADV_UNICAST_FRAG;
@@ -473,6 +473,15 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
473 frag_header.reserved = 0; 473 frag_header.reserved = 0;
474 frag_header.no = 0; 474 frag_header.no = 0;
475 frag_header.total_size = htons(skb->len); 475 frag_header.total_size = htons(skb->len);
476
477 /* skb->priority values from 256->263 are magic values to
478 * directly indicate a specific 802.1d priority. This is used
479 * to allow 802.1d priority to be passed directly in from VLAN
480 * tags, etc.
481 */
482 if (skb->priority >= 256 && skb->priority <= 263)
483 frag_header.priority = skb->priority - 256;
484
476 ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr); 485 ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
477 ether_addr_copy(frag_header.dest, orig_node->orig); 486 ether_addr_copy(frag_header.dest, orig_node->orig);
478 487
@@ -480,23 +489,33 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
480 while (skb->len > max_fragment_size) { 489 while (skb->len > max_fragment_size) {
481 skb_fragment = batadv_frag_create(skb, &frag_header, mtu); 490 skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
482 if (!skb_fragment) 491 if (!skb_fragment)
483 goto out_err; 492 goto out;
484 493
485 batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX); 494 batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
486 batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES, 495 batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
487 skb_fragment->len + ETH_HLEN); 496 skb_fragment->len + ETH_HLEN);
488 batadv_send_unicast_skb(skb_fragment, neigh_node); 497 ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
498 if (ret != NET_XMIT_SUCCESS) {
499 /* return -1 so that the caller can free the original
500 * skb
501 */
502 ret = -1;
503 goto out;
504 }
505
489 frag_header.no++; 506 frag_header.no++;
490 507
491 /* The initial check in this function should cover this case */ 508 /* The initial check in this function should cover this case */
492 if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) 509 if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
493 goto out_err; 510 ret = -1;
511 goto out;
512 }
494 } 513 }
495 514
496 /* Make room for the fragment header. */ 515 /* Make room for the fragment header. */
497 if (batadv_skb_head_push(skb, header_size) < 0 || 516 if (batadv_skb_head_push(skb, header_size) < 0 ||
498 pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) 517 pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0)
499 goto out_err; 518 goto out;
500 519
501 memcpy(skb->data, &frag_header, header_size); 520 memcpy(skb->data, &frag_header, header_size);
502 521
@@ -504,11 +523,9 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
504 batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX); 523 batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
505 batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES, 524 batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
506 skb->len + ETH_HLEN); 525 skb->len + ETH_HLEN);
507 batadv_send_unicast_skb(skb, neigh_node); 526 ret = batadv_send_unicast_skb(skb, neigh_node);
508
509 ret = true;
510 527
511out_err: 528out:
512 if (primary_if) 529 if (primary_if)
513 batadv_hardif_put(primary_if); 530 batadv_hardif_put(primary_if);
514 531
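Two behavioural changes land in this fragmentation.c diff: batadv_frag_send_packet() now returns the netdev tx status or -1 (in which case the caller still owns the skb and may free it), and skb->priority values 256..263 are copied into the fragment header as direct 802.1d priorities. A runnable check of that mapping:

    #include <stdio.h>
    #include <stdint.h>

    /* skb->priority 256..263 encodes 802.1d priority 0..7 (value - 256) */
    static int prio_to_8021d(uint32_t skb_priority)
    {
        if (skb_priority >= 256 && skb_priority <= 263)
            return (int)(skb_priority - 256);
        return -1;   /* no direct 802.1d encoding */
    }

    int main(void)
    {
        for (uint32_t p = 255; p <= 264; p++)
            printf("priority %u -> %d\n", p, prio_to_8021d(p));
        return 0;
    }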
diff --git a/net/batman-adv/fragmentation.h b/net/batman-adv/fragmentation.h
index 9ff77c7ef7c7..3202fe329e63 100644
--- a/net/batman-adv/fragmentation.h
+++ b/net/batman-adv/fragmentation.h
@@ -34,9 +34,9 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
34 struct batadv_orig_node *orig_node_src); 34 struct batadv_orig_node *orig_node_src);
35bool batadv_frag_skb_buffer(struct sk_buff **skb, 35bool batadv_frag_skb_buffer(struct sk_buff **skb,
36 struct batadv_orig_node *orig_node); 36 struct batadv_orig_node *orig_node);
37bool batadv_frag_send_packet(struct sk_buff *skb, 37int batadv_frag_send_packet(struct sk_buff *skb,
38 struct batadv_orig_node *orig_node, 38 struct batadv_orig_node *orig_node,
39 struct batadv_neigh_node *neigh_node); 39 struct batadv_neigh_node *neigh_node);
40 40
41/** 41/**
42 * batadv_frag_check_entry - check if a list of fragments has timed out 42 * batadv_frag_check_entry - check if a list of fragments has timed out
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 5839c569f769..63a805d3f96e 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -42,6 +42,7 @@
42 42
43#include "gateway_common.h" 43#include "gateway_common.h"
44#include "hard-interface.h" 44#include "hard-interface.h"
45#include "log.h"
45#include "originator.h" 46#include "originator.h"
46#include "packet.h" 47#include "packet.h"
47#include "routing.h" 48#include "routing.h"
@@ -192,7 +193,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
192 193
193 tq_avg = router_ifinfo->bat_iv.tq_avg; 194 tq_avg = router_ifinfo->bat_iv.tq_avg;
194 195
195 switch (atomic_read(&bat_priv->gw_sel_class)) { 196 switch (atomic_read(&bat_priv->gw.sel_class)) {
196 case 1: /* fast connection */ 197 case 1: /* fast connection */
197 tmp_gw_factor = tq_avg * tq_avg; 198 tmp_gw_factor = tq_avg * tq_avg;
198 tmp_gw_factor *= gw_node->bandwidth_down; 199 tmp_gw_factor *= gw_node->bandwidth_down;
@@ -255,7 +256,7 @@ void batadv_gw_check_client_stop(struct batadv_priv *bat_priv)
255{ 256{
256 struct batadv_gw_node *curr_gw; 257 struct batadv_gw_node *curr_gw;
257 258
258 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT) 259 if (atomic_read(&bat_priv->gw.mode) != BATADV_GW_MODE_CLIENT)
259 return; 260 return;
260 261
261 curr_gw = batadv_gw_get_selected_gw_node(bat_priv); 262 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
@@ -283,7 +284,7 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
283 struct batadv_neigh_ifinfo *router_ifinfo = NULL; 284 struct batadv_neigh_ifinfo *router_ifinfo = NULL;
284 char gw_addr[18] = { '\0' }; 285 char gw_addr[18] = { '\0' };
285 286
286 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT) 287 if (atomic_read(&bat_priv->gw.mode) != BATADV_GW_MODE_CLIENT)
287 goto out; 288 goto out;
288 289
289 curr_gw = batadv_gw_get_selected_gw_node(bat_priv); 290 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
@@ -402,8 +403,8 @@ void batadv_gw_check_election(struct batadv_priv *bat_priv,
402 /* if the routing class is greater than 3 the value tells us how much 403 /* if the routing class is greater than 3 the value tells us how much
403 * greater the TQ value of the new gateway must be 404 * greater the TQ value of the new gateway must be
404 */ 405 */
405 if ((atomic_read(&bat_priv->gw_sel_class) > 3) && 406 if ((atomic_read(&bat_priv->gw.sel_class) > 3) &&
406 (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class))) 407 (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw.sel_class)))
407 goto out; 408 goto out;
408 409
409 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 410 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -638,8 +639,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
638 goto out; 639 goto out;
639 640
640 seq_printf(seq, 641 seq_printf(seq,
641 " %-12s (%s/%i) %17s [%10s]: advertised uplink bandwidth ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n", 642 " Gateway (#/255) Nexthop [outgoingIF]: advertised uplink bandwidth ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
642 "Gateway", "#", BATADV_TQ_MAX_VALUE, "Nexthop", "outgoingIF",
643 BATADV_SOURCE_VERSION, primary_if->net_dev->name, 643 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
644 primary_if->net_dev->dev_addr, net_dev->name); 644 primary_if->net_dev->dev_addr, net_dev->name);
645 645
@@ -821,7 +821,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
821 if (!gw_node) 821 if (!gw_node)
822 goto out; 822 goto out;
823 823
824 switch (atomic_read(&bat_priv->gw_mode)) { 824 switch (atomic_read(&bat_priv->gw.mode)) {
825 case BATADV_GW_MODE_SERVER: 825 case BATADV_GW_MODE_SERVER:
826 /* If we are a GW then we are our best GW. We can artificially 826 /* If we are a GW then we are our best GW. We can artificially
827 * set the tq towards ourself as the maximum value 827 * set the tq towards ourself as the maximum value
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 4423047889e1..d7bc6a87bcc9 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -19,8 +19,8 @@
 #include "main.h"
 
 #include <linux/atomic.h>
-#include <linux/errno.h>
 #include <linux/byteorder/generic.h>
+#include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/math64.h>
 #include <linux/netdevice.h>
@@ -28,7 +28,9 @@
 #include <linux/string.h>
 
 #include "gateway_client.h"
+#include "log.h"
 #include "packet.h"
+#include "tvlv.h"
 
 /**
  * batadv_parse_throughput - parse supplied string buffer to extract throughput
@@ -144,7 +146,7 @@ void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
 	u32 down, up;
 	char gw_mode;
 
-	gw_mode = atomic_read(&bat_priv->gw_mode);
+	gw_mode = atomic_read(&bat_priv->gw.mode);
 
 	switch (gw_mode) {
 	case BATADV_GW_MODE_OFF:
@@ -241,8 +243,8 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 
 	/* restart gateway selection if fast or late switching was enabled */
 	if ((gateway.bandwidth_down != 0) &&
-	    (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_CLIENT) &&
-	    (atomic_read(&bat_priv->gw_sel_class) > 2))
+	    (atomic_read(&bat_priv->gw.mode) == BATADV_GW_MODE_CLIENT) &&
+	    (atomic_read(&bat_priv->gw.sel_class) > 2))
 		batadv_gw_check_election(bat_priv, orig);
 }
 
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 8c2f39962fa5..1f9080840566 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -23,9 +23,9 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
+#include <linux/if.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
-#include <linux/if.h>
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/list.h>
@@ -37,10 +37,12 @@
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 
+#include "bat_v.h"
 #include "bridge_loop_avoidance.h"
 #include "debugfs.h"
 #include "distributed-arp-table.h"
 #include "gateway_client.h"
+#include "log.h"
 #include "originator.h"
 #include "packet.h"
 #include "send.h"
@@ -245,7 +247,7 @@ static void batadv_primary_if_select(struct batadv_priv *bat_priv,
 	if (!new_hard_iface)
 		goto out;
 
-	bat_priv->bat_algo_ops->bat_primary_iface_set(new_hard_iface);
+	bat_priv->algo_ops->iface.primary_set(new_hard_iface);
 	batadv_primary_if_update_addr(bat_priv, curr_hard_iface);
 
 out:
@@ -392,7 +394,7 @@ batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface)
 
 	bat_priv = netdev_priv(hard_iface->soft_iface);
 
-	bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface);
+	bat_priv->algo_ops->iface.update_mac(hard_iface);
 	hard_iface->if_status = BATADV_IF_TO_BE_ACTIVATED;
 
 	/* the first active interface becomes our primary interface or
@@ -407,8 +409,8 @@ batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface)
 
 	batadv_update_min_mtu(hard_iface->soft_iface);
 
-	if (bat_priv->bat_algo_ops->bat_iface_activate)
-		bat_priv->bat_algo_ops->bat_iface_activate(hard_iface);
+	if (bat_priv->algo_ops->iface.activate)
+		bat_priv->algo_ops->iface.activate(hard_iface);
 
 out:
 	if (primary_if)
@@ -506,7 +508,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
 	if (ret)
 		goto err_dev;
 
-	ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface);
+	ret = bat_priv->algo_ops->iface.enable(hard_iface);
 	if (ret < 0)
 		goto err_upper;
 
@@ -515,7 +517,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
 	hard_iface->if_status = BATADV_IF_INACTIVE;
 	ret = batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
 	if (ret < 0) {
-		bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
+		bat_priv->algo_ops->iface.disable(hard_iface);
 		bat_priv->num_ifaces--;
 		hard_iface->if_status = BATADV_IF_NOT_IN_USE;
 		goto err_upper;
@@ -553,9 +555,6 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
 
 	batadv_hardif_recalc_extra_skbroom(soft_iface);
 
-	/* begin scheduling originator messages on that interface */
-	batadv_schedule_bat_ogm(hard_iface);
-
 out:
 	return 0;
 
@@ -599,7 +598,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
 		batadv_hardif_put(new_if);
 	}
 
-	bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
+	bat_priv->algo_ops->iface.disable(hard_iface);
 	hard_iface->if_status = BATADV_IF_NOT_IN_USE;
 
 	/* delete all references to this hard_iface */
@@ -686,6 +685,8 @@ batadv_hardif_add_interface(struct net_device *net_dev)
 	if (batadv_is_wifi_netdev(net_dev))
 		hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS;
 
+	batadv_v_hardif_init(hard_iface);
+
 	/* extra reference for return */
 	kref_init(&hard_iface->refcount);
 	kref_get(&hard_iface->refcount);
@@ -782,7 +783,7 @@ static int batadv_hard_if_event(struct notifier_block *this,
 	batadv_check_known_mac_addr(hard_iface->net_dev);
 
 	bat_priv = netdev_priv(hard_iface->soft_iface);
-	bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface);
+	bat_priv->algo_ops->iface.update_mac(hard_iface);
 
 	primary_if = batadv_primary_if_get_selected(bat_priv);
 	if (!primary_if)
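Every former bat_algo_ops->bat_iface_* call above now goes through a nested iface group inside the renamed algo_ops. A sketch of the shape this implies, inferred purely from the call sites in these hunks; the real definition sits in types.h, outside this excerpt:

	/* Assumed layout, reconstructed from the call sites above: */
	struct batadv_algo_iface_ops {
		int (*enable)(struct batadv_hard_iface *hard_iface);
		void (*disable)(struct batadv_hard_iface *hard_iface);
		void (*update_mac)(struct batadv_hard_iface *hard_iface);
		void (*primary_set)(struct batadv_hard_iface *hard_iface);
		/* optional; NULL-checked before use at the call site: */
		void (*activate)(struct batadv_hard_iface *hard_iface);
	};

enable returns an int because its result is error-checked above; the remaining hooks are invoked for side effects only.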
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 777aea10cd8f..378cc1119d66 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -45,6 +45,7 @@
 #include <linux/wait.h>
 
 #include "hard-interface.h"
+#include "log.h"
 #include "originator.h"
 #include "packet.h"
 #include "send.h"
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
new file mode 100644
index 000000000000..56dc532f7a2c
--- /dev/null
+++ b/net/batman-adv/log.c
@@ -0,0 +1,231 @@
+/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "log.h"
+#include "main.h"
+
+#include <linux/compiler.h>
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/sched.h> /* for linux/wait.h */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <stdarg.h>
+
+#define BATADV_LOG_BUFF_MASK (batadv_log_buff_len - 1)
+
+static const int batadv_log_buff_len = BATADV_LOG_BUF_LEN;
+
+static char *batadv_log_char_addr(struct batadv_priv_debug_log *debug_log,
+				  size_t idx)
+{
+	return &debug_log->log_buff[idx & BATADV_LOG_BUFF_MASK];
+}
+
+static void batadv_emit_log_char(struct batadv_priv_debug_log *debug_log,
+				 char c)
+{
+	char *char_addr;
+
+	char_addr = batadv_log_char_addr(debug_log, debug_log->log_end);
+	*char_addr = c;
+	debug_log->log_end++;
+
+	if (debug_log->log_end - debug_log->log_start > batadv_log_buff_len)
+		debug_log->log_start = debug_log->log_end - batadv_log_buff_len;
+}
+
+__printf(2, 3)
+static int batadv_fdebug_log(struct batadv_priv_debug_log *debug_log,
+			     const char *fmt, ...)
+{
+	va_list args;
+	static char debug_log_buf[256];
+	char *p;
+
+	if (!debug_log)
+		return 0;
+
+	spin_lock_bh(&debug_log->lock);
+	va_start(args, fmt);
+	vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args);
+	va_end(args);
+
+	for (p = debug_log_buf; *p != 0; p++)
+		batadv_emit_log_char(debug_log, *p);
+
+	spin_unlock_bh(&debug_log->lock);
+
+	wake_up(&debug_log->queue_wait);
+
+	return 0;
+}
+
+int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
+{
+	va_list args;
+	char tmp_log_buf[256];
+
+	va_start(args, fmt);
+	vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
+	batadv_fdebug_log(bat_priv->debug_log, "[%10u] %s",
+			  jiffies_to_msecs(jiffies), tmp_log_buf);
+	va_end(args);
+
+	return 0;
+}
+
+static int batadv_log_open(struct inode *inode, struct file *file)
+{
+	if (!try_module_get(THIS_MODULE))
+		return -EBUSY;
+
+	nonseekable_open(inode, file);
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static int batadv_log_release(struct inode *inode, struct file *file)
+{
+	module_put(THIS_MODULE);
+	return 0;
+}
+
+static bool batadv_log_empty(struct batadv_priv_debug_log *debug_log)
+{
+	return !(debug_log->log_start - debug_log->log_end);
+}
+
+static ssize_t batadv_log_read(struct file *file, char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	struct batadv_priv *bat_priv = file->private_data;
+	struct batadv_priv_debug_log *debug_log = bat_priv->debug_log;
+	int error, i = 0;
+	char *char_addr;
+	char c;
+
+	if ((file->f_flags & O_NONBLOCK) && batadv_log_empty(debug_log))
+		return -EAGAIN;
+
+	if (!buf)
+		return -EINVAL;
+
+	if (count == 0)
+		return 0;
+
+	if (!access_ok(VERIFY_WRITE, buf, count))
+		return -EFAULT;
+
+	error = wait_event_interruptible(debug_log->queue_wait,
+					 (!batadv_log_empty(debug_log)));
+
+	if (error)
+		return error;
+
+	spin_lock_bh(&debug_log->lock);
+
+	while ((!error) && (i < count) &&
+	       (debug_log->log_start != debug_log->log_end)) {
+		char_addr = batadv_log_char_addr(debug_log,
+						 debug_log->log_start);
+		c = *char_addr;
+
+		debug_log->log_start++;
+
+		spin_unlock_bh(&debug_log->lock);
+
+		error = __put_user(c, buf);
+
+		spin_lock_bh(&debug_log->lock);
+
+		buf++;
+		i++;
+	}
+
+	spin_unlock_bh(&debug_log->lock);
+
+	if (!error)
+		return i;
+
+	return error;
+}
+
+static unsigned int batadv_log_poll(struct file *file, poll_table *wait)
+{
+	struct batadv_priv *bat_priv = file->private_data;
+	struct batadv_priv_debug_log *debug_log = bat_priv->debug_log;
+
+	poll_wait(file, &debug_log->queue_wait, wait);
+
+	if (!batadv_log_empty(debug_log))
+		return POLLIN | POLLRDNORM;
+
+	return 0;
+}
+
+static const struct file_operations batadv_log_fops = {
+	.open = batadv_log_open,
+	.release = batadv_log_release,
+	.read = batadv_log_read,
+	.poll = batadv_log_poll,
+	.llseek = no_llseek,
+};
+
+int batadv_debug_log_setup(struct batadv_priv *bat_priv)
+{
+	struct dentry *d;
+
+	if (!bat_priv->debug_dir)
+		goto err;
+
+	bat_priv->debug_log = kzalloc(sizeof(*bat_priv->debug_log), GFP_ATOMIC);
+	if (!bat_priv->debug_log)
+		goto err;
+
+	spin_lock_init(&bat_priv->debug_log->lock);
+	init_waitqueue_head(&bat_priv->debug_log->queue_wait);
+
+	d = debugfs_create_file("log", S_IFREG | S_IRUSR,
+				bat_priv->debug_dir, bat_priv,
+				&batadv_log_fops);
+	if (!d)
+		goto err;
+
+	return 0;
+
+err:
+	return -ENOMEM;
+}
+
+void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
+{
+	kfree(bat_priv->debug_log);
+	bat_priv->debug_log = NULL;
+}
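The new log.c keeps the debugfs log in a ring buffer indexed by free-running counters: batadv_emit_log_char() writes at log_end & BATADV_LOG_BUFF_MASK and pushes log_start forward once the window exceeds the buffer length. This only works because batadv_log_buff_len is a power of two, which makes idx & (len - 1) equivalent to idx % len. A self-contained illustration of that indexing trick, not batman-adv code:

	#include <stdio.h>

	#define LOG_LEN  8			/* must be a power of two */
	#define LOG_MASK (LOG_LEN - 1)

	int main(void)
	{
		unsigned int idx;

		/* any free-running index maps back into [0, LOG_LEN) */
		for (idx = 5; idx < 5 + 2 * LOG_LEN; idx++)
			printf("%2u -> %u\n", idx, idx & LOG_MASK);
		return 0;
	}

The same wrap-around tolerance is why batadv_log_empty() can compare the raw log_start and log_end counters directly.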
diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h
new file mode 100644
index 000000000000..e0e1a88c3e58
--- /dev/null
+++ b/net/batman-adv/log.h
@@ -0,0 +1,111 @@
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _NET_BATMAN_ADV_LOG_H_
+#define _NET_BATMAN_ADV_LOG_H_
+
+#include "main.h"
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/printk.h>
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+
+int batadv_debug_log_setup(struct batadv_priv *bat_priv);
+void batadv_debug_log_cleanup(struct batadv_priv *bat_priv);
+
+#else
+
+static inline int batadv_debug_log_setup(struct batadv_priv *bat_priv)
+{
+	return 0;
+}
+
+static inline void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
+{
+}
+
+#endif
+
+/**
+ * enum batadv_dbg_level - available log levels
+ * @BATADV_DBG_BATMAN: OGM and TQ computations related messages
+ * @BATADV_DBG_ROUTES: route added / changed / deleted
+ * @BATADV_DBG_TT: translation table messages
+ * @BATADV_DBG_BLA: bridge loop avoidance messages
+ * @BATADV_DBG_DAT: ARP snooping and DAT related messages
+ * @BATADV_DBG_NC: network coding related messages
+ * @BATADV_DBG_MCAST: multicast related messages
+ * @BATADV_DBG_TP_METER: throughput meter messages
+ * @BATADV_DBG_ALL: the union of all the above log levels
+ */
+enum batadv_dbg_level {
+	BATADV_DBG_BATMAN = BIT(0),
+	BATADV_DBG_ROUTES = BIT(1),
+	BATADV_DBG_TT = BIT(2),
+	BATADV_DBG_BLA = BIT(3),
+	BATADV_DBG_DAT = BIT(4),
+	BATADV_DBG_NC = BIT(5),
+	BATADV_DBG_MCAST = BIT(6),
+	BATADV_DBG_TP_METER = BIT(7),
+	BATADV_DBG_ALL = 255,
+};
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
+__printf(2, 3);
+
+/* possibly ratelimited debug output */
+#define _batadv_dbg(type, bat_priv, ratelimited, fmt, arg...)	\
+	do {							\
+		if (atomic_read(&bat_priv->log_level) & type &&	\
+		    (!ratelimited || net_ratelimit()))		\
+			batadv_debug_log(bat_priv, fmt, ## arg);\
+	}							\
+	while (0)
+#else /* !CONFIG_BATMAN_ADV_DEBUG */
+__printf(4, 5)
+static inline void _batadv_dbg(int type __always_unused,
+			       struct batadv_priv *bat_priv __always_unused,
+			       int ratelimited __always_unused,
+			       const char *fmt __always_unused, ...)
+{
+}
+#endif
+
+#define batadv_dbg(type, bat_priv, arg...) \
+	_batadv_dbg(type, bat_priv, 0, ## arg)
+#define batadv_dbg_ratelimited(type, bat_priv, arg...) \
+	_batadv_dbg(type, bat_priv, 1, ## arg)
+
+#define batadv_info(net_dev, fmt, arg...)				\
+	do {								\
+		struct net_device *_netdev = (net_dev);			\
+		struct batadv_priv *_batpriv = netdev_priv(_netdev);	\
+		batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg);	\
+		pr_info("%s: " fmt, _netdev->name, ## arg);		\
+	} while (0)
+#define batadv_err(net_dev, fmt, arg...)				\
+	do {								\
+		struct net_device *_netdev = (net_dev);			\
+		struct batadv_priv *_batpriv = netdev_priv(_netdev);	\
+		batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg);	\
+		pr_err("%s: " fmt, _netdev->name, ## arg);		\
+	} while (0)
+
+#endif /* _NET_BATMAN_ADV_LOG_H_ */
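Note that with eight level bits declared up to BIT(7), the "union of all the above" documented for BATADV_DBG_ALL is 255; the value is set accordingly above. Usage of these macros is gated by the per-mesh log_level bitmask: a message is emitted only when its class bit is set and, for the ratelimited variant, when net_ratelimit() allows it. A short sketch; the message texts and variables are made up for illustration:

	batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
		   "tp meter: session towards %pM started\n", dst_addr);
	batadv_dbg_ratelimited(BATADV_DBG_ROUTES, bat_priv,
			       "route towards %pM changed\n", orig_addr);

With CONFIG_BATMAN_ADV_DEBUG disabled, the inline stub compiles all of this away while still type-checking the arguments.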
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 5f2974bd1227..fe4c5e29f96b 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -31,16 +31,13 @@
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/list.h>
-#include <linux/lockdep.h>
 #include <linux/module.h>
-#include <linux/moduleparam.h>
 #include <linux/netdevice.h>
-#include <linux/pkt_sched.h>
+#include <linux/printk.h>
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 #include <linux/seq_file.h>
 #include <linux/skbuff.h>
-#include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
@@ -49,6 +46,8 @@
 #include <net/rtnetlink.h>
 
 #include "bat_algo.h"
+#include "bat_iv_ogm.h"
+#include "bat_v.h"
 #include "bridge_loop_avoidance.h"
 #include "debugfs.h"
 #include "distributed-arp-table.h"
@@ -56,13 +55,16 @@
 #include "gateway_common.h"
 #include "hard-interface.h"
 #include "icmp_socket.h"
+#include "log.h"
 #include "multicast.h"
+#include "netlink.h"
 #include "network-coding.h"
 #include "originator.h"
 #include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "soft-interface.h"
+#include "tp_meter.h"
 #include "translation-table.h"
 
 /* List manipulations on hardif_list have to be rtnl_lock()'ed,
@@ -71,8 +73,6 @@
 struct list_head batadv_hardif_list;
 static int (*batadv_rx_handler[256])(struct sk_buff *,
				      struct batadv_hard_iface *);
-char batadv_routing_algo[20] = "BATMAN_IV";
-static struct hlist_head batadv_algo_list;
 
 unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 
@@ -83,13 +83,14 @@ static void batadv_recv_handler_init(void);
 static int __init batadv_init(void)
 {
 	INIT_LIST_HEAD(&batadv_hardif_list);
-	INIT_HLIST_HEAD(&batadv_algo_list);
+	batadv_algo_init();
 
 	batadv_recv_handler_init();
 
 	batadv_v_init();
 	batadv_iv_init();
 	batadv_nc_init();
+	batadv_tp_meter_init();
 
 	batadv_event_workqueue = create_singlethread_workqueue("bat_events");
 
@@ -101,6 +102,7 @@ static int __init batadv_init(void)
 
 	register_netdevice_notifier(&batadv_hard_if_notifier);
 	rtnl_link_register(&batadv_link_ops);
+	batadv_netlink_register();
 
 	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
 		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);
@@ -111,6 +113,7 @@ static int __init batadv_init(void)
 static void __exit batadv_exit(void)
 {
 	batadv_debugfs_destroy();
+	batadv_netlink_unregister();
 	rtnl_link_unregister(&batadv_link_ops);
 	unregister_netdevice_notifier(&batadv_hard_if_notifier);
 	batadv_hardif_remove_interfaces();
@@ -141,6 +144,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
 	spin_lock_init(&bat_priv->tvlv.container_list_lock);
 	spin_lock_init(&bat_priv->tvlv.handler_list_lock);
 	spin_lock_init(&bat_priv->softif_vlan_list_lock);
+	spin_lock_init(&bat_priv->tp_list_lock);
 
 	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
 	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
@@ -159,6 +163,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
 	INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
 	INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
 	INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);
+	INIT_HLIST_HEAD(&bat_priv->tp_list);
 
 	ret = batadv_v_mesh_init(bat_priv);
 	if (ret < 0)
@@ -538,78 +543,6 @@ void batadv_recv_handler_unregister(u8 packet_type)
 	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
 }
 
-static struct batadv_algo_ops *batadv_algo_get(char *name)
-{
-	struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
-
-	hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
-		if (strcmp(bat_algo_ops_tmp->name, name) != 0)
-			continue;
-
-		bat_algo_ops = bat_algo_ops_tmp;
-		break;
-	}
-
-	return bat_algo_ops;
-}
-
-int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
-{
-	struct batadv_algo_ops *bat_algo_ops_tmp;
-
-	bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
-	if (bat_algo_ops_tmp) {
-		pr_info("Trying to register already registered routing algorithm: %s\n",
-			bat_algo_ops->name);
-		return -EEXIST;
-	}
-
-	/* all algorithms must implement all ops (for now) */
-	if (!bat_algo_ops->bat_iface_enable ||
-	    !bat_algo_ops->bat_iface_disable ||
-	    !bat_algo_ops->bat_iface_update_mac ||
-	    !bat_algo_ops->bat_primary_iface_set ||
-	    !bat_algo_ops->bat_ogm_schedule ||
-	    !bat_algo_ops->bat_ogm_emit ||
-	    !bat_algo_ops->bat_neigh_cmp ||
-	    !bat_algo_ops->bat_neigh_is_similar_or_better) {
-		pr_info("Routing algo '%s' does not implement required ops\n",
-			bat_algo_ops->name);
-		return -EINVAL;
-	}
-
-	INIT_HLIST_NODE(&bat_algo_ops->list);
-	hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
-
-	return 0;
-}
-
-int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
-{
-	struct batadv_algo_ops *bat_algo_ops;
-
-	bat_algo_ops = batadv_algo_get(name);
-	if (!bat_algo_ops)
-		return -EINVAL;
-
-	bat_priv->bat_algo_ops = bat_algo_ops;
-
-	return 0;
-}
-
-int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
-{
-	struct batadv_algo_ops *bat_algo_ops;
-
-	seq_puts(seq, "Available routing algorithms:\n");
-
-	hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
-		seq_printf(seq, " * %s\n", bat_algo_ops->name);
-	}
-
-	return 0;
-}
-
 /**
  * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
  * the header
@@ -644,594 +577,6 @@ __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
 }
 
 /**
- * batadv_tvlv_handler_release - release tvlv handler from lists and queue for
- * free after rcu grace period
- * @ref: kref pointer of the tvlv
- */
-static void batadv_tvlv_handler_release(struct kref *ref)
-{
-	struct batadv_tvlv_handler *tvlv_handler;
-
-	tvlv_handler = container_of(ref, struct batadv_tvlv_handler, refcount);
-	kfree_rcu(tvlv_handler, rcu);
-}
-
-/**
- * batadv_tvlv_handler_put - decrement the tvlv container refcounter and
- * possibly release it
- * @tvlv_handler: the tvlv handler to free
- */
-static void batadv_tvlv_handler_put(struct batadv_tvlv_handler *tvlv_handler)
-{
-	kref_put(&tvlv_handler->refcount, batadv_tvlv_handler_release);
-}
-
-/**
- * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
- * based on the provided type and version (both need to match)
- * @bat_priv: the bat priv with all the soft interface information
- * @type: tvlv handler type to look for
- * @version: tvlv handler version to look for
- *
- * Return: tvlv handler if found or NULL otherwise.
- */
-static struct batadv_tvlv_handler *
-batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
-{
-	struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;
-
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(tvlv_handler_tmp,
-				 &bat_priv->tvlv.handler_list, list) {
-		if (tvlv_handler_tmp->type != type)
-			continue;
-
-		if (tvlv_handler_tmp->version != version)
-			continue;
-
-		if (!kref_get_unless_zero(&tvlv_handler_tmp->refcount))
-			continue;
-
-		tvlv_handler = tvlv_handler_tmp;
-		break;
-	}
-	rcu_read_unlock();
-
-	return tvlv_handler;
-}
-
-/**
- * batadv_tvlv_container_release - release tvlv from lists and free
- * @ref: kref pointer of the tvlv
- */
-static void batadv_tvlv_container_release(struct kref *ref)
-{
-	struct batadv_tvlv_container *tvlv;
-
-	tvlv = container_of(ref, struct batadv_tvlv_container, refcount);
-	kfree(tvlv);
-}
-
-/**
- * batadv_tvlv_container_put - decrement the tvlv container refcounter and
- * possibly release it
- * @tvlv: the tvlv container to free
- */
-static void batadv_tvlv_container_put(struct batadv_tvlv_container *tvlv)
-{
-	kref_put(&tvlv->refcount, batadv_tvlv_container_release);
-}
-
-/**
- * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
- * list based on the provided type and version (both need to match)
- * @bat_priv: the bat priv with all the soft interface information
- * @type: tvlv container type to look for
- * @version: tvlv container version to look for
- *
- * Has to be called with the appropriate locks being acquired
- * (tvlv.container_list_lock).
- *
- * Return: tvlv container if found or NULL otherwise.
- */
-static struct batadv_tvlv_container *
-batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
-{
-	struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;
-
-	lockdep_assert_held(&bat_priv->tvlv.container_list_lock);
-
-	hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
-		if (tvlv_tmp->tvlv_hdr.type != type)
-			continue;
-
-		if (tvlv_tmp->tvlv_hdr.version != version)
-			continue;
-
-		kref_get(&tvlv_tmp->refcount);
-		tvlv = tvlv_tmp;
-		break;
-	}
-
-	return tvlv;
-}
-
-/**
- * batadv_tvlv_container_list_size - calculate the size of the tvlv container
- * list entries
- * @bat_priv: the bat priv with all the soft interface information
- *
- * Has to be called with the appropriate locks being acquired
- * (tvlv.container_list_lock).
- *
- * Return: size of all currently registered tvlv containers in bytes.
- */
-static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
-{
-	struct batadv_tvlv_container *tvlv;
-	u16 tvlv_len = 0;
-
-	lockdep_assert_held(&bat_priv->tvlv.container_list_lock);
-
-	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
-		tvlv_len += sizeof(struct batadv_tvlv_hdr);
-		tvlv_len += ntohs(tvlv->tvlv_hdr.len);
-	}
-
-	return tvlv_len;
-}
-
-/**
- * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
- * list
- * @bat_priv: the bat priv with all the soft interface information
- * @tvlv: the to be removed tvlv container
- *
- * Has to be called with the appropriate locks being acquired
- * (tvlv.container_list_lock).
- */
-static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv,
-					 struct batadv_tvlv_container *tvlv)
-{
-	lockdep_assert_held(&bat_priv->tvlv.container_list_lock);
-
-	if (!tvlv)
-		return;
-
-	hlist_del(&tvlv->list);
-
-	/* first call to decrement the counter, second call to free */
-	batadv_tvlv_container_put(tvlv);
-	batadv_tvlv_container_put(tvlv);
-}
-
-/**
- * batadv_tvlv_container_unregister - unregister tvlv container based on the
- * provided type and version (both need to match)
- * @bat_priv: the bat priv with all the soft interface information
- * @type: tvlv container type to unregister
- * @version: tvlv container type to unregister
- */
-void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
-				      u8 type, u8 version)
-{
-	struct batadv_tvlv_container *tvlv;
-
-	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
-	tvlv = batadv_tvlv_container_get(bat_priv, type, version);
-	batadv_tvlv_container_remove(bat_priv, tvlv);
-	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
-}
-
-/**
- * batadv_tvlv_container_register - register tvlv type, version and content
- * to be propagated with each (primary interface) OGM
- * @bat_priv: the bat priv with all the soft interface information
- * @type: tvlv container type
- * @version: tvlv container version
- * @tvlv_value: tvlv container content
- * @tvlv_value_len: tvlv container content length
- *
- * If a container of the same type and version was already registered the new
- * content is going to replace the old one.
- */
-void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
-				    u8 type, u8 version,
-				    void *tvlv_value, u16 tvlv_value_len)
-{
-	struct batadv_tvlv_container *tvlv_old, *tvlv_new;
-
-	if (!tvlv_value)
-		tvlv_value_len = 0;
-
-	tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
-	if (!tvlv_new)
-		return;
-
-	tvlv_new->tvlv_hdr.version = version;
-	tvlv_new->tvlv_hdr.type = type;
-	tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);
-
-	memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
-	INIT_HLIST_NODE(&tvlv_new->list);
-	kref_init(&tvlv_new->refcount);
-
-	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
-	tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
-	batadv_tvlv_container_remove(bat_priv, tvlv_old);
-	hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
-	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
-}
-
-/**
- * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
- * requested packet size
- * @packet_buff: packet buffer
- * @packet_buff_len: packet buffer size
- * @min_packet_len: requested packet minimum size
- * @additional_packet_len: requested additional packet size on top of minimum
- * size
- *
- * Return: true of the packet buffer could be changed to the requested size,
- * false otherwise.
- */
-static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
-					    int *packet_buff_len,
-					    int min_packet_len,
-					    int additional_packet_len)
-{
-	unsigned char *new_buff;
-
-	new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);
-
-	/* keep old buffer if kmalloc should fail */
-	if (!new_buff)
-		return false;
-
-	memcpy(new_buff, *packet_buff, min_packet_len);
-	kfree(*packet_buff);
-	*packet_buff = new_buff;
-	*packet_buff_len = min_packet_len + additional_packet_len;
-
-	return true;
-}
-
-/**
- * batadv_tvlv_container_ogm_append - append tvlv container content to given
- * OGM packet buffer
- * @bat_priv: the bat priv with all the soft interface information
- * @packet_buff: ogm packet buffer
- * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
- * content
- * @packet_min_len: ogm header size to be preserved for the OGM itself
- *
- * The ogm packet might be enlarged or shrunk depending on the current size
- * and the size of the to-be-appended tvlv containers.
- *
- * Return: size of all appended tvlv containers in bytes.
- */
-u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
-				     unsigned char **packet_buff,
-				     int *packet_buff_len, int packet_min_len)
-{
-	struct batadv_tvlv_container *tvlv;
-	struct batadv_tvlv_hdr *tvlv_hdr;
-	u16 tvlv_value_len;
-	void *tvlv_value;
-	bool ret;
-
-	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
-	tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);
-
-	ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
-					      packet_min_len, tvlv_value_len);
-
-	if (!ret)
-		goto end;
-
-	if (!tvlv_value_len)
-		goto end;
-
-	tvlv_value = (*packet_buff) + packet_min_len;
-
-	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
-		tvlv_hdr = tvlv_value;
-		tvlv_hdr->type = tvlv->tvlv_hdr.type;
-		tvlv_hdr->version = tvlv->tvlv_hdr.version;
-		tvlv_hdr->len = tvlv->tvlv_hdr.len;
-		tvlv_value = tvlv_hdr + 1;
-		memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
-		tvlv_value = (u8 *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
-	}
-
-end:
-	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
-	return tvlv_value_len;
-}
-
-/**
- * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
- * appropriate handlers
- * @bat_priv: the bat priv with all the soft interface information
- * @tvlv_handler: tvlv callback function handling the tvlv content
- * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
- * @orig_node: orig node emitting the ogm packet
- * @src: source mac address of the unicast packet
- * @dst: destination mac address of the unicast packet
- * @tvlv_value: tvlv content
- * @tvlv_value_len: tvlv content length
- *
- * Return: success if handler was not found or the return value of the handler
- * callback.
- */
-static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
-				    struct batadv_tvlv_handler *tvlv_handler,
-				    bool ogm_source,
-				    struct batadv_orig_node *orig_node,
-				    u8 *src, u8 *dst,
-				    void *tvlv_value, u16 tvlv_value_len)
-{
-	if (!tvlv_handler)
-		return NET_RX_SUCCESS;
-
-	if (ogm_source) {
-		if (!tvlv_handler->ogm_handler)
-			return NET_RX_SUCCESS;
-
-		if (!orig_node)
-			return NET_RX_SUCCESS;
-
-		tvlv_handler->ogm_handler(bat_priv, orig_node,
-					  BATADV_NO_FLAGS,
-					  tvlv_value, tvlv_value_len);
-		tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
-	} else {
-		if (!src)
-			return NET_RX_SUCCESS;
-
-		if (!dst)
-			return NET_RX_SUCCESS;
-
-		if (!tvlv_handler->unicast_handler)
-			return NET_RX_SUCCESS;
-
-		return tvlv_handler->unicast_handler(bat_priv, src,
-						     dst, tvlv_value,
-						     tvlv_value_len);
-	}
-
-	return NET_RX_SUCCESS;
-}
-
-/**
- * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
- * appropriate handlers
- * @bat_priv: the bat priv with all the soft interface information
- * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
- * @orig_node: orig node emitting the ogm packet
- * @src: source mac address of the unicast packet
- * @dst: destination mac address of the unicast packet
- * @tvlv_value: tvlv content
- * @tvlv_value_len: tvlv content length
- *
- * Return: success when processing an OGM or the return value of all called
- * handler callbacks.
- */
-int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
-				   bool ogm_source,
-				   struct batadv_orig_node *orig_node,
-				   u8 *src, u8 *dst,
-				   void *tvlv_value, u16 tvlv_value_len)
-{
-	struct batadv_tvlv_handler *tvlv_handler;
-	struct batadv_tvlv_hdr *tvlv_hdr;
-	u16 tvlv_value_cont_len;
-	u8 cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
-	int ret = NET_RX_SUCCESS;
-
-	while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
-		tvlv_hdr = tvlv_value;
-		tvlv_value_cont_len = ntohs(tvlv_hdr->len);
-		tvlv_value = tvlv_hdr + 1;
-		tvlv_value_len -= sizeof(*tvlv_hdr);
-
-		if (tvlv_value_cont_len > tvlv_value_len)
-			break;
-
-		tvlv_handler = batadv_tvlv_handler_get(bat_priv,
-						       tvlv_hdr->type,
-						       tvlv_hdr->version);
-
-		ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
-						ogm_source, orig_node,
-						src, dst, tvlv_value,
-						tvlv_value_cont_len);
-		if (tvlv_handler)
-			batadv_tvlv_handler_put(tvlv_handler);
-		tvlv_value = (u8 *)tvlv_value + tvlv_value_cont_len;
-		tvlv_value_len -= tvlv_value_cont_len;
-	}
-
-	if (!ogm_source)
-		return ret;
-
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(tvlv_handler,
-				 &bat_priv->tvlv.handler_list, list) {
-		if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
-		    !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
-			tvlv_handler->ogm_handler(bat_priv, orig_node,
-						  cifnotfound, NULL, 0);
-
-		tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
-	}
-	rcu_read_unlock();
-
-	return NET_RX_SUCCESS;
-}
-
-/**
- * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
- * handlers
- * @bat_priv: the bat priv with all the soft interface information
- * @batadv_ogm_packet: ogm packet containing the tvlv containers
- * @orig_node: orig node emitting the ogm packet
- */
-void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
-			     struct batadv_ogm_packet *batadv_ogm_packet,
-			     struct batadv_orig_node *orig_node)
-{
-	void *tvlv_value;
-	u16 tvlv_value_len;
-
-	if (!batadv_ogm_packet)
-		return;
-
-	tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
-	if (!tvlv_value_len)
-		return;
-
-	tvlv_value = batadv_ogm_packet + 1;
-
-	batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
-				       tvlv_value, tvlv_value_len);
-}
-
-/**
- * batadv_tvlv_handler_register - register tvlv handler based on the provided
- * type and version (both need to match) for ogm tvlv payload and/or unicast
- * payload
- * @bat_priv: the bat priv with all the soft interface information
- * @optr: ogm tvlv handler callback function. This function receives the orig
- * node, flags and the tvlv content as argument to process.
- * @uptr: unicast tvlv handler callback function. This function receives the
- * source & destination of the unicast packet as well as the tvlv content
- * to process.
- * @type: tvlv handler type to be registered
- * @version: tvlv handler version to be registered
- * @flags: flags to enable or disable TVLV API behavior
- */
-void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
-				  void (*optr)(struct batadv_priv *bat_priv,
-					       struct batadv_orig_node *orig,
-					       u8 flags,
-					       void *tvlv_value,
-					       u16 tvlv_value_len),
-				  int (*uptr)(struct batadv_priv *bat_priv,
-					      u8 *src, u8 *dst,
-					      void *tvlv_value,
-					      u16 tvlv_value_len),
-				  u8 type, u8 version, u8 flags)
-{
-	struct batadv_tvlv_handler *tvlv_handler;
-
-	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
-	if (tvlv_handler) {
-		batadv_tvlv_handler_put(tvlv_handler);
-		return;
-	}
-
-	tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
-	if (!tvlv_handler)
-		return;
-
-	tvlv_handler->ogm_handler = optr;
-	tvlv_handler->unicast_handler = uptr;
-	tvlv_handler->type = type;
-	tvlv_handler->version = version;
-	tvlv_handler->flags = flags;
-	kref_init(&tvlv_handler->refcount);
-	INIT_HLIST_NODE(&tvlv_handler->list);
-
-	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
-	hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
-	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
-}
-
-/**
- * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
- * provided type and version (both need to match)
- * @bat_priv: the bat priv with all the soft interface information
- * @type: tvlv handler type to be unregistered
- * @version: tvlv handler version to be unregistered
- */
-void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
-				    u8 type, u8 version)
-{
-	struct batadv_tvlv_handler *tvlv_handler;
-
-	tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
-	if (!tvlv_handler)
-		return;
-
-	batadv_tvlv_handler_put(tvlv_handler);
-	spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
-	hlist_del_rcu(&tvlv_handler->list);
-	spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
-	batadv_tvlv_handler_put(tvlv_handler);
-}
-
-/**
- * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
- * specified host
- * @bat_priv: the bat priv with all the soft interface information
- * @src: source mac address of the unicast packet
- * @dst: destination mac address of the unicast packet
- * @type: tvlv type
- * @version: tvlv version
- * @tvlv_value: tvlv content
- * @tvlv_value_len: tvlv content length
- */
-void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
-			      u8 *dst, u8 type, u8 version,
-			      void *tvlv_value, u16 tvlv_value_len)
-{
-	struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
-	struct batadv_tvlv_hdr *tvlv_hdr;
-	struct batadv_orig_node *orig_node;
-	struct sk_buff *skb;
-	unsigned char *tvlv_buff;
-	unsigned int tvlv_len;
-	ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
-
-	orig_node = batadv_orig_hash_find(bat_priv, dst);
-	if (!orig_node)
-		return;
-
-	tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;
-
-	skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
-	if (!skb)
-		goto out;
-
-	skb->priority = TC_PRIO_CONTROL;
-	skb_reserve(skb, ETH_HLEN);
-	tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
-	unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
-	unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
-	unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
-	unicast_tvlv_packet->ttl = BATADV_TTL;
-	unicast_tvlv_packet->reserved = 0;
-	unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
-	unicast_tvlv_packet->align = 0;
-	ether_addr_copy(unicast_tvlv_packet->src, src);
-	ether_addr_copy(unicast_tvlv_packet->dst, dst);
-
-	tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
-	tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
-	tvlv_hdr->version = version;
-	tvlv_hdr->type = type;
-	tvlv_hdr->len = htons(tvlv_value_len);
-	tvlv_buff += sizeof(*tvlv_hdr);
-	memcpy(tvlv_buff, tvlv_value, tvlv_value_len);
-
-	if (batadv_send_skb_to_orig(skb, orig_node, NULL) == NET_XMIT_DROP)
-		kfree_skb(skb);
-out:
-	batadv_orig_node_put(orig_node);
-}
-
-/**
  * batadv_get_vid - extract the VLAN identifier from skb if any
  * @skb: the buffer containing the packet
  * @header_len: length of the batman header preceding the ethernet header
@@ -1284,36 +629,6 @@ bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
 	return ap_isolation_enabled;
 }
 
-static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
-{
-	struct batadv_algo_ops *bat_algo_ops;
-	char *algo_name = (char *)val;
-	size_t name_len = strlen(algo_name);
-
-	if (name_len > 0 && algo_name[name_len - 1] == '\n')
-		algo_name[name_len - 1] = '\0';
-
-	bat_algo_ops = batadv_algo_get(algo_name);
-	if (!bat_algo_ops) {
-		pr_err("Routing algorithm '%s' is not supported\n", algo_name);
-		return -EINVAL;
-	}
-
-	return param_set_copystring(algo_name, kp);
-}
-
-static const struct kernel_param_ops batadv_param_ops_ra = {
-	.set = batadv_param_set_ra,
-	.get = param_get_string,
-};
-
-static struct kparam_string batadv_param_string_ra = {
-	.maxlen = sizeof(batadv_routing_algo),
-	.string = batadv_routing_algo,
-};
-
-module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
-		0644);
 module_init(batadv_init);
 module_exit(batadv_exit);
 
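None of the TVLV and routing-algorithm code removed above is lost: the #include "tvlv.h" lines added across this series indicate the TVLV machinery now lives in a dedicated tvlv.c/tvlv.h pair, and the algo registration moves behind batadv_algo_init() and friends. For orientation, registering a handler with the TVLV API (whose signature is visible in the removed code above) looks roughly like this; the handler name and the 0x42 type value are placeholders, not values from the tree:

	static void my_ogm_handler(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig, u8 flags,
				   void *tvlv_value, u16 tvlv_value_len)
	{
		/* parse the OGM-carried TVLV payload here */
	}

	batadv_tvlv_handler_register(bat_priv, my_ogm_handler, NULL, 0x42, 1,
				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);

Passing NULL for the unicast callback is fine: batadv_tvlv_call_handler() checks each callback before invoking it.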
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 76925266deed..06a860845434 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -24,7 +24,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2016.2"
+#define BATADV_SOURCE_VERSION "2016.3"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -100,6 +100,9 @@
 #define BATADV_NUM_BCASTS_WIRELESS 3
 #define BATADV_NUM_BCASTS_MAX 3
 
+/* length of the single packet used by the TP meter */
+#define BATADV_TP_PACKET_LEN ETH_DATA_LEN
+
 /* msecs after which an ARP_REQUEST is sent in broadcast as fallback */
 #define ARP_REQ_DELAY 250
 /* numbers of originator to contact for any PUT/GET DHT operation */
@@ -131,6 +134,11 @@
 
 #define BATADV_NC_NODE_TIMEOUT 10000 /* Milliseconds */
 
+/**
+ * BATADV_TP_MAX_NUM - maximum number of simultaneously active tp sessions
+ */
+#define BATADV_TP_MAX_NUM 5
+
 enum batadv_mesh_state {
 	BATADV_MESH_INACTIVE,
 	BATADV_MESH_ACTIVE,
@@ -175,29 +183,26 @@ enum batadv_uev_type {
 
 /* Kernel headers */
 
-#include <linux/atomic.h>
 #include <linux/bitops.h> /* for packet.h */
 #include <linux/compiler.h>
 #include <linux/cpumask.h>
 #include <linux/etherdevice.h>
 #include <linux/if_ether.h> /* for packet.h */
-#include <linux/netdevice.h>
-#include <linux/printk.h>
-#include <linux/types.h>
-#include <linux/percpu.h>
-#include <linux/jiffies.h>
 #include <linux/if_vlan.h>
+#include <linux/jiffies.h>
+#include <linux/percpu.h>
+#include <linux/types.h>
 
 #include "types.h"
 
-struct batadv_ogm_packet;
+struct net_device;
+struct packet_type;
 struct seq_file;
 struct sk_buff;
 
 #define BATADV_PRINT_VID(vid) ((vid & BATADV_VLAN_HAS_TAG) ? \
			       (int)(vid & VLAN_VID_MASK) : -1)
 
-extern char batadv_routing_algo[];
 extern struct list_head batadv_hardif_list;
 
 extern unsigned char batadv_broadcast_addr[];
@@ -218,74 +223,9 @@
			     int (*recv_handler)(struct sk_buff *,
						  struct batadv_hard_iface *));
 void batadv_recv_handler_unregister(u8 packet_type);
-int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
-int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
-int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
 __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr);
 
 /**
- * enum batadv_dbg_level - available log levels
- * @BATADV_DBG_BATMAN: OGM and TQ computations related messages
- * @BATADV_DBG_ROUTES: route added / changed / deleted
- * @BATADV_DBG_TT: translation table messages
- * @BATADV_DBG_BLA: bridge loop avoidance messages
- * @BATADV_DBG_DAT: ARP snooping and DAT related messages
- * @BATADV_DBG_NC: network coding related messages
- * @BATADV_DBG_ALL: the union of all the above log levels
- */
-enum batadv_dbg_level {
-	BATADV_DBG_BATMAN = BIT(0),
-	BATADV_DBG_ROUTES = BIT(1),
-	BATADV_DBG_TT = BIT(2),
-	BATADV_DBG_BLA = BIT(3),
-	BATADV_DBG_DAT = BIT(4),
-	BATADV_DBG_NC = BIT(5),
-	BATADV_DBG_ALL = 63,
-};
-
-#ifdef CONFIG_BATMAN_ADV_DEBUG
-int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
-__printf(2, 3);
-
-/* possibly ratelimited debug output */
-#define _batadv_dbg(type, bat_priv, ratelimited, fmt, arg...) \
-	do { \
-		if (atomic_read(&bat_priv->log_level) & type && \
-		    (!ratelimited || net_ratelimit())) \
-			batadv_debug_log(bat_priv, fmt, ## arg);\
-	} \
-	while (0)
-#else /* !CONFIG_BATMAN_ADV_DEBUG */
-__printf(4, 5)
-static inline void _batadv_dbg(int type __always_unused,
-			       struct batadv_priv *bat_priv __always_unused,
-			       int ratelimited __always_unused,
-			       const char *fmt __always_unused, ...)
-{
-}
-#endif
-
-#define batadv_dbg(type, bat_priv, arg...) \
-	_batadv_dbg(type, bat_priv, 0, ## arg)
-#define batadv_dbg_ratelimited(type, bat_priv, arg...) \
-	_batadv_dbg(type, bat_priv, 1, ## arg)
-
-#define batadv_info(net_dev, fmt, arg...) \
-	do { \
-		struct net_device *_netdev = (net_dev); \
-		struct batadv_priv *_batpriv = netdev_priv(_netdev); \
-		batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \
-		pr_info("%s: " fmt, _netdev->name, ## arg); \
-	} while (0)
-#define batadv_err(net_dev, fmt, arg...) \
-	do { \
-		struct net_device *_netdev = (net_dev); \
-		struct batadv_priv *_batpriv = netdev_priv(_netdev); \
-		batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \
-		pr_err("%s: " fmt, _netdev->name, ## arg); \
-	} while (0)
-
-/**
  * batadv_compare_eth - Compare two not u16 aligned Ethernet addresses
  * @data1: Pointer to a six-byte array containing the Ethernet address
  * @data2: Pointer other six-byte array containing the Ethernet address
@@ -370,39 +310,6 @@ static inline u64 batadv_sum_counter(struct batadv_priv *bat_priv, size_t idx)
  */
 #define BATADV_SKB_CB(__skb) ((struct batadv_skb_cb *)&((__skb)->cb[0]))
 
-void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
-				    u8 type, u8 version,
-				    void *tvlv_value, u16 tvlv_value_len);
-u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
-				     unsigned char **packet_buff,
-				     int *packet_buff_len, int packet_min_len);
-void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
-			     struct batadv_ogm_packet *batadv_ogm_packet,
-			     struct batadv_orig_node *orig_node);
-void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
-				      u8 type, u8 version);
-
-void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
-				  void (*optr)(struct batadv_priv *bat_priv,
-					       struct batadv_orig_node *orig,
-					       u8 flags,
-					       void *tvlv_value,
-					       u16 tvlv_value_len),
-				  int (*uptr)(struct batadv_priv *bat_priv,
-					      u8 *src, u8 *dst,
-					      void *tvlv_value,
-					      u16 tvlv_value_len),
-				  u8 type, u8 version, u8 flags);
-void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
-				    u8 type, u8 version);
-int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
-				   bool ogm_source,
-				   struct batadv_orig_node *orig_node,
-				   u8 *src, u8 *dst,
-				   void *tvlv_buff, u16 tvlv_buff_len);
-void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
-			      u8 *dst, u8 type, u8 version,
-			      void *tvlv_value, u16 tvlv_value_len);
 unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len);
 bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid);
 
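The surviving BATADV_PRINT_VID() macro maps an untagged VLAN id to -1 so that log output stays unambiguous. A one-line usage sketch; the variables are assumed for illustration:

	batadv_dbg(BATADV_DBG_TT, bat_priv, "client %pM on vid %d\n",
		   client_addr, BATADV_PRINT_VID(vid));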
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index c32f24fafe67..cc915073a753 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -25,17 +25,23 @@
25#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/etherdevice.h> 26#include <linux/etherdevice.h>
27#include <linux/fs.h> 27#include <linux/fs.h>
28#include <linux/icmpv6.h>
29#include <linux/if_bridge.h>
28#include <linux/if_ether.h> 30#include <linux/if_ether.h>
29#include <linux/in6.h> 31#include <linux/igmp.h>
30#include <linux/in.h> 32#include <linux/in.h>
33#include <linux/in6.h>
31#include <linux/ip.h> 34#include <linux/ip.h>
32#include <linux/ipv6.h> 35#include <linux/ipv6.h>
36#include <linux/kernel.h>
33#include <linux/kref.h> 37#include <linux/kref.h>
34#include <linux/list.h> 38#include <linux/list.h>
35#include <linux/lockdep.h> 39#include <linux/lockdep.h>
36#include <linux/netdevice.h> 40#include <linux/netdevice.h>
41#include <linux/printk.h>
37#include <linux/rculist.h> 42#include <linux/rculist.h>
38#include <linux/rcupdate.h> 43#include <linux/rcupdate.h>
44#include <linux/seq_file.h>
39#include <linux/skbuff.h> 45#include <linux/skbuff.h>
40#include <linux/slab.h> 46#include <linux/slab.h>
41#include <linux/spinlock.h> 47#include <linux/spinlock.h>
@@ -43,18 +49,57 @@
43#include <linux/string.h> 49#include <linux/string.h>
44#include <linux/types.h> 50#include <linux/types.h>
45#include <net/addrconf.h> 51#include <net/addrconf.h>
52#include <net/if_inet6.h>
53#include <net/ip.h>
46#include <net/ipv6.h> 54#include <net/ipv6.h>
47 55
56#include "hard-interface.h"
57#include "hash.h"
58#include "log.h"
48#include "packet.h" 59#include "packet.h"
49#include "translation-table.h" 60#include "translation-table.h"
61#include "tvlv.h"
62
63/**
64 * batadv_mcast_get_bridge - get the bridge on top of the softif if it exists
65 * @soft_iface: netdev struct of the mesh interface
66 *
67 * If the given soft interface has a bridge on top then the refcount
68 * of the according net device is increased.
69 *
70 * Return: NULL if no such bridge exists. Otherwise the net device of the
71 * bridge.
72 */
73static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
74{
75 struct net_device *upper = soft_iface;
76
77 rcu_read_lock();
78 do {
79 upper = netdev_master_upper_dev_get_rcu(upper);
80 } while (upper && !(upper->priv_flags & IFF_EBRIDGE));
81
82 if (upper)
83 dev_hold(upper);
84 rcu_read_unlock();
85
86 return upper;
87}
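
A minimal usage sketch (illustrative only, not part of the patch): the bridge
returned above carries a reference taken via dev_hold(), so a caller is
expected to drop it with dev_put() once done:

	struct net_device *bridge;

	bridge = batadv_mcast_get_bridge(soft_iface);
	if (bridge) {
		/* inspect bridge state here */
		dev_put(bridge);
	}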
50 88
51/** 89/**
52 * batadv_mcast_mla_softif_get - get softif multicast listeners 90 * batadv_mcast_mla_softif_get - get softif multicast listeners
53 * @dev: the device to collect multicast addresses from 91 * @dev: the device to collect multicast addresses from
54 * @mcast_list: a list to put found addresses into 92 * @mcast_list: a list to put found addresses into
55 * 93 *
56 * Collect multicast addresses of the local multicast listeners 94 * Collects multicast addresses of multicast listeners residing
 57 * on the given soft interface, dev, in the given mcast_list. 95 * on this host on the given soft interface, dev, in
 96 * the given mcast_list. In general, these are listeners registered
 97 * by multicast receiving applications running directly on this node.
98 *
99 * If there is a bridge interface on top of dev, collects from that one
100 * instead. Just like with IP addresses and routes, multicast listeners
 101 * will(/should) register with the bridge interface instead of an
102 * enslaved bat0.
58 * 103 *
59 * Return: -ENOMEM on memory allocation error or the number of 104 * Return: -ENOMEM on memory allocation error or the number of
60 * items added to the mcast_list otherwise. 105 * items added to the mcast_list otherwise.
@@ -62,12 +107,13 @@
62static int batadv_mcast_mla_softif_get(struct net_device *dev, 107static int batadv_mcast_mla_softif_get(struct net_device *dev,
63 struct hlist_head *mcast_list) 108 struct hlist_head *mcast_list)
64{ 109{
110 struct net_device *bridge = batadv_mcast_get_bridge(dev);
65 struct netdev_hw_addr *mc_list_entry; 111 struct netdev_hw_addr *mc_list_entry;
66 struct batadv_hw_addr *new; 112 struct batadv_hw_addr *new;
67 int ret = 0; 113 int ret = 0;
68 114
69 netif_addr_lock_bh(dev); 115 netif_addr_lock_bh(bridge ? bridge : dev);
70 netdev_for_each_mc_addr(mc_list_entry, dev) { 116 netdev_for_each_mc_addr(mc_list_entry, bridge ? bridge : dev) {
71 new = kmalloc(sizeof(*new), GFP_ATOMIC); 117 new = kmalloc(sizeof(*new), GFP_ATOMIC);
72 if (!new) { 118 if (!new) {
73 ret = -ENOMEM; 119 ret = -ENOMEM;
@@ -78,7 +124,10 @@ static int batadv_mcast_mla_softif_get(struct net_device *dev,
78 hlist_add_head(&new->list, mcast_list); 124 hlist_add_head(&new->list, mcast_list);
79 ret++; 125 ret++;
80 } 126 }
81 netif_addr_unlock_bh(dev); 127 netif_addr_unlock_bh(bridge ? bridge : dev);
128
129 if (bridge)
130 dev_put(bridge);
82 131
83 return ret; 132 return ret;
84} 133}
@@ -104,6 +153,83 @@ static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
104} 153}
105 154
106/** 155/**
156 * batadv_mcast_mla_br_addr_cpy - copy a bridge multicast address
157 * @dst: destination to write to - a multicast MAC address
158 * @src: source to read from - a multicast IP address
159 *
160 * Converts a given multicast IPv4/IPv6 address from a bridge
161 * to its matching multicast MAC address and copies it into the given
162 * destination buffer.
163 *
164 * Caller needs to make sure the destination buffer can hold
165 * at least ETH_ALEN bytes.
166 */
167static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
168{
169 if (src->proto == htons(ETH_P_IP))
170 ip_eth_mc_map(src->u.ip4, dst);
171#if IS_ENABLED(CONFIG_IPV6)
172 else if (src->proto == htons(ETH_P_IPV6))
173 ipv6_eth_mc_map(&src->u.ip6, dst);
174#endif
175 else
176 eth_zero_addr(dst);
177}
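
For illustration (example addresses, not from the patch), the two kernel
helpers map group addresses as follows: ip_eth_mc_map() keeps the low 23
address bits under the 01:00:5e OUI (RFC 1112), ipv6_eth_mc_map() keeps the
low 32 bits under the 33:33 prefix (RFC 2464):

	char dst[ETH_ALEN];

	ip_eth_mc_map(htonl(0xe0010203), dst);	/* 224.1.2.3 -> 01:00:5e:01:02:03 */
	/* ipv6_eth_mc_map(): e.g. ff02::1 -> 33:33:00:00:00:01 */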
178
179/**
180 * batadv_mcast_mla_bridge_get - get bridged-in multicast listeners
181 * @dev: a bridge slave whose bridge to collect multicast addresses from
182 * @mcast_list: a list to put found addresses into
183 *
184 * Collects multicast addresses of multicast listeners residing
185 * on foreign, non-mesh devices which we gave access to our mesh via
186 * a bridge on top of the given soft interface, dev, in the given
187 * mcast_list.
188 *
189 * Return: -ENOMEM on memory allocation error or the number of
190 * items added to the mcast_list otherwise.
191 */
192static int batadv_mcast_mla_bridge_get(struct net_device *dev,
193 struct hlist_head *mcast_list)
194{
195 struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
196 struct br_ip_list *br_ip_entry, *tmp;
197 struct batadv_hw_addr *new;
198 u8 mcast_addr[ETH_ALEN];
199 int ret;
200
201 /* we don't need to detect these devices/listeners, the IGMP/MLD
202 * snooping code of the Linux bridge already does that for us
203 */
204 ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
205 if (ret < 0)
206 goto out;
207
208 list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) {
209 batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr);
210 if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
211 continue;
212
213 new = kmalloc(sizeof(*new), GFP_ATOMIC);
214 if (!new) {
215 ret = -ENOMEM;
216 break;
217 }
218
219 ether_addr_copy(new->addr, mcast_addr);
220 hlist_add_head(&new->list, mcast_list);
221 }
222
223out:
224 list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
225 list_del(&br_ip_entry->list);
226 kfree(br_ip_entry);
227 }
228
229 return ret;
230}
231
232/**
107 * batadv_mcast_mla_list_free - free a list of multicast addresses 233 * batadv_mcast_mla_list_free - free a list of multicast addresses
108 * @bat_priv: the bat priv with all the soft interface information 234 * @bat_priv: the bat priv with all the soft interface information
109 * @mcast_list: the list to free 235 * @mcast_list: the list to free
@@ -214,44 +340,195 @@ static bool batadv_mcast_has_bridge(struct batadv_priv *bat_priv)
214} 340}
215 341
216/** 342/**
343 * batadv_mcast_querier_log - debug output regarding the querier status on link
344 * @bat_priv: the bat priv with all the soft interface information
345 * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
346 * @old_state: the previous querier state on our link
347 * @new_state: the new querier state on our link
348 *
349 * Outputs debug messages to the logging facility with log level 'mcast'
350 * regarding changes to the querier status on the link which are relevant
351 * to our multicast optimizations.
352 *
353 * Usually this is about whether a querier appeared or vanished in
354 * our mesh or whether the querier is in the suboptimal position of being
355 * behind our local bridge segment: Snooping switches will directly
356 * forward listener reports to the querier, therefore batman-adv and
357 * the bridge will potentially not see these listeners - the querier is
358 * potentially shadowing listeners from us then.
359 *
360 * This is only interesting for nodes with a bridge on top of their
361 * soft interface.
362 */
363static void
364batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
365 struct batadv_mcast_querier_state *old_state,
366 struct batadv_mcast_querier_state *new_state)
367{
368 if (!old_state->exists && new_state->exists)
369 batadv_info(bat_priv->soft_iface, "%s Querier appeared\n",
370 str_proto);
371 else if (old_state->exists && !new_state->exists)
372 batadv_info(bat_priv->soft_iface,
373 "%s Querier disappeared - multicast optimizations disabled\n",
374 str_proto);
375 else if (!bat_priv->mcast.bridged && !new_state->exists)
376 batadv_info(bat_priv->soft_iface,
377 "No %s Querier present - multicast optimizations disabled\n",
378 str_proto);
379
380 if (new_state->exists) {
381 if ((!old_state->shadowing && new_state->shadowing) ||
382 (!old_state->exists && new_state->shadowing))
383 batadv_dbg(BATADV_DBG_MCAST, bat_priv,
384 "%s Querier is behind our bridged segment: Might shadow listeners\n",
385 str_proto);
386 else if (old_state->shadowing && !new_state->shadowing)
387 batadv_dbg(BATADV_DBG_MCAST, bat_priv,
388 "%s Querier is not behind our bridged segment\n",
389 str_proto);
390 }
391}
392
393/**
394 * batadv_mcast_bridge_log - debug output for topology changes in bridged setups
395 * @bat_priv: the bat priv with all the soft interface information
396 * @bridged: a flag about whether the soft interface is currently bridged or not
397 * @querier_ipv4: (maybe) new status of a potential, selected IGMP querier
398 * @querier_ipv6: (maybe) new status of a potential, selected MLD querier
399 *
400 * If no bridges are ever used on this node, then this function does nothing.
401 *
402 * Otherwise this function outputs debug information to the 'mcast' log level
403 * which might be relevant to our multicast optimizations.
404 *
405 * More precisely, it outputs information when a bridge interface is added or
406 * removed from a soft interface. And when a bridge is present, it further
407 * outputs information about the querier state which is relevant for the
408 * multicast flags this node is going to set.
409 */
410static void
411batadv_mcast_bridge_log(struct batadv_priv *bat_priv, bool bridged,
412 struct batadv_mcast_querier_state *querier_ipv4,
413 struct batadv_mcast_querier_state *querier_ipv6)
414{
415 if (!bat_priv->mcast.bridged && bridged)
416 batadv_dbg(BATADV_DBG_MCAST, bat_priv,
417 "Bridge added: Setting Unsnoopables(U)-flag\n");
418 else if (bat_priv->mcast.bridged && !bridged)
419 batadv_dbg(BATADV_DBG_MCAST, bat_priv,
420 "Bridge removed: Unsetting Unsnoopables(U)-flag\n");
421
422 if (bridged) {
423 batadv_mcast_querier_log(bat_priv, "IGMP",
424 &bat_priv->mcast.querier_ipv4,
425 querier_ipv4);
426 batadv_mcast_querier_log(bat_priv, "MLD",
427 &bat_priv->mcast.querier_ipv6,
428 querier_ipv6);
429 }
430}
431
432/**
 433 * batadv_mcast_flags_log - output debug information about mcast flag changes
434 * @bat_priv: the bat priv with all the soft interface information
435 * @flags: flags indicating the new multicast state
436 *
 437 * Whenever the multicast flags this node announces change (@flags vs.
 438 * bat_priv->mcast.flags), this notifies userspace via the 'mcast' log level.
439 */
440static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
441{
442 u8 old_flags = bat_priv->mcast.flags;
443 char str_old_flags[] = "[...]";
444
445 sprintf(str_old_flags, "[%c%c%c]",
446 (old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
447 (old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
448 (old_flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.');
449
450 batadv_dbg(BATADV_DBG_MCAST, bat_priv,
451 "Changing multicast flags from '%s' to '[%c%c%c]'\n",
452 bat_priv->mcast.enabled ? str_old_flags : "<undefined>",
453 (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
454 (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
455 (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.');
456}
457
458/**
217 * batadv_mcast_mla_tvlv_update - update multicast tvlv 459 * batadv_mcast_mla_tvlv_update - update multicast tvlv
218 * @bat_priv: the bat priv with all the soft interface information 460 * @bat_priv: the bat priv with all the soft interface information
219 * 461 *
220 * Updates the own multicast tvlv with our current multicast related settings, 462 * Updates the own multicast tvlv with our current multicast related settings,
221 * capabilities and inabilities. 463 * capabilities and inabilities.
222 * 464 *
223 * Return: true if the tvlv container is registered afterwards. Otherwise 465 * Return: false if we want all IPv4 && IPv6 multicast traffic and true
224 * returns false. 466 * otherwise.
225 */ 467 */
226static bool batadv_mcast_mla_tvlv_update(struct batadv_priv *bat_priv) 468static bool batadv_mcast_mla_tvlv_update(struct batadv_priv *bat_priv)
227{ 469{
228 struct batadv_tvlv_mcast_data mcast_data; 470 struct batadv_tvlv_mcast_data mcast_data;
471 struct batadv_mcast_querier_state querier4 = {false, false};
472 struct batadv_mcast_querier_state querier6 = {false, false};
473 struct net_device *dev = bat_priv->soft_iface;
474 bool bridged;
229 475
230 mcast_data.flags = BATADV_NO_FLAGS; 476 mcast_data.flags = BATADV_NO_FLAGS;
231 memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved)); 477 memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));
232 478
233 /* Avoid attaching MLAs, if there is a bridge on top of our soft 479 bridged = batadv_mcast_has_bridge(bat_priv);
234 * interface, we don't support that yet (TODO) 480 if (!bridged)
481 goto update;
482
483#if !IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
484 pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");
485#endif
486
487 querier4.exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
488 querier4.shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);
489
490 querier6.exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
491 querier6.shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);
492
493 mcast_data.flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;
494
495 /* 1) If no querier exists at all, then multicast listeners on
496 * our local TT clients behind the bridge will keep silent.
497 * 2) If the selected querier is on one of our local TT clients,
498 * behind the bridge, then this querier might shadow multicast
499 * listeners on our local TT clients, behind this bridge.
500 *
 501 * In both cases, we will signal other batman nodes that
 502 * we need all multicast traffic of the corresponding protocol.
235 */ 503 */
236 if (batadv_mcast_has_bridge(bat_priv)) { 504 if (!querier4.exists || querier4.shadowing)
237 if (bat_priv->mcast.enabled) { 505 mcast_data.flags |= BATADV_MCAST_WANT_ALL_IPV4;
238 batadv_tvlv_container_unregister(bat_priv,
239 BATADV_TVLV_MCAST, 1);
240 bat_priv->mcast.enabled = false;
241 }
242 506
243 return false; 507 if (!querier6.exists || querier6.shadowing)
244 } 508 mcast_data.flags |= BATADV_MCAST_WANT_ALL_IPV6;
509
510update:
511 batadv_mcast_bridge_log(bat_priv, bridged, &querier4, &querier6);
512
513 bat_priv->mcast.querier_ipv4.exists = querier4.exists;
514 bat_priv->mcast.querier_ipv4.shadowing = querier4.shadowing;
515
516 bat_priv->mcast.querier_ipv6.exists = querier6.exists;
517 bat_priv->mcast.querier_ipv6.shadowing = querier6.shadowing;
518
519 bat_priv->mcast.bridged = bridged;
245 520
246 if (!bat_priv->mcast.enabled || 521 if (!bat_priv->mcast.enabled ||
247 mcast_data.flags != bat_priv->mcast.flags) { 522 mcast_data.flags != bat_priv->mcast.flags) {
248 batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 1, 523 batadv_mcast_flags_log(bat_priv, mcast_data.flags);
524 batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
249 &mcast_data, sizeof(mcast_data)); 525 &mcast_data, sizeof(mcast_data));
250 bat_priv->mcast.flags = mcast_data.flags; 526 bat_priv->mcast.flags = mcast_data.flags;
251 bat_priv->mcast.enabled = true; 527 bat_priv->mcast.enabled = true;
252 } 528 }
253 529
254 return true; 530 return !(mcast_data.flags &
 531 (BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6));
255} 532}
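
Condensed, the querier-to-flags mapping implemented above reads as follows
(standalone sketch for illustration, not part of the patch):

	static u8 example_mcast_flags(bool bridged,
				      struct batadv_mcast_querier_state *q4,
				      struct batadv_mcast_querier_state *q6)
	{
		u8 flags = BATADV_NO_FLAGS;

		if (!bridged)
			return flags;

		/* unsnoopable listeners may hide behind the bridge */
		flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;

		/* no querier, or a shadowing one: ask for everything */
		if (!q4->exists || q4->shadowing)
			flags |= BATADV_MCAST_WANT_ALL_IPV4;
		if (!q6->exists || q6->shadowing)
			flags |= BATADV_MCAST_WANT_ALL_IPV6;

		return flags;
	}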
256 533
257/** 534/**
@@ -274,6 +551,10 @@ void batadv_mcast_mla_update(struct batadv_priv *bat_priv)
274 if (ret < 0) 551 if (ret < 0)
275 goto out; 552 goto out;
276 553
554 ret = batadv_mcast_mla_bridge_get(soft_iface, &mcast_list);
555 if (ret < 0)
556 goto out;
557
277update: 558update:
278 batadv_mcast_mla_tt_retract(bat_priv, &mcast_list); 559 batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
279 batadv_mcast_mla_tt_add(bat_priv, &mcast_list); 560 batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
@@ -283,6 +564,31 @@ out:
283} 564}
284 565
285/** 566/**
567 * batadv_mcast_is_report_ipv4 - check for IGMP reports
568 * @skb: the ethernet frame destined for the mesh
569 *
570 * This call might reallocate skb data.
571 *
572 * Checks whether the given frame is a valid IGMP report.
573 *
 574 * Return: true if so, false otherwise.
575 */
576static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
577{
578 if (ip_mc_check_igmp(skb, NULL) < 0)
579 return false;
580
581 switch (igmp_hdr(skb)->type) {
582 case IGMP_HOST_MEMBERSHIP_REPORT:
583 case IGMPV2_HOST_MEMBERSHIP_REPORT:
584 case IGMPV3_HOST_MEMBERSHIP_REPORT:
585 return true;
586 }
587
588 return false;
589}
590
591/**
286 * batadv_mcast_forw_mode_check_ipv4 - check for optimized forwarding potential 592 * batadv_mcast_forw_mode_check_ipv4 - check for optimized forwarding potential
287 * @bat_priv: the bat priv with all the soft interface information 593 * @bat_priv: the bat priv with all the soft interface information
288 * @skb: the IPv4 packet to check 594 * @skb: the IPv4 packet to check
@@ -304,6 +610,9 @@ static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
304 if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr))) 610 if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
305 return -ENOMEM; 611 return -ENOMEM;
306 612
613 if (batadv_mcast_is_report_ipv4(skb))
614 return -EINVAL;
615
307 iphdr = ip_hdr(skb); 616 iphdr = ip_hdr(skb);
308 617
309 /* TODO: Implement Multicast Router Discovery (RFC4286), 618 /* TODO: Implement Multicast Router Discovery (RFC4286),
@@ -320,6 +629,31 @@ static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
320 return 0; 629 return 0;
321} 630}
322 631
632#if IS_ENABLED(CONFIG_IPV6)
633/**
634 * batadv_mcast_is_report_ipv6 - check for MLD reports
635 * @skb: the ethernet frame destined for the mesh
636 *
637 * This call might reallocate skb data.
638 *
639 * Checks whether the given frame is a valid MLD report.
640 *
 641 * Return: true if so, false otherwise.
642 */
643static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
644{
645 if (ipv6_mc_check_mld(skb, NULL) < 0)
646 return false;
647
648 switch (icmp6_hdr(skb)->icmp6_type) {
649 case ICMPV6_MGM_REPORT:
650 case ICMPV6_MLD2_REPORT:
651 return true;
652 }
653
654 return false;
655}
656
323/** 657/**
324 * batadv_mcast_forw_mode_check_ipv6 - check for optimized forwarding potential 658 * batadv_mcast_forw_mode_check_ipv6 - check for optimized forwarding potential
325 * @bat_priv: the bat priv with all the soft interface information 659 * @bat_priv: the bat priv with all the soft interface information
@@ -341,6 +675,9 @@ static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
341 if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr))) 675 if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
342 return -ENOMEM; 676 return -ENOMEM;
343 677
678 if (batadv_mcast_is_report_ipv6(skb))
679 return -EINVAL;
680
344 ip6hdr = ipv6_hdr(skb); 681 ip6hdr = ipv6_hdr(skb);
345 682
346 /* TODO: Implement Multicast Router Discovery (RFC4286), 683 /* TODO: Implement Multicast Router Discovery (RFC4286),
@@ -357,6 +694,7 @@ static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
357 694
358 return 0; 695 return 0;
359} 696}
697#endif
360 698
361/** 699/**
362 * batadv_mcast_forw_mode_check - check for optimized forwarding potential 700 * batadv_mcast_forw_mode_check - check for optimized forwarding potential
@@ -385,9 +723,11 @@ static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
385 case ETH_P_IP: 723 case ETH_P_IP:
386 return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb, 724 return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
387 is_unsnoopable); 725 is_unsnoopable);
726#if IS_ENABLED(CONFIG_IPV6)
388 case ETH_P_IPV6: 727 case ETH_P_IPV6:
389 return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb, 728 return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
390 is_unsnoopable); 729 is_unsnoopable);
730#endif
391 default: 731 default:
392 return -EINVAL; 732 return -EINVAL;
393 } 733 }
@@ -728,18 +1068,18 @@ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
728} 1068}
729 1069
730/** 1070/**
731 * batadv_mcast_tvlv_ogm_handler_v1 - process incoming multicast tvlv container 1071 * batadv_mcast_tvlv_ogm_handler - process incoming multicast tvlv container
732 * @bat_priv: the bat priv with all the soft interface information 1072 * @bat_priv: the bat priv with all the soft interface information
733 * @orig: the orig_node of the ogm 1073 * @orig: the orig_node of the ogm
734 * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags) 1074 * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
735 * @tvlv_value: tvlv buffer containing the multicast data 1075 * @tvlv_value: tvlv buffer containing the multicast data
736 * @tvlv_value_len: tvlv buffer length 1076 * @tvlv_value_len: tvlv buffer length
737 */ 1077 */
738static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, 1078static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
739 struct batadv_orig_node *orig, 1079 struct batadv_orig_node *orig,
740 u8 flags, 1080 u8 flags,
741 void *tvlv_value, 1081 void *tvlv_value,
742 u16 tvlv_value_len) 1082 u16 tvlv_value_len)
743{ 1083{
744 bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND); 1084 bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
745 u8 mcast_flags = BATADV_NO_FLAGS; 1085 u8 mcast_flags = BATADV_NO_FLAGS;
@@ -789,19 +1129,120 @@ static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
789 */ 1129 */
790void batadv_mcast_init(struct batadv_priv *bat_priv) 1130void batadv_mcast_init(struct batadv_priv *bat_priv)
791{ 1131{
792 batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler_v1, 1132 batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler,
793 NULL, BATADV_TVLV_MCAST, 1, 1133 NULL, BATADV_TVLV_MCAST, 2,
794 BATADV_TVLV_HANDLER_OGM_CIFNOTFND); 1134 BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
795} 1135}
796 1136
797/** 1137/**
1138 * batadv_mcast_flags_print_header - print own mcast flags to debugfs table
1139 * @bat_priv: the bat priv with all the soft interface information
1140 * @seq: debugfs table seq_file struct
1141 *
1142 * Prints our own multicast flags including a more specific reason why
 1143 * they are set, that is, it also prints the bridge and querier state, to
1144 * the debugfs table specified via @seq.
1145 */
1146static void batadv_mcast_flags_print_header(struct batadv_priv *bat_priv,
1147 struct seq_file *seq)
1148{
1149 u8 flags = bat_priv->mcast.flags;
1150 char querier4, querier6, shadowing4, shadowing6;
1151 bool bridged = bat_priv->mcast.bridged;
1152
1153 if (bridged) {
1154 querier4 = bat_priv->mcast.querier_ipv4.exists ? '.' : '4';
1155 querier6 = bat_priv->mcast.querier_ipv6.exists ? '.' : '6';
1156 shadowing4 = bat_priv->mcast.querier_ipv4.shadowing ? '4' : '.';
1157 shadowing6 = bat_priv->mcast.querier_ipv6.shadowing ? '6' : '.';
1158 } else {
1159 querier4 = '?';
1160 querier6 = '?';
1161 shadowing4 = '?';
1162 shadowing6 = '?';
1163 }
1164
1165 seq_printf(seq, "Multicast flags (own flags: [%c%c%c])\n",
1166 (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
1167 (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
1168 (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.');
1169 seq_printf(seq, "* Bridged [U]\t\t\t\t%c\n", bridged ? 'U' : '.');
1170 seq_printf(seq, "* No IGMP/MLD Querier [4/6]:\t\t%c/%c\n",
1171 querier4, querier6);
1172 seq_printf(seq, "* Shadowing IGMP/MLD Querier [4/6]:\t%c/%c\n",
1173 shadowing4, shadowing6);
1174 seq_puts(seq, "-------------------------------------------\n");
1175 seq_printf(seq, " %-10s %s\n", "Originator", "Flags");
1176}
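
The resulting debugfs header looks roughly like this (values invented for
illustration, format taken from the seq_printf() calls above):

	Multicast flags (own flags: [U..])
	* Bridged [U]				U
	* No IGMP/MLD Querier [4/6]:		./.
	* Shadowing IGMP/MLD Querier [4/6]:	./.
	-------------------------------------------
	 Originator Flags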
1177
1178/**
1179 * batadv_mcast_flags_seq_print_text - print the mcast flags of other nodes
1180 * @seq: seq file to print on
1181 * @offset: not used
1182 *
 1183 * This prints a table of (primary) originators and their corresponding
1184 * multicast flags, including (in the header) our own.
1185 *
1186 * Return: always 0
1187 */
1188int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset)
1189{
1190 struct net_device *net_dev = (struct net_device *)seq->private;
1191 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1192 struct batadv_hard_iface *primary_if;
1193 struct batadv_hashtable *hash = bat_priv->orig_hash;
1194 struct batadv_orig_node *orig_node;
1195 struct hlist_head *head;
1196 u8 flags;
1197 u32 i;
1198
1199 primary_if = batadv_seq_print_text_primary_if_get(seq);
1200 if (!primary_if)
1201 return 0;
1202
1203 batadv_mcast_flags_print_header(bat_priv, seq);
1204
1205 for (i = 0; i < hash->size; i++) {
1206 head = &hash->table[i];
1207
1208 rcu_read_lock();
1209 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
1210 if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
1211 &orig_node->capa_initialized))
1212 continue;
1213
1214 if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
1215 &orig_node->capabilities)) {
1216 seq_printf(seq, "%pM -\n", orig_node->orig);
1217 continue;
1218 }
1219
1220 flags = orig_node->mcast_flags;
1221
1222 seq_printf(seq, "%pM [%c%c%c]\n", orig_node->orig,
1223 (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)
1224 ? 'U' : '.',
1225 (flags & BATADV_MCAST_WANT_ALL_IPV4)
1226 ? '4' : '.',
1227 (flags & BATADV_MCAST_WANT_ALL_IPV6)
1228 ? '6' : '.');
1229 }
1230 rcu_read_unlock();
1231 }
1232
1233 batadv_hardif_put(primary_if);
1234
1235 return 0;
1236}
1237
1238/**
798 * batadv_mcast_free - free the multicast optimizations structures 1239 * batadv_mcast_free - free the multicast optimizations structures
799 * @bat_priv: the bat priv with all the soft interface information 1240 * @bat_priv: the bat priv with all the soft interface information
800 */ 1241 */
801void batadv_mcast_free(struct batadv_priv *bat_priv) 1242void batadv_mcast_free(struct batadv_priv *bat_priv)
802{ 1243{
803 batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 1); 1244 batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
804 batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 1); 1245 batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
805 1246
806 spin_lock_bh(&bat_priv->tt.commit_lock); 1247 spin_lock_bh(&bat_priv->tt.commit_lock);
807 batadv_mcast_mla_tt_retract(bat_priv, NULL); 1248 batadv_mcast_mla_tt_retract(bat_priv, NULL);
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
index 80bceec55592..1fb00ba84907 100644
--- a/net/batman-adv/multicast.h
+++ b/net/batman-adv/multicast.h
@@ -20,6 +20,7 @@
20 20
21#include "main.h" 21#include "main.h"
22 22
23struct seq_file;
23struct sk_buff; 24struct sk_buff;
24 25
25/** 26/**
@@ -46,6 +47,8 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
46 47
47void batadv_mcast_init(struct batadv_priv *bat_priv); 48void batadv_mcast_init(struct batadv_priv *bat_priv);
48 49
50int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset);
51
49void batadv_mcast_free(struct batadv_priv *bat_priv); 52void batadv_mcast_free(struct batadv_priv *bat_priv);
50 53
51void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node); 54void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node);
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
new file mode 100644
index 000000000000..231f8eaf075b
--- /dev/null
+++ b/net/batman-adv/netlink.c
@@ -0,0 +1,424 @@
1/* Copyright (C) 2016 B.A.T.M.A.N. contributors:
2 *
3 * Matthias Schiffer
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "netlink.h"
19#include "main.h"
20
21#include <linux/errno.h>
22#include <linux/fs.h>
23#include <linux/genetlink.h>
24#include <linux/if_ether.h>
25#include <linux/init.h>
26#include <linux/netdevice.h>
27#include <linux/netlink.h>
28#include <linux/printk.h>
29#include <linux/stddef.h>
30#include <linux/types.h>
31#include <net/genetlink.h>
32#include <net/netlink.h>
33#include <uapi/linux/batman_adv.h>
34
35#include "hard-interface.h"
36#include "soft-interface.h"
37#include "tp_meter.h"
38
39struct sk_buff;
40
41static struct genl_family batadv_netlink_family = {
42 .id = GENL_ID_GENERATE,
43 .hdrsize = 0,
44 .name = BATADV_NL_NAME,
45 .version = 1,
46 .maxattr = BATADV_ATTR_MAX,
47};
48
49/* multicast groups */
50enum batadv_netlink_multicast_groups {
51 BATADV_NL_MCGRP_TPMETER,
52};
53
54static struct genl_multicast_group batadv_netlink_mcgrps[] = {
55 [BATADV_NL_MCGRP_TPMETER] = { .name = BATADV_NL_MCAST_GROUP_TPMETER },
56};
57
58static struct nla_policy batadv_netlink_policy[NUM_BATADV_ATTR] = {
59 [BATADV_ATTR_VERSION] = { .type = NLA_STRING },
60 [BATADV_ATTR_ALGO_NAME] = { .type = NLA_STRING },
61 [BATADV_ATTR_MESH_IFINDEX] = { .type = NLA_U32 },
62 [BATADV_ATTR_MESH_IFNAME] = { .type = NLA_STRING },
63 [BATADV_ATTR_MESH_ADDRESS] = { .len = ETH_ALEN },
64 [BATADV_ATTR_HARD_IFINDEX] = { .type = NLA_U32 },
65 [BATADV_ATTR_HARD_IFNAME] = { .type = NLA_STRING },
66 [BATADV_ATTR_HARD_ADDRESS] = { .len = ETH_ALEN },
67 [BATADV_ATTR_ORIG_ADDRESS] = { .len = ETH_ALEN },
68 [BATADV_ATTR_TPMETER_RESULT] = { .type = NLA_U8 },
69 [BATADV_ATTR_TPMETER_TEST_TIME] = { .type = NLA_U32 },
70 [BATADV_ATTR_TPMETER_BYTES] = { .type = NLA_U64 },
71 [BATADV_ATTR_TPMETER_COOKIE] = { .type = NLA_U32 },
72};
73
74/**
75 * batadv_netlink_mesh_info_put - fill in generic information about mesh
76 * interface
77 * @msg: netlink message to be sent back
78 * @soft_iface: interface for which the data should be taken
79 *
80 * Return: 0 on success, < 0 on error
81 */
82static int
83batadv_netlink_mesh_info_put(struct sk_buff *msg, struct net_device *soft_iface)
84{
85 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
86 struct batadv_hard_iface *primary_if = NULL;
87 struct net_device *hard_iface;
88 int ret = -ENOBUFS;
89
90 if (nla_put_string(msg, BATADV_ATTR_VERSION, BATADV_SOURCE_VERSION) ||
91 nla_put_string(msg, BATADV_ATTR_ALGO_NAME,
92 bat_priv->algo_ops->name) ||
93 nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX, soft_iface->ifindex) ||
94 nla_put_string(msg, BATADV_ATTR_MESH_IFNAME, soft_iface->name) ||
95 nla_put(msg, BATADV_ATTR_MESH_ADDRESS, ETH_ALEN,
96 soft_iface->dev_addr))
97 goto out;
98
99 primary_if = batadv_primary_if_get_selected(bat_priv);
100 if (primary_if && primary_if->if_status == BATADV_IF_ACTIVE) {
101 hard_iface = primary_if->net_dev;
102
103 if (nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
104 hard_iface->ifindex) ||
105 nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
106 hard_iface->name) ||
107 nla_put(msg, BATADV_ATTR_HARD_ADDRESS, ETH_ALEN,
108 hard_iface->dev_addr))
109 goto out;
110 }
111
112 ret = 0;
113
114 out:
115 if (primary_if)
116 batadv_hardif_put(primary_if);
117
118 return ret;
119}
120
121/**
122 * batadv_netlink_get_mesh_info - handle incoming BATADV_CMD_GET_MESH_INFO
123 * netlink request
124 * @skb: received netlink message
125 * @info: receiver information
126 *
127 * Return: 0 on success, < 0 on error
128 */
129static int
130batadv_netlink_get_mesh_info(struct sk_buff *skb, struct genl_info *info)
131{
132 struct net *net = genl_info_net(info);
133 struct net_device *soft_iface;
134 struct sk_buff *msg = NULL;
135 void *msg_head;
136 int ifindex;
137 int ret;
138
139 if (!info->attrs[BATADV_ATTR_MESH_IFINDEX])
140 return -EINVAL;
141
142 ifindex = nla_get_u32(info->attrs[BATADV_ATTR_MESH_IFINDEX]);
143 if (!ifindex)
144 return -EINVAL;
145
146 soft_iface = dev_get_by_index(net, ifindex);
147 if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
148 ret = -ENODEV;
149 goto out;
150 }
151
152 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
153 if (!msg) {
154 ret = -ENOMEM;
155 goto out;
156 }
157
158 msg_head = genlmsg_put(msg, info->snd_portid, info->snd_seq,
159 &batadv_netlink_family, 0,
160 BATADV_CMD_GET_MESH_INFO);
161 if (!msg_head) {
162 ret = -ENOBUFS;
163 goto out;
164 }
165
166 ret = batadv_netlink_mesh_info_put(msg, soft_iface);
167
168 out:
169 if (soft_iface)
170 dev_put(soft_iface);
171
172 if (ret) {
173 if (msg)
174 nlmsg_free(msg);
175 return ret;
176 }
177
178 genlmsg_end(msg, msg_head);
179 return genlmsg_reply(msg, info);
180}
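
From userspace, the new command can be exercised with libnl-3; a hedged
sketch (error handling omitted, family/attribute/command names taken from
uapi/linux/batman_adv.h, "bat0" is an example interface):

	#include <net/if.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>

	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, BATADV_NL_NAME);

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    BATADV_CMD_GET_MESH_INFO, 1);
	nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX, if_nametoindex("bat0"));

	nl_send_auto(sk, msg);	/* reply parsed via nl_recvmsgs_default() */
	nlmsg_free(msg);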
181
182/**
183 * batadv_netlink_tp_meter_put - Fill information of started tp_meter session
184 * @msg: netlink message to be sent back
185 * @cookie: tp meter session cookie
186 *
187 * Return: 0 on success, < 0 on error
188 */
189static int
190batadv_netlink_tp_meter_put(struct sk_buff *msg, u32 cookie)
191{
192 if (nla_put_u32(msg, BATADV_ATTR_TPMETER_COOKIE, cookie))
193 return -ENOBUFS;
194
195 return 0;
196}
197
198/**
199 * batadv_netlink_tpmeter_notify - send tp_meter result via netlink to client
200 * @bat_priv: the bat priv with all the soft interface information
201 * @dst: destination of tp_meter session
202 * @result: reason for tp meter session stop
 203 * @test_time: total time of the tp_meter session
 204 * @total_bytes: bytes acked by the receiver
205 * @cookie: cookie of tp_meter session
206 *
207 * Return: 0 on success, < 0 on error
208 */
209int batadv_netlink_tpmeter_notify(struct batadv_priv *bat_priv, const u8 *dst,
210 u8 result, u32 test_time, u64 total_bytes,
211 u32 cookie)
212{
213 struct sk_buff *msg;
214 void *hdr;
215 int ret;
216
217 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
218 if (!msg)
219 return -ENOMEM;
220
221 hdr = genlmsg_put(msg, 0, 0, &batadv_netlink_family, 0,
222 BATADV_CMD_TP_METER);
223 if (!hdr) {
224 ret = -ENOBUFS;
225 goto err_genlmsg;
226 }
227
228 if (nla_put_u32(msg, BATADV_ATTR_TPMETER_COOKIE, cookie))
229 goto nla_put_failure;
230
231 if (nla_put_u32(msg, BATADV_ATTR_TPMETER_TEST_TIME, test_time))
232 goto nla_put_failure;
233
234 if (nla_put_u64_64bit(msg, BATADV_ATTR_TPMETER_BYTES, total_bytes,
235 BATADV_ATTR_PAD))
236 goto nla_put_failure;
237
238 if (nla_put_u8(msg, BATADV_ATTR_TPMETER_RESULT, result))
239 goto nla_put_failure;
240
241 if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, dst))
242 goto nla_put_failure;
243
244 genlmsg_end(msg, hdr);
245
246 genlmsg_multicast_netns(&batadv_netlink_family,
247 dev_net(bat_priv->soft_iface), msg, 0,
248 BATADV_NL_MCGRP_TPMETER, GFP_KERNEL);
249
250 return 0;
251
252nla_put_failure:
253 genlmsg_cancel(msg, hdr);
254 ret = -EMSGSIZE;
255
256err_genlmsg:
257 nlmsg_free(msg);
258 return ret;
259}
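
A client interested in these notifications would subscribe to the tpmeter
multicast group first; a hedged libnl-3 sketch, reusing the socket from the
previous example:

	int grp = genl_ctrl_resolve_grp(sk, BATADV_NL_NAME,
					BATADV_NL_MCAST_GROUP_TPMETER);

	nl_socket_add_membership(sk, grp);
	/* subsequent nl_recvmsgs_default(sk) calls deliver BATADV_CMD_TP_METER
	 * messages carrying cookie, test time, acked bytes and result
	 */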
260
261/**
262 * batadv_netlink_tp_meter_start - Start a new tp_meter session
263 * @skb: received netlink message
264 * @info: receiver information
265 *
266 * Return: 0 on success, < 0 on error
267 */
268static int
269batadv_netlink_tp_meter_start(struct sk_buff *skb, struct genl_info *info)
270{
271 struct net *net = genl_info_net(info);
272 struct net_device *soft_iface;
273 struct batadv_priv *bat_priv;
274 struct sk_buff *msg = NULL;
275 u32 test_length;
276 void *msg_head;
277 int ifindex;
278 u32 cookie;
279 u8 *dst;
280 int ret;
281
282 if (!info->attrs[BATADV_ATTR_MESH_IFINDEX])
283 return -EINVAL;
284
285 if (!info->attrs[BATADV_ATTR_ORIG_ADDRESS])
286 return -EINVAL;
287
288 if (!info->attrs[BATADV_ATTR_TPMETER_TEST_TIME])
289 return -EINVAL;
290
291 ifindex = nla_get_u32(info->attrs[BATADV_ATTR_MESH_IFINDEX]);
292 if (!ifindex)
293 return -EINVAL;
294
295 dst = nla_data(info->attrs[BATADV_ATTR_ORIG_ADDRESS]);
296
297 test_length = nla_get_u32(info->attrs[BATADV_ATTR_TPMETER_TEST_TIME]);
298
299 soft_iface = dev_get_by_index(net, ifindex);
300 if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
301 ret = -ENODEV;
302 goto out;
303 }
304
305 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
306 if (!msg) {
307 ret = -ENOMEM;
308 goto out;
309 }
310
311 msg_head = genlmsg_put(msg, info->snd_portid, info->snd_seq,
312 &batadv_netlink_family, 0,
313 BATADV_CMD_TP_METER);
314 if (!msg_head) {
315 ret = -ENOBUFS;
316 goto out;
317 }
318
319 bat_priv = netdev_priv(soft_iface);
320 batadv_tp_start(bat_priv, dst, test_length, &cookie);
321
322 ret = batadv_netlink_tp_meter_put(msg, cookie);
323
324 out:
325 if (soft_iface)
326 dev_put(soft_iface);
327
328 if (ret) {
329 if (msg)
330 nlmsg_free(msg);
331 return ret;
332 }
333
334 genlmsg_end(msg, msg_head);
335 return genlmsg_reply(msg, info);
336}
337
338/**
 339 * batadv_netlink_tp_meter_cancel - Cancel a running tp_meter session
340 * @skb: received netlink message
341 * @info: receiver information
342 *
343 * Return: 0 on success, < 0 on error
344 */
345static int
346batadv_netlink_tp_meter_cancel(struct sk_buff *skb, struct genl_info *info)
347{
348 struct net *net = genl_info_net(info);
349 struct net_device *soft_iface;
350 struct batadv_priv *bat_priv;
351 int ifindex;
352 u8 *dst;
353 int ret = 0;
354
355 if (!info->attrs[BATADV_ATTR_MESH_IFINDEX])
356 return -EINVAL;
357
358 if (!info->attrs[BATADV_ATTR_ORIG_ADDRESS])
359 return -EINVAL;
360
361 ifindex = nla_get_u32(info->attrs[BATADV_ATTR_MESH_IFINDEX]);
362 if (!ifindex)
363 return -EINVAL;
364
365 dst = nla_data(info->attrs[BATADV_ATTR_ORIG_ADDRESS]);
366
367 soft_iface = dev_get_by_index(net, ifindex);
368 if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
369 ret = -ENODEV;
370 goto out;
371 }
372
373 bat_priv = netdev_priv(soft_iface);
374 batadv_tp_stop(bat_priv, dst, BATADV_TP_REASON_CANCEL);
375
376out:
377 if (soft_iface)
378 dev_put(soft_iface);
379
380 return ret;
381}
382
383static struct genl_ops batadv_netlink_ops[] = {
384 {
385 .cmd = BATADV_CMD_GET_MESH_INFO,
386 .flags = GENL_ADMIN_PERM,
387 .policy = batadv_netlink_policy,
388 .doit = batadv_netlink_get_mesh_info,
389 },
390 {
391 .cmd = BATADV_CMD_TP_METER,
392 .flags = GENL_ADMIN_PERM,
393 .policy = batadv_netlink_policy,
394 .doit = batadv_netlink_tp_meter_start,
395 },
396 {
397 .cmd = BATADV_CMD_TP_METER_CANCEL,
398 .flags = GENL_ADMIN_PERM,
399 .policy = batadv_netlink_policy,
400 .doit = batadv_netlink_tp_meter_cancel,
401 },
402};
403
404/**
405 * batadv_netlink_register - register batadv genl netlink family
406 */
407void __init batadv_netlink_register(void)
408{
409 int ret;
410
411 ret = genl_register_family_with_ops_groups(&batadv_netlink_family,
412 batadv_netlink_ops,
413 batadv_netlink_mcgrps);
414 if (ret)
 415 pr_warn("unable to register netlink family\n");
416}
417
418/**
419 * batadv_netlink_unregister - unregister batadv genl netlink family
420 */
421void batadv_netlink_unregister(void)
422{
423 genl_unregister_family(&batadv_netlink_family);
424}
diff --git a/net/batman-adv/netlink.h b/net/batman-adv/netlink.h
new file mode 100644
index 000000000000..945653ab58c6
--- /dev/null
+++ b/net/batman-adv/netlink.h
@@ -0,0 +1,32 @@
1/* Copyright (C) 2016 B.A.T.M.A.N. contributors:
2 *
3 * Matthias Schiffer
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef _NET_BATMAN_ADV_NETLINK_H_
19#define _NET_BATMAN_ADV_NETLINK_H_
20
21#include "main.h"
22
23#include <linux/types.h>
24
25void batadv_netlink_register(void);
26void batadv_netlink_unregister(void);
27
28int batadv_netlink_tpmeter_notify(struct batadv_priv *bat_priv, const u8 *dst,
29 u8 result, u32 test_time, u64 total_bytes,
30 u32 cookie);
31
32#endif /* _NET_BATMAN_ADV_NETLINK_H_ */
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 678f06865312..293ef4ffd4e1 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -51,10 +51,12 @@
51 51
52#include "hard-interface.h" 52#include "hard-interface.h"
53#include "hash.h" 53#include "hash.h"
54#include "log.h"
54#include "originator.h" 55#include "originator.h"
55#include "packet.h" 56#include "packet.h"
56#include "routing.h" 57#include "routing.h"
57#include "send.h" 58#include "send.h"
59#include "tvlv.h"
58 60
59static struct lock_class_key batadv_nc_coding_hash_lock_class_key; 61static struct lock_class_key batadv_nc_coding_hash_lock_class_key;
60static struct lock_class_key batadv_nc_decoding_hash_lock_class_key; 62static struct lock_class_key batadv_nc_decoding_hash_lock_class_key;
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 7f51bc2c06eb..7d1e5421f6bc 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -34,11 +34,13 @@
34#include <linux/spinlock.h> 34#include <linux/spinlock.h>
35#include <linux/workqueue.h> 35#include <linux/workqueue.h>
36 36
37#include "bat_algo.h"
37#include "distributed-arp-table.h" 38#include "distributed-arp-table.h"
38#include "fragmentation.h" 39#include "fragmentation.h"
39#include "gateway_client.h" 40#include "gateway_client.h"
40#include "hard-interface.h" 41#include "hard-interface.h"
41#include "hash.h" 42#include "hash.h"
43#include "log.h"
42#include "multicast.h" 44#include "multicast.h"
43#include "network-coding.h" 45#include "network-coding.h"
44#include "routing.h" 46#include "routing.h"
@@ -251,10 +253,8 @@ static void batadv_neigh_node_release(struct kref *ref)
251 struct hlist_node *node_tmp; 253 struct hlist_node *node_tmp;
252 struct batadv_neigh_node *neigh_node; 254 struct batadv_neigh_node *neigh_node;
253 struct batadv_neigh_ifinfo *neigh_ifinfo; 255 struct batadv_neigh_ifinfo *neigh_ifinfo;
254 struct batadv_algo_ops *bao;
255 256
256 neigh_node = container_of(ref, struct batadv_neigh_node, refcount); 257 neigh_node = container_of(ref, struct batadv_neigh_node, refcount);
257 bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
258 258
259 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp, 259 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
260 &neigh_node->ifinfo_list, list) { 260 &neigh_node->ifinfo_list, list) {
@@ -263,9 +263,6 @@ static void batadv_neigh_node_release(struct kref *ref)
263 263
264 batadv_hardif_neigh_put(neigh_node->hardif_neigh); 264 batadv_hardif_neigh_put(neigh_node->hardif_neigh);
265 265
266 if (bao->bat_neigh_free)
267 bao->bat_neigh_free(neigh_node);
268
269 batadv_hardif_put(neigh_node->if_incoming); 266 batadv_hardif_put(neigh_node->if_incoming);
270 267
271 kfree_rcu(neigh_node, rcu); 268 kfree_rcu(neigh_node, rcu);
@@ -537,8 +534,8 @@ batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
537 534
538 kref_init(&hardif_neigh->refcount); 535 kref_init(&hardif_neigh->refcount);
539 536
540 if (bat_priv->bat_algo_ops->bat_hardif_neigh_init) 537 if (bat_priv->algo_ops->neigh.hardif_init)
541 bat_priv->bat_algo_ops->bat_hardif_neigh_init(hardif_neigh); 538 bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);
542 539
543 hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list); 540 hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
544 541
@@ -602,19 +599,19 @@ batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
602} 599}
603 600
604/** 601/**
605 * batadv_neigh_node_new - create and init a new neigh_node object 602 * batadv_neigh_node_create - create a neigh node object
606 * @orig_node: originator object representing the neighbour 603 * @orig_node: originator object representing the neighbour
607 * @hard_iface: the interface where the neighbour is connected to 604 * @hard_iface: the interface where the neighbour is connected to
608 * @neigh_addr: the mac address of the neighbour interface 605 * @neigh_addr: the mac address of the neighbour interface
609 * 606 *
610 * Allocates a new neigh_node object and initialises all the generic fields. 607 * Allocates a new neigh_node object and initialises all the generic fields.
611 * 608 *
612 * Return: neighbor when found. Othwerwise NULL 609 * Return: the neighbour node if found or created or NULL otherwise.
613 */ 610 */
614struct batadv_neigh_node * 611static struct batadv_neigh_node *
615batadv_neigh_node_new(struct batadv_orig_node *orig_node, 612batadv_neigh_node_create(struct batadv_orig_node *orig_node,
616 struct batadv_hard_iface *hard_iface, 613 struct batadv_hard_iface *hard_iface,
617 const u8 *neigh_addr) 614 const u8 *neigh_addr)
618{ 615{
619 struct batadv_neigh_node *neigh_node; 616 struct batadv_neigh_node *neigh_node;
620 struct batadv_hardif_neigh_node *hardif_neigh = NULL; 617 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
@@ -667,6 +664,29 @@ out:
667} 664}
668 665
669/** 666/**
667 * batadv_neigh_node_get_or_create - retrieve or create a neigh node object
668 * @orig_node: originator object representing the neighbour
669 * @hard_iface: the interface where the neighbour is connected to
670 * @neigh_addr: the mac address of the neighbour interface
671 *
672 * Return: the neighbour node if found or created or NULL otherwise.
673 */
674struct batadv_neigh_node *
675batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node,
676 struct batadv_hard_iface *hard_iface,
677 const u8 *neigh_addr)
678{
679 struct batadv_neigh_node *neigh_node = NULL;
680
681 /* first check without locking to avoid the overhead */
682 neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
683 if (neigh_node)
684 return neigh_node;
685
686 return batadv_neigh_node_create(orig_node, hard_iface, neigh_addr);
687}
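
A hedged usage sketch (variable names illustrative): callers treat lookup
and creation uniformly and release the reference when done:

	struct batadv_neigh_node *neigh;

	neigh = batadv_neigh_node_get_or_create(orig_node, hard_iface, addr);
	if (!neigh)
		return;
	/* ... use neigh ... */
	batadv_neigh_node_put(neigh);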
688
689/**
670 * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list 690 * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
671 * @seq: neighbour table seq_file struct 691 * @seq: neighbour table seq_file struct
672 * @offset: not used 692 * @offset: not used
@@ -686,17 +706,17 @@ int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
686 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n", 706 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
687 BATADV_SOURCE_VERSION, primary_if->net_dev->name, 707 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
688 primary_if->net_dev->dev_addr, net_dev->name, 708 primary_if->net_dev->dev_addr, net_dev->name,
689 bat_priv->bat_algo_ops->name); 709 bat_priv->algo_ops->name);
690 710
691 batadv_hardif_put(primary_if); 711 batadv_hardif_put(primary_if);
692 712
693 if (!bat_priv->bat_algo_ops->bat_neigh_print) { 713 if (!bat_priv->algo_ops->neigh.print) {
694 seq_puts(seq, 714 seq_puts(seq,
695 "No printing function for this routing protocol\n"); 715 "No printing function for this routing protocol\n");
696 return 0; 716 return 0;
697 } 717 }
698 718
699 bat_priv->bat_algo_ops->bat_neigh_print(bat_priv, seq); 719 bat_priv->algo_ops->neigh.print(bat_priv, seq);
700 return 0; 720 return 0;
701} 721}
702 722
@@ -747,8 +767,8 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
747 767
748 batadv_frag_purge_orig(orig_node, NULL); 768 batadv_frag_purge_orig(orig_node, NULL);
749 769
750 if (orig_node->bat_priv->bat_algo_ops->bat_orig_free) 770 if (orig_node->bat_priv->algo_ops->orig.free)
751 orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node); 771 orig_node->bat_priv->algo_ops->orig.free(orig_node);
752 772
753 kfree(orig_node->tt_buff); 773 kfree(orig_node->tt_buff);
754 kfree(orig_node); 774 kfree(orig_node);
@@ -1077,12 +1097,12 @@ batadv_find_best_neighbor(struct batadv_priv *bat_priv,
1077 struct batadv_hard_iface *if_outgoing) 1097 struct batadv_hard_iface *if_outgoing)
1078{ 1098{
1079 struct batadv_neigh_node *best = NULL, *neigh; 1099 struct batadv_neigh_node *best = NULL, *neigh;
1080 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops; 1100 struct batadv_algo_ops *bao = bat_priv->algo_ops;
1081 1101
1082 rcu_read_lock(); 1102 rcu_read_lock();
1083 hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) { 1103 hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
1084 if (best && (bao->bat_neigh_cmp(neigh, if_outgoing, 1104 if (best && (bao->neigh.cmp(neigh, if_outgoing, best,
1085 best, if_outgoing) <= 0)) 1105 if_outgoing) <= 0))
1086 continue; 1106 continue;
1087 1107
1088 if (!kref_get_unless_zero(&neigh->refcount)) 1108 if (!kref_get_unless_zero(&neigh->refcount))
@@ -1234,18 +1254,17 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
1234 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n", 1254 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
1235 BATADV_SOURCE_VERSION, primary_if->net_dev->name, 1255 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
1236 primary_if->net_dev->dev_addr, net_dev->name, 1256 primary_if->net_dev->dev_addr, net_dev->name,
1237 bat_priv->bat_algo_ops->name); 1257 bat_priv->algo_ops->name);
1238 1258
1239 batadv_hardif_put(primary_if); 1259 batadv_hardif_put(primary_if);
1240 1260
1241 if (!bat_priv->bat_algo_ops->bat_orig_print) { 1261 if (!bat_priv->algo_ops->orig.print) {
1242 seq_puts(seq, 1262 seq_puts(seq,
1243 "No printing function for this routing protocol\n"); 1263 "No printing function for this routing protocol\n");
1244 return 0; 1264 return 0;
1245 } 1265 }
1246 1266
1247 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, 1267 bat_priv->algo_ops->orig.print(bat_priv, seq, BATADV_IF_DEFAULT);
1248 BATADV_IF_DEFAULT);
1249 1268
1250 return 0; 1269 return 0;
1251} 1270}
@@ -1272,7 +1291,7 @@ int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
1272 } 1291 }
1273 1292
1274 bat_priv = netdev_priv(hard_iface->soft_iface); 1293 bat_priv = netdev_priv(hard_iface->soft_iface);
1275 if (!bat_priv->bat_algo_ops->bat_orig_print) { 1294 if (!bat_priv->algo_ops->orig.print) {
1276 seq_puts(seq, 1295 seq_puts(seq,
1277 "No printing function for this routing protocol\n"); 1296 "No printing function for this routing protocol\n");
1278 goto out; 1297 goto out;
@@ -1286,9 +1305,9 @@ int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
1286 seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n", 1305 seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
1287 BATADV_SOURCE_VERSION, hard_iface->net_dev->name, 1306 BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
1288 hard_iface->net_dev->dev_addr, 1307 hard_iface->net_dev->dev_addr,
1289 hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name); 1308 hard_iface->soft_iface->name, bat_priv->algo_ops->name);
1290 1309
1291 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface); 1310 bat_priv->algo_ops->orig.print(bat_priv, seq, hard_iface);
1292 1311
1293out: 1312out:
1294 if (hard_iface) 1313 if (hard_iface)
@@ -1300,7 +1319,7 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
1300 int max_if_num) 1319 int max_if_num)
1301{ 1320{
1302 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 1321 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
1303 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops; 1322 struct batadv_algo_ops *bao = bat_priv->algo_ops;
1304 struct batadv_hashtable *hash = bat_priv->orig_hash; 1323 struct batadv_hashtable *hash = bat_priv->orig_hash;
1305 struct hlist_head *head; 1324 struct hlist_head *head;
1306 struct batadv_orig_node *orig_node; 1325 struct batadv_orig_node *orig_node;
@@ -1316,9 +1335,8 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
1316 rcu_read_lock(); 1335 rcu_read_lock();
1317 hlist_for_each_entry_rcu(orig_node, head, hash_entry) { 1336 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
1318 ret = 0; 1337 ret = 0;
1319 if (bao->bat_orig_add_if) 1338 if (bao->orig.add_if)
1320 ret = bao->bat_orig_add_if(orig_node, 1339 ret = bao->orig.add_if(orig_node, max_if_num);
1321 max_if_num);
1322 if (ret == -ENOMEM) 1340 if (ret == -ENOMEM)
1323 goto err; 1341 goto err;
1324 } 1342 }
@@ -1340,7 +1358,7 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
1340 struct hlist_head *head; 1358 struct hlist_head *head;
1341 struct batadv_hard_iface *hard_iface_tmp; 1359 struct batadv_hard_iface *hard_iface_tmp;
1342 struct batadv_orig_node *orig_node; 1360 struct batadv_orig_node *orig_node;
1343 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops; 1361 struct batadv_algo_ops *bao = bat_priv->algo_ops;
1344 u32 i; 1362 u32 i;
1345 int ret; 1363 int ret;
1346 1364
@@ -1353,10 +1371,9 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
1353 rcu_read_lock(); 1371 rcu_read_lock();
1354 hlist_for_each_entry_rcu(orig_node, head, hash_entry) { 1372 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
1355 ret = 0; 1373 ret = 0;
1356 if (bao->bat_orig_del_if) 1374 if (bao->orig.del_if)
1357 ret = bao->bat_orig_del_if(orig_node, 1375 ret = bao->orig.del_if(orig_node, max_if_num,
1358 max_if_num, 1376 hard_iface->if_num);
1359 hard_iface->if_num);
1360 if (ret == -ENOMEM) 1377 if (ret == -ENOMEM)
1361 goto err; 1378 goto err;
1362 } 1379 }
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 64a8951e5844..566306bf05dc 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -46,9 +46,9 @@ batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
46void 46void
47batadv_hardif_neigh_put(struct batadv_hardif_neigh_node *hardif_neigh); 47batadv_hardif_neigh_put(struct batadv_hardif_neigh_node *hardif_neigh);
48struct batadv_neigh_node * 48struct batadv_neigh_node *
49batadv_neigh_node_new(struct batadv_orig_node *orig_node, 49batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node,
50 struct batadv_hard_iface *hard_iface, 50 struct batadv_hard_iface *hard_iface,
51 const u8 *neigh_addr); 51 const u8 *neigh_addr);
52void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node); 52void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node);
53struct batadv_neigh_node * 53struct batadv_neigh_node *
54batadv_orig_router_get(struct batadv_orig_node *orig_node, 54batadv_orig_router_get(struct batadv_orig_node *orig_node,
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 372128ddb474..6b011ff64dd8 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -21,6 +21,8 @@
21#include <asm/byteorder.h> 21#include <asm/byteorder.h>
22#include <linux/types.h> 22#include <linux/types.h>
23 23
 24#define batadv_tp_is_error(n) ((u8)(n) > 127 ? 1 : 0)
25
24/** 26/**
25 * enum batadv_packettype - types for batman-adv encapsulated packets 27 * enum batadv_packettype - types for batman-adv encapsulated packets
26 * @BATADV_IV_OGM: originator messages for B.A.T.M.A.N. IV 28 * @BATADV_IV_OGM: originator messages for B.A.T.M.A.N. IV
@@ -93,6 +95,7 @@ enum batadv_icmp_packettype {
93 BATADV_ECHO_REQUEST = 8, 95 BATADV_ECHO_REQUEST = 8,
94 BATADV_TTL_EXCEEDED = 11, 96 BATADV_TTL_EXCEEDED = 11,
95 BATADV_PARAMETER_PROBLEM = 12, 97 BATADV_PARAMETER_PROBLEM = 12,
98 BATADV_TP = 15,
96}; 99};
97 100
98/** 101/**
@@ -285,6 +288,16 @@ struct batadv_elp_packet {
285#define BATADV_ELP_HLEN sizeof(struct batadv_elp_packet) 288#define BATADV_ELP_HLEN sizeof(struct batadv_elp_packet)
286 289
287/** 290/**
291 * enum batadv_icmp_user_cmd_type - types for batman-adv icmp cmd modes
292 * @BATADV_TP_START: start a throughput meter run
293 * @BATADV_TP_STOP: stop a throughput meter run
294 */
295enum batadv_icmp_user_cmd_type {
296 BATADV_TP_START = 0,
297 BATADV_TP_STOP = 2,
298};
299
300/**
288 * struct batadv_icmp_header - common members among all the ICMP packets 301 * struct batadv_icmp_header - common members among all the ICMP packets
289 * @packet_type: batman-adv packet type, part of the general header 302 * @packet_type: batman-adv packet type, part of the general header
290 * @version: batman-adv protocol version, part of the genereal header 303 * @version: batman-adv protocol version, part of the genereal header
@@ -334,6 +347,47 @@ struct batadv_icmp_packet {
334 __be16 seqno; 347 __be16 seqno;
335}; 348};
336 349
350/**
351 * struct batadv_icmp_tp_packet - ICMP TP Meter packet
352 * @packet_type: batman-adv packet type, part of the general header
 353 * @version: batman-adv protocol version, part of the general header
 354 * @ttl: time to live for this packet, part of the general header
355 * @msg_type: ICMP packet type
356 * @dst: address of the destination node
357 * @orig: address of the source node
358 * @uid: local ICMP socket identifier
359 * @subtype: TP packet subtype (see batadv_icmp_tp_subtype)
360 * @session: TP session identifier
361 * @seqno: the TP sequence number
362 * @timestamp: time when the packet has been sent. This value is filled in a
363 * TP_MSG and echoed back in the next TP_ACK so that the sender can compute the
364 * RTT. Since it is read only by the host which wrote it, there is no need to
365 * store it using network order
366 */
367struct batadv_icmp_tp_packet {
368 u8 packet_type;
369 u8 version;
370 u8 ttl;
371 u8 msg_type; /* see ICMP message types above */
372 u8 dst[ETH_ALEN];
373 u8 orig[ETH_ALEN];
374 u8 uid;
375 u8 subtype;
376 u8 session[2];
377 __be32 seqno;
378 __be32 timestamp;
379};
380
381/**
382 * enum batadv_icmp_tp_subtype - ICMP TP Meter packet subtypes
383 * @BATADV_TP_MSG: Msg from sender to receiver
384 * @BATADV_TP_ACK: acknowledgment from receiver to sender
385 */
386enum batadv_icmp_tp_subtype {
387 BATADV_TP_MSG = 0,
388 BATADV_TP_ACK,
389};
390
337#define BATADV_RR_LEN 16 391#define BATADV_RR_LEN 16
338 392
339/** 393/**
@@ -420,6 +474,7 @@ struct batadv_unicast_4addr_packet {
420 * @dest: final destination used when routing fragments 474 * @dest: final destination used when routing fragments
421 * @orig: originator of the fragment used when merging the packet 475 * @orig: originator of the fragment used when merging the packet
422 * @no: fragment number within this sequence 476 * @no: fragment number within this sequence
477 * @priority: priority of frame, from ToS IP precedence or 802.1p
423 * @reserved: reserved byte for alignment 478 * @reserved: reserved byte for alignment
424 * @seqno: sequence identification 479 * @seqno: sequence identification
425 * @total_size: size of the merged packet 480 * @total_size: size of the merged packet
@@ -430,9 +485,11 @@ struct batadv_frag_packet {
430 u8 ttl; 485 u8 ttl;
431#if defined(__BIG_ENDIAN_BITFIELD) 486#if defined(__BIG_ENDIAN_BITFIELD)
432 u8 no:4; 487 u8 no:4;
433 u8 reserved:4; 488 u8 priority:3;
489 u8 reserved:1;
434#elif defined(__LITTLE_ENDIAN_BITFIELD) 490#elif defined(__LITTLE_ENDIAN_BITFIELD)
435 u8 reserved:4; 491 u8 reserved:1;
492 u8 priority:3;
436 u8 no:4; 493 u8 no:4;
437#else 494#else
438#error "unknown bitfield endianness" 495#error "unknown bitfield endianness"
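The hunk above trades three of the four reserved bits in the fragment header for a priority field filled from the ToS IP precedence or the 802.1p value. Because the in-memory order of C bitfields is implementation-defined, the diff keeps two declarations guarded by the endianness macros. A minimal user-space sketch of the equivalent little-endian packing (the helper names are illustrative only, not kernel API):

#include <assert.h>
#include <stdint.h>

/* Little-endian layout from the hunk above: bit 0 reserved,
 * bits 1-3 priority, bits 4-7 fragment number. */
static uint8_t frag_pack(uint8_t no, uint8_t prio)
{
	return (uint8_t)(((no & 0xfu) << 4) | ((prio & 0x7u) << 1));
}

static uint8_t frag_prio(uint8_t byte)
{
	return (byte >> 1) & 0x7u;
}

int main(void)
{
	uint8_t b = frag_pack(5, 6);

	assert(frag_prio(b) == 6);	/* priority round-trips */
	assert((b >> 4) == 5);		/* fragment number intact */
	return 0;
}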
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index f75091c983ee..af8e11933928 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -40,12 +40,15 @@
40#include "fragmentation.h" 40#include "fragmentation.h"
41#include "hard-interface.h" 41#include "hard-interface.h"
42#include "icmp_socket.h" 42#include "icmp_socket.h"
43#include "log.h"
43#include "network-coding.h" 44#include "network-coding.h"
44#include "originator.h" 45#include "originator.h"
45#include "packet.h" 46#include "packet.h"
46#include "send.h" 47#include "send.h"
47#include "soft-interface.h" 48#include "soft-interface.h"
49#include "tp_meter.h"
48#include "translation-table.h" 50#include "translation-table.h"
51#include "tvlv.h"
49 52
50static int batadv_route_unicast_packet(struct sk_buff *skb, 53static int batadv_route_unicast_packet(struct sk_buff *skb,
51 struct batadv_hard_iface *recv_if); 54 struct batadv_hard_iface *recv_if);
@@ -268,10 +271,19 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
268 icmph->ttl = BATADV_TTL; 271 icmph->ttl = BATADV_TTL;
269 272
270 res = batadv_send_skb_to_orig(skb, orig_node, NULL); 273 res = batadv_send_skb_to_orig(skb, orig_node, NULL);
271 if (res != NET_XMIT_DROP) 274 if (res == -1)
272 ret = NET_RX_SUCCESS; 275 goto out;
276
277 ret = NET_RX_SUCCESS;
273 278
274 break; 279 break;
280 case BATADV_TP:
281 if (!pskb_may_pull(skb, sizeof(struct batadv_icmp_tp_packet)))
282 goto out;
283
284 batadv_tp_meter_recv(bat_priv, skb);
285 ret = NET_RX_SUCCESS;
286 goto out;
275 default: 287 default:
276 /* drop unknown type */ 288 /* drop unknown type */
277 goto out; 289 goto out;
@@ -290,7 +302,7 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
290 struct batadv_hard_iface *primary_if = NULL; 302 struct batadv_hard_iface *primary_if = NULL;
291 struct batadv_orig_node *orig_node = NULL; 303 struct batadv_orig_node *orig_node = NULL;
292 struct batadv_icmp_packet *icmp_packet; 304 struct batadv_icmp_packet *icmp_packet;
293 int ret = NET_RX_DROP; 305 int res, ret = NET_RX_DROP;
294 306
295 icmp_packet = (struct batadv_icmp_packet *)skb->data; 307 icmp_packet = (struct batadv_icmp_packet *)skb->data;
296 308
@@ -321,7 +333,8 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
321 icmp_packet->msg_type = BATADV_TTL_EXCEEDED; 333 icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
322 icmp_packet->ttl = BATADV_TTL; 334 icmp_packet->ttl = BATADV_TTL;
323 335
324 if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP) 336 res = batadv_send_skb_to_orig(skb, orig_node, NULL);
337 if (res != -1)
325 ret = NET_RX_SUCCESS; 338 ret = NET_RX_SUCCESS;
326 339
327out: 340out:
@@ -341,7 +354,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
341 struct ethhdr *ethhdr; 354 struct ethhdr *ethhdr;
342 struct batadv_orig_node *orig_node = NULL; 355 struct batadv_orig_node *orig_node = NULL;
343 int hdr_size = sizeof(struct batadv_icmp_header); 356 int hdr_size = sizeof(struct batadv_icmp_header);
344 int ret = NET_RX_DROP; 357 int res, ret = NET_RX_DROP;
345 358
346 /* drop packet if it has not necessary minimum size */ 359 /* drop packet if it has not necessary minimum size */
347 if (unlikely(!pskb_may_pull(skb, hdr_size))) 360 if (unlikely(!pskb_may_pull(skb, hdr_size)))
@@ -374,6 +387,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
374 if (skb_cow(skb, ETH_HLEN) < 0) 387 if (skb_cow(skb, ETH_HLEN) < 0)
375 goto out; 388 goto out;
376 389
390 ethhdr = eth_hdr(skb);
377 icmph = (struct batadv_icmp_header *)skb->data; 391 icmph = (struct batadv_icmp_header *)skb->data;
378 icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph; 392 icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph;
379 if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN) 393 if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)
@@ -407,7 +421,8 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
407 icmph->ttl--; 421 icmph->ttl--;
408 422
409 /* route it */ 423 /* route it */
410 if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP) 424 res = batadv_send_skb_to_orig(skb, orig_node, recv_if);
425 if (res != -1)
411 ret = NET_RX_SUCCESS; 426 ret = NET_RX_SUCCESS;
412 427
413out: 428out:
@@ -468,7 +483,7 @@ batadv_find_router(struct batadv_priv *bat_priv,
468 struct batadv_orig_node *orig_node, 483 struct batadv_orig_node *orig_node,
469 struct batadv_hard_iface *recv_if) 484 struct batadv_hard_iface *recv_if)
470{ 485{
471 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops; 486 struct batadv_algo_ops *bao = bat_priv->algo_ops;
472 struct batadv_neigh_node *first_candidate_router = NULL; 487 struct batadv_neigh_node *first_candidate_router = NULL;
473 struct batadv_neigh_node *next_candidate_router = NULL; 488 struct batadv_neigh_node *next_candidate_router = NULL;
474 struct batadv_neigh_node *router, *cand_router = NULL; 489 struct batadv_neigh_node *router, *cand_router = NULL;
@@ -522,9 +537,9 @@ batadv_find_router(struct batadv_priv *bat_priv,
522 /* alternative candidate should be good enough to be 537 /* alternative candidate should be good enough to be
523 * considered 538 * considered
524 */ 539 */
525 if (!bao->bat_neigh_is_similar_or_better(cand_router, 540 if (!bao->neigh.is_similar_or_better(cand_router,
526 cand->if_outgoing, 541 cand->if_outgoing, router,
527 router, recv_if)) 542 recv_if))
528 goto next; 543 goto next;
529 544
530 /* don't use the same router twice */ 545 /* don't use the same router twice */
@@ -644,6 +659,8 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
644 659
645 len = skb->len; 660 len = skb->len;
646 res = batadv_send_skb_to_orig(skb, orig_node, recv_if); 661 res = batadv_send_skb_to_orig(skb, orig_node, recv_if);
662 if (res == -1)
663 goto out;
647 664
648 /* translate transmit result into receive result */ 665 /* translate transmit result into receive result */
649 if (res == NET_XMIT_SUCCESS) { 666 if (res == NET_XMIT_SUCCESS) {
@@ -651,13 +668,10 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
651 batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD); 668 batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
652 batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES, 669 batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
653 len + ETH_HLEN); 670 len + ETH_HLEN);
654
655 ret = NET_RX_SUCCESS;
656 } else if (res == -EINPROGRESS) {
657 /* skb was buffered and consumed */
658 ret = NET_RX_SUCCESS;
659 } 671 }
660 672
673 ret = NET_RX_SUCCESS;
674
661out: 675out:
662 if (orig_node) 676 if (orig_node)
663 batadv_orig_node_put(orig_node); 677 batadv_orig_node_put(orig_node);
@@ -1006,6 +1020,8 @@ int batadv_recv_frag_packet(struct sk_buff *skb,
1006 if (!orig_node_src) 1020 if (!orig_node_src)
1007 goto out; 1021 goto out;
1008 1022
1023 skb->priority = frag_packet->priority + 256;
1024
1009 /* Route the fragment if it is not for us and too big to be merged. */ 1025 /* Route the fragment if it is not for us and too big to be merged. */
1010 if (!batadv_is_my_mac(bat_priv, frag_packet->dest) && 1026 if (!batadv_is_my_mac(bat_priv, frag_packet->dest) &&
1011 batadv_frag_skb_fwd(skb, recv_if, orig_node_src)) { 1027 batadv_frag_skb_fwd(skb, recv_if, orig_node_src)) {
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index b1a4e8a811c8..3a10d87b4b76 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -20,10 +20,11 @@
20 20
21#include <linux/atomic.h> 21#include <linux/atomic.h>
22#include <linux/byteorder/generic.h> 22#include <linux/byteorder/generic.h>
23#include <linux/errno.h>
23#include <linux/etherdevice.h> 24#include <linux/etherdevice.h>
24#include <linux/fs.h> 25#include <linux/fs.h>
25#include <linux/if_ether.h>
26#include <linux/if.h> 26#include <linux/if.h>
27#include <linux/if_ether.h>
27#include <linux/jiffies.h> 28#include <linux/jiffies.h>
28#include <linux/kernel.h> 29#include <linux/kernel.h>
29#include <linux/kref.h> 30#include <linux/kref.h>
@@ -42,6 +43,7 @@
42#include "fragmentation.h" 43#include "fragmentation.h"
43#include "gateway_client.h" 44#include "gateway_client.h"
44#include "hard-interface.h" 45#include "hard-interface.h"
46#include "log.h"
45#include "network-coding.h" 47#include "network-coding.h"
46#include "originator.h" 48#include "originator.h"
47#include "routing.h" 49#include "routing.h"
@@ -71,6 +73,7 @@ int batadv_send_skb_packet(struct sk_buff *skb,
71{ 73{
72 struct batadv_priv *bat_priv; 74 struct batadv_priv *bat_priv;
73 struct ethhdr *ethhdr; 75 struct ethhdr *ethhdr;
76 int ret;
74 77
75 bat_priv = netdev_priv(hard_iface->soft_iface); 78 bat_priv = netdev_priv(hard_iface->soft_iface);
76 79
@@ -108,8 +111,15 @@ int batadv_send_skb_packet(struct sk_buff *skb,
108 /* dev_queue_xmit() returns a negative result on error. However on 111 /* dev_queue_xmit() returns a negative result on error. However on
109 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP 112 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
110 * (which is > 0). This will not be treated as an error. 113 * (which is > 0). This will not be treated as an error.
114 *
115 * a negative value cannot be returned because it could be interpreted
116 * as an unconsumed skb by callers of batadv_send_skb_to_orig.
111 */ 117 */
112 return dev_queue_xmit(skb); 118 ret = dev_queue_xmit(skb);
119 if (ret < 0)
120 ret = NET_XMIT_DROP;
121
122 return ret;
113send_skb_err: 123send_skb_err:
114 kfree_skb(skb); 124 kfree_skb(skb);
115 return NET_XMIT_DROP; 125 return NET_XMIT_DROP;
@@ -155,8 +165,11 @@ int batadv_send_unicast_skb(struct sk_buff *skb,
155 * host, NULL can be passed as recv_if and no interface alternating is 165 * host, NULL can be passed as recv_if and no interface alternating is
156 * attempted. 166 * attempted.
157 * 167 *
158 * Return: NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or 168 * Return: -1 on failure (and the skb is not consumed), -EINPROGRESS if the
159 * -EINPROGRESS if the skb is buffered for later transmit. 169 * skb is buffered for later transmit, or the NET_XMIT status returned by the
170 * lower routine if the packet has been passed down.
171 *
172 * If the returning value is not -1 the skb has been consumed.
160 */ 173 */
161int batadv_send_skb_to_orig(struct sk_buff *skb, 174int batadv_send_skb_to_orig(struct sk_buff *skb,
162 struct batadv_orig_node *orig_node, 175 struct batadv_orig_node *orig_node,
@@ -164,7 +177,7 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
164{ 177{
165 struct batadv_priv *bat_priv = orig_node->bat_priv; 178 struct batadv_priv *bat_priv = orig_node->bat_priv;
166 struct batadv_neigh_node *neigh_node; 179 struct batadv_neigh_node *neigh_node;
167 int ret = NET_XMIT_DROP; 180 int ret = -1;
168 181
169 /* batadv_find_router() increases neigh_nodes refcount if found. */ 182 /* batadv_find_router() increases neigh_nodes refcount if found. */
170 neigh_node = batadv_find_router(bat_priv, orig_node, recv_if); 183 neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
@@ -177,8 +190,7 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
177 if (atomic_read(&bat_priv->fragmentation) && 190 if (atomic_read(&bat_priv->fragmentation) &&
178 skb->len > neigh_node->if_incoming->net_dev->mtu) { 191 skb->len > neigh_node->if_incoming->net_dev->mtu) {
179 /* Fragment and send packet. */ 192 /* Fragment and send packet. */
180 if (batadv_frag_send_packet(skb, orig_node, neigh_node)) 193 ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
181 ret = NET_XMIT_SUCCESS;
182 194
183 goto out; 195 goto out;
184 } 196 }
@@ -187,12 +199,10 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
187 * (i.e. being forwarded). If the packet originates from this node or if 199 * (i.e. being forwarded). If the packet originates from this node or if
188 * network coding fails, then send the packet as usual. 200 * network coding fails, then send the packet as usual.
189 */ 201 */
190 if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) { 202 if (recv_if && batadv_nc_skb_forward(skb, neigh_node))
191 ret = -EINPROGRESS; 203 ret = -EINPROGRESS;
192 } else { 204 else
193 batadv_send_unicast_skb(skb, neigh_node); 205 ret = batadv_send_unicast_skb(skb, neigh_node);
194 ret = NET_XMIT_SUCCESS;
195 }
196 206
197out: 207out:
198 if (neigh_node) 208 if (neigh_node)
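The hunks above switch batadv_send_skb_to_orig() to a new contract: -1 means the skb could not be handed to a router and is still owned by the caller, while any other value (a NET_XMIT status or -EINPROGRESS) means the skb has been consumed. A toy stand-alone model of the resulting caller pattern, with invented scaffolding in place of the real kernel types:

#include <stdio.h>
#include <stdlib.h>

#define NET_XMIT_SUCCESS 0x00

/* toy stand-in for struct sk_buff */
struct sk_buff { char payload[64]; };

/* models the new contract: -1 means "no route, skb NOT consumed";
 * anything else means the skb has been handed off and consumed */
static int send_skb_to_orig(struct sk_buff *skb, int have_router)
{
	if (!have_router)
		return -1;
	/* ... fragment, network-code or transmit here ... */
	free(skb);
	return NET_XMIT_SUCCESS;
}

int main(void)
{
	struct sk_buff *skb = calloc(1, sizeof(*skb));
	int res = send_skb_to_orig(skb, 0);

	if (res == -1)
		free(skb);	/* mirrors the kfree_skb() calls in the diff */
	else
		printf("consumed with status %d\n", res);
	return 0;
}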
@@ -318,7 +328,7 @@ int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
318{ 328{
319 struct batadv_unicast_packet *unicast_packet; 329 struct batadv_unicast_packet *unicast_packet;
320 struct ethhdr *ethhdr; 330 struct ethhdr *ethhdr;
321 int ret = NET_XMIT_DROP; 331 int res, ret = NET_XMIT_DROP;
322 332
323 if (!orig_node) 333 if (!orig_node)
324 goto out; 334 goto out;
@@ -355,7 +365,8 @@ int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
355 if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) 365 if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
356 unicast_packet->ttvn = unicast_packet->ttvn - 1; 366 unicast_packet->ttvn = unicast_packet->ttvn - 1;
357 367
358 if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP) 368 res = batadv_send_skb_to_orig(skb, orig_node, NULL);
369 if (res != -1)
359 ret = NET_XMIT_SUCCESS; 370 ret = NET_XMIT_SUCCESS;
360 371
361out: 372out:
@@ -428,27 +439,7 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
428 orig_node, vid); 439 orig_node, vid);
429} 440}
430 441
431void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface) 442void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
432{
433 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
434
435 if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
436 (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
437 return;
438
439 /* the interface gets activated here to avoid race conditions between
440 * the moment of activating the interface in
441 * hardif_activate_interface() where the originator mac is set and
442 * outdated packets (especially uninitialized mac addresses) in the
443 * packet queue
444 */
445 if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
446 hard_iface->if_status = BATADV_IF_ACTIVE;
447
448 bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
449}
450
451static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
452{ 443{
453 kfree_skb(forw_packet->skb); 444 kfree_skb(forw_packet->skb);
454 if (forw_packet->if_incoming) 445 if (forw_packet->if_incoming)
@@ -604,45 +595,6 @@ out:
604 atomic_inc(&bat_priv->bcast_queue_left); 595 atomic_inc(&bat_priv->bcast_queue_left);
605} 596}
606 597
607void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
608{
609 struct delayed_work *delayed_work;
610 struct batadv_forw_packet *forw_packet;
611 struct batadv_priv *bat_priv;
612
613 delayed_work = to_delayed_work(work);
614 forw_packet = container_of(delayed_work, struct batadv_forw_packet,
615 delayed_work);
616 bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
617 spin_lock_bh(&bat_priv->forw_bat_list_lock);
618 hlist_del(&forw_packet->list);
619 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
620
621 if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
622 goto out;
623
624 bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);
625
626 /* we have to have at least one packet in the queue to determine the
627 * queues wake up time unless we are shutting down.
628 *
629 * only re-schedule if this is the "original" copy, e.g. the OGM of the
630 * primary interface should only be rescheduled once per period, but
631 * this function will be called for the forw_packet instances of the
632 * other secondary interfaces as well.
633 */
634 if (forw_packet->own &&
635 forw_packet->if_incoming == forw_packet->if_outgoing)
636 batadv_schedule_bat_ogm(forw_packet->if_incoming);
637
638out:
639 /* don't count own packet */
640 if (!forw_packet->own)
641 atomic_inc(&bat_priv->batman_queue_left);
642
643 batadv_forw_packet_free(forw_packet);
644}
645
646void 598void
647batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, 599batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
648 const struct batadv_hard_iface *hard_iface) 600 const struct batadv_hard_iface *hard_iface)
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 6fd7270d8ce6..7cecb7563b45 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -26,8 +26,8 @@
26#include "packet.h" 26#include "packet.h"
27 27
28struct sk_buff; 28struct sk_buff;
29struct work_struct;
30 29
30void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet);
31int batadv_send_skb_to_orig(struct sk_buff *skb, 31int batadv_send_skb_to_orig(struct sk_buff *skb,
32 struct batadv_orig_node *orig_node, 32 struct batadv_orig_node *orig_node,
33 struct batadv_hard_iface *recv_if); 33 struct batadv_hard_iface *recv_if);
@@ -38,11 +38,9 @@ int batadv_send_broadcast_skb(struct sk_buff *skb,
38 struct batadv_hard_iface *hard_iface); 38 struct batadv_hard_iface *hard_iface);
39int batadv_send_unicast_skb(struct sk_buff *skb, 39int batadv_send_unicast_skb(struct sk_buff *skb,
40 struct batadv_neigh_node *neigh_node); 40 struct batadv_neigh_node *neigh_node);
41void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface);
42int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv, 41int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
43 const struct sk_buff *skb, 42 const struct sk_buff *skb,
44 unsigned long delay); 43 unsigned long delay);
45void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work);
46void 44void
47batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, 45batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
48 const struct batadv_hard_iface *hard_iface); 46 const struct batadv_hard_iface *hard_iface);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 343d2c904399..7527c0652dd5 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -48,6 +48,7 @@
48#include <linux/types.h> 48#include <linux/types.h>
49#include <linux/workqueue.h> 49#include <linux/workqueue.h>
50 50
51#include "bat_algo.h"
51#include "bridge_loop_avoidance.h" 52#include "bridge_loop_avoidance.h"
52#include "debugfs.h" 53#include "debugfs.h"
53#include "distributed-arp-table.h" 54#include "distributed-arp-table.h"
@@ -255,7 +256,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
255 if (batadv_compare_eth(ethhdr->h_dest, ectp_addr)) 256 if (batadv_compare_eth(ethhdr->h_dest, ectp_addr))
256 goto dropped; 257 goto dropped;
257 258
258 gw_mode = atomic_read(&bat_priv->gw_mode); 259 gw_mode = atomic_read(&bat_priv->gw.mode);
259 if (is_multicast_ether_addr(ethhdr->h_dest)) { 260 if (is_multicast_ether_addr(ethhdr->h_dest)) {
260 /* if gw mode is off, broadcast every packet */ 261 /* if gw mode is off, broadcast every packet */
261 if (gw_mode == BATADV_GW_MODE_OFF) { 262 if (gw_mode == BATADV_GW_MODE_OFF) {
@@ -808,6 +809,10 @@ static int batadv_softif_init_late(struct net_device *dev)
808 atomic_set(&bat_priv->distributed_arp_table, 1); 809 atomic_set(&bat_priv->distributed_arp_table, 1);
809#endif 810#endif
810#ifdef CONFIG_BATMAN_ADV_MCAST 811#ifdef CONFIG_BATMAN_ADV_MCAST
812 bat_priv->mcast.querier_ipv4.exists = false;
813 bat_priv->mcast.querier_ipv4.shadowing = false;
814 bat_priv->mcast.querier_ipv6.exists = false;
815 bat_priv->mcast.querier_ipv6.shadowing = false;
811 bat_priv->mcast.flags = BATADV_NO_FLAGS; 816 bat_priv->mcast.flags = BATADV_NO_FLAGS;
812 atomic_set(&bat_priv->multicast_mode, 1); 817 atomic_set(&bat_priv->multicast_mode, 1);
813 atomic_set(&bat_priv->mcast.num_disabled, 0); 818 atomic_set(&bat_priv->mcast.num_disabled, 0);
@@ -815,8 +820,8 @@ static int batadv_softif_init_late(struct net_device *dev)
815 atomic_set(&bat_priv->mcast.num_want_all_ipv4, 0); 820 atomic_set(&bat_priv->mcast.num_want_all_ipv4, 0);
816 atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0); 821 atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
817#endif 822#endif
818 atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF); 823 atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
819 atomic_set(&bat_priv->gw_sel_class, 20); 824 atomic_set(&bat_priv->gw.sel_class, 20);
820 atomic_set(&bat_priv->gw.bandwidth_down, 100); 825 atomic_set(&bat_priv->gw.bandwidth_down, 100);
821 atomic_set(&bat_priv->gw.bandwidth_up, 20); 826 atomic_set(&bat_priv->gw.bandwidth_up, 20);
822 atomic_set(&bat_priv->orig_interval, 1000); 827 atomic_set(&bat_priv->orig_interval, 1000);
@@ -837,6 +842,8 @@ static int batadv_softif_init_late(struct net_device *dev)
837#ifdef CONFIG_BATMAN_ADV_BLA 842#ifdef CONFIG_BATMAN_ADV_BLA
838 atomic_set(&bat_priv->bla.num_requests, 0); 843 atomic_set(&bat_priv->bla.num_requests, 0);
839#endif 844#endif
845 atomic_set(&bat_priv->tp_num, 0);
846
840 bat_priv->tt.last_changeset = NULL; 847 bat_priv->tt.last_changeset = NULL;
841 bat_priv->tt.last_changeset_len = 0; 848 bat_priv->tt.last_changeset_len = 0;
842 bat_priv->isolation_mark = 0; 849 bat_priv->isolation_mark = 0;
@@ -1033,7 +1040,9 @@ void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
1033static void batadv_softif_destroy_netlink(struct net_device *soft_iface, 1040static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
1034 struct list_head *head) 1041 struct list_head *head)
1035{ 1042{
1043 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
1036 struct batadv_hard_iface *hard_iface; 1044 struct batadv_hard_iface *hard_iface;
1045 struct batadv_softif_vlan *vlan;
1037 1046
1038 list_for_each_entry(hard_iface, &batadv_hardif_list, list) { 1047 list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
1039 if (hard_iface->soft_iface == soft_iface) 1048 if (hard_iface->soft_iface == soft_iface)
@@ -1041,6 +1050,13 @@ static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
1041 BATADV_IF_CLEANUP_KEEP); 1050 BATADV_IF_CLEANUP_KEEP);
1042 } 1051 }
1043 1052
1053 /* destroy the "untagged" VLAN */
1054 vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
1055 if (vlan) {
1056 batadv_softif_destroy_vlan(bat_priv, vlan);
1057 batadv_softif_vlan_put(vlan);
1058 }
1059
1044 batadv_sysfs_del_meshif(soft_iface); 1060 batadv_sysfs_del_meshif(soft_iface);
1045 unregister_netdevice_queue(soft_iface, head); 1061 unregister_netdevice_queue(soft_iface, head);
1046} 1062}
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 414b2074165f..fe9ca94ddee2 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -25,8 +25,8 @@
25#include <linux/fs.h> 25#include <linux/fs.h>
26#include <linux/if.h> 26#include <linux/if.h>
27#include <linux/if_vlan.h> 27#include <linux/if_vlan.h>
28#include <linux/kref.h>
29#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/kref.h>
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/printk.h> 31#include <linux/printk.h>
32#include <linux/rculist.h> 32#include <linux/rculist.h>
@@ -38,11 +38,12 @@
38#include <linux/string.h> 38#include <linux/string.h>
39#include <linux/stringify.h> 39#include <linux/stringify.h>
40 40
41#include "bridge_loop_avoidance.h"
41#include "distributed-arp-table.h" 42#include "distributed-arp-table.h"
42#include "gateway_client.h" 43#include "gateway_client.h"
43#include "gateway_common.h" 44#include "gateway_common.h"
44#include "bridge_loop_avoidance.h"
45#include "hard-interface.h" 45#include "hard-interface.h"
46#include "log.h"
46#include "network-coding.h" 47#include "network-coding.h"
47#include "packet.h" 48#include "packet.h"
48#include "soft-interface.h" 49#include "soft-interface.h"
@@ -389,12 +390,12 @@ static int batadv_store_uint_attr(const char *buff, size_t count,
389 return count; 390 return count;
390} 391}
391 392
392static inline ssize_t 393static ssize_t __batadv_store_uint_attr(const char *buff, size_t count,
393__batadv_store_uint_attr(const char *buff, size_t count, 394 int min, int max,
394 int min, int max, 395 void (*post_func)(struct net_device *),
395 void (*post_func)(struct net_device *), 396 const struct attribute *attr,
396 const struct attribute *attr, 397 atomic_t *attr_store,
397 atomic_t *attr_store, struct net_device *net_dev) 398 struct net_device *net_dev)
398{ 399{
399 int ret; 400 int ret;
400 401
@@ -411,7 +412,7 @@ static ssize_t batadv_show_bat_algo(struct kobject *kobj,
411{ 412{
412 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); 413 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
413 414
414 return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name); 415 return sprintf(buff, "%s\n", bat_priv->algo_ops->name);
415} 416}
416 417
417static void batadv_post_gw_reselect(struct net_device *net_dev) 418static void batadv_post_gw_reselect(struct net_device *net_dev)
@@ -427,7 +428,7 @@ static ssize_t batadv_show_gw_mode(struct kobject *kobj, struct attribute *attr,
427 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); 428 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
428 int bytes_written; 429 int bytes_written;
429 430
430 switch (atomic_read(&bat_priv->gw_mode)) { 431 switch (atomic_read(&bat_priv->gw.mode)) {
431 case BATADV_GW_MODE_CLIENT: 432 case BATADV_GW_MODE_CLIENT:
432 bytes_written = sprintf(buff, "%s\n", 433 bytes_written = sprintf(buff, "%s\n",
433 BATADV_GW_MODE_CLIENT_NAME); 434 BATADV_GW_MODE_CLIENT_NAME);
@@ -476,10 +477,10 @@ static ssize_t batadv_store_gw_mode(struct kobject *kobj,
476 return -EINVAL; 477 return -EINVAL;
477 } 478 }
478 479
479 if (atomic_read(&bat_priv->gw_mode) == gw_mode_tmp) 480 if (atomic_read(&bat_priv->gw.mode) == gw_mode_tmp)
480 return count; 481 return count;
481 482
482 switch (atomic_read(&bat_priv->gw_mode)) { 483 switch (atomic_read(&bat_priv->gw.mode)) {
483 case BATADV_GW_MODE_CLIENT: 484 case BATADV_GW_MODE_CLIENT:
484 curr_gw_mode_str = BATADV_GW_MODE_CLIENT_NAME; 485 curr_gw_mode_str = BATADV_GW_MODE_CLIENT_NAME;
485 break; 486 break;
@@ -508,7 +509,7 @@ static ssize_t batadv_store_gw_mode(struct kobject *kobj,
508 * state 509 * state
509 */ 510 */
510 batadv_gw_check_client_stop(bat_priv); 511 batadv_gw_check_client_stop(bat_priv);
511 atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp); 512 atomic_set(&bat_priv->gw.mode, (unsigned int)gw_mode_tmp);
512 batadv_gw_tvlv_container_update(bat_priv); 513 batadv_gw_tvlv_container_update(bat_priv);
513 return count; 514 return count;
514} 515}
@@ -624,7 +625,7 @@ BATADV_ATTR_SIF_UINT(orig_interval, orig_interval, S_IRUGO | S_IWUSR,
624 2 * BATADV_JITTER, INT_MAX, NULL); 625 2 * BATADV_JITTER, INT_MAX, NULL);
625BATADV_ATTR_SIF_UINT(hop_penalty, hop_penalty, S_IRUGO | S_IWUSR, 0, 626BATADV_ATTR_SIF_UINT(hop_penalty, hop_penalty, S_IRUGO | S_IWUSR, 0,
626 BATADV_TQ_MAX_VALUE, NULL); 627 BATADV_TQ_MAX_VALUE, NULL);
627BATADV_ATTR_SIF_UINT(gw_sel_class, gw_sel_class, S_IRUGO | S_IWUSR, 1, 628BATADV_ATTR_SIF_UINT(gw_sel_class, gw.sel_class, S_IRUGO | S_IWUSR, 1,
628 BATADV_TQ_MAX_VALUE, batadv_post_gw_reselect); 629 BATADV_TQ_MAX_VALUE, batadv_post_gw_reselect);
629static BATADV_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, batadv_show_gw_bwidth, 630static BATADV_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, batadv_show_gw_bwidth,
630 batadv_store_gw_bwidth); 631 batadv_store_gw_bwidth);
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
new file mode 100644
index 000000000000..2333777f919d
--- /dev/null
+++ b/net/batman-adv/tp_meter.c
@@ -0,0 +1,1507 @@
1/* Copyright (C) 2012-2016 B.A.T.M.A.N. contributors:
2 *
3 * Edo Monticelli, Antonio Quartulli
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "tp_meter.h"
19#include "main.h"
20
21#include <linux/atomic.h>
22#include <linux/bug.h>
23#include <linux/byteorder/generic.h>
24#include <linux/cache.h>
25#include <linux/compiler.h>
26#include <linux/device.h>
27#include <linux/etherdevice.h>
28#include <linux/fs.h>
29#include <linux/if_ether.h>
30#include <linux/jiffies.h>
31#include <linux/kernel.h>
32#include <linux/kref.h>
33#include <linux/kthread.h>
34#include <linux/list.h>
35#include <linux/netdevice.h>
36#include <linux/param.h>
37#include <linux/printk.h>
38#include <linux/random.h>
39#include <linux/rculist.h>
40#include <linux/rcupdate.h>
41#include <linux/sched.h>
42#include <linux/skbuff.h>
43#include <linux/slab.h>
44#include <linux/spinlock.h>
45#include <linux/stddef.h>
46#include <linux/string.h>
47#include <linux/timer.h>
48#include <linux/wait.h>
49#include <linux/workqueue.h>
50#include <uapi/linux/batman_adv.h>
51
52#include "hard-interface.h"
53#include "log.h"
54#include "netlink.h"
55#include "originator.h"
56#include "packet.h"
57#include "send.h"
58
59/**
60 * BATADV_TP_DEF_TEST_LENGTH - Default test length if not specified by the user
61 * in milliseconds
62 */
63#define BATADV_TP_DEF_TEST_LENGTH 10000
64
65/**
66 * BATADV_TP_AWND - Advertised window by the receiver (in bytes)
67 */
68#define BATADV_TP_AWND 0x20000000
69
70/**
71 * BATADV_TP_RECV_TIMEOUT - Receiver activity timeout. If the receiver does not
72 * get anything for this many milliseconds, the connection is killed
73 */
74#define BATADV_TP_RECV_TIMEOUT 1000
75
76/**
77 * BATADV_TP_MAX_RTO - Maximum sender timeout. If the sender RTO gets beyond
78 * this many milliseconds, the receiver is considered unreachable and the
79 * connection is killed
80 */
81#define BATADV_TP_MAX_RTO 30000
82
83/**
84 * BATADV_TP_FIRST_SEQ - First seqno of each session. The number is rather high
85 * in order to immediately trigger a wrap around (test purposes)
86 */
87#define BATADV_TP_FIRST_SEQ ((u32)-1 - 2000)
88
89/**
90 * BATADV_TP_PLEN - length of the payload (data after the batadv_unicast header)
91 * to simulate
92 */
93#define BATADV_TP_PLEN (BATADV_TP_PACKET_LEN - ETH_HLEN - \
94 sizeof(struct batadv_unicast_packet))
95
96static u8 batadv_tp_prerandom[4096] __read_mostly;
97
98/**
99 * batadv_tp_session_cookie - generate session cookie based on session ids
100 * @session: TP session identifier
101 * @icmp_uid: icmp pseudo uid of the tp session
102 *
103 * Return: 32 bit tp_meter session cookie
104 */
105static u32 batadv_tp_session_cookie(const u8 session[2], u8 icmp_uid)
106{
107 u32 cookie;
108
109 cookie = icmp_uid << 16;
110 cookie |= session[0] << 8;
111 cookie |= session[1];
112
113 return cookie;
114}
115
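batadv_tp_session_cookie() places the icmp uid in bits 16-23 and the two session bytes beneath it, so a cookie can be decomposed again by shifting. A quick self-contained check of that layout (sample values chosen for the demo):

#include <assert.h>
#include <stdint.h>

static uint32_t tp_session_cookie(const uint8_t session[2], uint8_t icmp_uid)
{
	uint32_t cookie;

	cookie = (uint32_t)icmp_uid << 16;
	cookie |= (uint32_t)session[0] << 8;
	cookie |= session[1];
	return cookie;
}

int main(void)
{
	const uint8_t session[2] = { 0xab, 0xcd };

	/* 0x42 in bits 16-23, 0xab in bits 8-15, 0xcd in bits 0-7 */
	assert(tp_session_cookie(session, 0x42) == 0x42abcd);
	return 0;
}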
116/**
117 * batadv_tp_cwnd - compute the new cwnd size
118 * @base: base cwnd size value
119 * @increment: the value to add to base to get the new size
120 * @min: minimum cwnd value (usually MSS)
121 *
122 * Returns the new cwnd size, ensuring it does not exceed the Advertised
123 * Receiver Window size. It is wrap-around safe.
124 * For details refer to Section 3.1 of RFC5681
125 *
126 * Return: new congestion window size in bytes
127 */
128static u32 batadv_tp_cwnd(u32 base, u32 increment, u32 min)
129{
130 u32 new_size = base + increment;
131
132 /* check for wrap-around */
133 if (new_size < base)
134 new_size = (u32)ULONG_MAX;
135
136 new_size = min_t(u32, new_size, BATADV_TP_AWND);
137
138 return max_t(u32, new_size, min);
139}
140
141/**
142 * batadv_tp_updated_cwnd - update the Congestion Windows
143 * @tp_vars: the private data of the current TP meter session
144 * @mss: maximum segment size of transmission
145 *
146 * 1) if the session is in Slow Start, the CWND has to be increased by 1
147 * MSS every unique received ACK
148 * 2) if the session is in Congestion Avoidance, the CWND has to be
149 * increased by MSS * MSS / CWND for every unique received ACK
150 */
151static void batadv_tp_update_cwnd(struct batadv_tp_vars *tp_vars, u32 mss)
152{
153 spin_lock_bh(&tp_vars->cwnd_lock);
154
155 /* slow start... */
156 if (tp_vars->cwnd <= tp_vars->ss_threshold) {
157 tp_vars->dec_cwnd = 0;
158 tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);
159 spin_unlock_bh(&tp_vars->cwnd_lock);
160 return;
161 }
162
163 /* increment CWND by at least 1 (section 3.1 of RFC5681) */
164 tp_vars->dec_cwnd += max_t(u32, 1U << 3,
165 ((mss * mss) << 6) / (tp_vars->cwnd << 3));
166 if (tp_vars->dec_cwnd < (mss << 3)) {
167 spin_unlock_bh(&tp_vars->cwnd_lock);
168 return;
169 }
170
171 tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);
172 tp_vars->dec_cwnd = 0;
173
174 spin_unlock_bh(&tp_vars->cwnd_lock);
175}
176
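During congestion avoidance the per-ACK growth mss*mss/cwnd is usually smaller than one byte, so batadv_tp_update_cwnd() accumulates it in dec_cwnd with three extra fraction bits and only enlarges cwnd once a full MSS (mss << 3 in those units) has been collected. A user-space replay of the same arithmetic, with made-up constants:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mss = 1400, cwnd = 14000, dec_cwnd = 0;
	unsigned int acks = 0;

	/* feed unique ACKs until a full MSS worth has accumulated */
	while (dec_cwnd < (mss << 3)) {
		uint32_t inc = ((mss * mss) << 6) / (cwnd << 3);

		if (inc < (1U << 3))
			inc = 1U << 3;	/* at least one byte per ACK */
		dec_cwnd += inc;
		acks++;
	}
	cwnd += mss;

	/* with cwnd = 10 * mss this prints growth after 10 ACKs,
	 * i.e. roughly one MSS per round trip, as RFC5681 intends */
	printf("cwnd grew to %u after %u ACKs\n", cwnd, acks);
	return 0;
}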
177/**
178 * batadv_tp_update_rto - calculate new retransmission timeout
179 * @tp_vars: the private data of the current TP meter session
180 * @new_rtt: new roundtrip time in msec
181 */
182static void batadv_tp_update_rto(struct batadv_tp_vars *tp_vars,
183 u32 new_rtt)
184{
185 long m = new_rtt;
186
187 /* RTT update
188 * Details in Section 2.2 and 2.3 of RFC6298
189 *
190 * It's tricky to understand. Don't lose hair please.
191 * Inspired by tcp_rtt_estimator() tcp_input.c
192 */
193 if (tp_vars->srtt != 0) {
194 m -= (tp_vars->srtt >> 3); /* m is now error in rtt est */
195 tp_vars->srtt += m; /* rtt = 7/8 srtt + 1/8 new */
196 if (m < 0)
197 m = -m;
198
199 m -= (tp_vars->rttvar >> 2);
200 tp_vars->rttvar += m; /* mdev ~= 3/4 rttvar + 1/4 new */
201 } else {
202 /* first measure getting in */
203 tp_vars->srtt = m << 3; /* take the measured time to be srtt */
204 tp_vars->rttvar = m << 1; /* new_rtt / 2 */
205 }
206
207 /* rto = srtt + 4 * rttvar.
208 * rttvar is scaled by 4, therefore doesn't need to be multiplied
209 */
210 tp_vars->rto = (tp_vars->srtt >> 3) + tp_vars->rttvar;
211}
212
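As in tcp_rtt_estimator(), srtt is kept scaled by 8 and rttvar by 4, so the RFC6298 smoothing reduces to shifts and adds, and rto = (srtt >> 3) + rttvar equals SRTT + 4*RTTVAR without any multiplication. The sketch below replays the estimator on a few invented samples:

#include <stdio.h>

/* same fixed point as the diff: srtt = 8*SRTT, rttvar = 4*RTTVAR */
static void update_rto(long *srtt, long *rttvar, long *rto, long new_rtt)
{
	long m = new_rtt;

	if (*srtt != 0) {
		m -= (*srtt >> 3);	/* error vs current estimate */
		*srtt += m;		/* srtt = 7/8 old + 1/8 new */
		if (m < 0)
			m = -m;
		m -= (*rttvar >> 2);
		*rttvar += m;		/* rttvar = 3/4 old + 1/4 |err| */
	} else {
		*srtt = m << 3;		/* first sample seeds srtt */
		*rttvar = m << 1;	/* new_rtt / 2 */
	}
	*rto = (*srtt >> 3) + *rttvar;	/* srtt + 4*RTTVAR, pre-scaled */
}

int main(void)
{
	long srtt = 0, rttvar = 0, rto = 0;
	long samples[] = { 100, 120, 90, 110 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		update_rto(&srtt, &rttvar, &rto, samples[i]);
		printf("rtt=%ldms -> rto=%ldms\n", samples[i], rto);
	}
	return 0;	/* first sample yields rto = 100 + 4*50 = 300ms */
}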
213/**
214 * batadv_tp_batctl_notify - send client status result to client
215 * @reason: reason for tp meter session stop
216 * @dst: destination of tp_meter session
217 * @bat_priv: the bat priv with all the soft interface information
218 * @start_time: start of transmission in jiffies
219 * @total_sent: bytes acked to the receiver
220 * @cookie: cookie of tp_meter session
221 */
222static void batadv_tp_batctl_notify(enum batadv_tp_meter_reason reason,
223 const u8 *dst, struct batadv_priv *bat_priv,
224 unsigned long start_time, u64 total_sent,
225 u32 cookie)
226{
227 u32 test_time;
228 u8 result;
229 u32 total_bytes;
230
231 if (!batadv_tp_is_error(reason)) {
232 result = BATADV_TP_REASON_COMPLETE;
233 test_time = jiffies_to_msecs(jiffies - start_time);
234 total_bytes = total_sent;
235 } else {
236 result = reason;
237 test_time = 0;
238 total_bytes = 0;
239 }
240
241 batadv_netlink_tpmeter_notify(bat_priv, dst, result, test_time,
242 total_bytes, cookie);
243}
244
245/**
246 * batadv_tp_batctl_error_notify - send client error result to client
247 * @reason: reason for tp meter session stop
248 * @dst: destination of tp_meter session
249 * @bat_priv: the bat priv with all the soft interface information
250 * @cookie: cookie of tp_meter session
251 */
252static void batadv_tp_batctl_error_notify(enum batadv_tp_meter_reason reason,
253 const u8 *dst,
254 struct batadv_priv *bat_priv,
255 u32 cookie)
256{
257 batadv_tp_batctl_notify(reason, dst, bat_priv, 0, 0, cookie);
258}
259
260/**
261 * batadv_tp_list_find - find a tp_vars object in the global list
262 * @bat_priv: the bat priv with all the soft interface information
263 * @dst: the other endpoint MAC address to look for
264 *
265 * Look for a tp_vars object matching dst as end_point and return it after
266 * having incremented the refcounter. Return NULL if not found
267 *
268 * Return: matching tp_vars or NULL when no tp_vars with @dst was found
269 */
270static struct batadv_tp_vars *batadv_tp_list_find(struct batadv_priv *bat_priv,
271 const u8 *dst)
272{
273 struct batadv_tp_vars *pos, *tp_vars = NULL;
274
275 rcu_read_lock();
276 hlist_for_each_entry_rcu(pos, &bat_priv->tp_list, list) {
277 if (!batadv_compare_eth(pos->other_end, dst))
278 continue;
279
280 /* most of the time this function is invoked during the normal
281 * process..it makes sense to pay more when the session is
282 * finished and to speed the process up during the measurement
283 */
284 if (unlikely(!kref_get_unless_zero(&pos->refcount)))
285 continue;
286
287 tp_vars = pos;
288 break;
289 }
290 rcu_read_unlock();
291
292 return tp_vars;
293}
294
295/**
296 * batadv_tp_list_find_session - find tp_vars session object in the global list
297 * @bat_priv: the bat priv with all the soft interface information
298 * @dst: the other endpoint MAC address to look for
299 * @session: session identifier
300 *
301 * Look for a tp_vars object matching dst as end_point, session as tp meter
302 * session and return it after having incremented the refcounter. Return NULL
303 * if not found
304 *
305 * Return: matching tp_vars or NULL when no tp_vars was found
306 */
307static struct batadv_tp_vars *
308batadv_tp_list_find_session(struct batadv_priv *bat_priv, const u8 *dst,
309 const u8 *session)
310{
311 struct batadv_tp_vars *pos, *tp_vars = NULL;
312
313 rcu_read_lock();
314 hlist_for_each_entry_rcu(pos, &bat_priv->tp_list, list) {
315 if (!batadv_compare_eth(pos->other_end, dst))
316 continue;
317
318 if (memcmp(pos->session, session, sizeof(pos->session)) != 0)
319 continue;
320
321 /* most of the time this function is invoked during the normal
322 * process..it makes sense to pay more when the session is
323 * finished and to speed the process up during the measurement
324 */
325 if (unlikely(!kref_get_unless_zero(&pos->refcount)))
326 continue;
327
328 tp_vars = pos;
329 break;
330 }
331 rcu_read_unlock();
332
333 return tp_vars;
334}
335
336/**
337 * batadv_tp_vars_release - release batadv_tp_vars from lists and queue for
338 * free after rcu grace period
339 * @ref: kref pointer of the batadv_tp_vars
340 */
341static void batadv_tp_vars_release(struct kref *ref)
342{
343 struct batadv_tp_vars *tp_vars;
344 struct batadv_tp_unacked *un, *safe;
345
346 tp_vars = container_of(ref, struct batadv_tp_vars, refcount);
347
348 /* lock should not be needed because this object is now out of any
349 * context!
350 */
351 spin_lock_bh(&tp_vars->unacked_lock);
352 list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) {
353 list_del(&un->list);
354 kfree(un);
355 }
356 spin_unlock_bh(&tp_vars->unacked_lock);
357
358 kfree_rcu(tp_vars, rcu);
359}
360
361/**
362 * batadv_tp_vars_put - decrement the batadv_tp_vars refcounter and possibly
363 * release it
364 * @tp_vars: the private data of the current TP meter session to be free'd
365 */
366static void batadv_tp_vars_put(struct batadv_tp_vars *tp_vars)
367{
368 kref_put(&tp_vars->refcount, batadv_tp_vars_release);
369}
370
371/**
372 * batadv_tp_sender_cleanup - clean up sender data and drop the timer
373 * @bat_priv: the bat priv with all the soft interface information
374 * @tp_vars: the private data of the current TP meter session to cleanup
375 */
376static void batadv_tp_sender_cleanup(struct batadv_priv *bat_priv,
377 struct batadv_tp_vars *tp_vars)
378{
379 cancel_delayed_work(&tp_vars->finish_work);
380
381 spin_lock_bh(&tp_vars->bat_priv->tp_list_lock);
382 hlist_del_rcu(&tp_vars->list);
383 spin_unlock_bh(&tp_vars->bat_priv->tp_list_lock);
384
385 /* drop list reference */
386 batadv_tp_vars_put(tp_vars);
387
388 atomic_dec(&tp_vars->bat_priv->tp_num);
389
390 /* kill the timer and remove its reference */
391 del_timer_sync(&tp_vars->timer);
392 /* the worker might have rearmed itself therefore we kill it again. Note
393 * that if the worker should run again before invoking the following
394 * del_timer(), it would not re-arm itself once again because the status
395 * is OFF now
396 */
397 del_timer(&tp_vars->timer);
398 batadv_tp_vars_put(tp_vars);
399}
400
401/**
402 * batadv_tp_sender_end - print info about ended session and inform client
403 * @bat_priv: the bat priv with all the soft interface information
404 * @tp_vars: the private data of the current TP meter session
405 */
406static void batadv_tp_sender_end(struct batadv_priv *bat_priv,
407 struct batadv_tp_vars *tp_vars)
408{
409 u32 session_cookie;
410
411 batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
412 "Test towards %pM finished..shutting down (reason=%d)\n",
413 tp_vars->other_end, tp_vars->reason);
414
415 batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
416 "Last timing stats: SRTT=%ums RTTVAR=%ums RTO=%ums\n",
417 tp_vars->srtt >> 3, tp_vars->rttvar >> 2, tp_vars->rto);
418
419 batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
420 "Final values: cwnd=%u ss_threshold=%u\n",
421 tp_vars->cwnd, tp_vars->ss_threshold);
422
423 session_cookie = batadv_tp_session_cookie(tp_vars->session,
424 tp_vars->icmp_uid);
425
426 batadv_tp_batctl_notify(tp_vars->reason,
427 tp_vars->other_end,
428 bat_priv,
429 tp_vars->start_time,
430 atomic64_read(&tp_vars->tot_sent),
431 session_cookie);
432}
433
434/**
435 * batadv_tp_sender_shutdown - let sender thread/timer stop gracefully
436 * @tp_vars: the private data of the current TP meter session
437 * @reason: reason for tp meter session stop
438 */
439static void batadv_tp_sender_shutdown(struct batadv_tp_vars *tp_vars,
440 enum batadv_tp_meter_reason reason)
441{
442 if (!atomic_dec_and_test(&tp_vars->sending))
443 return;
444
445 tp_vars->reason = reason;
446}
447
448/**
449 * batadv_tp_sender_finish - stop sender session after test_length was reached
450 * @work: delayed work reference of the related tp_vars
451 */
452static void batadv_tp_sender_finish(struct work_struct *work)
453{
454 struct delayed_work *delayed_work;
455 struct batadv_tp_vars *tp_vars;
456
457 delayed_work = to_delayed_work(work);
458 tp_vars = container_of(delayed_work, struct batadv_tp_vars,
459 finish_work);
460
461 batadv_tp_sender_shutdown(tp_vars, BATADV_TP_REASON_COMPLETE);
462}
463
464/**
465 * batadv_tp_reset_sender_timer - reschedule the sender timer
466 * @tp_vars: the private TP meter data for this session
467 *
468 * Reschedule the timer using tp_vars->rto as delay
469 */
470static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars)
471{
472 /* most of the time this function is invoked during normal packet
473 * reception...
474 */
475 if (unlikely(atomic_read(&tp_vars->sending) == 0))
476 /* timer ref will be dropped in batadv_tp_sender_cleanup */
477 return;
478
479 mod_timer(&tp_vars->timer, jiffies + msecs_to_jiffies(tp_vars->rto));
480}
481
482/**
483 * batadv_tp_sender_timeout - timer that fires in case of packet loss
484 * @arg: address of the related tp_vars
485 *
486 * If fired it means that there was packet loss.
487 * Switch to Slow Start, set the ss_threshold to half of the current cwnd and
488 * reset the cwnd to 3*MSS
489 */
490static void batadv_tp_sender_timeout(unsigned long arg)
491{
492 struct batadv_tp_vars *tp_vars = (struct batadv_tp_vars *)arg;
493 struct batadv_priv *bat_priv = tp_vars->bat_priv;
494
495 if (atomic_read(&tp_vars->sending) == 0)
496 return;
497
498 /* if the user waited long enough...shut down the test */
499 if (unlikely(tp_vars->rto >= BATADV_TP_MAX_RTO)) {
500 batadv_tp_sender_shutdown(tp_vars,
501 BATADV_TP_REASON_DST_UNREACHABLE);
502 return;
503 }
504
505 /* RTO exponential backoff
506 * Details in Section 5.5 of RFC6298
507 */
508 tp_vars->rto <<= 1;
509
510 spin_lock_bh(&tp_vars->cwnd_lock);
511
512 tp_vars->ss_threshold = tp_vars->cwnd >> 1;
513 if (tp_vars->ss_threshold < BATADV_TP_PLEN * 2)
514 tp_vars->ss_threshold = BATADV_TP_PLEN * 2;
515
516 batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
517 "Meter: RTO fired during test towards %pM! cwnd=%u new ss_thr=%u, resetting last_sent to %u\n",
518 tp_vars->other_end, tp_vars->cwnd, tp_vars->ss_threshold,
519 atomic_read(&tp_vars->last_acked));
520
521 tp_vars->cwnd = BATADV_TP_PLEN * 3;
522
523 spin_unlock_bh(&tp_vars->cwnd_lock);
524
525 /* resend the non-ACKed packets.. */
526 tp_vars->last_sent = atomic_read(&tp_vars->last_acked);
527 wake_up(&tp_vars->more_bytes);
528
529 batadv_tp_reset_sender_timer(tp_vars);
530}
531
532/**
533 * batadv_tp_fill_prerandom - Fill buffer with prefetched random bytes
534 * @tp_vars: the private TP meter data for this session
535 * @buf: Buffer to fill with bytes
536 * @nbytes: amount of pseudorandom bytes
537 */
538static void batadv_tp_fill_prerandom(struct batadv_tp_vars *tp_vars,
539 u8 *buf, size_t nbytes)
540{
541 u32 local_offset;
542 size_t bytes_inbuf;
543 size_t to_copy;
544 size_t pos = 0;
545
546 spin_lock_bh(&tp_vars->prerandom_lock);
547 local_offset = tp_vars->prerandom_offset;
548 tp_vars->prerandom_offset += nbytes;
549 tp_vars->prerandom_offset %= sizeof(batadv_tp_prerandom);
550 spin_unlock_bh(&tp_vars->prerandom_lock);
551
552 while (nbytes) {
553 local_offset %= sizeof(batadv_tp_prerandom);
554 bytes_inbuf = sizeof(batadv_tp_prerandom) - local_offset;
555 to_copy = min(nbytes, bytes_inbuf);
556
557 memcpy(&buf[pos], &batadv_tp_prerandom[local_offset], to_copy);
558 pos += to_copy;
559 nbytes -= to_copy;
560 local_offset = 0;
561 }
562}
563
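batadv_tp_fill_prerandom() reserves a window of the shared 4 KiB pseudorandom pool under prerandom_lock and then copies it out chunk by chunk, restarting from offset 0 each time the read runs past the end of the pool. The same circular copy in miniature (pool shrunk to 8 bytes for the demo):

#include <stdio.h>
#include <string.h>

static const char pool[8] = "abcdefgh";

/* copy nbytes from the circular pool starting at offset */
static void fill_from_pool(char *buf, size_t nbytes, size_t offset)
{
	size_t pos = 0;

	while (nbytes) {
		size_t in_pool, to_copy;

		offset %= sizeof(pool);
		in_pool = sizeof(pool) - offset;
		to_copy = nbytes < in_pool ? nbytes : in_pool;

		memcpy(&buf[pos], &pool[offset], to_copy);
		pos += to_copy;
		nbytes -= to_copy;
		offset = 0;	/* subsequent rounds restart at 0 */
	}
}

int main(void)
{
	char out[11] = { 0 };

	fill_from_pool(out, 10, 6);	/* wraps: "gh" then "abcdefgh" */
	printf("%s\n", out);		/* prints "ghabcdefgh" */
	return 0;
}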
564/**
565 * batadv_tp_send_msg - send a single message
566 * @tp_vars: the private TP meter data for this session
567 * @src: source mac address
568 * @orig_node: the originator of the destination
569 * @seqno: sequence number of this packet
570 * @len: length of the entire packet
571 * @session: session identifier
572 * @uid: local ICMP "socket" index
573 * @timestamp: timestamp in jiffies which is replied in ack
574 *
575 * Create and send a single TP Meter message.
576 *
577 * Return: 0 on success, BATADV_TP_REASON_DST_UNREACHABLE if the destination is
578 * not reachable, BATADV_TP_REASON_MEMORY_ERROR if the packet couldn't be
579 * allocated
580 */
581static int batadv_tp_send_msg(struct batadv_tp_vars *tp_vars, const u8 *src,
582 struct batadv_orig_node *orig_node,
583 u32 seqno, size_t len, const u8 *session,
584 int uid, u32 timestamp)
585{
586 struct batadv_icmp_tp_packet *icmp;
587 struct sk_buff *skb;
588 int r;
589 u8 *data;
590 size_t data_len;
591
592 skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
593 if (unlikely(!skb))
594 return BATADV_TP_REASON_MEMORY_ERROR;
595
596 skb_reserve(skb, ETH_HLEN);
597 icmp = (struct batadv_icmp_tp_packet *)skb_put(skb, sizeof(*icmp));
598
599 /* fill the icmp header */
600 ether_addr_copy(icmp->dst, orig_node->orig);
601 ether_addr_copy(icmp->orig, src);
602 icmp->version = BATADV_COMPAT_VERSION;
603 icmp->packet_type = BATADV_ICMP;
604 icmp->ttl = BATADV_TTL;
605 icmp->msg_type = BATADV_TP;
606 icmp->uid = uid;
607
608 icmp->subtype = BATADV_TP_MSG;
609 memcpy(icmp->session, session, sizeof(icmp->session));
610 icmp->seqno = htonl(seqno);
611 icmp->timestamp = htonl(timestamp);
612
613 data_len = len - sizeof(*icmp);
614 data = (u8 *)skb_put(skb, data_len);
615 batadv_tp_fill_prerandom(tp_vars, data, data_len);
616
617 r = batadv_send_skb_to_orig(skb, orig_node, NULL);
618 if (r == -1)
619 kfree_skb(skb);
620
621 if (r == NET_XMIT_SUCCESS)
622 return 0;
623
624 return BATADV_TP_REASON_CANT_SEND;
625}
626
627/**
628 * batadv_tp_recv_ack - ACK receiving function
629 * @bat_priv: the bat priv with all the soft interface information
630 * @skb: the buffer containing the received packet
631 *
632 * Process a received TP ACK packet
633 */
634static void batadv_tp_recv_ack(struct batadv_priv *bat_priv,
635 const struct sk_buff *skb)
636{
637 struct batadv_hard_iface *primary_if = NULL;
638 struct batadv_orig_node *orig_node = NULL;
639 const struct batadv_icmp_tp_packet *icmp;
640 struct batadv_tp_vars *tp_vars;
641 size_t packet_len, mss;
642 u32 rtt, recv_ack, cwnd;
643 unsigned char *dev_addr;
644
645 packet_len = BATADV_TP_PLEN;
646 mss = BATADV_TP_PLEN;
647 packet_len += sizeof(struct batadv_unicast_packet);
648
649 icmp = (struct batadv_icmp_tp_packet *)skb->data;
650
651 /* find the tp_vars */
652 tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig,
653 icmp->session);
654 if (unlikely(!tp_vars))
655 return;
656
657 if (unlikely(atomic_read(&tp_vars->sending) == 0))
658 goto out;
659
660 /* old ACK? silently drop it.. */
661 if (batadv_seq_before(ntohl(icmp->seqno),
662 (u32)atomic_read(&tp_vars->last_acked)))
663 goto out;
664
665 primary_if = batadv_primary_if_get_selected(bat_priv);
666 if (unlikely(!primary_if))
667 goto out;
668
669 orig_node = batadv_orig_hash_find(bat_priv, icmp->orig);
670 if (unlikely(!orig_node))
671 goto out;
672
673 /* update RTO with the new sampled RTT, if any */
674 rtt = jiffies_to_msecs(jiffies) - ntohl(icmp->timestamp);
675 if (icmp->timestamp && rtt)
676 batadv_tp_update_rto(tp_vars, rtt);
677
678 /* ACK for new data... reset the timer */
679 batadv_tp_reset_sender_timer(tp_vars);
680
681 recv_ack = ntohl(icmp->seqno);
682
683 /* check if this ACK is a duplicate */
684 if (atomic_read(&tp_vars->last_acked) == recv_ack) {
685 atomic_inc(&tp_vars->dup_acks);
686 if (atomic_read(&tp_vars->dup_acks) != 3)
687 goto out;
688
689 if (recv_ack >= tp_vars->recover)
690 goto out;
691
692 /* if this is the third duplicate ACK do Fast Retransmit */
693 batadv_tp_send_msg(tp_vars, primary_if->net_dev->dev_addr,
694 orig_node, recv_ack, packet_len,
695 icmp->session, icmp->uid,
696 jiffies_to_msecs(jiffies));
697
698 spin_lock_bh(&tp_vars->cwnd_lock);
699
700 /* Fast Recovery */
701 tp_vars->fast_recovery = true;
702 /* Set recover to the last outstanding seqno when Fast Recovery
703 * is entered. RFC6582, Section 3.2, step 1
704 */
705 tp_vars->recover = tp_vars->last_sent;
706 tp_vars->ss_threshold = tp_vars->cwnd >> 1;
707 batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
708 "Meter: Fast Recovery, (cur cwnd=%u) ss_thr=%u last_sent=%u recv_ack=%u\n",
709 tp_vars->cwnd, tp_vars->ss_threshold,
710 tp_vars->last_sent, recv_ack);
711 tp_vars->cwnd = batadv_tp_cwnd(tp_vars->ss_threshold, 3 * mss,
712 mss);
713 tp_vars->dec_cwnd = 0;
714 tp_vars->last_sent = recv_ack;
715
716 spin_unlock_bh(&tp_vars->cwnd_lock);
717 } else {
718 /* count the acked data */
719 atomic64_add(recv_ack - atomic_read(&tp_vars->last_acked),
720 &tp_vars->tot_sent);
721 /* reset the duplicate ACKs counter */
722 atomic_set(&tp_vars->dup_acks, 0);
723
724 if (tp_vars->fast_recovery) {
725 /* partial ACK */
726 if (batadv_seq_before(recv_ack, tp_vars->recover)) {
727 /* this is another hole in the window. React
728 * immediately as specified by NewReno (see
729 * Section 3.2 of RFC6582 for details)
730 */
731 dev_addr = primary_if->net_dev->dev_addr;
732 batadv_tp_send_msg(tp_vars, dev_addr,
733 orig_node, recv_ack,
734 packet_len, icmp->session,
735 icmp->uid,
736 jiffies_to_msecs(jiffies));
737 tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd,
738 mss, mss);
739 } else {
740 tp_vars->fast_recovery = false;
741 /* set cwnd to the value of ss_threshold at the
742 * moment that Fast Recovery was entered.
743 * RFC6582, Section 3.2, step 3
744 */
745 cwnd = batadv_tp_cwnd(tp_vars->ss_threshold, 0,
746 mss);
747 tp_vars->cwnd = cwnd;
748 }
749 goto move_twnd;
750 }
751
752 if (recv_ack - atomic_read(&tp_vars->last_acked) >= mss)
753 batadv_tp_update_cwnd(tp_vars, mss);
754move_twnd:
755 /* move the Transmit Window */
756 atomic_set(&tp_vars->last_acked, recv_ack);
757 }
758
759 wake_up(&tp_vars->more_bytes);
760out:
761 if (likely(primary_if))
762 batadv_hardif_put(primary_if);
763 if (likely(orig_node))
764 batadv_orig_node_put(orig_node);
765 if (likely(tp_vars))
766 batadv_tp_vars_put(tp_vars);
767}
768
769/**
770 * batadv_tp_avail - check if congestion window is not full
771 * @tp_vars: the private data of the current TP meter session
772 * @payload_len: size of the payload of a single message
773 *
774 * Return: true when congestion window is not full, false otherwise
775 */
776static bool batadv_tp_avail(struct batadv_tp_vars *tp_vars,
777 size_t payload_len)
778{
779 u32 win_left, win_limit;
780
781 win_limit = atomic_read(&tp_vars->last_acked) + tp_vars->cwnd;
782 win_left = win_limit - tp_vars->last_sent;
783
784 return win_left >= payload_len;
785}
786
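batadv_tp_avail() leans on unsigned wrap-around: last_acked + cwnd may overflow, yet subtracting last_sent still yields the free window as long as the in-flight span stays well below 2^31, which is also why BATADV_TP_FIRST_SEQ deliberately starts just below the u32 limit. A small stand-alone demonstration with values that force the wrap:

#include <assert.h>
#include <stdint.h>

static int tp_avail(uint32_t last_acked, uint32_t cwnd,
		    uint32_t last_sent, uint32_t payload_len)
{
	uint32_t win_limit = last_acked + cwnd;		/* may wrap */
	uint32_t win_left = win_limit - last_sent;	/* still correct */

	return win_left >= payload_len;
}

int main(void)
{
	uint32_t seq = UINT32_MAX - 1000;	/* near wrap, like TP_FIRST_SEQ */

	/* window [seq, seq + 4000) crosses the 2^32 boundary */
	assert(tp_avail(seq, 4000, seq + 3000, 1000));
	assert(!tp_avail(seq, 4000, seq + 3500, 1000));
	return 0;
}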
787/**
788 * batadv_tp_wait_available - wait until congestion window becomes free or
789 * timeout is reached
790 * @tp_vars: the private data of the current TP meter session
791 * @plen: size of the payload of a single message
792 *
793 * Return: 0 if the condition evaluated to false after the timeout elapsed,
794 * 1 if the condition evaluated to true after the timeout elapsed, the
795 * remaining jiffies (at least 1) if the condition evaluated to true before
796 * the timeout elapsed, or -ERESTARTSYS if it was interrupted by a signal.
797 */
798static int batadv_tp_wait_available(struct batadv_tp_vars *tp_vars, size_t plen)
799{
800 int ret;
801
802 ret = wait_event_interruptible_timeout(tp_vars->more_bytes,
803 batadv_tp_avail(tp_vars, plen),
804 HZ / 10);
805
806 return ret;
807}
808
809/**
810 * batadv_tp_send - main sending thread of a tp meter session
811 * @arg: address of the related tp_vars
812 *
813 * Return: nothing, this function never returns; the kthread exits via do_exit()
814 */
815static int batadv_tp_send(void *arg)
816{
817 struct batadv_tp_vars *tp_vars = arg;
818 struct batadv_priv *bat_priv = tp_vars->bat_priv;
819 struct batadv_hard_iface *primary_if = NULL;
820 struct batadv_orig_node *orig_node = NULL;
821 size_t payload_len, packet_len;
822 int err = 0;
823
824 if (unlikely(tp_vars->role != BATADV_TP_SENDER)) {
825 err = BATADV_TP_REASON_DST_UNREACHABLE;
826 tp_vars->reason = err;
827 goto out;
828 }
829
830 orig_node = batadv_orig_hash_find(bat_priv, tp_vars->other_end);
831 if (unlikely(!orig_node)) {
832 err = BATADV_TP_REASON_DST_UNREACHABLE;
833 tp_vars->reason = err;
834 goto out;
835 }
836
837 primary_if = batadv_primary_if_get_selected(bat_priv);
838 if (unlikely(!primary_if)) {
839 err = BATADV_TP_REASON_DST_UNREACHABLE;
840 goto out;
841 }
842
843 /* assume that all the hard_interfaces have a correctly
844 * configured MTU, so use the soft_iface MTU as MSS.
845 * This might not be true and in that case the fragmentation
846 * should be used.
847 * Now, try to send the packet as it is
848 */
849 payload_len = BATADV_TP_PLEN;
850 BUILD_BUG_ON(sizeof(struct batadv_icmp_tp_packet) > BATADV_TP_PLEN);
851
852 batadv_tp_reset_sender_timer(tp_vars);
853
854 /* queue the worker in charge of terminating the test */
855 queue_delayed_work(batadv_event_workqueue, &tp_vars->finish_work,
856 msecs_to_jiffies(tp_vars->test_length));
857
858 while (atomic_read(&tp_vars->sending) != 0) {
859 if (unlikely(!batadv_tp_avail(tp_vars, payload_len))) {
860 batadv_tp_wait_available(tp_vars, payload_len);
861 continue;
862 }
863
864 /* to emulate normal unicast traffic, add to the payload len
865 * the size of the unicast header
866 */
867 packet_len = payload_len + sizeof(struct batadv_unicast_packet);
868
869 err = batadv_tp_send_msg(tp_vars, primary_if->net_dev->dev_addr,
870 orig_node, tp_vars->last_sent,
871 packet_len,
872 tp_vars->session, tp_vars->icmp_uid,
873 jiffies_to_msecs(jiffies));
874
875 /* something went wrong during the preparation/transmission */
876 if (unlikely(err && err != BATADV_TP_REASON_CANT_SEND)) {
877 batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
878 "Meter: batadv_tp_send() cannot send packets (%d)\n",
879 err);
880 /* ensure nobody else tries to stop the thread now */
881 if (atomic_dec_and_test(&tp_vars->sending))
882 tp_vars->reason = err;
883 break;
884 }
885
886 /* slide the transmit window (TWND) forward */
887 if (!err)
888 tp_vars->last_sent += payload_len;
889
890 cond_resched();
891 }
892
893out:
894 if (likely(primary_if))
895 batadv_hardif_put(primary_if);
896 if (likely(orig_node))
897 batadv_orig_node_put(orig_node);
898
899 batadv_tp_sender_end(bat_priv, tp_vars);
900 batadv_tp_sender_cleanup(bat_priv, tp_vars);
901
902 batadv_tp_vars_put(tp_vars);
903
904 do_exit(0);
905}
906
907/**
908 * batadv_tp_start_kthread - start new thread which manages the tp meter sender
909 * @tp_vars: the private data of the current TP meter session
910 */
911static void batadv_tp_start_kthread(struct batadv_tp_vars *tp_vars)
912{
913 struct task_struct *kthread;
914 struct batadv_priv *bat_priv = tp_vars->bat_priv;
915 u32 session_cookie;
916
917 kref_get(&tp_vars->refcount);
918 kthread = kthread_create(batadv_tp_send, tp_vars, "kbatadv_tp_meter");
919 if (IS_ERR(kthread)) {
920 session_cookie = batadv_tp_session_cookie(tp_vars->session,
921 tp_vars->icmp_uid);
922 pr_err("batadv: cannot create tp meter kthread\n");
923 batadv_tp_batctl_error_notify(BATADV_TP_REASON_MEMORY_ERROR,
924 tp_vars->other_end,
925 bat_priv, session_cookie);
926
927 /* drop reserved reference for kthread */
928 batadv_tp_vars_put(tp_vars);
929
930 /* cleanup of failed tp meter variables */
931 batadv_tp_sender_cleanup(bat_priv, tp_vars);
932 return;
933 }
934
935 wake_up_process(kthread);
936}
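kthread_create() immediately followed by wake_up_process() is exactly what the kthread_run() macro expands to; a sketch of the equivalent shorthand (behaviour is the same here, since nothing happens between creation and wake-up):

    kthread = kthread_run(batadv_tp_send, tp_vars, "kbatadv_tp_meter");
    if (IS_ERR(kthread)) {
    	/* ... same error notification and cleanup as above ... */
    }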
937
938/**
939 * batadv_tp_start - start a new tp meter session
940 * @bat_priv: the bat priv with all the soft interface information
941 * @dst: the receiver MAC address
942 * @test_length: test length in milliseconds
943 * @cookie: session cookie
944 */
945void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
946 u32 test_length, u32 *cookie)
947{
948 struct batadv_tp_vars *tp_vars;
949 u8 session_id[2];
950 u8 icmp_uid;
951 u32 session_cookie;
952
953 get_random_bytes(session_id, sizeof(session_id));
954 get_random_bytes(&icmp_uid, 1);
955 session_cookie = batadv_tp_session_cookie(session_id, icmp_uid);
956 *cookie = session_cookie;
957
958 /* look for an already existing test towards this node */
959 spin_lock_bh(&bat_priv->tp_list_lock);
960 tp_vars = batadv_tp_list_find(bat_priv, dst);
961 if (tp_vars) {
962 spin_unlock_bh(&bat_priv->tp_list_lock);
963 batadv_tp_vars_put(tp_vars);
964 batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
965 "Meter: test to or from the same node already ongoing, aborting\n");
966 batadv_tp_batctl_error_notify(BATADV_TP_REASON_ALREADY_ONGOING,
967 dst, bat_priv, session_cookie);
968 return;
969 }
970
971 if (!atomic_add_unless(&bat_priv->tp_num, 1, BATADV_TP_MAX_NUM)) {
972 spin_unlock_bh(&bat_priv->tp_list_lock);
973 batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
974 "Meter: too many ongoing sessions, aborting (SEND)\n");
975 batadv_tp_batctl_error_notify(BATADV_TP_REASON_TOO_MANY, dst,
976 bat_priv, session_cookie);
977 return;
978 }
979
980 tp_vars = kmalloc(sizeof(*tp_vars), GFP_ATOMIC);
981 if (!tp_vars) {
982 spin_unlock_bh(&bat_priv->tp_list_lock);
983 batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
984 "Meter: batadv_tp_start cannot allocate list elements\n");
985 batadv_tp_batctl_error_notify(BATADV_TP_REASON_MEMORY_ERROR,
986 dst, bat_priv, session_cookie);
987 return;
988 }
989
990 /* initialize tp_vars */
991 ether_addr_copy(tp_vars->other_end, dst);
992 kref_init(&tp_vars->refcount);
993 tp_vars->role = BATADV_TP_SENDER;
994 atomic_set(&tp_vars->sending, 1);
995 memcpy(tp_vars->session, session_id, sizeof(session_id));
996 tp_vars->icmp_uid = icmp_uid;
997
998 tp_vars->last_sent = BATADV_TP_FIRST_SEQ;
999 atomic_set(&tp_vars->last_acked, BATADV_TP_FIRST_SEQ);
1000 tp_vars->fast_recovery = false;
1001 tp_vars->recover = BATADV_TP_FIRST_SEQ;
1002
1003 /* initialise the CWND to 3*MSS (Section 3.1 in RFC5681).
1004 * For batman-adv the MSS is the size of the payload received by the
1005 * soft_interface, hence its MTU
1006 */
1007 tp_vars->cwnd = BATADV_TP_PLEN * 3;
1008 /* at the beginning initialise the SS threshold to the biggest possible
1009 * window size, hence the AWND size
1010 */
1011 tp_vars->ss_threshold = BATADV_TP_AWND;
1012
1013 /* initial RTO is 1 second here; RFC 6298 (Section 2.1)
1014 * suggests 3 seconds as a conservative starting value
1015 */
1016 tp_vars->rto = 1000;
1017 tp_vars->srtt = 0;
1018 tp_vars->rttvar = 0;
1019
1020 atomic64_set(&tp_vars->tot_sent, 0);
1021
1022 kref_get(&tp_vars->refcount);
1023 setup_timer(&tp_vars->timer, batadv_tp_sender_timeout,
1024 (unsigned long)tp_vars);
1025
1026 tp_vars->bat_priv = bat_priv;
1027 tp_vars->start_time = jiffies;
1028
1029 init_waitqueue_head(&tp_vars->more_bytes);
1030
1031 spin_lock_init(&tp_vars->unacked_lock);
1032 INIT_LIST_HEAD(&tp_vars->unacked_list);
1033
1034 spin_lock_init(&tp_vars->cwnd_lock);
1035
1036 tp_vars->prerandom_offset = 0;
1037 spin_lock_init(&tp_vars->prerandom_lock);
1038
1039 kref_get(&tp_vars->refcount);
1040 hlist_add_head_rcu(&tp_vars->list, &bat_priv->tp_list);
1041 spin_unlock_bh(&bat_priv->tp_list_lock);
1042
1043 tp_vars->test_length = test_length;
1044 if (!tp_vars->test_length)
1045 tp_vars->test_length = BATADV_TP_DEF_TEST_LENGTH;
1046
1047 batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
1048 "Meter: starting throughput meter towards %pM (length=%ums)\n",
1049 dst, tp_vars->test_length);
1050
1051 /* init work item for finished tp tests */
1052 INIT_DELAYED_WORK(&tp_vars->finish_work, batadv_tp_sender_finish);
1053
1054 /* start the tp kthread, so that the write() call issued from
1055 * userspace can return immediately instead of blocking
1056 */
1057 batadv_tp_start_kthread(tp_vars);
1058
1059 /* don't return reference to new tp_vars */
1060 batadv_tp_vars_put(tp_vars);
1061}
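The @srtt (scaled by 2^3) and @rttvar (scaled by 2^2) fields initialised above suggest the classic fixed-point RTT estimator of RFC 6298; a sketch of such an update under that assumption (the actual update helper lives outside this hunk, and the function name here is made up):

    /* sketch: RFC 6298 style update, srtt scaled by 8, rttvar scaled by 4 */
    static void batadv_tp_update_rto_sketch(struct batadv_tp_vars *tp_vars,
    					u32 new_rtt)
    {
    	s32 m = new_rtt;

    	if (tp_vars->srtt == 0) {
    		tp_vars->srtt = m << 3;   /* SRTT = R, kept scaled by 8 */
    		tp_vars->rttvar = m << 1; /* RTTVAR = R/2, kept scaled by 4 */
    	} else {
    		m -= (tp_vars->srtt >> 3);  /* m = R' - SRTT */
    		tp_vars->srtt += m;         /* SRTT += (R' - SRTT) / 8 */
    		if (m < 0)
    			m = -m;
    		m -= (tp_vars->rttvar >> 2);
    		tp_vars->rttvar += m;       /* RTTVAR += (|m| - RTTVAR) / 4 */
    	}

    	/* RTO = SRTT + 4 * RTTVAR; rttvar already carries the factor 4 */
    	tp_vars->rto = (tp_vars->srtt >> 3) + tp_vars->rttvar;
    }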
1062
1063/**
1064 * batadv_tp_stop - stop currently running tp meter session
1065 * @bat_priv: the bat priv with all the soft interface information
1066 * @dst: the receiver MAC address
1067 * @return_value: reason for tp meter session stop
1068 */
1069void batadv_tp_stop(struct batadv_priv *bat_priv, const u8 *dst,
1070 u8 return_value)
1071{
1072 struct batadv_orig_node *orig_node;
1073 struct batadv_tp_vars *tp_vars;
1074
1075 batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
1076 "Meter: stopping test towards %pM\n", dst);
1077
1078 orig_node = batadv_orig_hash_find(bat_priv, dst);
1079 if (!orig_node)
1080 return;
1081
1082 tp_vars = batadv_tp_list_find(bat_priv, orig_node->orig);
1083 if (!tp_vars) {
1084 batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
1085 "Meter: trying to interrupt an already over connection\n");
1086 goto out;
1087 }
1088
1089 batadv_tp_sender_shutdown(tp_vars, return_value);
1090 batadv_tp_vars_put(tp_vars);
1091out:
1092 batadv_orig_node_put(orig_node);
1093}
1094
1095/**
1096 * batadv_tp_reset_receiver_timer - reset the receiver shutdown timer
1097 * @tp_vars: the private data of the current TP meter session
1098 *
1099 * start the receiver shutdown timer or reset it if already started
1100 */
1101static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars)
1102{
1103 mod_timer(&tp_vars->timer,
1104 jiffies + msecs_to_jiffies(BATADV_TP_RECV_TIMEOUT));
1105}
1106
1107/**
1108 * batadv_tp_receiver_shutdown - stop a tp meter receiver when the timeout
1109 * is reached without receiving any further packet
1110 * @arg: address of the related tp_vars
1111 */
1112static void batadv_tp_receiver_shutdown(unsigned long arg)
1113{
1114 struct batadv_tp_vars *tp_vars = (struct batadv_tp_vars *)arg;
1115 struct batadv_tp_unacked *un, *safe;
1116 struct batadv_priv *bat_priv;
1117
1118 bat_priv = tp_vars->bat_priv;
1119
1120 /* if there is recent activity rearm the timer */
1121 if (!batadv_has_timed_out(tp_vars->last_recv_time,
1122 BATADV_TP_RECV_TIMEOUT)) {
1123 /* reset the receiver shutdown timer */
1124 batadv_tp_reset_receiver_timer(tp_vars);
1125 return;
1126 }
1127
1128 batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
1129 "Shutting down for inactivity (more than %dms) from %pM\n",
1130 BATADV_TP_RECV_TIMEOUT, tp_vars->other_end);
1131
1132 spin_lock_bh(&tp_vars->bat_priv->tp_list_lock);
1133 hlist_del_rcu(&tp_vars->list);
1134 spin_unlock_bh(&tp_vars->bat_priv->tp_list_lock);
1135
1136 /* drop list reference */
1137 batadv_tp_vars_put(tp_vars);
1138
1139 atomic_dec(&bat_priv->tp_num);
1140
1141 spin_lock_bh(&tp_vars->unacked_lock);
1142 list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) {
1143 list_del(&un->list);
1144 kfree(un);
1145 }
1146 spin_unlock_bh(&tp_vars->unacked_lock);
1147
1148 /* drop reference of timer */
1149 batadv_tp_vars_put(tp_vars);
1150}
1151
1152/**
1153 * batadv_tp_send_ack - send an ACK packet
1154 * @bat_priv: the bat priv with all the soft interface information
1155 * @dst: the mac address of the destination originator
1156 * @seq: the sequence number to ACK
1157 * @timestamp: the timestamp to echo back in the ACK
1158 * @session: session identifier
1159 * @socket_index: local ICMP socket identifier
1160 *
1161 * Return: 0 on success, a positive integer representing the reason for
1162 * the failure otherwise
1163 */
1164static int batadv_tp_send_ack(struct batadv_priv *bat_priv, const u8 *dst,
1165 u32 seq, __be32 timestamp, const u8 *session,
1166 int socket_index)
1167{
1168 struct batadv_hard_iface *primary_if = NULL;
1169 struct batadv_orig_node *orig_node;
1170 struct batadv_icmp_tp_packet *icmp;
1171 struct sk_buff *skb;
1172 int r, ret;
1173
1174 orig_node = batadv_orig_hash_find(bat_priv, dst);
1175 if (unlikely(!orig_node)) {
1176 ret = BATADV_TP_REASON_DST_UNREACHABLE;
1177 goto out;
1178 }
1179
1180 primary_if = batadv_primary_if_get_selected(bat_priv);
1181 if (unlikely(!primary_if)) {
1182 ret = BATADV_TP_REASON_DST_UNREACHABLE;
1183 goto out;
1184 }
1185
1186 skb = netdev_alloc_skb_ip_align(NULL, sizeof(*icmp) + ETH_HLEN);
1187 if (unlikely(!skb)) {
1188 ret = BATADV_TP_REASON_MEMORY_ERROR;
1189 goto out;
1190 }
1191
1192 skb_reserve(skb, ETH_HLEN);
1193 icmp = (struct batadv_icmp_tp_packet *)skb_put(skb, sizeof(*icmp));
1194 icmp->packet_type = BATADV_ICMP;
1195 icmp->version = BATADV_COMPAT_VERSION;
1196 icmp->ttl = BATADV_TTL;
1197 icmp->msg_type = BATADV_TP;
1198 ether_addr_copy(icmp->dst, orig_node->orig);
1199 ether_addr_copy(icmp->orig, primary_if->net_dev->dev_addr);
1200 icmp->uid = socket_index;
1201
1202 icmp->subtype = BATADV_TP_ACK;
1203 memcpy(icmp->session, session, sizeof(icmp->session));
1204 icmp->seqno = htonl(seq);
1205 icmp->timestamp = timestamp;
1206
1207 /* send the ack */
1208 r = batadv_send_skb_to_orig(skb, orig_node, NULL);
1209 if (r == -1)
1210 kfree_skb(skb);
1211
1212 if (unlikely(r < 0) || (r == NET_XMIT_DROP)) {
1213 ret = BATADV_TP_REASON_DST_UNREACHABLE;
1214 goto out;
1215 }
1216 ret = 0;
1217
1218out:
1219 if (likely(orig_node))
1220 batadv_orig_node_put(orig_node);
1221 if (likely(primary_if))
1222 batadv_hardif_put(primary_if);
1223
1224 return ret;
1225}
1226
1227/**
1228 * batadv_tp_handle_out_of_order - store an out of order packet
1229 * @tp_vars: the private data of the current TP meter session
1230 * @skb: the buffer containing the received packet
1231 *
1232 * Store the out of order packet in the unacked list for later processing.
1233 * These packets are kept in this list so that they can be ACKed at once as
1234 * soon as all the previous packets have been received
1235 *
1236 * Return: true if the packet has been successfully processed, false otherwise
1237 */
1238static bool batadv_tp_handle_out_of_order(struct batadv_tp_vars *tp_vars,
1239 const struct sk_buff *skb)
1240{
1241 const struct batadv_icmp_tp_packet *icmp;
1242 struct batadv_tp_unacked *un, *new;
1243 u32 payload_len;
1244 bool added = false;
1245
1246 new = kmalloc(sizeof(*new), GFP_ATOMIC);
1247 if (unlikely(!new))
1248 return false;
1249
1250 icmp = (struct batadv_icmp_tp_packet *)skb->data;
1251
1252 new->seqno = ntohl(icmp->seqno);
1253 payload_len = skb->len - sizeof(struct batadv_unicast_packet);
1254 new->len = payload_len;
1255
1256 spin_lock_bh(&tp_vars->unacked_lock);
1257 /* if the list is empty immediately attach this new object */
1258 if (list_empty(&tp_vars->unacked_list)) {
1259 list_add(&new->list, &tp_vars->unacked_list);
1260 goto out;
1261 }
1262
1263 /* otherwise loop over the list and either drop the packet because this
1264 * is a duplicate or store it at the right position.
1265 *
1266 * The iteration is done in the reverse way because it is likely that
1267 * the last received packet (the one being processed now) has a bigger
1268 * seqno than all the others already stored.
1269 */
1270 list_for_each_entry_reverse(un, &tp_vars->unacked_list, list) {
1271 /* check for duplicates */
1272 if (new->seqno == un->seqno) {
1273 if (new->len > un->len)
1274 un->len = new->len;
1275 kfree(new);
1276 added = true;
1277 break;
1278 }
1279
1280 /* look for the right position */
1281 if (batadv_seq_before(new->seqno, un->seqno))
1282 continue;
1283
1284 /* as soon as an entry with a smaller seqno is found, the new
1285 * one is attached _after_ it. In this way the list is kept in
1286 * ascending order
1287 */
1288 list_add(&new->list, &un->list);
1289 added = true;
1290 break;
1291 }
1292
1293 /* received packet with smallest seqno out of order; add it to front */
1294 if (!added)
1295 list_add(&new->list, &tp_vars->unacked_list);
1296
1297out:
1298 spin_unlock_bh(&tp_vars->unacked_lock);
1299
1300 return true;
1301}
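A worked example of the insertion above, using the same wraparound-aware comparison idea as batadv_seq_before (hypothetical seqnos, standalone userspace sketch): with an ascending list [10, 20], a packet with seqno 15 is scanned from the tail, skips 20 (15 is before 20) and is linked right after 10, giving [10, 15, 20].

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t list[4] = { 10, 20 }; /* ascending, two entries used */
    	uint32_t new_seqno = 15;
    	int i, n = 2;

    	/* scan backwards; shift entries bigger than new_seqno rightwards */
    	for (i = n - 1; i >= 0 && (int32_t)(new_seqno - list[i]) < 0; i--)
    		list[i + 1] = list[i];
    	list[i + 1] = new_seqno; /* insert after the first smaller entry */

    	for (i = 0; i <= n; i++)
    		printf("%u ", list[i]); /* prints: 10 15 20 */
    	return 0;
    }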
1302
1303/**
1304 * batadv_tp_ack_unordered - update the number of received bytes in the
1305 * current stream without gaps
1306 * @tp_vars: the private data of the current TP meter session
1307 */
1308static void batadv_tp_ack_unordered(struct batadv_tp_vars *tp_vars)
1309{
1310 struct batadv_tp_unacked *un, *safe;
1311 u32 to_ack;
1312
1313 /* go through the unacked packet list and possibly ACK them as
1314 * well
1315 */
1316 spin_lock_bh(&tp_vars->unacked_lock);
1317 list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) {
1318 /* the list is ordered, therefore it is possible to stop as soon
1319 * as there is a gap between the last acked seqno and the seqno of
1320 * the packet under inspection
1321 */
1322 if (batadv_seq_before(tp_vars->last_recv, un->seqno))
1323 break;
1324
1325 to_ack = un->seqno + un->len - tp_vars->last_recv;
1326
1327 if (batadv_seq_before(tp_vars->last_recv, un->seqno + un->len))
1328 tp_vars->last_recv += to_ack;
1329
1330 list_del(&un->list);
1331 kfree(un);
1332 }
1333 spin_unlock_bh(&tp_vars->unacked_lock);
1334}
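A short numeric trace of the gap check above (hypothetical values): with last_recv = 1000 and buffered entries (seqno=1000, len=100) and (seqno=1300, len=100), the first entry is ACKed and advances last_recv to 1100, then the loop stops because 1100 is still before 1300. A standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    struct unacked { uint32_t seqno, len; };

    int main(void)
    {
    	struct unacked list[] = { { 1000, 100 }, { 1300, 100 } };
    	uint32_t last_recv = 1000;
    	unsigned int i;

    	for (i = 0; i < 2; i++) {
    		if ((int32_t)(last_recv - list[i].seqno) < 0)
    			break; /* gap ahead of this entry: stop */
    		if ((int32_t)(last_recv - (list[i].seqno + list[i].len)) < 0)
    			last_recv = list[i].seqno + list[i].len;
    	}
    	printf("%u\n", last_recv); /* prints 1100: bytes past the gap wait */
    	return 0;
    }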
1335
1336/**
1337 * batadv_tp_init_recv - return matching or create new receiver tp_vars
1338 * @bat_priv: the bat priv with all the soft interface information
1339 * @icmp: received icmp tp msg
1340 *
1341 * Return: corresponding tp_vars or NULL on errors
1342 */
1343static struct batadv_tp_vars *
1344batadv_tp_init_recv(struct batadv_priv *bat_priv,
1345 const struct batadv_icmp_tp_packet *icmp)
1346{
1347 struct batadv_tp_vars *tp_vars;
1348
1349 spin_lock_bh(&bat_priv->tp_list_lock);
1350 tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig,
1351 icmp->session);
1352 if (tp_vars)
1353 goto out_unlock;
1354
1355 if (!atomic_add_unless(&bat_priv->tp_num, 1, BATADV_TP_MAX_NUM)) {
1356 batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
1357 "Meter: too many ongoing sessions, aborting (RECV)\n");
1358 goto out_unlock;
1359 }
1360
1361 tp_vars = kmalloc(sizeof(*tp_vars), GFP_ATOMIC);
1362 if (!tp_vars)
1363 goto out_unlock;
1364
1365 ether_addr_copy(tp_vars->other_end, icmp->orig);
1366 tp_vars->role = BATADV_TP_RECEIVER;
1367 memcpy(tp_vars->session, icmp->session, sizeof(tp_vars->session));
1368 tp_vars->last_recv = BATADV_TP_FIRST_SEQ;
1369 tp_vars->bat_priv = bat_priv;
1370 kref_init(&tp_vars->refcount);
1371
1372 spin_lock_init(&tp_vars->unacked_lock);
1373 INIT_LIST_HEAD(&tp_vars->unacked_list);
1374
1375 kref_get(&tp_vars->refcount);
1376 hlist_add_head_rcu(&tp_vars->list, &bat_priv->tp_list);
1377
1378 kref_get(&tp_vars->refcount);
1379 setup_timer(&tp_vars->timer, batadv_tp_receiver_shutdown,
1380 (unsigned long)tp_vars);
1381
1382 batadv_tp_reset_receiver_timer(tp_vars);
1383
1384out_unlock:
1385 spin_unlock_bh(&bat_priv->tp_list_lock);
1386
1387 return tp_vars;
1388}
1389
1390/**
1391 * batadv_tp_recv_msg - process a single data message
1392 * @bat_priv: the bat priv with all the soft interface information
1393 * @skb: the buffer containing the received packet
1394 *
1395 * Process a received TP MSG packet
1396 */
1397static void batadv_tp_recv_msg(struct batadv_priv *bat_priv,
1398 const struct sk_buff *skb)
1399{
1400 const struct batadv_icmp_tp_packet *icmp;
1401 struct batadv_tp_vars *tp_vars;
1402 size_t packet_size;
1403 u32 seqno;
1404
1405 icmp = (struct batadv_icmp_tp_packet *)skb->data;
1406
1407 seqno = ntohl(icmp->seqno);
1408 /* check whether this is the first seqno: if the first packet is
1409 * lost, no receiver session is created and the tp meter cannot start!
1410 */
1411 if (seqno == BATADV_TP_FIRST_SEQ) {
1412 tp_vars = batadv_tp_init_recv(bat_priv, icmp);
1413 if (!tp_vars) {
1414 batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
1415 "Meter: seqno != BATADV_TP_FIRST_SEQ cannot initiate connection\n");
1416 goto out;
1417 }
1418 } else {
1419 tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig,
1420 icmp->session);
1421 if (!tp_vars) {
1422 batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
1423 "Unexpected packet from %pM!\n",
1424 icmp->orig);
1425 goto out;
1426 }
1427 }
1428
1429 if (unlikely(tp_vars->role != BATADV_TP_RECEIVER)) {
1430 batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
1431 "Meter: dropping packet: not expected (role=%u)\n",
1432 tp_vars->role);
1433 goto out;
1434 }
1435
1436 tp_vars->last_recv_time = jiffies;
1437
1438 /* if the packet is a duplicate, it may be the case that an ACK has been
1439 * lost. Resend the ACK
1440 */
1441 if (batadv_seq_before(seqno, tp_vars->last_recv))
1442 goto send_ack;
1443
1444 /* if the packet is out of order enqueue it */
1445 if (seqno != tp_vars->last_recv) {
1446 /* exit immediately (and do not send any ACK) if the packet has
1447 * not been enqueued correctly
1448 */
1449 if (!batadv_tp_handle_out_of_order(tp_vars, skb))
1450 goto out;
1451
1452 /* send a duplicate ACK */
1453 goto send_ack;
1454 }
1455
1456 /* if everything was fine count the ACKed bytes */
1457 packet_size = skb->len - sizeof(struct batadv_unicast_packet);
1458 tp_vars->last_recv += packet_size;
1459
1460 /* check if this ordered message filled a gap.... */
1461 batadv_tp_ack_unordered(tp_vars);
1462
1463send_ack:
1464 /* send the ACK. If the received packet was out of order, the ACK that
1465 * is going to be sent is a duplicate (the sender will count them and
1466 * possibly enter Fast Retransmit as soon as it has reached 3)
1467 */
1468 batadv_tp_send_ack(bat_priv, icmp->orig, tp_vars->last_recv,
1469 icmp->timestamp, icmp->session, icmp->uid);
1470out:
1471 if (likely(tp_vars))
1472 batadv_tp_vars_put(tp_vars);
1473}
1474
1475/**
1476 * batadv_tp_meter_recv - main TP Meter receiving function
1477 * @bat_priv: the bat priv with all the soft interface information
1478 * @skb: the buffer containing the received packet
1479 */
1480void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb)
1481{
1482 struct batadv_icmp_tp_packet *icmp;
1483
1484 icmp = (struct batadv_icmp_tp_packet *)skb->data;
1485
1486 switch (icmp->subtype) {
1487 case BATADV_TP_MSG:
1488 batadv_tp_recv_msg(bat_priv, skb);
1489 break;
1490 case BATADV_TP_ACK:
1491 batadv_tp_recv_ack(bat_priv, skb);
1492 break;
1493 default:
1494 batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
1495 "Received unknown TP Metric packet type %u\n",
1496 icmp->subtype);
1497 }
1498 consume_skb(skb);
1499}
1500
1501/**
1502 * batadv_tp_meter_init - initialize global tp_meter structures
1503 */
1504void batadv_tp_meter_init(void)
1505{
1506 get_random_bytes(batadv_tp_prerandom, sizeof(batadv_tp_prerandom));
1507}
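For orientation, the exported entry points are driven from the control path roughly like this (sketch; the destination address and test length are placeholder values, and the CANCEL reason code is assumed to come from packet.h):

    u32 cookie;
    static const u8 dst[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

    batadv_tp_start(bat_priv, dst, 10000 /* ms */, &cookie);
    /* ... later, to abort the measurement early: */
    batadv_tp_stop(bat_priv, dst, BATADV_TP_REASON_CANCEL);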
diff --git a/net/batman-adv/tp_meter.h b/net/batman-adv/tp_meter.h
new file mode 100644
index 000000000000..ba922c425e56
--- /dev/null
+++ b/net/batman-adv/tp_meter.h
@@ -0,0 +1,34 @@
1/* Copyright (C) 2012-2016 B.A.T.M.A.N. contributors:
2 *
3 * Edo Monticelli, Antonio Quartulli
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef _NET_BATMAN_ADV_TP_METER_H_
19#define _NET_BATMAN_ADV_TP_METER_H_
20
21#include "main.h"
22
23#include <linux/types.h>
24
25struct sk_buff;
26
27void batadv_tp_meter_init(void);
28void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
29 u32 test_length, u32 *cookie);
30void batadv_tp_stop(struct batadv_priv *bat_priv, const u8 *dst,
31 u8 return_value);
32void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb);
33
34#endif /* _NET_BATMAN_ADV_TP_METER_H_ */
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index feaf492b01ca..7e6df7a4964a 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -47,10 +47,12 @@
 #include "bridge_loop_avoidance.h"
 #include "hard-interface.h"
 #include "hash.h"
+#include "log.h"
 #include "multicast.h"
 #include "originator.h"
 #include "packet.h"
 #include "soft-interface.h"
+#include "tvlv.h"
 
 /* hash class keys */
 static struct lock_class_key batadv_tt_local_hash_lock_class_key;
@@ -650,8 +652,10 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
 
 	/* increase the refcounter of the related vlan */
 	vlan = batadv_softif_vlan_get(bat_priv, vid);
-	if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
-		 addr, BATADV_PRINT_VID(vid))) {
+	if (!vlan) {
+		net_ratelimited_function(batadv_info, soft_iface,
+					 "adding TT local entry %pM to non-existent VLAN %d\n",
+					 addr, BATADV_PRINT_VID(vid));
 		kfree(tt_local);
 		tt_local = NULL;
 		goto out;
@@ -691,7 +695,6 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
 	if (unlikely(hash_added != 0)) {
 		/* remove the reference for the hash */
 		batadv_tt_local_entry_put(tt_local);
-		batadv_softif_vlan_put(vlan);
 		goto out;
 	}
 
@@ -995,7 +998,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 	struct batadv_tt_local_entry *tt_local;
 	struct batadv_hard_iface *primary_if;
 	struct hlist_head *head;
-	unsigned short vid;
 	u32 i;
 	int last_seen_secs;
 	int last_seen_msecs;
@@ -1022,7 +1024,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 		tt_local = container_of(tt_common_entry,
 					struct batadv_tt_local_entry,
 					common);
-		vid = tt_common_entry->vid;
 		last_seen_jiffies = jiffies - tt_local->last_seen;
 		last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
 		last_seen_secs = last_seen_msecs / 1000;
@@ -1546,7 +1547,7 @@ batadv_transtable_best_orig(struct batadv_priv *bat_priv,
 			    struct batadv_tt_global_entry *tt_global_entry)
 {
 	struct batadv_neigh_node *router, *best_router = NULL;
-	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
+	struct batadv_algo_ops *bao = bat_priv->algo_ops;
 	struct hlist_head *head;
 	struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL;
 
@@ -1558,8 +1559,8 @@ batadv_transtable_best_orig(struct batadv_priv *bat_priv,
 			continue;
 
 		if (best_router &&
-		    bao->bat_neigh_cmp(router, BATADV_IF_DEFAULT,
-				       best_router, BATADV_IF_DEFAULT) <= 0) {
+		    bao->neigh.cmp(router, BATADV_IF_DEFAULT, best_router,
+				   BATADV_IF_DEFAULT) <= 0) {
 			batadv_neigh_node_put(router);
 			continue;
 		}
@@ -2269,6 +2270,29 @@ static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
 	return crc;
 }
 
+/**
+ * batadv_tt_req_node_release - free tt_req_node entry
+ * @ref: kref pointer of the tt_req_node entry
+ */
+static void batadv_tt_req_node_release(struct kref *ref)
+{
+	struct batadv_tt_req_node *tt_req_node;
+
+	tt_req_node = container_of(ref, struct batadv_tt_req_node, refcount);
+
+	kfree(tt_req_node);
+}
+
+/**
+ * batadv_tt_req_node_put - decrement the tt_req_node refcounter and
+ * possibly release it
+ * @tt_req_node: tt_req_node to be freed
+ */
+static void batadv_tt_req_node_put(struct batadv_tt_req_node *tt_req_node)
+{
+	kref_put(&tt_req_node->refcount, batadv_tt_req_node_release);
+}
+
 static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
 {
 	struct batadv_tt_req_node *node;
@@ -2278,7 +2302,7 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
 
 	hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
 		hlist_del_init(&node->list);
-		kfree(node);
+		batadv_tt_req_node_put(node);
 	}
 
 	spin_unlock_bh(&bat_priv->tt.req_list_lock);
@@ -2315,7 +2339,7 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
 		if (batadv_has_timed_out(node->issued_at,
 					 BATADV_TT_REQUEST_TIMEOUT)) {
 			hlist_del_init(&node->list);
-			kfree(node);
+			batadv_tt_req_node_put(node);
 		}
 	}
 	spin_unlock_bh(&bat_priv->tt.req_list_lock);
@@ -2347,9 +2371,11 @@ batadv_tt_req_node_new(struct batadv_priv *bat_priv,
 	if (!tt_req_node)
 		goto unlock;
 
+	kref_init(&tt_req_node->refcount);
 	ether_addr_copy(tt_req_node->addr, orig_node->orig);
 	tt_req_node->issued_at = jiffies;
 
+	kref_get(&tt_req_node->refcount);
 	hlist_add_head(&tt_req_node->list, &bat_priv->tt.req_list);
 unlock:
 	spin_unlock_bh(&bat_priv->tt.req_list_lock);
@@ -2613,13 +2639,19 @@ static bool batadv_send_tt_request(struct batadv_priv *bat_priv,
 out:
 	if (primary_if)
 		batadv_hardif_put(primary_if);
+
 	if (ret && tt_req_node) {
 		spin_lock_bh(&bat_priv->tt.req_list_lock);
-		/* hlist_del_init() verifies tt_req_node still is in the list */
-		hlist_del_init(&tt_req_node->list);
+		if (!hlist_unhashed(&tt_req_node->list)) {
+			hlist_del_init(&tt_req_node->list);
+			batadv_tt_req_node_put(tt_req_node);
+		}
 		spin_unlock_bh(&bat_priv->tt.req_list_lock);
-		kfree(tt_req_node);
 	}
+
+	if (tt_req_node)
+		batadv_tt_req_node_put(tt_req_node);
+
 	kfree(tvlv_tt_data);
 	return ret;
 }
@@ -3055,7 +3087,7 @@ static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
 		if (!batadv_compare_eth(node->addr, resp_src))
 			continue;
 		hlist_del_init(&node->list);
-		kfree(node);
+		batadv_tt_req_node_put(node);
 	}
 
 	spin_unlock_bh(&bat_priv->tt.req_list_lock);
diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
new file mode 100644
index 000000000000..3d1cf0fb112d
--- /dev/null
+++ b/net/batman-adv/tvlv.c
@@ -0,0 +1,632 @@
1/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "main.h"
19
20#include <linux/byteorder/generic.h>
21#include <linux/etherdevice.h>
22#include <linux/fs.h>
23#include <linux/if_ether.h>
24#include <linux/kernel.h>
25#include <linux/kref.h>
26#include <linux/list.h>
27#include <linux/lockdep.h>
28#include <linux/netdevice.h>
29#include <linux/pkt_sched.h>
30#include <linux/rculist.h>
31#include <linux/rcupdate.h>
32#include <linux/skbuff.h>
33#include <linux/slab.h>
34#include <linux/spinlock.h>
35#include <linux/stddef.h>
36#include <linux/string.h>
37#include <linux/types.h>
38
39#include "originator.h"
40#include "packet.h"
41#include "send.h"
42#include "tvlv.h"
43
44/**
45 * batadv_tvlv_handler_release - release tvlv handler from lists and queue for
46 * free after rcu grace period
47 * @ref: kref pointer of the tvlv
48 */
49static void batadv_tvlv_handler_release(struct kref *ref)
50{
51 struct batadv_tvlv_handler *tvlv_handler;
52
53 tvlv_handler = container_of(ref, struct batadv_tvlv_handler, refcount);
54 kfree_rcu(tvlv_handler, rcu);
55}
56
57/**
58 * batadv_tvlv_handler_put - decrement the tvlv handler refcounter and
59 * possibly release it
60 * @tvlv_handler: the tvlv handler to free
61 */
62static void batadv_tvlv_handler_put(struct batadv_tvlv_handler *tvlv_handler)
63{
64 kref_put(&tvlv_handler->refcount, batadv_tvlv_handler_release);
65}
66
67/**
68 * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
69 * based on the provided type and version (both need to match)
70 * @bat_priv: the bat priv with all the soft interface information
71 * @type: tvlv handler type to look for
72 * @version: tvlv handler version to look for
73 *
74 * Return: tvlv handler if found or NULL otherwise.
75 */
76static struct batadv_tvlv_handler *
77batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
78{
79 struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;
80
81 rcu_read_lock();
82 hlist_for_each_entry_rcu(tvlv_handler_tmp,
83 &bat_priv->tvlv.handler_list, list) {
84 if (tvlv_handler_tmp->type != type)
85 continue;
86
87 if (tvlv_handler_tmp->version != version)
88 continue;
89
90 if (!kref_get_unless_zero(&tvlv_handler_tmp->refcount))
91 continue;
92
93 tvlv_handler = tvlv_handler_tmp;
94 break;
95 }
96 rcu_read_unlock();
97
98 return tvlv_handler;
99}
100
101/**
102 * batadv_tvlv_container_release - release tvlv from lists and free
103 * @ref: kref pointer of the tvlv
104 */
105static void batadv_tvlv_container_release(struct kref *ref)
106{
107 struct batadv_tvlv_container *tvlv;
108
109 tvlv = container_of(ref, struct batadv_tvlv_container, refcount);
110 kfree(tvlv);
111}
112
113/**
114 * batadv_tvlv_container_put - decrement the tvlv container refcounter and
115 * possibly release it
116 * @tvlv: the tvlv container to free
117 */
118static void batadv_tvlv_container_put(struct batadv_tvlv_container *tvlv)
119{
120 kref_put(&tvlv->refcount, batadv_tvlv_container_release);
121}
122
123/**
124 * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
125 * list based on the provided type and version (both need to match)
126 * @bat_priv: the bat priv with all the soft interface information
127 * @type: tvlv container type to look for
128 * @version: tvlv container version to look for
129 *
130 * Has to be called with the appropriate locks being acquired
131 * (tvlv.container_list_lock).
132 *
133 * Return: tvlv container if found or NULL otherwise.
134 */
135static struct batadv_tvlv_container *
136batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
137{
138 struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;
139
140 lockdep_assert_held(&bat_priv->tvlv.container_list_lock);
141
142 hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
143 if (tvlv_tmp->tvlv_hdr.type != type)
144 continue;
145
146 if (tvlv_tmp->tvlv_hdr.version != version)
147 continue;
148
149 kref_get(&tvlv_tmp->refcount);
150 tvlv = tvlv_tmp;
151 break;
152 }
153
154 return tvlv;
155}
156
157/**
158 * batadv_tvlv_container_list_size - calculate the size of the tvlv container
159 * list entries
160 * @bat_priv: the bat priv with all the soft interface information
161 *
162 * Has to be called with the appropriate locks being acquired
163 * (tvlv.container_list_lock).
164 *
165 * Return: size of all currently registered tvlv containers in bytes.
166 */
167static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
168{
169 struct batadv_tvlv_container *tvlv;
170 u16 tvlv_len = 0;
171
172 lockdep_assert_held(&bat_priv->tvlv.container_list_lock);
173
174 hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
175 tvlv_len += sizeof(struct batadv_tvlv_hdr);
176 tvlv_len += ntohs(tvlv->tvlv_hdr.len);
177 }
178
179 return tvlv_len;
180}
181
182/**
183 * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
184 * list
185 * @bat_priv: the bat priv with all the soft interface information
186 * @tvlv: the to be removed tvlv container
187 *
188 * Has to be called with the appropriate locks being acquired
189 * (tvlv.container_list_lock).
190 */
191static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv,
192 struct batadv_tvlv_container *tvlv)
193{
194 lockdep_assert_held(&bat_priv->tvlv.container_list_lock);
195
196 if (!tvlv)
197 return;
198
199 hlist_del(&tvlv->list);
200
201 /* first call to decrement the counter, second call to free */
202 batadv_tvlv_container_put(tvlv);
203 batadv_tvlv_container_put(tvlv);
204}
205
206/**
207 * batadv_tvlv_container_unregister - unregister tvlv container based on the
208 * provided type and version (both need to match)
209 * @bat_priv: the bat priv with all the soft interface information
210 * @type: tvlv container type to unregister
211 * @version: tvlv container version to unregister
212 */
213void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
214 u8 type, u8 version)
215{
216 struct batadv_tvlv_container *tvlv;
217
218 spin_lock_bh(&bat_priv->tvlv.container_list_lock);
219 tvlv = batadv_tvlv_container_get(bat_priv, type, version);
220 batadv_tvlv_container_remove(bat_priv, tvlv);
221 spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
222}
223
224/**
225 * batadv_tvlv_container_register - register tvlv type, version and content
226 * to be propagated with each (primary interface) OGM
227 * @bat_priv: the bat priv with all the soft interface information
228 * @type: tvlv container type
229 * @version: tvlv container version
230 * @tvlv_value: tvlv container content
231 * @tvlv_value_len: tvlv container content length
232 *
233 * If a container of the same type and version was already registered the new
234 * content is going to replace the old one.
235 */
236void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
237 u8 type, u8 version,
238 void *tvlv_value, u16 tvlv_value_len)
239{
240 struct batadv_tvlv_container *tvlv_old, *tvlv_new;
241
242 if (!tvlv_value)
243 tvlv_value_len = 0;
244
245 tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
246 if (!tvlv_new)
247 return;
248
249 tvlv_new->tvlv_hdr.version = version;
250 tvlv_new->tvlv_hdr.type = type;
251 tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);
252
253 memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
254 INIT_HLIST_NODE(&tvlv_new->list);
255 kref_init(&tvlv_new->refcount);
256
257 spin_lock_bh(&bat_priv->tvlv.container_list_lock);
258 tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
259 batadv_tvlv_container_remove(bat_priv, tvlv_old);
260 hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
261 spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
262}
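A registration could look like this (sketch; MY_TVLV_TYPE and the payload struct are made up, real callers use their type value from packet.h):

    struct my_tvlv_data {           /* hypothetical container payload */
    	__be32 value;
    };

    static void my_feature_announce(struct batadv_priv *bat_priv)
    {
    	struct my_tvlv_data data = { .value = htonl(42) };

    	/* registering the same type/version again replaces the content */
    	batadv_tvlv_container_register(bat_priv, MY_TVLV_TYPE, 1,
    				       &data, sizeof(data));
    }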
263
264/**
265 * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
266 * requested packet size
267 * @packet_buff: packet buffer
268 * @packet_buff_len: packet buffer size
269 * @min_packet_len: requested packet minimum size
270 * @additional_packet_len: requested additional packet size on top of minimum
271 * size
272 *
273 * Return: true if the packet buffer could be changed to the requested size,
274 * false otherwise.
275 */
276static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
277 int *packet_buff_len,
278 int min_packet_len,
279 int additional_packet_len)
280{
281 unsigned char *new_buff;
282
283 new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);
284
285 /* keep old buffer if kmalloc should fail */
286 if (!new_buff)
287 return false;
288
289 memcpy(new_buff, *packet_buff, min_packet_len);
290 kfree(*packet_buff);
291 *packet_buff = new_buff;
292 *packet_buff_len = min_packet_len + additional_packet_len;
293
294 return true;
295}
296
297/**
298 * batadv_tvlv_container_ogm_append - append tvlv container content to given
299 * OGM packet buffer
300 * @bat_priv: the bat priv with all the soft interface information
301 * @packet_buff: ogm packet buffer
302 * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
303 * content
304 * @packet_min_len: ogm header size to be preserved for the OGM itself
305 *
306 * The ogm packet might be enlarged or shrunk depending on the current size
307 * and the size of the to-be-appended tvlv containers.
308 *
309 * Return: size of all appended tvlv containers in bytes.
310 */
311u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
312 unsigned char **packet_buff,
313 int *packet_buff_len, int packet_min_len)
314{
315 struct batadv_tvlv_container *tvlv;
316 struct batadv_tvlv_hdr *tvlv_hdr;
317 u16 tvlv_value_len;
318 void *tvlv_value;
319 bool ret;
320
321 spin_lock_bh(&bat_priv->tvlv.container_list_lock);
322 tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);
323
324 ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
325 packet_min_len, tvlv_value_len);
326
327 if (!ret)
328 goto end;
329
330 if (!tvlv_value_len)
331 goto end;
332
333 tvlv_value = (*packet_buff) + packet_min_len;
334
335 hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
336 tvlv_hdr = tvlv_value;
337 tvlv_hdr->type = tvlv->tvlv_hdr.type;
338 tvlv_hdr->version = tvlv->tvlv_hdr.version;
339 tvlv_hdr->len = tvlv->tvlv_hdr.len;
340 tvlv_value = tvlv_hdr + 1;
341 memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
342 tvlv_value = (u8 *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
343 }
344
345end:
346 spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
347 return tvlv_value_len;
348}
349
350/**
351 * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
352 * appropriate handlers
353 * @bat_priv: the bat priv with all the soft interface information
354 * @tvlv_handler: tvlv callback function handling the tvlv content
355 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
356 * @orig_node: orig node emitting the ogm packet
357 * @src: source mac address of the unicast packet
358 * @dst: destination mac address of the unicast packet
359 * @tvlv_value: tvlv content
360 * @tvlv_value_len: tvlv content length
361 *
362 * Return: NET_RX_SUCCESS if no handler was found or called, otherwise the
363 * return value of the handler callback.
364 */
365static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
366 struct batadv_tvlv_handler *tvlv_handler,
367 bool ogm_source,
368 struct batadv_orig_node *orig_node,
369 u8 *src, u8 *dst,
370 void *tvlv_value, u16 tvlv_value_len)
371{
372 if (!tvlv_handler)
373 return NET_RX_SUCCESS;
374
375 if (ogm_source) {
376 if (!tvlv_handler->ogm_handler)
377 return NET_RX_SUCCESS;
378
379 if (!orig_node)
380 return NET_RX_SUCCESS;
381
382 tvlv_handler->ogm_handler(bat_priv, orig_node,
383 BATADV_NO_FLAGS,
384 tvlv_value, tvlv_value_len);
385 tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
386 } else {
387 if (!src)
388 return NET_RX_SUCCESS;
389
390 if (!dst)
391 return NET_RX_SUCCESS;
392
393 if (!tvlv_handler->unicast_handler)
394 return NET_RX_SUCCESS;
395
396 return tvlv_handler->unicast_handler(bat_priv, src,
397 dst, tvlv_value,
398 tvlv_value_len);
399 }
400
401 return NET_RX_SUCCESS;
402}
403
404/**
405 * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
406 * appropriate handlers
407 * @bat_priv: the bat priv with all the soft interface information
408 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
409 * @orig_node: orig node emitting the ogm packet
410 * @src: source mac address of the unicast packet
411 * @dst: destination mac address of the unicast packet
412 * @tvlv_value: tvlv content
413 * @tvlv_value_len: tvlv content length
414 *
415 * Return: NET_RX_SUCCESS when processing an OGM, otherwise the OR'ed
416 * return values of all called handler callbacks.
417 */
418int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
419 bool ogm_source,
420 struct batadv_orig_node *orig_node,
421 u8 *src, u8 *dst,
422 void *tvlv_value, u16 tvlv_value_len)
423{
424 struct batadv_tvlv_handler *tvlv_handler;
425 struct batadv_tvlv_hdr *tvlv_hdr;
426 u16 tvlv_value_cont_len;
427 u8 cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
428 int ret = NET_RX_SUCCESS;
429
430 while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
431 tvlv_hdr = tvlv_value;
432 tvlv_value_cont_len = ntohs(tvlv_hdr->len);
433 tvlv_value = tvlv_hdr + 1;
434 tvlv_value_len -= sizeof(*tvlv_hdr);
435
436 if (tvlv_value_cont_len > tvlv_value_len)
437 break;
438
439 tvlv_handler = batadv_tvlv_handler_get(bat_priv,
440 tvlv_hdr->type,
441 tvlv_hdr->version);
442
443 ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
444 ogm_source, orig_node,
445 src, dst, tvlv_value,
446 tvlv_value_cont_len);
447 if (tvlv_handler)
448 batadv_tvlv_handler_put(tvlv_handler);
449 tvlv_value = (u8 *)tvlv_value + tvlv_value_cont_len;
450 tvlv_value_len -= tvlv_value_cont_len;
451 }
452
453 if (!ogm_source)
454 return ret;
455
456 rcu_read_lock();
457 hlist_for_each_entry_rcu(tvlv_handler,
458 &bat_priv->tvlv.handler_list, list) {
459 if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
460 !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
461 tvlv_handler->ogm_handler(bat_priv, orig_node,
462 cifnotfound, NULL, 0);
463
464 tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
465 }
466 rcu_read_unlock();
467
468 return NET_RX_SUCCESS;
469}
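The buffer walked above is a flat sequence of (header, value) records; with the batadv_tvlv_hdr layout from packet.h this is essentially:

    /* on-wire TVLV stream, records repeat until tvlv_value_len is consumed:
     *
     *   | type: u8 | version: u8 | len: __be16 | value: len bytes | next ...
     *
     * struct batadv_tvlv_hdr maps the fixed 4-byte prefix of each record
     */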
470
471/**
472 * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
473 * handlers
474 * @bat_priv: the bat priv with all the soft interface information
475 * @batadv_ogm_packet: ogm packet containing the tvlv containers
476 * @orig_node: orig node emitting the ogm packet
477 */
478void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
479 struct batadv_ogm_packet *batadv_ogm_packet,
480 struct batadv_orig_node *orig_node)
481{
482 void *tvlv_value;
483 u16 tvlv_value_len;
484
485 if (!batadv_ogm_packet)
486 return;
487
488 tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
489 if (!tvlv_value_len)
490 return;
491
492 tvlv_value = batadv_ogm_packet + 1;
493
494 batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
495 tvlv_value, tvlv_value_len);
496}
497
498/**
499 * batadv_tvlv_handler_register - register tvlv handler based on the provided
500 * type and version (both need to match) for ogm tvlv payload and/or unicast
501 * payload
502 * @bat_priv: the bat priv with all the soft interface information
503 * @optr: ogm tvlv handler callback function. This function receives the orig
504 * node, flags and the tvlv content as argument to process.
505 * @uptr: unicast tvlv handler callback function. This function receives the
506 * source & destination of the unicast packet as well as the tvlv content
507 * to process.
508 * @type: tvlv handler type to be registered
509 * @version: tvlv handler version to be registered
510 * @flags: flags to enable or disable TVLV API behavior
511 */
512void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
513 void (*optr)(struct batadv_priv *bat_priv,
514 struct batadv_orig_node *orig,
515 u8 flags,
516 void *tvlv_value,
517 u16 tvlv_value_len),
518 int (*uptr)(struct batadv_priv *bat_priv,
519 u8 *src, u8 *dst,
520 void *tvlv_value,
521 u16 tvlv_value_len),
522 u8 type, u8 version, u8 flags)
523{
524 struct batadv_tvlv_handler *tvlv_handler;
525
526 tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
527 if (tvlv_handler) {
528 batadv_tvlv_handler_put(tvlv_handler);
529 return;
530 }
531
532 tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
533 if (!tvlv_handler)
534 return;
535
536 tvlv_handler->ogm_handler = optr;
537 tvlv_handler->unicast_handler = uptr;
538 tvlv_handler->type = type;
539 tvlv_handler->version = version;
540 tvlv_handler->flags = flags;
541 kref_init(&tvlv_handler->refcount);
542 INIT_HLIST_NODE(&tvlv_handler->list);
543
544 spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
545 hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
546 spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
547}
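Putting both callbacks together, a consumer would register roughly as follows (sketch; the handler bodies and MY_TVLV_TYPE are hypothetical):

    static void my_ogm_handler(struct batadv_priv *bat_priv,
    			   struct batadv_orig_node *orig, u8 flags,
    			   void *tvlv_value, u16 tvlv_value_len)
    {
    	/* parse the per-originator TVLV appended to OGMs */
    }

    static int my_unicast_handler(struct batadv_priv *bat_priv,
    			      u8 *src, u8 *dst,
    			      void *tvlv_value, u16 tvlv_value_len)
    {
    	return NET_RX_SUCCESS; /* consume the unicast TVLV */
    }

    static void my_feature_init(struct batadv_priv *bat_priv)
    {
    	batadv_tvlv_handler_register(bat_priv, my_ogm_handler,
    				     my_unicast_handler, MY_TVLV_TYPE, 1,
    				     BATADV_NO_FLAGS);
    }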
548
549/**
550 * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
551 * provided type and version (both need to match)
552 * @bat_priv: the bat priv with all the soft interface information
553 * @type: tvlv handler type to be unregistered
554 * @version: tvlv handler version to be unregistered
555 */
556void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
557 u8 type, u8 version)
558{
559 struct batadv_tvlv_handler *tvlv_handler;
560
561 tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
562 if (!tvlv_handler)
563 return;
564
565 batadv_tvlv_handler_put(tvlv_handler);
566 spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
567 hlist_del_rcu(&tvlv_handler->list);
568 spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
569 batadv_tvlv_handler_put(tvlv_handler);
570}
571
572/**
573 * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
574 * specified host
575 * @bat_priv: the bat priv with all the soft interface information
576 * @src: source mac address of the unicast packet
577 * @dst: destination mac address of the unicast packet
578 * @type: tvlv type
579 * @version: tvlv version
580 * @tvlv_value: tvlv content
581 * @tvlv_value_len: tvlv content length
582 */
583void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
584 u8 *dst, u8 type, u8 version,
585 void *tvlv_value, u16 tvlv_value_len)
586{
587 struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
588 struct batadv_tvlv_hdr *tvlv_hdr;
589 struct batadv_orig_node *orig_node;
590 struct sk_buff *skb;
591 unsigned char *tvlv_buff;
592 unsigned int tvlv_len;
593 ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
594 int res;
595
596 orig_node = batadv_orig_hash_find(bat_priv, dst);
597 if (!orig_node)
598 return;
599
600 tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;
601
602 skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
603 if (!skb)
604 goto out;
605
606 skb->priority = TC_PRIO_CONTROL;
607 skb_reserve(skb, ETH_HLEN);
608 tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
609 unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
610 unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
611 unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
612 unicast_tvlv_packet->ttl = BATADV_TTL;
613 unicast_tvlv_packet->reserved = 0;
614 unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
615 unicast_tvlv_packet->align = 0;
616 ether_addr_copy(unicast_tvlv_packet->src, src);
617 ether_addr_copy(unicast_tvlv_packet->dst, dst);
618
619 tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
620 tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
621 tvlv_hdr->version = version;
622 tvlv_hdr->type = type;
623 tvlv_hdr->len = htons(tvlv_value_len);
624 tvlv_buff += sizeof(*tvlv_hdr);
625 memcpy(tvlv_buff, tvlv_value, tvlv_value_len);
626
627 res = batadv_send_skb_to_orig(skb, orig_node, NULL);
628 if (res == -1)
629 kfree_skb(skb);
630out:
631 batadv_orig_node_put(orig_node);
632}
diff --git a/net/batman-adv/tvlv.h b/net/batman-adv/tvlv.h
new file mode 100644
index 000000000000..e4369b547b43
--- /dev/null
+++ b/net/batman-adv/tvlv.h
@@ -0,0 +1,61 @@
1/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef _NET_BATMAN_ADV_TVLV_H_
19#define _NET_BATMAN_ADV_TVLV_H_
20
21#include "main.h"
22
23#include <linux/types.h>
24
25struct batadv_ogm_packet;
26
27void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
28 u8 type, u8 version,
29 void *tvlv_value, u16 tvlv_value_len);
30u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
31 unsigned char **packet_buff,
32 int *packet_buff_len, int packet_min_len);
33void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
34 struct batadv_ogm_packet *batadv_ogm_packet,
35 struct batadv_orig_node *orig_node);
36void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
37 u8 type, u8 version);
38
39void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
40 void (*optr)(struct batadv_priv *bat_priv,
41 struct batadv_orig_node *orig,
42 u8 flags,
43 void *tvlv_value,
44 u16 tvlv_value_len),
45 int (*uptr)(struct batadv_priv *bat_priv,
46 u8 *src, u8 *dst,
47 void *tvlv_value,
48 u16 tvlv_value_len),
49 u8 type, u8 version, u8 flags);
50void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
51 u8 type, u8 version);
52int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
53 bool ogm_source,
54 struct batadv_orig_node *orig_node,
55 u8 *src, u8 *dst,
56 void *tvlv_buff, u16 tvlv_buff_len);
57void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
58 u8 *dst, u8 type, u8 version,
59 void *tvlv_value, u16 tvlv_value_len);
60
61#endif /* _NET_BATMAN_ADV_TVLV_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 6a577f4f8ba7..43db7b61f8eb 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -33,6 +33,7 @@
 #include <linux/types.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batman_adv.h>
 
 #include "packet.h"
 
@@ -707,6 +708,8 @@ struct batadv_priv_debug_log {
  * @list: list of available gateway nodes
  * @list_lock: lock protecting gw_list & curr_gw
  * @curr_gw: pointer to currently selected gateway node
+ * @mode: gateway operation: off, client or server (see batadv_gw_modes)
+ * @sel_class: gateway selection class (applies if gw_mode client)
  * @bandwidth_down: advertised uplink download bandwidth (if gw_mode server)
  * @bandwidth_up: advertised uplink upload bandwidth (if gw_mode server)
  * @reselect: bool indicating a gateway re-selection is in progress
@@ -715,6 +718,8 @@ struct batadv_priv_gw {
 	struct hlist_head list;
 	spinlock_t list_lock; /* protects gw_list & curr_gw */
 	struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */
+	atomic_t mode;
+	atomic_t sel_class;
 	atomic_t bandwidth_down;
 	atomic_t bandwidth_up;
 	atomic_t reselect;
@@ -751,14 +756,28 @@ struct batadv_priv_dat {
 
 #ifdef CONFIG_BATMAN_ADV_MCAST
 /**
+ * struct batadv_mcast_querier_state - IGMP/MLD querier state when bridged
+ * @exists: whether a querier exists in the mesh
+ * @shadowing: if a querier exists, whether it is potentially shadowing
+ *  multicast listeners (i.e. querier is behind our own bridge segment)
+ */
+struct batadv_mcast_querier_state {
+	bool exists;
+	bool shadowing;
+};
+
+/**
  * struct batadv_priv_mcast - per mesh interface mcast data
  * @mla_list: list of multicast addresses we are currently announcing via TT
  * @want_all_unsnoopables_list: a list of orig_nodes wanting all unsnoopable
  *  multicast traffic
  * @want_all_ipv4_list: a list of orig_nodes wanting all IPv4 multicast traffic
  * @want_all_ipv6_list: a list of orig_nodes wanting all IPv6 multicast traffic
+ * @querier_ipv4: the current state of an IGMP querier in the mesh
+ * @querier_ipv6: the current state of an MLD querier in the mesh
  * @flags: the flags we have last sent in our mcast tvlv
  * @enabled: whether the multicast tvlv is currently enabled
+ * @bridged: whether the soft interface has a bridge on top
  * @num_disabled: number of nodes that have no mcast tvlv
  * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP traffic
  * @num_want_all_ipv4: counter for items in want_all_ipv4_list
@@ -771,8 +790,11 @@ struct batadv_priv_mcast {
771 struct hlist_head want_all_unsnoopables_list; 790 struct hlist_head want_all_unsnoopables_list;
772 struct hlist_head want_all_ipv4_list; 791 struct hlist_head want_all_ipv4_list;
773 struct hlist_head want_all_ipv6_list; 792 struct hlist_head want_all_ipv6_list;
793 struct batadv_mcast_querier_state querier_ipv4;
794 struct batadv_mcast_querier_state querier_ipv6;
774 u8 flags; 795 u8 flags;
775 bool enabled; 796 bool enabled;
797 bool bridged;
776 atomic_t num_disabled; 798 atomic_t num_disabled;
777 atomic_t num_want_all_unsnoopables; 799 atomic_t num_want_all_unsnoopables;
778 atomic_t num_want_all_ipv4; 800 atomic_t num_want_all_ipv4;
@@ -812,6 +834,111 @@ struct batadv_priv_nc {
812}; 834};
813 835
814/** 836/**
837 * struct batadv_tp_unacked - unacked packet meta-information
838 * @seqno: seqno of the unacked packet
839 * @len: length of the packet
840 * @list: list node for batadv_tp_vars::unacked_list
841 *
842 * This struct represents a buffered unacked packet. However, since
843 * the purpose of the TP meter is only to count the traffic, there is no need
844 * to store the entire sk_buff; the starting offset and the length are enough
845 */
846struct batadv_tp_unacked {
847 u32 seqno;
848 u16 len;
849 struct list_head list;
850};
851
852/**
853 * enum batadv_tp_meter_role - role in a tp meter session
854 * @BATADV_TP_RECEIVER: Initialized as receiver
855 * @BATADV_TP_SENDER: Initialized as sender
856 */
857enum batadv_tp_meter_role {
858 BATADV_TP_RECEIVER,
859 BATADV_TP_SENDER
860};
861
862/**
863 * struct batadv_tp_vars - tp meter private variables per session
864 * @list: list node for bat_priv::tp_list
865 * @timer: timer for ack (receiver) and retry (sender)
866 * @bat_priv: pointer to the mesh object
867 * @start_time: start time in jiffies
868 * @other_end: mac address of remote
869 * @role: receiver/sender mode
870 * @sending: sending binary semaphore: 1 if sending, 0 if not
871 * @reason: reason for a stopped session
872 * @finish_work: work item for the finishing procedure
873 * @test_length: test length in milliseconds
874 * @session: TP session identifier
875 * @icmp_uid: local ICMP "socket" index
876 * @dec_cwnd: decimal part of the cwnd used during linear growth
877 * @cwnd: current size of the congestion window
878 * @cwnd_lock: lock to protect @cwnd & @dec_cwnd
879 * @ss_threshold: Slow Start threshold. Once cwnd exceeds this value the
880 * connection switches to the Congestion Avoidance state
881 * @last_acked: last acked byte
882 * @last_sent: last sent byte, not yet acked
883 * @tot_sent: amount of data sent/ACKed so far
884 * @dup_acks: duplicate ACKs counter
885 * @fast_recovery: true if in Fast Recovery mode
886 * @recover: last sent seqno when entering Fast Recovery
887 * @rto: sender timeout
888 * @srtt: smoothed RTT scaled by 2^3
889 * @rttvar: RTT variation scaled by 2^2
890 * @more_bytes: wait queue anchor when waiting for more acks or a retry timeout
891 * @prerandom_offset: offset inside the prerandom buffer
892 * @prerandom_lock: spinlock protecting access to prerandom_offset
893 * @last_recv: last in-order received packet
894 * @unacked_list: list of unacked packets (meta-info only)
895 * @unacked_lock: protect unacked_list
896 * @last_recv_time: time (jiffies) the last msg was received
897 * @refcount: number of contexts where the object is used
898 * @rcu: struct used for freeing in an RCU-safe manner
899 */
900struct batadv_tp_vars {
901 struct hlist_node list;
902 struct timer_list timer;
903 struct batadv_priv *bat_priv;
904 unsigned long start_time;
905 u8 other_end[ETH_ALEN];
906 enum batadv_tp_meter_role role;
907 atomic_t sending;
908 enum batadv_tp_meter_reason reason;
909 struct delayed_work finish_work;
910 u32 test_length;
911 u8 session[2];
912 u8 icmp_uid;
913
914 /* sender variables */
915 u16 dec_cwnd;
916 u32 cwnd;
917 spinlock_t cwnd_lock; /* Protects cwnd & dec_cwnd */
918 u32 ss_threshold;
919 atomic_t last_acked;
920 u32 last_sent;
921 atomic64_t tot_sent;
922 atomic_t dup_acks;
923 bool fast_recovery;
924 u32 recover;
925 u32 rto;
926 u32 srtt;
927 u32 rttvar;
928 wait_queue_head_t more_bytes;
929 u32 prerandom_offset;
930 spinlock_t prerandom_lock; /* Protects prerandom_offset */
931
932 /* receiver variables */
933 u32 last_recv;
934 struct list_head unacked_list;
935 spinlock_t unacked_lock; /* Protects unacked_list */
936 unsigned long last_recv_time;
937 struct kref refcount;
938 struct rcu_head rcu;
939};
940
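The @srtt/@rttvar scaling documented above follows the classic RFC 6298 estimator: @srtt stores eight times the smoothed RTT and @rttvar four times the variation, so both can be maintained with shifts alone. A minimal sketch of the usual update rule (tp_update_rtt() is a hypothetical helper shown for illustration only, not part of this patch):

static void tp_update_rtt(struct batadv_tp_vars *tp_vars, u32 new_rtt)
{
	if (!tp_vars->srtt) {
		/* first sample: srtt = R (scaled by 2^3), rttvar = R/2 (by 2^2) */
		tp_vars->srtt = new_rtt << 3;
		tp_vars->rttvar = new_rtt << 1;
	} else {
		s32 m = new_rtt - (tp_vars->srtt >> 3);

		/* srtt += (R - srtt) / 8, expressed in scaled units */
		tp_vars->srtt += m;
		if (m < 0)
			m = -m;
		/* rttvar += (|R - srtt| - rttvar) / 4, in scaled units */
		tp_vars->rttvar += m - (tp_vars->rttvar >> 2);
	}
	/* rto = srtt + 4 * rttvar; the shifts undo the field scaling */
	tp_vars->rto = (tp_vars->srtt >> 3) + tp_vars->rttvar;
}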
941/**
815 * struct batadv_softif_vlan - per VLAN attributes set 942 * struct batadv_softif_vlan - per VLAN attributes set
816 * @bat_priv: pointer to the mesh object 943 * @bat_priv: pointer to the mesh object
817 * @vid: VLAN identifier 944 * @vid: VLAN identifier
@@ -865,8 +992,6 @@ struct batadv_priv_bat_v {
865 * enabled 992 * enabled
866 * @multicast_mode: Enable or disable multicast optimizations on this node's 993 * @multicast_mode: Enable or disable multicast optimizations on this node's
867 * sender/originating side 994 * sender/originating side
868 * @gw_mode: gateway operation: off, client or server (see batadv_gw_modes)
869 * @gw_sel_class: gateway selection class (applies if gw_mode client)
870 * @orig_interval: OGM broadcast interval in milliseconds 995 * @orig_interval: OGM broadcast interval in milliseconds
871 * @hop_penalty: penalty which will be applied to an OGM's tq-field on every hop 996 * @hop_penalty: penalty which will be applied to an OGM's tq-field on every hop
872 * @log_level: configured log level (see batadv_dbg_level) 997 * @log_level: configured log level (see batadv_dbg_level)
@@ -881,14 +1006,17 @@ struct batadv_priv_bat_v {
881 * @debug_dir: dentry for debugfs batman-adv subdirectory 1006 * @debug_dir: dentry for debugfs batman-adv subdirectory
882 * @forw_bat_list: list of aggregated OGMs that will be forwarded 1007 * @forw_bat_list: list of aggregated OGMs that will be forwarded
883 * @forw_bcast_list: list of broadcast packets that will be rebroadcasted 1008 * @forw_bcast_list: list of broadcast packets that will be rebroadcasted
1009 * @tp_list: list of tp sessions
1010 * @tp_num: number of currently active tp sessions
884 * @orig_hash: hash table containing mesh participants (orig nodes) 1011 * @orig_hash: hash table containing mesh participants (orig nodes)
885 * @forw_bat_list_lock: lock protecting forw_bat_list 1012 * @forw_bat_list_lock: lock protecting forw_bat_list
886 * @forw_bcast_list_lock: lock protecting forw_bcast_list 1013 * @forw_bcast_list_lock: lock protecting forw_bcast_list
1014 * @tp_list_lock: spinlock protecting @tp_list
887 * @orig_work: work queue callback item for orig node purging 1015 * @orig_work: work queue callback item for orig node purging
888 * @cleanup_work: work queue callback item for soft-interface deinit 1016 * @cleanup_work: work queue callback item for soft-interface deinit
889 * @primary_if: one of the hard-interfaces assigned to this mesh interface 1017 * @primary_if: one of the hard-interfaces assigned to this mesh interface
890 * becomes the primary interface 1018 * becomes the primary interface
891 * @bat_algo_ops: routing algorithm used by this mesh interface 1019 * @algo_ops: routing algorithm used by this mesh interface
892 * @softif_vlan_list: a list of softif_vlan structs, one per VLAN created on top 1020 * @softif_vlan_list: a list of softif_vlan structs, one per VLAN created on top
893 * of the mesh interface represented by this object 1021 * of the mesh interface represented by this object
894 * @softif_vlan_list_lock: lock protecting softif_vlan_list 1022 * @softif_vlan_list_lock: lock protecting softif_vlan_list
@@ -922,8 +1050,6 @@ struct batadv_priv {
922#ifdef CONFIG_BATMAN_ADV_MCAST 1050#ifdef CONFIG_BATMAN_ADV_MCAST
923 atomic_t multicast_mode; 1051 atomic_t multicast_mode;
924#endif 1052#endif
925 atomic_t gw_mode;
926 atomic_t gw_sel_class;
927 atomic_t orig_interval; 1053 atomic_t orig_interval;
928 atomic_t hop_penalty; 1054 atomic_t hop_penalty;
929#ifdef CONFIG_BATMAN_ADV_DEBUG 1055#ifdef CONFIG_BATMAN_ADV_DEBUG
@@ -939,13 +1065,16 @@ struct batadv_priv {
939 struct dentry *debug_dir; 1065 struct dentry *debug_dir;
940 struct hlist_head forw_bat_list; 1066 struct hlist_head forw_bat_list;
941 struct hlist_head forw_bcast_list; 1067 struct hlist_head forw_bcast_list;
1068 struct hlist_head tp_list;
942 struct batadv_hashtable *orig_hash; 1069 struct batadv_hashtable *orig_hash;
943 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ 1070 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
944 spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */ 1071 spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
1072 spinlock_t tp_list_lock; /* protects tp_list */
1073 atomic_t tp_num;
945 struct delayed_work orig_work; 1074 struct delayed_work orig_work;
946 struct work_struct cleanup_work; 1075 struct work_struct cleanup_work;
947 struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */ 1076 struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */
948 struct batadv_algo_ops *bat_algo_ops; 1077 struct batadv_algo_ops *algo_ops;
949 struct hlist_head softif_vlan_list; 1078 struct hlist_head softif_vlan_list;
950 spinlock_t softif_vlan_list_lock; /* protects softif_vlan_list */ 1079 spinlock_t softif_vlan_list_lock; /* protects softif_vlan_list */
951#ifdef CONFIG_BATMAN_ADV_BLA 1080#ifdef CONFIG_BATMAN_ADV_BLA
@@ -1137,11 +1266,13 @@ struct batadv_tt_change_node {
1137 * struct batadv_tt_req_node - data to keep track of the tt requests in flight 1266 * struct batadv_tt_req_node - data to keep track of the tt requests in flight
1138 * @addr: mac address of the originator this request was sent to 1267
1139 * @issued_at: timestamp used for purging stale tt requests 1268 * @issued_at: timestamp used for purging stale tt requests
1269 * @refcount: number of contexts the object is used by
1140 * @list: list node for batadv_priv_tt::req_list 1270 * @list: list node for batadv_priv_tt::req_list
1141 */ 1271 */
1142struct batadv_tt_req_node { 1272struct batadv_tt_req_node {
1143 u8 addr[ETH_ALEN]; 1273 u8 addr[ETH_ALEN];
1144 unsigned long issued_at; 1274 unsigned long issued_at;
1275 struct kref refcount;
1145 struct hlist_node list; 1276 struct hlist_node list;
1146}; 1277};
1147 1278
@@ -1259,66 +1390,77 @@ struct batadv_forw_packet {
1259}; 1390};
1260 1391
1261/** 1392/**
1393 * struct batadv_algo_iface_ops - mesh algorithm callbacks (interface specific)
1394 * @activate: start routing mechanisms when hard-interface is brought up
1395 * @enable: init routing info when hard-interface is enabled
1396 * @disable: de-init routing info when hard-interface is disabled
1397 * @update_mac: (re-)init mac addresses of the protocol information
1398 * belonging to this hard-interface
1399 * @primary_set: called when primary interface is selected / changed
1400 */
1401struct batadv_algo_iface_ops {
1402 void (*activate)(struct batadv_hard_iface *hard_iface);
1403 int (*enable)(struct batadv_hard_iface *hard_iface);
1404 void (*disable)(struct batadv_hard_iface *hard_iface);
1405 void (*update_mac)(struct batadv_hard_iface *hard_iface);
1406 void (*primary_set)(struct batadv_hard_iface *hard_iface);
1407};
1408
1409/**
1410 * struct batadv_algo_neigh_ops - mesh algorithm callbacks (neighbour specific)
1411 * @hardif_init: called on creation of single hop entry
1412 * @cmp: compare the metrics of two neighbors for their respective outgoing
1413 * interfaces
1414 * @is_similar_or_better: check if neigh1 is equally good or better than
1415 * neigh2 for their respective outgoing interface from the metric perspective
1416 * @print: print the single hop neighbor list (optional)
1417 */
1418struct batadv_algo_neigh_ops {
1419 void (*hardif_init)(struct batadv_hardif_neigh_node *neigh);
1420 int (*cmp)(struct batadv_neigh_node *neigh1,
1421 struct batadv_hard_iface *if_outgoing1,
1422 struct batadv_neigh_node *neigh2,
1423 struct batadv_hard_iface *if_outgoing2);
1424 bool (*is_similar_or_better)(struct batadv_neigh_node *neigh1,
1425 struct batadv_hard_iface *if_outgoing1,
1426 struct batadv_neigh_node *neigh2,
1427 struct batadv_hard_iface *if_outgoing2);
1428 void (*print)(struct batadv_priv *priv, struct seq_file *seq);
1429};
1430
1431/**
1432 * struct batadv_algo_orig_ops - mesh algorithm callbacks (originator specific)
1433 * @free: free the resources allocated by the routing algorithm for an orig_node
1434 * object
1435 * @add_if: ask the routing algorithm to apply the needed changes to the
1436 * orig_node due to a new hard-interface being added into the mesh
1437 * @del_if: ask the routing algorithm to apply the needed changes to the
1438 * orig_node due to a hard-interface being removed from the mesh
1439 * @print: print the originator table (optional)
1440 */
1441struct batadv_algo_orig_ops {
1442 void (*free)(struct batadv_orig_node *orig_node);
1443 int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num);
1444 int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num,
1445 int del_if_num);
1446 void (*print)(struct batadv_priv *priv, struct seq_file *seq,
1447 struct batadv_hard_iface *hard_iface);
1448};
1449
1450/**
1262 * struct batadv_algo_ops - mesh algorithm callbacks 1451 * struct batadv_algo_ops - mesh algorithm callbacks
1263 * @list: list node for the batadv_algo_list 1452 * @list: list node for the batadv_algo_list
1264 * @name: name of the algorithm 1453 * @name: name of the algorithm
1265 * @bat_iface_activate: start routing mechanisms when hard-interface is brought 1454 * @iface: callbacks related to interface handling
1266 * up 1455 * @neigh: callbacks related to neighbors handling
1267 * @bat_iface_enable: init routing info when hard-interface is enabled 1456 * @orig: callbacks related to originators handling
1268 * @bat_iface_disable: de-init routing info when hard-interface is disabled
1269 * @bat_iface_update_mac: (re-)init mac addresses of the protocol information
1270 * belonging to this hard-interface
1271 * @bat_primary_iface_set: called when primary interface is selected / changed
1272 * @bat_ogm_schedule: prepare a new outgoing OGM for the send queue
1273 * @bat_ogm_emit: send scheduled OGM
1274 * @bat_hardif_neigh_init: called on creation of single hop entry
1275 * @bat_neigh_cmp: compare the metrics of two neighbors for their respective
1276 * outgoing interfaces
1277 * @bat_neigh_is_similar_or_better: check if neigh1 is equally similar or
1278 * better than neigh2 for their respective outgoing interface from the metric
1279 * perspective
1280 * @bat_neigh_print: print the single hop neighbor list (optional)
1281 * @bat_neigh_free: free the resources allocated by the routing algorithm for a
1282 * neigh_node object
1283 * @bat_orig_print: print the originator table (optional)
1284 * @bat_orig_free: free the resources allocated by the routing algorithm for an
1285 * orig_node object
1286 * @bat_orig_add_if: ask the routing algorithm to apply the needed changes to
1287 * the orig_node due to a new hard-interface being added into the mesh
1288 * @bat_orig_del_if: ask the routing algorithm to apply the needed changes to
1289 * the orig_node due to a hard-interface being removed from the mesh
1290 */ 1457 */
1291struct batadv_algo_ops { 1458struct batadv_algo_ops {
1292 struct hlist_node list; 1459 struct hlist_node list;
1293 char *name; 1460 char *name;
1294 void (*bat_iface_activate)(struct batadv_hard_iface *hard_iface); 1461 struct batadv_algo_iface_ops iface;
1295 int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface); 1462 struct batadv_algo_neigh_ops neigh;
1296 void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface); 1463 struct batadv_algo_orig_ops orig;
1297 void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface);
1298 void (*bat_primary_iface_set)(struct batadv_hard_iface *hard_iface);
1299 void (*bat_ogm_schedule)(struct batadv_hard_iface *hard_iface);
1300 void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet);
1301 /* neigh_node handling API */
1302 void (*bat_hardif_neigh_init)(struct batadv_hardif_neigh_node *neigh);
1303 int (*bat_neigh_cmp)(struct batadv_neigh_node *neigh1,
1304 struct batadv_hard_iface *if_outgoing1,
1305 struct batadv_neigh_node *neigh2,
1306 struct batadv_hard_iface *if_outgoing2);
1307 bool (*bat_neigh_is_similar_or_better)
1308 (struct batadv_neigh_node *neigh1,
1309 struct batadv_hard_iface *if_outgoing1,
1310 struct batadv_neigh_node *neigh2,
1311 struct batadv_hard_iface *if_outgoing2);
1312 void (*bat_neigh_print)(struct batadv_priv *priv, struct seq_file *seq);
1313 void (*bat_neigh_free)(struct batadv_neigh_node *neigh);
1314 /* orig_node handling API */
1315 void (*bat_orig_print)(struct batadv_priv *priv, struct seq_file *seq,
1316 struct batadv_hard_iface *hard_iface);
1317 void (*bat_orig_free)(struct batadv_orig_node *orig_node);
1318 int (*bat_orig_add_if)(struct batadv_orig_node *orig_node,
1319 int max_if_num);
1320 int (*bat_orig_del_if)(struct batadv_orig_node *orig_node,
1321 int max_if_num, int del_if_num);
1322}; 1464};
1323 1465
1324/** 1466/**
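With the callbacks regrouped into batadv_algo_iface_ops, batadv_algo_neigh_ops and batadv_algo_orig_ops, a routing algorithm now fills three small sub-structs instead of a flat list of bat_*-prefixed members. An illustrative registration under that assumption (the batadv_iv_* names are hypothetical placeholders):

static struct batadv_algo_ops batadv_algo_iv __read_mostly = {
	.name = "BATMAN_IV",
	.iface = {
		.enable = batadv_iv_iface_enable,
		.disable = batadv_iv_iface_disable,
		.update_mac = batadv_iv_iface_update_mac,
		.primary_set = batadv_iv_primary_iface_set,
	},
	.neigh = {
		.hardif_init = batadv_iv_hardif_neigh_init,
		.cmp = batadv_iv_neigh_cmp,
		.is_similar_or_better = batadv_iv_neigh_is_sob,
	},
	.orig = {
		.print = batadv_iv_orig_print,
		.free = batadv_iv_orig_free,
		.add_if = batadv_iv_orig_add_if,
		.del_if = batadv_iv_orig_del_if,
	},
};

Call sites follow the same shape, e.g. bat_priv->algo_ops->iface.enable(hard_iface) where bat_priv->bat_algo_ops->bat_iface_enable(hard_iface) was used before.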
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 2c8095a5d824..8eecd0ec22f2 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -104,8 +104,16 @@ static int br_dev_init(struct net_device *dev)
104 return -ENOMEM; 104 return -ENOMEM;
105 105
106 err = br_vlan_init(br); 106 err = br_vlan_init(br);
107 if (err) 107 if (err) {
108 free_percpu(br->stats); 108 free_percpu(br->stats);
109 return err;
110 }
111
112 err = br_multicast_init_stats(br);
113 if (err) {
114 free_percpu(br->stats);
115 br_vlan_flush(br);
116 }
109 br_set_lockdep_class(dev); 117 br_set_lockdep_class(dev);
110 118
111 return err; 119 return err;
@@ -341,6 +349,8 @@ static const struct net_device_ops br_netdev_ops = {
341 .ndo_add_slave = br_add_slave, 349 .ndo_add_slave = br_add_slave,
342 .ndo_del_slave = br_del_slave, 350 .ndo_del_slave = br_del_slave,
343 .ndo_fix_features = br_fix_features, 351 .ndo_fix_features = br_fix_features,
352 .ndo_neigh_construct = netdev_default_l2upper_neigh_construct,
353 .ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy,
344 .ndo_fdb_add = br_fdb_add, 354 .ndo_fdb_add = br_fdb_add,
345 .ndo_fdb_del = br_fdb_delete, 355 .ndo_fdb_del = br_fdb_delete,
346 .ndo_fdb_dump = br_fdb_dump, 356 .ndo_fdb_dump = br_fdb_dump,
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index f47759f05b6d..6c196037d818 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -198,8 +198,10 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb,
198 struct sk_buff *skb), 198 struct sk_buff *skb),
199 bool unicast) 199 bool unicast)
200{ 200{
201 struct net_bridge_port *p; 201 u8 igmp_type = br_multicast_igmp_type(skb);
202 __be16 proto = skb->protocol;
202 struct net_bridge_port *prev; 203 struct net_bridge_port *prev;
204 struct net_bridge_port *p;
203 205
204 prev = NULL; 206 prev = NULL;
205 207
@@ -218,6 +220,9 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb,
218 prev = maybe_deliver(prev, p, skb, __packet_hook); 220 prev = maybe_deliver(prev, p, skb, __packet_hook);
219 if (IS_ERR(prev)) 221 if (IS_ERR(prev))
220 goto out; 222 goto out;
223 if (prev == p)
224 br_multicast_count(p->br, p, proto, igmp_type,
225 BR_MCAST_DIR_TX);
221 } 226 }
222 227
223 if (!prev) 228 if (!prev)
@@ -257,9 +262,12 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
257 struct sk_buff *skb)) 262 struct sk_buff *skb))
258{ 263{
259 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; 264 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
265 u8 igmp_type = br_multicast_igmp_type(skb);
260 struct net_bridge *br = netdev_priv(dev); 266 struct net_bridge *br = netdev_priv(dev);
261 struct net_bridge_port *prev = NULL; 267 struct net_bridge_port *prev = NULL;
262 struct net_bridge_port_group *p; 268 struct net_bridge_port_group *p;
269 __be16 proto = skb->protocol;
270
263 struct hlist_node *rp; 271 struct hlist_node *rp;
264 272
265 rp = rcu_dereference(hlist_first_rcu(&br->router_list)); 273 rp = rcu_dereference(hlist_first_rcu(&br->router_list));
@@ -277,6 +285,9 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
277 prev = maybe_deliver(prev, port, skb, __packet_hook); 285 prev = maybe_deliver(prev, port, skb, __packet_hook);
278 if (IS_ERR(prev)) 286 if (IS_ERR(prev))
279 goto out; 287 goto out;
288 if (prev == port)
289 br_multicast_count(port->br, port, proto, igmp_type,
290 BR_MCAST_DIR_TX);
280 291
281 if ((unsigned long)lport >= (unsigned long)port) 292 if ((unsigned long)lport >= (unsigned long)port)
282 p = rcu_dereference(p->next); 293 p = rcu_dereference(p->next);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 8217aecf025b..f2fede05d32c 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -345,8 +345,8 @@ static int find_portno(struct net_bridge *br)
345static struct net_bridge_port *new_nbp(struct net_bridge *br, 345static struct net_bridge_port *new_nbp(struct net_bridge *br,
346 struct net_device *dev) 346 struct net_device *dev)
347{ 347{
348 int index;
349 struct net_bridge_port *p; 348 struct net_bridge_port *p;
349 int index, err;
350 350
351 index = find_portno(br); 351 index = find_portno(br);
352 if (index < 0) 352 if (index < 0)
@@ -366,7 +366,12 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
366 br_init_port(p); 366 br_init_port(p);
367 br_set_state(p, BR_STATE_DISABLED); 367 br_set_state(p, BR_STATE_DISABLED);
368 br_stp_port_timer_init(p); 368 br_stp_port_timer_init(p);
369 br_multicast_add_port(p); 369 err = br_multicast_add_port(p);
370 if (err) {
371 dev_put(dev);
372 kfree(p);
373 p = ERR_PTR(err);
374 }
370 375
371 return p; 376 return p;
372} 377}
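Since br_multicast_add_port() can now fail with -ENOMEM, new_nbp() propagates that failure as an ERR_PTR instead of only returning -EINVAL for an exhausted port number space. The existing caller in br_add_if() already copes with this pattern; a sketch of the caller side:

	p = new_nbp(br, dev);
	if (IS_ERR(p))
		return PTR_ERR(p);	/* -EINVAL or, now, -ENOMEM */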
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 160797722228..786602bc0567 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -60,6 +60,9 @@ static int br_pass_frame_up(struct sk_buff *skb)
60 skb = br_handle_vlan(br, vg, skb); 60 skb = br_handle_vlan(br, vg, skb);
61 if (!skb) 61 if (!skb)
62 return NET_RX_DROP; 62 return NET_RX_DROP;
63 /* update the multicast stats if the packet is IGMP/MLD */
64 br_multicast_count(br, NULL, skb->protocol, br_multicast_igmp_type(skb),
65 BR_MCAST_DIR_TX);
63 66
64 return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, 67 return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
65 dev_net(indev), NULL, skb, indev, NULL, 68 dev_net(indev), NULL, skb, indev, NULL,
@@ -213,8 +216,7 @@ drop:
213} 216}
214EXPORT_SYMBOL_GPL(br_handle_frame_finish); 217EXPORT_SYMBOL_GPL(br_handle_frame_finish);
215 218
216/* note: already called with rcu_read_lock */ 219static void __br_handle_local_finish(struct sk_buff *skb)
217static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
218{ 220{
219 struct net_bridge_port *p = br_port_get_rcu(skb->dev); 221 struct net_bridge_port *p = br_port_get_rcu(skb->dev);
220 u16 vid = 0; 222 u16 vid = 0;
@@ -222,6 +224,14 @@ static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_bu
222 /* check if vlan is allowed, to avoid spoofing */ 224 /* check if vlan is allowed, to avoid spoofing */
223 if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid)) 225 if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
224 br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false); 226 br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
227}
228
229/* note: already called with rcu_read_lock */
230static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
231{
232 struct net_bridge_port *p = br_port_get_rcu(skb->dev);
233
234 __br_handle_local_finish(skb);
225 235
226 BR_INPUT_SKB_CB(skb)->brdev = p->br->dev; 236 BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
227 br_pass_frame_up(skb); 237 br_pass_frame_up(skb);
@@ -274,7 +284,9 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
274 if (p->br->stp_enabled == BR_NO_STP || 284 if (p->br->stp_enabled == BR_NO_STP ||
275 fwd_mask & (1u << dest[5])) 285 fwd_mask & (1u << dest[5]))
276 goto forward; 286 goto forward;
277 break; 287 *pskb = skb;
288 __br_handle_local_finish(skb);
289 return RX_HANDLER_PASS;
278 290
279 case 0x01: /* IEEE MAC (Pause) */ 291 case 0x01: /* IEEE MAC (Pause) */
280 goto drop; 292 goto drop;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 6852f3c7009c..e405eef0ae2e 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -361,7 +361,8 @@ out:
361} 361}
362 362
363static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br, 363static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
364 __be32 group) 364 __be32 group,
365 u8 *igmp_type)
365{ 366{
366 struct sk_buff *skb; 367 struct sk_buff *skb;
367 struct igmphdr *ih; 368 struct igmphdr *ih;
@@ -411,6 +412,7 @@ static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
411 412
412 skb_set_transport_header(skb, skb->len); 413 skb_set_transport_header(skb, skb->len);
413 ih = igmp_hdr(skb); 414 ih = igmp_hdr(skb);
415 *igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;
414 ih->type = IGMP_HOST_MEMBERSHIP_QUERY; 416 ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
415 ih->code = (group ? br->multicast_last_member_interval : 417 ih->code = (group ? br->multicast_last_member_interval :
416 br->multicast_query_response_interval) / 418 br->multicast_query_response_interval) /
@@ -428,7 +430,8 @@ out:
428 430
429#if IS_ENABLED(CONFIG_IPV6) 431#if IS_ENABLED(CONFIG_IPV6)
430static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, 432static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
431 const struct in6_addr *group) 433 const struct in6_addr *grp,
434 u8 *igmp_type)
432{ 435{
433 struct sk_buff *skb; 436 struct sk_buff *skb;
434 struct ipv6hdr *ip6h; 437 struct ipv6hdr *ip6h;
@@ -464,8 +467,11 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
464 if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, 467 if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
465 &ip6h->saddr)) { 468 &ip6h->saddr)) {
466 kfree_skb(skb); 469 kfree_skb(skb);
470 br->has_ipv6_addr = 0;
467 return NULL; 471 return NULL;
468 } 472 }
473
474 br->has_ipv6_addr = 1;
469 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); 475 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
470 476
471 hopopt = (u8 *)(ip6h + 1); 477 hopopt = (u8 *)(ip6h + 1);
@@ -484,16 +490,17 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
484 skb_set_transport_header(skb, skb->len); 490 skb_set_transport_header(skb, skb->len);
485 mldq = (struct mld_msg *) icmp6_hdr(skb); 491 mldq = (struct mld_msg *) icmp6_hdr(skb);
486 492
487 interval = ipv6_addr_any(group) ? 493 interval = ipv6_addr_any(grp) ?
488 br->multicast_query_response_interval : 494 br->multicast_query_response_interval :
489 br->multicast_last_member_interval; 495 br->multicast_last_member_interval;
490 496
497 *igmp_type = ICMPV6_MGM_QUERY;
491 mldq->mld_type = ICMPV6_MGM_QUERY; 498 mldq->mld_type = ICMPV6_MGM_QUERY;
492 mldq->mld_code = 0; 499 mldq->mld_code = 0;
493 mldq->mld_cksum = 0; 500 mldq->mld_cksum = 0;
494 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval)); 501 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
495 mldq->mld_reserved = 0; 502 mldq->mld_reserved = 0;
496 mldq->mld_mca = *group; 503 mldq->mld_mca = *grp;
497 504
498 /* checksum */ 505 /* checksum */
499 mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 506 mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
@@ -510,14 +517,16 @@ out:
510#endif 517#endif
511 518
512static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, 519static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
513 struct br_ip *addr) 520 struct br_ip *addr,
521 u8 *igmp_type)
514{ 522{
515 switch (addr->proto) { 523 switch (addr->proto) {
516 case htons(ETH_P_IP): 524 case htons(ETH_P_IP):
517 return br_ip4_multicast_alloc_query(br, addr->u.ip4); 525 return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
518#if IS_ENABLED(CONFIG_IPV6) 526#if IS_ENABLED(CONFIG_IPV6)
519 case htons(ETH_P_IPV6): 527 case htons(ETH_P_IPV6):
520 return br_ip6_multicast_alloc_query(br, &addr->u.ip6); 528 return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
529 igmp_type);
521#endif 530#endif
522 } 531 }
523 return NULL; 532 return NULL;
@@ -826,18 +835,23 @@ static void __br_multicast_send_query(struct net_bridge *br,
826 struct br_ip *ip) 835 struct br_ip *ip)
827{ 836{
828 struct sk_buff *skb; 837 struct sk_buff *skb;
838 u8 igmp_type;
829 839
830 skb = br_multicast_alloc_query(br, ip); 840 skb = br_multicast_alloc_query(br, ip, &igmp_type);
831 if (!skb) 841 if (!skb)
832 return; 842 return;
833 843
834 if (port) { 844 if (port) {
835 skb->dev = port->dev; 845 skb->dev = port->dev;
846 br_multicast_count(br, port, skb->protocol, igmp_type,
847 BR_MCAST_DIR_TX);
836 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, 848 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
837 dev_net(port->dev), NULL, skb, NULL, skb->dev, 849 dev_net(port->dev), NULL, skb, NULL, skb->dev,
838 br_dev_queue_push_xmit); 850 br_dev_queue_push_xmit);
839 } else { 851 } else {
840 br_multicast_select_own_querier(br, ip, skb); 852 br_multicast_select_own_querier(br, ip, skb);
853 br_multicast_count(br, port, skb->protocol, igmp_type,
854 BR_MCAST_DIR_RX);
841 netif_rx(skb); 855 netif_rx(skb);
842 } 856 }
843} 857}
@@ -915,7 +929,7 @@ static void br_ip6_multicast_port_query_expired(unsigned long data)
915} 929}
916#endif 930#endif
917 931
918void br_multicast_add_port(struct net_bridge_port *port) 932int br_multicast_add_port(struct net_bridge_port *port)
919{ 933{
920 port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY; 934 port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
921 935
@@ -927,6 +941,11 @@ void br_multicast_add_port(struct net_bridge_port *port)
927 setup_timer(&port->ip6_own_query.timer, 941 setup_timer(&port->ip6_own_query.timer,
928 br_ip6_multicast_port_query_expired, (unsigned long)port); 942 br_ip6_multicast_port_query_expired, (unsigned long)port);
929#endif 943#endif
944 port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
945 if (!port->mcast_stats)
946 return -ENOMEM;
947
948 return 0;
930} 949}
931 950
932void br_multicast_del_port(struct net_bridge_port *port) 951void br_multicast_del_port(struct net_bridge_port *port)
@@ -941,6 +960,7 @@ void br_multicast_del_port(struct net_bridge_port *port)
941 br_multicast_del_pg(br, pg); 960 br_multicast_del_pg(br, pg);
942 spin_unlock_bh(&br->multicast_lock); 961 spin_unlock_bh(&br->multicast_lock);
943 del_timer_sync(&port->multicast_router_timer); 962 del_timer_sync(&port->multicast_router_timer);
963 free_percpu(port->mcast_stats);
944} 964}
945 965
946static void br_multicast_enable(struct bridge_mcast_own_query *query) 966static void br_multicast_enable(struct bridge_mcast_own_query *query)
@@ -1580,6 +1600,39 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
1580} 1600}
1581#endif 1601#endif
1582 1602
1603static void br_multicast_err_count(const struct net_bridge *br,
1604 const struct net_bridge_port *p,
1605 __be16 proto)
1606{
1607 struct bridge_mcast_stats __percpu *stats;
1608 struct bridge_mcast_stats *pstats;
1609
1610 if (!br->multicast_stats_enabled)
1611 return;
1612
1613 if (p)
1614 stats = p->mcast_stats;
1615 else
1616 stats = br->mcast_stats;
1617 if (WARN_ON(!stats))
1618 return;
1619
1620 pstats = this_cpu_ptr(stats);
1621
1622 u64_stats_update_begin(&pstats->syncp);
1623 switch (proto) {
1624 case htons(ETH_P_IP):
1625 pstats->mstats.igmp_parse_errors++;
1626 break;
1627#if IS_ENABLED(CONFIG_IPV6)
1628 case htons(ETH_P_IPV6):
1629 pstats->mstats.mld_parse_errors++;
1630 break;
1631#endif
1632 }
1633 u64_stats_update_end(&pstats->syncp);
1634}
1635
1583static int br_multicast_ipv4_rcv(struct net_bridge *br, 1636static int br_multicast_ipv4_rcv(struct net_bridge *br,
1584 struct net_bridge_port *port, 1637 struct net_bridge_port *port,
1585 struct sk_buff *skb, 1638 struct sk_buff *skb,
@@ -1596,11 +1649,12 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1596 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1649 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1597 return 0; 1650 return 0;
1598 } else if (err < 0) { 1651 } else if (err < 0) {
1652 br_multicast_err_count(br, port, skb->protocol);
1599 return err; 1653 return err;
1600 } 1654 }
1601 1655
1602 BR_INPUT_SKB_CB(skb)->igmp = 1;
1603 ih = igmp_hdr(skb); 1656 ih = igmp_hdr(skb);
1657 BR_INPUT_SKB_CB(skb)->igmp = ih->type;
1604 1658
1605 switch (ih->type) { 1659 switch (ih->type) {
1606 case IGMP_HOST_MEMBERSHIP_REPORT: 1660 case IGMP_HOST_MEMBERSHIP_REPORT:
@@ -1622,6 +1676,9 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1622 if (skb_trimmed && skb_trimmed != skb) 1676 if (skb_trimmed && skb_trimmed != skb)
1623 kfree_skb(skb_trimmed); 1677 kfree_skb(skb_trimmed);
1624 1678
1679 br_multicast_count(br, port, skb->protocol, BR_INPUT_SKB_CB(skb)->igmp,
1680 BR_MCAST_DIR_RX);
1681
1625 return err; 1682 return err;
1626} 1683}
1627 1684
@@ -1642,11 +1699,12 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1642 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1699 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1643 return 0; 1700 return 0;
1644 } else if (err < 0) { 1701 } else if (err < 0) {
1702 br_multicast_err_count(br, port, skb->protocol);
1645 return err; 1703 return err;
1646 } 1704 }
1647 1705
1648 BR_INPUT_SKB_CB(skb)->igmp = 1;
1649 mld = (struct mld_msg *)skb_transport_header(skb); 1706 mld = (struct mld_msg *)skb_transport_header(skb);
1707 BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;
1650 1708
1651 switch (mld->mld_type) { 1709 switch (mld->mld_type) {
1652 case ICMPV6_MGM_REPORT: 1710 case ICMPV6_MGM_REPORT:
@@ -1667,6 +1725,9 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1667 if (skb_trimmed && skb_trimmed != skb) 1725 if (skb_trimmed && skb_trimmed != skb)
1668 kfree_skb(skb_trimmed); 1726 kfree_skb(skb_trimmed);
1669 1727
1728 br_multicast_count(br, port, skb->protocol, BR_INPUT_SKB_CB(skb)->igmp,
1729 BR_MCAST_DIR_RX);
1730
1670 return err; 1731 return err;
1671} 1732}
1672#endif 1733#endif
@@ -1674,6 +1735,8 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1674int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, 1735int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1675 struct sk_buff *skb, u16 vid) 1736 struct sk_buff *skb, u16 vid)
1676{ 1737{
1738 int ret = 0;
1739
1677 BR_INPUT_SKB_CB(skb)->igmp = 0; 1740 BR_INPUT_SKB_CB(skb)->igmp = 0;
1678 BR_INPUT_SKB_CB(skb)->mrouters_only = 0; 1741 BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
1679 1742
@@ -1682,14 +1745,16 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1682 1745
1683 switch (skb->protocol) { 1746 switch (skb->protocol) {
1684 case htons(ETH_P_IP): 1747 case htons(ETH_P_IP):
1685 return br_multicast_ipv4_rcv(br, port, skb, vid); 1748 ret = br_multicast_ipv4_rcv(br, port, skb, vid);
1749 break;
1686#if IS_ENABLED(CONFIG_IPV6) 1750#if IS_ENABLED(CONFIG_IPV6)
1687 case htons(ETH_P_IPV6): 1751 case htons(ETH_P_IPV6):
1688 return br_multicast_ipv6_rcv(br, port, skb, vid); 1752 ret = br_multicast_ipv6_rcv(br, port, skb, vid);
1753 break;
1689#endif 1754#endif
1690 } 1755 }
1691 1756
1692 return 0; 1757 return ret;
1693} 1758}
1694 1759
1695static void br_multicast_query_expired(struct net_bridge *br, 1760static void br_multicast_query_expired(struct net_bridge *br,
@@ -1745,6 +1810,7 @@ void br_multicast_init(struct net_bridge *br)
1745 br->ip6_other_query.delay_time = 0; 1810 br->ip6_other_query.delay_time = 0;
1746 br->ip6_querier.port = NULL; 1811 br->ip6_querier.port = NULL;
1747#endif 1812#endif
1813 br->has_ipv6_addr = 1;
1748 1814
1749 spin_lock_init(&br->multicast_lock); 1815 spin_lock_init(&br->multicast_lock);
1750 setup_timer(&br->multicast_router_timer, 1816 setup_timer(&br->multicast_router_timer,
@@ -1827,6 +1893,8 @@ void br_multicast_dev_del(struct net_bridge *br)
1827 1893
1828out: 1894out:
1829 spin_unlock_bh(&br->multicast_lock); 1895 spin_unlock_bh(&br->multicast_lock);
1896
1897 free_percpu(br->mcast_stats);
1830} 1898}
1831 1899
1832int br_multicast_set_router(struct net_bridge *br, unsigned long val) 1900int br_multicast_set_router(struct net_bridge *br, unsigned long val)
@@ -2181,3 +2249,128 @@ unlock:
2181 return ret; 2249 return ret;
2182} 2250}
2183EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent); 2251EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
2252
2253static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
2254 __be16 proto, u8 type, u8 dir)
2255{
2256 struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
2257
2258 u64_stats_update_begin(&pstats->syncp);
2259 switch (proto) {
2260 case htons(ETH_P_IP):
2261 switch (type) {
2262 case IGMP_HOST_MEMBERSHIP_REPORT:
2263 pstats->mstats.igmp_v1reports[dir]++;
2264 break;
2265 case IGMPV2_HOST_MEMBERSHIP_REPORT:
2266 pstats->mstats.igmp_v2reports[dir]++;
2267 break;
2268 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2269 pstats->mstats.igmp_v3reports[dir]++;
2270 break;
2271 case IGMP_HOST_MEMBERSHIP_QUERY:
2272 pstats->mstats.igmp_queries[dir]++;
2273 break;
2274 case IGMP_HOST_LEAVE_MESSAGE:
2275 pstats->mstats.igmp_leaves[dir]++;
2276 break;
2277 }
2278 break;
2279#if IS_ENABLED(CONFIG_IPV6)
2280 case htons(ETH_P_IPV6):
2281 switch (type) {
2282 case ICMPV6_MGM_REPORT:
2283 pstats->mstats.mld_v1reports[dir]++;
2284 break;
2285 case ICMPV6_MLD2_REPORT:
2286 pstats->mstats.mld_v2reports[dir]++;
2287 break;
2288 case ICMPV6_MGM_QUERY:
2289 pstats->mstats.mld_queries[dir]++;
2290 break;
2291 case ICMPV6_MGM_REDUCTION:
2292 pstats->mstats.mld_leaves[dir]++;
2293 break;
2294 }
2295 break;
2296#endif /* CONFIG_IPV6 */
2297 }
2298 u64_stats_update_end(&pstats->syncp);
2299}
2300
2301void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
2302 __be16 proto, u8 type, u8 dir)
2303{
2304 struct bridge_mcast_stats __percpu *stats;
2305
2306 /* if multicast_disabled is true then igmp type can't be set */
2307 if (!type || !br->multicast_stats_enabled)
2308 return;
2309
2310 if (p)
2311 stats = p->mcast_stats;
2312 else
2313 stats = br->mcast_stats;
2314 if (WARN_ON(!stats))
2315 return;
2316
2317 br_mcast_stats_add(stats, proto, type, dir);
2318}
2319
2320int br_multicast_init_stats(struct net_bridge *br)
2321{
2322 br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
2323 if (!br->mcast_stats)
2324 return -ENOMEM;
2325
2326 return 0;
2327}
2328
2329static void mcast_stats_add_dir(u64 *dst, u64 *src)
2330{
2331 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
2332 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
2333}
2334
2335void br_multicast_get_stats(const struct net_bridge *br,
2336 const struct net_bridge_port *p,
2337 struct br_mcast_stats *dest)
2338{
2339 struct bridge_mcast_stats __percpu *stats;
2340 struct br_mcast_stats tdst;
2341 int i;
2342
2343 memset(dest, 0, sizeof(*dest));
2344 if (p)
2345 stats = p->mcast_stats;
2346 else
2347 stats = br->mcast_stats;
2348 if (WARN_ON(!stats))
2349 return;
2350
2351 memset(&tdst, 0, sizeof(tdst));
2352 for_each_possible_cpu(i) {
2353 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
2354 struct br_mcast_stats temp;
2355 unsigned int start;
2356
2357 do {
2358 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
2359 memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
2360 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
2361
2362 mcast_stats_add_dir(tdst.igmp_queries, temp.igmp_queries);
2363 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
2364 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
2365 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
2366 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
2367 tdst.igmp_parse_errors += temp.igmp_parse_errors;
2368
2369 mcast_stats_add_dir(tdst.mld_queries, temp.mld_queries);
2370 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
2371 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
2372 mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
2373 tdst.mld_parse_errors += temp.mld_parse_errors;
2374 }
2375 memcpy(dest, &tdst, sizeof(*dest));
2376}
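br_multicast_get_stats() folds the per-CPU counters into a single struct under the u64_stats seqcount, so readers get a consistent snapshot without taking a lock. A hypothetical consumer (for illustration only, not part of this patch):

	struct br_mcast_stats totals;

	br_multicast_get_stats(br, NULL, &totals);	/* NULL port: bridge-wide */
	pr_info("IGMP queries rx=%llu tx=%llu, parse errors=%llu\n",
		totals.igmp_queries[BR_MCAST_DIR_RX],
		totals.igmp_queries[BR_MCAST_DIR_TX],
		totals.igmp_parse_errors);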
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 2d25979273a6..77e7f69bf80d 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -700,7 +700,7 @@ static int
700br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, 700br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
701 int (*output)(struct net *, struct sock *, struct sk_buff *)) 701 int (*output)(struct net *, struct sock *, struct sk_buff *))
702{ 702{
703 unsigned int mtu = ip_skb_dst_mtu(skb); 703 unsigned int mtu = ip_skb_dst_mtu(sk, skb);
704 struct iphdr *iph = ip_hdr(skb); 704 struct iphdr *iph = ip_hdr(skb);
705 705
706 if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) || 706 if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index a5343c7232bf..f2a29e467e78 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -851,6 +851,7 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
851 [IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 }, 851 [IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
852 [IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 }, 852 [IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
853 [IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 }, 853 [IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
854 [IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
854}; 855};
855 856
856static int br_changelink(struct net_device *brdev, struct nlattr *tb[], 857static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
@@ -1055,6 +1056,13 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
1055 1056
1056 br->multicast_startup_query_interval = clock_t_to_jiffies(val); 1057 br->multicast_startup_query_interval = clock_t_to_jiffies(val);
1057 } 1058 }
1059
1060 if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
1061 __u8 mcast_stats;
1062
1063 mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
1064 br->multicast_stats_enabled = !!mcast_stats;
1065 }
1058#endif 1066#endif
1059#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 1067#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1060 if (data[IFLA_BR_NF_CALL_IPTABLES]) { 1068 if (data[IFLA_BR_NF_CALL_IPTABLES]) {
@@ -1110,6 +1118,7 @@ static size_t br_get_size(const struct net_device *brdev)
1110 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_SNOOPING */ 1118 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_SNOOPING */
1111 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_QUERY_USE_IFADDR */ 1119 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_QUERY_USE_IFADDR */
1112 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_QUERIER */ 1120 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_QUERIER */
1121 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_STATS_ENABLED */
1113 nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_ELASTICITY */ 1122 nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_ELASTICITY */
1114 nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_MAX */ 1123 nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_MAX */
1115 nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_LAST_MEMBER_CNT */ 1124 nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_LAST_MEMBER_CNT */
@@ -1187,6 +1196,8 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
1187 nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR, 1196 nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
1188 br->multicast_query_use_ifaddr) || 1197 br->multicast_query_use_ifaddr) ||
1189 nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) || 1198 nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) ||
1199 nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
1200 br->multicast_stats_enabled) ||
1190 nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY, 1201 nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY,
1191 br->hash_elasticity) || 1202 br->hash_elasticity) ||
1192 nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) || 1203 nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
@@ -1234,7 +1245,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
1234 return 0; 1245 return 0;
1235} 1246}
1236 1247
1237static size_t br_get_linkxstats_size(const struct net_device *dev) 1248static size_t bridge_get_linkxstats_size(const struct net_device *dev)
1238{ 1249{
1239 struct net_bridge *br = netdev_priv(dev); 1250 struct net_bridge *br = netdev_priv(dev);
1240 struct net_bridge_vlan_group *vg; 1251 struct net_bridge_vlan_group *vg;
@@ -1242,53 +1253,88 @@ static size_t br_get_linkxstats_size(const struct net_device *dev)
1242 int numvls = 0; 1253 int numvls = 0;
1243 1254
1244 vg = br_vlan_group(br); 1255 vg = br_vlan_group(br);
1245 if (!vg) 1256 if (vg) {
1246 return 0; 1257 /* we need to count all, even placeholder entries */
1247 1258 list_for_each_entry(v, &vg->vlan_list, vlist)
1248 /* we need to count all, even placeholder entries */ 1259 numvls++;
1249 list_for_each_entry(v, &vg->vlan_list, vlist) 1260 }
1250 numvls++;
1251 1261
1252 /* account for the vlans and the link xstats type nest attribute */
1253 return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) + 1262 return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
1263 nla_total_size(sizeof(struct br_mcast_stats)) +
1254 nla_total_size(0); 1264 nla_total_size(0);
1255} 1265}
1256 1266
1257static int br_fill_linkxstats(struct sk_buff *skb, const struct net_device *dev, 1267static size_t brport_get_linkxstats_size(const struct net_device *dev)
1258 int *prividx) 1268{
1269 return nla_total_size(sizeof(struct br_mcast_stats)) +
1270 nla_total_size(0);
1271}
1272
1273static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
1274{
1275 size_t retsize = 0;
1276
1277 switch (attr) {
1278 case IFLA_STATS_LINK_XSTATS:
1279 retsize = bridge_get_linkxstats_size(dev);
1280 break;
1281 case IFLA_STATS_LINK_XSTATS_SLAVE:
1282 retsize = brport_get_linkxstats_size(dev);
1283 break;
1284 }
1285
1286 return retsize;
1287}
1288
1289static int bridge_fill_linkxstats(struct sk_buff *skb,
1290 const struct net_device *dev,
1291 int *prividx)
1259{ 1292{
1260 struct net_bridge *br = netdev_priv(dev); 1293 struct net_bridge *br = netdev_priv(dev);
1294 struct nlattr *nla __maybe_unused;
1261 struct net_bridge_vlan_group *vg; 1295 struct net_bridge_vlan_group *vg;
1262 struct net_bridge_vlan *v; 1296 struct net_bridge_vlan *v;
1263 struct nlattr *nest; 1297 struct nlattr *nest;
1264 int vl_idx = 0; 1298 int vl_idx = 0;
1265 1299
1266 vg = br_vlan_group(br);
1267 if (!vg)
1268 goto out;
1269 nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE); 1300 nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
1270 if (!nest) 1301 if (!nest)
1271 return -EMSGSIZE; 1302 return -EMSGSIZE;
1272 list_for_each_entry(v, &vg->vlan_list, vlist) {
1273 struct bridge_vlan_xstats vxi;
1274 struct br_vlan_stats stats;
1275 1303
1276 if (vl_idx++ < *prividx) 1304 vg = br_vlan_group(br);
1277 continue; 1305 if (vg) {
1278 memset(&vxi, 0, sizeof(vxi)); 1306 list_for_each_entry(v, &vg->vlan_list, vlist) {
1279 vxi.vid = v->vid; 1307 struct bridge_vlan_xstats vxi;
1280 br_vlan_get_stats(v, &stats); 1308 struct br_vlan_stats stats;
1281 vxi.rx_bytes = stats.rx_bytes; 1309
1282 vxi.rx_packets = stats.rx_packets; 1310 if (++vl_idx < *prividx)
1283 vxi.tx_bytes = stats.tx_bytes; 1311 continue;
1284 vxi.tx_packets = stats.tx_packets; 1312 memset(&vxi, 0, sizeof(vxi));
1285 1313 vxi.vid = v->vid;
1286 if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi)) 1314 br_vlan_get_stats(v, &stats);
1315 vxi.rx_bytes = stats.rx_bytes;
1316 vxi.rx_packets = stats.rx_packets;
1317 vxi.tx_bytes = stats.tx_bytes;
1318 vxi.tx_packets = stats.tx_packets;
1319
1320 if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
1321 goto nla_put_failure;
1322 }
1323 }
1324
1325#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1326 if (++vl_idx >= *prividx) {
1327 nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
1328 sizeof(struct br_mcast_stats),
1329 BRIDGE_XSTATS_PAD);
1330 if (!nla)
1287 goto nla_put_failure; 1331 goto nla_put_failure;
1332 br_multicast_get_stats(br, NULL, nla_data(nla));
1288 } 1333 }
1334#endif
1289 nla_nest_end(skb, nest); 1335 nla_nest_end(skb, nest);
1290 *prividx = 0; 1336 *prividx = 0;
1291out: 1337
1292 return 0; 1338 return 0;
1293 1339
1294nla_put_failure: 1340nla_put_failure:
@@ -1298,6 +1344,52 @@ nla_put_failure:
1298 return -EMSGSIZE; 1344 return -EMSGSIZE;
1299} 1345}
1300 1346
1347static int brport_fill_linkxstats(struct sk_buff *skb,
1348 const struct net_device *dev,
1349 int *prividx)
1350{
1351 struct net_bridge_port *p = br_port_get_rtnl(dev);
1352 struct nlattr *nla __maybe_unused;
1353 struct nlattr *nest;
1354
1355 if (!p)
1356 return 0;
1357
1358 nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
1359 if (!nest)
1360 return -EMSGSIZE;
1361#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1362 nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
1363 sizeof(struct br_mcast_stats),
1364 BRIDGE_XSTATS_PAD);
1365 if (!nla) {
1366 nla_nest_end(skb, nest);
1367 return -EMSGSIZE;
1368 }
1369 br_multicast_get_stats(p->br, p, nla_data(nla));
1370#endif
1371 nla_nest_end(skb, nest);
1372
1373 return 0;
1374}
1375
1376static int br_fill_linkxstats(struct sk_buff *skb, const struct net_device *dev,
1377 int *prividx, int attr)
1378{
1379 int ret = -EINVAL;
1380
1381 switch (attr) {
1382 case IFLA_STATS_LINK_XSTATS:
1383 ret = bridge_fill_linkxstats(skb, dev, prividx);
1384 break;
1385 case IFLA_STATS_LINK_XSTATS_SLAVE:
1386 ret = brport_fill_linkxstats(skb, dev, prividx);
1387 break;
1388 }
1389
1390 return ret;
1391}
1392
1301static struct rtnl_af_ops br_af_ops __read_mostly = { 1393static struct rtnl_af_ops br_af_ops __read_mostly = {
1302 .family = AF_BRIDGE, 1394 .family = AF_BRIDGE,
1303 .get_link_af_size = br_get_link_af_size_filtered, 1395 .get_link_af_size = br_get_link_af_size_filtered,
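Userspace reaches these fill handlers through RTM_GETSTATS with the matching filter bit: IFLA_STATS_LINK_XSTATS selects the bridge itself, IFLA_STATS_LINK_XSTATS_SLAVE a bridge port. A minimal request sketch, assuming <linux/rtnetlink.h> and <linux/if_link.h>, with error handling omitted:

	struct {
		struct nlmsghdr nlh;
		struct if_stats_msg ifsm;
	} req = {
		.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct if_stats_msg)),
		.nlh.nlmsg_type = RTM_GETSTATS,
		.nlh.nlmsg_flags = NLM_F_REQUEST,
		.ifsm.ifindex = port_ifindex,	/* a bridge port for _SLAVE */
		.ifsm.filter_mask =
			IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_XSTATS_SLAVE),
	};

The reply carries the BRIDGE_XSTATS_MCAST attribute nested under LINK_XSTATS_TYPE_BRIDGE, exactly as reserved by brport_fill_linkxstats() above.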
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c7fb5d7a7218..4dc851166ad1 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -75,6 +75,12 @@ struct bridge_mcast_querier {
75 struct br_ip addr; 75 struct br_ip addr;
76 struct net_bridge_port __rcu *port; 76 struct net_bridge_port __rcu *port;
77}; 77};
78
79/* IGMP/MLD statistics */
80struct bridge_mcast_stats {
81 struct br_mcast_stats mstats;
82 struct u64_stats_sync syncp;
83};
78#endif 84#endif
79 85
80struct br_vlan_stats { 86struct br_vlan_stats {
@@ -229,6 +235,7 @@ struct net_bridge_port
229 struct bridge_mcast_own_query ip6_own_query; 235 struct bridge_mcast_own_query ip6_own_query;
230#endif /* IS_ENABLED(CONFIG_IPV6) */ 236#endif /* IS_ENABLED(CONFIG_IPV6) */
231 unsigned char multicast_router; 237 unsigned char multicast_router;
238 struct bridge_mcast_stats __percpu *mcast_stats;
232 struct timer_list multicast_router_timer; 239 struct timer_list multicast_router_timer;
233 struct hlist_head mglist; 240 struct hlist_head mglist;
234 struct hlist_node rlist; 241 struct hlist_node rlist;
@@ -314,6 +321,8 @@ struct net_bridge
314 u8 multicast_disabled:1; 321 u8 multicast_disabled:1;
315 u8 multicast_querier:1; 322 u8 multicast_querier:1;
316 u8 multicast_query_use_ifaddr:1; 323 u8 multicast_query_use_ifaddr:1;
324 u8 has_ipv6_addr:1;
325 u8 multicast_stats_enabled:1;
317 326
318 u32 hash_elasticity; 327 u32 hash_elasticity;
319 u32 hash_max; 328 u32 hash_max;
@@ -336,6 +345,7 @@ struct net_bridge
336 struct bridge_mcast_other_query ip4_other_query; 345 struct bridge_mcast_other_query ip4_other_query;
337 struct bridge_mcast_own_query ip4_own_query; 346 struct bridge_mcast_own_query ip4_own_query;
338 struct bridge_mcast_querier ip4_querier; 347 struct bridge_mcast_querier ip4_querier;
348 struct bridge_mcast_stats __percpu *mcast_stats;
339#if IS_ENABLED(CONFIG_IPV6) 349#if IS_ENABLED(CONFIG_IPV6)
340 struct bridge_mcast_other_query ip6_other_query; 350 struct bridge_mcast_other_query ip6_other_query;
341 struct bridge_mcast_own_query ip6_own_query; 351 struct bridge_mcast_own_query ip6_own_query;
@@ -542,7 +552,7 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
542 struct sk_buff *skb, u16 vid); 552 struct sk_buff *skb, u16 vid);
 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid);
-void br_multicast_add_port(struct net_bridge_port *port);
+int br_multicast_add_port(struct net_bridge_port *port);
 void br_multicast_del_port(struct net_bridge_port *port);
 void br_multicast_enable_port(struct net_bridge_port *port);
 void br_multicast_disable_port(struct net_bridge_port *port);
@@ -575,6 +585,12 @@ void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
 		   struct br_ip *group, int type, u8 flags);
 void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
 		   int type);
+void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
+			__be16 proto, u8 type, u8 dir);
+int br_multicast_init_stats(struct net_bridge *br);
+void br_multicast_get_stats(const struct net_bridge *br,
+			    const struct net_bridge_port *p,
+			    struct br_mcast_stats *dest);
 
 #define mlock_dereference(X, br) \
 	rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
@@ -588,10 +604,22 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
 
 static inline bool
 __br_multicast_querier_exists(struct net_bridge *br,
-			      struct bridge_mcast_other_query *querier)
+			      struct bridge_mcast_other_query *querier,
+			      const bool is_ipv6)
 {
+	bool own_querier_enabled;
+
+	if (br->multicast_querier) {
+		if (is_ipv6 && !br->has_ipv6_addr)
+			own_querier_enabled = false;
+		else
+			own_querier_enabled = true;
+	} else {
+		own_querier_enabled = false;
+	}
+
 	return time_is_before_jiffies(querier->delay_time) &&
-	       (br->multicast_querier || timer_pending(&querier->timer));
+	       (own_querier_enabled || timer_pending(&querier->timer));
 }
 
 static inline bool br_multicast_querier_exists(struct net_bridge *br,
@@ -599,15 +627,22 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br,
 {
 	switch (eth->h_proto) {
 	case (htons(ETH_P_IP)):
-		return __br_multicast_querier_exists(br, &br->ip4_other_query);
+		return __br_multicast_querier_exists(br,
+			&br->ip4_other_query, false);
 #if IS_ENABLED(CONFIG_IPV6)
 	case (htons(ETH_P_IPV6)):
-		return __br_multicast_querier_exists(br, &br->ip6_other_query);
+		return __br_multicast_querier_exists(br,
+			&br->ip6_other_query, true);
 #endif
 	default:
 		return false;
 	}
 }
+
+static inline int br_multicast_igmp_type(const struct sk_buff *skb)
+{
+	return BR_INPUT_SKB_CB(skb)->igmp;
+}
 #else
 static inline int br_multicast_rcv(struct net_bridge *br,
 				   struct net_bridge_port *port,
@@ -623,8 +658,9 @@ static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
 	return NULL;
 }
 
-static inline void br_multicast_add_port(struct net_bridge_port *port)
+static inline int br_multicast_add_port(struct net_bridge_port *port)
 {
+	return 0;
 }
 
 static inline void br_multicast_del_port(struct net_bridge_port *port)
@@ -680,6 +716,22 @@ static inline void br_mdb_init(void)
 static inline void br_mdb_uninit(void)
 {
 }
+
+static inline void br_multicast_count(struct net_bridge *br,
+				      const struct net_bridge_port *p,
+				      __be16 proto, u8 type, u8 dir)
+{
+}
+
+static inline int br_multicast_init_stats(struct net_bridge *br)
+{
+	return 0;
+}
+
+static inline int br_multicast_igmp_type(const struct sk_buff *skb)
+{
+	return 0;
+}
 #endif
 
 /* br_vlan.c */
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index beb47071e38d..e120307c6e36 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -618,6 +618,30 @@ static ssize_t multicast_startup_query_interval_store(
 	return store_bridge_parm(d, buf, len, set_startup_query_interval);
 }
 static DEVICE_ATTR_RW(multicast_startup_query_interval);
+
+static ssize_t multicast_stats_enabled_show(struct device *d,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+
+	return sprintf(buf, "%u\n", br->multicast_stats_enabled);
+}
+
+static int set_stats_enabled(struct net_bridge *br, unsigned long val)
+{
+	br->multicast_stats_enabled = !!val;
+	return 0;
+}
+
+static ssize_t multicast_stats_enabled_store(struct device *d,
+					     struct device_attribute *attr,
+					     const char *buf,
+					     size_t len)
+{
+	return store_bridge_parm(d, buf, len, set_stats_enabled);
+}
+static DEVICE_ATTR_RW(multicast_stats_enabled);
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 static ssize_t nf_call_iptables_show(
@@ -784,6 +808,7 @@ static struct attribute *bridge_attrs[] = {
 	&dev_attr_multicast_query_interval.attr,
 	&dev_attr_multicast_query_response_interval.attr,
 	&dev_attr_multicast_startup_query_interval.attr,
+	&dev_attr_multicast_stats_enabled.attr,
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 	&dev_attr_nf_call_iptables.attr,
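Note: the ebtables changes below replace the per-module FWINV()/FWINV2() macros with the shared NF_INVF() helper. As a point of reference, and assuming the definition this series adds to <linux/netfilter.h>, the helper evaluates a mismatch test and flips the result when the corresponding inversion flag is set:

	/* Sketch of the NF_INVF() contract relied on below: @ptr is the
	 * match info carrying an ->invflags bitmask, @flag selects the
	 * inversion bit, and @boolean is the (non-inverted) mismatch test.
	 */
	#define NF_INVF(ptr, flag, boolean)	\
		((boolean) ^ !!((ptr)->invflags & (flag)))

With this contract, NF_INVF(info, EBT_IP_TOS, info->tos != ih->tos) reads as "the TOS test fails, unless EBT_IP_TOS is inverted", which is exactly what the open-coded FWINV() variants expressed in each file.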
diff --git a/net/bridge/netfilter/ebt_802_3.c b/net/bridge/netfilter/ebt_802_3.c
index 2a449b7ab8fa..5fc4affd9fdb 100644
--- a/net/bridge/netfilter/ebt_802_3.c
+++ b/net/bridge/netfilter/ebt_802_3.c
@@ -20,16 +20,16 @@ ebt_802_3_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	__be16 type = hdr->llc.ui.ctrl & IS_UI ? hdr->llc.ui.type : hdr->llc.ni.type;
 
 	if (info->bitmask & EBT_802_3_SAP) {
-		if (FWINV(info->sap != hdr->llc.ui.ssap, EBT_802_3_SAP))
+		if (NF_INVF(info, EBT_802_3_SAP, info->sap != hdr->llc.ui.ssap))
 			return false;
-		if (FWINV(info->sap != hdr->llc.ui.dsap, EBT_802_3_SAP))
+		if (NF_INVF(info, EBT_802_3_SAP, info->sap != hdr->llc.ui.dsap))
 			return false;
 	}
 
 	if (info->bitmask & EBT_802_3_TYPE) {
 		if (!(hdr->llc.ui.dsap == CHECK_TYPE && hdr->llc.ui.ssap == CHECK_TYPE))
 			return false;
-		if (FWINV(info->type != type, EBT_802_3_TYPE))
+		if (NF_INVF(info, EBT_802_3_TYPE, info->type != type))
 			return false;
 	}
 
diff --git a/net/bridge/netfilter/ebt_arp.c b/net/bridge/netfilter/ebt_arp.c
index cd457b891b27..227142282b45 100644
--- a/net/bridge/netfilter/ebt_arp.c
+++ b/net/bridge/netfilter/ebt_arp.c
@@ -25,14 +25,14 @@ ebt_arp_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
 	if (ah == NULL)
 		return false;
-	if (info->bitmask & EBT_ARP_OPCODE && FWINV(info->opcode !=
-	   ah->ar_op, EBT_ARP_OPCODE))
+	if ((info->bitmask & EBT_ARP_OPCODE) &&
+	    NF_INVF(info, EBT_ARP_OPCODE, info->opcode != ah->ar_op))
 		return false;
-	if (info->bitmask & EBT_ARP_HTYPE && FWINV(info->htype !=
-	   ah->ar_hrd, EBT_ARP_HTYPE))
+	if ((info->bitmask & EBT_ARP_HTYPE) &&
+	    NF_INVF(info, EBT_ARP_HTYPE, info->htype != ah->ar_hrd))
 		return false;
-	if (info->bitmask & EBT_ARP_PTYPE && FWINV(info->ptype !=
-	   ah->ar_pro, EBT_ARP_PTYPE))
+	if ((info->bitmask & EBT_ARP_PTYPE) &&
+	    NF_INVF(info, EBT_ARP_PTYPE, info->ptype != ah->ar_pro))
 		return false;
 
 	if (info->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP | EBT_ARP_GRAT)) {
@@ -51,21 +51,22 @@ ebt_arp_mt(const struct sk_buff *skb, struct xt_action_param *par)
 					 sizeof(daddr), &daddr);
 		if (dap == NULL)
 			return false;
-		if (info->bitmask & EBT_ARP_SRC_IP &&
-		    FWINV(info->saddr != (*sap & info->smsk), EBT_ARP_SRC_IP))
+		if ((info->bitmask & EBT_ARP_SRC_IP) &&
+		    NF_INVF(info, EBT_ARP_SRC_IP,
+			    info->saddr != (*sap & info->smsk)))
 			return false;
-		if (info->bitmask & EBT_ARP_DST_IP &&
-		    FWINV(info->daddr != (*dap & info->dmsk), EBT_ARP_DST_IP))
+		if ((info->bitmask & EBT_ARP_DST_IP) &&
+		    NF_INVF(info, EBT_ARP_DST_IP,
+			    info->daddr != (*dap & info->dmsk)))
 			return false;
-		if (info->bitmask & EBT_ARP_GRAT &&
-		    FWINV(*dap != *sap, EBT_ARP_GRAT))
+		if ((info->bitmask & EBT_ARP_GRAT) &&
+		    NF_INVF(info, EBT_ARP_GRAT, *dap != *sap))
 			return false;
 	}
 
 	if (info->bitmask & (EBT_ARP_SRC_MAC | EBT_ARP_DST_MAC)) {
 		const unsigned char *mp;
 		unsigned char _mac[ETH_ALEN];
-		uint8_t verdict, i;
 
 		if (ah->ar_hln != ETH_ALEN || ah->ar_hrd != htons(ARPHRD_ETHER))
 			return false;
@@ -74,11 +75,9 @@ ebt_arp_mt(const struct sk_buff *skb, struct xt_action_param *par)
 					sizeof(_mac), &_mac);
 		if (mp == NULL)
 			return false;
-		verdict = 0;
-		for (i = 0; i < 6; i++)
-			verdict |= (mp[i] ^ info->smaddr[i]) &
-				   info->smmsk[i];
-		if (FWINV(verdict != 0, EBT_ARP_SRC_MAC))
+		if (NF_INVF(info, EBT_ARP_SRC_MAC,
+			    !ether_addr_equal_masked(mp, info->smaddr,
+						     info->smmsk)))
 			return false;
 	}
 
@@ -88,11 +87,9 @@ ebt_arp_mt(const struct sk_buff *skb, struct xt_action_param *par)
 					sizeof(_mac), &_mac);
 		if (mp == NULL)
 			return false;
-		verdict = 0;
-		for (i = 0; i < 6; i++)
-			verdict |= (mp[i] ^ info->dmaddr[i]) &
-				   info->dmmsk[i];
-		if (FWINV(verdict != 0, EBT_ARP_DST_MAC))
+		if (NF_INVF(info, EBT_ARP_DST_MAC,
+			    !ether_addr_equal_masked(mp, info->dmaddr,
+						     info->dmmsk)))
 			return false;
 	}
 }
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c
index 23bca62d58d2..d06968bdf5ec 100644
--- a/net/bridge/netfilter/ebt_ip.c
+++ b/net/bridge/netfilter/ebt_ip.c
@@ -36,19 +36,19 @@ ebt_ip_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
 	if (ih == NULL)
 		return false;
-	if (info->bitmask & EBT_IP_TOS &&
-	    FWINV(info->tos != ih->tos, EBT_IP_TOS))
+	if ((info->bitmask & EBT_IP_TOS) &&
+	    NF_INVF(info, EBT_IP_TOS, info->tos != ih->tos))
 		return false;
-	if (info->bitmask & EBT_IP_SOURCE &&
-	    FWINV((ih->saddr & info->smsk) !=
-	    info->saddr, EBT_IP_SOURCE))
+	if ((info->bitmask & EBT_IP_SOURCE) &&
+	    NF_INVF(info, EBT_IP_SOURCE,
+		    (ih->saddr & info->smsk) != info->saddr))
 		return false;
 	if ((info->bitmask & EBT_IP_DEST) &&
-	    FWINV((ih->daddr & info->dmsk) !=
-	    info->daddr, EBT_IP_DEST))
+	    NF_INVF(info, EBT_IP_DEST,
+		    (ih->daddr & info->dmsk) != info->daddr))
 		return false;
 	if (info->bitmask & EBT_IP_PROTO) {
-		if (FWINV(info->protocol != ih->protocol, EBT_IP_PROTO))
+		if (NF_INVF(info, EBT_IP_PROTO, info->protocol != ih->protocol))
 			return false;
 		if (!(info->bitmask & EBT_IP_DPORT) &&
 		    !(info->bitmask & EBT_IP_SPORT))
@@ -61,16 +61,16 @@ ebt_ip_mt(const struct sk_buff *skb, struct xt_action_param *par)
 			return false;
 		if (info->bitmask & EBT_IP_DPORT) {
 			u32 dst = ntohs(pptr->dst);
-			if (FWINV(dst < info->dport[0] ||
-				  dst > info->dport[1],
-				  EBT_IP_DPORT))
+			if (NF_INVF(info, EBT_IP_DPORT,
+				    dst < info->dport[0] ||
+				    dst > info->dport[1]))
 				return false;
 		}
 		if (info->bitmask & EBT_IP_SPORT) {
 			u32 src = ntohs(pptr->src);
-			if (FWINV(src < info->sport[0] ||
-				  src > info->sport[1],
-				  EBT_IP_SPORT))
+			if (NF_INVF(info, EBT_IP_SPORT,
+				    src < info->sport[0] ||
+				    src > info->sport[1]))
 				return false;
 		}
 	}
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
index 98de6e7fd86d..4617491be41e 100644
--- a/net/bridge/netfilter/ebt_ip6.c
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -45,15 +45,18 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
 	if (ih6 == NULL)
 		return false;
-	if (info->bitmask & EBT_IP6_TCLASS &&
-	    FWINV(info->tclass != ipv6_get_dsfield(ih6), EBT_IP6_TCLASS))
+	if ((info->bitmask & EBT_IP6_TCLASS) &&
+	    NF_INVF(info, EBT_IP6_TCLASS,
+		    info->tclass != ipv6_get_dsfield(ih6)))
 		return false;
-	if ((info->bitmask & EBT_IP6_SOURCE &&
-	     FWINV(ipv6_masked_addr_cmp(&ih6->saddr, &info->smsk,
-					&info->saddr), EBT_IP6_SOURCE)) ||
-	    (info->bitmask & EBT_IP6_DEST &&
-	     FWINV(ipv6_masked_addr_cmp(&ih6->daddr, &info->dmsk,
-					&info->daddr), EBT_IP6_DEST)))
+	if (((info->bitmask & EBT_IP6_SOURCE) &&
+	     NF_INVF(info, EBT_IP6_SOURCE,
+		     ipv6_masked_addr_cmp(&ih6->saddr, &info->smsk,
+					  &info->saddr))) ||
+	    ((info->bitmask & EBT_IP6_DEST) &&
+	     NF_INVF(info, EBT_IP6_DEST,
+		     ipv6_masked_addr_cmp(&ih6->daddr, &info->dmsk,
+					  &info->daddr))))
 		return false;
 	if (info->bitmask & EBT_IP6_PROTO) {
 		uint8_t nexthdr = ih6->nexthdr;
@@ -63,7 +66,7 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
 		offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr, &frag_off);
 		if (offset_ph == -1)
 			return false;
-		if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
+		if (NF_INVF(info, EBT_IP6_PROTO, info->protocol != nexthdr))
 			return false;
 		if (!(info->bitmask & (EBT_IP6_DPORT |
 				       EBT_IP6_SPORT | EBT_IP6_ICMP6)))
@@ -76,22 +79,24 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
 			return false;
 		if (info->bitmask & EBT_IP6_DPORT) {
 			u16 dst = ntohs(pptr->tcpudphdr.dst);
-			if (FWINV(dst < info->dport[0] ||
-				  dst > info->dport[1], EBT_IP6_DPORT))
+			if (NF_INVF(info, EBT_IP6_DPORT,
+				    dst < info->dport[0] ||
+				    dst > info->dport[1]))
 				return false;
 		}
 		if (info->bitmask & EBT_IP6_SPORT) {
 			u16 src = ntohs(pptr->tcpudphdr.src);
-			if (FWINV(src < info->sport[0] ||
-				  src > info->sport[1], EBT_IP6_SPORT))
+			if (NF_INVF(info, EBT_IP6_SPORT,
+				    src < info->sport[0] ||
+				    src > info->sport[1]))
 				return false;
 		}
 		if ((info->bitmask & EBT_IP6_ICMP6) &&
-		    FWINV(pptr->icmphdr.type < info->icmpv6_type[0] ||
-			  pptr->icmphdr.type > info->icmpv6_type[1] ||
-			  pptr->icmphdr.code < info->icmpv6_code[0] ||
-			  pptr->icmphdr.code > info->icmpv6_code[1],
-			  EBT_IP6_ICMP6))
+		    NF_INVF(info, EBT_IP6_ICMP6,
+			    pptr->icmphdr.type < info->icmpv6_type[0] ||
+			    pptr->icmphdr.type > info->icmpv6_type[1] ||
+			    pptr->icmphdr.code < info->icmpv6_code[0] ||
+			    pptr->icmphdr.code > info->icmpv6_code[1]))
 			return false;
 	}
 	return true;
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 6b731e12ecfa..3140eb912d7e 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -17,24 +17,24 @@
 #define BPDU_TYPE_TCN 0x80
 
 struct stp_header {
-	uint8_t dsap;
-	uint8_t ssap;
-	uint8_t ctrl;
-	uint8_t pid;
-	uint8_t vers;
-	uint8_t type;
+	u8 dsap;
+	u8 ssap;
+	u8 ctrl;
+	u8 pid;
+	u8 vers;
+	u8 type;
 };
 
 struct stp_config_pdu {
-	uint8_t flags;
-	uint8_t root[8];
-	uint8_t root_cost[4];
-	uint8_t sender[8];
-	uint8_t port[2];
-	uint8_t msg_age[2];
-	uint8_t max_age[2];
-	uint8_t hello_time[2];
-	uint8_t forward_delay[2];
+	u8 flags;
+	u8 root[8];
+	u8 root_cost[4];
+	u8 sender[8];
+	u8 port[2];
+	u8 msg_age[2];
+	u8 max_age[2];
+	u8 hello_time[2];
+	u8 forward_delay[2];
 };
 
 #define NR16(p) (p[0] << 8 | p[1])
@@ -44,76 +44,73 @@ static bool ebt_filter_config(const struct ebt_stp_info *info,
 			      const struct stp_config_pdu *stpc)
 {
 	const struct ebt_stp_config_info *c;
-	uint16_t v16;
-	uint32_t v32;
-	int verdict, i;
+	u16 v16;
+	u32 v32;
 
 	c = &info->config;
 	if ((info->bitmask & EBT_STP_FLAGS) &&
-	    FWINV(c->flags != stpc->flags, EBT_STP_FLAGS))
+	    NF_INVF(info, EBT_STP_FLAGS, c->flags != stpc->flags))
 		return false;
 	if (info->bitmask & EBT_STP_ROOTPRIO) {
 		v16 = NR16(stpc->root);
-		if (FWINV(v16 < c->root_priol ||
-		    v16 > c->root_priou, EBT_STP_ROOTPRIO))
+		if (NF_INVF(info, EBT_STP_ROOTPRIO,
+			    v16 < c->root_priol || v16 > c->root_priou))
 			return false;
 	}
 	if (info->bitmask & EBT_STP_ROOTADDR) {
-		verdict = 0;
-		for (i = 0; i < 6; i++)
-			verdict |= (stpc->root[2+i] ^ c->root_addr[i]) &
-				   c->root_addrmsk[i];
-		if (FWINV(verdict != 0, EBT_STP_ROOTADDR))
+		if (NF_INVF(info, EBT_STP_ROOTADDR,
+			    !ether_addr_equal_masked(&stpc->root[2],
+						     c->root_addr,
+						     c->root_addrmsk)))
 			return false;
 	}
 	if (info->bitmask & EBT_STP_ROOTCOST) {
 		v32 = NR32(stpc->root_cost);
-		if (FWINV(v32 < c->root_costl ||
-		    v32 > c->root_costu, EBT_STP_ROOTCOST))
+		if (NF_INVF(info, EBT_STP_ROOTCOST,
+			    v32 < c->root_costl || v32 > c->root_costu))
 			return false;
 	}
 	if (info->bitmask & EBT_STP_SENDERPRIO) {
 		v16 = NR16(stpc->sender);
-		if (FWINV(v16 < c->sender_priol ||
-		    v16 > c->sender_priou, EBT_STP_SENDERPRIO))
+		if (NF_INVF(info, EBT_STP_SENDERPRIO,
+			    v16 < c->sender_priol || v16 > c->sender_priou))
 			return false;
 	}
 	if (info->bitmask & EBT_STP_SENDERADDR) {
-		verdict = 0;
-		for (i = 0; i < 6; i++)
-			verdict |= (stpc->sender[2+i] ^ c->sender_addr[i]) &
-				   c->sender_addrmsk[i];
-		if (FWINV(verdict != 0, EBT_STP_SENDERADDR))
+		if (NF_INVF(info, EBT_STP_SENDERADDR,
+			    !ether_addr_equal_masked(&stpc->sender[2],
+						     c->sender_addr,
+						     c->sender_addrmsk)))
 			return false;
 	}
 	if (info->bitmask & EBT_STP_PORT) {
 		v16 = NR16(stpc->port);
-		if (FWINV(v16 < c->portl ||
-		    v16 > c->portu, EBT_STP_PORT))
+		if (NF_INVF(info, EBT_STP_PORT,
+			    v16 < c->portl || v16 > c->portu))
 			return false;
 	}
 	if (info->bitmask & EBT_STP_MSGAGE) {
 		v16 = NR16(stpc->msg_age);
-		if (FWINV(v16 < c->msg_agel ||
-		    v16 > c->msg_ageu, EBT_STP_MSGAGE))
+		if (NF_INVF(info, EBT_STP_MSGAGE,
+			    v16 < c->msg_agel || v16 > c->msg_ageu))
 			return false;
 	}
 	if (info->bitmask & EBT_STP_MAXAGE) {
 		v16 = NR16(stpc->max_age);
-		if (FWINV(v16 < c->max_agel ||
-		    v16 > c->max_ageu, EBT_STP_MAXAGE))
+		if (NF_INVF(info, EBT_STP_MAXAGE,
+			    v16 < c->max_agel || v16 > c->max_ageu))
 			return false;
 	}
 	if (info->bitmask & EBT_STP_HELLOTIME) {
 		v16 = NR16(stpc->hello_time);
-		if (FWINV(v16 < c->hello_timel ||
-		    v16 > c->hello_timeu, EBT_STP_HELLOTIME))
+		if (NF_INVF(info, EBT_STP_HELLOTIME,
+			    v16 < c->hello_timel || v16 > c->hello_timeu))
 			return false;
 	}
 	if (info->bitmask & EBT_STP_FWDD) {
 		v16 = NR16(stpc->forward_delay);
-		if (FWINV(v16 < c->forward_delayl ||
-		    v16 > c->forward_delayu, EBT_STP_FWDD))
+		if (NF_INVF(info, EBT_STP_FWDD,
+			    v16 < c->forward_delayl || v16 > c->forward_delayu))
 			return false;
 	}
 	return true;
@@ -125,7 +122,7 @@ ebt_stp_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	const struct ebt_stp_info *info = par->matchinfo;
 	const struct stp_header *sp;
 	struct stp_header _stph;
-	const uint8_t header[6] = {0x42, 0x42, 0x03, 0x00, 0x00, 0x00};
+	const u8 header[6] = {0x42, 0x42, 0x03, 0x00, 0x00, 0x00};
 
 	sp = skb_header_pointer(skb, 0, sizeof(_stph), &_stph);
 	if (sp == NULL)
@@ -135,8 +132,8 @@ ebt_stp_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	if (memcmp(sp, header, sizeof(header)))
 		return false;
 
-	if (info->bitmask & EBT_STP_TYPE &&
-	    FWINV(info->type != sp->type, EBT_STP_TYPE))
+	if ((info->bitmask & EBT_STP_TYPE) &&
+	    NF_INVF(info, EBT_STP_TYPE, info->type != sp->type))
 		return false;
 
 	if (sp->type == BPDU_TYPE_CONFIG &&
@@ -156,8 +153,8 @@ ebt_stp_mt(const struct sk_buff *skb, struct xt_action_param *par)
 static int ebt_stp_mt_check(const struct xt_mtchk_param *par)
 {
 	const struct ebt_stp_info *info = par->matchinfo;
-	const uint8_t bridge_ula[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
-	const uint8_t msk[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+	const u8 bridge_ula[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
+	const u8 msk[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 	const struct ebt_entry *e = par->entryinfo;
 
 	if (info->bitmask & ~EBT_STP_MASK || info->invflags & ~EBT_STP_MASK ||
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 5a61f35412a0..cceac5bb658f 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -121,7 +121,6 @@ ebt_dev_check(const char *entry, const struct net_device *device)
 	return devname[i] != entry[i] && entry[i] != 1;
 }
 
-#define FWINV2(bool, invflg) ((bool) ^ !!(e->invflags & invflg))
 /* process standard matches */
 static inline int
 ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
@@ -130,7 +129,6 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
 	const struct ethhdr *h = eth_hdr(skb);
 	const struct net_bridge_port *p;
 	__be16 ethproto;
-	int verdict, i;
 
 	if (skb_vlan_tag_present(skb))
 		ethproto = htons(ETH_P_8021Q);
@@ -138,38 +136,36 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
 		ethproto = h->h_proto;
 
 	if (e->bitmask & EBT_802_3) {
-		if (FWINV2(eth_proto_is_802_3(ethproto), EBT_IPROTO))
+		if (NF_INVF(e, EBT_IPROTO, eth_proto_is_802_3(ethproto)))
 			return 1;
 	} else if (!(e->bitmask & EBT_NOPROTO) &&
-		   FWINV2(e->ethproto != ethproto, EBT_IPROTO))
+		   NF_INVF(e, EBT_IPROTO, e->ethproto != ethproto))
 		return 1;
 
-	if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
+	if (NF_INVF(e, EBT_IIN, ebt_dev_check(e->in, in)))
 		return 1;
-	if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
+	if (NF_INVF(e, EBT_IOUT, ebt_dev_check(e->out, out)))
 		return 1;
 	/* rcu_read_lock()ed by nf_hook_slow */
 	if (in && (p = br_port_get_rcu(in)) != NULL &&
-	    FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))
+	    NF_INVF(e, EBT_ILOGICALIN,
+		    ebt_dev_check(e->logical_in, p->br->dev)))
 		return 1;
 	if (out && (p = br_port_get_rcu(out)) != NULL &&
-	    FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
+	    NF_INVF(e, EBT_ILOGICALOUT,
+		    ebt_dev_check(e->logical_out, p->br->dev)))
 		return 1;
 
 	if (e->bitmask & EBT_SOURCEMAC) {
-		verdict = 0;
-		for (i = 0; i < 6; i++)
-			verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
-				   e->sourcemsk[i];
-		if (FWINV2(verdict != 0, EBT_ISOURCE))
+		if (NF_INVF(e, EBT_ISOURCE,
+			    !ether_addr_equal_masked(h->h_source, e->sourcemac,
+						     e->sourcemsk)))
 			return 1;
 	}
 	if (e->bitmask & EBT_DESTMAC) {
-		verdict = 0;
-		for (i = 0; i < 6; i++)
-			verdict |= (h->h_dest[i] ^ e->destmac[i]) &
-				   e->destmsk[i];
-		if (FWINV2(verdict != 0, EBT_IDEST))
+		if (NF_INVF(e, EBT_IDEST,
+			    !ether_addr_equal_masked(h->h_dest, e->destmac,
+						     e->destmsk)))
 			return 1;
 	}
 	return 0;
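Note: the MAC comparisons above drop the per-byte XOR-and-mask loops in favor of ether_addr_equal_masked(). A minimal sketch of the semantics assumed here (the real helper lives in <linux/etherdevice.h>; this open-coded form is for illustration only):

	/* True when addr and want agree on every bit selected by mask,
	 * i.e. the loop the old 'verdict' code implemented by hand.
	 */
	static bool masked_mac_match(const u8 *addr, const u8 *want,
				     const u8 *mask)
	{
		int i;

		for (i = 0; i < ETH_ALEN; i++)
			if ((addr[i] ^ want[i]) & mask[i])
				return false;
		return true;
	}

The callers pass the negated result to NF_INVF(), since that macro expects a mismatch test rather than a match test.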
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 67a4a36febd1..3408ed51b611 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -13,7 +13,6 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/if_ether.h>
-#include <linux/moduleparam.h>
 #include <linux/ip.h>
 #include <linux/sched.h>
 #include <linux/sockios.h>
diff --git a/net/core/dev.c b/net/core/dev.c
index aba10d2a8bc3..b92d63bfde7a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5445,6 +5445,52 @@ void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
 EXPORT_SYMBOL(netdev_lower_get_next);
 
 /**
+ * netdev_all_lower_get_next - Get the next device from all lower neighbour list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent from the dev's all lower neighbour
+ * list, starting from iter position. The caller must hold RTNL lock or
+ * its own locking that guarantees that the neighbour all lower
+ * list will remain unchanged.
+ */
+struct net_device *netdev_all_lower_get_next(struct net_device *dev, struct list_head **iter)
+{
+	struct netdev_adjacent *lower;
+
+	lower = list_entry(*iter, struct netdev_adjacent, list);
+
+	if (&lower->list == &dev->all_adj_list.lower)
+		return NULL;
+
+	*iter = lower->list.next;
+
+	return lower->dev;
+}
+EXPORT_SYMBOL(netdev_all_lower_get_next);
+
+/**
+ * netdev_all_lower_get_next_rcu - Get the next device from all
+ *				   lower neighbour list, RCU variant
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent from the dev's all lower neighbour
+ * list, starting from iter position. The caller must hold RCU read lock.
+ */
+struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
+						 struct list_head **iter)
+{
+	struct netdev_adjacent *lower;
+
+	lower = list_first_or_null_rcu(&dev->all_adj_list.lower,
+				       struct netdev_adjacent, list);
+
+	return lower ? lower->dev : NULL;
+}
+EXPORT_SYMBOL(netdev_all_lower_get_next_rcu);
+
+/**
  * netdev_lower_get_first_private_rcu - Get the first ->private from the
  *				       lower neighbour list, RCU
  *				       variant
@@ -6041,6 +6087,50 @@ void netdev_lower_state_changed(struct net_device *lower_dev,
 }
 EXPORT_SYMBOL(netdev_lower_state_changed);
 
+int netdev_default_l2upper_neigh_construct(struct net_device *dev,
+					   struct neighbour *n)
+{
+	struct net_device *lower_dev, *stop_dev;
+	struct list_head *iter;
+	int err;
+
+	netdev_for_each_lower_dev(dev, lower_dev, iter) {
+		if (!lower_dev->netdev_ops->ndo_neigh_construct)
+			continue;
+		err = lower_dev->netdev_ops->ndo_neigh_construct(lower_dev, n);
+		if (err) {
+			stop_dev = lower_dev;
+			goto rollback;
+		}
+	}
+	return 0;
+
+rollback:
+	netdev_for_each_lower_dev(dev, lower_dev, iter) {
+		if (lower_dev == stop_dev)
+			break;
+		if (!lower_dev->netdev_ops->ndo_neigh_destroy)
+			continue;
+		lower_dev->netdev_ops->ndo_neigh_destroy(lower_dev, n);
+	}
+	return err;
+}
+EXPORT_SYMBOL_GPL(netdev_default_l2upper_neigh_construct);
+
+void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
+					  struct neighbour *n)
+{
+	struct net_device *lower_dev;
+	struct list_head *iter;
+
+	netdev_for_each_lower_dev(dev, lower_dev, iter) {
+		if (!lower_dev->netdev_ops->ndo_neigh_destroy)
+			continue;
+		lower_dev->netdev_ops->ndo_neigh_destroy(lower_dev, n);
+	}
+}
+EXPORT_SYMBOL_GPL(netdev_default_l2upper_neigh_destroy);
+
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 933e8d4d3968..b2e592a198c0 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1394,6 +1394,78 @@ static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
 		return -EOPNOTSUPP;
 }
 
+static int devlink_eswitch_fill(struct sk_buff *msg, struct devlink *devlink,
+				enum devlink_command cmd, u32 portid,
+				u32 seq, int flags, u16 mode)
+{
+	void *hdr;
+
+	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	if (devlink_nl_put_handle(msg, devlink))
+		goto nla_put_failure;
+
+	if (nla_put_u16(msg, DEVLINK_ATTR_ESWITCH_MODE, mode))
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int devlink_nl_cmd_eswitch_mode_get_doit(struct sk_buff *skb,
+						struct genl_info *info)
+{
+	struct devlink *devlink = info->user_ptr[0];
+	const struct devlink_ops *ops = devlink->ops;
+	struct sk_buff *msg;
+	u16 mode;
+	int err;
+
+	if (!ops || !ops->eswitch_mode_get)
+		return -EOPNOTSUPP;
+
+	err = ops->eswitch_mode_get(devlink, &mode);
+	if (err)
+		return err;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	err = devlink_eswitch_fill(msg, devlink, DEVLINK_CMD_ESWITCH_MODE_GET,
+				   info->snd_portid, info->snd_seq, 0, mode);
+
+	if (err) {
+		nlmsg_free(msg);
+		return err;
+	}
+
+	return genlmsg_reply(msg, info);
+}
+
+static int devlink_nl_cmd_eswitch_mode_set_doit(struct sk_buff *skb,
+						struct genl_info *info)
+{
+	struct devlink *devlink = info->user_ptr[0];
+	const struct devlink_ops *ops = devlink->ops;
+	u16 mode;
+
+	if (!info->attrs[DEVLINK_ATTR_ESWITCH_MODE])
+		return -EINVAL;
+
+	mode = nla_get_u16(info->attrs[DEVLINK_ATTR_ESWITCH_MODE]);
+
+	if (ops && ops->eswitch_mode_set)
+		return ops->eswitch_mode_set(devlink, mode);
+	return -EOPNOTSUPP;
+}
+
 static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
 	[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
 	[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
@@ -1407,6 +1479,7 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
 	[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = { .type = NLA_U8 },
 	[DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32 },
 	[DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 },
+	[DEVLINK_ATTR_ESWITCH_MODE] = { .type = NLA_U16 },
 };
 
 static const struct genl_ops devlink_nl_ops[] = {
@@ -1525,6 +1598,20 @@ static const struct genl_ops devlink_nl_ops[] = {
 				  DEVLINK_NL_FLAG_NEED_SB |
 				  DEVLINK_NL_FLAG_LOCK_PORTS,
 	},
+	{
+		.cmd = DEVLINK_CMD_ESWITCH_MODE_GET,
+		.doit = devlink_nl_cmd_eswitch_mode_get_doit,
+		.policy = devlink_nl_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+	},
+	{
+		.cmd = DEVLINK_CMD_ESWITCH_MODE_SET,
+		.doit = devlink_nl_cmd_eswitch_mode_set_doit,
+		.policy = devlink_nl_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
+	},
 };
 
 /**
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 98298b11f534..be4629c344a6 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -269,6 +269,49 @@ errout:
 	return err;
 }
 
+static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
+		       struct nlattr **tb, struct fib_rule *rule)
+{
+	struct fib_rule *r;
+
+	list_for_each_entry(r, &ops->rules_list, list) {
+		if (r->action != rule->action)
+			continue;
+
+		if (r->table != rule->table)
+			continue;
+
+		if (r->pref != rule->pref)
+			continue;
+
+		if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
+			continue;
+
+		if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
+			continue;
+
+		if (r->mark != rule->mark)
+			continue;
+
+		if (r->mark_mask != rule->mark_mask)
+			continue;
+
+		if (r->tun_id != rule->tun_id)
+			continue;
+
+		if (r->fr_net != rule->fr_net)
+			continue;
+
+		if (r->l3mdev != rule->l3mdev)
+			continue;
+
+		if (!ops->compare(r, frh, tb))
+			continue;
+		return 1;
+	}
+	return 0;
+}
+
 int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	struct net *net = sock_net(skb->sk);
@@ -386,6 +429,12 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (rule->l3mdev && rule->table)
 		goto errout_free;
 
+	if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
+	    rule_exists(ops, frh, tb, rule)) {
+		err = -EEXIST;
+		goto errout_free;
+	}
+
 	err = ops->configure(rule, skb, frh, tb);
 	if (err < 0)
 		goto errout_free;
diff --git a/net/core/filter.c b/net/core/filter.c
index df6860c85d72..10c4a2f9e8bb 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -150,6 +150,12 @@ static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 	return raw_smp_processor_id();
 }
 
+static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
+	.func = __get_raw_cpu_id,
+	.gpl_only = false,
+	.ret_type = RET_INTEGER,
+};
+
 static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
 			      struct bpf_insn *insn_buf)
 {
@@ -1295,21 +1301,10 @@ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 
 static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
 {
-	struct bpf_prog *prog;
-
 	if (sock_flag(sk, SOCK_FILTER_LOCKED))
 		return ERR_PTR(-EPERM);
 
-	prog = bpf_prog_get(ufd);
-	if (IS_ERR(prog))
-		return prog;
-
-	if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) {
-		bpf_prog_put(prog);
-		return ERR_PTR(-EINVAL);
-	}
-
-	return prog;
+	return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
 }
 
 int sk_attach_bpf(u32 ufd, struct sock *sk)
@@ -1734,6 +1729,23 @@ static const struct bpf_func_proto bpf_get_route_realm_proto = {
 	.arg1_type = ARG_PTR_TO_CTX,
 };
 
+static u64 bpf_get_hash_recalc(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	/* If skb_clear_hash() was called due to mangling, we can
+	 * trigger SW recalculation here. Later access to hash
+	 * can then use the inline skb->hash via context directly
+	 * instead of calling this helper again.
+	 */
+	return skb_get_hash((struct sk_buff *) (unsigned long) r1);
+}
+
+static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
+	.func = bpf_get_hash_recalc,
+	.gpl_only = false,
+	.ret_type = RET_INTEGER,
+	.arg1_type = ARG_PTR_TO_CTX,
+};
+
 static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
 {
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
@@ -1777,6 +1789,224 @@ const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
 };
 EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);
 
+static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
+{
+	/* Caller already did skb_cow() with len as headroom,
+	 * so no need to do it here.
+	 */
+	skb_push(skb, len);
+	memmove(skb->data, skb->data + len, off);
+	memset(skb->data + off, 0, len);
+
+	/* No skb_postpush_rcsum(skb, skb->data + off, len)
+	 * needed here as it does not change the skb->csum
+	 * result for checksum complete when summing over
+	 * zeroed blocks.
+	 */
+	return 0;
+}
+
+static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
+{
+	/* skb_ensure_writable() is not needed here, as we're
+	 * already working on an uncloned skb.
+	 */
+	if (unlikely(!pskb_may_pull(skb, off + len)))
+		return -ENOMEM;
+
+	skb_postpull_rcsum(skb, skb->data + off, len);
+	memmove(skb->data + len, skb->data, off);
+	__skb_pull(skb, len);
+
+	return 0;
+}
+
+static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
+{
+	bool trans_same = skb->transport_header == skb->network_header;
+	int ret;
+
+	/* There's no need for __skb_push()/__skb_pull() pair to
+	 * get to the start of the mac header as we're guaranteed
+	 * to always start from here under eBPF.
+	 */
+	ret = bpf_skb_generic_push(skb, off, len);
+	if (likely(!ret)) {
+		skb->mac_header -= len;
+		skb->network_header -= len;
+		if (trans_same)
+			skb->transport_header = skb->network_header;
+	}
+
+	return ret;
+}
+
+static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
+{
+	bool trans_same = skb->transport_header == skb->network_header;
+	int ret;
+
+	/* Same here, __skb_push()/__skb_pull() pair not needed. */
+	ret = bpf_skb_generic_pop(skb, off, len);
+	if (likely(!ret)) {
+		skb->mac_header += len;
+		skb->network_header += len;
+		if (trans_same)
+			skb->transport_header = skb->network_header;
+	}
+
+	return ret;
+}
+
+static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
+{
+	const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
+	u32 off = skb->network_header - skb->mac_header;
+	int ret;
+
+	ret = skb_cow(skb, len_diff);
+	if (unlikely(ret < 0))
+		return ret;
+
+	ret = bpf_skb_net_hdr_push(skb, off, len_diff);
+	if (unlikely(ret < 0))
+		return ret;
+
+	if (skb_is_gso(skb)) {
+		/* SKB_GSO_UDP stays as is. SKB_GSO_TCPV4 needs to
+		 * be changed into SKB_GSO_TCPV6.
+		 */
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+			skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV4;
+			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
+		}
+
+		/* Due to IPv6 header, MSS needs to be downgraded. */
+		skb_shinfo(skb)->gso_size -= len_diff;
+		/* Header must be checked, and gso_segs recomputed. */
+		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
+		skb_shinfo(skb)->gso_segs = 0;
+	}
+
+	skb->protocol = htons(ETH_P_IPV6);
+	skb_clear_hash(skb);
+
+	return 0;
+}
+
+static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
+{
+	const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
+	u32 off = skb->network_header - skb->mac_header;
+	int ret;
+
+	ret = skb_unclone(skb, GFP_ATOMIC);
+	if (unlikely(ret < 0))
+		return ret;
+
+	ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
+	if (unlikely(ret < 0))
+		return ret;
+
+	if (skb_is_gso(skb)) {
+		/* SKB_GSO_UDP stays as is. SKB_GSO_TCPV6 needs to
+		 * be changed into SKB_GSO_TCPV4.
+		 */
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+			skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6;
+			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
+		}
+
+		/* Due to IPv4 header, MSS can be upgraded. */
+		skb_shinfo(skb)->gso_size += len_diff;
+		/* Header must be checked, and gso_segs recomputed. */
+		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
+		skb_shinfo(skb)->gso_segs = 0;
+	}
+
+	skb->protocol = htons(ETH_P_IP);
+	skb_clear_hash(skb);
+
+	return 0;
+}
+
+static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
+{
+	__be16 from_proto = skb->protocol;
+
+	if (from_proto == htons(ETH_P_IP) &&
+	    to_proto == htons(ETH_P_IPV6))
+		return bpf_skb_proto_4_to_6(skb);
+
+	if (from_proto == htons(ETH_P_IPV6) &&
+	    to_proto == htons(ETH_P_IP))
+		return bpf_skb_proto_6_to_4(skb);
+
+	return -ENOTSUPP;
+}
+
+static u64 bpf_skb_change_proto(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *) (long) r1;
+	__be16 proto = (__force __be16) r2;
+	int ret;
+
+	if (unlikely(flags))
+		return -EINVAL;
+
+	/* General idea is that this helper does the basic groundwork
+	 * needed for changing the protocol, and eBPF program fills the
+	 * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
+	 * and other helpers, rather than passing a raw buffer here.
+	 *
+	 * The rationale is to keep this minimal and without a need to
+	 * deal with raw packet data. F.e. even if we would pass buffers
+	 * here, the program still needs to call the bpf_lX_csum_replace()
+	 * helpers anyway. Plus, this way we keep also separation of
+	 * concerns, since f.e. bpf_skb_store_bytes() should only take
+	 * care of stores.
+	 *
+	 * Currently, additional options and extension header space are
+	 * not supported, but flags register is reserved so we can adapt
+	 * that. For offloads, we mark packet as dodgy, so that headers
+	 * need to be verified first.
+	 */
+	ret = bpf_skb_proto_xlat(skb, proto);
+	bpf_compute_data_end(skb);
+	return ret;
+}
+
+static const struct bpf_func_proto bpf_skb_change_proto_proto = {
+	.func = bpf_skb_change_proto,
+	.gpl_only = false,
+	.ret_type = RET_INTEGER,
+	.arg1_type = ARG_PTR_TO_CTX,
+	.arg2_type = ARG_ANYTHING,
+	.arg3_type = ARG_ANYTHING,
+};
+
+static u64 bpf_skb_change_type(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *) (long) r1;
+	u32 pkt_type = r2;
+
+	/* We only allow a restricted subset to be changed for now. */
+	if (unlikely(skb->pkt_type > PACKET_OTHERHOST ||
+		     pkt_type > PACKET_OTHERHOST))
+		return -EINVAL;
+
+	skb->pkt_type = pkt_type;
+	return 0;
+}
+
+static const struct bpf_func_proto bpf_skb_change_type_proto = {
+	.func = bpf_skb_change_type,
+	.gpl_only = false,
+	.ret_type = RET_INTEGER,
+	.arg1_type = ARG_PTR_TO_CTX,
+	.arg2_type = ARG_ANYTHING,
+};
+
 bool bpf_helper_changes_skb_data(void *func)
 {
 	if (func == bpf_skb_vlan_push)
@@ -1785,6 +2015,8 @@ bool bpf_helper_changes_skb_data(void *func)
 		return true;
 	if (func == bpf_skb_store_bytes)
 		return true;
+	if (func == bpf_skb_change_proto)
+		return true;
 	if (func == bpf_l3_csum_replace)
 		return true;
 	if (func == bpf_l4_csum_replace)
@@ -2024,6 +2256,40 @@ bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
 	}
 }
 
+#ifdef CONFIG_SOCK_CGROUP_DATA
+static u64 bpf_skb_in_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *)(long)r1;
+	struct bpf_map *map = (struct bpf_map *)(long)r2;
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	struct cgroup *cgrp;
+	struct sock *sk;
+	u32 i = (u32)r3;
+
+	sk = skb->sk;
+	if (!sk || !sk_fullsock(sk))
+		return -ENOENT;
+
+	if (unlikely(i >= array->map.max_entries))
+		return -E2BIG;
+
+	cgrp = READ_ONCE(array->ptrs[i]);
+	if (unlikely(!cgrp))
+		return -EAGAIN;
+
+	return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), cgrp);
+}
+
+static const struct bpf_func_proto bpf_skb_in_cgroup_proto = {
+	.func = bpf_skb_in_cgroup,
+	.gpl_only = false,
+	.ret_type = RET_INTEGER,
+	.arg1_type = ARG_PTR_TO_CTX,
+	.arg2_type = ARG_CONST_MAP_PTR,
+	.arg3_type = ARG_ANYTHING,
+};
+#endif
+
 static const struct bpf_func_proto *
 sk_filter_func_proto(enum bpf_func_id func_id)
 {
@@ -2037,7 +2303,7 @@ sk_filter_func_proto(enum bpf_func_id func_id)
 	case BPF_FUNC_get_prandom_u32:
 		return &bpf_get_prandom_u32_proto;
 	case BPF_FUNC_get_smp_processor_id:
-		return &bpf_get_smp_processor_id_proto;
+		return &bpf_get_raw_smp_processor_id_proto;
 	case BPF_FUNC_tail_call:
 		return &bpf_tail_call_proto;
 	case BPF_FUNC_ktime_get_ns:
@@ -2072,6 +2338,10 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 		return &bpf_skb_vlan_push_proto;
 	case BPF_FUNC_skb_vlan_pop:
 		return &bpf_skb_vlan_pop_proto;
+	case BPF_FUNC_skb_change_proto:
+		return &bpf_skb_change_proto_proto;
+	case BPF_FUNC_skb_change_type:
+		return &bpf_skb_change_type_proto;
 	case BPF_FUNC_skb_get_tunnel_key:
 		return &bpf_skb_get_tunnel_key_proto;
 	case BPF_FUNC_skb_set_tunnel_key:
@@ -2084,8 +2354,16 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 		return &bpf_redirect_proto;
 	case BPF_FUNC_get_route_realm:
 		return &bpf_get_route_realm_proto;
+	case BPF_FUNC_get_hash_recalc:
+		return &bpf_get_hash_recalc_proto;
 	case BPF_FUNC_perf_event_output:
 		return bpf_get_event_output_proto();
+	case BPF_FUNC_get_smp_processor_id:
+		return &bpf_get_smp_processor_id_proto;
+#ifdef CONFIG_SOCK_CGROUP_DATA
+	case BPF_FUNC_skb_in_cgroup:
+		return &bpf_skb_in_cgroup_proto;
+#endif
 	default:
 		return sk_filter_func_proto(func_id);
 	}
@@ -2105,7 +2383,8 @@ static bool __is_valid_access(int off, int size, enum bpf_access_type type)
 }
 
 static bool sk_filter_is_valid_access(int off, int size,
-				      enum bpf_access_type type)
+				      enum bpf_access_type type,
+				      enum bpf_reg_type *reg_type)
 {
 	switch (off) {
 	case offsetof(struct __sk_buff, tc_classid):
@@ -2128,7 +2407,8 @@ static bool sk_filter_is_valid_access(int off, int size,
 }
 
 static bool tc_cls_act_is_valid_access(int off, int size,
-				       enum bpf_access_type type)
+				       enum bpf_access_type type,
+				       enum bpf_reg_type *reg_type)
 {
 	if (type == BPF_WRITE) {
 		switch (off) {
@@ -2143,6 +2423,16 @@ static bool tc_cls_act_is_valid_access(int off, int size,
 			return false;
 		}
 	}
+
+	switch (off) {
+	case offsetof(struct __sk_buff, data):
+		*reg_type = PTR_TO_PACKET;
+		break;
+	case offsetof(struct __sk_buff, data_end):
+		*reg_type = PTR_TO_PACKET_END;
+		break;
+	}
+
 	return __is_valid_access(off, size, type);
 }
 
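Note: for tc-attached programs, the new bpf_skb_change_proto() and bpf_skb_change_type() helpers become callable alongside bpf_get_hash_recalc(). A hypothetical classifier fragment, written against the helper wrappers used by samples/bpf in this tree (the section name and wrapper header are assumptions, not taken from this diff):

	#include <uapi/linux/bpf.h>
	#include <uapi/linux/if_ether.h>
	#include <uapi/linux/pkt_cls.h>
	#include "bpf_helpers.h"

	SEC("classifier")
	int xlat_v4_to_v6(struct __sk_buff *skb)
	{
		/* Grow the header from IPv4 to IPv6; per the helper's
		 * contract above, the program is then expected to fill
		 * in the new header and fix checksums itself via
		 * bpf_skb_store_bytes()/bpf_lX_csum_replace().
		 */
		if (skb->protocol == __constant_htons(ETH_P_IP) &&
		    bpf_skb_change_proto(skb, __constant_htons(ETH_P_IPV6), 0))
			return TC_ACT_SHOT;
		return TC_ACT_OK;
	}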
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index a669dea146c6..61ad43f61c5e 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -651,6 +651,23 @@ void make_flow_keys_digest(struct flow_keys_digest *digest,
 }
 EXPORT_SYMBOL(make_flow_keys_digest);
 
+static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
+
+u32 __skb_get_hash_symmetric(struct sk_buff *skb)
+{
+	struct flow_keys keys;
+
+	__flow_hash_secret_init();
+
+	memset(&keys, 0, sizeof(keys));
+	__skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
+			   NULL, 0, 0, 0,
+			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+
+	return __flow_hash_from_keys(&keys, hashrnd);
+}
+EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
+
 /**
  * __skb_get_hash: calculate a flow hash
  * @skb: sk_buff to calculate flow hash from
@@ -868,6 +885,29 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = {
 	},
 };
 
+static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
+	{
+		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
+		.offset = offsetof(struct flow_keys, control),
+	},
+	{
+		.key_id = FLOW_DISSECTOR_KEY_BASIC,
+		.offset = offsetof(struct flow_keys, basic),
+	},
+	{
+		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+		.offset = offsetof(struct flow_keys, addrs.v4addrs),
+	},
+	{
+		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+		.offset = offsetof(struct flow_keys, addrs.v6addrs),
+	},
+	{
+		.key_id = FLOW_DISSECTOR_KEY_PORTS,
+		.offset = offsetof(struct flow_keys, ports),
+	},
+};
+
 static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
 	{
 		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
@@ -889,6 +929,9 @@ static int __init init_default_flow_dissectors(void)
 	skb_flow_dissector_init(&flow_keys_dissector,
 				flow_keys_dissector_keys,
 				ARRAY_SIZE(flow_keys_dissector_keys));
+	skb_flow_dissector_init(&flow_keys_dissector_symmetric,
+				flow_keys_dissector_symmetric_keys,
+				ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
 	skb_flow_dissector_init(&flow_keys_buf_dissector,
 				flow_keys_buf_dissector_keys,
 				ARRAY_SIZE(flow_keys_buf_dissector_keys));
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 29dd8cc22bbf..5cdc62a8eb84 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -473,7 +473,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
 	}
 
 	if (dev->netdev_ops->ndo_neigh_construct) {
-		error = dev->netdev_ops->ndo_neigh_construct(n);
+		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
 		if (error < 0) {
 			rc = ERR_PTR(error);
 			goto out_neigh_release;
@@ -701,7 +701,7 @@ void neigh_destroy(struct neighbour *neigh)
 	neigh->arp_queue_len_bytes = 0;
 
 	if (dev->netdev_ops->ndo_neigh_destroy)
-		dev->netdev_ops->ndo_neigh_destroy(neigh);
+		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
 
 	dev_put(dev);
 	neigh_parms_put(neigh->parms);
@@ -2047,6 +2047,7 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
 			case NDTPA_DELAY_PROBE_TIME:
 				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
 					      nla_get_msecs(tbp[i]));
+				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
 				break;
 			case NDTPA_RETRANS_TIME:
 				NEIGH_VAR_SET(p, RETRANS_TIME,
@@ -2469,13 +2470,17 @@ int neigh_xmit(int index, struct net_device *dev,
 		tbl = neigh_tables[index];
 		if (!tbl)
 			goto out;
+		rcu_read_lock_bh();
 		neigh = __neigh_lookup_noref(tbl, addr, dev);
 		if (!neigh)
 			neigh = __neigh_create(tbl, addr, dev, false);
 		err = PTR_ERR(neigh);
-		if (IS_ERR(neigh))
+		if (IS_ERR(neigh)) {
+			rcu_read_unlock_bh();
 			goto out_kfree_skb;
+		}
 		err = neigh->output(neigh, skb);
+		rcu_read_unlock_bh();
 	}
 	else if (index == NEIGH_LINK_TABLE) {
 		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
@@ -2926,6 +2931,7 @@ static void neigh_proc_update(struct ctl_table *ctl, int write)
 		return;
 
 	set_bit(index, p->data_state);
+	call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
 	if (!dev) /* NULL dev means this is default value */
 		neigh_copy_dflt_parms(net, p, index);
 }
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 7a0b616557ab..6e4f34721080 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -322,7 +322,20 @@ NETDEVICE_SHOW_RW(flags, fmt_hex);
 
 static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
 {
-	dev->tx_queue_len = new_len;
+	int res, orig_len = dev->tx_queue_len;
+
+	if (new_len != orig_len) {
+		dev->tx_queue_len = new_len;
+		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
+		res = notifier_to_errno(res);
+		if (res) {
+			netdev_err(dev,
+				   "refused to change device tx_queue_len\n");
+			dev->tx_queue_len = orig_len;
+			return -EFAULT;
+		}
+	}
+
 	return 0;
 }
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index f74ab9c3b38f..bbd118b19aef 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -213,6 +213,7 @@
213/* Xmit modes */ 213/* Xmit modes */
214#define M_START_XMIT 0 /* Default normal TX */ 214#define M_START_XMIT 0 /* Default normal TX */
215#define M_NETIF_RECEIVE 1 /* Inject packets into stack */ 215#define M_NETIF_RECEIVE 1 /* Inject packets into stack */
216#define M_QUEUE_XMIT 2 /* Inject packet into qdisc */
216 217
217/* If lock -- protects updating of if_list */ 218/* If lock -- protects updating of if_list */
218#define if_lock(t) spin_lock(&(t->if_lock)); 219#define if_lock(t) spin_lock(&(t->if_lock));
@@ -626,6 +627,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
626 627
627 if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) 628 if (pkt_dev->xmit_mode == M_NETIF_RECEIVE)
628 seq_puts(seq, " xmit_mode: netif_receive\n"); 629 seq_puts(seq, " xmit_mode: netif_receive\n");
630 else if (pkt_dev->xmit_mode == M_QUEUE_XMIT)
631 seq_puts(seq, " xmit_mode: xmit_queue\n");
629 632
630 seq_puts(seq, " Flags: "); 633 seq_puts(seq, " Flags: ");
631 634
@@ -1142,8 +1145,10 @@ static ssize_t pktgen_if_write(struct file *file,
1142 return len; 1145 return len;
1143 1146
1144 i += len; 1147 i += len;
1145 if ((value > 1) && (pkt_dev->xmit_mode == M_START_XMIT) && 1148 if ((value > 1) &&
1146 (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING))) 1149 ((pkt_dev->xmit_mode == M_QUEUE_XMIT) ||
1150 ((pkt_dev->xmit_mode == M_START_XMIT) &&
1151 (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))))
1147 return -ENOTSUPP; 1152 return -ENOTSUPP;
1148 pkt_dev->burst = value < 1 ? 1 : value; 1153 pkt_dev->burst = value < 1 ? 1 : value;
1149 sprintf(pg_result, "OK: burst=%d", pkt_dev->burst); 1154 sprintf(pg_result, "OK: burst=%d", pkt_dev->burst);
@@ -1198,6 +1203,9 @@ static ssize_t pktgen_if_write(struct file *file,
1198 * at module loading time 1203 * at module loading time
1199 */ 1204 */
1200 pkt_dev->clone_skb = 0; 1205 pkt_dev->clone_skb = 0;
1206 } else if (strcmp(f, "queue_xmit") == 0) {
1207 pkt_dev->xmit_mode = M_QUEUE_XMIT;
1208 pkt_dev->last_ok = 1;
1201 } else { 1209 } else {
1202 sprintf(pg_result, 1210 sprintf(pg_result,
1203 "xmit_mode -:%s:- unknown\nAvailable modes: %s", 1211 "xmit_mode -:%s:- unknown\nAvailable modes: %s",
@@ -3434,6 +3442,36 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
3434#endif 3442#endif
3435 } while (--burst > 0); 3443 } while (--burst > 0);
3436 goto out; /* Skips xmit_mode M_START_XMIT */ 3444 goto out; /* Skips xmit_mode M_START_XMIT */
3445 } else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
3446 local_bh_disable();
3447 atomic_inc(&pkt_dev->skb->users);
3448
3449 ret = dev_queue_xmit(pkt_dev->skb);
3450 switch (ret) {
3451 case NET_XMIT_SUCCESS:
3452 pkt_dev->sofar++;
3453 pkt_dev->seq_num++;
3454 pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
3455 break;
3456 case NET_XMIT_DROP:
3457 case NET_XMIT_CN:
3458 /* These are all valid return codes for a qdisc but
3459 * indicate packets are being dropped or will likely
3460 * be dropped soon.
3461 */
3462 case NETDEV_TX_BUSY:
3463 /* qdisc may call dev_hard_start_xmit directly in cases
3464 * where no queues exist, e.g. loopback device, virtual
3465 * devices, etc. In this case we need to handle
3466 * NETDEV_TX_ codes.
3467 */
3468 default:
3469 pkt_dev->errors++;
3470 net_info_ratelimited("%s xmit error: %d\n",
3471 pkt_dev->odevname, ret);
3472 break;
3473 }
3474 goto out;
3437 } 3475 }
3438 3476
3439 txq = skb_get_tx_queue(odev, pkt_dev->skb); 3477 txq = skb_get_tx_queue(odev, pkt_dev->skb);
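
The new M_QUEUE_XMIT mode hands each packet to dev_queue_xmit(), so unlike the default mode the skb traverses the qdisc attached to the device. That is also why burst is rejected with -ENOTSUPP in this mode (the skb cannot be shared across qdisc submissions), and why both NET_XMIT_* and NETDEV_TX_* return codes are handled above. Selecting the mode is an ordinary pktgen procfs write; a small userspace sketch, with the interface name assumed:

    #include <stdio.h>
    #include <stdlib.h>

    /* Assumes the pktgen module is loaded and eth0 has already been
     * added to a pktgen thread (via /proc/net/pktgen/kpktgend_0).
     */
    int main(void)
    {
            FILE *f = fopen("/proc/net/pktgen/eth0", "w");

            if (!f) {
                    perror("fopen");
                    return EXIT_FAILURE;
            }
            /* route generated packets through eth0's qdisc */
            fprintf(f, "xmit_mode queue_xmit\n");
            fclose(f);
            return EXIT_SUCCESS;
    }
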
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index eb49ca24274a..a9e3805af739 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1927,11 +1927,19 @@ static int do_setlink(const struct sk_buff *skb,
1927 1927
1928 if (tb[IFLA_TXQLEN]) { 1928 if (tb[IFLA_TXQLEN]) {
1929 unsigned long value = nla_get_u32(tb[IFLA_TXQLEN]); 1929 unsigned long value = nla_get_u32(tb[IFLA_TXQLEN]);
1930 1930 unsigned long orig_len = dev->tx_queue_len;
1931 if (dev->tx_queue_len ^ value) 1931
1932 if (dev->tx_queue_len ^ value) {
1933 dev->tx_queue_len = value;
1934 err = call_netdevice_notifiers(
1935 NETDEV_CHANGE_TX_QUEUE_LEN, dev);
1936 err = notifier_to_errno(err);
1937 if (err) {
1938 dev->tx_queue_len = orig_len;
1939 goto errout;
1940 }
1932 status |= DO_SETLINK_NOTIFY; 1941 status |= DO_SETLINK_NOTIFY;
1933 1942 }
1934 dev->tx_queue_len = value;
1935 } 1943 }
1936 1944
1937 if (tb[IFLA_OPERSTATE]) 1945 if (tb[IFLA_OPERSTATE])
@@ -3519,7 +3527,32 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
3519 if (!attr) 3527 if (!attr)
3520 goto nla_put_failure; 3528 goto nla_put_failure;
3521 3529
3522 err = ops->fill_linkxstats(skb, dev, prividx); 3530 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
3531 nla_nest_end(skb, attr);
3532 if (err)
3533 goto nla_put_failure;
3534 *idxattr = 0;
3535 }
3536 }
3537
3538 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
3539 *idxattr)) {
3540 const struct rtnl_link_ops *ops = NULL;
3541 const struct net_device *master;
3542
3543 master = netdev_master_upper_dev_get(dev);
3544 if (master)
3545 ops = master->rtnl_link_ops;
3546 if (ops && ops->fill_linkxstats) {
3547 int err;
3548
3549 *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
3550 attr = nla_nest_start(skb,
3551 IFLA_STATS_LINK_XSTATS_SLAVE);
3552 if (!attr)
3553 goto nla_put_failure;
3554
3555 err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
3523 nla_nest_end(skb, attr); 3556 nla_nest_end(skb, attr);
3524 if (err) 3557 if (err)
3525 goto nla_put_failure; 3558 goto nla_put_failure;
@@ -3555,14 +3588,35 @@ static size_t if_nlmsg_stats_size(const struct net_device *dev,
3555 3588
3556 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) { 3589 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
3557 const struct rtnl_link_ops *ops = dev->rtnl_link_ops; 3590 const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
3591 int attr = IFLA_STATS_LINK_XSTATS;
3558 3592
3559 if (ops && ops->get_linkxstats_size) { 3593 if (ops && ops->get_linkxstats_size) {
3560 size += nla_total_size(ops->get_linkxstats_size(dev)); 3594 size += nla_total_size(ops->get_linkxstats_size(dev,
3595 attr));
3561 /* for IFLA_STATS_LINK_XSTATS */ 3596 /* for IFLA_STATS_LINK_XSTATS */
3562 size += nla_total_size(0); 3597 size += nla_total_size(0);
3563 } 3598 }
3564 } 3599 }
3565 3600
3601 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
3602 struct net_device *_dev = (struct net_device *)dev;
3603 const struct rtnl_link_ops *ops = NULL;
3604 const struct net_device *master;
3605
3606 /* netdev_master_upper_dev_get can't take const */
3607 master = netdev_master_upper_dev_get(_dev);
3608 if (master)
3609 ops = master->rtnl_link_ops;
3610 if (ops && ops->get_linkxstats_size) {
3611 int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
3612
3613 size += nla_total_size(ops->get_linkxstats_size(dev,
3614 attr));
3615 /* for IFLA_STATS_LINK_XSTATS_SLAVE */
3616 size += nla_total_size(0);
3617 }
3618 }
3619
3566 return size; 3620 return size;
3567} 3621}
3568 3622
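
Two changes meet in this file: IFLA_TXQLEN handling gains the same notify-and-rollback pattern as the sysfs path above, and the linkxstats callbacks now receive the attribute being filled, which lets one rtnl_link_ops implementation serve both IFLA_STATS_LINK_XSTATS (the device's own stats) and the new IFLA_STATS_LINK_XSTATS_SLAVE (per-port stats resolved through the port's master). A sketch of how an implementation might dispatch on the new argument; the two helpers are placeholders:

    static int demo_fill_linkxstats(struct sk_buff *skb,
                                    const struct net_device *dev,
                                    int *prividx, int attr)
    {
            switch (attr) {
            case IFLA_STATS_LINK_XSTATS:
                    /* dev is the master itself (hypothetical helper) */
                    return demo_fill_own_xstats(skb, dev, prividx);
            case IFLA_STATS_LINK_XSTATS_SLAVE:
                    /* dev is a port; read stats from its master's state */
                    return demo_fill_port_xstats(skb, dev, prividx);
            default:
                    return -EINVAL;
            }
    }
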
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e7ec6d3ad5f0..3864b4b68fa1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3017,24 +3017,6 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
3017EXPORT_SYMBOL_GPL(skb_append_pagefrags); 3017EXPORT_SYMBOL_GPL(skb_append_pagefrags);
3018 3018
3019/** 3019/**
3020 * skb_push_rcsum - push skb and update receive checksum
3021 * @skb: buffer to update
3022 * @len: length of data pulled
3023 *
3024 * This function performs an skb_push on the packet and updates
3025 * the CHECKSUM_COMPLETE checksum. It should be used on
3026 * receive path processing instead of skb_push unless you know
3027 * that the checksum difference is zero (e.g., a valid IP header)
3028 * or you are setting ip_summed to CHECKSUM_NONE.
3029 */
3030static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
3031{
3032 skb_push(skb, len);
3033 skb_postpush_rcsum(skb, skb->data, len);
3034 return skb->data;
3035}
3036
3037/**
3038 * skb_pull_rcsum - pull skb and update receive checksum 3020 * skb_pull_rcsum - pull skb and update receive checksum
3039 * @skb: buffer to update 3021 * @skb: buffer to update
3040 * @len: length of data pulled 3022 * @len: length of data pulled
diff --git a/net/core/utils.c b/net/core/utils.c
index 3d17ca8b4744..cf5622b9ccc4 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -133,7 +133,7 @@ int in4_pton(const char *src, int srclen,
133 s = src; 133 s = src;
134 d = dbuf; 134 d = dbuf;
135 i = 0; 135 i = 0;
136 while(1) { 136 while (1) {
137 int c; 137 int c;
138 c = xdigit2bin(srclen > 0 ? *s : '\0', delim); 138 c = xdigit2bin(srclen > 0 ? *s : '\0', delim);
139 if (!(c & (IN6PTON_DIGIT | IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK))) { 139 if (!(c & (IN6PTON_DIGIT | IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK))) {
@@ -283,11 +283,11 @@ cont:
283 i = 15; d--; 283 i = 15; d--;
284 284
285 if (dc) { 285 if (dc) {
286 while(d >= dc) 286 while (d >= dc)
287 dst[i--] = *d--; 287 dst[i--] = *d--;
288 while(i >= dc - dbuf) 288 while (i >= dc - dbuf)
289 dst[i--] = 0; 289 dst[i--] = 0;
290 while(i >= 0) 290 while (i >= 0)
291 dst[i--] = *d--; 291 dst[i--] = *d--;
292 } else 292 } else
293 memcpy(dst, dbuf, sizeof(dbuf)); 293 memcpy(dst, dbuf, sizeof(dbuf));
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index df4803437888..a796fc7cbc35 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -41,6 +41,7 @@
41#include <net/dn_fib.h> 41#include <net/dn_fib.h>
42#include <net/dn_neigh.h> 42#include <net/dn_neigh.h>
43#include <net/dn_dev.h> 43#include <net/dn_dev.h>
44#include <net/nexthop.h>
44 45
45#define RT_MIN_TABLE 1 46#define RT_MIN_TABLE 1
46 47
@@ -150,14 +151,13 @@ static int dn_fib_count_nhs(const struct nlattr *attr)
150 struct rtnexthop *nhp = nla_data(attr); 151 struct rtnexthop *nhp = nla_data(attr);
151 int nhs = 0, nhlen = nla_len(attr); 152 int nhs = 0, nhlen = nla_len(attr);
152 153
153 while(nhlen >= (int)sizeof(struct rtnexthop)) { 154 while (rtnh_ok(nhp, nhlen)) {
154 if ((nhlen -= nhp->rtnh_len) < 0)
155 return 0;
156 nhs++; 155 nhs++;
157 nhp = RTNH_NEXT(nhp); 156 nhp = rtnh_next(nhp, &nhlen);
158 } 157 }
159 158
160 return nhs; 159 /* leftover bytes imply an invalid nexthop configuration; discard it */
160 return nhlen > 0 ? 0 : nhs;
161} 161}
162 162
163static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr, 163static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr,
@@ -167,21 +167,24 @@ static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr,
167 int nhlen = nla_len(attr); 167 int nhlen = nla_len(attr);
168 168
169 change_nexthops(fi) { 169 change_nexthops(fi) {
170 int attrlen = nhlen - sizeof(struct rtnexthop); 170 int attrlen;
171 if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0) 171
172 if (!rtnh_ok(nhp, nhlen))
172 return -EINVAL; 173 return -EINVAL;
173 174
174 nh->nh_flags = (r->rtm_flags&~0xFF) | nhp->rtnh_flags; 175 nh->nh_flags = (r->rtm_flags&~0xFF) | nhp->rtnh_flags;
175 nh->nh_oif = nhp->rtnh_ifindex; 176 nh->nh_oif = nhp->rtnh_ifindex;
176 nh->nh_weight = nhp->rtnh_hops + 1; 177 nh->nh_weight = nhp->rtnh_hops + 1;
177 178
178 if (attrlen) { 179 attrlen = rtnh_attrlen(nhp);
180 if (attrlen > 0) {
179 struct nlattr *gw_attr; 181 struct nlattr *gw_attr;
180 182
181 gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY); 183 gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY);
182 nh->nh_gw = gw_attr ? nla_get_le16(gw_attr) : 0; 184 nh->nh_gw = gw_attr ? nla_get_le16(gw_attr) : 0;
183 } 185 }
184 nhp = RTNH_NEXT(nhp); 186
187 nhp = rtnh_next(nhp, &nhlen);
185 } endfor_nexthops(fi); 188 } endfor_nexthops(fi);
186 189
187 return 0; 190 return 0;
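
The DECnet walker now uses the bounds-checked rtnh_ok()/rtnh_next() helpers from <net/nexthop.h>, as IPv4 does, instead of trusting the attacker-controllable rtnh_len field on its own. The counting helper reduces to the canonical walk (this mirrors the patched dn_fib_count_nhs()):

    #include <net/netlink.h>
    #include <net/nexthop.h>

    static int demo_count_nhs(const struct nlattr *attr)
    {
            struct rtnexthop *nhp = nla_data(attr);
            int nhs = 0, nhlen = nla_len(attr);

            /* rtnh_ok() validates the fixed header and rtnh_len
             * against the bytes actually remaining
             */
            while (rtnh_ok(nhp, nhlen)) {
                    nhs++;
                    nhp = rtnh_next(nhp, &nhlen);   /* also shrinks nhlen */
            }

            /* trailing bytes mean a malformed dump; reject it */
            return nhlen > 0 ? 0 : nhs;
    }
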
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 8c004a0c8d64..935ab932e841 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -81,7 +81,7 @@ static int lowpan_stop(struct net_device *dev)
81 return 0; 81 return 0;
82} 82}
83 83
84static int lowpan_neigh_construct(struct neighbour *n) 84static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n)
85{ 85{
86 struct lowpan_802154_neigh *neigh = lowpan_802154_neigh(neighbour_priv(n)); 86 struct lowpan_802154_neigh *neigh = lowpan_802154_neigh(neighbour_priv(n));
87 87
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 477937465a20..d95631d09248 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -23,6 +23,11 @@ struct esp_skb_cb {
23 void *tmp; 23 void *tmp;
24}; 24};
25 25
26struct esp_output_extra {
27 __be32 seqhi;
28 u32 esphoff;
29};
30
26#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0])) 31#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
27 32
28static u32 esp4_get_mtu(struct xfrm_state *x, int mtu); 33static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
@@ -35,11 +40,11 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
35 * 40 *
36 * TODO: Use spare space in skb for this where possible. 41 * TODO: Use spare space in skb for this where possible.
37 */ 42 */
38static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen) 43static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
39{ 44{
40 unsigned int len; 45 unsigned int len;
41 46
42 len = seqhilen; 47 len = extralen;
43 48
44 len += crypto_aead_ivsize(aead); 49 len += crypto_aead_ivsize(aead);
45 50
@@ -57,15 +62,16 @@ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
57 return kmalloc(len, GFP_ATOMIC); 62 return kmalloc(len, GFP_ATOMIC);
58} 63}
59 64
60static inline __be32 *esp_tmp_seqhi(void *tmp) 65static inline void *esp_tmp_extra(void *tmp)
61{ 66{
62 return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32)); 67 return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
63} 68}
64static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen) 69
70static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
65{ 71{
66 return crypto_aead_ivsize(aead) ? 72 return crypto_aead_ivsize(aead) ?
67 PTR_ALIGN((u8 *)tmp + seqhilen, 73 PTR_ALIGN((u8 *)tmp + extralen,
68 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen; 74 crypto_aead_alignmask(aead) + 1) : tmp + extralen;
69} 75}
70 76
71static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv) 77static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
@@ -99,7 +105,7 @@ static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
99{ 105{
100 struct ip_esp_hdr *esph = (void *)(skb->data + offset); 106 struct ip_esp_hdr *esph = (void *)(skb->data + offset);
101 void *tmp = ESP_SKB_CB(skb)->tmp; 107 void *tmp = ESP_SKB_CB(skb)->tmp;
102 __be32 *seqhi = esp_tmp_seqhi(tmp); 108 __be32 *seqhi = esp_tmp_extra(tmp);
103 109
104 esph->seq_no = esph->spi; 110 esph->seq_no = esph->spi;
105 esph->spi = *seqhi; 111 esph->spi = *seqhi;
@@ -107,7 +113,11 @@ static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
107 113
108static void esp_output_restore_header(struct sk_buff *skb) 114static void esp_output_restore_header(struct sk_buff *skb)
109{ 115{
110 esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32)); 116 void *tmp = ESP_SKB_CB(skb)->tmp;
117 struct esp_output_extra *extra = esp_tmp_extra(tmp);
118
119 esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
120 sizeof(__be32));
111} 121}
112 122
113static void esp_output_done_esn(struct crypto_async_request *base, int err) 123static void esp_output_done_esn(struct crypto_async_request *base, int err)
@@ -121,6 +131,7 @@ static void esp_output_done_esn(struct crypto_async_request *base, int err)
121static int esp_output(struct xfrm_state *x, struct sk_buff *skb) 131static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
122{ 132{
123 int err; 133 int err;
134 struct esp_output_extra *extra;
124 struct ip_esp_hdr *esph; 135 struct ip_esp_hdr *esph;
125 struct crypto_aead *aead; 136 struct crypto_aead *aead;
126 struct aead_request *req; 137 struct aead_request *req;
@@ -137,8 +148,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
137 int tfclen; 148 int tfclen;
138 int nfrags; 149 int nfrags;
139 int assoclen; 150 int assoclen;
140 int seqhilen; 151 int extralen;
141 __be32 *seqhi;
142 __be64 seqno; 152 __be64 seqno;
143 153
144 /* skb is pure payload to encrypt */ 154 /* skb is pure payload to encrypt */
@@ -166,21 +176,21 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
166 nfrags = err; 176 nfrags = err;
167 177
168 assoclen = sizeof(*esph); 178 assoclen = sizeof(*esph);
169 seqhilen = 0; 179 extralen = 0;
170 180
171 if (x->props.flags & XFRM_STATE_ESN) { 181 if (x->props.flags & XFRM_STATE_ESN) {
172 seqhilen += sizeof(__be32); 182 extralen += sizeof(*extra);
173 assoclen += seqhilen; 183 assoclen += sizeof(__be32);
174 } 184 }
175 185
176 tmp = esp_alloc_tmp(aead, nfrags, seqhilen); 186 tmp = esp_alloc_tmp(aead, nfrags, extralen);
177 if (!tmp) { 187 if (!tmp) {
178 err = -ENOMEM; 188 err = -ENOMEM;
179 goto error; 189 goto error;
180 } 190 }
181 191
182 seqhi = esp_tmp_seqhi(tmp); 192 extra = esp_tmp_extra(tmp);
183 iv = esp_tmp_iv(aead, tmp, seqhilen); 193 iv = esp_tmp_iv(aead, tmp, extralen);
184 req = esp_tmp_req(aead, iv); 194 req = esp_tmp_req(aead, iv);
185 sg = esp_req_sg(aead, req); 195 sg = esp_req_sg(aead, req);
186 196
@@ -247,8 +257,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
247 * encryption. 257 * encryption.
248 */ 258 */
249 if ((x->props.flags & XFRM_STATE_ESN)) { 259 if ((x->props.flags & XFRM_STATE_ESN)) {
250 esph = (void *)(skb_transport_header(skb) - sizeof(__be32)); 260 extra->esphoff = (unsigned char *)esph -
251 *seqhi = esph->spi; 261 skb_transport_header(skb);
262 esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
263 extra->seqhi = esph->spi;
252 esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi); 264 esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
253 aead_request_set_callback(req, 0, esp_output_done_esn, skb); 265 aead_request_set_callback(req, 0, esp_output_done_esn, skb);
254 } 266 }
@@ -445,7 +457,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
445 goto out; 457 goto out;
446 458
447 ESP_SKB_CB(skb)->tmp = tmp; 459 ESP_SKB_CB(skb)->tmp = tmp;
448 seqhi = esp_tmp_seqhi(tmp); 460 seqhi = esp_tmp_extra(tmp);
449 iv = esp_tmp_iv(aead, tmp, seqhilen); 461 iv = esp_tmp_iv(aead, tmp, seqhilen);
450 req = esp_tmp_req(aead, iv); 462 req = esp_tmp_req(aead, iv);
451 sg = esp_req_sg(aead, req); 463 sg = esp_req_sg(aead, req);
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index c4c3e439f424..b798862b6be5 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -62,26 +62,26 @@ EXPORT_SYMBOL_GPL(gre_del_protocol);
62 62
63/* Fills in tpi and returns header length to be pulled. */ 63/* Fills in tpi and returns header length to be pulled. */
64int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, 64int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
65 bool *csum_err, __be16 proto) 65 bool *csum_err, __be16 proto, int nhs)
66{ 66{
67 const struct gre_base_hdr *greh; 67 const struct gre_base_hdr *greh;
68 __be32 *options; 68 __be32 *options;
69 int hdr_len; 69 int hdr_len;
70 70
71 if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr)))) 71 if (unlikely(!pskb_may_pull(skb, nhs + sizeof(struct gre_base_hdr))))
72 return -EINVAL; 72 return -EINVAL;
73 73
74 greh = (struct gre_base_hdr *)skb_transport_header(skb); 74 greh = (struct gre_base_hdr *)(skb->data + nhs);
75 if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING))) 75 if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
76 return -EINVAL; 76 return -EINVAL;
77 77
78 tpi->flags = gre_flags_to_tnl_flags(greh->flags); 78 tpi->flags = gre_flags_to_tnl_flags(greh->flags);
79 hdr_len = gre_calc_hlen(tpi->flags); 79 hdr_len = gre_calc_hlen(tpi->flags);
80 80
81 if (!pskb_may_pull(skb, hdr_len)) 81 if (!pskb_may_pull(skb, nhs + hdr_len))
82 return -EINVAL; 82 return -EINVAL;
83 83
84 greh = (struct gre_base_hdr *)skb_transport_header(skb); 84 greh = (struct gre_base_hdr *)(skb->data + nhs);
85 tpi->proto = greh->protocol; 85 tpi->proto = greh->protocol;
86 86
87 options = (__be32 *)(greh + 1); 87 options = (__be32 *)(greh + 1);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 25af1243649b..38c2c47fe0e8 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -44,6 +44,7 @@ struct inet_diag_entry {
44 u16 dport; 44 u16 dport;
45 u16 family; 45 u16 family;
46 u16 userlocks; 46 u16 userlocks;
47 u32 ifindex;
47}; 48};
48 49
49static DEFINE_MUTEX(inet_diag_table_mutex); 50static DEFINE_MUTEX(inet_diag_table_mutex);
@@ -571,6 +572,14 @@ static int inet_diag_bc_run(const struct nlattr *_bc,
571 yes = 0; 572 yes = 0;
572 break; 573 break;
573 } 574 }
575 case INET_DIAG_BC_DEV_COND: {
576 u32 ifindex;
577
578 ifindex = *((const u32 *)(op + 1));
579 if (ifindex != entry->ifindex)
580 yes = 0;
581 break;
582 }
574 } 583 }
575 584
576 if (yes) { 585 if (yes) {
@@ -613,6 +622,7 @@ int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
613 entry_fill_addrs(&entry, sk); 622 entry_fill_addrs(&entry, sk);
614 entry.sport = inet->inet_num; 623 entry.sport = inet->inet_num;
615 entry.dport = ntohs(inet->inet_dport); 624 entry.dport = ntohs(inet->inet_dport);
625 entry.ifindex = sk->sk_bound_dev_if;
616 entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0; 626 entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;
617 627
618 return inet_diag_bc_run(bc, &entry); 628 return inet_diag_bc_run(bc, &entry);
@@ -636,6 +646,17 @@ static int valid_cc(const void *bc, int len, int cc)
636 return 0; 646 return 0;
637} 647}
638 648
649/* data is u32 ifindex */
650static bool valid_devcond(const struct inet_diag_bc_op *op, int len,
651 int *min_len)
652{
653 /* Check ifindex space. */
654 *min_len += sizeof(u32);
655 if (len < *min_len)
656 return false;
657
658 return true;
659}
639/* Validate an inet_diag_hostcond. */ 660/* Validate an inet_diag_hostcond. */
640static bool valid_hostcond(const struct inet_diag_bc_op *op, int len, 661static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
641 int *min_len) 662 int *min_len)
@@ -700,6 +721,10 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
700 if (!valid_hostcond(bc, len, &min_len)) 721 if (!valid_hostcond(bc, len, &min_len))
701 return -EINVAL; 722 return -EINVAL;
702 break; 723 break;
724 case INET_DIAG_BC_DEV_COND:
725 if (!valid_devcond(bc, len, &min_len))
726 return -EINVAL;
727 break;
703 case INET_DIAG_BC_S_GE: 728 case INET_DIAG_BC_S_GE:
704 case INET_DIAG_BC_S_LE: 729 case INET_DIAG_BC_S_LE:
705 case INET_DIAG_BC_D_GE: 730 case INET_DIAG_BC_D_GE:
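
INET_DIAG_BC_DEV_COND compares a u32 ifindex stored right after the op header against the socket's sk_bound_dev_if, giving sock_diag users a device filter for SO_BINDTODEVICE sockets. A hedged userspace sketch of building the one-op bytecode blob; the jump offsets follow the usual bytecode convention (a "no" target 4 bytes past the end means reject), and wrapping the blob in an INET_DIAG_REQ_BYTECODE attribute is omitted:

    #include <string.h>
    #include <net/if.h>
    #include <linux/inet_diag.h>

    /* Build a one-op filter matching sockets bound to ifname.
     * Returns the blob length, or 0 if the interface is unknown.
     */
    static int build_dev_cond(void *buf, const char *ifname)
    {
            struct inet_diag_bc_op op;
            __u32 ifindex = if_nametoindex(ifname);

            if (!ifindex)
                    return 0;

            op.code = INET_DIAG_BC_DEV_COND;
            op.yes = sizeof(op) + sizeof(ifindex);  /* match: step past op */
            op.no = op.yes + 4;                     /* mismatch: reject */

            memcpy(buf, &op, sizeof(op));
            memcpy((char *)buf + sizeof(op), &ifindex, sizeof(ifindex));
            return op.yes;
    }
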
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 8eec78f53f9e..5b1481be0282 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -49,12 +49,6 @@
49#include <net/gre.h> 49#include <net/gre.h>
50#include <net/dst_metadata.h> 50#include <net/dst_metadata.h>
51 51
52#if IS_ENABLED(CONFIG_IPV6)
53#include <net/ipv6.h>
54#include <net/ip6_fib.h>
55#include <net/ip6_route.h>
56#endif
57
58/* 52/*
59 Problems & solutions 53 Problems & solutions
60 -------------------- 54 --------------------
@@ -226,12 +220,14 @@ static void gre_err(struct sk_buff *skb, u32 info)
226 * by themselves??? 220 * by themselves???
227 */ 221 */
228 222
223 const struct iphdr *iph = (struct iphdr *)skb->data;
229 const int type = icmp_hdr(skb)->type; 224 const int type = icmp_hdr(skb)->type;
230 const int code = icmp_hdr(skb)->code; 225 const int code = icmp_hdr(skb)->code;
231 struct tnl_ptk_info tpi; 226 struct tnl_ptk_info tpi;
232 bool csum_err = false; 227 bool csum_err = false;
233 228
234 if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP)) < 0) { 229 if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
230 iph->ihl * 4) < 0) {
235 if (!csum_err) /* ignore csum errors. */ 231 if (!csum_err) /* ignore csum errors. */
236 return; 232 return;
237 } 233 }
@@ -347,7 +343,7 @@ static int gre_rcv(struct sk_buff *skb)
347 } 343 }
348#endif 344#endif
349 345
350 hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP)); 346 hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
351 if (hdr_len < 0) 347 if (hdr_len < 0)
352 goto drop; 348 goto drop;
353 349
@@ -1154,6 +1150,7 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1154{ 1150{
1155 struct nlattr *tb[IFLA_MAX + 1]; 1151 struct nlattr *tb[IFLA_MAX + 1];
1156 struct net_device *dev; 1152 struct net_device *dev;
1153 LIST_HEAD(list_kill);
1157 struct ip_tunnel *t; 1154 struct ip_tunnel *t;
1158 int err; 1155 int err;
1159 1156
@@ -1169,8 +1166,10 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1169 t->collect_md = true; 1166 t->collect_md = true;
1170 1167
1171 err = ipgre_newlink(net, dev, tb, NULL); 1168 err = ipgre_newlink(net, dev, tb, NULL);
1172 if (err < 0) 1169 if (err < 0) {
1173 goto out; 1170 free_netdev(dev);
1171 return ERR_PTR(err);
1172 }
1174 1173
1175 /* openvswitch users expect packet sizes to be unrestricted, 1174 /* openvswitch users expect packet sizes to be unrestricted,
1176 * so set the largest MTU we can. 1175 * so set the largest MTU we can.
@@ -1179,9 +1178,14 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1179 if (err) 1178 if (err)
1180 goto out; 1179 goto out;
1181 1180
1181 err = rtnl_configure_link(dev, NULL);
1182 if (err < 0)
1183 goto out;
1184
1182 return dev; 1185 return dev;
1183out: 1186out:
1184 free_netdev(dev); 1187 ip_tunnel_dellink(dev, &list_kill);
1188 unregister_netdevice_many(&list_kill);
1185 return ERR_PTR(err); 1189 return ERR_PTR(err);
1186} 1190}
1187EXPORT_SYMBOL_GPL(gretap_fb_dev_create); 1191EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
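
Two fixes land in ip_gre.c. First, gre_parse_header() gains an nhs offset because in the ICMP error path the skb data still points at the outer IP header, so the GRE header sits at iph->ihl * 4 rather than at offset 0; the regular receive paths keep passing 0. Second, gretap_fb_dev_create() stops calling free_netdev() on a device that ipgre_newlink() already registered; after registration, teardown must go through unregister. The ownership rule, condensed from the patched function (allocation of dev via rtnl_create_link() and the MTU step are elided):

    static struct net_device *demo_create(struct net *net,
                                          struct net_device *dev,
                                          struct nlattr **tb)
    {
            LIST_HEAD(list_kill);
            int err;

            err = ipgre_newlink(net, dev, tb, NULL);
            if (err < 0) {
                    /* not registered yet: freeing directly is safe */
                    free_netdev(dev);
                    return ERR_PTR(err);
            }

            err = rtnl_configure_link(dev, NULL);
            if (err < 0)
                    goto out;

            return dev;
    out:
            /* registered: unregister instead; this also frees dev */
            ip_tunnel_dellink(dev, &list_kill);
            unregister_netdevice_many(&list_kill);
            return ERR_PTR(err);
    }
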
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index cbac493c913a..e23f141c9ba5 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -271,7 +271,7 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk
271 return dst_output(net, sk, skb); 271 return dst_output(net, sk, skb);
272 } 272 }
273#endif 273#endif
274 mtu = ip_skb_dst_mtu(skb); 274 mtu = ip_skb_dst_mtu(sk, skb);
275 if (skb_is_gso(skb)) 275 if (skb_is_gso(skb))
276 return ip_finish_output_gso(net, sk, skb, mtu); 276 return ip_finish_output_gso(net, sk, skb, mtu);
277 277
@@ -541,7 +541,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
541 541
542 iph = ip_hdr(skb); 542 iph = ip_hdr(skb);
543 543
544 mtu = ip_skb_dst_mtu(skb); 544 mtu = ip_skb_dst_mtu(sk, skb);
545 if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu) 545 if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
546 mtu = IPCB(skb)->frag_max_size; 546 mtu = IPCB(skb)->frag_max_size;
547 547
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 2ed9dd2b5f2f..1d71c40eaaf3 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -127,7 +127,9 @@ __be32 ic_myaddr = NONE; /* My IP address */
127static __be32 ic_netmask = NONE; /* Netmask for local subnet */ 127static __be32 ic_netmask = NONE; /* Netmask for local subnet */
128__be32 ic_gateway = NONE; /* Gateway IP address */ 128__be32 ic_gateway = NONE; /* Gateway IP address */
129 129
130__be32 ic_addrservaddr = NONE; /* IP Address of the IP addresses'server */ 130#ifdef IPCONFIG_DYNAMIC
131static __be32 ic_addrservaddr = NONE; /* IP address of the IP addresses' server */
132#endif
131 133
132__be32 ic_servaddr = NONE; /* Boot server IP address */ 134__be32 ic_servaddr = NONE; /* Boot server IP address */
133 135
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 21a38e296fe2..5ad48ec77710 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -891,8 +891,10 @@ static struct mfc_cache *ipmr_cache_alloc(void)
891{ 891{
892 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); 892 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
893 893
894 if (c) 894 if (c) {
895 c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
895 c->mfc_un.res.minvif = MAXVIFS; 896 c->mfc_un.res.minvif = MAXVIFS;
897 }
896 return c; 898 return c;
897} 899}
898 900
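
Asserts are rate limited by a jiffies comparison against last_assert + MFC_ASSERT_THRESH, so a zero-initialized timestamp wrongly suppresses the first assert for up to MFC_ASSERT_THRESH after boot. Seeding last_assert just past the threshold makes the first comparison fire; the ip6mr hunk below applies the same seed. A tiny userspace demonstration of the unsigned arithmetic involved (constants are illustrative):

    #include <stdio.h>

    /* The kernel's time_after() family relies on the same trick:
     * unsigned subtraction is defined modulo 2^N, so "now - last"
     * yields the elapsed ticks even across a counter wrap.
     */
    typedef unsigned long jiffies_t;

    static int threshold_passed(jiffies_t now, jiffies_t last,
                                jiffies_t thresh)
    {
            return (long)(now - last) > (long)thresh;
    }

    int main(void)
    {
            jiffies_t thresh = 3000;  /* stand-in for MFC_ASSERT_THRESH */
            jiffies_t now = 100;      /* shortly after boot */

            /* zeroed timestamp: the first event is wrongly suppressed */
            printf("zeroed: fire=%d\n", threshold_passed(now, 0, thresh));

            /* seeded just past the threshold: the first event fires */
            printf("seeded: fire=%d\n",
                   threshold_passed(now, now - thresh - 1, thresh));
            return 0;
    }
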
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 2033f929aa66..c8dd9e26b185 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -89,22 +89,20 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
89 __be32 src_ipaddr, tgt_ipaddr; 89 __be32 src_ipaddr, tgt_ipaddr;
90 long ret; 90 long ret;
91 91
92#define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg))) 92 if (NF_INVF(arpinfo, ARPT_INV_ARPOP,
93 93 (arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop))
94 if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop,
95 ARPT_INV_ARPOP))
96 return 0; 94 return 0;
97 95
98 if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd, 96 if (NF_INVF(arpinfo, ARPT_INV_ARPHRD,
99 ARPT_INV_ARPHRD)) 97 (arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd))
100 return 0; 98 return 0;
101 99
102 if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro, 100 if (NF_INVF(arpinfo, ARPT_INV_ARPPRO,
103 ARPT_INV_ARPPRO)) 101 (arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro))
104 return 0; 102 return 0;
105 103
106 if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln, 104 if (NF_INVF(arpinfo, ARPT_INV_ARPHLN,
107 ARPT_INV_ARPHLN)) 105 (arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln))
108 return 0; 106 return 0;
109 107
110 src_devaddr = arpptr; 108 src_devaddr = arpptr;
@@ -115,31 +113,32 @@ static inline int arp_packet_match(const struct arphdr *arphdr,
115 arpptr += dev->addr_len; 113 arpptr += dev->addr_len;
116 memcpy(&tgt_ipaddr, arpptr, sizeof(u32)); 114 memcpy(&tgt_ipaddr, arpptr, sizeof(u32));
117 115
118 if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len), 116 if (NF_INVF(arpinfo, ARPT_INV_SRCDEVADDR,
119 ARPT_INV_SRCDEVADDR) || 117 arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr,
120 FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len), 118 dev->addr_len)) ||
121 ARPT_INV_TGTDEVADDR)) 119 NF_INVF(arpinfo, ARPT_INV_TGTDEVADDR,
120 arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr,
121 dev->addr_len)))
122 return 0; 122 return 0;
123 123
124 if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr, 124 if (NF_INVF(arpinfo, ARPT_INV_SRCIP,
125 ARPT_INV_SRCIP) || 125 (src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr) ||
126 FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr), 126 NF_INVF(arpinfo, ARPT_INV_TGTIP,
127 ARPT_INV_TGTIP)) 127 (tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr))
128 return 0; 128 return 0;
129 129
130 /* Look for ifname matches. */ 130 /* Look for ifname matches. */
131 ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask); 131 ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask);
132 132
133 if (FWINV(ret != 0, ARPT_INV_VIA_IN)) 133 if (NF_INVF(arpinfo, ARPT_INV_VIA_IN, ret != 0))
134 return 0; 134 return 0;
135 135
136 ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask); 136 ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask);
137 137
138 if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) 138 if (NF_INVF(arpinfo, ARPT_INV_VIA_OUT, ret != 0))
139 return 0; 139 return 0;
140 140
141 return 1; 141 return 1;
142#undef FWINV
143} 142}
144 143
145static inline int arp_checkentry(const struct arpt_arp *arp) 144static inline int arp_checkentry(const struct arpt_arp *arp)
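
NF_INVF replaces the per-file FWINV macros here and in the ip_tables and ip6_tables hunks below: it XORs a match result with the presence of the corresponding invert flag, so plain and inverted matches share one expression. Its definition as added to <linux/netfilter.h> in this series, plus a quick userspace truth-table check:

    #include <stdio.h>

    /* definition from <linux/netfilter.h> */
    #define NF_INVF(ptr, flag, boolean) \
            ((boolean) ^ !!((ptr)->invflags & (flag)))

    struct demo_info { unsigned int invflags; };
    #define DEMO_INV_PROTO 0x01

    int main(void)
    {
            struct demo_info plain = { .invflags = 0 };
            struct demo_info inv = { .invflags = DEMO_INV_PROTO };

            /* a true condition matches normally, fails when inverted */
            printf("%d %d\n", NF_INVF(&plain, DEMO_INV_PROTO, 1),
                              NF_INVF(&inv, DEMO_INV_PROTO, 1));   /* 1 0 */
            /* a false condition fails normally, matches when inverted */
            printf("%d %d\n", NF_INVF(&plain, DEMO_INV_PROTO, 0),
                              NF_INVF(&inv, DEMO_INV_PROTO, 0));   /* 0 1 */
            return 0;
    }
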
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 54906e0e8e0c..f0df66f54ce6 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -58,32 +58,31 @@ ip_packet_match(const struct iphdr *ip,
58{ 58{
59 unsigned long ret; 59 unsigned long ret;
60 60
61#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg))) 61 if (NF_INVF(ipinfo, IPT_INV_SRCIP,
62 62 (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
63 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr, 63 NF_INVF(ipinfo, IPT_INV_DSTIP,
64 IPT_INV_SRCIP) || 64 (ip->daddr & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
65 FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
66 IPT_INV_DSTIP))
67 return false; 65 return false;
68 66
69 ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask); 67 ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);
70 68
71 if (FWINV(ret != 0, IPT_INV_VIA_IN)) 69 if (NF_INVF(ipinfo, IPT_INV_VIA_IN, ret != 0))
72 return false; 70 return false;
73 71
74 ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask); 72 ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);
75 73
76 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) 74 if (NF_INVF(ipinfo, IPT_INV_VIA_OUT, ret != 0))
77 return false; 75 return false;
78 76
79 /* Check specific protocol */ 77 /* Check specific protocol */
80 if (ipinfo->proto && 78 if (ipinfo->proto &&
81 FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) 79 NF_INVF(ipinfo, IPT_INV_PROTO, ip->protocol != ipinfo->proto))
82 return false; 80 return false;
83 81
84 /* If we have a fragment rule but the packet is not a fragment 82 /* If we have a fragment rule but the packet is not a fragment
85 * then we return zero */ 83 * then we return zero */
86 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) 84 if (NF_INVF(ipinfo, IPT_INV_FRAG,
85 (ipinfo->flags & IPT_F_FRAG) && !isfrag))
87 return false; 86 return false;
88 87
89 return true; 88 return true;
@@ -122,7 +121,6 @@ static inline bool unconditional(const struct ipt_entry *e)
122 121
123 return e->target_offset == sizeof(struct ipt_entry) && 122 return e->target_offset == sizeof(struct ipt_entry) &&
124 memcmp(&e->ip, &uncond, sizeof(uncond)) == 0; 123 memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
125#undef FWINV
126} 124}
127 125
128/* for const-correctness */ 126/* for const-correctness */
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 57fc97cdac70..aebdb337fd7e 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -87,10 +87,6 @@ iptable_mangle_hook(void *priv,
87{ 87{
88 if (state->hook == NF_INET_LOCAL_OUT) 88 if (state->hook == NF_INET_LOCAL_OUT)
89 return ipt_mangle_out(skb, state); 89 return ipt_mangle_out(skb, state);
90 if (state->hook == NF_INET_POST_ROUTING)
91 return ipt_do_table(skb, state,
92 state->net->ipv4.iptable_mangle);
93 /* PREROUTING/INPUT/FORWARD: */
94 return ipt_do_table(skb, state, state->net->ipv4.iptable_mangle); 90 return ipt_do_table(skb, state, state->net->ipv4.iptable_mangle);
95} 91}
96 92
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index b6ea57ec5e14..fd8220213afc 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -24,6 +24,9 @@ const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
24 if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) 24 if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
25 return NULL; 25 return NULL;
26 26
27 if (ip_hdr(oldskb)->protocol != IPPROTO_TCP)
28 return NULL;
29
27 oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb), 30 oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
28 sizeof(struct tcphdr), _oth); 31 sizeof(struct tcphdr), _oth);
29 if (oth == NULL) 32 if (oth == NULL)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5c7ed147449c..032a96d78c99 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2277,6 +2277,38 @@ static inline bool tcp_can_repair_sock(const struct sock *sk)
2277 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED)); 2277 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
2278} 2278}
2279 2279
2280static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int len)
2281{
2282 struct tcp_repair_window opt;
2283
2284 if (!tp->repair)
2285 return -EPERM;
2286
2287 if (len != sizeof(opt))
2288 return -EINVAL;
2289
2290 if (copy_from_user(&opt, optbuf, sizeof(opt)))
2291 return -EFAULT;
2292
2293 if (opt.max_window < opt.snd_wnd)
2294 return -EINVAL;
2295
2296 if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd))
2297 return -EINVAL;
2298
2299 if (after(opt.rcv_wup, tp->rcv_nxt))
2300 return -EINVAL;
2301
2302 tp->snd_wl1 = opt.snd_wl1;
2303 tp->snd_wnd = opt.snd_wnd;
2304 tp->max_window = opt.max_window;
2305
2306 tp->rcv_wnd = opt.rcv_wnd;
2307 tp->rcv_wup = opt.rcv_wup;
2308
2309 return 0;
2310}
2311
2280static int tcp_repair_options_est(struct tcp_sock *tp, 2312static int tcp_repair_options_est(struct tcp_sock *tp,
2281 struct tcp_repair_opt __user *optbuf, unsigned int len) 2313 struct tcp_repair_opt __user *optbuf, unsigned int len)
2282{ 2314{
@@ -2604,6 +2636,9 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2604 else 2636 else
2605 tp->tsoffset = val - tcp_time_stamp; 2637 tp->tsoffset = val - tcp_time_stamp;
2606 break; 2638 break;
2639 case TCP_REPAIR_WINDOW:
2640 err = tcp_repair_set_window(tp, optval, optlen);
2641 break;
2607 case TCP_NOTSENT_LOWAT: 2642 case TCP_NOTSENT_LOWAT:
2608 tp->notsent_lowat = val; 2643 tp->notsent_lowat = val;
2609 sk->sk_write_space(sk); 2644 sk->sk_write_space(sk);
@@ -2860,6 +2895,28 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
2860 return -EINVAL; 2895 return -EINVAL;
2861 break; 2896 break;
2862 2897
2898 case TCP_REPAIR_WINDOW: {
2899 struct tcp_repair_window opt;
2900
2901 if (get_user(len, optlen))
2902 return -EFAULT;
2903
2904 if (len != sizeof(opt))
2905 return -EINVAL;
2906
2907 if (!tp->repair)
2908 return -EPERM;
2909
2910 opt.snd_wl1 = tp->snd_wl1;
2911 opt.snd_wnd = tp->snd_wnd;
2912 opt.max_window = tp->max_window;
2913 opt.rcv_wnd = tp->rcv_wnd;
2914 opt.rcv_wup = tp->rcv_wup;
2915
2916 if (copy_to_user(optval, &opt, len))
2917 return -EFAULT;
2918 return 0;
2919 }
2863 case TCP_QUEUE_SEQ: 2920 case TCP_QUEUE_SEQ:
2864 if (tp->repair_queue == TCP_SEND_QUEUE) 2921 if (tp->repair_queue == TCP_SEND_QUEUE)
2865 val = tp->write_seq; 2922 val = tp->write_seq;
@@ -2969,8 +3026,18 @@ static void __tcp_alloc_md5sig_pool(void)
2969 return; 3026 return;
2970 3027
2971 for_each_possible_cpu(cpu) { 3028 for_each_possible_cpu(cpu) {
3029 void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch;
2972 struct ahash_request *req; 3030 struct ahash_request *req;
2973 3031
3032 if (!scratch) {
3033 scratch = kmalloc_node(sizeof(union tcp_md5sum_block) +
3034 sizeof(struct tcphdr),
3035 GFP_KERNEL,
3036 cpu_to_node(cpu));
3037 if (!scratch)
3038 return;
3039 per_cpu(tcp_md5sig_pool, cpu).scratch = scratch;
3040 }
2974 if (per_cpu(tcp_md5sig_pool, cpu).md5_req) 3041 if (per_cpu(tcp_md5sig_pool, cpu).md5_req)
2975 continue; 3042 continue;
2976 3043
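
TCP_REPAIR_WINDOW exposes the five window-tracking fields so checkpoint/restore tools (CRIU is the obvious user) can carry them across a migration. Both directions demand repair mode (-EPERM otherwise) and an exact-size buffer, and the setter additionally sanity-checks the values, e.g. max_window must not be smaller than snd_wnd. A userspace sketch of the save/restore pair, with guarded fallback definitions for older headers:

    #include <stdio.h>
    #include <string.h>
    #include <linux/types.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    #ifndef TCP_REPAIR_WINDOW
    #define TCP_REPAIR_WINDOW 29    /* uapi value from this series */
    struct tcp_repair_window {
            __u32 snd_wl1, snd_wnd, max_window;
            __u32 rcv_wnd, rcv_wup;
    };
    #endif

    /* fd must already be in repair mode (TCP_REPAIR set to 1) */
    static int save_window(int fd, struct tcp_repair_window *w)
    {
            socklen_t len = sizeof(*w);

            memset(w, 0, sizeof(*w));
            if (getsockopt(fd, IPPROTO_TCP, TCP_REPAIR_WINDOW, w, &len))
                    return -1;
            printf("snd_wnd=%u rcv_wnd=%u max_window=%u\n",
                   w->snd_wnd, w->rcv_wnd, w->max_window);
            return 0;
    }

    static int restore_window(int fd, const struct tcp_repair_window *w)
    {
            return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_WINDOW,
                              w, sizeof(*w));
    }
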
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3708de2a6683..32b048e524d6 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1018,27 +1018,28 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1018 GFP_KERNEL); 1018 GFP_KERNEL);
1019} 1019}
1020 1020
1021static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, 1021static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1022 __be32 daddr, __be32 saddr, int nbytes) 1022 __be32 daddr, __be32 saddr,
1023 const struct tcphdr *th, int nbytes)
1023{ 1024{
1024 struct tcp4_pseudohdr *bp; 1025 struct tcp4_pseudohdr *bp;
1025 struct scatterlist sg; 1026 struct scatterlist sg;
1027 struct tcphdr *_th;
1026 1028
1027 bp = &hp->md5_blk.ip4; 1029 bp = hp->scratch;
1028
1029 /*
1030 * 1. the TCP pseudo-header (in the order: source IP address,
1031 * destination IP address, zero-padded protocol number, and
1032 * segment length)
1033 */
1034 bp->saddr = saddr; 1030 bp->saddr = saddr;
1035 bp->daddr = daddr; 1031 bp->daddr = daddr;
1036 bp->pad = 0; 1032 bp->pad = 0;
1037 bp->protocol = IPPROTO_TCP; 1033 bp->protocol = IPPROTO_TCP;
1038 bp->len = cpu_to_be16(nbytes); 1034 bp->len = cpu_to_be16(nbytes);
1039 1035
1040 sg_init_one(&sg, bp, sizeof(*bp)); 1036 _th = (struct tcphdr *)(bp + 1);
1041 ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp)); 1037 memcpy(_th, th, sizeof(*th));
1038 _th->check = 0;
1039
1040 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1041 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1042 sizeof(*bp) + sizeof(*th));
1042 return crypto_ahash_update(hp->md5_req); 1043 return crypto_ahash_update(hp->md5_req);
1043} 1044}
1044 1045
@@ -1055,9 +1056,7 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1055 1056
1056 if (crypto_ahash_init(req)) 1057 if (crypto_ahash_init(req))
1057 goto clear_hash; 1058 goto clear_hash;
1058 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2)) 1059 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1059 goto clear_hash;
1060 if (tcp_md5_hash_header(hp, th))
1061 goto clear_hash; 1060 goto clear_hash;
1062 if (tcp_md5_hash_key(hp, key)) 1061 if (tcp_md5_hash_key(hp, key))
1063 goto clear_hash; 1062 goto clear_hash;
@@ -1101,9 +1100,7 @@ int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1101 if (crypto_ahash_init(req)) 1100 if (crypto_ahash_init(req))
1102 goto clear_hash; 1101 goto clear_hash;
1103 1102
1104 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len)) 1103 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1105 goto clear_hash;
1106 if (tcp_md5_hash_header(hp, th))
1107 goto clear_hash; 1104 goto clear_hash;
1108 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2)) 1105 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1109 goto clear_hash; 1106 goto clear_hash;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b1bcba0563f2..b26aa870adc0 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2753,7 +2753,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
2753 struct tcp_sock *tp = tcp_sk(sk); 2753 struct tcp_sock *tp = tcp_sk(sk);
2754 struct sk_buff *skb; 2754 struct sk_buff *skb;
2755 struct sk_buff *hole = NULL; 2755 struct sk_buff *hole = NULL;
2756 u32 last_lost; 2756 u32 max_segs, last_lost;
2757 int mib_idx; 2757 int mib_idx;
2758 int fwd_rexmitting = 0; 2758 int fwd_rexmitting = 0;
2759 2759
@@ -2773,6 +2773,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
2773 last_lost = tp->snd_una; 2773 last_lost = tp->snd_una;
2774 } 2774 }
2775 2775
2776 max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk));
2776 tcp_for_write_queue_from(skb, sk) { 2777 tcp_for_write_queue_from(skb, sk) {
2777 __u8 sacked = TCP_SKB_CB(skb)->sacked; 2778 __u8 sacked = TCP_SKB_CB(skb)->sacked;
2778 int segs; 2779 int segs;
@@ -2786,6 +2787,10 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
2786 segs = tp->snd_cwnd - tcp_packets_in_flight(tp); 2787 segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
2787 if (segs <= 0) 2788 if (segs <= 0)
2788 return; 2789 return;
2790 /* In case tcp_shift_skb_data() has aggregated large skbs,
2791 * we need to make sure not to send overly big TSO packets
2792 */
2793 segs = min_t(int, segs, max_segs);
2789 2794
2790 if (fwd_rexmitting) { 2795 if (fwd_rexmitting) {
2791begin_fwd: 2796begin_fwd:
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 0ff31d97d485..ca5e8ea29538 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -391,9 +391,9 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum)
391 return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr); 391 return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
392} 392}
393 393
394static inline int compute_score(struct sock *sk, struct net *net, 394static int compute_score(struct sock *sk, struct net *net,
395 __be32 saddr, unsigned short hnum, __be16 sport, 395 __be32 saddr, __be16 sport,
396 __be32 daddr, __be16 dport, int dif) 396 __be32 daddr, unsigned short hnum, int dif)
397{ 397{
398 int score; 398 int score;
399 struct inet_sock *inet; 399 struct inet_sock *inet;
@@ -434,52 +434,6 @@ static inline int compute_score(struct sock *sk, struct net *net,
434 return score; 434 return score;
435} 435}
436 436
437/*
438 * In this second variant, we check (daddr, dport) matches (inet_rcv_sadd, inet_num)
439 */
440static inline int compute_score2(struct sock *sk, struct net *net,
441 __be32 saddr, __be16 sport,
442 __be32 daddr, unsigned int hnum, int dif)
443{
444 int score;
445 struct inet_sock *inet;
446
447 if (!net_eq(sock_net(sk), net) ||
448 ipv6_only_sock(sk))
449 return -1;
450
451 inet = inet_sk(sk);
452
453 if (inet->inet_rcv_saddr != daddr ||
454 inet->inet_num != hnum)
455 return -1;
456
457 score = (sk->sk_family == PF_INET) ? 2 : 1;
458
459 if (inet->inet_daddr) {
460 if (inet->inet_daddr != saddr)
461 return -1;
462 score += 4;
463 }
464
465 if (inet->inet_dport) {
466 if (inet->inet_dport != sport)
467 return -1;
468 score += 4;
469 }
470
471 if (sk->sk_bound_dev_if) {
472 if (sk->sk_bound_dev_if != dif)
473 return -1;
474 score += 4;
475 }
476
477 if (sk->sk_incoming_cpu == raw_smp_processor_id())
478 score++;
479
480 return score;
481}
482
483static u32 udp_ehashfn(const struct net *net, const __be32 laddr, 437static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
484 const __u16 lport, const __be32 faddr, 438 const __u16 lport, const __be32 faddr,
485 const __be16 fport) 439 const __be16 fport)
@@ -492,11 +446,11 @@ static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
492 udp_ehash_secret + net_hash_mix(net)); 446 udp_ehash_secret + net_hash_mix(net));
493} 447}
494 448
495/* called with read_rcu_lock() */ 449/* called with rcu_read_lock() */
496static struct sock *udp4_lib_lookup2(struct net *net, 450static struct sock *udp4_lib_lookup2(struct net *net,
497 __be32 saddr, __be16 sport, 451 __be32 saddr, __be16 sport,
498 __be32 daddr, unsigned int hnum, int dif, 452 __be32 daddr, unsigned int hnum, int dif,
499 struct udp_hslot *hslot2, unsigned int slot2, 453 struct udp_hslot *hslot2,
500 struct sk_buff *skb) 454 struct sk_buff *skb)
501{ 455{
502 struct sock *sk, *result; 456 struct sock *sk, *result;
@@ -506,7 +460,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
506 result = NULL; 460 result = NULL;
507 badness = 0; 461 badness = 0;
508 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { 462 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
509 score = compute_score2(sk, net, saddr, sport, 463 score = compute_score(sk, net, saddr, sport,
510 daddr, hnum, dif); 464 daddr, hnum, dif);
511 if (score > badness) { 465 if (score > badness) {
512 reuseport = sk->sk_reuseport; 466 reuseport = sk->sk_reuseport;
@@ -554,17 +508,22 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
554 508
555 result = udp4_lib_lookup2(net, saddr, sport, 509 result = udp4_lib_lookup2(net, saddr, sport,
556 daddr, hnum, dif, 510 daddr, hnum, dif,
557 hslot2, slot2, skb); 511 hslot2, skb);
558 if (!result) { 512 if (!result) {
513 unsigned int old_slot2 = slot2;
559 hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum); 514 hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
560 slot2 = hash2 & udptable->mask; 515 slot2 = hash2 & udptable->mask;
516 /* avoid searching the same slot again. */
517 if (unlikely(slot2 == old_slot2))
518 return result;
519
561 hslot2 = &udptable->hash2[slot2]; 520 hslot2 = &udptable->hash2[slot2];
562 if (hslot->count < hslot2->count) 521 if (hslot->count < hslot2->count)
563 goto begin; 522 goto begin;
564 523
565 result = udp4_lib_lookup2(net, saddr, sport, 524 result = udp4_lib_lookup2(net, saddr, sport,
566 htonl(INADDR_ANY), hnum, dif, 525 daddr, hnum, dif,
567 hslot2, slot2, skb); 526 hslot2, skb);
568 } 527 }
569 return result; 528 return result;
570 } 529 }
@@ -572,8 +531,8 @@ begin:
572 result = NULL; 531 result = NULL;
573 badness = 0; 532 badness = 0;
574 sk_for_each_rcu(sk, &hslot->head) { 533 sk_for_each_rcu(sk, &hslot->head) {
575 score = compute_score(sk, net, saddr, hnum, sport, 534 score = compute_score(sk, net, saddr, sport,
576 daddr, dport, dif); 535 daddr, hnum, dif);
577 if (score > badness) { 536 if (score > badness) {
578 reuseport = sk->sk_reuseport; 537 reuseport = sk->sk_reuseport;
579 if (reuseport) { 538 if (reuseport) {
@@ -1755,8 +1714,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
1755 return err; 1714 return err;
1756 } 1715 }
1757 1716
1758 return skb_checksum_init_zero_check(skb, proto, uh->check, 1717 /* Note, we are only interested in != 0 or == 0, thus the
1759 inet_compute_pseudo); 1718 * force to int.
1719 */
1720 return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
1721 inet_compute_pseudo);
1760} 1722}
1761 1723
1762/* 1724/*
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index fd11f5856ce8..bd59c343d35f 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -98,7 +98,7 @@ static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
98 98
99 if (!(type & ICMPV6_INFOMSG_MASK)) 99 if (!(type & ICMPV6_INFOMSG_MASK))
100 if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST) 100 if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
101 ping_err(skb, offset, info); 101 ping_err(skb, offset, ntohl(info));
102} 102}
103 103
104static int icmpv6_rcv(struct sk_buff *skb); 104static int icmpv6_rcv(struct sk_buff *skb);
diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
index b2025bf3da4a..c0cbcb259f5a 100644
--- a/net/ipv6/ip6_checksum.c
+++ b/net/ipv6/ip6_checksum.c
@@ -78,9 +78,12 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
78 * we accept a checksum of zero here. When we find the socket 78 * we accept a checksum of zero here. When we find the socket
79 * for the UDP packet we'll check if that socket allows zero checksum 79 * for the UDP packet we'll check if that socket allows zero checksum
80 * for IPv6 (set by socket option). 80 * for IPv6 (set by socket option).
81 *
82 * Note, we are only interested in != 0 or == 0, thus the
83 * force to int.
81 */ 84 */
82 return skb_checksum_init_zero_check(skb, proto, uh->check, 85 return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
83 ip6_compute_pseudo); 86 ip6_compute_pseudo);
84} 87}
85EXPORT_SYMBOL(udp6_csum_init); 88EXPORT_SYMBOL(udp6_csum_init);
86 89
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 1bcef2369d64..771be1fa4176 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -177,6 +177,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
177 } 177 }
178 } 178 }
179 179
180 free_percpu(non_pcpu_rt->rt6i_pcpu);
180 non_pcpu_rt->rt6i_pcpu = NULL; 181 non_pcpu_rt->rt6i_pcpu = NULL;
181} 182}
182 183
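
rt6_free_pcpu() freed each CPU's cached route but then only cleared the rt6i_pcpu pointer, leaking the percpu array that alloc_percpu() handed out; the added free_percpu() closes that leak. The alloc/free pairing in miniature (a generic sketch, not route code):

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/slab.h>

    struct demo_entry;

    struct demo_obj {
            struct demo_entry * __percpu *pcpu;     /* hypothetical payload */
    };

    static void demo_free(struct demo_obj *obj)
    {
            int cpu;

            /* 1) release whatever each CPU slot points at */
            for_each_possible_cpu(cpu) {
                    struct demo_entry **slot = per_cpu_ptr(obj->pcpu, cpu);

                    kfree(*slot);
                    *slot = NULL;
            }

            /* 2) release the percpu array itself, the step the old
             *    rt6_free_pcpu() forgot
             */
            free_percpu(obj->pcpu);
            obj->pcpu = NULL;
    }
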
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index fdc9de276ab1..776d145113e1 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -468,7 +468,7 @@ static int gre_rcv(struct sk_buff *skb)
468 bool csum_err = false; 468 bool csum_err = false;
469 int hdr_len; 469 int hdr_len;
470 470
471 hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6)); 471 hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
472 if (hdr_len < 0) 472 if (hdr_len < 0)
473 goto drop; 473 goto drop;
474 474
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index f2e2013f8346..487ef3bc7bbc 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1074,6 +1074,7 @@ static struct mfc6_cache *ip6mr_cache_alloc(void)
1074 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); 1074 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
1075 if (!c) 1075 if (!c)
1076 return NULL; 1076 return NULL;
1077 c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
1077 c->mfc_un.res.minvif = MAXMIFS; 1078 c->mfc_un.res.minvif = MAXMIFS;
1078 return c; 1079 return c;
1079} 1080}
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 63e06c3dd319..61ed95054efa 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -73,22 +73,22 @@ ip6_packet_match(const struct sk_buff *skb,
73 unsigned long ret; 73 unsigned long ret;
74 const struct ipv6hdr *ipv6 = ipv6_hdr(skb); 74 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
75 75
76#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg))) 76 if (NF_INVF(ip6info, IP6T_INV_SRCIP,
77 77 ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
78 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk, 78 &ip6info->src)) ||
79 &ip6info->src), IP6T_INV_SRCIP) || 79 NF_INVF(ip6info, IP6T_INV_DSTIP,
80 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk, 80 ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
81 &ip6info->dst), IP6T_INV_DSTIP)) 81 &ip6info->dst)))
82 return false; 82 return false;
83 83
84 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask); 84 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
85 85
86 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) 86 if (NF_INVF(ip6info, IP6T_INV_VIA_IN, ret != 0))
87 return false; 87 return false;
88 88
89 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask); 89 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
90 90
91 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) 91 if (NF_INVF(ip6info, IP6T_INV_VIA_OUT, ret != 0))
92 return false; 92 return false;
93 93
94/* ... might want to do something with class and flowlabel here ... */ 94/* ... might want to do something with class and flowlabel here ... */
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index cb2b28883252..2b1a9dcdbcb3 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -83,10 +83,6 @@ ip6table_mangle_hook(void *priv, struct sk_buff *skb,
83{ 83{
84 if (state->hook == NF_INET_LOCAL_OUT) 84 if (state->hook == NF_INET_LOCAL_OUT)
85 return ip6t_mangle_out(skb, state); 85 return ip6t_mangle_out(skb, state);
86 if (state->hook == NF_INET_POST_ROUTING)
87 return ip6t_do_table(skb, state,
88 state->net->ipv6.ip6table_mangle);
89 /* INPUT/FORWARD */
90 return ip6t_do_table(skb, state, state->net->ipv6.ip6table_mangle); 86 return ip6t_do_table(skb, state, state->net->ipv6.ip6table_mangle);
91} 87}
92 88
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 08b77f421268..49817555449e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1783,7 +1783,7 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
1783 }; 1783 };
1784 struct fib6_table *table; 1784 struct fib6_table *table;
1785 struct rt6_info *rt; 1785 struct rt6_info *rt;
1786 int flags = 0; 1786 int flags = RT6_LOOKUP_F_IFACE;
1787 1787
1788 table = fib6_get_table(net, cfg->fc_table); 1788 table = fib6_get_table(net, cfg->fc_table);
1789 if (!table) 1789 if (!table)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index cdd714690f95..917a5cd4b8fc 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -526,13 +526,13 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
526 526
527 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { 527 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
528 ipv4_update_pmtu(skb, dev_net(skb->dev), info, 528 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
529 t->parms.link, 0, IPPROTO_IPV6, 0); 529 t->parms.link, 0, iph->protocol, 0);
530 err = 0; 530 err = 0;
531 goto out; 531 goto out;
532 } 532 }
533 if (type == ICMP_REDIRECT) { 533 if (type == ICMP_REDIRECT) {
534 ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0, 534 ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
535 IPPROTO_IPV6, 0); 535 iph->protocol, 0);
536 err = 0; 536 err = 0;
537 goto out; 537 goto out;
538 } 538 }
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f36c2d076fce..37cf91323319 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -526,26 +526,33 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
526 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); 526 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
527} 527}
528 528
529static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, 529static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
530 const struct in6_addr *daddr, 530 const struct in6_addr *daddr,
531 const struct in6_addr *saddr, int nbytes) 531 const struct in6_addr *saddr,
532 const struct tcphdr *th, int nbytes)
532{ 533{
533 struct tcp6_pseudohdr *bp; 534 struct tcp6_pseudohdr *bp;
534 struct scatterlist sg; 535 struct scatterlist sg;
536 struct tcphdr *_th;
535 537
536 bp = &hp->md5_blk.ip6; 538 bp = hp->scratch;
537 /* 1. TCP pseudo-header (RFC2460) */ 539 /* 1. TCP pseudo-header (RFC2460) */
538 bp->saddr = *saddr; 540 bp->saddr = *saddr;
539 bp->daddr = *daddr; 541 bp->daddr = *daddr;
540 bp->protocol = cpu_to_be32(IPPROTO_TCP); 542 bp->protocol = cpu_to_be32(IPPROTO_TCP);
541 bp->len = cpu_to_be32(nbytes); 543 bp->len = cpu_to_be32(nbytes);
542 544
543 sg_init_one(&sg, bp, sizeof(*bp)); 545 _th = (struct tcphdr *)(bp + 1);
544 ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp)); 546 memcpy(_th, th, sizeof(*th));
547 _th->check = 0;
548
549 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
550 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
551 sizeof(*bp) + sizeof(*th));
545 return crypto_ahash_update(hp->md5_req); 552 return crypto_ahash_update(hp->md5_req);
546} 553}
547 554
548static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, 555static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
549 const struct in6_addr *daddr, struct in6_addr *saddr, 556 const struct in6_addr *daddr, struct in6_addr *saddr,
550 const struct tcphdr *th) 557 const struct tcphdr *th)
551{ 558{
@@ -559,9 +566,7 @@ static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
559 566
560 if (crypto_ahash_init(req)) 567 if (crypto_ahash_init(req))
561 goto clear_hash; 568 goto clear_hash;
562 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2)) 569 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
563 goto clear_hash;
564 if (tcp_md5_hash_header(hp, th))
565 goto clear_hash; 570 goto clear_hash;
566 if (tcp_md5_hash_key(hp, key)) 571 if (tcp_md5_hash_key(hp, key))
567 goto clear_hash; 572 goto clear_hash;
@@ -606,9 +611,7 @@ static int tcp_v6_md5_hash_skb(char *md5_hash,
606 if (crypto_ahash_init(req)) 611 if (crypto_ahash_init(req))
607 goto clear_hash; 612 goto clear_hash;
608 613
609 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len)) 614 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
610 goto clear_hash;
611 if (tcp_md5_hash_header(hp, th))
612 goto clear_hash; 615 goto clear_hash;
613 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2)) 616 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
614 goto clear_hash; 617 goto clear_hash;
@@ -738,7 +741,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
738static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq, 741static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
739 u32 ack, u32 win, u32 tsval, u32 tsecr, 742 u32 ack, u32 win, u32 tsval, u32 tsecr,
740 int oif, struct tcp_md5sig_key *key, int rst, 743 int oif, struct tcp_md5sig_key *key, int rst,
741 u8 tclass, u32 label) 744 u8 tclass, __be32 label)
742{ 745{
743 const struct tcphdr *th = tcp_hdr(skb); 746 const struct tcphdr *th = tcp_hdr(skb);
744 struct tcphdr *t1; 747 struct tcphdr *t1;
@@ -911,7 +914,7 @@ out:
911static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq, 914static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
912 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, 915 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
913 struct tcp_md5sig_key *key, u8 tclass, 916 struct tcp_md5sig_key *key, u8 tclass,
914 u32 label) 917 __be32 label)
915{ 918{
916 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0, 919 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
917 tclass, label); 920 tclass, label);
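
The tcp_ipv6.c change collapses what were two ahash updates (pseudo-header, then TCP header) into one tcp_v6_md5_hash_headers() call: the pseudo-header and a copy of the TCP header with its checksum zeroed are laid out back to back in the scratch area and hashed as a single scatterlist entry. A rough userspace model of that buffer layout, with deliberately simplified struct definitions (pseudohdr6, tcphdr_min) that only approximate the kernel's types:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pseudohdr6 {			/* cf. struct tcp6_pseudohdr */
	uint8_t  saddr[16];
	uint8_t  daddr[16];
	uint32_t protocol;		/* IPPROTO_TCP */
	uint32_t len;			/* length being authenticated */
};

struct tcphdr_min {			/* cf. struct tcphdr, abridged */
	uint16_t source, dest;
	uint32_t seq, ack_seq;
	uint16_t flags_off, window;
	uint16_t check;			/* must be hashed as zero */
	uint16_t urg_ptr;
};

/* Build the contiguous block handed to the hash in one update call. */
static size_t build_md5_block(uint8_t *scratch,
			      const struct pseudohdr6 *ph,
			      const struct tcphdr_min *th)
{
	struct tcphdr_min tmp = *th;

	tmp.check = 0;			/* checksum excluded from MD5 input */
	memcpy(scratch, ph, sizeof(*ph));
	memcpy(scratch + sizeof(*ph), &tmp, sizeof(tmp));
	return sizeof(*ph) + sizeof(tmp);
}

int main(void)
{
	uint8_t scratch[sizeof(struct pseudohdr6) + sizeof(struct tcphdr_min)];
	struct pseudohdr6 ph = { .protocol = 6, .len = 20 };
	struct tcphdr_min th = { .check = 0xbeef };

	printf("hash input: %zu bytes, one update\n",
	       build_md5_block(scratch, &ph, &th));
	return 0;
}

One buffer also means one crypto_ahash_update() per signed segment instead of two, which is the point of hashing from the wider scratch area.
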
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4bb5c13777f1..0a71a312d0d8 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -115,11 +115,10 @@ static void udp_v6_rehash(struct sock *sk)
115 udp_lib_rehash(sk, new_hash); 115 udp_lib_rehash(sk, new_hash);
116} 116}
117 117
118static inline int compute_score(struct sock *sk, struct net *net, 118static int compute_score(struct sock *sk, struct net *net,
119 unsigned short hnum, 119 const struct in6_addr *saddr, __be16 sport,
120 const struct in6_addr *saddr, __be16 sport, 120 const struct in6_addr *daddr, unsigned short hnum,
121 const struct in6_addr *daddr, __be16 dport, 121 int dif)
122 int dif)
123{ 122{
124 int score; 123 int score;
125 struct inet_sock *inet; 124 struct inet_sock *inet;
@@ -162,54 +161,11 @@ static inline int compute_score(struct sock *sk, struct net *net,
162 return score; 161 return score;
163} 162}
164 163
165static inline int compute_score2(struct sock *sk, struct net *net, 164/* called with rcu_read_lock() */
166 const struct in6_addr *saddr, __be16 sport,
167 const struct in6_addr *daddr,
168 unsigned short hnum, int dif)
169{
170 int score;
171 struct inet_sock *inet;
172
173 if (!net_eq(sock_net(sk), net) ||
174 udp_sk(sk)->udp_port_hash != hnum ||
175 sk->sk_family != PF_INET6)
176 return -1;
177
178 if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
179 return -1;
180
181 score = 0;
182 inet = inet_sk(sk);
183
184 if (inet->inet_dport) {
185 if (inet->inet_dport != sport)
186 return -1;
187 score++;
188 }
189
190 if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
191 if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
192 return -1;
193 score++;
194 }
195
196 if (sk->sk_bound_dev_if) {
197 if (sk->sk_bound_dev_if != dif)
198 return -1;
199 score++;
200 }
201
202 if (sk->sk_incoming_cpu == raw_smp_processor_id())
203 score++;
204
205 return score;
206}
207
208/* called with read_rcu_lock() */
209static struct sock *udp6_lib_lookup2(struct net *net, 165static struct sock *udp6_lib_lookup2(struct net *net,
210 const struct in6_addr *saddr, __be16 sport, 166 const struct in6_addr *saddr, __be16 sport,
211 const struct in6_addr *daddr, unsigned int hnum, int dif, 167 const struct in6_addr *daddr, unsigned int hnum, int dif,
212 struct udp_hslot *hslot2, unsigned int slot2, 168 struct udp_hslot *hslot2,
213 struct sk_buff *skb) 169 struct sk_buff *skb)
214{ 170{
215 struct sock *sk, *result; 171 struct sock *sk, *result;
@@ -219,7 +175,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
219 result = NULL; 175 result = NULL;
220 badness = -1; 176 badness = -1;
221 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { 177 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
222 score = compute_score2(sk, net, saddr, sport, 178 score = compute_score(sk, net, saddr, sport,
223 daddr, hnum, dif); 179 daddr, hnum, dif);
224 if (score > badness) { 180 if (score > badness) {
225 reuseport = sk->sk_reuseport; 181 reuseport = sk->sk_reuseport;
@@ -268,17 +224,22 @@ struct sock *__udp6_lib_lookup(struct net *net,
268 224
269 result = udp6_lib_lookup2(net, saddr, sport, 225 result = udp6_lib_lookup2(net, saddr, sport,
270 daddr, hnum, dif, 226 daddr, hnum, dif,
271 hslot2, slot2, skb); 227 hslot2, skb);
272 if (!result) { 228 if (!result) {
229 unsigned int old_slot2 = slot2;
273 hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum); 230 hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
274 slot2 = hash2 & udptable->mask; 231 slot2 = hash2 & udptable->mask;
232 /* avoid searching the same slot again. */
233 if (unlikely(slot2 == old_slot2))
234 return result;
235
275 hslot2 = &udptable->hash2[slot2]; 236 hslot2 = &udptable->hash2[slot2];
276 if (hslot->count < hslot2->count) 237 if (hslot->count < hslot2->count)
277 goto begin; 238 goto begin;
278 239
279 result = udp6_lib_lookup2(net, saddr, sport, 240 result = udp6_lib_lookup2(net, saddr, sport,
280 &in6addr_any, hnum, dif, 241 daddr, hnum, dif,
281 hslot2, slot2, skb); 242 hslot2, skb);
282 } 243 }
283 return result; 244 return result;
284 } 245 }
@@ -286,7 +247,7 @@ begin:
286 result = NULL; 247 result = NULL;
287 badness = -1; 248 badness = -1;
288 sk_for_each_rcu(sk, &hslot->head) { 249 sk_for_each_rcu(sk, &hslot->head) {
289 score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif); 250 score = compute_score(sk, net, saddr, sport, daddr, hnum, dif);
290 if (score > badness) { 251 if (score > badness) {
291 reuseport = sk->sk_reuseport; 252 reuseport = sk->sk_reuseport;
292 if (reuseport) { 253 if (reuseport) {
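
With compute_score2() removed, a single scoring routine serves both the port-hash and the port+address-hash lookup paths, and the wildcard retry passes the real destination address instead of in6addr_any. The scoring idea, returning -1 on any hard mismatch and one point per attribute a socket pins down, condenses to the sketch below (struct usock and string addresses are toy stand-ins for the kernel's socket fields):

#include <stdio.h>
#include <string.h>

struct usock {				/* toy socket record */
	const char *rcv_saddr;		/* bound local address, "" = any */
	const char *daddr;		/* connected peer address, "" = any */
	int dport;			/* connected peer port, 0 = any */
	int bound_dev;			/* bound interface, 0 = any */
};

/* -1 disqualifies; otherwise the most specific socket scores highest. */
static int score(const struct usock *sk, const char *saddr, int sport,
		 const char *daddr, int dif)
{
	int s = 0;

	if (sk->rcv_saddr[0]) {
		if (strcmp(sk->rcv_saddr, daddr))
			return -1;
		s++;
	}
	if (sk->daddr[0]) {
		if (strcmp(sk->daddr, saddr))
			return -1;
		s++;
	}
	if (sk->dport) {
		if (sk->dport != sport)
			return -1;
		s++;
	}
	if (sk->bound_dev) {
		if (sk->bound_dev != dif)
			return -1;
		s++;
	}
	return s;
}

int main(void)
{
	struct usock any = { "", "", 0, 0 };
	struct usock bound = { "2001:db8::1", "", 0, 0 };

	printf("wildcard=%d bound=%d\n",
	       score(&any, "2001:db8::2", 53, "2001:db8::1", 1),
	       score(&bound, "2001:db8::2", 53, "2001:db8::1", 1));
	return 0;
}
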
diff --git a/net/kcm/kcmproc.c b/net/kcm/kcmproc.c
index 738008726cc6..fda7f4715c58 100644
--- a/net/kcm/kcmproc.c
+++ b/net/kcm/kcmproc.c
@@ -241,6 +241,7 @@ static const struct file_operations kcm_seq_fops = {
241 .open = kcm_seq_open, 241 .open = kcm_seq_open,
242 .read = seq_read, 242 .read = seq_read,
243 .llseek = seq_lseek, 243 .llseek = seq_lseek,
244 .release = seq_release_net,
244}; 245};
245 246
246static struct kcm_seq_muxinfo kcm_seq_muxinfo = { 247static struct kcm_seq_muxinfo kcm_seq_muxinfo = {
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 0b68ba730a06..cb39e05b166c 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1765,18 +1765,12 @@ static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
1765 if (!csock) 1765 if (!csock)
1766 return -ENOENT; 1766 return -ENOENT;
1767 1767
1768 prog = bpf_prog_get(info->bpf_fd); 1768 prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
1769 if (IS_ERR(prog)) { 1769 if (IS_ERR(prog)) {
1770 err = PTR_ERR(prog); 1770 err = PTR_ERR(prog);
1771 goto out; 1771 goto out;
1772 } 1772 }
1773 1773
1774 if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) {
1775 bpf_prog_put(prog);
1776 err = -EINVAL;
1777 goto out;
1778 }
1779
1780 err = kcm_attach(sock, csock, prog); 1774 err = kcm_attach(sock, csock, prog);
1781 if (err) { 1775 if (err) {
1782 bpf_prog_put(prog); 1776 bpf_prog_put(prog);
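
The kcm hunk replaces a fetch-then-check-then-put sequence with bpf_prog_get_type(), which refuses to hand out a reference to a program of the wrong type in the first place. The shape of that helper, reduced to a standalone sketch (struct prog, prog_get_type and the refcount here are illustrative, not the BPF API):

#include <errno.h>
#include <stdio.h>

enum prog_type { PROG_SOCKET_FILTER, PROG_OTHER };

struct prog {
	enum prog_type type;
	int refcnt;
};

static void prog_put(struct prog *p)
{
	p->refcnt--;
}

/* Fetch-with-check: the caller either gets a usable reference or an
 * error -- never a reference it must remember to drop on mismatch. */
static struct prog *prog_get_type(struct prog *p, enum prog_type want,
				  int *err)
{
	p->refcnt++;			/* stand-in for fd -> prog lookup */
	if (p->type != want) {
		prog_put(p);		/* mismatch: reference dropped here */
		*err = -EINVAL;
		return 0;
	}
	*err = 0;
	return p;
}

int main(void)
{
	struct prog filter = { PROG_SOCKET_FILTER, 0 };
	struct prog other = { PROG_OTHER, 0 };
	int err;

	if (prog_get_type(&filter, PROG_SOCKET_FILTER, &err))
		printf("filter attached (refcnt=%d)\n", filter.refcnt);
	if (!prog_get_type(&other, PROG_SOCKET_FILTER, &err))
		printf("other rejected, err=%d, refcnt=%d\n", err, other.refcnt);
	return 0;
}

Folding the check into the getter removes a class of leak: early-exit paths after the type test no longer need their own bpf_prog_put().
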
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 3a8f881b22f1..a9aff6079c42 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -306,6 +306,24 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
306 mutex_lock(&sta->ampdu_mlme.mtx); 306 mutex_lock(&sta->ampdu_mlme.mtx);
307 307
308 if (test_bit(tid, sta->ampdu_mlme.agg_session_valid)) { 308 if (test_bit(tid, sta->ampdu_mlme.agg_session_valid)) {
309 tid_agg_rx = rcu_dereference_protected(
310 sta->ampdu_mlme.tid_rx[tid],
311 lockdep_is_held(&sta->ampdu_mlme.mtx));
312
313 if (tid_agg_rx->dialog_token == dialog_token) {
314 ht_dbg_ratelimited(sta->sdata,
315 "updated AddBA Req from %pM on tid %u\n",
316 sta->sta.addr, tid);
317 /* We have no API to update the timeout value in the
318 * driver so reject the timeout update.
319 */
320 status = WLAN_STATUS_REQUEST_DECLINED;
321 ieee80211_send_addba_resp(sta->sdata, sta->sta.addr,
322 tid, dialog_token, status,
323 1, buf_size, timeout);
324 goto end;
325 }
326
309 ht_dbg_ratelimited(sta->sdata, 327 ht_dbg_ratelimited(sta->sdata,
310 "unexpected AddBA Req from %pM on tid %u\n", 328 "unexpected AddBA Req from %pM on tid %u\n",
311 sta->sta.addr, tid); 329 sta->sta.addr, tid);
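
The agg-rx.c addition distinguishes a retransmitted AddBA request for a live session (same TID, same dialog token) from a genuinely unexpected one: the former now gets an explicit DECLINED response, since mac80211 has no driver API for changing an active session's timeout, instead of falling through to the "unexpected" path. A compressed sketch of that decision (the enum values and struct ba_session are hypothetical):

#include <stdbool.h>
#include <stdio.h>

enum resp { RESP_ACCEPT, RESP_DECLINE, RESP_TEARDOWN };

struct ba_session {
	bool valid;
	int dialog_token;
};

/* Decide how to answer an incoming AddBA request on one TID. */
static enum resp handle_addba(struct ba_session *s, int dialog_token)
{
	if (s->valid) {
		if (s->dialog_token == dialog_token)
			/* update of a live session: cannot push a new
			 * timeout to the driver, so decline politely */
			return RESP_DECLINE;
		/* unexpected token: the old session is torn down and
		 * the request handled fresh (elided here) */
		return RESP_TEARDOWN;
	}
	s->valid = true;
	s->dialog_token = dialog_token;
	return RESP_ACCEPT;
}

int main(void)
{
	struct ba_session s = { false, 0 };

	printf("%d %d %d\n",
	       handle_addba(&s, 7),	/* accept */
	       handle_addba(&s, 7),	/* decline timeout update */
	       handle_addba(&s, 9));	/* unexpected token */
	return 0;
}
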
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 0c12e4001f19..47e99ab8d97a 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -997,6 +997,7 @@ static void sta_apply_mesh_params(struct ieee80211_local *local,
997 if (sta->mesh->plink_state != NL80211_PLINK_ESTAB) 997 if (sta->mesh->plink_state != NL80211_PLINK_ESTAB)
998 changed = mesh_plink_inc_estab_count(sdata); 998 changed = mesh_plink_inc_estab_count(sdata);
999 sta->mesh->plink_state = params->plink_state; 999 sta->mesh->plink_state = params->plink_state;
1000 sta->mesh->aid = params->peer_aid;
1000 1001
1001 ieee80211_mps_sta_status_update(sta); 1002 ieee80211_mps_sta_status_update(sta);
1002 changed |= ieee80211_mps_set_sta_local_pm(sta, 1003 changed |= ieee80211_mps_set_sta_local_pm(sta,
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 54edfb6fc1d1..f56d342c31b8 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1250,6 +1250,7 @@ struct ieee80211_local {
1250 int scan_channel_idx; 1250 int scan_channel_idx;
1251 int scan_ies_len; 1251 int scan_ies_len;
1252 int hw_scan_ies_bufsize; 1252 int hw_scan_ies_bufsize;
1253 struct cfg80211_scan_info scan_info;
1253 1254
1254 struct work_struct sched_scan_stopped_work; 1255 struct work_struct sched_scan_stopped_work;
1255 struct ieee80211_sub_if_data __rcu *sched_scan_sdata; 1256 struct ieee80211_sub_if_data __rcu *sched_scan_sdata;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 21b1fdf5d01d..c66411df9863 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -148,22 +148,7 @@ u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
148void mesh_sta_cleanup(struct sta_info *sta) 148void mesh_sta_cleanup(struct sta_info *sta)
149{ 149{
150 struct ieee80211_sub_if_data *sdata = sta->sdata; 150 struct ieee80211_sub_if_data *sdata = sta->sdata;
151 u32 changed; 151 u32 changed = mesh_plink_deactivate(sta);
152
153 /*
154 * maybe userspace handles peer allocation and peering, but in either
155 * case the beacon is still generated by the kernel and we might need
156 * an update.
157 */
158 changed = mesh_accept_plinks_update(sdata);
159 if (!sdata->u.mesh.user_mpm) {
160 changed |= mesh_plink_deactivate(sta);
161 del_timer_sync(&sta->mesh->plink_timer);
162 }
163
164 /* make sure no readers can access nexthop sta from here on */
165 mesh_path_flush_by_nexthop(sta);
166 synchronize_net();
167 152
168 if (changed) 153 if (changed)
169 ieee80211_mbss_info_change_notify(sdata, changed); 154 ieee80211_mbss_info_change_notify(sdata, changed);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 79f2a0a13db8..7fcdcf622655 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -370,13 +370,21 @@ u32 mesh_plink_deactivate(struct sta_info *sta)
370 370
371 spin_lock_bh(&sta->mesh->plink_lock); 371 spin_lock_bh(&sta->mesh->plink_lock);
372 changed = __mesh_plink_deactivate(sta); 372 changed = __mesh_plink_deactivate(sta);
373 sta->mesh->reason = WLAN_REASON_MESH_PEER_CANCELED; 373
374 mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_CLOSE, 374 if (!sdata->u.mesh.user_mpm) {
375 sta->sta.addr, sta->mesh->llid, sta->mesh->plid, 375 sta->mesh->reason = WLAN_REASON_MESH_PEER_CANCELED;
376 sta->mesh->reason); 376 mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_CLOSE,
377 sta->sta.addr, sta->mesh->llid,
378 sta->mesh->plid, sta->mesh->reason);
379 }
377 spin_unlock_bh(&sta->mesh->plink_lock); 380 spin_unlock_bh(&sta->mesh->plink_lock);
381 if (!sdata->u.mesh.user_mpm)
382 del_timer_sync(&sta->mesh->plink_timer);
378 mesh_path_flush_by_nexthop(sta); 383 mesh_path_flush_by_nexthop(sta);
379 384
385 /* make sure no readers can access nexthop sta from here on */
386 synchronize_net();
387
380 return changed; 388 return changed;
381} 389}
382 390
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 9a1eb70cb120..2e8a9024625a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1624,8 +1624,13 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
1624 if (mmie_keyidx < NUM_DEFAULT_KEYS || 1624 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
1625 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 1625 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
1626 return RX_DROP_MONITOR; /* unexpected BIP keyidx */ 1626 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
1627 if (rx->sta) 1627 if (rx->sta) {
1628 if (ieee80211_is_group_privacy_action(skb) &&
1629 test_sta_flag(rx->sta, WLAN_STA_MFP))
1630 return RX_DROP_MONITOR;
1631
1628 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]); 1632 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
1633 }
1629 if (!rx->key) 1634 if (!rx->key)
1630 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); 1635 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
1631 } else if (!ieee80211_has_protected(fc)) { 1636 } else if (!ieee80211_has_protected(fc)) {
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index f9648ef9e31f..070b40f15850 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -7,6 +7,7 @@
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net> 8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2013-2015 Intel Mobile Communications GmbH 9 * Copyright 2013-2015 Intel Mobile Communications GmbH
10 * Copyright 2016 Intel Deutschland GmbH
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 13 * it under the terms of the GNU General Public License version 2 as
@@ -70,6 +71,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
70 .boottime_ns = rx_status->boottime_ns, 71 .boottime_ns = rx_status->boottime_ns,
71 }; 72 };
72 bool signal_valid; 73 bool signal_valid;
74 struct ieee80211_sub_if_data *scan_sdata;
73 75
74 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) 76 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
75 bss_meta.signal = rx_status->signal * 100; 77 bss_meta.signal = rx_status->signal * 100;
@@ -83,6 +85,20 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
83 bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_10; 85 bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_10;
84 86
85 bss_meta.chan = channel; 87 bss_meta.chan = channel;
88
89 rcu_read_lock();
90 scan_sdata = rcu_dereference(local->scan_sdata);
91 if (scan_sdata && scan_sdata->vif.type == NL80211_IFTYPE_STATION &&
92 scan_sdata->vif.bss_conf.assoc &&
93 ieee80211_have_rx_timestamp(rx_status)) {
94 bss_meta.parent_tsf =
95 ieee80211_calculate_rx_timestamp(local, rx_status,
96 len + FCS_LEN, 24);
97 ether_addr_copy(bss_meta.parent_bssid,
98 scan_sdata->vif.bss_conf.bssid);
99 }
100 rcu_read_unlock();
101
86 cbss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta, 102 cbss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta,
87 mgmt, len, GFP_ATOMIC); 103 mgmt, len, GFP_ATOMIC);
88 if (!cbss) 104 if (!cbss)
@@ -345,6 +361,12 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
345 361
346 if (rc == 0) 362 if (rc == 0)
347 return; 363 return;
364
365 /* HW scan failed and is going to be reported as aborted,
366 * so clear old scan info.
367 */
368 memset(&local->scan_info, 0, sizeof(local->scan_info));
369 aborted = true;
348 } 370 }
349 371
350 kfree(local->hw_scan_req); 372 kfree(local->hw_scan_req);
@@ -353,8 +375,10 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
353 scan_req = rcu_dereference_protected(local->scan_req, 375 scan_req = rcu_dereference_protected(local->scan_req,
354 lockdep_is_held(&local->mtx)); 376 lockdep_is_held(&local->mtx));
355 377
356 if (scan_req != local->int_scan_req) 378 if (scan_req != local->int_scan_req) {
357 cfg80211_scan_done(scan_req, aborted); 379 local->scan_info.aborted = aborted;
380 cfg80211_scan_done(scan_req, &local->scan_info);
381 }
358 RCU_INIT_POINTER(local->scan_req, NULL); 382 RCU_INIT_POINTER(local->scan_req, NULL);
359 383
360 scan_sdata = rcu_dereference_protected(local->scan_sdata, 384 scan_sdata = rcu_dereference_protected(local->scan_sdata,
@@ -391,15 +415,19 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
391 ieee80211_start_next_roc(local); 415 ieee80211_start_next_roc(local);
392} 416}
393 417
394void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) 418void ieee80211_scan_completed(struct ieee80211_hw *hw,
419 struct cfg80211_scan_info *info)
395{ 420{
396 struct ieee80211_local *local = hw_to_local(hw); 421 struct ieee80211_local *local = hw_to_local(hw);
397 422
398 trace_api_scan_completed(local, aborted); 423 trace_api_scan_completed(local, info);
399 424
400 set_bit(SCAN_COMPLETED, &local->scanning); 425 set_bit(SCAN_COMPLETED, &local->scanning);
401 if (aborted) 426 if (info->aborted)
402 set_bit(SCAN_ABORTED, &local->scanning); 427 set_bit(SCAN_ABORTED, &local->scanning);
428
429 memcpy(&local->scan_info, info, sizeof(*info));
430
403 ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0); 431 ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
404} 432}
405EXPORT_SYMBOL(ieee80211_scan_completed); 433EXPORT_SYMBOL(ieee80211_scan_completed);
@@ -566,6 +594,9 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
566 local->hw_scan_req->req.ie = ies; 594 local->hw_scan_req->req.ie = ies;
567 local->hw_scan_req->req.flags = req->flags; 595 local->hw_scan_req->req.flags = req->flags;
568 eth_broadcast_addr(local->hw_scan_req->req.bssid); 596 eth_broadcast_addr(local->hw_scan_req->req.bssid);
597 local->hw_scan_req->req.duration = req->duration;
598 local->hw_scan_req->req.duration_mandatory =
599 req->duration_mandatory;
569 600
570 local->hw_scan_band = 0; 601 local->hw_scan_band = 0;
571 602
@@ -1073,6 +1104,7 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
1073 */ 1104 */
1074 cancel_delayed_work(&local->scan_work); 1105 cancel_delayed_work(&local->scan_work);
1075 /* and clean up */ 1106 /* and clean up */
1107 memset(&local->scan_info, 0, sizeof(local->scan_info));
1076 __ieee80211_scan_completed(&local->hw, true); 1108 __ieee80211_scan_completed(&local->hw, true);
1077out: 1109out:
1078 mutex_unlock(&local->mtx); 1110 mutex_unlock(&local->mtx);
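
Across scan.c, ieee80211_scan_completed() now takes a struct cfg80211_scan_info rather than a bare "aborted" bool; mac80211 stashes a copy in local->scan_info so the deferred scan_work can hand the whole record to cfg80211_scan_done(), and clears it on failure or cancel so stale data is never reported. In miniature (the field set is abbreviated; only aborted and scan_start_tsf are shown):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct scan_info {			/* cf. cfg80211_scan_info, abridged */
	bool aborted;
	uint64_t scan_start_tsf;	/* 0 if hardware cannot report it */
};

struct local {
	struct scan_info scan_info;
};

/* Driver -> stack completion: keep the whole record, not one flag, so
 * future additions do not change the callback signature again. */
static void scan_completed(struct local *l, const struct scan_info *info)
{
	memcpy(&l->scan_info, info, sizeof(*info));
	/* the real code queues scan_work here; the stored copy is what
	 * eventually reaches cfg80211_scan_done() */
}

int main(void)
{
	struct local l = { { false, 0 } };
	struct scan_info info = { .aborted = true, .scan_start_tsf = 123456 };

	scan_completed(&l, &info);
	printf("aborted=%d tsf=%llu\n", l.scan_info.aborted,
	       (unsigned long long)l.scan_info.scan_start_tsf);
	return 0;
}
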
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 2ddc661f0988..97f4c9d6b54c 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -129,42 +129,31 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
129 } 129 }
130 130
131 if (wide_bw_chansw_ie) { 131 if (wide_bw_chansw_ie) {
132 new_vht_chandef.chan = new_chan; 132 struct ieee80211_vht_operation vht_oper = {
133 new_vht_chandef.center_freq1 = 133 .chan_width =
134 ieee80211_channel_to_frequency( 134 wide_bw_chansw_ie->new_channel_width,
135 .center_freq_seg1_idx =
135 wide_bw_chansw_ie->new_center_freq_seg0, 136 wide_bw_chansw_ie->new_center_freq_seg0,
136 new_band); 137 .center_freq_seg2_idx =
137 138 wide_bw_chansw_ie->new_center_freq_seg1,
138 switch (wide_bw_chansw_ie->new_channel_width) { 139 /* .basic_mcs_set doesn't matter */
139 default: 140 };
140 /* hmmm, ignore VHT and use HT if present */ 141
141 case IEEE80211_VHT_CHANWIDTH_USE_HT: 142 /* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT,
143 * to the previously parsed chandef
144 */
145 new_vht_chandef = csa_ie->chandef;
146
147 /* ignore if parsing fails */
148 if (!ieee80211_chandef_vht_oper(&vht_oper, &new_vht_chandef))
142 new_vht_chandef.chan = NULL; 149 new_vht_chandef.chan = NULL;
143 break; 150
144 case IEEE80211_VHT_CHANWIDTH_80MHZ:
145 new_vht_chandef.width = NL80211_CHAN_WIDTH_80;
146 break;
147 case IEEE80211_VHT_CHANWIDTH_160MHZ:
148 new_vht_chandef.width = NL80211_CHAN_WIDTH_160;
149 break;
150 case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
151 /* field is otherwise reserved */
152 new_vht_chandef.center_freq2 =
153 ieee80211_channel_to_frequency(
154 wide_bw_chansw_ie->new_center_freq_seg1,
155 new_band);
156 new_vht_chandef.width = NL80211_CHAN_WIDTH_80P80;
157 break;
158 }
159 if (sta_flags & IEEE80211_STA_DISABLE_80P80MHZ && 151 if (sta_flags & IEEE80211_STA_DISABLE_80P80MHZ &&
160 new_vht_chandef.width == NL80211_CHAN_WIDTH_80P80) 152 new_vht_chandef.width == NL80211_CHAN_WIDTH_80P80)
161 ieee80211_chandef_downgrade(&new_vht_chandef); 153 ieee80211_chandef_downgrade(&new_vht_chandef);
162 if (sta_flags & IEEE80211_STA_DISABLE_160MHZ && 154 if (sta_flags & IEEE80211_STA_DISABLE_160MHZ &&
163 new_vht_chandef.width == NL80211_CHAN_WIDTH_160) 155 new_vht_chandef.width == NL80211_CHAN_WIDTH_160)
164 ieee80211_chandef_downgrade(&new_vht_chandef); 156 ieee80211_chandef_downgrade(&new_vht_chandef);
165 if (sta_flags & IEEE80211_STA_DISABLE_40MHZ &&
166 new_vht_chandef.width > NL80211_CHAN_WIDTH_20)
167 ieee80211_chandef_downgrade(&new_vht_chandef);
168 } 157 }
169 158
170 /* if VHT data is there validate & use it */ 159 /* if VHT data is there validate & use it */
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 1c7d45a6d93e..b5d28f14b9cf 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -1747,6 +1747,7 @@ ieee80211_process_tdls_channel_switch_resp(struct ieee80211_sub_if_data *sdata,
1747 goto out; 1747 goto out;
1748 } 1748 }
1749 1749
1750 ret = 0;
1750call_drv: 1751call_drv:
1751 drv_tdls_recv_channel_switch(sdata->local, sdata, &params); 1752 drv_tdls_recv_channel_switch(sdata->local, sdata, &params);
1752 1753
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 44ec605a5682..91461c415525 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -593,6 +593,9 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
593 else if (tx->sta && 593 else if (tx->sta &&
594 (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx]))) 594 (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
595 tx->key = key; 595 tx->key = key;
596 else if (ieee80211_is_group_privacy_action(tx->skb) &&
597 (key = rcu_dereference(tx->sdata->default_multicast_key)))
598 tx->key = key;
596 else if (ieee80211_is_mgmt(hdr->frame_control) && 599 else if (ieee80211_is_mgmt(hdr->frame_control) &&
597 is_multicast_ether_addr(hdr->addr1) && 600 is_multicast_ether_addr(hdr->addr1) &&
598 ieee80211_is_robust_mgmt_frame(tx->skb) && 601 ieee80211_is_robust_mgmt_frame(tx->skb) &&
@@ -625,7 +628,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
625 case WLAN_CIPHER_SUITE_GCMP_256: 628 case WLAN_CIPHER_SUITE_GCMP_256:
626 if (!ieee80211_is_data_present(hdr->frame_control) && 629 if (!ieee80211_is_data_present(hdr->frame_control) &&
627 !ieee80211_use_mfp(hdr->frame_control, tx->sta, 630 !ieee80211_use_mfp(hdr->frame_control, tx->sta,
628 tx->skb)) 631 tx->skb) &&
632 !ieee80211_is_group_privacy_action(tx->skb))
629 tx->key = NULL; 633 tx->key = NULL;
630 else 634 else
631 skip_hw = (tx->key->conf.flags & 635 skip_hw = (tx->key->conf.flags &
@@ -1445,7 +1449,9 @@ int ieee80211_txq_setup_flows(struct ieee80211_local *local)
1445 local->cvars = kcalloc(fq->flows_cnt, sizeof(local->cvars[0]), 1449 local->cvars = kcalloc(fq->flows_cnt, sizeof(local->cvars[0]),
1446 GFP_KERNEL); 1450 GFP_KERNEL);
1447 if (!local->cvars) { 1451 if (!local->cvars) {
1452 spin_lock_bh(&fq->lock);
1448 fq_reset(fq, fq_skb_free_func); 1453 fq_reset(fq, fq_skb_free_func);
1454 spin_unlock_bh(&fq->lock);
1449 return -ENOMEM; 1455 return -ENOMEM;
1450 } 1456 }
1451 1457
@@ -1465,7 +1471,9 @@ void ieee80211_txq_teardown_flows(struct ieee80211_local *local)
1465 kfree(local->cvars); 1471 kfree(local->cvars);
1466 local->cvars = NULL; 1472 local->cvars = NULL;
1467 1473
1474 spin_lock_bh(&fq->lock);
1468 fq_reset(fq, fq_skb_free_func); 1475 fq_reset(fq, fq_skb_free_func);
1476 spin_unlock_bh(&fq->lock);
1469} 1477}
1470 1478
1471struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, 1479struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 95e757c377f9..9266ceebd112 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -609,9 +609,8 @@ config NETFILTER_XT_MARK
609 The target allows you to create rules in the "mangle" table which alter 609 The target allows you to create rules in the "mangle" table which alter
610 the netfilter mark (nfmark) field associated with the packet. 610 the netfilter mark (nfmark) field associated with the packet.
611 611
612 Prior to routing, the nfmark can influence the routing method (see 612 Prior to routing, the nfmark can influence the routing method and can
613 "Use netfilter MARK value as routing key") and can also be used by 613 also be used by other subsystems to change their behavior.
614 other subsystems to change their behavior.
615 614
616config NETFILTER_XT_CONNMARK 615config NETFILTER_XT_CONNMARK
617 tristate 'ctmark target and match support' 616 tristate 'ctmark target and match support'
@@ -753,9 +752,8 @@ config NETFILTER_XT_TARGET_HMARK
753 752
754 The target allows you to create rules in the "raw" and "mangle" tables 753 The target allows you to create rules in the "raw" and "mangle" tables
755 which set the skbuff mark by means of hash calculation within a given 754 which set the skbuff mark by means of hash calculation within a given
756 range. The nfmark can influence the routing method (see "Use netfilter 755 range. The nfmark can influence the routing method and can also be used
757 MARK value as routing key") and can also be used by other subsystems to 756 by other subsystems to change their behaviour.
758 change their behaviour.
759 757
760 To compile it as a module, choose M here. If unsure, say N. 758 To compile it as a module, choose M here. If unsure, say N.
761 759
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index db2312eeb2a4..153e33ffeeaa 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -327,16 +327,10 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
327 327
328 tmpl->status = IPS_TEMPLATE; 328 tmpl->status = IPS_TEMPLATE;
329 write_pnet(&tmpl->ct_net, net); 329 write_pnet(&tmpl->ct_net, net);
330 330 nf_ct_zone_add(tmpl, zone);
331 if (nf_ct_zone_add(tmpl, flags, zone) < 0)
332 goto out_free;
333
334 atomic_set(&tmpl->ct_general.use, 0); 331 atomic_set(&tmpl->ct_general.use, 0);
335 332
336 return tmpl; 333 return tmpl;
337out_free:
338 kfree(tmpl);
339 return NULL;
340} 334}
341EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc); 335EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
342 336
@@ -929,16 +923,13 @@ __nf_conntrack_alloc(struct net *net,
929 offsetof(struct nf_conn, proto) - 923 offsetof(struct nf_conn, proto) -
930 offsetof(struct nf_conn, __nfct_init_offset[0])); 924 offsetof(struct nf_conn, __nfct_init_offset[0]));
931 925
932 if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0) 926 nf_ct_zone_add(ct, zone);
933 goto out_free;
934 927
935 /* Because we use RCU lookups, we set ct_general.use to zero before 928 /* Because we use RCU lookups, we set ct_general.use to zero before
936 * this is inserted in any list. 929 * this is inserted in any list.
937 */ 930 */
938 atomic_set(&ct->ct_general.use, 0); 931 atomic_set(&ct->ct_general.use, 0);
939 return ct; 932 return ct;
940out_free:
941 kmem_cache_free(nf_conntrack_cachep, ct);
942out: 933out:
943 atomic_dec(&net->ct.count); 934 atomic_dec(&net->ct.count);
944 return ERR_PTR(-ENOMEM); 935 return ERR_PTR(-ENOMEM);
@@ -1342,14 +1333,6 @@ bool __nf_ct_kill_acct(struct nf_conn *ct,
1342} 1333}
1343EXPORT_SYMBOL_GPL(__nf_ct_kill_acct); 1334EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
1344 1335
1345#ifdef CONFIG_NF_CONNTRACK_ZONES
1346static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
1347 .len = sizeof(struct nf_conntrack_zone),
1348 .align = __alignof__(struct nf_conntrack_zone),
1349 .id = NF_CT_EXT_ZONE,
1350};
1351#endif
1352
1353#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 1336#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1354 1337
1355#include <linux/netfilter/nfnetlink.h> 1338#include <linux/netfilter/nfnetlink.h>
@@ -1532,9 +1515,6 @@ void nf_conntrack_cleanup_end(void)
1532 1515
1533 nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size); 1516 nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
1534 1517
1535#ifdef CONFIG_NF_CONNTRACK_ZONES
1536 nf_ct_extend_unregister(&nf_ct_zone_extend);
1537#endif
1538 nf_conntrack_proto_fini(); 1518 nf_conntrack_proto_fini();
1539 nf_conntrack_seqadj_fini(); 1519 nf_conntrack_seqadj_fini();
1540 nf_conntrack_labels_fini(); 1520 nf_conntrack_labels_fini();
@@ -1544,6 +1524,8 @@ void nf_conntrack_cleanup_end(void)
1544 nf_conntrack_tstamp_fini(); 1524 nf_conntrack_tstamp_fini();
1545 nf_conntrack_acct_fini(); 1525 nf_conntrack_acct_fini();
1546 nf_conntrack_expect_fini(); 1526 nf_conntrack_expect_fini();
1527
1528 kmem_cache_destroy(nf_conntrack_cachep);
1547} 1529}
1548 1530
1549/* 1531/*
@@ -1615,24 +1597,14 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
1615} 1597}
1616EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable); 1598EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
1617 1599
1618int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) 1600int nf_conntrack_hash_resize(unsigned int hashsize)
1619{ 1601{
1620 int i, bucket, rc; 1602 int i, bucket;
1621 unsigned int hashsize, old_size; 1603 unsigned int old_size;
1622 struct hlist_nulls_head *hash, *old_hash; 1604 struct hlist_nulls_head *hash, *old_hash;
1623 struct nf_conntrack_tuple_hash *h; 1605 struct nf_conntrack_tuple_hash *h;
1624 struct nf_conn *ct; 1606 struct nf_conn *ct;
1625 1607
1626 if (current->nsproxy->net_ns != &init_net)
1627 return -EOPNOTSUPP;
1628
1629 /* On boot, we can set this without any fancy locking. */
1630 if (!nf_conntrack_htable_size)
1631 return param_set_uint(val, kp);
1632
1633 rc = kstrtouint(val, 0, &hashsize);
1634 if (rc)
1635 return rc;
1636 if (!hashsize) 1608 if (!hashsize)
1637 return -EINVAL; 1609 return -EINVAL;
1638 1610
@@ -1640,6 +1612,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1640 if (!hash) 1612 if (!hash)
1641 return -ENOMEM; 1613 return -ENOMEM;
1642 1614
1615 old_size = nf_conntrack_htable_size;
1616 if (old_size == hashsize) {
1617 nf_ct_free_hashtable(hash, hashsize);
1618 return 0;
1619 }
1620
1643 local_bh_disable(); 1621 local_bh_disable();
1644 nf_conntrack_all_lock(); 1622 nf_conntrack_all_lock();
1645 write_seqcount_begin(&nf_conntrack_generation); 1623 write_seqcount_begin(&nf_conntrack_generation);
@@ -1675,6 +1653,25 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1675 nf_ct_free_hashtable(old_hash, old_size); 1653 nf_ct_free_hashtable(old_hash, old_size);
1676 return 0; 1654 return 0;
1677} 1655}
1656
1657int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1658{
1659 unsigned int hashsize;
1660 int rc;
1661
1662 if (current->nsproxy->net_ns != &init_net)
1663 return -EOPNOTSUPP;
1664
1665 /* On boot, we can set this without any fancy locking. */
1666 if (!nf_conntrack_htable_size)
1667 return param_set_uint(val, kp);
1668
1669 rc = kstrtouint(val, 0, &hashsize);
1670 if (rc)
1671 return rc;
1672
1673 return nf_conntrack_hash_resize(hashsize);
1674}
1678EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize); 1675EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
1679 1676
1680module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, 1677module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
@@ -1731,7 +1728,7 @@ int nf_conntrack_init_start(void)
1731 1728
1732 nf_conntrack_cachep = kmem_cache_create("nf_conntrack", 1729 nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
1733 sizeof(struct nf_conn), 0, 1730 sizeof(struct nf_conn), 0,
1734 SLAB_DESTROY_BY_RCU, NULL); 1731 SLAB_DESTROY_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
1735 if (!nf_conntrack_cachep) 1732 if (!nf_conntrack_cachep)
1736 goto err_cachep; 1733 goto err_cachep;
1737 1734
@@ -1771,11 +1768,6 @@ int nf_conntrack_init_start(void)
1771 if (ret < 0) 1768 if (ret < 0)
1772 goto err_seqadj; 1769 goto err_seqadj;
1773 1770
1774#ifdef CONFIG_NF_CONNTRACK_ZONES
1775 ret = nf_ct_extend_register(&nf_ct_zone_extend);
1776 if (ret < 0)
1777 goto err_extend;
1778#endif
1779 ret = nf_conntrack_proto_init(); 1771 ret = nf_conntrack_proto_init();
1780 if (ret < 0) 1772 if (ret < 0)
1781 goto err_proto; 1773 goto err_proto;
@@ -1791,10 +1783,6 @@ int nf_conntrack_init_start(void)
1791 return 0; 1783 return 0;
1792 1784
1793err_proto: 1785err_proto:
1794#ifdef CONFIG_NF_CONNTRACK_ZONES
1795 nf_ct_extend_unregister(&nf_ct_zone_extend);
1796err_extend:
1797#endif
1798 nf_conntrack_seqadj_fini(); 1786 nf_conntrack_seqadj_fini();
1799err_seqadj: 1787err_seqadj:
1800 nf_conntrack_labels_fini(); 1788 nf_conntrack_labels_fini();
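
nf_conntrack_core.c splits the resize proper out of the module-parameter handler: nf_conntrack_hash_resize() allocates the replacement table first (so failure leaves the old one untouched), returns early when the size is unchanged, and rehashes every entry inside the global write section, while nf_conntrack_set_hashsize() keeps only the init_net and boot-time checks. The allocate-first/no-op/rehash shape, reduced to a single-threaded toy (no RCU, collisions ignored):

#include <stdio.h>
#include <stdlib.h>

struct table {				/* toy open hash of ints */
	unsigned int size;
	int *slots;
};

static unsigned int hash(int key, unsigned int size)
{
	return ((unsigned int)key * 2654435761u) % size;
}

static int table_resize(struct table *t, unsigned int newsize)
{
	int *fresh;
	unsigned int i;

	if (!newsize)
		return -1;
	fresh = calloc(newsize, sizeof(*fresh));
	if (!fresh)			/* old table untouched on failure */
		return -1;
	if (newsize == t->size) {	/* avoid a pointless rehash */
		free(fresh);
		return 0;
	}
	/* kernel: local_bh_disable + all buckets locked + seqcount write */
	for (i = 0; i < t->size; i++)
		if (t->slots[i])
			fresh[hash(t->slots[i], newsize)] = t->slots[i];
	free(t->slots);
	t->slots = fresh;
	t->size = newsize;
	return 0;
}

int main(void)
{
	struct table t = { 4, calloc(4, sizeof(int)) };

	t.slots[hash(42, t.size)] = 42;
	table_resize(&t, 16);
	printf("42 now at slot %u of %u\n", hash(42, t.size), t.size);
	free(t.slots);
	return 0;
}
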
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 196cb39649e1..3a1a88b9bafa 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -389,11 +389,38 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
389 struct net *net) 389 struct net *net)
390{ 390{
391 struct nf_conntrack_tuple_hash *h; 391 struct nf_conntrack_tuple_hash *h;
392 const struct hlist_nulls_node *nn;
393 int cpu;
394
395 /* Get rid of expecteds, set helpers to NULL. */
396 for_each_possible_cpu(cpu) {
397 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
398
399 spin_lock_bh(&pcpu->lock);
400 hlist_nulls_for_each_entry(h, nn, &pcpu->unconfirmed, hnnode)
401 unhelp(h, me);
402 spin_unlock_bh(&pcpu->lock);
403 }
404}
405
406void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
407{
408 struct nf_conntrack_tuple_hash *h;
392 struct nf_conntrack_expect *exp; 409 struct nf_conntrack_expect *exp;
393 const struct hlist_node *next; 410 const struct hlist_node *next;
394 const struct hlist_nulls_node *nn; 411 const struct hlist_nulls_node *nn;
412 struct net *net;
395 unsigned int i; 413 unsigned int i;
396 int cpu; 414
415 mutex_lock(&nf_ct_helper_mutex);
416 hlist_del_rcu(&me->hnode);
417 nf_ct_helper_count--;
418 mutex_unlock(&nf_ct_helper_mutex);
419
420 /* Make sure nothing is still using the helper unless it's a
421 * connection in the hash.
422 */
423 synchronize_rcu();
397 424
398 /* Get rid of expectations */ 425 /* Get rid of expectations */
399 spin_lock_bh(&nf_conntrack_expect_lock); 426 spin_lock_bh(&nf_conntrack_expect_lock);
@@ -413,15 +440,11 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
413 } 440 }
414 spin_unlock_bh(&nf_conntrack_expect_lock); 441 spin_unlock_bh(&nf_conntrack_expect_lock);
415 442
416 /* Get rid of expecteds, set helpers to NULL. */ 443 rtnl_lock();
417 for_each_possible_cpu(cpu) { 444 for_each_net(net)
418 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); 445 __nf_conntrack_helper_unregister(me, net);
446 rtnl_unlock();
419 447
420 spin_lock_bh(&pcpu->lock);
421 hlist_nulls_for_each_entry(h, nn, &pcpu->unconfirmed, hnnode)
422 unhelp(h, me);
423 spin_unlock_bh(&pcpu->lock);
424 }
425 local_bh_disable(); 448 local_bh_disable();
426 for (i = 0; i < nf_conntrack_htable_size; i++) { 449 for (i = 0; i < nf_conntrack_htable_size; i++) {
427 nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); 450 nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
@@ -433,26 +456,6 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
433 } 456 }
434 local_bh_enable(); 457 local_bh_enable();
435} 458}
436
437void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
438{
439 struct net *net;
440
441 mutex_lock(&nf_ct_helper_mutex);
442 hlist_del_rcu(&me->hnode);
443 nf_ct_helper_count--;
444 mutex_unlock(&nf_ct_helper_mutex);
445
446 /* Make sure nothing is still using the helper unless it's a
447 * connection in the hash.
448 */
449 synchronize_rcu();
450
451 rtnl_lock();
452 for_each_net(net)
453 __nf_conntrack_helper_unregister(me, net);
454 rtnl_unlock();
455}
456EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); 459EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
457 460
458static struct nf_ct_ext_type helper_extend __read_mostly = { 461static struct nf_ct_ext_type helper_extend __read_mostly = {
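
The helper rework makes the teardown ordering explicit in nf_conntrack_helper_unregister(): unpublish the helper from the RCU list, synchronize_rcu() so no new lookup can still return it, and only then strip expectations and existing users. That unpublish-then-wait pattern in a standalone sketch (synchronize_readers() is a stub standing in for synchronize_rcu(); the toy runs single-threaded):

#include <stdio.h>

struct helper {
	const char *name;
	struct helper *next;
};

static struct helper *helper_list;	/* published lookup list */

static void synchronize_readers(void)
{
	/* stand-in for synchronize_rcu(): returns once every reader
	 * that might hold the old list view has finished */
}

/* Cleaning up users before the sync would let a concurrent lookup
 * hand out a helper that is already being dismantled. */
static void helper_unregister(struct helper *me)
{
	struct helper **pp;

	for (pp = &helper_list; *pp; pp = &(*pp)->next) {
		if (*pp == me) {
			*pp = me->next;	/* cf. hlist_del_rcu() */
			break;
		}
	}

	synchronize_readers();

	/* now safe: drop expectations, NULL out ct->helper pointers */
	printf("%s: no new users possible, stripping the rest\n", me->name);
}

int main(void)
{
	struct helper ftp = { "ftp", 0 };

	helper_list = &ftp;
	helper_unregister(&ftp);
	return 0;
}
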
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index c026c472ea80..2aaa188ee961 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -434,8 +434,29 @@ static void nf_conntrack_standalone_fini_proc(struct net *net)
434 434
435#ifdef CONFIG_SYSCTL 435#ifdef CONFIG_SYSCTL
436/* Log invalid packets of a given protocol */ 436/* Log invalid packets of a given protocol */
437static int log_invalid_proto_min = 0; 437static int log_invalid_proto_min __read_mostly;
438static int log_invalid_proto_max = 255; 438static int log_invalid_proto_max __read_mostly = 255;
439
440/* size the user *wants to set */
441static unsigned int nf_conntrack_htable_size_user __read_mostly;
442
443static int
444nf_conntrack_hash_sysctl(struct ctl_table *table, int write,
445 void __user *buffer, size_t *lenp, loff_t *ppos)
446{
447 int ret;
448
449 ret = proc_dointvec(table, write, buffer, lenp, ppos);
450 if (ret < 0 || !write)
451 return ret;
452
453 /* update ret, we might not be able to satisfy request */
454 ret = nf_conntrack_hash_resize(nf_conntrack_htable_size_user);
455
456 /* update it to the actual value used by conntrack */
457 nf_conntrack_htable_size_user = nf_conntrack_htable_size;
458 return ret;
459}
439 460
440static struct ctl_table_header *nf_ct_netfilter_header; 461static struct ctl_table_header *nf_ct_netfilter_header;
441 462
@@ -456,10 +477,10 @@ static struct ctl_table nf_ct_sysctl_table[] = {
456 }, 477 },
457 { 478 {
458 .procname = "nf_conntrack_buckets", 479 .procname = "nf_conntrack_buckets",
459 .data = &nf_conntrack_htable_size, 480 .data = &nf_conntrack_htable_size_user,
460 .maxlen = sizeof(unsigned int), 481 .maxlen = sizeof(unsigned int),
461 .mode = 0444, 482 .mode = 0644,
462 .proc_handler = proc_dointvec, 483 .proc_handler = nf_conntrack_hash_sysctl,
463 }, 484 },
464 { 485 {
465 .procname = "nf_conntrack_checksum", 486 .procname = "nf_conntrack_checksum",
@@ -515,6 +536,9 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
515 if (net->user_ns != &init_user_ns) 536 if (net->user_ns != &init_user_ns)
516 table[0].procname = NULL; 537 table[0].procname = NULL;
517 538
539 if (!net_eq(&init_net, net))
540 table[2].mode = 0444;
541
518 net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table); 542 net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table);
519 if (!net->ct.sysctl_header) 543 if (!net->ct.sysctl_header)
520 goto out_unregister_netfilter; 544 goto out_unregister_netfilter;
@@ -604,6 +628,8 @@ static int __init nf_conntrack_standalone_init(void)
604 ret = -ENOMEM; 628 ret = -ENOMEM;
605 goto out_sysctl; 629 goto out_sysctl;
606 } 630 }
631
632 nf_conntrack_htable_size_user = nf_conntrack_htable_size;
607#endif 633#endif
608 634
609 ret = register_pernet_subsys(&nf_conntrack_net_ops); 635 ret = register_pernet_subsys(&nf_conntrack_net_ops);
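
With the standalone changes, nf_conntrack_buckets becomes writable (mode 0644) in init_net: the handler parses the write into a shadow variable, asks nf_conntrack_hash_resize() for that size, then copies back whatever size conntrack actually ended up using, so a subsequent read shows reality rather than the request. The read-modify-writeback shape with a stub resize:

#include <stdio.h>

static unsigned int htable_size = 16384;	/* value in use */
static unsigned int htable_size_user = 16384;	/* what sysctl shows */

static int hash_resize(unsigned int want)
{
	if (!want)
		return -1;
	htable_size = want;	/* the real resize may clamp or fail */
	return 0;
}

/* Mimics nf_conntrack_hash_sysctl(): parse (elided), try the resize,
 * then reflect the value actually in use back to the knob. */
static int buckets_write(unsigned int requested)
{
	int ret;

	htable_size_user = requested;
	ret = hash_resize(htable_size_user);
	htable_size_user = htable_size;
	return ret;
}

int main(void)
{
	buckets_write(65536);
	printf("buckets now %u (user view %u)\n",
	       htable_size, htable_size_user);
	return 0;
}

Non-init network namespaces keep the entry read-only (table[2].mode forced back to 0444), since the hash table is global.
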
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index a5d41dfa9f05..aa5847a16713 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -159,6 +159,20 @@ int nf_logger_find_get(int pf, enum nf_log_type type)
159 struct nf_logger *logger; 159 struct nf_logger *logger;
160 int ret = -ENOENT; 160 int ret = -ENOENT;
161 161
162 if (pf == NFPROTO_INET) {
163 ret = nf_logger_find_get(NFPROTO_IPV4, type);
164 if (ret < 0)
165 return ret;
166
167 ret = nf_logger_find_get(NFPROTO_IPV6, type);
168 if (ret < 0) {
169 nf_logger_put(NFPROTO_IPV4, type);
170 return ret;
171 }
172
173 return 0;
174 }
175
162 if (rcu_access_pointer(loggers[pf][type]) == NULL) 176 if (rcu_access_pointer(loggers[pf][type]) == NULL)
163 request_module("nf-logger-%u-%u", pf, type); 177 request_module("nf-logger-%u-%u", pf, type);
164 178
@@ -167,7 +181,7 @@ int nf_logger_find_get(int pf, enum nf_log_type type)
167 if (logger == NULL) 181 if (logger == NULL)
168 goto out; 182 goto out;
169 183
170 if (logger && try_module_get(logger->me)) 184 if (try_module_get(logger->me))
171 ret = 0; 185 ret = 0;
172out: 186out:
173 rcu_read_unlock(); 187 rcu_read_unlock();
@@ -179,6 +193,12 @@ void nf_logger_put(int pf, enum nf_log_type type)
179{ 193{
180 struct nf_logger *logger; 194 struct nf_logger *logger;
181 195
196 if (pf == NFPROTO_INET) {
197 nf_logger_put(NFPROTO_IPV4, type);
198 nf_logger_put(NFPROTO_IPV6, type);
199 return;
200 }
201
182 BUG_ON(loggers[pf][type] == NULL); 202 BUG_ON(loggers[pf][type] == NULL);
183 203
184 rcu_read_lock(); 204 rcu_read_lock();
@@ -398,16 +418,17 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
398{ 418{
399 const struct nf_logger *logger; 419 const struct nf_logger *logger;
400 char buf[NFLOGGER_NAME_LEN]; 420 char buf[NFLOGGER_NAME_LEN];
401 size_t size = *lenp;
402 int r = 0; 421 int r = 0;
403 int tindex = (unsigned long)table->extra1; 422 int tindex = (unsigned long)table->extra1;
404 struct net *net = current->nsproxy->net_ns; 423 struct net *net = current->nsproxy->net_ns;
405 424
406 if (write) { 425 if (write) {
407 if (size > sizeof(buf)) 426 struct ctl_table tmp = *table;
408 size = sizeof(buf); 427
409 if (copy_from_user(buf, buffer, size)) 428 tmp.data = buf;
410 return -EFAULT; 429 r = proc_dostring(&tmp, write, buffer, lenp, ppos);
430 if (r)
431 return r;
411 432
412 if (!strcmp(buf, "NONE")) { 433 if (!strcmp(buf, "NONE")) {
413 nf_log_unbind_pf(net, tindex); 434 nf_log_unbind_pf(net, tindex);
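
nf_log.c teaches nf_logger_find_get() about NFPROTO_INET by acquiring the IPv4 and IPv6 loggers as a pair, releasing the first if the second fails, with nf_logger_put() releasing both; callers keep seeing a single family. The acquire-pair-with-rollback idiom on its own (toy refcounts, no modules):

#include <stdio.h>

enum { FAM_V4, FAM_V6, FAM_INET, FAM_MAX };

static int refs[FAM_MAX];
static int available[FAM_MAX] = { 1, 1 };	/* flip a 1 to 0 to see rollback */

static void logger_put(int fam);

static int logger_get(int fam)
{
	if (fam == FAM_INET) {
		int ret = logger_get(FAM_V4);

		if (ret)
			return ret;
		ret = logger_get(FAM_V6);
		if (ret) {
			logger_put(FAM_V4);	/* roll back the half pair */
			return ret;
		}
		return 0;
	}
	if (!available[fam])
		return -1;
	refs[fam]++;
	return 0;
}

static void logger_put(int fam)
{
	if (fam == FAM_INET) {
		logger_put(FAM_V4);
		logger_put(FAM_V6);
		return;
	}
	refs[fam]--;
}

int main(void)
{
	printf("get inet: %d (v4=%d v6=%d)\n",
	       logger_get(FAM_INET), refs[FAM_V4], refs[FAM_V6]);
	logger_put(FAM_INET);
	printf("after put: v4=%d v6=%d\n", refs[FAM_V4], refs[FAM_V6]);
	return 0;
}
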
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 7b7aa871a174..18b7f8578ee0 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -131,29 +131,8 @@ static void nft_trans_destroy(struct nft_trans *trans)
131 kfree(trans); 131 kfree(trans);
132} 132}
133 133
134static int nft_register_basechain(struct nft_base_chain *basechain, 134static int nf_tables_register_hooks(struct net *net,
135 unsigned int hook_nops) 135 const struct nft_table *table,
136{
137 struct net *net = read_pnet(&basechain->pnet);
138
139 if (basechain->flags & NFT_BASECHAIN_DISABLED)
140 return 0;
141
142 return nf_register_net_hooks(net, basechain->ops, hook_nops);
143}
144
145static void nft_unregister_basechain(struct nft_base_chain *basechain,
146 unsigned int hook_nops)
147{
148 struct net *net = read_pnet(&basechain->pnet);
149
150 if (basechain->flags & NFT_BASECHAIN_DISABLED)
151 return;
152
153 nf_unregister_net_hooks(net, basechain->ops, hook_nops);
154}
155
156static int nf_tables_register_hooks(const struct nft_table *table,
157 struct nft_chain *chain, 136 struct nft_chain *chain,
158 unsigned int hook_nops) 137 unsigned int hook_nops)
159{ 138{
@@ -161,10 +140,12 @@ static int nf_tables_register_hooks(const struct nft_table *table,
161 !(chain->flags & NFT_BASE_CHAIN)) 140 !(chain->flags & NFT_BASE_CHAIN))
162 return 0; 141 return 0;
163 142
164 return nft_register_basechain(nft_base_chain(chain), hook_nops); 143 return nf_register_net_hooks(net, nft_base_chain(chain)->ops,
144 hook_nops);
165} 145}
166 146
167static void nf_tables_unregister_hooks(const struct nft_table *table, 147static void nf_tables_unregister_hooks(struct net *net,
148 const struct nft_table *table,
168 struct nft_chain *chain, 149 struct nft_chain *chain,
169 unsigned int hook_nops) 150 unsigned int hook_nops)
170{ 151{
@@ -172,12 +153,9 @@ static void nf_tables_unregister_hooks(const struct nft_table *table,
172 !(chain->flags & NFT_BASE_CHAIN)) 153 !(chain->flags & NFT_BASE_CHAIN))
173 return; 154 return;
174 155
175 nft_unregister_basechain(nft_base_chain(chain), hook_nops); 156 nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, hook_nops);
176} 157}
177 158
178/* Internal table flags */
179#define NFT_TABLE_INACTIVE (1 << 15)
180
181static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type) 159static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
182{ 160{
183 struct nft_trans *trans; 161 struct nft_trans *trans;
@@ -187,7 +165,7 @@ static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
187 return -ENOMEM; 165 return -ENOMEM;
188 166
189 if (msg_type == NFT_MSG_NEWTABLE) 167 if (msg_type == NFT_MSG_NEWTABLE)
190 ctx->table->flags |= NFT_TABLE_INACTIVE; 168 nft_activate_next(ctx->net, ctx->table);
191 169
192 list_add_tail(&trans->list, &ctx->net->nft.commit_list); 170 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
193 return 0; 171 return 0;
@@ -201,7 +179,7 @@ static int nft_deltable(struct nft_ctx *ctx)
201 if (err < 0) 179 if (err < 0)
202 return err; 180 return err;
203 181
204 list_del_rcu(&ctx->table->list); 182 nft_deactivate_next(ctx->net, ctx->table);
205 return err; 183 return err;
206} 184}
207 185
@@ -214,7 +192,7 @@ static int nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
214 return -ENOMEM; 192 return -ENOMEM;
215 193
216 if (msg_type == NFT_MSG_NEWCHAIN) 194 if (msg_type == NFT_MSG_NEWCHAIN)
217 ctx->chain->flags |= NFT_CHAIN_INACTIVE; 195 nft_activate_next(ctx->net, ctx->chain);
218 196
219 list_add_tail(&trans->list, &ctx->net->nft.commit_list); 197 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
220 return 0; 198 return 0;
@@ -229,47 +207,17 @@ static int nft_delchain(struct nft_ctx *ctx)
229 return err; 207 return err;
230 208
231 ctx->table->use--; 209 ctx->table->use--;
232 list_del_rcu(&ctx->chain->list); 210 nft_deactivate_next(ctx->net, ctx->chain);
233 211
234 return err; 212 return err;
235} 213}
236 214
237static inline bool
238nft_rule_is_active(struct net *net, const struct nft_rule *rule)
239{
240 return (rule->genmask & nft_genmask_cur(net)) == 0;
241}
242
243static inline int
244nft_rule_is_active_next(struct net *net, const struct nft_rule *rule)
245{
246 return (rule->genmask & nft_genmask_next(net)) == 0;
247}
248
249static inline void
250nft_rule_activate_next(struct net *net, struct nft_rule *rule)
251{
252 /* Now inactive, will be active in the future */
253 rule->genmask = nft_genmask_cur(net);
254}
255
256static inline void
257nft_rule_deactivate_next(struct net *net, struct nft_rule *rule)
258{
259 rule->genmask = nft_genmask_next(net);
260}
261
262static inline void nft_rule_clear(struct net *net, struct nft_rule *rule)
263{
264 rule->genmask &= ~nft_genmask_next(net);
265}
266
267static int 215static int
268nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule) 216nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule)
269{ 217{
270 /* You cannot delete the same rule twice */ 218 /* You cannot delete the same rule twice */
271 if (nft_rule_is_active_next(ctx->net, rule)) { 219 if (nft_is_active_next(ctx->net, rule)) {
272 nft_rule_deactivate_next(ctx->net, rule); 220 nft_deactivate_next(ctx->net, rule);
273 ctx->chain->use--; 221 ctx->chain->use--;
274 return 0; 222 return 0;
275 } 223 }
@@ -322,9 +270,6 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
322 return 0; 270 return 0;
323} 271}
324 272
325/* Internal set flag */
326#define NFT_SET_INACTIVE (1 << 15)
327
328static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type, 273static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type,
329 struct nft_set *set) 274 struct nft_set *set)
330{ 275{
@@ -337,7 +282,7 @@ static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type,
337 if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] != NULL) { 282 if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] != NULL) {
338 nft_trans_set_id(trans) = 283 nft_trans_set_id(trans) =
339 ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID])); 284 ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID]));
340 set->flags |= NFT_SET_INACTIVE; 285 nft_activate_next(ctx->net, set);
341 } 286 }
342 nft_trans_set(trans) = set; 287 nft_trans_set(trans) = set;
343 list_add_tail(&trans->list, &ctx->net->nft.commit_list); 288 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
@@ -353,7 +298,7 @@ static int nft_delset(struct nft_ctx *ctx, struct nft_set *set)
353 if (err < 0) 298 if (err < 0)
354 return err; 299 return err;
355 300
356 list_del_rcu(&set->list); 301 nft_deactivate_next(ctx->net, set);
357 ctx->table->use--; 302 ctx->table->use--;
358 303
359 return err; 304 return err;
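
The nf_tables hunks above and below retire the per-object INACTIVE flag bits (NFT_TABLE_INACTIVE, NFT_SET_INACTIVE, the open-coded rule helpers) in favor of one generation-mask scheme: every object carries two generation bits, nft_activate_next()/nft_deactivate_next() edit only the "next" bit, and committing a batch flips which generation is current, publishing all of its changes at once. A self-contained model of the two-bit mask (one global cursor, no netlink; a set bit means inactive in that generation, as in nf_tables):

#include <stdbool.h>
#include <stdio.h>

static unsigned int gencursor;		/* current generation: 0 or 1 */

struct object {
	unsigned int genmask;		/* zeroed mask = active everywhere */
};

static unsigned int cur(void) { return 1u << gencursor; }
static unsigned int nxt(void) { return 1u << (!gencursor); }

static bool is_active(const struct object *o)
{
	return !(o->genmask & cur());
}

static bool is_active_next(const struct object *o)
{
	return !(o->genmask & nxt());
}

static void activate_next(struct object *o)
{
	o->genmask = cur();		/* invisible now, live after commit */
}

static void deactivate_next(struct object *o)
{
	o->genmask |= nxt();		/* live now, gone after commit */
}

static void commit(void)
{
	gencursor = !gencursor;		/* one flip publishes the batch */
}

int main(void)
{
	struct object table = { 0 };

	activate_next(&table);
	printf("pre-commit:  cur=%d next=%d\n",
	       is_active(&table), is_active_next(&table));
	commit();
	printf("post-commit: cur=%d next=%d\n",
	       is_active(&table), is_active_next(&table));
	return 0;
}

This is also why the lookup and dump paths gain a genmask argument: whether an object is visible depends on which generation the caller is reading.
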
@@ -364,26 +309,29 @@ static int nft_delset(struct nft_ctx *ctx, struct nft_set *set)
364 */ 309 */
365 310
366static struct nft_table *nft_table_lookup(const struct nft_af_info *afi, 311static struct nft_table *nft_table_lookup(const struct nft_af_info *afi,
367 const struct nlattr *nla) 312 const struct nlattr *nla,
313 u8 genmask)
368{ 314{
369 struct nft_table *table; 315 struct nft_table *table;
370 316
371 list_for_each_entry(table, &afi->tables, list) { 317 list_for_each_entry(table, &afi->tables, list) {
372 if (!nla_strcmp(nla, table->name)) 318 if (!nla_strcmp(nla, table->name) &&
319 nft_active_genmask(table, genmask))
373 return table; 320 return table;
374 } 321 }
375 return NULL; 322 return NULL;
376} 323}
377 324
378static struct nft_table *nf_tables_table_lookup(const struct nft_af_info *afi, 325static struct nft_table *nf_tables_table_lookup(const struct nft_af_info *afi,
379 const struct nlattr *nla) 326 const struct nlattr *nla,
327 u8 genmask)
380{ 328{
381 struct nft_table *table; 329 struct nft_table *table;
382 330
383 if (nla == NULL) 331 if (nla == NULL)
384 return ERR_PTR(-EINVAL); 332 return ERR_PTR(-EINVAL);
385 333
386 table = nft_table_lookup(afi, nla); 334 table = nft_table_lookup(afi, nla, genmask);
387 if (table != NULL) 335 if (table != NULL)
388 return table; 336 return table;
389 337
@@ -524,6 +472,8 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
524 if (idx > s_idx) 472 if (idx > s_idx)
525 memset(&cb->args[1], 0, 473 memset(&cb->args[1], 0,
526 sizeof(cb->args) - sizeof(cb->args[0])); 474 sizeof(cb->args) - sizeof(cb->args[0]));
475 if (!nft_is_active(net, table))
476 continue;
527 if (nf_tables_fill_table_info(skb, net, 477 if (nf_tables_fill_table_info(skb, net,
528 NETLINK_CB(cb->skb).portid, 478 NETLINK_CB(cb->skb).portid,
529 cb->nlh->nlmsg_seq, 479 cb->nlh->nlmsg_seq,
@@ -548,6 +498,7 @@ static int nf_tables_gettable(struct net *net, struct sock *nlsk,
548 const struct nlattr * const nla[]) 498 const struct nlattr * const nla[])
549{ 499{
550 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 500 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
501 u8 genmask = nft_genmask_cur(net);
551 const struct nft_af_info *afi; 502 const struct nft_af_info *afi;
552 const struct nft_table *table; 503 const struct nft_table *table;
553 struct sk_buff *skb2; 504 struct sk_buff *skb2;
@@ -565,11 +516,9 @@ static int nf_tables_gettable(struct net *net, struct sock *nlsk,
565 if (IS_ERR(afi)) 516 if (IS_ERR(afi))
566 return PTR_ERR(afi); 517 return PTR_ERR(afi);
567 518
568 table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]); 519 table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME], genmask);
569 if (IS_ERR(table)) 520 if (IS_ERR(table))
570 return PTR_ERR(table); 521 return PTR_ERR(table);
571 if (table->flags & NFT_TABLE_INACTIVE)
572 return -ENOENT;
573 522
574 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 523 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
575 if (!skb2) 524 if (!skb2)
@@ -588,17 +537,21 @@ err:
588 return err; 537 return err;
589} 538}
590 539
591static int nf_tables_table_enable(const struct nft_af_info *afi, 540static int nf_tables_table_enable(struct net *net,
541 const struct nft_af_info *afi,
592 struct nft_table *table) 542 struct nft_table *table)
593{ 543{
594 struct nft_chain *chain; 544 struct nft_chain *chain;
595 int err, i = 0; 545 int err, i = 0;
596 546
597 list_for_each_entry(chain, &table->chains, list) { 547 list_for_each_entry(chain, &table->chains, list) {
548 if (!nft_is_active_next(net, chain))
549 continue;
598 if (!(chain->flags & NFT_BASE_CHAIN)) 550 if (!(chain->flags & NFT_BASE_CHAIN))
599 continue; 551 continue;
600 552
601 err = nft_register_basechain(nft_base_chain(chain), afi->nops); 553 err = nf_register_net_hooks(net, nft_base_chain(chain)->ops,
554 afi->nops);
602 if (err < 0) 555 if (err < 0)
603 goto err; 556 goto err;
604 557
@@ -607,26 +560,34 @@ static int nf_tables_table_enable(const struct nft_af_info *afi,
607 return 0; 560 return 0;
608err: 561err:
609 list_for_each_entry(chain, &table->chains, list) { 562 list_for_each_entry(chain, &table->chains, list) {
563 if (!nft_is_active_next(net, chain))
564 continue;
610 if (!(chain->flags & NFT_BASE_CHAIN)) 565 if (!(chain->flags & NFT_BASE_CHAIN))
611 continue; 566 continue;
612 567
613 if (i-- <= 0) 568 if (i-- <= 0)
614 break; 569 break;
615 570
616 nft_unregister_basechain(nft_base_chain(chain), afi->nops); 571 nf_unregister_net_hooks(net, nft_base_chain(chain)->ops,
572 afi->nops);
617 } 573 }
618 return err; 574 return err;
619} 575}
620 576
621static void nf_tables_table_disable(const struct nft_af_info *afi, 577static void nf_tables_table_disable(struct net *net,
578 const struct nft_af_info *afi,
622 struct nft_table *table) 579 struct nft_table *table)
623{ 580{
624 struct nft_chain *chain; 581 struct nft_chain *chain;
625 582
626 list_for_each_entry(chain, &table->chains, list) { 583 list_for_each_entry(chain, &table->chains, list) {
627 if (chain->flags & NFT_BASE_CHAIN) 584 if (!nft_is_active_next(net, chain))
628 nft_unregister_basechain(nft_base_chain(chain), 585 continue;
629 afi->nops); 586 if (!(chain->flags & NFT_BASE_CHAIN))
587 continue;
588
589 nf_unregister_net_hooks(net, nft_base_chain(chain)->ops,
590 afi->nops);
630 } 591 }
631} 592}
632 593
@@ -656,7 +617,7 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
656 nft_trans_table_enable(trans) = false; 617 nft_trans_table_enable(trans) = false;
657 } else if (!(flags & NFT_TABLE_F_DORMANT) && 618 } else if (!(flags & NFT_TABLE_F_DORMANT) &&
658 ctx->table->flags & NFT_TABLE_F_DORMANT) { 619 ctx->table->flags & NFT_TABLE_F_DORMANT) {
659 ret = nf_tables_table_enable(ctx->afi, ctx->table); 620 ret = nf_tables_table_enable(ctx->net, ctx->afi, ctx->table);
660 if (ret >= 0) { 621 if (ret >= 0) {
661 ctx->table->flags &= ~NFT_TABLE_F_DORMANT; 622 ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
662 nft_trans_table_enable(trans) = true; 623 nft_trans_table_enable(trans) = true;
@@ -678,6 +639,7 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
678 const struct nlattr * const nla[]) 639 const struct nlattr * const nla[])
679{ 640{
680 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 641 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
642 u8 genmask = nft_genmask_next(net);
681 const struct nlattr *name; 643 const struct nlattr *name;
682 struct nft_af_info *afi; 644 struct nft_af_info *afi;
683 struct nft_table *table; 645 struct nft_table *table;
@@ -691,7 +653,7 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
691 return PTR_ERR(afi); 653 return PTR_ERR(afi);
692 654
693 name = nla[NFTA_TABLE_NAME]; 655 name = nla[NFTA_TABLE_NAME];
694 table = nf_tables_table_lookup(afi, name); 656 table = nf_tables_table_lookup(afi, name, genmask);
695 if (IS_ERR(table)) { 657 if (IS_ERR(table)) {
696 if (PTR_ERR(table) != -ENOENT) 658 if (PTR_ERR(table) != -ENOENT)
697 return PTR_ERR(table); 659 return PTR_ERR(table);
@@ -699,8 +661,6 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
699 } 661 }
700 662
701 if (table != NULL) { 663 if (table != NULL) {
702 if (table->flags & NFT_TABLE_INACTIVE)
703 return -ENOENT;
704 if (nlh->nlmsg_flags & NLM_F_EXCL) 664 if (nlh->nlmsg_flags & NLM_F_EXCL)
705 return -EEXIST; 665 return -EEXIST;
706 if (nlh->nlmsg_flags & NLM_F_REPLACE) 666 if (nlh->nlmsg_flags & NLM_F_REPLACE)
@@ -752,6 +712,9 @@ static int nft_flush_table(struct nft_ctx *ctx)
752 struct nft_set *set, *ns; 712 struct nft_set *set, *ns;
753 713
754 list_for_each_entry(chain, &ctx->table->chains, list) { 714 list_for_each_entry(chain, &ctx->table->chains, list) {
715 if (!nft_is_active_next(ctx->net, chain))
716 continue;
717
755 ctx->chain = chain; 718 ctx->chain = chain;
756 719
757 err = nft_delrule_by_chain(ctx); 720 err = nft_delrule_by_chain(ctx);
@@ -760,6 +723,9 @@ static int nft_flush_table(struct nft_ctx *ctx)
760 } 723 }
761 724
762 list_for_each_entry_safe(set, ns, &ctx->table->sets, list) { 725 list_for_each_entry_safe(set, ns, &ctx->table->sets, list) {
726 if (!nft_is_active_next(ctx->net, set))
727 continue;
728
763 if (set->flags & NFT_SET_ANONYMOUS && 729 if (set->flags & NFT_SET_ANONYMOUS &&
764 !list_empty(&set->bindings)) 730 !list_empty(&set->bindings))
765 continue; 731 continue;
@@ -770,6 +736,9 @@ static int nft_flush_table(struct nft_ctx *ctx)
770 } 736 }
771 737
772 list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { 738 list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
739 if (!nft_is_active_next(ctx->net, chain))
740 continue;
741
773 ctx->chain = chain; 742 ctx->chain = chain;
774 743
775 err = nft_delchain(ctx); 744 err = nft_delchain(ctx);
@@ -795,6 +764,9 @@ static int nft_flush(struct nft_ctx *ctx, int family)
795 764
796 ctx->afi = afi; 765 ctx->afi = afi;
797 list_for_each_entry_safe(table, nt, &afi->tables, list) { 766 list_for_each_entry_safe(table, nt, &afi->tables, list) {
767 if (!nft_is_active_next(ctx->net, table))
768 continue;
769
798 if (nla[NFTA_TABLE_NAME] && 770 if (nla[NFTA_TABLE_NAME] &&
799 nla_strcmp(nla[NFTA_TABLE_NAME], table->name) != 0) 771 nla_strcmp(nla[NFTA_TABLE_NAME], table->name) != 0)
800 continue; 772 continue;
@@ -815,6 +787,7 @@ static int nf_tables_deltable(struct net *net, struct sock *nlsk,
815 const struct nlattr * const nla[]) 787 const struct nlattr * const nla[])
816{ 788{
817 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 789 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
790 u8 genmask = nft_genmask_next(net);
818 struct nft_af_info *afi; 791 struct nft_af_info *afi;
819 struct nft_table *table; 792 struct nft_table *table;
820 int family = nfmsg->nfgen_family; 793 int family = nfmsg->nfgen_family;
@@ -828,7 +801,7 @@ static int nf_tables_deltable(struct net *net, struct sock *nlsk,
828 if (IS_ERR(afi)) 801 if (IS_ERR(afi))
829 return PTR_ERR(afi); 802 return PTR_ERR(afi);
830 803
831 table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]); 804 table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME], genmask);
832 if (IS_ERR(table)) 805 if (IS_ERR(table))
833 return PTR_ERR(table); 806 return PTR_ERR(table);
834 807
@@ -875,12 +848,14 @@ EXPORT_SYMBOL_GPL(nft_unregister_chain_type);
875 */ 848 */
876 849
877static struct nft_chain * 850static struct nft_chain *
878nf_tables_chain_lookup_byhandle(const struct nft_table *table, u64 handle) 851nf_tables_chain_lookup_byhandle(const struct nft_table *table, u64 handle,
852 u8 genmask)
879{ 853{
880 struct nft_chain *chain; 854 struct nft_chain *chain;
881 855
882 list_for_each_entry(chain, &table->chains, list) { 856 list_for_each_entry(chain, &table->chains, list) {
883 if (chain->handle == handle) 857 if (chain->handle == handle &&
858 nft_active_genmask(chain, genmask))
884 return chain; 859 return chain;
885 } 860 }
886 861
@@ -888,7 +863,8 @@ nf_tables_chain_lookup_byhandle(const struct nft_table *table, u64 handle)
888} 863}
889 864
890static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table, 865static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table,
891 const struct nlattr *nla) 866 const struct nlattr *nla,
867 u8 genmask)
892{ 868{
893 struct nft_chain *chain; 869 struct nft_chain *chain;
894 870
@@ -896,7 +872,8 @@ static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table,
896 return ERR_PTR(-EINVAL); 872 return ERR_PTR(-EINVAL);
897 873
898 list_for_each_entry(chain, &table->chains, list) { 874 list_for_each_entry(chain, &table->chains, list) {
899 if (!nla_strcmp(nla, chain->name)) 875 if (!nla_strcmp(nla, chain->name) &&
876 nft_active_genmask(chain, genmask))
900 return chain; 877 return chain;
901 } 878 }
902 879
@@ -1079,6 +1056,8 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
1079 if (idx > s_idx) 1056 if (idx > s_idx)
1080 memset(&cb->args[1], 0, 1057 memset(&cb->args[1], 0,
1081 sizeof(cb->args) - sizeof(cb->args[0])); 1058 sizeof(cb->args) - sizeof(cb->args[0]));
1059 if (!nft_is_active(net, chain))
1060 continue;
1082 if (nf_tables_fill_chain_info(skb, net, 1061 if (nf_tables_fill_chain_info(skb, net,
1083 NETLINK_CB(cb->skb).portid, 1062 NETLINK_CB(cb->skb).portid,
1084 cb->nlh->nlmsg_seq, 1063 cb->nlh->nlmsg_seq,
@@ -1104,6 +1083,7 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk,
1104 const struct nlattr * const nla[]) 1083 const struct nlattr * const nla[])
1105{ 1084{
1106 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1085 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1086 u8 genmask = nft_genmask_cur(net);
1107 const struct nft_af_info *afi; 1087 const struct nft_af_info *afi;
1108 const struct nft_table *table; 1088 const struct nft_table *table;
1109 const struct nft_chain *chain; 1089 const struct nft_chain *chain;
@@ -1122,17 +1102,13 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk,
1122 if (IS_ERR(afi)) 1102 if (IS_ERR(afi))
1123 return PTR_ERR(afi); 1103 return PTR_ERR(afi);
1124 1104
1125 table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]); 1105 table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE], genmask);
1126 if (IS_ERR(table)) 1106 if (IS_ERR(table))
1127 return PTR_ERR(table); 1107 return PTR_ERR(table);
1128 if (table->flags & NFT_TABLE_INACTIVE)
1129 return -ENOENT;
1130 1108
1131 chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]); 1109 chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME], genmask);
1132 if (IS_ERR(chain)) 1110 if (IS_ERR(chain))
1133 return PTR_ERR(chain); 1111 return PTR_ERR(chain);
1134 if (chain->flags & NFT_CHAIN_INACTIVE)
1135 return -ENOENT;
1136 1112
1137 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 1113 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1138 if (!skb2) 1114 if (!skb2)
@@ -1231,6 +1207,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
1231 struct nft_chain *chain; 1207 struct nft_chain *chain;
1232 struct nft_base_chain *basechain = NULL; 1208 struct nft_base_chain *basechain = NULL;
1233 struct nlattr *ha[NFTA_HOOK_MAX + 1]; 1209 struct nlattr *ha[NFTA_HOOK_MAX + 1];
1210 u8 genmask = nft_genmask_next(net);
1234 int family = nfmsg->nfgen_family; 1211 int family = nfmsg->nfgen_family;
1235 struct net_device *dev = NULL; 1212 struct net_device *dev = NULL;
1236 u8 policy = NF_ACCEPT; 1213 u8 policy = NF_ACCEPT;
@@ -1247,7 +1224,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
1247 if (IS_ERR(afi)) 1224 if (IS_ERR(afi))
1248 return PTR_ERR(afi); 1225 return PTR_ERR(afi);
1249 1226
1250 table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]); 1227 table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE], genmask);
1251 if (IS_ERR(table)) 1228 if (IS_ERR(table))
1252 return PTR_ERR(table); 1229 return PTR_ERR(table);
1253 1230
@@ -1256,11 +1233,11 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
1256 1233
1257 if (nla[NFTA_CHAIN_HANDLE]) { 1234 if (nla[NFTA_CHAIN_HANDLE]) {
1258 handle = be64_to_cpu(nla_get_be64(nla[NFTA_CHAIN_HANDLE])); 1235 handle = be64_to_cpu(nla_get_be64(nla[NFTA_CHAIN_HANDLE]));
1259 chain = nf_tables_chain_lookup_byhandle(table, handle); 1236 chain = nf_tables_chain_lookup_byhandle(table, handle, genmask);
1260 if (IS_ERR(chain)) 1237 if (IS_ERR(chain))
1261 return PTR_ERR(chain); 1238 return PTR_ERR(chain);
1262 } else { 1239 } else {
1263 chain = nf_tables_chain_lookup(table, name); 1240 chain = nf_tables_chain_lookup(table, name, genmask);
1264 if (IS_ERR(chain)) { 1241 if (IS_ERR(chain)) {
1265 if (PTR_ERR(chain) != -ENOENT) 1242 if (PTR_ERR(chain) != -ENOENT)
1266 return PTR_ERR(chain); 1243 return PTR_ERR(chain);
@@ -1291,16 +1268,20 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
1291 struct nft_stats *stats = NULL; 1268 struct nft_stats *stats = NULL;
1292 struct nft_trans *trans; 1269 struct nft_trans *trans;
1293 1270
1294 if (chain->flags & NFT_CHAIN_INACTIVE)
1295 return -ENOENT;
1296 if (nlh->nlmsg_flags & NLM_F_EXCL) 1271 if (nlh->nlmsg_flags & NLM_F_EXCL)
1297 return -EEXIST; 1272 return -EEXIST;
1298 if (nlh->nlmsg_flags & NLM_F_REPLACE) 1273 if (nlh->nlmsg_flags & NLM_F_REPLACE)
1299 return -EOPNOTSUPP; 1274 return -EOPNOTSUPP;
1300 1275
1301 if (nla[NFTA_CHAIN_HANDLE] && name && 1276 if (nla[NFTA_CHAIN_HANDLE] && name) {
1302 !IS_ERR(nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]))) 1277 struct nft_chain *chain2;
1303 return -EEXIST; 1278
1279 chain2 = nf_tables_chain_lookup(table,
1280 nla[NFTA_CHAIN_NAME],
1281 genmask);
1282 if (IS_ERR(chain2))
1283 return PTR_ERR(chain2);
1284 }
1304 1285
1305 if (nla[NFTA_CHAIN_COUNTERS]) { 1286 if (nla[NFTA_CHAIN_COUNTERS]) {
1306 if (!(chain->flags & NFT_BASE_CHAIN)) 1287 if (!(chain->flags & NFT_BASE_CHAIN))
@@ -1455,7 +1436,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
1455 chain->table = table; 1436 chain->table = table;
1456 nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN); 1437 nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
1457 1438
1458 err = nf_tables_register_hooks(table, chain, afi->nops); 1439 err = nf_tables_register_hooks(net, table, chain, afi->nops);
1459 if (err < 0) 1440 if (err < 0)
1460 goto err1; 1441 goto err1;
1461 1442
@@ -1468,7 +1449,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
1468 list_add_tail_rcu(&chain->list, &table->chains); 1449 list_add_tail_rcu(&chain->list, &table->chains);
1469 return 0; 1450 return 0;
1470err2: 1451err2:
1471 nf_tables_unregister_hooks(table, chain, afi->nops); 1452 nf_tables_unregister_hooks(net, table, chain, afi->nops);
1472err1: 1453err1:
1473 nf_tables_chain_destroy(chain); 1454 nf_tables_chain_destroy(chain);
1474 return err; 1455 return err;
@@ -1479,6 +1460,7 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
1479 const struct nlattr * const nla[]) 1460 const struct nlattr * const nla[])
1480{ 1461{
1481 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1462 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1463 u8 genmask = nft_genmask_next(net);
1482 struct nft_af_info *afi; 1464 struct nft_af_info *afi;
1483 struct nft_table *table; 1465 struct nft_table *table;
1484 struct nft_chain *chain; 1466 struct nft_chain *chain;
@@ -1489,11 +1471,11 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
1489 if (IS_ERR(afi)) 1471 if (IS_ERR(afi))
1490 return PTR_ERR(afi); 1472 return PTR_ERR(afi);
1491 1473
1492 table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]); 1474 table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE], genmask);
1493 if (IS_ERR(table)) 1475 if (IS_ERR(table))
1494 return PTR_ERR(table); 1476 return PTR_ERR(table);
1495 1477
1496 chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]); 1478 chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME], genmask);
1497 if (IS_ERR(chain)) 1479 if (IS_ERR(chain))
1498 return PTR_ERR(chain); 1480 return PTR_ERR(chain);
1499 if (chain->use > 0) 1481 if (chain->use > 0)
@@ -1898,7 +1880,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
1898 list_for_each_entry_rcu(table, &afi->tables, list) { 1880 list_for_each_entry_rcu(table, &afi->tables, list) {
1899 list_for_each_entry_rcu(chain, &table->chains, list) { 1881 list_for_each_entry_rcu(chain, &table->chains, list) {
1900 list_for_each_entry_rcu(rule, &chain->rules, list) { 1882 list_for_each_entry_rcu(rule, &chain->rules, list) {
1901 if (!nft_rule_is_active(net, rule)) 1883 if (!nft_is_active(net, rule))
1902 goto cont; 1884 goto cont;
1903 if (idx < s_idx) 1885 if (idx < s_idx)
1904 goto cont; 1886 goto cont;
@@ -1931,6 +1913,7 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
1931 const struct nlattr * const nla[]) 1913 const struct nlattr * const nla[])
1932{ 1914{
1933 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1915 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1916 u8 genmask = nft_genmask_cur(net);
1934 const struct nft_af_info *afi; 1917 const struct nft_af_info *afi;
1935 const struct nft_table *table; 1918 const struct nft_table *table;
1936 const struct nft_chain *chain; 1919 const struct nft_chain *chain;
@@ -1950,17 +1933,13 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
1950 if (IS_ERR(afi)) 1933 if (IS_ERR(afi))
1951 return PTR_ERR(afi); 1934 return PTR_ERR(afi);
1952 1935
1953 table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]); 1936 table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE], genmask);
1954 if (IS_ERR(table)) 1937 if (IS_ERR(table))
1955 return PTR_ERR(table); 1938 return PTR_ERR(table);
1956 if (table->flags & NFT_TABLE_INACTIVE)
1957 return -ENOENT;
1958 1939
1959 chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); 1940 chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN], genmask);
1960 if (IS_ERR(chain)) 1941 if (IS_ERR(chain))
1961 return PTR_ERR(chain); 1942 return PTR_ERR(chain);
1962 if (chain->flags & NFT_CHAIN_INACTIVE)
1963 return -ENOENT;
1964 1943
1965 rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]); 1944 rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
1966 if (IS_ERR(rule)) 1945 if (IS_ERR(rule))
@@ -2009,6 +1988,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
2009 const struct nlattr * const nla[]) 1988 const struct nlattr * const nla[])
2010{ 1989{
2011 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1990 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1991 u8 genmask = nft_genmask_next(net);
2012 struct nft_af_info *afi; 1992 struct nft_af_info *afi;
2013 struct nft_table *table; 1993 struct nft_table *table;
2014 struct nft_chain *chain; 1994 struct nft_chain *chain;
@@ -2029,11 +2009,11 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
2029 if (IS_ERR(afi)) 2009 if (IS_ERR(afi))
2030 return PTR_ERR(afi); 2010 return PTR_ERR(afi);
2031 2011
2032 table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]); 2012 table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE], genmask);
2033 if (IS_ERR(table)) 2013 if (IS_ERR(table))
2034 return PTR_ERR(table); 2014 return PTR_ERR(table);
2035 2015
2036 chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); 2016 chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN], genmask);
2037 if (IS_ERR(chain)) 2017 if (IS_ERR(chain))
2038 return PTR_ERR(chain); 2018 return PTR_ERR(chain);
2039 2019
@@ -2102,7 +2082,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
2102 if (rule == NULL) 2082 if (rule == NULL)
2103 goto err1; 2083 goto err1;
2104 2084
2105 nft_rule_activate_next(net, rule); 2085 nft_activate_next(net, rule);
2106 2086
2107 rule->handle = handle; 2087 rule->handle = handle;
2108 rule->dlen = size; 2088 rule->dlen = size;
@@ -2124,14 +2104,14 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
2124 } 2104 }
2125 2105
2126 if (nlh->nlmsg_flags & NLM_F_REPLACE) { 2106 if (nlh->nlmsg_flags & NLM_F_REPLACE) {
2127 if (nft_rule_is_active_next(net, old_rule)) { 2107 if (nft_is_active_next(net, old_rule)) {
2128 trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE, 2108 trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
2129 old_rule); 2109 old_rule);
2130 if (trans == NULL) { 2110 if (trans == NULL) {
2131 err = -ENOMEM; 2111 err = -ENOMEM;
2132 goto err2; 2112 goto err2;
2133 } 2113 }
2134 nft_rule_deactivate_next(net, old_rule); 2114 nft_deactivate_next(net, old_rule);
2135 chain->use--; 2115 chain->use--;
2136 list_add_tail_rcu(&rule->list, &old_rule->list); 2116 list_add_tail_rcu(&rule->list, &old_rule->list);
2137 } else { 2117 } else {
@@ -2174,6 +2154,7 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk,
2174 const struct nlattr * const nla[]) 2154 const struct nlattr * const nla[])
2175{ 2155{
2176 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2156 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2157 u8 genmask = nft_genmask_next(net);
2177 struct nft_af_info *afi; 2158 struct nft_af_info *afi;
2178 struct nft_table *table; 2159 struct nft_table *table;
2179 struct nft_chain *chain = NULL; 2160 struct nft_chain *chain = NULL;
@@ -2185,12 +2166,13 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk,
2185 if (IS_ERR(afi)) 2166 if (IS_ERR(afi))
2186 return PTR_ERR(afi); 2167 return PTR_ERR(afi);
2187 2168
2188 table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]); 2169 table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE], genmask);
2189 if (IS_ERR(table)) 2170 if (IS_ERR(table))
2190 return PTR_ERR(table); 2171 return PTR_ERR(table);
2191 2172
2192 if (nla[NFTA_RULE_CHAIN]) { 2173 if (nla[NFTA_RULE_CHAIN]) {
2193 chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); 2174 chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN],
2175 genmask);
2194 if (IS_ERR(chain)) 2176 if (IS_ERR(chain))
2195 return PTR_ERR(chain); 2177 return PTR_ERR(chain);
2196 } 2178 }
@@ -2210,6 +2192,9 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk,
2210 } 2192 }
2211 } else { 2193 } else {
2212 list_for_each_entry(chain, &table->chains, list) { 2194 list_for_each_entry(chain, &table->chains, list) {
2195 if (!nft_is_active_next(net, chain))
2196 continue;
2197
2213 ctx.chain = chain; 2198 ctx.chain = chain;
2214 err = nft_delrule_by_chain(&ctx); 2199 err = nft_delrule_by_chain(&ctx);
2215 if (err < 0) 2200 if (err < 0)
@@ -2339,7 +2324,8 @@ static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
2339static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net, 2324static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net,
2340 const struct sk_buff *skb, 2325 const struct sk_buff *skb,
2341 const struct nlmsghdr *nlh, 2326 const struct nlmsghdr *nlh,
2342 const struct nlattr * const nla[]) 2327 const struct nlattr * const nla[],
2328 u8 genmask)
2343{ 2329{
2344 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2330 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2345 struct nft_af_info *afi = NULL; 2331 struct nft_af_info *afi = NULL;
@@ -2355,7 +2341,8 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net,
2355 if (afi == NULL) 2341 if (afi == NULL)
2356 return -EAFNOSUPPORT; 2342 return -EAFNOSUPPORT;
2357 2343
2358 table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]); 2344 table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE],
2345 genmask);
2359 if (IS_ERR(table)) 2346 if (IS_ERR(table))
2360 return PTR_ERR(table); 2347 return PTR_ERR(table);
2361 } 2348 }
@@ -2365,7 +2352,7 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net,
2365} 2352}
2366 2353
2367struct nft_set *nf_tables_set_lookup(const struct nft_table *table, 2354struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
2368 const struct nlattr *nla) 2355 const struct nlattr *nla, u8 genmask)
2369{ 2356{
2370 struct nft_set *set; 2357 struct nft_set *set;
2371 2358
@@ -2373,22 +2360,27 @@ struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
2373 return ERR_PTR(-EINVAL); 2360 return ERR_PTR(-EINVAL);
2374 2361
2375 list_for_each_entry(set, &table->sets, list) { 2362 list_for_each_entry(set, &table->sets, list) {
2376 if (!nla_strcmp(nla, set->name)) 2363 if (!nla_strcmp(nla, set->name) &&
2364 nft_active_genmask(set, genmask))
2377 return set; 2365 return set;
2378 } 2366 }
2379 return ERR_PTR(-ENOENT); 2367 return ERR_PTR(-ENOENT);
2380} 2368}
2381 2369
2382struct nft_set *nf_tables_set_lookup_byid(const struct net *net, 2370struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
2383 const struct nlattr *nla) 2371 const struct nlattr *nla,
2372 u8 genmask)
2384{ 2373{
2385 struct nft_trans *trans; 2374 struct nft_trans *trans;
2386 u32 id = ntohl(nla_get_be32(nla)); 2375 u32 id = ntohl(nla_get_be32(nla));
2387 2376
2388 list_for_each_entry(trans, &net->nft.commit_list, list) { 2377 list_for_each_entry(trans, &net->nft.commit_list, list) {
2378 struct nft_set *set = nft_trans_set(trans);
2379
2389 if (trans->msg_type == NFT_MSG_NEWSET && 2380 if (trans->msg_type == NFT_MSG_NEWSET &&
2390 id == nft_trans_set_id(trans)) 2381 id == nft_trans_set_id(trans) &&
2391 return nft_trans_set(trans); 2382 nft_active_genmask(set, genmask))
2383 return set;
2392 } 2384 }
2393 return ERR_PTR(-ENOENT); 2385 return ERR_PTR(-ENOENT);
2394} 2386}
@@ -2413,6 +2405,8 @@ cont:
2413 list_for_each_entry(i, &ctx->table->sets, list) { 2405 list_for_each_entry(i, &ctx->table->sets, list) {
2414 int tmp; 2406 int tmp;
2415 2407
2408 if (!nft_is_active_next(ctx->net, set))
2409 continue;
2416 if (!sscanf(i->name, name, &tmp)) 2410 if (!sscanf(i->name, name, &tmp))
2417 continue; 2411 continue;
2418 if (tmp < min || tmp >= min + BITS_PER_BYTE * PAGE_SIZE) 2412 if (tmp < min || tmp >= min + BITS_PER_BYTE * PAGE_SIZE)
@@ -2432,6 +2426,8 @@ cont:
2432 2426
2433 snprintf(set->name, sizeof(set->name), name, min + n); 2427 snprintf(set->name, sizeof(set->name), name, min + n);
2434 list_for_each_entry(i, &ctx->table->sets, list) { 2428 list_for_each_entry(i, &ctx->table->sets, list) {
2429 if (!nft_is_active_next(ctx->net, i))
2430 continue;
2435 if (!strcmp(set->name, i->name)) 2431 if (!strcmp(set->name, i->name))
2436 return -ENFILE; 2432 return -ENFILE;
2437 } 2433 }
@@ -2580,6 +2576,8 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
2580 list_for_each_entry_rcu(set, &table->sets, list) { 2576 list_for_each_entry_rcu(set, &table->sets, list) {
2581 if (idx < s_idx) 2577 if (idx < s_idx)
2582 goto cont; 2578 goto cont;
2579 if (!nft_is_active(net, set))
2580 goto cont;
2583 2581
2584 ctx_set = *ctx; 2582 ctx_set = *ctx;
2585 ctx_set.table = table; 2583 ctx_set.table = table;
@@ -2616,6 +2614,7 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
2616 struct sk_buff *skb, const struct nlmsghdr *nlh, 2614 struct sk_buff *skb, const struct nlmsghdr *nlh,
2617 const struct nlattr * const nla[]) 2615 const struct nlattr * const nla[])
2618{ 2616{
2617 u8 genmask = nft_genmask_cur(net);
2619 const struct nft_set *set; 2618 const struct nft_set *set;
2620 struct nft_ctx ctx; 2619 struct nft_ctx ctx;
2621 struct sk_buff *skb2; 2620 struct sk_buff *skb2;
@@ -2623,7 +2622,7 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
2623 int err; 2622 int err;
2624 2623
2625 /* Verify existence before starting dump */ 2624 /* Verify existence before starting dump */
2626 err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla); 2625 err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla, genmask);
2627 if (err < 0) 2626 if (err < 0)
2628 return err; 2627 return err;
2629 2628
@@ -2650,11 +2649,9 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
2650 if (!nla[NFTA_SET_TABLE]) 2649 if (!nla[NFTA_SET_TABLE])
2651 return -EINVAL; 2650 return -EINVAL;
2652 2651
2653 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); 2652 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME], genmask);
2654 if (IS_ERR(set)) 2653 if (IS_ERR(set))
2655 return PTR_ERR(set); 2654 return PTR_ERR(set);
2656 if (set->flags & NFT_SET_INACTIVE)
2657 return -ENOENT;
2658 2655
2659 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2656 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2660 if (skb2 == NULL) 2657 if (skb2 == NULL)
@@ -2693,6 +2690,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
2693 const struct nlattr * const nla[]) 2690 const struct nlattr * const nla[])
2694{ 2691{
2695 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2692 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2693 u8 genmask = nft_genmask_next(net);
2696 const struct nft_set_ops *ops; 2694 const struct nft_set_ops *ops;
2697 struct nft_af_info *afi; 2695 struct nft_af_info *afi;
2698 struct nft_table *table; 2696 struct nft_table *table;
@@ -2790,13 +2788,13 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
2790 if (IS_ERR(afi)) 2788 if (IS_ERR(afi))
2791 return PTR_ERR(afi); 2789 return PTR_ERR(afi);
2792 2790
2793 table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]); 2791 table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE], genmask);
2794 if (IS_ERR(table)) 2792 if (IS_ERR(table))
2795 return PTR_ERR(table); 2793 return PTR_ERR(table);
2796 2794
2797 nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla); 2795 nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
2798 2796
2799 set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]); 2797 set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME], genmask);
2800 if (IS_ERR(set)) { 2798 if (IS_ERR(set)) {
2801 if (PTR_ERR(set) != -ENOENT) 2799 if (PTR_ERR(set) != -ENOENT)
2802 return PTR_ERR(set); 2800 return PTR_ERR(set);
@@ -2895,6 +2893,7 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
2895 const struct nlattr * const nla[]) 2893 const struct nlattr * const nla[])
2896{ 2894{
2897 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2895 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2896 u8 genmask = nft_genmask_next(net);
2898 struct nft_set *set; 2897 struct nft_set *set;
2899 struct nft_ctx ctx; 2898 struct nft_ctx ctx;
2900 int err; 2899 int err;
@@ -2904,11 +2903,11 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
2904 if (nla[NFTA_SET_TABLE] == NULL) 2903 if (nla[NFTA_SET_TABLE] == NULL)
2905 return -EINVAL; 2904 return -EINVAL;
2906 2905
2907 err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla); 2906 err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla, genmask);
2908 if (err < 0) 2907 if (err < 0)
2909 return err; 2908 return err;
2910 2909
2911 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); 2910 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME], genmask);
2912 if (IS_ERR(set)) 2911 if (IS_ERR(set))
2913 return PTR_ERR(set); 2912 return PTR_ERR(set);
2914 if (!list_empty(&set->bindings)) 2913 if (!list_empty(&set->bindings))
@@ -2946,24 +2945,20 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
2946 * jumps are already validated for that chain. 2945 * jumps are already validated for that chain.
2947 */ 2946 */
2948 list_for_each_entry(i, &set->bindings, list) { 2947 list_for_each_entry(i, &set->bindings, list) {
2949 if (binding->flags & NFT_SET_MAP && 2948 if (i->flags & NFT_SET_MAP &&
2950 i->chain == binding->chain) 2949 i->chain == binding->chain)
2951 goto bind; 2950 goto bind;
2952 } 2951 }
2953 2952
2953 iter.genmask = nft_genmask_next(ctx->net);
2954 iter.skip = 0; 2954 iter.skip = 0;
2955 iter.count = 0; 2955 iter.count = 0;
2956 iter.err = 0; 2956 iter.err = 0;
2957 iter.fn = nf_tables_bind_check_setelem; 2957 iter.fn = nf_tables_bind_check_setelem;
2958 2958
2959 set->ops->walk(ctx, set, &iter); 2959 set->ops->walk(ctx, set, &iter);
2960 if (iter.err < 0) { 2960 if (iter.err < 0)
2961 /* Destroy anonymous sets if binding fails */
2962 if (set->flags & NFT_SET_ANONYMOUS)
2963 nf_tables_set_destroy(ctx, set);
2964
2965 return iter.err; 2961 return iter.err;
2966 }
2967 } 2962 }
2968bind: 2963bind:
2969 binding->chain = ctx->chain; 2964 binding->chain = ctx->chain;
@@ -2977,7 +2972,7 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
2977 list_del_rcu(&binding->list); 2972 list_del_rcu(&binding->list);
2978 2973
2979 if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS && 2974 if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
2980 !(set->flags & NFT_SET_INACTIVE)) 2975 nft_is_active(ctx->net, set))
2981 nf_tables_set_destroy(ctx, set); 2976 nf_tables_set_destroy(ctx, set);
2982} 2977}
2983 2978
@@ -3033,7 +3028,8 @@ static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX +
3033static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, struct net *net, 3028static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, struct net *net,
3034 const struct sk_buff *skb, 3029 const struct sk_buff *skb,
3035 const struct nlmsghdr *nlh, 3030 const struct nlmsghdr *nlh,
3036 const struct nlattr * const nla[]) 3031 const struct nlattr * const nla[],
3032 u8 genmask)
3037{ 3033{
3038 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 3034 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3039 struct nft_af_info *afi; 3035 struct nft_af_info *afi;
@@ -3043,7 +3039,8 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, struct net *net,
3043 if (IS_ERR(afi)) 3039 if (IS_ERR(afi))
3044 return PTR_ERR(afi); 3040 return PTR_ERR(afi);
3045 3041
3046 table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE]); 3042 table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE],
3043 genmask);
3047 if (IS_ERR(table)) 3044 if (IS_ERR(table))
3048 return PTR_ERR(table); 3045 return PTR_ERR(table);
3049 3046
@@ -3140,6 +3137,7 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
3140static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) 3137static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3141{ 3138{
3142 struct net *net = sock_net(skb->sk); 3139 struct net *net = sock_net(skb->sk);
3140 u8 genmask = nft_genmask_cur(net);
3143 const struct nft_set *set; 3141 const struct nft_set *set;
3144 struct nft_set_dump_args args; 3142 struct nft_set_dump_args args;
3145 struct nft_ctx ctx; 3143 struct nft_ctx ctx;
@@ -3156,17 +3154,14 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3156 return err; 3154 return err;
3157 3155
3158 err = nft_ctx_init_from_elemattr(&ctx, net, cb->skb, cb->nlh, 3156 err = nft_ctx_init_from_elemattr(&ctx, net, cb->skb, cb->nlh,
3159 (void *)nla); 3157 (void *)nla, genmask);
3160 if (err < 0) 3158 if (err < 0)
3161 return err; 3159 return err;
3162 if (ctx.table->flags & NFT_TABLE_INACTIVE)
3163 return -ENOENT;
3164 3160
3165 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); 3161 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET],
3162 genmask);
3166 if (IS_ERR(set)) 3163 if (IS_ERR(set))
3167 return PTR_ERR(set); 3164 return PTR_ERR(set);
3168 if (set->flags & NFT_SET_INACTIVE)
3169 return -ENOENT;
3170 3165
3171 event = NFT_MSG_NEWSETELEM; 3166 event = NFT_MSG_NEWSETELEM;
3172 event |= NFNL_SUBSYS_NFTABLES << 8; 3167 event |= NFNL_SUBSYS_NFTABLES << 8;
@@ -3192,12 +3187,13 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3192 if (nest == NULL) 3187 if (nest == NULL)
3193 goto nla_put_failure; 3188 goto nla_put_failure;
3194 3189
3195 args.cb = cb; 3190 args.cb = cb;
3196 args.skb = skb; 3191 args.skb = skb;
3197 args.iter.skip = cb->args[0]; 3192 args.iter.genmask = nft_genmask_cur(ctx.net);
3198 args.iter.count = 0; 3193 args.iter.skip = cb->args[0];
3199 args.iter.err = 0; 3194 args.iter.count = 0;
3200 args.iter.fn = nf_tables_dump_setelem; 3195 args.iter.err = 0;
3196 args.iter.fn = nf_tables_dump_setelem;
3201 set->ops->walk(&ctx, set, &args.iter); 3197 set->ops->walk(&ctx, set, &args.iter);
3202 3198
3203 nla_nest_end(skb, nest); 3199 nla_nest_end(skb, nest);
@@ -3219,21 +3215,19 @@ static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
3219 struct sk_buff *skb, const struct nlmsghdr *nlh, 3215 struct sk_buff *skb, const struct nlmsghdr *nlh,
3220 const struct nlattr * const nla[]) 3216 const struct nlattr * const nla[])
3221{ 3217{
3218 u8 genmask = nft_genmask_cur(net);
3222 const struct nft_set *set; 3219 const struct nft_set *set;
3223 struct nft_ctx ctx; 3220 struct nft_ctx ctx;
3224 int err; 3221 int err;
3225 3222
3226 err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla); 3223 err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, genmask);
3227 if (err < 0) 3224 if (err < 0)
3228 return err; 3225 return err;
3229 if (ctx.table->flags & NFT_TABLE_INACTIVE)
3230 return -ENOENT;
3231 3226
3232 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); 3227 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET],
3228 genmask);
3233 if (IS_ERR(set)) 3229 if (IS_ERR(set))
3234 return PTR_ERR(set); 3230 return PTR_ERR(set);
3235 if (set->flags & NFT_SET_INACTIVE)
3236 return -ENOENT;
3237 3231
3238 if (nlh->nlmsg_flags & NLM_F_DUMP) { 3232 if (nlh->nlmsg_flags & NLM_F_DUMP) {
3239 struct netlink_dump_control c = { 3233 struct netlink_dump_control c = {
@@ -3551,6 +3545,7 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
3551 struct sk_buff *skb, const struct nlmsghdr *nlh, 3545 struct sk_buff *skb, const struct nlmsghdr *nlh,
3552 const struct nlattr * const nla[]) 3546 const struct nlattr * const nla[])
3553{ 3547{
3548 u8 genmask = nft_genmask_next(net);
3554 const struct nlattr *attr; 3549 const struct nlattr *attr;
3555 struct nft_set *set; 3550 struct nft_set *set;
3556 struct nft_ctx ctx; 3551 struct nft_ctx ctx;
@@ -3559,15 +3554,17 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
3559 if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) 3554 if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
3560 return -EINVAL; 3555 return -EINVAL;
3561 3556
3562 err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla); 3557 err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, genmask);
3563 if (err < 0) 3558 if (err < 0)
3564 return err; 3559 return err;
3565 3560
3566 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); 3561 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET],
3562 genmask);
3567 if (IS_ERR(set)) { 3563 if (IS_ERR(set)) {
3568 if (nla[NFTA_SET_ELEM_LIST_SET_ID]) { 3564 if (nla[NFTA_SET_ELEM_LIST_SET_ID]) {
3569 set = nf_tables_set_lookup_byid(net, 3565 set = nf_tables_set_lookup_byid(net,
3570 nla[NFTA_SET_ELEM_LIST_SET_ID]); 3566 nla[NFTA_SET_ELEM_LIST_SET_ID],
3567 genmask);
3571 } 3568 }
3572 if (IS_ERR(set)) 3569 if (IS_ERR(set))
3573 return PTR_ERR(set); 3570 return PTR_ERR(set);
@@ -3673,6 +3670,7 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
3673 struct sk_buff *skb, const struct nlmsghdr *nlh, 3670 struct sk_buff *skb, const struct nlmsghdr *nlh,
3674 const struct nlattr * const nla[]) 3671 const struct nlattr * const nla[])
3675{ 3672{
3673 u8 genmask = nft_genmask_next(net);
3676 const struct nlattr *attr; 3674 const struct nlattr *attr;
3677 struct nft_set *set; 3675 struct nft_set *set;
3678 struct nft_ctx ctx; 3676 struct nft_ctx ctx;
@@ -3681,11 +3679,12 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
3681 if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) 3679 if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
3682 return -EINVAL; 3680 return -EINVAL;
3683 3681
3684 err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla); 3682 err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, genmask);
3685 if (err < 0) 3683 if (err < 0)
3686 return err; 3684 return err;
3687 3685
3688 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); 3686 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET],
3687 genmask);
3689 if (IS_ERR(set)) 3688 if (IS_ERR(set))
3690 return PTR_ERR(set); 3689 return PTR_ERR(set);
3691 if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT) 3690 if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
@@ -3953,36 +3952,40 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
3953 case NFT_MSG_NEWTABLE: 3952 case NFT_MSG_NEWTABLE:
3954 if (nft_trans_table_update(trans)) { 3953 if (nft_trans_table_update(trans)) {
3955 if (!nft_trans_table_enable(trans)) { 3954 if (!nft_trans_table_enable(trans)) {
3956 nf_tables_table_disable(trans->ctx.afi, 3955 nf_tables_table_disable(net,
3956 trans->ctx.afi,
3957 trans->ctx.table); 3957 trans->ctx.table);
3958 trans->ctx.table->flags |= NFT_TABLE_F_DORMANT; 3958 trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
3959 } 3959 }
3960 } else { 3960 } else {
3961 trans->ctx.table->flags &= ~NFT_TABLE_INACTIVE; 3961 nft_clear(net, trans->ctx.table);
3962 } 3962 }
3963 nf_tables_table_notify(&trans->ctx, NFT_MSG_NEWTABLE); 3963 nf_tables_table_notify(&trans->ctx, NFT_MSG_NEWTABLE);
3964 nft_trans_destroy(trans); 3964 nft_trans_destroy(trans);
3965 break; 3965 break;
3966 case NFT_MSG_DELTABLE: 3966 case NFT_MSG_DELTABLE:
3967 list_del_rcu(&trans->ctx.table->list);
3967 nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE); 3968 nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE);
3968 break; 3969 break;
3969 case NFT_MSG_NEWCHAIN: 3970 case NFT_MSG_NEWCHAIN:
3970 if (nft_trans_chain_update(trans)) 3971 if (nft_trans_chain_update(trans))
3971 nft_chain_commit_update(trans); 3972 nft_chain_commit_update(trans);
3972 else 3973 else
3973 trans->ctx.chain->flags &= ~NFT_CHAIN_INACTIVE; 3974 nft_clear(net, trans->ctx.chain);
3974 3975
3975 nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); 3976 nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
3976 nft_trans_destroy(trans); 3977 nft_trans_destroy(trans);
3977 break; 3978 break;
3978 case NFT_MSG_DELCHAIN: 3979 case NFT_MSG_DELCHAIN:
3980 list_del_rcu(&trans->ctx.chain->list);
3979 nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN); 3981 nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN);
3980 nf_tables_unregister_hooks(trans->ctx.table, 3982 nf_tables_unregister_hooks(trans->ctx.net,
3983 trans->ctx.table,
3981 trans->ctx.chain, 3984 trans->ctx.chain,
3982 trans->ctx.afi->nops); 3985 trans->ctx.afi->nops);
3983 break; 3986 break;
3984 case NFT_MSG_NEWRULE: 3987 case NFT_MSG_NEWRULE:
3985 nft_rule_clear(trans->ctx.net, nft_trans_rule(trans)); 3988 nft_clear(trans->ctx.net, nft_trans_rule(trans));
3986 nf_tables_rule_notify(&trans->ctx, 3989 nf_tables_rule_notify(&trans->ctx,
3987 nft_trans_rule(trans), 3990 nft_trans_rule(trans),
3988 NFT_MSG_NEWRULE); 3991 NFT_MSG_NEWRULE);
@@ -3995,7 +3998,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
3995 NFT_MSG_DELRULE); 3998 NFT_MSG_DELRULE);
3996 break; 3999 break;
3997 case NFT_MSG_NEWSET: 4000 case NFT_MSG_NEWSET:
3998 nft_trans_set(trans)->flags &= ~NFT_SET_INACTIVE; 4001 nft_clear(net, nft_trans_set(trans));
3999 /* This avoids hitting -EBUSY when deleting the table 4002 /* This avoids hitting -EBUSY when deleting the table
4000 * from the transaction. 4003 * from the transaction.
4001 */ 4004 */
@@ -4008,6 +4011,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
4008 nft_trans_destroy(trans); 4011 nft_trans_destroy(trans);
4009 break; 4012 break;
4010 case NFT_MSG_DELSET: 4013 case NFT_MSG_DELSET:
4014 list_del_rcu(&nft_trans_set(trans)->list);
4011 nf_tables_set_notify(&trans->ctx, nft_trans_set(trans), 4015 nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
4012 NFT_MSG_DELSET, GFP_KERNEL); 4016 NFT_MSG_DELSET, GFP_KERNEL);
4013 break; 4017 break;
@@ -4079,7 +4083,8 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
4079 case NFT_MSG_NEWTABLE: 4083 case NFT_MSG_NEWTABLE:
4080 if (nft_trans_table_update(trans)) { 4084 if (nft_trans_table_update(trans)) {
4081 if (nft_trans_table_enable(trans)) { 4085 if (nft_trans_table_enable(trans)) {
4082 nf_tables_table_disable(trans->ctx.afi, 4086 nf_tables_table_disable(net,
4087 trans->ctx.afi,
4083 trans->ctx.table); 4088 trans->ctx.table);
4084 trans->ctx.table->flags |= NFT_TABLE_F_DORMANT; 4089 trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
4085 } 4090 }
@@ -4089,8 +4094,7 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
4089 } 4094 }
4090 break; 4095 break;
4091 case NFT_MSG_DELTABLE: 4096 case NFT_MSG_DELTABLE:
4092 list_add_tail_rcu(&trans->ctx.table->list, 4097 nft_clear(trans->ctx.net, trans->ctx.table);
4093 &trans->ctx.afi->tables);
4094 nft_trans_destroy(trans); 4098 nft_trans_destroy(trans);
4095 break; 4099 break;
4096 case NFT_MSG_NEWCHAIN: 4100 case NFT_MSG_NEWCHAIN:
@@ -4101,15 +4105,15 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
4101 } else { 4105 } else {
4102 trans->ctx.table->use--; 4106 trans->ctx.table->use--;
4103 list_del_rcu(&trans->ctx.chain->list); 4107 list_del_rcu(&trans->ctx.chain->list);
4104 nf_tables_unregister_hooks(trans->ctx.table, 4108 nf_tables_unregister_hooks(trans->ctx.net,
4109 trans->ctx.table,
4105 trans->ctx.chain, 4110 trans->ctx.chain,
4106 trans->ctx.afi->nops); 4111 trans->ctx.afi->nops);
4107 } 4112 }
4108 break; 4113 break;
4109 case NFT_MSG_DELCHAIN: 4114 case NFT_MSG_DELCHAIN:
4110 trans->ctx.table->use++; 4115 trans->ctx.table->use++;
4111 list_add_tail_rcu(&trans->ctx.chain->list, 4116 nft_clear(trans->ctx.net, trans->ctx.chain);
4112 &trans->ctx.table->chains);
4113 nft_trans_destroy(trans); 4117 nft_trans_destroy(trans);
4114 break; 4118 break;
4115 case NFT_MSG_NEWRULE: 4119 case NFT_MSG_NEWRULE:
@@ -4118,7 +4122,7 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
4118 break; 4122 break;
4119 case NFT_MSG_DELRULE: 4123 case NFT_MSG_DELRULE:
4120 trans->ctx.chain->use++; 4124 trans->ctx.chain->use++;
4121 nft_rule_clear(trans->ctx.net, nft_trans_rule(trans)); 4125 nft_clear(trans->ctx.net, nft_trans_rule(trans));
4122 nft_trans_destroy(trans); 4126 nft_trans_destroy(trans);
4123 break; 4127 break;
4124 case NFT_MSG_NEWSET: 4128 case NFT_MSG_NEWSET:
@@ -4127,8 +4131,7 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
4127 break; 4131 break;
4128 case NFT_MSG_DELSET: 4132 case NFT_MSG_DELSET:
4129 trans->ctx.table->use++; 4133 trans->ctx.table->use++;
4130 list_add_tail_rcu(&nft_trans_set(trans)->list, 4134 nft_clear(trans->ctx.net, nft_trans_set(trans));
4131 &trans->ctx.table->sets);
4132 nft_trans_destroy(trans); 4135 nft_trans_destroy(trans);
4133 break; 4136 break;
4134 case NFT_MSG_NEWSETELEM: 4137 case NFT_MSG_NEWSETELEM:
@@ -4275,6 +4278,8 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
4275 } 4278 }
4276 4279
4277 list_for_each_entry(set, &ctx->table->sets, list) { 4280 list_for_each_entry(set, &ctx->table->sets, list) {
4281 if (!nft_is_active_next(ctx->net, set))
4282 continue;
4278 if (!(set->flags & NFT_SET_MAP) || 4283 if (!(set->flags & NFT_SET_MAP) ||
4279 set->dtype != NFT_DATA_VERDICT) 4284 set->dtype != NFT_DATA_VERDICT)
4280 continue; 4285 continue;
@@ -4284,6 +4289,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
4284 binding->chain != chain) 4289 binding->chain != chain)
4285 continue; 4290 continue;
4286 4291
4292 iter.genmask = nft_genmask_next(ctx->net);
4287 iter.skip = 0; 4293 iter.skip = 0;
4288 iter.count = 0; 4294 iter.count = 0;
4289 iter.err = 0; 4295 iter.err = 0;
@@ -4432,6 +4438,7 @@ static const struct nla_policy nft_verdict_policy[NFTA_VERDICT_MAX + 1] = {
4432static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, 4438static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
4433 struct nft_data_desc *desc, const struct nlattr *nla) 4439 struct nft_data_desc *desc, const struct nlattr *nla)
4434{ 4440{
4441 u8 genmask = nft_genmask_next(ctx->net);
4435 struct nlattr *tb[NFTA_VERDICT_MAX + 1]; 4442 struct nlattr *tb[NFTA_VERDICT_MAX + 1];
4436 struct nft_chain *chain; 4443 struct nft_chain *chain;
4437 int err; 4444 int err;
@@ -4464,7 +4471,7 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
4464 if (!tb[NFTA_VERDICT_CHAIN]) 4471 if (!tb[NFTA_VERDICT_CHAIN])
4465 return -EINVAL; 4472 return -EINVAL;
4466 chain = nf_tables_chain_lookup(ctx->table, 4473 chain = nf_tables_chain_lookup(ctx->table,
4467 tb[NFTA_VERDICT_CHAIN]); 4474 tb[NFTA_VERDICT_CHAIN], genmask);
4468 if (IS_ERR(chain)) 4475 if (IS_ERR(chain))
4469 return PTR_ERR(chain); 4476 return PTR_ERR(chain);
4470 if (chain->flags & NFT_BASE_CHAIN) 4477 if (chain->flags & NFT_BASE_CHAIN)
@@ -4642,7 +4649,7 @@ int __nft_release_basechain(struct nft_ctx *ctx)
4642 4649
4643 BUG_ON(!(ctx->chain->flags & NFT_BASE_CHAIN)); 4650 BUG_ON(!(ctx->chain->flags & NFT_BASE_CHAIN));
4644 4651
4645 nf_tables_unregister_hooks(ctx->chain->table, ctx->chain, 4652 nf_tables_unregister_hooks(ctx->net, ctx->chain->table, ctx->chain,
4646 ctx->afi->nops); 4653 ctx->afi->nops);
4647 list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) { 4654 list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
4648 list_del(&rule->list); 4655 list_del(&rule->list);
@@ -4671,7 +4678,8 @@ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi)
4671 4678
4672 list_for_each_entry_safe(table, nt, &afi->tables, list) { 4679 list_for_each_entry_safe(table, nt, &afi->tables, list) {
4673 list_for_each_entry(chain, &table->chains, list) 4680 list_for_each_entry(chain, &table->chains, list)
4674 nf_tables_unregister_hooks(table, chain, afi->nops); 4681 nf_tables_unregister_hooks(net, table, chain,
4682 afi->nops);
4675 /* No packets are walking on these chains anymore. */ 4683 /* No packets are walking on these chains anymore. */
4676 ctx.table = table; 4684 ctx.table = table;
4677 list_for_each_entry(chain, &table->chains, list) { 4685 list_for_each_entry(chain, &table->chains, list) {
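
Note on the nf_tables_api.c hunks above: they retire the NFT_TABLE_INACTIVE, NFT_CHAIN_INACTIVE and NFT_SET_INACTIVE flag checks in favour of a two-bit generation mask carried by every object, and thread a genmask argument through all the lookup helpers. Get/dump paths pass nft_genmask_cur(net), while new/del paths pass nft_genmask_next(net) so they operate on the not-yet-committed ruleset. Below is a minimal userspace model of the bitmask semantics implied by these call sites; the names mirror the kernel helpers, but this is an illustration, not the kernel implementation.

#include <stdio.h>
#include <stdint.h>

static unsigned int gencursor;			/* current generation, 0 or 1 */

static uint8_t genmask_cur(void)  { return (uint8_t)(1u << gencursor); }
static uint8_t genmask_next(void) { return (uint8_t)(1u << (gencursor ^ 1u)); }

struct obj { uint8_t genmask; };		/* bit set => inactive in that generation */

static int is_active(const struct obj *o, uint8_t mask)
{
	return (o->genmask & mask) == 0;
}

static void activate_next(struct obj *o)   { o->genmask = genmask_cur(); }
static void deactivate_next(struct obj *o) { o->genmask = genmask_next(); }
static void clear_stale(struct obj *o)     { o->genmask &= (uint8_t)~genmask_next(); }

int main(void)
{
	struct obj o = { 0 };

	activate_next(&o);			/* NEWTABLE: visible only after commit */
	printf("cur=%d next=%d\n",
	       is_active(&o, genmask_cur()),	/* 0: hidden from current readers */
	       is_active(&o, genmask_next()));	/* 1: visible to the transaction */

	gencursor ^= 1u;			/* commit: flip generations... */
	clear_stale(&o);			/* ...then nft_clear() drops the old bit */
	printf("cur=%d\n", is_active(&o, genmask_cur()));	/* 1: now live */

	deactivate_next(&o);			/* DELTABLE in a later transaction */
	gencursor ^= 1u;			/* commit */
	printf("cur=%d\n", is_active(&o, genmask_cur()));	/* 0: gone */
	return 0;
}

The same scheme explains why the DELTABLE, DELCHAIN and DELSET commit cases above now do list_del_rcu() at commit time: until the generation flips, a deleted object must stay on its list so that an abort can simply nft_clear() it back to life.
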
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index e9f8dffcc244..fb8b5892b5ff 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -143,7 +143,7 @@ next_rule:
143 list_for_each_entry_continue_rcu(rule, &chain->rules, list) { 143 list_for_each_entry_continue_rcu(rule, &chain->rules, list) {
144 144
145 /* This rule is not active, skip. */ 145 /* This rule is not active, skip. */
146 if (unlikely(rule->genmask & (1 << gencursor))) 146 if (unlikely(rule->genmask & gencursor))
147 continue; 147 continue;
148 148
149 rulenum++; 149 rulenum++;
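
The nf_tables_core.c hunk touches the per-packet hot path: judging by the new expression, the local gencursor now holds the already-shifted generation mask (its assignment is outside this hunk, so that is an inference), which turns the liveness test on each rule into a single AND instead of a shift plus AND per rule. A standalone sketch of that hoisting, with illustrative names:

#include <stdint.h>
#include <stddef.h>

struct rule { uint8_t genmask; };

static size_t count_active(const struct rule *rules, size_t n,
			   unsigned int gencursor)
{
	const uint8_t cur = (uint8_t)(1u << gencursor);	/* hoisted: was per rule */
	size_t active = 0;

	for (size_t i = 0; i < n; i++)
		if (!(rules[i].genmask & cur))		/* active in current gen */
			active++;
	return active;
}
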
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 11f81c8385fc..cbcfdfb586a6 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -700,10 +700,13 @@ nfulnl_log_packet(struct net *net,
700 break; 700 break;
701 701
702 case NFULNL_COPY_PACKET: 702 case NFULNL_COPY_PACKET:
703 if (inst->copy_range > skb->len) 703 data_len = inst->copy_range;
704 if ((li->u.ulog.flags & NF_LOG_F_COPY_LEN) &&
705 (li->u.ulog.copy_len < data_len))
706 data_len = li->u.ulog.copy_len;
707
708 if (data_len > skb->len)
704 data_len = skb->len; 709 data_len = skb->len;
705 else
706 data_len = inst->copy_range;
707 710
708 size += nla_total_size(data_len); 711 size += nla_total_size(data_len);
709 break; 712 break;
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 78d4914fb39c..0af26699bf04 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -103,6 +103,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
103 const struct nlattr * const tb[]) 103 const struct nlattr * const tb[])
104{ 104{
105 struct nft_dynset *priv = nft_expr_priv(expr); 105 struct nft_dynset *priv = nft_expr_priv(expr);
106 u8 genmask = nft_genmask_next(ctx->net);
106 struct nft_set *set; 107 struct nft_set *set;
107 u64 timeout; 108 u64 timeout;
108 int err; 109 int err;
@@ -112,11 +113,13 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
112 tb[NFTA_DYNSET_SREG_KEY] == NULL) 113 tb[NFTA_DYNSET_SREG_KEY] == NULL)
113 return -EINVAL; 114 return -EINVAL;
114 115
115 set = nf_tables_set_lookup(ctx->table, tb[NFTA_DYNSET_SET_NAME]); 116 set = nf_tables_set_lookup(ctx->table, tb[NFTA_DYNSET_SET_NAME],
117 genmask);
116 if (IS_ERR(set)) { 118 if (IS_ERR(set)) {
117 if (tb[NFTA_DYNSET_SET_ID]) 119 if (tb[NFTA_DYNSET_SET_ID])
118 set = nf_tables_set_lookup_byid(ctx->net, 120 set = nf_tables_set_lookup_byid(ctx->net,
119 tb[NFTA_DYNSET_SET_ID]); 121 tb[NFTA_DYNSET_SET_ID],
122 genmask);
120 if (IS_ERR(set)) 123 if (IS_ERR(set))
121 return PTR_ERR(set); 124 return PTR_ERR(set);
122 } 125 }
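
nft_dynset.c only plumbs the new genmask argument through, but it shows what the extra parameter buys: expressions are initialised with nft_genmask_next(ctx->net), so they bind to sets as they will exist once the current transaction commits, including sets created earlier in the same batch via the SET_ID path (which walks the commit list, as seen in the nf_tables_api.c hunk above). A sketch of the name lookup it calls into, with illustrative types:

#include <string.h>
#include <stdint.h>
#include <stddef.h>

struct set {
	const char	*name;
	uint8_t		genmask;
	struct set	*next;
};

static struct set *set_lookup(struct set *head, const char *name, uint8_t genmask)
{
	for (struct set *s = head; s != NULL; s = s->next) {
		/* a name match alone is no longer enough: the set must
		 * also be active in the caller-chosen generation */
		if (strcmp(s->name, name) == 0 && (s->genmask & genmask) == 0)
			return s;
	}
	return NULL;	/* the kernel returns ERR_PTR(-ENOENT) here */
}
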
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 6fa016564f90..ea924816b7b8 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -153,9 +153,10 @@ static void *nft_hash_deactivate(const struct nft_set *set,
153 const struct nft_set_elem *elem) 153 const struct nft_set_elem *elem)
154{ 154{
155 struct nft_hash *priv = nft_set_priv(set); 155 struct nft_hash *priv = nft_set_priv(set);
156 struct net *net = read_pnet(&set->pnet);
156 struct nft_hash_elem *he; 157 struct nft_hash_elem *he;
157 struct nft_hash_cmp_arg arg = { 158 struct nft_hash_cmp_arg arg = {
158 .genmask = nft_genmask_next(read_pnet(&set->pnet)), 159 .genmask = nft_genmask_next(net),
159 .set = set, 160 .set = set,
160 .key = elem->key.val.data, 161 .key = elem->key.val.data,
161 }; 162 };
@@ -163,7 +164,8 @@ static void *nft_hash_deactivate(const struct nft_set *set,
163 rcu_read_lock(); 164 rcu_read_lock();
164 he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params); 165 he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
165 if (he != NULL) { 166 if (he != NULL) {
166 if (!nft_set_elem_mark_busy(&he->ext)) 167 if (!nft_set_elem_mark_busy(&he->ext) ||
168 !nft_is_active(net, &he->ext))
167 nft_set_elem_change_active(set, &he->ext); 169 nft_set_elem_change_active(set, &he->ext);
168 else 170 else
169 he = NULL; 171 he = NULL;
@@ -189,7 +191,6 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
189 struct nft_hash_elem *he; 191 struct nft_hash_elem *he;
190 struct rhashtable_iter hti; 192 struct rhashtable_iter hti;
191 struct nft_set_elem elem; 193 struct nft_set_elem elem;
192 u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
193 int err; 194 int err;
194 195
195 err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL); 196 err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
@@ -218,7 +219,7 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
218 goto cont; 219 goto cont;
219 if (nft_set_elem_expired(&he->ext)) 220 if (nft_set_elem_expired(&he->ext))
220 goto cont; 221 goto cont;
221 if (!nft_set_elem_active(&he->ext, genmask)) 222 if (!nft_set_elem_active(&he->ext, iter->genmask))
222 goto cont; 223 goto cont;
223 224
224 elem.priv = he; 225 elem.priv = he;
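
Two related fixes in the nft_hash.c hunks: nft_hash_deactivate() now reads the namespace into a local once and also flips elements that are marked busy but not active in the current generation (which looks intended to let an element added and then deleted within one batch be removed), and nft_hash_walk() uses the caller-supplied iter->genmask instead of deriving the current generation itself, so dumps can walk the current generation while bind-time validation (see the iter.genmask assignments in nf_tables_api.c above) walks the next one. A sketch of the resulting iterator contract, with illustrative types:

#include <stdint.h>
#include <stddef.h>

struct set_iter {
	uint8_t		genmask;	/* chosen by the caller, not the walker */
	unsigned int	skip, count;
	int		err;
};

struct elem { uint8_t genmask; struct elem *next; };

static void walk(struct elem *head, struct set_iter *iter,
		 int (*fn)(struct elem *e))
{
	for (struct elem *e = head; e != NULL; e = e->next) {
		if (e->genmask & iter->genmask)	/* not in this generation */
			continue;
		if (iter->count++ < iter->skip)	/* resume a partial dump */
			continue;
		iter->err = fn(e);
		if (iter->err < 0)
			return;
	}
}
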
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 319c22b4bca2..713d66837705 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -52,7 +52,6 @@ static int nft_log_init(const struct nft_ctx *ctx,
52 struct nft_log *priv = nft_expr_priv(expr); 52 struct nft_log *priv = nft_expr_priv(expr);
53 struct nf_loginfo *li = &priv->loginfo; 53 struct nf_loginfo *li = &priv->loginfo;
54 const struct nlattr *nla; 54 const struct nlattr *nla;
55 int ret;
56 55
57 nla = tb[NFTA_LOG_PREFIX]; 56 nla = tb[NFTA_LOG_PREFIX];
58 if (nla != NULL) { 57 if (nla != NULL) {
@@ -97,19 +96,6 @@ static int nft_log_init(const struct nft_ctx *ctx,
97 break; 96 break;
98 } 97 }
99 98
100 if (ctx->afi->family == NFPROTO_INET) {
101 ret = nf_logger_find_get(NFPROTO_IPV4, li->type);
102 if (ret < 0)
103 return ret;
104
105 ret = nf_logger_find_get(NFPROTO_IPV6, li->type);
106 if (ret < 0) {
107 nf_logger_put(NFPROTO_IPV4, li->type);
108 return ret;
109 }
110 return 0;
111 }
112
113 return nf_logger_find_get(ctx->afi->family, li->type); 99 return nf_logger_find_get(ctx->afi->family, li->type);
114} 100}
115 101
@@ -122,12 +108,7 @@ static void nft_log_destroy(const struct nft_ctx *ctx,
122 if (priv->prefix != nft_log_null_prefix) 108 if (priv->prefix != nft_log_null_prefix)
123 kfree(priv->prefix); 109 kfree(priv->prefix);
124 110
125 if (ctx->afi->family == NFPROTO_INET) { 111 nf_logger_put(ctx->afi->family, li->type);
126 nf_logger_put(NFPROTO_IPV4, li->type);
127 nf_logger_put(NFPROTO_IPV6, li->type);
128 } else {
129 nf_logger_put(ctx->afi->family, li->type);
130 }
131} 112}
132 113
133static int nft_log_dump(struct sk_buff *skb, const struct nft_expr *expr) 114static int nft_log_dump(struct sk_buff *skb, const struct nft_expr *expr)
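
Both nft_log.c hunks drop the NFPROTO_INET special casing, presumably because nf_logger_find_get()/nf_logger_put() now handle the inet family internally; the enabling change is outside this diff, so that reading is an assumption. The deleted init branch itself was the usual acquire-two-with-rollback shape, generalised here as a sketch:

/* get() returns 0 or a negative error, put() releases; both illustrative */
static int acquire_pair(int (*get)(int family), void (*put)(int family),
			int fam_a, int fam_b)
{
	int ret;

	ret = get(fam_a);
	if (ret < 0)
		return ret;

	ret = get(fam_b);
	if (ret < 0) {
		put(fam_a);	/* roll back the first acquisition */
		return ret;
	}
	return 0;
}
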
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index b3c31ef8015d..b8d18f598569 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -22,6 +22,7 @@ struct nft_lookup {
22 struct nft_set *set; 22 struct nft_set *set;
23 enum nft_registers sreg:8; 23 enum nft_registers sreg:8;
24 enum nft_registers dreg:8; 24 enum nft_registers dreg:8;
25 bool invert;
25 struct nft_set_binding binding; 26 struct nft_set_binding binding;
26}; 27};
27 28
@@ -32,14 +33,20 @@ static void nft_lookup_eval(const struct nft_expr *expr,
32 const struct nft_lookup *priv = nft_expr_priv(expr); 33 const struct nft_lookup *priv = nft_expr_priv(expr);
33 const struct nft_set *set = priv->set; 34 const struct nft_set *set = priv->set;
34 const struct nft_set_ext *ext; 35 const struct nft_set_ext *ext;
36 bool found;
35 37
36 if (set->ops->lookup(set, &regs->data[priv->sreg], &ext)) { 38 found = set->ops->lookup(set, &regs->data[priv->sreg], &ext) ^
37 if (set->flags & NFT_SET_MAP) 39 priv->invert;
38 nft_data_copy(&regs->data[priv->dreg], 40
39 nft_set_ext_data(ext), set->dlen); 41 if (!found) {
42 regs->verdict.code = NFT_BREAK;
40 return; 43 return;
41 } 44 }
42 regs->verdict.code = NFT_BREAK; 45
46 if (found && set->flags & NFT_SET_MAP)
47 nft_data_copy(&regs->data[priv->dreg],
48 nft_set_ext_data(ext), set->dlen);
49
43} 50}
44 51
45static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = { 52static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
@@ -47,6 +54,7 @@ static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
47 [NFTA_LOOKUP_SET_ID] = { .type = NLA_U32 }, 54 [NFTA_LOOKUP_SET_ID] = { .type = NLA_U32 },
48 [NFTA_LOOKUP_SREG] = { .type = NLA_U32 }, 55 [NFTA_LOOKUP_SREG] = { .type = NLA_U32 },
49 [NFTA_LOOKUP_DREG] = { .type = NLA_U32 }, 56 [NFTA_LOOKUP_DREG] = { .type = NLA_U32 },
57 [NFTA_LOOKUP_FLAGS] = { .type = NLA_U32 },
50}; 58};
51 59
52static int nft_lookup_init(const struct nft_ctx *ctx, 60static int nft_lookup_init(const struct nft_ctx *ctx,
@@ -54,18 +62,21 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
54 const struct nlattr * const tb[]) 62 const struct nlattr * const tb[])
55{ 63{
56 struct nft_lookup *priv = nft_expr_priv(expr); 64 struct nft_lookup *priv = nft_expr_priv(expr);
65 u8 genmask = nft_genmask_next(ctx->net);
57 struct nft_set *set; 66 struct nft_set *set;
67 u32 flags;
58 int err; 68 int err;
59 69
60 if (tb[NFTA_LOOKUP_SET] == NULL || 70 if (tb[NFTA_LOOKUP_SET] == NULL ||
61 tb[NFTA_LOOKUP_SREG] == NULL) 71 tb[NFTA_LOOKUP_SREG] == NULL)
62 return -EINVAL; 72 return -EINVAL;
63 73
64 set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET]); 74 set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET], genmask);
65 if (IS_ERR(set)) { 75 if (IS_ERR(set)) {
66 if (tb[NFTA_LOOKUP_SET_ID]) { 76 if (tb[NFTA_LOOKUP_SET_ID]) {
67 set = nf_tables_set_lookup_byid(ctx->net, 77 set = nf_tables_set_lookup_byid(ctx->net,
68 tb[NFTA_LOOKUP_SET_ID]); 78 tb[NFTA_LOOKUP_SET_ID],
79 genmask);
69 } 80 }
70 if (IS_ERR(set)) 81 if (IS_ERR(set))
71 return PTR_ERR(set); 82 return PTR_ERR(set);
@@ -79,7 +90,22 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
79 if (err < 0) 90 if (err < 0)
80 return err; 91 return err;
81 92
93 if (tb[NFTA_LOOKUP_FLAGS]) {
94 flags = ntohl(nla_get_be32(tb[NFTA_LOOKUP_FLAGS]));
95
96 if (flags & ~NFT_LOOKUP_F_INV)
97 return -EINVAL;
98
99 if (flags & NFT_LOOKUP_F_INV) {
100 if (set->flags & NFT_SET_MAP)
101 return -EINVAL;
102 priv->invert = true;
103 }
104 }
105
82 if (tb[NFTA_LOOKUP_DREG] != NULL) { 106 if (tb[NFTA_LOOKUP_DREG] != NULL) {
107 if (priv->invert)
108 return -EINVAL;
83 if (!(set->flags & NFT_SET_MAP)) 109 if (!(set->flags & NFT_SET_MAP))
84 return -EINVAL; 110 return -EINVAL;
85 111
@@ -112,6 +138,7 @@ static void nft_lookup_destroy(const struct nft_ctx *ctx,
112static int nft_lookup_dump(struct sk_buff *skb, const struct nft_expr *expr) 138static int nft_lookup_dump(struct sk_buff *skb, const struct nft_expr *expr)
113{ 139{
114 const struct nft_lookup *priv = nft_expr_priv(expr); 140 const struct nft_lookup *priv = nft_expr_priv(expr);
141 u32 flags = priv->invert ? NFT_LOOKUP_F_INV : 0;
115 142
116 if (nla_put_string(skb, NFTA_LOOKUP_SET, priv->set->name)) 143 if (nla_put_string(skb, NFTA_LOOKUP_SET, priv->set->name))
117 goto nla_put_failure; 144 goto nla_put_failure;
@@ -120,6 +147,8 @@ static int nft_lookup_dump(struct sk_buff *skb, const struct nft_expr *expr)
120 if (priv->set->flags & NFT_SET_MAP) 147 if (priv->set->flags & NFT_SET_MAP)
121 if (nft_dump_register(skb, NFTA_LOOKUP_DREG, priv->dreg)) 148 if (nft_dump_register(skb, NFTA_LOOKUP_DREG, priv->dreg))
122 goto nla_put_failure; 149 goto nla_put_failure;
150 if (nla_put_be32(skb, NFTA_LOOKUP_FLAGS, htonl(flags)))
151 goto nla_put_failure;
123 return 0; 152 return 0;
124 153
125nla_put_failure: 154nla_put_failure:
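
The nft_lookup hunks above add an NFTA_LOOKUP_FLAGS attribute whose only defined bit, NFT_LOOKUP_F_INV, inverts the match: the rule now breaks unless exactly one of "element found" and "invert requested" holds, which the eval path expresses as a single XOR. Inversion is rejected for map lookups, since an inverted lookup has no matched element whose data could be copied into dreg. A minimal user-space sketch of the XOR pattern (set_contains() is a hypothetical stand-in for set->ops->lookup(), not a kernel API):

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for set->ops->lookup(). */
	static bool set_contains(const int *set, int n, int key)
	{
		for (int i = 0; i < n; i++)
			if (set[i] == key)
				return true;
		return false;
	}

	/* Mirrors nft_lookup_eval(): one XOR covers both polarities. */
	static bool lookup_matches(const int *set, int n, int key, bool invert)
	{
		return set_contains(set, n, key) ^ invert;
	}

	int main(void)
	{
		int allow[] = { 22, 80, 443 };

		printf("%d\n", lookup_matches(allow, 3, 80, false)); /* 1: in set */
		printf("%d\n", lookup_matches(allow, 3, 80, true));  /* 0: inverted */
		printf("%d\n", lookup_matches(allow, 3, 25, true));  /* 1: not in set */
		return 0;
	}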
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 16c50b0dd426..03e5e33b5c39 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -199,13 +199,6 @@ err:
199} 199}
200EXPORT_SYMBOL_GPL(nft_meta_get_eval); 200EXPORT_SYMBOL_GPL(nft_meta_get_eval);
201 201
202/* don't change or set _LOOPBACK, _USER, etc. */
203static bool pkt_type_ok(u32 p)
204{
205 return p == PACKET_HOST || p == PACKET_BROADCAST ||
206 p == PACKET_MULTICAST || p == PACKET_OTHERHOST;
207}
208
209void nft_meta_set_eval(const struct nft_expr *expr, 202void nft_meta_set_eval(const struct nft_expr *expr,
210 struct nft_regs *regs, 203 struct nft_regs *regs,
211 const struct nft_pktinfo *pkt) 204 const struct nft_pktinfo *pkt)
@@ -223,7 +216,7 @@ void nft_meta_set_eval(const struct nft_expr *expr,
223 break; 216 break;
224 case NFT_META_PKTTYPE: 217 case NFT_META_PKTTYPE:
225 if (skb->pkt_type != value && 218 if (skb->pkt_type != value &&
226 pkt_type_ok(value) && pkt_type_ok(skb->pkt_type)) 219 skb_pkt_type_ok(value) && skb_pkt_type_ok(skb->pkt_type))
227 skb->pkt_type = value; 220 skb->pkt_type = value;
228 break; 221 break;
229 case NFT_META_NFTRACE: 222 case NFT_META_NFTRACE:
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index f762094af7c1..c0f638745adc 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -170,7 +170,7 @@ static void *nft_rbtree_deactivate(const struct nft_set *set,
170 const struct nft_rbtree *priv = nft_set_priv(set); 170 const struct nft_rbtree *priv = nft_set_priv(set);
171 const struct rb_node *parent = priv->root.rb_node; 171 const struct rb_node *parent = priv->root.rb_node;
172 struct nft_rbtree_elem *rbe, *this = elem->priv; 172 struct nft_rbtree_elem *rbe, *this = elem->priv;
173 u8 genmask = nft_genmask_cur(read_pnet(&set->pnet)); 173 u8 genmask = nft_genmask_next(read_pnet(&set->pnet));
174 int d; 174 int d;
175 175
176 while (parent != NULL) { 176 while (parent != NULL) {
@@ -211,7 +211,6 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
211 struct nft_rbtree_elem *rbe; 211 struct nft_rbtree_elem *rbe;
212 struct nft_set_elem elem; 212 struct nft_set_elem elem;
213 struct rb_node *node; 213 struct rb_node *node;
214 u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
215 214
216 spin_lock_bh(&nft_rbtree_lock); 215 spin_lock_bh(&nft_rbtree_lock);
217 for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) { 216 for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
@@ -219,7 +218,7 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
219 218
220 if (iter->count < iter->skip) 219 if (iter->count < iter->skip)
221 goto cont; 220 goto cont;
222 if (!nft_set_elem_active(&rbe->ext, genmask)) 221 if (!nft_set_elem_active(&rbe->ext, iter->genmask))
223 goto cont; 222 goto cont;
224 223
225 elem.priv = rbe; 224 elem.priv = rbe;
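
Both rbtree fixes are about which generation a lookup consults. nf_tables keeps a two-bit genmask per set element in which a set bit means "inactive in that generation"; deactivating an element inside a transaction must therefore test the *next* generation (the one about to be committed), while a walk should honour the generation the iterator was started with (iter->genmask) instead of recomputing the current one. A compilable sketch of the convention, assuming the semantics of nft_genmask_cur()/nft_genmask_next() and nft_set_elem_active():

	#include <stdbool.h>
	#include <stdio.h>

	/* Generation g is encoded as bit (1 << g); gencursor flips 0 <-> 1
	 * at commit time. */
	static unsigned int gencursor;

	static unsigned int genmask_cur(void)  { return 1u << gencursor; }
	static unsigned int genmask_next(void) { return 1u << !gencursor; }

	/* A set bit in the element's genmask means "inactive in that
	 * generation". */
	static bool elem_active(unsigned int elem_genmask, unsigned int genmask)
	{
		return !(elem_genmask & genmask);
	}

	int main(void)
	{
		unsigned int elem = genmask_next();	/* deactivated for next gen */

		printf("cur: %d next: %d\n",
		       elem_active(elem, genmask_cur()),   /* 1: still visible */
		       elem_active(elem, genmask_next())); /* 0: gone after commit */
		return 0;
	}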
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 2675d580c490..fe0e2db632c7 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1460,6 +1460,9 @@ xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
1460 uint8_t hooknum; 1460 uint8_t hooknum;
1461 struct nf_hook_ops *ops; 1461 struct nf_hook_ops *ops;
1462 1462
1463 if (!num_hooks)
1464 return ERR_PTR(-EINVAL);
1465
1463 ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL); 1466 ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
1464 if (ops == NULL) 1467 if (ops == NULL)
1465 return ERR_PTR(-ENOMEM); 1468 return ERR_PTR(-ENOMEM);
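
The !num_hooks guard matters because kmalloc(0) does not return NULL in the kernel; it returns the special ZERO_SIZE_PTR, so a table with no valid hooks would previously hand callers a non-NULL, non-IS_ERR ops "array" that they would then index and register. Failing early with ERR_PTR(-EINVAL) keeps the error visible at the call site. The same shape in plain C (calloc(0, ...) is likewise allowed to return a non-NULL pointer that must not be dereferenced):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* A count of zero must fail loudly instead of handing back an
	 * "empty" allocation the caller will immediately index. */
	static int *alloc_slots(size_t n)
	{
		if (!n) {
			errno = EINVAL;
			return NULL;
		}
		return calloc(n, sizeof(int));
	}

	int main(void)
	{
		int *slots = alloc_slots(0);

		if (!slots)
			perror("alloc_slots");	/* "Invalid argument" */
		free(slots);
		return 0;
	}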
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
index a1fa2c800cb9..018eed7e1ff1 100644
--- a/net/netfilter/xt_NFLOG.c
+++ b/net/netfilter/xt_NFLOG.c
@@ -33,6 +33,9 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
33 li.u.ulog.group = info->group; 33 li.u.ulog.group = info->group;
34 li.u.ulog.qthreshold = info->threshold; 34 li.u.ulog.qthreshold = info->threshold;
35 35
36 if (info->flags & XT_NFLOG_F_COPY_LEN)
37 li.u.ulog.flags |= NF_LOG_F_COPY_LEN;
38
36 nfulnl_log_packet(net, par->family, par->hooknum, skb, par->in, 39 nfulnl_log_packet(net, par->family, par->hooknum, skb, par->in,
37 par->out, &li, info->prefix); 40 par->out, &li, info->prefix);
38 return XT_CONTINUE; 41 return XT_CONTINUE;
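
The NFLOG hunk only translates the user-visible XT_NFLOG_F_COPY_LEN flag into the internal NF_LOG_F_COPY_LEN bit before handing the log info to nfulnl_log_packet(). Converting between the uapi flag space and the in-kernel one at the boundary keeps the two ABIs independent; a minimal sketch of the translation (the flag values below are illustrative, not the kernel's actual constants):

	#include <stdio.h>

	#define XT_NFLOG_F_COPY_LEN	(1 << 0)	/* uapi flag (illustrative) */
	#define NF_LOG_F_COPY_LEN	(1 << 4)	/* internal flag (illustrative) */

	/* Translate user-facing flags to internal ones, bit by bit, so the
	 * two numbering schemes can evolve independently. */
	static unsigned int nflog_to_internal(unsigned int xt_flags)
	{
		unsigned int flags = 0;

		if (xt_flags & XT_NFLOG_F_COPY_LEN)
			flags |= NF_LOG_F_COPY_LEN;
		return flags;
	}

	int main(void)
	{
		printf("%#x\n", nflog_to_internal(XT_NFLOG_F_COPY_LEN));
		return 0;
	}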
diff --git a/net/netfilter/xt_TRACE.c b/net/netfilter/xt_TRACE.c
index df48967af382..858d189a1303 100644
--- a/net/netfilter/xt_TRACE.c
+++ b/net/netfilter/xt_TRACE.c
@@ -4,12 +4,23 @@
4#include <linux/skbuff.h> 4#include <linux/skbuff.h>
5 5
6#include <linux/netfilter/x_tables.h> 6#include <linux/netfilter/x_tables.h>
7#include <net/netfilter/nf_log.h>
7 8
8MODULE_DESCRIPTION("Xtables: packet flow tracing"); 9MODULE_DESCRIPTION("Xtables: packet flow tracing");
9MODULE_LICENSE("GPL"); 10MODULE_LICENSE("GPL");
10MODULE_ALIAS("ipt_TRACE"); 11MODULE_ALIAS("ipt_TRACE");
11MODULE_ALIAS("ip6t_TRACE"); 12MODULE_ALIAS("ip6t_TRACE");
12 13
14static int trace_tg_check(const struct xt_tgchk_param *par)
15{
16 return nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
17}
18
19static void trace_tg_destroy(const struct xt_tgdtor_param *par)
20{
21 nf_logger_put(par->family, NF_LOG_TYPE_LOG);
22}
23
13static unsigned int 24static unsigned int
14trace_tg(struct sk_buff *skb, const struct xt_action_param *par) 25trace_tg(struct sk_buff *skb, const struct xt_action_param *par)
15{ 26{
@@ -18,12 +29,14 @@ trace_tg(struct sk_buff *skb, const struct xt_action_param *par)
18} 29}
19 30
20static struct xt_target trace_tg_reg __read_mostly = { 31static struct xt_target trace_tg_reg __read_mostly = {
21 .name = "TRACE", 32 .name = "TRACE",
22 .revision = 0, 33 .revision = 0,
23 .family = NFPROTO_UNSPEC, 34 .family = NFPROTO_UNSPEC,
24 .table = "raw", 35 .table = "raw",
25 .target = trace_tg, 36 .target = trace_tg,
26 .me = THIS_MODULE, 37 .checkentry = trace_tg_check,
38 .destroy = trace_tg_destroy,
39 .me = THIS_MODULE,
27}; 40};
28 41
29static int __init trace_tg_init(void) 42static int __init trace_tg_init(void)
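
The new checkentry/destroy pair pins the backend logger for as long as a TRACE rule exists: nf_logger_find_get() takes a reference (looking up, and if needed loading, the logger) when the rule is installed, and nf_logger_put() drops it on rule removal, so packet-time evaluation can no longer race with the logger disappearing. A small user-space model of the pairing:

	#include <assert.h>
	#include <stdio.h>

	/* Model: the rule pins the logger while it exists, so evaluation
	 * never observes it missing. */
	static int logger_refs;
	static int logger_loaded = 1;

	static int logger_find_get(void)	/* ~ trace_tg_check() */
	{
		if (!logger_loaded)
			return -1;	/* would be a -ENOENT-style error */
		logger_refs++;
		return 0;
	}

	static void logger_put(void)		/* ~ trace_tg_destroy() */
	{
		assert(logger_refs > 0);
		logger_refs--;
	}

	int main(void)
	{
		if (logger_find_get() == 0) {
			printf("rule active, refs=%d\n", logger_refs);
			logger_put();
		}
		return 0;
	}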
diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c
index 1302b475abcb..a20e731b5b6c 100644
--- a/net/netfilter/xt_owner.c
+++ b/net/netfilter/xt_owner.c
@@ -21,11 +21,39 @@
21static int owner_check(const struct xt_mtchk_param *par) 21static int owner_check(const struct xt_mtchk_param *par)
22{ 22{
23 struct xt_owner_match_info *info = par->matchinfo; 23 struct xt_owner_match_info *info = par->matchinfo;
24 struct net *net = par->net;
24 25
25 /* For now only allow adding matches from the initial user namespace */ 26 /* Only allow the common case where the userns of the writer
27 * matches the userns of the network namespace.
28 */
26 if ((info->match & (XT_OWNER_UID|XT_OWNER_GID)) && 29 if ((info->match & (XT_OWNER_UID|XT_OWNER_GID)) &&
27 (current_user_ns() != &init_user_ns)) 30 (current_user_ns() != net->user_ns))
28 return -EINVAL; 31 return -EINVAL;
32
33 /* Ensure the uids are valid */
34 if (info->match & XT_OWNER_UID) {
35 kuid_t uid_min = make_kuid(net->user_ns, info->uid_min);
36 kuid_t uid_max = make_kuid(net->user_ns, info->uid_max);
37
38 if (!uid_valid(uid_min) || !uid_valid(uid_max) ||
39 (info->uid_max < info->uid_min) ||
40 uid_lt(uid_max, uid_min)) {
41 return -EINVAL;
42 }
43 }
44
45 /* Ensure the gids are valid */
46 if (info->match & XT_OWNER_GID) {
47 kgid_t gid_min = make_kgid(net->user_ns, info->gid_min);
48 kgid_t gid_max = make_kgid(net->user_ns, info->gid_max);
49
50 if (!gid_valid(gid_min) || !gid_valid(gid_max) ||
51 (info->gid_max < info->gid_min) ||
52 gid_lt(gid_max, gid_min)) {
53 return -EINVAL;
54 }
55 }
56
29 return 0; 57 return 0;
30} 58}
31 59
@@ -35,6 +63,7 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
35 const struct xt_owner_match_info *info = par->matchinfo; 63 const struct xt_owner_match_info *info = par->matchinfo;
36 const struct file *filp; 64 const struct file *filp;
37 struct sock *sk = skb_to_full_sk(skb); 65 struct sock *sk = skb_to_full_sk(skb);
66 struct net *net = par->net;
38 67
39 if (sk == NULL || sk->sk_socket == NULL) 68 if (sk == NULL || sk->sk_socket == NULL)
40 return (info->match ^ info->invert) == 0; 69 return (info->match ^ info->invert) == 0;
@@ -51,8 +80,8 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
51 (XT_OWNER_UID | XT_OWNER_GID)) == 0; 80 (XT_OWNER_UID | XT_OWNER_GID)) == 0;
52 81
53 if (info->match & XT_OWNER_UID) { 82 if (info->match & XT_OWNER_UID) {
54 kuid_t uid_min = make_kuid(&init_user_ns, info->uid_min); 83 kuid_t uid_min = make_kuid(net->user_ns, info->uid_min);
55 kuid_t uid_max = make_kuid(&init_user_ns, info->uid_max); 84 kuid_t uid_max = make_kuid(net->user_ns, info->uid_max);
56 if ((uid_gte(filp->f_cred->fsuid, uid_min) && 85 if ((uid_gte(filp->f_cred->fsuid, uid_min) &&
57 uid_lte(filp->f_cred->fsuid, uid_max)) ^ 86 uid_lte(filp->f_cred->fsuid, uid_max)) ^
58 !(info->invert & XT_OWNER_UID)) 87 !(info->invert & XT_OWNER_UID))
@@ -60,8 +89,8 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
60 } 89 }
61 90
62 if (info->match & XT_OWNER_GID) { 91 if (info->match & XT_OWNER_GID) {
63 kgid_t gid_min = make_kgid(&init_user_ns, info->gid_min); 92 kgid_t gid_min = make_kgid(net->user_ns, info->gid_min);
64 kgid_t gid_max = make_kgid(&init_user_ns, info->gid_max); 93 kgid_t gid_max = make_kgid(net->user_ns, info->gid_max);
65 if ((gid_gte(filp->f_cred->fsgid, gid_min) && 94 if ((gid_gte(filp->f_cred->fsgid, gid_min) &&
66 gid_lte(filp->f_cred->fsgid, gid_max)) ^ 95 gid_lte(filp->f_cred->fsgid, gid_max)) ^
67 !(info->invert & XT_OWNER_GID)) 96 !(info->invert & XT_OWNER_GID))
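
owner_check() now accepts rules from any user namespace that owns the network namespace, but it must then prove the supplied ranges are meaningful there: each end of the range has to map into that userns (make_kuid()/uid_valid()), and the range has to be ordered both before and after translation, since id maps need not be order-preserving across extents. A toy single-extent map showing the same validation (struct uid_map and INVALID_KUID are illustrative, not kernel types):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Toy single-extent uid map standing in for a user namespace. */
	struct uid_map { uint32_t lower, base, count; };

	#define INVALID_KUID UINT32_MAX

	static uint32_t make_kuid(const struct uid_map *m, uint32_t uid)
	{
		if (uid < m->lower || uid >= m->lower + m->count)
			return INVALID_KUID;	/* uid_valid() would fail */
		return m->base + (uid - m->lower);
	}

	/* Same shape as the owner_check() hunk: both ends must map, and the
	 * range must be ordered before and after translation. */
	static bool uid_range_ok(const struct uid_map *m, uint32_t lo, uint32_t hi)
	{
		uint32_t klo = make_kuid(m, lo), khi = make_kuid(m, hi);

		return klo != INVALID_KUID && khi != INVALID_KUID &&
		       hi >= lo && khi >= klo;
	}

	int main(void)
	{
		struct uid_map ns = { .lower = 1000, .base = 100000, .count = 65536 };

		printf("%d\n", uid_range_ok(&ns, 1000, 2000));	/* 1 */
		printf("%d\n", uid_range_ok(&ns, 0, 2000));	/* 0: 0 unmapped */
		return 0;
	}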
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c
index c14d4645daa3..ade024c90f4f 100644
--- a/net/netfilter/xt_tcpudp.c
+++ b/net/netfilter/xt_tcpudp.c
@@ -83,8 +83,6 @@ static bool tcp_mt(const struct sk_buff *skb, struct xt_action_param *par)
83 return false; 83 return false;
84 } 84 }
85 85
86#define FWINVTCP(bool, invflg) ((bool) ^ !!(tcpinfo->invflags & (invflg)))
87
88 th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph); 86 th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph);
89 if (th == NULL) { 87 if (th == NULL) {
90 /* We've been asked to examine this packet, and we 88 /* We've been asked to examine this packet, and we
@@ -102,9 +100,8 @@ static bool tcp_mt(const struct sk_buff *skb, struct xt_action_param *par)
102 ntohs(th->dest), 100 ntohs(th->dest),
103 !!(tcpinfo->invflags & XT_TCP_INV_DSTPT))) 101 !!(tcpinfo->invflags & XT_TCP_INV_DSTPT)))
104 return false; 102 return false;
105 if (!FWINVTCP((((unsigned char *)th)[13] & tcpinfo->flg_mask) 103 if (!NF_INVF(tcpinfo, XT_TCP_INV_FLAGS,
106 == tcpinfo->flg_cmp, 104 (((unsigned char *)th)[13] & tcpinfo->flg_mask) == tcpinfo->flg_cmp))
107 XT_TCP_INV_FLAGS))
108 return false; 105 return false;
109 if (tcpinfo->option) { 106 if (tcpinfo->option) {
110 if (th->doff * 4 < sizeof(_tcph)) { 107 if (th->doff * 4 < sizeof(_tcph)) {
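
FWINVTCP was a local copy of an invert-flag XOR that several matches carried around; the series replaces those copies with a shared NF_INVF() macro. Assuming the definition added to <linux/netfilter.h> alongside these conversions, the semantics are just "match result XOR invert bit":

	#include <stdbool.h>
	#include <stdio.h>

	/* NF_INVF as introduced with this series: XOR the raw match result
	 * with "is the invert flag set?". */
	#define NF_INVF(ptr, flag, boolean) \
		((boolean) ^ !!((ptr)->invflags & (flag)))

	#define XT_TCP_INV_FLAGS 0x08

	struct tcpinfo { unsigned int invflags; };

	int main(void)
	{
		struct tcpinfo plain = { 0 }, inverted = { XT_TCP_INV_FLAGS };
		bool flags_equal = true;	/* (th[13] & flg_mask) == flg_cmp */

		printf("%d\n", NF_INVF(&plain, XT_TCP_INV_FLAGS, flags_equal));    /* 1 */
		printf("%d\n", NF_INVF(&inverted, XT_TCP_INV_FLAGS, flags_equal)); /* 0 */
		return 0;
	}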
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 52f3b9b89e97..b4069a90e375 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -818,8 +818,18 @@ static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
818 */ 818 */
819 state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED; 819 state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED;
820 __ovs_ct_update_key(key, state, &info->zone, exp->master); 820 __ovs_ct_update_key(key, state, &info->zone, exp->master);
821 } else 821 } else {
822 return __ovs_ct_lookup(net, key, info, skb); 822 struct nf_conn *ct;
823 int err;
824
825 err = __ovs_ct_lookup(net, key, info, skb);
826 if (err)
827 return err;
828
829 ct = (struct nf_conn *)skb->nfct;
830 if (ct)
831 nf_ct_deliver_cached_events(ct);
832 }
823 833
824 return 0; 834 return 0;
825} 835}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d1f3b9e977e5..9d92c4c46871 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1341,7 +1341,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f,
1341 struct sk_buff *skb, 1341 struct sk_buff *skb,
1342 unsigned int num) 1342 unsigned int num)
1343{ 1343{
1344 return reciprocal_scale(skb_get_hash(skb), num); 1344 return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1345} 1345}
1346 1346
1347static unsigned int fanout_demux_lb(struct packet_fanout *f, 1347static unsigned int fanout_demux_lb(struct packet_fanout *f,
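
Switching fanout_demux_hash() to __skb_get_hash_symmetric() makes the fanout decision direction-agnostic: a flow's forward and reverse packets hash identically, so both land on the same socket in the fanout group. The property is easy to model; any combination of the tuple that is invariant under swapping (src, sport) with (dst, dport) will do, as in this sketch:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t mix(uint32_t h, uint32_t v)
	{
		h ^= v;
		h *= 0x9e3779b1u;	/* any decent mixing constant */
		return h;
	}

	/* XOR and min are order-independent, hence the hash is symmetric. */
	static uint32_t sym_flow_hash(uint32_t saddr, uint32_t daddr,
				      uint16_t sport, uint16_t dport)
	{
		uint32_t addrs = saddr ^ daddr;
		uint32_t ports = (uint32_t)(sport ^ dport);
		uint32_t lo = saddr < daddr ? saddr : daddr;

		return mix(mix(mix(0, addrs), ports), lo);
	}

	int main(void)
	{
		/* Both directions of the flow print the same value. */
		printf("%u\n", sym_flow_hash(0x0a000001, 0x0a000002, 1234, 80));
		printf("%u\n", sym_flow_hash(0x0a000002, 0x0a000001, 80, 1234));
		return 0;
	}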
@@ -1588,13 +1588,9 @@ static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
1588 if (copy_from_user(&fd, data, len)) 1588 if (copy_from_user(&fd, data, len))
1589 return -EFAULT; 1589 return -EFAULT;
1590 1590
1591 new = bpf_prog_get(fd); 1591 new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1592 if (IS_ERR(new)) 1592 if (IS_ERR(new))
1593 return PTR_ERR(new); 1593 return PTR_ERR(new);
1594 if (new->type != BPF_PROG_TYPE_SOCKET_FILTER) {
1595 bpf_prog_put(new);
1596 return -EINVAL;
1597 }
1598 1594
1599 __fanout_set_data_bpf(po->fanout, new); 1595 __fanout_set_data_bpf(po->fanout, new);
1600 return 0; 1596 return 0;
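
The second af_packet hunk uses the new bpf_prog_get_type(), which folds the fetch-by-fd and the program-type check into one helper; on a type mismatch the helper drops the reference itself, so the put-on-mismatch can no longer be forgotten at a call site. A self-contained model of the pattern (the types and fd table are illustrative):

	#include <errno.h>
	#include <stddef.h>
	#include <stdio.h>

	enum prog_type { PROG_SOCKET_FILTER, PROG_OTHER };

	struct prog { enum prog_type type; int refs; };

	static struct prog table[2] = {
		{ PROG_SOCKET_FILTER, 0 }, { PROG_OTHER, 0 },
	};

	static struct prog *prog_get(int fd)
	{
		if (fd < 0 || fd >= 2)
			return NULL;
		table[fd].refs++;
		return &table[fd];
	}

	static void prog_put(struct prog *p) { p->refs--; }

	/* Fetch and type-check in one place, dropping the reference on
	 * mismatch so callers cannot leak it. */
	static struct prog *prog_get_type(int fd, enum prog_type type)
	{
		struct prog *p = prog_get(fd);

		if (p && p->type != type) {
			prog_put(p);
			p = NULL;
			errno = EINVAL;
		}
		return p;
	}

	int main(void)
	{
		printf("%p %p\n", (void *)prog_get_type(0, PROG_SOCKET_FILTER),
		       (void *)prog_get_type(1, PROG_SOCKET_FILTER));
		return 0;
	}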
diff --git a/net/rds/connection.c b/net/rds/connection.c
index a4b07c899d89..19a4fee5f4dd 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -253,9 +253,12 @@ static struct rds_connection *__rds_conn_create(struct net *net,
253 253
254 for (i = 0; i < RDS_MPATH_WORKERS; i++) { 254 for (i = 0; i < RDS_MPATH_WORKERS; i++) {
255 cp = &conn->c_path[i]; 255 cp = &conn->c_path[i];
256 trans->conn_free(cp->cp_transport_data); 256 /* The ->conn_alloc invocation may have
257 if (!trans->t_mp_capable) 257 * allocated resources for all paths, so all
258 break; 258 * of them may have to be freed here.
259 */
260 if (cp->cp_transport_data)
261 trans->conn_free(cp->cp_transport_data);
259 } 262 }
260 kmem_cache_free(rds_conn_slab, conn); 263 kmem_cache_free(rds_conn_slab, conn);
261 conn = found; 264 conn = found;
@@ -326,10 +329,7 @@ void rds_conn_shutdown(struct rds_conn_path *cp)
326 wait_event(cp->cp_waitq, 329 wait_event(cp->cp_waitq,
327 !test_bit(RDS_RECV_REFILL, &cp->cp_flags)); 330 !test_bit(RDS_RECV_REFILL, &cp->cp_flags));
328 331
329 if (!conn->c_trans->t_mp_capable) 332 conn->c_trans->conn_path_shutdown(cp);
330 conn->c_trans->conn_shutdown(conn);
331 else
332 conn->c_trans->conn_path_shutdown(cp);
333 rds_conn_path_reset(cp); 333 rds_conn_path_reset(cp);
334 334
335 if (!rds_conn_path_transition(cp, RDS_CONN_DISCONNECTING, 335 if (!rds_conn_path_transition(cp, RDS_CONN_DISCONNECTING,
@@ -355,9 +355,7 @@ void rds_conn_shutdown(struct rds_conn_path *cp)
355 rcu_read_lock(); 355 rcu_read_lock();
356 if (!hlist_unhashed(&conn->c_hash_node)) { 356 if (!hlist_unhashed(&conn->c_hash_node)) {
357 rcu_read_unlock(); 357 rcu_read_unlock();
358 if (conn->c_trans->t_type != RDS_TRANS_TCP || 358 rds_queue_reconnect(cp);
359 cp->cp_outgoing == 1)
360 rds_queue_reconnect(cp);
361 } else { 359 } else {
362 rcu_read_unlock(); 360 rcu_read_unlock();
363 } 361 }
@@ -370,6 +368,9 @@ static void rds_conn_path_destroy(struct rds_conn_path *cp)
370{ 368{
371 struct rds_message *rm, *rtmp; 369 struct rds_message *rm, *rtmp;
372 370
371 if (!cp->cp_transport_data)
372 return;
373
373 rds_conn_path_drop(cp); 374 rds_conn_path_drop(cp);
374 flush_work(&cp->cp_down_w); 375 flush_work(&cp->cp_down_w);
375 376
@@ -401,6 +402,8 @@ static void rds_conn_path_destroy(struct rds_conn_path *cp)
401void rds_conn_destroy(struct rds_connection *conn) 402void rds_conn_destroy(struct rds_connection *conn)
402{ 403{
403 unsigned long flags; 404 unsigned long flags;
405 int i;
406 struct rds_conn_path *cp;
404 407
405 rdsdebug("freeing conn %p for %pI4 -> " 408 rdsdebug("freeing conn %p for %pI4 -> "
406 "%pI4\n", conn, &conn->c_laddr, 409 "%pI4\n", conn, &conn->c_laddr,
@@ -413,18 +416,10 @@ void rds_conn_destroy(struct rds_connection *conn)
413 synchronize_rcu(); 416 synchronize_rcu();
414 417
415 /* shut the connection down */ 418 /* shut the connection down */
416 if (!conn->c_trans->t_mp_capable) { 419 for (i = 0; i < RDS_MPATH_WORKERS; i++) {
417 rds_conn_path_destroy(&conn->c_path[0]); 420 cp = &conn->c_path[i];
418 BUG_ON(!list_empty(&conn->c_path[0].cp_retrans)); 421 rds_conn_path_destroy(cp);
419 } else { 422 BUG_ON(!list_empty(&cp->cp_retrans));
420 int i;
421 struct rds_conn_path *cp;
422
423 for (i = 0; i < RDS_MPATH_WORKERS; i++) {
424 cp = &conn->c_path[i];
425 rds_conn_path_destroy(cp);
426 BUG_ON(!list_empty(&cp->cp_retrans));
427 }
428 } 423 }
429 424
430 /* 425 /*
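
With multipath, ->conn_alloc may populate transport data on every path, so both the create-race unwind and rds_conn_destroy() must walk all RDS_MPATH_WORKERS paths and skip the ones that were never set up, rather than freeing c_path[0] and breaking out for single-path transports. The defensive teardown shape, sketched:

	#include <stdlib.h>

	#define RDS_MPATH_WORKERS 8

	struct conn_path { void *transport_data; };

	/* Free whatever each path actually allocated; untouched paths are
	 * skipped, so the loop is safe after a partially failed setup. */
	static void free_paths(struct conn_path *paths)
	{
		for (int i = 0; i < RDS_MPATH_WORKERS; i++) {
			free(paths[i].transport_data);	/* free(NULL) is a no-op */
			paths[i].transport_data = NULL;
		}
	}

	int main(void)
	{
		struct conn_path paths[RDS_MPATH_WORKERS] = { 0 };

		paths[0].transport_data = malloc(32);	/* only path 0 got set up */
		free_paths(paths);
		return 0;
	}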
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 44946a681a8c..7eaf887e46f8 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -381,15 +381,15 @@ void rds_ib_exit(void)
381 381
382struct rds_transport rds_ib_transport = { 382struct rds_transport rds_ib_transport = {
383 .laddr_check = rds_ib_laddr_check, 383 .laddr_check = rds_ib_laddr_check,
384 .xmit_complete = rds_ib_xmit_complete, 384 .xmit_path_complete = rds_ib_xmit_path_complete,
385 .xmit = rds_ib_xmit, 385 .xmit = rds_ib_xmit,
386 .xmit_rdma = rds_ib_xmit_rdma, 386 .xmit_rdma = rds_ib_xmit_rdma,
387 .xmit_atomic = rds_ib_xmit_atomic, 387 .xmit_atomic = rds_ib_xmit_atomic,
388 .recv = rds_ib_recv, 388 .recv_path = rds_ib_recv_path,
389 .conn_alloc = rds_ib_conn_alloc, 389 .conn_alloc = rds_ib_conn_alloc,
390 .conn_free = rds_ib_conn_free, 390 .conn_free = rds_ib_conn_free,
391 .conn_connect = rds_ib_conn_connect, 391 .conn_path_connect = rds_ib_conn_path_connect,
392 .conn_shutdown = rds_ib_conn_shutdown, 392 .conn_path_shutdown = rds_ib_conn_path_shutdown,
393 .inc_copy_to_user = rds_ib_inc_copy_to_user, 393 .inc_copy_to_user = rds_ib_inc_copy_to_user,
394 .inc_free = rds_ib_inc_free, 394 .inc_free = rds_ib_inc_free,
395 .cm_initiate_connect = rds_ib_cm_initiate_connect, 395 .cm_initiate_connect = rds_ib_cm_initiate_connect,
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 627fb79aee65..046f7508c06b 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -328,8 +328,8 @@ extern struct list_head ib_nodev_conns;
328/* ib_cm.c */ 328/* ib_cm.c */
329int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp); 329int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
330void rds_ib_conn_free(void *arg); 330void rds_ib_conn_free(void *arg);
331int rds_ib_conn_connect(struct rds_connection *conn); 331int rds_ib_conn_path_connect(struct rds_conn_path *cp);
332void rds_ib_conn_shutdown(struct rds_connection *conn); 332void rds_ib_conn_path_shutdown(struct rds_conn_path *cp);
333void rds_ib_state_change(struct sock *sk); 333void rds_ib_state_change(struct sock *sk);
334int rds_ib_listen_init(void); 334int rds_ib_listen_init(void);
335void rds_ib_listen_stop(void); 335void rds_ib_listen_stop(void);
@@ -354,7 +354,7 @@ void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
354/* ib_recv.c */ 354/* ib_recv.c */
355int rds_ib_recv_init(void); 355int rds_ib_recv_init(void);
356void rds_ib_recv_exit(void); 356void rds_ib_recv_exit(void);
357int rds_ib_recv(struct rds_connection *conn); 357int rds_ib_recv_path(struct rds_conn_path *conn);
358int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic); 358int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
359void rds_ib_recv_free_caches(struct rds_ib_connection *ic); 359void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
360void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp); 360void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
@@ -384,7 +384,7 @@ u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
384extern wait_queue_head_t rds_ib_ring_empty_wait; 384extern wait_queue_head_t rds_ib_ring_empty_wait;
385 385
386/* ib_send.c */ 386/* ib_send.c */
387void rds_ib_xmit_complete(struct rds_connection *conn); 387void rds_ib_xmit_path_complete(struct rds_conn_path *cp);
388int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, 388int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
389 unsigned int hdr_off, unsigned int sg, unsigned int off); 389 unsigned int hdr_off, unsigned int sg, unsigned int off);
390void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc); 390void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 334287602b78..5b2ab95afa07 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -112,7 +112,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
112 } 112 }
113 } 113 }
114 114
115 if (conn->c_version < RDS_PROTOCOL(3,1)) { 115 if (conn->c_version < RDS_PROTOCOL(3, 1)) {
116 printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed," 116 printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
117 " no longer supported\n", 117 " no longer supported\n",
118 &conn->c_faddr, 118 &conn->c_faddr,
@@ -685,8 +685,9 @@ out:
685 return ret; 685 return ret;
686} 686}
687 687
688int rds_ib_conn_connect(struct rds_connection *conn) 688int rds_ib_conn_path_connect(struct rds_conn_path *cp)
689{ 689{
690 struct rds_connection *conn = cp->cp_conn;
690 struct rds_ib_connection *ic = conn->c_transport_data; 691 struct rds_ib_connection *ic = conn->c_transport_data;
691 struct sockaddr_in src, dest; 692 struct sockaddr_in src, dest;
692 int ret; 693 int ret;
@@ -731,8 +732,9 @@ out:
731 * so that it can be called at any point during startup. In fact it 732 * so that it can be called at any point during startup. In fact it
732 * can be called multiple times for a given connection. 733 * can be called multiple times for a given connection.
733 */ 734 */
734void rds_ib_conn_shutdown(struct rds_connection *conn) 735void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
735{ 736{
737 struct rds_connection *conn = cp->cp_conn;
736 struct rds_ib_connection *ic = conn->c_transport_data; 738 struct rds_ib_connection *ic = conn->c_transport_data;
737 int err = 0; 739 int err = 0;
738 740
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 4ea8cb17cc7a..606a11f681d2 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -1009,8 +1009,9 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
1009 rds_ib_recv_refill(conn, 0, GFP_NOWAIT); 1009 rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
1010} 1010}
1011 1011
1012int rds_ib_recv(struct rds_connection *conn) 1012int rds_ib_recv_path(struct rds_conn_path *cp)
1013{ 1013{
1014 struct rds_connection *conn = cp->cp_conn;
1014 struct rds_ib_connection *ic = conn->c_transport_data; 1015 struct rds_ib_connection *ic = conn->c_transport_data;
1015 int ret = 0; 1016 int ret = 0;
1016 1017
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 6e4110aa5135..84d90c97332f 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -980,8 +980,9 @@ out:
980 return ret; 980 return ret;
981} 981}
982 982
983void rds_ib_xmit_complete(struct rds_connection *conn) 983void rds_ib_xmit_path_complete(struct rds_conn_path *cp)
984{ 984{
985 struct rds_connection *conn = cp->cp_conn;
985 struct rds_ib_connection *ic = conn->c_transport_data; 986 struct rds_ib_connection *ic = conn->c_transport_data;
986 987
987 /* We may have a pending ACK or window update we were unable 988 /* We may have a pending ACK or window update we were unable
diff --git a/net/rds/loop.c b/net/rds/loop.c
index 268f07faaa1a..f2bf78de5688 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -96,12 +96,13 @@ out:
96 */ 96 */
97static void rds_loop_inc_free(struct rds_incoming *inc) 97static void rds_loop_inc_free(struct rds_incoming *inc)
98{ 98{
99 struct rds_message *rm = container_of(inc, struct rds_message, m_inc); 99 struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
100 rds_message_put(rm); 100
101 rds_message_put(rm);
101} 102}
102 103
103/* we need to at least give the thread something to succeed */ 104/* we need to at least give the thread something to succeed */
104static int rds_loop_recv(struct rds_connection *conn) 105static int rds_loop_recv_path(struct rds_conn_path *cp)
105{ 106{
106 return 0; 107 return 0;
107} 108}
@@ -149,13 +150,13 @@ static void rds_loop_conn_free(void *arg)
149 kfree(lc); 150 kfree(lc);
150} 151}
151 152
152static int rds_loop_conn_connect(struct rds_connection *conn) 153static int rds_loop_conn_path_connect(struct rds_conn_path *cp)
153{ 154{
154 rds_connect_complete(conn); 155 rds_connect_complete(cp->cp_conn);
155 return 0; 156 return 0;
156} 157}
157 158
158static void rds_loop_conn_shutdown(struct rds_connection *conn) 159static void rds_loop_conn_path_shutdown(struct rds_conn_path *cp)
159{ 160{
160} 161}
161 162
@@ -184,11 +185,11 @@ void rds_loop_exit(void)
184 */ 185 */
185struct rds_transport rds_loop_transport = { 186struct rds_transport rds_loop_transport = {
186 .xmit = rds_loop_xmit, 187 .xmit = rds_loop_xmit,
187 .recv = rds_loop_recv, 188 .recv_path = rds_loop_recv_path,
188 .conn_alloc = rds_loop_conn_alloc, 189 .conn_alloc = rds_loop_conn_alloc,
189 .conn_free = rds_loop_conn_free, 190 .conn_free = rds_loop_conn_free,
190 .conn_connect = rds_loop_conn_connect, 191 .conn_path_connect = rds_loop_conn_path_connect,
191 .conn_shutdown = rds_loop_conn_shutdown, 192 .conn_path_shutdown = rds_loop_conn_path_shutdown,
192 .inc_copy_to_user = rds_message_inc_copy_to_user, 193 .inc_copy_to_user = rds_message_inc_copy_to_user,
193 .inc_free = rds_loop_inc_free, 194 .inc_free = rds_loop_inc_free,
194 .t_name = "loopback", 195 .t_name = "loopback",
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 2e35b738176f..6ef07bd27227 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -454,18 +454,15 @@ struct rds_transport {
454 int (*laddr_check)(struct net *net, __be32 addr); 454 int (*laddr_check)(struct net *net, __be32 addr);
455 int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp); 455 int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
456 void (*conn_free)(void *data); 456 void (*conn_free)(void *data);
457 int (*conn_connect)(struct rds_connection *conn); 457 int (*conn_path_connect)(struct rds_conn_path *cp);
458 void (*conn_shutdown)(struct rds_connection *conn);
459 void (*conn_path_shutdown)(struct rds_conn_path *conn); 458 void (*conn_path_shutdown)(struct rds_conn_path *conn);
460 void (*xmit_prepare)(struct rds_connection *conn);
461 void (*xmit_path_prepare)(struct rds_conn_path *cp); 459 void (*xmit_path_prepare)(struct rds_conn_path *cp);
462 void (*xmit_complete)(struct rds_connection *conn);
463 void (*xmit_path_complete)(struct rds_conn_path *cp); 460 void (*xmit_path_complete)(struct rds_conn_path *cp);
464 int (*xmit)(struct rds_connection *conn, struct rds_message *rm, 461 int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
465 unsigned int hdr_off, unsigned int sg, unsigned int off); 462 unsigned int hdr_off, unsigned int sg, unsigned int off);
466 int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op); 463 int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
467 int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op); 464 int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
468 int (*recv)(struct rds_connection *conn); 465 int (*recv_path)(struct rds_conn_path *cp);
469 int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to); 466 int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
470 void (*inc_free)(struct rds_incoming *inc); 467 void (*inc_free)(struct rds_incoming *inc);
471 468
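
This struct rds_transport diff is the heart of the series: the per-connection callbacks (conn_connect, conn_shutdown, xmit_prepare, xmit_complete, recv) are removed, and every transport now implements only the *_path variants, with single-path transports reaching the connection back through cp->cp_conn. Roughly, the ops table converges on this shape (a sketch, not the full kernel struct):

	/* One lane of a connection; single-path transports use lane 0 only. */
	struct rds_conn_path;

	struct rds_transport_path_ops {
		int  (*conn_path_connect)(struct rds_conn_path *cp);
		void (*conn_path_shutdown)(struct rds_conn_path *cp);
		void (*xmit_path_prepare)(struct rds_conn_path *cp);
		void (*xmit_path_complete)(struct rds_conn_path *cp);
		int  (*recv_path)(struct rds_conn_path *cp);
	};

Because there is now one hook per operation, callers such as rds_send_xmit() no longer need the t_mp_capable branch, which is exactly what the send.c hunks below reduce to.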
diff --git a/net/rds/recv.c b/net/rds/recv.c
index b58f50571782..fed53a6c2890 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -226,6 +226,10 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
226 cp->cp_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1; 226 cp->cp_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;
227 227
228 if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) { 228 if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
229 if (inc->i_hdr.h_sport == 0) {
230 rdsdebug("ignore ping with 0 sport from 0x%x\n", saddr);
231 goto out;
232 }
229 rds_stats_inc(s_recv_ping); 233 rds_stats_inc(s_recv_ping);
230 rds_send_pong(cp, inc->i_hdr.h_sport); 234 rds_send_pong(cp, inc->i_hdr.h_sport);
231 goto out; 235 goto out;
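
The recv.c guard drops pings whose source port is 0. Since a pong is itself sourced from port 0 and addressed to the ping's source port, answering such a packet would produce a reply that the peer interprets as another ping, and the two hosts would reflect traffic at each other indefinitely. A tiny sketch of the accept condition:

	#include <stdbool.h>
	#include <stdio.h>

	/* A ping is dport == 0; only answer it when the reply has a real
	 * destination port to go to. */
	static bool should_answer_ping(unsigned short dport, unsigned short sport)
	{
		return dport == 0 && sport != 0;
	}

	int main(void)
	{
		printf("%d\n", should_answer_ping(0, 4000));	/* 1: real ping */
		printf("%d\n", should_answer_ping(0, 0));	/* 0: would loop */
		return 0;
	}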
diff --git a/net/rds/send.c b/net/rds/send.c
index ee43d6b2ea8f..5a9caf1da896 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -183,12 +183,8 @@ restart:
183 goto out; 183 goto out;
184 } 184 }
185 185
186 if (conn->c_trans->t_mp_capable) { 186 if (conn->c_trans->xmit_path_prepare)
187 if (conn->c_trans->xmit_path_prepare) 187 conn->c_trans->xmit_path_prepare(cp);
188 conn->c_trans->xmit_path_prepare(cp);
189 } else if (conn->c_trans->xmit_prepare) {
190 conn->c_trans->xmit_prepare(conn);
191 }
192 188
193 /* 189 /*
194 * spin trying to push headers and data down the connection until 190 * spin trying to push headers and data down the connection until
@@ -403,12 +399,8 @@ restart:
403 } 399 }
404 400
405over_batch: 401over_batch:
406 if (conn->c_trans->t_mp_capable) { 402 if (conn->c_trans->xmit_path_complete)
407 if (conn->c_trans->xmit_path_complete) 403 conn->c_trans->xmit_path_complete(cp);
408 conn->c_trans->xmit_path_complete(cp);
409 } else if (conn->c_trans->xmit_complete) {
410 conn->c_trans->xmit_complete(conn);
411 }
412 release_in_xmit(cp); 404 release_in_xmit(cp);
413 405
414 /* Nuke any messages we decided not to retransmit. */ 406 /* Nuke any messages we decided not to retransmit. */
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index c173f69e1479..e381bbcd9cc1 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -102,7 +102,8 @@ int rds_sysctl_init(void)
102 rds_sysctl_reconnect_min = msecs_to_jiffies(1); 102 rds_sysctl_reconnect_min = msecs_to_jiffies(1);
103 rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min; 103 rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min;
104 104
105 rds_sysctl_reg_table = register_net_sysctl(&init_net,"net/rds", rds_sysctl_rds_table); 105 rds_sysctl_reg_table =
106 register_net_sysctl(&init_net, "net/rds", rds_sysctl_rds_table);
106 if (!rds_sysctl_reg_table) 107 if (!rds_sysctl_reg_table)
107 return -ENOMEM; 108 return -ENOMEM;
108 return 0; 109 return 0;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 5217d49ce6d6..d24f6c142d03 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -136,9 +136,9 @@ void rds_tcp_restore_callbacks(struct socket *sock,
136 * from being called while it isn't set. 136 * from being called while it isn't set.
137 */ 137 */
138void rds_tcp_reset_callbacks(struct socket *sock, 138void rds_tcp_reset_callbacks(struct socket *sock,
139 struct rds_connection *conn) 139 struct rds_conn_path *cp)
140{ 140{
141 struct rds_tcp_connection *tc = conn->c_transport_data; 141 struct rds_tcp_connection *tc = cp->cp_transport_data;
142 struct socket *osock = tc->t_sock; 142 struct socket *osock = tc->t_sock;
143 143
144 if (!osock) 144 if (!osock)
@@ -148,8 +148,8 @@ void rds_tcp_reset_callbacks(struct socket *sock,
148 * We have an outstanding SYN to this peer, which may 148 * We have an outstanding SYN to this peer, which may
149 * potentially have transitioned to the RDS_CONN_UP state, 149 * potentially have transitioned to the RDS_CONN_UP state,
150 * so we must quiesce any send threads before resetting 150 * so we must quiesce any send threads before resetting
151 * c_transport_data. We quiesce these threads by setting 151 * cp_transport_data. We quiesce these threads by setting
152 * c_state to something other than RDS_CONN_UP, and then 152 * cp_state to something other than RDS_CONN_UP, and then
153 * waiting for any existing threads in rds_send_xmit to 153 * waiting for any existing threads in rds_send_xmit to
154 * complete release_in_xmit(). (Subsequent threads entering 154 * complete release_in_xmit(). (Subsequent threads entering
155 * rds_send_xmit() will bail on !rds_conn_up(). 155 * rds_send_xmit() will bail on !rds_conn_up().
@@ -164,8 +164,8 @@ void rds_tcp_reset_callbacks(struct socket *sock,
164 * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change 164 * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change
165 * cannot mark rds_conn_path_up() in the window before lock_sock() 165 * cannot mark rds_conn_path_up() in the window before lock_sock()
166 */ 166 */
167 atomic_set(&conn->c_state, RDS_CONN_RESETTING); 167 atomic_set(&cp->cp_state, RDS_CONN_RESETTING);
168 wait_event(conn->c_waitq, !test_bit(RDS_IN_XMIT, &conn->c_flags)); 168 wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags));
169 lock_sock(osock->sk); 169 lock_sock(osock->sk);
170 /* reset receive side state for rds_tcp_data_recv() for osock */ 170 /* reset receive side state for rds_tcp_data_recv() for osock */
171 if (tc->t_tinc) { 171 if (tc->t_tinc) {
@@ -186,11 +186,12 @@ void rds_tcp_reset_callbacks(struct socket *sock,
186 release_sock(osock->sk); 186 release_sock(osock->sk);
187 sock_release(osock); 187 sock_release(osock);
188newsock: 188newsock:
189 rds_send_path_reset(&conn->c_path[0]); 189 rds_send_path_reset(cp);
190 lock_sock(sock->sk); 190 lock_sock(sock->sk);
191 write_lock_bh(&sock->sk->sk_callback_lock); 191 write_lock_bh(&sock->sk->sk_callback_lock);
192 tc->t_sock = sock; 192 tc->t_sock = sock;
193 sock->sk->sk_user_data = conn; 193 tc->t_cpath = cp;
194 sock->sk->sk_user_data = cp;
194 sock->sk->sk_data_ready = rds_tcp_data_ready; 195 sock->sk->sk_data_ready = rds_tcp_data_ready;
195 sock->sk->sk_write_space = rds_tcp_write_space; 196 sock->sk->sk_write_space = rds_tcp_write_space;
196 sock->sk->sk_state_change = rds_tcp_state_change; 197 sock->sk->sk_state_change = rds_tcp_state_change;
@@ -203,9 +204,9 @@ newsock:
203 * above rds_tcp_reset_callbacks for notes about synchronization 204 * above rds_tcp_reset_callbacks for notes about synchronization
204 * with data path 205 * with data path
205 */ 206 */
206void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn) 207void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp)
207{ 208{
208 struct rds_tcp_connection *tc = conn->c_transport_data; 209 struct rds_tcp_connection *tc = cp->cp_transport_data;
209 210
210 rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc); 211 rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
211 write_lock_bh(&sock->sk->sk_callback_lock); 212 write_lock_bh(&sock->sk->sk_callback_lock);
@@ -221,12 +222,12 @@ void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
221 sock->sk->sk_data_ready = sock->sk->sk_user_data; 222 sock->sk->sk_data_ready = sock->sk->sk_user_data;
222 223
223 tc->t_sock = sock; 224 tc->t_sock = sock;
224 tc->conn = conn; 225 tc->t_cpath = cp;
225 tc->t_orig_data_ready = sock->sk->sk_data_ready; 226 tc->t_orig_data_ready = sock->sk->sk_data_ready;
226 tc->t_orig_write_space = sock->sk->sk_write_space; 227 tc->t_orig_write_space = sock->sk->sk_write_space;
227 tc->t_orig_state_change = sock->sk->sk_state_change; 228 tc->t_orig_state_change = sock->sk->sk_state_change;
228 229
229 sock->sk->sk_user_data = conn; 230 sock->sk->sk_user_data = cp;
230 sock->sk->sk_data_ready = rds_tcp_data_ready; 231 sock->sk->sk_data_ready = rds_tcp_data_ready;
231 sock->sk->sk_write_space = rds_tcp_write_space; 232 sock->sk->sk_write_space = rds_tcp_write_space;
232 sock->sk->sk_state_change = rds_tcp_state_change; 233 sock->sk->sk_state_change = rds_tcp_state_change;
@@ -284,24 +285,29 @@ static int rds_tcp_laddr_check(struct net *net, __be32 addr)
284static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp) 285static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
285{ 286{
286 struct rds_tcp_connection *tc; 287 struct rds_tcp_connection *tc;
288 int i;
287 289
288 tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp); 290 for (i = 0; i < RDS_MPATH_WORKERS; i++) {
289 if (!tc) 291 tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
290 return -ENOMEM; 292 if (!tc)
293 return -ENOMEM;
291 294
292 mutex_init(&tc->t_conn_lock); 295 mutex_init(&tc->t_conn_path_lock);
293 tc->t_sock = NULL; 296 tc->t_sock = NULL;
294 tc->t_tinc = NULL; 297 tc->t_tinc = NULL;
295 tc->t_tinc_hdr_rem = sizeof(struct rds_header); 298 tc->t_tinc_hdr_rem = sizeof(struct rds_header);
296 tc->t_tinc_data_rem = 0; 299 tc->t_tinc_data_rem = 0;
297 300
298 conn->c_transport_data = tc; 301 conn->c_path[i].cp_transport_data = tc;
302 tc->t_cpath = &conn->c_path[i];
299 303
300 spin_lock_irq(&rds_tcp_conn_lock); 304 spin_lock_irq(&rds_tcp_conn_lock);
301 list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list); 305 list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
302 spin_unlock_irq(&rds_tcp_conn_lock); 306 spin_unlock_irq(&rds_tcp_conn_lock);
307 rdsdebug("rds_conn_path [%d] tc %p\n", i,
308 conn->c_path[i].cp_transport_data);
309 }
303 310
304 rdsdebug("alloced tc %p\n", conn->c_transport_data);
305 return 0; 311 return 0;
306} 312}
307 313
@@ -318,6 +324,17 @@ static void rds_tcp_conn_free(void *arg)
318 kmem_cache_free(rds_tcp_conn_slab, tc); 324 kmem_cache_free(rds_tcp_conn_slab, tc);
319} 325}
320 326
327static bool list_has_conn(struct list_head *list, struct rds_connection *conn)
328{
329 struct rds_tcp_connection *tc, *_tc;
330
331 list_for_each_entry_safe(tc, _tc, list, t_tcp_node) {
332 if (tc->t_cpath->cp_conn == conn)
333 return true;
334 }
335 return false;
336}
337
321static void rds_tcp_destroy_conns(void) 338static void rds_tcp_destroy_conns(void)
322{ 339{
323 struct rds_tcp_connection *tc, *_tc; 340 struct rds_tcp_connection *tc, *_tc;
@@ -325,29 +342,28 @@ static void rds_tcp_destroy_conns(void)
325 342
326 /* avoid calling conn_destroy with irqs off */ 343 /* avoid calling conn_destroy with irqs off */
327 spin_lock_irq(&rds_tcp_conn_lock); 344 spin_lock_irq(&rds_tcp_conn_lock);
328 list_splice(&rds_tcp_conn_list, &tmp_list); 345 list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
329 INIT_LIST_HEAD(&rds_tcp_conn_list); 346 if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
347 list_move_tail(&tc->t_tcp_node, &tmp_list);
348 }
330 spin_unlock_irq(&rds_tcp_conn_lock); 349 spin_unlock_irq(&rds_tcp_conn_lock);
331 350
332 list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) { 351 list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
333 if (tc->conn->c_passive) 352 rds_conn_destroy(tc->t_cpath->cp_conn);
334 rds_conn_destroy(tc->conn->c_passive);
335 rds_conn_destroy(tc->conn);
336 }
337} 353}
338 354
339static void rds_tcp_exit(void); 355static void rds_tcp_exit(void);
340 356
341struct rds_transport rds_tcp_transport = { 357struct rds_transport rds_tcp_transport = {
342 .laddr_check = rds_tcp_laddr_check, 358 .laddr_check = rds_tcp_laddr_check,
343 .xmit_prepare = rds_tcp_xmit_prepare, 359 .xmit_path_prepare = rds_tcp_xmit_path_prepare,
344 .xmit_complete = rds_tcp_xmit_complete, 360 .xmit_path_complete = rds_tcp_xmit_path_complete,
345 .xmit = rds_tcp_xmit, 361 .xmit = rds_tcp_xmit,
346 .recv = rds_tcp_recv, 362 .recv_path = rds_tcp_recv_path,
347 .conn_alloc = rds_tcp_conn_alloc, 363 .conn_alloc = rds_tcp_conn_alloc,
348 .conn_free = rds_tcp_conn_free, 364 .conn_free = rds_tcp_conn_free,
349 .conn_connect = rds_tcp_conn_connect, 365 .conn_path_connect = rds_tcp_conn_path_connect,
350 .conn_shutdown = rds_tcp_conn_shutdown, 366 .conn_path_shutdown = rds_tcp_conn_path_shutdown,
351 .inc_copy_to_user = rds_tcp_inc_copy_to_user, 367 .inc_copy_to_user = rds_tcp_inc_copy_to_user,
352 .inc_free = rds_tcp_inc_free, 368 .inc_free = rds_tcp_inc_free,
353 .stats_info_copy = rds_tcp_stats_info_copy, 369 .stats_info_copy = rds_tcp_stats_info_copy,
@@ -489,10 +505,30 @@ static struct pernet_operations rds_tcp_net_ops = {
489 .size = sizeof(struct rds_tcp_net), 505 .size = sizeof(struct rds_tcp_net),
490}; 506};
491 507
508/* explicitly send a RST on each socket, thereby releasing any socket refcnts
509 * that may otherwise hold up netns deletion.
510 */
511static void rds_tcp_conn_paths_destroy(struct rds_connection *conn)
512{
513 struct rds_conn_path *cp;
514 struct rds_tcp_connection *tc;
515 int i;
516 struct sock *sk;
517
518 for (i = 0; i < RDS_MPATH_WORKERS; i++) {
519 cp = &conn->c_path[i];
520 tc = cp->cp_transport_data;
521 if (!tc->t_sock)
522 continue;
523 sk = tc->t_sock->sk;
524 sk->sk_prot->disconnect(sk, 0);
525 tcp_done(sk);
526 }
527}
528
492static void rds_tcp_kill_sock(struct net *net) 529static void rds_tcp_kill_sock(struct net *net)
493{ 530{
494 struct rds_tcp_connection *tc, *_tc; 531 struct rds_tcp_connection *tc, *_tc;
495 struct sock *sk;
496 LIST_HEAD(tmp_list); 532 LIST_HEAD(tmp_list);
497 struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); 533 struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
498 534
@@ -501,20 +537,17 @@ static void rds_tcp_kill_sock(struct net *net)
501 flush_work(&rtn->rds_tcp_accept_w); 537 flush_work(&rtn->rds_tcp_accept_w);
502 spin_lock_irq(&rds_tcp_conn_lock); 538 spin_lock_irq(&rds_tcp_conn_lock);
503 list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { 539 list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
504 struct net *c_net = read_pnet(&tc->conn->c_net); 540 struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
505 541
506 if (net != c_net || !tc->t_sock) 542 if (net != c_net || !tc->t_sock)
507 continue; 543 continue;
508 list_move_tail(&tc->t_tcp_node, &tmp_list); 544 if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
545 list_move_tail(&tc->t_tcp_node, &tmp_list);
509 } 546 }
510 spin_unlock_irq(&rds_tcp_conn_lock); 547 spin_unlock_irq(&rds_tcp_conn_lock);
511 list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) { 548 list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
512 sk = tc->t_sock->sk; 549 rds_tcp_conn_paths_destroy(tc->t_cpath->cp_conn);
513 sk->sk_prot->disconnect(sk, 0); 550 rds_conn_destroy(tc->t_cpath->cp_conn);
514 tcp_done(sk);
515 if (tc->conn->c_passive)
516 rds_conn_destroy(tc->conn->c_passive);
517 rds_conn_destroy(tc->conn);
518 } 551 }
519} 552}
520 553
@@ -552,12 +585,13 @@ static void rds_tcp_sysctl_reset(struct net *net)
552 585
553 spin_lock_irq(&rds_tcp_conn_lock); 586 spin_lock_irq(&rds_tcp_conn_lock);
554 list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { 587 list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
555 struct net *c_net = read_pnet(&tc->conn->c_net); 588 struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
556 589
557 if (net != c_net || !tc->t_sock) 590 if (net != c_net || !tc->t_sock)
558 continue; 591 continue;
559 592
560 rds_conn_drop(tc->conn); /* reconnect with new parameters */ 593 /* reconnect with new parameters */
594 rds_conn_path_drop(tc->t_cpath);
561 } 595 }
562 spin_unlock_irq(&rds_tcp_conn_lock); 596 spin_unlock_irq(&rds_tcp_conn_lock);
563} 597}
@@ -617,7 +651,7 @@ static int rds_tcp_init(void)
617 651
618 ret = rds_tcp_recv_init(); 652 ret = rds_tcp_recv_init();
619 if (ret) 653 if (ret)
620 goto out_slab; 654 goto out_pernet;
621 655
622 ret = rds_trans_register(&rds_tcp_transport); 656 ret = rds_trans_register(&rds_tcp_transport);
623 if (ret) 657 if (ret)
@@ -629,8 +663,9 @@ static int rds_tcp_init(void)
629 663
630out_recv: 664out_recv:
631 rds_tcp_recv_exit(); 665 rds_tcp_recv_exit();
632out_slab: 666out_pernet:
633 unregister_pernet_subsys(&rds_tcp_net_ops); 667 unregister_pernet_subsys(&rds_tcp_net_ops);
668out_slab:
634 kmem_cache_destroy(rds_tcp_conn_slab); 669 kmem_cache_destroy(rds_tcp_conn_slab);
635out: 670out:
636 return ret; 671 return ret;
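
The out_slab -> out_pernet relabel at the bottom of rds_tcp_init() restores the invariant of a kernel-style error ladder: each label undoes exactly the steps that succeeded before the failing one, in reverse order, so a failure in rds_tcp_recv_init() now unregisters the pernet subsystem as well as destroying the slab under a label whose name says so. The pattern in miniature:

	#include <stdio.h>

	static int step(const char *name, int fail)
	{
		printf("init %s\n", name);
		return fail ? -1 : 0;
	}

	/* Each failure jumps to the label that unwinds everything already
	 * done, in reverse order of setup. */
	static int init_all(int fail_at)
	{
		int ret;

		ret = step("slab", fail_at == 1);
		if (ret)
			goto out;
		ret = step("pernet", fail_at == 2);
		if (ret)
			goto out_slab;
		ret = step("recv", fail_at == 3);
		if (ret)
			goto out_pernet;
		return 0;

	out_pernet:
		printf("undo pernet\n");
	out_slab:
		printf("undo slab\n");
	out:
		return ret;
	}

	int main(void)
	{
		init_all(3);	/* fail at "recv": unwinds pernet, then slab */
		return 0;
	}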
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index ec0602b0dc24..1c3160faa963 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -11,11 +11,11 @@ struct rds_tcp_incoming {
11struct rds_tcp_connection { 11struct rds_tcp_connection {
12 12
13 struct list_head t_tcp_node; 13 struct list_head t_tcp_node;
14 struct rds_connection *conn; 14 struct rds_conn_path *t_cpath;
15 /* t_conn_lock synchronizes the connection establishment between 15 /* t_conn_path_lock synchronizes the connection establishment between
16 * rds_tcp_accept_one and rds_tcp_conn_connect 16 * rds_tcp_accept_one and rds_tcp_conn_path_connect
17 */ 17 */
18 struct mutex t_conn_lock; 18 struct mutex t_conn_path_lock;
19 struct socket *t_sock; 19 struct socket *t_sock;
20 void *t_orig_write_space; 20 void *t_orig_write_space;
21 void *t_orig_data_ready; 21 void *t_orig_data_ready;
@@ -49,8 +49,8 @@ struct rds_tcp_statistics {
49/* tcp.c */ 49/* tcp.c */
50void rds_tcp_tune(struct socket *sock); 50void rds_tcp_tune(struct socket *sock);
51void rds_tcp_nonagle(struct socket *sock); 51void rds_tcp_nonagle(struct socket *sock);
52void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn); 52void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp);
53void rds_tcp_reset_callbacks(struct socket *sock, struct rds_connection *conn); 53void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp);
54void rds_tcp_restore_callbacks(struct socket *sock, 54void rds_tcp_restore_callbacks(struct socket *sock,
55 struct rds_tcp_connection *tc); 55 struct rds_tcp_connection *tc);
56u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc); 56u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
@@ -60,8 +60,8 @@ extern struct rds_transport rds_tcp_transport;
60void rds_tcp_accept_work(struct sock *sk); 60void rds_tcp_accept_work(struct sock *sk);
61 61
62/* tcp_connect.c */ 62/* tcp_connect.c */
63int rds_tcp_conn_connect(struct rds_connection *conn); 63int rds_tcp_conn_path_connect(struct rds_conn_path *cp);
64void rds_tcp_conn_shutdown(struct rds_connection *conn); 64void rds_tcp_conn_path_shutdown(struct rds_conn_path *conn);
65void rds_tcp_state_change(struct sock *sk); 65void rds_tcp_state_change(struct sock *sk);
66 66
67/* tcp_listen.c */ 67/* tcp_listen.c */
@@ -75,15 +75,15 @@ int rds_tcp_keepalive(struct socket *sock);
75int rds_tcp_recv_init(void); 75int rds_tcp_recv_init(void);
76void rds_tcp_recv_exit(void); 76void rds_tcp_recv_exit(void);
77void rds_tcp_data_ready(struct sock *sk); 77void rds_tcp_data_ready(struct sock *sk);
78int rds_tcp_recv(struct rds_connection *conn); 78int rds_tcp_recv_path(struct rds_conn_path *cp);
79void rds_tcp_inc_free(struct rds_incoming *inc); 79void rds_tcp_inc_free(struct rds_incoming *inc);
80int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to); 80int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
81 81
82/* tcp_send.c */ 82/* tcp_send.c */
83void rds_tcp_xmit_prepare(struct rds_connection *conn); 83void rds_tcp_xmit_path_prepare(struct rds_conn_path *cp);
84void rds_tcp_xmit_complete(struct rds_connection *conn); 84void rds_tcp_xmit_path_complete(struct rds_conn_path *cp);
85int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, 85int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
86 unsigned int hdr_off, unsigned int sg, unsigned int off); 86 unsigned int hdr_off, unsigned int sg, unsigned int off);
87void rds_tcp_write_space(struct sock *sk); 87void rds_tcp_write_space(struct sock *sk);
88 88
89/* tcp_stats.c */ 89/* tcp_stats.c */
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index ba9ec67f4e41..c916715fbe61 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -41,51 +41,51 @@
41void rds_tcp_state_change(struct sock *sk) 41void rds_tcp_state_change(struct sock *sk)
42{ 42{
43 void (*state_change)(struct sock *sk); 43 void (*state_change)(struct sock *sk);
44 struct rds_connection *conn; 44 struct rds_conn_path *cp;
45 struct rds_tcp_connection *tc; 45 struct rds_tcp_connection *tc;
46 46
47 read_lock_bh(&sk->sk_callback_lock); 47 read_lock_bh(&sk->sk_callback_lock);
48 conn = sk->sk_user_data; 48 cp = sk->sk_user_data;
49 if (!conn) { 49 if (!cp) {
50 state_change = sk->sk_state_change; 50 state_change = sk->sk_state_change;
51 goto out; 51 goto out;
52 } 52 }
53 tc = conn->c_transport_data; 53 tc = cp->cp_transport_data;
54 state_change = tc->t_orig_state_change; 54 state_change = tc->t_orig_state_change;
55 55
56 rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state); 56 rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state);
57 57
58 switch(sk->sk_state) { 58 switch (sk->sk_state) {
59 /* ignore connecting sockets as they make progress */ 59 /* ignore connecting sockets as they make progress */
60 case TCP_SYN_SENT: 60 case TCP_SYN_SENT:
61 case TCP_SYN_RECV: 61 case TCP_SYN_RECV:
62 break; 62 break;
63 case TCP_ESTABLISHED: 63 case TCP_ESTABLISHED:
64 rds_connect_path_complete(&conn->c_path[0], 64 rds_connect_path_complete(cp, RDS_CONN_CONNECTING);
65 RDS_CONN_CONNECTING); 65 break;
66 break; 66 case TCP_CLOSE_WAIT:
67 case TCP_CLOSE_WAIT: 67 case TCP_CLOSE:
68 case TCP_CLOSE: 68 rds_conn_path_drop(cp);
69 rds_conn_drop(conn); 69 default:
70 default: 70 break;
71 break;
72 } 71 }
73out: 72out:
74 read_unlock_bh(&sk->sk_callback_lock); 73 read_unlock_bh(&sk->sk_callback_lock);
75 state_change(sk); 74 state_change(sk);
76} 75}
77 76
78int rds_tcp_conn_connect(struct rds_connection *conn) 77int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
79{ 78{
80 struct socket *sock = NULL; 79 struct socket *sock = NULL;
81 struct sockaddr_in src, dest; 80 struct sockaddr_in src, dest;
82 int ret; 81 int ret;
83 struct rds_tcp_connection *tc = conn->c_transport_data; 82 struct rds_connection *conn = cp->cp_conn;
83 struct rds_tcp_connection *tc = cp->cp_transport_data;
84 84
85 mutex_lock(&tc->t_conn_lock); 85 mutex_lock(&tc->t_conn_path_lock);
86 86
87 if (rds_conn_up(conn)) { 87 if (rds_conn_path_up(cp)) {
88 mutex_unlock(&tc->t_conn_lock); 88 mutex_unlock(&tc->t_conn_path_lock);
89 return 0; 89 return 0;
90 } 90 }
91 ret = sock_create_kern(rds_conn_net(conn), PF_INET, 91 ret = sock_create_kern(rds_conn_net(conn), PF_INET,
@@ -114,10 +114,11 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
114 * once we call connect() we can start getting callbacks and they 114 * once we call connect() we can start getting callbacks and they
115 * own the socket 115 * own the socket
116 */ 116 */
117 rds_tcp_set_callbacks(sock, conn); 117 rds_tcp_set_callbacks(sock, cp);
118 ret = sock->ops->connect(sock, (struct sockaddr *)&dest, sizeof(dest), 118 ret = sock->ops->connect(sock, (struct sockaddr *)&dest, sizeof(dest),
119 O_NONBLOCK); 119 O_NONBLOCK);
120 120
121 cp->cp_outgoing = 1;
121 rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret); 122 rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret);
122 if (ret == -EINPROGRESS) 123 if (ret == -EINPROGRESS)
123 ret = 0; 124 ret = 0;
@@ -125,11 +126,11 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
125 rds_tcp_keepalive(sock); 126 rds_tcp_keepalive(sock);
126 sock = NULL; 127 sock = NULL;
127 } else { 128 } else {
128 rds_tcp_restore_callbacks(sock, conn->c_transport_data); 129 rds_tcp_restore_callbacks(sock, cp->cp_transport_data);
129 } 130 }
130 131
131out: 132out:
132 mutex_unlock(&tc->t_conn_lock); 133 mutex_unlock(&tc->t_conn_path_lock);
133 if (sock) 134 if (sock)
134 sock_release(sock); 135 sock_release(sock);
135 return ret; 136 return ret;
@@ -144,12 +145,13 @@ out:
144 * callbacks to those set by TCP. Our callbacks won't execute again once we 145 * callbacks to those set by TCP. Our callbacks won't execute again once we
145 * hold the sock lock. 146 * hold the sock lock.
146 */ 147 */
147void rds_tcp_conn_shutdown(struct rds_connection *conn) 148void rds_tcp_conn_path_shutdown(struct rds_conn_path *cp)
148{ 149{
149 struct rds_tcp_connection *tc = conn->c_transport_data; 150 struct rds_tcp_connection *tc = cp->cp_transport_data;
150 struct socket *sock = tc->t_sock; 151 struct socket *sock = tc->t_sock;
151 152
152 rdsdebug("shutting down conn %p tc %p sock %p\n", conn, tc, sock); 153 rdsdebug("shutting down conn %p tc %p sock %p\n",
154 cp->cp_conn, tc, sock);
153 155
154 if (sock) { 156 if (sock) {
155 sock->ops->shutdown(sock, RCV_SHUTDOWN | SEND_SHUTDOWN); 157 sock->ops->shutdown(sock, RCV_SHUTDOWN | SEND_SHUTDOWN);
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 22d9bb15f731..ca975a217a49 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -79,6 +79,7 @@ int rds_tcp_accept_one(struct socket *sock)
79 struct inet_sock *inet; 79 struct inet_sock *inet;
80 struct rds_tcp_connection *rs_tcp = NULL; 80 struct rds_tcp_connection *rs_tcp = NULL;
81 int conn_state; 81 int conn_state;
82 struct rds_conn_path *cp;
82 83
83 if (!sock) /* module unload or netns delete in progress */ 84 if (!sock) /* module unload or netns delete in progress */
84 return -ENETUNREACH; 85 return -ENETUNREACH;
@@ -120,8 +121,9 @@ int rds_tcp_accept_one(struct socket *sock)
120 * rds_tcp_state_change() will do that cleanup 121 * rds_tcp_state_change() will do that cleanup
121 */ 122 */
122 rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data; 123 rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data;
124 cp = &conn->c_path[0];
123 rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING); 125 rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
124 mutex_lock(&rs_tcp->t_conn_lock); 126 mutex_lock(&rs_tcp->t_conn_path_lock);
125 conn_state = rds_conn_state(conn); 127 conn_state = rds_conn_state(conn);
126 if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_UP) 128 if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_UP)
127 goto rst_nsk; 129 goto rst_nsk;
@@ -136,16 +138,14 @@ int rds_tcp_accept_one(struct socket *sock)
136 !conn->c_path[0].cp_outgoing) { 138 !conn->c_path[0].cp_outgoing) {
137 goto rst_nsk; 139 goto rst_nsk;
138 } else { 140 } else {
139 rds_tcp_reset_callbacks(new_sock, conn); 141 rds_tcp_reset_callbacks(new_sock, cp);
140 conn->c_path[0].cp_outgoing = 0; 142 conn->c_path[0].cp_outgoing = 0;
141 /* rds_connect_path_complete() marks RDS_CONN_UP */ 143 /* rds_connect_path_complete() marks RDS_CONN_UP */
142 rds_connect_path_complete(&conn->c_path[0], 144 rds_connect_path_complete(cp, RDS_CONN_RESETTING);
143 RDS_CONN_DISCONNECTING);
144 } 145 }
145 } else { 146 } else {
146 rds_tcp_set_callbacks(new_sock, conn); 147 rds_tcp_set_callbacks(new_sock, cp);
147 rds_connect_path_complete(&conn->c_path[0], 148 rds_connect_path_complete(cp, RDS_CONN_CONNECTING);
148 RDS_CONN_CONNECTING);
149 } 149 }
150 new_sock = NULL; 150 new_sock = NULL;
151 ret = 0; 151 ret = 0;
@@ -156,7 +156,7 @@ rst_nsk:
156 ret = 0; 156 ret = 0;
157out: 157out:
158 if (rs_tcp) 158 if (rs_tcp)
159 mutex_unlock(&rs_tcp->t_conn_lock); 159 mutex_unlock(&rs_tcp->t_conn_path_lock);
160 if (new_sock) 160 if (new_sock)
161 sock_release(new_sock); 161 sock_release(new_sock);
162 return ret; 162 return ret;
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 3f8fb38996c7..ad4892e97f91 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -34,7 +34,6 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <net/tcp.h> 35#include <net/tcp.h>
36 36
37#include "rds_single_path.h"
38#include "rds.h" 37#include "rds.h"
39#include "tcp.h" 38#include "tcp.h"
40 39
@@ -148,7 +147,7 @@ static void rds_tcp_cong_recv(struct rds_connection *conn,
148} 147}
149 148
150struct rds_tcp_desc_arg { 149struct rds_tcp_desc_arg {
151 struct rds_connection *conn; 150 struct rds_conn_path *conn_path;
152 gfp_t gfp; 151 gfp_t gfp;
153}; 152};
154 153
@@ -156,8 +155,8 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
156 unsigned int offset, size_t len) 155 unsigned int offset, size_t len)
157{ 156{
158 struct rds_tcp_desc_arg *arg = desc->arg.data; 157 struct rds_tcp_desc_arg *arg = desc->arg.data;
159 struct rds_connection *conn = arg->conn; 158 struct rds_conn_path *cp = arg->conn_path;
160 struct rds_tcp_connection *tc = conn->c_transport_data; 159 struct rds_tcp_connection *tc = cp->cp_transport_data;
161 struct rds_tcp_incoming *tinc = tc->t_tinc; 160 struct rds_tcp_incoming *tinc = tc->t_tinc;
162 struct sk_buff *clone; 161 struct sk_buff *clone;
163 size_t left = len, to_copy; 162 size_t left = len, to_copy;
@@ -172,14 +171,15 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
172 while (left) { 171 while (left) {
173 if (!tinc) { 172 if (!tinc) {
174 tinc = kmem_cache_alloc(rds_tcp_incoming_slab, 173 tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
175 arg->gfp); 174 arg->gfp);
176 if (!tinc) { 175 if (!tinc) {
177 desc->error = -ENOMEM; 176 desc->error = -ENOMEM;
178 goto out; 177 goto out;
179 } 178 }
180 tc->t_tinc = tinc; 179 tc->t_tinc = tinc;
181 rdsdebug("alloced tinc %p\n", tinc); 180 rdsdebug("alloced tinc %p\n", tinc);
182 rds_inc_init(&tinc->ti_inc, conn, conn->c_faddr); 181 rds_inc_path_init(&tinc->ti_inc, cp,
182 cp->cp_conn->c_faddr);
183 /* 183 /*
184 * XXX * we might be able to use the __ variants when 184 * XXX * we might be able to use the __ variants when
185 * we've already serialized at a higher level. 185 * we've already serialized at a higher level.
@@ -229,6 +229,8 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
229 } 229 }
230 230
231 if (tc->t_tinc_hdr_rem == 0 && tc->t_tinc_data_rem == 0) { 231 if (tc->t_tinc_hdr_rem == 0 && tc->t_tinc_data_rem == 0) {
232 struct rds_connection *conn = cp->cp_conn;
233
232 if (tinc->ti_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP) 234 if (tinc->ti_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
233 rds_tcp_cong_recv(conn, tinc); 235 rds_tcp_cong_recv(conn, tinc);
234 else 236 else
@@ -251,15 +253,15 @@ out:
251} 253}
252 254
253/* the caller has to hold the sock lock */ 255/* the caller has to hold the sock lock */
254static int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp) 256static int rds_tcp_read_sock(struct rds_conn_path *cp, gfp_t gfp)
255{ 257{
256 struct rds_tcp_connection *tc = conn->c_transport_data; 258 struct rds_tcp_connection *tc = cp->cp_transport_data;
257 struct socket *sock = tc->t_sock; 259 struct socket *sock = tc->t_sock;
258 read_descriptor_t desc; 260 read_descriptor_t desc;
259 struct rds_tcp_desc_arg arg; 261 struct rds_tcp_desc_arg arg;
260 262
261 /* It's like glib in the kernel! */ 263 /* It's like glib in the kernel! */
262 arg.conn = conn; 264 arg.conn_path = cp;
263 arg.gfp = gfp; 265 arg.gfp = gfp;
264 desc.arg.data = &arg; 266 desc.arg.data = &arg;
265 desc.error = 0; 267 desc.error = 0;
@@ -279,16 +281,17 @@ static int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp)
279 * if we fail to allocate we're in trouble.. blindly wait some time before 281 * if we fail to allocate we're in trouble.. blindly wait some time before
280 * trying again to see if the VM can free up something for us. 282 * trying again to see if the VM can free up something for us.
281 */ 283 */
282int rds_tcp_recv(struct rds_connection *conn) 284int rds_tcp_recv_path(struct rds_conn_path *cp)
283{ 285{
284 struct rds_tcp_connection *tc = conn->c_transport_data; 286 struct rds_tcp_connection *tc = cp->cp_transport_data;
285 struct socket *sock = tc->t_sock; 287 struct socket *sock = tc->t_sock;
286 int ret = 0; 288 int ret = 0;
287 289
288 rdsdebug("recv worker conn %p tc %p sock %p\n", conn, tc, sock); 290 rdsdebug("recv worker path [%d] tc %p sock %p\n",
291 cp->cp_index, tc, sock);
289 292
290 lock_sock(sock->sk); 293 lock_sock(sock->sk);
291 ret = rds_tcp_read_sock(conn, GFP_KERNEL); 294 ret = rds_tcp_read_sock(cp, GFP_KERNEL);
292 release_sock(sock->sk); 295 release_sock(sock->sk);
293 296
294 return ret; 297 return ret;
@@ -297,24 +300,24 @@ int rds_tcp_recv(struct rds_connection *conn)
297void rds_tcp_data_ready(struct sock *sk) 300void rds_tcp_data_ready(struct sock *sk)
298{ 301{
299 void (*ready)(struct sock *sk); 302 void (*ready)(struct sock *sk);
300 struct rds_connection *conn; 303 struct rds_conn_path *cp;
301 struct rds_tcp_connection *tc; 304 struct rds_tcp_connection *tc;
302 305
303 rdsdebug("data ready sk %p\n", sk); 306 rdsdebug("data ready sk %p\n", sk);
304 307
305 read_lock_bh(&sk->sk_callback_lock); 308 read_lock_bh(&sk->sk_callback_lock);
306 conn = sk->sk_user_data; 309 cp = sk->sk_user_data;
307 if (!conn) { /* check for teardown race */ 310 if (!cp) { /* check for teardown race */
308 ready = sk->sk_data_ready; 311 ready = sk->sk_data_ready;
309 goto out; 312 goto out;
310 } 313 }
311 314
312 tc = conn->c_transport_data; 315 tc = cp->cp_transport_data;
313 ready = tc->t_orig_data_ready; 316 ready = tc->t_orig_data_ready;
314 rds_tcp_stats_inc(s_tcp_data_ready_calls); 317 rds_tcp_stats_inc(s_tcp_data_ready_calls);
315 318
316 if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM) 319 if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM)
317 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 320 queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
318out: 321out:
319 read_unlock_bh(&sk->sk_callback_lock); 322 read_unlock_bh(&sk->sk_callback_lock);
320 ready(sk); 323 ready(sk);
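
rds_tcp_data_ready() shows the callback convention the series settles on: sk_user_data now carries the rds_conn_path, it may already be NULL when teardown races with the softirq, and the function always chains to some data_ready. A skeleton of that pattern, assuming kernel context; saved_original_ready is a hypothetical stand-in for tc->t_orig_data_ready:

    static void (*saved_original_ready)(struct sock *sk);  /* hypothetical */

    void my_data_ready(struct sock *sk)
    {
            void (*ready)(struct sock *sk);
            void *cp;

            read_lock_bh(&sk->sk_callback_lock);
            cp = sk->sk_user_data;          /* NULL once teardown ran */
            ready = cp ? saved_original_ready : sk->sk_data_ready;
            read_unlock_bh(&sk->sk_callback_lock);
            ready(sk);                      /* never swallow the wakeup */
    }
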
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 2b3414f3c45c..57e0f5826406 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -49,16 +49,16 @@ static void rds_tcp_cork(struct socket *sock, int val)
49 set_fs(oldfs); 49 set_fs(oldfs);
50} 50}
51 51
52void rds_tcp_xmit_prepare(struct rds_connection *conn) 52void rds_tcp_xmit_path_prepare(struct rds_conn_path *cp)
53{ 53{
54 struct rds_tcp_connection *tc = conn->c_transport_data; 54 struct rds_tcp_connection *tc = cp->cp_transport_data;
55 55
56 rds_tcp_cork(tc->t_sock, 1); 56 rds_tcp_cork(tc->t_sock, 1);
57} 57}
58 58
59void rds_tcp_xmit_complete(struct rds_connection *conn) 59void rds_tcp_xmit_path_complete(struct rds_conn_path *cp)
60{ 60{
61 struct rds_tcp_connection *tc = conn->c_transport_data; 61 struct rds_tcp_connection *tc = cp->cp_transport_data;
62 62
63 rds_tcp_cork(tc->t_sock, 0); 63 rds_tcp_cork(tc->t_sock, 0);
64} 64}
@@ -67,19 +67,19 @@ void rds_tcp_xmit_complete(struct rds_connection *conn)
67static int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len) 67static int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len)
68{ 68{
69 struct kvec vec = { 69 struct kvec vec = {
70 .iov_base = data, 70 .iov_base = data,
71 .iov_len = len, 71 .iov_len = len,
72 };
73 struct msghdr msg = {
74 .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
72 }; 75 };
73 struct msghdr msg = {
74 .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
75 };
76 76
77 return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len); 77 return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);
78} 78}
79 79
80/* the core send_sem serializes this with other xmit and shutdown */ 80/* the core send_sem serializes this with other xmit and shutdown */
81int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, 81int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
82 unsigned int hdr_off, unsigned int sg, unsigned int off) 82 unsigned int hdr_off, unsigned int sg, unsigned int off)
83{ 83{
84 struct rds_tcp_connection *tc = conn->c_transport_data; 84 struct rds_tcp_connection *tc = conn->c_transport_data;
85 int done = 0; 85 int done = 0;
@@ -178,27 +178,27 @@ static int rds_tcp_is_acked(struct rds_message *rm, uint64_t ack)
178void rds_tcp_write_space(struct sock *sk) 178void rds_tcp_write_space(struct sock *sk)
179{ 179{
180 void (*write_space)(struct sock *sk); 180 void (*write_space)(struct sock *sk);
181 struct rds_connection *conn; 181 struct rds_conn_path *cp;
182 struct rds_tcp_connection *tc; 182 struct rds_tcp_connection *tc;
183 183
184 read_lock_bh(&sk->sk_callback_lock); 184 read_lock_bh(&sk->sk_callback_lock);
185 conn = sk->sk_user_data; 185 cp = sk->sk_user_data;
186 if (!conn) { 186 if (!cp) {
187 write_space = sk->sk_write_space; 187 write_space = sk->sk_write_space;
188 goto out; 188 goto out;
189 } 189 }
190 190
191 tc = conn->c_transport_data; 191 tc = cp->cp_transport_data;
192 rdsdebug("write_space for tc %p\n", tc); 192 rdsdebug("write_space for tc %p\n", tc);
193 write_space = tc->t_orig_write_space; 193 write_space = tc->t_orig_write_space;
194 rds_tcp_stats_inc(s_tcp_write_space_calls); 194 rds_tcp_stats_inc(s_tcp_write_space_calls);
195 195
196 rdsdebug("tcp una %u\n", rds_tcp_snd_una(tc)); 196 rdsdebug("tcp una %u\n", rds_tcp_snd_una(tc));
197 tc->t_last_seen_una = rds_tcp_snd_una(tc); 197 tc->t_last_seen_una = rds_tcp_snd_una(tc);
198 rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked); 198 rds_send_path_drop_acked(cp, rds_tcp_snd_una(tc), rds_tcp_is_acked);
199 199
200 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) 200 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
201 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 201 queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
202 202
203out: 203out:
204 read_unlock_bh(&sk->sk_callback_lock); 204 read_unlock_bh(&sk->sk_callback_lock);
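
The wakeup test kept at the end of rds_tcp_write_space() reads as "at least half of the send buffer has drained": for example, with sk_sndbuf = 262144, once sk_wmem_alloc falls to 131072 the check (131072 << 1) <= 262144 holds and the per-path send worker cp_send_w is queued.
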
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 9fbe95bb14a9..bc97d67f29cc 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -125,6 +125,11 @@ void rds_queue_reconnect(struct rds_conn_path *cp)
125 conn, &conn->c_laddr, &conn->c_faddr, 125 conn, &conn->c_laddr, &conn->c_faddr,
126 cp->cp_reconnect_jiffies); 126 cp->cp_reconnect_jiffies);
127 127
128 /* let peer with smaller addr initiate reconnect, to avoid duels */
129 if (conn->c_trans->t_type == RDS_TRANS_TCP &&
130 conn->c_laddr > conn->c_faddr)
131 return;
132
128 set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags); 133 set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
129 if (cp->cp_reconnect_jiffies == 0) { 134 if (cp->cp_reconnect_jiffies == 0) {
130 cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies; 135 cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
@@ -152,8 +157,9 @@ void rds_connect_worker(struct work_struct *work)
152 int ret; 157 int ret;
153 158
154 clear_bit(RDS_RECONNECT_PENDING, &cp->cp_flags); 159 clear_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
155 if (rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) { 160 ret = rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
156 ret = conn->c_trans->conn_connect(conn); 161 if (ret) {
162 ret = conn->c_trans->conn_path_connect(cp);
157 rdsdebug("conn %p for %pI4 to %pI4 dispatched, ret %d\n", 163 rdsdebug("conn %p for %pI4 to %pI4 dispatched, ret %d\n",
158 conn, &conn->c_laddr, &conn->c_faddr, ret); 164 conn, &conn->c_laddr, &conn->c_faddr, ret);
159 165
@@ -203,7 +209,7 @@ void rds_recv_worker(struct work_struct *work)
203 int ret; 209 int ret;
204 210
205 if (rds_conn_path_state(cp) == RDS_CONN_UP) { 211 if (rds_conn_path_state(cp) == RDS_CONN_UP) {
206 ret = cp->cp_conn->c_trans->recv(cp->cp_conn); 212 ret = cp->cp_conn->c_trans->recv_path(cp);
207 rdsdebug("conn %p ret %d\n", cp->cp_conn, ret); 213 rdsdebug("conn %p ret %d\n", cp->cp_conn, ret);
208 switch (ret) { 214 switch (ret) {
209 case -EAGAIN: 215 case -EAGAIN:
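
The new tie-break in rds_queue_reconnect() ensures that after a TCP link drops only one side dials out, because both peers evaluate the same rule on the same address pair (the kernel compares the __be32 values directly, so the ordering is on big-endian representations). A userspace sketch of the rule:

    #include <stdbool.h>
    #include <stdint.h>

    /* true on the peer that should initiate the reconnect */
    static bool should_initiate(uint32_t laddr_be, uint32_t faddr_be)
    {
            return laddr_be <= faddr_be;    /* smaller address dials out */
    }
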
diff --git a/net/rds/transport.c b/net/rds/transport.c
index f3afd1d60d3c..2ffd3e30c643 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -140,8 +140,7 @@ unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
140 rds_info_iter_unmap(iter); 140 rds_info_iter_unmap(iter);
141 down_read(&rds_trans_sem); 141 down_read(&rds_trans_sem);
142 142
143 for (i = 0; i < RDS_TRANS_COUNT; i++) 143 for (i = 0; i < RDS_TRANS_COUNT; i++) {
144 {
145 trans = transports[i]; 144 trans = transports[i];
146 if (!trans || !trans->stats_info_copy) 145 if (!trans || !trans->stats_info_copy)
147 continue; 146 continue;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index f8c61d2a7963..47ec2305f920 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1122,7 +1122,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1122 nla_nest_end(skb, nest); 1122 nla_nest_end(skb, nest);
1123 ret = skb->len; 1123 ret = skb->len;
1124 } else 1124 } else
1125 nla_nest_cancel(skb, nest); 1125 nlmsg_trim(skb, b);
1126 1126
1127 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1127 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1128 if (NETLINK_CB(cb->skb).portid && ret) 1128 if (NETLINK_CB(cb->skb).portid && ret)
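
The switch from nla_nest_cancel() to nlmsg_trim() matters on this error path: cancelling the nest only rewinds to where the nest began, while trimming to b, the tail pointer captured before the reply was built, discards every partially written attribute as well. nlmsg_trim() is essentially the following (simplified; kernel context assumed):

    static void trim_to_mark(struct sk_buff *skb, const void *mark)
    {
            if (mark)       /* rewind the tail to the saved position */
                    skb_trim(skb, (unsigned char *)mark - skb->data);
    }
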
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index f7b6cf49ea6f..ef74bffa6101 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -223,15 +223,10 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
223 223
224 bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]); 224 bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);
225 225
226 fp = bpf_prog_get(bpf_fd); 226 fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_ACT);
227 if (IS_ERR(fp)) 227 if (IS_ERR(fp))
228 return PTR_ERR(fp); 228 return PTR_ERR(fp);
229 229
230 if (fp->type != BPF_PROG_TYPE_SCHED_ACT) {
231 bpf_prog_put(fp);
232 return -EINVAL;
233 }
234
235 if (tb[TCA_ACT_BPF_NAME]) { 230 if (tb[TCA_ACT_BPF_NAME]) {
236 name = kmemdup(nla_data(tb[TCA_ACT_BPF_NAME]), 231 name = kmemdup(nla_data(tb[TCA_ACT_BPF_NAME]),
237 nla_len(tb[TCA_ACT_BPF_NAME]), 232 nla_len(tb[TCA_ACT_BPF_NAME]),
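
bpf_prog_get_type() folds the fd lookup and the program-type check into one call, so every caller gets the put-on-mismatch behaviour for free; the removed lines above are, roughly, what the helper does internally. The identical consolidation lands in cls_bpf.c later in this patch. In outline:

    struct bpf_prog *prog_get_checked(u32 ufd, enum bpf_prog_type type)
    {
            struct bpf_prog *fp = bpf_prog_get(ufd);

            if (IS_ERR(fp))
                    return fp;
            if (fp->type != type) {
                    bpf_prog_put(fp);       /* drop the ref just taken */
                    return ERR_PTR(-EINVAL);
            }
            return fp;
    }
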
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index b7fa96926c90..845ab5119c05 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -106,9 +106,9 @@ int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
106} 106}
107EXPORT_SYMBOL_GPL(ife_get_meta_u16); 107EXPORT_SYMBOL_GPL(ife_get_meta_u16);
108 108
109int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval) 109int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
110{ 110{
111 mi->metaval = kmemdup(metaval, sizeof(u32), GFP_KERNEL); 111 mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
112 if (!mi->metaval) 112 if (!mi->metaval)
113 return -ENOMEM; 113 return -ENOMEM;
114 114
@@ -116,9 +116,9 @@ int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval)
116} 116}
117EXPORT_SYMBOL_GPL(ife_alloc_meta_u32); 117EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
118 118
119int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval) 119int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
120{ 120{
121 mi->metaval = kmemdup(metaval, sizeof(u16), GFP_KERNEL); 121 mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
122 if (!mi->metaval) 122 if (!mi->metaval)
123 return -ENOMEM; 123 return -ENOMEM;
124 124
@@ -240,10 +240,10 @@ static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
240} 240}
241 241
242/* called when adding new meta information 242/* called when adding new meta information
243 * under ife->tcf_lock 243 * under ife->tcf_lock for existing action
244*/ 244*/
245static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, 245static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
246 void *val, int len) 246 void *val, int len, bool exists)
247{ 247{
248 struct tcf_meta_ops *ops = find_ife_oplist(metaid); 248 struct tcf_meta_ops *ops = find_ife_oplist(metaid);
249 int ret = 0; 249 int ret = 0;
@@ -251,11 +251,13 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
251 if (!ops) { 251 if (!ops) {
252 ret = -ENOENT; 252 ret = -ENOENT;
253#ifdef CONFIG_MODULES 253#ifdef CONFIG_MODULES
254 spin_unlock_bh(&ife->tcf_lock); 254 if (exists)
255 spin_unlock_bh(&ife->tcf_lock);
255 rtnl_unlock(); 256 rtnl_unlock();
256 request_module("ifemeta%u", metaid); 257 request_module("ifemeta%u", metaid);
257 rtnl_lock(); 258 rtnl_lock();
258 spin_lock_bh(&ife->tcf_lock); 259 if (exists)
260 spin_lock_bh(&ife->tcf_lock);
259 ops = find_ife_oplist(metaid); 261 ops = find_ife_oplist(metaid);
260#endif 262#endif
261 } 263 }
@@ -272,10 +274,10 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
272} 274}
273 275
274/* called when adding new meta information 276/* called when adding new meta information
275 * under ife->tcf_lock 277 * under ife->tcf_lock for existing action
276*/ 278*/
277static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, 279static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
278 int len) 280 int len, bool atomic)
279{ 281{
280 struct tcf_meta_info *mi = NULL; 282 struct tcf_meta_info *mi = NULL;
281 struct tcf_meta_ops *ops = find_ife_oplist(metaid); 283 struct tcf_meta_ops *ops = find_ife_oplist(metaid);
@@ -284,7 +286,7 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
284 if (!ops) 286 if (!ops)
285 return -ENOENT; 287 return -ENOENT;
286 288
287 mi = kzalloc(sizeof(*mi), GFP_KERNEL); 289 mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
288 if (!mi) { 290 if (!mi) {
289 /*put back what find_ife_oplist took */ 291 /*put back what find_ife_oplist took */
290 module_put(ops->owner); 292 module_put(ops->owner);
@@ -294,7 +296,7 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
294 mi->metaid = metaid; 296 mi->metaid = metaid;
295 mi->ops = ops; 297 mi->ops = ops;
296 if (len > 0) { 298 if (len > 0) {
297 ret = ops->alloc(mi, metaval); 299 ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
298 if (ret != 0) { 300 if (ret != 0) {
299 kfree(mi); 301 kfree(mi);
300 module_put(ops->owner); 302 module_put(ops->owner);
@@ -313,11 +315,13 @@ static int use_all_metadata(struct tcf_ife_info *ife)
313 int rc = 0; 315 int rc = 0;
314 int installed = 0; 316 int installed = 0;
315 317
318 read_lock(&ife_mod_lock);
316 list_for_each_entry(o, &ifeoplist, list) { 319 list_for_each_entry(o, &ifeoplist, list) {
317 rc = add_metainfo(ife, o->metaid, NULL, 0); 320 rc = add_metainfo(ife, o->metaid, NULL, 0, true);
318 if (rc == 0) 321 if (rc == 0)
319 installed += 1; 322 installed += 1;
320 } 323 }
324 read_unlock(&ife_mod_lock);
321 325
322 if (installed) 326 if (installed)
323 return 0; 327 return 0;
@@ -385,8 +389,9 @@ static void tcf_ife_cleanup(struct tc_action *a, int bind)
385 spin_unlock_bh(&ife->tcf_lock); 389 spin_unlock_bh(&ife->tcf_lock);
386} 390}
387 391
388/* under ife->tcf_lock */ 392/* under ife->tcf_lock for existing action */
389static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb) 393static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
394 bool exists)
390{ 395{
391 int len = 0; 396 int len = 0;
392 int rc = 0; 397 int rc = 0;
@@ -398,11 +403,11 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb)
398 val = nla_data(tb[i]); 403 val = nla_data(tb[i]);
399 len = nla_len(tb[i]); 404 len = nla_len(tb[i]);
400 405
401 rc = load_metaops_and_vet(ife, i, val, len); 406 rc = load_metaops_and_vet(ife, i, val, len, exists);
402 if (rc != 0) 407 if (rc != 0)
403 return rc; 408 return rc;
404 409
405 rc = add_metainfo(ife, i, val, len); 410 rc = add_metainfo(ife, i, val, len, exists);
406 if (rc) 411 if (rc)
407 return rc; 412 return rc;
408 } 413 }
@@ -475,7 +480,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
475 saddr = nla_data(tb[TCA_IFE_SMAC]); 480 saddr = nla_data(tb[TCA_IFE_SMAC]);
476 } 481 }
477 482
478 spin_lock_bh(&ife->tcf_lock); 483 if (exists)
484 spin_lock_bh(&ife->tcf_lock);
479 ife->tcf_action = parm->action; 485 ife->tcf_action = parm->action;
480 486
481 if (parm->flags & IFE_ENCODE) { 487 if (parm->flags & IFE_ENCODE) {
@@ -505,11 +511,12 @@ metadata_parse_err:
505 if (ret == ACT_P_CREATED) 511 if (ret == ACT_P_CREATED)
506 _tcf_ife_cleanup(a, bind); 512 _tcf_ife_cleanup(a, bind);
507 513
508 spin_unlock_bh(&ife->tcf_lock); 514 if (exists)
515 spin_unlock_bh(&ife->tcf_lock);
509 return err; 516 return err;
510 } 517 }
511 518
512 err = populate_metalist(ife, tb2); 519 err = populate_metalist(ife, tb2, exists);
513 if (err) 520 if (err)
514 goto metadata_parse_err; 521 goto metadata_parse_err;
515 522
@@ -524,12 +531,14 @@ metadata_parse_err:
524 if (ret == ACT_P_CREATED) 531 if (ret == ACT_P_CREATED)
525 _tcf_ife_cleanup(a, bind); 532 _tcf_ife_cleanup(a, bind);
526 533
527 spin_unlock_bh(&ife->tcf_lock); 534 if (exists)
535 spin_unlock_bh(&ife->tcf_lock);
528 return err; 536 return err;
529 } 537 }
530 } 538 }
531 539
532 spin_unlock_bh(&ife->tcf_lock); 540 if (exists)
541 spin_unlock_bh(&ife->tcf_lock);
533 542
534 if (ret == ACT_P_CREATED) 543 if (ret == ACT_P_CREATED)
535 tcf_hash_insert(tn, a); 544 tcf_hash_insert(tn, a);
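
The recurring `if (exists)` guards all encode one rule: only an action that already exists is reachable by the fast path and so must be protected by tcf_lock, a BH spinlock under which sleeping is forbidden; a freshly created action is still private, so it may sleep and allocate with GFP_KERNEL. The allocation side of the rule, condensed into a sketch (kernel context assumed):

    static void *meta_dup(const void *val, size_t len, bool exists)
    {
            /* under tcf_lock (exists) we must not sleep */
            return kmemdup(val, len, exists ? GFP_ATOMIC : GFP_KERNEL);
    }
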
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 6148e323ed93..b8c50600697a 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -123,10 +123,13 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
123 } 123 }
124 124
125 td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]); 125 td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
126 if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) 126 if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
127 if (exists)
128 tcf_hash_release(a, bind);
127 return -EINVAL; 129 return -EINVAL;
130 }
128 131
129 if (!tcf_hash_check(tn, index, a, bind)) { 132 if (!exists) {
130 ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind, 133 ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind,
131 false); 134 false);
132 if (ret) 135 if (ret)
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 5b135d357e1e..70cfbbf96af2 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -181,7 +181,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
181 181
182 if (!(at & AT_EGRESS)) { 182 if (!(at & AT_EGRESS)) {
183 if (m->tcfm_ok_push) 183 if (m->tcfm_ok_push)
184 skb_push(skb2, skb->mac_len); 184 skb_push_rcsum(skb2, skb->mac_len);
185 } 185 }
186 186
187 /* mirror is always swallowed */ 187 /* mirror is always swallowed */
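
skb_push_rcsum() exists because a CHECKSUM_COMPLETE skb keeps in skb->csum a sum over the bytes from skb->data onward; plain skb_push() would move skb->data back over the MAC header without folding those bytes in, leaving a stale checksum. The helper is roughly:

    static unsigned char *push_rcsum(struct sk_buff *skb, unsigned int len)
    {
            skb_push(skb, len);                      /* expose len bytes */
            skb_postpush_rcsum(skb, skb->data, len); /* fold them into csum */
            return skb->data;
    }
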
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 53d1486cddf7..8e573c0f8742 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -47,6 +47,8 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
47 skb_set_queue_mapping(skb, d->queue_mapping); 47 skb_set_queue_mapping(skb, d->queue_mapping);
48 if (d->flags & SKBEDIT_F_MARK) 48 if (d->flags & SKBEDIT_F_MARK)
49 skb->mark = d->mark; 49 skb->mark = d->mark;
50 if (d->flags & SKBEDIT_F_PTYPE)
51 skb->pkt_type = d->ptype;
50 52
51 spin_unlock(&d->tcf_lock); 53 spin_unlock(&d->tcf_lock);
52 return d->tcf_action; 54 return d->tcf_action;
@@ -57,6 +59,7 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
57 [TCA_SKBEDIT_PRIORITY] = { .len = sizeof(u32) }, 59 [TCA_SKBEDIT_PRIORITY] = { .len = sizeof(u32) },
58 [TCA_SKBEDIT_QUEUE_MAPPING] = { .len = sizeof(u16) }, 60 [TCA_SKBEDIT_QUEUE_MAPPING] = { .len = sizeof(u16) },
59 [TCA_SKBEDIT_MARK] = { .len = sizeof(u32) }, 61 [TCA_SKBEDIT_MARK] = { .len = sizeof(u32) },
62 [TCA_SKBEDIT_PTYPE] = { .len = sizeof(u16) },
60}; 63};
61 64
62static int tcf_skbedit_init(struct net *net, struct nlattr *nla, 65static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
@@ -68,7 +71,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
68 struct tc_skbedit *parm; 71 struct tc_skbedit *parm;
69 struct tcf_skbedit *d; 72 struct tcf_skbedit *d;
70 u32 flags = 0, *priority = NULL, *mark = NULL; 73 u32 flags = 0, *priority = NULL, *mark = NULL;
71 u16 *queue_mapping = NULL; 74 u16 *queue_mapping = NULL, *ptype = NULL;
72 bool exists = false; 75 bool exists = false;
73 int ret = 0, err; 76 int ret = 0, err;
74 77
@@ -92,6 +95,13 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
92 queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]); 95 queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
93 } 96 }
94 97
98 if (tb[TCA_SKBEDIT_PTYPE] != NULL) {
99 ptype = nla_data(tb[TCA_SKBEDIT_PTYPE]);
100 if (!skb_pkt_type_ok(*ptype))
101 return -EINVAL;
102 flags |= SKBEDIT_F_PTYPE;
103 }
104
95 if (tb[TCA_SKBEDIT_MARK] != NULL) { 105 if (tb[TCA_SKBEDIT_MARK] != NULL) {
96 flags |= SKBEDIT_F_MARK; 106 flags |= SKBEDIT_F_MARK;
97 mark = nla_data(tb[TCA_SKBEDIT_MARK]); 107 mark = nla_data(tb[TCA_SKBEDIT_MARK]);
@@ -132,6 +142,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
132 d->queue_mapping = *queue_mapping; 142 d->queue_mapping = *queue_mapping;
133 if (flags & SKBEDIT_F_MARK) 143 if (flags & SKBEDIT_F_MARK)
134 d->mark = *mark; 144 d->mark = *mark;
145 if (flags & SKBEDIT_F_PTYPE)
146 d->ptype = *ptype;
135 147
136 d->tcf_action = parm->action; 148 d->tcf_action = parm->action;
137 149
@@ -158,16 +170,16 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
158 if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt)) 170 if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
159 goto nla_put_failure; 171 goto nla_put_failure;
160 if ((d->flags & SKBEDIT_F_PRIORITY) && 172 if ((d->flags & SKBEDIT_F_PRIORITY) &&
161 nla_put(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority), 173 nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, d->priority))
162 &d->priority))
163 goto nla_put_failure; 174 goto nla_put_failure;
164 if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) && 175 if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) &&
165 nla_put(skb, TCA_SKBEDIT_QUEUE_MAPPING, 176 nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, d->queue_mapping))
166 sizeof(d->queue_mapping), &d->queue_mapping))
167 goto nla_put_failure; 177 goto nla_put_failure;
168 if ((d->flags & SKBEDIT_F_MARK) && 178 if ((d->flags & SKBEDIT_F_MARK) &&
169 nla_put(skb, TCA_SKBEDIT_MARK, sizeof(d->mark), 179 nla_put_u32(skb, TCA_SKBEDIT_MARK, d->mark))
170 &d->mark)) 180 goto nla_put_failure;
181 if ((d->flags & SKBEDIT_F_PTYPE) &&
182 nla_put_u16(skb, TCA_SKBEDIT_PTYPE, d->ptype))
171 goto nla_put_failure; 183 goto nla_put_failure;
172 184
173 tcf_tm_dump(&t, &d->tcf_tm); 185 tcf_tm_dump(&t, &d->tcf_tm);
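
Two small conventions appear in this file: skb_pkt_type_ok() bounds the user-supplied value to the range of packet types a host may legitimately assign, and the typed nla_put_u32()/nla_put_u16() helpers replace open-coded nla_put(skb, attr, sizeof(v), &v) calls, which cannot get the length wrong. The bound check is believed to reduce to something like:

    static bool pkt_type_ok(u32 ptype)
    {
            /* PACKET_HOST(0) .. PACKET_OTHERHOST(3) are assignable */
            return ptype <= PACKET_OTHERHOST;
    }
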
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 7b342c779da7..c3002c2c68bb 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -272,15 +272,10 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
272 272
273 bpf_fd = nla_get_u32(tb[TCA_BPF_FD]); 273 bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
274 274
275 fp = bpf_prog_get(bpf_fd); 275 fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
276 if (IS_ERR(fp)) 276 if (IS_ERR(fp))
277 return PTR_ERR(fp); 277 return PTR_ERR(fp);
278 278
279 if (fp->type != BPF_PROG_TYPE_SCHED_CLS) {
280 bpf_prog_put(fp);
281 return -EINVAL;
282 }
283
284 if (tb[TCA_BPF_NAME]) { 279 if (tb[TCA_BPF_NAME]) {
285 name = kmemdup(nla_data(tb[TCA_BPF_NAME]), 280 name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
286 nla_len(tb[TCA_BPF_NAME]), 281 nla_len(tb[TCA_BPF_NAME]),
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 6ea0db427f91..baeed6a78d28 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -40,14 +40,18 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
40static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch, 40static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
41 struct sk_buff **to_free) 41 struct sk_buff **to_free)
42{ 42{
43 unsigned int prev_backlog;
44
43 if (likely(skb_queue_len(&sch->q) < sch->limit)) 45 if (likely(skb_queue_len(&sch->q) < sch->limit))
44 return qdisc_enqueue_tail(skb, sch); 46 return qdisc_enqueue_tail(skb, sch);
45 47
48 prev_backlog = sch->qstats.backlog;
46 /* queue full, remove one skb to fulfill the limit */ 49 /* queue full, remove one skb to fulfill the limit */
47 __qdisc_queue_drop_head(sch, &sch->q, to_free); 50 __qdisc_queue_drop_head(sch, &sch->q, to_free);
48 qdisc_qstats_drop(sch); 51 qdisc_qstats_drop(sch);
49 qdisc_enqueue_tail(skb, sch); 52 qdisc_enqueue_tail(skb, sch);
50 53
54 qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
51 return NET_XMIT_CN; 55 return NET_XMIT_CN;
52} 56}
53 57
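
The prev_backlog bookkeeping closes an accounting hole: the head-dropped packet and the newly admitted one usually differ in size. If, say, a 1500-byte head is dropped to admit a 300-byte tail, the queue length is unchanged but the backlog shrank by 1200 bytes, and qdisc_tree_reduce_backlog(sch, 0, 1200) propagates exactly that delta to the ancestor qdiscs.
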
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 8cb5eff7b79c..3ddc7bd74ecb 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -115,9 +115,9 @@ struct hfsc_class {
115 struct gnet_stats_basic_packed bstats; 115 struct gnet_stats_basic_packed bstats;
116 struct gnet_stats_queue qstats; 116 struct gnet_stats_queue qstats;
117 struct gnet_stats_rate_est64 rate_est; 117 struct gnet_stats_rate_est64 rate_est;
118 unsigned int level; /* class level in hierarchy */
119 struct tcf_proto __rcu *filter_list; /* filter list */ 118 struct tcf_proto __rcu *filter_list; /* filter list */
120 unsigned int filter_cnt; /* filter count */ 119 unsigned int filter_cnt; /* filter count */
120 unsigned int level; /* class level in hierarchy */
121 121
122 struct hfsc_sched *sched; /* scheduler data */ 122 struct hfsc_sched *sched; /* scheduler data */
123 struct hfsc_class *cl_parent; /* parent class */ 123 struct hfsc_class *cl_parent; /* parent class */
@@ -130,7 +130,6 @@ struct hfsc_class {
130 struct rb_node vt_node; /* parent's vt_tree member */ 130 struct rb_node vt_node; /* parent's vt_tree member */
131 struct rb_root cf_tree; /* active children sorted by cl_f */ 131 struct rb_root cf_tree; /* active children sorted by cl_f */
132 struct rb_node cf_node; /* parent's cf_heap member */ 132 struct rb_node cf_node; /* parent's cf_heap member */
133 struct list_head dlist; /* drop list member */
134 133
135 u64 cl_total; /* total work in bytes */ 134 u64 cl_total; /* total work in bytes */
136 u64 cl_cumul; /* cumulative work in bytes done by 135 u64 cl_cumul; /* cumulative work in bytes done by
@@ -166,10 +165,10 @@ struct hfsc_class {
166 struct runtime_sc cl_virtual; /* virtual curve */ 165 struct runtime_sc cl_virtual; /* virtual curve */
167 struct runtime_sc cl_ulimit; /* upperlimit curve */ 166 struct runtime_sc cl_ulimit; /* upperlimit curve */
168 167
169 unsigned long cl_flags; /* which curves are valid */ 168 u8 cl_flags; /* which curves are valid */
170 unsigned long cl_vtperiod; /* vt period sequence number */ 169 u32 cl_vtperiod; /* vt period sequence number */
171 unsigned long cl_parentperiod;/* parent's vt period sequence number*/ 170 u32 cl_parentperiod;/* parent's vt period sequence number*/
172 unsigned long cl_nactive; /* number of active children */ 171 u32 cl_nactive; /* number of active children */
173}; 172};
174 173
175struct hfsc_sched { 174struct hfsc_sched {
@@ -177,8 +176,6 @@ struct hfsc_sched {
177 struct hfsc_class root; /* root class */ 176 struct hfsc_class root; /* root class */
178 struct Qdisc_class_hash clhash; /* class hash */ 177 struct Qdisc_class_hash clhash; /* class hash */
179 struct rb_root eligible; /* eligible tree */ 178 struct rb_root eligible; /* eligible tree */
180 struct list_head droplist; /* active leaf class list (for
181 dropping) */
182 struct qdisc_watchdog watchdog; /* watchdog timer */ 179 struct qdisc_watchdog watchdog; /* watchdog timer */
183}; 180};
184 181
@@ -781,6 +778,20 @@ update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
781 else 778 else
782 go_passive = 0; 779 go_passive = 0;
783 780
781 /* update vt */
782 cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
783 - cl->cl_vtoff + cl->cl_vtadj;
784
785 /*
786 * if vt of the class is smaller than cvtmin,
787 * the class was skipped in the past due to non-fit.
788 * if so, we need to adjust vtadj.
789 */
790 if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
791 cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
792 cl->cl_vt = cl->cl_parent->cl_cvtmin;
793 }
794
784 if (go_passive) { 795 if (go_passive) {
785 /* no more active child, going passive */ 796 /* no more active child, going passive */
786 797
@@ -797,25 +808,10 @@ update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
797 continue; 808 continue;
798 } 809 }
799 810
800 /*
801 * update vt and f
802 */
803 cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
804 - cl->cl_vtoff + cl->cl_vtadj;
805
806 /*
807 * if vt of the class is smaller than cvtmin,
808 * the class was skipped in the past due to non-fit.
809 * if so, we need to adjust vtadj.
810 */
811 if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
812 cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
813 cl->cl_vt = cl->cl_parent->cl_cvtmin;
814 }
815
816 /* update the vt tree */ 811 /* update the vt tree */
817 vttree_update(cl); 812 vttree_update(cl);
818 813
814 /* update f */
819 if (cl->cl_flags & HFSC_USC) { 815 if (cl->cl_flags & HFSC_USC) {
820 cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit, 816 cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
821 cl->cl_total); 817 cl->cl_total);
@@ -858,7 +854,6 @@ set_active(struct hfsc_class *cl, unsigned int len)
858 if (cl->cl_flags & HFSC_FSC) 854 if (cl->cl_flags & HFSC_FSC)
859 init_vf(cl, len); 855 init_vf(cl, len);
860 856
861 list_add_tail(&cl->dlist, &cl->sched->droplist);
862} 857}
863 858
864static void 859static void
@@ -867,8 +862,6 @@ set_passive(struct hfsc_class *cl)
867 if (cl->cl_flags & HFSC_RSC) 862 if (cl->cl_flags & HFSC_RSC)
868 eltree_remove(cl); 863 eltree_remove(cl);
869 864
870 list_del(&cl->dlist);
871
872 /* 865 /*
873 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0) 866 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
874 * needs to be called explicitly to remove a class from vttree. 867 * needs to be called explicitly to remove a class from vttree.
@@ -882,7 +875,7 @@ qdisc_peek_len(struct Qdisc *sch)
882 unsigned int len; 875 unsigned int len;
883 876
884 skb = sch->ops->peek(sch); 877 skb = sch->ops->peek(sch);
885 if (skb == NULL) { 878 if (unlikely(skb == NULL)) {
886 qdisc_warn_nonwc("qdisc_peek_len", sch); 879 qdisc_warn_nonwc("qdisc_peek_len", sch);
887 return 0; 880 return 0;
888 } 881 }
@@ -947,7 +940,7 @@ static void
947hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc) 940hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
948{ 941{
949 sc2isc(fsc, &cl->cl_fsc); 942 sc2isc(fsc, &cl->cl_fsc);
950 rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total); 943 rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vtoff + cl->cl_vt, cl->cl_total);
951 cl->cl_flags |= HFSC_FSC; 944 cl->cl_flags |= HFSC_FSC;
952} 945}
953 946
@@ -1443,7 +1436,6 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
1443 if (err < 0) 1436 if (err < 0)
1444 return err; 1437 return err;
1445 q->eligible = RB_ROOT; 1438 q->eligible = RB_ROOT;
1446 INIT_LIST_HEAD(&q->droplist);
1447 1439
1448 q->root.cl_common.classid = sch->handle; 1440 q->root.cl_common.classid = sch->handle;
1449 q->root.refcnt = 1; 1441 q->root.refcnt = 1;
@@ -1527,7 +1519,6 @@ hfsc_reset_qdisc(struct Qdisc *sch)
1527 hfsc_reset_class(cl); 1519 hfsc_reset_class(cl);
1528 } 1520 }
1529 q->eligible = RB_ROOT; 1521 q->eligible = RB_ROOT;
1530 INIT_LIST_HEAD(&q->droplist);
1531 qdisc_watchdog_cancel(&q->watchdog); 1522 qdisc_watchdog_cancel(&q->watchdog);
1532 sch->qstats.backlog = 0; 1523 sch->qstats.backlog = 0;
1533 sch->q.qlen = 0; 1524 sch->q.qlen = 0;
@@ -1594,8 +1585,17 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
1594 return err; 1585 return err;
1595 } 1586 }
1596 1587
1597 if (cl->qdisc->q.qlen == 1) 1588 if (cl->qdisc->q.qlen == 1) {
1598 set_active(cl, qdisc_pkt_len(skb)); 1589 set_active(cl, qdisc_pkt_len(skb));
1590 /*
1591 * If this is the first packet, isolate the head so an eventual
1592 * head drop before the first dequeue operation has no chance
1593 * to invalidate the deadline.
1594 */
1595 if (cl->cl_flags & HFSC_RSC)
1596 cl->qdisc->ops->peek(cl->qdisc);
1597
1598 }
1599 1599
1600 qdisc_qstats_backlog_inc(sch, skb); 1600 qdisc_qstats_backlog_inc(sch, skb);
1601 sch->q.qlen++; 1601 sch->q.qlen++;
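
The peek added on the first enqueued packet deserves a note: with an RSC (real-time) curve the deadline is derived from the length of the packet at the head of the class queue. Assuming the child's ->peek is the common qdisc_peek_dequeued() helper, the peek dequeues the head and parks it in the child's gso_skb slot, so a later head drop removes a different packet and the deadline computed from this one stays valid; this reading is inferred from the comment in the hunk rather than stated by the patch.
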
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index ba098f2654b4..91982d9784b3 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -984,7 +984,9 @@ static void htb_work_func(struct work_struct *work)
984 struct htb_sched *q = container_of(work, struct htb_sched, work); 984 struct htb_sched *q = container_of(work, struct htb_sched, work);
985 struct Qdisc *sch = q->watchdog.qdisc; 985 struct Qdisc *sch = q->watchdog.qdisc;
986 986
987 rcu_read_lock();
987 __netif_schedule(qdisc_root(sch)); 988 __netif_schedule(qdisc_root(sch));
989 rcu_read_unlock();
988} 990}
989 991
990static int htb_init(struct Qdisc *sch, struct nlattr *opt) 992static int htb_init(struct Qdisc *sch, struct nlattr *opt)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index ccca8ca4c722..aaaf02175338 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -487,10 +487,14 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
487 skb = segs; 487 skb = segs;
488 segs = segs->next; 488 segs = segs->next;
489 489
490 if (!(skb = skb_unshare(skb, GFP_ATOMIC)) || 490 skb = skb_unshare(skb, GFP_ATOMIC);
491 (skb->ip_summed == CHECKSUM_PARTIAL && 491 if (unlikely(!skb)) {
492 skb_checksum_help(skb))) { 492 qdisc_qstats_drop(sch);
493 rc = qdisc_drop(skb, sch, to_free); 493 goto finish_segs;
494 }
495 if (skb->ip_summed == CHECKSUM_PARTIAL &&
496 skb_checksum_help(skb)) {
497 qdisc_drop(skb, sch, to_free);
494 goto finish_segs; 498 goto finish_segs;
495 } 499 }
496 500
@@ -617,17 +621,17 @@ deliver:
617#endif 621#endif
618 622
619 if (q->qdisc) { 623 if (q->qdisc) {
624 unsigned int pkt_len = qdisc_pkt_len(skb);
620 struct sk_buff *to_free = NULL; 625 struct sk_buff *to_free = NULL;
621 int err; 626 int err;
622 627
623 err = qdisc_enqueue(skb, q->qdisc, &to_free); 628 err = qdisc_enqueue(skb, q->qdisc, &to_free);
624 kfree_skb_list(to_free); 629 kfree_skb_list(to_free);
625 if (unlikely(err != NET_XMIT_SUCCESS)) { 630 if (err != NET_XMIT_SUCCESS &&
626 if (net_xmit_drop_count(err)) { 631 net_xmit_drop_count(err)) {
627 qdisc_qstats_drop(sch); 632 qdisc_qstats_drop(sch);
628 qdisc_tree_reduce_backlog(sch, 1, 633 qdisc_tree_reduce_backlog(sch, 1,
629 qdisc_pkt_len(skb)); 634 pkt_len);
630 }
631 } 635 }
632 goto tfifo_dequeue; 636 goto tfifo_dequeue;
633 } 637 }
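
Caching qdisc_pkt_len(skb) before the enqueue is a lifetime fix, not a style change: qdisc_enqueue() may free the skb (placing it on to_free), after which reading its length is a use-after-free. The shape to remember, in kernel context:

    unsigned int pkt_len = qdisc_pkt_len(skb);  /* sample before enqueue */

    err = qdisc_enqueue(skb, child, &to_free);
    /* skb may already be freed here; only pkt_len is safe to use */
    if (err != NET_XMIT_SUCCESS && net_xmit_drop_count(err))
            qdisc_tree_reduce_backlog(sch, 1, pkt_len);
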
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index f4d443aeae54..8f575899adfa 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -153,8 +153,9 @@ prio_destroy(struct Qdisc *sch)
153static int prio_tune(struct Qdisc *sch, struct nlattr *opt) 153static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
154{ 154{
155 struct prio_sched_data *q = qdisc_priv(sch); 155 struct prio_sched_data *q = qdisc_priv(sch);
156 struct Qdisc *queues[TCQ_PRIO_BANDS];
157 int oldbands = q->bands, i;
156 struct tc_prio_qopt *qopt; 158 struct tc_prio_qopt *qopt;
157 int i;
158 159
159 if (nla_len(opt) < sizeof(*qopt)) 160 if (nla_len(opt) < sizeof(*qopt))
160 return -EINVAL; 161 return -EINVAL;
@@ -168,62 +169,42 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
168 return -EINVAL; 169 return -EINVAL;
169 } 170 }
170 171
172 /* Before commit, make sure we can allocate all new qdiscs */
173 for (i = oldbands; i < qopt->bands; i++) {
174 queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
175 TC_H_MAKE(sch->handle, i + 1));
176 if (!queues[i]) {
177 while (i > oldbands)
178 qdisc_destroy(queues[--i]);
179 return -ENOMEM;
180 }
181 }
182
171 sch_tree_lock(sch); 183 sch_tree_lock(sch);
172 q->bands = qopt->bands; 184 q->bands = qopt->bands;
173 memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); 185 memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
174 186
175 for (i = q->bands; i < TCQ_PRIO_BANDS; i++) { 187 for (i = q->bands; i < oldbands; i++) {
176 struct Qdisc *child = q->queues[i]; 188 struct Qdisc *child = q->queues[i];
177 q->queues[i] = &noop_qdisc;
178 if (child != &noop_qdisc) {
179 qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
180 qdisc_destroy(child);
181 }
182 }
183 sch_tree_unlock(sch);
184 189
185 for (i = 0; i < q->bands; i++) { 190 qdisc_tree_reduce_backlog(child, child->q.qlen,
186 if (q->queues[i] == &noop_qdisc) { 191 child->qstats.backlog);
187 struct Qdisc *child, *old; 192 qdisc_destroy(child);
188
189 child = qdisc_create_dflt(sch->dev_queue,
190 &pfifo_qdisc_ops,
191 TC_H_MAKE(sch->handle, i + 1));
192 if (child) {
193 sch_tree_lock(sch);
194 old = q->queues[i];
195 q->queues[i] = child;
196
197 if (old != &noop_qdisc) {
198 qdisc_tree_reduce_backlog(old,
199 old->q.qlen,
200 old->qstats.backlog);
201 qdisc_destroy(old);
202 }
203 sch_tree_unlock(sch);
204 }
205 }
206 } 193 }
194
195 for (i = oldbands; i < q->bands; i++)
196 q->queues[i] = queues[i];
197
198 sch_tree_unlock(sch);
207 return 0; 199 return 0;
208} 200}
209 201
210static int prio_init(struct Qdisc *sch, struct nlattr *opt) 202static int prio_init(struct Qdisc *sch, struct nlattr *opt)
211{ 203{
212 struct prio_sched_data *q = qdisc_priv(sch); 204 if (!opt)
213 int i;
214
215 for (i = 0; i < TCQ_PRIO_BANDS; i++)
216 q->queues[i] = &noop_qdisc;
217
218 if (opt == NULL) {
219 return -EINVAL; 205 return -EINVAL;
220 } else {
221 int err;
222 206
223 if ((err = prio_tune(sch, opt)) != 0) 207 return prio_tune(sch, opt);
224 return err;
225 }
226 return 0;
227} 208}
228 209
229static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) 210static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
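
prio_tune() now follows the classic prepare/commit shape: every fallible allocation happens before sch_tree_lock(), and the locked region only publishes pointers and tears down the bands that became surplus, so the qdisc can no longer be left half-reconfigured. A userspace sketch of the pattern, with hypothetical names:

    #include <stdlib.h>

    /* grow a slot table from old_n to new_n entries, all-or-nothing */
    static int grow_slots(void **slots, int old_n, int new_n)
    {
            int i;

            for (i = old_n; i < new_n; i++) {
                    slots[i] = malloc(64);
                    if (!slots[i]) {
                            while (i > old_n)   /* roll back, publish nothing */
                                    free(slots[--i]);
                            return -1;
                    }
            }
            /* lock(); commit new_n; unlock(); -- no failure paths here */
            return 0;
    }
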
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index 1ce724b87618..f69edcf219e5 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -3,12 +3,6 @@
3#include <linux/sock_diag.h> 3#include <linux/sock_diag.h>
4#include <net/sctp/sctp.h> 4#include <net/sctp/sctp.h>
5 5
6extern void inet_diag_msg_common_fill(struct inet_diag_msg *r,
7 struct sock *sk);
8extern int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
9 struct inet_diag_msg *r, int ext,
10 struct user_namespace *user_ns);
11
12static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, 6static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
13 void *info); 7 void *info);
14 8
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 06b4df9faaa1..2808d550d273 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -446,16 +446,27 @@ out_no_rpciod:
446 return ERR_PTR(err); 446 return ERR_PTR(err);
447} 447}
448 448
449struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, 449static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
450 struct rpc_xprt *xprt) 450 struct rpc_xprt *xprt)
451{ 451{
452 struct rpc_clnt *clnt = NULL; 452 struct rpc_clnt *clnt = NULL;
453 struct rpc_xprt_switch *xps; 453 struct rpc_xprt_switch *xps;
454 454
455 xps = xprt_switch_alloc(xprt, GFP_KERNEL); 455 if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
456 if (xps == NULL) 456 WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
457 return ERR_PTR(-ENOMEM); 457 xps = args->bc_xprt->xpt_bc_xps;
458 458 xprt_switch_get(xps);
459 } else {
460 xps = xprt_switch_alloc(xprt, GFP_KERNEL);
461 if (xps == NULL) {
462 xprt_put(xprt);
463 return ERR_PTR(-ENOMEM);
464 }
465 if (xprt->bc_xprt) {
466 xprt_switch_get(xps);
467 xprt->bc_xprt->xpt_bc_xps = xps;
468 }
469 }
459 clnt = rpc_new_client(args, xps, xprt, NULL); 470 clnt = rpc_new_client(args, xps, xprt, NULL);
460 if (IS_ERR(clnt)) 471 if (IS_ERR(clnt))
461 return clnt; 472 return clnt;
@@ -483,7 +494,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
483 494
484 return clnt; 495 return clnt;
485} 496}
486EXPORT_SYMBOL_GPL(rpc_create_xprt);
487 497
488/** 498/**
489 * rpc_create - create an RPC client and transport with one call 499 * rpc_create - create an RPC client and transport with one call
@@ -509,6 +519,15 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
509 }; 519 };
510 char servername[48]; 520 char servername[48];
511 521
522 if (args->bc_xprt) {
523 WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
524 xprt = args->bc_xprt->xpt_bc_xprt;
525 if (xprt) {
526 xprt_get(xprt);
527 return rpc_create_xprt(args, xprt);
528 }
529 }
530
512 if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS) 531 if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
513 xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS; 532 xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
514 if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT) 533 if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
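
Both hunks above serve one invariant: a backchannel client must reuse the rpc_xprt_switch (and rpc_xprt) of the svc_xprt it rides on rather than allocate a second one. xpt_bc_xps caches the switch with a reference taken via xprt_switch_get(), and the matching xprt_switch_put() lands in svc_xprt_free() in the next hunk, so the switch lives exactly as long as either user.
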
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index f5572e31d518..4f01f63102ee 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -136,6 +136,8 @@ static void svc_xprt_free(struct kref *kref)
136 /* See comment on corresponding get in xs_setup_bc_tcp(): */ 136 /* See comment on corresponding get in xs_setup_bc_tcp(): */
137 if (xprt->xpt_bc_xprt) 137 if (xprt->xpt_bc_xprt)
138 xprt_put(xprt->xpt_bc_xprt); 138 xprt_put(xprt->xpt_bc_xprt);
139 if (xprt->xpt_bc_xps)
140 xprt_switch_put(xprt->xpt_bc_xps);
139 xprt->xpt_ops->xpo_free(xprt); 141 xprt->xpt_ops->xpo_free(xprt);
140 module_put(owner); 142 module_put(owner);
141} 143}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 2d3e0c42361e..7e2b2fa189c3 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -3057,6 +3057,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
3057 return xprt; 3057 return xprt;
3058 3058
3059 args->bc_xprt->xpt_bc_xprt = NULL; 3059 args->bc_xprt->xpt_bc_xprt = NULL;
3060 args->bc_xprt->xpt_bc_xps = NULL;
3060 xprt_put(xprt); 3061 xprt_put(xprt);
3061 ret = ERR_PTR(-EINVAL); 3062 ret = ERR_PTR(-EINVAL);
3062out_err: 3063out_err:
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 9a70e1d744d2..8584cc48654c 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -411,7 +411,7 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
411 return 0; 411 return 0;
412 412
413 /* Send RESET message even if bearer is detached from device */ 413 /* Send RESET message even if bearer is detached from device */
414 tipc_ptr = rtnl_dereference(dev->tipc_ptr); 414 tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
415 if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb)))) 415 if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb))))
416 goto drop; 416 goto drop;
417 417
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index ad9d477cc242..6b109a808d4c 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -135,9 +135,12 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
135 u16 caps = msg_node_capabilities(hdr); 135 u16 caps = msg_node_capabilities(hdr);
136 bool respond = false; 136 bool respond = false;
137 bool dupl_addr = false; 137 bool dupl_addr = false;
138 int err;
138 139
139 bearer->media->msg2addr(bearer, &maddr, msg_media_addr(hdr)); 140 err = bearer->media->msg2addr(bearer, &maddr, msg_media_addr(hdr));
140 kfree_skb(skb); 141 kfree_skb(skb);
142 if (err)
143 return;
141 144
142 /* Ensure message from node is valid and communication is permitted */ 145 /* Ensure message from node is valid and communication is permitted */
143 if (net_id != tn->net_id) 146 if (net_id != tn->net_id)
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 03f8bdf70d8f..c1df33f878b2 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -705,7 +705,8 @@ static void link_profile_stats(struct tipc_link *l)
705 */ 705 */
706int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) 706int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
707{ 707{
708 int mtyp, rc = 0; 708 int mtyp = 0;
709 int rc = 0;
709 bool state = false; 710 bool state = false;
710 bool probe = false; 711 bool probe = false;
711 bool setup = false; 712 bool setup = false;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 8740930f0787..17201aa8423d 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -41,6 +41,8 @@
41#include "name_table.h" 41#include "name_table.h"
42 42
43#define MAX_FORWARD_SIZE 1024 43#define MAX_FORWARD_SIZE 1024
44#define BUF_HEADROOM (LL_MAX_HEADER + 48)
45#define BUF_TAILROOM 16
44 46
45static unsigned int align(unsigned int i) 47static unsigned int align(unsigned int i)
46{ 48{
@@ -505,6 +507,10 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
505 msg_set_hdr_sz(hdr, BASIC_H_SIZE); 507 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
506 } 508 }
507 509
510 if (skb_cloned(_skb) &&
511 pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL))
512 goto exit;
513
508 /* Now reverse the concerned fields */ 514 /* Now reverse the concerned fields */
509 msg_set_errcode(hdr, err); 515 msg_set_errcode(hdr, err);
510 msg_set_origport(hdr, msg_destport(&ohdr)); 516 msg_set_origport(hdr, msg_destport(&ohdr));
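
The skb_cloned() guard protects the in-place header rewrite that follows it: a cloned skb shares its data area with its clone, so the error code, ports and addresses must not be scribbled into shared memory. The rule in outline (the kernel passes BUF_HEADROOM/BUF_TAILROOM to also regain room; zeros suffice to illustrate):

    /* get a private copy before any in-place header edits */
    if (skb_cloned(skb) &&
        pskb_expand_head(skb, 0, 0, GFP_KERNEL))
            goto exit;      /* no private copy; leave the buffer alone */
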
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 024da8af91f0..7cf52fb39bee 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -94,17 +94,6 @@ struct plist;
94 94
95#define TIPC_MEDIA_INFO_OFFSET 5 95#define TIPC_MEDIA_INFO_OFFSET 5
96 96
97/**
98 * TIPC message buffer code
99 *
100 * TIPC message buffer headroom reserves space for the worst-case
101 * link-level device header (in case the message is sent off-node).
102 *
103 * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
104 * are word aligned for quicker access
105 */
106#define BUF_HEADROOM (LL_MAX_HEADER + 48)
107
108struct tipc_skb_cb { 97struct tipc_skb_cb {
109 void *handle; 98 void *handle;
110 struct sk_buff *tail; 99 struct sk_buff *tail;
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 3ad9fab1985f..1fd464764765 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -604,7 +604,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
604 604
605 link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]); 605 link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
606 link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP])); 606 link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
607 nla_strlcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]), 607 nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME],
608 TIPC_MAX_LINK_NAME); 608 TIPC_MAX_LINK_NAME);
609 609
610 return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO, 610 return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 88bfcd707064..c49b8df438cb 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -796,9 +796,11 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
796 * @tsk: receiving socket 796 * @tsk: receiving socket
797 * @skb: pointer to message buffer. 797 * @skb: pointer to message buffer.
798 */ 798 */
799static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb) 799static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
800 struct sk_buff_head *xmitq)
800{ 801{
801 struct sock *sk = &tsk->sk; 802 struct sock *sk = &tsk->sk;
803 u32 onode = tsk_own_node(tsk);
802 struct tipc_msg *hdr = buf_msg(skb); 804 struct tipc_msg *hdr = buf_msg(skb);
803 int mtyp = msg_type(hdr); 805 int mtyp = msg_type(hdr);
804 bool conn_cong; 806 bool conn_cong;
@@ -811,7 +813,8 @@ static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
811 813
812 if (mtyp == CONN_PROBE) { 814 if (mtyp == CONN_PROBE) {
813 msg_set_type(hdr, CONN_PROBE_REPLY); 815 msg_set_type(hdr, CONN_PROBE_REPLY);
814 tipc_sk_respond(sk, skb, TIPC_OK); 816 if (tipc_msg_reverse(onode, &skb, TIPC_OK))
817 __skb_queue_tail(xmitq, skb);
815 return; 818 return;
816 } else if (mtyp == CONN_ACK) { 819 } else if (mtyp == CONN_ACK) {
817 conn_cong = tsk_conn_cong(tsk); 820 conn_cong = tsk_conn_cong(tsk);
@@ -1686,7 +1689,8 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
1686 * 1689 *
1687 * Returns true if message was added to socket receive queue, otherwise false 1690 * Returns true if message was added to socket receive queue, otherwise false
1688 */ 1691 */
1689static bool filter_rcv(struct sock *sk, struct sk_buff *skb) 1692static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
1693 struct sk_buff_head *xmitq)
1690{ 1694{
1691 struct socket *sock = sk->sk_socket; 1695 struct socket *sock = sk->sk_socket;
1692 struct tipc_sock *tsk = tipc_sk(sk); 1696 struct tipc_sock *tsk = tipc_sk(sk);
@@ -1696,7 +1700,7 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
1696 int usr = msg_user(hdr); 1700 int usr = msg_user(hdr);
1697 1701
1698 if (unlikely(msg_user(hdr) == CONN_MANAGER)) { 1702 if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
1699 tipc_sk_proto_rcv(tsk, skb); 1703 tipc_sk_proto_rcv(tsk, skb, xmitq);
1700 return false; 1704 return false;
1701 } 1705 }
1702 1706
@@ -1739,7 +1743,8 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
1739 return true; 1743 return true;
1740 1744
1741reject: 1745reject:
1742 tipc_sk_respond(sk, skb, err); 1746 if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
1747 __skb_queue_tail(xmitq, skb);
1743 return false; 1748 return false;
1744} 1749}
1745 1750
@@ -1755,9 +1760,24 @@ reject:
1755static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb) 1760static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1756{ 1761{
1757 unsigned int truesize = skb->truesize; 1762 unsigned int truesize = skb->truesize;
1763 struct sk_buff_head xmitq;
1764 u32 dnode, selector;
1758 1765
1759 if (likely(filter_rcv(sk, skb))) 1766 __skb_queue_head_init(&xmitq);
1767
+	if (likely(filter_rcv(sk, skb, &xmitq))) {
 		atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
+		return 0;
+	}
+
+	if (skb_queue_empty(&xmitq))
+		return 0;
+
+	/* Send response/rejected message */
+	skb = __skb_dequeue(&xmitq);
+	dnode = msg_destnode(buf_msg(skb));
+	selector = msg_origport(buf_msg(skb));
+	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
 	return 0;
 }
 
@@ -1771,12 +1791,13 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
  * Caller must hold socket lock
  */
 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
-			    u32 dport)
+			    u32 dport, struct sk_buff_head *xmitq)
 {
+	unsigned long time_limit = jiffies + 2;
+	struct sk_buff *skb;
 	unsigned int lim;
 	atomic_t *dcnt;
-	struct sk_buff *skb;
-	unsigned long time_limit = jiffies + 2;
+	u32 onode;
 
 	while (skb_queue_len(inputq)) {
 		if (unlikely(time_after_eq(jiffies, time_limit)))
@@ -1788,7 +1809,7 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
 
 		/* Add message directly to receive queue if possible */
 		if (!sock_owned_by_user(sk)) {
-			filter_rcv(sk, skb);
+			filter_rcv(sk, skb, xmitq);
 			continue;
 		}
 
@@ -1801,7 +1822,9 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
 			continue;
 
 		/* Overload => reject message back to sender */
-		tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
+		onode = tipc_own_addr(sock_net(sk));
+		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
+			__skb_queue_tail(xmitq, skb);
 		break;
 	}
 }
@@ -1814,12 +1837,14 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
  */
 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
 {
+	struct sk_buff_head xmitq;
 	u32 dnode, dport = 0;
 	int err;
 	struct tipc_sock *tsk;
 	struct sock *sk;
 	struct sk_buff *skb;
 
+	__skb_queue_head_init(&xmitq);
 	while (skb_queue_len(inputq)) {
 		dport = tipc_skb_peek_port(inputq, dport);
 		tsk = tipc_sk_lookup(net, dport);
@@ -1827,9 +1852,14 @@ void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
 		if (likely(tsk)) {
 			sk = &tsk->sk;
 			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
-				tipc_sk_enqueue(inputq, sk, dport);
+				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
 				spin_unlock_bh(&sk->sk_lock.slock);
 			}
+			/* Send pending response/rejected messages, if any */
+			while ((skb = __skb_dequeue(&xmitq))) {
+				dnode = msg_destnode(buf_msg(skb));
+				tipc_node_xmit_skb(net, skb, dnode, dport);
+			}
 			sock_put(sk);
 			continue;
 		}
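
Taken together, the tipc hunks above enforce one discipline: nothing transmits while the socket spinlock is held; rejected or response buffers are only parked on the on-stack xmitq, and tipc_sk_rcv() drains it after the lock is dropped. A minimal standalone C sketch of that pattern, assuming nothing about TIPC's APIs (the mutex, list, and every name below are illustrative):

#include <pthread.h>
#include <stdio.h>

struct msg { struct msg *next; int id; };

static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;

/* Runs with sk_lock held: defers, never transmits. */
static void reject(struct msg **xmitq, struct msg *m)
{
	m->next = *xmitq;
	*xmitq = m;
}

static void transmit(struct msg *m)
{
	printf("sending rejected msg %d\n", m->id);
}

static void rcv(struct msg *m)
{
	struct msg *xmitq = NULL;	/* on-stack deferred-send queue */

	pthread_mutex_lock(&sk_lock);
	reject(&xmitq, m);		/* overload: bounce back to sender */
	pthread_mutex_unlock(&sk_lock);

	while (xmitq) {			/* send only after unlocking */
		struct msg *next = xmitq->next;
		transmit(xmitq);
		xmitq = next;
	}
}

int main(void)
{
	struct msg m = { .next = NULL, .id = 42 };
	rcv(&m);
	return 0;
}

The payoff is that the transmit path may itself take socket locks without recursing on the one already held.
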
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index c9cf2be3674a..b016c011970b 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -63,7 +63,7 @@
  */
 struct udp_media_addr {
 	__be16	proto;
-	__be16	udp_port;
+	__be16	port;
 	union {
 		struct in_addr ipv4;
 		struct in6_addr ipv6;
@@ -108,9 +108,9 @@ static int tipc_udp_addr2str(struct tipc_media_addr *a, char *buf, int size)
 	struct udp_media_addr *ua = (struct udp_media_addr *)&a->value;
 
 	if (ntohs(ua->proto) == ETH_P_IP)
-		snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->udp_port));
+		snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->port));
 	else if (ntohs(ua->proto) == ETH_P_IPV6)
-		snprintf(buf, size, "%pI6:%u", &ua->ipv6, ntohs(ua->udp_port));
+		snprintf(buf, size, "%pI6:%u", &ua->ipv6, ntohs(ua->port));
 	else
 		pr_err("Invalid UDP media address\n");
 	return 0;
@@ -178,8 +178,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
 		skb->dev = rt->dst.dev;
 		ttl = ip4_dst_hoplimit(&rt->dst);
 		udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr,
-				    dst->ipv4.s_addr, 0, ttl, 0, src->udp_port,
-				    dst->udp_port, false, true);
+				    dst->ipv4.s_addr, 0, ttl, 0, src->port,
+				    dst->port, false, true);
 #if IS_ENABLED(CONFIG_IPV6)
 	} else {
 		struct dst_entry *ndst;
@@ -196,8 +196,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
 		ttl = ip6_dst_hoplimit(ndst);
 		err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb,
 					   ndst->dev, &src->ipv6,
-					   &dst->ipv6, 0, ttl, 0, src->udp_port,
-					   dst->udp_port, false);
+					   &dst->ipv6, 0, ttl, 0, src->port,
+					   dst->port, false);
 #endif
 	}
 	return err;
@@ -292,12 +292,12 @@ err:
 
 	ip4 = (struct sockaddr_in *)&sa_local;
 	local->proto = htons(ETH_P_IP);
-	local->udp_port = ip4->sin_port;
+	local->port = ip4->sin_port;
 	local->ipv4.s_addr = ip4->sin_addr.s_addr;
 
 	ip4 = (struct sockaddr_in *)&sa_remote;
 	remote->proto = htons(ETH_P_IP);
-	remote->udp_port = ip4->sin_port;
+	remote->port = ip4->sin_port;
 	remote->ipv4.s_addr = ip4->sin_addr.s_addr;
 	return 0;
 
@@ -312,13 +312,13 @@ err:
 		return -EINVAL;
 
 	local->proto = htons(ETH_P_IPV6);
-	local->udp_port = ip6->sin6_port;
+	local->port = ip6->sin6_port;
 	memcpy(&local->ipv6, &ip6->sin6_addr, sizeof(struct in6_addr));
 	ub->ifindex = ip6->sin6_scope_id;
 
 	ip6 = (struct sockaddr_in6 *)&sa_remote;
 	remote->proto = htons(ETH_P_IPV6);
-	remote->udp_port = ip6->sin6_port;
+	remote->port = ip6->sin6_port;
 	memcpy(&remote->ipv6, &ip6->sin6_addr, sizeof(struct in6_addr));
 	return 0;
 #endif
@@ -386,7 +386,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
 		err = -EAFNOSUPPORT;
 		goto err;
 	}
-	udp_conf.local_udp_port = local.udp_port;
+	udp_conf.local_udp_port = local.port;
 	err = udp_sock_create(net, &udp_conf, &ub->ubsock);
 	if (err)
 		goto err;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 80aa6a3e6817..735362c26c8e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -315,7 +315,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
 		      &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
 		struct dentry *dentry = unix_sk(s)->path.dentry;
 
-		if (dentry && d_backing_inode(dentry) == i) {
+		if (dentry && d_real_inode(dentry) == i) {
 			sock_hold(s);
 			goto found;
 		}
@@ -911,7 +911,7 @@ static struct sock *unix_find_other(struct net *net,
 		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
 		if (err)
 			goto fail;
-		inode = d_backing_inode(path.dentry);
+		inode = d_real_inode(path.dentry);
 		err = inode_permission(inode, MAY_WRITE);
 		if (err)
 			goto put_fail;
@@ -1048,7 +1048,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 			goto out_up;
 		}
 		addr->hash = UNIX_HASH_SIZE;
-		hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+		hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
 		spin_lock(&unix_table_lock);
 		u->path = u_path;
 		list = &unix_socket_table[hash];
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index b5f1221f48d4..b96ac918e0ba 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -61,6 +61,14 @@
  * function will also cleanup rejected sockets, those that reach the connected
  * state but leave it before they have been accepted.
  *
+ * - Lock ordering for pending or accept queue sockets is:
+ *
+ *     lock_sock(listener);
+ *     lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
+ *
+ * Using explicit nested locking keeps lockdep happy since normally only one
+ * lock of a given class may be taken at a time.
+ *
  * - Sockets created by user action will be cleaned up when the user process
  * calls close(2), causing our release implementation to be called. Our release
  * implementation will perform some cleanup then drop the last reference so our
@@ -443,7 +451,7 @@ void vsock_pending_work(struct work_struct *work)
 		cleanup = true;
 
 	lock_sock(listener);
-	lock_sock(sk);
+	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 
 	if (vsock_is_pending(sk)) {
 		vsock_remove_pending(listener, sk);
@@ -1292,7 +1300,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
 	if (connected) {
 		listener->sk_ack_backlog--;
 
-		lock_sock(connected);
+		lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
 		vconnected = vsock_sk(connected);
 
 		/* If the listener socket has received an error, then we should
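
The new comment block and both lock_sock_nested() conversions encode a single rule: lock the listener first, then the pending or accepted child, and take the child at SINGLE_DEPTH_NESTING so lockdep accepts two locks of the same class held at once. A plain-pthread analogue of the ordering (lockdep has no userspace counterpart; the names below are illustrative):

#include <pthread.h>

struct vsock { pthread_mutex_t lock; };

static void handle_pending(struct vsock *listener, struct vsock *pending)
{
	/* Parent first ... */
	pthread_mutex_lock(&listener->lock);
	/* ... then the child; in the kernel this is
	 * lock_sock_nested(pending_sk, SINGLE_DEPTH_NESTING). */
	pthread_mutex_lock(&pending->lock);

	/* ... move the child off the pending list, etc. ... */

	pthread_mutex_unlock(&pending->lock);
	pthread_mutex_unlock(&listener->lock);
}

int main(void)
{
	struct vsock l = { PTHREAD_MUTEX_INITIALIZER };
	struct vsock p = { PTHREAD_MUTEX_INITIALIZER };

	handle_pending(&l, &p);
	return 0;
}
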
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 39d9abd309ea..7645e97362c0 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -220,7 +220,7 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
 
 	if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
 		if (WARN_ON(!rdev->scan_req->notified))
-			rdev->scan_req->aborted = true;
+			rdev->scan_req->info.aborted = true;
 		___cfg80211_scan_done(rdev, false);
 	}
 }
@@ -1087,7 +1087,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
 		cfg80211_update_iface_num(rdev, wdev->iftype, -1);
 		if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
 			if (WARN_ON(!rdev->scan_req->notified))
-				rdev->scan_req->aborted = true;
+				rdev->scan_req->info.aborted = true;
 			___cfg80211_scan_done(rdev, false);
 		}
 
diff --git a/net/wireless/core.h b/net/wireless/core.h
index a4d547f99f8d..eee91443924d 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -141,6 +141,18 @@ struct cfg80211_internal_bss {
 	unsigned long refcount;
 	atomic_t hold;
 
+	/* time at the start of the reception of the first octet of the
+	 * timestamp field of the last beacon/probe received for this BSS.
+	 * The time is the TSF of the BSS specified by %parent_bssid.
+	 */
+	u64 parent_tsf;
+
+	/* the BSS according to which %parent_tsf is set. This is set to
+	 * the BSS that the interface that requested the scan was connected to
+	 * when the beacon/probe was received.
+	 */
+	u8 parent_bssid[ETH_ALEN] __aligned(2);
+
 	/* must be last because of priv member */
 	struct cfg80211_bss pub;
 };
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c503e96bfd5a..5782f718d567 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -405,6 +405,10 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
 	[NL80211_ATTR_PBSS] = { .type = NLA_FLAG },
 	[NL80211_ATTR_BSS_SELECT] = { .type = NLA_NESTED },
 	[NL80211_ATTR_STA_SUPPORT_P2P_PS] = { .type = NLA_U8 },
+	[NL80211_ATTR_MU_MIMO_GROUP_DATA] = {
+		.len = VHT_MUMIMO_GROUPS_DATA_LEN
+	},
+	[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR] = { .len = ETH_ALEN },
 };
 
 /* policy for the key attributes */
@@ -2695,6 +2699,38 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
 		change = true;
 	}
 
+	if (info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]) {
+		const u8 *mumimo_groups;
+		u32 cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER;
+
+		if (!wiphy_ext_feature_isset(&rdev->wiphy, cap_flag))
+			return -EOPNOTSUPP;
+
+		mumimo_groups =
+			nla_data(info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]);
+
+		/* bits 0 and 63 are reserved and must be zero */
+		if ((mumimo_groups[0] & BIT(7)) ||
+		    (mumimo_groups[VHT_MUMIMO_GROUPS_DATA_LEN - 1] & BIT(0)))
+			return -EINVAL;
+
+		memcpy(params.vht_mumimo_groups, mumimo_groups,
+		       VHT_MUMIMO_GROUPS_DATA_LEN);
+		change = true;
+	}
+
+	if (info->attrs[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR]) {
+		u32 cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER;
+
+		if (!wiphy_ext_feature_isset(&rdev->wiphy, cap_flag))
+			return -EOPNOTSUPP;
+
+		nla_memcpy(params.macaddr,
+			   info->attrs[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR],
+			   ETH_ALEN);
+		change = true;
+	}
+
 	if (flags && (*flags & MONITOR_FLAG_ACTIVE) &&
 	    !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
 		return -EOPNOTSUPP;
@@ -4410,6 +4446,12 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
 			nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]);
 		if (params.plink_state >= NUM_NL80211_PLINK_STATES)
 			return -EINVAL;
+		if (info->attrs[NL80211_ATTR_MESH_PEER_AID]) {
+			params.peer_aid = nla_get_u16(
+				info->attrs[NL80211_ATTR_MESH_PEER_AID]);
+			if (params.peer_aid > IEEE80211_MAX_AID)
+				return -EINVAL;
+		}
 		params.sta_modify_mask |= STATION_PARAM_APPLY_PLINK_STATE;
 	}
 
@@ -5287,6 +5329,51 @@ static const struct nla_policy
 	[NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG },
 };
 
+static int nl80211_check_bool(const struct nlattr *nla, u8 min, u8 max, bool *out)
+{
+	u8 val = nla_get_u8(nla);
+	if (val < min || val > max)
+		return -EINVAL;
+	*out = val;
+	return 0;
+}
+
+static int nl80211_check_u8(const struct nlattr *nla, u8 min, u8 max, u8 *out)
+{
+	u8 val = nla_get_u8(nla);
+	if (val < min || val > max)
+		return -EINVAL;
+	*out = val;
+	return 0;
+}
+
+static int nl80211_check_u16(const struct nlattr *nla, u16 min, u16 max, u16 *out)
+{
+	u16 val = nla_get_u16(nla);
+	if (val < min || val > max)
+		return -EINVAL;
+	*out = val;
+	return 0;
+}
+
+static int nl80211_check_u32(const struct nlattr *nla, u32 min, u32 max, u32 *out)
+{
+	u32 val = nla_get_u32(nla);
+	if (val < min || val > max)
+		return -EINVAL;
+	*out = val;
+	return 0;
+}
+
+static int nl80211_check_s32(const struct nlattr *nla, s32 min, s32 max, s32 *out)
+{
+	s32 val = nla_get_s32(nla);
+	if (val < min || val > max)
+		return -EINVAL;
+	*out = val;
+	return 0;
+}
+
 static int nl80211_parse_mesh_config(struct genl_info *info,
 				     struct mesh_config *cfg,
 				     u32 *mask_out)
@@ -5297,9 +5384,8 @@ static int nl80211_parse_mesh_config(struct genl_info *info,
 #define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, min, max, mask, attr, fn) \
 do { \
 	if (tb[attr]) { \
-		if (fn(tb[attr]) < min || fn(tb[attr]) > max) \
+		if (fn(tb[attr], min, max, &cfg->param)) \
 			return -EINVAL; \
-		cfg->param = fn(tb[attr]); \
 		mask |= (1 << (attr - 1)); \
 	} \
 } while (0)
@@ -5318,99 +5404,99 @@ do { \
 	/* Fill in the params struct */
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout, 1, 255,
 				  mask, NL80211_MESHCONF_RETRY_TIMEOUT,
-				  nla_get_u16);
+				  nl80211_check_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout, 1, 255,
 				  mask, NL80211_MESHCONF_CONFIRM_TIMEOUT,
-				  nla_get_u16);
+				  nl80211_check_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout, 1, 255,
 				  mask, NL80211_MESHCONF_HOLDING_TIMEOUT,
-				  nla_get_u16);
+				  nl80211_check_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks, 0, 255,
 				  mask, NL80211_MESHCONF_MAX_PEER_LINKS,
-				  nla_get_u16);
+				  nl80211_check_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries, 0, 16,
 				  mask, NL80211_MESHCONF_MAX_RETRIES,
-				  nla_get_u8);
+				  nl80211_check_u8);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL, 1, 255,
-				  mask, NL80211_MESHCONF_TTL, nla_get_u8);
+				  mask, NL80211_MESHCONF_TTL, nl80211_check_u8);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl, 1, 255,
 				  mask, NL80211_MESHCONF_ELEMENT_TTL,
-				  nla_get_u8);
+				  nl80211_check_u8);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, 0, 1,
 				  mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
-				  nla_get_u8);
+				  nl80211_check_bool);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor,
 				  1, 255, mask,
 				  NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
-				  nla_get_u32);
+				  nl80211_check_u32);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, 0, 255,
 				  mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
-				  nla_get_u8);
+				  nl80211_check_u8);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time, 1, 65535,
 				  mask, NL80211_MESHCONF_PATH_REFRESH_TIME,
-				  nla_get_u32);
+				  nl80211_check_u32);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout, 1, 65535,
 				  mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
-				  nla_get_u16);
+				  nl80211_check_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout,
 				  1, 65535, mask,
 				  NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
-				  nla_get_u32);
+				  nl80211_check_u32);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval,
 				  1, 65535, mask,
 				  NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
-				  nla_get_u16);
+				  nl80211_check_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval,
 				  1, 65535, mask,
 				  NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
-				  nla_get_u16);
+				  nl80211_check_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
 				  dot11MeshHWMPnetDiameterTraversalTime,
 				  1, 65535, mask,
 				  NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
-				  nla_get_u16);
+				  nl80211_check_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, 0, 4,
 				  mask, NL80211_MESHCONF_HWMP_ROOTMODE,
-				  nla_get_u8);
+				  nl80211_check_u8);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, 1, 65535,
 				  mask, NL80211_MESHCONF_HWMP_RANN_INTERVAL,
-				  nla_get_u16);
+				  nl80211_check_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
 				  dot11MeshGateAnnouncementProtocol, 0, 1,
 				  mask, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
-				  nla_get_u8);
+				  nl80211_check_bool);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, 0, 1,
 				  mask, NL80211_MESHCONF_FORWARDING,
-				  nla_get_u8);
+				  nl80211_check_bool);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0,
 				  mask, NL80211_MESHCONF_RSSI_THRESHOLD,
-				  nla_get_s32);
+				  nl80211_check_s32);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, 0, 16,
 				  mask, NL80211_MESHCONF_HT_OPMODE,
-				  nla_get_u16);
+				  nl80211_check_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
 				  1, 65535, mask,
 				  NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
-				  nla_get_u32);
+				  nl80211_check_u32);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval, 1, 65535,
 				  mask, NL80211_MESHCONF_HWMP_ROOT_INTERVAL,
-				  nla_get_u16);
+				  nl80211_check_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
 				  dot11MeshHWMPconfirmationInterval,
 				  1, 65535, mask,
 				  NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL,
-				  nla_get_u16);
+				  nl80211_check_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, power_mode,
 				  NL80211_MESH_POWER_ACTIVE,
 				  NL80211_MESH_POWER_MAX,
 				  mask, NL80211_MESHCONF_POWER_MODE,
-				  nla_get_u32);
+				  nl80211_check_u32);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration,
 				  0, 65535, mask,
-				  NL80211_MESHCONF_AWAKE_WINDOW, nla_get_u16);
+				  NL80211_MESHCONF_AWAKE_WINDOW, nl80211_check_u16);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, 0, 0xffffffff,
 				  mask, NL80211_MESHCONF_PLINK_TIMEOUT,
-				  nla_get_u32);
+				  nl80211_check_u32);
 	if (mask_out)
 		*mask_out = mask;
 
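
The refactor above replaces the macro's double nla_get_*() call (once to range-check, once to assign) with typed nl80211_check_*() helpers that validate and write through an out pointer in one step. A self-contained model of the resulting shape, with netlink attributes reduced to plain values (all names below are made up for illustration):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int check_u16(uint16_t val, uint16_t min, uint16_t max, uint16_t *out)
{
	if (val < min || val > max)
		return -EINVAL;
	*out = val;
	return 0;
}

struct mesh_config { uint16_t retry_timeout; };

/* Validate and assign in one call, as the reworked macro now does. */
#define FILL_IF_SET(present, val, cfg, param, min, max, fn)	\
do {								\
	if (present) {						\
		if (fn(val, min, max, &(cfg)->param))		\
			return -EINVAL;				\
	}							\
} while (0)

static int parse(struct mesh_config *cfg, int have_attr, uint16_t attr_val)
{
	FILL_IF_SET(have_attr, attr_val, cfg, retry_timeout, 1, 255, check_u16);
	return 0;
}

int main(void)
{
	struct mesh_config cfg = { 0 };

	printf("in range:  %d\n", parse(&cfg, 1, 100));	/* 0 */
	printf("too large: %d\n", parse(&cfg, 1, 300));	/* -EINVAL */
	return 0;
}

Besides halving the attribute reads, each parameter now gets a real typed range check instead of whatever integer promotions the old open-coded comparison happened to perform.
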
@@ -6143,6 +6229,19 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
 		}
 	}
 
+	if (info->attrs[NL80211_ATTR_MEASUREMENT_DURATION]) {
+		if (!wiphy_ext_feature_isset(wiphy,
+					     NL80211_EXT_FEATURE_SET_SCAN_DWELL)) {
+			err = -EOPNOTSUPP;
+			goto out_free;
+		}
+
+		request->duration =
+			nla_get_u16(info->attrs[NL80211_ATTR_MEASUREMENT_DURATION]);
+		request->duration_mandatory =
+			nla_get_flag(info->attrs[NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY]);
+	}
+
 	if (info->attrs[NL80211_ATTR_SCAN_FLAGS]) {
 		request->flags = nla_get_u32(
 			info->attrs[NL80211_ATTR_SCAN_FLAGS]);
@@ -6976,6 +7075,13 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
 			  jiffies_to_msecs(jiffies - intbss->ts)))
 		goto nla_put_failure;
 
+	if (intbss->parent_tsf &&
+	    (nla_put_u64_64bit(msg, NL80211_BSS_PARENT_TSF,
+			       intbss->parent_tsf, NL80211_BSS_PAD) ||
+	     nla_put(msg, NL80211_BSS_PARENT_BSSID, ETH_ALEN,
+		     intbss->parent_bssid)))
+		goto nla_put_failure;
+
 	if (intbss->ts_boottime &&
 	    nla_put_u64_64bit(msg, NL80211_BSS_LAST_SEEN_BOOTTIME,
 			      intbss->ts_boottime, NL80211_BSS_PAD))
@@ -11749,6 +11855,13 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
 	    nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags))
 		goto nla_put_failure;
 
+	if (req->info.scan_start_tsf &&
+	    (nla_put_u64_64bit(msg, NL80211_ATTR_SCAN_START_TIME_TSF,
+			       req->info.scan_start_tsf, NL80211_BSS_PAD) ||
+	     nla_put(msg, NL80211_ATTR_SCAN_START_TIME_TSF_BSSID, ETH_ALEN,
+		     req->info.tsf_bssid)))
+		goto nla_put_failure;
+
 	return 0;
  nla_put_failure:
 	return -ENOBUFS;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index ef2955c89a00..0358e12be54b 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -3,6 +3,7 @@
  *
  * Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright 2016 Intel Deutschland GmbH
  */
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -194,7 +195,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
 	if (wdev->netdev)
 		cfg80211_sme_scan_done(wdev->netdev);
 
-	if (!request->aborted &&
+	if (!request->info.aborted &&
 	    request->flags & NL80211_SCAN_FLAG_FLUSH) {
 		/* flush entries from previous scans */
 		spin_lock_bh(&rdev->bss_lock);
@@ -202,10 +203,10 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
 		spin_unlock_bh(&rdev->bss_lock);
 	}
 
-	msg = nl80211_build_scan_msg(rdev, wdev, request->aborted);
+	msg = nl80211_build_scan_msg(rdev, wdev, request->info.aborted);
 
 #ifdef CONFIG_CFG80211_WEXT
-	if (wdev->netdev && !request->aborted) {
+	if (wdev->netdev && !request->info.aborted) {
 		memset(&wrqu, 0, sizeof(wrqu));
 
 		wireless_send_event(wdev->netdev, SIOCGIWSCAN, &wrqu, NULL);
@@ -236,12 +237,13 @@ void __cfg80211_scan_done(struct work_struct *wk)
 	rtnl_unlock();
 }
 
-void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
+void cfg80211_scan_done(struct cfg80211_scan_request *request,
+			struct cfg80211_scan_info *info)
 {
-	trace_cfg80211_scan_done(request, aborted);
+	trace_cfg80211_scan_done(request, info);
 	WARN_ON(request != wiphy_to_rdev(request->wiphy)->scan_req);
 
-	request->aborted = aborted;
+	request->info = *info;
 	request->notified = true;
 	queue_work(cfg80211_wq, &wiphy_to_rdev(request->wiphy)->scan_done_wk);
 }
@@ -843,6 +845,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
 		found->pub.capability = tmp->pub.capability;
 		found->ts = tmp->ts;
 		found->ts_boottime = tmp->ts_boottime;
+		found->parent_tsf = tmp->parent_tsf;
+		ether_addr_copy(found->parent_bssid, tmp->parent_bssid);
 	} else {
 		struct cfg80211_internal_bss *new;
 		struct cfg80211_internal_bss *hidden;
@@ -1086,6 +1090,8 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
 	tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
 	tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
 	tmp.ts_boottime = data->boottime_ns;
+	tmp.parent_tsf = data->parent_tsf;
+	ether_addr_copy(tmp.parent_bssid, data->parent_bssid);
 
 	signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
 		wiphy->max_adj_channel_rssi_comp;
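
The scan.c changes convert cfg80211_scan_done() from taking a bare bool to taking a cfg80211_scan_info, so the new TSF fields ride along with scan completion and future fields need no further signature change. A minimal standalone model of that API shape (struct and field names trimmed for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct scan_info {
	bool aborted;
	uint64_t scan_start_tsf;
	uint8_t tsf_bssid[6];
};

struct scan_request {
	struct scan_info info;	/* filled in on completion */
	bool notified;
};

static void scan_done(struct scan_request *req, const struct scan_info *info)
{
	req->info = *info;	/* one struct copy, like request->info = *info */
	req->notified = true;
}

int main(void)
{
	struct scan_request req = { 0 };
	struct scan_info info = { .aborted = false, .scan_start_tsf = 123456 };

	memcpy(info.tsf_bssid, (uint8_t[]){ 1, 2, 3, 4, 5, 6 }, 6);
	scan_done(&req, &info);
	printf("aborted=%d tsf=%llu\n", req.info.aborted,
	       (unsigned long long)req.info.scan_start_tsf);
	return 0;
}
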
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 3c1091ae6c36..72b5255cefe2 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2642,8 +2642,9 @@ TRACE_EVENT(cfg80211_tdls_oper_request,
 );
 
 TRACE_EVENT(cfg80211_scan_done,
-	TP_PROTO(struct cfg80211_scan_request *request, bool aborted),
-	TP_ARGS(request, aborted),
+	TP_PROTO(struct cfg80211_scan_request *request,
+		 struct cfg80211_scan_info *info),
+	TP_ARGS(request, info),
 	TP_STRUCT__entry(
 		__field(u32, n_channels)
 		__dynamic_array(u8, ie, request ? request->ie_len : 0)
@@ -2652,6 +2653,8 @@ TRACE_EVENT(cfg80211_scan_done,
 		MAC_ENTRY(wiphy_mac)
 		__field(bool, no_cck)
 		__field(bool, aborted)
+		__field(u64, scan_start_tsf)
+		MAC_ENTRY(tsf_bssid)
 	),
 	TP_fast_assign(
 		if (request) {
@@ -2666,9 +2669,16 @@ TRACE_EVENT(cfg80211_scan_done,
 				   request->wiphy->perm_addr);
 			__entry->no_cck = request->no_cck;
 		}
-		__entry->aborted = aborted;
+		if (info) {
+			__entry->aborted = info->aborted;
+			__entry->scan_start_tsf = info->scan_start_tsf;
+			MAC_ASSIGN(tsf_bssid, info->tsf_bssid);
+		}
 	),
-	TP_printk("aborted: %s", BOOL_TO_STR(__entry->aborted))
+	TP_printk("aborted: %s, scan start (TSF): %llu, tsf_bssid: " MAC_PR_FMT,
+		  BOOL_TO_STR(__entry->aborted),
+		  (unsigned long long)__entry->scan_start_tsf,
+		  MAC_PR_ARG(tsf_bssid))
 );
 
 DEFINE_EVENT(wiphy_only_evt, cfg80211_sched_scan_results,
@@ -2721,6 +2731,8 @@ TRACE_EVENT(cfg80211_inform_bss_frame,
 		__dynamic_array(u8, mgmt, len)
 		__field(s32, signal)
 		__field(u64, ts_boottime)
+		__field(u64, parent_tsf)
+		MAC_ENTRY(parent_bssid)
 	),
 	TP_fast_assign(
 		WIPHY_ASSIGN;
@@ -2730,10 +2742,15 @@ TRACE_EVENT(cfg80211_inform_bss_frame,
 		memcpy(__get_dynamic_array(mgmt), mgmt, len);
 		__entry->signal = data->signal;
 		__entry->ts_boottime = data->boottime_ns;
-	),
-	TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "(scan_width: %d) signal: %d, tsb:%llu",
-		  WIPHY_PR_ARG, CHAN_PR_ARG, __entry->scan_width,
-		  __entry->signal, (unsigned long long)__entry->ts_boottime)
+		__entry->parent_tsf = data->parent_tsf;
+		MAC_ASSIGN(parent_bssid, data->parent_bssid);
+	),
+	TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT
+		  "(scan_width: %d) signal: %d, tsb:%llu, detect_tsf:%llu, tsf_bssid: "
+		  MAC_PR_FMT, WIPHY_PR_ARG, CHAN_PR_ARG, __entry->scan_width,
+		  __entry->signal, (unsigned long long)__entry->ts_boottime,
+		  (unsigned long long)__entry->parent_tsf,
+		  MAC_PR_ARG(parent_bssid))
 );
 
 DECLARE_EVENT_CLASS(cfg80211_bss_evt,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 4e809e978b7d..2443ee30ba5b 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -509,7 +509,7 @@ static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
 		 * replace EtherType */
 		hdrlen += ETH_ALEN + 2;
 	else
-		tmp.h_proto = htons(skb->len);
+		tmp.h_proto = htons(skb->len - hdrlen);
 
 	pskb_pull(skb, hdrlen);
 
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 0bf2478cb7df..a98b780e974c 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -20,6 +20,7 @@ hostprogs-y += offwaketime
 hostprogs-y += spintest
 hostprogs-y += map_perf_test
 hostprogs-y += test_overhead
+hostprogs-y += test_cgrp2_array_pin
 
 test_verifier-objs := test_verifier.o libbpf.o
 test_maps-objs := test_maps.o libbpf.o
@@ -40,6 +41,7 @@ offwaketime-objs := bpf_load.o libbpf.o offwaketime_user.o
 spintest-objs := bpf_load.o libbpf.o spintest_user.o
 map_perf_test-objs := bpf_load.o libbpf.o map_perf_test_user.o
 test_overhead-objs := bpf_load.o libbpf.o test_overhead_user.o
+test_cgrp2_array_pin-objs := libbpf.o test_cgrp2_array_pin.o
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
@@ -61,6 +63,7 @@ always += map_perf_test_kern.o
 always += test_overhead_tp_kern.o
 always += test_overhead_kprobe_kern.o
 always += parse_varlen.o parse_simple.o parse_ldabs.o
+always += test_cgrp2_tc_kern.o
 
 HOSTCFLAGS += -I$(objtree)/usr/include
 
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index 7904a2a493de..84e3fd919a06 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -70,6 +70,8 @@ static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flag
 	(void *) BPF_FUNC_l3_csum_replace;
 static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
 	(void *) BPF_FUNC_l4_csum_replace;
+static int (*bpf_skb_in_cgroup)(void *ctx, void *map, int index) =
+	(void *) BPF_FUNC_skb_in_cgroup;
 
 #if defined(__x86_64__)
 
75 77
diff --git a/samples/bpf/test_cgrp2_array_pin.c b/samples/bpf/test_cgrp2_array_pin.c
new file mode 100644
index 000000000000..70e86f7be69d
--- /dev/null
+++ b/samples/bpf/test_cgrp2_array_pin.c
@@ -0,0 +1,109 @@
1/* Copyright (c) 2016 Facebook
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 */
7#include <linux/unistd.h>
8#include <linux/bpf.h>
9
10#include <stdio.h>
11#include <stdint.h>
12#include <unistd.h>
13#include <string.h>
14#include <errno.h>
15#include <fcntl.h>
16
17#include "libbpf.h"
18
19static void usage(void)
20{
21 printf("Usage: test_cgrp2_array_pin [...]\n");
22 printf(" -F <file> File to pin an BPF cgroup array\n");
23 printf(" -U <file> Update an already pinned BPF cgroup array\n");
24 printf(" -v <value> Full path of the cgroup2\n");
25 printf(" -h Display this help\n");
26}
27
28int main(int argc, char **argv)
29{
30 const char *pinned_file = NULL, *cg2 = NULL;
31 int create_array = 1;
32 int array_key = 0;
33 int array_fd = -1;
34 int cg2_fd = -1;
35 int ret = -1;
36 int opt;
37
38 while ((opt = getopt(argc, argv, "F:U:v:")) != -1) {
39 switch (opt) {
40 /* General args */
41 case 'F':
42 pinned_file = optarg;
43 break;
44 case 'U':
45 pinned_file = optarg;
46 create_array = 0;
47 break;
48 case 'v':
49 cg2 = optarg;
50 break;
51 default:
52 usage();
53 goto out;
54 }
55 }
56
57 if (!cg2 || !pinned_file) {
58 usage();
59 goto out;
60 }
61
62 cg2_fd = open(cg2, O_RDONLY);
63 if (cg2_fd < 0) {
64 fprintf(stderr, "open(%s,...): %s(%d)\n",
65 cg2, strerror(errno), errno);
66 goto out;
67 }
68
69 if (create_array) {
70 array_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_ARRAY,
71 sizeof(uint32_t), sizeof(uint32_t),
72 1, 0);
73 if (array_fd < 0) {
74 fprintf(stderr,
75 "bpf_create_map(BPF_MAP_TYPE_CGROUP_ARRAY,...): %s(%d)\n",
76 strerror(errno), errno);
77 goto out;
78 }
79 } else {
80 array_fd = bpf_obj_get(pinned_file);
81 if (array_fd < 0) {
82 fprintf(stderr, "bpf_obj_get(%s): %s(%d)\n",
83 pinned_file, strerror(errno), errno);
84 goto out;
85 }
86 }
87
88 ret = bpf_update_elem(array_fd, &array_key, &cg2_fd, 0);
89 if (ret) {
90 perror("bpf_update_elem");
91 goto out;
92 }
93
94 if (create_array) {
95 ret = bpf_obj_pin(array_fd, pinned_file);
96 if (ret) {
97 fprintf(stderr, "bpf_obj_pin(..., %s): %s(%d)\n",
98 pinned_file, strerror(errno), errno);
99 goto out;
100 }
101 }
102
103out:
104 if (array_fd != -1)
105 close(array_fd);
106 if (cg2_fd != -1)
107 close(cg2_fd);
108 return ret;
109}
diff --git a/samples/bpf/test_cgrp2_tc.sh b/samples/bpf/test_cgrp2_tc.sh
new file mode 100755
index 000000000000..0b119eeaf85c
--- /dev/null
+++ b/samples/bpf/test_cgrp2_tc.sh
@@ -0,0 +1,184 @@
1#!/bin/bash
2
3MY_DIR=$(dirname $0)
4# Details on the bpf prog
5BPF_CGRP2_ARRAY_NAME='test_cgrp2_array_pin'
6BPF_PROG="$MY_DIR/test_cgrp2_tc_kern.o"
7BPF_SECTION='filter'
8
9[ -z "$TC" ] && TC='tc'
10[ -z "$IP" ] && IP='ip'
11
12# Names of the veth interface, net namespace...etc.
13HOST_IFC='ve'
14NS_IFC='vens'
15NS='ns'
16
17find_mnt() {
18 cat /proc/mounts | \
19 awk '{ if ($3 == "'$1'" && mnt == "") { mnt = $2 }} END { print mnt }'
20}
21
22# Init cgroup2 vars
23init_cgrp2_vars() {
24 CGRP2_ROOT=$(find_mnt cgroup2)
25 if [ -z "$CGRP2_ROOT" ]
26 then
27 CGRP2_ROOT='/mnt/cgroup2'
28 MOUNT_CGRP2="yes"
29 fi
30 CGRP2_TC="$CGRP2_ROOT/tc"
31 CGRP2_TC_LEAF="$CGRP2_TC/leaf"
32}
33
34# Init bpf fs vars
35init_bpf_fs_vars() {
36 local bpf_fs_root=$(find_mnt bpf)
37 [ -n "$bpf_fs_root" ] || return -1
38 BPF_FS_TC_SHARE="$bpf_fs_root/tc/globals"
39}
40
41setup_cgrp2() {
42 case $1 in
43 start)
44 if [ "$MOUNT_CGRP2" == 'yes' ]
45 then
46 [ -d $CGRP2_ROOT ] || mkdir -p $CGRP2_ROOT
47 mount -t cgroup2 none $CGRP2_ROOT || return $?
48 fi
49 mkdir -p $CGRP2_TC_LEAF
50 ;;
51 *)
52 rmdir $CGRP2_TC_LEAF && rmdir $CGRP2_TC
53 [ "$MOUNT_CGRP2" == 'yes' ] && umount $CGRP2_ROOT
54 ;;
55 esac
56}
57
58setup_bpf_cgrp2_array() {
59 local bpf_cgrp2_array="$BPF_FS_TC_SHARE/$BPF_CGRP2_ARRAY_NAME"
60 case $1 in
61 start)
62 $MY_DIR/test_cgrp2_array_pin -U $bpf_cgrp2_array -v $CGRP2_TC
63 ;;
64 *)
65 [ -d "$BPF_FS_TC_SHARE" ] && rm -f $bpf_cgrp2_array
66 ;;
67 esac
68}
69
70setup_net() {
71 case $1 in
72 start)
73 $IP link add $HOST_IFC type veth peer name $NS_IFC || return $?
74 $IP link set dev $HOST_IFC up || return $?
75 sysctl -q net.ipv6.conf.$HOST_IFC.accept_dad=0
76
77 $IP netns add ns || return $?
78 $IP link set dev $NS_IFC netns ns || return $?
79 $IP -n $NS link set dev $NS_IFC up || return $?
80 $IP netns exec $NS sysctl -q net.ipv6.conf.$NS_IFC.accept_dad=0
81 $TC qdisc add dev $HOST_IFC clsact || return $?
82 $TC filter add dev $HOST_IFC egress bpf da obj $BPF_PROG sec $BPF_SECTION || return $?
83 ;;
84 *)
85 $IP netns del $NS
86 $IP link del $HOST_IFC
87 ;;
88 esac
89}
90
91run_in_cgrp() {
92 # Fork another bash and move it under the specified cgroup.
93 # It makes the cgroup cleanup easier at the end of the test.
94 cmd='echo $$ > '
95 cmd="$cmd $1/cgroup.procs; exec $2"
96 bash -c "$cmd"
97}
98
99do_test() {
100 run_in_cgrp $CGRP2_TC_LEAF "ping -6 -c3 ff02::1%$HOST_IFC >& /dev/null"
101 local dropped=$($TC -s qdisc show dev $HOST_IFC | tail -3 | \
102 awk '/drop/{print substr($7, 0, index($7, ",")-1)}')
103 if [[ $dropped -eq 0 ]]
104 then
105 echo "FAIL"
106 return 1
107 else
108 echo "Successfully filtered $dropped packets"
109 return 0
110 fi
111}
112
113do_exit() {
114 if [ "$DEBUG" == "yes" ] && [ "$MODE" != 'cleanuponly' ]
115 then
116 echo "------ DEBUG ------"
117 echo "mount: "; mount | egrep '(cgroup2|bpf)'; echo
118 echo "$CGRP2_TC_LEAF: "; ls -l $CGRP2_TC_LEAF; echo
119 if [ -d "$BPF_FS_TC_SHARE" ]
120 then
121 echo "$BPF_FS_TC_SHARE: "; ls -l $BPF_FS_TC_SHARE; echo
122 fi
123 echo "Host net:"
124 $IP netns
125 $IP link show dev $HOST_IFC
126 $IP -6 a show dev $HOST_IFC
127 $TC -s qdisc show dev $HOST_IFC
128 echo
129 echo "$NS net:"
130 $IP -n $NS link show dev $NS_IFC
131 $IP -n $NS -6 link show dev $NS_IFC
132 echo "------ DEBUG ------"
133 echo
134 fi
135
136 if [ "$MODE" != 'nocleanup' ]
137 then
138 setup_net stop
139 setup_bpf_cgrp2_array stop
140 setup_cgrp2 stop
141 fi
142}
143
144init_cgrp2_vars
145init_bpf_fs_vars
146
147while [[ $# -ge 1 ]]
148do
149 a="$1"
150 case $a in
151 debug)
152 DEBUG='yes'
153 shift 1
154 ;;
155 cleanup-only)
156 MODE='cleanuponly'
157 shift 1
158 ;;
159 no-cleanup)
160 MODE='nocleanup'
161 shift 1
162 ;;
163 *)
164 echo "test_cgrp2_tc [debug] [cleanup-only | no-cleanup]"
165 echo " debug: Print cgrp and network setup details at the end of the test"
166 echo " cleanup-only: Try to cleanup things from last test. No test will be run"
167 echo " no-cleanup: Run the test but don't do cleanup at the end"
168 echo "[Note: If no arg is given, it will run the test and do cleanup at the end]"
169 echo
170 exit -1
171 ;;
172 esac
173done
174
175trap do_exit 0
176
177[ "$MODE" == 'cleanuponly' ] && exit
178
179setup_cgrp2 start || exit $?
180setup_net start || exit $?
181init_bpf_fs_vars || exit $?
182setup_bpf_cgrp2_array start || exit $?
183do_test
184echo
diff --git a/samples/bpf/test_cgrp2_tc_kern.c b/samples/bpf/test_cgrp2_tc_kern.c
new file mode 100644
index 000000000000..2732c37c8d5b
--- /dev/null
+++ b/samples/bpf/test_cgrp2_tc_kern.c
@@ -0,0 +1,69 @@
1/* Copyright (c) 2016 Facebook
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 */
7#include <uapi/linux/if_ether.h>
8#include <uapi/linux/in6.h>
9#include <uapi/linux/ipv6.h>
10#include <uapi/linux/pkt_cls.h>
11#include <uapi/linux/bpf.h>
12#include "bpf_helpers.h"
13
14/* copy of 'struct ethhdr' without __packed */
15struct eth_hdr {
16 unsigned char h_dest[ETH_ALEN];
17 unsigned char h_source[ETH_ALEN];
18 unsigned short h_proto;
19};
20
21#define PIN_GLOBAL_NS 2
22struct bpf_elf_map {
23 __u32 type;
24 __u32 size_key;
25 __u32 size_value;
26 __u32 max_elem;
27 __u32 flags;
28 __u32 id;
29 __u32 pinning;
30};
31
32struct bpf_elf_map SEC("maps") test_cgrp2_array_pin = {
33 .type = BPF_MAP_TYPE_CGROUP_ARRAY,
34 .size_key = sizeof(uint32_t),
35 .size_value = sizeof(uint32_t),
36 .pinning = PIN_GLOBAL_NS,
37 .max_elem = 1,
38};
39
40SEC("filter")
41int handle_egress(struct __sk_buff *skb)
42{
43 void *data = (void *)(long)skb->data;
44 struct eth_hdr *eth = data;
45 struct ipv6hdr *ip6h = data + sizeof(*eth);
46 void *data_end = (void *)(long)skb->data_end;
47 char dont_care_msg[] = "dont care %04x %d\n";
48 char pass_msg[] = "pass\n";
49 char reject_msg[] = "reject\n";
50
51 /* single length check */
52 if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
53 return TC_ACT_OK;
54
55 if (eth->h_proto != htons(ETH_P_IPV6) ||
56 ip6h->nexthdr != IPPROTO_ICMPV6) {
57 bpf_trace_printk(dont_care_msg, sizeof(dont_care_msg),
58 eth->h_proto, ip6h->nexthdr);
59 return TC_ACT_OK;
60 } else if (bpf_skb_in_cgroup(skb, &test_cgrp2_array_pin, 0) != 1) {
61 bpf_trace_printk(pass_msg, sizeof(pass_msg));
62 return TC_ACT_OK;
63 } else {
64 bpf_trace_printk(reject_msg, sizeof(reject_msg));
65 return TC_ACT_SHOT;
66 }
67}
68
69char _license[] SEC("license") = "GPL";
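
The single length check in handle_egress() above is the canonical direct-packet-access idiom: compare data plus everything you intend to read against data_end once, then dereference freely; the BPF verifier insists on exactly this shape. A userspace model of the same pointer arithmetic (no verifier involved; the structs are illustrative stand-ins):

#include <stdio.h>

struct eth_hdr_model { unsigned char dst[6], src[6]; unsigned short proto; };
struct ip6_hdr_model { unsigned char bytes[40]; };	/* sizeof(struct ipv6hdr) */

static int parse(const unsigned char *data, const unsigned char *data_end)
{
	const struct eth_hdr_model *eth = (const void *)data;

	/* single length check covering both headers */
	if (data + sizeof(struct eth_hdr_model) +
	    sizeof(struct ip6_hdr_model) > data_end)
		return -1;	/* too short: skip, as returning TC_ACT_OK does */

	return eth->proto;	/* safe to dereference now */
}

int main(void)
{
	unsigned char pkt[64] = { 0 };

	printf("short: %d\n", parse(pkt, pkt + 10));	/* -1 */
	printf("ok:    %d\n", parse(pkt, pkt + 64));	/* 0  */
	return 0;
}
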
diff --git a/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh b/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh
new file mode 100755
index 000000000000..4e4e92b2515e
--- /dev/null
+++ b/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh
@@ -0,0 +1,66 @@
1#!/bin/bash
2#
3# Benchmark script:
4# - developed for benchmarking egress qdisc path, derived (more
5# like cut'n'pasted) from ingress benchmark script.
6#
7# Script for injecting packets into egress qdisc path of the stack
8# with pktgen "xmit_mode queue_xmit".
9#
10basedir=`dirname $0`
11source ${basedir}/functions.sh
12root_check_run_with_sudo "$@"
13
14# Parameter parsing via include
15source ${basedir}/parameters.sh
16[ -z "$DEST_IP" ] && DEST_IP="198.18.0.42"
17[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
18
19# Burst greater than 1 are invalid for queue_xmit mode
20if [[ -n "$BURST" ]]; then
21 err 1 "Bursting not supported for this mode"
22fi
23
24# Base Config
25DELAY="0" # Zero means max speed
26COUNT="10000000" # Zero means indefinitely
27
28# General cleanup everything since last run
29pg_ctrl "reset"
30
31# Threads are specified with parameter -t value in $THREADS
32for ((thread = 0; thread < $THREADS; thread++)); do
33 # The device name is extended with @name, using thread number to
34 # make then unique, but any name will do.
35 dev=${DEV}@${thread}
36
37 # Add remove all other devices and add_device $dev to thread
38 pg_thread $thread "rem_device_all"
39 pg_thread $thread "add_device" $dev
40
41 # Base config of dev
42 pg_set $dev "flag QUEUE_MAP_CPU"
43 pg_set $dev "count $COUNT"
44 pg_set $dev "pkt_size $PKT_SIZE"
45 pg_set $dev "delay $DELAY"
46 pg_set $dev "flag NO_TIMESTAMP"
47
48 # Destination
49 pg_set $dev "dst_mac $DST_MAC"
50 pg_set $dev "dst $DEST_IP"
51
52 # Inject packet into TX qdisc egress path of stack
53 pg_set $dev "xmit_mode queue_xmit"
54done
55
56# start_run
57echo "Running... ctrl^C to stop" >&2
58pg_ctrl "start"
59echo "Done" >&2
60
61# Print results
62for ((thread = 0; thread < $THREADS; thread++)); do
63 dev=${DEV}@${thread}
64 echo "Device: $dev"
65 cat /proc/net/pktgen/$dev | grep -A2 "Result:"
66done
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index a9155077feef..fec75786f75b 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -384,7 +384,7 @@ static void do_of_entry_multi(void *symval, struct module *mod)
 	len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
 		      (*type)[0] ? *type : "*");
 
-	if (compatible[0])
+	if ((*compatible)[0])
 		sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
 			*compatible);
 
diff --git a/security/keys/key.c b/security/keys/key.c
index bd5a272f28a6..346fbf201c22 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -597,7 +597,7 @@ int key_reject_and_link(struct key *key,
 
 	mutex_unlock(&key_construction_mutex);
 
-	if (keyring)
+	if (keyring && link_ret == 0)
 		__key_link_end(keyring, &key->index_key, edit);
 
 	/* wake up anyone waiting for a key to be constructed */
diff --git a/sound/core/timer.c b/sound/core/timer.c
index e722022d325d..9a6157ea6881 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1955,6 +1955,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 
 		qhead = tu->qhead++;
 		tu->qhead %= tu->queue_size;
+		tu->qused--;
 		spin_unlock_irq(&tu->qlock);
 
 		if (tu->tread) {
@@ -1968,7 +1969,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 		}
 
 		spin_lock_irq(&tu->qlock);
-		tu->qused--;
 		if (err < 0)
 			goto _error;
 		result += unit;
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index c0f8f613f1f1..172dacd925f5 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -420,6 +420,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
 
 static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
 {
+	hrtimer_cancel(&dpcm->timer);
 	tasklet_kill(&dpcm->tasklet);
 }
 
425 426
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
index 87041ddd29cb..47a358fab132 100644
--- a/sound/hda/hdac_regmap.c
+++ b/sound/hda/hdac_regmap.c
@@ -444,7 +444,7 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
 	err = reg_raw_write(codec, reg, val);
 	if (err == -EAGAIN) {
 		err = snd_hdac_power_up_pm(codec);
-		if (!err)
+		if (err >= 0)
 			err = reg_raw_write(codec, reg, val);
 		snd_hdac_power_down_pm(codec);
 	}
@@ -470,7 +470,7 @@ static int __snd_hdac_regmap_read_raw(struct hdac_device *codec,
 	err = reg_raw_read(codec, reg, val, uncached);
 	if (err == -EAGAIN) {
 		err = snd_hdac_power_up_pm(codec);
-		if (!err)
+		if (err >= 0)
 			err = reg_raw_read(codec, reg, val, uncached);
 		snd_hdac_power_down_pm(codec);
 	}
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 4a054d720112..d3125c169684 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -1444,9 +1444,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
 	int page, p, pp, delta, i;
 
 	page =
-	    (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) &
-	     WT_SUBBUF_MASK)
-	    >> WT_SUBBUF_SHIFT;
+	    (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
+	     >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
 	if (dma->nr_periods >= 4)
 		delta = (page - dma->period_real) & 3;
 	else {
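
The au88x0 hunk swaps mask-then-shift for shift-then-mask, which only matters because the mask selects the field after it has been shifted down; mask first and the field is gone. A standalone demonstration, where the WT_SUBBUF_* values are assumptions for illustration rather than the driver's definitions:

#include <stdio.h>

#define WT_SUBBUF_SHIFT	24
#define WT_SUBBUF_MASK	0x3	/* mask for the value *after* shifting */

int main(void)
{
	unsigned int stat = 0x2u << WT_SUBBUF_SHIFT;	/* field value: 2 */

	unsigned int broken = (stat & WT_SUBBUF_MASK) >> WT_SUBBUF_SHIFT;
	unsigned int fixed  = (stat >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;

	printf("broken=%u fixed=%u\n", broken, fixed);	/* broken=0 fixed=2 */
	return 0;
}
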
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 1cb85aeb0cea..286f5e3686a3 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -2200,11 +2200,11 @@ static int snd_echo_resume(struct device *dev)
 	u32 pipe_alloc_mask;
 	int err;
 
-	commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL);
+	commpage_bak = kmalloc(sizeof(*commpage), GFP_KERNEL);
 	if (commpage_bak == NULL)
 		return -ENOMEM;
 	commpage = chip->comm_page;
-	memcpy(commpage_bak, commpage, sizeof(struct comm_page));
+	memcpy(commpage_bak, commpage, sizeof(*commpage));
 
 	err = init_hw(chip, chip->pci->device, chip->pci->subsystem_device);
 	if (err < 0) {
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 320445f3bf73..79c7b340acc2 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3977,6 +3977,8 @@ static hda_nid_t set_path_power(struct hda_codec *codec, hda_nid_t nid,
 
 	for (n = 0; n < spec->paths.used; n++) {
 		path = snd_array_elem(&spec->paths, n);
+		if (!path->depth)
+			continue;
 		if (path->path[0] == nid ||
 		    path->path[path->depth - 1] == nid) {
 			bool pin_old = path->pin_enabled;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 94089fc71884..e320c44714b1 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -367,9 +367,10 @@ enum {
 #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
 #define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
 #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
+#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
 #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
-			IS_KBL(pci) || IS_KBL_LP(pci)
+			IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci)
 
 static char *driver_short_names[] = {
 	[AZX_DRIVER_ICH] = "HDA Intel",
@@ -2190,6 +2191,9 @@ static const struct pci_device_id azx_ids[] = {
 	/* Kabylake-LP */
 	{ PCI_DEVICE(0x8086, 0x9d71),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+	/* Kabylake-H */
+	{ PCI_DEVICE(0x8086, 0xa2f0),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
 	/* Broxton-P(Apollolake) */
 	{ PCI_DEVICE(0x8086, 0x5a98),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 17fd81736d3d..0621920f7617 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -115,20 +115,20 @@ static int substream_free_pages(struct azx *chip,
 /*
  * Register access ops. Tegra HDA register access is DWORD only.
  */
-static void hda_tegra_writel(u32 value, u32 *addr)
+static void hda_tegra_writel(u32 value, u32 __iomem *addr)
 {
 	writel(value, addr);
 }
 
-static u32 hda_tegra_readl(u32 *addr)
+static u32 hda_tegra_readl(u32 __iomem *addr)
 {
 	return readl(addr);
 }
 
-static void hda_tegra_writew(u16 value, u16 *addr)
+static void hda_tegra_writew(u16 value, u16 __iomem *addr)
 {
 	unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
-	void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+	void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
 	u32 v;
 
 	v = readl(dword_addr);
@@ -137,20 +137,20 @@ static void hda_tegra_writew(u16 value, u16 *addr)
 	writel(v, dword_addr);
 }
 
-static u16 hda_tegra_readw(u16 *addr)
+static u16 hda_tegra_readw(u16 __iomem *addr)
 {
 	unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
-	void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+	void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
 	u32 v;
 
 	v = readl(dword_addr);
 	return (v >> shift) & 0xffff;
 }
 
-static void hda_tegra_writeb(u8 value, u8 *addr)
+static void hda_tegra_writeb(u8 value, u8 __iomem *addr)
 {
 	unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
-	void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+	void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
 	u32 v;
 
 	v = readl(dword_addr);
@@ -159,10 +159,10 @@ static void hda_tegra_writeb(u8 value, u8 *addr)
 	writel(v, dword_addr);
 }
 
-static u8 hda_tegra_readb(u8 *addr)
+static u8 hda_tegra_readb(u8 __iomem *addr)
 {
 	unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
-	void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+	void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
 	u32 v;
 
 	v = readl(dword_addr);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 0fe18ede3e85..5fac786e4982 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5650,6 +5650,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5650 SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK), 5650 SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
5651 SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK), 5651 SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
5652 SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE), 5652 SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
5653 SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
5654 SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
5655 SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
5653 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5656 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5654 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), 5657 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
5655 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), 5658 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
@@ -5832,6 +5835,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5832 {0x14, 0x90170120}, 5835 {0x14, 0x90170120},
5833 {0x21, 0x02211030}), 5836 {0x21, 0x02211030}),
5834 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5837 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5838 {0x12, 0x90a60170},
5839 {0x14, 0x90170120},
5840 {0x21, 0x02211030}),
5841 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5835 ALC256_STANDARD_PINS), 5842 ALC256_STANDARD_PINS),
5836 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, 5843 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
5837 {0x12, 0x90a60130}, 5844 {0x12, 0x90a60130},
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 4d82a58ff6b0..f3fb98f0a995 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -483,9 +483,10 @@ config SND_SOC_DMIC
483 tristate 483 tristate
484 484
485config SND_SOC_HDMI_CODEC 485config SND_SOC_HDMI_CODEC
486 tristate 486 tristate
487 select SND_PCM_ELD 487 select SND_PCM_ELD
488 select SND_PCM_IEC958 488 select SND_PCM_IEC958
489 select HDMI
489 490
490config SND_SOC_ES8328 491config SND_SOC_ES8328
491 tristate "Everest Semi ES8328 CODEC" 492 tristate "Everest Semi ES8328 CODEC"
diff --git a/sound/soc/codecs/ak4613.c b/sound/soc/codecs/ak4613.c
index 647f69de6baa..5013d2ba0c10 100644
--- a/sound/soc/codecs/ak4613.c
+++ b/sound/soc/codecs/ak4613.c
@@ -146,6 +146,7 @@ static const struct regmap_config ak4613_regmap_cfg = {
146 .max_register = 0x16, 146 .max_register = 0x16,
147 .reg_defaults = ak4613_reg, 147 .reg_defaults = ak4613_reg,
148 .num_reg_defaults = ARRAY_SIZE(ak4613_reg), 148 .num_reg_defaults = ARRAY_SIZE(ak4613_reg),
149 .cache_type = REGCACHE_RBTREE,
149}; 150};
150 151
151static const struct of_device_id ak4613_of_match[] = { 152static const struct of_device_id ak4613_of_match[] = {
@@ -530,7 +531,6 @@ static int ak4613_i2c_remove(struct i2c_client *client)
530static struct i2c_driver ak4613_i2c_driver = { 531static struct i2c_driver ak4613_i2c_driver = {
531 .driver = { 532 .driver = {
532 .name = "ak4613-codec", 533 .name = "ak4613-codec",
533 .owner = THIS_MODULE,
534 .of_match_table = ak4613_of_match, 534 .of_match_table = ak4613_of_match,
535 }, 535 },
536 .probe = ak4613_i2c_probe, 536 .probe = ak4613_i2c_probe,
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index d6f4abbbf8a7..fb3885fe0afb 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -226,6 +226,7 @@ static int v253_open(struct tty_struct *tty)
226 if (!tty->disc_data) 226 if (!tty->disc_data)
227 return -ENODEV; 227 return -ENODEV;
228 228
229 tty->receive_room = 16;
229 if (tty->ops->write(tty, v253_init, len) != len) { 230 if (tty->ops->write(tty, v253_init, len) != len) {
230 ret = -EIO; 231 ret = -EIO;
231 goto err; 232 goto err;
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 181cd3bf0b92..2abb742fc47b 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1474,6 +1474,11 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec)
1474 * exit, we call pm_runtime_suspend() so that will do for us 1474 * exit, we call pm_runtime_suspend() so that will do for us
1475 */ 1475 */
1476 hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev)); 1476 hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
1477 if (!hlink) {
1478 dev_err(&edev->hdac.dev, "hdac link not found\n");
1479 return -EIO;
1480 }
1481
1477 snd_hdac_ext_bus_link_get(edev->ebus, hlink); 1482 snd_hdac_ext_bus_link_get(edev->ebus, hlink);
1478 1483
1479 ret = create_fill_widget_route_map(dapm); 1484 ret = create_fill_widget_route_map(dapm);
@@ -1634,6 +1639,11 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
1634 1639
1635 /* hold the ref while we probe */ 1640 /* hold the ref while we probe */
1636 hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev)); 1641 hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
1642 if (!hlink) {
1643 dev_err(&edev->hdac.dev, "hdac link not found\n");
1644 return -EIO;
1645 }
1646
1637 snd_hdac_ext_bus_link_get(edev->ebus, hlink); 1647 snd_hdac_ext_bus_link_get(edev->ebus, hlink);
1638 1648
1639 hdmi_priv = devm_kzalloc(&codec->dev, sizeof(*hdmi_priv), GFP_KERNEL); 1649 hdmi_priv = devm_kzalloc(&codec->dev, sizeof(*hdmi_priv), GFP_KERNEL);
@@ -1744,6 +1754,11 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
1744 } 1754 }
1745 1755
1746 hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev)); 1756 hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
1757 if (!hlink) {
1758 dev_err(dev, "hdac link not found\n");
1759 return -EIO;
1760 }
1761
1747 snd_hdac_ext_bus_link_put(ebus, hlink); 1762 snd_hdac_ext_bus_link_put(ebus, hlink);
1748 1763
1749 return 0; 1764 return 0;
@@ -1765,6 +1780,11 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
1765 return 0; 1780 return 0;
1766 1781
1767 hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev)); 1782 hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
1783 if (!hlink) {
1784 dev_err(dev, "hdac link not found\n");
1785 return -EIO;
1786 }
1787
1768 snd_hdac_ext_bus_link_get(ebus, hlink); 1788 snd_hdac_ext_bus_link_get(ebus, hlink);
1769 1789
1770 err = snd_hdac_display_power(bus, true); 1790 err = snd_hdac_display_power(bus, true);
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 3c6594da6c9c..d70847c9eeb0 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -253,7 +253,7 @@ static const struct reg_default rt5650_reg[] = {
253 { 0x2b, 0x5454 }, 253 { 0x2b, 0x5454 },
254 { 0x2c, 0xaaa0 }, 254 { 0x2c, 0xaaa0 },
255 { 0x2d, 0x0000 }, 255 { 0x2d, 0x0000 },
256 { 0x2f, 0x1002 }, 256 { 0x2f, 0x5002 },
257 { 0x31, 0x5000 }, 257 { 0x31, 0x5000 },
258 { 0x32, 0x0000 }, 258 { 0x32, 0x0000 },
259 { 0x33, 0x0000 }, 259 { 0x33, 0x0000 },
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 49a9e7049e2b..0af5ddbef1da 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -619,7 +619,7 @@ static const struct snd_kcontrol_new rt5670_snd_controls[] = {
619 RT5670_L_MUTE_SFT, RT5670_R_MUTE_SFT, 1, 1), 619 RT5670_L_MUTE_SFT, RT5670_R_MUTE_SFT, 1, 1),
620 SOC_DOUBLE_TLV("HP Playback Volume", RT5670_HP_VOL, 620 SOC_DOUBLE_TLV("HP Playback Volume", RT5670_HP_VOL,
621 RT5670_L_VOL_SFT, RT5670_R_VOL_SFT, 621 RT5670_L_VOL_SFT, RT5670_R_VOL_SFT,
622 39, 0, out_vol_tlv), 622 39, 1, out_vol_tlv),
623 /* OUTPUT Control */ 623 /* OUTPUT Control */
624 SOC_DOUBLE("OUT Channel Switch", RT5670_LOUT1, 624 SOC_DOUBLE("OUT Channel Switch", RT5670_LOUT1,
625 RT5670_VOL_L_SFT, RT5670_VOL_R_SFT, 1, 1), 625 RT5670_VOL_L_SFT, RT5670_VOL_R_SFT, 1, 1),
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index da60e3fe5ee7..e7fe6b7b95b7 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1872,7 +1872,7 @@ static struct snd_soc_dai_driver wm5102_dai[] = {
1872 .capture = { 1872 .capture = {
1873 .stream_name = "Audio Trace CPU", 1873 .stream_name = "Audio Trace CPU",
1874 .channels_min = 1, 1874 .channels_min = 1,
1875 .channels_max = 6, 1875 .channels_max = 4,
1876 .rates = WM5102_RATES, 1876 .rates = WM5102_RATES,
1877 .formats = WM5102_FORMATS, 1877 .formats = WM5102_FORMATS,
1878 }, 1878 },
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index b5820e4d5471..d54f1b46c9ec 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -1723,6 +1723,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
1723 { "OUT2L", NULL, "SYSCLK" }, 1723 { "OUT2L", NULL, "SYSCLK" },
1724 { "OUT2R", NULL, "SYSCLK" }, 1724 { "OUT2R", NULL, "SYSCLK" },
1725 { "OUT3L", NULL, "SYSCLK" }, 1725 { "OUT3L", NULL, "SYSCLK" },
1726 { "OUT3R", NULL, "SYSCLK" },
1726 { "OUT4L", NULL, "SYSCLK" }, 1727 { "OUT4L", NULL, "SYSCLK" },
1727 { "OUT4R", NULL, "SYSCLK" }, 1728 { "OUT4R", NULL, "SYSCLK" },
1728 { "OUT5L", NULL, "SYSCLK" }, 1729 { "OUT5L", NULL, "SYSCLK" },
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index f6f9395ea38e..1c600819f768 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -743,6 +743,7 @@ static const struct regmap_config wm8940_regmap = {
743 .max_register = WM8940_MONOMIX, 743 .max_register = WM8940_MONOMIX,
744 .reg_defaults = wm8940_reg_defaults, 744 .reg_defaults = wm8940_reg_defaults,
745 .num_reg_defaults = ARRAY_SIZE(wm8940_reg_defaults), 745 .num_reg_defaults = ARRAY_SIZE(wm8940_reg_defaults),
746 .cache_type = REGCACHE_RBTREE,
746 747
747 .readable_reg = wm8940_readable_register, 748 .readable_reg = wm8940_readable_register,
748 .volatile_reg = wm8940_volatile_register, 749 .volatile_reg = wm8940_volatile_register,
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 0f66fda2c772..237dc67002ef 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1513,8 +1513,9 @@ static struct davinci_mcasp_pdata am33xx_mcasp_pdata = {
1513}; 1513};
1514 1514
1515static struct davinci_mcasp_pdata dra7_mcasp_pdata = { 1515static struct davinci_mcasp_pdata dra7_mcasp_pdata = {
1516 .tx_dma_offset = 0x200, 1516 /* The CFG port offset will be calculated if it is needed */
1517 .rx_dma_offset = 0x284, 1517 .tx_dma_offset = 0,
1518 .rx_dma_offset = 0,
1518 .version = MCASP_VERSION_4, 1519 .version = MCASP_VERSION_4,
1519}; 1520};
1520 1521
@@ -1734,6 +1735,52 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp)
1734 return PCM_EDMA; 1735 return PCM_EDMA;
1735} 1736}
1736 1737
1738static u32 davinci_mcasp_txdma_offset(struct davinci_mcasp_pdata *pdata)
1739{
1740 int i;
1741 u32 offset = 0;
1742
1743 if (pdata->version != MCASP_VERSION_4)
1744 return pdata->tx_dma_offset;
1745
1746 for (i = 0; i < pdata->num_serializer; i++) {
1747 if (pdata->serial_dir[i] == TX_MODE) {
1748 if (!offset) {
1749 offset = DAVINCI_MCASP_TXBUF_REG(i);
1750 } else {
1751 pr_err("%s: Only one serializer allowed!\n",
1752 __func__);
1753 break;
1754 }
1755 }
1756 }
1757
1758 return offset;
1759}
1760
1761static u32 davinci_mcasp_rxdma_offset(struct davinci_mcasp_pdata *pdata)
1762{
1763 int i;
1764 u32 offset = 0;
1765
1766 if (pdata->version != MCASP_VERSION_4)
1767 return pdata->rx_dma_offset;
1768
1769 for (i = 0; i < pdata->num_serializer; i++) {
1770 if (pdata->serial_dir[i] == RX_MODE) {
1771 if (!offset) {
1772 offset = DAVINCI_MCASP_RXBUF_REG(i);
1773 } else {
1774 pr_err("%s: Only one serializer allowed!\n",
1775 __func__);
1776 break;
1777 }
1778 }
1779 }
1780
1781 return offset;
1782}
1783
1737static int davinci_mcasp_probe(struct platform_device *pdev) 1784static int davinci_mcasp_probe(struct platform_device *pdev)
1738{ 1785{
1739 struct snd_dmaengine_dai_dma_data *dma_data; 1786 struct snd_dmaengine_dai_dma_data *dma_data;
@@ -1862,7 +1909,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
1862 if (dat) 1909 if (dat)
1863 dma_data->addr = dat->start; 1910 dma_data->addr = dat->start;
1864 else 1911 else
1865 dma_data->addr = mem->start + pdata->tx_dma_offset; 1912 dma_data->addr = mem->start + davinci_mcasp_txdma_offset(pdata);
1866 1913
1867 dma = &mcasp->dma_request[SNDRV_PCM_STREAM_PLAYBACK]; 1914 dma = &mcasp->dma_request[SNDRV_PCM_STREAM_PLAYBACK];
1868 res = platform_get_resource(pdev, IORESOURCE_DMA, 0); 1915 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -1883,7 +1930,8 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
1883 if (dat) 1930 if (dat)
1884 dma_data->addr = dat->start; 1931 dma_data->addr = dat->start;
1885 else 1932 else
1886 dma_data->addr = mem->start + pdata->rx_dma_offset; 1933 dma_data->addr =
1934 mem->start + davinci_mcasp_rxdma_offset(pdata);
1887 1935
1888 dma = &mcasp->dma_request[SNDRV_PCM_STREAM_CAPTURE]; 1936 dma = &mcasp->dma_request[SNDRV_PCM_STREAM_CAPTURE];
1889 res = platform_get_resource(pdev, IORESOURCE_DMA, 1); 1937 res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
diff --git a/sound/soc/davinci/davinci-mcasp.h b/sound/soc/davinci/davinci-mcasp.h
index 1e8787fb3fb7..afddc8010c54 100644
--- a/sound/soc/davinci/davinci-mcasp.h
+++ b/sound/soc/davinci/davinci-mcasp.h
@@ -85,9 +85,9 @@
85 (n << 2)) 85 (n << 2))
86 86
87/* Transmit Buffer for Serializer n */ 87/* Transmit Buffer for Serializer n */
88#define DAVINCI_MCASP_TXBUF_REG 0x200 88#define DAVINCI_MCASP_TXBUF_REG(n) (0x200 + (n << 2))
89/* Receive Buffer for Serializer n */ 89/* Receive Buffer for Serializer n */
90#define DAVINCI_MCASP_RXBUF_REG 0x280 90#define DAVINCI_MCASP_RXBUF_REG(n) (0x280 + (n << 2))
91 91
92/* McASP FIFO Registers */ 92/* McASP FIFO Registers */
93#define DAVINCI_MCASP_V2_AFIFO_BASE (0x1010) 93#define DAVINCI_MCASP_V2_AFIFO_BASE (0x1010)
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 632ecc0e3956..bedec4a32581 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -952,16 +952,16 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
952 ssi_private->i2s_mode = CCSR_SSI_SCR_NET; 952 ssi_private->i2s_mode = CCSR_SSI_SCR_NET;
953 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { 953 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
954 case SND_SOC_DAIFMT_I2S: 954 case SND_SOC_DAIFMT_I2S:
955 regmap_update_bits(regs, CCSR_SSI_STCCR,
956 CCSR_SSI_SxCCR_DC_MASK,
957 CCSR_SSI_SxCCR_DC(2));
958 regmap_update_bits(regs, CCSR_SSI_SRCCR,
959 CCSR_SSI_SxCCR_DC_MASK,
960 CCSR_SSI_SxCCR_DC(2));
955 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { 961 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
956 case SND_SOC_DAIFMT_CBM_CFS: 962 case SND_SOC_DAIFMT_CBM_CFS:
957 case SND_SOC_DAIFMT_CBS_CFS: 963 case SND_SOC_DAIFMT_CBS_CFS:
958 ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_MASTER; 964 ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_MASTER;
959 regmap_update_bits(regs, CCSR_SSI_STCCR,
960 CCSR_SSI_SxCCR_DC_MASK,
961 CCSR_SSI_SxCCR_DC(2));
962 regmap_update_bits(regs, CCSR_SSI_SRCCR,
963 CCSR_SSI_SxCCR_DC_MASK,
964 CCSR_SSI_SxCCR_DC(2));
965 break; 965 break;
966 case SND_SOC_DAIFMT_CBM_CFM: 966 case SND_SOC_DAIFMT_CBM_CFM:
967 ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_SLAVE; 967 ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_SLAVE;
diff --git a/sound/soc/intel/atom/sst-mfld-platform-compress.c b/sound/soc/intel/atom/sst-mfld-platform-compress.c
index 395168986462..1bead81bb510 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-compress.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-compress.c
@@ -182,24 +182,29 @@ static int sst_platform_compr_trigger(struct snd_compr_stream *cstream, int cmd)
182 case SNDRV_PCM_TRIGGER_START: 182 case SNDRV_PCM_TRIGGER_START:
183 if (stream->compr_ops->stream_start) 183 if (stream->compr_ops->stream_start)
184 return stream->compr_ops->stream_start(sst->dev, stream->id); 184 return stream->compr_ops->stream_start(sst->dev, stream->id);
185 break;
185 case SNDRV_PCM_TRIGGER_STOP: 186 case SNDRV_PCM_TRIGGER_STOP:
186 if (stream->compr_ops->stream_drop) 187 if (stream->compr_ops->stream_drop)
187 return stream->compr_ops->stream_drop(sst->dev, stream->id); 188 return stream->compr_ops->stream_drop(sst->dev, stream->id);
189 break;
188 case SND_COMPR_TRIGGER_DRAIN: 190 case SND_COMPR_TRIGGER_DRAIN:
189 if (stream->compr_ops->stream_drain) 191 if (stream->compr_ops->stream_drain)
190 return stream->compr_ops->stream_drain(sst->dev, stream->id); 192 return stream->compr_ops->stream_drain(sst->dev, stream->id);
193 break;
191 case SND_COMPR_TRIGGER_PARTIAL_DRAIN: 194 case SND_COMPR_TRIGGER_PARTIAL_DRAIN:
192 if (stream->compr_ops->stream_partial_drain) 195 if (stream->compr_ops->stream_partial_drain)
193 return stream->compr_ops->stream_partial_drain(sst->dev, stream->id); 196 return stream->compr_ops->stream_partial_drain(sst->dev, stream->id);
197 break;
194 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 198 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
195 if (stream->compr_ops->stream_pause) 199 if (stream->compr_ops->stream_pause)
196 return stream->compr_ops->stream_pause(sst->dev, stream->id); 200 return stream->compr_ops->stream_pause(sst->dev, stream->id);
201 break;
197 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 202 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
198 if (stream->compr_ops->stream_pause_release) 203 if (stream->compr_ops->stream_pause_release)
199 return stream->compr_ops->stream_pause_release(sst->dev, stream->id); 204 return stream->compr_ops->stream_pause_release(sst->dev, stream->id);
200 default: 205 break;
201 return -EINVAL;
202 } 206 }
207 return -EINVAL;
203} 208}
204 209
205static int sst_platform_compr_pointer(struct snd_compr_stream *cstream, 210static int sst_platform_compr_pointer(struct snd_compr_stream *cstream,
diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c
index 965ce40ce752..8b95e09e23e8 100644
--- a/sound/soc/intel/skylake/bxt-sst.c
+++ b/sound/soc/intel/skylake/bxt-sst.c
@@ -291,6 +291,7 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
291 sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ), 291 sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
292 SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ); 292 SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
293 293
294 INIT_LIST_HEAD(&sst->module_list);
294 ret = skl_ipc_init(dev, skl); 295 ret = skl_ipc_init(dev, skl);
295 if (ret) 296 if (ret)
296 return ret; 297 return ret;
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 49354d17ea55..c4c51a4d3c8f 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -518,7 +518,7 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
518 } 518 }
519 } 519 }
520 520
521 rsnd_mod_bset(adg_mod, SSICKR, 0x00FF0000, ckr); 521 rsnd_mod_bset(adg_mod, SSICKR, 0x80FF0000, ckr);
522 rsnd_mod_write(adg_mod, BRRA, rbga); 522 rsnd_mod_write(adg_mod, BRRA, rbga);
523 rsnd_mod_write(adg_mod, BRRB, rbgb); 523 rsnd_mod_write(adg_mod, BRRB, rbgb);
524 524
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index bbf69d248ec5..9f53020c3269 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -204,6 +204,44 @@ static unsigned long long adjust_signedness(unsigned long long value_int, int si
204 return (value_int & value_mask) | ~value_mask; 204 return (value_int & value_mask) | ~value_mask;
205} 205}
206 206
207static int string_set_value(struct bt_ctf_field *field, const char *string)
208{
209 char *buffer = NULL;
210 size_t len = strlen(string), i, p;
211 int err;
212
213 for (i = p = 0; i < len; i++, p++) {
214 if (isprint(string[i])) {
215 if (!buffer)
216 continue;
217 buffer[p] = string[i];
218 } else {
219 char numstr[5];
220
221 snprintf(numstr, sizeof(numstr), "\\x%02x",
222 (unsigned int)(string[i]) & 0xff);
223
224 if (!buffer) {
225 buffer = zalloc(i + (len - i) * 4 + 2);
226 if (!buffer) {
227 pr_err("failed to set unprintable string '%s'\n", string);
228 return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
229 }
230 if (i > 0)
231 strncpy(buffer, string, i);
232 }
233 strncat(buffer + p, numstr, 4);
234 p += 3;
235 }
236 }
237
238 if (!buffer)
239 return bt_ctf_field_string_set_value(field, string);
240 err = bt_ctf_field_string_set_value(field, buffer);
241 free(buffer);
242 return err;
243}
244
207static int add_tracepoint_field_value(struct ctf_writer *cw, 245static int add_tracepoint_field_value(struct ctf_writer *cw,
208 struct bt_ctf_event_class *event_class, 246 struct bt_ctf_event_class *event_class,
209 struct bt_ctf_event *event, 247 struct bt_ctf_event *event,
@@ -270,8 +308,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
270 } 308 }
271 309
272 if (flags & FIELD_IS_STRING) 310 if (flags & FIELD_IS_STRING)
273 ret = bt_ctf_field_string_set_value(field, 311 ret = string_set_value(field, data + offset + i * len);
274 data + offset + i * len);
275 else { 312 else {
276 unsigned long long value_int; 313 unsigned long long value_int;
277 314
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index f6fcc6832949..9b141f12329e 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -673,6 +673,8 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
673 int err; 673 int err;
674 union perf_event *event; 674 union perf_event *event;
675 675
676 if (symbol_conf.kptr_restrict)
677 return -1;
676 if (map == NULL) 678 if (map == NULL)
677 return -1; 679 return -1;
678 680
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 20f9cb32b703..54c4ff2b1cee 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1933,17 +1933,17 @@ int setup_intlist(struct intlist **list, const char *list_str,
1933static bool symbol__read_kptr_restrict(void) 1933static bool symbol__read_kptr_restrict(void)
1934{ 1934{
1935 bool value = false; 1935 bool value = false;
1936 FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
1936 1937
1937 if (geteuid() != 0) { 1938 if (fp != NULL) {
1938 FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r"); 1939 char line[8];
1939 if (fp != NULL) {
1940 char line[8];
1941 1940
1942 if (fgets(line, sizeof(line), fp) != NULL) 1941 if (fgets(line, sizeof(line), fp) != NULL)
1943 value = atoi(line) != 0; 1942 value = (geteuid() != 0) ?
1943 (atoi(line) != 0) :
1944 (atoi(line) == 2);
1944 1945
1945 fclose(fp); 1946 fclose(fp);
1946 }
1947 } 1947 }
1948 1948
1949 return value; 1949 return value;
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
index c2b61c4fda11..0bf5085281f3 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
@@ -23,15 +23,14 @@ if [ ! -f events/sched/sched_process_fork/trigger ]; then
23 exit_unsupported 23 exit_unsupported
24fi 24fi
25 25
26reset_tracer 26if [ ! -f events/sched/sched_process_fork/hist ]; then
27do_reset
28
29FEATURE=`grep hist events/sched/sched_process_fork/trigger`
30if [ -z "$FEATURE" ]; then
31 echo "hist trigger is not supported" 27 echo "hist trigger is not supported"
32 exit_unsupported 28 exit_unsupported
33fi 29fi
34 30
31reset_tracer
32do_reset
33
35echo "Test histogram with execname modifier" 34echo "Test histogram with execname modifier"
36 35
37echo 'hist:keys=common_pid.execname' > events/sched/sched_process_fork/trigger 36echo 'hist:keys=common_pid.execname' > events/sched/sched_process_fork/trigger
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
index b2902d42a537..a00184cd9c95 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
@@ -23,15 +23,14 @@ if [ ! -f events/sched/sched_process_fork/trigger ]; then
23 exit_unsupported 23 exit_unsupported
24fi 24fi
25 25
26reset_tracer 26if [ ! -f events/sched/sched_process_fork/hist ]; then
27do_reset
28
29FEATURE=`grep hist events/sched/sched_process_fork/trigger`
30if [ -z "$FEATURE" ]; then
31 echo "hist trigger is not supported" 27 echo "hist trigger is not supported"
32 exit_unsupported 28 exit_unsupported
33fi 29fi
34 30
31reset_tracer
32do_reset
33
35echo "Test histogram basic tigger" 34echo "Test histogram basic tigger"
36 35
37echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger 36echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
index 03c4a46561fc..3478b00ead57 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
@@ -23,15 +23,14 @@ if [ ! -f events/sched/sched_process_fork/trigger ]; then
23 exit_unsupported 23 exit_unsupported
24fi 24fi
25 25
26reset_tracer 26if [ ! -f events/sched/sched_process_fork/hist ]; then
27do_reset
28
29FEATURE=`grep hist events/sched/sched_process_fork/trigger`
30if [ -z "$FEATURE" ]; then
31 echo "hist trigger is not supported" 27 echo "hist trigger is not supported"
32 exit_unsupported 28 exit_unsupported
33fi 29fi
34 30
31reset_tracer
32do_reset
33
35reset_trigger 34reset_trigger
36 35
37echo "Test histogram multiple tiggers" 36echo "Test histogram multiple tiggers"
diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c
index 932ff577ffc0..00c4f65d12da 100644
--- a/tools/testing/selftests/vm/compaction_test.c
+++ b/tools/testing/selftests/vm/compaction_test.c
@@ -136,7 +136,7 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
136 printf("No of huge pages allocated = %d\n", 136 printf("No of huge pages allocated = %d\n",
137 (atoi(nr_hugepages))); 137 (atoi(nr_hugepages)));
138 138
139 if (write(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages)) 139 if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
140 != strlen(initial_nr_hugepages)) { 140 != strlen(initial_nr_hugepages)) {
141 perror("Failed to write to /proc/sys/vm/nr_hugepages\n"); 141 perror("Failed to write to /proc/sys/vm/nr_hugepages\n");
142 goto close_fd; 142 goto close_fd;
diff --git a/tools/virtio/ringtest/Makefile b/tools/virtio/ringtest/Makefile
index 50e086c6a7b6..877a8a4721b6 100644
--- a/tools/virtio/ringtest/Makefile
+++ b/tools/virtio/ringtest/Makefile
@@ -1,6 +1,6 @@
1all: 1all:
2 2
3all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring 3all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring noring
4 4
5CFLAGS += -Wall 5CFLAGS += -Wall
6CFLAGS += -pthread -O2 -ggdb 6CFLAGS += -pthread -O2 -ggdb
@@ -17,6 +17,7 @@ virtio_ring_0_9: virtio_ring_0_9.o main.o
17virtio_ring_poll: virtio_ring_poll.o main.o 17virtio_ring_poll: virtio_ring_poll.o main.o
18virtio_ring_inorder: virtio_ring_inorder.o main.o 18virtio_ring_inorder: virtio_ring_inorder.o main.o
19ptr_ring: ptr_ring.o main.o 19ptr_ring: ptr_ring.o main.o
20noring: noring.o main.o
20clean: 21clean:
21 -rm main.o 22 -rm main.o
22 -rm ring.o ring 23 -rm ring.o ring
@@ -24,5 +25,6 @@ clean:
24 -rm virtio_ring_poll.o virtio_ring_poll 25 -rm virtio_ring_poll.o virtio_ring_poll
25 -rm virtio_ring_inorder.o virtio_ring_inorder 26 -rm virtio_ring_inorder.o virtio_ring_inorder
26 -rm ptr_ring.o ptr_ring 27 -rm ptr_ring.o ptr_ring
28 -rm noring.o noring
27 29
28.PHONY: all clean 30.PHONY: all clean
diff --git a/tools/virtio/ringtest/README b/tools/virtio/ringtest/README
index 34e94c46104f..d83707a336c9 100644
--- a/tools/virtio/ringtest/README
+++ b/tools/virtio/ringtest/README
@@ -1,2 +1,6 @@
1Partial implementation of various ring layouts, useful to tune virtio design. 1Partial implementation of various ring layouts, useful to tune virtio design.
2Uses shared memory heavily. 2Uses shared memory heavily.
3
4Typical use:
5
6# sh run-on-all.sh perf stat -r 10 --log-fd 1 -- ./ring
diff --git a/tools/virtio/ringtest/noring.c b/tools/virtio/ringtest/noring.c
new file mode 100644
index 000000000000..eda2f4824130
--- /dev/null
+++ b/tools/virtio/ringtest/noring.c
@@ -0,0 +1,69 @@
1#define _GNU_SOURCE
2#include "main.h"
3#include <assert.h>
4
5/* stub implementation: useful for measuring overhead */
6void alloc_ring(void)
7{
8}
9
10/* guest side */
11int add_inbuf(unsigned len, void *buf, void *datap)
12{
13 return 0;
14}
15
16/*
17 * skb_array API provides no way for producer to find out whether a given
18 * buffer was consumed. Our tests merely require that a successful get_buf
19 * implies that add_inbuf succeed in the past, and that add_inbuf will succeed,
20 * fake it accordingly.
21 */
22void *get_buf(unsigned *lenp, void **bufp)
23{
24 return "Buffer";
25}
26
27void poll_used(void)
28{
29}
30
31void disable_call()
32{
33 assert(0);
34}
35
36bool enable_call()
37{
38 assert(0);
39}
40
41void kick_available(void)
42{
43 assert(0);
44}
45
46/* host side */
47void disable_kick()
48{
49 assert(0);
50}
51
52bool enable_kick()
53{
54 assert(0);
55}
56
57void poll_avail(void)
58{
59}
60
61bool use_buf(unsigned *lenp, void **bufp)
62{
63 return true;
64}
65
66void call_used(void)
67{
68 assert(0);
69}
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c
index 74abd746ae91..68e4f9f0da3a 100644
--- a/tools/virtio/ringtest/ptr_ring.c
+++ b/tools/virtio/ringtest/ptr_ring.c
@@ -17,6 +17,11 @@
17typedef pthread_spinlock_t spinlock_t; 17typedef pthread_spinlock_t spinlock_t;
18 18
19typedef int gfp_t; 19typedef int gfp_t;
20static void *kmalloc(unsigned size, gfp_t gfp)
21{
22 return memalign(64, size);
23}
24
20static void *kzalloc(unsigned size, gfp_t gfp) 25static void *kzalloc(unsigned size, gfp_t gfp)
21{ 26{
22 void *p = memalign(64, size); 27 void *p = memalign(64, size);
diff --git a/tools/virtio/ringtest/run-on-all.sh b/tools/virtio/ringtest/run-on-all.sh
index 52b0f71ffa8d..2e69ca812b4c 100755
--- a/tools/virtio/ringtest/run-on-all.sh
+++ b/tools/virtio/ringtest/run-on-all.sh
@@ -3,10 +3,10 @@
3#use last CPU for host. Why not the first? 3#use last CPU for host. Why not the first?
4#many devices tend to use cpu0 by default so 4#many devices tend to use cpu0 by default so
5#it tends to be busier 5#it tends to be busier
6HOST_AFFINITY=$(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n|tail -1) 6HOST_AFFINITY=$(lscpu -p=cpu | tail -1)
7 7
8#run command on all cpus 8#run command on all cpus
9for cpu in $(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n); 9for cpu in $(seq 0 $HOST_AFFINITY)
10do 10do
11 #Don't run guest and host on same CPU 11 #Don't run guest and host on same CPU
12 #It actually works ok if using signalling 12 #It actually works ok if using signalling
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index 1889163f2f05..7cf6e1769903 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -492,7 +492,7 @@ static void slab_stats(struct slabinfo *s)
492 s->deactivate_to_head + s->deactivate_to_tail + s->deactivate_bypass; 492 s->deactivate_to_head + s->deactivate_to_tail + s->deactivate_bypass;
493 493
494 if (total) { 494 if (total) {
495 printf("\nSlab Deactivation Ocurrences %%\n"); 495 printf("\nSlab Deactivation Occurrences %%\n");
496 printf("-------------------------------------------------\n"); 496 printf("-------------------------------------------------\n");
497 printf("Slab full %7lu %3lu%%\n", 497 printf("Slab full %7lu %3lu%%\n",
498 s->deactivate_full, (s->deactivate_full * 100) / total); 498 s->deactivate_full, (s->deactivate_full * 100) / total);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 02e98f3131bd..48bd520fc702 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2941,7 +2941,7 @@ static long kvm_vm_ioctl(struct file *filp,
2941 if (copy_from_user(&routing, argp, sizeof(routing))) 2941 if (copy_from_user(&routing, argp, sizeof(routing)))
2942 goto out; 2942 goto out;
2943 r = -EINVAL; 2943 r = -EINVAL;
2944 if (routing.nr >= KVM_MAX_IRQ_ROUTES) 2944 if (routing.nr > KVM_MAX_IRQ_ROUTES)
2945 goto out; 2945 goto out;
2946 if (routing.flags) 2946 if (routing.flags)
2947 goto out; 2947 goto out;