summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/input/atmel,maxtouch.txt7
-rw-r--r--Documentation/devicetree/bindings/net/meson-dwmac.txt1
-rw-r--r--Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/mvebu-uart.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,sci-serial.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/usb-xhci.txt5
-rw-r--r--Documentation/driver-api/firmware/request_firmware.rst16
-rw-r--r--Documentation/driver-api/infrastructure.rst2
-rw-r--r--Documentation/driver-api/usb/typec.rst2
-rw-r--r--Documentation/i2c/dev-interface32
-rw-r--r--Documentation/ioctl/ioctl-number.txt2
-rw-r--r--Documentation/networking/ip-sysctl.txt28
-rw-r--r--Documentation/power/suspend-and-cpuhotplug.txt2
-rw-r--r--Documentation/process/magic-number.rst3
-rw-r--r--Documentation/trace/ftrace.rst14
-rw-r--r--Documentation/virtual/kvm/api.txt9
-rw-r--r--Documentation/virtual/kvm/arm/psci.txt30
-rw-r--r--MAINTAINERS49
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/dts/gemini-nas4220b.dts28
-rw-r--r--arch/arm/boot/dts/omap4.dtsi8
-rw-r--r--arch/arm/configs/gemini_defconfig27
-rw-r--r--arch/arm/configs/socfpga_defconfig1
-rw-r--r--arch/arm/include/asm/kvm_host.h3
-rw-r--r--arch/arm/include/uapi/asm/kvm.h6
-rw-r--r--arch/arm/kvm/guest.c13
-rw-r--r--arch/arm/mach-omap2/Makefile6
-rw-r--r--arch/arm/mach-omap2/pm-asm-offsets.c3
-rw-r--r--arch/arm/mach-omap2/sleep33xx.S1
-rw-r--r--arch/arm/mach-omap2/sleep43xx.S1
-rw-r--r--arch/arm/mach-s3c24xx/mach-jive.c4
-rw-r--r--arch/arm64/Makefile4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts12
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl.dtsi61
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm.dtsi17
-rw-r--r--arch/arm64/boot/dts/arm/juno-motherboard.dtsi2
-rw-r--r--arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi80
-rw-r--r--arch/arm64/include/asm/kvm_host.h3
-rw-r--r--arch/arm64/include/asm/module.h2
-rw-r--r--arch/arm64/include/asm/pgtable.h4
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h6
-rw-r--r--arch/arm64/kernel/cpufeature.c1
-rw-r--r--arch/arm64/kernel/module-plts.c2
-rw-r--r--arch/arm64/kernel/module.c2
-rw-r--r--arch/arm64/kernel/ptrace.c20
-rw-r--r--arch/arm64/kernel/traps.c3
-rw-r--r--arch/arm64/kvm/guest.c14
-rw-r--r--arch/arm64/kvm/sys_regs.c6
-rw-r--r--arch/arm64/lib/Makefile4
-rw-r--r--arch/arm64/mm/flush.c2
-rw-r--r--arch/hexagon/include/asm/io.h6
-rw-r--r--arch/hexagon/lib/checksum.c1
-rw-r--r--arch/parisc/Makefile3
-rw-r--r--arch/parisc/kernel/drivers.c7
-rw-r--r--arch/parisc/kernel/pci.c2
-rw-r--r--arch/parisc/kernel/time.c2
-rw-r--r--arch/parisc/kernel/traps.c11
-rw-r--r--arch/parisc/mm/init.c2
-rw-r--r--arch/powerpc/include/asm/powernv.h2
-rw-r--r--arch/powerpc/kernel/mce_power.c7
-rw-r--r--arch/powerpc/kernel/smp.c49
-rw-r--r--arch/powerpc/kvm/booke.c7
-rw-r--r--arch/powerpc/mm/mem.c2
-rw-r--r--arch/powerpc/platforms/powernv/memtrace.c17
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c88
-rw-r--r--arch/powerpc/platforms/powernv/opal-rtc.c8
-rw-r--r--arch/riscv/Kconfig4
-rw-r--r--arch/riscv/include/asm/Kbuild1
-rw-r--r--arch/riscv/kernel/vdso/Makefile2
-rw-r--r--arch/s390/include/asm/thread_info.h3
-rw-r--r--arch/s390/kernel/module.c4
-rw-r--r--arch/s390/kernel/perf_cpum_cf_events.c8
-rw-r--r--arch/s390/kernel/process.c10
-rw-r--r--arch/s390/kernel/uprobes.c9
-rw-r--r--arch/sparc/include/uapi/asm/oradax.h2
-rw-r--r--arch/sparc/kernel/vio.c2
-rw-r--r--arch/x86/Kconfig4
-rw-r--r--arch/x86/entry/entry_64_compat.S8
-rw-r--r--arch/x86/events/intel/core.c9
-rw-r--r--arch/x86/include/asm/cpufeatures.h1
-rw-r--r--arch/x86/include/asm/ftrace.h19
-rw-r--r--arch/x86/include/asm/irq_vectors.h7
-rw-r--r--arch/x86/include/asm/jailhouse_para.h2
-rw-r--r--arch/x86/include/asm/pgtable.h5
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h8
-rw-r--r--arch/x86/include/uapi/asm/msgbuf.h31
-rw-r--r--arch/x86/include/uapi/asm/shmbuf.h42
-rw-r--r--arch/x86/kernel/cpu/intel.c3
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c6
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c2
-rw-r--r--arch/x86/kernel/jailhouse.c2
-rw-r--r--arch/x86/kernel/setup.c6
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kvm/vmx.c14
-rw-r--r--arch/x86/kvm/x86.h7
-rw-r--r--arch/x86/mm/pageattr.c44
-rw-r--r--arch/x86/mm/pti.c26
-rw-r--r--arch/x86/net/bpf_jit_comp.c6
-rw-r--r--block/bfq-iosched.c10
-rw-r--r--block/blk-cgroup.c28
-rw-r--r--block/blk-core.c15
-rw-r--r--block/blk-mq.c41
-rw-r--r--block/blk-mq.h3
-rw-r--r--crypto/api.c11
-rw-r--r--crypto/drbg.c2
-rw-r--r--drivers/acpi/acpi_video.c27
-rw-r--r--drivers/acpi/acpi_watchdog.c59
-rw-r--r--drivers/acpi/button.c24
-rw-r--r--drivers/acpi/scan.c2
-rw-r--r--drivers/acpi/sleep.c13
-rw-r--r--drivers/amba/bus.c17
-rw-r--r--drivers/android/binder.c8
-rw-r--r--drivers/base/dma-coherent.c5
-rw-r--r--drivers/base/dma-mapping.c6
-rw-r--r--drivers/base/firmware_loader/fallback.c4
-rw-r--r--drivers/base/firmware_loader/fallback.h2
-rw-r--r--drivers/block/loop.c64
-rw-r--r--drivers/block/loop.h1
-rw-r--r--drivers/block/swim.c49
-rw-r--r--drivers/block/swim3.c6
-rw-r--r--drivers/bus/Kconfig1
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/char/random.c48
-rw-r--r--drivers/char/virtio_console.c157
-rw-r--r--drivers/connector/cn_proc.c4
-rw-r--r--drivers/cpufreq/Kconfig.arm10
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c323
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c14
-rw-r--r--drivers/firmware/arm_scmi/clock.c2
-rw-r--r--drivers/fpga/altera-ps-spi.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c17
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c10
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c54
-rw-r--r--drivers/gpu/drm/drm_edid.c11
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c16
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h4
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c2
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c11
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c1
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_format.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_kms.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c16
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c109
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c28
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c3
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c11
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c20
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h5
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h1
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c18
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c55
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c4
-rw-r--r--drivers/hwmon/k10temp.c17
-rw-r--r--drivers/hwmon/nct6683.c4
-rw-r--r--drivers/hwmon/scmi-hwmon.c5
-rw-r--r--drivers/i2c/busses/Kconfig3
-rw-r--r--drivers/i2c/busses/i2c-sprd.c22
-rw-r--r--drivers/i2c/i2c-dev.c2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c21
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h2
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c2
-rw-r--r--drivers/input/evdev.c7
-rw-r--r--drivers/input/input-leds.c8
-rw-r--r--drivers/input/mouse/alps.c2
-rw-r--r--drivers/input/rmi4/rmi_spi.c7
-rw-r--r--drivers/input/touchscreen/Kconfig2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c200
-rw-r--r--drivers/memory/emif-asm-offsets.c72
-rw-r--r--drivers/message/fusion/mptsas.c1
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c33
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c9
-rw-r--r--drivers/mtd/nand/core.c3
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c25
-rw-r--r--drivers/mtd/nand/raw/tango_nand.c2
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c19
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/dsa/b53/b53_common.c81
-rw-r--r--drivers/net/dsa/b53/b53_priv.h6
-rw-r--r--drivers/net/dsa/bcm_sf2.c1
-rw-r--r--drivers/net/dsa/dsa_loop.c12
-rw-r--r--drivers/net/dsa/lan9303-core.c11
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c11
-rw-r--r--drivers/net/dsa/mt7530.c11
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c63
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h4
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c62
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h25
-rw-r--r--drivers/net/dsa/qca8k.c10
-rw-r--r--drivers/net/ethernet/3com/3c59x.c62
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c18
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c147
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c166
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c124
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h23
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c40
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c19
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h17
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c510
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c715
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c342
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c259
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h10
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c12
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h16
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h77
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/srq.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h12
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h15
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c167
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c5
-rw-r--r--drivers/net/ethernet/intel/e100.c28
-rw-r--r--drivers/net/ethernet/intel/e1000/Makefile26
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h29
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c23
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c28
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.h28
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c28
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_osdep.h29
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_param.c28
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/Makefile27
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h21
-rw-r--r--drivers/net/ethernet/intel/fm10k/Makefile23
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.h20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_iov.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.h20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.h20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_tlv.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_tlv.h20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_type.h20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.h20
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h33
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_alloc.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c32
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c63
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c117
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c37
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c34
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c54
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c27
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c160
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c27
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c71
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_status.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_trace.h23
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c32
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h34
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c111
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/Makefile26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_alloc.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c27
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_devids.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_hmc.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_osdep.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_register.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_status.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_trace.h23
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c30
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h36
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_client.c6
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_client.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c33
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c51
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c37
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile28
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h22
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c22
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h23
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h23
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c23
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c23
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c23
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c19
-rw-r--r--drivers/net/ethernet/intel/igbvf/Makefile28
-rw-r--r--drivers/net/ethernet/intel/igbvf/defines.h26
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c26
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h26
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.c26
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.h26
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c26
-rw-r--r--drivers/net/ethernet/intel/igbvf/regs.h26
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c26
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.h26
-rw-r--r--drivers/net/ethernet/intel/ixgb/Makefile27
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb.h28
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ee.c29
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ee.h28
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c29
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.c29
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.h28
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ids.h28
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c29
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_osdep.h28
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_param.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h27
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_model.h26
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h24
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c26
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/Makefile28
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h26
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c27
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h26
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c27
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c27
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h26
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/regs.h26
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c27
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h26
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c197
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c278
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c562
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h31
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c148
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h74
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c95
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h43
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c10
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h5
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app_nic.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c2
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c747
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c34
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c5
-rw-r--r--drivers/net/ethernet/sfc/rx.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c120
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw.c2
-rw-r--r--drivers/net/hyperv/netvsc.c58
-rw-r--r--drivers/net/phy/bcm-phy-lib.c6
-rw-r--r--drivers/net/phy/marvell.c5
-rw-r--r--drivers/net/phy/micrel.c5
-rw-r--r--drivers/net/phy/smsc.c5
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/lan78xx.c110
-rw-r--r--drivers/net/usb/qmi_wwan.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c111
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h5
-rw-r--r--drivers/of/fdt.c7
-rw-r--r--drivers/parisc/ccio-dma.c2
-rw-r--r--drivers/pci/dwc/pcie-kirin.c2
-rw-r--r--drivers/pci/host/pci-aardvark.c53
-rw-r--r--drivers/pci/pci-driver.c5
-rw-r--r--drivers/pci/pci.c4
-rw-r--r--drivers/ptp/ptp_pch.c7
-rw-r--r--drivers/rtc/rtc-opal.c37
-rw-r--r--drivers/s390/block/dasd_alias.c13
-rw-r--r--drivers/s390/cio/chsc.c14
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c19
-rw-r--r--drivers/s390/net/lcs.c3
-rw-r--r--drivers/s390/net/qeth_core.h61
-rw-r--r--drivers/s390/net/qeth_core_main.c148
-rw-r--r--drivers/s390/net/qeth_core_mpc.h2
-rw-r--r--drivers/s390/net/qeth_core_sys.c2
-rw-r--r--drivers/s390/net/qeth_l2_main.c98
-rw-r--r--drivers/s390/net/qeth_l3_main.c207
-rw-r--r--drivers/sbus/char/oradax.c2
-rw-r--r--drivers/scsi/fnic/fnic_trace.c2
-rw-r--r--drivers/scsi/isci/port_config.c3
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c6
-rw-r--r--drivers/scsi/scsi_debug.c33
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c29
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/sd_zbc.c140
-rw-r--r--drivers/scsi/storvsc_drv.c7
-rw-r--r--drivers/scsi/ufs/ufshcd.c40
-rw-r--r--drivers/slimbus/messaging.c2
-rw-r--r--drivers/soc/bcm/raspberrypi-power.c2
-rw-r--r--drivers/staging/wilc1000/host_interface.c2
-rw-r--r--drivers/target/target_core_iblock.c8
-rw-r--r--drivers/target/target_core_pscsi.c2
-rw-r--r--drivers/tty/n_gsm.c23
-rw-r--r--drivers/tty/serial/earlycon.c6
-rw-r--r--drivers/tty/serial/imx.c19
-rw-r--r--drivers/tty/serial/mvebu-uart.c1
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c10
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/tty_io.c5
-rw-r--r--drivers/tty/tty_ldisc.c29
-rw-r--r--drivers/uio/uio_hv_generic.c72
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/core/hcd.c19
-rw-r--r--drivers/usb/core/hub.c10
-rw-r--r--drivers/usb/core/phy.c93
-rw-r--r--drivers/usb/core/phy.h22
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/host/xhci-dbgtty.c8
-rw-r--r--drivers/usb/host/xhci-pci.c5
-rw-r--r--drivers/usb/host/xhci-plat.c32
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/musb/musb_dsps.c2
-rw-r--r--drivers/usb/musb/musb_host.c1
-rw-r--r--drivers/usb/serial/Kconfig1
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/usb-serial-simple.c7
-rw-r--r--drivers/usb/typec/ucsi/Makefile2
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c2
-rw-r--r--drivers/usb/usbip/stub_main.c5
-rw-r--r--drivers/usb/usbip/usbip_common.h2
-rw-r--r--drivers/usb/usbip/usbip_event.c4
-rw-r--r--drivers/usb/usbip/vhci_hcd.c13
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c70
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.h9
-rw-r--r--drivers/virt/vboxguest/vboxguest_linux.c19
-rw-r--r--drivers/virt/vboxguest/vboxguest_utils.c17
-rw-r--r--fs/ceph/xattr.c28
-rw-r--r--fs/cifs/cifssmb.c3
-rw-r--r--fs/cifs/connect.c32
-rw-r--r--fs/cifs/smb2ops.c18
-rw-r--r--fs/cifs/smb2pdu.c13
-rw-r--r--fs/cifs/smb2pdu.h2
-rw-r--r--fs/cifs/smbdirect.c36
-rw-r--r--fs/cifs/transport.c9
-rw-r--r--fs/ext4/balloc.c9
-rw-r--r--fs/ext4/extents.c16
-rw-r--r--fs/ext4/super.c1
-rw-r--r--fs/jbd2/transaction.c1
-rw-r--r--fs/xfs/libxfs/xfs_attr.c9
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c4
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c21
-rw-r--r--fs/xfs/xfs_file.c14
-rw-r--r--include/asm-generic/vmlinux.lds.h2
-rw-r--r--include/kvm/arm_psci.h16
-rw-r--r--include/linux/blk-mq.h3
-rw-r--r--include/linux/blkdev.h6
-rw-r--r--include/linux/device.h6
-rw-r--r--include/linux/ethtool.h5
-rw-r--r--include/linux/fsnotify_backend.h4
-rw-r--r--include/linux/hrtimer.h2
-rw-r--r--include/linux/if_bridge.h28
-rw-r--r--include/linux/mlx5/driver.h12
-rw-r--r--include/linux/mlx5/mlx5_ifc.h16
-rw-r--r--include/linux/mlx5/mlx5_ifc_fpga.h77
-rw-r--r--include/linux/mtd/flashchip.h1
-rw-r--r--include/linux/netdev_features.h2
-rw-r--r--include/linux/netdevice.h37
-rw-r--r--include/linux/netfilter/nf_osf.h27
-rw-r--r--include/linux/netfilter_bridge/ebtables.h4
-rw-r--r--include/linux/phy.h46
-rw-r--r--include/linux/serial_core.h21
-rw-r--r--include/linux/skbuff.h1
-rw-r--r--include/linux/stringhash.h4
-rw-r--r--include/linux/tcp.h2
-rw-r--r--include/linux/ti-emif-sram.h75
-rw-r--r--include/linux/timekeeper_internal.h2
-rw-r--r--include/linux/timekeeping.h37
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/u64_stats_sync.h14
-rw-r--r--include/linux/vbox_utils.h23
-rw-r--r--include/linux/virtio.h3
-rw-r--r--include/net/dsa.h12
-rw-r--r--include/net/inet_connection_sock.h2
-rw-r--r--include/net/ip6_fib.h6
-rw-r--r--include/net/ip6_route.h21
-rw-r--r--include/net/ip_vs.h1
-rw-r--r--include/net/ipv6.h2
-rw-r--r--include/net/netfilter/ipv4/nf_nat_masquerade.h2
-rw-r--r--include/net/netfilter/ipv6/nf_nat_masquerade.h2
-rw-r--r--include/net/netfilter/nf_flow_table.h24
-rw-r--r--include/net/netfilter/nf_nat.h2
-rw-r--r--include/net/netfilter/nf_nat_l3proto.h28
-rw-r--r--include/net/netfilter/nf_nat_l4proto.h8
-rw-r--r--include/net/netfilter/nf_nat_redirect.h2
-rw-r--r--include/net/netfilter/nf_tables.h53
-rw-r--r--include/net/netfilter/nf_tables_core.h3
-rw-r--r--include/net/netfilter/nfnetlink_log.h17
-rw-r--r--include/net/netfilter/nft_meta.h44
-rw-r--r--include/net/sctp/constants.h5
-rw-r--r--include/net/sctp/sctp.h52
-rw-r--r--include/net/sctp/sm.h2
-rw-r--r--include/net/sctp/structs.h2
-rw-r--r--include/net/sock.h21
-rw-r--r--include/net/switchdev.h1
-rw-r--r--include/net/tcp.h8
-rw-r--r--include/net/tls.h121
-rw-r--r--include/scsi/scsi_dbg.h2
-rw-r--r--include/soc/bcm2835/raspberrypi-firmware.h4
-rw-r--r--include/sound/control.h7
-rw-r--r--include/trace/events/initcall.h14
-rw-r--r--include/trace/events/ufs.h27
-rw-r--r--include/trace/events/workqueue.h2
-rw-r--r--include/uapi/linux/cn_proc.h4
-rw-r--r--include/uapi/linux/kvm.h7
-rw-r--r--include/uapi/linux/netfilter/nf_nat.h12
-rw-r--r--include/uapi/linux/netfilter/nf_osf.h90
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h8
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_conntrack.h1
-rw-r--r--include/uapi/linux/netfilter/xt_osf.h106
-rw-r--r--include/uapi/linux/netfilter_bridge/ebtables.h6
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6t_srh.h43
-rw-r--r--include/uapi/linux/sysctl.h18
-rw-r--r--include/uapi/linux/tcp.h11
-rw-r--r--include/uapi/linux/time.h1
-rw-r--r--include/uapi/linux/tipc.h12
-rw-r--r--include/uapi/linux/virtio_balloon.h15
-rw-r--r--kernel/bpf/sockmap.c48
-rw-r--r--kernel/events/uprobes.c7
-rw-r--r--kernel/kprobes.c2
-rw-r--r--kernel/module.c3
-rw-r--r--kernel/sysctl_binary.c20
-rw-r--r--kernel/time/hrtimer.c16
-rw-r--r--kernel/time/posix-stubs.c2
-rw-r--r--kernel/time/posix-timers.c26
-rw-r--r--kernel/time/tick-common.c15
-rw-r--r--kernel/time/tick-internal.h6
-rw-r--r--kernel/time/tick-sched.c19
-rw-r--r--kernel/time/timekeeping.c78
-rw-r--r--kernel/time/timekeeping.h1
-rw-r--r--kernel/trace/trace.c2
-rw-r--r--kernel/trace/trace_entries.h2
-rw-r--r--kernel/trace/trace_events_filter.c14
-rw-r--r--kernel/trace/trace_events_hist.c12
-rw-r--r--kernel/trace/trace_uprobe.c35
-rw-r--r--kernel/tracepoint.c4
-rw-r--r--lib/dma-direct.c3
-rw-r--r--lib/errseq.c23
-rw-r--r--lib/kobject.c11
-rw-r--r--lib/kobject_uevent.c178
-rw-r--r--lib/swiotlb.c2
-rw-r--r--mm/mmap.c11
-rw-r--r--net/Kconfig3
-rw-r--r--net/bridge/br.c16
-rw-r--r--net/bridge/br_fdb.c69
-rw-r--r--net/bridge/br_forward.c3
-rw-r--r--net/bridge/br_if.c15
-rw-r--r--net/bridge/br_private.h19
-rw-r--r--net/bridge/br_switchdev.c12
-rw-r--r--net/bridge/br_vlan.c39
-rw-r--r--net/bridge/netfilter/Kconfig7
-rw-r--r--net/bridge/netfilter/Makefile1
-rw-r--r--net/bridge/netfilter/ebtables.c63
-rw-r--r--net/bridge/netfilter/nft_meta_bridge.c135
-rw-r--r--net/ceph/messenger.c7
-rw-r--r--net/ceph/mon_client.c14
-rw-r--r--net/compat.c6
-rw-r--r--net/core/dev.c18
-rw-r--r--net/core/ethtool.c67
-rw-r--r--net/core/skbuff.c9
-rw-r--r--net/dccp/ccids/ccid2.c14
-rw-r--r--net/dccp/timer.c2
-rw-r--r--net/dsa/master.c62
-rw-r--r--net/dsa/port.c90
-rw-r--r--net/dsa/slave.c10
-rw-r--r--net/ipv4/af_inet.c2
-rw-r--r--net/ipv4/ip_gre.c6
-rw-r--r--net/ipv4/netfilter/ip_tables.c2
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c2
-rw-r--r--net/ipv4/netfilter/iptable_nat.c3
-rw-r--r--net/ipv4/netfilter/nf_flow_table_ipv4.c255
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_l3proto_ipv4.c18
-rw-r--r--net/ipv4/netfilter/nf_nat_masquerade_ipv4.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_gre.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_icmp.c2
-rw-r--r--net/ipv4/netfilter/nft_chain_nat_ipv4.c3
-rw-r--r--net/ipv4/netfilter/nft_masq_ipv4.c2
-rw-r--r--net/ipv4/route.c118
-rw-r--r--net/ipv4/tcp.c244
-rw-r--r--net/ipv4/tcp_bbr.c4
-rw-r--r--net/ipv4/tcp_input.c25
-rw-r--r--net/ipv4/tcp_output.c8
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/ipv4/udp_offload.c2
-rw-r--r--net/ipv6/af_inet6.c2
-rw-r--r--net/ipv6/ip6_fib.c26
-rw-r--r--net/ipv6/ip6_gre.c8
-rw-r--r--net/ipv6/ip6_output.c22
-rw-r--r--net/ipv6/netfilter/ip6_tables.c1
-rw-r--r--net/ipv6/netfilter/ip6t_MASQUERADE.c2
-rw-r--r--net/ipv6/netfilter/ip6t_srh.c173
-rw-r--r--net/ipv6/netfilter/ip6table_nat.c3
-rw-r--r--net/ipv6/netfilter/nf_flow_table_ipv6.c246
-rw-r--r--net/ipv6/netfilter/nf_nat_l3proto_ipv6.c18
-rw-r--r--net/ipv6/netfilter/nf_nat_masquerade_ipv6.c4
-rw-r--r--net/ipv6/netfilter/nf_nat_proto_icmpv6.c2
-rw-r--r--net/ipv6/netfilter/nft_chain_nat_ipv6.c3
-rw-r--r--net/ipv6/netfilter/nft_masq_ipv6.c2
-rw-r--r--net/ipv6/netfilter/nft_redir_ipv6.c2
-rw-r--r--net/ipv6/route.c19
-rw-r--r--net/ipv6/seg6_iptunnel.c2
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/l2tp/l2tp_debugfs.c20
-rw-r--r--net/l2tp/l2tp_ppp.c21
-rw-r--r--net/netfilter/Kconfig25
-rw-r--r--net/netfilter/Makefile8
-rw-r--r--net/netfilter/ipvs/Kconfig37
-rw-r--r--net/netfilter/ipvs/Makefile1
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_dh.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_mh.c540
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_tcp.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_sh.c3
-rw-r--r--net/netfilter/nf_conntrack_core.c1
-rw-r--r--net/netfilter/nf_conntrack_ftp.c3
-rw-r--r--net/netfilter/nf_conntrack_irc.c6
-rw-r--r--net/netfilter/nf_conntrack_netlink.c3
-rw-r--r--net/netfilter/nf_conntrack_sane.c3
-rw-r--r--net/netfilter/nf_conntrack_sip.c2
-rw-r--r--net/netfilter/nf_conntrack_tftp.c2
-rw-r--r--net/netfilter/nf_flow_table_core.c (renamed from net/netfilter/nf_flow_table.c)309
-rw-r--r--net/netfilter/nf_flow_table_inet.c3
-rw-r--r--net/netfilter/nf_flow_table_ip.c487
-rw-r--r--net/netfilter/nf_nat_core.c27
-rw-r--r--net/netfilter/nf_nat_helper.c2
-rw-r--r--net/netfilter/nf_nat_proto_common.c9
-rw-r--r--net/netfilter/nf_nat_proto_dccp.c2
-rw-r--r--net/netfilter/nf_nat_proto_sctp.c2
-rw-r--r--net/netfilter/nf_nat_proto_tcp.c2
-rw-r--r--net/netfilter/nf_nat_proto_udp.c4
-rw-r--r--net/netfilter/nf_nat_proto_unknown.c2
-rw-r--r--net/netfilter/nf_nat_redirect.c6
-rw-r--r--net/netfilter/nf_nat_sip.c2
-rw-r--r--net/netfilter/nf_osf.c218
-rw-r--r--net/netfilter/nf_tables_api.c624
-rw-r--r--net/netfilter/nf_tables_core.c3
-rw-r--r--net/netfilter/nfnetlink_log.c8
-rw-r--r--net/netfilter/nft_dynset.c7
-rw-r--r--net/netfilter/nft_exthdr.c23
-rw-r--r--net/netfilter/nft_flow_offload.c5
-rw-r--r--net/netfilter/nft_hash.c2
-rw-r--r--net/netfilter/nft_meta.c112
-rw-r--r--net/netfilter/nft_nat.c2
-rw-r--r--net/netfilter/nft_numgen.c85
-rw-r--r--net/netfilter/nft_objref.c4
-rw-r--r--net/netfilter/nft_rt.c22
-rw-r--r--net/netfilter/nft_set_bitmap.c34
-rw-r--r--net/netfilter/nft_set_hash.c153
-rw-r--r--net/netfilter/nft_set_rbtree.c36
-rw-r--r--net/netfilter/xt_NETMAP.c8
-rw-r--r--net/netfilter/xt_NFLOG.c15
-rw-r--r--net/netfilter/xt_REDIRECT.c2
-rw-r--r--net/netfilter/xt_nat.c72
-rw-r--r--net/netfilter/xt_osf.c202
-rw-r--r--net/openvswitch/conntrack.c4
-rw-r--r--net/qrtr/Kconfig7
-rw-r--r--net/qrtr/Makefile2
-rw-r--r--net/qrtr/tun.c161
-rw-r--r--net/rds/recv.c1
-rw-r--r--net/sched/act_csum.c6
-rw-r--r--net/sched/cls_flower.c275
-rw-r--r--net/sched/sch_fq.c37
-rw-r--r--net/sctp/associola.c60
-rw-r--r--net/sctp/chunk.c12
-rw-r--r--net/sctp/inqueue.c2
-rw-r--r--net/sctp/ipv6.c3
-rw-r--r--net/sctp/output.c28
-rw-r--r--net/sctp/sm_make_chunk.c134
-rw-r--r--net/sctp/sm_statefuns.c8
-rw-r--r--net/sctp/socket.c43
-rw-r--r--net/sctp/stream.c2
-rw-r--r--net/sctp/transport.c37
-rw-r--r--net/smc/af_smc.c232
-rw-r--r--net/smc/smc.h7
-rw-r--r--net/smc/smc_cdc.c2
-rw-r--r--net/smc/smc_cdc.h2
-rw-r--r--net/smc/smc_core.c68
-rw-r--r--net/smc/smc_core.h8
-rw-r--r--net/smc/smc_diag.c39
-rw-r--r--net/smc/smc_llc.c62
-rw-r--r--net/smc/smc_llc.h3
-rw-r--r--net/smc/smc_rx.c200
-rw-r--r--net/smc/smc_rx.h11
-rw-r--r--net/smc/smc_tx.c24
-rw-r--r--net/smc/smc_wr.c1
-rw-r--r--net/tipc/node.c23
-rw-r--r--net/tipc/node.h1
-rw-r--r--net/tipc/socket.c13
-rw-r--r--net/tls/Kconfig10
-rw-r--r--net/tls/Makefile2
-rw-r--r--net/tls/tls_device.c764
-rw-r--r--net/tls/tls_device_fallback.c450
-rw-r--r--net/tls/tls_main.c150
-rw-r--r--net/tls/tls_sw.c138
-rw-r--r--sound/core/control.c2
-rw-r--r--sound/core/pcm_compat.c7
-rw-r--r--sound/core/pcm_native.c30
-rw-r--r--sound/core/seq/oss/seq_oss_event.c15
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c2
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c85
-rw-r--r--sound/core/seq/oss/seq_oss_synth.h3
-rw-r--r--sound/drivers/opl3/opl3_synth.c7
-rw-r--r--sound/firewire/dice/dice-stream.c2
-rw-r--r--sound/firewire/dice/dice.c2
-rw-r--r--sound/pci/asihpi/hpimsginit.c13
-rw-r--r--sound/pci/asihpi/hpioctl.c4
-rw-r--r--sound/pci/hda/hda_hwdep.c12
-rw-r--r--sound/pci/hda/patch_hdmi.c9
-rw-r--r--sound/pci/hda/patch_realtek.c5
-rw-r--r--sound/pci/rme9652/hdspm.c24
-rw-r--r--sound/pci/rme9652/rme9652.c6
-rw-r--r--sound/soc/amd/acp-da7219-max98357a.c2
-rw-r--r--sound/soc/codecs/adau17x1.c26
-rw-r--r--sound/soc/codecs/adau17x1.h3
-rw-r--r--sound/soc/codecs/msm8916-wcd-analog.c9
-rw-r--r--sound/soc/codecs/rt5514.c3
-rw-r--r--sound/soc/fsl/fsl_esai.c7
-rw-r--r--sound/soc/fsl/fsl_ssi.c14
-rw-r--r--sound/soc/intel/Kconfig22
-rw-r--r--sound/soc/omap/omap-dmic.c14
-rw-r--r--sound/soc/sh/rcar/core.c4
-rw-r--r--sound/soc/soc-topology.c14
-rw-r--r--sound/usb/mixer.c7
-rw-r--r--sound/usb/mixer_maps.c3
-rw-r--r--sound/usb/stream.c2
-rw-r--r--sound/usb/usx2y/us122l.c2
-rw-r--r--sound/usb/usx2y/usX2Yhwdep.c2
-rw-r--r--sound/usb/usx2y/usx2yhwdeppcm.c2
-rw-r--r--tools/bpf/bpf_dbg.c7
-rw-r--r--tools/perf/Documentation/perf-mem.txt41
-rw-r--r--tools/perf/arch/s390/util/auxtrace.c1
-rw-r--r--tools/perf/arch/s390/util/header.c18
-rw-r--r--tools/perf/builtin-stat.c40
-rw-r--r--tools/perf/pmu-events/arch/s390/mapfile.csv10
-rw-r--r--tools/perf/tests/attr/test-record-group-sampling3
-rwxr-xr-xtools/perf/tests/shell/record+probe_libc_inet_pton.sh6
-rw-r--r--tools/perf/util/evsel.c18
-rw-r--r--tools/perf/util/evsel.h1
-rw-r--r--tools/perf/util/machine.c30
-rw-r--r--tools/perf/util/parse-events.y8
-rw-r--r--tools/perf/util/pmu.c22
-rw-r--r--tools/testing/selftests/bpf/test_progs.c4
-rw-r--r--tools/testing/selftests/firmware/Makefile1
-rwxr-xr-xtools/testing/selftests/firmware/fw_lib.sh10
-rwxr-xr-xtools/testing/selftests/firmware/fw_run_tests.sh2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc44
-rw-r--r--tools/testing/selftests/lib.mk8
-rw-r--r--tools/testing/selftests/net/Makefile6
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_vlan_aware.sh26
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_vlan_unaware.sh26
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh133
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre.sh161
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bound.sh226
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_changes.sh212
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_flower.sh129
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_gre_lib.sh85
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_neigh.sh115
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_nh.sh127
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh129
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_lib.sh40
-rwxr-xr-xtools/testing/selftests/net/forwarding/router.sh14
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_multipath.sh29
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_actions.sh25
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_chains.sh7
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_flower.sh14
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_shblocks.sh5
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh4
-rw-r--r--tools/testing/selftests/net/tcp_inq.c189
-rw-r--r--tools/testing/selftests/net/tcp_mmap.c66
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/csum.json74
-rw-r--r--tools/testing/selftests/x86/test_syscall_vdso.c35
-rw-r--r--virt/kvm/arm/arm.c15
-rw-r--r--virt/kvm/arm/psci.c60
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c5
-rw-r--r--virt/kvm/arm/vgic/vgic.c22
925 files changed, 17294 insertions, 12248 deletions
diff --git a/Documentation/devicetree/bindings/input/atmel,maxtouch.txt b/Documentation/devicetree/bindings/input/atmel,maxtouch.txt
index 23e3abc3fdef..c88919480d37 100644
--- a/Documentation/devicetree/bindings/input/atmel,maxtouch.txt
+++ b/Documentation/devicetree/bindings/input/atmel,maxtouch.txt
@@ -4,6 +4,13 @@ Required properties:
4- compatible: 4- compatible:
5 atmel,maxtouch 5 atmel,maxtouch
6 6
7 The following compatibles have been used in various products but are
8 deprecated:
9 atmel,qt602240_ts
10 atmel,atmel_mxt_ts
11 atmel,atmel_mxt_tp
12 atmel,mXT224
13
7- reg: The I2C address of the device 14- reg: The I2C address of the device
8 15
9- interrupts: The sink for the touchpad's IRQ output 16- interrupts: The sink for the touchpad's IRQ output
diff --git a/Documentation/devicetree/bindings/net/meson-dwmac.txt b/Documentation/devicetree/bindings/net/meson-dwmac.txt
index 61cada22ae6c..1321bb194ed9 100644
--- a/Documentation/devicetree/bindings/net/meson-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/meson-dwmac.txt
@@ -11,6 +11,7 @@ Required properties on all platforms:
11 - "amlogic,meson8b-dwmac" 11 - "amlogic,meson8b-dwmac"
12 - "amlogic,meson8m2-dwmac" 12 - "amlogic,meson8m2-dwmac"
13 - "amlogic,meson-gxbb-dwmac" 13 - "amlogic,meson-gxbb-dwmac"
14 - "amlogic,meson-axg-dwmac"
14 Additionally "snps,dwmac" and any applicable more 15 Additionally "snps,dwmac" and any applicable more
15 detailed version number described in net/stmmac.txt 16 detailed version number described in net/stmmac.txt
16 should be used. 17 should be used.
diff --git a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
index 8ff65fa632fd..c06c045126fc 100644
--- a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
+++ b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
@@ -21,7 +21,7 @@ Required properties:
21- interrupts : identifier to the device interrupt 21- interrupts : identifier to the device interrupt
22- clocks : a list of phandle + clock-specifier pairs, one for each 22- clocks : a list of phandle + clock-specifier pairs, one for each
23 entry in clock names. 23 entry in clock names.
24- clocks-names : 24- clock-names :
25 * "xtal" for external xtal clock identifier 25 * "xtal" for external xtal clock identifier
26 * "pclk" for the bus core clock, either the clk81 clock or the gate clock 26 * "pclk" for the bus core clock, either the clk81 clock or the gate clock
27 * "baud" for the source of the baudrate generator, can be either the xtal 27 * "baud" for the source of the baudrate generator, can be either the xtal
diff --git a/Documentation/devicetree/bindings/serial/mvebu-uart.txt b/Documentation/devicetree/bindings/serial/mvebu-uart.txt
index 2ae2fee7e023..b7e0e32b9ac6 100644
--- a/Documentation/devicetree/bindings/serial/mvebu-uart.txt
+++ b/Documentation/devicetree/bindings/serial/mvebu-uart.txt
@@ -24,7 +24,7 @@ Required properties:
24 - Must contain two elements for the extended variant of the IP 24 - Must contain two elements for the extended variant of the IP
25 (marvell,armada-3700-uart-ext): "uart-tx" and "uart-rx", 25 (marvell,armada-3700-uart-ext): "uart-tx" and "uart-rx",
26 respectively the UART TX interrupt and the UART RX interrupt. A 26 respectively the UART TX interrupt and the UART RX interrupt. A
27 corresponding interrupts-names property must be defined. 27 corresponding interrupt-names property must be defined.
28 - For backward compatibility reasons, a single element interrupts 28 - For backward compatibility reasons, a single element interrupts
29 property is also supported for the standard variant of the IP, 29 property is also supported for the standard variant of the IP,
30 containing only the UART sum interrupt. This form is deprecated 30 containing only the UART sum interrupt. This form is deprecated
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
index ad962f4ec3aa..a006ea4d065f 100644
--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
@@ -17,6 +17,8 @@ Required properties:
17 - "renesas,scifa-r8a7745" for R8A7745 (RZ/G1E) SCIFA compatible UART. 17 - "renesas,scifa-r8a7745" for R8A7745 (RZ/G1E) SCIFA compatible UART.
18 - "renesas,scifb-r8a7745" for R8A7745 (RZ/G1E) SCIFB compatible UART. 18 - "renesas,scifb-r8a7745" for R8A7745 (RZ/G1E) SCIFB compatible UART.
19 - "renesas,hscif-r8a7745" for R8A7745 (RZ/G1E) HSCIF compatible UART. 19 - "renesas,hscif-r8a7745" for R8A7745 (RZ/G1E) HSCIF compatible UART.
20 - "renesas,scif-r8a77470" for R8A77470 (RZ/G1C) SCIF compatible UART.
21 - "renesas,hscif-r8a77470" for R8A77470 (RZ/G1C) HSCIF compatible UART.
20 - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART. 22 - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
21 - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART. 23 - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART.
22 - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART. 24 - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
index c4c00dff4b56..bd1dd316fb23 100644
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -28,7 +28,10 @@ Required properties:
28 - interrupts: one XHCI interrupt should be described here. 28 - interrupts: one XHCI interrupt should be described here.
29 29
30Optional properties: 30Optional properties:
31 - clocks: reference to a clock 31 - clocks: reference to the clocks
32 - clock-names: mandatory if there is a second clock, in this case
33 the name must be "core" for the first clock and "reg" for the
34 second one
32 - usb2-lpm-disable: indicate if we don't want to enable USB2 HW LPM 35 - usb2-lpm-disable: indicate if we don't want to enable USB2 HW LPM
33 - usb3-lpm-capable: determines if platform is USB3 LPM capable 36 - usb3-lpm-capable: determines if platform is USB3 LPM capable
34 - quirk-broken-port-ped: set if the controller has broken port disable mechanism 37 - quirk-broken-port-ped: set if the controller has broken port disable mechanism
diff --git a/Documentation/driver-api/firmware/request_firmware.rst b/Documentation/driver-api/firmware/request_firmware.rst
index cf4516dfbf96..d5ec95a7195b 100644
--- a/Documentation/driver-api/firmware/request_firmware.rst
+++ b/Documentation/driver-api/firmware/request_firmware.rst
@@ -17,17 +17,17 @@ an error is returned.
17 17
18request_firmware 18request_firmware
19---------------- 19----------------
20.. kernel-doc:: drivers/base/firmware_class.c 20.. kernel-doc:: drivers/base/firmware_loader/main.c
21 :functions: request_firmware 21 :functions: request_firmware
22 22
23request_firmware_direct 23request_firmware_direct
24----------------------- 24-----------------------
25.. kernel-doc:: drivers/base/firmware_class.c 25.. kernel-doc:: drivers/base/firmware_loader/main.c
26 :functions: request_firmware_direct 26 :functions: request_firmware_direct
27 27
28request_firmware_into_buf 28request_firmware_into_buf
29------------------------- 29-------------------------
30.. kernel-doc:: drivers/base/firmware_class.c 30.. kernel-doc:: drivers/base/firmware_loader/main.c
31 :functions: request_firmware_into_buf 31 :functions: request_firmware_into_buf
32 32
33Asynchronous firmware requests 33Asynchronous firmware requests
@@ -41,7 +41,7 @@ in atomic contexts.
41 41
42request_firmware_nowait 42request_firmware_nowait
43----------------------- 43-----------------------
44.. kernel-doc:: drivers/base/firmware_class.c 44.. kernel-doc:: drivers/base/firmware_loader/main.c
45 :functions: request_firmware_nowait 45 :functions: request_firmware_nowait
46 46
47Special optimizations on reboot 47Special optimizations on reboot
@@ -50,12 +50,12 @@ Special optimizations on reboot
50Some devices have an optimization in place to enable the firmware to be 50Some devices have an optimization in place to enable the firmware to be
51retained during system reboot. When such optimizations are used the driver 51retained during system reboot. When such optimizations are used the driver
52author must ensure the firmware is still available on resume from suspend, 52author must ensure the firmware is still available on resume from suspend,
53this can be done with firmware_request_cache() insted of requesting for the 53this can be done with firmware_request_cache() instead of requesting for the
54firmare to be loaded. 54firmware to be loaded.
55 55
56firmware_request_cache() 56firmware_request_cache()
57----------------------- 57------------------------
58.. kernel-doc:: drivers/base/firmware_class.c 58.. kernel-doc:: drivers/base/firmware_loader/main.c
59 :functions: firmware_request_cache 59 :functions: firmware_request_cache
60 60
61request firmware API expected driver use 61request firmware API expected driver use
diff --git a/Documentation/driver-api/infrastructure.rst b/Documentation/driver-api/infrastructure.rst
index 6d9ff316b608..bee1b9a1702f 100644
--- a/Documentation/driver-api/infrastructure.rst
+++ b/Documentation/driver-api/infrastructure.rst
@@ -28,7 +28,7 @@ Device Drivers Base
28.. kernel-doc:: drivers/base/node.c 28.. kernel-doc:: drivers/base/node.c
29 :internal: 29 :internal:
30 30
31.. kernel-doc:: drivers/base/firmware_class.c 31.. kernel-doc:: drivers/base/firmware_loader/main.c
32 :export: 32 :export:
33 33
34.. kernel-doc:: drivers/base/transport_class.c 34.. kernel-doc:: drivers/base/transport_class.c
diff --git a/Documentation/driver-api/usb/typec.rst b/Documentation/driver-api/usb/typec.rst
index feb31946490b..48ff58095f11 100644
--- a/Documentation/driver-api/usb/typec.rst
+++ b/Documentation/driver-api/usb/typec.rst
@@ -210,7 +210,7 @@ If the connector is dual-role capable, there may also be a switch for the data
210role. USB Type-C Connector Class does not supply separate API for them. The 210role. USB Type-C Connector Class does not supply separate API for them. The
211port drivers can use USB Role Class API with those. 211port drivers can use USB Role Class API with those.
212 212
213Illustration of the muxes behind a connector that supports an alternate mode: 213Illustration of the muxes behind a connector that supports an alternate mode::
214 214
215 ------------------------ 215 ------------------------
216 | Connector | 216 | Connector |
diff --git a/Documentation/i2c/dev-interface b/Documentation/i2c/dev-interface
index d04e6e4964ee..fbed645ccd75 100644
--- a/Documentation/i2c/dev-interface
+++ b/Documentation/i2c/dev-interface
@@ -9,8 +9,8 @@ i2c adapters present on your system at a given time. i2cdetect is part of
9the i2c-tools package. 9the i2c-tools package.
10 10
11I2C device files are character device files with major device number 89 11I2C device files are character device files with major device number 89
12and a minor device number corresponding to the number assigned as 12and a minor device number corresponding to the number assigned as
13explained above. They should be called "i2c-%d" (i2c-0, i2c-1, ..., 13explained above. They should be called "i2c-%d" (i2c-0, i2c-1, ...,
14i2c-10, ...). All 256 minor device numbers are reserved for i2c. 14i2c-10, ...). All 256 minor device numbers are reserved for i2c.
15 15
16 16
@@ -23,11 +23,6 @@ First, you need to include these two headers:
23 #include <linux/i2c-dev.h> 23 #include <linux/i2c-dev.h>
24 #include <i2c/smbus.h> 24 #include <i2c/smbus.h>
25 25
26(Please note that there are two files named "i2c-dev.h" out there. One is
27distributed with the Linux kernel and the other one is included in the
28source tree of i2c-tools. They used to be different in content but since 2012
29they're identical. You should use "linux/i2c-dev.h").
30
31Now, you have to decide which adapter you want to access. You should 26Now, you have to decide which adapter you want to access. You should
32inspect /sys/class/i2c-dev/ or run "i2cdetect -l" to decide this. 27inspect /sys/class/i2c-dev/ or run "i2cdetect -l" to decide this.
33Adapter numbers are assigned somewhat dynamically, so you can not 28Adapter numbers are assigned somewhat dynamically, so you can not
@@ -38,7 +33,7 @@ Next thing, open the device file, as follows:
38 int file; 33 int file;
39 int adapter_nr = 2; /* probably dynamically determined */ 34 int adapter_nr = 2; /* probably dynamically determined */
40 char filename[20]; 35 char filename[20];
41 36
42 snprintf(filename, 19, "/dev/i2c-%d", adapter_nr); 37 snprintf(filename, 19, "/dev/i2c-%d", adapter_nr);
43 file = open(filename, O_RDWR); 38 file = open(filename, O_RDWR);
44 if (file < 0) { 39 if (file < 0) {
@@ -72,8 +67,10 @@ the device supports them. Both are illustrated below.
72 /* res contains the read word */ 67 /* res contains the read word */
73 } 68 }
74 69
75 /* Using I2C Write, equivalent of 70 /*
76 i2c_smbus_write_word_data(file, reg, 0x6543) */ 71 * Using I2C Write, equivalent of
72 * i2c_smbus_write_word_data(file, reg, 0x6543)
73 */
77 buf[0] = reg; 74 buf[0] = reg;
78 buf[1] = 0x43; 75 buf[1] = 0x43;
79 buf[2] = 0x65; 76 buf[2] = 0x65;
@@ -140,14 +137,14 @@ ioctl(file, I2C_RDWR, struct i2c_rdwr_ioctl_data *msgset)
140 set in each message, overriding the values set with the above ioctl's. 137 set in each message, overriding the values set with the above ioctl's.
141 138
142ioctl(file, I2C_SMBUS, struct i2c_smbus_ioctl_data *args) 139ioctl(file, I2C_SMBUS, struct i2c_smbus_ioctl_data *args)
143 Not meant to be called directly; instead, use the access functions 140 If possible, use the provided i2c_smbus_* methods described below instead
144 below. 141 of issuing direct ioctls.
145 142
146You can do plain i2c transactions by using read(2) and write(2) calls. 143You can do plain i2c transactions by using read(2) and write(2) calls.
147You do not need to pass the address byte; instead, set it through 144You do not need to pass the address byte; instead, set it through
148ioctl I2C_SLAVE before you try to access the device. 145ioctl I2C_SLAVE before you try to access the device.
149 146
150You can do SMBus level transactions (see documentation file smbus-protocol 147You can do SMBus level transactions (see documentation file smbus-protocol
151for details) through the following functions: 148for details) through the following functions:
152 __s32 i2c_smbus_write_quick(int file, __u8 value); 149 __s32 i2c_smbus_write_quick(int file, __u8 value);
153 __s32 i2c_smbus_read_byte(int file); 150 __s32 i2c_smbus_read_byte(int file);
@@ -158,7 +155,7 @@ for details) through the following functions:
158 __s32 i2c_smbus_write_word_data(int file, __u8 command, __u16 value); 155 __s32 i2c_smbus_write_word_data(int file, __u8 command, __u16 value);
159 __s32 i2c_smbus_process_call(int file, __u8 command, __u16 value); 156 __s32 i2c_smbus_process_call(int file, __u8 command, __u16 value);
160 __s32 i2c_smbus_read_block_data(int file, __u8 command, __u8 *values); 157 __s32 i2c_smbus_read_block_data(int file, __u8 command, __u8 *values);
161 __s32 i2c_smbus_write_block_data(int file, __u8 command, __u8 length, 158 __s32 i2c_smbus_write_block_data(int file, __u8 command, __u8 length,
162 __u8 *values); 159 __u8 *values);
163All these transactions return -1 on failure; you can read errno to see 160All these transactions return -1 on failure; you can read errno to see
164what happened. The 'write' transactions return 0 on success; the 161what happened. The 'write' transactions return 0 on success; the
@@ -166,10 +163,9 @@ what happened. The 'write' transactions return 0 on success; the
166returns the number of values read. The block buffers need not be longer 163returns the number of values read. The block buffers need not be longer
167than 32 bytes. 164than 32 bytes.
168 165
169The above functions are all inline functions, that resolve to calls to 166The above functions are made available by linking against the libi2c library,
170the i2c_smbus_access function, that on its turn calls a specific ioctl 167which is provided by the i2c-tools project. See:
171with the data in a specific format. Read the source code if you 168https://git.kernel.org/pub/scm/utils/i2c-tools/i2c-tools.git/.
172want to know what happens behind the screens.
173 169
174 170
175Implementation details 171Implementation details
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 84bb74dcae12..7f7413e597f3 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -217,7 +217,6 @@ Code Seq#(hex) Include File Comments
217'd' 02-40 pcmcia/ds.h conflict! 217'd' 02-40 pcmcia/ds.h conflict!
218'd' F0-FF linux/digi1.h 218'd' F0-FF linux/digi1.h
219'e' all linux/digi1.h conflict! 219'e' all linux/digi1.h conflict!
220'e' 00-1F drivers/net/irda/irtty-sir.h conflict!
221'f' 00-1F linux/ext2_fs.h conflict! 220'f' 00-1F linux/ext2_fs.h conflict!
222'f' 00-1F linux/ext3_fs.h conflict! 221'f' 00-1F linux/ext3_fs.h conflict!
223'f' 00-0F fs/jfs/jfs_dinode.h conflict! 222'f' 00-0F fs/jfs/jfs_dinode.h conflict!
@@ -247,7 +246,6 @@ Code Seq#(hex) Include File Comments
247'm' all linux/synclink.h conflict! 246'm' all linux/synclink.h conflict!
248'm' 00-19 drivers/message/fusion/mptctl.h conflict! 247'm' 00-19 drivers/message/fusion/mptctl.h conflict!
249'm' 00 drivers/scsi/megaraid/megaraid_ioctl.h conflict! 248'm' 00 drivers/scsi/megaraid/megaraid_ioctl.h conflict!
250'm' 00-1F net/irda/irmod.h conflict!
251'n' 00-7F linux/ncp_fs.h and fs/ncpfs/ioctl.c 249'n' 00-7F linux/ncp_fs.h and fs/ncpfs/ioctl.c
252'n' 80-8F uapi/linux/nilfs2_api.h NILFS2 250'n' 80-8F uapi/linux/nilfs2_api.h NILFS2
253'n' E0-FF linux/matroxfb.h matroxfb 251'n' E0-FF linux/matroxfb.h matroxfb
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index b583a73cf95f..59afc9a10b4f 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1428,6 +1428,19 @@ ip6frag_low_thresh - INTEGER
1428ip6frag_time - INTEGER 1428ip6frag_time - INTEGER
1429 Time in seconds to keep an IPv6 fragment in memory. 1429 Time in seconds to keep an IPv6 fragment in memory.
1430 1430
1431IPv6 Segment Routing:
1432
1433seg6_flowlabel - INTEGER
1434 Controls the behaviour of computing the flowlabel of outer
1435 IPv6 header in case of SR T.encaps
1436
1437 -1 set flowlabel to zero.
1438 0 copy flowlabel from Inner packet in case of Inner IPv6
1439 (Set flowlabel to 0 in case IPv4/L2)
1440 1 Compute the flowlabel using seg6_make_flowlabel()
1441
1442 Default is 0.
1443
1431conf/default/*: 1444conf/default/*:
1432 Change the interface-specific default settings. 1445 Change the interface-specific default settings.
1433 1446
@@ -2126,18 +2139,3 @@ max_dgram_qlen - INTEGER
2126 2139
2127 Default: 10 2140 Default: 10
2128 2141
2129
2130UNDOCUMENTED:
2131
2132/proc/sys/net/irda/*
2133 fast_poll_increase FIXME
2134 warn_noreply_time FIXME
2135 discovery_slots FIXME
2136 slot_timeout FIXME
2137 max_baud_rate FIXME
2138 discovery_timeout FIXME
2139 lap_keepalive_time FIXME
2140 max_noreply_time FIXME
2141 max_tx_data_size FIXME
2142 max_tx_window FIXME
2143 min_tx_turn_time FIXME
diff --git a/Documentation/power/suspend-and-cpuhotplug.txt b/Documentation/power/suspend-and-cpuhotplug.txt
index 31abd04b9572..6f55eb960a6d 100644
--- a/Documentation/power/suspend-and-cpuhotplug.txt
+++ b/Documentation/power/suspend-and-cpuhotplug.txt
@@ -168,7 +168,7 @@ update on the CPUs, as discussed below:
168 168
169[Please bear in mind that the kernel requests the microcode images from 169[Please bear in mind that the kernel requests the microcode images from
170userspace, using the request_firmware() function defined in 170userspace, using the request_firmware() function defined in
171drivers/base/firmware_class.c] 171drivers/base/firmware_loader/main.c]
172 172
173 173
174a. When all the CPUs are identical: 174a. When all the CPUs are identical:
diff --git a/Documentation/process/magic-number.rst b/Documentation/process/magic-number.rst
index 00cecf1fcba9..633be1043690 100644
--- a/Documentation/process/magic-number.rst
+++ b/Documentation/process/magic-number.rst
@@ -157,8 +157,5 @@ memory management. See ``include/sound/sndmagic.h`` for complete list of them. M
157OSS sound drivers have their magic numbers constructed from the soundcard PCI 157OSS sound drivers have their magic numbers constructed from the soundcard PCI
158ID - these are not listed here as well. 158ID - these are not listed here as well.
159 159
160IrDA subsystem also uses large number of own magic numbers, see
161``include/net/irda/irda.h`` for a complete list of them.
162
163HFS is another larger user of magic numbers - you can find them in 160HFS is another larger user of magic numbers - you can find them in
164``fs/hfs/hfs.h``. 161``fs/hfs/hfs.h``.
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index e45f0786f3f9..67d9c38e95eb 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -461,9 +461,17 @@ of ftrace. Here is a list of some of the key files:
461 and ticks at the same rate as the hardware clocksource. 461 and ticks at the same rate as the hardware clocksource.
462 462
463 boot: 463 boot:
464 Same as mono. Used to be a separate clock which accounted 464 This is the boot clock (CLOCK_BOOTTIME) and is based on the
465 for the time spent in suspend while CLOCK_MONOTONIC did 465 fast monotonic clock, but also accounts for time spent in
466 not. 466 suspend. Since the clock access is designed for use in
467 tracing in the suspend path, some side effects are possible
468 if clock is accessed after the suspend time is accounted before
469 the fast mono clock is updated. In this case, the clock update
470 appears to happen slightly sooner than it normally would have.
471 Also on 32-bit systems, it's possible that the 64-bit boot offset
472 sees a partial update. These effects are rare and post
473 processing should be able to handle them. See comments in the
474 ktime_get_boot_fast_ns() function for more information.
467 475
468 To set a clock, simply echo the clock name into this file:: 476 To set a clock, simply echo the clock name into this file::
469 477
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 1c7958b57fe9..758bf403a169 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1960,6 +1960,9 @@ ARM 32-bit VFP control registers have the following id bit patterns:
1960ARM 64-bit FP registers have the following id bit patterns: 1960ARM 64-bit FP registers have the following id bit patterns:
1961 0x4030 0000 0012 0 <regno:12> 1961 0x4030 0000 0012 0 <regno:12>
1962 1962
1963ARM firmware pseudo-registers have the following bit pattern:
1964 0x4030 0000 0014 <regno:16>
1965
1963 1966
1964arm64 registers are mapped using the lower 32 bits. The upper 16 of 1967arm64 registers are mapped using the lower 32 bits. The upper 16 of
1965that is the register group type, or coprocessor number: 1968that is the register group type, or coprocessor number:
@@ -1976,6 +1979,9 @@ arm64 CCSIDR registers are demultiplexed by CSSELR value:
1976arm64 system registers have the following id bit patterns: 1979arm64 system registers have the following id bit patterns:
1977 0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3> 1980 0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
1978 1981
1982arm64 firmware pseudo-registers have the following bit pattern:
1983 0x6030 0000 0014 <regno:16>
1984
1979 1985
1980MIPS registers are mapped using the lower 32 bits. The upper 16 of that is 1986MIPS registers are mapped using the lower 32 bits. The upper 16 of that is
1981the register group type: 1987the register group type:
@@ -2510,7 +2516,8 @@ Possible features:
2510 and execute guest code when KVM_RUN is called. 2516 and execute guest code when KVM_RUN is called.
2511 - KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode. 2517 - KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
2512 Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only). 2518 Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
2513 - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU. 2519 - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 (or a future revision
2520 backward compatible with v0.2) for the CPU.
2514 Depends on KVM_CAP_ARM_PSCI_0_2. 2521 Depends on KVM_CAP_ARM_PSCI_0_2.
2515 - KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU. 2522 - KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
2516 Depends on KVM_CAP_ARM_PMU_V3. 2523 Depends on KVM_CAP_ARM_PMU_V3.
diff --git a/Documentation/virtual/kvm/arm/psci.txt b/Documentation/virtual/kvm/arm/psci.txt
new file mode 100644
index 000000000000..aafdab887b04
--- /dev/null
+++ b/Documentation/virtual/kvm/arm/psci.txt
@@ -0,0 +1,30 @@
1KVM implements the PSCI (Power State Coordination Interface)
2specification in order to provide services such as CPU on/off, reset
3and power-off to the guest.
4
5The PSCI specification is regularly updated to provide new features,
6and KVM implements these updates if they make sense from a virtualization
7point of view.
8
9This means that a guest booted on two different versions of KVM can
10observe two different "firmware" revisions. This could cause issues if
11a given guest is tied to a particular PSCI revision (unlikely), or if
12a migration causes a different PSCI version to be exposed out of the
13blue to an unsuspecting guest.
14
15In order to remedy this situation, KVM exposes a set of "firmware
16pseudo-registers" that can be manipulated using the GET/SET_ONE_REG
17interface. These registers can be saved/restored by userspace, and set
18to a convenient value if required.
19
20The following register is defined:
21
22* KVM_REG_ARM_PSCI_VERSION:
23
24 - Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set
25 (and thus has already been initialized)
26 - Returns the current PSCI version on GET_ONE_REG (defaulting to the
27 highest PSCI version implemented by KVM and compatible with v0.2)
28 - Allows any PSCI version implemented by KVM and compatible with
29 v0.2 to be set with SET_ONE_REG
30 - Affects the whole VM (even if the register view is per-vcpu)
diff --git a/MAINTAINERS b/MAINTAINERS
index 52d246fd29c9..b22be10d5916 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -564,8 +564,9 @@ S: Maintained
564F: drivers/media/dvb-frontends/af9033* 564F: drivers/media/dvb-frontends/af9033*
565 565
566AFFS FILE SYSTEM 566AFFS FILE SYSTEM
567M: David Sterba <dsterba@suse.com>
567L: linux-fsdevel@vger.kernel.org 568L: linux-fsdevel@vger.kernel.org
568S: Orphan 569S: Odd Fixes
569F: Documentation/filesystems/affs.txt 570F: Documentation/filesystems/affs.txt
570F: fs/affs/ 571F: fs/affs/
571 572
@@ -905,6 +906,8 @@ ANDROID ION DRIVER
905M: Laura Abbott <labbott@redhat.com> 906M: Laura Abbott <labbott@redhat.com>
906M: Sumit Semwal <sumit.semwal@linaro.org> 907M: Sumit Semwal <sumit.semwal@linaro.org>
907L: devel@driverdev.osuosl.org 908L: devel@driverdev.osuosl.org
909L: dri-devel@lists.freedesktop.org
910L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
908S: Supported 911S: Supported
909F: drivers/staging/android/ion 912F: drivers/staging/android/ion
910F: drivers/staging/android/uapi/ion.h 913F: drivers/staging/android/uapi/ion.h
@@ -1208,7 +1211,6 @@ F: drivers/*/*alpine*
1208ARM/ARTPEC MACHINE SUPPORT 1211ARM/ARTPEC MACHINE SUPPORT
1209M: Jesper Nilsson <jesper.nilsson@axis.com> 1212M: Jesper Nilsson <jesper.nilsson@axis.com>
1210M: Lars Persson <lars.persson@axis.com> 1213M: Lars Persson <lars.persson@axis.com>
1211M: Niklas Cassel <niklas.cassel@axis.com>
1212S: Maintained 1214S: Maintained
1213L: linux-arm-kernel@axis.com 1215L: linux-arm-kernel@axis.com
1214F: arch/arm/mach-artpec 1216F: arch/arm/mach-artpec
@@ -2617,7 +2619,7 @@ S: Maintained
2617F: drivers/net/hamradio/baycom* 2619F: drivers/net/hamradio/baycom*
2618 2620
2619BCACHE (BLOCK LAYER CACHE) 2621BCACHE (BLOCK LAYER CACHE)
2620M: Michael Lyle <mlyle@lyle.org> 2622M: Coly Li <colyli@suse.de>
2621M: Kent Overstreet <kent.overstreet@gmail.com> 2623M: Kent Overstreet <kent.overstreet@gmail.com>
2622L: linux-bcache@vger.kernel.org 2624L: linux-bcache@vger.kernel.org
2623W: http://bcache.evilpiepirate.org 2625W: http://bcache.evilpiepirate.org
@@ -7409,16 +7411,6 @@ S: Obsolete
7409F: include/uapi/linux/ipx.h 7411F: include/uapi/linux/ipx.h
7410F: drivers/staging/ipx/ 7412F: drivers/staging/ipx/
7411 7413
7412IRDA SUBSYSTEM
7413M: Samuel Ortiz <samuel@sortiz.org>
7414L: irda-users@lists.sourceforge.net (subscribers-only)
7415L: netdev@vger.kernel.org
7416W: http://irda.sourceforge.net/
7417S: Obsolete
7418T: git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/irda-2.6.git
7419F: Documentation/networking/irda.txt
7420F: drivers/staging/irda/
7421
7422IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) 7414IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
7423M: Marc Zyngier <marc.zyngier@arm.com> 7415M: Marc Zyngier <marc.zyngier@arm.com>
7424S: Maintained 7416S: Maintained
@@ -7751,7 +7743,7 @@ F: arch/x86/include/asm/svm.h
7751F: arch/x86/kvm/svm.c 7743F: arch/x86/kvm/svm.c
7752 7744
7753KERNEL VIRTUAL MACHINE FOR ARM (KVM/arm) 7745KERNEL VIRTUAL MACHINE FOR ARM (KVM/arm)
7754M: Christoffer Dall <christoffer.dall@linaro.org> 7746M: Christoffer Dall <christoffer.dall@arm.com>
7755M: Marc Zyngier <marc.zyngier@arm.com> 7747M: Marc Zyngier <marc.zyngier@arm.com>
7756L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 7748L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
7757L: kvmarm@lists.cs.columbia.edu 7749L: kvmarm@lists.cs.columbia.edu
@@ -7765,7 +7757,7 @@ F: virt/kvm/arm/
7765F: include/kvm/arm_* 7757F: include/kvm/arm_*
7766 7758
7767KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64) 7759KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
7768M: Christoffer Dall <christoffer.dall@linaro.org> 7760M: Christoffer Dall <christoffer.dall@arm.com>
7769M: Marc Zyngier <marc.zyngier@arm.com> 7761M: Marc Zyngier <marc.zyngier@arm.com>
7770L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 7762L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
7771L: kvmarm@lists.cs.columbia.edu 7763L: kvmarm@lists.cs.columbia.edu
@@ -9036,26 +9028,17 @@ W: http://www.mellanox.com
9036Q: http://patchwork.ozlabs.org/project/netdev/list/ 9028Q: http://patchwork.ozlabs.org/project/netdev/list/
9037F: drivers/net/ethernet/mellanox/mlx5/core/en_* 9029F: drivers/net/ethernet/mellanox/mlx5/core/en_*
9038 9030
9039MELLANOX ETHERNET INNOVA DRIVER 9031MELLANOX ETHERNET INNOVA DRIVERS
9040M: Ilan Tayari <ilant@mellanox.com> 9032M: Boris Pismenny <borisp@mellanox.com>
9041R: Boris Pismenny <borisp@mellanox.com>
9042L: netdev@vger.kernel.org 9033L: netdev@vger.kernel.org
9043S: Supported 9034S: Supported
9044W: http://www.mellanox.com 9035W: http://www.mellanox.com
9045Q: http://patchwork.ozlabs.org/project/netdev/list/ 9036Q: http://patchwork.ozlabs.org/project/netdev/list/
9037F: drivers/net/ethernet/mellanox/mlx5/core/en_accel/*
9038F: drivers/net/ethernet/mellanox/mlx5/core/accel/*
9046F: drivers/net/ethernet/mellanox/mlx5/core/fpga/* 9039F: drivers/net/ethernet/mellanox/mlx5/core/fpga/*
9047F: include/linux/mlx5/mlx5_ifc_fpga.h 9040F: include/linux/mlx5/mlx5_ifc_fpga.h
9048 9041
9049MELLANOX ETHERNET INNOVA IPSEC DRIVER
9050M: Ilan Tayari <ilant@mellanox.com>
9051R: Boris Pismenny <borisp@mellanox.com>
9052L: netdev@vger.kernel.org
9053S: Supported
9054W: http://www.mellanox.com
9055Q: http://patchwork.ozlabs.org/project/netdev/list/
9056F: drivers/net/ethernet/mellanox/mlx5/core/en_ipsec/*
9057F: drivers/net/ethernet/mellanox/mlx5/core/ipsec*
9058
9059MELLANOX ETHERNET SWITCH DRIVERS 9042MELLANOX ETHERNET SWITCH DRIVERS
9060M: Jiri Pirko <jiri@mellanox.com> 9043M: Jiri Pirko <jiri@mellanox.com>
9061M: Ido Schimmel <idosch@mellanox.com> 9044M: Ido Schimmel <idosch@mellanox.com>
@@ -9731,6 +9714,7 @@ W: https://fedorahosted.org/dropwatch/
9731F: net/core/drop_monitor.c 9714F: net/core/drop_monitor.c
9732 9715
9733NETWORKING DRIVERS 9716NETWORKING DRIVERS
9717M: "David S. Miller" <davem@davemloft.net>
9734L: netdev@vger.kernel.org 9718L: netdev@vger.kernel.org
9735W: http://www.linuxfoundation.org/en/Net 9719W: http://www.linuxfoundation.org/en/Net
9736Q: http://patchwork.ozlabs.org/project/netdev/list/ 9720Q: http://patchwork.ozlabs.org/project/netdev/list/
@@ -9847,7 +9831,7 @@ F: net/netfilter/xt_CONNSECMARK.c
9847F: net/netfilter/xt_SECMARK.c 9831F: net/netfilter/xt_SECMARK.c
9848 9832
9849NETWORKING [TLS] 9833NETWORKING [TLS]
9850M: Ilya Lesokhin <ilyal@mellanox.com> 9834M: Boris Pismenny <borisp@mellanox.com>
9851M: Aviad Yehezkel <aviadye@mellanox.com> 9835M: Aviad Yehezkel <aviadye@mellanox.com>
9852M: Dave Watson <davejwatson@fb.com> 9836M: Dave Watson <davejwatson@fb.com>
9853L: netdev@vger.kernel.org 9837L: netdev@vger.kernel.org
@@ -10907,7 +10891,6 @@ F: drivers/pci/host/
10907F: drivers/pci/dwc/ 10891F: drivers/pci/dwc/
10908 10892
10909PCIE DRIVER FOR AXIS ARTPEC 10893PCIE DRIVER FOR AXIS ARTPEC
10910M: Niklas Cassel <niklas.cassel@axis.com>
10911M: Jesper Nilsson <jesper.nilsson@axis.com> 10894M: Jesper Nilsson <jesper.nilsson@axis.com>
10912L: linux-arm-kernel@axis.com 10895L: linux-arm-kernel@axis.com
10913L: linux-pci@vger.kernel.org 10896L: linux-pci@vger.kernel.org
@@ -12505,6 +12488,7 @@ F: drivers/scsi/st_*.h
12505SCTP PROTOCOL 12488SCTP PROTOCOL
12506M: Vlad Yasevich <vyasevich@gmail.com> 12489M: Vlad Yasevich <vyasevich@gmail.com>
12507M: Neil Horman <nhorman@tuxdriver.com> 12490M: Neil Horman <nhorman@tuxdriver.com>
12491M: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
12508L: linux-sctp@vger.kernel.org 12492L: linux-sctp@vger.kernel.org
12509W: http://lksctp.sourceforge.net 12493W: http://lksctp.sourceforge.net
12510S: Maintained 12494S: Maintained
@@ -13860,7 +13844,6 @@ S: Supported
13860F: drivers/iommu/tegra* 13844F: drivers/iommu/tegra*
13861 13845
13862TEGRA KBC DRIVER 13846TEGRA KBC DRIVER
13863M: Rakesh Iyer <riyer@nvidia.com>
13864M: Laxman Dewangan <ldewangan@nvidia.com> 13847M: Laxman Dewangan <ldewangan@nvidia.com>
13865S: Supported 13848S: Supported
13866F: drivers/input/keyboard/tegra-kbc.c 13849F: drivers/input/keyboard/tegra-kbc.c
@@ -13963,7 +13946,7 @@ THUNDERBOLT DRIVER
13963M: Andreas Noever <andreas.noever@gmail.com> 13946M: Andreas Noever <andreas.noever@gmail.com>
13964M: Michael Jamet <michael.jamet@intel.com> 13947M: Michael Jamet <michael.jamet@intel.com>
13965M: Mika Westerberg <mika.westerberg@linux.intel.com> 13948M: Mika Westerberg <mika.westerberg@linux.intel.com>
13966M: Yehezkel Bernat <yehezkel.bernat@intel.com> 13949M: Yehezkel Bernat <YehezkelShB@gmail.com>
13967T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git 13950T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
13968S: Maintained 13951S: Maintained
13969F: Documentation/admin-guide/thunderbolt.rst 13952F: Documentation/admin-guide/thunderbolt.rst
@@ -13973,7 +13956,7 @@ F: include/linux/thunderbolt.h
13973THUNDERBOLT NETWORK DRIVER 13956THUNDERBOLT NETWORK DRIVER
13974M: Michael Jamet <michael.jamet@intel.com> 13957M: Michael Jamet <michael.jamet@intel.com>
13975M: Mika Westerberg <mika.westerberg@linux.intel.com> 13958M: Mika Westerberg <mika.westerberg@linux.intel.com>
13976M: Yehezkel Bernat <yehezkel.bernat@intel.com> 13959M: Yehezkel Bernat <YehezkelShB@gmail.com>
13977L: netdev@vger.kernel.org 13960L: netdev@vger.kernel.org
13978S: Maintained 13961S: Maintained
13979F: drivers/net/thunderbolt.c 13962F: drivers/net/thunderbolt.c
diff --git a/Makefile b/Makefile
index 83b6c541565a..619a85ad716b 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 4 2VERSION = 4
3PATCHLEVEL = 17 3PATCHLEVEL = 17
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc2 5EXTRAVERSION = -rc3
6NAME = Fearless Coyote 6NAME = Fearless Coyote
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/gemini-nas4220b.dts b/arch/arm/boot/dts/gemini-nas4220b.dts
index 8bbb6f85d161..4785fbcc41ed 100644
--- a/arch/arm/boot/dts/gemini-nas4220b.dts
+++ b/arch/arm/boot/dts/gemini-nas4220b.dts
@@ -134,37 +134,37 @@
134 function = "gmii"; 134 function = "gmii";
135 groups = "gmii_gmac0_grp"; 135 groups = "gmii_gmac0_grp";
136 }; 136 };
137 /* Settings come from OpenWRT */ 137 /* Settings come from OpenWRT, pins on SL3516 */
138 conf0 { 138 conf0 {
139 pins = "R8 GMAC0 RXDV", "U11 GMAC1 RXDV"; 139 pins = "V8 GMAC0 RXDV", "T10 GMAC1 RXDV";
140 skew-delay = <0>; 140 skew-delay = <0>;
141 }; 141 };
142 conf1 { 142 conf1 {
143 pins = "T8 GMAC0 RXC", "T11 GMAC1 RXC"; 143 pins = "Y7 GMAC0 RXC", "Y11 GMAC1 RXC";
144 skew-delay = <15>; 144 skew-delay = <15>;
145 }; 145 };
146 conf2 { 146 conf2 {
147 pins = "P8 GMAC0 TXEN", "V11 GMAC1 TXEN"; 147 pins = "T8 GMAC0 TXEN", "W11 GMAC1 TXEN";
148 skew-delay = <7>; 148 skew-delay = <7>;
149 }; 149 };
150 conf3 { 150 conf3 {
151 pins = "V7 GMAC0 TXC"; 151 pins = "U8 GMAC0 TXC";
152 skew-delay = <11>; 152 skew-delay = <11>;
153 }; 153 };
154 conf4 { 154 conf4 {
155 pins = "P10 GMAC1 TXC"; 155 pins = "V11 GMAC1 TXC";
156 skew-delay = <10>; 156 skew-delay = <10>;
157 }; 157 };
158 conf5 { 158 conf5 {
159 /* The data lines all have default skew */ 159 /* The data lines all have default skew */
160 pins = "U8 GMAC0 RXD0", "V8 GMAC0 RXD1", 160 pins = "W8 GMAC0 RXD0", "V9 GMAC0 RXD1",
161 "P9 GMAC0 RXD2", "R9 GMAC0 RXD3", 161 "Y8 GMAC0 RXD2", "U9 GMAC0 RXD3",
162 "U7 GMAC0 TXD0", "T7 GMAC0 TXD1", 162 "T7 GMAC0 TXD0", "U6 GMAC0 TXD1",
163 "R7 GMAC0 TXD2", "P7 GMAC0 TXD3", 163 "V7 GMAC0 TXD2", "U7 GMAC0 TXD3",
164 "R11 GMAC1 RXD0", "P11 GMAC1 RXD1", 164 "Y12 GMAC1 RXD0", "V12 GMAC1 RXD1",
165 "V12 GMAC1 RXD2", "U12 GMAC1 RXD3", 165 "T11 GMAC1 RXD2", "W12 GMAC1 RXD3",
166 "R10 GMAC1 TXD0", "T10 GMAC1 TXD1", 166 "U10 GMAC1 TXD0", "Y10 GMAC1 TXD1",
167 "U10 GMAC1 TXD2", "V10 GMAC1 TXD3"; 167 "W10 GMAC1 TXD2", "T9 GMAC1 TXD3";
168 skew-delay = <7>; 168 skew-delay = <7>;
169 }; 169 };
170 /* Set up drive strength on GMAC0 to 16 mA */ 170 /* Set up drive strength on GMAC0 to 16 mA */
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 475904894b86..e554b6e039f3 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -163,10 +163,10 @@
163 163
164 cm2: cm2@8000 { 164 cm2: cm2@8000 {
165 compatible = "ti,omap4-cm2", "simple-bus"; 165 compatible = "ti,omap4-cm2", "simple-bus";
166 reg = <0x8000 0x3000>; 166 reg = <0x8000 0x2000>;
167 #address-cells = <1>; 167 #address-cells = <1>;
168 #size-cells = <1>; 168 #size-cells = <1>;
169 ranges = <0 0x8000 0x3000>; 169 ranges = <0 0x8000 0x2000>;
170 170
171 cm2_clocks: clocks { 171 cm2_clocks: clocks {
172 #address-cells = <1>; 172 #address-cells = <1>;
@@ -250,11 +250,11 @@
250 250
251 prm: prm@6000 { 251 prm: prm@6000 {
252 compatible = "ti,omap4-prm"; 252 compatible = "ti,omap4-prm";
253 reg = <0x6000 0x3000>; 253 reg = <0x6000 0x2000>;
254 interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>; 254 interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
255 #address-cells = <1>; 255 #address-cells = <1>;
256 #size-cells = <1>; 256 #size-cells = <1>;
257 ranges = <0 0x6000 0x3000>; 257 ranges = <0 0x6000 0x2000>;
258 258
259 prm_clocks: clocks { 259 prm_clocks: clocks {
260 #address-cells = <1>; 260 #address-cells = <1>;
diff --git a/arch/arm/configs/gemini_defconfig b/arch/arm/configs/gemini_defconfig
index 2a63fa10c813..553777ac2814 100644
--- a/arch/arm/configs/gemini_defconfig
+++ b/arch/arm/configs/gemini_defconfig
@@ -1,6 +1,7 @@
1# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
2CONFIG_SYSVIPC=y 2CONFIG_SYSVIPC=y
3CONFIG_NO_HZ_IDLE=y 3CONFIG_NO_HZ_IDLE=y
4CONFIG_HIGH_RES_TIMERS=y
4CONFIG_BSD_PROCESS_ACCT=y 5CONFIG_BSD_PROCESS_ACCT=y
5CONFIG_USER_NS=y 6CONFIG_USER_NS=y
6CONFIG_RELAY=y 7CONFIG_RELAY=y
@@ -12,15 +13,21 @@ CONFIG_ARCH_GEMINI=y
12CONFIG_PCI=y 13CONFIG_PCI=y
13CONFIG_PREEMPT=y 14CONFIG_PREEMPT=y
14CONFIG_AEABI=y 15CONFIG_AEABI=y
16CONFIG_HIGHMEM=y
17CONFIG_CMA=y
15CONFIG_CMDLINE="console=ttyS0,115200n8" 18CONFIG_CMDLINE="console=ttyS0,115200n8"
16CONFIG_KEXEC=y 19CONFIG_KEXEC=y
17CONFIG_BINFMT_MISC=y 20CONFIG_BINFMT_MISC=y
18CONFIG_PM=y 21CONFIG_PM=y
22CONFIG_NET=y
23CONFIG_UNIX=y
24CONFIG_INET=y
19CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 25CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
20CONFIG_DEVTMPFS=y 26CONFIG_DEVTMPFS=y
21CONFIG_MTD=y 27CONFIG_MTD=y
22CONFIG_MTD_BLOCK=y 28CONFIG_MTD_BLOCK=y
23CONFIG_MTD_CFI=y 29CONFIG_MTD_CFI=y
30CONFIG_MTD_JEDECPROBE=y
24CONFIG_MTD_CFI_INTELEXT=y 31CONFIG_MTD_CFI_INTELEXT=y
25CONFIG_MTD_CFI_AMDSTD=y 32CONFIG_MTD_CFI_AMDSTD=y
26CONFIG_MTD_CFI_STAA=y 33CONFIG_MTD_CFI_STAA=y
@@ -33,6 +40,11 @@ CONFIG_BLK_DEV_SD=y
33# CONFIG_SCSI_LOWLEVEL is not set 40# CONFIG_SCSI_LOWLEVEL is not set
34CONFIG_ATA=y 41CONFIG_ATA=y
35CONFIG_PATA_FTIDE010=y 42CONFIG_PATA_FTIDE010=y
43CONFIG_NETDEVICES=y
44CONFIG_GEMINI_ETHERNET=y
45CONFIG_MDIO_BITBANG=y
46CONFIG_MDIO_GPIO=y
47CONFIG_REALTEK_PHY=y
36CONFIG_INPUT_EVDEV=y 48CONFIG_INPUT_EVDEV=y
37CONFIG_KEYBOARD_GPIO=y 49CONFIG_KEYBOARD_GPIO=y
38# CONFIG_INPUT_MOUSE is not set 50# CONFIG_INPUT_MOUSE is not set
@@ -43,9 +55,19 @@ CONFIG_SERIAL_8250_NR_UARTS=1
43CONFIG_SERIAL_8250_RUNTIME_UARTS=1 55CONFIG_SERIAL_8250_RUNTIME_UARTS=1
44CONFIG_SERIAL_OF_PLATFORM=y 56CONFIG_SERIAL_OF_PLATFORM=y
45# CONFIG_HW_RANDOM is not set 57# CONFIG_HW_RANDOM is not set
46# CONFIG_HWMON is not set 58CONFIG_I2C_GPIO=y
59CONFIG_SPI=y
60CONFIG_SPI_GPIO=y
61CONFIG_SENSORS_GPIO_FAN=y
62CONFIG_SENSORS_LM75=y
63CONFIG_THERMAL=y
47CONFIG_WATCHDOG=y 64CONFIG_WATCHDOG=y
48CONFIG_GEMINI_WATCHDOG=y 65CONFIG_REGULATOR=y
66CONFIG_REGULATOR_FIXED_VOLTAGE=y
67CONFIG_DRM=y
68CONFIG_DRM_PANEL_ILITEK_IL9322=y
69CONFIG_DRM_TVE200=y
70CONFIG_LOGO=y
49CONFIG_USB=y 71CONFIG_USB=y
50CONFIG_USB_MON=y 72CONFIG_USB_MON=y
51CONFIG_USB_FOTG210_HCD=y 73CONFIG_USB_FOTG210_HCD=y
@@ -54,6 +76,7 @@ CONFIG_NEW_LEDS=y
54CONFIG_LEDS_CLASS=y 76CONFIG_LEDS_CLASS=y
55CONFIG_LEDS_GPIO=y 77CONFIG_LEDS_GPIO=y
56CONFIG_LEDS_TRIGGERS=y 78CONFIG_LEDS_TRIGGERS=y
79CONFIG_LEDS_TRIGGER_DISK=y
57CONFIG_LEDS_TRIGGER_HEARTBEAT=y 80CONFIG_LEDS_TRIGGER_HEARTBEAT=y
58CONFIG_RTC_CLASS=y 81CONFIG_RTC_CLASS=y
59CONFIG_DMADEVICES=y 82CONFIG_DMADEVICES=y
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index 2620ce790db0..371fca4e1ab7 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -57,6 +57,7 @@ CONFIG_MTD_M25P80=y
57CONFIG_MTD_NAND=y 57CONFIG_MTD_NAND=y
58CONFIG_MTD_NAND_DENALI_DT=y 58CONFIG_MTD_NAND_DENALI_DT=y
59CONFIG_MTD_SPI_NOR=y 59CONFIG_MTD_SPI_NOR=y
60# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set
60CONFIG_SPI_CADENCE_QUADSPI=y 61CONFIG_SPI_CADENCE_QUADSPI=y
61CONFIG_OF_OVERLAY=y 62CONFIG_OF_OVERLAY=y
62CONFIG_OF_CONFIGFS=y 63CONFIG_OF_CONFIGFS=y
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index c6a749568dd6..c7c28c885a19 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -77,6 +77,9 @@ struct kvm_arch {
77 /* Interrupt controller */ 77 /* Interrupt controller */
78 struct vgic_dist vgic; 78 struct vgic_dist vgic;
79 int max_vcpus; 79 int max_vcpus;
80
81 /* Mandated version of PSCI */
82 u32 psci_version;
80}; 83};
81 84
82#define KVM_NR_MEM_OBJS 40 85#define KVM_NR_MEM_OBJS 40
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 2ba95d6fe852..caae4843cb70 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -195,6 +195,12 @@ struct kvm_arch_memory_slot {
195#define KVM_REG_ARM_VFP_FPINST 0x1009 195#define KVM_REG_ARM_VFP_FPINST 0x1009
196#define KVM_REG_ARM_VFP_FPINST2 0x100A 196#define KVM_REG_ARM_VFP_FPINST2 0x100A
197 197
198/* KVM-as-firmware specific pseudo-registers */
199#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
200#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
201 KVM_REG_ARM_FW | ((r) & 0xffff))
202#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
203
198/* Device Control API: ARM VGIC */ 204/* Device Control API: ARM VGIC */
199#define KVM_DEV_ARM_VGIC_GRP_ADDR 0 205#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
200#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 206#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 1e0784ebbfd6..a18f33edc471 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -22,6 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/vmalloc.h> 23#include <linux/vmalloc.h>
24#include <linux/fs.h> 24#include <linux/fs.h>
25#include <kvm/arm_psci.h>
25#include <asm/cputype.h> 26#include <asm/cputype.h>
26#include <linux/uaccess.h> 27#include <linux/uaccess.h>
27#include <asm/kvm.h> 28#include <asm/kvm.h>
@@ -176,6 +177,7 @@ static unsigned long num_core_regs(void)
176unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) 177unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
177{ 178{
178 return num_core_regs() + kvm_arm_num_coproc_regs(vcpu) 179 return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
180 + kvm_arm_get_fw_num_regs(vcpu)
179 + NUM_TIMER_REGS; 181 + NUM_TIMER_REGS;
180} 182}
181 183
@@ -196,6 +198,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
196 uindices++; 198 uindices++;
197 } 199 }
198 200
201 ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
202 if (ret)
203 return ret;
204 uindices += kvm_arm_get_fw_num_regs(vcpu);
205
199 ret = copy_timer_indices(vcpu, uindices); 206 ret = copy_timer_indices(vcpu, uindices);
200 if (ret) 207 if (ret)
201 return ret; 208 return ret;
@@ -214,6 +221,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
214 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 221 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
215 return get_core_reg(vcpu, reg); 222 return get_core_reg(vcpu, reg);
216 223
224 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
225 return kvm_arm_get_fw_reg(vcpu, reg);
226
217 if (is_timer_reg(reg->id)) 227 if (is_timer_reg(reg->id))
218 return get_timer_reg(vcpu, reg); 228 return get_timer_reg(vcpu, reg);
219 229
@@ -230,6 +240,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
230 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 240 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
231 return set_core_reg(vcpu, reg); 241 return set_core_reg(vcpu, reg);
232 242
243 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
244 return kvm_arm_set_fw_reg(vcpu, reg);
245
233 if (is_timer_reg(reg->id)) 246 if (is_timer_reg(reg->id))
234 return set_timer_reg(vcpu, reg); 247 return set_timer_reg(vcpu, reg);
235 248
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 4603c30fef73..0d9ce58bc464 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -243,8 +243,4 @@ arch/arm/mach-omap2/pm-asm-offsets.s: arch/arm/mach-omap2/pm-asm-offsets.c
243include/generated/ti-pm-asm-offsets.h: arch/arm/mach-omap2/pm-asm-offsets.s FORCE 243include/generated/ti-pm-asm-offsets.h: arch/arm/mach-omap2/pm-asm-offsets.s FORCE
244 $(call filechk,offsets,__TI_PM_ASM_OFFSETS_H__) 244 $(call filechk,offsets,__TI_PM_ASM_OFFSETS_H__)
245 245
246# For rule to generate ti-emif-asm-offsets.h dependency 246$(obj)/sleep33xx.o $(obj)/sleep43xx.o: include/generated/ti-pm-asm-offsets.h
247include drivers/memory/Makefile.asm-offsets
248
249arch/arm/mach-omap2/sleep33xx.o: include/generated/ti-pm-asm-offsets.h include/generated/ti-emif-asm-offsets.h
250arch/arm/mach-omap2/sleep43xx.o: include/generated/ti-pm-asm-offsets.h include/generated/ti-emif-asm-offsets.h
diff --git a/arch/arm/mach-omap2/pm-asm-offsets.c b/arch/arm/mach-omap2/pm-asm-offsets.c
index 6d4392da7c11..b9846b19e5e2 100644
--- a/arch/arm/mach-omap2/pm-asm-offsets.c
+++ b/arch/arm/mach-omap2/pm-asm-offsets.c
@@ -7,9 +7,12 @@
7 7
8#include <linux/kbuild.h> 8#include <linux/kbuild.h>
9#include <linux/platform_data/pm33xx.h> 9#include <linux/platform_data/pm33xx.h>
10#include <linux/ti-emif-sram.h>
10 11
11int main(void) 12int main(void)
12{ 13{
14 ti_emif_asm_offsets();
15
13 DEFINE(AMX3_PM_WFI_FLAGS_OFFSET, 16 DEFINE(AMX3_PM_WFI_FLAGS_OFFSET,
14 offsetof(struct am33xx_pm_sram_data, wfi_flags)); 17 offsetof(struct am33xx_pm_sram_data, wfi_flags));
15 DEFINE(AMX3_PM_L2_AUX_CTRL_VAL_OFFSET, 18 DEFINE(AMX3_PM_L2_AUX_CTRL_VAL_OFFSET,
diff --git a/arch/arm/mach-omap2/sleep33xx.S b/arch/arm/mach-omap2/sleep33xx.S
index 218d79930b04..322b3bb868b4 100644
--- a/arch/arm/mach-omap2/sleep33xx.S
+++ b/arch/arm/mach-omap2/sleep33xx.S
@@ -6,7 +6,6 @@
6 * Dave Gerlach, Vaibhav Bedia 6 * Dave Gerlach, Vaibhav Bedia
7 */ 7 */
8 8
9#include <generated/ti-emif-asm-offsets.h>
10#include <generated/ti-pm-asm-offsets.h> 9#include <generated/ti-pm-asm-offsets.h>
11#include <linux/linkage.h> 10#include <linux/linkage.h>
12#include <linux/ti-emif-sram.h> 11#include <linux/ti-emif-sram.h>
diff --git a/arch/arm/mach-omap2/sleep43xx.S b/arch/arm/mach-omap2/sleep43xx.S
index b24be624e8b9..8903814a6677 100644
--- a/arch/arm/mach-omap2/sleep43xx.S
+++ b/arch/arm/mach-omap2/sleep43xx.S
@@ -6,7 +6,6 @@
6 * Dave Gerlach, Vaibhav Bedia 6 * Dave Gerlach, Vaibhav Bedia
7 */ 7 */
8 8
9#include <generated/ti-emif-asm-offsets.h>
10#include <generated/ti-pm-asm-offsets.h> 9#include <generated/ti-pm-asm-offsets.h>
11#include <linux/linkage.h> 10#include <linux/linkage.h>
12#include <linux/ti-emif-sram.h> 11#include <linux/ti-emif-sram.h>
diff --git a/arch/arm/mach-s3c24xx/mach-jive.c b/arch/arm/mach-s3c24xx/mach-jive.c
index 59589a4a0d4b..885e8f12e4b9 100644
--- a/arch/arm/mach-s3c24xx/mach-jive.c
+++ b/arch/arm/mach-s3c24xx/mach-jive.c
@@ -427,9 +427,9 @@ static struct gpiod_lookup_table jive_wm8750_gpiod_table = {
427 .dev_id = "spi_gpio", 427 .dev_id = "spi_gpio",
428 .table = { 428 .table = {
429 GPIO_LOOKUP("GPIOB", 4, 429 GPIO_LOOKUP("GPIOB", 4,
430 "gpio-sck", GPIO_ACTIVE_HIGH), 430 "sck", GPIO_ACTIVE_HIGH),
431 GPIO_LOOKUP("GPIOB", 9, 431 GPIO_LOOKUP("GPIOB", 9,
432 "gpio-mosi", GPIO_ACTIVE_HIGH), 432 "mosi", GPIO_ACTIVE_HIGH),
433 GPIO_LOOKUP("GPIOH", 10, 433 GPIO_LOOKUP("GPIOH", 10,
434 "cs", GPIO_ACTIVE_HIGH), 434 "cs", GPIO_ACTIVE_HIGH),
435 { }, 435 { },
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 15402861bb59..87f7d2f9f17c 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -56,7 +56,11 @@ KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)
56KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) 56KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
57KBUILD_AFLAGS += $(call cc-option,-mabi=lp64) 57KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
58 58
59ifeq ($(cc-name),clang)
60KBUILD_CFLAGS += -DCONFIG_ARCH_SUPPORTS_INT128
61else
59KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128) 62KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128)
63endif
60 64
61ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) 65ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
62KBUILD_CPPFLAGS += -mbig-endian 66KBUILD_CPPFLAGS += -mbig-endian
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
index 4eef36b22538..88e712ea757a 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
@@ -212,3 +212,7 @@
212 pinctrl-0 = <&uart_ao_a_pins>; 212 pinctrl-0 = <&uart_ao_a_pins>;
213 pinctrl-names = "default"; 213 pinctrl-names = "default";
214}; 214};
215
216&usb0 {
217 status = "okay";
218};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
index 22bf37404ff1..3e3eb31748a3 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
@@ -271,3 +271,15 @@
271 pinctrl-0 = <&uart_ao_a_pins>; 271 pinctrl-0 = <&uart_ao_a_pins>;
272 pinctrl-names = "default"; 272 pinctrl-names = "default";
273}; 273};
274
275&usb0 {
276 status = "okay";
277};
278
279&usb2_phy0 {
280 /*
281 * even though the schematics don't show it:
282 * HDMI_5V is also used as supply for the USB VBUS.
283 */
284 phy-supply = <&hdmi_5v>;
285};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
index 69c721a70e44..6739697be1de 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
@@ -215,3 +215,7 @@
215 pinctrl-0 = <&uart_ao_a_pins>; 215 pinctrl-0 = <&uart_ao_a_pins>;
216 pinctrl-names = "default"; 216 pinctrl-names = "default";
217}; 217};
218
219&usb0 {
220 status = "okay";
221};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
index 0a0953fbc7d4..0cfd701809de 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
@@ -185,3 +185,7 @@
185 pinctrl-0 = <&uart_ao_a_pins>; 185 pinctrl-0 = <&uart_ao_a_pins>;
186 pinctrl-names = "default"; 186 pinctrl-names = "default";
187}; 187};
188
189&usb0 {
190 status = "okay";
191};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index e1a39cbed8c9..dba365ed4bd5 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -20,6 +20,67 @@
20 no-map; 20 no-map;
21 }; 21 };
22 }; 22 };
23
24 soc {
25 usb0: usb@c9000000 {
26 status = "disabled";
27 compatible = "amlogic,meson-gxl-dwc3";
28 #address-cells = <2>;
29 #size-cells = <2>;
30 ranges;
31
32 clocks = <&clkc CLKID_USB>;
33 clock-names = "usb_general";
34 resets = <&reset RESET_USB_OTG>;
35 reset-names = "usb_otg";
36
37 dwc3: dwc3@c9000000 {
38 compatible = "snps,dwc3";
39 reg = <0x0 0xc9000000 0x0 0x100000>;
40 interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
41 dr_mode = "host";
42 maximum-speed = "high-speed";
43 snps,dis_u2_susphy_quirk;
44 phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>;
45 };
46 };
47 };
48};
49
50&apb {
51 usb2_phy0: phy@78000 {
52 compatible = "amlogic,meson-gxl-usb2-phy";
53 #phy-cells = <0>;
54 reg = <0x0 0x78000 0x0 0x20>;
55 clocks = <&clkc CLKID_USB>;
56 clock-names = "phy";
57 resets = <&reset RESET_USB_OTG>;
58 reset-names = "phy";
59 status = "okay";
60 };
61
62 usb2_phy1: phy@78020 {
63 compatible = "amlogic,meson-gxl-usb2-phy";
64 #phy-cells = <0>;
65 reg = <0x0 0x78020 0x0 0x20>;
66 clocks = <&clkc CLKID_USB>;
67 clock-names = "phy";
68 resets = <&reset RESET_USB_OTG>;
69 reset-names = "phy";
70 status = "okay";
71 };
72
73 usb3_phy: phy@78080 {
74 compatible = "amlogic,meson-gxl-usb3-phy";
75 #phy-cells = <0>;
76 reg = <0x0 0x78080 0x0 0x20>;
77 interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
78 clocks = <&clkc CLKID_USB>, <&clkc_AO CLKID_AO_CEC_32K>;
79 clock-names = "phy", "peripheral";
80 resets = <&reset RESET_USB_OTG>, <&reset RESET_USB_OTG>;
81 reset-names = "phy", "peripheral";
82 status = "okay";
83 };
23}; 84};
24 85
25&ethmac { 86&ethmac {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
index 4fd46c1546a7..0868da476e41 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
@@ -406,3 +406,7 @@
406 status = "okay"; 406 status = "okay";
407 vref-supply = <&vddio_ao18>; 407 vref-supply = <&vddio_ao18>;
408}; 408};
409
410&usb0 {
411 status = "okay";
412};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
index d076a7c425dd..247888d68a3a 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
@@ -80,6 +80,19 @@
80 }; 80 };
81}; 81};
82 82
83&apb {
84 usb2_phy2: phy@78040 {
85 compatible = "amlogic,meson-gxl-usb2-phy";
86 #phy-cells = <0>;
87 reg = <0x0 0x78040 0x0 0x20>;
88 clocks = <&clkc CLKID_USB>;
89 clock-names = "phy";
90 resets = <&reset RESET_USB_OTG>;
91 reset-names = "phy";
92 status = "okay";
93 };
94};
95
83&clkc_AO { 96&clkc_AO {
84 compatible = "amlogic,meson-gxm-aoclkc", "amlogic,meson-gx-aoclkc"; 97 compatible = "amlogic,meson-gxm-aoclkc", "amlogic,meson-gx-aoclkc";
85}; 98};
@@ -100,3 +113,7 @@
100&hdmi_tx { 113&hdmi_tx {
101 compatible = "amlogic,meson-gxm-dw-hdmi", "amlogic,meson-gx-dw-hdmi"; 114 compatible = "amlogic,meson-gxm-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
102}; 115};
116
117&dwc3 {
118 phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>, <&usb2_phy2>;
119};
diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
index 2ac43221ddb6..69804c5f1197 100644
--- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
@@ -56,8 +56,6 @@
56 56
57 gpio_keys { 57 gpio_keys {
58 compatible = "gpio-keys"; 58 compatible = "gpio-keys";
59 #address-cells = <1>;
60 #size-cells = <0>;
61 59
62 power-button { 60 power-button {
63 debounce_interval = <50>; 61 debounce_interval = <50>;
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi
index 4b5465da81d8..8c68e0c26f1b 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi
@@ -36,11 +36,11 @@
36 #size-cells = <1>; 36 #size-cells = <1>;
37 ranges = <0x0 0x0 0x67d00000 0x00800000>; 37 ranges = <0x0 0x0 0x67d00000 0x00800000>;
38 38
39 sata0: ahci@210000 { 39 sata0: ahci@0 {
40 compatible = "brcm,iproc-ahci", "generic-ahci"; 40 compatible = "brcm,iproc-ahci", "generic-ahci";
41 reg = <0x00210000 0x1000>; 41 reg = <0x00000000 0x1000>;
42 reg-names = "ahci"; 42 reg-names = "ahci";
43 interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>; 43 interrupts = <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
44 #address-cells = <1>; 44 #address-cells = <1>;
45 #size-cells = <0>; 45 #size-cells = <0>;
46 status = "disabled"; 46 status = "disabled";
@@ -52,9 +52,9 @@
52 }; 52 };
53 }; 53 };
54 54
55 sata_phy0: sata_phy@212100 { 55 sata_phy0: sata_phy@2100 {
56 compatible = "brcm,iproc-sr-sata-phy"; 56 compatible = "brcm,iproc-sr-sata-phy";
57 reg = <0x00212100 0x1000>; 57 reg = <0x00002100 0x1000>;
58 reg-names = "phy"; 58 reg-names = "phy";
59 #address-cells = <1>; 59 #address-cells = <1>;
60 #size-cells = <0>; 60 #size-cells = <0>;
@@ -66,11 +66,11 @@
66 }; 66 };
67 }; 67 };
68 68
69 sata1: ahci@310000 { 69 sata1: ahci@10000 {
70 compatible = "brcm,iproc-ahci", "generic-ahci"; 70 compatible = "brcm,iproc-ahci", "generic-ahci";
71 reg = <0x00310000 0x1000>; 71 reg = <0x00010000 0x1000>;
72 reg-names = "ahci"; 72 reg-names = "ahci";
73 interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>; 73 interrupts = <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>;
74 #address-cells = <1>; 74 #address-cells = <1>;
75 #size-cells = <0>; 75 #size-cells = <0>;
76 status = "disabled"; 76 status = "disabled";
@@ -82,9 +82,9 @@
82 }; 82 };
83 }; 83 };
84 84
85 sata_phy1: sata_phy@312100 { 85 sata_phy1: sata_phy@12100 {
86 compatible = "brcm,iproc-sr-sata-phy"; 86 compatible = "brcm,iproc-sr-sata-phy";
87 reg = <0x00312100 0x1000>; 87 reg = <0x00012100 0x1000>;
88 reg-names = "phy"; 88 reg-names = "phy";
89 #address-cells = <1>; 89 #address-cells = <1>;
90 #size-cells = <0>; 90 #size-cells = <0>;
@@ -96,11 +96,11 @@
96 }; 96 };
97 }; 97 };
98 98
99 sata2: ahci@120000 { 99 sata2: ahci@20000 {
100 compatible = "brcm,iproc-ahci", "generic-ahci"; 100 compatible = "brcm,iproc-ahci", "generic-ahci";
101 reg = <0x00120000 0x1000>; 101 reg = <0x00020000 0x1000>;
102 reg-names = "ahci"; 102 reg-names = "ahci";
103 interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>; 103 interrupts = <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>;
104 #address-cells = <1>; 104 #address-cells = <1>;
105 #size-cells = <0>; 105 #size-cells = <0>;
106 status = "disabled"; 106 status = "disabled";
@@ -112,9 +112,9 @@
112 }; 112 };
113 }; 113 };
114 114
115 sata_phy2: sata_phy@122100 { 115 sata_phy2: sata_phy@22100 {
116 compatible = "brcm,iproc-sr-sata-phy"; 116 compatible = "brcm,iproc-sr-sata-phy";
117 reg = <0x00122100 0x1000>; 117 reg = <0x00022100 0x1000>;
118 reg-names = "phy"; 118 reg-names = "phy";
119 #address-cells = <1>; 119 #address-cells = <1>;
120 #size-cells = <0>; 120 #size-cells = <0>;
@@ -126,11 +126,11 @@
126 }; 126 };
127 }; 127 };
128 128
129 sata3: ahci@130000 { 129 sata3: ahci@30000 {
130 compatible = "brcm,iproc-ahci", "generic-ahci"; 130 compatible = "brcm,iproc-ahci", "generic-ahci";
131 reg = <0x00130000 0x1000>; 131 reg = <0x00030000 0x1000>;
132 reg-names = "ahci"; 132 reg-names = "ahci";
133 interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>; 133 interrupts = <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>;
134 #address-cells = <1>; 134 #address-cells = <1>;
135 #size-cells = <0>; 135 #size-cells = <0>;
136 status = "disabled"; 136 status = "disabled";
@@ -142,9 +142,9 @@
142 }; 142 };
143 }; 143 };
144 144
145 sata_phy3: sata_phy@132100 { 145 sata_phy3: sata_phy@32100 {
146 compatible = "brcm,iproc-sr-sata-phy"; 146 compatible = "brcm,iproc-sr-sata-phy";
147 reg = <0x00132100 0x1000>; 147 reg = <0x00032100 0x1000>;
148 reg-names = "phy"; 148 reg-names = "phy";
149 #address-cells = <1>; 149 #address-cells = <1>;
150 #size-cells = <0>; 150 #size-cells = <0>;
@@ -156,11 +156,11 @@
156 }; 156 };
157 }; 157 };
158 158
159 sata4: ahci@330000 { 159 sata4: ahci@100000 {
160 compatible = "brcm,iproc-ahci", "generic-ahci"; 160 compatible = "brcm,iproc-ahci", "generic-ahci";
161 reg = <0x00330000 0x1000>; 161 reg = <0x00100000 0x1000>;
162 reg-names = "ahci"; 162 reg-names = "ahci";
163 interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>; 163 interrupts = <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>;
164 #address-cells = <1>; 164 #address-cells = <1>;
165 #size-cells = <0>; 165 #size-cells = <0>;
166 status = "disabled"; 166 status = "disabled";
@@ -172,9 +172,9 @@
172 }; 172 };
173 }; 173 };
174 174
175 sata_phy4: sata_phy@332100 { 175 sata_phy4: sata_phy@102100 {
176 compatible = "brcm,iproc-sr-sata-phy"; 176 compatible = "brcm,iproc-sr-sata-phy";
177 reg = <0x00332100 0x1000>; 177 reg = <0x00102100 0x1000>;
178 reg-names = "phy"; 178 reg-names = "phy";
179 #address-cells = <1>; 179 #address-cells = <1>;
180 #size-cells = <0>; 180 #size-cells = <0>;
@@ -186,11 +186,11 @@
186 }; 186 };
187 }; 187 };
188 188
189 sata5: ahci@400000 { 189 sata5: ahci@110000 {
190 compatible = "brcm,iproc-ahci", "generic-ahci"; 190 compatible = "brcm,iproc-ahci", "generic-ahci";
191 reg = <0x00400000 0x1000>; 191 reg = <0x00110000 0x1000>;
192 reg-names = "ahci"; 192 reg-names = "ahci";
193 interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>; 193 interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>;
194 #address-cells = <1>; 194 #address-cells = <1>;
195 #size-cells = <0>; 195 #size-cells = <0>;
196 status = "disabled"; 196 status = "disabled";
@@ -202,9 +202,9 @@
202 }; 202 };
203 }; 203 };
204 204
205 sata_phy5: sata_phy@402100 { 205 sata_phy5: sata_phy@112100 {
206 compatible = "brcm,iproc-sr-sata-phy"; 206 compatible = "brcm,iproc-sr-sata-phy";
207 reg = <0x00402100 0x1000>; 207 reg = <0x00112100 0x1000>;
208 reg-names = "phy"; 208 reg-names = "phy";
209 #address-cells = <1>; 209 #address-cells = <1>;
210 #size-cells = <0>; 210 #size-cells = <0>;
@@ -216,11 +216,11 @@
216 }; 216 };
217 }; 217 };
218 218
219 sata6: ahci@410000 { 219 sata6: ahci@120000 {
220 compatible = "brcm,iproc-ahci", "generic-ahci"; 220 compatible = "brcm,iproc-ahci", "generic-ahci";
221 reg = <0x00410000 0x1000>; 221 reg = <0x00120000 0x1000>;
222 reg-names = "ahci"; 222 reg-names = "ahci";
223 interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>; 223 interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>;
224 #address-cells = <1>; 224 #address-cells = <1>;
225 #size-cells = <0>; 225 #size-cells = <0>;
226 status = "disabled"; 226 status = "disabled";
@@ -232,9 +232,9 @@
232 }; 232 };
233 }; 233 };
234 234
235 sata_phy6: sata_phy@412100 { 235 sata_phy6: sata_phy@122100 {
236 compatible = "brcm,iproc-sr-sata-phy"; 236 compatible = "brcm,iproc-sr-sata-phy";
237 reg = <0x00412100 0x1000>; 237 reg = <0x00122100 0x1000>;
238 reg-names = "phy"; 238 reg-names = "phy";
239 #address-cells = <1>; 239 #address-cells = <1>;
240 #size-cells = <0>; 240 #size-cells = <0>;
@@ -246,11 +246,11 @@
246 }; 246 };
247 }; 247 };
248 248
249 sata7: ahci@420000 { 249 sata7: ahci@130000 {
250 compatible = "brcm,iproc-ahci", "generic-ahci"; 250 compatible = "brcm,iproc-ahci", "generic-ahci";
251 reg = <0x00420000 0x1000>; 251 reg = <0x00130000 0x1000>;
252 reg-names = "ahci"; 252 reg-names = "ahci";
253 interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>; 253 interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
254 #address-cells = <1>; 254 #address-cells = <1>;
255 #size-cells = <0>; 255 #size-cells = <0>;
256 status = "disabled"; 256 status = "disabled";
@@ -262,9 +262,9 @@
262 }; 262 };
263 }; 263 };
264 264
265 sata_phy7: sata_phy@422100 { 265 sata_phy7: sata_phy@132100 {
266 compatible = "brcm,iproc-sr-sata-phy"; 266 compatible = "brcm,iproc-sr-sata-phy";
267 reg = <0x00422100 0x1000>; 267 reg = <0x00132100 0x1000>;
268 reg-names = "phy"; 268 reg-names = "phy";
269 #address-cells = <1>; 269 #address-cells = <1>;
270 #size-cells = <0>; 270 #size-cells = <0>;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index ab46bc70add6..469de8acd06f 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -75,6 +75,9 @@ struct kvm_arch {
75 75
76 /* Interrupt controller */ 76 /* Interrupt controller */
77 struct vgic_dist vgic; 77 struct vgic_dist vgic;
78
79 /* Mandated version of PSCI */
80 u32 psci_version;
78}; 81};
79 82
80#define KVM_NR_MEM_OBJS 40 83#define KVM_NR_MEM_OBJS 40
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index b6dbbe3123a9..97d0ef12e2ff 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -39,7 +39,7 @@ struct mod_arch_specific {
39u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, 39u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
40 Elf64_Sym *sym); 40 Elf64_Sym *sym);
41 41
42u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val); 42u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val);
43 43
44#ifdef CONFIG_RANDOMIZE_BASE 44#ifdef CONFIG_RANDOMIZE_BASE
45extern u64 module_alloc_base; 45extern u64 module_alloc_base;
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7e2c27e63cd8..7c4c8f318ba9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -230,7 +230,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
230 } 230 }
231} 231}
232 232
233extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); 233extern void __sync_icache_dcache(pte_t pteval);
234 234
235/* 235/*
236 * PTE bits configuration in the presence of hardware Dirty Bit Management 236 * PTE bits configuration in the presence of hardware Dirty Bit Management
@@ -253,7 +253,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
253 pte_t old_pte; 253 pte_t old_pte;
254 254
255 if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte)) 255 if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
256 __sync_icache_dcache(pte, addr); 256 __sync_icache_dcache(pte);
257 257
258 /* 258 /*
259 * If the existing pte is valid, check for potential race with 259 * If the existing pte is valid, check for potential race with
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 9abbf3044654..04b3256f8e6d 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -206,6 +206,12 @@ struct kvm_arch_memory_slot {
206#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) 206#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
207#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) 207#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
208 208
209/* KVM-as-firmware specific pseudo-registers */
210#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
211#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
212 KVM_REG_ARM_FW | ((r) & 0xffff))
213#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
214
209/* Device Control API: ARM VGIC */ 215/* Device Control API: ARM VGIC */
210#define KVM_DEV_ARM_VGIC_GRP_ADDR 0 216#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
211#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 217#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 536d572e5596..9d1b06d67c53 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -868,6 +868,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
868 static const struct midr_range kpti_safe_list[] = { 868 static const struct midr_range kpti_safe_list[] = {
869 MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), 869 MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
870 MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), 870 MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
871 { /* sentinel */ }
871 }; 872 };
872 char const *str = "command line option"; 873 char const *str = "command line option";
873 874
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index fa3637284a3d..f0690c2ca3e0 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -43,7 +43,7 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
43} 43}
44 44
45#ifdef CONFIG_ARM64_ERRATUM_843419 45#ifdef CONFIG_ARM64_ERRATUM_843419
46u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val) 46u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val)
47{ 47{
48 struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core : 48 struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
49 &mod->arch.init; 49 &mod->arch.init;
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 719fde8dcc19..155fd91e78f4 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -215,7 +215,7 @@ static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val)
215 insn &= ~BIT(31); 215 insn &= ~BIT(31);
216 } else { 216 } else {
217 /* out of range for ADR -> emit a veneer */ 217 /* out of range for ADR -> emit a veneer */
218 val = module_emit_adrp_veneer(mod, place, val & ~0xfff); 218 val = module_emit_veneer_for_adrp(mod, place, val & ~0xfff);
219 if (!val) 219 if (!val)
220 return -ENOEXEC; 220 return -ENOEXEC;
221 insn = aarch64_insn_gen_branch_imm((u64)place, val, 221 insn = aarch64_insn_gen_branch_imm((u64)place, val,
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 71d99af24ef2..7ff81fed46e1 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -25,6 +25,7 @@
25#include <linux/sched/signal.h> 25#include <linux/sched/signal.h>
26#include <linux/sched/task_stack.h> 26#include <linux/sched/task_stack.h>
27#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/nospec.h>
28#include <linux/smp.h> 29#include <linux/smp.h>
29#include <linux/ptrace.h> 30#include <linux/ptrace.h>
30#include <linux/user.h> 31#include <linux/user.h>
@@ -249,15 +250,20 @@ static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
249 250
250 switch (note_type) { 251 switch (note_type) {
251 case NT_ARM_HW_BREAK: 252 case NT_ARM_HW_BREAK:
252 if (idx < ARM_MAX_BRP) 253 if (idx >= ARM_MAX_BRP)
253 bp = tsk->thread.debug.hbp_break[idx]; 254 goto out;
255 idx = array_index_nospec(idx, ARM_MAX_BRP);
256 bp = tsk->thread.debug.hbp_break[idx];
254 break; 257 break;
255 case NT_ARM_HW_WATCH: 258 case NT_ARM_HW_WATCH:
256 if (idx < ARM_MAX_WRP) 259 if (idx >= ARM_MAX_WRP)
257 bp = tsk->thread.debug.hbp_watch[idx]; 260 goto out;
261 idx = array_index_nospec(idx, ARM_MAX_WRP);
262 bp = tsk->thread.debug.hbp_watch[idx];
258 break; 263 break;
259 } 264 }
260 265
266out:
261 return bp; 267 return bp;
262} 268}
263 269
@@ -1458,9 +1464,7 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
1458{ 1464{
1459 int ret; 1465 int ret;
1460 u32 kdata; 1466 u32 kdata;
1461 mm_segment_t old_fs = get_fs();
1462 1467
1463 set_fs(KERNEL_DS);
1464 /* Watchpoint */ 1468 /* Watchpoint */
1465 if (num < 0) { 1469 if (num < 0) {
1466 ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata); 1470 ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
@@ -1471,7 +1475,6 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
1471 } else { 1475 } else {
1472 ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata); 1476 ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
1473 } 1477 }
1474 set_fs(old_fs);
1475 1478
1476 if (!ret) 1479 if (!ret)
1477 ret = put_user(kdata, data); 1480 ret = put_user(kdata, data);
@@ -1484,7 +1487,6 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
1484{ 1487{
1485 int ret; 1488 int ret;
1486 u32 kdata = 0; 1489 u32 kdata = 0;
1487 mm_segment_t old_fs = get_fs();
1488 1490
1489 if (num == 0) 1491 if (num == 0)
1490 return 0; 1492 return 0;
@@ -1493,12 +1495,10 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
1493 if (ret) 1495 if (ret)
1494 return ret; 1496 return ret;
1495 1497
1496 set_fs(KERNEL_DS);
1497 if (num < 0) 1498 if (num < 0)
1498 ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata); 1499 ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
1499 else 1500 else
1500 ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata); 1501 ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
1501 set_fs(old_fs);
1502 1502
1503 return ret; 1503 return ret;
1504} 1504}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 1cb2749a72bf..8bbdc17e49df 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -277,7 +277,8 @@ void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
277 * If we were single stepping, we want to get the step exception after 277 * If we were single stepping, we want to get the step exception after
278 * we return from the trap. 278 * we return from the trap.
279 */ 279 */
280 user_fastforward_single_step(current); 280 if (user_mode(regs))
281 user_fastforward_single_step(current);
281} 282}
282 283
283static LIST_HEAD(undef_hook); 284static LIST_HEAD(undef_hook);
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 959e50d2588c..56a0260ceb11 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -25,6 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
27#include <linux/fs.h> 27#include <linux/fs.h>
28#include <kvm/arm_psci.h>
28#include <asm/cputype.h> 29#include <asm/cputype.h>
29#include <linux/uaccess.h> 30#include <linux/uaccess.h>
30#include <asm/kvm.h> 31#include <asm/kvm.h>
@@ -205,7 +206,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
205unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) 206unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
206{ 207{
207 return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu) 208 return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
208 + NUM_TIMER_REGS; 209 + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS;
209} 210}
210 211
211/** 212/**
@@ -225,6 +226,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
225 uindices++; 226 uindices++;
226 } 227 }
227 228
229 ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
230 if (ret)
231 return ret;
232 uindices += kvm_arm_get_fw_num_regs(vcpu);
233
228 ret = copy_timer_indices(vcpu, uindices); 234 ret = copy_timer_indices(vcpu, uindices);
229 if (ret) 235 if (ret)
230 return ret; 236 return ret;
@@ -243,6 +249,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
243 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 249 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
244 return get_core_reg(vcpu, reg); 250 return get_core_reg(vcpu, reg);
245 251
252 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
253 return kvm_arm_get_fw_reg(vcpu, reg);
254
246 if (is_timer_reg(reg->id)) 255 if (is_timer_reg(reg->id))
247 return get_timer_reg(vcpu, reg); 256 return get_timer_reg(vcpu, reg);
248 257
@@ -259,6 +268,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
259 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 268 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
260 return set_core_reg(vcpu, reg); 269 return set_core_reg(vcpu, reg);
261 270
271 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
272 return kvm_arm_set_fw_reg(vcpu, reg);
273
262 if (is_timer_reg(reg->id)) 274 if (is_timer_reg(reg->id))
263 return set_timer_reg(vcpu, reg); 275 return set_timer_reg(vcpu, reg);
264 276
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 806b0b126a64..6e3b969391fd 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -996,14 +996,12 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
996 996
997 if (id == SYS_ID_AA64PFR0_EL1) { 997 if (id == SYS_ID_AA64PFR0_EL1) {
998 if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT)) 998 if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
999 pr_err_once("kvm [%i]: SVE unsupported for guests, suppressing\n", 999 kvm_debug("SVE unsupported for guests, suppressing\n");
1000 task_pid_nr(current));
1001 1000
1002 val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); 1001 val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
1003 } else if (id == SYS_ID_AA64MMFR1_EL1) { 1002 } else if (id == SYS_ID_AA64MMFR1_EL1) {
1004 if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT)) 1003 if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
1005 pr_err_once("kvm [%i]: LORegions unsupported for guests, suppressing\n", 1004 kvm_debug("LORegions unsupported for guests, suppressing\n");
1006 task_pid_nr(current));
1007 1005
1008 val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT); 1006 val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
1009 } 1007 }
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 0ead8a1d1679..137710f4dac3 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -19,5 +19,9 @@ CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \
19 -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \ 19 -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \
20 -fcall-saved-x18 -fomit-frame-pointer 20 -fcall-saved-x18 -fomit-frame-pointer
21CFLAGS_REMOVE_atomic_ll_sc.o := -pg 21CFLAGS_REMOVE_atomic_ll_sc.o := -pg
22GCOV_PROFILE_atomic_ll_sc.o := n
23KASAN_SANITIZE_atomic_ll_sc.o := n
24KCOV_INSTRUMENT_atomic_ll_sc.o := n
25UBSAN_SANITIZE_atomic_ll_sc.o := n
22 26
23lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o 27lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index e36ed5087b5c..1059884f9a6f 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -58,7 +58,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
58 flush_ptrace_access(vma, page, uaddr, dst, len); 58 flush_ptrace_access(vma, page, uaddr, dst, len);
59} 59}
60 60
61void __sync_icache_dcache(pte_t pte, unsigned long addr) 61void __sync_icache_dcache(pte_t pte)
62{ 62{
63 struct page *page = pte_page(pte); 63 struct page *page = pte_page(pte);
64 64
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
index 9e8621d94ee9..e17262ad125e 100644
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -216,6 +216,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
216 memcpy((void *) dst, src, count); 216 memcpy((void *) dst, src, count);
217} 217}
218 218
219static inline void memset_io(volatile void __iomem *addr, int value,
220 size_t size)
221{
222 memset((void __force *)addr, value, size);
223}
224
219#define PCI_IO_ADDR (volatile void __iomem *) 225#define PCI_IO_ADDR (volatile void __iomem *)
220 226
221/* 227/*
diff --git a/arch/hexagon/lib/checksum.c b/arch/hexagon/lib/checksum.c
index 617506d1a559..7cd0a2259269 100644
--- a/arch/hexagon/lib/checksum.c
+++ b/arch/hexagon/lib/checksum.c
@@ -199,3 +199,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
199 memcpy(dst, src, len); 199 memcpy(dst, src, len);
200 return csum_partial(dst, len, sum); 200 return csum_partial(dst, len, sum);
201} 201}
202EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index e2364ff59180..34ac503e28ad 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -123,6 +123,9 @@ INSTALL_TARGETS = zinstall install
123 123
124PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS) 124PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
125 125
126# Default kernel to build
127all: bzImage
128
126zImage: vmlinuz 129zImage: vmlinuz
127Image: vmlinux 130Image: vmlinux
128 131
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 3b8507f71050..ee5a78a151a6 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -448,7 +448,8 @@ static int match_by_id(struct device * dev, void * data)
448 * Checks all the children of @parent for a matching @id. If none 448 * Checks all the children of @parent for a matching @id. If none
449 * found, it allocates a new device and returns it. 449 * found, it allocates a new device and returns it.
450 */ 450 */
451static struct parisc_device * alloc_tree_node(struct device *parent, char id) 451static struct parisc_device * __init alloc_tree_node(
452 struct device *parent, char id)
452{ 453{
453 struct match_id_data d = { 454 struct match_id_data d = {
454 .id = id, 455 .id = id,
@@ -825,8 +826,8 @@ static void walk_lower_bus(struct parisc_device *dev)
825 * devices which are not physically connected (such as extra serial & 826 * devices which are not physically connected (such as extra serial &
826 * keyboard ports). This problem is not yet solved. 827 * keyboard ports). This problem is not yet solved.
827 */ 828 */
828static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high, 829static void __init walk_native_bus(unsigned long io_io_low,
829 struct device *parent) 830 unsigned long io_io_high, struct device *parent)
830{ 831{
831 int i, devices_found = 0; 832 int i, devices_found = 0;
832 unsigned long hpa = io_io_low; 833 unsigned long hpa = io_io_low;
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index 13ee3569959a..ae684ac6efb6 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -174,7 +174,7 @@ void pcibios_set_master(struct pci_dev *dev)
174 * pcibios_init_bridge() initializes cache line and default latency 174 * pcibios_init_bridge() initializes cache line and default latency
175 * for pci controllers and pci-pci bridges 175 * for pci controllers and pci-pci bridges
176 */ 176 */
177void __init pcibios_init_bridge(struct pci_dev *dev) 177void __ref pcibios_init_bridge(struct pci_dev *dev)
178{ 178{
179 unsigned short bridge_ctl, bridge_ctl_new; 179 unsigned short bridge_ctl, bridge_ctl_new;
180 180
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index c3830400ca28..a1e772f909cb 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -205,7 +205,7 @@ static int __init rtc_init(void)
205device_initcall(rtc_init); 205device_initcall(rtc_init);
206#endif 206#endif
207 207
208void read_persistent_clock(struct timespec *ts) 208void read_persistent_clock64(struct timespec64 *ts)
209{ 209{
210 static struct pdc_tod tod_data; 210 static struct pdc_tod tod_data;
211 if (pdc_tod_read(&tod_data) == 0) { 211 if (pdc_tod_read(&tod_data) == 0) {
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 68e671a11987..71d31274d782 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -837,6 +837,17 @@ void __init initialize_ivt(const void *iva)
837 if (pdc_instr(&instr) == PDC_OK) 837 if (pdc_instr(&instr) == PDC_OK)
838 ivap[0] = instr; 838 ivap[0] = instr;
839 839
840 /*
841 * Rules for the checksum of the HPMC handler:
842 * 1. The IVA does not point to PDC/PDH space (ie: the OS has installed
843 * its own IVA).
844 * 2. The word at IVA + 32 is nonzero.
845 * 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and
846 * Address (IVA + 56) are word-aligned.
847 * 4. The checksum of the 8 words starting at IVA + 32 plus the sum of
848 * the Length/4 words starting at Address is zero.
849 */
850
840 /* Compute Checksum for HPMC handler */ 851 /* Compute Checksum for HPMC handler */
841 length = os_hpmc_size; 852 length = os_hpmc_size;
842 ivap[7] = length; 853 ivap[7] = length;
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index cab32ee824d2..2607d2d33405 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -516,7 +516,7 @@ static void __init map_pages(unsigned long start_vaddr,
516 } 516 }
517} 517}
518 518
519void free_initmem(void) 519void __ref free_initmem(void)
520{ 520{
521 unsigned long init_begin = (unsigned long)__init_begin; 521 unsigned long init_begin = (unsigned long)__init_begin;
522 unsigned long init_end = (unsigned long)__init_end; 522 unsigned long init_end = (unsigned long)__init_end;
diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h
index d1c2d2e658cf..2f3ff7a27881 100644
--- a/arch/powerpc/include/asm/powernv.h
+++ b/arch/powerpc/include/asm/powernv.h
@@ -15,7 +15,7 @@
15extern void powernv_set_nmmu_ptcr(unsigned long ptcr); 15extern void powernv_set_nmmu_ptcr(unsigned long ptcr);
16extern struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, 16extern struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
17 unsigned long flags, 17 unsigned long flags,
18 struct npu_context *(*cb)(struct npu_context *, void *), 18 void (*cb)(struct npu_context *, void *),
19 void *priv); 19 void *priv);
20extern void pnv_npu2_destroy_context(struct npu_context *context, 20extern void pnv_npu2_destroy_context(struct npu_context *context,
21 struct pci_dev *gpdev); 21 struct pci_dev *gpdev);
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index fe6fc63251fe..38c5b4764bfe 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -441,7 +441,6 @@ static int mce_handle_ierror(struct pt_regs *regs,
441 if (pfn != ULONG_MAX) { 441 if (pfn != ULONG_MAX) {
442 *phys_addr = 442 *phys_addr =
443 (pfn << PAGE_SHIFT); 443 (pfn << PAGE_SHIFT);
444 handled = 1;
445 } 444 }
446 } 445 }
447 } 446 }
@@ -532,9 +531,7 @@ static int mce_handle_derror(struct pt_regs *regs,
532 * kernel/exception-64s.h 531 * kernel/exception-64s.h
533 */ 532 */
534 if (get_paca()->in_mce < MAX_MCE_DEPTH) 533 if (get_paca()->in_mce < MAX_MCE_DEPTH)
535 if (!mce_find_instr_ea_and_pfn(regs, addr, 534 mce_find_instr_ea_and_pfn(regs, addr, phys_addr);
536 phys_addr))
537 handled = 1;
538 } 535 }
539 found = 1; 536 found = 1;
540 } 537 }
@@ -572,7 +569,7 @@ static long mce_handle_error(struct pt_regs *regs,
572 const struct mce_ierror_table itable[]) 569 const struct mce_ierror_table itable[])
573{ 570{
574 struct mce_error_info mce_err = { 0 }; 571 struct mce_error_info mce_err = { 0 };
575 uint64_t addr, phys_addr; 572 uint64_t addr, phys_addr = ULONG_MAX;
576 uint64_t srr1 = regs->msr; 573 uint64_t srr1 = regs->msr;
577 long handled; 574 long handled;
578 575
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index e16ec7b3b427..9ca7148b5881 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -566,10 +566,35 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
566#endif 566#endif
567 567
568#ifdef CONFIG_NMI_IPI 568#ifdef CONFIG_NMI_IPI
569static void stop_this_cpu(struct pt_regs *regs) 569static void nmi_stop_this_cpu(struct pt_regs *regs)
570#else 570{
571 /*
572 * This is a special case because it never returns, so the NMI IPI
573 * handling would never mark it as done, which makes any later
574 * smp_send_nmi_ipi() call spin forever. Mark it done now.
575 *
576 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
577 */
578 nmi_ipi_lock();
579 nmi_ipi_busy_count--;
580 nmi_ipi_unlock();
581
582 /* Remove this CPU */
583 set_cpu_online(smp_processor_id(), false);
584
585 spin_begin();
586 while (1)
587 spin_cpu_relax();
588}
589
590void smp_send_stop(void)
591{
592 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
593}
594
595#else /* CONFIG_NMI_IPI */
596
571static void stop_this_cpu(void *dummy) 597static void stop_this_cpu(void *dummy)
572#endif
573{ 598{
574 /* Remove this CPU */ 599 /* Remove this CPU */
575 set_cpu_online(smp_processor_id(), false); 600 set_cpu_online(smp_processor_id(), false);
@@ -582,12 +607,22 @@ static void stop_this_cpu(void *dummy)
582 607
583void smp_send_stop(void) 608void smp_send_stop(void)
584{ 609{
585#ifdef CONFIG_NMI_IPI 610 static bool stopped = false;
586 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, stop_this_cpu, 1000000); 611
587#else 612 /*
613 * Prevent waiting on csd lock from a previous smp_send_stop.
614 * This is racy, but in general callers try to do the right
615 * thing and only fire off one smp_send_stop (e.g., see
616 * kernel/panic.c)
617 */
618 if (stopped)
619 return;
620
621 stopped = true;
622
588 smp_call_function(stop_this_cpu, NULL, 0); 623 smp_call_function(stop_this_cpu, NULL, 0);
589#endif
590} 624}
625#endif /* CONFIG_NMI_IPI */
591 626
592struct thread_info *current_set[NR_CPUS]; 627struct thread_info *current_set[NR_CPUS];
593 628
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 6038e2e7aee0..876d4f294fdd 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -305,6 +305,13 @@ void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
305 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL); 305 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
306} 306}
307 307
308#ifdef CONFIG_ALTIVEC
309void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
310{
311 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
312}
313#endif
314
308void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) 315void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
309{ 316{
310 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER); 317 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 737f8a4632cc..c3c39b02b2ba 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -133,6 +133,7 @@ int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *
133 start, start + size, rc); 133 start, start + size, rc);
134 return -EFAULT; 134 return -EFAULT;
135 } 135 }
136 flush_inval_dcache_range(start, start + size);
136 137
137 return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock); 138 return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
138} 139}
@@ -159,6 +160,7 @@ int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap
159 160
160 /* Remove htab bolted mappings for this section of memory */ 161 /* Remove htab bolted mappings for this section of memory */
161 start = (unsigned long)__va(start); 162 start = (unsigned long)__va(start);
163 flush_inval_dcache_range(start, start + size);
162 ret = remove_section_mapping(start, start + size); 164 ret = remove_section_mapping(start, start + size);
163 165
164 /* Ensure all vmalloc mappings are flushed in case they also 166 /* Ensure all vmalloc mappings are flushed in case they also
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
index de470caf0784..fc222a0c2ac4 100644
--- a/arch/powerpc/platforms/powernv/memtrace.c
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -82,19 +82,6 @@ static const struct file_operations memtrace_fops = {
82 .open = simple_open, 82 .open = simple_open,
83}; 83};
84 84
85static void flush_memory_region(u64 base, u64 size)
86{
87 unsigned long line_size = ppc64_caches.l1d.size;
88 u64 end = base + size;
89 u64 addr;
90
91 base = round_down(base, line_size);
92 end = round_up(end, line_size);
93
94 for (addr = base; addr < end; addr += line_size)
95 asm volatile("dcbf 0,%0" : "=r" (addr) :: "memory");
96}
97
98static int check_memblock_online(struct memory_block *mem, void *arg) 85static int check_memblock_online(struct memory_block *mem, void *arg)
99{ 86{
100 if (mem->state != MEM_ONLINE) 87 if (mem->state != MEM_ONLINE)
@@ -132,10 +119,6 @@ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
132 walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE, 119 walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE,
133 change_memblock_state); 120 change_memblock_state);
134 121
135 /* RCU grace period? */
136 flush_memory_region((u64)__va(start_pfn << PAGE_SHIFT),
137 nr_pages << PAGE_SHIFT);
138
139 lock_device_hotplug(); 122 lock_device_hotplug();
140 remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT); 123 remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
141 unlock_device_hotplug(); 124 unlock_device_hotplug();
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 69a4f9e8bd55..525e966dce34 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -34,6 +34,19 @@
34#define npu_to_phb(x) container_of(x, struct pnv_phb, npu) 34#define npu_to_phb(x) container_of(x, struct pnv_phb, npu)
35 35
36/* 36/*
37 * spinlock to protect initialisation of an npu_context for a particular
38 * mm_struct.
39 */
40static DEFINE_SPINLOCK(npu_context_lock);
41
42/*
43 * When an address shootdown range exceeds this threshold we invalidate the
44 * entire TLB on the GPU for the given PID rather than each specific address in
45 * the range.
46 */
47#define ATSD_THRESHOLD (2*1024*1024)
48
49/*
37 * Other types of TCE cache invalidation are not functional in the 50 * Other types of TCE cache invalidation are not functional in the
38 * hardware. 51 * hardware.
39 */ 52 */
@@ -401,7 +414,7 @@ struct npu_context {
401 bool nmmu_flush; 414 bool nmmu_flush;
402 415
403 /* Callback to stop translation requests on a given GPU */ 416 /* Callback to stop translation requests on a given GPU */
404 struct npu_context *(*release_cb)(struct npu_context *, void *); 417 void (*release_cb)(struct npu_context *context, void *priv);
405 418
406 /* 419 /*
407 * Private pointer passed to the above callback for usage by 420 * Private pointer passed to the above callback for usage by
@@ -671,11 +684,19 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
671 struct npu_context *npu_context = mn_to_npu_context(mn); 684 struct npu_context *npu_context = mn_to_npu_context(mn);
672 unsigned long address; 685 unsigned long address;
673 686
674 for (address = start; address < end; address += PAGE_SIZE) 687 if (end - start > ATSD_THRESHOLD) {
675 mmio_invalidate(npu_context, 1, address, false); 688 /*
689 * Just invalidate the entire PID if the address range is too
690 * large.
691 */
692 mmio_invalidate(npu_context, 0, 0, true);
693 } else {
694 for (address = start; address < end; address += PAGE_SIZE)
695 mmio_invalidate(npu_context, 1, address, false);
676 696
677 /* Do the flush only on the final addess == end */ 697 /* Do the flush only on the final addess == end */
678 mmio_invalidate(npu_context, 1, address, true); 698 mmio_invalidate(npu_context, 1, address, true);
699 }
679} 700}
680 701
681static const struct mmu_notifier_ops nv_nmmu_notifier_ops = { 702static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@@ -696,11 +717,12 @@ static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
696 * Returns an error if there no contexts are currently available or a 717 * Returns an error if there no contexts are currently available or a
697 * npu_context which should be passed to pnv_npu2_handle_fault(). 718 * npu_context which should be passed to pnv_npu2_handle_fault().
698 * 719 *
699 * mmap_sem must be held in write mode. 720 * mmap_sem must be held in write mode and must not be called from interrupt
721 * context.
700 */ 722 */
701struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, 723struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
702 unsigned long flags, 724 unsigned long flags,
703 struct npu_context *(*cb)(struct npu_context *, void *), 725 void (*cb)(struct npu_context *, void *),
704 void *priv) 726 void *priv)
705{ 727{
706 int rc; 728 int rc;
@@ -743,7 +765,9 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
743 /* 765 /*
744 * Setup the NPU context table for a particular GPU. These need to be 766 * Setup the NPU context table for a particular GPU. These need to be
745 * per-GPU as we need the tables to filter ATSDs when there are no 767 * per-GPU as we need the tables to filter ATSDs when there are no
746 * active contexts on a particular GPU. 768 * active contexts on a particular GPU. It is safe for these to be
769 * called concurrently with destroy as the OPAL call takes appropriate
770 * locks and refcounts on init/destroy.
747 */ 771 */
748 rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags, 772 rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
749 PCI_DEVID(gpdev->bus->number, gpdev->devfn)); 773 PCI_DEVID(gpdev->bus->number, gpdev->devfn));
@@ -754,8 +778,29 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
754 * We store the npu pci device so we can more easily get at the 778 * We store the npu pci device so we can more easily get at the
755 * associated npus. 779 * associated npus.
756 */ 780 */
781 spin_lock(&npu_context_lock);
757 npu_context = mm->context.npu_context; 782 npu_context = mm->context.npu_context;
783 if (npu_context) {
784 if (npu_context->release_cb != cb ||
785 npu_context->priv != priv) {
786 spin_unlock(&npu_context_lock);
787 opal_npu_destroy_context(nphb->opal_id, mm->context.id,
788 PCI_DEVID(gpdev->bus->number,
789 gpdev->devfn));
790 return ERR_PTR(-EINVAL);
791 }
792
793 WARN_ON(!kref_get_unless_zero(&npu_context->kref));
794 }
795 spin_unlock(&npu_context_lock);
796
758 if (!npu_context) { 797 if (!npu_context) {
798 /*
799 * We can set up these fields without holding the
800 * npu_context_lock as the npu_context hasn't been returned to
801 * the caller meaning it can't be destroyed. Parallel allocation
802 * is protected against by mmap_sem.
803 */
759 rc = -ENOMEM; 804 rc = -ENOMEM;
760 npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL); 805 npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
761 if (npu_context) { 806 if (npu_context) {
@@ -774,8 +819,6 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
774 } 819 }
775 820
776 mm->context.npu_context = npu_context; 821 mm->context.npu_context = npu_context;
777 } else {
778 WARN_ON(!kref_get_unless_zero(&npu_context->kref));
779 } 822 }
780 823
781 npu_context->release_cb = cb; 824 npu_context->release_cb = cb;
@@ -814,15 +857,16 @@ static void pnv_npu2_release_context(struct kref *kref)
814 mm_context_remove_copro(npu_context->mm); 857 mm_context_remove_copro(npu_context->mm);
815 858
816 npu_context->mm->context.npu_context = NULL; 859 npu_context->mm->context.npu_context = NULL;
817 mmu_notifier_unregister(&npu_context->mn,
818 npu_context->mm);
819
820 kfree(npu_context);
821} 860}
822 861
862/*
863 * Destroy a context on the given GPU. May free the npu_context if it is no
864 * longer active on any GPUs. Must not be called from interrupt context.
865 */
823void pnv_npu2_destroy_context(struct npu_context *npu_context, 866void pnv_npu2_destroy_context(struct npu_context *npu_context,
824 struct pci_dev *gpdev) 867 struct pci_dev *gpdev)
825{ 868{
869 int removed;
826 struct pnv_phb *nphb; 870 struct pnv_phb *nphb;
827 struct npu *npu; 871 struct npu *npu;
828 struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0); 872 struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
@@ -844,7 +888,21 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
844 WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL); 888 WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL);
845 opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id, 889 opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
846 PCI_DEVID(gpdev->bus->number, gpdev->devfn)); 890 PCI_DEVID(gpdev->bus->number, gpdev->devfn));
847 kref_put(&npu_context->kref, pnv_npu2_release_context); 891 spin_lock(&npu_context_lock);
892 removed = kref_put(&npu_context->kref, pnv_npu2_release_context);
893 spin_unlock(&npu_context_lock);
894
895 /*
896 * We need to do this outside of pnv_npu2_release_context so that it is
897 * outside the spinlock as mmu_notifier_destroy uses SRCU.
898 */
899 if (removed) {
900 mmu_notifier_unregister(&npu_context->mn,
901 npu_context->mm);
902
903 kfree(npu_context);
904 }
905
848} 906}
849EXPORT_SYMBOL(pnv_npu2_destroy_context); 907EXPORT_SYMBOL(pnv_npu2_destroy_context);
850 908
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
index f8868864f373..aa2a5139462e 100644
--- a/arch/powerpc/platforms/powernv/opal-rtc.c
+++ b/arch/powerpc/platforms/powernv/opal-rtc.c
@@ -48,10 +48,12 @@ unsigned long __init opal_get_boot_time(void)
48 48
49 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { 49 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
50 rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); 50 rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
51 if (rc == OPAL_BUSY_EVENT) 51 if (rc == OPAL_BUSY_EVENT) {
52 mdelay(OPAL_BUSY_DELAY_MS);
52 opal_poll_events(NULL); 53 opal_poll_events(NULL);
53 else if (rc == OPAL_BUSY) 54 } else if (rc == OPAL_BUSY) {
54 mdelay(10); 55 mdelay(OPAL_BUSY_DELAY_MS);
56 }
55 } 57 }
56 if (rc != OPAL_SUCCESS) 58 if (rc != OPAL_SUCCESS)
57 return 0; 59 return 0;
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 23d8acca5c90..cd4fd85fde84 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -11,6 +11,7 @@ config RISCV
11 select ARCH_WANT_FRAME_POINTERS 11 select ARCH_WANT_FRAME_POINTERS
12 select CLONE_BACKWARDS 12 select CLONE_BACKWARDS
13 select COMMON_CLK 13 select COMMON_CLK
14 select DMA_DIRECT_OPS
14 select GENERIC_CLOCKEVENTS 15 select GENERIC_CLOCKEVENTS
15 select GENERIC_CPU_DEVICES 16 select GENERIC_CPU_DEVICES
16 select GENERIC_IRQ_SHOW 17 select GENERIC_IRQ_SHOW
@@ -89,9 +90,6 @@ config PGTABLE_LEVELS
89config HAVE_KPROBES 90config HAVE_KPROBES
90 def_bool n 91 def_bool n
91 92
92config DMA_DIRECT_OPS
93 def_bool y
94
95menu "Platform type" 93menu "Platform type"
96 94
97choice 95choice
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 1e5fd280fb4d..4286a5f83876 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -15,7 +15,6 @@ generic-y += fcntl.h
15generic-y += futex.h 15generic-y += futex.h
16generic-y += hardirq.h 16generic-y += hardirq.h
17generic-y += hash.h 17generic-y += hash.h
18generic-y += handle_irq.h
19generic-y += hw_irq.h 18generic-y += hw_irq.h
20generic-y += ioctl.h 19generic-y += ioctl.h
21generic-y += ioctls.h 20generic-y += ioctls.h
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 324568d33921..f6561b783b61 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -52,7 +52,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
52# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions. 52# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions.
53# Make sure only to export the intended __vdso_xxx symbol offsets. 53# Make sure only to export the intended __vdso_xxx symbol offsets.
54quiet_cmd_vdsold = VDSOLD $@ 54quiet_cmd_vdsold = VDSOLD $@
55 cmd_vdsold = $(CC) $(KCFLAGS) -nostdlib $(SYSCFLAGS_$(@F)) \ 55 cmd_vdsold = $(CC) $(KCFLAGS) $(call cc-option, -no-pie) -nostdlib $(SYSCFLAGS_$(@F)) \
56 -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \ 56 -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \
57 $(CROSS_COMPILE)objcopy \ 57 $(CROSS_COMPILE)objcopy \
58 $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ 58 $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 83ba57533ce6..3c883c368eb0 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -45,6 +45,9 @@ struct thread_info {
45void arch_release_task_struct(struct task_struct *tsk); 45void arch_release_task_struct(struct task_struct *tsk);
46int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); 46int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
47 47
48void arch_setup_new_exec(void);
49#define arch_setup_new_exec arch_setup_new_exec
50
48#endif 51#endif
49 52
50/* 53/*
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 5a83be955c70..0dc8ac8548ee 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -465,11 +465,11 @@ int module_finalize(const Elf_Ehdr *hdr,
465 apply_alternatives(aseg, aseg + s->sh_size); 465 apply_alternatives(aseg, aseg + s->sh_size);
466 466
467 if (IS_ENABLED(CONFIG_EXPOLINE) && 467 if (IS_ENABLED(CONFIG_EXPOLINE) &&
468 (!strcmp(".nospec_call_table", secname))) 468 (!strncmp(".s390_indirect", secname, 14)))
469 nospec_revert(aseg, aseg + s->sh_size); 469 nospec_revert(aseg, aseg + s->sh_size);
470 470
471 if (IS_ENABLED(CONFIG_EXPOLINE) && 471 if (IS_ENABLED(CONFIG_EXPOLINE) &&
472 (!strcmp(".nospec_return_table", secname))) 472 (!strncmp(".s390_return", secname, 12)))
473 nospec_revert(aseg, aseg + s->sh_size); 473 nospec_revert(aseg, aseg + s->sh_size);
474 } 474 }
475 475
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index 5ee27dc9a10c..feebb2944882 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -123,7 +123,7 @@ CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV, 0x00a1);
123CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1); 123CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1);
124CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2); 124CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2);
125CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3); 125CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3);
126CPUMF_EVENT_ATTR(cf_z13, L1D_WRITES_RO_EXCL, 0x0080); 126CPUMF_EVENT_ATTR(cf_z13, L1D_RO_EXCL_WRITES, 0x0080);
127CPUMF_EVENT_ATTR(cf_z13, DTLB1_WRITES, 0x0081); 127CPUMF_EVENT_ATTR(cf_z13, DTLB1_WRITES, 0x0081);
128CPUMF_EVENT_ATTR(cf_z13, DTLB1_MISSES, 0x0082); 128CPUMF_EVENT_ATTR(cf_z13, DTLB1_MISSES, 0x0082);
129CPUMF_EVENT_ATTR(cf_z13, DTLB1_HPAGE_WRITES, 0x0083); 129CPUMF_EVENT_ATTR(cf_z13, DTLB1_HPAGE_WRITES, 0x0083);
@@ -179,7 +179,7 @@ CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_NO_SPECIAL, 0x00db);
179CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_SPECIAL, 0x00dc); 179CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_SPECIAL, 0x00dc);
180CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0); 180CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
181CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1); 181CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
182CPUMF_EVENT_ATTR(cf_z14, L1D_WRITES_RO_EXCL, 0x0080); 182CPUMF_EVENT_ATTR(cf_z14, L1D_RO_EXCL_WRITES, 0x0080);
183CPUMF_EVENT_ATTR(cf_z14, DTLB2_WRITES, 0x0081); 183CPUMF_EVENT_ATTR(cf_z14, DTLB2_WRITES, 0x0081);
184CPUMF_EVENT_ATTR(cf_z14, DTLB2_MISSES, 0x0082); 184CPUMF_EVENT_ATTR(cf_z14, DTLB2_MISSES, 0x0082);
185CPUMF_EVENT_ATTR(cf_z14, DTLB2_HPAGE_WRITES, 0x0083); 185CPUMF_EVENT_ATTR(cf_z14, DTLB2_HPAGE_WRITES, 0x0083);
@@ -371,7 +371,7 @@ static struct attribute *cpumcf_zec12_pmu_event_attr[] __initdata = {
371}; 371};
372 372
373static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = { 373static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = {
374 CPUMF_EVENT_PTR(cf_z13, L1D_WRITES_RO_EXCL), 374 CPUMF_EVENT_PTR(cf_z13, L1D_RO_EXCL_WRITES),
375 CPUMF_EVENT_PTR(cf_z13, DTLB1_WRITES), 375 CPUMF_EVENT_PTR(cf_z13, DTLB1_WRITES),
376 CPUMF_EVENT_PTR(cf_z13, DTLB1_MISSES), 376 CPUMF_EVENT_PTR(cf_z13, DTLB1_MISSES),
377 CPUMF_EVENT_PTR(cf_z13, DTLB1_HPAGE_WRITES), 377 CPUMF_EVENT_PTR(cf_z13, DTLB1_HPAGE_WRITES),
@@ -431,7 +431,7 @@ static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = {
431}; 431};
432 432
433static struct attribute *cpumcf_z14_pmu_event_attr[] __initdata = { 433static struct attribute *cpumcf_z14_pmu_event_attr[] __initdata = {
434 CPUMF_EVENT_PTR(cf_z14, L1D_WRITES_RO_EXCL), 434 CPUMF_EVENT_PTR(cf_z14, L1D_RO_EXCL_WRITES),
435 CPUMF_EVENT_PTR(cf_z14, DTLB2_WRITES), 435 CPUMF_EVENT_PTR(cf_z14, DTLB2_WRITES),
436 CPUMF_EVENT_PTR(cf_z14, DTLB2_MISSES), 436 CPUMF_EVENT_PTR(cf_z14, DTLB2_MISSES),
437 CPUMF_EVENT_PTR(cf_z14, DTLB2_HPAGE_WRITES), 437 CPUMF_EVENT_PTR(cf_z14, DTLB2_HPAGE_WRITES),
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 70576a2f69cf..6e758bb6cd29 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -29,6 +29,7 @@
29#include <linux/random.h> 29#include <linux/random.h>
30#include <linux/export.h> 30#include <linux/export.h>
31#include <linux/init_task.h> 31#include <linux/init_task.h>
32#include <asm/cpu_mf.h>
32#include <asm/io.h> 33#include <asm/io.h>
33#include <asm/processor.h> 34#include <asm/processor.h>
34#include <asm/vtimer.h> 35#include <asm/vtimer.h>
@@ -48,6 +49,15 @@ void flush_thread(void)
48{ 49{
49} 50}
50 51
52void arch_setup_new_exec(void)
53{
54 if (S390_lowcore.current_pid != current->pid) {
55 S390_lowcore.current_pid = current->pid;
56 if (test_facility(40))
57 lpp(&S390_lowcore.lpp);
58 }
59}
60
51void arch_release_task_struct(struct task_struct *tsk) 61void arch_release_task_struct(struct task_struct *tsk)
52{ 62{
53 runtime_instr_release(tsk); 63 runtime_instr_release(tsk);
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index d9d1f512f019..5007fac01bb5 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -150,6 +150,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
150 return orig; 150 return orig;
151} 151}
152 152
153bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
154 struct pt_regs *regs)
155{
156 if (ctx == RP_CHECK_CHAIN_CALL)
157 return user_stack_pointer(regs) <= ret->stack;
158 else
159 return user_stack_pointer(regs) < ret->stack;
160}
161
153/* Instruction Emulation */ 162/* Instruction Emulation */
154 163
155static void adjust_psw_addr(psw_t *psw, unsigned long len) 164static void adjust_psw_addr(psw_t *psw, unsigned long len)
diff --git a/arch/sparc/include/uapi/asm/oradax.h b/arch/sparc/include/uapi/asm/oradax.h
index 722951908b0a..4f6676fe4bcc 100644
--- a/arch/sparc/include/uapi/asm/oradax.h
+++ b/arch/sparc/include/uapi/asm/oradax.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * This program is free software: you can redistribute it and/or modify 4 * This program is free software: you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation, either version 3 of the License, or 6 * the Free Software Foundation, either version 2 of the License, or
7 * (at your option) any later version. 7 * (at your option) any later version.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, 9 * This program is distributed in the hope that it will be useful,
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index 1a0fa10cb6b7..32bae68e34c1 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -403,7 +403,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
403 if (err) { 403 if (err) {
404 printk(KERN_ERR "VIO: Could not register device %s, err=%d\n", 404 printk(KERN_ERR "VIO: Could not register device %s, err=%d\n",
405 dev_name(&vdev->dev), err); 405 dev_name(&vdev->dev), err);
406 kfree(vdev); 406 put_device(&vdev->dev);
407 return NULL; 407 return NULL;
408 } 408 }
409 if (vdev->dp) 409 if (vdev->dp)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1f5fa2f2c168..d51a71dcbac2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -52,6 +52,7 @@ config X86
52 select ARCH_HAS_DEVMEM_IS_ALLOWED 52 select ARCH_HAS_DEVMEM_IS_ALLOWED
53 select ARCH_HAS_ELF_RANDOMIZE 53 select ARCH_HAS_ELF_RANDOMIZE
54 select ARCH_HAS_FAST_MULTIPLIER 54 select ARCH_HAS_FAST_MULTIPLIER
55 select ARCH_HAS_FILTER_PGPROT
55 select ARCH_HAS_FORTIFY_SOURCE 56 select ARCH_HAS_FORTIFY_SOURCE
56 select ARCH_HAS_GCOV_PROFILE_ALL 57 select ARCH_HAS_GCOV_PROFILE_ALL
57 select ARCH_HAS_KCOV if X86_64 58 select ARCH_HAS_KCOV if X86_64
@@ -273,6 +274,9 @@ config ARCH_HAS_CPU_RELAX
273config ARCH_HAS_CACHE_LINE_SIZE 274config ARCH_HAS_CACHE_LINE_SIZE
274 def_bool y 275 def_bool y
275 276
277config ARCH_HAS_FILTER_PGPROT
278 def_bool y
279
276config HAVE_SETUP_PER_CPU_AREA 280config HAVE_SETUP_PER_CPU_AREA
277 def_bool y 281 def_bool y
278 282
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 9af927e59d49..9de7f1e1dede 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -84,13 +84,13 @@ ENTRY(entry_SYSENTER_compat)
84 pushq %rdx /* pt_regs->dx */ 84 pushq %rdx /* pt_regs->dx */
85 pushq %rcx /* pt_regs->cx */ 85 pushq %rcx /* pt_regs->cx */
86 pushq $-ENOSYS /* pt_regs->ax */ 86 pushq $-ENOSYS /* pt_regs->ax */
87 pushq $0 /* pt_regs->r8 = 0 */ 87 pushq %r8 /* pt_regs->r8 */
88 xorl %r8d, %r8d /* nospec r8 */ 88 xorl %r8d, %r8d /* nospec r8 */
89 pushq $0 /* pt_regs->r9 = 0 */ 89 pushq %r9 /* pt_regs->r9 */
90 xorl %r9d, %r9d /* nospec r9 */ 90 xorl %r9d, %r9d /* nospec r9 */
91 pushq $0 /* pt_regs->r10 = 0 */ 91 pushq %r10 /* pt_regs->r10 */
92 xorl %r10d, %r10d /* nospec r10 */ 92 xorl %r10d, %r10d /* nospec r10 */
93 pushq $0 /* pt_regs->r11 = 0 */ 93 pushq %r11 /* pt_regs->r11 */
94 xorl %r11d, %r11d /* nospec r11 */ 94 xorl %r11d, %r11d /* nospec r11 */
95 pushq %rbx /* pt_regs->rbx */ 95 pushq %rbx /* pt_regs->rbx */
96 xorl %ebx, %ebx /* nospec rbx */ 96 xorl %ebx, %ebx /* nospec rbx */
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 607bf565a90c..707b2a96e516 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3339,7 +3339,8 @@ static void intel_pmu_cpu_starting(int cpu)
3339 3339
3340 cpuc->lbr_sel = NULL; 3340 cpuc->lbr_sel = NULL;
3341 3341
3342 flip_smm_bit(&x86_pmu.attr_freeze_on_smi); 3342 if (x86_pmu.version > 1)
3343 flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
3343 3344
3344 if (!cpuc->shared_regs) 3345 if (!cpuc->shared_regs)
3345 return; 3346 return;
@@ -3502,6 +3503,8 @@ static __initconst const struct x86_pmu core_pmu = {
3502 .cpu_dying = intel_pmu_cpu_dying, 3503 .cpu_dying = intel_pmu_cpu_dying,
3503}; 3504};
3504 3505
3506static struct attribute *intel_pmu_attrs[];
3507
3505static __initconst const struct x86_pmu intel_pmu = { 3508static __initconst const struct x86_pmu intel_pmu = {
3506 .name = "Intel", 3509 .name = "Intel",
3507 .handle_irq = intel_pmu_handle_irq, 3510 .handle_irq = intel_pmu_handle_irq,
@@ -3533,6 +3536,8 @@ static __initconst const struct x86_pmu intel_pmu = {
3533 .format_attrs = intel_arch3_formats_attr, 3536 .format_attrs = intel_arch3_formats_attr,
3534 .events_sysfs_show = intel_event_sysfs_show, 3537 .events_sysfs_show = intel_event_sysfs_show,
3535 3538
3539 .attrs = intel_pmu_attrs,
3540
3536 .cpu_prepare = intel_pmu_cpu_prepare, 3541 .cpu_prepare = intel_pmu_cpu_prepare,
3537 .cpu_starting = intel_pmu_cpu_starting, 3542 .cpu_starting = intel_pmu_cpu_starting,
3538 .cpu_dying = intel_pmu_cpu_dying, 3543 .cpu_dying = intel_pmu_cpu_dying,
@@ -3911,8 +3916,6 @@ __init int intel_pmu_init(void)
3911 3916
3912 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters); 3917 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
3913 3918
3914
3915 x86_pmu.attrs = intel_pmu_attrs;
3916 /* 3919 /*
3917 * Quirk: v2 perfmon does not report fixed-purpose events, so 3920 * Quirk: v2 perfmon does not report fixed-purpose events, so
3918 * assume at least 3 events, when not running in a hypervisor: 3921 * assume at least 3 events, when not running in a hypervisor:
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index d554c11e01ff..578793e97431 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -320,6 +320,7 @@
320#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ 320#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
321#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ 321#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
322#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ 322#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
323#define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */
323 324
324/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ 325/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
325#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ 326#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 09ad88572746..cc8f8fcf9b4a 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -46,7 +46,21 @@ int ftrace_int3_handler(struct pt_regs *regs);
46#endif /* CONFIG_FUNCTION_TRACER */ 46#endif /* CONFIG_FUNCTION_TRACER */
47 47
48 48
49#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS) 49#ifndef __ASSEMBLY__
50
51#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
52static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
53{
54 /*
55 * Compare the symbol name with the system call name. Skip the
56 * "__x64_sys", "__ia32_sys" or simple "sys" prefix.
57 */
58 return !strcmp(sym + 3, name + 3) ||
59 (!strncmp(sym, "__x64_", 6) && !strcmp(sym + 9, name + 3)) ||
60 (!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, name + 3));
61}
62
63#ifndef COMPILE_OFFSETS
50 64
51#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION) 65#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
52#include <asm/compat.h> 66#include <asm/compat.h>
@@ -67,6 +81,7 @@ static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
67 return false; 81 return false;
68} 82}
69#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */ 83#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
70#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */ 84#endif /* !COMPILE_OFFSETS */
85#endif /* !__ASSEMBLY__ */
71 86
72#endif /* _ASM_X86_FTRACE_H */ 87#endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 404c5fdff859..548d90bbf919 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -34,11 +34,6 @@
34 * (0x80 is the syscall vector, 0x30-0x3f are for ISA) 34 * (0x80 is the syscall vector, 0x30-0x3f are for ISA)
35 */ 35 */
36#define FIRST_EXTERNAL_VECTOR 0x20 36#define FIRST_EXTERNAL_VECTOR 0x20
37/*
38 * We start allocating at 0x21 to spread out vectors evenly between
39 * priority levels. (0x80 is the syscall vector)
40 */
41#define VECTOR_OFFSET_START 1
42 37
43/* 38/*
44 * Reserve the lowest usable vector (and hence lowest priority) 0x20 for 39 * Reserve the lowest usable vector (and hence lowest priority) 0x20 for
@@ -119,8 +114,6 @@
119#define FIRST_SYSTEM_VECTOR NR_VECTORS 114#define FIRST_SYSTEM_VECTOR NR_VECTORS
120#endif 115#endif
121 116
122#define FPU_IRQ 13
123
124/* 117/*
125 * Size the maximum number of interrupts. 118 * Size the maximum number of interrupts.
126 * 119 *
diff --git a/arch/x86/include/asm/jailhouse_para.h b/arch/x86/include/asm/jailhouse_para.h
index b885a961a150..a34897aef2c2 100644
--- a/arch/x86/include/asm/jailhouse_para.h
+++ b/arch/x86/include/asm/jailhouse_para.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2 2
3/* 3/*
4 * Jailhouse paravirt detection 4 * Jailhouse paravirt detection
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5f49b4ff0c24..f1633de5a675 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -601,6 +601,11 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
601 601
602#define canon_pgprot(p) __pgprot(massage_pgprot(p)) 602#define canon_pgprot(p) __pgprot(massage_pgprot(p))
603 603
604static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
605{
606 return canon_pgprot(prot);
607}
608
604static inline int is_new_memtype_allowed(u64 paddr, unsigned long size, 609static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
605 enum page_cache_mode pcm, 610 enum page_cache_mode pcm,
606 enum page_cache_mode new_pcm) 611 enum page_cache_mode new_pcm)
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index d5c21a382475..adb47552e6bb 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -105,14 +105,14 @@ extern unsigned int ptrs_per_p4d;
105#define LDT_PGD_ENTRY (pgtable_l5_enabled ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4) 105#define LDT_PGD_ENTRY (pgtable_l5_enabled ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
106#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) 106#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
107 107
108#define __VMALLOC_BASE_L4 0xffffc90000000000 108#define __VMALLOC_BASE_L4 0xffffc90000000000UL
109#define __VMALLOC_BASE_L5 0xffa0000000000000 109#define __VMALLOC_BASE_L5 0xffa0000000000000UL
110 110
111#define VMALLOC_SIZE_TB_L4 32UL 111#define VMALLOC_SIZE_TB_L4 32UL
112#define VMALLOC_SIZE_TB_L5 12800UL 112#define VMALLOC_SIZE_TB_L5 12800UL
113 113
114#define __VMEMMAP_BASE_L4 0xffffea0000000000 114#define __VMEMMAP_BASE_L4 0xffffea0000000000UL
115#define __VMEMMAP_BASE_L5 0xffd4000000000000 115#define __VMEMMAP_BASE_L5 0xffd4000000000000UL
116 116
117#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT 117#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
118# define VMALLOC_START vmalloc_base 118# define VMALLOC_START vmalloc_base
diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h
index 809134c644a6..90ab9a795b49 100644
--- a/arch/x86/include/uapi/asm/msgbuf.h
+++ b/arch/x86/include/uapi/asm/msgbuf.h
@@ -1 +1,32 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef __ASM_X64_MSGBUF_H
3#define __ASM_X64_MSGBUF_H
4
5#if !defined(__x86_64__) || !defined(__ILP32__)
1#include <asm-generic/msgbuf.h> 6#include <asm-generic/msgbuf.h>
7#else
8/*
9 * The msqid64_ds structure for x86 architecture with x32 ABI.
10 *
11 * On x86-32 and x86-64 we can just use the generic definition, but
12 * x32 uses the same binary layout as x86_64, which is differnet
13 * from other 32-bit architectures.
14 */
15
16struct msqid64_ds {
17 struct ipc64_perm msg_perm;
18 __kernel_time_t msg_stime; /* last msgsnd time */
19 __kernel_time_t msg_rtime; /* last msgrcv time */
20 __kernel_time_t msg_ctime; /* last change time */
21 __kernel_ulong_t msg_cbytes; /* current number of bytes on queue */
22 __kernel_ulong_t msg_qnum; /* number of messages in queue */
23 __kernel_ulong_t msg_qbytes; /* max number of bytes on queue */
24 __kernel_pid_t msg_lspid; /* pid of last msgsnd */
25 __kernel_pid_t msg_lrpid; /* last receive pid */
26 __kernel_ulong_t __unused4;
27 __kernel_ulong_t __unused5;
28};
29
30#endif
31
32#endif /* __ASM_GENERIC_MSGBUF_H */
diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h
index 83c05fc2de38..644421f3823b 100644
--- a/arch/x86/include/uapi/asm/shmbuf.h
+++ b/arch/x86/include/uapi/asm/shmbuf.h
@@ -1 +1,43 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef __ASM_X86_SHMBUF_H
3#define __ASM_X86_SHMBUF_H
4
5#if !defined(__x86_64__) || !defined(__ILP32__)
1#include <asm-generic/shmbuf.h> 6#include <asm-generic/shmbuf.h>
7#else
8/*
9 * The shmid64_ds structure for x86 architecture with x32 ABI.
10 *
11 * On x86-32 and x86-64 we can just use the generic definition, but
12 * x32 uses the same binary layout as x86_64, which is differnet
13 * from other 32-bit architectures.
14 */
15
16struct shmid64_ds {
17 struct ipc64_perm shm_perm; /* operation perms */
18 size_t shm_segsz; /* size of segment (bytes) */
19 __kernel_time_t shm_atime; /* last attach time */
20 __kernel_time_t shm_dtime; /* last detach time */
21 __kernel_time_t shm_ctime; /* last change time */
22 __kernel_pid_t shm_cpid; /* pid of creator */
23 __kernel_pid_t shm_lpid; /* pid of last operator */
24 __kernel_ulong_t shm_nattch; /* no. of current attaches */
25 __kernel_ulong_t __unused4;
26 __kernel_ulong_t __unused5;
27};
28
29struct shminfo64 {
30 __kernel_ulong_t shmmax;
31 __kernel_ulong_t shmmin;
32 __kernel_ulong_t shmmni;
33 __kernel_ulong_t shmseg;
34 __kernel_ulong_t shmall;
35 __kernel_ulong_t __unused1;
36 __kernel_ulong_t __unused2;
37 __kernel_ulong_t __unused3;
38 __kernel_ulong_t __unused4;
39};
40
41#endif
42
43#endif /* __ASM_X86_SHMBUF_H */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index b9693b80fc21..60d1897041da 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -835,6 +835,9 @@ static const struct _tlb_table intel_tlb_table[] = {
835 { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" }, 835 { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
836 { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" }, 836 { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" },
837 { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" }, 837 { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
838 { 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" },
839 { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
840 { 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" },
838 { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" }, 841 { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
839 { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" }, 842 { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" },
840 { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" }, 843 { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 10c4fc2c91f8..77e201301528 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -564,14 +564,12 @@ static int __reload_late(void *info)
564 apply_microcode_local(&err); 564 apply_microcode_local(&err);
565 spin_unlock(&update_lock); 565 spin_unlock(&update_lock);
566 566
567 /* siblings return UCODE_OK because their engine got updated already */
567 if (err > UCODE_NFOUND) { 568 if (err > UCODE_NFOUND) {
568 pr_warn("Error reloading microcode on CPU %d\n", cpu); 569 pr_warn("Error reloading microcode on CPU %d\n", cpu);
569 return -1; 570 ret = -1;
570 /* siblings return UCODE_OK because their engine got updated already */
571 } else if (err == UCODE_UPDATED || err == UCODE_OK) { 571 } else if (err == UCODE_UPDATED || err == UCODE_OK) {
572 ret = 1; 572 ret = 1;
573 } else {
574 return ret;
575 } 573 }
576 574
577 /* 575 /*
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 32b8e5724f96..1c2cfa0644aa 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -485,7 +485,6 @@ static void show_saved_mc(void)
485 */ 485 */
486static void save_mc_for_early(u8 *mc, unsigned int size) 486static void save_mc_for_early(u8 *mc, unsigned int size)
487{ 487{
488#ifdef CONFIG_HOTPLUG_CPU
489 /* Synchronization during CPU hotplug. */ 488 /* Synchronization during CPU hotplug. */
490 static DEFINE_MUTEX(x86_cpu_microcode_mutex); 489 static DEFINE_MUTEX(x86_cpu_microcode_mutex);
491 490
@@ -495,7 +494,6 @@ static void save_mc_for_early(u8 *mc, unsigned int size)
495 show_saved_mc(); 494 show_saved_mc();
496 495
497 mutex_unlock(&x86_cpu_microcode_mutex); 496 mutex_unlock(&x86_cpu_microcode_mutex);
498#endif
499} 497}
500 498
501static bool load_builtin_intel_microcode(struct cpio_data *cp) 499static bool load_builtin_intel_microcode(struct cpio_data *cp)
diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c
index fa183a131edc..a15fe0e92cf9 100644
--- a/arch/x86/kernel/jailhouse.c
+++ b/arch/x86/kernel/jailhouse.c
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Jailhouse paravirt_ops implementation 3 * Jailhouse paravirt_ops implementation
4 * 4 *
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 6285697b6e56..5c623dfe39d1 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -50,6 +50,7 @@
50#include <linux/init_ohci1394_dma.h> 50#include <linux/init_ohci1394_dma.h>
51#include <linux/kvm_para.h> 51#include <linux/kvm_para.h>
52#include <linux/dma-contiguous.h> 52#include <linux/dma-contiguous.h>
53#include <xen/xen.h>
53 54
54#include <linux/errno.h> 55#include <linux/errno.h>
55#include <linux/kernel.h> 56#include <linux/kernel.h>
@@ -534,6 +535,11 @@ static void __init reserve_crashkernel(void)
534 high = true; 535 high = true;
535 } 536 }
536 537
538 if (xen_pv_domain()) {
539 pr_info("Ignoring crashkernel for a Xen PV domain\n");
540 return;
541 }
542
537 /* 0 means: find the address automatically */ 543 /* 0 means: find the address automatically */
538 if (crash_base <= 0) { 544 if (crash_base <= 0) {
539 /* 545 /*
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 45175b81dd5b..0f1cbb042f49 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1571,6 +1571,8 @@ static inline void mwait_play_dead(void)
1571 void *mwait_ptr; 1571 void *mwait_ptr;
1572 int i; 1572 int i;
1573 1573
1574 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
1575 return;
1574 if (!this_cpu_has(X86_FEATURE_MWAIT)) 1576 if (!this_cpu_has(X86_FEATURE_MWAIT))
1575 return; 1577 return;
1576 if (!this_cpu_has(X86_FEATURE_CLFLUSH)) 1578 if (!this_cpu_has(X86_FEATURE_CLFLUSH))
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index aa66ccd6ed6c..c7668806163f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4544,12 +4544,6 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
4544 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa); 4544 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
4545} 4545}
4546 4546
4547static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
4548{
4549 if (enable_ept)
4550 vmx_flush_tlb(vcpu, true);
4551}
4552
4553static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) 4547static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
4554{ 4548{
4555 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; 4549 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -9278,7 +9272,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
9278 } else { 9272 } else {
9279 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; 9273 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
9280 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 9274 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
9281 vmx_flush_tlb_ept_only(vcpu); 9275 vmx_flush_tlb(vcpu, true);
9282 } 9276 }
9283 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); 9277 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
9284 9278
@@ -9306,7 +9300,7 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
9306 !nested_cpu_has2(get_vmcs12(&vmx->vcpu), 9300 !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
9307 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 9301 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
9308 vmcs_write64(APIC_ACCESS_ADDR, hpa); 9302 vmcs_write64(APIC_ACCESS_ADDR, hpa);
9309 vmx_flush_tlb_ept_only(vcpu); 9303 vmx_flush_tlb(vcpu, true);
9310 } 9304 }
9311} 9305}
9312 9306
@@ -11220,7 +11214,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
11220 } 11214 }
11221 } else if (nested_cpu_has2(vmcs12, 11215 } else if (nested_cpu_has2(vmcs12,
11222 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 11216 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
11223 vmx_flush_tlb_ept_only(vcpu); 11217 vmx_flush_tlb(vcpu, true);
11224 } 11218 }
11225 11219
11226 /* 11220 /*
@@ -12073,7 +12067,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
12073 } else if (!nested_cpu_has_ept(vmcs12) && 12067 } else if (!nested_cpu_has_ept(vmcs12) &&
12074 nested_cpu_has2(vmcs12, 12068 nested_cpu_has2(vmcs12,
12075 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 12069 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
12076 vmx_flush_tlb_ept_only(vcpu); 12070 vmx_flush_tlb(vcpu, true);
12077 } 12071 }
12078 12072
12079 /* This is needed for same reason as it was needed in prepare_vmcs02 */ 12073 /* This is needed for same reason as it was needed in prepare_vmcs02 */
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 7d35ce672989..c9492f764902 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -302,13 +302,6 @@ static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
302 __rem; \ 302 __rem; \
303 }) 303 })
304 304
305#define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0)
306#define KVM_X86_DISABLE_EXITS_HTL (1 << 1)
307#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
308#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \
309 KVM_X86_DISABLE_EXITS_HTL | \
310 KVM_X86_DISABLE_EXITS_PAUSE)
311
312static inline bool kvm_mwait_in_guest(struct kvm *kvm) 305static inline bool kvm_mwait_in_guest(struct kvm *kvm)
313{ 306{
314 return kvm->arch.mwait_in_guest; 307 return kvm->arch.mwait_in_guest;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 0f3d50f4c48c..3bded76e8d5c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -93,6 +93,18 @@ void arch_report_meminfo(struct seq_file *m)
93static inline void split_page_count(int level) { } 93static inline void split_page_count(int level) { }
94#endif 94#endif
95 95
96static inline int
97within(unsigned long addr, unsigned long start, unsigned long end)
98{
99 return addr >= start && addr < end;
100}
101
102static inline int
103within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
104{
105 return addr >= start && addr <= end;
106}
107
96#ifdef CONFIG_X86_64 108#ifdef CONFIG_X86_64
97 109
98static inline unsigned long highmap_start_pfn(void) 110static inline unsigned long highmap_start_pfn(void)
@@ -106,20 +118,25 @@ static inline unsigned long highmap_end_pfn(void)
106 return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT; 118 return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
107} 119}
108 120
109#endif 121static bool __cpa_pfn_in_highmap(unsigned long pfn)
110
111static inline int
112within(unsigned long addr, unsigned long start, unsigned long end)
113{ 122{
114 return addr >= start && addr < end; 123 /*
124 * Kernel text has an alias mapping at a high address, known
125 * here as "highmap".
126 */
127 return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
115} 128}
116 129
117static inline int 130#else
118within_inclusive(unsigned long addr, unsigned long start, unsigned long end) 131
132static bool __cpa_pfn_in_highmap(unsigned long pfn)
119{ 133{
120 return addr >= start && addr <= end; 134 /* There is no highmap on 32-bit */
135 return false;
121} 136}
122 137
138#endif
139
123/* 140/*
124 * Flushing functions 141 * Flushing functions
125 */ 142 */
@@ -172,7 +189,7 @@ static void __cpa_flush_all(void *arg)
172 189
173static void cpa_flush_all(unsigned long cache) 190static void cpa_flush_all(unsigned long cache)
174{ 191{
175 BUG_ON(irqs_disabled()); 192 BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
176 193
177 on_each_cpu(__cpa_flush_all, (void *) cache, 1); 194 on_each_cpu(__cpa_flush_all, (void *) cache, 1);
178} 195}
@@ -236,7 +253,7 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
236 unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */ 253 unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
237#endif 254#endif
238 255
239 BUG_ON(irqs_disabled()); 256 BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
240 257
241 on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1); 258 on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
242 259
@@ -1183,6 +1200,10 @@ static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
1183 cpa->numpages = 1; 1200 cpa->numpages = 1;
1184 cpa->pfn = __pa(vaddr) >> PAGE_SHIFT; 1201 cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
1185 return 0; 1202 return 0;
1203
1204 } else if (__cpa_pfn_in_highmap(cpa->pfn)) {
1205 /* Faults in the highmap are OK, so do not warn: */
1206 return -EFAULT;
1186 } else { 1207 } else {
1187 WARN(1, KERN_WARNING "CPA: called for zero pte. " 1208 WARN(1, KERN_WARNING "CPA: called for zero pte. "
1188 "vaddr = %lx cpa->vaddr = %lx\n", vaddr, 1209 "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
@@ -1335,8 +1356,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
1335 * to touch the high mapped kernel as well: 1356 * to touch the high mapped kernel as well:
1336 */ 1357 */
1337 if (!within(vaddr, (unsigned long)_text, _brk_end) && 1358 if (!within(vaddr, (unsigned long)_text, _brk_end) &&
1338 within_inclusive(cpa->pfn, highmap_start_pfn(), 1359 __cpa_pfn_in_highmap(cpa->pfn)) {
1339 highmap_end_pfn())) {
1340 unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + 1360 unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
1341 __START_KERNEL_map - phys_base; 1361 __START_KERNEL_map - phys_base;
1342 alias_cpa = *cpa; 1362 alias_cpa = *cpa;
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index f1fd52f449e0..4d418e705878 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -421,6 +421,16 @@ static inline bool pti_kernel_image_global_ok(void)
421 if (boot_cpu_has(X86_FEATURE_K8)) 421 if (boot_cpu_has(X86_FEATURE_K8))
422 return false; 422 return false;
423 423
424 /*
425 * RANDSTRUCT derives its hardening benefits from the
426 * attacker's lack of knowledge about the layout of kernel
427 * data structures. Keep the kernel image non-global in
428 * cases where RANDSTRUCT is in use to help keep the layout a
429 * secret.
430 */
431 if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
432 return false;
433
424 return true; 434 return true;
425} 435}
426 436
@@ -430,12 +440,24 @@ static inline bool pti_kernel_image_global_ok(void)
430 */ 440 */
431void pti_clone_kernel_text(void) 441void pti_clone_kernel_text(void)
432{ 442{
443 /*
444 * rodata is part of the kernel image and is normally
445 * readable on the filesystem or on the web. But, do not
446 * clone the areas past rodata, they might contain secrets.
447 */
433 unsigned long start = PFN_ALIGN(_text); 448 unsigned long start = PFN_ALIGN(_text);
434 unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE); 449 unsigned long end = (unsigned long)__end_rodata_hpage_align;
435 450
436 if (!pti_kernel_image_global_ok()) 451 if (!pti_kernel_image_global_ok())
437 return; 452 return;
438 453
454 pr_debug("mapping partial kernel image into user address space\n");
455
456 /*
457 * Note that this will undo _some_ of the work that
458 * pti_set_kernel_image_nonglobal() did to clear the
459 * global bit.
460 */
439 pti_clone_pmds(start, end, _PAGE_RW); 461 pti_clone_pmds(start, end, _PAGE_RW);
440} 462}
441 463
@@ -458,8 +480,6 @@ void pti_set_kernel_image_nonglobal(void)
458 if (pti_kernel_image_global_ok()) 480 if (pti_kernel_image_global_ok())
459 return; 481 return;
460 482
461 pr_debug("set kernel image non-global\n");
462
463 set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT); 483 set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
464} 484}
465 485
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index ce08b7bb0304..8fca446aaef6 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1133,6 +1133,7 @@ skip_init_addrs:
1133 for (pass = 0; pass < 20 || image; pass++) { 1133 for (pass = 0; pass < 20 || image; pass++) {
1134 proglen = do_jit(prog, addrs, image, oldproglen, &ctx); 1134 proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
1135 if (proglen <= 0) { 1135 if (proglen <= 0) {
1136out_image:
1136 image = NULL; 1137 image = NULL;
1137 if (header) 1138 if (header)
1138 bpf_jit_binary_free(header); 1139 bpf_jit_binary_free(header);
@@ -1143,8 +1144,7 @@ skip_init_addrs:
1143 if (proglen != oldproglen) { 1144 if (proglen != oldproglen) {
1144 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", 1145 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
1145 proglen, oldproglen); 1146 proglen, oldproglen);
1146 prog = orig_prog; 1147 goto out_image;
1147 goto out_addrs;
1148 } 1148 }
1149 break; 1149 break;
1150 } 1150 }
@@ -1180,7 +1180,7 @@ skip_init_addrs:
1180 prog = orig_prog; 1180 prog = orig_prog;
1181 } 1181 }
1182 1182
1183 if (!prog->is_func || extra_pass) { 1183 if (!image || !prog->is_func || extra_pass) {
1184out_addrs: 1184out_addrs:
1185 kfree(addrs); 1185 kfree(addrs);
1186 kfree(jit_data); 1186 kfree(jit_data);
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index f0ecd98509d8..771ae9730ac6 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4934,8 +4934,16 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio)
4934 bool new_queue = false; 4934 bool new_queue = false;
4935 bool bfqq_already_existing = false, split = false; 4935 bool bfqq_already_existing = false, split = false;
4936 4936
4937 if (!rq->elv.icq) 4937 /*
4938 * Even if we don't have an icq attached, we should still clear
4939 * the scheduler pointers, as they might point to previously
4940 * allocated bic/bfqq structs.
4941 */
4942 if (!rq->elv.icq) {
4943 rq->elv.priv[0] = rq->elv.priv[1] = NULL;
4938 return; 4944 return;
4945 }
4946
4939 bic = icq_to_bic(rq->elv.icq); 4947 bic = icq_to_bic(rq->elv.icq);
4940 4948
4941 spin_lock_irq(&bfqd->lock); 4949 spin_lock_irq(&bfqd->lock);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 1c16694ae145..eb85cb87c40f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1177,26 +1177,20 @@ int blkcg_init_queue(struct request_queue *q)
1177 1177
1178 preloaded = !radix_tree_preload(GFP_KERNEL); 1178 preloaded = !radix_tree_preload(GFP_KERNEL);
1179 1179
1180 /* 1180 /* Make sure the root blkg exists. */
1181 * Make sure the root blkg exists and count the existing blkgs. As
1182 * @q is bypassing at this point, blkg_lookup_create() can't be
1183 * used. Open code insertion.
1184 */
1185 rcu_read_lock(); 1181 rcu_read_lock();
1186 spin_lock_irq(q->queue_lock); 1182 spin_lock_irq(q->queue_lock);
1187 blkg = blkg_create(&blkcg_root, q, new_blkg); 1183 blkg = blkg_create(&blkcg_root, q, new_blkg);
1184 if (IS_ERR(blkg))
1185 goto err_unlock;
1186 q->root_blkg = blkg;
1187 q->root_rl.blkg = blkg;
1188 spin_unlock_irq(q->queue_lock); 1188 spin_unlock_irq(q->queue_lock);
1189 rcu_read_unlock(); 1189 rcu_read_unlock();
1190 1190
1191 if (preloaded) 1191 if (preloaded)
1192 radix_tree_preload_end(); 1192 radix_tree_preload_end();
1193 1193
1194 if (IS_ERR(blkg))
1195 return PTR_ERR(blkg);
1196
1197 q->root_blkg = blkg;
1198 q->root_rl.blkg = blkg;
1199
1200 ret = blk_throtl_init(q); 1194 ret = blk_throtl_init(q);
1201 if (ret) { 1195 if (ret) {
1202 spin_lock_irq(q->queue_lock); 1196 spin_lock_irq(q->queue_lock);
@@ -1204,6 +1198,13 @@ int blkcg_init_queue(struct request_queue *q)
1204 spin_unlock_irq(q->queue_lock); 1198 spin_unlock_irq(q->queue_lock);
1205 } 1199 }
1206 return ret; 1200 return ret;
1201
1202err_unlock:
1203 spin_unlock_irq(q->queue_lock);
1204 rcu_read_unlock();
1205 if (preloaded)
1206 radix_tree_preload_end();
1207 return PTR_ERR(blkg);
1207} 1208}
1208 1209
1209/** 1210/**
@@ -1410,9 +1411,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
1410 __clear_bit(pol->plid, q->blkcg_pols); 1411 __clear_bit(pol->plid, q->blkcg_pols);
1411 1412
1412 list_for_each_entry(blkg, &q->blkg_list, q_node) { 1413 list_for_each_entry(blkg, &q->blkg_list, q_node) {
1413 /* grab blkcg lock too while removing @pd from @blkg */
1414 spin_lock(&blkg->blkcg->lock);
1415
1416 if (blkg->pd[pol->plid]) { 1414 if (blkg->pd[pol->plid]) {
1417 if (!blkg->pd[pol->plid]->offline && 1415 if (!blkg->pd[pol->plid]->offline &&
1418 pol->pd_offline_fn) { 1416 pol->pd_offline_fn) {
@@ -1422,8 +1420,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
1422 pol->pd_free_fn(blkg->pd[pol->plid]); 1420 pol->pd_free_fn(blkg->pd[pol->plid]);
1423 blkg->pd[pol->plid] = NULL; 1421 blkg->pd[pol->plid] = NULL;
1424 } 1422 }
1425
1426 spin_unlock(&blkg->blkcg->lock);
1427 } 1423 }
1428 1424
1429 spin_unlock_irq(q->queue_lock); 1425 spin_unlock_irq(q->queue_lock);
diff --git a/block/blk-core.c b/block/blk-core.c
index 806ce2442819..85909b431eb0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -201,6 +201,10 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
201 rq->part = NULL; 201 rq->part = NULL;
202 seqcount_init(&rq->gstate_seq); 202 seqcount_init(&rq->gstate_seq);
203 u64_stats_init(&rq->aborted_gstate_sync); 203 u64_stats_init(&rq->aborted_gstate_sync);
204 /*
205 * See comment of blk_mq_init_request
206 */
207 WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
204} 208}
205EXPORT_SYMBOL(blk_rq_init); 209EXPORT_SYMBOL(blk_rq_init);
206 210
@@ -915,7 +919,6 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
915 919
916 while (true) { 920 while (true) {
917 bool success = false; 921 bool success = false;
918 int ret;
919 922
920 rcu_read_lock(); 923 rcu_read_lock();
921 if (percpu_ref_tryget_live(&q->q_usage_counter)) { 924 if (percpu_ref_tryget_live(&q->q_usage_counter)) {
@@ -947,14 +950,12 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
947 */ 950 */
948 smp_rmb(); 951 smp_rmb();
949 952
950 ret = wait_event_interruptible(q->mq_freeze_wq, 953 wait_event(q->mq_freeze_wq,
951 (atomic_read(&q->mq_freeze_depth) == 0 && 954 (atomic_read(&q->mq_freeze_depth) == 0 &&
952 (preempt || !blk_queue_preempt_only(q))) || 955 (preempt || !blk_queue_preempt_only(q))) ||
953 blk_queue_dying(q)); 956 blk_queue_dying(q));
954 if (blk_queue_dying(q)) 957 if (blk_queue_dying(q))
955 return -ENODEV; 958 return -ENODEV;
956 if (ret)
957 return ret;
958 } 959 }
959} 960}
960 961
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0dc9e341c2a7..c3621453ad87 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2042,6 +2042,13 @@ static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
2042 2042
2043 seqcount_init(&rq->gstate_seq); 2043 seqcount_init(&rq->gstate_seq);
2044 u64_stats_init(&rq->aborted_gstate_sync); 2044 u64_stats_init(&rq->aborted_gstate_sync);
2045 /*
2046 * start gstate with gen 1 instead of 0, otherwise it will be equal
2047 * to aborted_gstate, and be identified timed out by
2048 * blk_mq_terminate_expired.
2049 */
2050 WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
2051
2045 return 0; 2052 return 0;
2046} 2053}
2047 2054
@@ -2329,7 +2336,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2329 2336
2330static void blk_mq_map_swqueue(struct request_queue *q) 2337static void blk_mq_map_swqueue(struct request_queue *q)
2331{ 2338{
2332 unsigned int i; 2339 unsigned int i, hctx_idx;
2333 struct blk_mq_hw_ctx *hctx; 2340 struct blk_mq_hw_ctx *hctx;
2334 struct blk_mq_ctx *ctx; 2341 struct blk_mq_ctx *ctx;
2335 struct blk_mq_tag_set *set = q->tag_set; 2342 struct blk_mq_tag_set *set = q->tag_set;
@@ -2346,8 +2353,23 @@ static void blk_mq_map_swqueue(struct request_queue *q)
2346 2353
2347 /* 2354 /*
2348 * Map software to hardware queues. 2355 * Map software to hardware queues.
2356 *
2357 * If the cpu isn't present, the cpu is mapped to first hctx.
2349 */ 2358 */
2350 for_each_possible_cpu(i) { 2359 for_each_possible_cpu(i) {
2360 hctx_idx = q->mq_map[i];
2361 /* unmapped hw queue can be remapped after CPU topo changed */
2362 if (!set->tags[hctx_idx] &&
2363 !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2364 /*
2365 * If tags initialization fail for some hctx,
2366 * that hctx won't be brought online. In this
2367 * case, remap the current ctx to hctx[0] which
2368 * is guaranteed to always have tags allocated
2369 */
2370 q->mq_map[i] = 0;
2371 }
2372
2351 ctx = per_cpu_ptr(q->queue_ctx, i); 2373 ctx = per_cpu_ptr(q->queue_ctx, i);
2352 hctx = blk_mq_map_queue(q, i); 2374 hctx = blk_mq_map_queue(q, i);
2353 2375
@@ -2359,8 +2381,21 @@ static void blk_mq_map_swqueue(struct request_queue *q)
2359 mutex_unlock(&q->sysfs_lock); 2381 mutex_unlock(&q->sysfs_lock);
2360 2382
2361 queue_for_each_hw_ctx(q, hctx, i) { 2383 queue_for_each_hw_ctx(q, hctx, i) {
2362 /* every hctx should get mapped by at least one CPU */ 2384 /*
2363 WARN_ON(!hctx->nr_ctx); 2385 * If no software queues are mapped to this hardware queue,
2386 * disable it and free the request entries.
2387 */
2388 if (!hctx->nr_ctx) {
2389 /* Never unmap queue 0. We need it as a
2390 * fallback in case of a new remap fails
2391 * allocation
2392 */
2393 if (i && set->tags[i])
2394 blk_mq_free_map_and_requests(set, i);
2395
2396 hctx->tags = NULL;
2397 continue;
2398 }
2364 2399
2365 hctx->tags = set->tags[i]; 2400 hctx->tags = set->tags[i];
2366 WARN_ON(!hctx->tags); 2401 WARN_ON(!hctx->tags);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 88c558f71819..89b5cd3a6c70 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -7,6 +7,9 @@
7 7
8struct blk_mq_tag_set; 8struct blk_mq_tag_set;
9 9
10/**
11 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
12 */
10struct blk_mq_ctx { 13struct blk_mq_ctx {
11 struct { 14 struct {
12 spinlock_t lock; 15 spinlock_t lock;
diff --git a/crypto/api.c b/crypto/api.c
index 1d5290c67108..0ee632bba064 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -204,9 +204,14 @@ static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
204 204
205 down_read(&crypto_alg_sem); 205 down_read(&crypto_alg_sem);
206 alg = __crypto_alg_lookup(name, type | test, mask | test); 206 alg = __crypto_alg_lookup(name, type | test, mask | test);
207 if (!alg && test) 207 if (!alg && test) {
208 alg = __crypto_alg_lookup(name, type, mask) ? 208 alg = __crypto_alg_lookup(name, type, mask);
209 ERR_PTR(-ELIBBAD) : NULL; 209 if (alg && !crypto_is_larval(alg)) {
210 /* Test failed */
211 crypto_mod_put(alg);
212 alg = ERR_PTR(-ELIBBAD);
213 }
214 }
210 up_read(&crypto_alg_sem); 215 up_read(&crypto_alg_sem);
211 216
212 return alg; 217 return alg;
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 4faa2781c964..466a112a4446 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1134,8 +1134,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
1134 if (!drbg) 1134 if (!drbg)
1135 return; 1135 return;
1136 kzfree(drbg->Vbuf); 1136 kzfree(drbg->Vbuf);
1137 drbg->Vbuf = NULL;
1137 drbg->V = NULL; 1138 drbg->V = NULL;
1138 kzfree(drbg->Cbuf); 1139 kzfree(drbg->Cbuf);
1140 drbg->Cbuf = NULL;
1139 drbg->C = NULL; 1141 drbg->C = NULL;
1140 kzfree(drbg->scratchpadbuf); 1142 kzfree(drbg->scratchpadbuf);
1141 drbg->scratchpadbuf = NULL; 1143 drbg->scratchpadbuf = NULL;
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 76fb96966f7b..2f2e737be0f8 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2123,6 +2123,25 @@ static int __init intel_opregion_present(void)
2123 return opregion; 2123 return opregion;
2124} 2124}
2125 2125
2126static bool dmi_is_desktop(void)
2127{
2128 const char *chassis_type;
2129
2130 chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
2131 if (!chassis_type)
2132 return false;
2133
2134 if (!strcmp(chassis_type, "3") || /* 3: Desktop */
2135 !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */
2136 !strcmp(chassis_type, "5") || /* 5: Pizza Box */
2137 !strcmp(chassis_type, "6") || /* 6: Mini Tower */
2138 !strcmp(chassis_type, "7") || /* 7: Tower */
2139 !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */
2140 return true;
2141
2142 return false;
2143}
2144
2126int acpi_video_register(void) 2145int acpi_video_register(void)
2127{ 2146{
2128 int ret = 0; 2147 int ret = 0;
@@ -2143,8 +2162,12 @@ int acpi_video_register(void)
2143 * win8 ready (where we also prefer the native backlight driver, so 2162 * win8 ready (where we also prefer the native backlight driver, so
2144 * normally the acpi_video code should not register there anyways). 2163 * normally the acpi_video code should not register there anyways).
2145 */ 2164 */
2146 if (only_lcd == -1) 2165 if (only_lcd == -1) {
2147 only_lcd = acpi_osi_is_win8(); 2166 if (dmi_is_desktop() && acpi_osi_is_win8())
2167 only_lcd = true;
2168 else
2169 only_lcd = false;
2170 }
2148 2171
2149 dmi_check_system(video_dmi_table); 2172 dmi_check_system(video_dmi_table);
2150 2173
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index ebb626ffb5fa..4bde16fb97d8 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -12,23 +12,64 @@
12#define pr_fmt(fmt) "ACPI: watchdog: " fmt 12#define pr_fmt(fmt) "ACPI: watchdog: " fmt
13 13
14#include <linux/acpi.h> 14#include <linux/acpi.h>
15#include <linux/dmi.h>
15#include <linux/ioport.h> 16#include <linux/ioport.h>
16#include <linux/platform_device.h> 17#include <linux/platform_device.h>
17 18
18#include "internal.h" 19#include "internal.h"
19 20
21static const struct dmi_system_id acpi_watchdog_skip[] = {
22 {
23 /*
24 * On Lenovo Z50-70 there are two issues with the WDAT
25 * table. First some of the instructions use RTC SRAM
26 * to store persistent information. This does not work well
27 * with Linux RTC driver. Second, more important thing is
28 * that the instructions do not actually reset the system.
29 *
30 * On this particular system iTCO_wdt seems to work just
31 * fine so we prefer that over WDAT for now.
32 *
33 * See also https://bugzilla.kernel.org/show_bug.cgi?id=199033.
34 */
35 .ident = "Lenovo Z50-70",
36 .matches = {
37 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
38 DMI_MATCH(DMI_PRODUCT_NAME, "20354"),
39 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Z50-70"),
40 },
41 },
42 {}
43};
44
45static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
46{
47 const struct acpi_table_wdat *wdat = NULL;
48 acpi_status status;
49
50 if (acpi_disabled)
51 return NULL;
52
53 if (dmi_check_system(acpi_watchdog_skip))
54 return NULL;
55
56 status = acpi_get_table(ACPI_SIG_WDAT, 0,
57 (struct acpi_table_header **)&wdat);
58 if (ACPI_FAILURE(status)) {
59 /* It is fine if there is no WDAT */
60 return NULL;
61 }
62
63 return wdat;
64}
65
20/** 66/**
21 * Returns true if this system should prefer ACPI based watchdog instead of 67 * Returns true if this system should prefer ACPI based watchdog instead of
22 * the native one (which are typically the same hardware). 68 * the native one (which are typically the same hardware).
23 */ 69 */
24bool acpi_has_watchdog(void) 70bool acpi_has_watchdog(void)
25{ 71{
26 struct acpi_table_header hdr; 72 return !!acpi_watchdog_get_wdat();
27
28 if (acpi_disabled)
29 return false;
30
31 return ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_WDAT, 0, &hdr));
32} 73}
33EXPORT_SYMBOL_GPL(acpi_has_watchdog); 74EXPORT_SYMBOL_GPL(acpi_has_watchdog);
34 75
@@ -41,12 +82,10 @@ void __init acpi_watchdog_init(void)
41 struct platform_device *pdev; 82 struct platform_device *pdev;
42 struct resource *resources; 83 struct resource *resources;
43 size_t nresources = 0; 84 size_t nresources = 0;
44 acpi_status status;
45 int i; 85 int i;
46 86
47 status = acpi_get_table(ACPI_SIG_WDAT, 0, 87 wdat = acpi_watchdog_get_wdat();
48 (struct acpi_table_header **)&wdat); 88 if (!wdat) {
49 if (ACPI_FAILURE(status)) {
50 /* It is fine if there is no WDAT */ 89 /* It is fine if there is no WDAT */
51 return; 90 return;
52 } 91 }
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index e1eee7a60fad..f1cc4f9d31cd 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -635,4 +635,26 @@ module_param_call(lid_init_state,
635 NULL, 0644); 635 NULL, 0644);
636MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state"); 636MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state");
637 637
638module_acpi_driver(acpi_button_driver); 638static int acpi_button_register_driver(struct acpi_driver *driver)
639{
640 /*
641 * Modules such as nouveau.ko and i915.ko have a link time dependency
642 * on acpi_lid_open(), and would therefore not be loadable on ACPI
643 * capable kernels booted in non-ACPI mode if the return value of
644 * acpi_bus_register_driver() is returned from here with ACPI disabled
645 * when this driver is built as a module.
646 */
647 if (acpi_disabled)
648 return 0;
649
650 return acpi_bus_register_driver(driver);
651}
652
653static void acpi_button_unregister_driver(struct acpi_driver *driver)
654{
655 if (!acpi_disabled)
656 acpi_bus_unregister_driver(driver);
657}
658
659module_driver(acpi_button_driver, acpi_button_register_driver,
660 acpi_button_unregister_driver);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index cc234e6a6297..970dd87d347c 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2166,10 +2166,10 @@ int __init acpi_scan_init(void)
2166 acpi_cmos_rtc_init(); 2166 acpi_cmos_rtc_init();
2167 acpi_container_init(); 2167 acpi_container_init();
2168 acpi_memory_hotplug_init(); 2168 acpi_memory_hotplug_init();
2169 acpi_watchdog_init();
2169 acpi_pnp_init(); 2170 acpi_pnp_init();
2170 acpi_int340x_thermal_init(); 2171 acpi_int340x_thermal_init();
2171 acpi_amba_init(); 2172 acpi_amba_init();
2172 acpi_watchdog_init();
2173 acpi_init_lpit(); 2173 acpi_init_lpit();
2174 2174
2175 acpi_scan_add_handler(&generic_device_handler); 2175 acpi_scan_add_handler(&generic_device_handler);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 99a1a650326d..974e58457697 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -364,6 +364,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
364 DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"), 364 DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
365 }, 365 },
366 }, 366 },
367 /*
368 * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
369 * the Low Power S0 Idle firmware interface (see
370 * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
371 */
372 {
373 .callback = init_no_lps0,
374 .ident = "ThinkPad X1 Tablet(2016)",
375 .matches = {
376 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
377 DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
378 },
379 },
367 {}, 380 {},
368}; 381};
369 382
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 594c228d2f02..4a3ac31c07d0 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -69,11 +69,12 @@ static ssize_t driver_override_show(struct device *_dev,
69 struct device_attribute *attr, char *buf) 69 struct device_attribute *attr, char *buf)
70{ 70{
71 struct amba_device *dev = to_amba_device(_dev); 71 struct amba_device *dev = to_amba_device(_dev);
72 ssize_t len;
72 73
73 if (!dev->driver_override) 74 device_lock(_dev);
74 return 0; 75 len = sprintf(buf, "%s\n", dev->driver_override);
75 76 device_unlock(_dev);
76 return sprintf(buf, "%s\n", dev->driver_override); 77 return len;
77} 78}
78 79
79static ssize_t driver_override_store(struct device *_dev, 80static ssize_t driver_override_store(struct device *_dev,
@@ -81,9 +82,10 @@ static ssize_t driver_override_store(struct device *_dev,
81 const char *buf, size_t count) 82 const char *buf, size_t count)
82{ 83{
83 struct amba_device *dev = to_amba_device(_dev); 84 struct amba_device *dev = to_amba_device(_dev);
84 char *driver_override, *old = dev->driver_override, *cp; 85 char *driver_override, *old, *cp;
85 86
86 if (count > PATH_MAX) 87 /* We need to keep extra room for a newline */
88 if (count >= (PAGE_SIZE - 1))
87 return -EINVAL; 89 return -EINVAL;
88 90
89 driver_override = kstrndup(buf, count, GFP_KERNEL); 91 driver_override = kstrndup(buf, count, GFP_KERNEL);
@@ -94,12 +96,15 @@ static ssize_t driver_override_store(struct device *_dev,
94 if (cp) 96 if (cp)
95 *cp = '\0'; 97 *cp = '\0';
96 98
99 device_lock(_dev);
100 old = dev->driver_override;
97 if (strlen(driver_override)) { 101 if (strlen(driver_override)) {
98 dev->driver_override = driver_override; 102 dev->driver_override = driver_override;
99 } else { 103 } else {
100 kfree(driver_override); 104 kfree(driver_override);
101 dev->driver_override = NULL; 105 dev->driver_override = NULL;
102 } 106 }
107 device_unlock(_dev);
103 108
104 kfree(old); 109 kfree(old);
105 110
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 764b63a5aade..e578eee31589 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2839,6 +2839,14 @@ static void binder_transaction(struct binder_proc *proc,
2839 else 2839 else
2840 return_error = BR_DEAD_REPLY; 2840 return_error = BR_DEAD_REPLY;
2841 mutex_unlock(&context->context_mgr_node_lock); 2841 mutex_unlock(&context->context_mgr_node_lock);
2842 if (target_node && target_proc == proc) {
2843 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2844 proc->pid, thread->pid);
2845 return_error = BR_FAILED_REPLY;
2846 return_error_param = -EINVAL;
2847 return_error_line = __LINE__;
2848 goto err_invalid_target_handle;
2849 }
2842 } 2850 }
2843 if (!target_node) { 2851 if (!target_node) {
2844 /* 2852 /*
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 1e6396bb807b..597d40893862 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -312,8 +312,9 @@ static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
312 * This checks whether the memory was allocated from the per-device 312 * This checks whether the memory was allocated from the per-device
313 * coherent memory pool and if so, maps that memory to the provided vma. 313 * coherent memory pool and if so, maps that memory to the provided vma.
314 * 314 *
315 * Returns 1 if we correctly mapped the memory, or 0 if the caller should 315 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
316 * proceed with mapping memory from generic pools. 316 * should return @ret, or 0 if they should proceed with mapping memory from
317 * generic areas.
317 */ 318 */
318int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, 319int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
319 void *vaddr, size_t size, int *ret) 320 void *vaddr, size_t size, int *ret)
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 3b118353ea17..d82566d6e237 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -226,7 +226,6 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
226#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP 226#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
227 unsigned long user_count = vma_pages(vma); 227 unsigned long user_count = vma_pages(vma);
228 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; 228 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
229 unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
230 unsigned long off = vma->vm_pgoff; 229 unsigned long off = vma->vm_pgoff;
231 230
232 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 231 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -234,12 +233,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
234 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) 233 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
235 return ret; 234 return ret;
236 235
237 if (off < count && user_count <= (count - off)) { 236 if (off < count && user_count <= (count - off))
238 ret = remap_pfn_range(vma, vma->vm_start, 237 ret = remap_pfn_range(vma, vma->vm_start,
239 pfn + off, 238 page_to_pfn(virt_to_page(cpu_addr)) + off,
240 user_count << PAGE_SHIFT, 239 user_count << PAGE_SHIFT,
241 vma->vm_page_prot); 240 vma->vm_page_prot);
242 }
243#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */ 241#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
244 242
245 return ret; 243 return ret;
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 31b5015b59fe..358354148dec 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -537,8 +537,8 @@ exit:
537} 537}
538 538
539/** 539/**
540 * fw_load_sysfs_fallback - load a firmware via the syfs fallback mechanism 540 * fw_load_sysfs_fallback - load a firmware via the sysfs fallback mechanism
541 * @fw_sysfs: firmware syfs information for the firmware to load 541 * @fw_sysfs: firmware sysfs information for the firmware to load
542 * @opt_flags: flags of options, FW_OPT_* 542 * @opt_flags: flags of options, FW_OPT_*
543 * @timeout: timeout to wait for the load 543 * @timeout: timeout to wait for the load
544 * 544 *
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
index dfebc644ed35..f8255670a663 100644
--- a/drivers/base/firmware_loader/fallback.h
+++ b/drivers/base/firmware_loader/fallback.h
@@ -6,7 +6,7 @@
6#include <linux/device.h> 6#include <linux/device.h>
7 7
8/** 8/**
9 * struct firmware_fallback_config - firmware fallback configuratioon settings 9 * struct firmware_fallback_config - firmware fallback configuration settings
10 * 10 *
11 * Helps describe and fine tune the fallback mechanism. 11 * Helps describe and fine tune the fallback mechanism.
12 * 12 *
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c9d04497a415..5d4e31655d96 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -451,25 +451,47 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq)
451static void lo_complete_rq(struct request *rq) 451static void lo_complete_rq(struct request *rq)
452{ 452{
453 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); 453 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
454 blk_status_t ret = BLK_STS_OK;
454 455
455 if (unlikely(req_op(cmd->rq) == REQ_OP_READ && cmd->use_aio && 456 if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
456 cmd->ret >= 0 && cmd->ret < blk_rq_bytes(cmd->rq))) { 457 req_op(rq) != REQ_OP_READ) {
457 struct bio *bio = cmd->rq->bio; 458 if (cmd->ret < 0)
458 459 ret = BLK_STS_IOERR;
459 bio_advance(bio, cmd->ret); 460 goto end_io;
460 zero_fill_bio(bio);
461 } 461 }
462 462
463 blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK); 463 /*
464 * Short READ - if we got some data, advance our request and
465 * retry it. If we got no data, end the rest with EIO.
466 */
467 if (cmd->ret) {
468 blk_update_request(rq, BLK_STS_OK, cmd->ret);
469 cmd->ret = 0;
470 blk_mq_requeue_request(rq, true);
471 } else {
472 if (cmd->use_aio) {
473 struct bio *bio = rq->bio;
474
475 while (bio) {
476 zero_fill_bio(bio);
477 bio = bio->bi_next;
478 }
479 }
480 ret = BLK_STS_IOERR;
481end_io:
482 blk_mq_end_request(rq, ret);
483 }
464} 484}
465 485
466static void lo_rw_aio_do_completion(struct loop_cmd *cmd) 486static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
467{ 487{
488 struct request *rq = blk_mq_rq_from_pdu(cmd);
489
468 if (!atomic_dec_and_test(&cmd->ref)) 490 if (!atomic_dec_and_test(&cmd->ref))
469 return; 491 return;
470 kfree(cmd->bvec); 492 kfree(cmd->bvec);
471 cmd->bvec = NULL; 493 cmd->bvec = NULL;
472 blk_mq_complete_request(cmd->rq); 494 blk_mq_complete_request(rq);
473} 495}
474 496
475static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2) 497static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
@@ -487,7 +509,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
487{ 509{
488 struct iov_iter iter; 510 struct iov_iter iter;
489 struct bio_vec *bvec; 511 struct bio_vec *bvec;
490 struct request *rq = cmd->rq; 512 struct request *rq = blk_mq_rq_from_pdu(cmd);
491 struct bio *bio = rq->bio; 513 struct bio *bio = rq->bio;
492 struct file *file = lo->lo_backing_file; 514 struct file *file = lo->lo_backing_file;
493 unsigned int offset; 515 unsigned int offset;
@@ -1702,15 +1724,16 @@ EXPORT_SYMBOL(loop_unregister_transfer);
1702static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, 1724static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
1703 const struct blk_mq_queue_data *bd) 1725 const struct blk_mq_queue_data *bd)
1704{ 1726{
1705 struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); 1727 struct request *rq = bd->rq;
1706 struct loop_device *lo = cmd->rq->q->queuedata; 1728 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
1729 struct loop_device *lo = rq->q->queuedata;
1707 1730
1708 blk_mq_start_request(bd->rq); 1731 blk_mq_start_request(rq);
1709 1732
1710 if (lo->lo_state != Lo_bound) 1733 if (lo->lo_state != Lo_bound)
1711 return BLK_STS_IOERR; 1734 return BLK_STS_IOERR;
1712 1735
1713 switch (req_op(cmd->rq)) { 1736 switch (req_op(rq)) {
1714 case REQ_OP_FLUSH: 1737 case REQ_OP_FLUSH:
1715 case REQ_OP_DISCARD: 1738 case REQ_OP_DISCARD:
1716 case REQ_OP_WRITE_ZEROES: 1739 case REQ_OP_WRITE_ZEROES:
@@ -1723,8 +1746,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
1723 1746
1724 /* always use the first bio's css */ 1747 /* always use the first bio's css */
1725#ifdef CONFIG_BLK_CGROUP 1748#ifdef CONFIG_BLK_CGROUP
1726 if (cmd->use_aio && cmd->rq->bio && cmd->rq->bio->bi_css) { 1749 if (cmd->use_aio && rq->bio && rq->bio->bi_css) {
1727 cmd->css = cmd->rq->bio->bi_css; 1750 cmd->css = rq->bio->bi_css;
1728 css_get(cmd->css); 1751 css_get(cmd->css);
1729 } else 1752 } else
1730#endif 1753#endif
@@ -1736,8 +1759,9 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
1736 1759
1737static void loop_handle_cmd(struct loop_cmd *cmd) 1760static void loop_handle_cmd(struct loop_cmd *cmd)
1738{ 1761{
1739 const bool write = op_is_write(req_op(cmd->rq)); 1762 struct request *rq = blk_mq_rq_from_pdu(cmd);
1740 struct loop_device *lo = cmd->rq->q->queuedata; 1763 const bool write = op_is_write(req_op(rq));
1764 struct loop_device *lo = rq->q->queuedata;
1741 int ret = 0; 1765 int ret = 0;
1742 1766
1743 if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { 1767 if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
@@ -1745,12 +1769,12 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
1745 goto failed; 1769 goto failed;
1746 } 1770 }
1747 1771
1748 ret = do_req_filebacked(lo, cmd->rq); 1772 ret = do_req_filebacked(lo, rq);
1749 failed: 1773 failed:
1750 /* complete non-aio request */ 1774 /* complete non-aio request */
1751 if (!cmd->use_aio || ret) { 1775 if (!cmd->use_aio || ret) {
1752 cmd->ret = ret ? -EIO : 0; 1776 cmd->ret = ret ? -EIO : 0;
1753 blk_mq_complete_request(cmd->rq); 1777 blk_mq_complete_request(rq);
1754 } 1778 }
1755} 1779}
1756 1780
@@ -1767,9 +1791,7 @@ static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
1767{ 1791{
1768 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); 1792 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
1769 1793
1770 cmd->rq = rq;
1771 kthread_init_work(&cmd->work, loop_queue_work); 1794 kthread_init_work(&cmd->work, loop_queue_work);
1772
1773 return 0; 1795 return 0;
1774} 1796}
1775 1797
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 0f45416e4fcf..b78de9879f4f 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -66,7 +66,6 @@ struct loop_device {
66 66
67struct loop_cmd { 67struct loop_cmd {
68 struct kthread_work work; 68 struct kthread_work work;
69 struct request *rq;
70 bool use_aio; /* use AIO interface to handle I/O */ 69 bool use_aio; /* use AIO interface to handle I/O */
71 atomic_t ref; /* only for aio */ 70 atomic_t ref; /* only for aio */
72 long ret; 71 long ret;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 64e066eba72e..0e31884a9519 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -110,7 +110,7 @@ struct iwm {
110/* Select values for swim_select and swim_readbit */ 110/* Select values for swim_select and swim_readbit */
111 111
112#define READ_DATA_0 0x074 112#define READ_DATA_0 0x074
113#define TWOMEG_DRIVE 0x075 113#define ONEMEG_DRIVE 0x075
114#define SINGLE_SIDED 0x076 114#define SINGLE_SIDED 0x076
115#define DRIVE_PRESENT 0x077 115#define DRIVE_PRESENT 0x077
116#define DISK_IN 0x170 116#define DISK_IN 0x170
@@ -118,9 +118,9 @@ struct iwm {
118#define TRACK_ZERO 0x172 118#define TRACK_ZERO 0x172
119#define TACHO 0x173 119#define TACHO 0x173
120#define READ_DATA_1 0x174 120#define READ_DATA_1 0x174
121#define MFM_MODE 0x175 121#define GCR_MODE 0x175
122#define SEEK_COMPLETE 0x176 122#define SEEK_COMPLETE 0x176
123#define ONEMEG_MEDIA 0x177 123#define TWOMEG_MEDIA 0x177
124 124
125/* Bits in handshake register */ 125/* Bits in handshake register */
126 126
@@ -612,7 +612,6 @@ static void setup_medium(struct floppy_state *fs)
612 struct floppy_struct *g; 612 struct floppy_struct *g;
613 fs->disk_in = 1; 613 fs->disk_in = 1;
614 fs->write_protected = swim_readbit(base, WRITE_PROT); 614 fs->write_protected = swim_readbit(base, WRITE_PROT);
615 fs->type = swim_readbit(base, ONEMEG_MEDIA);
616 615
617 if (swim_track00(base)) 616 if (swim_track00(base))
618 printk(KERN_ERR 617 printk(KERN_ERR
@@ -620,6 +619,9 @@ static void setup_medium(struct floppy_state *fs)
620 619
621 swim_track00(base); 620 swim_track00(base);
622 621
622 fs->type = swim_readbit(base, TWOMEG_MEDIA) ?
623 HD_MEDIA : DD_MEDIA;
624 fs->head_number = swim_readbit(base, SINGLE_SIDED) ? 1 : 2;
623 get_floppy_geometry(fs, 0, &g); 625 get_floppy_geometry(fs, 0, &g);
624 fs->total_secs = g->size; 626 fs->total_secs = g->size;
625 fs->secpercyl = g->head * g->sect; 627 fs->secpercyl = g->head * g->sect;
@@ -646,7 +648,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
646 648
647 swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2); 649 swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2);
648 udelay(10); 650 udelay(10);
649 swim_drive(base, INTERNAL_DRIVE); 651 swim_drive(base, fs->location);
650 swim_motor(base, ON); 652 swim_motor(base, ON);
651 swim_action(base, SETMFM); 653 swim_action(base, SETMFM);
652 if (fs->ejected) 654 if (fs->ejected)
@@ -656,6 +658,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
656 goto out; 658 goto out;
657 } 659 }
658 660
661 set_capacity(fs->disk, fs->total_secs);
662
659 if (mode & FMODE_NDELAY) 663 if (mode & FMODE_NDELAY)
660 return 0; 664 return 0;
661 665
@@ -727,14 +731,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
727 if (copy_to_user((void __user *) param, (void *) &floppy_type, 731 if (copy_to_user((void __user *) param, (void *) &floppy_type,
728 sizeof(struct floppy_struct))) 732 sizeof(struct floppy_struct)))
729 return -EFAULT; 733 return -EFAULT;
730 break; 734 return 0;
731
732 default:
733 printk(KERN_DEBUG "SWIM floppy_ioctl: unknown cmd %d\n",
734 cmd);
735 return -ENOSYS;
736 } 735 }
737 return 0; 736 return -ENOTTY;
738} 737}
739 738
740static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo) 739static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -795,7 +794,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
795 struct swim_priv *swd = data; 794 struct swim_priv *swd = data;
796 int drive = (*part & 3); 795 int drive = (*part & 3);
797 796
798 if (drive > swd->floppy_count) 797 if (drive >= swd->floppy_count)
799 return NULL; 798 return NULL;
800 799
801 *part = 0; 800 *part = 0;
@@ -813,10 +812,9 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
813 812
814 swim_motor(base, OFF); 813 swim_motor(base, OFF);
815 814
816 if (swim_readbit(base, SINGLE_SIDED)) 815 fs->type = HD_MEDIA;
817 fs->head_number = 1; 816 fs->head_number = 2;
818 else 817
819 fs->head_number = 2;
820 fs->ref_count = 0; 818 fs->ref_count = 0;
821 fs->ejected = 1; 819 fs->ejected = 1;
822 820
@@ -834,10 +832,12 @@ static int swim_floppy_init(struct swim_priv *swd)
834 /* scan floppy drives */ 832 /* scan floppy drives */
835 833
836 swim_drive(base, INTERNAL_DRIVE); 834 swim_drive(base, INTERNAL_DRIVE);
837 if (swim_readbit(base, DRIVE_PRESENT)) 835 if (swim_readbit(base, DRIVE_PRESENT) &&
836 !swim_readbit(base, ONEMEG_DRIVE))
838 swim_add_floppy(swd, INTERNAL_DRIVE); 837 swim_add_floppy(swd, INTERNAL_DRIVE);
839 swim_drive(base, EXTERNAL_DRIVE); 838 swim_drive(base, EXTERNAL_DRIVE);
840 if (swim_readbit(base, DRIVE_PRESENT)) 839 if (swim_readbit(base, DRIVE_PRESENT) &&
840 !swim_readbit(base, ONEMEG_DRIVE))
841 swim_add_floppy(swd, EXTERNAL_DRIVE); 841 swim_add_floppy(swd, EXTERNAL_DRIVE);
842 842
843 /* register floppy drives */ 843 /* register floppy drives */
@@ -861,7 +861,6 @@ static int swim_floppy_init(struct swim_priv *swd)
861 &swd->lock); 861 &swd->lock);
862 if (!swd->unit[drive].disk->queue) { 862 if (!swd->unit[drive].disk->queue) {
863 err = -ENOMEM; 863 err = -ENOMEM;
864 put_disk(swd->unit[drive].disk);
865 goto exit_put_disks; 864 goto exit_put_disks;
866 } 865 }
867 blk_queue_bounce_limit(swd->unit[drive].disk->queue, 866 blk_queue_bounce_limit(swd->unit[drive].disk->queue,
@@ -911,7 +910,7 @@ static int swim_probe(struct platform_device *dev)
911 goto out; 910 goto out;
912 } 911 }
913 912
914 swim_base = ioremap(res->start, resource_size(res)); 913 swim_base = (struct swim __iomem *)res->start;
915 if (!swim_base) { 914 if (!swim_base) {
916 ret = -ENOMEM; 915 ret = -ENOMEM;
917 goto out_release_io; 916 goto out_release_io;
@@ -923,7 +922,7 @@ static int swim_probe(struct platform_device *dev)
923 if (!get_swim_mode(swim_base)) { 922 if (!get_swim_mode(swim_base)) {
924 printk(KERN_INFO "SWIM device not found !\n"); 923 printk(KERN_INFO "SWIM device not found !\n");
925 ret = -ENODEV; 924 ret = -ENODEV;
926 goto out_iounmap; 925 goto out_release_io;
927 } 926 }
928 927
929 /* set platform driver data */ 928 /* set platform driver data */
@@ -931,7 +930,7 @@ static int swim_probe(struct platform_device *dev)
931 swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL); 930 swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL);
932 if (!swd) { 931 if (!swd) {
933 ret = -ENOMEM; 932 ret = -ENOMEM;
934 goto out_iounmap; 933 goto out_release_io;
935 } 934 }
936 platform_set_drvdata(dev, swd); 935 platform_set_drvdata(dev, swd);
937 936
@@ -945,8 +944,6 @@ static int swim_probe(struct platform_device *dev)
945 944
946out_kfree: 945out_kfree:
947 kfree(swd); 946 kfree(swd);
948out_iounmap:
949 iounmap(swim_base);
950out_release_io: 947out_release_io:
951 release_mem_region(res->start, resource_size(res)); 948 release_mem_region(res->start, resource_size(res));
952out: 949out:
@@ -974,8 +971,6 @@ static int swim_remove(struct platform_device *dev)
974 for (drive = 0; drive < swd->floppy_count; drive++) 971 for (drive = 0; drive < swd->floppy_count; drive++)
975 floppy_eject(&swd->unit[drive]); 972 floppy_eject(&swd->unit[drive]);
976 973
977 iounmap(swd->base);
978
979 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 974 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
980 if (res) 975 if (res)
981 release_mem_region(res->start, resource_size(res)); 976 release_mem_region(res->start, resource_size(res));
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index af51015d056e..469541c1e51e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -148,7 +148,7 @@ struct swim3 {
148#define MOTOR_ON 2 148#define MOTOR_ON 2
149#define RELAX 3 /* also eject in progress */ 149#define RELAX 3 /* also eject in progress */
150#define READ_DATA_0 4 150#define READ_DATA_0 4
151#define TWOMEG_DRIVE 5 151#define ONEMEG_DRIVE 5
152#define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */ 152#define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */
153#define DRIVE_PRESENT 7 153#define DRIVE_PRESENT 7
154#define DISK_IN 8 154#define DISK_IN 8
@@ -156,9 +156,9 @@ struct swim3 {
156#define TRACK_ZERO 10 156#define TRACK_ZERO 10
157#define TACHO 11 157#define TACHO 11
158#define READ_DATA_1 12 158#define READ_DATA_1 12
159#define MFM_MODE 13 159#define GCR_MODE 13
160#define SEEK_COMPLETE 14 160#define SEEK_COMPLETE 14
161#define ONEMEG_MEDIA 15 161#define TWOMEG_MEDIA 15
162 162
163/* Definitions of values used in writing and formatting */ 163/* Definitions of values used in writing and formatting */
164#define DATA_ESCAPE 0x99 164#define DATA_ESCAPE 0x99
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index d1c0b60e9326..6dc177bf4c42 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -33,6 +33,7 @@ config HISILICON_LPC
33 bool "Support for ISA I/O space on HiSilicon Hip06/7" 33 bool "Support for ISA I/O space on HiSilicon Hip06/7"
34 depends on ARM64 && (ARCH_HISI || COMPILE_TEST) 34 depends on ARM64 && (ARCH_HISI || COMPILE_TEST)
35 select INDIRECT_PIO 35 select INDIRECT_PIO
36 select MFD_CORE if ACPI
36 help 37 help
37 Driver to enable I/O access to devices attached to the Low Pin 38 Driver to enable I/O access to devices attached to the Low Pin
38 Count bus on the HiSilicon Hip06/7 SoC. 39 Count bus on the HiSilicon Hip06/7 SoC.
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 8327478effd0..bfc566d3f31a 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2371,7 +2371,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
2371 if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT) 2371 if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
2372 return media_changed(cdi, 1); 2372 return media_changed(cdi, 1);
2373 2373
2374 if ((unsigned int)arg >= cdi->capacity) 2374 if (arg >= cdi->capacity)
2375 return -EINVAL; 2375 return -EINVAL;
2376 2376
2377 info = kmalloc(sizeof(*info), GFP_KERNEL); 2377 info = kmalloc(sizeof(*info), GFP_KERNEL);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 3cd3aae24d6d..cd888d4ee605 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -261,6 +261,7 @@
261#include <linux/ptrace.h> 261#include <linux/ptrace.h>
262#include <linux/workqueue.h> 262#include <linux/workqueue.h>
263#include <linux/irq.h> 263#include <linux/irq.h>
264#include <linux/ratelimit.h>
264#include <linux/syscalls.h> 265#include <linux/syscalls.h>
265#include <linux/completion.h> 266#include <linux/completion.h>
266#include <linux/uuid.h> 267#include <linux/uuid.h>
@@ -438,6 +439,16 @@ static void _crng_backtrack_protect(struct crng_state *crng,
438static void process_random_ready_list(void); 439static void process_random_ready_list(void);
439static void _get_random_bytes(void *buf, int nbytes); 440static void _get_random_bytes(void *buf, int nbytes);
440 441
442static struct ratelimit_state unseeded_warning =
443 RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
444static struct ratelimit_state urandom_warning =
445 RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
446
447static int ratelimit_disable __read_mostly;
448
449module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
450MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
451
441/********************************************************************** 452/**********************************************************************
442 * 453 *
443 * OS independent entropy store. Here are the functions which handle 454 * OS independent entropy store. Here are the functions which handle
@@ -789,7 +800,7 @@ static void crng_initialize(struct crng_state *crng)
789} 800}
790 801
791#ifdef CONFIG_NUMA 802#ifdef CONFIG_NUMA
792static void numa_crng_init(void) 803static void do_numa_crng_init(struct work_struct *work)
793{ 804{
794 int i; 805 int i;
795 struct crng_state *crng; 806 struct crng_state *crng;
@@ -810,6 +821,13 @@ static void numa_crng_init(void)
810 kfree(pool); 821 kfree(pool);
811 } 822 }
812} 823}
824
825static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
826
827static void numa_crng_init(void)
828{
829 schedule_work(&numa_crng_init_work);
830}
813#else 831#else
814static void numa_crng_init(void) {} 832static void numa_crng_init(void) {}
815#endif 833#endif
@@ -925,6 +943,18 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
925 process_random_ready_list(); 943 process_random_ready_list();
926 wake_up_interruptible(&crng_init_wait); 944 wake_up_interruptible(&crng_init_wait);
927 pr_notice("random: crng init done\n"); 945 pr_notice("random: crng init done\n");
946 if (unseeded_warning.missed) {
947 pr_notice("random: %d get_random_xx warning(s) missed "
948 "due to ratelimiting\n",
949 unseeded_warning.missed);
950 unseeded_warning.missed = 0;
951 }
952 if (urandom_warning.missed) {
953 pr_notice("random: %d urandom warning(s) missed "
954 "due to ratelimiting\n",
955 urandom_warning.missed);
956 urandom_warning.missed = 0;
957 }
928 } 958 }
929} 959}
930 960
@@ -1565,8 +1595,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
1565#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM 1595#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
1566 print_once = true; 1596 print_once = true;
1567#endif 1597#endif
1568 pr_notice("random: %s called from %pS with crng_init=%d\n", 1598 if (__ratelimit(&unseeded_warning))
1569 func_name, caller, crng_init); 1599 pr_notice("random: %s called from %pS with crng_init=%d\n",
1600 func_name, caller, crng_init);
1570} 1601}
1571 1602
1572/* 1603/*
@@ -1760,6 +1791,10 @@ static int rand_initialize(void)
1760 init_std_data(&blocking_pool); 1791 init_std_data(&blocking_pool);
1761 crng_initialize(&primary_crng); 1792 crng_initialize(&primary_crng);
1762 crng_global_init_time = jiffies; 1793 crng_global_init_time = jiffies;
1794 if (ratelimit_disable) {
1795 urandom_warning.interval = 0;
1796 unseeded_warning.interval = 0;
1797 }
1763 return 0; 1798 return 0;
1764} 1799}
1765early_initcall(rand_initialize); 1800early_initcall(rand_initialize);
@@ -1827,9 +1862,10 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1827 1862
1828 if (!crng_ready() && maxwarn > 0) { 1863 if (!crng_ready() && maxwarn > 0) {
1829 maxwarn--; 1864 maxwarn--;
1830 printk(KERN_NOTICE "random: %s: uninitialized urandom read " 1865 if (__ratelimit(&urandom_warning))
1831 "(%zd bytes read)\n", 1866 printk(KERN_NOTICE "random: %s: uninitialized "
1832 current->comm, nbytes); 1867 "urandom read (%zd bytes read)\n",
1868 current->comm, nbytes);
1833 spin_lock_irqsave(&primary_crng.lock, flags); 1869 spin_lock_irqsave(&primary_crng.lock, flags);
1834 crng_init_cnt = 0; 1870 crng_init_cnt = 0;
1835 spin_unlock_irqrestore(&primary_crng.lock, flags); 1871 spin_unlock_irqrestore(&primary_crng.lock, flags);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 468f06134012..21085515814f 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -422,7 +422,7 @@ static void reclaim_dma_bufs(void)
422 } 422 }
423} 423}
424 424
425static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size, 425static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size,
426 int pages) 426 int pages)
427{ 427{
428 struct port_buffer *buf; 428 struct port_buffer *buf;
@@ -445,16 +445,16 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
445 return buf; 445 return buf;
446 } 446 }
447 447
448 if (is_rproc_serial(vq->vdev)) { 448 if (is_rproc_serial(vdev)) {
449 /* 449 /*
450 * Allocate DMA memory from ancestor. When a virtio 450 * Allocate DMA memory from ancestor. When a virtio
451 * device is created by remoteproc, the DMA memory is 451 * device is created by remoteproc, the DMA memory is
452 * associated with the grandparent device: 452 * associated with the grandparent device:
453 * vdev => rproc => platform-dev. 453 * vdev => rproc => platform-dev.
454 */ 454 */
455 if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent) 455 if (!vdev->dev.parent || !vdev->dev.parent->parent)
456 goto free_buf; 456 goto free_buf;
457 buf->dev = vq->vdev->dev.parent->parent; 457 buf->dev = vdev->dev.parent->parent;
458 458
459 /* Increase device refcnt to avoid freeing it */ 459 /* Increase device refcnt to avoid freeing it */
460 get_device(buf->dev); 460 get_device(buf->dev);
@@ -838,7 +838,7 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
838 838
839 count = min((size_t)(32 * 1024), count); 839 count = min((size_t)(32 * 1024), count);
840 840
841 buf = alloc_buf(port->out_vq, count, 0); 841 buf = alloc_buf(port->portdev->vdev, count, 0);
842 if (!buf) 842 if (!buf)
843 return -ENOMEM; 843 return -ENOMEM;
844 844
@@ -957,7 +957,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
957 if (ret < 0) 957 if (ret < 0)
958 goto error_out; 958 goto error_out;
959 959
960 buf = alloc_buf(port->out_vq, 0, pipe->nrbufs); 960 buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs);
961 if (!buf) { 961 if (!buf) {
962 ret = -ENOMEM; 962 ret = -ENOMEM;
963 goto error_out; 963 goto error_out;
@@ -1374,7 +1374,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
1374 1374
1375 nr_added_bufs = 0; 1375 nr_added_bufs = 0;
1376 do { 1376 do {
1377 buf = alloc_buf(vq, PAGE_SIZE, 0); 1377 buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
1378 if (!buf) 1378 if (!buf)
1379 break; 1379 break;
1380 1380
@@ -1402,7 +1402,6 @@ static int add_port(struct ports_device *portdev, u32 id)
1402{ 1402{
1403 char debugfs_name[16]; 1403 char debugfs_name[16];
1404 struct port *port; 1404 struct port *port;
1405 struct port_buffer *buf;
1406 dev_t devt; 1405 dev_t devt;
1407 unsigned int nr_added_bufs; 1406 unsigned int nr_added_bufs;
1408 int err; 1407 int err;
@@ -1513,8 +1512,6 @@ static int add_port(struct ports_device *portdev, u32 id)
1513 return 0; 1512 return 0;
1514 1513
1515free_inbufs: 1514free_inbufs:
1516 while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1517 free_buf(buf, true);
1518free_device: 1515free_device:
1519 device_destroy(pdrvdata.class, port->dev->devt); 1516 device_destroy(pdrvdata.class, port->dev->devt);
1520free_cdev: 1517free_cdev:
@@ -1539,34 +1536,14 @@ static void remove_port(struct kref *kref)
1539 1536
1540static void remove_port_data(struct port *port) 1537static void remove_port_data(struct port *port)
1541{ 1538{
1542 struct port_buffer *buf;
1543
1544 spin_lock_irq(&port->inbuf_lock); 1539 spin_lock_irq(&port->inbuf_lock);
1545 /* Remove unused data this port might have received. */ 1540 /* Remove unused data this port might have received. */
1546 discard_port_data(port); 1541 discard_port_data(port);
1547 spin_unlock_irq(&port->inbuf_lock); 1542 spin_unlock_irq(&port->inbuf_lock);
1548 1543
1549 /* Remove buffers we queued up for the Host to send us data in. */
1550 do {
1551 spin_lock_irq(&port->inbuf_lock);
1552 buf = virtqueue_detach_unused_buf(port->in_vq);
1553 spin_unlock_irq(&port->inbuf_lock);
1554 if (buf)
1555 free_buf(buf, true);
1556 } while (buf);
1557
1558 spin_lock_irq(&port->outvq_lock); 1544 spin_lock_irq(&port->outvq_lock);
1559 reclaim_consumed_buffers(port); 1545 reclaim_consumed_buffers(port);
1560 spin_unlock_irq(&port->outvq_lock); 1546 spin_unlock_irq(&port->outvq_lock);
1561
1562 /* Free pending buffers from the out-queue. */
1563 do {
1564 spin_lock_irq(&port->outvq_lock);
1565 buf = virtqueue_detach_unused_buf(port->out_vq);
1566 spin_unlock_irq(&port->outvq_lock);
1567 if (buf)
1568 free_buf(buf, true);
1569 } while (buf);
1570} 1547}
1571 1548
1572/* 1549/*
@@ -1791,13 +1768,24 @@ static void control_work_handler(struct work_struct *work)
1791 spin_unlock(&portdev->c_ivq_lock); 1768 spin_unlock(&portdev->c_ivq_lock);
1792} 1769}
1793 1770
1771static void flush_bufs(struct virtqueue *vq, bool can_sleep)
1772{
1773 struct port_buffer *buf;
1774 unsigned int len;
1775
1776 while ((buf = virtqueue_get_buf(vq, &len)))
1777 free_buf(buf, can_sleep);
1778}
1779
1794static void out_intr(struct virtqueue *vq) 1780static void out_intr(struct virtqueue *vq)
1795{ 1781{
1796 struct port *port; 1782 struct port *port;
1797 1783
1798 port = find_port_by_vq(vq->vdev->priv, vq); 1784 port = find_port_by_vq(vq->vdev->priv, vq);
1799 if (!port) 1785 if (!port) {
1786 flush_bufs(vq, false);
1800 return; 1787 return;
1788 }
1801 1789
1802 wake_up_interruptible(&port->waitqueue); 1790 wake_up_interruptible(&port->waitqueue);
1803} 1791}
@@ -1808,8 +1796,10 @@ static void in_intr(struct virtqueue *vq)
1808 unsigned long flags; 1796 unsigned long flags;
1809 1797
1810 port = find_port_by_vq(vq->vdev->priv, vq); 1798 port = find_port_by_vq(vq->vdev->priv, vq);
1811 if (!port) 1799 if (!port) {
1800 flush_bufs(vq, false);
1812 return; 1801 return;
1802 }
1813 1803
1814 spin_lock_irqsave(&port->inbuf_lock, flags); 1804 spin_lock_irqsave(&port->inbuf_lock, flags);
1815 port->inbuf = get_inbuf(port); 1805 port->inbuf = get_inbuf(port);
@@ -1984,24 +1974,54 @@ static const struct file_operations portdev_fops = {
1984 1974
1985static void remove_vqs(struct ports_device *portdev) 1975static void remove_vqs(struct ports_device *portdev)
1986{ 1976{
1977 struct virtqueue *vq;
1978
1979 virtio_device_for_each_vq(portdev->vdev, vq) {
1980 struct port_buffer *buf;
1981
1982 flush_bufs(vq, true);
1983 while ((buf = virtqueue_detach_unused_buf(vq)))
1984 free_buf(buf, true);
1985 }
1987 portdev->vdev->config->del_vqs(portdev->vdev); 1986 portdev->vdev->config->del_vqs(portdev->vdev);
1988 kfree(portdev->in_vqs); 1987 kfree(portdev->in_vqs);
1989 kfree(portdev->out_vqs); 1988 kfree(portdev->out_vqs);
1990} 1989}
1991 1990
1992static void remove_controlq_data(struct ports_device *portdev) 1991static void virtcons_remove(struct virtio_device *vdev)
1993{ 1992{
1994 struct port_buffer *buf; 1993 struct ports_device *portdev;
1995 unsigned int len; 1994 struct port *port, *port2;
1996 1995
1997 if (!use_multiport(portdev)) 1996 portdev = vdev->priv;
1998 return;
1999 1997
2000 while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) 1998 spin_lock_irq(&pdrvdata_lock);
2001 free_buf(buf, true); 1999 list_del(&portdev->list);
2000 spin_unlock_irq(&pdrvdata_lock);
2002 2001
2003 while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) 2002 /* Disable interrupts for vqs */
2004 free_buf(buf, true); 2003 vdev->config->reset(vdev);
2004 /* Finish up work that's lined up */
2005 if (use_multiport(portdev))
2006 cancel_work_sync(&portdev->control_work);
2007 else
2008 cancel_work_sync(&portdev->config_work);
2009
2010 list_for_each_entry_safe(port, port2, &portdev->ports, list)
2011 unplug_port(port);
2012
2013 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
2014
2015 /*
2016 * When yanking out a device, we immediately lose the
2017 * (device-side) queues. So there's no point in keeping the
2018 * guest side around till we drop our final reference. This
2019 * also means that any ports which are in an open state will
2020 * have to just stop using the port, as the vqs are going
2021 * away.
2022 */
2023 remove_vqs(portdev);
2024 kfree(portdev);
2005} 2025}
2006 2026
2007/* 2027/*
@@ -2070,6 +2090,7 @@ static int virtcons_probe(struct virtio_device *vdev)
2070 2090
2071 spin_lock_init(&portdev->ports_lock); 2091 spin_lock_init(&portdev->ports_lock);
2072 INIT_LIST_HEAD(&portdev->ports); 2092 INIT_LIST_HEAD(&portdev->ports);
2093 INIT_LIST_HEAD(&portdev->list);
2073 2094
2074 virtio_device_ready(portdev->vdev); 2095 virtio_device_ready(portdev->vdev);
2075 2096
@@ -2087,8 +2108,15 @@ static int virtcons_probe(struct virtio_device *vdev)
2087 if (!nr_added_bufs) { 2108 if (!nr_added_bufs) {
2088 dev_err(&vdev->dev, 2109 dev_err(&vdev->dev,
2089 "Error allocating buffers for control queue\n"); 2110 "Error allocating buffers for control queue\n");
2090 err = -ENOMEM; 2111 /*
2091 goto free_vqs; 2112 * The host might want to notify mgmt sw about device
2113 * add failure.
2114 */
2115 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
2116 VIRTIO_CONSOLE_DEVICE_READY, 0);
2117 /* Device was functional: we need full cleanup. */
2118 virtcons_remove(vdev);
2119 return -ENOMEM;
2092 } 2120 }
2093 } else { 2121 } else {
2094 /* 2122 /*
@@ -2119,11 +2147,6 @@ static int virtcons_probe(struct virtio_device *vdev)
2119 2147
2120 return 0; 2148 return 0;
2121 2149
2122free_vqs:
2123 /* The host might want to notify mgmt sw about device add failure */
2124 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
2125 VIRTIO_CONSOLE_DEVICE_READY, 0);
2126 remove_vqs(portdev);
2127free_chrdev: 2150free_chrdev:
2128 unregister_chrdev(portdev->chr_major, "virtio-portsdev"); 2151 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
2129free: 2152free:
@@ -2132,43 +2155,6 @@ fail:
2132 return err; 2155 return err;
2133} 2156}
2134 2157
2135static void virtcons_remove(struct virtio_device *vdev)
2136{
2137 struct ports_device *portdev;
2138 struct port *port, *port2;
2139
2140 portdev = vdev->priv;
2141
2142 spin_lock_irq(&pdrvdata_lock);
2143 list_del(&portdev->list);
2144 spin_unlock_irq(&pdrvdata_lock);
2145
2146 /* Disable interrupts for vqs */
2147 vdev->config->reset(vdev);
2148 /* Finish up work that's lined up */
2149 if (use_multiport(portdev))
2150 cancel_work_sync(&portdev->control_work);
2151 else
2152 cancel_work_sync(&portdev->config_work);
2153
2154 list_for_each_entry_safe(port, port2, &portdev->ports, list)
2155 unplug_port(port);
2156
2157 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
2158
2159 /*
2160 * When yanking out a device, we immediately lose the
2161 * (device-side) queues. So there's no point in keeping the
2162 * guest side around till we drop our final reference. This
2163 * also means that any ports which are in an open state will
2164 * have to just stop using the port, as the vqs are going
2165 * away.
2166 */
2167 remove_controlq_data(portdev);
2168 remove_vqs(portdev);
2169 kfree(portdev);
2170}
2171
2172static struct virtio_device_id id_table[] = { 2158static struct virtio_device_id id_table[] = {
2173 { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, 2159 { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
2174 { 0 }, 2160 { 0 },
@@ -2209,7 +2195,6 @@ static int virtcons_freeze(struct virtio_device *vdev)
2209 */ 2195 */
2210 if (use_multiport(portdev)) 2196 if (use_multiport(portdev))
2211 virtqueue_disable_cb(portdev->c_ivq); 2197 virtqueue_disable_cb(portdev->c_ivq);
2212 remove_controlq_data(portdev);
2213 2198
2214 list_for_each_entry(port, &portdev->ports, list) { 2199 list_for_each_entry(port, &portdev->ports, list) {
2215 virtqueue_disable_cb(port->in_vq); 2200 virtqueue_disable_cb(port->in_vq);
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index a782ce87715c..ed5e42461094 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -262,6 +262,8 @@ void proc_coredump_connector(struct task_struct *task)
262 ev->what = PROC_EVENT_COREDUMP; 262 ev->what = PROC_EVENT_COREDUMP;
263 ev->event_data.coredump.process_pid = task->pid; 263 ev->event_data.coredump.process_pid = task->pid;
264 ev->event_data.coredump.process_tgid = task->tgid; 264 ev->event_data.coredump.process_tgid = task->tgid;
265 ev->event_data.coredump.parent_pid = task->real_parent->pid;
266 ev->event_data.coredump.parent_tgid = task->real_parent->tgid;
265 267
266 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 268 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
267 msg->ack = 0; /* not used */ 269 msg->ack = 0; /* not used */
@@ -288,6 +290,8 @@ void proc_exit_connector(struct task_struct *task)
288 ev->event_data.exit.process_tgid = task->tgid; 290 ev->event_data.exit.process_tgid = task->tgid;
289 ev->event_data.exit.exit_code = task->exit_code; 291 ev->event_data.exit.exit_code = task->exit_code;
290 ev->event_data.exit.exit_signal = task->exit_signal; 292 ev->event_data.exit.exit_signal = task->exit_signal;
293 ev->event_data.exit.parent_pid = task->real_parent->pid;
294 ev->event_data.exit.parent_tgid = task->real_parent->tgid;
291 295
292 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); 296 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
293 msg->ack = 0; /* not used */ 297 msg->ack = 0; /* not used */
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 7f56fe5183f2..de55c7d57438 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -71,16 +71,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ
71 71
72 Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS. 72 Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS.
73 73
74config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
75 bool "Broadcom STB AVS CPUfreq driver sysfs debug capability"
76 depends on ARM_BRCMSTB_AVS_CPUFREQ
77 help
78 Enabling this option turns on debug support via sysfs under
79 /sys/kernel/debug/brcmstb-avs-cpufreq. It is possible to read all and
80 write some AVS mailbox registers through sysfs entries.
81
82 If in doubt, say N.
83
84config ARM_EXYNOS5440_CPUFREQ 74config ARM_EXYNOS5440_CPUFREQ
85 tristate "SAMSUNG EXYNOS5440" 75 tristate "SAMSUNG EXYNOS5440"
86 depends on SOC_EXYNOS5440 76 depends on SOC_EXYNOS5440
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 6cdac1aaf23c..b07559b9ed99 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -49,13 +49,6 @@
49#include <linux/platform_device.h> 49#include <linux/platform_device.h>
50#include <linux/semaphore.h> 50#include <linux/semaphore.h>
51 51
52#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
53#include <linux/ctype.h>
54#include <linux/debugfs.h>
55#include <linux/slab.h>
56#include <linux/uaccess.h>
57#endif
58
59/* Max number of arguments AVS calls take */ 52/* Max number of arguments AVS calls take */
60#define AVS_MAX_CMD_ARGS 4 53#define AVS_MAX_CMD_ARGS 4
61/* 54/*
@@ -182,88 +175,11 @@ struct private_data {
182 void __iomem *base; 175 void __iomem *base;
183 void __iomem *avs_intr_base; 176 void __iomem *avs_intr_base;
184 struct device *dev; 177 struct device *dev;
185#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
186 struct dentry *debugfs;
187#endif
188 struct completion done; 178 struct completion done;
189 struct semaphore sem; 179 struct semaphore sem;
190 struct pmap pmap; 180 struct pmap pmap;
191}; 181};
192 182
193#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
194
195enum debugfs_format {
196 DEBUGFS_NORMAL,
197 DEBUGFS_FLOAT,
198 DEBUGFS_REV,
199};
200
201struct debugfs_data {
202 struct debugfs_entry *entry;
203 struct private_data *priv;
204};
205
206struct debugfs_entry {
207 char *name;
208 u32 offset;
209 fmode_t mode;
210 enum debugfs_format format;
211};
212
213#define DEBUGFS_ENTRY(name, mode, format) { \
214 #name, AVS_MBOX_##name, mode, format \
215}
216
217/*
218 * These are used for debugfs only. Otherwise we use AVS_MBOX_PARAM() directly.
219 */
220#define AVS_MBOX_PARAM1 AVS_MBOX_PARAM(0)
221#define AVS_MBOX_PARAM2 AVS_MBOX_PARAM(1)
222#define AVS_MBOX_PARAM3 AVS_MBOX_PARAM(2)
223#define AVS_MBOX_PARAM4 AVS_MBOX_PARAM(3)
224
225/*
226 * This table stores the name, access permissions and offset for each hardware
227 * register and is used to generate debugfs entries.
228 */
229static struct debugfs_entry debugfs_entries[] = {
230 DEBUGFS_ENTRY(COMMAND, S_IWUSR, DEBUGFS_NORMAL),
231 DEBUGFS_ENTRY(STATUS, S_IWUSR, DEBUGFS_NORMAL),
232 DEBUGFS_ENTRY(VOLTAGE0, 0, DEBUGFS_FLOAT),
233 DEBUGFS_ENTRY(TEMP0, 0, DEBUGFS_FLOAT),
234 DEBUGFS_ENTRY(PV0, 0, DEBUGFS_FLOAT),
235 DEBUGFS_ENTRY(MV0, 0, DEBUGFS_FLOAT),
236 DEBUGFS_ENTRY(PARAM1, S_IWUSR, DEBUGFS_NORMAL),
237 DEBUGFS_ENTRY(PARAM2, S_IWUSR, DEBUGFS_NORMAL),
238 DEBUGFS_ENTRY(PARAM3, S_IWUSR, DEBUGFS_NORMAL),
239 DEBUGFS_ENTRY(PARAM4, S_IWUSR, DEBUGFS_NORMAL),
240 DEBUGFS_ENTRY(REVISION, 0, DEBUGFS_REV),
241 DEBUGFS_ENTRY(PSTATE, 0, DEBUGFS_NORMAL),
242 DEBUGFS_ENTRY(HEARTBEAT, 0, DEBUGFS_NORMAL),
243 DEBUGFS_ENTRY(MAGIC, S_IWUSR, DEBUGFS_NORMAL),
244 DEBUGFS_ENTRY(SIGMA_HVT, 0, DEBUGFS_NORMAL),
245 DEBUGFS_ENTRY(SIGMA_SVT, 0, DEBUGFS_NORMAL),
246 DEBUGFS_ENTRY(VOLTAGE1, 0, DEBUGFS_FLOAT),
247 DEBUGFS_ENTRY(TEMP1, 0, DEBUGFS_FLOAT),
248 DEBUGFS_ENTRY(PV1, 0, DEBUGFS_FLOAT),
249 DEBUGFS_ENTRY(MV1, 0, DEBUGFS_FLOAT),
250 DEBUGFS_ENTRY(FREQUENCY, 0, DEBUGFS_NORMAL),
251};
252
253static int brcm_avs_target_index(struct cpufreq_policy *, unsigned int);
254
255static char *__strtolower(char *s)
256{
257 char *p;
258
259 for (p = s; *p; p++)
260 *p = tolower(*p);
261
262 return s;
263}
264
265#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
266
267static void __iomem *__map_region(const char *name) 183static void __iomem *__map_region(const char *name)
268{ 184{
269 struct device_node *np; 185 struct device_node *np;
@@ -516,238 +432,6 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
516 return table; 432 return table;
517} 433}
518 434
519#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
520
521#define MANT(x) (unsigned int)(abs((x)) / 1000)
522#define FRAC(x) (unsigned int)(abs((x)) - abs((x)) / 1000 * 1000)
523
524static int brcm_avs_debug_show(struct seq_file *s, void *data)
525{
526 struct debugfs_data *dbgfs = s->private;
527 void __iomem *base;
528 u32 val, offset;
529
530 if (!dbgfs) {
531 seq_puts(s, "No device pointer\n");
532 return 0;
533 }
534
535 base = dbgfs->priv->base;
536 offset = dbgfs->entry->offset;
537 val = readl(base + offset);
538 switch (dbgfs->entry->format) {
539 case DEBUGFS_NORMAL:
540 seq_printf(s, "%u\n", val);
541 break;
542 case DEBUGFS_FLOAT:
543 seq_printf(s, "%d.%03d\n", MANT(val), FRAC(val));
544 break;
545 case DEBUGFS_REV:
546 seq_printf(s, "%c.%c.%c.%c\n", (val >> 24 & 0xff),
547 (val >> 16 & 0xff), (val >> 8 & 0xff),
548 val & 0xff);
549 break;
550 }
551 seq_printf(s, "0x%08x\n", val);
552
553 return 0;
554}
555
556#undef MANT
557#undef FRAC
558
559static ssize_t brcm_avs_seq_write(struct file *file, const char __user *buf,
560 size_t size, loff_t *ppos)
561{
562 struct seq_file *s = file->private_data;
563 struct debugfs_data *dbgfs = s->private;
564 struct private_data *priv = dbgfs->priv;
565 void __iomem *base, *avs_intr_base;
566 bool use_issue_command = false;
567 unsigned long val, offset;
568 char str[128];
569 int ret;
570 char *str_ptr = str;
571
572 if (size >= sizeof(str))
573 return -E2BIG;
574
575 memset(str, 0, sizeof(str));
576 ret = copy_from_user(str, buf, size);
577 if (ret)
578 return ret;
579
580 base = priv->base;
581 avs_intr_base = priv->avs_intr_base;
582 offset = dbgfs->entry->offset;
583 /*
584 * Special case writing to "command" entry only: if the string starts
585 * with a 'c', we use the driver's __issue_avs_command() function.
586 * Otherwise, we perform a raw write. This should allow testing of raw
587 * access as well as using the higher level function. (Raw access
588 * doesn't clear the firmware return status after issuing the command.)
589 */
590 if (str_ptr[0] == 'c' && offset == AVS_MBOX_COMMAND) {
591 use_issue_command = true;
592 str_ptr++;
593 }
594 if (kstrtoul(str_ptr, 0, &val) != 0)
595 return -EINVAL;
596
597 /*
598 * Setting the P-state is a special case. We need to update the CPU
599 * frequency we report.
600 */
601 if (val == AVS_CMD_SET_PSTATE) {
602 struct cpufreq_policy *policy;
603 unsigned int pstate;
604
605 policy = cpufreq_cpu_get(smp_processor_id());
606 /* Read back the P-state we are about to set */
607 pstate = readl(base + AVS_MBOX_PARAM(0));
608 if (use_issue_command) {
609 ret = brcm_avs_target_index(policy, pstate);
610 return ret ? ret : size;
611 }
612 policy->cur = policy->freq_table[pstate].frequency;
613 }
614
615 if (use_issue_command) {
616 ret = __issue_avs_command(priv, val, false, NULL);
617 } else {
618 /* Locking here is not perfect, but is only for debug. */
619 ret = down_interruptible(&priv->sem);
620 if (ret)
621 return ret;
622
623 writel(val, base + offset);
624 /* We have to wake up the firmware to process a command. */
625 if (offset == AVS_MBOX_COMMAND)
626 writel(AVS_CPU_L2_INT_MASK,
627 avs_intr_base + AVS_CPU_L2_SET0);
628 up(&priv->sem);
629 }
630
631 return ret ? ret : size;
632}
633
634static struct debugfs_entry *__find_debugfs_entry(const char *name)
635{
636 int i;
637
638 for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++)
639 if (strcasecmp(debugfs_entries[i].name, name) == 0)
640 return &debugfs_entries[i];
641
642 return NULL;
643}
644
645static int brcm_avs_debug_open(struct inode *inode, struct file *file)
646{
647 struct debugfs_data *data;
648 fmode_t fmode;
649 int ret;
650
651 /*
652 * seq_open(), which is called by single_open(), clears "write" access.
653 * We need write access to some files, so we preserve our access mode
654 * and restore it.
655 */
656 fmode = file->f_mode;
657 /*
658 * Check access permissions even for root. We don't want to be writing
659 * to read-only registers. Access for regular users has already been
660 * checked by the VFS layer.
661 */
662 if ((fmode & FMODE_WRITER) && !(inode->i_mode & S_IWUSR))
663 return -EACCES;
664
665 data = kmalloc(sizeof(*data), GFP_KERNEL);
666 if (!data)
667 return -ENOMEM;
668 /*
669 * We use the same file system operations for all our debug files. To
670 * produce specific output, we look up the file name upon opening a
671 * debugfs entry and map it to a memory offset. This offset is then used
672 * in the generic "show" function to read a specific register.
673 */
674 data->entry = __find_debugfs_entry(file->f_path.dentry->d_iname);
675 data->priv = inode->i_private;
676
677 ret = single_open(file, brcm_avs_debug_show, data);
678 if (ret)
679 kfree(data);
680 file->f_mode = fmode;
681
682 return ret;
683}
684
685static int brcm_avs_debug_release(struct inode *inode, struct file *file)
686{
687 struct seq_file *seq_priv = file->private_data;
688 struct debugfs_data *data = seq_priv->private;
689
690 kfree(data);
691 return single_release(inode, file);
692}
693
694static const struct file_operations brcm_avs_debug_ops = {
695 .open = brcm_avs_debug_open,
696 .read = seq_read,
697 .write = brcm_avs_seq_write,
698 .llseek = seq_lseek,
699 .release = brcm_avs_debug_release,
700};
701
702static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev)
703{
704 struct private_data *priv = platform_get_drvdata(pdev);
705 struct dentry *dir;
706 int i;
707
708 if (!priv)
709 return;
710
711 dir = debugfs_create_dir(BRCM_AVS_CPUFREQ_NAME, NULL);
712 if (IS_ERR_OR_NULL(dir))
713 return;
714 priv->debugfs = dir;
715
716 for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++) {
717 /*
718 * The DEBUGFS_ENTRY macro generates uppercase strings. We
719 * convert them to lowercase before creating the debugfs
720 * entries.
721 */
722 char *entry = __strtolower(debugfs_entries[i].name);
723 fmode_t mode = debugfs_entries[i].mode;
724
725 if (!debugfs_create_file(entry, S_IFREG | S_IRUGO | mode,
726 dir, priv, &brcm_avs_debug_ops)) {
727 priv->debugfs = NULL;
728 debugfs_remove_recursive(dir);
729 break;
730 }
731 }
732}
733
734static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev)
735{
736 struct private_data *priv = platform_get_drvdata(pdev);
737
738 if (priv && priv->debugfs) {
739 debugfs_remove_recursive(priv->debugfs);
740 priv->debugfs = NULL;
741 }
742}
743
744#else
745
746static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev) {}
747static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev) {}
748
749#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
750
751/* 435/*
752 * To ensure the right firmware is running we need to 436 * To ensure the right firmware is running we need to
753 * - check the MAGIC matches what we expect 437 * - check the MAGIC matches what we expect
@@ -1016,11 +700,8 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
1016 return ret; 700 return ret;
1017 701
1018 brcm_avs_driver.driver_data = pdev; 702 brcm_avs_driver.driver_data = pdev;
1019 ret = cpufreq_register_driver(&brcm_avs_driver);
1020 if (!ret)
1021 brcm_avs_cpufreq_debug_init(pdev);
1022 703
1023 return ret; 704 return cpufreq_register_driver(&brcm_avs_driver);
1024} 705}
1025 706
1026static int brcm_avs_cpufreq_remove(struct platform_device *pdev) 707static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
@@ -1032,8 +713,6 @@ static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
1032 if (ret) 713 if (ret)
1033 return ret; 714 return ret;
1034 715
1035 brcm_avs_cpufreq_debug_exit(pdev);
1036
1037 priv = platform_get_drvdata(pdev); 716 priv = platform_get_drvdata(pdev);
1038 iounmap(priv->base); 717 iounmap(priv->base);
1039 iounmap(priv->avs_intr_base); 718 iounmap(priv->avs_intr_base);
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 0591874856d3..54edaec1e608 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -679,6 +679,16 @@ void gpstate_timer_handler(struct timer_list *t)
679 679
680 if (!spin_trylock(&gpstates->gpstate_lock)) 680 if (!spin_trylock(&gpstates->gpstate_lock))
681 return; 681 return;
682 /*
683 * If the timer has migrated to the different cpu then bring
684 * it back to one of the policy->cpus
685 */
686 if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
687 gpstates->timer.expires = jiffies + msecs_to_jiffies(1);
688 add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));
689 spin_unlock(&gpstates->gpstate_lock);
690 return;
691 }
682 692
683 /* 693 /*
684 * If PMCR was last updated was using fast_swtich then 694 * If PMCR was last updated was using fast_swtich then
@@ -718,10 +728,8 @@ void gpstate_timer_handler(struct timer_list *t)
718 if (gpstate_idx != gpstates->last_lpstate_idx) 728 if (gpstate_idx != gpstates->last_lpstate_idx)
719 queue_gpstate_timer(gpstates); 729 queue_gpstate_timer(gpstates);
720 730
731 set_pstate(&freq_data);
721 spin_unlock(&gpstates->gpstate_lock); 732 spin_unlock(&gpstates->gpstate_lock);
722
723 /* Timer may get migrated to a different cpu on cpu hot unplug */
724 smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
725} 733}
726 734
727/* 735/*
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index e6f17825db79..2b90606452a2 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -284,7 +284,7 @@ scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
284 struct clock_info *ci = handle->clk_priv; 284 struct clock_info *ci = handle->clk_priv;
285 struct scmi_clock_info *clk = ci->clk + clk_id; 285 struct scmi_clock_info *clk = ci->clk + clk_id;
286 286
287 if (!clk->name || !clk->name[0]) 287 if (!clk->name[0])
288 return NULL; 288 return NULL;
289 289
290 return clk; 290 return clk;
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index 14f14efdf0d5..06d212a3d49d 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -249,7 +249,7 @@ static int altera_ps_probe(struct spi_device *spi)
249 249
250 conf->data = of_id->data; 250 conf->data = of_id->data;
251 conf->spi = spi; 251 conf->spi = spi;
252 conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_HIGH); 252 conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_LOW);
253 if (IS_ERR(conf->config)) { 253 if (IS_ERR(conf->config)) {
254 dev_err(&spi->dev, "Failed to get config gpio: %ld\n", 254 dev_err(&spi->dev, "Failed to get config gpio: %ld\n",
255 PTR_ERR(conf->config)); 255 PTR_ERR(conf->config));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b0e591eaa71a..e14263fca1c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1459,10 +1459,11 @@ static const u32 sgpr_init_compute_shader[] =
1459static const u32 vgpr_init_regs[] = 1459static const u32 vgpr_init_regs[] =
1460{ 1460{
1461 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff, 1461 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
1462 mmCOMPUTE_RESOURCE_LIMITS, 0, 1462 mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
1463 mmCOMPUTE_NUM_THREAD_X, 256*4, 1463 mmCOMPUTE_NUM_THREAD_X, 256*4,
1464 mmCOMPUTE_NUM_THREAD_Y, 1, 1464 mmCOMPUTE_NUM_THREAD_Y, 1,
1465 mmCOMPUTE_NUM_THREAD_Z, 1, 1465 mmCOMPUTE_NUM_THREAD_Z, 1,
1466 mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
1466 mmCOMPUTE_PGM_RSRC2, 20, 1467 mmCOMPUTE_PGM_RSRC2, 20,
1467 mmCOMPUTE_USER_DATA_0, 0xedcedc00, 1468 mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1468 mmCOMPUTE_USER_DATA_1, 0xedcedc01, 1469 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1479,10 +1480,11 @@ static const u32 vgpr_init_regs[] =
1479static const u32 sgpr1_init_regs[] = 1480static const u32 sgpr1_init_regs[] =
1480{ 1481{
1481 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f, 1482 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
1482 mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, 1483 mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
1483 mmCOMPUTE_NUM_THREAD_X, 256*5, 1484 mmCOMPUTE_NUM_THREAD_X, 256*5,
1484 mmCOMPUTE_NUM_THREAD_Y, 1, 1485 mmCOMPUTE_NUM_THREAD_Y, 1,
1485 mmCOMPUTE_NUM_THREAD_Z, 1, 1486 mmCOMPUTE_NUM_THREAD_Z, 1,
1487 mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
1486 mmCOMPUTE_PGM_RSRC2, 20, 1488 mmCOMPUTE_PGM_RSRC2, 20,
1487 mmCOMPUTE_USER_DATA_0, 0xedcedc00, 1489 mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1488 mmCOMPUTE_USER_DATA_1, 0xedcedc01, 1490 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1503,6 +1505,7 @@ static const u32 sgpr2_init_regs[] =
1503 mmCOMPUTE_NUM_THREAD_X, 256*5, 1505 mmCOMPUTE_NUM_THREAD_X, 256*5,
1504 mmCOMPUTE_NUM_THREAD_Y, 1, 1506 mmCOMPUTE_NUM_THREAD_Y, 1,
1505 mmCOMPUTE_NUM_THREAD_Z, 1, 1507 mmCOMPUTE_NUM_THREAD_Z, 1,
1508 mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
1506 mmCOMPUTE_PGM_RSRC2, 20, 1509 mmCOMPUTE_PGM_RSRC2, 20,
1507 mmCOMPUTE_USER_DATA_0, 0xedcedc00, 1510 mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1508 mmCOMPUTE_USER_DATA_1, 0xedcedc01, 1511 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index ed2f06c9f346..3858820a0055 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -6,5 +6,6 @@ config HSA_AMD
6 tristate "HSA kernel driver for AMD GPU devices" 6 tristate "HSA kernel driver for AMD GPU devices"
7 depends on DRM_AMDGPU && X86_64 7 depends on DRM_AMDGPU && X86_64
8 imply AMD_IOMMU_V2 8 imply AMD_IOMMU_V2
9 select MMU_NOTIFIER
9 help 10 help
10 Enable this if you want to use HSA features on AMD GPU devices. 11 Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index cd679cf1fd30..59808a39ecf4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -749,12 +749,13 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
749 struct timespec64 time; 749 struct timespec64 time;
750 750
751 dev = kfd_device_by_id(args->gpu_id); 751 dev = kfd_device_by_id(args->gpu_id);
752 if (dev == NULL) 752 if (dev)
753 return -EINVAL; 753 /* Reading GPU clock counter from KGD */
754 754 args->gpu_clock_counter =
755 /* Reading GPU clock counter from KGD */ 755 dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
756 args->gpu_clock_counter = 756 else
757 dev->kfd2kgd->get_gpu_clock_counter(dev->kgd); 757 /* Node without GPU resource */
758 args->gpu_clock_counter = 0;
758 759
759 /* No access to rdtsc. Using raw monotonic time */ 760 /* No access to rdtsc. Using raw monotonic time */
760 getrawmonotonic64(&time); 761 getrawmonotonic64(&time);
@@ -1147,7 +1148,7 @@ err_unlock:
1147 return ret; 1148 return ret;
1148} 1149}
1149 1150
1150bool kfd_dev_is_large_bar(struct kfd_dev *dev) 1151static bool kfd_dev_is_large_bar(struct kfd_dev *dev)
1151{ 1152{
1152 struct kfd_local_mem_info mem_info; 1153 struct kfd_local_mem_info mem_info;
1153 1154
@@ -1421,7 +1422,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
1421 1422
1422 pdd = kfd_get_process_device_data(dev, p); 1423 pdd = kfd_get_process_device_data(dev, p);
1423 if (!pdd) { 1424 if (!pdd) {
1424 err = PTR_ERR(pdd); 1425 err = -EINVAL;
1425 goto bind_process_to_device_failed; 1426 goto bind_process_to_device_failed;
1426 } 1427 }
1427 1428
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4e2f379ce217..1dd1142246c2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4557,6 +4557,7 @@ static int dm_update_crtcs_state(struct dc *dc,
4557 struct amdgpu_dm_connector *aconnector = NULL; 4557 struct amdgpu_dm_connector *aconnector = NULL;
4558 struct drm_connector_state *new_con_state = NULL; 4558 struct drm_connector_state *new_con_state = NULL;
4559 struct dm_connector_state *dm_conn_state = NULL; 4559 struct dm_connector_state *dm_conn_state = NULL;
4560 struct drm_plane_state *new_plane_state = NULL;
4560 4561
4561 new_stream = NULL; 4562 new_stream = NULL;
4562 4563
@@ -4564,6 +4565,13 @@ static int dm_update_crtcs_state(struct dc *dc,
4564 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 4565 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4565 acrtc = to_amdgpu_crtc(crtc); 4566 acrtc = to_amdgpu_crtc(crtc);
4566 4567
4568 new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
4569
4570 if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
4571 ret = -EINVAL;
4572 goto fail;
4573 }
4574
4567 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); 4575 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
4568 4576
4569 /* TODO This hack should go away */ 4577 /* TODO This hack should go away */
@@ -4760,7 +4768,7 @@ static int dm_update_planes_state(struct dc *dc,
4760 if (!dm_old_crtc_state->stream) 4768 if (!dm_old_crtc_state->stream)
4761 continue; 4769 continue;
4762 4770
4763 DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n", 4771 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
4764 plane->base.id, old_plane_crtc->base.id); 4772 plane->base.id, old_plane_crtc->base.id);
4765 4773
4766 if (!dc_remove_plane_from_context( 4774 if (!dc_remove_plane_from_context(
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 490017df371d..4be21bf54749 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -329,14 +329,15 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
329{ 329{
330 int src; 330 int src;
331 struct irq_list_head *lh; 331 struct irq_list_head *lh;
332 unsigned long irq_table_flags;
332 DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n"); 333 DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
333
334 for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { 334 for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
335 335 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
336 /* The handler was removed from the table, 336 /* The handler was removed from the table,
337 * it means it is safe to flush all the 'work' 337 * it means it is safe to flush all the 'work'
338 * (because no code can schedule a new one). */ 338 * (because no code can schedule a new one). */
339 lh = &adev->dm.irq_handler_list_low_tab[src]; 339 lh = &adev->dm.irq_handler_list_low_tab[src];
340 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
340 flush_work(&lh->work); 341 flush_work(&lh->work);
341 } 342 }
342} 343}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 8291d74f26bc..ace9ad578ca0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -161,6 +161,11 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
161 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 161 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
162 struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder; 162 struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
163 163
164 if (amdgpu_dm_connector->edid) {
165 kfree(amdgpu_dm_connector->edid);
166 amdgpu_dm_connector->edid = NULL;
167 }
168
164 drm_encoder_cleanup(&amdgpu_encoder->base); 169 drm_encoder_cleanup(&amdgpu_encoder->base);
165 kfree(amdgpu_encoder); 170 kfree(amdgpu_encoder);
166 drm_connector_cleanup(connector); 171 drm_connector_cleanup(connector);
@@ -181,28 +186,22 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
181void dm_dp_mst_dc_sink_create(struct drm_connector *connector) 186void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
182{ 187{
183 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 188 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
184 struct edid *edid;
185 struct dc_sink *dc_sink; 189 struct dc_sink *dc_sink;
186 struct dc_sink_init_data init_params = { 190 struct dc_sink_init_data init_params = {
187 .link = aconnector->dc_link, 191 .link = aconnector->dc_link,
188 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; 192 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
189 193
194 /* FIXME none of this is safe. we shouldn't touch aconnector here in
195 * atomic_check
196 */
197
190 /* 198 /*
191 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists 199 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
192 */ 200 */
193 if (!aconnector->port || !aconnector->port->aux.ddc.algo) 201 if (!aconnector->port || !aconnector->port->aux.ddc.algo)
194 return; 202 return;
195 203
196 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); 204 ASSERT(aconnector->edid);
197
198 if (!edid) {
199 drm_mode_connector_update_edid_property(
200 &aconnector->base,
201 NULL);
202 return;
203 }
204
205 aconnector->edid = edid;
206 205
207 dc_sink = dc_link_add_remote_sink( 206 dc_sink = dc_link_add_remote_sink(
208 aconnector->dc_link, 207 aconnector->dc_link,
@@ -215,9 +214,6 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
215 214
216 amdgpu_dm_add_sink_to_freesync_module( 215 amdgpu_dm_add_sink_to_freesync_module(
217 connector, aconnector->edid); 216 connector, aconnector->edid);
218
219 drm_mode_connector_update_edid_property(
220 &aconnector->base, aconnector->edid);
221} 217}
222 218
223static int dm_dp_mst_get_modes(struct drm_connector *connector) 219static int dm_dp_mst_get_modes(struct drm_connector *connector)
@@ -230,10 +226,6 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
230 226
231 if (!aconnector->edid) { 227 if (!aconnector->edid) {
232 struct edid *edid; 228 struct edid *edid;
233 struct dc_sink *dc_sink;
234 struct dc_sink_init_data init_params = {
235 .link = aconnector->dc_link,
236 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
237 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); 229 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
238 230
239 if (!edid) { 231 if (!edid) {
@@ -244,11 +236,17 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
244 } 236 }
245 237
246 aconnector->edid = edid; 238 aconnector->edid = edid;
239 }
247 240
241 if (!aconnector->dc_sink) {
242 struct dc_sink *dc_sink;
243 struct dc_sink_init_data init_params = {
244 .link = aconnector->dc_link,
245 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
248 dc_sink = dc_link_add_remote_sink( 246 dc_sink = dc_link_add_remote_sink(
249 aconnector->dc_link, 247 aconnector->dc_link,
250 (uint8_t *)edid, 248 (uint8_t *)aconnector->edid,
251 (edid->extensions + 1) * EDID_LENGTH, 249 (aconnector->edid->extensions + 1) * EDID_LENGTH,
252 &init_params); 250 &init_params);
253 251
254 dc_sink->priv = aconnector; 252 dc_sink->priv = aconnector;
@@ -256,12 +254,12 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
256 254
257 if (aconnector->dc_sink) 255 if (aconnector->dc_sink)
258 amdgpu_dm_add_sink_to_freesync_module( 256 amdgpu_dm_add_sink_to_freesync_module(
259 connector, edid); 257 connector, aconnector->edid);
260
261 drm_mode_connector_update_edid_property(
262 &aconnector->base, edid);
263 } 258 }
264 259
260 drm_mode_connector_update_edid_property(
261 &aconnector->base, aconnector->edid);
262
265 ret = drm_add_edid_modes(connector, aconnector->edid); 263 ret = drm_add_edid_modes(connector, aconnector->edid);
266 264
267 return ret; 265 return ret;
@@ -424,14 +422,6 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
424 dc_sink_release(aconnector->dc_sink); 422 dc_sink_release(aconnector->dc_sink);
425 aconnector->dc_sink = NULL; 423 aconnector->dc_sink = NULL;
426 } 424 }
427 if (aconnector->edid) {
428 kfree(aconnector->edid);
429 aconnector->edid = NULL;
430 }
431
432 drm_mode_connector_update_edid_property(
433 &aconnector->base,
434 NULL);
435 425
436 aconnector->mst_connected = false; 426 aconnector->mst_connected = false;
437} 427}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 134069f36482..39f1db4acda4 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -4451,6 +4451,7 @@ drm_reset_display_info(struct drm_connector *connector)
4451 info->max_tmds_clock = 0; 4451 info->max_tmds_clock = 0;
4452 info->dvi_dual = false; 4452 info->dvi_dual = false;
4453 info->has_hdmi_infoframe = false; 4453 info->has_hdmi_infoframe = false;
4454 memset(&info->hdmi, 0, sizeof(info->hdmi));
4454 4455
4455 info->non_desktop = 0; 4456 info->non_desktop = 0;
4456} 4457}
@@ -4462,17 +4463,11 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
4462 4463
4463 u32 quirks = edid_get_quirks(edid); 4464 u32 quirks = edid_get_quirks(edid);
4464 4465
4466 drm_reset_display_info(connector);
4467
4465 info->width_mm = edid->width_cm * 10; 4468 info->width_mm = edid->width_cm * 10;
4466 info->height_mm = edid->height_cm * 10; 4469 info->height_mm = edid->height_cm * 10;
4467 4470
4468 /* driver figures it out in this case */
4469 info->bpc = 0;
4470 info->color_formats = 0;
4471 info->cea_rev = 0;
4472 info->max_tmds_clock = 0;
4473 info->dvi_dual = false;
4474 info->has_hdmi_infoframe = false;
4475
4476 info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP); 4471 info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
4477 4472
4478 DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop); 4473 DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index fc8b2c6e3508..32d24c69da3c 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -2140,10 +2140,22 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
2140 } 2140 }
2141 } 2141 }
2142 2142
2143 /* According to BSpec, "The CD clock frequency must be at least twice 2143 /*
2144 * According to BSpec, "The CD clock frequency must be at least twice
2144 * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default. 2145 * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
2146 *
2147 * FIXME: Check the actual, not default, BCLK being used.
2148 *
2149 * FIXME: This does not depend on ->has_audio because the higher CDCLK
2150 * is required for audio probe, also when there are no audio capable
2151 * displays connected at probe time. This leads to unnecessarily high
2152 * CDCLK when audio is not required.
2153 *
2154 * FIXME: This limit is only applied when there are displays connected
2155 * at probe time. If we probe without displays, we'll still end up using
2156 * the platform minimum CDCLK, failing audio probe.
2145 */ 2157 */
2146 if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) 2158 if (INTEL_GEN(dev_priv) >= 9)
2147 min_cdclk = max(2 * 96000, min_cdclk); 2159 min_cdclk = max(2 * 96000, min_cdclk);
2148 2160
2149 /* 2161 /*
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d4368589b355..a80fbad9be0f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -49,12 +49,12 @@
49 * check the condition before the timeout. 49 * check the condition before the timeout.
50 */ 50 */
51#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \ 51#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
52 unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \ 52 const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
53 long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \ 53 long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
54 int ret__; \ 54 int ret__; \
55 might_sleep(); \ 55 might_sleep(); \
56 for (;;) { \ 56 for (;;) { \
57 bool expired__ = time_after(jiffies, timeout__); \ 57 const bool expired__ = ktime_after(ktime_get_raw(), end__); \
58 OP; \ 58 OP; \
59 if (COND) { \ 59 if (COND) { \
60 ret__ = 0; \ 60 ret__ = 0; \
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 6f12adc06365..6467a5cc2ca3 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -806,7 +806,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
806 return; 806 return;
807 807
808 intel_fbdev_sync(ifbdev); 808 intel_fbdev_sync(ifbdev);
809 if (ifbdev->vma) 809 if (ifbdev->vma || ifbdev->helper.deferred_setup)
810 drm_fb_helper_hotplug_event(&ifbdev->helper); 810 drm_fb_helper_hotplug_event(&ifbdev->helper);
811} 811}
812 812
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 53ea564f971e..66de4b2dc8b7 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -641,19 +641,18 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
641 641
642 DRM_DEBUG_KMS("Enabling DC6\n"); 642 DRM_DEBUG_KMS("Enabling DC6\n");
643 643
644 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 644 /* Wa Display #1183: skl,kbl,cfl */
645 if (IS_GEN9_BC(dev_priv))
646 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
647 SKL_SELECT_ALTERNATE_DC_EXIT);
645 648
649 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
646} 650}
647 651
648void skl_disable_dc6(struct drm_i915_private *dev_priv) 652void skl_disable_dc6(struct drm_i915_private *dev_priv)
649{ 653{
650 DRM_DEBUG_KMS("Disabling DC6\n"); 654 DRM_DEBUG_KMS("Disabling DC6\n");
651 655
652 /* Wa Display #1183: skl,kbl,cfl */
653 if (IS_GEN9_BC(dev_priv))
654 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
655 SKL_SELECT_ALTERNATE_DC_EXIT);
656
657 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 656 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
658} 657}
659 658
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 6e5e1aa54ce1..b001699297c4 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -351,6 +351,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
351 351
352 spin_lock_irqsave(&dev->event_lock, flags); 352 spin_lock_irqsave(&dev->event_lock, flags);
353 mdp4_crtc->event = crtc->state->event; 353 mdp4_crtc->event = crtc->state->event;
354 crtc->state->event = NULL;
354 spin_unlock_irqrestore(&dev->event_lock, flags); 355 spin_unlock_irqrestore(&dev->event_lock, flags);
355 356
356 blend_setup(crtc); 357 blend_setup(crtc);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 9893e43ba6c5..76b96081916f 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -708,6 +708,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
708 708
709 spin_lock_irqsave(&dev->event_lock, flags); 709 spin_lock_irqsave(&dev->event_lock, flags);
710 mdp5_crtc->event = crtc->state->event; 710 mdp5_crtc->event = crtc->state->event;
711 crtc->state->event = NULL;
711 spin_unlock_irqrestore(&dev->event_lock, flags); 712 spin_unlock_irqrestore(&dev->event_lock, flags);
712 713
713 /* 714 /*
diff --git a/drivers/gpu/drm/msm/disp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c
index b4a8aa4490ee..005760bee708 100644
--- a/drivers/gpu/drm/msm/disp/mdp_format.c
+++ b/drivers/gpu/drm/msm/disp/mdp_format.c
@@ -171,7 +171,8 @@ uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats,
171 return i; 171 return i;
172} 172}
173 173
174const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format) 174const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format,
175 uint64_t modifier)
175{ 176{
176 int i; 177 int i;
177 for (i = 0; i < ARRAY_SIZE(formats); i++) { 178 for (i = 0; i < ARRAY_SIZE(formats); i++) {
diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h
index 1185487e7e5e..4fa8dbe4e165 100644
--- a/drivers/gpu/drm/msm/disp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp_kms.h
@@ -98,7 +98,7 @@ struct mdp_format {
98#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv) 98#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
99 99
100uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only); 100uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
101const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); 101const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
102 102
103/* MDP capabilities */ 103/* MDP capabilities */
104#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */ 104#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 7a03a9489708..8baba30d6c65 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -173,6 +173,7 @@ struct msm_dsi_host {
173 173
174 bool registered; 174 bool registered;
175 bool power_on; 175 bool power_on;
176 bool enabled;
176 int irq; 177 int irq;
177}; 178};
178 179
@@ -775,7 +776,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
775 switch (mipi_fmt) { 776 switch (mipi_fmt) {
776 case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888; 777 case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
777 case MIPI_DSI_FMT_RGB666_PACKED: 778 case MIPI_DSI_FMT_RGB666_PACKED:
778 case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666; 779 case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666;
779 case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565; 780 case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
780 default: return CMD_DST_FORMAT_RGB888; 781 default: return CMD_DST_FORMAT_RGB888;
781 } 782 }
@@ -986,13 +987,19 @@ static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
986 987
987static void dsi_wait4video_done(struct msm_dsi_host *msm_host) 988static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
988{ 989{
990 u32 ret = 0;
991 struct device *dev = &msm_host->pdev->dev;
992
989 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1); 993 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
990 994
991 reinit_completion(&msm_host->video_comp); 995 reinit_completion(&msm_host->video_comp);
992 996
993 wait_for_completion_timeout(&msm_host->video_comp, 997 ret = wait_for_completion_timeout(&msm_host->video_comp,
994 msecs_to_jiffies(70)); 998 msecs_to_jiffies(70));
995 999
1000 if (ret <= 0)
1001 dev_err(dev, "wait for video done timed out\n");
1002
996 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0); 1003 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
997} 1004}
998 1005
@@ -1001,7 +1008,7 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
1001 if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO)) 1008 if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
1002 return; 1009 return;
1003 1010
1004 if (msm_host->power_on) { 1011 if (msm_host->power_on && msm_host->enabled) {
1005 dsi_wait4video_done(msm_host); 1012 dsi_wait4video_done(msm_host);
1006 /* delay 4 ms to skip BLLP */ 1013 /* delay 4 ms to skip BLLP */
1007 usleep_range(2000, 4000); 1014 usleep_range(2000, 4000);
@@ -2203,7 +2210,7 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host)
2203 * pm_runtime_put_autosuspend(&msm_host->pdev->dev); 2210 * pm_runtime_put_autosuspend(&msm_host->pdev->dev);
2204 * } 2211 * }
2205 */ 2212 */
2206 2213 msm_host->enabled = true;
2207 return 0; 2214 return 0;
2208} 2215}
2209 2216
@@ -2211,6 +2218,7 @@ int msm_dsi_host_disable(struct mipi_dsi_host *host)
2211{ 2218{
2212 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2219 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2213 2220
2221 msm_host->enabled = false;
2214 dsi_op_mode_config(msm_host, 2222 dsi_op_mode_config(msm_host,
2215 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false); 2223 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
2216 2224
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 8e9d5c255820..9a9fa0c75a13 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -265,6 +265,115 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
265 return 0; 265 return 0;
266} 266}
267 267
/*
 * msm_dsi_dphy_timing_calc_v3() - compute D-PHY lane timing parameters
 * for v3 (10nm-class) DSI PHYs from the requested clock rates.
 *
 * @timing:  output; receives the clk_*/hs_* prepare/zero/trail/exit
 *           counts, the TA parameters, and the shared clk_pre/clk_post
 *           timings.
 * @clk_req: input; supplies bitclk_rate and escclk_rate (rejected with
 *           -EINVAL if either is zero).
 *
 * Returns 0 on success, -EINVAL on a zero clock rate.
 *
 * All intermediate math is done as (nanoseconds * coeff) with
 * coeff = 1000 so precision survives the integer divides.  For each
 * field a [tmin, tmax] window is computed and linear_inter() picks a
 * value pcntN percent into that window.
 * NOTE(review): the window constants (38/95/300/60/105/145/... coeff,
 * plus the UI multiples) presumably come from the MIPI D-PHY spec
 * timing tables — confirm against the PHY programming guide.
 */
int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
	struct msm_dsi_phy_clk_request *clk_req)
{
	const unsigned long bit_rate = clk_req->bitclk_rate;
	const unsigned long esc_rate = clk_req->escclk_rate;
	s32 ui, ui_x8, lpx;
	s32 tmax, tmin;
	/* Percentages used to place each value inside its [tmin, tmax] window. */
	s32 pcnt0 = 50;
	s32 pcnt1 = 50;
	s32 pcnt2 = 10;
	s32 pcnt3 = 30;
	s32 pcnt4 = 10;
	s32 pcnt5 = 2;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 hb_en, hb_en_ckln;
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	/* Half-byte clocking disabled for both data and clock lanes. */
	timing->hs_halfbyte_en = 0;
	hb_en = 0;
	timing->hs_halfbyte_en_ckln = 0;
	hb_en_ckln = 0;

	/* ui = one unit interval (bit period) in ns * coeff; ui_x8 = byte period. */
	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	ui_x8 = ui << 3;
	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);

	/* clk_prepare: window of 38..95 ns, midpoint (pcnt0 = 50%). */
	temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (95 * coeff) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);

	/* clk_zero: remainder of the 300 ns prepare+zero budget. */
	temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = (tmin > 255) ? 511 : 255;
	timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp + 3 * ui) / ui_x8;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);

	temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (85 * coeff + 6 * ui) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);

	temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 255;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp / ui_x8) - 1;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);

	/* hs_rqst: hb_en is 0 here, so this reduces to (50 ns - 8 ui). */
	temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
	timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);

	tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
	tmax = 255;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);

	temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
	timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);

	temp = 60 * coeff + 52 * ui - 43 * ui;
	tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 63;
	timing->shared_timings.clk_post =
		linear_inter(tmax, tmin, pcnt2, 0, false);

	/* clk_pre: total clock-lane lead time before HS data may start. */
	temp = 8 * ui + (timing->clk_prepare << 3) * ui;
	temp += (((timing->clk_zero + 3) << 3) + 11) * ui;
	temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
		(((timing->hs_rqst_ckln << 3) + 8) * ui);
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 63;
	if (tmin > tmax) {
		/*
		 * Required lead time exceeds the 6-bit field; halve the
		 * value and flag it so hardware counts it twice.
		 */
		temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre = temp >> 1;
		timing->shared_timings.clk_pre_inc_by_2 = 1;
	} else {
		timing->shared_timings.clk_pre =
			linear_inter(tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre_inc_by_2 = 0;
	}

	/* Fixed bus-turnaround parameters. */
	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
	    timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
	    timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
	    timing->clk_trail, timing->clk_prepare, timing->hs_exit,
	    timing->hs_zero, timing->hs_prepare, timing->hs_trail,
	    timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
	    timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
	    timing->hs_prep_dly_ckln);

	return 0;
}
376
268void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, 377void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
269 u32 bit_mask) 378 u32 bit_mask)
270{ 379{
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index c56268cbdb3d..a24ab80994a3 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -101,6 +101,8 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
101 struct msm_dsi_phy_clk_request *clk_req); 101 struct msm_dsi_phy_clk_request *clk_req);
102int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, 102int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
103 struct msm_dsi_phy_clk_request *clk_req); 103 struct msm_dsi_phy_clk_request *clk_req);
104int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
105 struct msm_dsi_phy_clk_request *clk_req);
104void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, 106void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
105 u32 bit_mask); 107 u32 bit_mask);
106int msm_dsi_phy_init_common(struct msm_dsi_phy *phy); 108int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 0af951aaeea1..b3fffc8dbb2a 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -79,34 +79,6 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
79 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04); 79 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
80} 80}
81 81
82static int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
83 struct msm_dsi_phy_clk_request *clk_req)
84{
85 /*
86 * TODO: These params need to be computed, they're currently hardcoded
87 * for a 1440x2560@60Hz panel with a byteclk of 100.618 Mhz, and a
88 * default escape clock of 19.2 Mhz.
89 */
90
91 timing->hs_halfbyte_en = 0;
92 timing->clk_zero = 0x1c;
93 timing->clk_prepare = 0x07;
94 timing->clk_trail = 0x07;
95 timing->hs_exit = 0x23;
96 timing->hs_zero = 0x21;
97 timing->hs_prepare = 0x07;
98 timing->hs_trail = 0x07;
99 timing->hs_rqst = 0x05;
100 timing->ta_sure = 0x00;
101 timing->ta_go = 0x03;
102 timing->ta_get = 0x04;
103
104 timing->shared_timings.clk_pre = 0x2d;
105 timing->shared_timings.clk_post = 0x0d;
106
107 return 0;
108}
109
110static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, 82static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
111 struct msm_dsi_phy_clk_request *clk_req) 83 struct msm_dsi_phy_clk_request *clk_req)
112{ 84{
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 0e0c87252ab0..7a16242bf8bf 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -183,7 +183,8 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
183 hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format); 183 hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
184 vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format); 184 vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
185 185
186 format = kms->funcs->get_format(kms, mode_cmd->pixel_format); 186 format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
187 mode_cmd->modifier[0]);
187 if (!format) { 188 if (!format) {
188 dev_err(dev->dev, "unsupported pixel format: %4.4s\n", 189 dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
189 (char *)&mode_cmd->pixel_format); 190 (char *)&mode_cmd->pixel_format);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c178563fcd4d..456622b46335 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -92,8 +92,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
92 92
93 if (IS_ERR(fb)) { 93 if (IS_ERR(fb)) {
94 dev_err(dev->dev, "failed to allocate fb\n"); 94 dev_err(dev->dev, "failed to allocate fb\n");
95 ret = PTR_ERR(fb); 95 return PTR_ERR(fb);
96 goto fail;
97 } 96 }
98 97
99 bo = msm_framebuffer_bo(fb, 0); 98 bo = msm_framebuffer_bo(fb, 0);
@@ -151,13 +150,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
151 150
152fail_unlock: 151fail_unlock:
153 mutex_unlock(&dev->struct_mutex); 152 mutex_unlock(&dev->struct_mutex);
154fail: 153 drm_framebuffer_remove(fb);
155
156 if (ret) {
157 if (fb)
158 drm_framebuffer_remove(fb);
159 }
160
161 return ret; 154 return ret;
162} 155}
163 156
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 95196479f651..f583bb4222f9 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -132,17 +132,19 @@ static void put_pages(struct drm_gem_object *obj)
132 struct msm_gem_object *msm_obj = to_msm_bo(obj); 132 struct msm_gem_object *msm_obj = to_msm_bo(obj);
133 133
134 if (msm_obj->pages) { 134 if (msm_obj->pages) {
135 /* For non-cached buffers, ensure the new pages are clean 135 if (msm_obj->sgt) {
136 * because display controller, GPU, etc. are not coherent: 136 /* For non-cached buffers, ensure the new
137 */ 137 * pages are clean because display controller,
138 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) 138 * GPU, etc. are not coherent:
139 dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, 139 */
140 msm_obj->sgt->nents, DMA_BIDIRECTIONAL); 140 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
141 dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
142 msm_obj->sgt->nents,
143 DMA_BIDIRECTIONAL);
141 144
142 if (msm_obj->sgt)
143 sg_free_table(msm_obj->sgt); 145 sg_free_table(msm_obj->sgt);
144 146 kfree(msm_obj->sgt);
145 kfree(msm_obj->sgt); 147 }
146 148
147 if (use_pages(obj)) 149 if (use_pages(obj))
148 drm_gem_put_pages(obj, msm_obj->pages, true, false); 150 drm_gem_put_pages(obj, msm_obj->pages, true, false);
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 17d5824417ad..aaa329dc020e 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -48,8 +48,11 @@ struct msm_kms_funcs {
48 /* functions to wait for atomic commit completed on each CRTC */ 48 /* functions to wait for atomic commit completed on each CRTC */
49 void (*wait_for_crtc_commit_done)(struct msm_kms *kms, 49 void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
50 struct drm_crtc *crtc); 50 struct drm_crtc *crtc);
51 /* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
52 const struct msm_format *(*get_format)(struct msm_kms *kms,
53 const uint32_t format,
54 const uint64_t modifiers);
51 /* misc: */ 55 /* misc: */
52 const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
53 long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, 56 long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
54 struct drm_encoder *encoder); 57 struct drm_encoder *encoder);
55 int (*set_split_display)(struct msm_kms *kms, 58 int (*set_split_display)(struct msm_kms *kms,
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index c0fb52c6d4ca..01665b98c57e 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -179,10 +179,9 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea
179 uint32_t type, bool interruptible) 179 uint32_t type, bool interruptible)
180{ 180{
181 struct qxl_command cmd; 181 struct qxl_command cmd;
182 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
183 182
184 cmd.type = type; 183 cmd.type = type;
185 cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); 184 cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
186 185
187 return qxl_ring_push(qdev->command_ring, &cmd, interruptible); 186 return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
188} 187}
@@ -192,10 +191,9 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
192 uint32_t type, bool interruptible) 191 uint32_t type, bool interruptible)
193{ 192{
194 struct qxl_command cmd; 193 struct qxl_command cmd;
195 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
196 194
197 cmd.type = type; 195 cmd.type = type;
198 cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); 196 cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
199 197
200 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); 198 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
201} 199}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 00a1a66b052a..864b456080c4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -167,6 +167,7 @@ struct qxl_release {
167 167
168 int id; 168 int id;
169 int type; 169 int type;
170 struct qxl_bo *release_bo;
170 uint32_t release_offset; 171 uint32_t release_offset;
171 uint32_t surface_release_id; 172 uint32_t surface_release_id;
172 struct ww_acquire_ctx ticket; 173 struct ww_acquire_ctx ticket;
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index e238a1a2eca1..6cc9f3367fa0 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -182,9 +182,9 @@ static int qxl_process_single_command(struct qxl_device *qdev,
182 goto out_free_reloc; 182 goto out_free_reloc;
183 183
184 /* TODO copy slow path code from i915 */ 184 /* TODO copy slow path code from i915 */
185 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); 185 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
186 unwritten = __copy_from_user_inatomic_nocache 186 unwritten = __copy_from_user_inatomic_nocache
187 (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), 187 (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
188 u64_to_user_ptr(cmd->command), cmd->command_size); 188 u64_to_user_ptr(cmd->command), cmd->command_size);
189 189
190 { 190 {
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 5d84a66fed36..7cb214577275 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -173,6 +173,7 @@ qxl_release_free_list(struct qxl_release *release)
173 list_del(&entry->tv.head); 173 list_del(&entry->tv.head);
174 kfree(entry); 174 kfree(entry);
175 } 175 }
176 release->release_bo = NULL;
176} 177}
177 178
178void 179void
@@ -296,7 +297,6 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
296{ 297{
297 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { 298 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
298 int idr_ret; 299 int idr_ret;
299 struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
300 struct qxl_bo *bo; 300 struct qxl_bo *bo;
301 union qxl_release_info *info; 301 union qxl_release_info *info;
302 302
@@ -304,8 +304,9 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
304 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); 304 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
305 if (idr_ret < 0) 305 if (idr_ret < 0)
306 return idr_ret; 306 return idr_ret;
307 bo = to_qxl_bo(entry->tv.bo); 307 bo = create_rel->release_bo;
308 308
309 (*release)->release_bo = bo;
309 (*release)->release_offset = create_rel->release_offset + 64; 310 (*release)->release_offset = create_rel->release_offset + 64;
310 311
311 qxl_release_list_add(*release, bo); 312 qxl_release_list_add(*release, bo);
@@ -365,6 +366,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
365 366
366 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); 367 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
367 368
369 (*release)->release_bo = bo;
368 (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx]; 370 (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
369 qdev->current_release_bo_offset[cur_idx]++; 371 qdev->current_release_bo_offset[cur_idx]++;
370 372
@@ -408,13 +410,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
408{ 410{
409 void *ptr; 411 void *ptr;
410 union qxl_release_info *info; 412 union qxl_release_info *info;
411 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); 413 struct qxl_bo *bo = release->release_bo;
412 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
413 414
414 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); 415 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
415 if (!ptr) 416 if (!ptr)
416 return NULL; 417 return NULL;
417 info = ptr + (release->release_offset & ~PAGE_SIZE); 418 info = ptr + (release->release_offset & ~PAGE_MASK);
418 return info; 419 return info;
419} 420}
420 421
@@ -422,11 +423,10 @@ void qxl_release_unmap(struct qxl_device *qdev,
422 struct qxl_release *release, 423 struct qxl_release *release,
423 union qxl_release_info *info) 424 union qxl_release_info *info)
424{ 425{
425 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); 426 struct qxl_bo *bo = release->release_bo;
426 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
427 void *ptr; 427 void *ptr;
428 428
429 ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); 429 ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
430 qxl_bo_kunmap_atomic_page(qdev, bo, ptr); 430 qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
431} 431}
432 432
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index bffff4c9fbf5..be3f14d7746d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -94,64 +94,9 @@ static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
94 } 94 }
95} 95}
96 96
97static enum drm_mode_status sun4i_lvds_encoder_mode_valid(struct drm_encoder *crtc,
98 const struct drm_display_mode *mode)
99{
100 struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(crtc);
101 struct sun4i_tcon *tcon = lvds->tcon;
102 u32 hsync = mode->hsync_end - mode->hsync_start;
103 u32 vsync = mode->vsync_end - mode->vsync_start;
104 unsigned long rate = mode->clock * 1000;
105 long rounded_rate;
106
107 DRM_DEBUG_DRIVER("Validating modes...\n");
108
109 if (hsync < 1)
110 return MODE_HSYNC_NARROW;
111
112 if (hsync > 0x3ff)
113 return MODE_HSYNC_WIDE;
114
115 if ((mode->hdisplay < 1) || (mode->htotal < 1))
116 return MODE_H_ILLEGAL;
117
118 if ((mode->hdisplay > 0x7ff) || (mode->htotal > 0xfff))
119 return MODE_BAD_HVALUE;
120
121 DRM_DEBUG_DRIVER("Horizontal parameters OK\n");
122
123 if (vsync < 1)
124 return MODE_VSYNC_NARROW;
125
126 if (vsync > 0x3ff)
127 return MODE_VSYNC_WIDE;
128
129 if ((mode->vdisplay < 1) || (mode->vtotal < 1))
130 return MODE_V_ILLEGAL;
131
132 if ((mode->vdisplay > 0x7ff) || (mode->vtotal > 0xfff))
133 return MODE_BAD_VVALUE;
134
135 DRM_DEBUG_DRIVER("Vertical parameters OK\n");
136
137 tcon->dclk_min_div = 7;
138 tcon->dclk_max_div = 7;
139 rounded_rate = clk_round_rate(tcon->dclk, rate);
140 if (rounded_rate < rate)
141 return MODE_CLOCK_LOW;
142
143 if (rounded_rate > rate)
144 return MODE_CLOCK_HIGH;
145
146 DRM_DEBUG_DRIVER("Clock rate OK\n");
147
148 return MODE_OK;
149}
150
151static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = { 97static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = {
152 .disable = sun4i_lvds_encoder_disable, 98 .disable = sun4i_lvds_encoder_disable,
153 .enable = sun4i_lvds_encoder_enable, 99 .enable = sun4i_lvds_encoder_enable,
154 .mode_valid = sun4i_lvds_encoder_mode_valid,
155}; 100};
156 101
157static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = { 102static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = {
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 48e4f1df6e5d..020070d483d3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -293,7 +293,7 @@ retry:
293 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC); 293 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
294 if (ret == -ENOSPC) { 294 if (ret == -ENOSPC) {
295 spin_unlock(&vgdev->ctrlq.qlock); 295 spin_unlock(&vgdev->ctrlq.qlock);
296 wait_event(vgdev->ctrlq.ack_queue, vq->num_free); 296 wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
297 spin_lock(&vgdev->ctrlq.qlock); 297 spin_lock(&vgdev->ctrlq.qlock);
298 goto retry; 298 goto retry;
299 } else { 299 } else {
@@ -368,7 +368,7 @@ retry:
368 ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC); 368 ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
369 if (ret == -ENOSPC) { 369 if (ret == -ENOSPC) {
370 spin_unlock(&vgdev->cursorq.qlock); 370 spin_unlock(&vgdev->cursorq.qlock);
371 wait_event(vgdev->cursorq.ack_queue, vq->num_free); 371 wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
372 spin_lock(&vgdev->cursorq.qlock); 372 spin_lock(&vgdev->cursorq.qlock);
373 goto retry; 373 goto retry;
374 } else { 374 } else {
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 051a72eecb24..d2cc55e21374 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -40,6 +40,10 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
40#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 40#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
41#endif 41#endif
42 42
43#ifndef PCI_DEVICE_ID_AMD_17H_RR_NB
44#define PCI_DEVICE_ID_AMD_17H_RR_NB 0x15d0
45#endif
46
43/* CPUID function 0x80000001, ebx */ 47/* CPUID function 0x80000001, ebx */
44#define CPUID_PKGTYPE_MASK 0xf0000000 48#define CPUID_PKGTYPE_MASK 0xf0000000
45#define CPUID_PKGTYPE_F 0x00000000 49#define CPUID_PKGTYPE_F 0x00000000
@@ -72,6 +76,7 @@ struct k10temp_data {
72 struct pci_dev *pdev; 76 struct pci_dev *pdev;
73 void (*read_tempreg)(struct pci_dev *pdev, u32 *regval); 77 void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
74 int temp_offset; 78 int temp_offset;
79 u32 temp_adjust_mask;
75}; 80};
76 81
77struct tctl_offset { 82struct tctl_offset {
@@ -84,6 +89,7 @@ static const struct tctl_offset tctl_offset_table[] = {
84 { 0x17, "AMD Ryzen 5 1600X", 20000 }, 89 { 0x17, "AMD Ryzen 5 1600X", 20000 },
85 { 0x17, "AMD Ryzen 7 1700X", 20000 }, 90 { 0x17, "AMD Ryzen 7 1700X", 20000 },
86 { 0x17, "AMD Ryzen 7 1800X", 20000 }, 91 { 0x17, "AMD Ryzen 7 1800X", 20000 },
92 { 0x17, "AMD Ryzen 7 2700X", 10000 },
87 { 0x17, "AMD Ryzen Threadripper 1950X", 27000 }, 93 { 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
88 { 0x17, "AMD Ryzen Threadripper 1920X", 27000 }, 94 { 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
89 { 0x17, "AMD Ryzen Threadripper 1900X", 27000 }, 95 { 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
@@ -129,6 +135,8 @@ static ssize_t temp1_input_show(struct device *dev,
129 135
130 data->read_tempreg(data->pdev, &regval); 136 data->read_tempreg(data->pdev, &regval);
131 temp = (regval >> 21) * 125; 137 temp = (regval >> 21) * 125;
138 if (regval & data->temp_adjust_mask)
139 temp -= 49000;
132 if (temp > data->temp_offset) 140 if (temp > data->temp_offset)
133 temp -= data->temp_offset; 141 temp -= data->temp_offset;
134 else 142 else
@@ -259,12 +267,14 @@ static int k10temp_probe(struct pci_dev *pdev,
259 data->pdev = pdev; 267 data->pdev = pdev;
260 268
261 if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 || 269 if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
262 boot_cpu_data.x86_model == 0x70)) 270 boot_cpu_data.x86_model == 0x70)) {
263 data->read_tempreg = read_tempreg_nb_f15; 271 data->read_tempreg = read_tempreg_nb_f15;
264 else if (boot_cpu_data.x86 == 0x17) 272 } else if (boot_cpu_data.x86 == 0x17) {
273 data->temp_adjust_mask = 0x80000;
265 data->read_tempreg = read_tempreg_nb_f17; 274 data->read_tempreg = read_tempreg_nb_f17;
266 else 275 } else {
267 data->read_tempreg = read_tempreg_pci; 276 data->read_tempreg = read_tempreg_pci;
277 }
268 278
269 for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) { 279 for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
270 const struct tctl_offset *entry = &tctl_offset_table[i]; 280 const struct tctl_offset *entry = &tctl_offset_table[i];
@@ -292,6 +302,7 @@ static const struct pci_device_id k10temp_id_table[] = {
292 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, 302 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
293 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, 303 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
294 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, 304 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
305 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_RR_NB) },
295 {} 306 {}
296}; 307};
297MODULE_DEVICE_TABLE(pci, k10temp_id_table); 308MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index 8b0bc4fc06e8..b0bc77bf2cd9 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -1380,8 +1380,8 @@ static int __init nct6683_find(int sioaddr, struct nct6683_sio_data *sio_data)
1380 /* Activate logical device if needed */ 1380 /* Activate logical device if needed */
1381 val = superio_inb(sioaddr, SIO_REG_ENABLE); 1381 val = superio_inb(sioaddr, SIO_REG_ENABLE);
1382 if (!(val & 0x01)) { 1382 if (!(val & 0x01)) {
1383 pr_err("EC is disabled\n"); 1383 pr_warn("Forcibly enabling EC access. Data may be unusable.\n");
1384 goto fail; 1384 superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
1385 } 1385 }
1386 1386
1387 superio_exit(sioaddr); 1387 superio_exit(sioaddr);
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index 363bf56eb0f2..91976b6ca300 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -170,7 +170,10 @@ static int scmi_hwmon_probe(struct scmi_device *sdev)
170 scmi_chip_info.info = ptr_scmi_ci; 170 scmi_chip_info.info = ptr_scmi_ci;
171 chip_info = &scmi_chip_info; 171 chip_info = &scmi_chip_info;
172 172
173 for (type = 0; type < hwmon_max && nr_count[type]; type++) { 173 for (type = 0; type < hwmon_max; type++) {
174 if (!nr_count[type])
175 continue;
176
174 scmi_hwmon_add_chan_info(scmi_hwmon_chan, dev, nr_count[type], 177 scmi_hwmon_add_chan_info(scmi_hwmon_chan, dev, nr_count[type],
175 type, hwmon_attributes[type]); 178 type, hwmon_attributes[type]);
176 *ptr_scmi_ci++ = scmi_hwmon_chan++; 179 *ptr_scmi_ci++ = scmi_hwmon_chan++;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index c4865b08d7fb..8d21b9825d71 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -707,7 +707,6 @@ config I2C_MPC
707config I2C_MT65XX 707config I2C_MT65XX
708 tristate "MediaTek I2C adapter" 708 tristate "MediaTek I2C adapter"
709 depends on ARCH_MEDIATEK || COMPILE_TEST 709 depends on ARCH_MEDIATEK || COMPILE_TEST
710 depends on HAS_DMA
711 help 710 help
712 This selects the MediaTek(R) Integrated Inter Circuit bus driver 711 This selects the MediaTek(R) Integrated Inter Circuit bus driver
713 for MT65xx and MT81xx. 712 for MT65xx and MT81xx.
@@ -885,7 +884,6 @@ config I2C_SH7760
885 884
886config I2C_SH_MOBILE 885config I2C_SH_MOBILE
887 tristate "SuperH Mobile I2C Controller" 886 tristate "SuperH Mobile I2C Controller"
888 depends on HAS_DMA
889 depends on ARCH_SHMOBILE || ARCH_RENESAS || COMPILE_TEST 887 depends on ARCH_SHMOBILE || ARCH_RENESAS || COMPILE_TEST
890 help 888 help
891 If you say yes to this option, support will be included for the 889 If you say yes to this option, support will be included for the
@@ -1098,7 +1096,6 @@ config I2C_XLP9XX
1098 1096
1099config I2C_RCAR 1097config I2C_RCAR
1100 tristate "Renesas R-Car I2C Controller" 1098 tristate "Renesas R-Car I2C Controller"
1101 depends on HAS_DMA
1102 depends on ARCH_RENESAS || COMPILE_TEST 1099 depends on ARCH_RENESAS || COMPILE_TEST
1103 select I2C_SLAVE 1100 select I2C_SLAVE
1104 help 1101 help
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 25fcc3c1e32b..4053259bccb8 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -86,6 +86,7 @@ struct sprd_i2c {
86 u32 count; 86 u32 count;
87 int irq; 87 int irq;
88 int err; 88 int err;
89 bool is_suspended;
89}; 90};
90 91
91static void sprd_i2c_set_count(struct sprd_i2c *i2c_dev, u32 count) 92static void sprd_i2c_set_count(struct sprd_i2c *i2c_dev, u32 count)
@@ -283,6 +284,9 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap,
283 struct sprd_i2c *i2c_dev = i2c_adap->algo_data; 284 struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
284 int im, ret; 285 int im, ret;
285 286
287 if (i2c_dev->is_suspended)
288 return -EBUSY;
289
286 ret = pm_runtime_get_sync(i2c_dev->dev); 290 ret = pm_runtime_get_sync(i2c_dev->dev);
287 if (ret < 0) 291 if (ret < 0)
288 return ret; 292 return ret;
@@ -364,13 +368,12 @@ static irqreturn_t sprd_i2c_isr_thread(int irq, void *dev_id)
364 struct sprd_i2c *i2c_dev = dev_id; 368 struct sprd_i2c *i2c_dev = dev_id;
365 struct i2c_msg *msg = i2c_dev->msg; 369 struct i2c_msg *msg = i2c_dev->msg;
366 bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK); 370 bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK);
367 u32 i2c_count = readl(i2c_dev->base + I2C_COUNT);
368 u32 i2c_tran; 371 u32 i2c_tran;
369 372
370 if (msg->flags & I2C_M_RD) 373 if (msg->flags & I2C_M_RD)
371 i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD; 374 i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD;
372 else 375 else
373 i2c_tran = i2c_count; 376 i2c_tran = i2c_dev->count;
374 377
375 /* 378 /*
376 * If we got one ACK from slave when writing data, and we did not 379 * If we got one ACK from slave when writing data, and we did not
@@ -408,14 +411,13 @@ static irqreturn_t sprd_i2c_isr(int irq, void *dev_id)
408{ 411{
409 struct sprd_i2c *i2c_dev = dev_id; 412 struct sprd_i2c *i2c_dev = dev_id;
410 struct i2c_msg *msg = i2c_dev->msg; 413 struct i2c_msg *msg = i2c_dev->msg;
411 u32 i2c_count = readl(i2c_dev->base + I2C_COUNT);
412 bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK); 414 bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK);
413 u32 i2c_tran; 415 u32 i2c_tran;
414 416
415 if (msg->flags & I2C_M_RD) 417 if (msg->flags & I2C_M_RD)
416 i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD; 418 i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD;
417 else 419 else
418 i2c_tran = i2c_count; 420 i2c_tran = i2c_dev->count;
419 421
420 /* 422 /*
421 * If we did not get one ACK from slave when writing data, then we 423 * If we did not get one ACK from slave when writing data, then we
@@ -586,11 +588,23 @@ static int sprd_i2c_remove(struct platform_device *pdev)
586 588
587static int __maybe_unused sprd_i2c_suspend_noirq(struct device *pdev) 589static int __maybe_unused sprd_i2c_suspend_noirq(struct device *pdev)
588{ 590{
591 struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
592
593 i2c_lock_adapter(&i2c_dev->adap);
594 i2c_dev->is_suspended = true;
595 i2c_unlock_adapter(&i2c_dev->adap);
596
589 return pm_runtime_force_suspend(pdev); 597 return pm_runtime_force_suspend(pdev);
590} 598}
591 599
592static int __maybe_unused sprd_i2c_resume_noirq(struct device *pdev) 600static int __maybe_unused sprd_i2c_resume_noirq(struct device *pdev)
593{ 601{
602 struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
603
604 i2c_lock_adapter(&i2c_dev->adap);
605 i2c_dev->is_suspended = false;
606 i2c_unlock_adapter(&i2c_dev->adap);
607
594 return pm_runtime_force_resume(pdev); 608 return pm_runtime_force_resume(pdev);
595} 609}
596 610
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 036a03f0d0a6..1667b6e7674f 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -280,7 +280,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
280 */ 280 */
281 if (msgs[i].flags & I2C_M_RECV_LEN) { 281 if (msgs[i].flags & I2C_M_RECV_LEN) {
282 if (!(msgs[i].flags & I2C_M_RD) || 282 if (!(msgs[i].flags & I2C_M_RD) ||
283 msgs[i].buf[0] < 1 || 283 msgs[i].len < 1 || msgs[i].buf[0] < 1 ||
284 msgs[i].len < msgs[i].buf[0] + 284 msgs[i].len < msgs[i].buf[0] +
285 I2C_SMBUS_BLOCK_MAX) { 285 I2C_SMBUS_BLOCK_MAX) {
286 res = -EINVAL; 286 res = -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index daa919e5a442..241cf4ff9901 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4757,7 +4757,7 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
4757{ 4757{
4758 struct mlx5_ib_dev *dev = to_mdev(ibdev); 4758 struct mlx5_ib_dev *dev = to_mdev(ibdev);
4759 4759
4760 return mlx5_get_vector_affinity(dev->mdev, comp_vector); 4760 return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector);
4761} 4761}
4762 4762
4763/* The mlx5_ib_multiport_mutex should be held when calling this function */ 4763/* The mlx5_ib_multiport_mutex should be held when calling this function */
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
index 4be3aef40bd2..267da8215e08 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
@@ -443,17 +443,16 @@ static u8 opa_vnic_get_rc(struct __opa_veswport_info *info,
443} 443}
444 444
445/* opa_vnic_calc_entropy - calculate the packet entropy */ 445/* opa_vnic_calc_entropy - calculate the packet entropy */
446u8 opa_vnic_calc_entropy(struct opa_vnic_adapter *adapter, struct sk_buff *skb) 446u8 opa_vnic_calc_entropy(struct sk_buff *skb)
447{ 447{
448 u16 hash16; 448 u32 hash = skb_get_hash(skb);
449 449
450 /* 450 /* store XOR of all bytes in lower 8 bits */
451 * Get flow based 16-bit hash and then XOR the upper and lower bytes 451 hash ^= hash >> 8;
452 * to get the entropy. 452 hash ^= hash >> 16;
453 * __skb_tx_hash limits qcount to 16 bits. Hence, get 15-bit hash. 453
454 */ 454 /* return lower 8 bits as entropy */
455 hash16 = __skb_tx_hash(adapter->netdev, skb, BIT(15)); 455 return (u8)(hash & 0xFF);
456 return (u8)((hash16 >> 8) ^ (hash16 & 0xff));
457} 456}
458 457
459/* opa_vnic_get_def_port - get default port based on entropy */ 458/* opa_vnic_get_def_port - get default port based on entropy */
@@ -490,7 +489,7 @@ void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
490 489
491 hdr = skb_push(skb, OPA_VNIC_HDR_LEN); 490 hdr = skb_push(skb, OPA_VNIC_HDR_LEN);
492 491
493 entropy = opa_vnic_calc_entropy(adapter, skb); 492 entropy = opa_vnic_calc_entropy(skb);
494 def_port = opa_vnic_get_def_port(adapter, entropy); 493 def_port = opa_vnic_get_def_port(adapter, entropy);
495 len = opa_vnic_wire_length(skb); 494 len = opa_vnic_wire_length(skb);
496 dlid = opa_vnic_get_dlid(adapter, skb, def_port); 495 dlid = opa_vnic_get_dlid(adapter, skb, def_port);
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
index afd95f432262..43ac61ffef4a 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
@@ -299,7 +299,7 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
299void opa_vnic_rem_netdev(struct opa_vnic_adapter *adapter); 299void opa_vnic_rem_netdev(struct opa_vnic_adapter *adapter);
300void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb); 300void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb);
301u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb); 301u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb);
302u8 opa_vnic_calc_entropy(struct opa_vnic_adapter *adapter, struct sk_buff *skb); 302u8 opa_vnic_calc_entropy(struct sk_buff *skb);
303void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter); 303void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter);
304void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter); 304void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter);
305void opa_vnic_query_mac_tbl(struct opa_vnic_adapter *adapter, 305void opa_vnic_query_mac_tbl(struct opa_vnic_adapter *adapter,
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index ce57e0f10289..0c8aec62a425 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -104,7 +104,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
104 104
105 /* pass entropy and vl as metadata in skb */ 105 /* pass entropy and vl as metadata in skb */
106 mdata = skb_push(skb, sizeof(*mdata)); 106 mdata = skb_push(skb, sizeof(*mdata));
107 mdata->entropy = opa_vnic_calc_entropy(adapter, skb); 107 mdata->entropy = opa_vnic_calc_entropy(skb);
108 mdata->vl = opa_vnic_get_vl(adapter, skb); 108 mdata->vl = opa_vnic_get_vl(adapter, skb);
109 rc = adapter->rn_ops->ndo_select_queue(netdev, skb, 109 rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
110 accel_priv, fallback); 110 accel_priv, fallback);
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 46115a392098..c81c79d01d93 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -31,6 +31,7 @@
31enum evdev_clock_type { 31enum evdev_clock_type {
32 EV_CLK_REAL = 0, 32 EV_CLK_REAL = 0,
33 EV_CLK_MONO, 33 EV_CLK_MONO,
34 EV_CLK_BOOT,
34 EV_CLK_MAX 35 EV_CLK_MAX
35}; 36};
36 37
@@ -197,10 +198,12 @@ static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
197 case CLOCK_REALTIME: 198 case CLOCK_REALTIME:
198 clk_type = EV_CLK_REAL; 199 clk_type = EV_CLK_REAL;
199 break; 200 break;
200 case CLOCK_BOOTTIME:
201 case CLOCK_MONOTONIC: 201 case CLOCK_MONOTONIC:
202 clk_type = EV_CLK_MONO; 202 clk_type = EV_CLK_MONO;
203 break; 203 break;
204 case CLOCK_BOOTTIME:
205 clk_type = EV_CLK_BOOT;
206 break;
204 default: 207 default:
205 return -EINVAL; 208 return -EINVAL;
206 } 209 }
@@ -311,6 +314,8 @@ static void evdev_events(struct input_handle *handle,
311 314
312 ev_time[EV_CLK_MONO] = ktime_get(); 315 ev_time[EV_CLK_MONO] = ktime_get();
313 ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]); 316 ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]);
317 ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO],
318 TK_OFFS_BOOT);
314 319
315 rcu_read_lock(); 320 rcu_read_lock();
316 321
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
index 766bf2660116..5f04b2d94635 100644
--- a/drivers/input/input-leds.c
+++ b/drivers/input/input-leds.c
@@ -88,6 +88,7 @@ static int input_leds_connect(struct input_handler *handler,
88 const struct input_device_id *id) 88 const struct input_device_id *id)
89{ 89{
90 struct input_leds *leds; 90 struct input_leds *leds;
91 struct input_led *led;
91 unsigned int num_leds; 92 unsigned int num_leds;
92 unsigned int led_code; 93 unsigned int led_code;
93 int led_no; 94 int led_no;
@@ -119,14 +120,13 @@ static int input_leds_connect(struct input_handler *handler,
119 120
120 led_no = 0; 121 led_no = 0;
121 for_each_set_bit(led_code, dev->ledbit, LED_CNT) { 122 for_each_set_bit(led_code, dev->ledbit, LED_CNT) {
122 struct input_led *led = &leds->leds[led_no]; 123 if (!input_led_info[led_code].name)
124 continue;
123 125
126 led = &leds->leds[led_no];
124 led->handle = &leds->handle; 127 led->handle = &leds->handle;
125 led->code = led_code; 128 led->code = led_code;
126 129
127 if (!input_led_info[led_code].name)
128 continue;
129
130 led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s", 130 led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
131 dev_name(&dev->dev), 131 dev_name(&dev->dev),
132 input_led_info[led_code].name); 132 input_led_info[led_code].name);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 0a67f235ba88..38f9501acdf0 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -583,7 +583,7 @@ static void alps_process_trackstick_packet_v3(struct psmouse *psmouse)
583 583
584 x = (s8)(((packet[0] & 0x20) << 2) | (packet[1] & 0x7f)); 584 x = (s8)(((packet[0] & 0x20) << 2) | (packet[1] & 0x7f));
585 y = (s8)(((packet[0] & 0x10) << 3) | (packet[2] & 0x7f)); 585 y = (s8)(((packet[0] & 0x10) << 3) | (packet[2] & 0x7f));
586 z = packet[4] & 0x7c; 586 z = packet[4] & 0x7f;
587 587
588 /* 588 /*
589 * The x and y values tend to be quite large, and when used 589 * The x and y values tend to be quite large, and when used
diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
index 76edbf2c1bce..082defc329a8 100644
--- a/drivers/input/rmi4/rmi_spi.c
+++ b/drivers/input/rmi4/rmi_spi.c
@@ -147,8 +147,11 @@ static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
147 if (len > RMI_SPI_XFER_SIZE_LIMIT) 147 if (len > RMI_SPI_XFER_SIZE_LIMIT)
148 return -EINVAL; 148 return -EINVAL;
149 149
150 if (rmi_spi->xfer_buf_size < len) 150 if (rmi_spi->xfer_buf_size < len) {
151 rmi_spi_manage_pools(rmi_spi, len); 151 ret = rmi_spi_manage_pools(rmi_spi, len);
152 if (ret < 0)
153 return ret;
154 }
152 155
153 if (addr == 0) 156 if (addr == 0)
154 /* 157 /*
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 4f15496fec8b..3e613afa10b4 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -362,7 +362,7 @@ config TOUCHSCREEN_HIDEEP
362 362
363 If unsure, say N. 363 If unsure, say N.
364 364
365 To compile this driver as a moudle, choose M here : the 365 To compile this driver as a module, choose M here : the
366 module will be called hideep_ts. 366 module will be called hideep_ts.
367 367
368config TOUCHSCREEN_ILI210X 368config TOUCHSCREEN_ILI210X
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 5d9699fe1b55..09194721aed2 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -280,7 +280,8 @@ struct mxt_data {
280 struct input_dev *input_dev; 280 struct input_dev *input_dev;
281 char phys[64]; /* device physical location */ 281 char phys[64]; /* device physical location */
282 struct mxt_object *object_table; 282 struct mxt_object *object_table;
283 struct mxt_info info; 283 struct mxt_info *info;
284 void *raw_info_block;
284 unsigned int irq; 285 unsigned int irq;
285 unsigned int max_x; 286 unsigned int max_x;
286 unsigned int max_y; 287 unsigned int max_y;
@@ -460,12 +461,13 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry)
460{ 461{
461 u8 appmode = data->client->addr; 462 u8 appmode = data->client->addr;
462 u8 bootloader; 463 u8 bootloader;
464 u8 family_id = data->info ? data->info->family_id : 0;
463 465
464 switch (appmode) { 466 switch (appmode) {
465 case 0x4a: 467 case 0x4a:
466 case 0x4b: 468 case 0x4b:
467 /* Chips after 1664S use different scheme */ 469 /* Chips after 1664S use different scheme */
468 if (retry || data->info.family_id >= 0xa2) { 470 if (retry || family_id >= 0xa2) {
469 bootloader = appmode - 0x24; 471 bootloader = appmode - 0x24;
470 break; 472 break;
471 } 473 }
@@ -692,7 +694,7 @@ mxt_get_object(struct mxt_data *data, u8 type)
692 struct mxt_object *object; 694 struct mxt_object *object;
693 int i; 695 int i;
694 696
695 for (i = 0; i < data->info.object_num; i++) { 697 for (i = 0; i < data->info->object_num; i++) {
696 object = data->object_table + i; 698 object = data->object_table + i;
697 if (object->type == type) 699 if (object->type == type)
698 return object; 700 return object;
@@ -1462,12 +1464,12 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
1462 data_pos += offset; 1464 data_pos += offset;
1463 } 1465 }
1464 1466
1465 if (cfg_info.family_id != data->info.family_id) { 1467 if (cfg_info.family_id != data->info->family_id) {
1466 dev_err(dev, "Family ID mismatch!\n"); 1468 dev_err(dev, "Family ID mismatch!\n");
1467 return -EINVAL; 1469 return -EINVAL;
1468 } 1470 }
1469 1471
1470 if (cfg_info.variant_id != data->info.variant_id) { 1472 if (cfg_info.variant_id != data->info->variant_id) {
1471 dev_err(dev, "Variant ID mismatch!\n"); 1473 dev_err(dev, "Variant ID mismatch!\n");
1472 return -EINVAL; 1474 return -EINVAL;
1473 } 1475 }
@@ -1512,7 +1514,7 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
1512 1514
1513 /* Malloc memory to store configuration */ 1515 /* Malloc memory to store configuration */
1514 cfg_start_ofs = MXT_OBJECT_START + 1516 cfg_start_ofs = MXT_OBJECT_START +
1515 data->info.object_num * sizeof(struct mxt_object) + 1517 data->info->object_num * sizeof(struct mxt_object) +
1516 MXT_INFO_CHECKSUM_SIZE; 1518 MXT_INFO_CHECKSUM_SIZE;
1517 config_mem_size = data->mem_size - cfg_start_ofs; 1519 config_mem_size = data->mem_size - cfg_start_ofs;
1518 config_mem = kzalloc(config_mem_size, GFP_KERNEL); 1520 config_mem = kzalloc(config_mem_size, GFP_KERNEL);
@@ -1563,20 +1565,6 @@ release_mem:
1563 return ret; 1565 return ret;
1564} 1566}
1565 1567
1566static int mxt_get_info(struct mxt_data *data)
1567{
1568 struct i2c_client *client = data->client;
1569 struct mxt_info *info = &data->info;
1570 int error;
1571
1572 /* Read 7-byte info block starting at address 0 */
1573 error = __mxt_read_reg(client, 0, sizeof(*info), info);
1574 if (error)
1575 return error;
1576
1577 return 0;
1578}
1579
1580static void mxt_free_input_device(struct mxt_data *data) 1568static void mxt_free_input_device(struct mxt_data *data)
1581{ 1569{
1582 if (data->input_dev) { 1570 if (data->input_dev) {
@@ -1591,9 +1579,10 @@ static void mxt_free_object_table(struct mxt_data *data)
1591 video_unregister_device(&data->dbg.vdev); 1579 video_unregister_device(&data->dbg.vdev);
1592 v4l2_device_unregister(&data->dbg.v4l2); 1580 v4l2_device_unregister(&data->dbg.v4l2);
1593#endif 1581#endif
1594
1595 kfree(data->object_table);
1596 data->object_table = NULL; 1582 data->object_table = NULL;
1583 data->info = NULL;
1584 kfree(data->raw_info_block);
1585 data->raw_info_block = NULL;
1597 kfree(data->msg_buf); 1586 kfree(data->msg_buf);
1598 data->msg_buf = NULL; 1587 data->msg_buf = NULL;
1599 data->T5_address = 0; 1588 data->T5_address = 0;
@@ -1609,34 +1598,18 @@ static void mxt_free_object_table(struct mxt_data *data)
1609 data->max_reportid = 0; 1598 data->max_reportid = 0;
1610} 1599}
1611 1600
1612static int mxt_get_object_table(struct mxt_data *data) 1601static int mxt_parse_object_table(struct mxt_data *data,
1602 struct mxt_object *object_table)
1613{ 1603{
1614 struct i2c_client *client = data->client; 1604 struct i2c_client *client = data->client;
1615 size_t table_size;
1616 struct mxt_object *object_table;
1617 int error;
1618 int i; 1605 int i;
1619 u8 reportid; 1606 u8 reportid;
1620 u16 end_address; 1607 u16 end_address;
1621 1608
1622 table_size = data->info.object_num * sizeof(struct mxt_object);
1623 object_table = kzalloc(table_size, GFP_KERNEL);
1624 if (!object_table) {
1625 dev_err(&data->client->dev, "Failed to allocate memory\n");
1626 return -ENOMEM;
1627 }
1628
1629 error = __mxt_read_reg(client, MXT_OBJECT_START, table_size,
1630 object_table);
1631 if (error) {
1632 kfree(object_table);
1633 return error;
1634 }
1635
1636 /* Valid Report IDs start counting from 1 */ 1609 /* Valid Report IDs start counting from 1 */
1637 reportid = 1; 1610 reportid = 1;
1638 data->mem_size = 0; 1611 data->mem_size = 0;
1639 for (i = 0; i < data->info.object_num; i++) { 1612 for (i = 0; i < data->info->object_num; i++) {
1640 struct mxt_object *object = object_table + i; 1613 struct mxt_object *object = object_table + i;
1641 u8 min_id, max_id; 1614 u8 min_id, max_id;
1642 1615
@@ -1660,8 +1633,8 @@ static int mxt_get_object_table(struct mxt_data *data)
1660 1633
1661 switch (object->type) { 1634 switch (object->type) {
1662 case MXT_GEN_MESSAGE_T5: 1635 case MXT_GEN_MESSAGE_T5:
1663 if (data->info.family_id == 0x80 && 1636 if (data->info->family_id == 0x80 &&
1664 data->info.version < 0x20) { 1637 data->info->version < 0x20) {
1665 /* 1638 /*
1666 * On mXT224 firmware versions prior to V2.0 1639 * On mXT224 firmware versions prior to V2.0
1667 * read and discard unused CRC byte otherwise 1640 * read and discard unused CRC byte otherwise
@@ -1716,24 +1689,102 @@ static int mxt_get_object_table(struct mxt_data *data)
1716 /* If T44 exists, T5 position has to be directly after */ 1689 /* If T44 exists, T5 position has to be directly after */
1717 if (data->T44_address && (data->T5_address != data->T44_address + 1)) { 1690 if (data->T44_address && (data->T5_address != data->T44_address + 1)) {
1718 dev_err(&client->dev, "Invalid T44 position\n"); 1691 dev_err(&client->dev, "Invalid T44 position\n");
1719 error = -EINVAL; 1692 return -EINVAL;
1720 goto free_object_table;
1721 } 1693 }
1722 1694
1723 data->msg_buf = kcalloc(data->max_reportid, 1695 data->msg_buf = kcalloc(data->max_reportid,
1724 data->T5_msg_size, GFP_KERNEL); 1696 data->T5_msg_size, GFP_KERNEL);
1725 if (!data->msg_buf) { 1697 if (!data->msg_buf)
1726 dev_err(&client->dev, "Failed to allocate message buffer\n"); 1698 return -ENOMEM;
1699
1700 return 0;
1701}
1702
1703static int mxt_read_info_block(struct mxt_data *data)
1704{
1705 struct i2c_client *client = data->client;
1706 int error;
1707 size_t size;
1708 void *id_buf, *buf;
1709 uint8_t num_objects;
1710 u32 calculated_crc;
1711 u8 *crc_ptr;
1712
1713 /* If info block already allocated, free it */
1714 if (data->raw_info_block)
1715 mxt_free_object_table(data);
1716
1717 /* Read 7-byte ID information block starting at address 0 */
1718 size = sizeof(struct mxt_info);
1719 id_buf = kzalloc(size, GFP_KERNEL);
1720 if (!id_buf)
1721 return -ENOMEM;
1722
1723 error = __mxt_read_reg(client, 0, size, id_buf);
1724 if (error)
1725 goto err_free_mem;
1726
1727 /* Resize buffer to give space for rest of info block */
1728 num_objects = ((struct mxt_info *)id_buf)->object_num;
1729 size += (num_objects * sizeof(struct mxt_object))
1730 + MXT_INFO_CHECKSUM_SIZE;
1731
1732 buf = krealloc(id_buf, size, GFP_KERNEL);
1733 if (!buf) {
1727 error = -ENOMEM; 1734 error = -ENOMEM;
1728 goto free_object_table; 1735 goto err_free_mem;
1736 }
1737 id_buf = buf;
1738
1739 /* Read rest of info block */
1740 error = __mxt_read_reg(client, MXT_OBJECT_START,
1741 size - MXT_OBJECT_START,
1742 id_buf + MXT_OBJECT_START);
1743 if (error)
1744 goto err_free_mem;
1745
1746 /* Extract & calculate checksum */
1747 crc_ptr = id_buf + size - MXT_INFO_CHECKSUM_SIZE;
1748 data->info_crc = crc_ptr[0] | (crc_ptr[1] << 8) | (crc_ptr[2] << 16);
1749
1750 calculated_crc = mxt_calculate_crc(id_buf, 0,
1751 size - MXT_INFO_CHECKSUM_SIZE);
1752
1753 /*
1754 * CRC mismatch can be caused by data corruption due to I2C comms
1755 * issue or else device is not using Object Based Protocol (eg i2c-hid)
1756 */
1757 if ((data->info_crc == 0) || (data->info_crc != calculated_crc)) {
1758 dev_err(&client->dev,
1759 "Info Block CRC error calculated=0x%06X read=0x%06X\n",
1760 calculated_crc, data->info_crc);
1761 error = -EIO;
1762 goto err_free_mem;
1763 }
1764
1765 data->raw_info_block = id_buf;
1766 data->info = (struct mxt_info *)id_buf;
1767
1768 dev_info(&client->dev,
1769 "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
1770 data->info->family_id, data->info->variant_id,
1771 data->info->version >> 4, data->info->version & 0xf,
1772 data->info->build, data->info->object_num);
1773
1774 /* Parse object table information */
1775 error = mxt_parse_object_table(data, id_buf + MXT_OBJECT_START);
1776 if (error) {
1777 dev_err(&client->dev, "Error %d parsing object table\n", error);
1778 mxt_free_object_table(data);
1779 goto err_free_mem;
1729 } 1780 }
1730 1781
1731 data->object_table = object_table; 1782 data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START);
1732 1783
1733 return 0; 1784 return 0;
1734 1785
1735free_object_table: 1786err_free_mem:
1736 mxt_free_object_table(data); 1787 kfree(id_buf);
1737 return error; 1788 return error;
1738} 1789}
1739 1790
@@ -2046,7 +2097,7 @@ static int mxt_initialize(struct mxt_data *data)
2046 int error; 2097 int error;
2047 2098
2048 while (1) { 2099 while (1) {
2049 error = mxt_get_info(data); 2100 error = mxt_read_info_block(data);
2050 if (!error) 2101 if (!error)
2051 break; 2102 break;
2052 2103
@@ -2077,16 +2128,9 @@ static int mxt_initialize(struct mxt_data *data)
2077 msleep(MXT_FW_RESET_TIME); 2128 msleep(MXT_FW_RESET_TIME);
2078 } 2129 }
2079 2130
2080 /* Get object table information */
2081 error = mxt_get_object_table(data);
2082 if (error) {
2083 dev_err(&client->dev, "Error %d reading object table\n", error);
2084 return error;
2085 }
2086
2087 error = mxt_acquire_irq(data); 2131 error = mxt_acquire_irq(data);
2088 if (error) 2132 if (error)
2089 goto err_free_object_table; 2133 return error;
2090 2134
2091 error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME, 2135 error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME,
2092 &client->dev, GFP_KERNEL, data, 2136 &client->dev, GFP_KERNEL, data,
@@ -2094,14 +2138,10 @@ static int mxt_initialize(struct mxt_data *data)
2094 if (error) { 2138 if (error) {
2095 dev_err(&client->dev, "Failed to invoke firmware loader: %d\n", 2139 dev_err(&client->dev, "Failed to invoke firmware loader: %d\n",
2096 error); 2140 error);
2097 goto err_free_object_table; 2141 return error;
2098 } 2142 }
2099 2143
2100 return 0; 2144 return 0;
2101
2102err_free_object_table:
2103 mxt_free_object_table(data);
2104 return error;
2105} 2145}
2106 2146
2107static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep) 2147static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
@@ -2162,7 +2202,7 @@ recheck:
2162static u16 mxt_get_debug_value(struct mxt_data *data, unsigned int x, 2202static u16 mxt_get_debug_value(struct mxt_data *data, unsigned int x,
2163 unsigned int y) 2203 unsigned int y)
2164{ 2204{
2165 struct mxt_info *info = &data->info; 2205 struct mxt_info *info = data->info;
2166 struct mxt_dbg *dbg = &data->dbg; 2206 struct mxt_dbg *dbg = &data->dbg;
2167 unsigned int ofs, page; 2207 unsigned int ofs, page;
2168 unsigned int col = 0; 2208 unsigned int col = 0;
@@ -2490,7 +2530,7 @@ static const struct video_device mxt_video_device = {
2490 2530
2491static void mxt_debug_init(struct mxt_data *data) 2531static void mxt_debug_init(struct mxt_data *data)
2492{ 2532{
2493 struct mxt_info *info = &data->info; 2533 struct mxt_info *info = data->info;
2494 struct mxt_dbg *dbg = &data->dbg; 2534 struct mxt_dbg *dbg = &data->dbg;
2495 struct mxt_object *object; 2535 struct mxt_object *object;
2496 int error; 2536 int error;
@@ -2576,7 +2616,6 @@ static int mxt_configure_objects(struct mxt_data *data,
2576 const struct firmware *cfg) 2616 const struct firmware *cfg)
2577{ 2617{
2578 struct device *dev = &data->client->dev; 2618 struct device *dev = &data->client->dev;
2579 struct mxt_info *info = &data->info;
2580 int error; 2619 int error;
2581 2620
2582 error = mxt_init_t7_power_cfg(data); 2621 error = mxt_init_t7_power_cfg(data);
@@ -2601,11 +2640,6 @@ static int mxt_configure_objects(struct mxt_data *data,
2601 2640
2602 mxt_debug_init(data); 2641 mxt_debug_init(data);
2603 2642
2604 dev_info(dev,
2605 "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
2606 info->family_id, info->variant_id, info->version >> 4,
2607 info->version & 0xf, info->build, info->object_num);
2608
2609 return 0; 2643 return 0;
2610} 2644}
2611 2645
@@ -2614,7 +2648,7 @@ static ssize_t mxt_fw_version_show(struct device *dev,
2614 struct device_attribute *attr, char *buf) 2648 struct device_attribute *attr, char *buf)
2615{ 2649{
2616 struct mxt_data *data = dev_get_drvdata(dev); 2650 struct mxt_data *data = dev_get_drvdata(dev);
2617 struct mxt_info *info = &data->info; 2651 struct mxt_info *info = data->info;
2618 return scnprintf(buf, PAGE_SIZE, "%u.%u.%02X\n", 2652 return scnprintf(buf, PAGE_SIZE, "%u.%u.%02X\n",
2619 info->version >> 4, info->version & 0xf, info->build); 2653 info->version >> 4, info->version & 0xf, info->build);
2620} 2654}
@@ -2624,7 +2658,7 @@ static ssize_t mxt_hw_version_show(struct device *dev,
2624 struct device_attribute *attr, char *buf) 2658 struct device_attribute *attr, char *buf)
2625{ 2659{
2626 struct mxt_data *data = dev_get_drvdata(dev); 2660 struct mxt_data *data = dev_get_drvdata(dev);
2627 struct mxt_info *info = &data->info; 2661 struct mxt_info *info = data->info;
2628 return scnprintf(buf, PAGE_SIZE, "%u.%u\n", 2662 return scnprintf(buf, PAGE_SIZE, "%u.%u\n",
2629 info->family_id, info->variant_id); 2663 info->family_id, info->variant_id);
2630} 2664}
@@ -2663,7 +2697,7 @@ static ssize_t mxt_object_show(struct device *dev,
2663 return -ENOMEM; 2697 return -ENOMEM;
2664 2698
2665 error = 0; 2699 error = 0;
2666 for (i = 0; i < data->info.object_num; i++) { 2700 for (i = 0; i < data->info->object_num; i++) {
2667 object = data->object_table + i; 2701 object = data->object_table + i;
2668 2702
2669 if (!mxt_object_readable(object->type)) 2703 if (!mxt_object_readable(object->type))
@@ -3035,6 +3069,15 @@ static const struct dmi_system_id mxt_dmi_table[] = {
3035 .driver_data = samus_platform_data, 3069 .driver_data = samus_platform_data,
3036 }, 3070 },
3037 { 3071 {
3072 /* Samsung Chromebook Pro */
3073 .ident = "Samsung Chromebook Pro",
3074 .matches = {
3075 DMI_MATCH(DMI_SYS_VENDOR, "Google"),
3076 DMI_MATCH(DMI_PRODUCT_NAME, "Caroline"),
3077 },
3078 .driver_data = samus_platform_data,
3079 },
3080 {
3038 /* Other Google Chromebooks */ 3081 /* Other Google Chromebooks */
3039 .ident = "Chromebook", 3082 .ident = "Chromebook",
3040 .matches = { 3083 .matches = {
@@ -3254,6 +3297,11 @@ static SIMPLE_DEV_PM_OPS(mxt_pm_ops, mxt_suspend, mxt_resume);
3254 3297
3255static const struct of_device_id mxt_of_match[] = { 3298static const struct of_device_id mxt_of_match[] = {
3256 { .compatible = "atmel,maxtouch", }, 3299 { .compatible = "atmel,maxtouch", },
3300 /* Compatibles listed below are deprecated */
3301 { .compatible = "atmel,qt602240_ts", },
3302 { .compatible = "atmel,atmel_mxt_ts", },
3303 { .compatible = "atmel,atmel_mxt_tp", },
3304 { .compatible = "atmel,mXT224", },
3257 {}, 3305 {},
3258}; 3306};
3259MODULE_DEVICE_TABLE(of, mxt_of_match); 3307MODULE_DEVICE_TABLE(of, mxt_of_match);
diff --git a/drivers/memory/emif-asm-offsets.c b/drivers/memory/emif-asm-offsets.c
index 71a89d5d3efd..db8043019ec6 100644
--- a/drivers/memory/emif-asm-offsets.c
+++ b/drivers/memory/emif-asm-offsets.c
@@ -16,77 +16,7 @@
16 16
17int main(void) 17int main(void)
18{ 18{
19 DEFINE(EMIF_SDCFG_VAL_OFFSET, 19 ti_emif_asm_offsets();
20 offsetof(struct emif_regs_amx3, emif_sdcfg_val));
21 DEFINE(EMIF_TIMING1_VAL_OFFSET,
22 offsetof(struct emif_regs_amx3, emif_timing1_val));
23 DEFINE(EMIF_TIMING2_VAL_OFFSET,
24 offsetof(struct emif_regs_amx3, emif_timing2_val));
25 DEFINE(EMIF_TIMING3_VAL_OFFSET,
26 offsetof(struct emif_regs_amx3, emif_timing3_val));
27 DEFINE(EMIF_REF_CTRL_VAL_OFFSET,
28 offsetof(struct emif_regs_amx3, emif_ref_ctrl_val));
29 DEFINE(EMIF_ZQCFG_VAL_OFFSET,
30 offsetof(struct emif_regs_amx3, emif_zqcfg_val));
31 DEFINE(EMIF_PMCR_VAL_OFFSET,
32 offsetof(struct emif_regs_amx3, emif_pmcr_val));
33 DEFINE(EMIF_PMCR_SHDW_VAL_OFFSET,
34 offsetof(struct emif_regs_amx3, emif_pmcr_shdw_val));
35 DEFINE(EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET,
36 offsetof(struct emif_regs_amx3, emif_rd_wr_level_ramp_ctrl));
37 DEFINE(EMIF_RD_WR_EXEC_THRESH_OFFSET,
38 offsetof(struct emif_regs_amx3, emif_rd_wr_exec_thresh));
39 DEFINE(EMIF_COS_CONFIG_OFFSET,
40 offsetof(struct emif_regs_amx3, emif_cos_config));
41 DEFINE(EMIF_PRIORITY_TO_COS_MAPPING_OFFSET,
42 offsetof(struct emif_regs_amx3, emif_priority_to_cos_mapping));
43 DEFINE(EMIF_CONNECT_ID_SERV_1_MAP_OFFSET,
44 offsetof(struct emif_regs_amx3, emif_connect_id_serv_1_map));
45 DEFINE(EMIF_CONNECT_ID_SERV_2_MAP_OFFSET,
46 offsetof(struct emif_regs_amx3, emif_connect_id_serv_2_map));
47 DEFINE(EMIF_OCP_CONFIG_VAL_OFFSET,
48 offsetof(struct emif_regs_amx3, emif_ocp_config_val));
49 DEFINE(EMIF_LPDDR2_NVM_TIM_OFFSET,
50 offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim));
51 DEFINE(EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET,
52 offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim_shdw));
53 DEFINE(EMIF_DLL_CALIB_CTRL_VAL_OFFSET,
54 offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val));
55 DEFINE(EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET,
56 offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val_shdw));
57 DEFINE(EMIF_DDR_PHY_CTLR_1_OFFSET,
58 offsetof(struct emif_regs_amx3, emif_ddr_phy_ctlr_1));
59 DEFINE(EMIF_EXT_PHY_CTRL_VALS_OFFSET,
60 offsetof(struct emif_regs_amx3, emif_ext_phy_ctrl_vals));
61 DEFINE(EMIF_REGS_AMX3_SIZE, sizeof(struct emif_regs_amx3));
62
63 BLANK();
64
65 DEFINE(EMIF_PM_BASE_ADDR_VIRT_OFFSET,
66 offsetof(struct ti_emif_pm_data, ti_emif_base_addr_virt));
67 DEFINE(EMIF_PM_BASE_ADDR_PHYS_OFFSET,
68 offsetof(struct ti_emif_pm_data, ti_emif_base_addr_phys));
69 DEFINE(EMIF_PM_CONFIG_OFFSET,
70 offsetof(struct ti_emif_pm_data, ti_emif_sram_config));
71 DEFINE(EMIF_PM_REGS_VIRT_OFFSET,
72 offsetof(struct ti_emif_pm_data, regs_virt));
73 DEFINE(EMIF_PM_REGS_PHYS_OFFSET,
74 offsetof(struct ti_emif_pm_data, regs_phys));
75 DEFINE(EMIF_PM_DATA_SIZE, sizeof(struct ti_emif_pm_data));
76
77 BLANK();
78
79 DEFINE(EMIF_PM_SAVE_CONTEXT_OFFSET,
80 offsetof(struct ti_emif_pm_functions, save_context));
81 DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET,
82 offsetof(struct ti_emif_pm_functions, restore_context));
83 DEFINE(EMIF_PM_ENTER_SR_OFFSET,
84 offsetof(struct ti_emif_pm_functions, enter_sr));
85 DEFINE(EMIF_PM_EXIT_SR_OFFSET,
86 offsetof(struct ti_emif_pm_functions, exit_sr));
87 DEFINE(EMIF_PM_ABORT_SR_OFFSET,
88 offsetof(struct ti_emif_pm_functions, abort_sr));
89 DEFINE(EMIF_PM_FUNCTIONS_SIZE, sizeof(struct ti_emif_pm_functions));
90 20
91 return 0; 21 return 0;
92} 22}
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 231f3a1e27bf..86503f60468f 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
1994 .cmd_per_lun = 7, 1994 .cmd_per_lun = 7,
1995 .use_clustering = ENABLE_CLUSTERING, 1995 .use_clustering = ENABLE_CLUSTERING,
1996 .shost_attrs = mptscsih_host_attrs, 1996 .shost_attrs = mptscsih_host_attrs,
1997 .no_write_same = 1,
1997}; 1998};
1998 1999
1999static int mptsas_get_linkerrors(struct sas_phy *phy) 2000static int mptsas_get_linkerrors(struct sas_phy *phy)
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index d4c07b85f18e..f5695be14499 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -45,6 +45,7 @@
45#define I82802AB 0x00ad 45#define I82802AB 0x00ad
46#define I82802AC 0x00ac 46#define I82802AC 0x00ac
47#define PF38F4476 0x881c 47#define PF38F4476 0x881c
48#define M28F00AP30 0x8963
48/* STMicroelectronics chips */ 49/* STMicroelectronics chips */
49#define M50LPW080 0x002F 50#define M50LPW080 0x002F
50#define M50FLW080A 0x0080 51#define M50FLW080A 0x0080
@@ -375,6 +376,17 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
375 extp->MinorVersion = '1'; 376 extp->MinorVersion = '1';
376} 377}
377 378
379static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
380{
381 /*
382 * Micron(was Numonyx) 1Gbit bottom boot are buggy w.r.t
383 * Erase Supend for their small Erase Blocks(0x8000)
384 */
385 if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
386 return 1;
387 return 0;
388}
389
378static inline struct cfi_pri_intelext * 390static inline struct cfi_pri_intelext *
379read_pri_intelext(struct map_info *map, __u16 adr) 391read_pri_intelext(struct map_info *map, __u16 adr)
380{ 392{
@@ -831,21 +843,30 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
831 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1)))) 843 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
832 goto sleep; 844 goto sleep;
833 845
846 /* Do not allow suspend iff read/write to EB address */
847 if ((adr & chip->in_progress_block_mask) ==
848 chip->in_progress_block_addr)
849 goto sleep;
850
851 /* do not suspend small EBs, buggy Micron Chips */
852 if (cfi_is_micron_28F00AP30(cfi, chip) &&
853 (chip->in_progress_block_mask == ~(0x8000-1)))
854 goto sleep;
834 855
835 /* Erase suspend */ 856 /* Erase suspend */
836 map_write(map, CMD(0xB0), adr); 857 map_write(map, CMD(0xB0), chip->in_progress_block_addr);
837 858
838 /* If the flash has finished erasing, then 'erase suspend' 859 /* If the flash has finished erasing, then 'erase suspend'
839 * appears to make some (28F320) flash devices switch to 860 * appears to make some (28F320) flash devices switch to
840 * 'read' mode. Make sure that we switch to 'read status' 861 * 'read' mode. Make sure that we switch to 'read status'
841 * mode so we get the right data. --rmk 862 * mode so we get the right data. --rmk
842 */ 863 */
843 map_write(map, CMD(0x70), adr); 864 map_write(map, CMD(0x70), chip->in_progress_block_addr);
844 chip->oldstate = FL_ERASING; 865 chip->oldstate = FL_ERASING;
845 chip->state = FL_ERASE_SUSPENDING; 866 chip->state = FL_ERASE_SUSPENDING;
846 chip->erase_suspended = 1; 867 chip->erase_suspended = 1;
847 for (;;) { 868 for (;;) {
848 status = map_read(map, adr); 869 status = map_read(map, chip->in_progress_block_addr);
849 if (map_word_andequal(map, status, status_OK, status_OK)) 870 if (map_word_andequal(map, status, status_OK, status_OK))
850 break; 871 break;
851 872
@@ -1041,8 +1062,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
1041 sending the 0x70 (Read Status) command to an erasing 1062 sending the 0x70 (Read Status) command to an erasing
1042 chip and expecting it to be ignored, that's what we 1063 chip and expecting it to be ignored, that's what we
1043 do. */ 1064 do. */
1044 map_write(map, CMD(0xd0), adr); 1065 map_write(map, CMD(0xd0), chip->in_progress_block_addr);
1045 map_write(map, CMD(0x70), adr); 1066 map_write(map, CMD(0x70), chip->in_progress_block_addr);
1046 chip->oldstate = FL_READY; 1067 chip->oldstate = FL_READY;
1047 chip->state = FL_ERASING; 1068 chip->state = FL_ERASING;
1048 break; 1069 break;
@@ -1933,6 +1954,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1933 map_write(map, CMD(0xD0), adr); 1954 map_write(map, CMD(0xD0), adr);
1934 chip->state = FL_ERASING; 1955 chip->state = FL_ERASING;
1935 chip->erase_suspended = 0; 1956 chip->erase_suspended = 0;
1957 chip->in_progress_block_addr = adr;
1958 chip->in_progress_block_mask = ~(len - 1);
1936 1959
1937 ret = INVAL_CACHE_AND_WAIT(map, chip, adr, 1960 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1938 adr, len, 1961 adr, len,
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 668e2cbc155b..692902df2598 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -816,9 +816,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
816 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) 816 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
817 goto sleep; 817 goto sleep;
818 818
819 /* We could check to see if we're trying to access the sector 819 /* Do not allow suspend iff read/write to EB address */
820 * that is currently being erased. However, no user will try 820 if ((adr & chip->in_progress_block_mask) ==
821 * anything like that so we just wait for the timeout. */ 821 chip->in_progress_block_addr)
822 goto sleep;
822 823
823 /* Erase suspend */ 824 /* Erase suspend */
824 /* It's harmless to issue the Erase-Suspend and Erase-Resume 825 /* It's harmless to issue the Erase-Suspend and Erase-Resume
@@ -2267,6 +2268,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
2267 chip->state = FL_ERASING; 2268 chip->state = FL_ERASING;
2268 chip->erase_suspended = 0; 2269 chip->erase_suspended = 0;
2269 chip->in_progress_block_addr = adr; 2270 chip->in_progress_block_addr = adr;
2271 chip->in_progress_block_mask = ~(map->size - 1);
2270 2272
2271 INVALIDATE_CACHE_UDELAY(map, chip, 2273 INVALIDATE_CACHE_UDELAY(map, chip,
2272 adr, map->size, 2274 adr, map->size,
@@ -2356,6 +2358,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
2356 chip->state = FL_ERASING; 2358 chip->state = FL_ERASING;
2357 chip->erase_suspended = 0; 2359 chip->erase_suspended = 0;
2358 chip->in_progress_block_addr = adr; 2360 chip->in_progress_block_addr = adr;
2361 chip->in_progress_block_mask = ~(len - 1);
2359 2362
2360 INVALIDATE_CACHE_UDELAY(map, chip, 2363 INVALIDATE_CACHE_UDELAY(map, chip,
2361 adr, len, 2364 adr, len,
diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c
index d0cd6f8635d7..9c9f8936b63b 100644
--- a/drivers/mtd/nand/core.c
+++ b/drivers/mtd/nand/core.c
@@ -162,7 +162,6 @@ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
162 ret = nanddev_erase(nand, &pos); 162 ret = nanddev_erase(nand, &pos);
163 if (ret) { 163 if (ret) {
164 einfo->fail_addr = nanddev_pos_to_offs(nand, &pos); 164 einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
165 einfo->state = MTD_ERASE_FAILED;
166 165
167 return ret; 166 return ret;
168 } 167 }
@@ -170,8 +169,6 @@ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
170 nanddev_pos_next_eraseblock(nand, &pos); 169 nanddev_pos_next_eraseblock(nand, &pos);
171 } 170 }
172 171
173 einfo->state = MTD_ERASE_DONE;
174
175 return 0; 172 return 0;
176} 173}
177EXPORT_SYMBOL_GPL(nanddev_mtd_erase); 174EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 10e953218948..1d779a35ac8e 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -2299,29 +2299,20 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
2299 /* 2299 /*
2300 * The legacy "num-cs" property indicates the number of CS on the only 2300 * The legacy "num-cs" property indicates the number of CS on the only
2301 * chip connected to the controller (legacy bindings does not support 2301 * chip connected to the controller (legacy bindings does not support
2302 * more than one chip). CS are only incremented one by one while the RB 2302 * more than one chip). The CS and RB pins are always the #0.
2303 * pin is always the #0.
2304 * 2303 *
2305 * When not using legacy bindings, a couple of "reg" and "nand-rb" 2304 * When not using legacy bindings, a couple of "reg" and "nand-rb"
2306 * properties must be filled. For each chip, expressed as a subnode, 2305 * properties must be filled. For each chip, expressed as a subnode,
2307 * "reg" points to the CS lines and "nand-rb" to the RB line. 2306 * "reg" points to the CS lines and "nand-rb" to the RB line.
2308 */ 2307 */
2309 if (pdata) { 2308 if (pdata || nfc->caps->legacy_of_bindings) {
2310 nsels = 1; 2309 nsels = 1;
2311 } else if (nfc->caps->legacy_of_bindings && 2310 } else {
2312 !of_get_property(np, "num-cs", &nsels)) { 2311 nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
2313 dev_err(dev, "missing num-cs property\n"); 2312 if (nsels <= 0) {
2314 return -EINVAL; 2313 dev_err(dev, "missing/invalid reg property\n");
2315 } else if (!of_get_property(np, "reg", &nsels)) { 2314 return -EINVAL;
2316 dev_err(dev, "missing reg property\n"); 2315 }
2317 return -EINVAL;
2318 }
2319
2320 if (!pdata)
2321 nsels /= sizeof(u32);
2322 if (!nsels) {
2323 dev_err(dev, "invalid reg property size\n");
2324 return -EINVAL;
2325 } 2316 }
2326 2317
2327 /* Alloc the nand chip structure */ 2318 /* Alloc the nand chip structure */
diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c
index f54518ffb36a..f2052fae21c7 100644
--- a/drivers/mtd/nand/raw/tango_nand.c
+++ b/drivers/mtd/nand/raw/tango_nand.c
@@ -645,7 +645,7 @@ static int tango_nand_probe(struct platform_device *pdev)
645 645
646 writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE); 646 writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
647 647
648 clk = clk_get(&pdev->dev, NULL); 648 clk = devm_clk_get(&pdev->dev, NULL);
649 if (IS_ERR(clk)) 649 if (IS_ERR(clk))
650 return PTR_ERR(clk); 650 return PTR_ERR(clk);
651 651
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 4b8e9183489a..5872f31eaa60 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -501,7 +501,9 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
501 void __iomem *reg_base = cqspi->iobase; 501 void __iomem *reg_base = cqspi->iobase;
502 void __iomem *ahb_base = cqspi->ahb_base; 502 void __iomem *ahb_base = cqspi->ahb_base;
503 unsigned int remaining = n_rx; 503 unsigned int remaining = n_rx;
504 unsigned int mod_bytes = n_rx % 4;
504 unsigned int bytes_to_read = 0; 505 unsigned int bytes_to_read = 0;
506 u8 *rxbuf_end = rxbuf + n_rx;
505 int ret = 0; 507 int ret = 0;
506 508
507 writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR); 509 writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
@@ -530,11 +532,24 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
530 } 532 }
531 533
532 while (bytes_to_read != 0) { 534 while (bytes_to_read != 0) {
535 unsigned int word_remain = round_down(remaining, 4);
536
533 bytes_to_read *= cqspi->fifo_width; 537 bytes_to_read *= cqspi->fifo_width;
534 bytes_to_read = bytes_to_read > remaining ? 538 bytes_to_read = bytes_to_read > remaining ?
535 remaining : bytes_to_read; 539 remaining : bytes_to_read;
536 ioread32_rep(ahb_base, rxbuf, 540 bytes_to_read = round_down(bytes_to_read, 4);
537 DIV_ROUND_UP(bytes_to_read, 4)); 541 /* Read 4 byte word chunks then single bytes */
542 if (bytes_to_read) {
543 ioread32_rep(ahb_base, rxbuf,
544 (bytes_to_read / 4));
545 } else if (!word_remain && mod_bytes) {
546 unsigned int temp = ioread32(ahb_base);
547
548 bytes_to_read = mod_bytes;
549 memcpy(rxbuf, &temp, min((unsigned int)
550 (rxbuf_end - rxbuf),
551 bytes_to_read));
552 }
538 rxbuf += bytes_to_read; 553 rxbuf += bytes_to_read;
539 remaining -= bytes_to_read; 554 remaining -= bytes_to_read;
540 bytes_to_read = cqspi_get_rd_sram_level(cqspi); 555 bytes_to_read = cqspi_get_rd_sram_level(cqspi);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 891846655000..a029b27fd002 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -198,6 +198,7 @@ config VXLAN
198config GENEVE 198config GENEVE
199 tristate "Generic Network Virtualization Encapsulation" 199 tristate "Generic Network Virtualization Encapsulation"
200 depends on INET && NET_UDP_TUNNEL 200 depends on INET && NET_UDP_TUNNEL
201 depends on IPV6 || !IPV6
201 select NET_IP_TUNNEL 202 select NET_IP_TUNNEL
202 select GRO_CELLS 203 select GRO_CELLS
203 ---help--- 204 ---help---
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 78616787f2a3..9f561fe505cb 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -806,16 +806,39 @@ static unsigned int b53_get_mib_size(struct b53_device *dev)
806 return B53_MIBS_SIZE; 806 return B53_MIBS_SIZE;
807} 807}
808 808
809void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data) 809static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port)
810{
811 /* These ports typically do not have built-in PHYs */
812 switch (port) {
813 case B53_CPU_PORT_25:
814 case 7:
815 case B53_CPU_PORT:
816 return NULL;
817 }
818
819 return mdiobus_get_phy(ds->slave_mii_bus, port);
820}
821
822void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
823 uint8_t *data)
810{ 824{
811 struct b53_device *dev = ds->priv; 825 struct b53_device *dev = ds->priv;
812 const struct b53_mib_desc *mibs = b53_get_mib(dev); 826 const struct b53_mib_desc *mibs = b53_get_mib(dev);
813 unsigned int mib_size = b53_get_mib_size(dev); 827 unsigned int mib_size = b53_get_mib_size(dev);
828 struct phy_device *phydev;
814 unsigned int i; 829 unsigned int i;
815 830
816 for (i = 0; i < mib_size; i++) 831 if (stringset == ETH_SS_STATS) {
817 strlcpy(data + i * ETH_GSTRING_LEN, 832 for (i = 0; i < mib_size; i++)
818 mibs[i].name, ETH_GSTRING_LEN); 833 strlcpy(data + i * ETH_GSTRING_LEN,
834 mibs[i].name, ETH_GSTRING_LEN);
835 } else if (stringset == ETH_SS_PHY_STATS) {
836 phydev = b53_get_phy_device(ds, port);
837 if (!phydev)
838 return;
839
840 phy_ethtool_get_strings(phydev, data);
841 }
819} 842}
820EXPORT_SYMBOL(b53_get_strings); 843EXPORT_SYMBOL(b53_get_strings);
821 844
@@ -852,11 +875,34 @@ void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
852} 875}
853EXPORT_SYMBOL(b53_get_ethtool_stats); 876EXPORT_SYMBOL(b53_get_ethtool_stats);
854 877
855int b53_get_sset_count(struct dsa_switch *ds, int port) 878void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data)
879{
880 struct phy_device *phydev;
881
882 phydev = b53_get_phy_device(ds, port);
883 if (!phydev)
884 return;
885
886 phy_ethtool_get_stats(phydev, NULL, data);
887}
888EXPORT_SYMBOL(b53_get_ethtool_phy_stats);
889
890int b53_get_sset_count(struct dsa_switch *ds, int port, int sset)
856{ 891{
857 struct b53_device *dev = ds->priv; 892 struct b53_device *dev = ds->priv;
893 struct phy_device *phydev;
894
895 if (sset == ETH_SS_STATS) {
896 return b53_get_mib_size(dev);
897 } else if (sset == ETH_SS_PHY_STATS) {
898 phydev = b53_get_phy_device(ds, port);
899 if (!phydev)
900 return 0;
901
902 return phy_ethtool_get_sset_count(phydev);
903 }
858 904
859 return b53_get_mib_size(dev); 905 return 0;
860} 906}
861EXPORT_SYMBOL(b53_get_sset_count); 907EXPORT_SYMBOL(b53_get_sset_count);
862 908
@@ -1477,7 +1523,7 @@ void b53_br_fast_age(struct dsa_switch *ds, int port)
1477} 1523}
1478EXPORT_SYMBOL(b53_br_fast_age); 1524EXPORT_SYMBOL(b53_br_fast_age);
1479 1525
1480static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port) 1526static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
1481{ 1527{
1482 /* Broadcom switches will accept enabling Broadcom tags on the 1528 /* Broadcom switches will accept enabling Broadcom tags on the
1483 * following ports: 5, 7 and 8, any other port is not supported 1529 * following ports: 5, 7 and 8, any other port is not supported
@@ -1489,10 +1535,19 @@ static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port)
1489 return true; 1535 return true;
1490 } 1536 }
1491 1537
1492 dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n", port);
1493 return false; 1538 return false;
1494} 1539}
1495 1540
1541static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port)
1542{
1543 bool ret = b53_possible_cpu_port(ds, port);
1544
1545 if (!ret)
1546 dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n",
1547 port);
1548 return ret;
1549}
1550
1496enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port) 1551enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port)
1497{ 1552{
1498 struct b53_device *dev = ds->priv; 1553 struct b53_device *dev = ds->priv;
@@ -1650,6 +1705,7 @@ static const struct dsa_switch_ops b53_switch_ops = {
1650 .get_strings = b53_get_strings, 1705 .get_strings = b53_get_strings,
1651 .get_ethtool_stats = b53_get_ethtool_stats, 1706 .get_ethtool_stats = b53_get_ethtool_stats,
1652 .get_sset_count = b53_get_sset_count, 1707 .get_sset_count = b53_get_sset_count,
1708 .get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
1653 .phy_read = b53_phy_read16, 1709 .phy_read = b53_phy_read16,
1654 .phy_write = b53_phy_write16, 1710 .phy_write = b53_phy_write16,
1655 .adjust_link = b53_adjust_link, 1711 .adjust_link = b53_adjust_link,
@@ -1954,6 +2010,15 @@ static int b53_switch_init(struct b53_device *dev)
1954 dev->num_ports = dev->cpu_port + 1; 2010 dev->num_ports = dev->cpu_port + 1;
1955 dev->enabled_ports |= BIT(dev->cpu_port); 2011 dev->enabled_ports |= BIT(dev->cpu_port);
1956 2012
2013 /* Include non standard CPU port built-in PHYs to be probed */
2014 if (is539x(dev) || is531x5(dev)) {
2015 for (i = 0; i < dev->num_ports; i++) {
2016 if (!(dev->ds->phys_mii_mask & BIT(i)) &&
2017 !b53_possible_cpu_port(dev->ds, i))
2018 dev->ds->phys_mii_mask |= BIT(i);
2019 }
2020 }
2021
1957 dev->ports = devm_kzalloc(dev->dev, 2022 dev->ports = devm_kzalloc(dev->dev,
1958 sizeof(struct b53_port) * dev->num_ports, 2023 sizeof(struct b53_port) * dev->num_ports,
1959 GFP_KERNEL); 2024 GFP_KERNEL);
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 1187ebd79287..cc284a514de9 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -286,9 +286,11 @@ static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
286/* Exported functions towards other drivers */ 286/* Exported functions towards other drivers */
287void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port); 287void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port);
288int b53_configure_vlan(struct dsa_switch *ds); 288int b53_configure_vlan(struct dsa_switch *ds);
289void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data); 289void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
290 uint8_t *data);
290void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); 291void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
291int b53_get_sset_count(struct dsa_switch *ds, int port); 292int b53_get_sset_count(struct dsa_switch *ds, int port, int sset);
293void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data);
292int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge); 294int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge);
293void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge); 295void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge);
294void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state); 296void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 0378eded31f2..97236cfcbae4 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -859,6 +859,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
859 .get_strings = b53_get_strings, 859 .get_strings = b53_get_strings,
860 .get_ethtool_stats = b53_get_ethtool_stats, 860 .get_ethtool_stats = b53_get_ethtool_stats,
861 .get_sset_count = b53_get_sset_count, 861 .get_sset_count = b53_get_sset_count,
862 .get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
862 .get_phy_flags = bcm_sf2_sw_get_phy_flags, 863 .get_phy_flags = bcm_sf2_sw_get_phy_flags,
863 .adjust_link = bcm_sf2_sw_adjust_link, 864 .adjust_link = bcm_sf2_sw_adjust_link,
864 .fixed_link_update = bcm_sf2_sw_fixed_link_update, 865 .fixed_link_update = bcm_sf2_sw_fixed_link_update,
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index f77be9f85cb3..58f14af04639 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -86,16 +86,23 @@ static int dsa_loop_setup(struct dsa_switch *ds)
86 return 0; 86 return 0;
87} 87}
88 88
89static int dsa_loop_get_sset_count(struct dsa_switch *ds, int port) 89static int dsa_loop_get_sset_count(struct dsa_switch *ds, int port, int sset)
90{ 90{
91 if (sset != ETH_SS_STATS && sset != ETH_SS_PHY_STATS)
92 return 0;
93
91 return __DSA_LOOP_CNT_MAX; 94 return __DSA_LOOP_CNT_MAX;
92} 95}
93 96
94static void dsa_loop_get_strings(struct dsa_switch *ds, int port, uint8_t *data) 97static void dsa_loop_get_strings(struct dsa_switch *ds, int port,
98 u32 stringset, uint8_t *data)
95{ 99{
96 struct dsa_loop_priv *ps = ds->priv; 100 struct dsa_loop_priv *ps = ds->priv;
97 unsigned int i; 101 unsigned int i;
98 102
103 if (stringset != ETH_SS_STATS && stringset != ETH_SS_PHY_STATS)
104 return;
105
99 for (i = 0; i < __DSA_LOOP_CNT_MAX; i++) 106 for (i = 0; i < __DSA_LOOP_CNT_MAX; i++)
100 memcpy(data + i * ETH_GSTRING_LEN, 107 memcpy(data + i * ETH_GSTRING_LEN,
101 ps->ports[port].mib[i].name, ETH_GSTRING_LEN); 108 ps->ports[port].mib[i].name, ETH_GSTRING_LEN);
@@ -256,6 +263,7 @@ static const struct dsa_switch_ops dsa_loop_driver = {
256 .get_strings = dsa_loop_get_strings, 263 .get_strings = dsa_loop_get_strings,
257 .get_ethtool_stats = dsa_loop_get_ethtool_stats, 264 .get_ethtool_stats = dsa_loop_get_ethtool_stats,
258 .get_sset_count = dsa_loop_get_sset_count, 265 .get_sset_count = dsa_loop_get_sset_count,
266 .get_ethtool_phy_stats = dsa_loop_get_ethtool_stats,
259 .phy_read = dsa_loop_phy_read, 267 .phy_read = dsa_loop_phy_read,
260 .phy_write = dsa_loop_phy_write, 268 .phy_write = dsa_loop_phy_write,
261 .port_bridge_join = dsa_loop_port_bridge_join, 269 .port_bridge_join = dsa_loop_port_bridge_join,
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index fefa454f3e56..b4f6e1a67dd9 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -977,10 +977,14 @@ static const struct lan9303_mib_desc lan9303_mib[] = {
977 { .offset = LAN9303_MAC_TX_LATECOL_0, .name = "TxLateCol", }, 977 { .offset = LAN9303_MAC_TX_LATECOL_0, .name = "TxLateCol", },
978}; 978};
979 979
980static void lan9303_get_strings(struct dsa_switch *ds, int port, uint8_t *data) 980static void lan9303_get_strings(struct dsa_switch *ds, int port,
981 u32 stringset, uint8_t *data)
981{ 982{
982 unsigned int u; 983 unsigned int u;
983 984
985 if (stringset != ETH_SS_STATS)
986 return;
987
984 for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) { 988 for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) {
985 strncpy(data + u * ETH_GSTRING_LEN, lan9303_mib[u].name, 989 strncpy(data + u * ETH_GSTRING_LEN, lan9303_mib[u].name,
986 ETH_GSTRING_LEN); 990 ETH_GSTRING_LEN);
@@ -1007,8 +1011,11 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
1007 } 1011 }
1008} 1012}
1009 1013
1010static int lan9303_get_sset_count(struct dsa_switch *ds, int port) 1014static int lan9303_get_sset_count(struct dsa_switch *ds, int port, int sset)
1011{ 1015{
1016 if (sset != ETH_SS_STATS)
1017 return 0;
1018
1012 return ARRAY_SIZE(lan9303_mib); 1019 return ARRAY_SIZE(lan9303_mib);
1013} 1020}
1014 1021
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index bcb3e6c734f2..7210c49b7922 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -439,15 +439,22 @@ static void ksz_disable_port(struct dsa_switch *ds, int port,
439 ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, true); 439 ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, true);
440} 440}
441 441
442static int ksz_sset_count(struct dsa_switch *ds, int port) 442static int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
443{ 443{
444 if (sset != ETH_SS_STATS)
445 return 0;
446
444 return TOTAL_SWITCH_COUNTER_NUM; 447 return TOTAL_SWITCH_COUNTER_NUM;
445} 448}
446 449
447static void ksz_get_strings(struct dsa_switch *ds, int port, uint8_t *buf) 450static void ksz_get_strings(struct dsa_switch *ds, int port,
451 u32 stringset, uint8_t *buf)
448{ 452{
449 int i; 453 int i;
450 454
455 if (stringset != ETH_SS_STATS)
456 return;
457
451 for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) { 458 for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
452 memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string, 459 memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string,
453 ETH_GSTRING_LEN); 460 ETH_GSTRING_LEN);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 80a4dbc3a499..62e486652e62 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -573,10 +573,14 @@ static int mt7530_phy_write(struct dsa_switch *ds, int port, int regnum,
573} 573}
574 574
575static void 575static void
576mt7530_get_strings(struct dsa_switch *ds, int port, uint8_t *data) 576mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
577 uint8_t *data)
577{ 578{
578 int i; 579 int i;
579 580
581 if (stringset != ETH_SS_STATS)
582 return;
583
580 for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) 584 for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++)
581 strncpy(data + i * ETH_GSTRING_LEN, mt7530_mib[i].name, 585 strncpy(data + i * ETH_GSTRING_LEN, mt7530_mib[i].name,
582 ETH_GSTRING_LEN); 586 ETH_GSTRING_LEN);
@@ -604,8 +608,11 @@ mt7530_get_ethtool_stats(struct dsa_switch *ds, int port,
604} 608}
605 609
606static int 610static int
607mt7530_get_sset_count(struct dsa_switch *ds, int port) 611mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset)
608{ 612{
613 if (sset != ETH_SS_STATS)
614 return 0;
615
609 return ARRAY_SIZE(mt7530_mib); 616 return ARRAY_SIZE(mt7530_mib);
610} 617}
611 618
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 3d2091099f7f..9d62e4acc01b 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -665,13 +665,13 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
665 case STATS_TYPE_PORT: 665 case STATS_TYPE_PORT:
666 err = mv88e6xxx_port_read(chip, port, s->reg, &reg); 666 err = mv88e6xxx_port_read(chip, port, s->reg, &reg);
667 if (err) 667 if (err)
668 return UINT64_MAX; 668 return U64_MAX;
669 669
670 low = reg; 670 low = reg;
671 if (s->size == 4) { 671 if (s->size == 4) {
672 err = mv88e6xxx_port_read(chip, port, s->reg + 1, &reg); 672 err = mv88e6xxx_port_read(chip, port, s->reg + 1, &reg);
673 if (err) 673 if (err)
674 return UINT64_MAX; 674 return U64_MAX;
675 high = reg; 675 high = reg;
676 } 676 }
677 break; 677 break;
@@ -685,7 +685,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
685 mv88e6xxx_g1_stats_read(chip, reg + 1, &high); 685 mv88e6xxx_g1_stats_read(chip, reg + 1, &high);
686 break; 686 break;
687 default: 687 default:
688 return UINT64_MAX; 688 return U64_MAX;
689 } 689 }
690 value = (((u64)high) << 16) | low; 690 value = (((u64)high) << 16) | low;
691 return value; 691 return value;
@@ -742,11 +742,14 @@ static void mv88e6xxx_atu_vtu_get_strings(uint8_t *data)
742} 742}
743 743
744static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, 744static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
745 uint8_t *data) 745 u32 stringset, uint8_t *data)
746{ 746{
747 struct mv88e6xxx_chip *chip = ds->priv; 747 struct mv88e6xxx_chip *chip = ds->priv;
748 int count = 0; 748 int count = 0;
749 749
750 if (stringset != ETH_SS_STATS)
751 return;
752
750 mutex_lock(&chip->reg_lock); 753 mutex_lock(&chip->reg_lock);
751 754
752 if (chip->info->ops->stats_get_strings) 755 if (chip->info->ops->stats_get_strings)
@@ -789,12 +792,15 @@ static int mv88e6320_stats_get_sset_count(struct mv88e6xxx_chip *chip)
789 STATS_TYPE_BANK1); 792 STATS_TYPE_BANK1);
790} 793}
791 794
792static int mv88e6xxx_get_sset_count(struct dsa_switch *ds, int port) 795static int mv88e6xxx_get_sset_count(struct dsa_switch *ds, int port, int sset)
793{ 796{
794 struct mv88e6xxx_chip *chip = ds->priv; 797 struct mv88e6xxx_chip *chip = ds->priv;
795 int serdes_count = 0; 798 int serdes_count = 0;
796 int count = 0; 799 int count = 0;
797 800
801 if (sset != ETH_SS_STATS)
802 return 0;
803
798 mutex_lock(&chip->reg_lock); 804 mutex_lock(&chip->reg_lock);
799 if (chip->info->ops->stats_get_sset_count) 805 if (chip->info->ops->stats_get_sset_count)
800 count = chip->info->ops->stats_get_sset_count(chip); 806 count = chip->info->ops->stats_get_sset_count(chip);
@@ -1020,6 +1026,38 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
1020 dev_err(ds->dev, "p%d: failed to update state\n", port); 1026 dev_err(ds->dev, "p%d: failed to update state\n", port);
1021} 1027}
1022 1028
1029static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip)
1030{
1031 int target, port;
1032 int err;
1033
1034 if (!chip->info->global2_addr)
1035 return 0;
1036
1037 /* Initialize the routing port to the 32 possible target devices */
1038 for (target = 0; target < 32; target++) {
1039 port = 0x1f;
1040 if (target < DSA_MAX_SWITCHES)
1041 if (chip->ds->rtable[target] != DSA_RTABLE_NONE)
1042 port = chip->ds->rtable[target];
1043
1044 err = mv88e6xxx_g2_device_mapping_write(chip, target, port);
1045 if (err)
1046 return err;
1047 }
1048
1049 return 0;
1050}
1051
1052static int mv88e6xxx_trunk_setup(struct mv88e6xxx_chip *chip)
1053{
1054 /* Clear all trunk masks and mapping */
1055 if (chip->info->global2_addr)
1056 return mv88e6xxx_g2_trunk_clear(chip);
1057
1058 return 0;
1059}
1060
1023static int mv88e6xxx_pot_setup(struct mv88e6xxx_chip *chip) 1061static int mv88e6xxx_pot_setup(struct mv88e6xxx_chip *chip)
1024{ 1062{
1025 if (chip->info->ops->pot_clear) 1063 if (chip->info->ops->pot_clear)
@@ -2190,13 +2228,6 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
2190 if (err) 2228 if (err)
2191 goto unlock; 2229 goto unlock;
2192 2230
2193 /* Setup Switch Global 2 Registers */
2194 if (chip->info->global2_addr) {
2195 err = mv88e6xxx_g2_setup(chip);
2196 if (err)
2197 goto unlock;
2198 }
2199
2200 err = mv88e6xxx_irl_setup(chip); 2231 err = mv88e6xxx_irl_setup(chip);
2201 if (err) 2232 if (err)
2202 goto unlock; 2233 goto unlock;
@@ -2233,6 +2264,14 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
2233 if (err) 2264 if (err)
2234 goto unlock; 2265 goto unlock;
2235 2266
2267 err = mv88e6xxx_trunk_setup(chip);
2268 if (err)
2269 goto unlock;
2270
2271 err = mv88e6xxx_devmap_setup(chip);
2272 if (err)
2273 goto unlock;
2274
2236 /* Setup PTP Hardware Clock and timestamping */ 2275 /* Setup PTP Hardware Clock and timestamping */
2237 if (chip->info->ptp_support) { 2276 if (chip->info->ptp_support) {
2238 err = mv88e6xxx_ptp_setup(chip); 2277 err = mv88e6xxx_ptp_setup(chip);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 80490f66bc06..4163c8099d0b 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -21,10 +21,6 @@
21#include <linux/timecounter.h> 21#include <linux/timecounter.h>
22#include <net/dsa.h> 22#include <net/dsa.h>
23 23
24#ifndef UINT64_MAX
25#define UINT64_MAX (u64)(~((u64)0))
26#endif
27
28#define SMI_CMD 0x00 24#define SMI_CMD 0x00
29#define SMI_CMD_BUSY BIT(15) 25#define SMI_CMD_BUSY BIT(15)
30#define SMI_CMD_CLAUSE_22 BIT(12) 26#define SMI_CMD_CLAUSE_22 BIT(12)
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 0ce627fded48..e6d658181b27 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -119,37 +119,17 @@ int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
119 119
120/* Offset 0x06: Device Mapping Table register */ 120/* Offset 0x06: Device Mapping Table register */
121 121
122static int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, 122int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
123 int target, int port) 123 int port)
124{ 124{
125 u16 val = (target << 8) | (port & 0xf); 125 u16 val = (target << 8) | (port & 0x1f);
126 /* Modern chips use 5 bits to define a device mapping port,
127 * but bit 4 is reserved on older chips, so it is safe to use.
128 */
126 129
127 return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_DEVICE_MAPPING, val); 130 return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_DEVICE_MAPPING, val);
128} 131}
129 132
130static int mv88e6xxx_g2_set_device_mapping(struct mv88e6xxx_chip *chip)
131{
132 int target, port;
133 int err;
134
135 /* Initialize the routing port to the 32 possible target devices */
136 for (target = 0; target < 32; ++target) {
137 port = 0xf;
138
139 if (target < DSA_MAX_SWITCHES) {
140 port = chip->ds->rtable[target];
141 if (port == DSA_RTABLE_NONE)
142 port = 0xf;
143 }
144
145 err = mv88e6xxx_g2_device_mapping_write(chip, target, port);
146 if (err)
147 break;
148 }
149
150 return err;
151}
152
153/* Offset 0x07: Trunk Mask Table register */ 133/* Offset 0x07: Trunk Mask Table register */
154 134
155static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num, 135static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
@@ -174,7 +154,7 @@ static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
174 return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_TRUNK_MAPPING, val); 154 return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_TRUNK_MAPPING, val);
175} 155}
176 156
177static int mv88e6xxx_g2_clear_trunk(struct mv88e6xxx_chip *chip) 157int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip)
178{ 158{
179 const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1; 159 const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
180 int i, err; 160 int i, err;
@@ -1138,31 +1118,3 @@ void mv88e6xxx_g2_irq_mdio_free(struct mv88e6xxx_chip *chip,
1138 for (phy = 0; phy < chip->info->num_internal_phys; phy++) 1118 for (phy = 0; phy < chip->info->num_internal_phys; phy++)
1139 irq_dispose_mapping(bus->irq[phy]); 1119 irq_dispose_mapping(bus->irq[phy]);
1140} 1120}
1141
1142int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
1143{
1144 u16 reg;
1145 int err;
1146
1147 /* Ignore removed tag data on doubly tagged packets, disable
1148 * flow control messages, force flow control priority to the
1149 * highest, and send all special multicast frames to the CPU
1150 * port at the highest priority.
1151 */
1152 reg = MV88E6XXX_G2_SWITCH_MGMT_FORCE_FLOW_CTL_PRI | (0x7 << 4);
1153 err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MGMT, reg);
1154 if (err)
1155 return err;
1156
1157 /* Program the DSA routing table. */
1158 err = mv88e6xxx_g2_set_device_mapping(chip);
1159 if (err)
1160 return err;
1161
1162 /* Clear all trunk masks and mapping. */
1163 err = mv88e6xxx_g2_clear_trunk(chip);
1164 if (err)
1165 return err;
1166
1167 return 0;
1168}
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 520ec70d32e8..37e8ce2c72a0 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -60,7 +60,8 @@
60#define MV88E6XXX_G2_DEVICE_MAPPING 0x06 60#define MV88E6XXX_G2_DEVICE_MAPPING 0x06
61#define MV88E6XXX_G2_DEVICE_MAPPING_UPDATE 0x8000 61#define MV88E6XXX_G2_DEVICE_MAPPING_UPDATE 0x8000
62#define MV88E6XXX_G2_DEVICE_MAPPING_DEV_MASK 0x1f00 62#define MV88E6XXX_G2_DEVICE_MAPPING_DEV_MASK 0x1f00
63#define MV88E6XXX_G2_DEVICE_MAPPING_PORT_MASK 0x000f 63#define MV88E6352_G2_DEVICE_MAPPING_PORT_MASK 0x000f
64#define MV88E6390_G2_DEVICE_MAPPING_PORT_MASK 0x001f
64 65
65/* Offset 0x07: Trunk Mask Table Register */ 66/* Offset 0x07: Trunk Mask Table Register */
66#define MV88E6XXX_G2_TRUNK_MASK 0x07 67#define MV88E6XXX_G2_TRUNK_MASK 0x07
@@ -313,7 +314,6 @@ int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
313 int src_port, u16 data); 314 int src_port, u16 data);
314int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip); 315int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip);
315 316
316int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip);
317int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip); 317int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip);
318void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip); 318void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip);
319 319
@@ -327,6 +327,11 @@ int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
327 327
328int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip); 328int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip);
329 329
330int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
331
332int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
333 int port);
334
330extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; 335extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
331extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops; 336extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops;
332 337
@@ -441,11 +446,6 @@ static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
441 return -EOPNOTSUPP; 446 return -EOPNOTSUPP;
442} 447}
443 448
444static inline int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
445{
446 return -EOPNOTSUPP;
447}
448
449static inline int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip) 449static inline int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
450{ 450{
451 return -EOPNOTSUPP; 451 return -EOPNOTSUPP;
@@ -495,6 +495,17 @@ static inline int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
495 return -EOPNOTSUPP; 495 return -EOPNOTSUPP;
496} 496}
497 497
498static inline int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip)
499{
500 return -EOPNOTSUPP;
501}
502
503static inline int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
504 int target, int port)
505{
506 return -EOPNOTSUPP;
507}
508
498#endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */ 509#endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
499 510
500#endif /* _MV88E6XXX_GLOBAL2_H */ 511#endif /* _MV88E6XXX_GLOBAL2_H */
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 600d5ad1fbde..757b6d90ea36 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -600,10 +600,13 @@ qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
600} 600}
601 601
602static void 602static void
603qca8k_get_strings(struct dsa_switch *ds, int port, uint8_t *data) 603qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
604{ 604{
605 int i; 605 int i;
606 606
607 if (stringset != ETH_SS_STATS)
608 return;
609
607 for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) 610 for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++)
608 strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name, 611 strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
609 ETH_GSTRING_LEN); 612 ETH_GSTRING_LEN);
@@ -631,8 +634,11 @@ qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
631} 634}
632 635
633static int 636static int
634qca8k_get_sset_count(struct dsa_switch *ds, int port) 637qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
635{ 638{
639 if (sset != ETH_SS_STATS)
640 return 0;
641
636 return ARRAY_SIZE(ar8327_mib); 642 return ARRAY_SIZE(ar8327_mib);
637} 643}
638 644
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 36c8950dbd2d..cabbe227bb98 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -765,8 +765,9 @@ static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
765 struct net_device *dev); 765 struct net_device *dev);
766static int vortex_rx(struct net_device *dev); 766static int vortex_rx(struct net_device *dev);
767static int boomerang_rx(struct net_device *dev); 767static int boomerang_rx(struct net_device *dev);
768static irqreturn_t vortex_interrupt(int irq, void *dev_id); 768static irqreturn_t vortex_boomerang_interrupt(int irq, void *dev_id);
769static irqreturn_t boomerang_interrupt(int irq, void *dev_id); 769static irqreturn_t _vortex_interrupt(int irq, struct net_device *dev);
770static irqreturn_t _boomerang_interrupt(int irq, struct net_device *dev);
770static int vortex_close(struct net_device *dev); 771static int vortex_close(struct net_device *dev);
771static void dump_tx_ring(struct net_device *dev); 772static void dump_tx_ring(struct net_device *dev);
772static void update_stats(void __iomem *ioaddr, struct net_device *dev); 773static void update_stats(void __iomem *ioaddr, struct net_device *dev);
@@ -838,11 +839,7 @@ MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)");
838#ifdef CONFIG_NET_POLL_CONTROLLER 839#ifdef CONFIG_NET_POLL_CONTROLLER
839static void poll_vortex(struct net_device *dev) 840static void poll_vortex(struct net_device *dev)
840{ 841{
841 struct vortex_private *vp = netdev_priv(dev); 842 vortex_boomerang_interrupt(dev->irq, dev);
842 unsigned long flags;
843 local_irq_save(flags);
844 (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
845 local_irq_restore(flags);
846} 843}
847#endif 844#endif
848 845
@@ -1729,8 +1726,7 @@ vortex_open(struct net_device *dev)
1729 dma_addr_t dma; 1726 dma_addr_t dma;
1730 1727
1731 /* Use the now-standard shared IRQ implementation. */ 1728 /* Use the now-standard shared IRQ implementation. */
1732 if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? 1729 if ((retval = request_irq(dev->irq, vortex_boomerang_interrupt, IRQF_SHARED, dev->name, dev))) {
1733 boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
1734 pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq); 1730 pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
1735 goto err; 1731 goto err;
1736 } 1732 }
@@ -1905,18 +1901,7 @@ static void vortex_tx_timeout(struct net_device *dev)
1905 pr_err("%s: Interrupt posted but not delivered --" 1901 pr_err("%s: Interrupt posted but not delivered --"
1906 " IRQ blocked by another device?\n", dev->name); 1902 " IRQ blocked by another device?\n", dev->name);
1907 /* Bad idea here.. but we might as well handle a few events. */ 1903 /* Bad idea here.. but we might as well handle a few events. */
1908 { 1904 vortex_boomerang_interrupt(dev->irq, dev);
1909 /*
1910 * Block interrupts because vortex_interrupt does a bare spin_lock()
1911 */
1912 unsigned long flags;
1913 local_irq_save(flags);
1914 if (vp->full_bus_master_tx)
1915 boomerang_interrupt(dev->irq, dev);
1916 else
1917 vortex_interrupt(dev->irq, dev);
1918 local_irq_restore(flags);
1919 }
1920 } 1905 }
1921 1906
1922 if (vortex_debug > 0) 1907 if (vortex_debug > 0)
@@ -2267,9 +2252,8 @@ out_dma_err:
2267 */ 2252 */
2268 2253
2269static irqreturn_t 2254static irqreturn_t
2270vortex_interrupt(int irq, void *dev_id) 2255_vortex_interrupt(int irq, struct net_device *dev)
2271{ 2256{
2272 struct net_device *dev = dev_id;
2273 struct vortex_private *vp = netdev_priv(dev); 2257 struct vortex_private *vp = netdev_priv(dev);
2274 void __iomem *ioaddr; 2258 void __iomem *ioaddr;
2275 int status; 2259 int status;
@@ -2278,7 +2262,6 @@ vortex_interrupt(int irq, void *dev_id)
2278 unsigned int bytes_compl = 0, pkts_compl = 0; 2262 unsigned int bytes_compl = 0, pkts_compl = 0;
2279 2263
2280 ioaddr = vp->ioaddr; 2264 ioaddr = vp->ioaddr;
2281 spin_lock(&vp->lock);
2282 2265
2283 status = ioread16(ioaddr + EL3_STATUS); 2266 status = ioread16(ioaddr + EL3_STATUS);
2284 2267
@@ -2376,7 +2359,6 @@ vortex_interrupt(int irq, void *dev_id)
2376 pr_debug("%s: exiting interrupt, status %4.4x.\n", 2359 pr_debug("%s: exiting interrupt, status %4.4x.\n",
2377 dev->name, status); 2360 dev->name, status);
2378handler_exit: 2361handler_exit:
2379 spin_unlock(&vp->lock);
2380 return IRQ_RETVAL(handled); 2362 return IRQ_RETVAL(handled);
2381} 2363}
2382 2364
@@ -2386,9 +2368,8 @@ handler_exit:
2386 */ 2368 */
2387 2369
2388static irqreturn_t 2370static irqreturn_t
2389boomerang_interrupt(int irq, void *dev_id) 2371_boomerang_interrupt(int irq, struct net_device *dev)
2390{ 2372{
2391 struct net_device *dev = dev_id;
2392 struct vortex_private *vp = netdev_priv(dev); 2373 struct vortex_private *vp = netdev_priv(dev);
2393 void __iomem *ioaddr; 2374 void __iomem *ioaddr;
2394 int status; 2375 int status;
@@ -2398,12 +2379,6 @@ boomerang_interrupt(int irq, void *dev_id)
2398 2379
2399 ioaddr = vp->ioaddr; 2380 ioaddr = vp->ioaddr;
2400 2381
2401
2402 /*
2403 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
2404 * and boomerang_start_xmit
2405 */
2406 spin_lock(&vp->lock);
2407 vp->handling_irq = 1; 2382 vp->handling_irq = 1;
2408 2383
2409 status = ioread16(ioaddr + EL3_STATUS); 2384 status = ioread16(ioaddr + EL3_STATUS);
@@ -2522,10 +2497,29 @@ boomerang_interrupt(int irq, void *dev_id)
2522 dev->name, status); 2497 dev->name, status);
2523handler_exit: 2498handler_exit:
2524 vp->handling_irq = 0; 2499 vp->handling_irq = 0;
2525 spin_unlock(&vp->lock);
2526 return IRQ_RETVAL(handled); 2500 return IRQ_RETVAL(handled);
2527} 2501}
2528 2502
2503static irqreturn_t
2504vortex_boomerang_interrupt(int irq, void *dev_id)
2505{
2506 struct net_device *dev = dev_id;
2507 struct vortex_private *vp = netdev_priv(dev);
2508 unsigned long flags;
2509 irqreturn_t ret;
2510
2511 spin_lock_irqsave(&vp->lock, flags);
2512
2513 if (vp->full_bus_master_rx)
2514 ret = _boomerang_interrupt(dev->irq, dev);
2515 else
2516 ret = _vortex_interrupt(dev->irq, dev);
2517
2518 spin_unlock_irqrestore(&vp->lock, flags);
2519
2520 return ret;
2521}
2522
2529static int vortex_rx(struct net_device *dev) 2523static int vortex_rx(struct net_device *dev)
2530{ 2524{
2531 struct vortex_private *vp = netdev_priv(dev); 2525 struct vortex_private *vp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index effc651a2a2f..d5fca2e5a9bc 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2144,14 +2144,21 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
2144 .ndo_select_queue = bcm_sysport_select_queue, 2144 .ndo_select_queue = bcm_sysport_select_queue,
2145}; 2145};
2146 2146
2147static int bcm_sysport_map_queues(struct net_device *dev, 2147static int bcm_sysport_map_queues(struct notifier_block *nb,
2148 struct dsa_notifier_register_info *info) 2148 struct dsa_notifier_register_info *info)
2149{ 2149{
2150 struct bcm_sysport_priv *priv = netdev_priv(dev);
2151 struct bcm_sysport_tx_ring *ring; 2150 struct bcm_sysport_tx_ring *ring;
2151 struct bcm_sysport_priv *priv;
2152 struct net_device *slave_dev; 2152 struct net_device *slave_dev;
2153 unsigned int num_tx_queues; 2153 unsigned int num_tx_queues;
2154 unsigned int q, start, port; 2154 unsigned int q, start, port;
2155 struct net_device *dev;
2156
2157 priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
2158 if (priv->netdev != info->master)
2159 return 0;
2160
2161 dev = info->master;
2155 2162
2156 /* We can't be setting up queue inspection for non directly attached 2163 /* We can't be setting up queue inspection for non directly attached
2157 * switches 2164 * switches
@@ -2174,11 +2181,12 @@ static int bcm_sysport_map_queues(struct net_device *dev,
2174 if (priv->is_lite) 2181 if (priv->is_lite)
2175 netif_set_real_num_tx_queues(slave_dev, 2182 netif_set_real_num_tx_queues(slave_dev,
2176 slave_dev->num_tx_queues / 2); 2183 slave_dev->num_tx_queues / 2);
2184
2177 num_tx_queues = slave_dev->real_num_tx_queues; 2185 num_tx_queues = slave_dev->real_num_tx_queues;
2178 2186
2179 if (priv->per_port_num_tx_queues && 2187 if (priv->per_port_num_tx_queues &&
2180 priv->per_port_num_tx_queues != num_tx_queues) 2188 priv->per_port_num_tx_queues != num_tx_queues)
2181 netdev_warn(slave_dev, "asymetric number of per-port queues\n"); 2189 netdev_warn(slave_dev, "asymmetric number of per-port queues\n");
2182 2190
2183 priv->per_port_num_tx_queues = num_tx_queues; 2191 priv->per_port_num_tx_queues = num_tx_queues;
2184 2192
@@ -2201,7 +2209,7 @@ static int bcm_sysport_map_queues(struct net_device *dev,
2201 return 0; 2209 return 0;
2202} 2210}
2203 2211
2204static int bcm_sysport_dsa_notifier(struct notifier_block *unused, 2212static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
2205 unsigned long event, void *ptr) 2213 unsigned long event, void *ptr)
2206{ 2214{
2207 struct dsa_notifier_register_info *info; 2215 struct dsa_notifier_register_info *info;
@@ -2211,7 +2219,7 @@ static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
2211 2219
2212 info = ptr; 2220 info = ptr;
2213 2221
2214 return notifier_from_errno(bcm_sysport_map_queues(info->master, info)); 2222 return notifier_from_errno(bcm_sysport_map_queues(nb, info));
2215} 2223}
2216 2224
2217#define REV_FMT "v%2x.%02x" 2225#define REV_FMT "v%2x.%02x"
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index 7c560d545c03..5a779b19d149 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_BNXT) += bnxt_en.o
2 2
3bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o 3bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
4bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o 4bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
5bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index f83769d8047b..efe5c7203f60 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -62,6 +62,7 @@
62#include "bnxt_vfr.h" 62#include "bnxt_vfr.h"
63#include "bnxt_tc.h" 63#include "bnxt_tc.h"
64#include "bnxt_devlink.h" 64#include "bnxt_devlink.h"
65#include "bnxt_debugfs.h"
65 66
66#define BNXT_TX_TIMEOUT (5 * HZ) 67#define BNXT_TX_TIMEOUT (5 * HZ)
67 68
@@ -2383,6 +2384,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
2383 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 2384 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2384 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 2385 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2385 struct bnxt_ring_struct *ring; 2386 struct bnxt_ring_struct *ring;
2387 u8 qidx;
2386 2388
2387 ring = &txr->tx_ring_struct; 2389 ring = &txr->tx_ring_struct;
2388 2390
@@ -2411,7 +2413,8 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
2411 2413
2412 memset(txr->tx_push, 0, sizeof(struct tx_push_bd)); 2414 memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
2413 } 2415 }
2414 ring->queue_id = bp->q_info[j].queue_id; 2416 qidx = bp->tc_to_qidx[j];
2417 ring->queue_id = bp->q_info[qidx].queue_id;
2415 if (i < bp->tx_nr_rings_xdp) 2418 if (i < bp->tx_nr_rings_xdp)
2416 continue; 2419 continue;
2417 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) 2420 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
@@ -3493,15 +3496,29 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3493 3496
3494 if (!timeout) 3497 if (!timeout)
3495 timeout = DFLT_HWRM_CMD_TIMEOUT; 3498 timeout = DFLT_HWRM_CMD_TIMEOUT;
3499 /* convert timeout to usec */
3500 timeout *= 1000;
3496 3501
3497 i = 0; 3502 i = 0;
3498 tmo_count = timeout * 40; 3503 /* Short timeout for the first few iterations:
3504 * number of loops = number of loops for short timeout +
3505 * number of loops for standard timeout.
3506 */
3507 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
3508 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
3509 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
3499 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; 3510 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
3500 if (intr_process) { 3511 if (intr_process) {
3501 /* Wait until hwrm response cmpl interrupt is processed */ 3512 /* Wait until hwrm response cmpl interrupt is processed */
3502 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && 3513 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
3503 i++ < tmo_count) { 3514 i++ < tmo_count) {
3504 usleep_range(25, 40); 3515 /* on first few passes, just barely sleep */
3516 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
3517 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3518 HWRM_SHORT_MAX_TIMEOUT);
3519 else
3520 usleep_range(HWRM_MIN_TIMEOUT,
3521 HWRM_MAX_TIMEOUT);
3505 } 3522 }
3506 3523
3507 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { 3524 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
@@ -3519,7 +3536,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3519 HWRM_RESP_LEN_SFT; 3536 HWRM_RESP_LEN_SFT;
3520 if (len) 3537 if (len)
3521 break; 3538 break;
3522 usleep_range(25, 40); 3539 /* on first few passes, just barely sleep */
3540 if (i < DFLT_HWRM_CMD_TIMEOUT)
3541 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3542 HWRM_SHORT_MAX_TIMEOUT);
3543 else
3544 usleep_range(HWRM_MIN_TIMEOUT,
3545 HWRM_MAX_TIMEOUT);
3523 } 3546 }
3524 3547
3525 if (i >= tmo_count) { 3548 if (i >= tmo_count) {
@@ -4334,26 +4357,9 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
4334 mutex_unlock(&bp->hwrm_cmd_lock); 4357 mutex_unlock(&bp->hwrm_cmd_lock);
4335 4358
4336 if (rc || err) { 4359 if (rc || err) {
4337 switch (ring_type) { 4360 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
4338 case RING_FREE_REQ_RING_TYPE_L2_CMPL: 4361 ring_type, rc, err);
4339 netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n", 4362 return -EIO;
4340 rc, err);
4341 return -1;
4342
4343 case RING_FREE_REQ_RING_TYPE_RX:
4344 netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
4345 rc, err);
4346 return -1;
4347
4348 case RING_FREE_REQ_RING_TYPE_TX:
4349 netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
4350 rc, err);
4351 return -1;
4352
4353 default:
4354 netdev_err(bp->dev, "Invalid ring\n");
4355 return -1;
4356 }
4357 } 4363 }
4358 ring->fw_ring_id = ring_id; 4364 ring->fw_ring_id = ring_id;
4359 return rc; 4365 return rc;
@@ -4477,23 +4483,9 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
4477 mutex_unlock(&bp->hwrm_cmd_lock); 4483 mutex_unlock(&bp->hwrm_cmd_lock);
4478 4484
4479 if (rc || error_code) { 4485 if (rc || error_code) {
4480 switch (ring_type) { 4486 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
4481 case RING_FREE_REQ_RING_TYPE_L2_CMPL: 4487 ring_type, rc, error_code);
4482 netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n", 4488 return -EIO;
4483 rc);
4484 return rc;
4485 case RING_FREE_REQ_RING_TYPE_RX:
4486 netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
4487 rc);
4488 return rc;
4489 case RING_FREE_REQ_RING_TYPE_TX:
4490 netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
4491 rc);
4492 return rc;
4493 default:
4494 netdev_err(bp->dev, "Invalid ring\n");
4495 return -1;
4496 }
4497 } 4489 }
4498 return 0; 4490 return 0;
4499} 4491}
@@ -4721,6 +4713,10 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4721 4713
4722 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, 4714 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
4723 cp_rings, vnics); 4715 cp_rings, vnics);
4716 req.enables |= cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
4717 FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS);
4718 req.num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
4719 req.num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
4724 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4720 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4725 if (rc) 4721 if (rc)
4726 return -ENOMEM; 4722 return -ENOMEM;
@@ -5309,6 +5305,7 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
5309 for (i = 0; i < bp->max_tc; i++) { 5305 for (i = 0; i < bp->max_tc; i++) {
5310 bp->q_info[i].queue_id = *qptr++; 5306 bp->q_info[i].queue_id = *qptr++;
5311 bp->q_info[i].queue_profile = *qptr++; 5307 bp->q_info[i].queue_profile = *qptr++;
5308 bp->tc_to_qidx[i] = i;
5312 } 5309 }
5313 5310
5314qportcfg_exit: 5311qportcfg_exit:
@@ -5376,7 +5373,8 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp)
5376 struct tm tm; 5373 struct tm tm;
5377 time64_t now = ktime_get_real_seconds(); 5374 time64_t now = ktime_get_real_seconds();
5378 5375
5379 if (bp->hwrm_spec_code < 0x10400) 5376 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
5377 bp->hwrm_spec_code < 0x10400)
5380 return -EOPNOTSUPP; 5378 return -EOPNOTSUPP;
5381 5379
5382 time64_to_tm(now, 0, &tm); 5380 time64_to_tm(now, 0, &tm);
@@ -5958,6 +5956,9 @@ static int bnxt_init_msix(struct bnxt *bp)
5958 if (total_vecs > max) 5956 if (total_vecs > max)
5959 total_vecs = max; 5957 total_vecs = max;
5960 5958
5959 if (!total_vecs)
5960 return 0;
5961
5961 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); 5962 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
5962 if (!msix_ent) 5963 if (!msix_ent)
5963 return -ENOMEM; 5964 return -ENOMEM;
@@ -6843,6 +6844,8 @@ static void bnxt_preset_reg_win(struct bnxt *bp)
6843 } 6844 }
6844} 6845}
6845 6846
6847static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
6848
6846static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 6849static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6847{ 6850{
6848 int rc = 0; 6851 int rc = 0;
@@ -6850,6 +6853,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6850 bnxt_preset_reg_win(bp); 6853 bnxt_preset_reg_win(bp);
6851 netif_carrier_off(bp->dev); 6854 netif_carrier_off(bp->dev);
6852 if (irq_re_init) { 6855 if (irq_re_init) {
6856 /* Reserve rings now if none were reserved at driver probe. */
6857 rc = bnxt_init_dflt_ring_mode(bp);
6858 if (rc) {
6859 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
6860 return rc;
6861 }
6853 rc = bnxt_reserve_rings(bp); 6862 rc = bnxt_reserve_rings(bp);
6854 if (rc) 6863 if (rc)
6855 return rc; 6864 return rc;
@@ -6877,6 +6886,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6877 } 6886 }
6878 6887
6879 bnxt_enable_napi(bp); 6888 bnxt_enable_napi(bp);
6889 bnxt_debug_dev_init(bp);
6880 6890
6881 rc = bnxt_init_nic(bp, irq_re_init); 6891 rc = bnxt_init_nic(bp, irq_re_init);
6882 if (rc) { 6892 if (rc) {
@@ -6909,6 +6919,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6909 return 0; 6919 return 0;
6910 6920
6911open_err: 6921open_err:
6922 bnxt_debug_dev_exit(bp);
6912 bnxt_disable_napi(bp); 6923 bnxt_disable_napi(bp);
6913 bnxt_del_napi(bp); 6924 bnxt_del_napi(bp);
6914 6925
@@ -7002,6 +7013,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
7002 7013
7003 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 7014 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
7004 7015
7016 bnxt_debug_dev_exit(bp);
7005 bnxt_disable_napi(bp); 7017 bnxt_disable_napi(bp);
7006 del_timer_sync(&bp->timer); 7018 del_timer_sync(&bp->timer);
7007 bnxt_free_skbs(bp); 7019 bnxt_free_skbs(bp);
@@ -7279,6 +7291,25 @@ skip_uc:
7279 return rc; 7291 return rc;
7280} 7292}
7281 7293
7294static bool bnxt_can_reserve_rings(struct bnxt *bp)
7295{
7296#ifdef CONFIG_BNXT_SRIOV
7297 if ((bp->flags & BNXT_FLAG_NEW_RM) && BNXT_VF(bp)) {
7298 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7299
7300 /* No minimum rings were provisioned by the PF. Don't
7301 * reserve rings by default when device is down.
7302 */
7303 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
7304 return true;
7305
7306 if (!netif_running(bp->dev))
7307 return false;
7308 }
7309#endif
7310 return true;
7311}
7312
7282/* If the chip and firmware supports RFS */ 7313/* If the chip and firmware supports RFS */
7283static bool bnxt_rfs_supported(struct bnxt *bp) 7314static bool bnxt_rfs_supported(struct bnxt *bp)
7284{ 7315{
@@ -7295,7 +7326,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
7295#ifdef CONFIG_RFS_ACCEL 7326#ifdef CONFIG_RFS_ACCEL
7296 int vnics, max_vnics, max_rss_ctxs; 7327 int vnics, max_vnics, max_rss_ctxs;
7297 7328
7298 if (!(bp->flags & BNXT_FLAG_MSIX_CAP)) 7329 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
7299 return false; 7330 return false;
7300 7331
7301 vnics = 1 + bp->rx_nr_rings; 7332 vnics = 1 + bp->rx_nr_rings;
@@ -7729,7 +7760,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
7729 coal->coal_bufs = 30; 7760 coal->coal_bufs = 30;
7730 coal->coal_ticks_irq = 1; 7761 coal->coal_ticks_irq = 1;
7731 coal->coal_bufs_irq = 2; 7762 coal->coal_bufs_irq = 2;
7732 coal->idle_thresh = 25; 7763 coal->idle_thresh = 50;
7733 coal->bufs_per_record = 2; 7764 coal->bufs_per_record = 2;
7734 coal->budget = 64; /* NAPI budget */ 7765 coal->budget = 64; /* NAPI budget */
7735 7766
@@ -8529,6 +8560,9 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
8529{ 8560{
8530 int dflt_rings, max_rx_rings, max_tx_rings, rc; 8561 int dflt_rings, max_rx_rings, max_tx_rings, rc;
8531 8562
8563 if (!bnxt_can_reserve_rings(bp))
8564 return 0;
8565
8532 if (sh) 8566 if (sh)
8533 bp->flags |= BNXT_FLAG_SHARED_RINGS; 8567 bp->flags |= BNXT_FLAG_SHARED_RINGS;
8534 dflt_rings = netif_get_num_default_rss_queues(); 8568 dflt_rings = netif_get_num_default_rss_queues();
@@ -8574,6 +8608,29 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
8574 return rc; 8608 return rc;
8575} 8609}
8576 8610
8611static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
8612{
8613 int rc;
8614
8615 if (bp->tx_nr_rings)
8616 return 0;
8617
8618 rc = bnxt_set_dflt_rings(bp, true);
8619 if (rc) {
8620 netdev_err(bp->dev, "Not enough rings available.\n");
8621 return rc;
8622 }
8623 rc = bnxt_init_int_mode(bp);
8624 if (rc)
8625 return rc;
8626 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8627 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
8628 bp->flags |= BNXT_FLAG_RFS;
8629 bp->dev->features |= NETIF_F_NTUPLE;
8630 }
8631 return 0;
8632}
8633
8577int bnxt_restore_pf_fw_resources(struct bnxt *bp) 8634int bnxt_restore_pf_fw_resources(struct bnxt *bp)
8578{ 8635{
8579 int rc; 8636 int rc;
@@ -9078,6 +9135,7 @@ static struct pci_driver bnxt_pci_driver = {
9078 9135
9079static int __init bnxt_init(void) 9136static int __init bnxt_init(void)
9080{ 9137{
9138 bnxt_debug_init();
9081 return pci_register_driver(&bnxt_pci_driver); 9139 return pci_register_driver(&bnxt_pci_driver);
9082} 9140}
9083 9141
@@ -9086,6 +9144,7 @@ static void __exit bnxt_exit(void)
9086 pci_unregister_driver(&bnxt_pci_driver); 9144 pci_unregister_driver(&bnxt_pci_driver);
9087 if (bnxt_pf_wq) 9145 if (bnxt_pf_wq)
9088 destroy_workqueue(bnxt_pf_wq); 9146 destroy_workqueue(bnxt_pf_wq);
9147 bnxt_debug_exit();
9089} 9148}
9090 9149
9091module_init(bnxt_init); 9150module_init(bnxt_init);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 3d55d3b56865..8df1d8b9d2e3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -532,6 +532,12 @@ struct rx_tpa_end_cmp_ext {
532#define BNXT_HWRM_REQ_MAX_SIZE 128 532#define BNXT_HWRM_REQ_MAX_SIZE 128
533#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ 533#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
534 BNXT_HWRM_REQ_MAX_SIZE) 534 BNXT_HWRM_REQ_MAX_SIZE)
535#define HWRM_SHORT_MIN_TIMEOUT 3
536#define HWRM_SHORT_MAX_TIMEOUT 10
537#define HWRM_SHORT_TIMEOUT_COUNTER 5
538
539#define HWRM_MIN_TIMEOUT 25
540#define HWRM_MAX_TIMEOUT 40
535 541
536#define BNXT_RX_EVENT 1 542#define BNXT_RX_EVENT 1
537#define BNXT_AGG_EVENT 2 543#define BNXT_AGG_EVENT 2
@@ -1242,6 +1248,7 @@ struct bnxt {
1242 u8 max_tc; 1248 u8 max_tc;
1243 u8 max_lltc; /* lossless TCs */ 1249 u8 max_lltc; /* lossless TCs */
1244 struct bnxt_queue_info q_info[BNXT_MAX_QUEUE]; 1250 struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
1251 u8 tc_to_qidx[BNXT_MAX_QUEUE];
1245 1252
1246 unsigned int current_interval; 1253 unsigned int current_interval;
1247#define BNXT_TIMER_INTERVAL HZ 1254#define BNXT_TIMER_INTERVAL HZ
@@ -1384,6 +1391,8 @@ struct bnxt {
1384 u16 *cfa_code_map; /* cfa_code -> vf_idx map */ 1391 u16 *cfa_code_map; /* cfa_code -> vf_idx map */
1385 u8 switch_id[8]; 1392 u8 switch_id[8];
1386 struct bnxt_tc_info *tc_info; 1393 struct bnxt_tc_info *tc_info;
1394 struct dentry *debugfs_pdev;
1395 struct dentry *debugfs_dim;
1387}; 1396};
1388 1397
1389#define BNXT_RX_STATS_OFFSET(counter) \ 1398#define BNXT_RX_STATS_OFFSET(counter) \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 3c746f2d9ed8..d5bc72cecde3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -21,6 +21,21 @@
21#include "bnxt_dcb.h" 21#include "bnxt_dcb.h"
22 22
23#ifdef CONFIG_BNXT_DCB 23#ifdef CONFIG_BNXT_DCB
24static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id)
25{
26 int i, j;
27
28 for (i = 0; i < bp->max_tc; i++) {
29 if (bp->q_info[i].queue_id == queue_id) {
30 for (j = 0; j < bp->max_tc; j++) {
31 if (bp->tc_to_qidx[j] == i)
32 return j;
33 }
34 }
35 }
36 return -EINVAL;
37}
38
24static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets) 39static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
25{ 40{
26 struct hwrm_queue_pri2cos_cfg_input req = {0}; 41 struct hwrm_queue_pri2cos_cfg_input req = {0};
@@ -33,10 +48,13 @@ static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
33 48
34 pri2cos = &req.pri0_cos_queue_id; 49 pri2cos = &req.pri0_cos_queue_id;
35 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 50 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
51 u8 qidx;
52
36 req.enables |= cpu_to_le32( 53 req.enables |= cpu_to_le32(
37 QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i); 54 QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);
38 55
39 pri2cos[i] = bp->q_info[ets->prio_tc[i]].queue_id; 56 qidx = bp->tc_to_qidx[ets->prio_tc[i]];
57 pri2cos[i] = bp->q_info[qidx].queue_id;
40 } 58 }
41 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 59 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
42 return rc; 60 return rc;
@@ -55,17 +73,15 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
55 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 73 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
56 if (!rc) { 74 if (!rc) {
57 u8 *pri2cos = &resp->pri0_cos_queue_id; 75 u8 *pri2cos = &resp->pri0_cos_queue_id;
58 int i, j; 76 int i;
59 77
60 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 78 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
61 u8 queue_id = pri2cos[i]; 79 u8 queue_id = pri2cos[i];
80 int tc;
62 81
63 for (j = 0; j < bp->max_tc; j++) { 82 tc = bnxt_queue_to_tc(bp, queue_id);
64 if (bp->q_info[j].queue_id == queue_id) { 83 if (tc >= 0)
65 ets->prio_tc[i] = j; 84 ets->prio_tc[i] = tc;
66 break;
67 }
68 }
69 } 85 }
70 } 86 }
71 mutex_unlock(&bp->hwrm_cmd_lock); 87 mutex_unlock(&bp->hwrm_cmd_lock);
@@ -81,13 +97,15 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
81 void *data; 97 void *data;
82 98
83 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1); 99 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
84 data = &req.unused_0; 100 for (i = 0; i < max_tc; i++) {
85 for (i = 0; i < max_tc; i++, data += sizeof(cos2bw) - 4) { 101 u8 qidx;
102
86 req.enables |= cpu_to_le32( 103 req.enables |= cpu_to_le32(
87 QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i); 104 QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
88 105
89 memset(&cos2bw, 0, sizeof(cos2bw)); 106 memset(&cos2bw, 0, sizeof(cos2bw));
90 cos2bw.queue_id = bp->q_info[i].queue_id; 107 qidx = bp->tc_to_qidx[i];
108 cos2bw.queue_id = bp->q_info[qidx].queue_id;
91 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) { 109 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
92 cos2bw.tsa = 110 cos2bw.tsa =
93 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP; 111 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
@@ -103,8 +121,9 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
103 cpu_to_le32((ets->tc_tx_bw[i] * 100) | 121 cpu_to_le32((ets->tc_tx_bw[i] * 100) |
104 BW_VALUE_UNIT_PERCENT1_100); 122 BW_VALUE_UNIT_PERCENT1_100);
105 } 123 }
124 data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4);
106 memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4); 125 memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
107 if (i == 0) { 126 if (qidx == 0) {
108 req.queue_id0 = cos2bw.queue_id; 127 req.queue_id0 = cos2bw.queue_id;
109 req.unused_0 = 0; 128 req.unused_0 = 0;
110 } 129 }
@@ -132,66 +151,81 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
132 151
133 data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id); 152 data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
134 for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) { 153 for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
135 int j; 154 int tc;
136 155
137 memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4); 156 memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
138 if (i == 0) 157 if (i == 0)
139 cos2bw.queue_id = resp->queue_id0; 158 cos2bw.queue_id = resp->queue_id0;
140 159
141 for (j = 0; j < bp->max_tc; j++) { 160 tc = bnxt_queue_to_tc(bp, cos2bw.queue_id);
142 if (bp->q_info[j].queue_id != cos2bw.queue_id) 161 if (tc < 0)
143 continue; 162 continue;
144 if (cos2bw.tsa == 163
145 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) { 164 if (cos2bw.tsa ==
146 ets->tc_tsa[j] = IEEE_8021QAZ_TSA_STRICT; 165 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
147 } else { 166 ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_STRICT;
148 ets->tc_tsa[j] = IEEE_8021QAZ_TSA_ETS; 167 } else {
149 ets->tc_tx_bw[j] = cos2bw.bw_weight; 168 ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS;
150 } 169 ets->tc_tx_bw[tc] = cos2bw.bw_weight;
151 } 170 }
152 } 171 }
153 mutex_unlock(&bp->hwrm_cmd_lock); 172 mutex_unlock(&bp->hwrm_cmd_lock);
154 return 0; 173 return 0;
155} 174}
156 175
157static int bnxt_hwrm_queue_cfg(struct bnxt *bp, unsigned int lltc_mask) 176static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
158{ 177{
159 struct hwrm_queue_cfg_input req = {0}; 178 unsigned long qmap = 0;
160 int i; 179 int max = bp->max_tc;
180 int i, j, rc;
161 181
162 if (netif_running(bp->dev)) 182 /* Assign lossless TCs first */
163 bnxt_tx_disable(bp); 183 for (i = 0, j = 0; i < max; ) {
184 if (lltc_mask & (1 << i)) {
185 if (BNXT_LLQ(bp->q_info[j].queue_profile)) {
186 bp->tc_to_qidx[i] = j;
187 __set_bit(j, &qmap);
188 i++;
189 }
190 j++;
191 continue;
192 }
193 i++;
194 }
164 195
165 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_CFG, -1, -1); 196 for (i = 0, j = 0; i < max; i++) {
166 req.flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR); 197 if (lltc_mask & (1 << i))
167 req.enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE); 198 continue;
199 j = find_next_zero_bit(&qmap, max, j);
200 bp->tc_to_qidx[i] = j;
201 __set_bit(j, &qmap);
202 j++;
203 }
168 204
169 /* Configure lossless queues to lossy first */ 205 if (netif_running(bp->dev)) {
170 req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY; 206 bnxt_close_nic(bp, false, false);
171 for (i = 0; i < bp->max_tc; i++) { 207 rc = bnxt_open_nic(bp, false, false);
172 if (BNXT_LLQ(bp->q_info[i].queue_profile)) { 208 if (rc) {
173 req.queue_id = cpu_to_le32(bp->q_info[i].queue_id); 209 netdev_warn(bp->dev, "failed to open NIC, rc = %d\n", rc);
174 hwrm_send_message(bp, &req, sizeof(req), 210 return rc;
175 HWRM_CMD_TIMEOUT);
176 bp->q_info[i].queue_profile =
177 QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
178 } 211 }
179 } 212 }
180 213 if (bp->ieee_ets) {
181 /* Now configure desired queues to lossless */ 214 int tc = netdev_get_num_tc(bp->dev);
182 req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS; 215
183 for (i = 0; i < bp->max_tc; i++) { 216 if (!tc)
184 if (lltc_mask & (1 << i)) { 217 tc = 1;
185 req.queue_id = cpu_to_le32(bp->q_info[i].queue_id); 218 rc = bnxt_hwrm_queue_cos2bw_cfg(bp, bp->ieee_ets, tc);
186 hwrm_send_message(bp, &req, sizeof(req), 219 if (rc) {
187 HWRM_CMD_TIMEOUT); 220 netdev_warn(bp->dev, "failed to config BW, rc = %d\n", rc);
188 bp->q_info[i].queue_profile = 221 return rc;
189 QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS; 222 }
223 rc = bnxt_hwrm_queue_pri2cos_cfg(bp, bp->ieee_ets);
224 if (rc) {
225 netdev_warn(bp->dev, "failed to config prio, rc = %d\n", rc);
226 return rc;
190 } 227 }
191 } 228 }
192 if (netif_running(bp->dev))
193 bnxt_tx_enable(bp);
194
195 return 0; 229 return 0;
196} 230}
197 231
@@ -201,7 +235,7 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
201 struct ieee_ets *my_ets = bp->ieee_ets; 235 struct ieee_ets *my_ets = bp->ieee_ets;
202 unsigned int tc_mask = 0, pri_mask = 0; 236 unsigned int tc_mask = 0, pri_mask = 0;
203 u8 i, pri, lltc_count = 0; 237 u8 i, pri, lltc_count = 0;
204 bool need_q_recfg = false; 238 bool need_q_remap = false;
205 int rc; 239 int rc;
206 240
207 if (!my_ets) 241 if (!my_ets)
@@ -221,21 +255,25 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
221 if (lltc_count > bp->max_lltc) 255 if (lltc_count > bp->max_lltc)
222 return -EINVAL; 256 return -EINVAL;
223 257
224 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
225 req.flags = cpu_to_le32(pri_mask);
226 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
227 if (rc)
228 return rc;
229
230 for (i = 0; i < bp->max_tc; i++) { 258 for (i = 0; i < bp->max_tc; i++) {
231 if (tc_mask & (1 << i)) { 259 if (tc_mask & (1 << i)) {
232 if (!BNXT_LLQ(bp->q_info[i].queue_profile)) 260 u8 qidx = bp->tc_to_qidx[i];
233 need_q_recfg = true; 261
262 if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) {
263 need_q_remap = true;
264 break;
265 }
234 } 266 }
235 } 267 }
236 268
237 if (need_q_recfg) 269 if (need_q_remap)
238 rc = bnxt_hwrm_queue_cfg(bp, tc_mask); 270 rc = bnxt_queue_remap(bp, tc_mask);
271
272 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
273 req.flags = cpu_to_le32(pri_mask);
274 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
275 if (rc)
276 return rc;
239 277
240 return rc; 278 return rc;
241} 279}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
new file mode 100644
index 000000000000..94e208e9789f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
@@ -0,0 +1,124 @@
1/* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2017-2018 Broadcom Limited
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 */
9
10#include <linux/debugfs.h>
11#include <linux/module.h>
12#include <linux/pci.h>
13#include "bnxt_hsi.h"
14#include <linux/net_dim.h>
15#include "bnxt.h"
16#include "bnxt_debugfs.h"
17
18static struct dentry *bnxt_debug_mnt;
19
20static ssize_t debugfs_dim_read(struct file *filep,
21 char __user *buffer,
22 size_t count, loff_t *ppos)
23{
24 struct net_dim *dim = filep->private_data;
25 int len;
26 char *buf;
27
28 if (*ppos)
29 return 0;
30 if (!dim)
31 return -ENODEV;
32 buf = kasprintf(GFP_KERNEL,
33 "state = %d\n" \
34 "profile_ix = %d\n" \
35 "mode = %d\n" \
36 "tune_state = %d\n" \
37 "steps_right = %d\n" \
38 "steps_left = %d\n" \
39 "tired = %d\n",
40 dim->state,
41 dim->profile_ix,
42 dim->mode,
43 dim->tune_state,
44 dim->steps_right,
45 dim->steps_left,
46 dim->tired);
47 if (!buf)
48 return -ENOMEM;
49 if (count < strlen(buf)) {
50 kfree(buf);
51 return -ENOSPC;
52 }
53 len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
54 kfree(buf);
55 return len;
56}
57
58static const struct file_operations debugfs_dim_fops = {
59 .owner = THIS_MODULE,
60 .open = simple_open,
61 .read = debugfs_dim_read,
62};
63
64static struct dentry *debugfs_dim_ring_init(struct net_dim *dim, int ring_idx,
65 struct dentry *dd)
66{
67 static char qname[16];
68
69 snprintf(qname, 10, "%d", ring_idx);
70 return debugfs_create_file(qname, 0600, dd,
71 dim, &debugfs_dim_fops);
72}
73
74void bnxt_debug_dev_init(struct bnxt *bp)
75{
76 const char *pname = pci_name(bp->pdev);
77 struct dentry *pdevf;
78 int i;
79
80 bp->debugfs_pdev = debugfs_create_dir(pname, bnxt_debug_mnt);
81 if (bp->debugfs_pdev) {
82 pdevf = debugfs_create_dir("dim", bp->debugfs_pdev);
83 if (!pdevf) {
84 pr_err("failed to create debugfs entry %s/dim\n",
85 pname);
86 return;
87 }
88 bp->debugfs_dim = pdevf;
89 /* create files for each rx ring */
90 for (i = 0; i < bp->cp_nr_rings; i++) {
91 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
92
93 if (cpr && bp->bnapi[i]->rx_ring) {
94 pdevf = debugfs_dim_ring_init(&cpr->dim, i,
95 bp->debugfs_dim);
96 if (!pdevf)
97 pr_err("failed to create debugfs entry %s/dim/%d\n",
98 pname, i);
99 }
100 }
101 } else {
102 pr_err("failed to create debugfs entry %s\n", pname);
103 }
104}
105
106void bnxt_debug_dev_exit(struct bnxt *bp)
107{
108 if (bp) {
109 debugfs_remove_recursive(bp->debugfs_pdev);
110 bp->debugfs_pdev = NULL;
111 }
112}
113
114void bnxt_debug_init(void)
115{
116 bnxt_debug_mnt = debugfs_create_dir("bnxt_en", NULL);
117 if (!bnxt_debug_mnt)
118 pr_err("failed to init bnxt_en debugfs\n");
119}
120
121void bnxt_debug_exit(void)
122{
123 debugfs_remove_recursive(bnxt_debug_mnt);
124}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
new file mode 100644
index 000000000000..d0bb4887acd0
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h
@@ -0,0 +1,23 @@
1/* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2017-2018 Broadcom Limited
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 */
9
10#include "bnxt_hsi.h"
11#include "bnxt.h"
12
13#ifdef CONFIG_DEBUG_FS
14void bnxt_debug_init(void);
15void bnxt_debug_exit(void);
16void bnxt_debug_dev_init(struct bnxt *bp);
17void bnxt_debug_dev_exit(struct bnxt *bp);
18#else
19static inline void bnxt_debug_init(void) {}
20static inline void bnxt_debug_exit(void) {}
21static inline void bnxt_debug_dev_init(struct bnxt *bp) {}
22static inline void bnxt_debug_dev_exit(struct bnxt *bp) {}
23#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8ba14ae00e8f..ad98b78f5aa1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -140,6 +140,19 @@ reset_coalesce:
140#define BNXT_RX_STATS_EXT_ENTRY(counter) \ 140#define BNXT_RX_STATS_EXT_ENTRY(counter) \
141 { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) } 141 { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
142 142
143enum {
144 RX_TOTAL_DISCARDS,
145 TX_TOTAL_DISCARDS,
146};
147
148static struct {
149 u64 counter;
150 char string[ETH_GSTRING_LEN];
151} bnxt_sw_func_stats[] = {
152 {0, "rx_total_discard_pkts"},
153 {0, "tx_total_discard_pkts"},
154};
155
143static const struct { 156static const struct {
144 long offset; 157 long offset;
145 char string[ETH_GSTRING_LEN]; 158 char string[ETH_GSTRING_LEN];
@@ -237,6 +250,7 @@ static const struct {
237 BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events), 250 BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
238}; 251};
239 252
253#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
240#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) 254#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
241#define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr) 255#define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr)
242 256
@@ -244,6 +258,8 @@ static int bnxt_get_num_stats(struct bnxt *bp)
244{ 258{
245 int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings; 259 int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
246 260
261 num_stats += BNXT_NUM_SW_FUNC_STATS;
262
247 if (bp->flags & BNXT_FLAG_PORT_STATS) 263 if (bp->flags & BNXT_FLAG_PORT_STATS)
248 num_stats += BNXT_NUM_PORT_STATS; 264 num_stats += BNXT_NUM_PORT_STATS;
249 265
@@ -279,6 +295,9 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
279 if (!bp->bnapi) 295 if (!bp->bnapi)
280 return; 296 return;
281 297
298 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
299 bnxt_sw_func_stats[i].counter = 0;
300
282 for (i = 0; i < bp->cp_nr_rings; i++) { 301 for (i = 0; i < bp->cp_nr_rings; i++) {
283 struct bnxt_napi *bnapi = bp->bnapi[i]; 302 struct bnxt_napi *bnapi = bp->bnapi[i];
284 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 303 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
@@ -288,7 +307,16 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
288 for (k = 0; k < stat_fields; j++, k++) 307 for (k = 0; k < stat_fields; j++, k++)
289 buf[j] = le64_to_cpu(hw_stats[k]); 308 buf[j] = le64_to_cpu(hw_stats[k]);
290 buf[j++] = cpr->rx_l4_csum_errors; 309 buf[j++] = cpr->rx_l4_csum_errors;
310
311 bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
312 le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
313 bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
314 le64_to_cpu(cpr->hw_stats->tx_discard_pkts);
291 } 315 }
316
317 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
318 buf[j] = bnxt_sw_func_stats[i].counter;
319
292 if (bp->flags & BNXT_FLAG_PORT_STATS) { 320 if (bp->flags & BNXT_FLAG_PORT_STATS) {
293 __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats; 321 __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
294 322
@@ -359,6 +387,11 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
359 sprintf(buf, "[%d]: rx_l4_csum_errors", i); 387 sprintf(buf, "[%d]: rx_l4_csum_errors", i);
360 buf += ETH_GSTRING_LEN; 388 buf += ETH_GSTRING_LEN;
361 } 389 }
390 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
391 strcpy(buf, bnxt_sw_func_stats[i].string);
392 buf += ETH_GSTRING_LEN;
393 }
394
362 if (bp->flags & BNXT_FLAG_PORT_STATS) { 395 if (bp->flags & BNXT_FLAG_PORT_STATS) {
363 for (i = 0; i < BNXT_NUM_PORT_STATS; i++) { 396 for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
364 strcpy(buf, bnxt_port_stats_arr[i].string); 397 strcpy(buf, bnxt_port_stats_arr[i].string);
@@ -551,6 +584,8 @@ static int bnxt_set_channels(struct net_device *dev,
551 * to renable 584 * to renable
552 */ 585 */
553 } 586 }
587 } else {
588 rc = bnxt_reserve_rings(bp);
554 } 589 }
555 590
556 return rc; 591 return rc;
@@ -1785,6 +1820,11 @@ static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
1785 1820
1786static int bnxt_get_eeprom_len(struct net_device *dev) 1821static int bnxt_get_eeprom_len(struct net_device *dev)
1787{ 1822{
1823 struct bnxt *bp = netdev_priv(dev);
1824
1825 if (BNXT_VF(bp))
1826 return 0;
1827
1788 /* The -1 return value allows the entire 32-bit range of offsets to be 1828 /* The -1 return value allows the entire 32-bit range of offsets to be
1789 * passed via the ethtool command-line utility. 1829 * passed via the ethtool command-line utility.
1790 */ 1830 */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index f952963d594e..cc21d874eb6e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -462,13 +462,13 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
462 vf_vnics = hw_resc->max_vnics - bp->nr_vnics; 462 vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
463 vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); 463 vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
464 464
465 req.min_rsscos_ctx = cpu_to_le16(1); 465 req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
466 req.max_rsscos_ctx = cpu_to_le16(1); 466 req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
467 if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) { 467 if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
468 req.min_cmpl_rings = cpu_to_le16(1); 468 req.min_cmpl_rings = cpu_to_le16(1);
469 req.min_tx_rings = cpu_to_le16(1); 469 req.min_tx_rings = cpu_to_le16(1);
470 req.min_rx_rings = cpu_to_le16(1); 470 req.min_rx_rings = cpu_to_le16(1);
471 req.min_l2_ctxs = cpu_to_le16(1); 471 req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MIN_L2_CTX);
472 req.min_vnics = cpu_to_le16(1); 472 req.min_vnics = cpu_to_le16(1);
473 req.min_stat_ctx = cpu_to_le16(1); 473 req.min_stat_ctx = cpu_to_le16(1);
474 req.min_hw_ring_grps = cpu_to_le16(1); 474 req.min_hw_ring_grps = cpu_to_le16(1);
@@ -483,7 +483,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
483 req.min_cmpl_rings = cpu_to_le16(vf_cp_rings); 483 req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
484 req.min_tx_rings = cpu_to_le16(vf_tx_rings); 484 req.min_tx_rings = cpu_to_le16(vf_tx_rings);
485 req.min_rx_rings = cpu_to_le16(vf_rx_rings); 485 req.min_rx_rings = cpu_to_le16(vf_rx_rings);
486 req.min_l2_ctxs = cpu_to_le16(4); 486 req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
487 req.min_vnics = cpu_to_le16(vf_vnics); 487 req.min_vnics = cpu_to_le16(vf_vnics);
488 req.min_stat_ctx = cpu_to_le16(vf_stat_ctx); 488 req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
489 req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps); 489 req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
@@ -491,7 +491,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
491 req.max_cmpl_rings = cpu_to_le16(vf_cp_rings); 491 req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
492 req.max_tx_rings = cpu_to_le16(vf_tx_rings); 492 req.max_tx_rings = cpu_to_le16(vf_tx_rings);
493 req.max_rx_rings = cpu_to_le16(vf_rx_rings); 493 req.max_rx_rings = cpu_to_le16(vf_rx_rings);
494 req.max_l2_ctxs = cpu_to_le16(4); 494 req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
495 req.max_vnics = cpu_to_le16(vf_vnics); 495 req.max_vnics = cpu_to_le16(vf_vnics);
496 req.max_stat_ctx = cpu_to_le16(vf_stat_ctx); 496 req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
497 req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps); 497 req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
@@ -809,6 +809,9 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
809 struct hwrm_fwd_resp_input req = {0}; 809 struct hwrm_fwd_resp_input req = {0};
810 struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; 810 struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
811 811
812 if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
813 return -EINVAL;
814
812 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1); 815 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
813 816
814 /* Set the new target id */ 817 /* Set the new target id */
@@ -845,6 +848,9 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
845 struct hwrm_reject_fwd_resp_input req = {0}; 848 struct hwrm_reject_fwd_resp_input req = {0};
846 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; 849 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
847 850
851 if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
852 return -EINVAL;
853
848 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1); 854 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
849 /* Set the new target id */ 855 /* Set the new target id */
850 req.target_id = cpu_to_le16(vf->fw_fid); 856 req.target_id = cpu_to_le16(vf->fw_fid);
@@ -877,6 +883,9 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
877 struct hwrm_exec_fwd_resp_input req = {0}; 883 struct hwrm_exec_fwd_resp_input req = {0};
878 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; 884 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
879 885
886 if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
887 return -EINVAL;
888
880 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1); 889 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
881 /* Set the new target id */ 890 /* Set the new target id */
882 req.target_id = cpu_to_le16(vf->fw_fid); 891 req.target_id = cpu_to_le16(vf->fw_fid);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index d10f6f6c7860..e9b20cd19881 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -11,6 +11,23 @@
11#ifndef BNXT_SRIOV_H 11#ifndef BNXT_SRIOV_H
12#define BNXT_SRIOV_H 12#define BNXT_SRIOV_H
13 13
14#define BNXT_FWD_RESP_SIZE_ERR(n) \
15 ((offsetof(struct hwrm_fwd_resp_input, encap_resp) + n) > \
16 sizeof(struct hwrm_fwd_resp_input))
17
18#define BNXT_EXEC_FWD_RESP_SIZE_ERR(n) \
19 ((offsetof(struct hwrm_exec_fwd_resp_input, encap_request) + n) >\
20 offsetof(struct hwrm_exec_fwd_resp_input, encap_resp_target_id))
21
22#define BNXT_REJ_FWD_RESP_SIZE_ERR(n) \
23 ((offsetof(struct hwrm_reject_fwd_resp_input, encap_request) + n) >\
24 offsetof(struct hwrm_reject_fwd_resp_input, encap_resp_target_id))
25
26#define BNXT_VF_MIN_RSS_CTX 1
27#define BNXT_VF_MAX_RSS_CTX 1
28#define BNXT_VF_MIN_L2_CTX 1
29#define BNXT_VF_MAX_L2_CTX 4
30
14int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *); 31int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
15int bnxt_set_vf_mac(struct net_device *, int, u8 *); 32int bnxt_set_vf_mac(struct net_device *, int, u8 *);
16int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16); 33int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index bc9861c90ea3..929d485a3a2f 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -1245,7 +1245,7 @@ static void cn23xx_setup_reg_address(struct octeon_device *oct)
1245 CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num); 1245 CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
1246} 1246}
1247 1247
1248static int cn23xx_sriov_config(struct octeon_device *oct) 1248int cn23xx_sriov_config(struct octeon_device *oct)
1249{ 1249{
1250 struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; 1250 struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
1251 u32 max_rings, total_rings, max_vfs, rings_per_vf; 1251 u32 max_rings, total_rings, max_vfs, rings_per_vf;
@@ -1269,8 +1269,8 @@ static int cn23xx_sriov_config(struct octeon_device *oct)
1269 break; 1269 break;
1270 } 1270 }
1271 1271
1272 if (max_rings <= num_present_cpus()) 1272 if (oct->sriov_info.num_pf_rings)
1273 num_pf_rings = 1; 1273 num_pf_rings = oct->sriov_info.num_pf_rings;
1274 else 1274 else
1275 num_pf_rings = num_present_cpus(); 1275 num_pf_rings = num_present_cpus();
1276 1276
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
index 63b3de4f2bfe..e6f31d0d5c0b 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -61,6 +61,8 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
61 61
62void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct); 62void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct);
63 63
64int cn23xx_sriov_config(struct octeon_device *oct);
65
64int cn23xx_fw_loaded(struct octeon_device *oct); 66int cn23xx_fw_loaded(struct octeon_device *oct);
65 67
66void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx, 68void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 2a94eee943b2..8093c5eafea2 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -29,6 +29,162 @@
29/* OOM task polling interval */ 29/* OOM task polling interval */
30#define LIO_OOM_POLL_INTERVAL_MS 250 30#define LIO_OOM_POLL_INTERVAL_MS 250
31 31
32#define OCTNIC_MAX_SG MAX_SKB_FRAGS
33
34/**
35 * \brief Callback for getting interface configuration
36 * @param status status of request
37 * @param buf pointer to resp structure
38 */
39void lio_if_cfg_callback(struct octeon_device *oct,
40 u32 status __attribute__((unused)), void *buf)
41{
42 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
43 struct liquidio_if_cfg_context *ctx;
44 struct liquidio_if_cfg_resp *resp;
45
46 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
47 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
48
49 oct = lio_get_device(ctx->octeon_id);
50 if (resp->status)
51 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
52 CVM_CAST64(resp->status));
53 WRITE_ONCE(ctx->cond, 1);
54
55 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
56 resp->cfg_info.liquidio_firmware_version);
57
58 /* This barrier is required to be sure that the response has been
59 * written fully before waking up the handler
60 */
61 wmb();
62
63 wake_up_interruptible(&ctx->wc);
64}
65
66/**
67 * \brief Delete gather lists
68 * @param lio per-network private data
69 */
70void lio_delete_glists(struct lio *lio)
71{
72 struct octnic_gather *g;
73 int i;
74
75 kfree(lio->glist_lock);
76 lio->glist_lock = NULL;
77
78 if (!lio->glist)
79 return;
80
81 for (i = 0; i < lio->oct_dev->num_iqs; i++) {
82 do {
83 g = (struct octnic_gather *)
84 lio_list_delete_head(&lio->glist[i]);
85 kfree(g);
86 } while (g);
87
88 if (lio->glists_virt_base && lio->glists_virt_base[i] &&
89 lio->glists_dma_base && lio->glists_dma_base[i]) {
90 lio_dma_free(lio->oct_dev,
91 lio->glist_entry_size * lio->tx_qsize,
92 lio->glists_virt_base[i],
93 lio->glists_dma_base[i]);
94 }
95 }
96
97 kfree(lio->glists_virt_base);
98 lio->glists_virt_base = NULL;
99
100 kfree(lio->glists_dma_base);
101 lio->glists_dma_base = NULL;
102
103 kfree(lio->glist);
104 lio->glist = NULL;
105}
106
107/**
108 * \brief Setup gather lists
109 * @param lio per-network private data
110 */
111int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
112{
113 struct octnic_gather *g;
114 int i, j;
115
116 lio->glist_lock =
117 kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
118 if (!lio->glist_lock)
119 return -ENOMEM;
120
121 lio->glist =
122 kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
123 if (!lio->glist) {
124 kfree(lio->glist_lock);
125 lio->glist_lock = NULL;
126 return -ENOMEM;
127 }
128
129 lio->glist_entry_size =
130 ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
131
132 /* allocate memory to store virtual and dma base address of
133 * per glist consistent memory
134 */
135 lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
136 GFP_KERNEL);
137 lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
138 GFP_KERNEL);
139
140 if (!lio->glists_virt_base || !lio->glists_dma_base) {
141 lio_delete_glists(lio);
142 return -ENOMEM;
143 }
144
145 for (i = 0; i < num_iqs; i++) {
146 int numa_node = dev_to_node(&oct->pci_dev->dev);
147
148 spin_lock_init(&lio->glist_lock[i]);
149
150 INIT_LIST_HEAD(&lio->glist[i]);
151
152 lio->glists_virt_base[i] =
153 lio_dma_alloc(oct,
154 lio->glist_entry_size * lio->tx_qsize,
155 &lio->glists_dma_base[i]);
156
157 if (!lio->glists_virt_base[i]) {
158 lio_delete_glists(lio);
159 return -ENOMEM;
160 }
161
162 for (j = 0; j < lio->tx_qsize; j++) {
163 g = kzalloc_node(sizeof(*g), GFP_KERNEL,
164 numa_node);
165 if (!g)
166 g = kzalloc(sizeof(*g), GFP_KERNEL);
167 if (!g)
168 break;
169
170 g->sg = lio->glists_virt_base[i] +
171 (j * lio->glist_entry_size);
172
173 g->sg_dma_ptr = lio->glists_dma_base[i] +
174 (j * lio->glist_entry_size);
175
176 list_add_tail(&g->list, &lio->glist[i]);
177 }
178
179 if (j != lio->tx_qsize) {
180 lio_delete_glists(lio);
181 return -ENOMEM;
182 }
183 }
184
185 return 0;
186}
187
32int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1) 188int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
33{ 189{
34 struct lio *lio = GET_LIO(netdev); 190 struct lio *lio = GET_LIO(netdev);
@@ -880,8 +1036,8 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
880 int num_ioq_vectors; 1036 int num_ioq_vectors;
881 int irqret, err; 1037 int irqret, err;
882 1038
883 oct->num_msix_irqs = num_ioqs;
884 if (oct->msix_on) { 1039 if (oct->msix_on) {
1040 oct->num_msix_irqs = num_ioqs;
885 if (OCTEON_CN23XX_PF(oct)) { 1041 if (OCTEON_CN23XX_PF(oct)) {
886 num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1; 1042 num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;
887 1043
@@ -1169,3 +1325,355 @@ int lio_wait_for_clean_oq(struct octeon_device *oct)
1169 1325
1170 return pending_pkts; 1326 return pending_pkts;
1171} 1327}
1328
/* Completion callback for the OPCODE_NIC_PORT_STATS soft command.
 *
 * Runs when the firmware answers (or the request times out).  On success
 * it byte-swaps the firmware response in place and copies every link- and
 * firmware-level RX/TX counter into the octeon device's cached
 * link_stats.  resp->status is overwritten with an in-band result code
 * for the waiter (1 = stats valid, -1 = failed/timed out), and the
 * completion in the soft command's context is signalled so
 * octnet_get_link_stats() can proceed.
 *
 * @oct_dev: device the command was issued on
 * @status:  driver-level request status (OCTEON_REQUEST_TIMEOUT on timeout)
 * @ptr:     the soft command itself (callback_arg == sc)
 */
static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
			  u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct oct_nic_stats_resp *resp =
	    (struct oct_nic_stats_resp *)sc->virtrptr;
	struct oct_nic_stats_ctrl *ctrl =
	    (struct oct_nic_stats_ctrl *)sc->ctxptr;
	/* rsp_* point into the firmware response; rstats/tstats are the
	 * driver-side cache being refreshed.
	 */
	struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
	struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
	struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
	struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;

	if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
		/* Firmware data arrives in the wrong byte order for the
		 * host; swap the whole stats struct 8 bytes at a time.
		 */
		octeon_swap_8B_data((u64 *)&resp->stats,
				    (sizeof(struct oct_link_stats)) >> 3);

		/* RX link-level stats */
		rstats->total_rcvd = rsp_rstats->total_rcvd;
		rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
		rstats->total_bcst = rsp_rstats->total_bcst;
		rstats->total_mcst = rsp_rstats->total_mcst;
		rstats->runts = rsp_rstats->runts;
		rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
		/* Accounts for over/under-run of buffers */
		rstats->fifo_err = rsp_rstats->fifo_err;
		rstats->dmac_drop = rsp_rstats->dmac_drop;
		rstats->fcs_err = rsp_rstats->fcs_err;
		rstats->jabber_err = rsp_rstats->jabber_err;
		rstats->l2_err = rsp_rstats->l2_err;
		rstats->frame_err = rsp_rstats->frame_err;
		rstats->red_drops = rsp_rstats->red_drops;

		/* RX firmware stats */
		rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
		rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
		rstats->fw_total_mcast = rsp_rstats->fw_total_mcast;
		rstats->fw_total_bcast = rsp_rstats->fw_total_bcast;
		rstats->fw_err_pko = rsp_rstats->fw_err_pko;
		rstats->fw_err_link = rsp_rstats->fw_err_link;
		rstats->fw_err_drop = rsp_rstats->fw_err_drop;
		rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
		rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;

		/* Number of packets that are LROed */
		rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
		/* Number of octets that are LROed */
		rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
		/* Number of LRO packets formed */
		rstats->fw_total_lro = rsp_rstats->fw_total_lro;
		/* Number of times lRO of packet aborted */
		rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
		rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
		rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
		rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
		rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
		/* intrmod: packet forward rate */
		rstats->fwd_rate = rsp_rstats->fwd_rate;

		/* TX link-level stats */
		tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
		tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
		tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
		tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
		tstats->ctl_sent = rsp_tstats->ctl_sent;
		/* Packets sent after one collision*/
		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
		/* Packets sent after multiple collision*/
		tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
		/* Packets not sent due to max collisions */
		tstats->max_collision_fail = rsp_tstats->max_collision_fail;
		/* Packets not sent due to max deferrals */
		tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
		/* Accounts for over/under-run of buffers */
		tstats->fifo_err = rsp_tstats->fifo_err;
		tstats->runts = rsp_tstats->runts;
		/* Total number of collisions detected */
		tstats->total_collisions = rsp_tstats->total_collisions;

		/* firmware stats */
		tstats->fw_total_sent = rsp_tstats->fw_total_sent;
		tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
		tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent;
		tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent;
		tstats->fw_err_pko = rsp_tstats->fw_err_pko;
		tstats->fw_err_pki = rsp_tstats->fw_err_pki;
		tstats->fw_err_link = rsp_tstats->fw_err_link;
		tstats->fw_err_drop = rsp_tstats->fw_err_drop;
		tstats->fw_tso = rsp_tstats->fw_tso;
		tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
		tstats->fw_err_tso = rsp_tstats->fw_err_tso;
		tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;

		/* Tell the waiter the snapshot is valid. */
		resp->status = 1;
	} else {
		/* Timed out or firmware reported failure. */
		resp->status = -1;
	}
	complete(&ctrl->complete);
}
1429
1430int octnet_get_link_stats(struct net_device *netdev)
1431{
1432 struct lio *lio = GET_LIO(netdev);
1433 struct octeon_device *oct_dev = lio->oct_dev;
1434 struct octeon_soft_command *sc;
1435 struct oct_nic_stats_ctrl *ctrl;
1436 struct oct_nic_stats_resp *resp;
1437 int retval;
1438
1439 /* Alloc soft command */
1440 sc = (struct octeon_soft_command *)
1441 octeon_alloc_soft_command(oct_dev,
1442 0,
1443 sizeof(struct oct_nic_stats_resp),
1444 sizeof(struct octnic_ctrl_pkt));
1445
1446 if (!sc)
1447 return -ENOMEM;
1448
1449 resp = (struct oct_nic_stats_resp *)sc->virtrptr;
1450 memset(resp, 0, sizeof(struct oct_nic_stats_resp));
1451
1452 ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
1453 memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
1454 ctrl->netdev = netdev;
1455 init_completion(&ctrl->complete);
1456
1457 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1458
1459 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1460 OPCODE_NIC_PORT_STATS, 0, 0, 0);
1461
1462 sc->callback = octnet_nic_stats_callback;
1463 sc->callback_arg = sc;
1464 sc->wait_time = 500; /*in milli seconds*/
1465
1466 retval = octeon_send_soft_command(oct_dev, sc);
1467 if (retval == IQ_SEND_FAILED) {
1468 octeon_free_soft_command(oct_dev, sc);
1469 return -EINVAL;
1470 }
1471
1472 wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
1473
1474 if (resp->status != 1) {
1475 octeon_free_soft_command(oct_dev, sc);
1476
1477 return -EINVAL;
1478 }
1479
1480 octeon_free_soft_command(oct_dev, sc);
1481
1482 return 0;
1483}
1484
1485static void liquidio_nic_seapi_ctl_callback(struct octeon_device *oct,
1486 u32 status,
1487 void *buf)
1488{
1489 struct liquidio_nic_seapi_ctl_context *ctx;
1490 struct octeon_soft_command *sc = buf;
1491
1492 ctx = sc->ctxptr;
1493
1494 oct = lio_get_device(ctx->octeon_id);
1495 if (status) {
1496 dev_err(&oct->pci_dev->dev, "%s: instruction failed. Status: %llx\n",
1497 __func__,
1498 CVM_CAST64(status));
1499 }
1500 ctx->status = status;
1501 complete(&ctx->complete);
1502}
1503
/* Ask firmware (via the u-boot control opcode) to change the port speed.
 *
 * @lio:   per-interface state
 * @speed: target speed in Gbps (e.g. 10 or 25)
 *
 * Only supported on CN23XX PF devices.  Sends SEAPI_CMD_SPEED_SET and
 * waits up to 10 seconds for liquidio_nic_seapi_ctl_callback() to
 * complete the request, then caches the speed the firmware actually
 * reports back in oct->speed_setting.
 *
 * Returns 0 on success, -EOPNOTSUPP on non-PF, -ENOMEM on allocation
 * failure, -EBUSY if the send fails, -EINTR on timeout, -EIO on a
 * firmware-reported error.
 */
int liquidio_set_speed(struct lio *lio, int speed)
{
	struct liquidio_nic_seapi_ctl_context *ctx;
	struct octeon_device *oct = lio->oct_dev;
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	u32 ctx_size;
	int retval;
	u32 var;

	/* Nothing to do if the requested speed is already set. */
	if (oct->speed_setting == speed)
		return 0;

	if (!OCTEON_CN23XX_PF(oct)) {
		dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n",
			__func__);
		return -EOPNOTSUPP;
	}

	ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp),
				       ctx_size);
	if (!sc)
		return -ENOMEM;

	ncmd = sc->virtdptr;
	ctx = sc->ctxptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	ctx->octeon_id = lio_get_device_id(oct);
	ctx->status = 0;
	init_completion(&ctx->complete);

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
	ncmd->s.param1 = speed;

	/* Command payload must be byte-swapped for the firmware. */
	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	sc->callback = liquidio_nic_seapi_ctl_callback;
	sc->callback_arg = sc;
	/* NOTE(review): sc->wait_time (5 s) is shorter than the 10 s
	 * completion wait below — presumably intentional so the request
	 * layer times out first; confirm against the request-list code.
	 */
	sc->wait_time = 5000;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
		retval = -EBUSY;
	} else {
		/* Wait for response or timeout */
		if (wait_for_completion_timeout(&ctx->complete,
						msecs_to_jiffies(10000)) == 0) {
			dev_err(&oct->pci_dev->dev, "%s: sc timeout\n",
				__func__);
			/* NOTE(review): sc is freed here even though the
			 * callback may still fire after the timeout —
			 * looks like a potential use-after-free; confirm.
			 */
			octeon_free_soft_command(oct, sc);
			return -EINTR;
		}

		retval = resp->status;

		if (retval) {
			dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
				__func__, retval);
			octeon_free_soft_command(oct, sc);
			return -EIO;
		}

		/* Firmware echoes back the speed it actually applied. */
		var = be32_to_cpu((__force __be32)resp->speed);
		if (var != speed) {
			dev_err(&oct->pci_dev->dev,
				"%s: setting failed speed= %x, expect %x\n",
				__func__, var, speed);
		}

		oct->speed_setting = var;
	}

	octeon_free_soft_command(oct, sc);

	return retval;
}
1592
/* Query the configured port speed from firmware boot variables.
 *
 * @lio: per-interface state
 *
 * Sends SEAPI_CMD_SPEED_GET and waits up to 10 seconds for the reply.
 * On any failure (send error, timeout, firmware error, or the 0xffff
 * "boot variables unreadable" sentinel) the driver falls back to a
 * default of 25 Gbps and sets oct->no_speed_setting so later code
 * knows the speed cannot be changed.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EBUSY if the
 * send fails, -EINTR on timeout, -EIO on a firmware-reported error.
 */
int liquidio_get_speed(struct lio *lio)
{
	struct liquidio_nic_seapi_ctl_context *ctx;
	struct octeon_device *oct = lio->oct_dev;
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	u32 ctx_size;
	int retval;

	ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp),
				       ctx_size);
	if (!sc)
		return -ENOMEM;

	ncmd = sc->virtdptr;
	ctx = sc->ctxptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	ctx->octeon_id = lio_get_device_id(oct);
	ctx->status = 0;
	init_completion(&ctx->complete);

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_SPEED_GET;

	/* Command payload must be byte-swapped for the firmware. */
	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	sc->callback = liquidio_nic_seapi_ctl_callback;
	sc->callback_arg = sc;
	/* NOTE(review): 5 s request timeout vs. 10 s completion wait —
	 * same pattern as liquidio_set_speed(); confirm intent.
	 */
	sc->wait_time = 5000;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
		/* Fall back to the 25G default and mark speed as fixed. */
		oct->no_speed_setting = 1;
		oct->speed_setting = 25;

		retval = -EBUSY;
	} else {
		if (wait_for_completion_timeout(&ctx->complete,
						msecs_to_jiffies(10000)) == 0) {
			dev_err(&oct->pci_dev->dev, "%s: sc timeout\n",
				__func__);

			oct->speed_setting = 25;
			oct->no_speed_setting = 1;

			/* NOTE(review): freeing sc on timeout while the
			 * callback may still run later looks like a
			 * potential use-after-free; confirm.
			 */
			octeon_free_soft_command(oct, sc);

			return -EINTR;
		}
		retval = resp->status;
		if (retval) {
			dev_err(&oct->pci_dev->dev,
				"%s failed retval=%d\n", __func__, retval);
			oct->no_speed_setting = 1;
			oct->speed_setting = 25;
			octeon_free_soft_command(oct, sc);
			retval = -EIO;
		} else {
			u32 var;

			var = be32_to_cpu((__force __be32)resp->speed);
			oct->speed_setting = var;
			if (var == 0xffff) {
				/* Firmware could not read the boot
				 * variables; use the NIC-type default.
				 */
				oct->no_speed_setting = 1;
				/* unable to access boot variables
				 * get the default value based on the NIC type
				 */
				oct->speed_setting = 25;
			}
		}
	}

	octeon_free_soft_command(oct, sc);

	return retval;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 9926a12dd805..06f7449c569d 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -32,7 +32,6 @@
32#include "cn23xx_vf_device.h" 32#include "cn23xx_vf_device.h"
33 33
34static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs); 34static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);
35static int octnet_get_link_stats(struct net_device *netdev);
36 35
37struct oct_intrmod_context { 36struct oct_intrmod_context {
38 int octeon_id; 37 int octeon_id;
@@ -113,6 +112,9 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
113 "tx_tso_err", 112 "tx_tso_err",
114 "tx_vxlan", 113 "tx_vxlan",
115 114
115 "tx_mcast",
116 "tx_bcast",
117
116 "mac_tx_total_pkts", 118 "mac_tx_total_pkts",
117 "mac_tx_total_bytes", 119 "mac_tx_total_bytes",
118 "mac_tx_mcast_pkts", 120 "mac_tx_mcast_pkts",
@@ -120,7 +122,7 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
120 "mac_tx_ctl_packets", 122 "mac_tx_ctl_packets",
121 "mac_tx_total_collisions", 123 "mac_tx_total_collisions",
122 "mac_tx_one_collision", 124 "mac_tx_one_collision",
123 "mac_tx_multi_collison", 125 "mac_tx_multi_collision",
124 "mac_tx_max_collision_fail", 126 "mac_tx_max_collision_fail",
125 "mac_tx_max_deferal_fail", 127 "mac_tx_max_deferal_fail",
126 "mac_tx_fifo_err", 128 "mac_tx_fifo_err",
@@ -128,6 +130,8 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
128 130
129 "rx_total_rcvd", 131 "rx_total_rcvd",
130 "rx_total_fwd", 132 "rx_total_fwd",
133 "rx_mcast",
134 "rx_bcast",
131 "rx_jabber_err", 135 "rx_jabber_err",
132 "rx_l2_err", 136 "rx_l2_err",
133 "rx_frame_err", 137 "rx_frame_err",
@@ -172,6 +176,10 @@ static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
172 "tx_errors", 176 "tx_errors",
173 "rx_dropped", 177 "rx_dropped",
174 "tx_dropped", 178 "tx_dropped",
179 "rx_mcast",
180 "tx_mcast",
181 "rx_bcast",
182 "tx_bcast",
175 "link_state_changes", 183 "link_state_changes",
176}; 184};
177 185
@@ -222,46 +230,147 @@ static int lio_get_link_ksettings(struct net_device *netdev,
222 struct lio *lio = GET_LIO(netdev); 230 struct lio *lio = GET_LIO(netdev);
223 struct octeon_device *oct = lio->oct_dev; 231 struct octeon_device *oct = lio->oct_dev;
224 struct oct_link_info *linfo; 232 struct oct_link_info *linfo;
225 u32 supported = 0, advertising = 0;
226 233
227 linfo = &lio->linfo; 234 linfo = &lio->linfo;
228 235
236 ethtool_link_ksettings_zero_link_mode(ecmd, supported);
237 ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
238
229 switch (linfo->link.s.phy_type) { 239 switch (linfo->link.s.phy_type) {
230 case LIO_PHY_PORT_TP: 240 case LIO_PHY_PORT_TP:
231 ecmd->base.port = PORT_TP; 241 ecmd->base.port = PORT_TP;
232 supported = (SUPPORTED_10000baseT_Full |
233 SUPPORTED_TP | SUPPORTED_Pause);
234 advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
235 ecmd->base.autoneg = AUTONEG_DISABLE; 242 ecmd->base.autoneg = AUTONEG_DISABLE;
243 ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
244 ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
245 ethtool_link_ksettings_add_link_mode(ecmd, supported,
246 10000baseT_Full);
247
248 ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
249 ethtool_link_ksettings_add_link_mode(ecmd, advertising,
250 10000baseT_Full);
251
236 break; 252 break;
237 253
238 case LIO_PHY_PORT_FIBRE: 254 case LIO_PHY_PORT_FIBRE:
239 ecmd->base.port = PORT_FIBRE; 255 if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
240 256 linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
241 if (linfo->link.s.speed == SPEED_10000) { 257 linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
242 supported = SUPPORTED_10000baseT_Full; 258 linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
243 advertising = ADVERTISED_10000baseT_Full; 259 dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n");
260 } else {
261 dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n",
262 linfo->link.s.if_mode);
244 } 263 }
245 264
246 supported |= SUPPORTED_FIBRE | SUPPORTED_Pause; 265 ecmd->base.port = PORT_FIBRE;
247 advertising |= ADVERTISED_Pause;
248 ecmd->base.autoneg = AUTONEG_DISABLE; 266 ecmd->base.autoneg = AUTONEG_DISABLE;
267 ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
268
269 ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
270 ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
271 if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
272 oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
273 if (OCTEON_CN23XX_PF(oct)) {
274 ethtool_link_ksettings_add_link_mode
275 (ecmd, supported, 25000baseSR_Full);
276 ethtool_link_ksettings_add_link_mode
277 (ecmd, supported, 25000baseKR_Full);
278 ethtool_link_ksettings_add_link_mode
279 (ecmd, supported, 25000baseCR_Full);
280
281 if (oct->no_speed_setting == 0) {
282 ethtool_link_ksettings_add_link_mode
283 (ecmd, supported,
284 10000baseSR_Full);
285 ethtool_link_ksettings_add_link_mode
286 (ecmd, supported,
287 10000baseKR_Full);
288 ethtool_link_ksettings_add_link_mode
289 (ecmd, supported,
290 10000baseCR_Full);
291 }
292
293 if (oct->no_speed_setting == 0)
294 liquidio_get_speed(lio);
295 else
296 oct->speed_setting = 25;
297
298 if (oct->speed_setting == 10) {
299 ethtool_link_ksettings_add_link_mode
300 (ecmd, advertising,
301 10000baseSR_Full);
302 ethtool_link_ksettings_add_link_mode
303 (ecmd, advertising,
304 10000baseKR_Full);
305 ethtool_link_ksettings_add_link_mode
306 (ecmd, advertising,
307 10000baseCR_Full);
308 }
309 if (oct->speed_setting == 25) {
310 ethtool_link_ksettings_add_link_mode
311 (ecmd, advertising,
312 25000baseSR_Full);
313 ethtool_link_ksettings_add_link_mode
314 (ecmd, advertising,
315 25000baseKR_Full);
316 ethtool_link_ksettings_add_link_mode
317 (ecmd, advertising,
318 25000baseCR_Full);
319 }
320 } else { /* VF */
321 if (linfo->link.s.speed == 10000) {
322 ethtool_link_ksettings_add_link_mode
323 (ecmd, supported,
324 10000baseSR_Full);
325 ethtool_link_ksettings_add_link_mode
326 (ecmd, supported,
327 10000baseKR_Full);
328 ethtool_link_ksettings_add_link_mode
329 (ecmd, supported,
330 10000baseCR_Full);
331
332 ethtool_link_ksettings_add_link_mode
333 (ecmd, advertising,
334 10000baseSR_Full);
335 ethtool_link_ksettings_add_link_mode
336 (ecmd, advertising,
337 10000baseKR_Full);
338 ethtool_link_ksettings_add_link_mode
339 (ecmd, advertising,
340 10000baseCR_Full);
341 }
342
343 if (linfo->link.s.speed == 25000) {
344 ethtool_link_ksettings_add_link_mode
345 (ecmd, supported,
346 25000baseSR_Full);
347 ethtool_link_ksettings_add_link_mode
348 (ecmd, supported,
349 25000baseKR_Full);
350 ethtool_link_ksettings_add_link_mode
351 (ecmd, supported,
352 25000baseCR_Full);
353
354 ethtool_link_ksettings_add_link_mode
355 (ecmd, advertising,
356 25000baseSR_Full);
357 ethtool_link_ksettings_add_link_mode
358 (ecmd, advertising,
359 25000baseKR_Full);
360 ethtool_link_ksettings_add_link_mode
361 (ecmd, advertising,
362 25000baseCR_Full);
363 }
364 }
365 } else {
366 ethtool_link_ksettings_add_link_mode(ecmd, supported,
367 10000baseT_Full);
368 ethtool_link_ksettings_add_link_mode(ecmd, advertising,
369 10000baseT_Full);
370 }
249 break; 371 break;
250 } 372 }
251 373
252 if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
253 linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
254 linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
255 linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
256 ethtool_convert_legacy_u32_to_link_mode(
257 ecmd->link_modes.supported, supported);
258 ethtool_convert_legacy_u32_to_link_mode(
259 ecmd->link_modes.advertising, advertising);
260 } else {
261 dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
262 linfo->link.s.if_mode);
263 }
264
265 if (linfo->link.s.link_up) { 374 if (linfo->link.s.link_up) {
266 ecmd->base.speed = linfo->link.s.speed; 375 ecmd->base.speed = linfo->link.s.speed;
267 ecmd->base.duplex = linfo->link.s.duplex; 376 ecmd->base.duplex = linfo->link.s.duplex;
@@ -273,6 +382,51 @@ static int lio_get_link_ksettings(struct net_device *netdev,
273 return 0; 382 return 0;
274} 383}
275 384
385static int lio_set_link_ksettings(struct net_device *netdev,
386 const struct ethtool_link_ksettings *ecmd)
387{
388 const int speed = ecmd->base.speed;
389 struct lio *lio = GET_LIO(netdev);
390 struct oct_link_info *linfo;
391 struct octeon_device *oct;
392 u32 is25G = 0;
393
394 oct = lio->oct_dev;
395
396 linfo = &lio->linfo;
397
398 if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
399 oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
400 is25G = 1;
401 } else {
402 return -EOPNOTSUPP;
403 }
404
405 if (oct->no_speed_setting) {
406 dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n",
407 __func__);
408 return -EOPNOTSUPP;
409 }
410
411 if ((ecmd->base.duplex != DUPLEX_UNKNOWN &&
412 ecmd->base.duplex != linfo->link.s.duplex) ||
413 ecmd->base.autoneg != AUTONEG_DISABLE ||
414 (ecmd->base.speed != 10000 && ecmd->base.speed != 25000 &&
415 ecmd->base.speed != SPEED_UNKNOWN))
416 return -EOPNOTSUPP;
417
418 if ((oct->speed_boot == speed / 1000) &&
419 oct->speed_boot == oct->speed_setting)
420 return 0;
421
422 liquidio_set_speed(lio, speed / 1000);
423
424 dev_dbg(&oct->pci_dev->dev, "Port speed is set to %dG\n",
425 oct->speed_setting);
426
427 return 0;
428}
429
276static void 430static void
277lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) 431lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
278{ 432{
@@ -353,7 +507,14 @@ lio_ethtool_get_channels(struct net_device *dev,
353 rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx); 507 rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
354 tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx); 508 tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
355 } else if (OCTEON_CN23XX_PF(oct)) { 509 } else if (OCTEON_CN23XX_PF(oct)) {
356 max_combined = lio->linfo.num_txpciq; 510 if (oct->sriov_info.sriov_enabled) {
511 max_combined = lio->linfo.num_txpciq;
512 } else {
513 struct octeon_config *conf23_pf =
514 CHIP_CONF(oct, cn23xx_pf);
515
516 max_combined = CFG_GET_IQ_MAX_Q(conf23_pf);
517 }
357 combined_count = oct->num_iqs; 518 combined_count = oct->num_iqs;
358 } else if (OCTEON_CN23XX_VF(oct)) { 519 } else if (OCTEON_CN23XX_VF(oct)) {
359 u64 reg_val = 0ULL; 520 u64 reg_val = 0ULL;
@@ -417,9 +578,15 @@ lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs)
417 578
418 kfree(oct->irq_name_storage); 579 kfree(oct->irq_name_storage);
419 oct->irq_name_storage = NULL; 580 oct->irq_name_storage = NULL;
581
582 if (octeon_allocate_ioq_vector(oct, num_ioqs)) {
583 dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
584 return -1;
585 }
586
420 if (octeon_setup_interrupt(oct, num_ioqs)) { 587 if (octeon_setup_interrupt(oct, num_ioqs)) {
421 dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n"); 588 dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n");
422 return 1; 589 return -1;
423 } 590 }
424 591
425 /* Enable Octeon device interrupts */ 592 /* Enable Octeon device interrupts */
@@ -449,7 +616,16 @@ lio_ethtool_set_channels(struct net_device *dev,
449 combined_count = channel->combined_count; 616 combined_count = channel->combined_count;
450 617
451 if (OCTEON_CN23XX_PF(oct)) { 618 if (OCTEON_CN23XX_PF(oct)) {
452 max_combined = channel->max_combined; 619 if (oct->sriov_info.sriov_enabled) {
620 max_combined = lio->linfo.num_txpciq;
621 } else {
622 struct octeon_config *conf23_pf =
623 CHIP_CONF(oct,
624 cn23xx_pf);
625
626 max_combined =
627 CFG_GET_IQ_MAX_Q(conf23_pf);
628 }
453 } else if (OCTEON_CN23XX_VF(oct)) { 629 } else if (OCTEON_CN23XX_VF(oct)) {
454 u64 reg_val = 0ULL; 630 u64 reg_val = 0ULL;
455 u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0); 631 u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
@@ -477,7 +653,6 @@ lio_ethtool_set_channels(struct net_device *dev,
477 if (lio_reset_queues(dev, combined_count)) 653 if (lio_reset_queues(dev, combined_count))
478 return -EINVAL; 654 return -EINVAL;
479 655
480 lio_irq_reallocate_irqs(oct, combined_count);
481 if (stopped) 656 if (stopped)
482 dev->netdev_ops->ndo_open(dev); 657 dev->netdev_ops->ndo_open(dev);
483 658
@@ -816,12 +991,120 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
816 ering->rx_jumbo_max_pending = 0; 991 ering->rx_jumbo_max_pending = 0;
817} 992}
818 993
/* Tell the CN23XX firmware the new PF queue count and re-sync the
 * driver's interface configuration from its response.
 *
 * Sends OPCODE_NIC_QCOUNT_UPDATE carrying the new ring counts from
 * oct->sriov_info, waits for lio_if_cfg_callback() to signal the
 * response, then rebuilds lio->linfo (queue masks, pciq descriptors,
 * MAC, gmx port, link word) from the returned config.
 *
 * Returns 0 on success, -1 on any failure.
 */
static int lio_23xx_reconfigure_queue_count(struct lio *lio)
{
	struct octeon_device *oct = lio->oct_dev;
	struct liquidio_if_cfg_context *ctx;
	u32 resp_size, ctx_size, data_size;
	struct liquidio_if_cfg_resp *resp;
	struct octeon_soft_command *sc;
	union oct_nic_if_cfg if_cfg;
	struct lio_version *vdata;
	u32 ifidx_or_pfnum;
	int retval;
	int j;

	resp_size = sizeof(struct liquidio_if_cfg_resp);
	ctx_size = sizeof(struct liquidio_if_cfg_context);
	data_size = sizeof(struct lio_version);
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, data_size,
					  resp_size, ctx_size);
	if (!sc) {
		dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n",
			__func__);
		return -1;
	}

	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
	vdata = (struct lio_version *)sc->virtdptr;

	/* Driver version travels big-endian in the command payload. */
	vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
	vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
	vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);

	ifidx_or_pfnum = oct->pf_num;
	WRITE_ONCE(ctx->cond, 0);
	ctx->octeon_id = lio_get_device_id(oct);
	init_waitqueue_head(&ctx->wc);

	/* Request the same count for input and output queues, starting
	 * at the PF's ring base.
	 */
	if_cfg.u64 = 0;
	if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings;
	if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings;
	if_cfg.s.base_queue = oct->sriov_info.pf_srn;
	if_cfg.s.gmx_port_id = oct->pf_num;

	sc->iq_no = 0;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_QCOUNT_UPDATE, 0,
				    if_cfg.u64, 0);
	sc->callback = lio_if_cfg_callback;
	sc->callback_arg = sc;
	sc->wait_time = LIO_IFCFG_WAIT_TIME;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev,
			"iq/oq config failed status: %x\n",
			retval);
		goto qcount_update_fail;
	}

	if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
		dev_err(&oct->pci_dev->dev, "Wait interrupted\n");
		/* NOTE(review): sc is not freed on this path —
		 * presumably left alive because the callback may still
		 * run; confirm this is not a leak.
		 */
		return -1;
	}

	retval = resp->status;
	if (retval) {
		dev_err(&oct->pci_dev->dev, "iq/oq config failed\n");
		goto qcount_update_fail;
	}

	/* Response config arrives byte-swapped; fix it in place. */
	octeon_swap_8B_data((u64 *)(&resp->cfg_info),
			    (sizeof(struct liquidio_if_cfg_info)) >> 3);

	lio->ifidx = ifidx_or_pfnum;
	/* NOTE(review): num_rxpciq is derived from iqmask (not oqmask);
	 * the counts are equal in this request, but confirm the mask
	 * choice is intentional.
	 */
	lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask);
	lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask);
	for (j = 0; j < lio->linfo.num_rxpciq; j++) {
		lio->linfo.rxpciq[j].u64 =
			resp->cfg_info.linfo.rxpciq[j].u64;
	}

	for (j = 0; j < lio->linfo.num_txpciq; j++) {
		lio->linfo.txpciq[j].u64 =
			resp->cfg_info.linfo.txpciq[j].u64;
	}

	lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
	lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
	lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
	lio->txq = lio->linfo.txpciq[0].s.q_no;
	lio->rxq = lio->linfo.rxpciq[0].s.q_no;

	octeon_free_soft_command(oct, sc);
	dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n",
		 lio->linfo.num_rxpciq);

	return 0;

qcount_update_fail:
	octeon_free_soft_command(oct, sc);

	return -1;
}
1098
819static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) 1099static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
820{ 1100{
821 struct lio *lio = GET_LIO(netdev); 1101 struct lio *lio = GET_LIO(netdev);
822 struct octeon_device *oct = lio->oct_dev; 1102 struct octeon_device *oct = lio->oct_dev;
1103 int i, queue_count_update = 0;
823 struct napi_struct *napi, *n; 1104 struct napi_struct *napi, *n;
824 int i, update = 0; 1105 int ret;
1106
1107 schedule_timeout_uninterruptible(msecs_to_jiffies(100));
825 1108
826 if (wait_for_pending_requests(oct)) 1109 if (wait_for_pending_requests(oct))
827 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 1110 dev_err(&oct->pci_dev->dev, "There were pending requests\n");
@@ -830,7 +1113,7 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
830 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); 1113 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
831 1114
832 if (octeon_set_io_queues_off(oct)) { 1115 if (octeon_set_io_queues_off(oct)) {
833 dev_err(&oct->pci_dev->dev, "setting io queues off failed\n"); 1116 dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n");
834 return -1; 1117 return -1;
835 } 1118 }
836 1119
@@ -843,9 +1126,40 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
843 netif_napi_del(napi); 1126 netif_napi_del(napi);
844 1127
845 if (num_qs != oct->num_iqs) { 1128 if (num_qs != oct->num_iqs) {
846 netif_set_real_num_rx_queues(netdev, num_qs); 1129 ret = netif_set_real_num_rx_queues(netdev, num_qs);
847 netif_set_real_num_tx_queues(netdev, num_qs); 1130 if (ret) {
848 update = 1; 1131 dev_err(&oct->pci_dev->dev,
1132 "Setting real number rx failed\n");
1133 return ret;
1134 }
1135
1136 ret = netif_set_real_num_tx_queues(netdev, num_qs);
1137 if (ret) {
1138 dev_err(&oct->pci_dev->dev,
1139 "Setting real number tx failed\n");
1140 return ret;
1141 }
1142
1143 /* The value of queue_count_update decides whether it is the
1144 * queue count or the descriptor count that is being
1145 * re-configured.
1146 */
1147 queue_count_update = 1;
1148 }
1149
1150 /* Re-configuration of queues can happen in two scenarios, SRIOV enabled
1151 * and SRIOV disabled. Few things like recreating queue zero, resetting
1152 * glists and IRQs are required for both. For the latter, some more
1153 * steps like updating sriov_info for the octeon device need to be done.
1154 */
1155 if (queue_count_update) {
1156 lio_delete_glists(lio);
1157
1158 /* Delete mbox for PF which is SRIOV disabled because sriov_info
1159 * will be now changed.
1160 */
1161 if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled)
1162 oct->fn_list.free_mbox(oct);
849 } 1163 }
850 1164
851 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { 1165 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
@@ -860,24 +1174,91 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
860 octeon_delete_instr_queue(oct, i); 1174 octeon_delete_instr_queue(oct, i);
861 } 1175 }
862 1176
1177 if (queue_count_update) {
1178 /* For PF re-configure sriov related information */
1179 if ((OCTEON_CN23XX_PF(oct)) &&
1180 !oct->sriov_info.sriov_enabled) {
1181 oct->sriov_info.num_pf_rings = num_qs;
1182 if (cn23xx_sriov_config(oct)) {
1183 dev_err(&oct->pci_dev->dev,
1184 "Queue reset aborted: SRIOV config failed\n");
1185 return -1;
1186 }
1187
1188 num_qs = oct->sriov_info.num_pf_rings;
1189 }
1190 }
1191
863 if (oct->fn_list.setup_device_regs(oct)) { 1192 if (oct->fn_list.setup_device_regs(oct)) {
864 dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n"); 1193 dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n");
865 return -1; 1194 return -1;
866 } 1195 }
867 1196
868 if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) { 1197 /* The following are needed in case of queue count re-configuration and
869 dev_err(&oct->pci_dev->dev, "IO queues initialization failed\n"); 1198 * not for descriptor count re-configuration.
870 return -1; 1199 */
1200 if (queue_count_update) {
1201 if (octeon_setup_instr_queues(oct))
1202 return -1;
1203
1204 if (octeon_setup_output_queues(oct))
1205 return -1;
1206
1207 /* Recreating mbox for PF that is SRIOV disabled */
1208 if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
1209 if (oct->fn_list.setup_mbox(oct)) {
1210 dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
1211 return -1;
1212 }
1213 }
1214
1215 /* Deleting and recreating IRQs whether the interface is SRIOV
1216 * enabled or disabled.
1217 */
1218 if (lio_irq_reallocate_irqs(oct, num_qs)) {
1219 dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n");
1220 return -1;
1221 }
1222
1223 /* Enable the input and output queues for this Octeon device */
1224 if (oct->fn_list.enable_io_queues(oct)) {
1225 dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n");
1226 return -1;
1227 }
1228
1229 for (i = 0; i < oct->num_oqs; i++)
1230 writel(oct->droq[i]->max_count,
1231 oct->droq[i]->pkts_credit_reg);
1232
1233 /* Informing firmware about the new queue count. It is required
1234 * for firmware to allocate more number of queues than those at
1235 * load time.
1236 */
1237 if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
1238 if (lio_23xx_reconfigure_queue_count(lio))
1239 return -1;
1240 }
871 } 1241 }
872 1242
873 /* Enable the input and output queues for this Octeon device */ 1243 /* Once firmware is aware of the new value, queues can be recreated */
874 if (oct->fn_list.enable_io_queues(oct)) { 1244 if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
875 dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues"); 1245 dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n");
876 return -1; 1246 return -1;
877 } 1247 }
878 1248
879 if (update && lio_send_queue_count_update(netdev, num_qs)) 1249 if (queue_count_update) {
880 return -1; 1250 if (lio_setup_glists(oct, lio, num_qs)) {
1251 dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n");
1252 return -1;
1253 }
1254
1255 /* Send firmware the information about new number of queues
1256 * if the interface is a VF or a PF that is SRIOV enabled.
1257 */
1258 if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct))
1259 if (lio_send_queue_count_update(netdev, num_qs))
1260 return -1;
1261 }
881 1262
882 return 0; 1263 return 0;
883} 1264}
@@ -922,7 +1303,7 @@ static int lio_ethtool_set_ringparam(struct net_device *netdev,
922 CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, 1303 CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
923 rx_count); 1304 rx_count);
924 1305
925 if (lio_reset_queues(netdev, lio->linfo.num_txpciq)) 1306 if (lio_reset_queues(netdev, oct->num_iqs))
926 goto err_lio_reset_queues; 1307 goto err_lio_reset_queues;
927 1308
928 if (stopped) 1309 if (stopped)
@@ -1057,50 +1438,48 @@ lio_get_ethtool_stats(struct net_device *netdev,
1057{ 1438{
1058 struct lio *lio = GET_LIO(netdev); 1439 struct lio *lio = GET_LIO(netdev);
1059 struct octeon_device *oct_dev = lio->oct_dev; 1440 struct octeon_device *oct_dev = lio->oct_dev;
1060 struct net_device_stats *netstats = &netdev->stats; 1441 struct rtnl_link_stats64 lstats;
1061 int i = 0, j; 1442 int i = 0, j;
1062 1443
1063 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 1444 if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1064 return; 1445 return;
1065 1446
1066 netdev->netdev_ops->ndo_get_stats(netdev); 1447 netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
1067 octnet_get_link_stats(netdev);
1068
1069 /*sum of oct->droq[oq_no]->stats->rx_pkts_received */ 1448 /*sum of oct->droq[oq_no]->stats->rx_pkts_received */
1070 data[i++] = CVM_CAST64(netstats->rx_packets); 1449 data[i++] = lstats.rx_packets;
1071 /*sum of oct->instr_queue[iq_no]->stats.tx_done */ 1450 /*sum of oct->instr_queue[iq_no]->stats.tx_done */
1072 data[i++] = CVM_CAST64(netstats->tx_packets); 1451 data[i++] = lstats.tx_packets;
1073 /*sum of oct->droq[oq_no]->stats->rx_bytes_received */ 1452 /*sum of oct->droq[oq_no]->stats->rx_bytes_received */
1074 data[i++] = CVM_CAST64(netstats->rx_bytes); 1453 data[i++] = lstats.rx_bytes;
1075 /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ 1454 /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
1076 data[i++] = CVM_CAST64(netstats->tx_bytes); 1455 data[i++] = lstats.tx_bytes;
1077 data[i++] = CVM_CAST64(netstats->rx_errors + 1456 data[i++] = lstats.rx_errors +
1078 oct_dev->link_stats.fromwire.fcs_err + 1457 oct_dev->link_stats.fromwire.fcs_err +
1079 oct_dev->link_stats.fromwire.jabber_err + 1458 oct_dev->link_stats.fromwire.jabber_err +
1080 oct_dev->link_stats.fromwire.l2_err + 1459 oct_dev->link_stats.fromwire.l2_err +
1081 oct_dev->link_stats.fromwire.frame_err); 1460 oct_dev->link_stats.fromwire.frame_err;
1082 data[i++] = CVM_CAST64(netstats->tx_errors); 1461 data[i++] = lstats.tx_errors;
1083 /*sum of oct->droq[oq_no]->stats->rx_dropped + 1462 /*sum of oct->droq[oq_no]->stats->rx_dropped +
1084 *oct->droq[oq_no]->stats->dropped_nodispatch + 1463 *oct->droq[oq_no]->stats->dropped_nodispatch +
1085 *oct->droq[oq_no]->stats->dropped_toomany + 1464 *oct->droq[oq_no]->stats->dropped_toomany +
1086 *oct->droq[oq_no]->stats->dropped_nomem 1465 *oct->droq[oq_no]->stats->dropped_nomem
1087 */ 1466 */
1088 data[i++] = CVM_CAST64(netstats->rx_dropped + 1467 data[i++] = lstats.rx_dropped +
1089 oct_dev->link_stats.fromwire.fifo_err + 1468 oct_dev->link_stats.fromwire.fifo_err +
1090 oct_dev->link_stats.fromwire.dmac_drop + 1469 oct_dev->link_stats.fromwire.dmac_drop +
1091 oct_dev->link_stats.fromwire.red_drops + 1470 oct_dev->link_stats.fromwire.red_drops +
1092 oct_dev->link_stats.fromwire.fw_err_pko + 1471 oct_dev->link_stats.fromwire.fw_err_pko +
1093 oct_dev->link_stats.fromwire.fw_err_link + 1472 oct_dev->link_stats.fromwire.fw_err_link +
1094 oct_dev->link_stats.fromwire.fw_err_drop); 1473 oct_dev->link_stats.fromwire.fw_err_drop;
1095 /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */ 1474 /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
1096 data[i++] = CVM_CAST64(netstats->tx_dropped + 1475 data[i++] = lstats.tx_dropped +
1097 oct_dev->link_stats.fromhost.max_collision_fail + 1476 oct_dev->link_stats.fromhost.max_collision_fail +
1098 oct_dev->link_stats.fromhost.max_deferral_fail + 1477 oct_dev->link_stats.fromhost.max_deferral_fail +
1099 oct_dev->link_stats.fromhost.total_collisions + 1478 oct_dev->link_stats.fromhost.total_collisions +
1100 oct_dev->link_stats.fromhost.fw_err_pko + 1479 oct_dev->link_stats.fromhost.fw_err_pko +
1101 oct_dev->link_stats.fromhost.fw_err_link + 1480 oct_dev->link_stats.fromhost.fw_err_link +
1102 oct_dev->link_stats.fromhost.fw_err_drop + 1481 oct_dev->link_stats.fromhost.fw_err_drop +
1103 oct_dev->link_stats.fromhost.fw_err_pki); 1482 oct_dev->link_stats.fromhost.fw_err_pki;
1104 1483
1105 /* firmware tx stats */ 1484 /* firmware tx stats */
1106 /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx]. 1485 /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
@@ -1135,6 +1514,10 @@ lio_get_ethtool_stats(struct net_device *netdev,
1135 */ 1514 */
1136 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan); 1515 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
1137 1516
1517 /* Multicast packets sent by this port */
1518 data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
1519 data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;
1520
1138 /* mac tx statistics */ 1521 /* mac tx statistics */
1139 /*CVMX_BGXX_CMRX_TX_STAT5 */ 1522 /*CVMX_BGXX_CMRX_TX_STAT5 */
1140 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent); 1523 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
@@ -1171,6 +1554,9 @@ lio_get_ethtool_stats(struct net_device *netdev,
1171 *fw_total_fwd 1554 *fw_total_fwd
1172 */ 1555 */
1173 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd); 1556 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
1557 /* Multicast packets received on this port */
1558 data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
1559 data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
1174 /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */ 1560 /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
1175 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err); 1561 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
1176 /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */ 1562 /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
@@ -1339,7 +1725,7 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev,
1339 __attribute__((unused)), 1725 __attribute__((unused)),
1340 u64 *data) 1726 u64 *data)
1341{ 1727{
1342 struct net_device_stats *netstats = &netdev->stats; 1728 struct rtnl_link_stats64 lstats;
1343 struct lio *lio = GET_LIO(netdev); 1729 struct lio *lio = GET_LIO(netdev);
1344 struct octeon_device *oct_dev = lio->oct_dev; 1730 struct octeon_device *oct_dev = lio->oct_dev;
1345 int i = 0, j, vj; 1731 int i = 0, j, vj;
@@ -1347,25 +1733,31 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev,
1347 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 1733 if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1348 return; 1734 return;
1349 1735
1350 netdev->netdev_ops->ndo_get_stats(netdev); 1736 netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
1351 /* sum of oct->droq[oq_no]->stats->rx_pkts_received */ 1737 /* sum of oct->droq[oq_no]->stats->rx_pkts_received */
1352 data[i++] = CVM_CAST64(netstats->rx_packets); 1738 data[i++] = lstats.rx_packets;
1353 /* sum of oct->instr_queue[iq_no]->stats.tx_done */ 1739 /* sum of oct->instr_queue[iq_no]->stats.tx_done */
1354 data[i++] = CVM_CAST64(netstats->tx_packets); 1740 data[i++] = lstats.tx_packets;
1355 /* sum of oct->droq[oq_no]->stats->rx_bytes_received */ 1741 /* sum of oct->droq[oq_no]->stats->rx_bytes_received */
1356 data[i++] = CVM_CAST64(netstats->rx_bytes); 1742 data[i++] = lstats.rx_bytes;
1357 /* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ 1743 /* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
1358 data[i++] = CVM_CAST64(netstats->tx_bytes); 1744 data[i++] = lstats.tx_bytes;
1359 data[i++] = CVM_CAST64(netstats->rx_errors); 1745 data[i++] = lstats.rx_errors;
1360 data[i++] = CVM_CAST64(netstats->tx_errors); 1746 data[i++] = lstats.tx_errors;
1361 /* sum of oct->droq[oq_no]->stats->rx_dropped + 1747 /* sum of oct->droq[oq_no]->stats->rx_dropped +
1362 * oct->droq[oq_no]->stats->dropped_nodispatch + 1748 * oct->droq[oq_no]->stats->dropped_nodispatch +
1363 * oct->droq[oq_no]->stats->dropped_toomany + 1749 * oct->droq[oq_no]->stats->dropped_toomany +
1364 * oct->droq[oq_no]->stats->dropped_nomem 1750 * oct->droq[oq_no]->stats->dropped_nomem
1365 */ 1751 */
1366 data[i++] = CVM_CAST64(netstats->rx_dropped); 1752 data[i++] = lstats.rx_dropped;
1367 /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */ 1753 /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
1368 data[i++] = CVM_CAST64(netstats->tx_dropped); 1754 data[i++] = lstats.tx_dropped;
1755
1756 data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
1757 data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
1758 data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
1759 data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;
1760
1369 /* lio->link_changes */ 1761 /* lio->link_changes */
1370 data[i++] = CVM_CAST64(lio->link_changes); 1762 data[i++] = CVM_CAST64(lio->link_changes);
1371 1763
@@ -1776,162 +2168,6 @@ static int octnet_set_intrmod_cfg(struct lio *lio,
1776 return -EINTR; 2168 return -EINTR;
1777} 2169}
1778 2170
1779static void
1780octnet_nic_stats_callback(struct octeon_device *oct_dev,
1781 u32 status, void *ptr)
1782{
1783 struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1784 struct oct_nic_stats_resp *resp =
1785 (struct oct_nic_stats_resp *)sc->virtrptr;
1786 struct oct_nic_stats_ctrl *ctrl =
1787 (struct oct_nic_stats_ctrl *)sc->ctxptr;
1788 struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
1789 struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
1790
1791 struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
1792 struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
1793
1794 if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
1795 octeon_swap_8B_data((u64 *)&resp->stats,
1796 (sizeof(struct oct_link_stats)) >> 3);
1797
1798 /* RX link-level stats */
1799 rstats->total_rcvd = rsp_rstats->total_rcvd;
1800 rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
1801 rstats->total_bcst = rsp_rstats->total_bcst;
1802 rstats->total_mcst = rsp_rstats->total_mcst;
1803 rstats->runts = rsp_rstats->runts;
1804 rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
1805 /* Accounts for over/under-run of buffers */
1806 rstats->fifo_err = rsp_rstats->fifo_err;
1807 rstats->dmac_drop = rsp_rstats->dmac_drop;
1808 rstats->fcs_err = rsp_rstats->fcs_err;
1809 rstats->jabber_err = rsp_rstats->jabber_err;
1810 rstats->l2_err = rsp_rstats->l2_err;
1811 rstats->frame_err = rsp_rstats->frame_err;
1812 rstats->red_drops = rsp_rstats->red_drops;
1813
1814 /* RX firmware stats */
1815 rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
1816 rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
1817 rstats->fw_err_pko = rsp_rstats->fw_err_pko;
1818 rstats->fw_err_link = rsp_rstats->fw_err_link;
1819 rstats->fw_err_drop = rsp_rstats->fw_err_drop;
1820 rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
1821 rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
1822
1823 /* Number of packets that are LROed */
1824 rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
1825 /* Number of octets that are LROed */
1826 rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
1827 /* Number of LRO packets formed */
1828 rstats->fw_total_lro = rsp_rstats->fw_total_lro;
1829 /* Number of times lRO of packet aborted */
1830 rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
1831 rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
1832 rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
1833 rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
1834 rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
1835 /* intrmod: packet forward rate */
1836 rstats->fwd_rate = rsp_rstats->fwd_rate;
1837
1838 /* TX link-level stats */
1839 tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
1840 tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
1841 tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
1842 tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
1843 tstats->ctl_sent = rsp_tstats->ctl_sent;
1844 /* Packets sent after one collision*/
1845 tstats->one_collision_sent = rsp_tstats->one_collision_sent;
1846 /* Packets sent after multiple collision*/
1847 tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
1848 /* Packets not sent due to max collisions */
1849 tstats->max_collision_fail = rsp_tstats->max_collision_fail;
1850 /* Packets not sent due to max deferrals */
1851 tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
1852 /* Accounts for over/under-run of buffers */
1853 tstats->fifo_err = rsp_tstats->fifo_err;
1854 tstats->runts = rsp_tstats->runts;
1855 /* Total number of collisions detected */
1856 tstats->total_collisions = rsp_tstats->total_collisions;
1857
1858 /* firmware stats */
1859 tstats->fw_total_sent = rsp_tstats->fw_total_sent;
1860 tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
1861 tstats->fw_err_pko = rsp_tstats->fw_err_pko;
1862 tstats->fw_err_pki = rsp_tstats->fw_err_pki;
1863 tstats->fw_err_link = rsp_tstats->fw_err_link;
1864 tstats->fw_err_drop = rsp_tstats->fw_err_drop;
1865 tstats->fw_tso = rsp_tstats->fw_tso;
1866 tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
1867 tstats->fw_err_tso = rsp_tstats->fw_err_tso;
1868 tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
1869
1870 resp->status = 1;
1871 } else {
1872 resp->status = -1;
1873 }
1874 complete(&ctrl->complete);
1875}
1876
1877/* Configure interrupt moderation parameters */
1878static int octnet_get_link_stats(struct net_device *netdev)
1879{
1880 struct lio *lio = GET_LIO(netdev);
1881 struct octeon_device *oct_dev = lio->oct_dev;
1882
1883 struct octeon_soft_command *sc;
1884 struct oct_nic_stats_ctrl *ctrl;
1885 struct oct_nic_stats_resp *resp;
1886
1887 int retval;
1888
1889 /* Alloc soft command */
1890 sc = (struct octeon_soft_command *)
1891 octeon_alloc_soft_command(oct_dev,
1892 0,
1893 sizeof(struct oct_nic_stats_resp),
1894 sizeof(struct octnic_ctrl_pkt));
1895
1896 if (!sc)
1897 return -ENOMEM;
1898
1899 resp = (struct oct_nic_stats_resp *)sc->virtrptr;
1900 memset(resp, 0, sizeof(struct oct_nic_stats_resp));
1901
1902 ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
1903 memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
1904 ctrl->netdev = netdev;
1905 init_completion(&ctrl->complete);
1906
1907 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1908
1909 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1910 OPCODE_NIC_PORT_STATS, 0, 0, 0);
1911
1912 sc->callback = octnet_nic_stats_callback;
1913 sc->callback_arg = sc;
1914 sc->wait_time = 500; /*in milli seconds*/
1915
1916 retval = octeon_send_soft_command(oct_dev, sc);
1917 if (retval == IQ_SEND_FAILED) {
1918 octeon_free_soft_command(oct_dev, sc);
1919 return -EINVAL;
1920 }
1921
1922 wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
1923
1924 if (resp->status != 1) {
1925 octeon_free_soft_command(oct_dev, sc);
1926
1927 return -EINVAL;
1928 }
1929
1930 octeon_free_soft_command(oct_dev, sc);
1931
1932 return 0;
1933}
1934
1935static int lio_get_intr_coalesce(struct net_device *netdev, 2171static int lio_get_intr_coalesce(struct net_device *netdev,
1936 struct ethtool_coalesce *intr_coal) 2172 struct ethtool_coalesce *intr_coal)
1937{ 2173{
@@ -2876,6 +3112,7 @@ static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
2876 3112
2877static const struct ethtool_ops lio_ethtool_ops = { 3113static const struct ethtool_ops lio_ethtool_ops = {
2878 .get_link_ksettings = lio_get_link_ksettings, 3114 .get_link_ksettings = lio_get_link_ksettings,
3115 .set_link_ksettings = lio_set_link_ksettings,
2879 .get_link = ethtool_op_get_link, 3116 .get_link = ethtool_op_get_link,
2880 .get_drvinfo = lio_get_drvinfo, 3117 .get_drvinfo = lio_get_drvinfo,
2881 .get_ringparam = lio_ethtool_get_ringparam, 3118 .get_ringparam = lio_ethtool_get_ringparam,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index f3891ae11b02..e500528ad751 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -138,33 +138,10 @@ union tx_info {
138 * by this structure in the NIC module. 138 * by this structure in the NIC module.
139 */ 139 */
140 140
141#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
142
143#define OCTNIC_GSO_MAX_HEADER_SIZE 128 141#define OCTNIC_GSO_MAX_HEADER_SIZE 128
144#define OCTNIC_GSO_MAX_SIZE \ 142#define OCTNIC_GSO_MAX_SIZE \
145 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) 143 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
146 144
147/** Structure of a node in list of gather components maintained by
148 * NIC driver for each network device.
149 */
150struct octnic_gather {
151 /** List manipulation. Next and prev pointers. */
152 struct list_head list;
153
154 /** Size of the gather component at sg in bytes. */
155 int sg_size;
156
157 /** Number of bytes that sg was adjusted to make it 8B-aligned. */
158 int adjust;
159
160 /** Gather component that can accommodate max sized fragment list
161 * received from the IP layer.
162 */
163 struct octeon_sg_entry *sg;
164
165 dma_addr_t sg_dma_ptr;
166};
167
168struct handshake { 145struct handshake {
169 struct completion init; 146 struct completion init;
170 struct completion started; 147 struct completion started;
@@ -520,7 +497,7 @@ static void liquidio_deinit_pci(void)
520 */ 497 */
521static inline int check_txq_status(struct lio *lio) 498static inline int check_txq_status(struct lio *lio)
522{ 499{
523 int numqs = lio->netdev->num_tx_queues; 500 int numqs = lio->netdev->real_num_tx_queues;
524 int ret_val = 0; 501 int ret_val = 0;
525 int q, iq; 502 int q, iq;
526 503
@@ -542,148 +519,6 @@ static inline int check_txq_status(struct lio *lio)
542} 519}
543 520
544/** 521/**
545 * Remove the node at the head of the list. The list would be empty at
546 * the end of this call if there are no more nodes in the list.
547 */
548static inline struct list_head *list_delete_head(struct list_head *root)
549{
550 struct list_head *node;
551
552 if ((root->prev == root) && (root->next == root))
553 node = NULL;
554 else
555 node = root->next;
556
557 if (node)
558 list_del(node);
559
560 return node;
561}
562
563/**
564 * \brief Delete gather lists
565 * @param lio per-network private data
566 */
567static void delete_glists(struct lio *lio)
568{
569 struct octnic_gather *g;
570 int i;
571
572 kfree(lio->glist_lock);
573 lio->glist_lock = NULL;
574
575 if (!lio->glist)
576 return;
577
578 for (i = 0; i < lio->linfo.num_txpciq; i++) {
579 do {
580 g = (struct octnic_gather *)
581 list_delete_head(&lio->glist[i]);
582 if (g)
583 kfree(g);
584 } while (g);
585
586 if (lio->glists_virt_base && lio->glists_virt_base[i] &&
587 lio->glists_dma_base && lio->glists_dma_base[i]) {
588 lio_dma_free(lio->oct_dev,
589 lio->glist_entry_size * lio->tx_qsize,
590 lio->glists_virt_base[i],
591 lio->glists_dma_base[i]);
592 }
593 }
594
595 kfree(lio->glists_virt_base);
596 lio->glists_virt_base = NULL;
597
598 kfree(lio->glists_dma_base);
599 lio->glists_dma_base = NULL;
600
601 kfree(lio->glist);
602 lio->glist = NULL;
603}
604
605/**
606 * \brief Setup gather lists
607 * @param lio per-network private data
608 */
609static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
610{
611 int i, j;
612 struct octnic_gather *g;
613
614 lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
615 GFP_KERNEL);
616 if (!lio->glist_lock)
617 return -ENOMEM;
618
619 lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
620 GFP_KERNEL);
621 if (!lio->glist) {
622 kfree(lio->glist_lock);
623 lio->glist_lock = NULL;
624 return -ENOMEM;
625 }
626
627 lio->glist_entry_size =
628 ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
629
630 /* allocate memory to store virtual and dma base address of
631 * per glist consistent memory
632 */
633 lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
634 GFP_KERNEL);
635 lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
636 GFP_KERNEL);
637
638 if (!lio->glists_virt_base || !lio->glists_dma_base) {
639 delete_glists(lio);
640 return -ENOMEM;
641 }
642
643 for (i = 0; i < num_iqs; i++) {
644 int numa_node = dev_to_node(&oct->pci_dev->dev);
645
646 spin_lock_init(&lio->glist_lock[i]);
647
648 INIT_LIST_HEAD(&lio->glist[i]);
649
650 lio->glists_virt_base[i] =
651 lio_dma_alloc(oct,
652 lio->glist_entry_size * lio->tx_qsize,
653 &lio->glists_dma_base[i]);
654
655 if (!lio->glists_virt_base[i]) {
656 delete_glists(lio);
657 return -ENOMEM;
658 }
659
660 for (j = 0; j < lio->tx_qsize; j++) {
661 g = kzalloc_node(sizeof(*g), GFP_KERNEL,
662 numa_node);
663 if (!g)
664 g = kzalloc(sizeof(*g), GFP_KERNEL);
665 if (!g)
666 break;
667
668 g->sg = lio->glists_virt_base[i] +
669 (j * lio->glist_entry_size);
670
671 g->sg_dma_ptr = lio->glists_dma_base[i] +
672 (j * lio->glist_entry_size);
673
674 list_add_tail(&g->list, &lio->glist[i]);
675 }
676
677 if (j != lio->tx_qsize) {
678 delete_glists(lio);
679 return -ENOMEM;
680 }
681 }
682
683 return 0;
684}
685
686/**
687 * \brief Print link information 522 * \brief Print link information
688 * @param netdev network device 523 * @param netdev network device
689 */ 524 */
@@ -1077,6 +912,9 @@ liquidio_probe(struct pci_dev *pdev,
1077 /* set linux specific device pointer */ 912 /* set linux specific device pointer */
1078 oct_dev->pci_dev = (void *)pdev; 913 oct_dev->pci_dev = (void *)pdev;
1079 914
915 oct_dev->subsystem_id = pdev->subsystem_vendor |
916 (pdev->subsystem_device << 16);
917
1080 hs = &handshake[oct_dev->octeon_id]; 918 hs = &handshake[oct_dev->octeon_id];
1081 init_completion(&hs->init); 919 init_completion(&hs->init);
1082 init_completion(&hs->started); 920 init_completion(&hs->started);
@@ -1471,7 +1309,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1471 1309
1472 cleanup_rx_oom_poll_fn(netdev); 1310 cleanup_rx_oom_poll_fn(netdev);
1473 1311
1474 delete_glists(lio); 1312 lio_delete_glists(lio);
1475 1313
1476 free_netdev(netdev); 1314 free_netdev(netdev);
1477 1315
@@ -1686,7 +1524,7 @@ static void free_netsgbuf(void *buf)
1686 i++; 1524 i++;
1687 } 1525 }
1688 1526
1689 iq = skb_iq(lio, skb); 1527 iq = skb_iq(lio->oct_dev, skb);
1690 spin_lock(&lio->glist_lock[iq]); 1528 spin_lock(&lio->glist_lock[iq]);
1691 list_add_tail(&g->list, &lio->glist[iq]); 1529 list_add_tail(&g->list, &lio->glist[iq]);
1692 spin_unlock(&lio->glist_lock[iq]); 1530 spin_unlock(&lio->glist_lock[iq]);
@@ -1729,7 +1567,7 @@ static void free_netsgbuf_with_resp(void *buf)
1729 i++; 1567 i++;
1730 } 1568 }
1731 1569
1732 iq = skb_iq(lio, skb); 1570 iq = skb_iq(lio->oct_dev, skb);
1733 1571
1734 spin_lock(&lio->glist_lock[iq]); 1572 spin_lock(&lio->glist_lock[iq]);
1735 list_add_tail(&g->list, &lio->glist[iq]); 1573 list_add_tail(&g->list, &lio->glist[iq]);
@@ -1942,39 +1780,6 @@ static int load_firmware(struct octeon_device *oct)
1942} 1780}
1943 1781
1944/** 1782/**
1945 * \brief Callback for getting interface configuration
1946 * @param status status of request
1947 * @param buf pointer to resp structure
1948 */
1949static void if_cfg_callback(struct octeon_device *oct,
1950 u32 status __attribute__((unused)),
1951 void *buf)
1952{
1953 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1954 struct liquidio_if_cfg_resp *resp;
1955 struct liquidio_if_cfg_context *ctx;
1956
1957 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1958 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
1959
1960 oct = lio_get_device(ctx->octeon_id);
1961 if (resp->status)
1962 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n",
1963 CVM_CAST64(resp->status), status);
1964 WRITE_ONCE(ctx->cond, 1);
1965
1966 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
1967 resp->cfg_info.liquidio_firmware_version);
1968
1969 /* This barrier is required to be sure that the response has been
1970 * written fully before waking up the handler
1971 */
1972 wmb();
1973
1974 wake_up_interruptible(&ctx->wc);
1975}
1976
1977/**
1978 * \brief Poll routine for checking transmit queue status 1783 * \brief Poll routine for checking transmit queue status
1979 * @param work work_struct data structure 1784 * @param work work_struct data structure
1980 */ 1785 */
@@ -2049,11 +1854,6 @@ static int liquidio_open(struct net_device *netdev)
2049 1854
2050 ifstate_set(lio, LIO_IFSTATE_RUNNING); 1855 ifstate_set(lio, LIO_IFSTATE_RUNNING);
2051 1856
2052 /* Ready for link status updates */
2053 lio->intf_open = 1;
2054
2055 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
2056
2057 if (OCTEON_CN23XX_PF(oct)) { 1857 if (OCTEON_CN23XX_PF(oct)) {
2058 if (!oct->msix_on) 1858 if (!oct->msix_on)
2059 if (setup_tx_poll_fn(netdev)) 1859 if (setup_tx_poll_fn(netdev))
@@ -2063,7 +1863,12 @@ static int liquidio_open(struct net_device *netdev)
2063 return -1; 1863 return -1;
2064 } 1864 }
2065 1865
2066 start_txqs(netdev); 1866 netif_tx_start_all_queues(netdev);
1867
1868 /* Ready for link status updates */
1869 lio->intf_open = 1;
1870
1871 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
2067 1872
2068 /* tell Octeon to start forwarding packets to host */ 1873 /* tell Octeon to start forwarding packets to host */
2069 send_rx_ctrl_cmd(lio, 1); 1874 send_rx_ctrl_cmd(lio, 1);
@@ -2086,11 +1891,15 @@ static int liquidio_stop(struct net_device *netdev)
2086 1891
2087 ifstate_reset(lio, LIO_IFSTATE_RUNNING); 1892 ifstate_reset(lio, LIO_IFSTATE_RUNNING);
2088 1893
2089 netif_tx_disable(netdev); 1894 /* Stop any link updates */
1895 lio->intf_open = 0;
1896
1897 stop_txqs(netdev);
2090 1898
2091 /* Inform that netif carrier is down */ 1899 /* Inform that netif carrier is down */
2092 netif_carrier_off(netdev); 1900 netif_carrier_off(netdev);
2093 lio->intf_open = 0; 1901 netif_tx_disable(netdev);
1902
2094 lio->linfo.link.s.link_up = 0; 1903 lio->linfo.link.s.link_up = 0;
2095 lio->link_changes++; 1904 lio->link_changes++;
2096 1905
@@ -2252,14 +2061,11 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
2252 return 0; 2061 return 0;
2253} 2062}
2254 2063
2255/** 2064static void
2256 * \brief Net device get_stats 2065liquidio_get_stats64(struct net_device *netdev,
2257 * @param netdev network device 2066 struct rtnl_link_stats64 *lstats)
2258 */
2259static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
2260{ 2067{
2261 struct lio *lio = GET_LIO(netdev); 2068 struct lio *lio = GET_LIO(netdev);
2262 struct net_device_stats *stats = &netdev->stats;
2263 struct octeon_device *oct; 2069 struct octeon_device *oct;
2264 u64 pkts = 0, drop = 0, bytes = 0; 2070 u64 pkts = 0, drop = 0, bytes = 0;
2265 struct oct_droq_stats *oq_stats; 2071 struct oct_droq_stats *oq_stats;
@@ -2269,7 +2075,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
2269 oct = lio->oct_dev; 2075 oct = lio->oct_dev;
2270 2076
2271 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 2077 if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2272 return stats; 2078 return;
2273 2079
2274 for (i = 0; i < oct->num_iqs; i++) { 2080 for (i = 0; i < oct->num_iqs; i++) {
2275 iq_no = lio->linfo.txpciq[i].s.q_no; 2081 iq_no = lio->linfo.txpciq[i].s.q_no;
@@ -2279,9 +2085,9 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
2279 bytes += iq_stats->tx_tot_bytes; 2085 bytes += iq_stats->tx_tot_bytes;
2280 } 2086 }
2281 2087
2282 stats->tx_packets = pkts; 2088 lstats->tx_packets = pkts;
2283 stats->tx_bytes = bytes; 2089 lstats->tx_bytes = bytes;
2284 stats->tx_dropped = drop; 2090 lstats->tx_dropped = drop;
2285 2091
2286 pkts = 0; 2092 pkts = 0;
2287 drop = 0; 2093 drop = 0;
@@ -2298,11 +2104,34 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
2298 bytes += oq_stats->rx_bytes_received; 2104 bytes += oq_stats->rx_bytes_received;
2299 } 2105 }
2300 2106
2301 stats->rx_bytes = bytes; 2107 lstats->rx_bytes = bytes;
2302 stats->rx_packets = pkts; 2108 lstats->rx_packets = pkts;
2303 stats->rx_dropped = drop; 2109 lstats->rx_dropped = drop;
2304 2110
2305 return stats; 2111 octnet_get_link_stats(netdev);
2112 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
2113 lstats->collisions = oct->link_stats.fromhost.total_collisions;
2114
2115 /* detailed rx_errors: */
2116 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
2117 /* recved pkt with crc error */
2118 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
2119 /* recv'd frame alignment error */
2120 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
2121 /* recv'r fifo overrun */
2122 lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
2123
2124 lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
2125 lstats->rx_frame_errors + lstats->rx_fifo_errors;
2126
2127 /* detailed tx_errors */
2128 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
2129 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
2130 lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;
2131
2132 lstats->tx_errors = lstats->tx_aborted_errors +
2133 lstats->tx_carrier_errors +
2134 lstats->tx_fifo_errors;
2306} 2135}
2307 2136
2308/** 2137/**
@@ -2510,7 +2339,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2510 lio = GET_LIO(netdev); 2339 lio = GET_LIO(netdev);
2511 oct = lio->oct_dev; 2340 oct = lio->oct_dev;
2512 2341
2513 q_idx = skb_iq(lio, skb); 2342 q_idx = skb_iq(oct, skb);
2514 tag = q_idx; 2343 tag = q_idx;
2515 iq_no = lio->linfo.txpciq[q_idx].s.q_no; 2344 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2516 2345
@@ -2603,7 +2432,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2603 2432
2604 spin_lock(&lio->glist_lock[q_idx]); 2433 spin_lock(&lio->glist_lock[q_idx]);
2605 g = (struct octnic_gather *) 2434 g = (struct octnic_gather *)
2606 list_delete_head(&lio->glist[q_idx]); 2435 lio_list_delete_head(&lio->glist[q_idx]);
2607 spin_unlock(&lio->glist_lock[q_idx]); 2436 spin_unlock(&lio->glist_lock[q_idx]);
2608 2437
2609 if (!g) { 2438 if (!g) {
@@ -3355,7 +3184,7 @@ static const struct net_device_ops lionetdevops = {
3355 .ndo_open = liquidio_open, 3184 .ndo_open = liquidio_open,
3356 .ndo_stop = liquidio_stop, 3185 .ndo_stop = liquidio_stop,
3357 .ndo_start_xmit = liquidio_xmit, 3186 .ndo_start_xmit = liquidio_xmit,
3358 .ndo_get_stats = liquidio_get_stats, 3187 .ndo_get_stats64 = liquidio_get_stats64,
3359 .ndo_set_mac_address = liquidio_set_mac, 3188 .ndo_set_mac_address = liquidio_set_mac,
3360 .ndo_set_rx_mode = liquidio_set_mcast_list, 3189 .ndo_set_rx_mode = liquidio_set_mcast_list,
3361 .ndo_tx_timeout = liquidio_tx_timeout, 3190 .ndo_tx_timeout = liquidio_tx_timeout,
@@ -3476,6 +3305,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
3476 struct liquidio_if_cfg_resp *resp; 3305 struct liquidio_if_cfg_resp *resp;
3477 struct octdev_props *props; 3306 struct octdev_props *props;
3478 int retval, num_iqueues, num_oqueues; 3307 int retval, num_iqueues, num_oqueues;
3308 int max_num_queues = 0;
3479 union oct_nic_if_cfg if_cfg; 3309 union oct_nic_if_cfg if_cfg;
3480 unsigned int base_queue; 3310 unsigned int base_queue;
3481 unsigned int gmx_port_id; 3311 unsigned int gmx_port_id;
@@ -3556,9 +3386,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
3556 OPCODE_NIC_IF_CFG, 0, 3386 OPCODE_NIC_IF_CFG, 0,
3557 if_cfg.u64, 0); 3387 if_cfg.u64, 0);
3558 3388
3559 sc->callback = if_cfg_callback; 3389 sc->callback = lio_if_cfg_callback;
3560 sc->callback_arg = sc; 3390 sc->callback_arg = sc;
3561 sc->wait_time = 3000; 3391 sc->wait_time = LIO_IFCFG_WAIT_TIME;
3562 3392
3563 retval = octeon_send_soft_command(octeon_dev, sc); 3393 retval = octeon_send_soft_command(octeon_dev, sc);
3564 if (retval == IQ_SEND_FAILED) { 3394 if (retval == IQ_SEND_FAILED) {
@@ -3612,11 +3442,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
3612 resp->cfg_info.oqmask); 3442 resp->cfg_info.oqmask);
3613 goto setup_nic_dev_fail; 3443 goto setup_nic_dev_fail;
3614 } 3444 }
3445
3446 if (OCTEON_CN6XXX(octeon_dev)) {
3447 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3448 cn6xxx));
3449 } else if (OCTEON_CN23XX_PF(octeon_dev)) {
3450 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3451 cn23xx_pf));
3452 }
3453
3615 dev_dbg(&octeon_dev->pci_dev->dev, 3454 dev_dbg(&octeon_dev->pci_dev->dev,
3616 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", 3455 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
3617 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, 3456 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3618 num_iqueues, num_oqueues); 3457 num_iqueues, num_oqueues, max_num_queues);
3619 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues); 3458 netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
3620 3459
3621 if (!netdev) { 3460 if (!netdev) {
3622 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); 3461 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
@@ -3631,6 +3470,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
3631 netdev->netdev_ops = &lionetdevops; 3470 netdev->netdev_ops = &lionetdevops;
3632 SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops); 3471 SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops);
3633 3472
3473 retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
3474 if (retval) {
3475 dev_err(&octeon_dev->pci_dev->dev,
3476 "setting real number rx failed\n");
3477 goto setup_nic_dev_fail;
3478 }
3479
3480 retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
3481 if (retval) {
3482 dev_err(&octeon_dev->pci_dev->dev,
3483 "setting real number tx failed\n");
3484 goto setup_nic_dev_fail;
3485 }
3486
3634 lio = GET_LIO(netdev); 3487 lio = GET_LIO(netdev);
3635 3488
3636 memset(lio, 0, sizeof(struct lio)); 3489 memset(lio, 0, sizeof(struct lio));
@@ -3752,7 +3605,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
3752 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); 3605 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3753 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); 3606 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3754 3607
3755 if (setup_glists(octeon_dev, lio, num_iqueues)) { 3608 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
3756 dev_err(&octeon_dev->pci_dev->dev, 3609 dev_err(&octeon_dev->pci_dev->dev,
3757 "Gather list allocation failed\n"); 3610 "Gather list allocation failed\n");
3758 goto setup_nic_dev_fail; 3611 goto setup_nic_dev_fail;
@@ -3814,6 +3667,23 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
3814 "NIC ifidx:%d Setup successful\n", i); 3667 "NIC ifidx:%d Setup successful\n", i);
3815 3668
3816 octeon_free_soft_command(octeon_dev, sc); 3669 octeon_free_soft_command(octeon_dev, sc);
3670
3671 if (octeon_dev->subsystem_id ==
3672 OCTEON_CN2350_25GB_SUBSYS_ID ||
3673 octeon_dev->subsystem_id ==
3674 OCTEON_CN2360_25GB_SUBSYS_ID) {
3675 liquidio_get_speed(lio);
3676
3677 if (octeon_dev->speed_setting == 0) {
3678 octeon_dev->speed_setting = 25;
3679 octeon_dev->no_speed_setting = 1;
3680 }
3681 } else {
3682 octeon_dev->no_speed_setting = 1;
3683 octeon_dev->speed_setting = 10;
3684 }
3685 octeon_dev->speed_boot = octeon_dev->speed_setting;
3686
3817 } 3687 }
3818 3688
3819 devlink = devlink_alloc(&liquidio_devlink_ops, 3689 devlink = devlink_alloc(&liquidio_devlink_ops,
@@ -4251,7 +4121,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
4251 } 4121 }
4252 atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE); 4122 atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4253 4123
4254 if (octeon_allocate_ioq_vector(octeon_dev)) { 4124 if (octeon_allocate_ioq_vector
4125 (octeon_dev,
4126 octeon_dev->sriov_info.num_pf_rings)) {
4255 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n"); 4127 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4256 return 1; 4128 return 1;
4257 } 4129 }
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index f92dfa411de6..7fa0212873ac 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -69,30 +69,10 @@ union tx_info {
69 } s; 69 } s;
70}; 70};
71 71
72#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
73
74#define OCTNIC_GSO_MAX_HEADER_SIZE 128 72#define OCTNIC_GSO_MAX_HEADER_SIZE 128
75#define OCTNIC_GSO_MAX_SIZE \ 73#define OCTNIC_GSO_MAX_SIZE \
76 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) 74 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
77 75
78struct octnic_gather {
79 /* List manipulation. Next and prev pointers. */
80 struct list_head list;
81
82 /* Size of the gather component at sg in bytes. */
83 int sg_size;
84
85 /* Number of bytes that sg was adjusted to make it 8B-aligned. */
86 int adjust;
87
88 /* Gather component that can accommodate max sized fragment list
89 * received from the IP layer.
90 */
91 struct octeon_sg_entry *sg;
92
93 dma_addr_t sg_dma_ptr;
94};
95
96static int 76static int
97liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 77liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
98static void liquidio_vf_remove(struct pci_dev *pdev); 78static void liquidio_vf_remove(struct pci_dev *pdev);
@@ -285,142 +265,6 @@ static struct pci_driver liquidio_vf_pci_driver = {
285}; 265};
286 266
287/** 267/**
288 * Remove the node at the head of the list. The list would be empty at
289 * the end of this call if there are no more nodes in the list.
290 */
291static struct list_head *list_delete_head(struct list_head *root)
292{
293 struct list_head *node;
294
295 if ((root->prev == root) && (root->next == root))
296 node = NULL;
297 else
298 node = root->next;
299
300 if (node)
301 list_del(node);
302
303 return node;
304}
305
306/**
307 * \brief Delete gather lists
308 * @param lio per-network private data
309 */
310static void delete_glists(struct lio *lio)
311{
312 struct octnic_gather *g;
313 int i;
314
315 kfree(lio->glist_lock);
316 lio->glist_lock = NULL;
317
318 if (!lio->glist)
319 return;
320
321 for (i = 0; i < lio->linfo.num_txpciq; i++) {
322 do {
323 g = (struct octnic_gather *)
324 list_delete_head(&lio->glist[i]);
325 kfree(g);
326 } while (g);
327
328 if (lio->glists_virt_base && lio->glists_virt_base[i] &&
329 lio->glists_dma_base && lio->glists_dma_base[i]) {
330 lio_dma_free(lio->oct_dev,
331 lio->glist_entry_size * lio->tx_qsize,
332 lio->glists_virt_base[i],
333 lio->glists_dma_base[i]);
334 }
335 }
336
337 kfree(lio->glists_virt_base);
338 lio->glists_virt_base = NULL;
339
340 kfree(lio->glists_dma_base);
341 lio->glists_dma_base = NULL;
342
343 kfree(lio->glist);
344 lio->glist = NULL;
345}
346
347/**
348 * \brief Setup gather lists
349 * @param lio per-network private data
350 */
351static int setup_glists(struct lio *lio, int num_iqs)
352{
353 struct octnic_gather *g;
354 int i, j;
355
356 lio->glist_lock =
357 kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL);
358 if (!lio->glist_lock)
359 return -ENOMEM;
360
361 lio->glist =
362 kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL);
363 if (!lio->glist) {
364 kfree(lio->glist_lock);
365 lio->glist_lock = NULL;
366 return -ENOMEM;
367 }
368
369 lio->glist_entry_size =
370 ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
371
372 /* allocate memory to store virtual and dma base address of
373 * per glist consistent memory
374 */
375 lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
376 GFP_KERNEL);
377 lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
378 GFP_KERNEL);
379
380 if (!lio->glists_virt_base || !lio->glists_dma_base) {
381 delete_glists(lio);
382 return -ENOMEM;
383 }
384
385 for (i = 0; i < num_iqs; i++) {
386 spin_lock_init(&lio->glist_lock[i]);
387
388 INIT_LIST_HEAD(&lio->glist[i]);
389
390 lio->glists_virt_base[i] =
391 lio_dma_alloc(lio->oct_dev,
392 lio->glist_entry_size * lio->tx_qsize,
393 &lio->glists_dma_base[i]);
394
395 if (!lio->glists_virt_base[i]) {
396 delete_glists(lio);
397 return -ENOMEM;
398 }
399
400 for (j = 0; j < lio->tx_qsize; j++) {
401 g = kzalloc(sizeof(*g), GFP_KERNEL);
402 if (!g)
403 break;
404
405 g->sg = lio->glists_virt_base[i] +
406 (j * lio->glist_entry_size);
407
408 g->sg_dma_ptr = lio->glists_dma_base[i] +
409 (j * lio->glist_entry_size);
410
411 list_add_tail(&g->list, &lio->glist[i]);
412 }
413
414 if (j != lio->tx_qsize) {
415 delete_glists(lio);
416 return -ENOMEM;
417 }
418 }
419
420 return 0;
421}
422
423/**
424 * \brief Print link information 268 * \brief Print link information
425 * @param netdev network device 269 * @param netdev network device
426 */ 270 */
@@ -567,6 +411,9 @@ liquidio_vf_probe(struct pci_dev *pdev,
567 /* set linux specific device pointer */ 411 /* set linux specific device pointer */
568 oct_dev->pci_dev = pdev; 412 oct_dev->pci_dev = pdev;
569 413
414 oct_dev->subsystem_id = pdev->subsystem_vendor |
415 (pdev->subsystem_device << 16);
416
570 if (octeon_device_init(oct_dev)) { 417 if (octeon_device_init(oct_dev)) {
571 liquidio_vf_remove(pdev); 418 liquidio_vf_remove(pdev);
572 return -ENOMEM; 419 return -ENOMEM;
@@ -856,7 +703,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
856 703
857 cleanup_link_status_change_wq(netdev); 704 cleanup_link_status_change_wq(netdev);
858 705
859 delete_glists(lio); 706 lio_delete_glists(lio);
860 707
861 free_netdev(netdev); 708 free_netdev(netdev);
862 709
@@ -1005,7 +852,7 @@ static void free_netsgbuf(void *buf)
1005 i++; 852 i++;
1006 } 853 }
1007 854
1008 iq = skb_iq(lio, skb); 855 iq = skb_iq(lio->oct_dev, skb);
1009 856
1010 spin_lock(&lio->glist_lock[iq]); 857 spin_lock(&lio->glist_lock[iq]);
1011 list_add_tail(&g->list, &lio->glist[iq]); 858 list_add_tail(&g->list, &lio->glist[iq]);
@@ -1049,7 +896,7 @@ static void free_netsgbuf_with_resp(void *buf)
1049 i++; 896 i++;
1050 } 897 }
1051 898
1052 iq = skb_iq(lio, skb); 899 iq = skb_iq(lio->oct_dev, skb);
1053 900
1054 spin_lock(&lio->glist_lock[iq]); 901 spin_lock(&lio->glist_lock[iq]);
1055 list_add_tail(&g->list, &lio->glist[iq]); 902 list_add_tail(&g->list, &lio->glist[iq]);
@@ -1059,38 +906,6 @@ static void free_netsgbuf_with_resp(void *buf)
1059} 906}
1060 907
1061/** 908/**
1062 * \brief Callback for getting interface configuration
1063 * @param status status of request
1064 * @param buf pointer to resp structure
1065 */
1066static void if_cfg_callback(struct octeon_device *oct,
1067 u32 status __attribute__((unused)), void *buf)
1068{
1069 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1070 struct liquidio_if_cfg_context *ctx;
1071 struct liquidio_if_cfg_resp *resp;
1072
1073 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1074 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
1075
1076 oct = lio_get_device(ctx->octeon_id);
1077 if (resp->status)
1078 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
1079 CVM_CAST64(resp->status));
1080 WRITE_ONCE(ctx->cond, 1);
1081
1082 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
1083 resp->cfg_info.liquidio_firmware_version);
1084
1085 /* This barrier is required to be sure that the response has been
1086 * written fully before waking up the handler
1087 */
1088 wmb();
1089
1090 wake_up_interruptible(&ctx->wc);
1091}
1092
1093/**
1094 * \brief Net device open for LiquidIO 909 * \brief Net device open for LiquidIO
1095 * @param netdev network device 910 * @param netdev network device
1096 */ 911 */
@@ -1336,24 +1151,21 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
1336 return 0; 1151 return 0;
1337} 1152}
1338 1153
1339/** 1154static void
1340 * \brief Net device get_stats 1155liquidio_get_stats64(struct net_device *netdev,
1341 * @param netdev network device 1156 struct rtnl_link_stats64 *lstats)
1342 */
1343static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
1344{ 1157{
1345 struct lio *lio = GET_LIO(netdev); 1158 struct lio *lio = GET_LIO(netdev);
1346 struct net_device_stats *stats = &netdev->stats; 1159 struct octeon_device *oct;
1347 u64 pkts = 0, drop = 0, bytes = 0; 1160 u64 pkts = 0, drop = 0, bytes = 0;
1348 struct oct_droq_stats *oq_stats; 1161 struct oct_droq_stats *oq_stats;
1349 struct oct_iq_stats *iq_stats; 1162 struct oct_iq_stats *iq_stats;
1350 struct octeon_device *oct;
1351 int i, iq_no, oq_no; 1163 int i, iq_no, oq_no;
1352 1164
1353 oct = lio->oct_dev; 1165 oct = lio->oct_dev;
1354 1166
1355 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 1167 if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1356 return stats; 1168 return;
1357 1169
1358 for (i = 0; i < oct->num_iqs; i++) { 1170 for (i = 0; i < oct->num_iqs; i++) {
1359 iq_no = lio->linfo.txpciq[i].s.q_no; 1171 iq_no = lio->linfo.txpciq[i].s.q_no;
@@ -1363,9 +1175,9 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
1363 bytes += iq_stats->tx_tot_bytes; 1175 bytes += iq_stats->tx_tot_bytes;
1364 } 1176 }
1365 1177
1366 stats->tx_packets = pkts; 1178 lstats->tx_packets = pkts;
1367 stats->tx_bytes = bytes; 1179 lstats->tx_bytes = bytes;
1368 stats->tx_dropped = drop; 1180 lstats->tx_dropped = drop;
1369 1181
1370 pkts = 0; 1182 pkts = 0;
1371 drop = 0; 1183 drop = 0;
@@ -1382,11 +1194,29 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
1382 bytes += oq_stats->rx_bytes_received; 1194 bytes += oq_stats->rx_bytes_received;
1383 } 1195 }
1384 1196
1385 stats->rx_bytes = bytes; 1197 lstats->rx_bytes = bytes;
1386 stats->rx_packets = pkts; 1198 lstats->rx_packets = pkts;
1387 stats->rx_dropped = drop; 1199 lstats->rx_dropped = drop;
1200
1201 octnet_get_link_stats(netdev);
1202 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
1203
1204 /* detailed rx_errors: */
1205 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
1206 /* recved pkt with crc error */
1207 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
1208 /* recv'd frame alignment error */
1209 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
1388 1210
1389 return stats; 1211 lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
1212 lstats->rx_frame_errors;
1213
1214 /* detailed tx_errors */
1215 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
1216 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
1217
1218 lstats->tx_errors = lstats->tx_aborted_errors +
1219 lstats->tx_carrier_errors;
1390} 1220}
1391 1221
1392/** 1222/**
@@ -1580,7 +1410,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
1580 lio = GET_LIO(netdev); 1410 lio = GET_LIO(netdev);
1581 oct = lio->oct_dev; 1411 oct = lio->oct_dev;
1582 1412
1583 q_idx = skb_iq(lio, skb); 1413 q_idx = skb_iq(lio->oct_dev, skb);
1584 tag = q_idx; 1414 tag = q_idx;
1585 iq_no = lio->linfo.txpciq[q_idx].s.q_no; 1415 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
1586 1416
@@ -1661,8 +1491,8 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
1661 int i, frags; 1491 int i, frags;
1662 1492
1663 spin_lock(&lio->glist_lock[q_idx]); 1493 spin_lock(&lio->glist_lock[q_idx]);
1664 g = (struct octnic_gather *)list_delete_head( 1494 g = (struct octnic_gather *)
1665 &lio->glist[q_idx]); 1495 lio_list_delete_head(&lio->glist[q_idx]);
1666 spin_unlock(&lio->glist_lock[q_idx]); 1496 spin_unlock(&lio->glist_lock[q_idx]);
1667 1497
1668 if (!g) { 1498 if (!g) {
@@ -2034,7 +1864,7 @@ static const struct net_device_ops lionetdevops = {
2034 .ndo_open = liquidio_open, 1864 .ndo_open = liquidio_open,
2035 .ndo_stop = liquidio_stop, 1865 .ndo_stop = liquidio_stop,
2036 .ndo_start_xmit = liquidio_xmit, 1866 .ndo_start_xmit = liquidio_xmit,
2037 .ndo_get_stats = liquidio_get_stats, 1867 .ndo_get_stats64 = liquidio_get_stats64,
2038 .ndo_set_mac_address = liquidio_set_mac, 1868 .ndo_set_mac_address = liquidio_set_mac,
2039 .ndo_set_rx_mode = liquidio_set_mcast_list, 1869 .ndo_set_rx_mode = liquidio_set_mcast_list,
2040 .ndo_tx_timeout = liquidio_tx_timeout, 1870 .ndo_tx_timeout = liquidio_tx_timeout,
@@ -2156,7 +1986,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
2156 OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 1986 OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
2157 0); 1987 0);
2158 1988
2159 sc->callback = if_cfg_callback; 1989 sc->callback = lio_if_cfg_callback;
2160 sc->callback_arg = sc; 1990 sc->callback_arg = sc;
2161 sc->wait_time = 5000; 1991 sc->wait_time = 5000;
2162 1992
@@ -2273,6 +2103,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
2273 netdev->features = (lio->dev_capability & ~NETIF_F_LRO); 2103 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
2274 2104
2275 netdev->hw_features = lio->dev_capability; 2105 netdev->hw_features = lio->dev_capability;
2106 netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
2276 2107
2277 /* MTU range: 68 - 16000 */ 2108 /* MTU range: 68 - 16000 */
2278 netdev->min_mtu = LIO_MIN_MTU_SIZE; 2109 netdev->min_mtu = LIO_MIN_MTU_SIZE;
@@ -2321,7 +2152,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
2321 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); 2152 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
2322 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); 2153 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
2323 2154
2324 if (setup_glists(lio, num_iqueues)) { 2155 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
2325 dev_err(&octeon_dev->pci_dev->dev, 2156 dev_err(&octeon_dev->pci_dev->dev,
2326 "Gather list allocation failed\n"); 2157 "Gather list allocation failed\n");
2327 goto setup_nic_dev_fail; 2158 goto setup_nic_dev_fail;
@@ -2371,6 +2202,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
2371 "NIC ifidx:%d Setup successful\n", i); 2202 "NIC ifidx:%d Setup successful\n", i);
2372 2203
2373 octeon_free_soft_command(octeon_dev, sc); 2204 octeon_free_soft_command(octeon_dev, sc);
2205
2206 octeon_dev->no_speed_setting = 1;
2374 } 2207 }
2375 2208
2376 return 0; 2209 return 0;
@@ -2512,7 +2345,7 @@ static int octeon_device_init(struct octeon_device *oct)
2512 } 2345 }
2513 atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE); 2346 atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);
2514 2347
2515 if (octeon_allocate_ioq_vector(oct)) { 2348 if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) {
2516 dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n"); 2349 dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
2517 return 1; 2350 return 1;
2518 } 2351 }
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 34a94daca590..285b24836974 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -84,6 +84,7 @@ enum octeon_tag_type {
84#define OPCODE_NIC_IF_CFG 0x09 84#define OPCODE_NIC_IF_CFG 0x09
85#define OPCODE_NIC_VF_DRV_NOTICE 0x0A 85#define OPCODE_NIC_VF_DRV_NOTICE 0x0A
86#define OPCODE_NIC_INTRMOD_PARAMS 0x0B 86#define OPCODE_NIC_INTRMOD_PARAMS 0x0B
87#define OPCODE_NIC_QCOUNT_UPDATE 0x12
87#define OPCODE_NIC_SET_TRUSTED_VF 0x13 88#define OPCODE_NIC_SET_TRUSTED_VF 0x13
88#define OPCODE_NIC_SYNC_OCTEON_TIME 0x14 89#define OPCODE_NIC_SYNC_OCTEON_TIME 0x14
89#define VF_DRV_LOADED 1 90#define VF_DRV_LOADED 1
@@ -92,6 +93,7 @@ enum octeon_tag_type {
92 93
93#define OPCODE_NIC_VF_REP_PKT 0x15 94#define OPCODE_NIC_VF_REP_PKT 0x15
94#define OPCODE_NIC_VF_REP_CMD 0x16 95#define OPCODE_NIC_VF_REP_CMD 0x16
96#define OPCODE_NIC_UBOOT_CTL 0x17
95 97
96#define CORE_DRV_TEST_SCATTER_OP 0xFFF5 98#define CORE_DRV_TEST_SCATTER_OP 0xFFF5
97 99
@@ -248,6 +250,9 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
248#define OCTNET_CMD_VLAN_FILTER_ENABLE 0x1 250#define OCTNET_CMD_VLAN_FILTER_ENABLE 0x1
249#define OCTNET_CMD_VLAN_FILTER_DISABLE 0x0 251#define OCTNET_CMD_VLAN_FILTER_DISABLE 0x0
250 252
253#define SEAPI_CMD_SPEED_SET 0x2
254#define SEAPI_CMD_SPEED_GET 0x3
255
251#define LIO_CMD_WAIT_TM 100 256#define LIO_CMD_WAIT_TM 100
252 257
253/* RX(packets coming from wire) Checksum verification flags */ 258/* RX(packets coming from wire) Checksum verification flags */
@@ -802,6 +807,9 @@ struct nic_rx_stats {
802 u64 fw_total_rcvd; 807 u64 fw_total_rcvd;
803 u64 fw_total_fwd; 808 u64 fw_total_fwd;
804 u64 fw_total_fwd_bytes; 809 u64 fw_total_fwd_bytes;
810 u64 fw_total_mcast;
811 u64 fw_total_bcast;
812
805 u64 fw_err_pko; 813 u64 fw_err_pko;
806 u64 fw_err_link; 814 u64 fw_err_link;
807 u64 fw_err_drop; 815 u64 fw_err_drop;
@@ -858,6 +866,8 @@ struct nic_tx_stats {
858 u64 fw_total_sent; 866 u64 fw_total_sent;
859 u64 fw_total_fwd; 867 u64 fw_total_fwd;
860 u64 fw_total_fwd_bytes; 868 u64 fw_total_fwd_bytes;
869 u64 fw_total_mcast_sent;
870 u64 fw_total_bcast_sent;
861 u64 fw_err_pko; 871 u64 fw_err_pko;
862 u64 fw_err_link; 872 u64 fw_err_link;
863 u64 fw_err_drop; 873 u64 fw_err_drop;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index f38abf626412..f878a552fef3 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -824,23 +824,18 @@ int octeon_deregister_device(struct octeon_device *oct)
824} 824}
825 825
826int 826int
827octeon_allocate_ioq_vector(struct octeon_device *oct) 827octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs)
828{ 828{
829 int i, num_ioqs = 0;
830 struct octeon_ioq_vector *ioq_vector; 829 struct octeon_ioq_vector *ioq_vector;
831 int cpu_num; 830 int cpu_num;
832 int size; 831 int size;
833 832 int i;
834 if (OCTEON_CN23XX_PF(oct))
835 num_ioqs = oct->sriov_info.num_pf_rings;
836 else if (OCTEON_CN23XX_VF(oct))
837 num_ioqs = oct->sriov_info.rings_per_vf;
838 833
839 size = sizeof(struct octeon_ioq_vector) * num_ioqs; 834 size = sizeof(struct octeon_ioq_vector) * num_ioqs;
840 835
841 oct->ioq_vector = vzalloc(size); 836 oct->ioq_vector = vzalloc(size);
842 if (!oct->ioq_vector) 837 if (!oct->ioq_vector)
843 return 1; 838 return -1;
844 for (i = 0; i < num_ioqs; i++) { 839 for (i = 0; i < num_ioqs; i++) {
845 ioq_vector = &oct->ioq_vector[i]; 840 ioq_vector = &oct->ioq_vector[i];
846 ioq_vector->oct_dev = oct; 841 ioq_vector->oct_dev = oct;
@@ -856,6 +851,7 @@ octeon_allocate_ioq_vector(struct octeon_device *oct)
856 else 851 else
857 ioq_vector->ioq_num = i; 852 ioq_vector->ioq_num = i;
858 } 853 }
854
859 return 0; 855 return 0;
860} 856}
861 857
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 91937cc5c1d7..94a4ed88d618 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -43,6 +43,13 @@
43#define OCTEON_CN23XX_REV_1_1 0x01 43#define OCTEON_CN23XX_REV_1_1 0x01
44#define OCTEON_CN23XX_REV_2_0 0x80 44#define OCTEON_CN23XX_REV_2_0 0x80
45 45
46/**SubsystemId for the chips */
47#define OCTEON_CN2350_10GB_SUBSYS_ID_1 0X3177d
48#define OCTEON_CN2350_10GB_SUBSYS_ID_2 0X4177d
49#define OCTEON_CN2360_10GB_SUBSYS_ID 0X5177d
50#define OCTEON_CN2350_25GB_SUBSYS_ID 0X7177d
51#define OCTEON_CN2360_25GB_SUBSYS_ID 0X6177d
52
46/** Endian-swap modes supported by Octeon. */ 53/** Endian-swap modes supported by Octeon. */
47enum octeon_pci_swap_mode { 54enum octeon_pci_swap_mode {
48 OCTEON_PCI_PASSTHROUGH = 0, 55 OCTEON_PCI_PASSTHROUGH = 0,
@@ -430,6 +437,8 @@ struct octeon_device {
430 437
431 u16 rev_id; 438 u16 rev_id;
432 439
440 u32 subsystem_id;
441
433 u16 pf_num; 442 u16 pf_num;
434 443
435 u16 vf_num; 444 u16 vf_num;
@@ -584,6 +593,11 @@ struct octeon_device {
584 struct lio_vf_rep_list vf_rep_list; 593 struct lio_vf_rep_list vf_rep_list;
585 struct devlink *devlink; 594 struct devlink *devlink;
586 enum devlink_eswitch_mode eswitch_mode; 595 enum devlink_eswitch_mode eswitch_mode;
596
597 /* for 25G NIC speed change */
598 u8 speed_boot;
599 u8 speed_setting;
600 u8 no_speed_setting;
587}; 601};
588 602
589#define OCT_DRV_ONLINE 1 603#define OCT_DRV_ONLINE 1
@@ -867,7 +881,7 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
867struct octeon_config *octeon_get_conf(struct octeon_device *oct); 881struct octeon_config *octeon_get_conf(struct octeon_device *oct);
868 882
869void octeon_free_ioq_vector(struct octeon_device *oct); 883void octeon_free_ioq_vector(struct octeon_device *oct);
870int octeon_allocate_ioq_vector(struct octeon_device *oct); 884int octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs);
871void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq); 885void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq);
872 886
873/* LiquidIO driver pivate flags */ 887/* LiquidIO driver pivate flags */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index 4069710796a8..dd3177a526d2 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -47,6 +47,29 @@ struct liquidio_if_cfg_resp {
47 u64 status; 47 u64 status;
48}; 48};
49 49
50#define LIO_IFCFG_WAIT_TIME 3000 /* In milli seconds */
51
52/* Structure of a node in list of gather components maintained by
53 * NIC driver for each network device.
54 */
55struct octnic_gather {
56 /* List manipulation. Next and prev pointers. */
57 struct list_head list;
58
59 /* Size of the gather component at sg in bytes. */
60 int sg_size;
61
62 /* Number of bytes that sg was adjusted to make it 8B-aligned. */
63 int adjust;
64
65 /* Gather component that can accommodate max sized fragment list
66 * received from the IP layer.
67 */
68 struct octeon_sg_entry *sg;
69
70 dma_addr_t sg_dma_ptr;
71};
72
50struct oct_nic_stats_resp { 73struct oct_nic_stats_resp {
51 u64 rh; 74 u64 rh;
52 struct oct_link_stats stats; 75 struct oct_link_stats stats;
@@ -58,6 +81,18 @@ struct oct_nic_stats_ctrl {
58 struct net_device *netdev; 81 struct net_device *netdev;
59}; 82};
60 83
84struct oct_nic_seapi_resp {
85 u64 rh;
86 u32 speed;
87 u64 status;
88};
89
90struct liquidio_nic_seapi_ctl_context {
91 int octeon_id;
92 u32 status;
93 struct completion complete;
94};
95
61/** LiquidIO per-interface network private data */ 96/** LiquidIO per-interface network private data */
62struct lio { 97struct lio {
63 /** State of the interface. Rx/Tx happens only in the RUNNING state. */ 98 /** State of the interface. Rx/Tx happens only in the RUNNING state. */
@@ -190,6 +225,8 @@ irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)),
190 225
191int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs); 226int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs);
192 227
228int octnet_get_link_stats(struct net_device *netdev);
229
193int lio_wait_for_clean_oq(struct octeon_device *oct); 230int lio_wait_for_clean_oq(struct octeon_device *oct);
194/** 231/**
195 * \brief Register ethtool operations 232 * \brief Register ethtool operations
@@ -197,6 +234,17 @@ int lio_wait_for_clean_oq(struct octeon_device *oct);
197 */ 234 */
198void liquidio_set_ethtool_ops(struct net_device *netdev); 235void liquidio_set_ethtool_ops(struct net_device *netdev);
199 236
237void lio_if_cfg_callback(struct octeon_device *oct,
238 u32 status __attribute__((unused)),
239 void *buf);
240
241void lio_delete_glists(struct lio *lio);
242
243int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs);
244
245int liquidio_get_speed(struct lio *lio);
246int liquidio_set_speed(struct lio *lio, int speed);
247
200/** 248/**
201 * \brief Net device change_mtu 249 * \brief Net device change_mtu
202 * @param netdev network device 250 * @param netdev network device
@@ -515,7 +563,7 @@ static inline void stop_txqs(struct net_device *netdev)
515{ 563{
516 int i; 564 int i;
517 565
518 for (i = 0; i < netdev->num_tx_queues; i++) 566 for (i = 0; i < netdev->real_num_tx_queues; i++)
519 netif_stop_subqueue(netdev, i); 567 netif_stop_subqueue(netdev, i);
520} 568}
521 569
@@ -528,7 +576,7 @@ static inline void wake_txqs(struct net_device *netdev)
528 struct lio *lio = GET_LIO(netdev); 576 struct lio *lio = GET_LIO(netdev);
529 int i, qno; 577 int i, qno;
530 578
531 for (i = 0; i < netdev->num_tx_queues; i++) { 579 for (i = 0; i < netdev->real_num_tx_queues; i++) {
532 qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no; 580 qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no;
533 581
534 if (__netif_subqueue_stopped(netdev, i)) { 582 if (__netif_subqueue_stopped(netdev, i)) {
@@ -549,14 +597,33 @@ static inline void start_txqs(struct net_device *netdev)
549 int i; 597 int i;
550 598
551 if (lio->linfo.link.s.link_up) { 599 if (lio->linfo.link.s.link_up) {
552 for (i = 0; i < netdev->num_tx_queues; i++) 600 for (i = 0; i < netdev->real_num_tx_queues; i++)
553 netif_start_subqueue(netdev, i); 601 netif_start_subqueue(netdev, i);
554 } 602 }
555} 603}
556 604
557static inline int skb_iq(struct lio *lio, struct sk_buff *skb) 605static inline int skb_iq(struct octeon_device *oct, struct sk_buff *skb)
558{ 606{
559 return skb->queue_mapping % lio->linfo.num_txpciq; 607 return skb->queue_mapping % oct->num_iqs;
608}
609
610/**
611 * Remove the node at the head of the list. The list would be empty at
612 * the end of this call if there are no more nodes in the list.
613 */
614static inline struct list_head *lio_list_delete_head(struct list_head *root)
615{
616 struct list_head *node;
617
618 if (root->prev == root && root->next == root)
619 node = NULL;
620 else
621 node = root->next;
622
623 if (node)
624 list_del(node);
625
626 return node;
560} 627}
561 628
562#endif 629#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index db92f1858060..aae980205ece 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -64,8 +64,7 @@ static int set_tcb_field(struct adapter *adap, struct filter_entry *f,
64 if (!skb) 64 if (!skb)
65 return -ENOMEM; 65 return -ENOMEM;
66 66
67 req = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*req)); 67 req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
68 memset(req, 0, sizeof(*req));
69 INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid); 68 INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid);
70 req->reply_ctrl = htons(REPLY_CHAN_V(0) | 69 req->reply_ctrl = htons(REPLY_CHAN_V(0) |
71 QUEUENO_V(adap->sge.fw_evtq.abs_id) | 70 QUEUENO_V(adap->sge.fw_evtq.abs_id) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.c b/drivers/net/ethernet/chelsio/cxgb4/srq.c
index 6228a5708307..82b70a565e24 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/srq.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/srq.c
@@ -84,8 +84,7 @@ int cxgb4_get_srq_entry(struct net_device *dev,
84 if (!skb) 84 if (!skb)
85 return -ENOMEM; 85 return -ENOMEM;
86 req = (struct cpl_srq_table_req *) 86 req = (struct cpl_srq_table_req *)
87 __skb_put(skb, sizeof(*req)); 87 __skb_put_zero(skb, sizeof(*req));
88 memset(req, 0, sizeof(*req));
89 INIT_TP_WR(req, 0); 88 INIT_TP_WR(req, 0);
90 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SRQ_TABLE_REQ, 89 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SRQ_TABLE_REQ,
91 TID_TID_V(srq_idx) | 90 TID_TID_V(srq_idx) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 51b18035d691..90b5274a8ba1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -145,6 +145,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
145 CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */ 145 CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
146 CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */ 146 CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
147 CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */ 147 CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */
148 CH_PCI_ID_TABLE_FENTRY(0x5019), /* T540-LP-BT */
149 CH_PCI_ID_TABLE_FENTRY(0x501a), /* T540-SO-BT */
150 CH_PCI_ID_TABLE_FENTRY(0x501b), /* T540-SO-CR */
148 CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ 151 CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
149 CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ 152 CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
150 CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ 153 CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 123e2c1b65f5..4eb15ceddca3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
36#define __T4FW_VERSION_H__ 36#define __T4FW_VERSION_H__
37 37
38#define T4FW_VERSION_MAJOR 0x01 38#define T4FW_VERSION_MAJOR 0x01
39#define T4FW_VERSION_MINOR 0x10 39#define T4FW_VERSION_MINOR 0x13
40#define T4FW_VERSION_MICRO 0x3F 40#define T4FW_VERSION_MICRO 0x01
41#define T4FW_VERSION_BUILD 0x00 41#define T4FW_VERSION_BUILD 0x00
42 42
43#define T4FW_MIN_VERSION_MAJOR 0x01 43#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
45#define T4FW_MIN_VERSION_MICRO 0x00 45#define T4FW_MIN_VERSION_MICRO 0x00
46 46
47#define T5FW_VERSION_MAJOR 0x01 47#define T5FW_VERSION_MAJOR 0x01
48#define T5FW_VERSION_MINOR 0x10 48#define T5FW_VERSION_MINOR 0x13
49#define T5FW_VERSION_MICRO 0x3F 49#define T5FW_VERSION_MICRO 0x01
50#define T5FW_VERSION_BUILD 0x00 50#define T5FW_VERSION_BUILD 0x00
51 51
52#define T5FW_MIN_VERSION_MAJOR 0x00 52#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
54#define T5FW_MIN_VERSION_MICRO 0x00 54#define T5FW_MIN_VERSION_MICRO 0x00
55 55
56#define T6FW_VERSION_MAJOR 0x01 56#define T6FW_VERSION_MAJOR 0x01
57#define T6FW_VERSION_MINOR 0x10 57#define T6FW_VERSION_MINOR 0x13
58#define T6FW_VERSION_MICRO 0x3F 58#define T6FW_VERSION_MICRO 0x01
59#define T6FW_VERSION_BUILD 0x00 59#define T6FW_VERSION_BUILD 0x00
60 60
61#define T6FW_MIN_VERSION_MAJOR 0x00 61#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
index 4b5aacc09cab..240ba9d4c399 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
@@ -90,8 +90,7 @@ cxgb_mk_tid_release(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
90{ 90{
91 struct cpl_tid_release *req; 91 struct cpl_tid_release *req;
92 92
93 req = __skb_put(skb, len); 93 req = __skb_put_zero(skb, len);
94 memset(req, 0, len);
95 94
96 INIT_TP_WR(req, tid); 95 INIT_TP_WR(req, tid);
97 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); 96 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
@@ -104,8 +103,7 @@ cxgb_mk_close_con_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
104{ 103{
105 struct cpl_close_con_req *req; 104 struct cpl_close_con_req *req;
106 105
107 req = __skb_put(skb, len); 106 req = __skb_put_zero(skb, len);
108 memset(req, 0, len);
109 107
110 INIT_TP_WR(req, tid); 108 INIT_TP_WR(req, tid);
111 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); 109 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
@@ -119,8 +117,7 @@ cxgb_mk_abort_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
119{ 117{
120 struct cpl_abort_req *req; 118 struct cpl_abort_req *req;
121 119
122 req = __skb_put(skb, len); 120 req = __skb_put_zero(skb, len);
123 memset(req, 0, len);
124 121
125 INIT_TP_WR(req, tid); 122 INIT_TP_WR(req, tid);
126 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid)); 123 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
@@ -134,8 +131,7 @@ cxgb_mk_abort_rpl(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
134{ 131{
135 struct cpl_abort_rpl *rpl; 132 struct cpl_abort_rpl *rpl;
136 133
137 rpl = __skb_put(skb, len); 134 rpl = __skb_put_zero(skb, len);
138 memset(rpl, 0, len);
139 135
140 INIT_TP_WR(rpl, tid); 136 INIT_TP_WR(rpl, tid);
141 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid)); 137 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
@@ -149,8 +145,7 @@ cxgb_mk_rx_data_ack(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
149{ 145{
150 struct cpl_rx_data_ack *req; 146 struct cpl_rx_data_ack *req;
151 147
152 req = __skb_put(skb, len); 148 req = __skb_put_zero(skb, len);
153 memset(req, 0, len);
154 149
155 INIT_TP_WR(req, tid); 150 INIT_TP_WR(req, tid);
156 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, tid)); 151 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, tid));
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 4df282ed22c7..0beee2cc2ddd 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -61,7 +61,7 @@ static const char hw_stat_gstrings[][ETH_GSTRING_LEN] = {
61static const char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { 61static const char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
62 "tx-single-collision", 62 "tx-single-collision",
63 "tx-multiple-collision", 63 "tx-multiple-collision",
64 "tx-late-collsion", 64 "tx-late-collision",
65 "tx-aborted-frames", 65 "tx-aborted-frames",
66 "tx-lost-frames", 66 "tx-lost-frames",
67 "tx-carrier-sense-errors", 67 "tx-carrier-sense-errors",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 8c55965a66ac..c4e295094da7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -502,7 +502,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
502 502
503 /* find outer header point */ 503 /* find outer header point */
504 l3.hdr = skb_network_header(skb); 504 l3.hdr = skb_network_header(skb);
505 l4_hdr = skb_inner_transport_header(skb); 505 l4_hdr = skb_transport_header(skb);
506 506
507 if (skb->protocol == htons(ETH_P_IPV6)) { 507 if (skb->protocol == htons(ETH_P_IPV6)) {
508 exthdr = l3.hdr + sizeof(*l3.v6); 508 exthdr = l3.hdr + sizeof(*l3.v6);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index ff13d1876d9e..fab70683bbf7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -31,6 +31,17 @@ static int hclge_ring_space(struct hclge_cmq_ring *ring)
31 return ring->desc_num - used - 1; 31 return ring->desc_num - used - 1;
32} 32}
33 33
34static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int h)
35{
36 int u = ring->next_to_use;
37 int c = ring->next_to_clean;
38
39 if (unlikely(h >= ring->desc_num))
40 return 0;
41
42 return u > c ? (h > c && h <= u) : (h > c || h <= u);
43}
44
34static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) 45static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
35{ 46{
36 int size = ring->desc_num * sizeof(struct hclge_desc); 47 int size = ring->desc_num * sizeof(struct hclge_desc);
@@ -141,6 +152,7 @@ static void hclge_cmd_init_regs(struct hclge_hw *hw)
141 152
142static int hclge_cmd_csq_clean(struct hclge_hw *hw) 153static int hclge_cmd_csq_clean(struct hclge_hw *hw)
143{ 154{
155 struct hclge_dev *hdev = (struct hclge_dev *)hw->back;
144 struct hclge_cmq_ring *csq = &hw->cmq.csq; 156 struct hclge_cmq_ring *csq = &hw->cmq.csq;
145 u16 ntc = csq->next_to_clean; 157 u16 ntc = csq->next_to_clean;
146 struct hclge_desc *desc; 158 struct hclge_desc *desc;
@@ -149,6 +161,13 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw)
149 161
150 desc = &csq->desc[ntc]; 162 desc = &csq->desc[ntc];
151 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); 163 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
164 rmb(); /* Make sure head is ready before touch any data */
165
166 if (!is_valid_csq_clean_head(csq, head)) {
167 dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head,
168 csq->next_to_use, csq->next_to_clean);
169 return 0;
170 }
152 171
153 while (head != ntc) { 172 while (head != ntc) {
154 memset(desc, 0, sizeof(*desc)); 173 memset(desc, 0, sizeof(*desc));
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 2066dd734444..dd5d65c9cca6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -304,8 +304,6 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
304 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, 304 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
305 {"mac_tx_4096_8191_oct_pkt_num", 305 {"mac_tx_4096_8191_oct_pkt_num",
306 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, 306 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
307 {"mac_tx_8192_12287_oct_pkt_num",
308 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_12287_oct_pkt_num)},
309 {"mac_tx_8192_9216_oct_pkt_num", 307 {"mac_tx_8192_9216_oct_pkt_num",
310 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, 308 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
311 {"mac_tx_9217_12287_oct_pkt_num", 309 {"mac_tx_9217_12287_oct_pkt_num",
@@ -356,8 +354,6 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
356 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, 354 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
357 {"mac_rx_4096_8191_oct_pkt_num", 355 {"mac_rx_4096_8191_oct_pkt_num",
358 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, 356 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
359 {"mac_rx_8192_12287_oct_pkt_num",
360 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_12287_oct_pkt_num)},
361 {"mac_rx_8192_9216_oct_pkt_num", 357 {"mac_rx_8192_9216_oct_pkt_num",
362 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, 358 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
363 {"mac_rx_9217_12287_oct_pkt_num", 359 {"mac_rx_9217_12287_oct_pkt_num",
@@ -1459,8 +1455,11 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
1459 /* We need to alloc a vport for main NIC of PF */ 1455 /* We need to alloc a vport for main NIC of PF */
1460 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; 1456 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1461 1457
1462 if (hdev->num_tqps < num_vport) 1458 if (hdev->num_tqps < num_vport) {
1463 num_vport = hdev->num_tqps; 1459 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1460 hdev->num_tqps, num_vport);
1461 return -EINVAL;
1462 }
1464 1463
1465 /* Alloc the same number of TQPs for every vport */ 1464 /* Alloc the same number of TQPs for every vport */
1466 tqp_per_vport = hdev->num_tqps / num_vport; 1465 tqp_per_vport = hdev->num_tqps / num_vport;
@@ -4540,8 +4539,9 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
4540 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable); 4539 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
4541} 4540}
4542 4541
4543int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, 4542static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
4544 bool is_kill, u16 vlan, u8 qos, __be16 proto) 4543 bool is_kill, u16 vlan, u8 qos,
4544 __be16 proto)
4545{ 4545{
4546#define HCLGE_MAX_VF_BYTES 16 4546#define HCLGE_MAX_VF_BYTES 16
4547 struct hclge_vlan_filter_vf_cfg_cmd *req0; 4547 struct hclge_vlan_filter_vf_cfg_cmd *req0;
@@ -4599,12 +4599,9 @@ int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
4599 return -EIO; 4599 return -EIO;
4600} 4600}
4601 4601
4602static int hclge_set_port_vlan_filter(struct hnae3_handle *handle, 4602static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
4603 __be16 proto, u16 vlan_id, 4603 u16 vlan_id, bool is_kill)
4604 bool is_kill)
4605{ 4604{
4606 struct hclge_vport *vport = hclge_get_vport(handle);
4607 struct hclge_dev *hdev = vport->back;
4608 struct hclge_vlan_filter_pf_cfg_cmd *req; 4605 struct hclge_vlan_filter_pf_cfg_cmd *req;
4609 struct hclge_desc desc; 4606 struct hclge_desc desc;
4610 u8 vlan_offset_byte_val; 4607 u8 vlan_offset_byte_val;
@@ -4624,22 +4621,66 @@ static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
4624 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; 4621 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
4625 4622
4626 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4623 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4624 if (ret)
4625 dev_err(&hdev->pdev->dev,
4626 "port vlan command, send fail, ret =%d.\n", ret);
4627 return ret;
4628}
4629
4630static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
4631 u16 vport_id, u16 vlan_id, u8 qos,
4632 bool is_kill)
4633{
4634 u16 vport_idx, vport_num = 0;
4635 int ret;
4636
4637 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
4638 0, proto);
4627 if (ret) { 4639 if (ret) {
4628 dev_err(&hdev->pdev->dev, 4640 dev_err(&hdev->pdev->dev,
4629 "port vlan command, send fail, ret =%d.\n", 4641 "Set %d vport vlan filter config fail, ret =%d.\n",
4630 ret); 4642 vport_id, ret);
4631 return ret; 4643 return ret;
4632 } 4644 }
4633 4645
4634 ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto); 4646 /* vlan 0 may be added twice when 8021q module is enabled */
4635 if (ret) { 4647 if (!is_kill && !vlan_id &&
4648 test_bit(vport_id, hdev->vlan_table[vlan_id]))
4649 return 0;
4650
4651 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
4636 dev_err(&hdev->pdev->dev, 4652 dev_err(&hdev->pdev->dev,
4637 "Set pf vlan filter config fail, ret =%d.\n", 4653 "Add port vlan failed, vport %d is already in vlan %d\n",
4638 ret); 4654 vport_id, vlan_id);
4639 return -EIO; 4655 return -EINVAL;
4640 } 4656 }
4641 4657
4642 return 0; 4658 if (is_kill &&
4659 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
4660 dev_err(&hdev->pdev->dev,
4661 "Delete port vlan failed, vport %d is not in vlan %d\n",
4662 vport_id, vlan_id);
4663 return -EINVAL;
4664 }
4665
4666 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID)
4667 vport_num++;
4668
4669 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
4670 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
4671 is_kill);
4672
4673 return ret;
4674}
4675
4676int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
4677 u16 vlan_id, bool is_kill)
4678{
4679 struct hclge_vport *vport = hclge_get_vport(handle);
4680 struct hclge_dev *hdev = vport->back;
4681
4682 return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
4683 0, is_kill);
4643} 4684}
4644 4685
4645static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, 4686static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
@@ -4653,7 +4694,7 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
4653 if (proto != htons(ETH_P_8021Q)) 4694 if (proto != htons(ETH_P_8021Q))
4654 return -EPROTONOSUPPORT; 4695 return -EPROTONOSUPPORT;
4655 4696
4656 return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto); 4697 return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
4657} 4698}
4658 4699
4659static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) 4700static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
@@ -4818,7 +4859,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
4818 } 4859 }
4819 4860
4820 handle = &hdev->vport[0].nic; 4861 handle = &hdev->vport[0].nic;
4821 return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); 4862 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
4822} 4863}
4823 4864
4824static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 4865static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
@@ -5166,12 +5207,6 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
5166 struct phy_device *phydev = hdev->hw.mac.phydev; 5207 struct phy_device *phydev = hdev->hw.mac.phydev;
5167 u32 fc_autoneg; 5208 u32 fc_autoneg;
5168 5209
5169 /* Only support flow control negotiation for netdev with
5170 * phy attached for now.
5171 */
5172 if (!phydev)
5173 return -EOPNOTSUPP;
5174
5175 fc_autoneg = hclge_get_autoneg(handle); 5210 fc_autoneg = hclge_get_autoneg(handle);
5176 if (auto_neg != fc_autoneg) { 5211 if (auto_neg != fc_autoneg) {
5177 dev_info(&hdev->pdev->dev, 5212 dev_info(&hdev->pdev->dev,
@@ -5190,6 +5225,12 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
5190 if (!fc_autoneg) 5225 if (!fc_autoneg)
5191 return hclge_cfg_pauseparam(hdev, rx_en, tx_en); 5226 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
5192 5227
5228 /* Only support flow control negotiation for netdev with
5229 * phy attached for now.
5230 */
5231 if (!phydev)
5232 return -EOPNOTSUPP;
5233
5193 return phy_start_aneg(phydev); 5234 return phy_start_aneg(phydev);
5194} 5235}
5195 5236
@@ -5427,7 +5468,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
5427 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 5468 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
5428 if (!hdev) { 5469 if (!hdev) {
5429 ret = -ENOMEM; 5470 ret = -ENOMEM;
5430 goto err_hclge_dev; 5471 goto out;
5431 } 5472 }
5432 5473
5433 hdev->pdev = pdev; 5474 hdev->pdev = pdev;
@@ -5440,38 +5481,38 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
5440 ret = hclge_pci_init(hdev); 5481 ret = hclge_pci_init(hdev);
5441 if (ret) { 5482 if (ret) {
5442 dev_err(&pdev->dev, "PCI init failed\n"); 5483 dev_err(&pdev->dev, "PCI init failed\n");
5443 goto err_pci_init; 5484 goto out;
5444 } 5485 }
5445 5486
5446 /* Firmware command queue initialize */ 5487 /* Firmware command queue initialize */
5447 ret = hclge_cmd_queue_init(hdev); 5488 ret = hclge_cmd_queue_init(hdev);
5448 if (ret) { 5489 if (ret) {
5449 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); 5490 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
5450 return ret; 5491 goto err_pci_uninit;
5451 } 5492 }
5452 5493
5453 /* Firmware command initialize */ 5494 /* Firmware command initialize */
5454 ret = hclge_cmd_init(hdev); 5495 ret = hclge_cmd_init(hdev);
5455 if (ret) 5496 if (ret)
5456 goto err_cmd_init; 5497 goto err_cmd_uninit;
5457 5498
5458 ret = hclge_get_cap(hdev); 5499 ret = hclge_get_cap(hdev);
5459 if (ret) { 5500 if (ret) {
5460 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", 5501 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
5461 ret); 5502 ret);
5462 return ret; 5503 goto err_cmd_uninit;
5463 } 5504 }
5464 5505
5465 ret = hclge_configure(hdev); 5506 ret = hclge_configure(hdev);
5466 if (ret) { 5507 if (ret) {
5467 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); 5508 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
5468 return ret; 5509 goto err_cmd_uninit;
5469 } 5510 }
5470 5511
5471 ret = hclge_init_msi(hdev); 5512 ret = hclge_init_msi(hdev);
5472 if (ret) { 5513 if (ret) {
5473 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); 5514 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
5474 return ret; 5515 goto err_cmd_uninit;
5475 } 5516 }
5476 5517
5477 ret = hclge_misc_irq_init(hdev); 5518 ret = hclge_misc_irq_init(hdev);
@@ -5479,69 +5520,71 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
5479 dev_err(&pdev->dev, 5520 dev_err(&pdev->dev,
5480 "Misc IRQ(vector0) init error, ret = %d.\n", 5521 "Misc IRQ(vector0) init error, ret = %d.\n",
5481 ret); 5522 ret);
5482 return ret; 5523 goto err_msi_uninit;
5483 } 5524 }
5484 5525
5485 ret = hclge_alloc_tqps(hdev); 5526 ret = hclge_alloc_tqps(hdev);
5486 if (ret) { 5527 if (ret) {
5487 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); 5528 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
5488 return ret; 5529 goto err_msi_irq_uninit;
5489 } 5530 }
5490 5531
5491 ret = hclge_alloc_vport(hdev); 5532 ret = hclge_alloc_vport(hdev);
5492 if (ret) { 5533 if (ret) {
5493 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); 5534 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
5494 return ret; 5535 goto err_msi_irq_uninit;
5495 } 5536 }
5496 5537
5497 ret = hclge_map_tqp(hdev); 5538 ret = hclge_map_tqp(hdev);
5498 if (ret) { 5539 if (ret) {
5499 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 5540 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
5500 return ret; 5541 goto err_sriov_disable;
5501 } 5542 }
5502 5543
5503 ret = hclge_mac_mdio_config(hdev); 5544 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
5504 if (ret) { 5545 ret = hclge_mac_mdio_config(hdev);
5505 dev_warn(&hdev->pdev->dev, 5546 if (ret) {
5506 "mdio config fail ret=%d\n", ret); 5547 dev_err(&hdev->pdev->dev,
5507 return ret; 5548 "mdio config fail ret=%d\n", ret);
5549 goto err_sriov_disable;
5550 }
5508 } 5551 }
5509 5552
5510 ret = hclge_mac_init(hdev); 5553 ret = hclge_mac_init(hdev);
5511 if (ret) { 5554 if (ret) {
5512 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 5555 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
5513 return ret; 5556 goto err_mdiobus_unreg;
5514 } 5557 }
5515 5558
5516 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 5559 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
5517 if (ret) { 5560 if (ret) {
5518 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 5561 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
5519 return ret; 5562 goto err_mdiobus_unreg;
5520 } 5563 }
5521 5564
5522 ret = hclge_init_vlan_config(hdev); 5565 ret = hclge_init_vlan_config(hdev);
5523 if (ret) { 5566 if (ret) {
5524 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 5567 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
5525 return ret; 5568 goto err_mdiobus_unreg;
5526 } 5569 }
5527 5570
5528 ret = hclge_tm_schd_init(hdev); 5571 ret = hclge_tm_schd_init(hdev);
5529 if (ret) { 5572 if (ret) {
5530 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); 5573 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
5531 return ret; 5574 goto err_mdiobus_unreg;
5532 } 5575 }
5533 5576
5534 hclge_rss_init_cfg(hdev); 5577 hclge_rss_init_cfg(hdev);
5535 ret = hclge_rss_init_hw(hdev); 5578 ret = hclge_rss_init_hw(hdev);
5536 if (ret) { 5579 if (ret) {
5537 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 5580 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
5538 return ret; 5581 goto err_mdiobus_unreg;
5539 } 5582 }
5540 5583
5541 ret = init_mgr_tbl(hdev); 5584 ret = init_mgr_tbl(hdev);
5542 if (ret) { 5585 if (ret) {
5543 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); 5586 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
5544 return ret; 5587 goto err_mdiobus_unreg;
5545 } 5588 }
5546 5589
5547 hclge_dcb_ops_set(hdev); 5590 hclge_dcb_ops_set(hdev);
@@ -5564,11 +5607,24 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
5564 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); 5607 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
5565 return 0; 5608 return 0;
5566 5609
5567err_cmd_init: 5610err_mdiobus_unreg:
5611 if (hdev->hw.mac.phydev)
5612 mdiobus_unregister(hdev->hw.mac.mdio_bus);
5613err_sriov_disable:
5614 if (IS_ENABLED(CONFIG_PCI_IOV))
5615 hclge_disable_sriov(hdev);
5616err_msi_irq_uninit:
5617 hclge_misc_irq_uninit(hdev);
5618err_msi_uninit:
5619 pci_free_irq_vectors(pdev);
5620err_cmd_uninit:
5621 hclge_destroy_cmd_queue(&hdev->hw);
5622err_pci_uninit:
5623 pci_clear_master(pdev);
5568 pci_release_regions(pdev); 5624 pci_release_regions(pdev);
5569err_pci_init: 5625 pci_disable_device(pdev);
5570 pci_set_drvdata(pdev, NULL); 5626 pci_set_drvdata(pdev, NULL);
5571err_hclge_dev: 5627out:
5572 return ret; 5628 return ret;
5573} 5629}
5574 5630
@@ -5586,6 +5642,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
5586 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5642 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5587 5643
5588 hclge_stats_clear(hdev); 5644 hclge_stats_clear(hdev);
5645 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
5589 5646
5590 ret = hclge_cmd_init(hdev); 5647 ret = hclge_cmd_init(hdev);
5591 if (ret) { 5648 if (ret) {
@@ -6203,7 +6260,7 @@ static const struct hnae3_ae_ops hclge_ops = {
6203 .get_fw_version = hclge_get_fw_version, 6260 .get_fw_version = hclge_get_fw_version,
6204 .get_mdix_mode = hclge_get_mdix_mode, 6261 .get_mdix_mode = hclge_get_mdix_mode,
6205 .enable_vlan_filter = hclge_enable_vlan_filter, 6262 .enable_vlan_filter = hclge_enable_vlan_filter,
6206 .set_vlan_filter = hclge_set_port_vlan_filter, 6263 .set_vlan_filter = hclge_set_vlan_filter,
6207 .set_vf_vlan_filter = hclge_set_vf_vlan_filter, 6264 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
6208 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, 6265 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
6209 .reset_event = hclge_reset_event, 6266 .reset_event = hclge_reset_event,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 0f4157e71282..b7ee91daea0c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -12,6 +12,8 @@
12#include <linux/fs.h> 12#include <linux/fs.h>
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/phy.h> 14#include <linux/phy.h>
15#include <linux/if_vlan.h>
16
15#include "hclge_cmd.h" 17#include "hclge_cmd.h"
16#include "hnae3.h" 18#include "hnae3.h"
17 19
@@ -406,9 +408,9 @@ struct hclge_mac_stats {
406 u64 mac_tx_1519_2047_oct_pkt_num; 408 u64 mac_tx_1519_2047_oct_pkt_num;
407 u64 mac_tx_2048_4095_oct_pkt_num; 409 u64 mac_tx_2048_4095_oct_pkt_num;
408 u64 mac_tx_4096_8191_oct_pkt_num; 410 u64 mac_tx_4096_8191_oct_pkt_num;
409 u64 mac_tx_8192_12287_oct_pkt_num; /* valid for GE MAC only */ 411 u64 rsv0;
410 u64 mac_tx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */ 412 u64 mac_tx_8192_9216_oct_pkt_num;
411 u64 mac_tx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC */ 413 u64 mac_tx_9217_12287_oct_pkt_num;
412 u64 mac_tx_12288_16383_oct_pkt_num; 414 u64 mac_tx_12288_16383_oct_pkt_num;
413 u64 mac_tx_1519_max_good_oct_pkt_num; 415 u64 mac_tx_1519_max_good_oct_pkt_num;
414 u64 mac_tx_1519_max_bad_oct_pkt_num; 416 u64 mac_tx_1519_max_bad_oct_pkt_num;
@@ -433,9 +435,9 @@ struct hclge_mac_stats {
433 u64 mac_rx_1519_2047_oct_pkt_num; 435 u64 mac_rx_1519_2047_oct_pkt_num;
434 u64 mac_rx_2048_4095_oct_pkt_num; 436 u64 mac_rx_2048_4095_oct_pkt_num;
435 u64 mac_rx_4096_8191_oct_pkt_num; 437 u64 mac_rx_4096_8191_oct_pkt_num;
436 u64 mac_rx_8192_12287_oct_pkt_num;/* valid for GE MAC only */ 438 u64 rsv1;
437 u64 mac_rx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */ 439 u64 mac_rx_8192_9216_oct_pkt_num;
438 u64 mac_rx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */ 440 u64 mac_rx_9217_12287_oct_pkt_num;
439 u64 mac_rx_12288_16383_oct_pkt_num; 441 u64 mac_rx_12288_16383_oct_pkt_num;
440 u64 mac_rx_1519_max_good_oct_pkt_num; 442 u64 mac_rx_1519_max_good_oct_pkt_num;
441 u64 mac_rx_1519_max_bad_oct_pkt_num; 443 u64 mac_rx_1519_max_bad_oct_pkt_num;
@@ -471,6 +473,7 @@ struct hclge_vlan_type_cfg {
471 u16 tx_in_vlan_type; 473 u16 tx_in_vlan_type;
472}; 474};
473 475
476#define HCLGE_VPORT_NUM 256
474struct hclge_dev { 477struct hclge_dev {
475 struct pci_dev *pdev; 478 struct pci_dev *pdev;
476 struct hnae3_ae_dev *ae_dev; 479 struct hnae3_ae_dev *ae_dev;
@@ -562,6 +565,7 @@ struct hclge_dev {
562 565
563 u64 rx_pkts_for_led; 566 u64 rx_pkts_for_led;
564 u64 tx_pkts_for_led; 567 u64 tx_pkts_for_led;
568 unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
565}; 569};
566 570
567/* VPort level vlan tag configuration for TX direction */ 571/* VPort level vlan tag configuration for TX direction */
@@ -646,8 +650,8 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
646} 650}
647 651
648int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex); 652int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
649int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid, 653int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
650 bool is_kill, u16 vlan, u8 qos, __be16 proto); 654 u16 vlan_id, bool is_kill);
651 655
652int hclge_buffer_alloc(struct hclge_dev *hdev); 656int hclge_buffer_alloc(struct hclge_dev *hdev);
653int hclge_rss_init_hw(struct hclge_dev *hdev); 657int hclge_rss_init_hw(struct hclge_dev *hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index a6f7ffa9c259..7563335b0c7f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -264,19 +264,18 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
264 struct hclge_mbx_vf_to_pf_cmd *mbx_req, 264 struct hclge_mbx_vf_to_pf_cmd *mbx_req,
265 bool gen_resp) 265 bool gen_resp)
266{ 266{
267 struct hclge_dev *hdev = vport->back;
268 int status = 0; 267 int status = 0;
269 268
270 if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) { 269 if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) {
270 struct hnae3_handle *handle = &vport->nic;
271 u16 vlan, proto; 271 u16 vlan, proto;
272 bool is_kill; 272 bool is_kill;
273 273
274 is_kill = !!mbx_req->msg[2]; 274 is_kill = !!mbx_req->msg[2];
275 memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan)); 275 memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan));
276 memcpy(&proto, &mbx_req->msg[5], sizeof(proto)); 276 memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
277 status = hclge_set_vf_vlan_common(hdev, vport->vport_id, 277 status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
278 is_kill, vlan, 0, 278 vlan, is_kill);
279 cpu_to_be16(proto));
280 } 279 }
281 280
282 if (gen_resp) 281 if (gen_resp)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 682c2d6618e7..9f7932e423b5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -140,8 +140,11 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
140 struct mii_bus *mdio_bus; 140 struct mii_bus *mdio_bus;
141 int ret; 141 int ret;
142 142
143 if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) 143 if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
144 return 0; 144 dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n",
145 hdev->hw.mac.phy_addr);
146 return -EINVAL;
147 }
145 148
146 mdio_bus = devm_mdiobus_alloc(&hdev->pdev->dev); 149 mdio_bus = devm_mdiobus_alloc(&hdev->pdev->dev);
147 if (!mdio_bus) 150 if (!mdio_bus)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 885f25cd7be4..c69ecab460f9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -134,11 +134,8 @@ static int hclge_pfc_stats_get(struct hclge_dev *hdev,
134 } 134 }
135 135
136 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM); 136 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
137 if (ret) { 137 if (ret)
138 dev_err(&hdev->pdev->dev,
139 "Get pfc pause stats fail, ret = %d.\n", ret);
140 return ret; 138 return ret;
141 }
142 139
143 for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) { 140 for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
144 struct hclge_pfc_stats_cmd *pfc_stats = 141 struct hclge_pfc_stats_cmd *pfc_stats =
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 41ad56edfb96..27d5f27163d2 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1,31 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 1999 - 2006 Intel Corporation. */
3
4 Intel PRO/100 Linux driver
5 Copyright(c) 1999 - 2006 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30/* 4/*
31 * e100.c: Intel(R) PRO/100 ethernet driver 5 * e100.c: Intel(R) PRO/100 ethernet driver
diff --git a/drivers/net/ethernet/intel/e1000/Makefile b/drivers/net/ethernet/intel/e1000/Makefile
index c7caadd3c8af..314c52d44b7c 100644
--- a/drivers/net/ethernet/intel/e1000/Makefile
+++ b/drivers/net/ethernet/intel/e1000/Makefile
@@ -1,31 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2################################################################################
3#
4# Intel PRO/1000 Linux driver
5# Copyright(c) 1999 - 2006 Intel Corporation. 2# Copyright(c) 1999 - 2006 Intel Corporation.
6#
7# This program is free software; you can redistribute it and/or modify it
8# under the terms and conditions of the GNU General Public License,
9# version 2, as published by the Free Software Foundation.
10#
11# This program is distributed in the hope it will be useful, but WITHOUT
12# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14# more details.
15#
16# You should have received a copy of the GNU General Public License along with
17# this program; if not, write to the Free Software Foundation, Inc.,
18# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19#
20# The full GNU General Public License is included in this distribution in
21# the file called "COPYING".
22#
23# Contact Information:
24# Linux NICS <linux.nics@intel.com>
25# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27#
28################################################################################
29 3
30# 4#
31# Makefile for the Intel(R) PRO/1000 ethernet driver 5# Makefile for the Intel(R) PRO/1000 ethernet driver
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 3a0feea2df54..c40729b2c184 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -1,32 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2006 Intel Corporation. */
3
4 Intel PRO/1000 Linux driver
5 Copyright(c) 1999 - 2006 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29
30 3
31/* Linux PRO/1000 Ethernet Driver main header file */ 4/* Linux PRO/1000 Ethernet Driver main header file */
32 5
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 3e80ca170dd7..5d365a986bb0 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1,26 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 1999 - 2006 Intel Corporation. */
3 * Intel PRO/1000 Linux driver
4 * Copyright(c) 1999 - 2006 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING".
17 *
18 * Contact Information:
19 * Linux NICS <linux.nics@intel.com>
20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 *
23 ******************************************************************************/
24 3
25/* ethtool support for e1000 */ 4/* ethtool support for e1000 */
26 5
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 6e7e923d57bf..48428d6a00be 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -1,31 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 1999 - 2006 Intel Corporation. */
3*
4 Intel PRO/1000 Linux driver
5 Copyright(c) 1999 - 2006 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28 */
29 3
30/* e1000_hw.c 4/* e1000_hw.c
31 * Shared functions for accessing and configuring the MAC 5 * Shared functions for accessing and configuring the MAC
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h
index f09c569ec19b..b57a04954ccf 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.h
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h
@@ -1,31 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2006 Intel Corporation. */
3
4 Intel PRO/1000 Linux driver
5 Copyright(c) 1999 - 2006 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30/* e1000_hw.h 4/* e1000_hw.h
31 * Structures, enums, and macros for the MAC 5 * Structures, enums, and macros for the MAC
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index d5eb19b86a0a..2110d5f2da19 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1,31 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 1999 - 2006 Intel Corporation. */
3
4 Intel PRO/1000 Linux driver
5 Copyright(c) 1999 - 2006 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#include "e1000.h" 4#include "e1000.h"
31#include <net/ip6_checksum.h> 5#include <net/ip6_checksum.h>
diff --git a/drivers/net/ethernet/intel/e1000/e1000_osdep.h b/drivers/net/ethernet/intel/e1000/e1000_osdep.h
index ae0559b8b011..e966bb290797 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_osdep.h
+++ b/drivers/net/ethernet/intel/e1000/e1000_osdep.h
@@ -1,32 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2006 Intel Corporation. */
3
4 Intel PRO/1000 Linux driver
5 Copyright(c) 1999 - 2006 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29
30 3
31/* glue for the OS independent part of e1000 4/* glue for the OS independent part of e1000
32 * includes register access macros 5 * includes register access macros
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index 345f23927bcc..d3f29ffe1e47 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -1,31 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 1999 - 2006 Intel Corporation. */
3
4 Intel PRO/1000 Linux driver
5 Copyright(c) 1999 - 2006 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#include "e1000.h" 4#include "e1000.h"
31 5
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 953e99df420c..257bd59bc9c6 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1,24 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23/* 80003ES2LAN Gigabit Ethernet Controller (Copper) 4/* 80003ES2LAN Gigabit Ethernet Controller (Copper)
24 * 80003ES2LAN Gigabit Ethernet Controller (Serdes) 5 * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
index ee6d1256fda4..aa9d639c6cbb 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
@@ -1,24 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23#ifndef _E1000E_80003ES2LAN_H_ 4#ifndef _E1000E_80003ES2LAN_H_
24#define _E1000E_80003ES2LAN_H_ 5#define _E1000E_80003ES2LAN_H_
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 924f2c8dfa6c..b9309302c29e 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1,24 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23/* 82571EB Gigabit Ethernet Controller 4/* 82571EB Gigabit Ethernet Controller
24 * 82571EB Gigabit Ethernet Controller (Copper) 5 * 82571EB Gigabit Ethernet Controller (Copper)
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
index 9a24c645f726..834c238d02db 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.h
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -1,24 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23#ifndef _E1000E_82571_H_ 4#ifndef _E1000E_82571_H_
24#define _E1000E_82571_H_ 5#define _E1000E_82571_H_
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index 24e391a4ac68..44e58b6e7660 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -1,30 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2################################################################################ 2# Copyright(c) 1999 - 2018 Intel Corporation.
3#
4# Intel PRO/1000 Linux driver
5# Copyright(c) 1999 - 2014 Intel Corporation.
6#
7# This program is free software; you can redistribute it and/or modify it
8# under the terms and conditions of the GNU General Public License,
9# version 2, as published by the Free Software Foundation.
10#
11# This program is distributed in the hope it will be useful, but WITHOUT
12# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14# more details.
15#
16# You should have received a copy of the GNU General Public License
17# along with this program; if not, see <http://www.gnu.org/licenses/>.
18#
19# The full GNU General Public License is included in this distribution in
20# the file called "COPYING".
21#
22# Contact Information:
23# Linux NICS <linux.nics@intel.com>
24# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26#
27################################################################################
28 3
29# 4#
30# Makefile for the Intel(R) PRO/1000 ethernet driver 5# Makefile for the Intel(R) PRO/1000 ethernet driver
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 22883015a695..fd550dee4982 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -1,24 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23#ifndef _E1000_DEFINES_H_ 4#ifndef _E1000_DEFINES_H_
24#define _E1000_DEFINES_H_ 5#define _E1000_DEFINES_H_
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index da88555ba1fd..c760dc72c520 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -1,24 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23/* Linux PRO/1000 Ethernet Driver main header file */ 4/* Linux PRO/1000 Ethernet Driver main header file */
24 5
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 64dc0c11147f..e084cb734eb1 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1,24 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23/* ethtool support for e1000 */ 4/* ethtool support for e1000 */
24 5
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 21802396bed6..eff75bd8a8f0 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,24 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23#ifndef _E1000_HW_H_ 4#ifndef _E1000_HW_H_
24#define _E1000_HW_H_ 5#define _E1000_HW_H_
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 1551d6ce5341..cdae0efde8e6 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1,24 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23/* 82562G 10/100 Network Connection 4/* 82562G 10/100 Network Connection
24 * 82562G-2 10/100 Network Connection 5 * 82562G-2 10/100 Network Connection
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 3c4f82c21084..eb09c755fa17 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -1,24 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23#ifndef _E1000E_ICH8LAN_H_ 4#ifndef _E1000E_ICH8LAN_H_
24#define _E1000E_ICH8LAN_H_ 5#define _E1000E_ICH8LAN_H_
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index b293464a9f27..4abd55d646c5 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,24 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23#include "e1000.h" 4#include "e1000.h"
24 5
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index cb0abf6c76a5..6ab261119801 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -1,24 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23#ifndef _E1000E_MAC_H_ 4#ifndef _E1000E_MAC_H_
24#define _E1000E_MAC_H_ 5#define _E1000E_MAC_H_
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index e027660aeb92..c4c9b20bc51f 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -1,24 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23#include "e1000.h" 4#include "e1000.h"
24 5
diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h
index 3268f2e58593..d868aad806d4 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.h
+++ b/drivers/net/ethernet/intel/e1000e/manage.h
@@ -1,24 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23#ifndef _E1000E_MANAGE_H_ 4#ifndef _E1000E_MANAGE_H_
24#define _E1000E_MANAGE_H_ 5#define _E1000E_MANAGE_H_
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index ec4a9759a6f2..d3fef7fefea8 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,24 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24 5
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index 68949bb41b7b..937f9af22d26 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -1,24 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23#include "e1000.h" 4#include "e1000.h"
24 5
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h
index 8e082028be7d..6a30dfea4117 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.h
+++ b/drivers/net/ethernet/intel/e1000e/nvm.h
@@ -1,24 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23#ifndef _E1000E_NVM_H_ 4#ifndef _E1000E_NVM_H_
24#define _E1000E_NVM_H_ 5#define _E1000E_NVM_H_
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 2def33eba9e6..098369fd3e65 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -1,24 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23#include <linux/netdevice.h> 4#include <linux/netdevice.h>
24#include <linux/module.h> 5#include <linux/module.h>
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index b8226ed0e338..42233019255a 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1,24 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23#include "e1000.h" 4#include "e1000.h"
24 5
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index d4180b5e9196..c48777d09523 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -1,24 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23#ifndef _E1000E_PHY_H_ 4#ifndef _E1000E_PHY_H_
24#define _E1000E_PHY_H_ 5#define _E1000E_PHY_H_
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index f941e5085f44..37c76945ad9b 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -1,24 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23/* PTP 1588 Hardware Clock (PHC) 4/* PTP 1588 Hardware Clock (PHC)
24 * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb) 5 * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb)
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 16afc3c2a986..47f5ca793970 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -1,24 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel PRO/1000 Linux driver 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Copyright(c) 1999 - 2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * Linux NICS <linux.nics@intel.com>
19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 */
22 3
23#ifndef _E1000E_REGS_H_ 4#ifndef _E1000E_REGS_H_
24#define _E1000E_REGS_H_ 5#define _E1000E_REGS_H_
diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile
index 93277cb99cb7..26a9746ccb14 100644
--- a/drivers/net/ethernet/intel/fm10k/Makefile
+++ b/drivers/net/ethernet/intel/fm10k/Makefile
@@ -1,26 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2################################################################################ 2# Copyright(c) 2013 - 2018 Intel Corporation.
3#
4# Intel(R) Ethernet Switch Host Interface Driver
5# Copyright(c) 2013 - 2016 Intel Corporation.
6#
7# This program is free software; you can redistribute it and/or modify it
8# under the terms and conditions of the GNU General Public License,
9# version 2, as published by the Free Software Foundation.
10#
11# This program is distributed in the hope it will be useful, but WITHOUT
12# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14# more details.
15#
16# The full GNU General Public License is included in this distribution in
17# the file called "COPYING".
18#
19# Contact Information:
20# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22#
23################################################################################
24 3
25# 4#
26# Makefile for the Intel(R) Ethernet Switch Host Interface Driver 5# Makefile for the Intel(R) Ethernet Switch Host Interface Driver
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index a9cdf763c59d..a903a0ba45e1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -1,23 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2017 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#ifndef _FM10K_H_ 4#ifndef _FM10K_H_
23#define _FM10K_H_ 5#define _FM10K_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index e303d88720ef..f51a63fca513 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -1,23 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2018 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#include "fm10k_common.h" 4#include "fm10k_common.h"
23 5
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
index 2bdb24d2ca9d..4c48fb73b3e7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
@@ -1,23 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2016 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#ifndef _FM10K_COMMON_H_ 4#ifndef _FM10K_COMMON_H_
23#define _FM10K_COMMON_H_ 5#define _FM10K_COMMON_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
index c4f733452ef2..20768ac7f17e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
@@ -1,23 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2016 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#include "fm10k.h" 4#include "fm10k.h"
23 5
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index 43e8d839831f..dca104121c05 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -1,23 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2016 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#include "fm10k.h" 4#include "fm10k.h"
23 5
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 28b6b4e56487..eeac2b75a195 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1,23 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2017 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#include <linux/vmalloc.h> 4#include <linux/vmalloc.h>
23 5
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 30395f5e5e87..e707d717012f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -1,23 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2017 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#include "fm10k.h" 4#include "fm10k.h"
23#include "fm10k_vf.h" 5#include "fm10k_vf.h"
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index c51d61f5f715..3f536541f45f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1,23 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2017 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#include <linux/types.h> 4#include <linux/types.h>
23#include <linux/module.h> 5#include <linux/module.h>
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index c01bf30a0c9e..21021fe4f1c3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1,23 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2017 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#include "fm10k_common.h" 4#include "fm10k_common.h"
23 5
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
index 007e1dfa9b7a..56d1abff04e2 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
@@ -1,23 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2016 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#ifndef _FM10K_MBX_H_ 4#ifndef _FM10K_MBX_H_
23#define _FM10K_MBX_H_ 5#define _FM10K_MBX_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 26e749766337..c879af72bbf5 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1,23 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2018 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#include "fm10k.h" 4#include "fm10k.h"
23#include <linux/vmalloc.h> 5#include <linux/vmalloc.h>
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index c4a2b688b38b..15071e4adb98 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1,23 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2018 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#include <linux/module.h> 4#include <linux/module.h>
23#include <linux/interrupt.h> 5#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 7ba54c534f8c..8f0a99b6a537 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1,23 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2018 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#include "fm10k_pf.h" 4#include "fm10k_pf.h"
23#include "fm10k_vf.h" 5#include "fm10k_vf.h"
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index ae81f9a16602..8e814df709d2 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -1,23 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2017 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#ifndef _FM10K_PF_H_ 4#ifndef _FM10K_PF_H_
23#define _FM10K_PF_H_ 5#define _FM10K_PF_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index 725ecb7abccd..2a7a40bf2b1c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -1,23 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2018 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#include "fm10k_tlv.h" 4#include "fm10k_tlv.h"
23 5
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
index 5d2ee759507e..160bc5b78f99 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
@@ -1,23 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2016 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#ifndef _FM10K_TLV_H_ 4#ifndef _FM10K_TLV_H_
23#define _FM10K_TLV_H_ 5#define _FM10K_TLV_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index dd23af11e2c1..3e608e493f9d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -1,23 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2016 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#ifndef _FM10K_TYPE_H_ 4#ifndef _FM10K_TYPE_H_
23#define _FM10K_TYPE_H_ 5#define _FM10K_TYPE_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index f06913630b39..a8519c1f0406 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -1,23 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2016 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#include "fm10k_vf.h" 4#include "fm10k_vf.h"
23 5
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
index 66a66b73a2f1..787d0d570a28 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
@@ -1,23 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel(R) Ethernet Switch Host Interface Driver 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 * Copyright(c) 2013 - 2016 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in
15 * the file called "COPYING".
16 *
17 * Contact Information:
18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 */
21 3
22#ifndef _FM10K_VF_H_ 4#ifndef _FM10K_VF_H_
23#define _FM10K_VF_H_ 5#define _FM10K_VF_H_
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 75437768a07c..14397e7e9925 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -1,29 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2################################################################################ 2# Copyright(c) 2013 - 2018 Intel Corporation.
3#
4# Intel Ethernet Controller XL710 Family Linux Driver
5# Copyright(c) 2013 - 2015 Intel Corporation.
6#
7# This program is free software; you can redistribute it and/or modify it
8# under the terms and conditions of the GNU General Public License,
9# version 2, as published by the Free Software Foundation.
10#
11# This program is distributed in the hope it will be useful, but WITHOUT
12# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14# more details.
15#
16# You should have received a copy of the GNU General Public License along
17# with this program. If not, see <http://www.gnu.org/licenses/>.
18#
19# The full GNU General Public License is included in this distribution in
20# the file called "COPYING".
21#
22# Contact Information:
23# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25#
26################################################################################
27 3
28# 4#
29# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver 5# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index a44139c1de80..7a80652e2500 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2017 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_H_ 4#ifndef _I40E_H_
29#define _I40E_H_ 5#define _I40E_H_
@@ -334,10 +310,12 @@ struct i40e_tc_configuration {
334 struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS]; 310 struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS];
335}; 311};
336 312
313#define I40E_UDP_PORT_INDEX_UNUSED 255
337struct i40e_udp_port_config { 314struct i40e_udp_port_config {
338 /* AdminQ command interface expects port number in Host byte order */ 315 /* AdminQ command interface expects port number in Host byte order */
339 u16 port; 316 u16 port;
340 u8 type; 317 u8 type;
318 u8 filter_index;
341}; 319};
342 320
343/* macros related to FLX_PIT */ 321/* macros related to FLX_PIT */
@@ -608,7 +586,7 @@ struct i40e_pf {
608 unsigned long ptp_tx_start; 586 unsigned long ptp_tx_start;
609 struct hwtstamp_config tstamp_config; 587 struct hwtstamp_config tstamp_config;
610 struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */ 588 struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
611 u64 ptp_base_adj; 589 u32 ptp_adj_mult;
612 u32 tx_hwtstamp_timeouts; 590 u32 tx_hwtstamp_timeouts;
613 u32 tx_hwtstamp_skipped; 591 u32 tx_hwtstamp_skipped;
614 u32 rx_hwtstamp_cleared; 592 u32 rx_hwtstamp_cleared;
@@ -1009,6 +987,9 @@ void i40e_service_event_schedule(struct i40e_pf *pf);
1009void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, 987void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
1010 u8 *msg, u16 len); 988 u8 *msg, u16 len);
1011 989
990int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, bool is_xdp,
991 bool enable);
992int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable);
1012int i40e_vsi_start_rings(struct i40e_vsi *vsi); 993int i40e_vsi_start_rings(struct i40e_vsi *vsi);
1013void i40e_vsi_stop_rings(struct i40e_vsi *vsi); 994void i40e_vsi_stop_rings(struct i40e_vsi *vsi);
1014void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi); 995void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 843fc7781ef8..ddbea79d18e5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2016 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#include "i40e_status.h" 4#include "i40e_status.h"
29#include "i40e_type.h" 5#include "i40e_type.h"
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 0a8749ee9fd3..edec3df78971 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_ADMINQ_H_ 4#ifndef _I40E_ADMINQ_H_
29#define _I40E_ADMINQ_H_ 5#define _I40E_ADMINQ_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 0244923edeb8..7d888e05f96f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2017 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_ADMINQ_CMD_H_ 4#ifndef _I40E_ADMINQ_CMD_H_
29#define _I40E_ADMINQ_CMD_H_ 5#define _I40E_ADMINQ_CMD_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
index abed0c52e782..cb8689222c8b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_ALLOC_H_ 4#ifndef _I40E_ALLOC_H_
29#define _I40E_ALLOC_H_ 5#define _I40E_ALLOC_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index d8ce4999864f..5f3b8b9ff511 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2017 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#include <linux/list.h> 4#include <linux/list.h>
29#include <linux/errno.h> 5#include <linux/errno.h>
@@ -64,7 +40,7 @@ static struct i40e_ops i40e_lan_ops = {
64/** 40/**
65 * i40e_client_get_params - Get the params that can change at runtime 41 * i40e_client_get_params - Get the params that can change at runtime
66 * @vsi: the VSI with the message 42 * @vsi: the VSI with the message
67 * @param: clinet param struct 43 * @params: client param struct
68 * 44 *
69 **/ 45 **/
70static 46static
@@ -590,7 +566,7 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev,
590 * i40e_client_setup_qvlist 566 * i40e_client_setup_qvlist
591 * @ldev: pointer to L2 context. 567 * @ldev: pointer to L2 context.
592 * @client: Client pointer. 568 * @client: Client pointer.
593 * @qv_info: queue and vector list 569 * @qvlist_info: queue and vector list
594 * 570 *
595 * Return 0 on success or < 0 on error 571 * Return 0 on success or < 0 on error
596 **/ 572 **/
@@ -665,7 +641,7 @@ err:
665 * i40e_client_request_reset 641 * i40e_client_request_reset
666 * @ldev: pointer to L2 context. 642 * @ldev: pointer to L2 context.
667 * @client: Client pointer. 643 * @client: Client pointer.
668 * @level: reset level 644 * @reset_level: reset level
669 **/ 645 **/
670static void i40e_client_request_reset(struct i40e_info *ldev, 646static void i40e_client_request_reset(struct i40e_info *ldev,
671 struct i40e_client *client, 647 struct i40e_client *client,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index 9d464d40bc17..72994baf4941 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2015 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_CLIENT_H_ 4#ifndef _I40E_CLIENT_H_
29#define _I40E_CLIENT_H_ 5#define _I40E_CLIENT_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index c0a3dae8a2db..eb2d1530d331 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2016 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#include "i40e_type.h" 4#include "i40e_type.h"
29#include "i40e_adminq.h" 5#include "i40e_adminq.h"
@@ -1695,6 +1671,8 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
1695/** 1671/**
1696 * i40e_set_fc 1672 * i40e_set_fc
1697 * @hw: pointer to the hw struct 1673 * @hw: pointer to the hw struct
1674 * @aq_failures: buffer to return AdminQ failure information
1675 * @atomic_restart: whether to enable atomic link restart
1698 * 1676 *
1699 * Set the requested flow control mode using set_phy_config. 1677 * Set the requested flow control mode using set_phy_config.
1700 **/ 1678 **/
@@ -2831,8 +2809,8 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
2831 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2809 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2832 * @cmd_details: pointer to command details structure or NULL 2810 * @cmd_details: pointer to command details structure or NULL
2833 * @rule_id: Rule ID returned from FW 2811 * @rule_id: Rule ID returned from FW
2834 * @rule_used: Number of rules used in internal switch 2812 * @rules_used: Number of rules used in internal switch
2835 * @rule_free: Number of rules free in internal switch 2813 * @rules_free: Number of rules free in internal switch
2836 * 2814 *
2837 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for 2815 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
2838 * VEBs/VEPA elements only 2816 * VEBs/VEPA elements only
@@ -2892,8 +2870,8 @@ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
2892 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2870 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
2893 * @cmd_details: pointer to command details structure or NULL 2871 * @cmd_details: pointer to command details structure or NULL
2894 * @rule_id: Rule ID returned from FW 2872 * @rule_id: Rule ID returned from FW
2895 * @rule_used: Number of rules used in internal switch 2873 * @rules_used: Number of rules used in internal switch
2896 * @rule_free: Number of rules free in internal switch 2874 * @rules_free: Number of rules free in internal switch
2897 * 2875 *
2898 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only 2876 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
2899 **/ 2877 **/
@@ -2923,8 +2901,8 @@ i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
2923 * add_mirrorrule. 2901 * add_mirrorrule.
2924 * @mr_list: list of mirrored VLAN IDs to be removed 2902 * @mr_list: list of mirrored VLAN IDs to be removed
2925 * @cmd_details: pointer to command details structure or NULL 2903 * @cmd_details: pointer to command details structure or NULL
2926 * @rule_used: Number of rules used in internal switch 2904 * @rules_used: Number of rules used in internal switch
2927 * @rule_free: Number of rules free in internal switch 2905 * @rules_free: Number of rules free in internal switch
2928 * 2906 *
2929 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only 2907 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
2930 **/ 2908 **/
@@ -3672,6 +3650,8 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
3672/** 3650/**
3673 * i40e_aq_start_lldp 3651 * i40e_aq_start_lldp
3674 * @hw: pointer to the hw struct 3652 * @hw: pointer to the hw struct
3653 * @buff: buffer for result
3654 * @buff_size: buffer size
3675 * @cmd_details: pointer to command details structure or NULL 3655 * @cmd_details: pointer to command details structure or NULL
3676 * 3656 *
3677 * Start the embedded LLDP Agent on all ports. 3657 * Start the embedded LLDP Agent on all ports.
@@ -3752,7 +3732,6 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
3752 * i40e_aq_add_udp_tunnel 3732 * i40e_aq_add_udp_tunnel
3753 * @hw: pointer to the hw struct 3733 * @hw: pointer to the hw struct
3754 * @udp_port: the UDP port to add in Host byte order 3734 * @udp_port: the UDP port to add in Host byte order
3755 * @header_len: length of the tunneling header length in DWords
3756 * @protocol_index: protocol index type 3735 * @protocol_index: protocol index type
3757 * @filter_index: pointer to filter index 3736 * @filter_index: pointer to filter index
3758 * @cmd_details: pointer to command details structure or NULL 3737 * @cmd_details: pointer to command details structure or NULL
@@ -3971,6 +3950,7 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
3971 * @hw: pointer to the hw struct 3950 * @hw: pointer to the hw struct
3972 * @seid: seid of the switching component connected to Physical Port 3951 * @seid: seid of the switching component connected to Physical Port
3973 * @ets_data: Buffer holding ETS parameters 3952 * @ets_data: Buffer holding ETS parameters
3953 * @opcode: Tx scheduler AQ command opcode
3974 * @cmd_details: pointer to command details structure or NULL 3954 * @cmd_details: pointer to command details structure or NULL
3975 **/ 3955 **/
3976i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, 3956i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
@@ -4314,10 +4294,10 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
4314 * @hw: pointer to the hw struct 4294 * @hw: pointer to the hw struct
4315 * @seid: VSI seid to add ethertype filter from 4295 * @seid: VSI seid to add ethertype filter from
4316 **/ 4296 **/
4317#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
4318void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, 4297void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
4319 u16 seid) 4298 u16 seid)
4320{ 4299{
4300#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
4321 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | 4301 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
4322 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | 4302 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
4323 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; 4303 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
@@ -4448,6 +4428,7 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
4448 * @ret_buff_size: actual buffer size returned 4428 * @ret_buff_size: actual buffer size returned
4449 * @ret_next_table: next block to read 4429 * @ret_next_table: next block to read
4450 * @ret_next_index: next index to read 4430 * @ret_next_index: next index to read
4431 * @cmd_details: pointer to command details structure or NULL
4451 * 4432 *
4452 * Dump internal FW/HW data for debug purposes. 4433 * Dump internal FW/HW data for debug purposes.
4453 * 4434 *
@@ -4574,7 +4555,7 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
4574 * i40e_read_phy_register_clause22 4555 * i40e_read_phy_register_clause22
4575 * @hw: pointer to the HW structure 4556 * @hw: pointer to the HW structure
4576 * @reg: register address in the page 4557 * @reg: register address in the page
4577 * @phy_adr: PHY address on MDIO interface 4558 * @phy_addr: PHY address on MDIO interface
4578 * @value: PHY register value 4559 * @value: PHY register value
4579 * 4560 *
4580 * Reads specified PHY register value 4561 * Reads specified PHY register value
@@ -4619,7 +4600,7 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
4619 * i40e_write_phy_register_clause22 4600 * i40e_write_phy_register_clause22
4620 * @hw: pointer to the HW structure 4601 * @hw: pointer to the HW structure
4621 * @reg: register address in the page 4602 * @reg: register address in the page
4622 * @phy_adr: PHY address on MDIO interface 4603 * @phy_addr: PHY address on MDIO interface
4623 * @value: PHY register value 4604 * @value: PHY register value
4624 * 4605 *
4625 * Writes specified PHY register value 4606 * Writes specified PHY register value
@@ -4660,7 +4641,7 @@ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
4660 * @hw: pointer to the HW structure 4641 * @hw: pointer to the HW structure
4661 * @page: registers page number 4642 * @page: registers page number
4662 * @reg: register address in the page 4643 * @reg: register address in the page
4663 * @phy_adr: PHY address on MDIO interface 4644 * @phy_addr: PHY address on MDIO interface
4664 * @value: PHY register value 4645 * @value: PHY register value
4665 * 4646 *
4666 * Reads specified PHY register value 4647 * Reads specified PHY register value
@@ -4734,7 +4715,7 @@ phy_read_end:
4734 * @hw: pointer to the HW structure 4715 * @hw: pointer to the HW structure
4735 * @page: registers page number 4716 * @page: registers page number
4736 * @reg: register address in the page 4717 * @reg: register address in the page
4737 * @phy_adr: PHY address on MDIO interface 4718 * @phy_addr: PHY address on MDIO interface
4738 * @value: PHY register value 4719 * @value: PHY register value
4739 * 4720 *
4740 * Writes value to specified PHY register 4721 * Writes value to specified PHY register
@@ -4801,7 +4782,7 @@ phy_write_end:
4801 * @hw: pointer to the HW structure 4782 * @hw: pointer to the HW structure
4802 * @page: registers page number 4783 * @page: registers page number
4803 * @reg: register address in the page 4784 * @reg: register address in the page
4804 * @phy_adr: PHY address on MDIO interface 4785 * @phy_addr: PHY address on MDIO interface
4805 * @value: PHY register value 4786 * @value: PHY register value
4806 * 4787 *
4807 * Writes value to specified PHY register 4788 * Writes value to specified PHY register
@@ -4837,7 +4818,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
4837 * @hw: pointer to the HW structure 4818 * @hw: pointer to the HW structure
4838 * @page: registers page number 4819 * @page: registers page number
4839 * @reg: register address in the page 4820 * @reg: register address in the page
4840 * @phy_adr: PHY address on MDIO interface 4821 * @phy_addr: PHY address on MDIO interface
4841 * @value: PHY register value 4822 * @value: PHY register value
4842 * 4823 *
4843 * Reads specified PHY register value 4824 * Reads specified PHY register value
@@ -4872,7 +4853,6 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
4872 * i40e_get_phy_address 4853 * i40e_get_phy_address
4873 * @hw: pointer to the HW structure 4854 * @hw: pointer to the HW structure
4874 * @dev_num: PHY port num that address we want 4855 * @dev_num: PHY port num that address we want
4875 * @phy_addr: Returned PHY address
4876 * 4856 *
4877 * Gets PHY address for current port 4857 * Gets PHY address for current port
4878 **/ 4858 **/
@@ -5082,7 +5062,9 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
5082 * i40e_led_set_phy 5062 * i40e_led_set_phy
5083 * @hw: pointer to the HW structure 5063 * @hw: pointer to the HW structure
5084 * @on: true or false 5064 * @on: true or false
5065 * @led_addr: address of led register to use
5085 * @mode: original val plus bit for set or ignore 5066 * @mode: original val plus bit for set or ignore
5067 *
5086 * Set led's on or off when controlled by the PHY 5068 * Set led's on or off when controlled by the PHY
5087 * 5069 *
5088 **/ 5070 **/
@@ -5371,6 +5353,7 @@ i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
5371 * @hw: pointer to the hw struct 5353 * @hw: pointer to the hw struct
5372 * @buff: command buffer (size in bytes = buff_size) 5354 * @buff: command buffer (size in bytes = buff_size)
5373 * @buff_size: buffer size in bytes 5355 * @buff_size: buffer size in bytes
5356 * @flags: AdminQ command flags
5374 * @cmd_details: pointer to command details structure or NULL 5357 * @cmd_details: pointer to command details structure or NULL
5375 **/ 5358 **/
5376enum 5359enum
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 9fec728dc4b9..56bff8faf371 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2017 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#include "i40e_adminq.h" 4#include "i40e_adminq.h"
29#include "i40e_prototype.h" 5#include "i40e_prototype.h"
@@ -945,6 +921,70 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
945} 921}
946 922
947/** 923/**
924 * _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM
925 * @hw: pointer to the HW structure
926 * @lldp_cfg: pointer to hold lldp configuration variables
927 * @module: address of the module pointer
928 * @word_offset: offset of LLDP configuration
929 *
930 * Reads the LLDP configuration data from NVM using passed addresses
931 **/
932static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw,
933 struct i40e_lldp_variables *lldp_cfg,
934 u8 module, u32 word_offset)
935{
936 u32 address, offset = (2 * word_offset);
937 i40e_status ret;
938 __le16 raw_mem;
939 u16 mem;
940
941 ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
942 if (ret)
943 return ret;
944
945 ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(raw_mem), &raw_mem,
946 true, NULL);
947 i40e_release_nvm(hw);
948 if (ret)
949 return ret;
950
951 mem = le16_to_cpu(raw_mem);
952 /* Check if this pointer needs to be read in word size or 4K sector
953 * units.
954 */
955 if (mem & I40E_PTR_TYPE)
956 address = (0x7FFF & mem) * 4096;
957 else
958 address = (0x7FFF & mem) * 2;
959
960 ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
961 if (ret)
962 goto err_lldp_cfg;
963
964 ret = i40e_aq_read_nvm(hw, module, offset, sizeof(raw_mem), &raw_mem,
965 true, NULL);
966 i40e_release_nvm(hw);
967 if (ret)
968 return ret;
969
970 mem = le16_to_cpu(raw_mem);
971 offset = mem + word_offset;
972 offset *= 2;
973
974 ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
975 if (ret)
976 goto err_lldp_cfg;
977
978 ret = i40e_aq_read_nvm(hw, 0, address + offset,
979 sizeof(struct i40e_lldp_variables), lldp_cfg,
980 true, NULL);
981 i40e_release_nvm(hw);
982
983err_lldp_cfg:
984 return ret;
985}
986
987/**
948 * i40e_read_lldp_cfg - read LLDP Configuration data from NVM 988 * i40e_read_lldp_cfg - read LLDP Configuration data from NVM
949 * @hw: pointer to the HW structure 989 * @hw: pointer to the HW structure
950 * @lldp_cfg: pointer to hold lldp configuration variables 990 * @lldp_cfg: pointer to hold lldp configuration variables
@@ -955,21 +995,34 @@ i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
955 struct i40e_lldp_variables *lldp_cfg) 995 struct i40e_lldp_variables *lldp_cfg)
956{ 996{
957 i40e_status ret = 0; 997 i40e_status ret = 0;
958 u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR); 998 u32 mem;
959 999
960 if (!lldp_cfg) 1000 if (!lldp_cfg)
961 return I40E_ERR_PARAM; 1001 return I40E_ERR_PARAM;
962 1002
963 ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); 1003 ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
964 if (ret) 1004 if (ret)
965 goto err_lldp_cfg; 1005 return ret;
966 1006
967 ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset, 1007 ret = i40e_aq_read_nvm(hw, I40E_SR_NVM_CONTROL_WORD, 0, sizeof(mem),
968 sizeof(struct i40e_lldp_variables), 1008 &mem, true, NULL);
969 (u8 *)lldp_cfg,
970 true, NULL);
971 i40e_release_nvm(hw); 1009 i40e_release_nvm(hw);
1010 if (ret)
1011 return ret;
1012
1013 /* Read a bit that holds information whether we are running flat or
1014 * structured NVM image. Flat image has LLDP configuration in shadow
1015 * ram, so there is a need to pass different addresses for both cases.
1016 */
1017 if (mem & I40E_SR_NVM_MAP_STRUCTURE_TYPE) {
1018 /* Flat NVM case */
1019 ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_SR_EMP_MODULE_PTR,
1020 I40E_SR_LLDP_CFG_PTR);
1021 } else {
1022 /* Good old structured NVM image */
1023 ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_EMP_MODULE_PTR,
1024 I40E_NVM_LLDP_CFG_PTR);
1025 }
972 1026
973err_lldp_cfg:
974 return ret; 1027 return ret;
975} 1028}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 4f806386cb22..2b748a60a843 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_DCB_H_ 4#ifndef _I40E_DCB_H_
29#define _I40E_DCB_H_ 5#define _I40E_DCB_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 502818e3da78..9deae9a35423 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifdef CONFIG_I40E_DCB 4#ifdef CONFIG_I40E_DCB
29#include "i40e.h" 5#include "i40e.h"
@@ -47,7 +23,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
47 23
48/** 24/**
49 * i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration 25 * i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration
50 * @netdev: the corresponding netdev 26 * @dev: the corresponding netdev
51 * @ets: structure to hold the ETS information 27 * @ets: structure to hold the ETS information
52 * 28 *
53 * Returns local IEEE ETS configuration 29 * Returns local IEEE ETS configuration
@@ -86,8 +62,8 @@ static int i40e_dcbnl_ieee_getets(struct net_device *dev,
86 62
87/** 63/**
88 * i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration 64 * i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration
89 * @netdev: the corresponding netdev 65 * @dev: the corresponding netdev
90 * @ets: structure to hold the PFC information 66 * @pfc: structure to hold the PFC information
91 * 67 *
92 * Returns local IEEE PFC configuration 68 * Returns local IEEE PFC configuration
93 **/ 69 **/
@@ -119,7 +95,7 @@ static int i40e_dcbnl_ieee_getpfc(struct net_device *dev,
119 95
120/** 96/**
121 * i40e_dcbnl_getdcbx - retrieve current DCBx capability 97 * i40e_dcbnl_getdcbx - retrieve current DCBx capability
122 * @netdev: the corresponding netdev 98 * @dev: the corresponding netdev
123 * 99 *
124 * Returns DCBx capability features 100 * Returns DCBx capability features
125 **/ 101 **/
@@ -132,7 +108,8 @@ static u8 i40e_dcbnl_getdcbx(struct net_device *dev)
132 108
133/** 109/**
134 * i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx 110 * i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx
135 * @netdev: the corresponding netdev 111 * @dev: the corresponding netdev
112 * @perm_addr: buffer to store the MAC address
136 * 113 *
137 * Returns the SAN MAC address used for LLDP exchange 114 * Returns the SAN MAC address used for LLDP exchange
138 **/ 115 **/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index d494dcaf18d0..56b911a5dd8b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2016 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifdef CONFIG_DEBUG_FS 4#ifdef CONFIG_DEBUG_FS
29 5
@@ -36,8 +12,8 @@ static struct dentry *i40e_dbg_root;
36 12
37/** 13/**
38 * i40e_dbg_find_vsi - searches for the vsi with the given seid 14 * i40e_dbg_find_vsi - searches for the vsi with the given seid
39 * @pf - the PF structure to search for the vsi 15 * @pf: the PF structure to search for the vsi
40 * @seid - seid of the vsi it is searching for 16 * @seid: seid of the vsi it is searching for
41 **/ 17 **/
42static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid) 18static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
43{ 19{
@@ -55,8 +31,8 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
55 31
56/** 32/**
57 * i40e_dbg_find_veb - searches for the veb with the given seid 33 * i40e_dbg_find_veb - searches for the veb with the given seid
58 * @pf - the PF structure to search for the veb 34 * @pf: the PF structure to search for the veb
59 * @seid - seid of the veb it is searching for 35 * @seid: seid of the veb it is searching for
60 **/ 36 **/
61static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid) 37static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
62{ 38{
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index ad6a66ccb576..334b05ff685a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2015 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_DEVIDS_H_ 4#ifndef _I40E_DEVIDS_H_
29#define _I40E_DEVIDS_H_ 5#define _I40E_DEVIDS_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index df3e60470f8b..ef4d3762bf37 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#include "i40e_diag.h" 4#include "i40e_diag.h"
29#include "i40e_prototype.h" 5#include "i40e_prototype.h"
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index be8341763475..c3340f320a18 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_DIAG_H_ 4#ifndef _I40E_DIAG_H_
29#define _I40E_DIAG_H_ 5#define _I40E_DIAG_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index b974482ff630..fc6a5eef141c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2016 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28/* ethtool support for i40e */ 4/* ethtool support for i40e */
29 5
@@ -977,7 +953,9 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
977 ethtool_link_ksettings_test_link_mode(ks, advertising, 953 ethtool_link_ksettings_test_link_mode(ks, advertising,
978 10000baseCR_Full) || 954 10000baseCR_Full) ||
979 ethtool_link_ksettings_test_link_mode(ks, advertising, 955 ethtool_link_ksettings_test_link_mode(ks, advertising,
980 10000baseSR_Full)) 956 10000baseSR_Full) ||
957 ethtool_link_ksettings_test_link_mode(ks, advertising,
958 10000baseLR_Full))
981 config.link_speed |= I40E_LINK_SPEED_10GB; 959 config.link_speed |= I40E_LINK_SPEED_10GB;
982 if (ethtool_link_ksettings_test_link_mode(ks, advertising, 960 if (ethtool_link_ksettings_test_link_mode(ks, advertising,
983 20000baseKR2_Full)) 961 20000baseKR2_Full))
@@ -1079,6 +1057,9 @@ static int i40e_nway_reset(struct net_device *netdev)
1079 1057
1080/** 1058/**
1081 * i40e_get_pauseparam - Get Flow Control status 1059 * i40e_get_pauseparam - Get Flow Control status
1060 * @netdev: netdevice structure
1061 * @pause: buffer to return pause parameters
1062 *
1082 * Return tx/rx-pause status 1063 * Return tx/rx-pause status
1083 **/ 1064 **/
1084static void i40e_get_pauseparam(struct net_device *netdev, 1065static void i40e_get_pauseparam(struct net_device *netdev,
@@ -2550,7 +2531,7 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
2550/** 2531/**
2551 * i40e_check_mask - Check whether a mask field is set 2532 * i40e_check_mask - Check whether a mask field is set
2552 * @mask: the full mask value 2533 * @mask: the full mask value
2553 * @field; mask of the field to check 2534 * @field: mask of the field to check
2554 * 2535 *
2555 * If the given mask is fully set, return positive value. If the mask for the 2536 * If the given mask is fully set, return positive value. If the mask for the
2556 * field is fully unset, return zero. Otherwise return a negative error code. 2537 * field is fully unset, return zero. Otherwise return a negative error code.
@@ -2621,6 +2602,7 @@ static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
2621/** 2602/**
2622 * i40e_fill_rx_flow_user_data - Fill in user-defined data field 2603 * i40e_fill_rx_flow_user_data - Fill in user-defined data field
2623 * @fsp: pointer to rx_flow specification 2604 * @fsp: pointer to rx_flow specification
2605 * @data: pointer to return userdef data
2624 * 2606 *
2625 * Reads the userdef data structure and properly fills in the user defined 2607 * Reads the userdef data structure and properly fills in the user defined
2626 * fields of the rx_flow_spec. 2608 * fields of the rx_flow_spec.
@@ -2799,6 +2781,7 @@ no_input_set:
2799 * i40e_get_rxnfc - command to get RX flow classification rules 2781 * i40e_get_rxnfc - command to get RX flow classification rules
2800 * @netdev: network interface device structure 2782 * @netdev: network interface device structure
2801 * @cmd: ethtool rxnfc command 2783 * @cmd: ethtool rxnfc command
2784 * @rule_locs: pointer to store rule data
2802 * 2785 *
2803 * Returns Success if the command is supported. 2786 * Returns Success if the command is supported.
2804 **/ 2787 **/
@@ -2840,7 +2823,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
2840/** 2823/**
2841 * i40e_get_rss_hash_bits - Read RSS Hash bits from register 2824 * i40e_get_rss_hash_bits - Read RSS Hash bits from register
2842 * @nfc: pointer to user request 2825 * @nfc: pointer to user request
2843 * @i_setc bits currently set 2826 * @i_setc: bits currently set
2844 * 2827 *
2845 * Returns value of bits to be set per user request 2828 * Returns value of bits to be set per user request
2846 **/ 2829 **/
@@ -2885,7 +2868,7 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
2885/** 2868/**
2886 * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash 2869 * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
2887 * @pf: pointer to the physical function struct 2870 * @pf: pointer to the physical function struct
2888 * @cmd: ethtool rxnfc command 2871 * @nfc: ethtool rxnfc command
2889 * 2872 *
2890 * Returns Success if the flow input set is supported. 2873 * Returns Success if the flow input set is supported.
2891 **/ 2874 **/
@@ -3284,7 +3267,7 @@ static int i40e_add_flex_offset(struct list_head *flex_pit_list,
3284 * __i40e_reprogram_flex_pit - Re-program specific FLX_PIT table 3267 * __i40e_reprogram_flex_pit - Re-program specific FLX_PIT table
3285 * @pf: Pointer to the PF structure 3268 * @pf: Pointer to the PF structure
3286 * @flex_pit_list: list of flexible src offsets in use 3269 * @flex_pit_list: list of flexible src offsets in use
3287 * #flex_pit_start: index to first entry for this section of the table 3270 * @flex_pit_start: index to first entry for this section of the table
3288 * 3271 *
3289 * In order to handle flexible data, the hardware uses a table of values 3272 * In order to handle flexible data, the hardware uses a table of values
3290 * called the FLX_PIT table. This table is used to indicate which sections of 3273 * called the FLX_PIT table. This table is used to indicate which sections of
@@ -3398,7 +3381,7 @@ static void i40e_reprogram_flex_pit(struct i40e_pf *pf)
3398 3381
3399/** 3382/**
3400 * i40e_flow_str - Converts a flow_type into a human readable string 3383 * i40e_flow_str - Converts a flow_type into a human readable string
3401 * @flow_type: the flow type from a flow specification 3384 * @fsp: the flow specification
3402 * 3385 *
3403 * Currently only flow types we support are included here, and the string 3386 * Currently only flow types we support are included here, and the string
3404 * value attempts to match what ethtool would use to configure this flow type. 3387 * value attempts to match what ethtool would use to configure this flow type.
@@ -4103,7 +4086,7 @@ static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
4103 4086
4104/** 4087/**
4105 * i40e_get_channels - Get the current channels enabled and max supported etc. 4088 * i40e_get_channels - Get the current channels enabled and max supported etc.
4106 * @netdev: network interface device structure 4089 * @dev: network interface device structure
4107 * @ch: ethtool channels structure 4090 * @ch: ethtool channels structure
4108 * 4091 *
4109 * We don't support separate tx and rx queues as channels. The other count 4092 * We don't support separate tx and rx queues as channels. The other count
@@ -4112,7 +4095,7 @@ static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
4112 * q_vectors since we support a lot more queue pairs than q_vectors. 4095 * q_vectors since we support a lot more queue pairs than q_vectors.
4113 **/ 4096 **/
4114static void i40e_get_channels(struct net_device *dev, 4097static void i40e_get_channels(struct net_device *dev,
4115 struct ethtool_channels *ch) 4098 struct ethtool_channels *ch)
4116{ 4099{
4117 struct i40e_netdev_priv *np = netdev_priv(dev); 4100 struct i40e_netdev_priv *np = netdev_priv(dev);
4118 struct i40e_vsi *vsi = np->vsi; 4101 struct i40e_vsi *vsi = np->vsi;
@@ -4131,14 +4114,14 @@ static void i40e_get_channels(struct net_device *dev,
4131 4114
4132/** 4115/**
4133 * i40e_set_channels - Set the new channels count. 4116 * i40e_set_channels - Set the new channels count.
4134 * @netdev: network interface device structure 4117 * @dev: network interface device structure
4135 * @ch: ethtool channels structure 4118 * @ch: ethtool channels structure
4136 * 4119 *
4137 * The new channels count may not be the same as requested by the user 4120 * The new channels count may not be the same as requested by the user
4138 * since it gets rounded down to a power of 2 value. 4121 * since it gets rounded down to a power of 2 value.
4139 **/ 4122 **/
4140static int i40e_set_channels(struct net_device *dev, 4123static int i40e_set_channels(struct net_device *dev,
4141 struct ethtool_channels *ch) 4124 struct ethtool_channels *ch)
4142{ 4125{
4143 const u8 drop = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; 4126 const u8 drop = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
4144 struct i40e_netdev_priv *np = netdev_priv(dev); 4127 struct i40e_netdev_priv *np = netdev_priv(dev);
@@ -4273,6 +4256,7 @@ out:
4273 * @netdev: network interface device structure 4256 * @netdev: network interface device structure
4274 * @indir: indirection table 4257 * @indir: indirection table
4275 * @key: hash key 4258 * @key: hash key
4259 * @hfunc: hash function to use
4276 * 4260 *
4277 * Returns -EINVAL if the table specifies an invalid queue id, otherwise 4261 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
4278 * returns 0 after programming the table. 4262 * returns 0 after programming the table.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 6d4b590f851b..19ce93d7fd0a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#include "i40e_osdep.h" 4#include "i40e_osdep.h"
29#include "i40e_register.h" 5#include "i40e_register.h"
@@ -198,7 +174,6 @@ exit:
198 * @hw: pointer to our HW structure 174 * @hw: pointer to our HW structure
199 * @hmc_info: pointer to the HMC configuration information structure 175 * @hmc_info: pointer to the HMC configuration information structure
200 * @idx: the page index 176 * @idx: the page index
201 * @is_pf: distinguishes a VF from a PF
202 * 177 *
203 * This function: 178 * This function:
204 * 1. Marks the entry in pd tabe (for paged address mode) or in sd table 179 * 1. Marks the entry in pd tabe (for paged address mode) or in sd table
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 7b5fd33d70ae..1c78de838857 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_HMC_H_ 4#ifndef _I40E_HMC_H_
29#define _I40E_HMC_H_ 5#define _I40E_HMC_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index cd40dc487b38..994011c38fb4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#include "i40e_osdep.h" 4#include "i40e_osdep.h"
29#include "i40e_register.h" 5#include "i40e_register.h"
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index 79e1396735d9..c46a2c449e60 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_LAN_HMC_H_ 4#ifndef _I40E_LAN_HMC_H_
29#define _I40E_LAN_HMC_H_ 5#define _I40E_LAN_HMC_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 16229998fb1e..c8659fbd7111 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2017 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#include <linux/etherdevice.h> 4#include <linux/etherdevice.h>
29#include <linux/of_net.h> 5#include <linux/of_net.h>
@@ -278,8 +254,8 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
278 254
279/** 255/**
280 * i40e_find_vsi_from_id - searches for the vsi with the given id 256 * i40e_find_vsi_from_id - searches for the vsi with the given id
281 * @pf - the pf structure to search for the vsi 257 * @pf: the pf structure to search for the vsi
282 * @id - id of the vsi it is searching for 258 * @id: id of the vsi it is searching for
283 **/ 259 **/
284struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) 260struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
285{ 261{
@@ -435,6 +411,7 @@ static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
435/** 411/**
436 * i40e_get_netdev_stats_struct - Get statistics for netdev interface 412 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
437 * @netdev: network interface device structure 413 * @netdev: network interface device structure
414 * @stats: data structure to store statistics
438 * 415 *
439 * Returns the address of the device statistics structure. 416 * Returns the address of the device statistics structure.
440 * The statistics are actually updated from the service task. 417 * The statistics are actually updated from the service task.
@@ -2027,7 +2004,7 @@ struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2027 * from firmware 2004 * from firmware
2028 * @count: Number of filters added 2005 * @count: Number of filters added
2029 * @add_list: return data from fw 2006 * @add_list: return data from fw
2030 * @head: pointer to first filter in current batch 2007 * @add_head: pointer to first filter in current batch
2031 * 2008 *
2032 * MAC filter entries from list were slated to be added to device. Returns 2009 * MAC filter entries from list were slated to be added to device. Returns
2033 * number of successful filters. Note that 0 does NOT mean success! 2010 * number of successful filters. Note that 0 does NOT mean success!
@@ -2134,6 +2111,7 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2134/** 2111/**
2135 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags 2112 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2136 * @vsi: pointer to the VSI 2113 * @vsi: pointer to the VSI
2114 * @vsi_name: the VSI name
2137 * @f: filter data 2115 * @f: filter data
2138 * 2116 *
2139 * This function sets or clears the promiscuous broadcast flags for VLAN 2117 * This function sets or clears the promiscuous broadcast flags for VLAN
@@ -2840,6 +2818,7 @@ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2840/** 2818/**
2841 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload 2819 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2842 * @netdev: network interface to be adjusted 2820 * @netdev: network interface to be adjusted
2821 * @proto: unused protocol value
2843 * @vid: vlan id to be added 2822 * @vid: vlan id to be added
2844 * 2823 *
2845 * net_device_ops implementation for adding vlan ids 2824 * net_device_ops implementation for adding vlan ids
@@ -2864,6 +2843,7 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2864/** 2843/**
2865 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload 2844 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2866 * @netdev: network interface to be adjusted 2845 * @netdev: network interface to be adjusted
2846 * @proto: unused protocol value
2867 * @vid: vlan id to be removed 2847 * @vid: vlan id to be removed
2868 * 2848 *
2869 * net_device_ops implementation for removing vlan ids 2849 * net_device_ops implementation for removing vlan ids
@@ -3485,7 +3465,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3485 3465
3486/** 3466/**
3487 * i40e_enable_misc_int_causes - enable the non-queue interrupts 3467 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3488 * @hw: ptr to the hardware info 3468 * @pf: pointer to private device data structure
3489 **/ 3469 **/
3490static void i40e_enable_misc_int_causes(struct i40e_pf *pf) 3470static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3491{ 3471{
@@ -4255,8 +4235,8 @@ static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4255 * @is_xdp: true if the queue is used for XDP 4235 * @is_xdp: true if the queue is used for XDP
4256 * @enable: start or stop the queue 4236 * @enable: start or stop the queue
4257 **/ 4237 **/
4258static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, 4238int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4259 bool is_xdp, bool enable) 4239 bool is_xdp, bool enable)
4260{ 4240{
4261 int ret; 4241 int ret;
4262 4242
@@ -4301,7 +4281,6 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4301 if (ret) 4281 if (ret)
4302 break; 4282 break;
4303 } 4283 }
4304
4305 return ret; 4284 return ret;
4306} 4285}
4307 4286
@@ -4340,9 +4319,9 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4340 * @pf_q: the PF queue to configure 4319 * @pf_q: the PF queue to configure
4341 * @enable: start or stop the queue 4320 * @enable: start or stop the queue
4342 * 4321 *
4343 * This function enables or disables a single queue. Note that any delay 4322 * This function enables or disables a single queue. Note that
4344 * required after the operation is expected to be handled by the caller of 4323 * any delay required after the operation is expected to be
4345 * this function. 4324 * handled by the caller of this function.
4346 **/ 4325 **/
4347static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable) 4326static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4348{ 4327{
@@ -4372,6 +4351,30 @@ static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4372} 4351}
4373 4352
4374/** 4353/**
4354 * i40e_control_wait_rx_q
4355 * @pf: the PF structure
4356 * @pf_q: queue being configured
4357 * @enable: start or stop the rings
4358 *
4359 * This function enables or disables a single queue along with waiting
4360 * for the change to finish. The caller of this function should handle
4361 * the delays needed in the case of disabling queues.
4362 **/
4363int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4364{
4365 int ret = 0;
4366
4367 i40e_control_rx_q(pf, pf_q, enable);
4368
4369 /* wait for the change to finish */
4370 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4371 if (ret)
4372 return ret;
4373
4374 return ret;
4375}
4376
4377/**
4375 * i40e_vsi_control_rx - Start or stop a VSI's rings 4378 * i40e_vsi_control_rx - Start or stop a VSI's rings
4376 * @vsi: the VSI being configured 4379 * @vsi: the VSI being configured
4377 * @enable: start or stop the rings 4380 * @enable: start or stop the rings
@@ -4383,10 +4386,7 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4383 4386
4384 pf_q = vsi->base_queue; 4387 pf_q = vsi->base_queue;
4385 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 4388 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4386 i40e_control_rx_q(pf, pf_q, enable); 4389 ret = i40e_control_wait_rx_q(pf, pf_q, enable);
4387
4388 /* wait for the change to finish */
4389 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4390 if (ret) { 4390 if (ret) {
4391 dev_info(&pf->pdev->dev, 4391 dev_info(&pf->pdev->dev,
4392 "VSI seid %d Rx ring %d %sable timeout\n", 4392 "VSI seid %d Rx ring %d %sable timeout\n",
@@ -5096,7 +5096,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5096 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC 5096 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5097 * @vsi: the VSI being configured 5097 * @vsi: the VSI being configured
5098 * @enabled_tc: TC bitmap 5098 * @enabled_tc: TC bitmap
5099 * @bw_credits: BW shared credits per TC 5099 * @bw_share: BW shared credits per TC
5100 * 5100 *
5101 * Returns 0 on success, negative value on failure 5101 * Returns 0 on success, negative value on failure
5102 **/ 5102 **/
@@ -6353,6 +6353,7 @@ out:
6353/** 6353/**
6354 * i40e_print_link_message - print link up or down 6354 * i40e_print_link_message - print link up or down
6355 * @vsi: the VSI for which link needs a message 6355 * @vsi: the VSI for which link needs a message
6356 * @isup: true of link is up, false otherwise
6356 */ 6357 */
6357void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) 6358void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6358{ 6359{
@@ -7212,8 +7213,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
7212 if (mask->dst == cpu_to_be32(0xffffffff)) { 7213 if (mask->dst == cpu_to_be32(0xffffffff)) {
7213 field_flags |= I40E_CLOUD_FIELD_IIP; 7214 field_flags |= I40E_CLOUD_FIELD_IIP;
7214 } else { 7215 } else {
7215 mask->dst = be32_to_cpu(mask->dst); 7216 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
7216 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4\n",
7217 &mask->dst); 7217 &mask->dst);
7218 return I40E_ERR_CONFIG; 7218 return I40E_ERR_CONFIG;
7219 } 7219 }
@@ -7223,8 +7223,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
7223 if (mask->src == cpu_to_be32(0xffffffff)) { 7223 if (mask->src == cpu_to_be32(0xffffffff)) {
7224 field_flags |= I40E_CLOUD_FIELD_IIP; 7224 field_flags |= I40E_CLOUD_FIELD_IIP;
7225 } else { 7225 } else {
7226 mask->src = be32_to_cpu(mask->src); 7226 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
7227 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4\n",
7228 &mask->src); 7227 &mask->src);
7229 return I40E_ERR_CONFIG; 7228 return I40E_ERR_CONFIG;
7230 } 7229 }
@@ -9691,9 +9690,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
9691 i40e_flush(hw); 9690 i40e_flush(hw);
9692} 9691}
9693 9692
9694static const char *i40e_tunnel_name(struct i40e_udp_port_config *port) 9693static const char *i40e_tunnel_name(u8 type)
9695{ 9694{
9696 switch (port->type) { 9695 switch (type) {
9697 case UDP_TUNNEL_TYPE_VXLAN: 9696 case UDP_TUNNEL_TYPE_VXLAN:
9698 return "vxlan"; 9697 return "vxlan";
9699 case UDP_TUNNEL_TYPE_GENEVE: 9698 case UDP_TUNNEL_TYPE_GENEVE:
@@ -9727,37 +9726,68 @@ static void i40e_sync_udp_filters(struct i40e_pf *pf)
9727static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) 9726static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
9728{ 9727{
9729 struct i40e_hw *hw = &pf->hw; 9728 struct i40e_hw *hw = &pf->hw;
9730 i40e_status ret; 9729 u8 filter_index, type;
9731 u16 port; 9730 u16 port;
9732 int i; 9731 int i;
9733 9732
9734 if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state)) 9733 if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
9735 return; 9734 return;
9736 9735
9736 /* acquire RTNL to maintain state of flags and port requests */
9737 rtnl_lock();
9738
9737 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { 9739 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9738 if (pf->pending_udp_bitmap & BIT_ULL(i)) { 9740 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
9741 struct i40e_udp_port_config *udp_port;
9742 i40e_status ret = 0;
9743
9744 udp_port = &pf->udp_ports[i];
9739 pf->pending_udp_bitmap &= ~BIT_ULL(i); 9745 pf->pending_udp_bitmap &= ~BIT_ULL(i);
9740 port = pf->udp_ports[i].port; 9746
9747 port = READ_ONCE(udp_port->port);
9748 type = READ_ONCE(udp_port->type);
9749 filter_index = READ_ONCE(udp_port->filter_index);
9750
9751 /* release RTNL while we wait on AQ command */
9752 rtnl_unlock();
9753
9741 if (port) 9754 if (port)
9742 ret = i40e_aq_add_udp_tunnel(hw, port, 9755 ret = i40e_aq_add_udp_tunnel(hw, port,
9743 pf->udp_ports[i].type, 9756 type,
9744 NULL, NULL); 9757 &filter_index,
9745 else 9758 NULL);
9746 ret = i40e_aq_del_udp_tunnel(hw, i, NULL); 9759 else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
9760 ret = i40e_aq_del_udp_tunnel(hw, filter_index,
9761 NULL);
9762
9763 /* reacquire RTNL so we can update filter_index */
9764 rtnl_lock();
9747 9765
9748 if (ret) { 9766 if (ret) {
9749 dev_info(&pf->pdev->dev, 9767 dev_info(&pf->pdev->dev,
9750 "%s %s port %d, index %d failed, err %s aq_err %s\n", 9768 "%s %s port %d, index %d failed, err %s aq_err %s\n",
9751 i40e_tunnel_name(&pf->udp_ports[i]), 9769 i40e_tunnel_name(type),
9752 port ? "add" : "delete", 9770 port ? "add" : "delete",
9753 port, i, 9771 port,
9772 filter_index,
9754 i40e_stat_str(&pf->hw, ret), 9773 i40e_stat_str(&pf->hw, ret),
9755 i40e_aq_str(&pf->hw, 9774 i40e_aq_str(&pf->hw,
9756 pf->hw.aq.asq_last_status)); 9775 pf->hw.aq.asq_last_status));
9757 pf->udp_ports[i].port = 0; 9776 if (port) {
9777 /* failed to add, just reset port,
9778 * drop pending bit for any deletion
9779 */
9780 udp_port->port = 0;
9781 pf->pending_udp_bitmap &= ~BIT_ULL(i);
9782 }
9783 } else if (port) {
9784 /* record filter index on success */
9785 udp_port->filter_index = filter_index;
9758 } 9786 }
9759 } 9787 }
9760 } 9788 }
9789
9790 rtnl_unlock();
9761} 9791}
9762 9792
9763/** 9793/**
@@ -10004,7 +10034,7 @@ unlock_pf:
10004 10034
10005/** 10035/**
10006 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI 10036 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
10007 * @type: VSI pointer 10037 * @vsi: VSI pointer
10008 * @free_qvectors: a bool to specify if q_vectors need to be freed. 10038 * @free_qvectors: a bool to specify if q_vectors need to be freed.
10009 * 10039 *
10010 * On error: returns error code (negative) 10040 * On error: returns error code (negative)
@@ -10800,7 +10830,7 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
10800 * @vsi: Pointer to VSI structure 10830 * @vsi: Pointer to VSI structure
10801 * @seed: Buffer to store the keys 10831 * @seed: Buffer to store the keys
10802 * @lut: Buffer to store the lookup table entries 10832 * @lut: Buffer to store the lookup table entries
10803 * lut_size: Size of buffer to store the lookup table entries 10833 * @lut_size: Size of buffer to store the lookup table entries
10804 * 10834 *
10805 * Returns 0 on success, negative on failure 10835 * Returns 0 on success, negative on failure
10806 */ 10836 */
@@ -11374,6 +11404,11 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
11374 u8 i; 11404 u8 i;
11375 11405
11376 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { 11406 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
11407 /* Do not report ports with pending deletions as
11408 * being available.
11409 */
11410 if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
11411 continue;
11377 if (pf->udp_ports[i].port == port) 11412 if (pf->udp_ports[i].port == port)
11378 return i; 11413 return i;
11379 } 11414 }
@@ -11428,6 +11463,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
11428 11463
11429 /* New port: add it and mark its index in the bitmap */ 11464 /* New port: add it and mark its index in the bitmap */
11430 pf->udp_ports[next_idx].port = port; 11465 pf->udp_ports[next_idx].port = port;
11466 pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
11431 pf->pending_udp_bitmap |= BIT_ULL(next_idx); 11467 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
11432 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state); 11468 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
11433} 11469}
@@ -11469,7 +11505,12 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
11469 * and make it pending 11505 * and make it pending
11470 */ 11506 */
11471 pf->udp_ports[idx].port = 0; 11507 pf->udp_ports[idx].port = 0;
11472 pf->pending_udp_bitmap |= BIT_ULL(idx); 11508
11509 /* Toggle pending bit instead of setting it. This way if we are
11510 * deleting a port that has yet to be added we just clear the pending
11511 * bit and don't have to worry about it.
11512 */
11513 pf->pending_udp_bitmap ^= BIT_ULL(idx);
11473 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state); 11514 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
11474 11515
11475 return; 11516 return;
@@ -11500,6 +11541,7 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
11500 * @tb: pointer to array of nladdr (unused) 11541 * @tb: pointer to array of nladdr (unused)
11501 * @dev: the net device pointer 11542 * @dev: the net device pointer
11502 * @addr: the MAC address entry being added 11543 * @addr: the MAC address entry being added
11544 * @vid: VLAN ID
11503 * @flags: instructions from stack about fdb operation 11545 * @flags: instructions from stack about fdb operation
11504 */ 11546 */
11505static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 11547static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
@@ -11545,6 +11587,7 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
11545 * i40e_ndo_bridge_setlink - Set the hardware bridge mode 11587 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
11546 * @dev: the netdev being configured 11588 * @dev: the netdev being configured
11547 * @nlh: RTNL message 11589 * @nlh: RTNL message
11590 * @flags: bridge flags
11548 * 11591 *
11549 * Inserts a new hardware bridge if not already created and 11592 * Inserts a new hardware bridge if not already created and
11550 * enables the bridging mode requested (VEB or VEPA). If the 11593 * enables the bridging mode requested (VEB or VEPA). If the
@@ -14118,6 +14161,7 @@ static void i40e_remove(struct pci_dev *pdev)
14118/** 14161/**
14119 * i40e_pci_error_detected - warning that something funky happened in PCI land 14162 * i40e_pci_error_detected - warning that something funky happened in PCI land
14120 * @pdev: PCI device information struct 14163 * @pdev: PCI device information struct
14164 * @error: the type of PCI error
14121 * 14165 *
14122 * Called to warn that something happened and the error handling steps 14166 * Called to warn that something happened and the error handling steps
14123 * are in progress. Allows the driver to quiesce things, be ready for 14167 * are in progress. Allows the driver to quiesce things, be ready for
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index ba9687c03795..0299e5bbb902 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#include "i40e_prototype.h" 4#include "i40e_prototype.h"
29 5
@@ -1173,6 +1149,7 @@ void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
1173 * i40e_nvmupd_check_wait_event - handle NVM update operation events 1149 * i40e_nvmupd_check_wait_event - handle NVM update operation events
1174 * @hw: pointer to the hardware structure 1150 * @hw: pointer to the hardware structure
1175 * @opcode: the event that just happened 1151 * @opcode: the event that just happened
1152 * @desc: AdminQ descriptor
1176 **/ 1153 **/
1177void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, 1154void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
1178 struct i40e_aq_desc *desc) 1155 struct i40e_aq_desc *desc)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 9c3c3b0d3ac4..a07574bff550 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_OSDEP_H_ 4#ifndef _I40E_OSDEP_H_
29#define _I40E_OSDEP_H_ 5#define _I40E_OSDEP_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 2ec24188d6e2..3170655cdeb9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2016 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_PROTOTYPE_H_ 4#ifndef _I40E_PROTOTYPE_H_
29#define _I40E_PROTOTYPE_H_ 5#define _I40E_PROTOTYPE_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 5b47dd1f75a5..aa3daec2049d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#include "i40e.h" 4#include "i40e.h"
29#include <linux/ptp_classify.h> 5#include <linux/ptp_classify.h>
@@ -40,9 +16,9 @@
40 * At 1Gb link, the period is multiplied by 20. (32ns) 16 * At 1Gb link, the period is multiplied by 20. (32ns)
41 * 1588 functionality is not supported at 100Mbps. 17 * 1588 functionality is not supported at 100Mbps.
42 */ 18 */
43#define I40E_PTP_40GB_INCVAL 0x0199999999ULL 19#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
44#define I40E_PTP_10GB_INCVAL 0x0333333333ULL 20#define I40E_PTP_10GB_INCVAL_MULT 2
45#define I40E_PTP_1GB_INCVAL 0x2000000000ULL 21#define I40E_PTP_1GB_INCVAL_MULT 20
46 22
47#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) 23#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
48#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \ 24#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \
@@ -130,17 +106,24 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
130 ppb = -ppb; 106 ppb = -ppb;
131 } 107 }
132 108
133 smp_mb(); /* Force any pending update before accessing. */ 109 freq = I40E_PTP_40GB_INCVAL;
134 adj = READ_ONCE(pf->ptp_base_adj);
135
136 freq = adj;
137 freq *= ppb; 110 freq *= ppb;
138 diff = div_u64(freq, 1000000000ULL); 111 diff = div_u64(freq, 1000000000ULL);
139 112
140 if (neg_adj) 113 if (neg_adj)
141 adj -= diff; 114 adj = I40E_PTP_40GB_INCVAL - diff;
142 else 115 else
143 adj += diff; 116 adj = I40E_PTP_40GB_INCVAL + diff;
117
118 /* At some link speeds, the base incval is so large that directly
119 * multiplying by ppb would result in arithmetic overflow even when
120 * using a u64. Avoid this by instead calculating the new incval
121 * always in terms of the 40GbE clock rate and then multiplying by the
122 * link speed factor afterwards. This does result in slightly lower
123 * precision at lower link speeds, but it is fairly minor.
124 */
125 smp_mb(); /* Force any pending update before accessing. */
126 adj *= READ_ONCE(pf->ptp_adj_mult);
144 127
145 wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF); 128 wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
146 wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32); 129 wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
@@ -462,6 +445,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
462 struct i40e_link_status *hw_link_info; 445 struct i40e_link_status *hw_link_info;
463 struct i40e_hw *hw = &pf->hw; 446 struct i40e_hw *hw = &pf->hw;
464 u64 incval; 447 u64 incval;
448 u32 mult;
465 449
466 hw_link_info = &hw->phy.link_info; 450 hw_link_info = &hw->phy.link_info;
467 451
@@ -469,10 +453,10 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
469 453
470 switch (hw_link_info->link_speed) { 454 switch (hw_link_info->link_speed) {
471 case I40E_LINK_SPEED_10GB: 455 case I40E_LINK_SPEED_10GB:
472 incval = I40E_PTP_10GB_INCVAL; 456 mult = I40E_PTP_10GB_INCVAL_MULT;
473 break; 457 break;
474 case I40E_LINK_SPEED_1GB: 458 case I40E_LINK_SPEED_1GB:
475 incval = I40E_PTP_1GB_INCVAL; 459 mult = I40E_PTP_1GB_INCVAL_MULT;
476 break; 460 break;
477 case I40E_LINK_SPEED_100MB: 461 case I40E_LINK_SPEED_100MB:
478 { 462 {
@@ -483,15 +467,20 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
483 "1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n"); 467 "1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n");
484 warn_once++; 468 warn_once++;
485 } 469 }
486 incval = 0; 470 mult = 0;
487 break; 471 break;
488 } 472 }
489 case I40E_LINK_SPEED_40GB: 473 case I40E_LINK_SPEED_40GB:
490 default: 474 default:
491 incval = I40E_PTP_40GB_INCVAL; 475 mult = 1;
492 break; 476 break;
493 } 477 }
494 478
479 /* The increment value is calculated by taking the base 40GbE incvalue
480 * and multiplying it by a factor based on the link speed.
481 */
482 incval = I40E_PTP_40GB_INCVAL * mult;
483
495 /* Write the new increment value into the increment register. The 484 /* Write the new increment value into the increment register. The
496 * hardware will not update the clock until both registers have been 485 * hardware will not update the clock until both registers have been
497 * written. 486 * written.
@@ -500,14 +489,14 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
500 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); 489 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
501 490
502 /* Update the base adjustement value. */ 491 /* Update the base adjustement value. */
503 WRITE_ONCE(pf->ptp_base_adj, incval); 492 WRITE_ONCE(pf->ptp_adj_mult, mult);
504 smp_mb(); /* Force the above update. */ 493 smp_mb(); /* Force the above update. */
505} 494}
506 495
507/** 496/**
508 * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping 497 * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping
509 * @pf: Board private structure 498 * @pf: Board private structure
510 * @ifreq: ioctl data 499 * @ifr: ioctl data
511 * 500 *
512 * Obtain the current hardware timestamping settigs as requested. To do this, 501 * Obtain the current hardware timestamping settigs as requested. To do this,
513 * keep a shadow copy of the timestamp settings rather than attempting to 502 * keep a shadow copy of the timestamp settings rather than attempting to
@@ -651,7 +640,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
651/** 640/**
652 * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping 641 * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
653 * @pf: Board private structure 642 * @pf: Board private structure
654 * @ifreq: ioctl data 643 * @ifr: ioctl data
655 * 644 *
656 * Respond to the user filter requests and make the appropriate hardware 645 * Respond to the user filter requests and make the appropriate hardware
657 * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping 646 * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index b3e206e49cc2..52e3680c57f8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_REGISTER_H_ 4#ifndef _I40E_REGISTER_H_
29#define _I40E_REGISTER_H_ 5#define _I40E_REGISTER_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index 10c86f63dc52..77be0702d07c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_STATUS_H_ 4#ifndef _I40E_STATUS_H_
29#define _I40E_STATUS_H_ 5#define _I40E_STATUS_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
index 410ba13bcf21..424f02077e2e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -1,26 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver
5 * Copyright(c) 2013 - 2017 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * The full GNU General Public License is included in this distribution in
17 * the file called "COPYING".
18 *
19 * Contact Information:
20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 *
23 ******************************************************************************/
24 3
25/* Modeled on trace-events-sample.h */ 4/* Modeled on trace-events-sample.h */
26 5
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 87fb27ab9c24..5efa68de935b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2016 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#include <linux/prefetch.h> 4#include <linux/prefetch.h>
29#include <net/busy_poll.h> 5#include <net/busy_poll.h>
@@ -495,7 +471,7 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
495/** 471/**
496 * i40e_add_del_fdir - Build raw packets to add/del fdir filter 472 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
497 * @vsi: pointer to the targeted VSI 473 * @vsi: pointer to the targeted VSI
498 * @cmd: command to get or set RX flow classification rules 474 * @input: filter to add or delete
499 * @add: true adds a filter, false removes it 475 * @add: true adds a filter, false removes it
500 * 476 *
501 **/ 477 **/
@@ -713,7 +689,7 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
713 689
714/** 690/**
715 * i40e_get_tx_pending - how many tx descriptors not processed 691 * i40e_get_tx_pending - how many tx descriptors not processed
716 * @tx_ring: the ring of descriptors 692 * @ring: the ring of descriptors
717 * @in_sw: use SW variables 693 * @in_sw: use SW variables
718 * 694 *
719 * Since there is no access to the ring head register 695 * Since there is no access to the ring head register
@@ -1795,6 +1771,8 @@ static inline int i40e_ptype_to_htype(u8 ptype)
1795 * i40e_rx_hash - set the hash value in the skb 1771 * i40e_rx_hash - set the hash value in the skb
1796 * @ring: descriptor ring 1772 * @ring: descriptor ring
1797 * @rx_desc: specific descriptor 1773 * @rx_desc: specific descriptor
1774 * @skb: skb currently being received and modified
1775 * @rx_ptype: Rx packet type
1798 **/ 1776 **/
1799static inline void i40e_rx_hash(struct i40e_ring *ring, 1777static inline void i40e_rx_hash(struct i40e_ring *ring,
1800 union i40e_rx_desc *rx_desc, 1778 union i40e_rx_desc *rx_desc,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 4bf318b8be85..fdd2c55f03a6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2016 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_TXRX_H_ 4#ifndef _I40E_TXRX_H_
29#define _I40E_TXRX_H_ 5#define _I40E_TXRX_H_
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index bfb80092b352..7df969c59855 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2015 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_TYPE_H_ 4#ifndef _I40E_TYPE_H_
29#define _I40E_TYPE_H_ 5#define _I40E_TYPE_H_
@@ -1318,7 +1294,8 @@ struct i40e_hw_port_stats {
1318 1294
1319/* Checksum and Shadow RAM pointers */ 1295/* Checksum and Shadow RAM pointers */
1320#define I40E_SR_NVM_CONTROL_WORD 0x00 1296#define I40E_SR_NVM_CONTROL_WORD 0x00
1321#define I40E_SR_EMP_MODULE_PTR 0x0F 1297#define I40E_EMP_MODULE_PTR 0x0F
1298#define I40E_SR_EMP_MODULE_PTR 0x48
1322#define I40E_SR_PBA_FLAGS 0x15 1299#define I40E_SR_PBA_FLAGS 0x15
1323#define I40E_SR_PBA_BLOCK_PTR 0x16 1300#define I40E_SR_PBA_BLOCK_PTR 0x16
1324#define I40E_SR_BOOT_CONFIG_PTR 0x17 1301#define I40E_SR_BOOT_CONFIG_PTR 0x17
@@ -1337,6 +1314,8 @@ struct i40e_hw_port_stats {
1337#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 1314#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
1338#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 1315#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
1339#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) 1316#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
1317#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5)
1318#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
1340#define I40E_PTR_TYPE BIT(15) 1319#define I40E_PTR_TYPE BIT(15)
1341#define I40E_SR_OCP_CFG_WORD0 0x2B 1320#define I40E_SR_OCP_CFG_WORD0 0x2B
1342#define I40E_SR_OCP_ENABLED BIT(15) 1321#define I40E_SR_OCP_ENABLED BIT(15)
@@ -1454,7 +1433,8 @@ enum i40e_reset_type {
1454}; 1433};
1455 1434
1456/* IEEE 802.1AB LLDP Agent Variables from NVM */ 1435/* IEEE 802.1AB LLDP Agent Variables from NVM */
1457#define I40E_NVM_LLDP_CFG_PTR 0xD 1436#define I40E_NVM_LLDP_CFG_PTR 0x06
1437#define I40E_SR_LLDP_CFG_PTR 0x31
1458struct i40e_lldp_variables { 1438struct i40e_lldp_variables {
1459 u16 length; 1439 u16 length;
1460 u16 adminstatus; 1440 u16 adminstatus;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 35173cbe80f7..c6d24eaede18 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2016 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#include "i40e.h" 4#include "i40e.h"
29 5
@@ -32,8 +8,8 @@
32/** 8/**
33 * i40e_vc_vf_broadcast 9 * i40e_vc_vf_broadcast
34 * @pf: pointer to the PF structure 10 * @pf: pointer to the PF structure
35 * @opcode: operation code 11 * @v_opcode: operation code
36 * @retval: return value 12 * @v_retval: return value
37 * @msg: pointer to the msg buffer 13 * @msg: pointer to the msg buffer
38 * @msglen: msg length 14 * @msglen: msg length
39 * 15 *
@@ -1663,6 +1639,7 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
1663/** 1639/**
1664 * i40e_vc_get_version_msg 1640 * i40e_vc_get_version_msg
1665 * @vf: pointer to the VF info 1641 * @vf: pointer to the VF info
1642 * @msg: pointer to the msg buffer
1666 * 1643 *
1667 * called from the VF to request the API version used by the PF 1644 * called from the VF to request the API version used by the PF
1668 **/ 1645 **/
@@ -1706,7 +1683,6 @@ static void i40e_del_qch(struct i40e_vf *vf)
1706 * i40e_vc_get_vf_resources_msg 1683 * i40e_vc_get_vf_resources_msg
1707 * @vf: pointer to the VF info 1684 * @vf: pointer to the VF info
1708 * @msg: pointer to the msg buffer 1685 * @msg: pointer to the msg buffer
1709 * @msglen: msg length
1710 * 1686 *
1711 * called from the VF to request its resources 1687 * called from the VF to request its resources
1712 **/ 1688 **/
@@ -1830,8 +1806,6 @@ err:
1830/** 1806/**
1831 * i40e_vc_reset_vf_msg 1807 * i40e_vc_reset_vf_msg
1832 * @vf: pointer to the VF info 1808 * @vf: pointer to the VF info
1833 * @msg: pointer to the msg buffer
1834 * @msglen: msg length
1835 * 1809 *
1836 * called from the VF to reset itself, 1810 * called from the VF to reset itself,
1837 * unlike other virtchnl messages, PF driver 1811 * unlike other virtchnl messages, PF driver
@@ -2180,6 +2154,51 @@ error_param:
2180} 2154}
2181 2155
2182/** 2156/**
2157 * i40e_ctrl_vf_tx_rings
2158 * @vsi: the SRIOV VSI being configured
2159 * @q_map: bit map of the queues to be enabled
2160 * @enable: start or stop the queue
2161 **/
2162static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2163 bool enable)
2164{
2165 struct i40e_pf *pf = vsi->back;
2166 int ret = 0;
2167 u16 q_id;
2168
2169 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2170 ret = i40e_control_wait_tx_q(vsi->seid, pf,
2171 vsi->base_queue + q_id,
2172 false /*is xdp*/, enable);
2173 if (ret)
2174 break;
2175 }
2176 return ret;
2177}
2178
2179/**
2180 * i40e_ctrl_vf_rx_rings
2181 * @vsi: the SRIOV VSI being configured
2182 * @q_map: bit map of the queues to be enabled
2183 * @enable: start or stop the queue
2184 **/
2185static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2186 bool enable)
2187{
2188 struct i40e_pf *pf = vsi->back;
2189 int ret = 0;
2190 u16 q_id;
2191
2192 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2193 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2194 enable);
2195 if (ret)
2196 break;
2197 }
2198 return ret;
2199}
2200
2201/**
2183 * i40e_vc_enable_queues_msg 2202 * i40e_vc_enable_queues_msg
2184 * @vf: pointer to the VF info 2203 * @vf: pointer to the VF info
2185 * @msg: pointer to the msg buffer 2204 * @msg: pointer to the msg buffer
@@ -2211,8 +2230,17 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2211 goto error_param; 2230 goto error_param;
2212 } 2231 }
2213 2232
2214 if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx])) 2233 /* Use the queue bit map sent by the VF */
2234 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2235 true)) {
2215 aq_ret = I40E_ERR_TIMEOUT; 2236 aq_ret = I40E_ERR_TIMEOUT;
2237 goto error_param;
2238 }
2239 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2240 true)) {
2241 aq_ret = I40E_ERR_TIMEOUT;
2242 goto error_param;
2243 }
2216 2244
2217 /* need to start the rings for additional ADq VSI's as well */ 2245 /* need to start the rings for additional ADq VSI's as well */
2218 if (vf->adq_enabled) { 2246 if (vf->adq_enabled) {
@@ -2260,8 +2288,17 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2260 goto error_param; 2288 goto error_param;
2261 } 2289 }
2262 2290
2263 i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]); 2291 /* Use the queue bit map sent by the VF */
2264 2292 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2293 false)) {
2294 aq_ret = I40E_ERR_TIMEOUT;
2295 goto error_param;
2296 }
2297 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2298 false)) {
2299 aq_ret = I40E_ERR_TIMEOUT;
2300 goto error_param;
2301 }
2265error_param: 2302error_param:
2266 /* send the response to the VF */ 2303 /* send the response to the VF */
2267 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, 2304 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
@@ -3556,15 +3593,16 @@ err:
3556 * i40e_vc_process_vf_msg 3593 * i40e_vc_process_vf_msg
3557 * @pf: pointer to the PF structure 3594 * @pf: pointer to the PF structure
3558 * @vf_id: source VF id 3595 * @vf_id: source VF id
3596 * @v_opcode: operation code
3597 * @v_retval: unused return value code
3559 * @msg: pointer to the msg buffer 3598 * @msg: pointer to the msg buffer
3560 * @msglen: msg length 3599 * @msglen: msg length
3561 * @msghndl: msg handle
3562 * 3600 *
3563 * called from the common aeq/arq handler to 3601 * called from the common aeq/arq handler to
3564 * process request from VF 3602 * process request from VF
3565 **/ 3603 **/
3566int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, 3604int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
3567 u32 v_retval, u8 *msg, u16 msglen) 3605 u32 __always_unused v_retval, u8 *msg, u16 msglen)
3568{ 3606{
3569 struct i40e_hw *hw = &pf->hw; 3607 struct i40e_hw *hw = &pf->hw;
3570 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id; 3608 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
@@ -4015,7 +4053,8 @@ error_pvid:
4015 * i40e_ndo_set_vf_bw 4053 * i40e_ndo_set_vf_bw
4016 * @netdev: network interface device structure 4054 * @netdev: network interface device structure
4017 * @vf_id: VF identifier 4055 * @vf_id: VF identifier
4018 * @tx_rate: Tx rate 4056 * @min_tx_rate: Minimum Tx rate
4057 * @max_tx_rate: Maximum Tx rate
4019 * 4058 *
4020 * configure VF Tx rate 4059 * configure VF Tx rate
4021 **/ 4060 **/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 57f727bb9e36..bf67d62e2b5f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2015 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_VIRTCHNL_PF_H_ 4#ifndef _I40E_VIRTCHNL_PF_H_
29#define _I40E_VIRTCHNL_PF_H_ 5#define _I40E_VIRTCHNL_PF_H_
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
index 1e89c5487676..3c5c6e962280 100644
--- a/drivers/net/ethernet/intel/i40evf/Makefile
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -1,29 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2################################################################################ 2# Copyright(c) 2013 - 2018 Intel Corporation.
3#
4# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5# Copyright(c) 2013 - 2014 Intel Corporation.
6#
7# This program is free software; you can redistribute it and/or modify it
8# under the terms and conditions of the GNU General Public License,
9# version 2, as published by the Free Software Foundation.
10#
11# This program is distributed in the hope it will be useful, but WITHOUT
12# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14# more details.
15#
16# You should have received a copy of the GNU General Public License along
17# with this program. If not, see <http://www.gnu.org/licenses/>.
18#
19# The full GNU General Public License is included in this distribution in
20# the file called "COPYING".
21#
22# Contact Information:
23# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25#
26################################################################################
27 3
28# 4#
29## Makefile for the Intel(R) 40GbE VF driver 5## Makefile for the Intel(R) 40GbE VF driver
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 6fd677efa9da..c355120dfdfd 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2016 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#include "i40e_status.h" 4#include "i40e_status.h"
29#include "i40e_type.h" 5#include "i40e_type.h"
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index a7137c165256..1f264b9b6805 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_ADMINQ_H_ 4#ifndef _I40E_ADMINQ_H_
29#define _I40E_ADMINQ_H_ 5#define _I40E_ADMINQ_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 439e71882049..aa81e87cd471 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2017 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_ADMINQ_CMD_H_ 4#ifndef _I40E_ADMINQ_CMD_H_
29#define _I40E_ADMINQ_CMD_H_ 5#define _I40E_ADMINQ_CMD_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
index 7e0fddd8af36..cb8689222c8b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_ALLOC_H_ 4#ifndef _I40E_ALLOC_H_
29#define _I40E_ALLOC_H_ 5#define _I40E_ALLOC_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 67140cdbcd7a..9cef54971312 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#include "i40e_type.h" 4#include "i40e_type.h"
29#include "i40e_adminq.h" 5#include "i40e_adminq.h"
@@ -1255,6 +1231,7 @@ i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
1255 * @hw: pointer to the hw struct 1231 * @hw: pointer to the hw struct
1256 * @buff: command buffer (size in bytes = buff_size) 1232 * @buff: command buffer (size in bytes = buff_size)
1257 * @buff_size: buffer size in bytes 1233 * @buff_size: buffer size in bytes
1234 * @flags: AdminQ command flags
1258 * @cmd_details: pointer to command details structure or NULL 1235 * @cmd_details: pointer to command details structure or NULL
1259 **/ 1236 **/
1260enum 1237enum
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
index 352dd3f3eb6a..f300bf271824 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2015 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_DEVIDS_H_ 4#ifndef _I40E_DEVIDS_H_
29#define _I40E_DEVIDS_H_ 5#define _I40E_DEVIDS_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
index 7432596164f4..1c78de838857 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_HMC_H_ 4#ifndef _I40E_HMC_H_
29#define _I40E_HMC_H_ 5#define _I40E_HMC_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
index ddac0e4908d3..82b00f70a632 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_LAN_HMC_H_ 4#ifndef _I40E_LAN_HMC_H_
29#define _I40E_LAN_HMC_H_ 5#define _I40E_LAN_HMC_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
index 8668ad6c1a65..3ddddb46455b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_OSDEP_H_ 4#ifndef _I40E_OSDEP_H_
29#define _I40E_OSDEP_H_ 5#define _I40E_OSDEP_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 72501bd0f1a9..a358f4b9d5aa 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_PROTOTYPE_H_ 4#ifndef _I40E_PROTOTYPE_H_
29#define _I40E_PROTOTYPE_H_ 5#define _I40E_PROTOTYPE_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
index c9c935659758..49e1f57d99cc 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_REGISTER_H_ 4#ifndef _I40E_REGISTER_H_
29#define _I40E_REGISTER_H_ 5#define _I40E_REGISTER_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
index 0d7993ecb99a..77be0702d07c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_STATUS_H_ 4#ifndef _I40E_STATUS_H_
29#define _I40E_STATUS_H_ 5#define _I40E_STATUS_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_trace.h b/drivers/net/ethernet/intel/i40evf/i40e_trace.h
index ece01dd12a3c..d7a4e68820a8 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_trace.h
@@ -1,26 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel(R) 40-10 Gigabit Ethernet Virtual Function Driver
5 * Copyright(c) 2013 - 2017 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * The full GNU General Public License is included in this distribution in
17 * the file called "COPYING".
18 *
19 * Contact Information:
20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 *
23 ******************************************************************************/
24 3
25/* Modeled on trace-events-sample.h */ 4/* Modeled on trace-events-sample.h */
26 5
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 12bd937861e7..a9730711e257 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2016 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#include <linux/prefetch.h> 4#include <linux/prefetch.h>
29#include <net/busy_poll.h> 5#include <net/busy_poll.h>
@@ -129,7 +105,7 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
129 105
130/** 106/**
131 * i40evf_get_tx_pending - how many Tx descriptors not processed 107 * i40evf_get_tx_pending - how many Tx descriptors not processed
132 * @tx_ring: the ring of descriptors 108 * @ring: the ring of descriptors
133 * @in_sw: is tx_pending being checked in SW or HW 109 * @in_sw: is tx_pending being checked in SW or HW
134 * 110 *
135 * Since there is no access to the ring head register 111 * Since there is no access to the ring head register
@@ -1070,6 +1046,8 @@ static inline int i40e_ptype_to_htype(u8 ptype)
1070 * i40e_rx_hash - set the hash value in the skb 1046 * i40e_rx_hash - set the hash value in the skb
1071 * @ring: descriptor ring 1047 * @ring: descriptor ring
1072 * @rx_desc: specific descriptor 1048 * @rx_desc: specific descriptor
1049 * @skb: skb currently being received and modified
1050 * @rx_ptype: Rx packet type
1073 **/ 1051 **/
1074static inline void i40e_rx_hash(struct i40e_ring *ring, 1052static inline void i40e_rx_hash(struct i40e_ring *ring,
1075 union i40e_rx_desc *rx_desc, 1053 union i40e_rx_desc *rx_desc,
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 5790897eae2e..3b5a63b3236e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2016 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_TXRX_H_ 4#ifndef _I40E_TXRX_H_
29#define _I40E_TXRX_H_ 5#define _I40E_TXRX_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 449de4b0058e..094387db3c11 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2015 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40E_TYPE_H_ 4#ifndef _I40E_TYPE_H_
29#define _I40E_TYPE_H_ 5#define _I40E_TYPE_H_
@@ -1257,7 +1233,8 @@ struct i40e_hw_port_stats {
1257 1233
1258/* Checksum and Shadow RAM pointers */ 1234/* Checksum and Shadow RAM pointers */
1259#define I40E_SR_NVM_CONTROL_WORD 0x00 1235#define I40E_SR_NVM_CONTROL_WORD 0x00
1260#define I40E_SR_EMP_MODULE_PTR 0x0F 1236#define I40E_EMP_MODULE_PTR 0x0F
1237#define I40E_SR_EMP_MODULE_PTR 0x48
1261#define I40E_NVM_OEM_VER_OFF 0x83 1238#define I40E_NVM_OEM_VER_OFF 0x83
1262#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 1239#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
1263#define I40E_SR_NVM_WAKE_ON_LAN 0x19 1240#define I40E_SR_NVM_WAKE_ON_LAN 0x19
@@ -1273,6 +1250,9 @@ struct i40e_hw_port_stats {
1273#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 1250#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
1274#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 1251#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
1275#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) 1252#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
1253#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5)
1254#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
1255#define I40E_PTR_TYPE BIT(15)
1276 1256
1277/* Shadow RAM related */ 1257/* Shadow RAM related */
1278#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 1258#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
@@ -1386,6 +1366,10 @@ enum i40e_reset_type {
1386 I40E_RESET_EMPR = 3, 1366 I40E_RESET_EMPR = 3,
1387}; 1367};
1388 1368
1369/* IEEE 802.1AB LLDP Agent Variables from NVM */
1370#define I40E_NVM_LLDP_CFG_PTR 0x06
1371#define I40E_SR_LLDP_CFG_PTR 0x31
1372
1389/* RSS Hash Table Size */ 1373/* RSS Hash Table Size */
1390#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 1374#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
1391 1375
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 3a7a1e77bf39..98b834932dd3 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2016 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _I40EVF_H_ 4#ifndef _I40EVF_H_
29#define _I40EVF_H_ 5#define _I40EVF_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
index da60ce12b33d..3cc9d60d0d72 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
@@ -1,4 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3
2#include <linux/list.h> 4#include <linux/list.h>
3#include <linux/errno.h> 5#include <linux/errno.h>
4 6
@@ -176,7 +178,6 @@ void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset)
176/** 178/**
177 * i40evf_client_add_instance - add a client instance to the instance list 179 * i40evf_client_add_instance - add a client instance to the instance list
178 * @adapter: pointer to the board struct 180 * @adapter: pointer to the board struct
179 * @client: pointer to a client struct in the client list.
180 * 181 *
181 * Returns cinst ptr on success, NULL on failure 182 * Returns cinst ptr on success, NULL on failure
182 **/ 183 **/
@@ -234,7 +235,6 @@ out:
234/** 235/**
235 * i40evf_client_del_instance - removes a client instance from the list 236 * i40evf_client_del_instance - removes a client instance from the list
236 * @adapter: pointer to the board struct 237 * @adapter: pointer to the board struct
237 * @client: pointer to the client struct
238 * 238 *
239 **/ 239 **/
240static 240static
@@ -438,7 +438,7 @@ static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
438 * i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map 438 * i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map
439 * @ldev: pointer to L2 context. 439 * @ldev: pointer to L2 context.
440 * @client: Client pointer. 440 * @client: Client pointer.
441 * @qv_info: queue and vector list 441 * @qvlist_info: queue and vector list
442 * 442 *
443 * Return 0 on success or < 0 on error 443 * Return 0 on success or < 0 on error
444 **/ 444 **/
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
index 15a10da5bd4a..fc6592c3de9c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_client.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
@@ -1,4 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3
2#ifndef _I40E_CLIENT_H_ 4#ifndef _I40E_CLIENT_H_
3#define _I40E_CLIENT_H_ 5#define _I40E_CLIENT_H_
4 6
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index dc4cde274fb8..69efe0aec76a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2016 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28/* ethtool support for i40evf */ 4/* ethtool support for i40evf */
29#include "i40evf.h" 5#include "i40evf.h"
@@ -226,7 +202,7 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
226 202
227/** 203/**
228 * i40evf_get_priv_flags - report device private flags 204 * i40evf_get_priv_flags - report device private flags
229 * @dev: network interface device structure 205 * @netdev: network interface device structure
230 * 206 *
231 * The get string set count and the string set should be matched for each 207 * The get string set count and the string set should be matched for each
232 * flag returned. Add new strings for each flag to the i40e_gstrings_priv_flags 208 * flag returned. Add new strings for each flag to the i40e_gstrings_priv_flags
@@ -253,7 +229,7 @@ static u32 i40evf_get_priv_flags(struct net_device *netdev)
253 229
254/** 230/**
255 * i40evf_set_priv_flags - set private flags 231 * i40evf_set_priv_flags - set private flags
256 * @dev: network interface device structure 232 * @netdev: network interface device structure
257 * @flags: bit flags to be set 233 * @flags: bit flags to be set
258 **/ 234 **/
259static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags) 235static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags)
@@ -627,6 +603,7 @@ static int i40evf_set_per_queue_coalesce(struct net_device *netdev,
627 * i40evf_get_rxnfc - command to get RX flow classification rules 603 * i40evf_get_rxnfc - command to get RX flow classification rules
628 * @netdev: network interface device structure 604 * @netdev: network interface device structure
629 * @cmd: ethtool rxnfc command 605 * @cmd: ethtool rxnfc command
606 * @rule_locs: pointer to store rule locations
630 * 607 *
631 * Returns Success if the command is supported. 608 * Returns Success if the command is supported.
632 **/ 609 **/
@@ -746,6 +723,7 @@ static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
746 * @netdev: network interface device structure 723 * @netdev: network interface device structure
747 * @indir: indirection table 724 * @indir: indirection table
748 * @key: hash key 725 * @key: hash key
726 * @hfunc: hash function in use
749 * 727 *
750 * Reads the indirection table directly from the hardware. Always returns 0. 728 * Reads the indirection table directly from the hardware. Always returns 0.
751 **/ 729 **/
@@ -774,6 +752,7 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
774 * @netdev: network interface device structure 752 * @netdev: network interface device structure
775 * @indir: indirection table 753 * @indir: indirection table
776 * @key: hash key 754 * @key: hash key
755 * @hfunc: hash function to use
777 * 756 *
778 * Returns -EINVAL if the table specifies an inavlid queue id, otherwise 757 * Returns -EINVAL if the table specifies an inavlid queue id, otherwise
779 * returns 0 after programming the table. 758 * returns 0 after programming the table.
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 5f71532be7f1..3f04a182903d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2016 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#include "i40evf.h" 4#include "i40evf.h"
29#include "i40e_prototype.h" 5#include "i40e_prototype.h"
@@ -473,6 +449,7 @@ static void i40evf_irq_affinity_release(struct kref *ref) {}
473/** 449/**
474 * i40evf_request_traffic_irqs - Initialize MSI-X interrupts 450 * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
475 * @adapter: board private structure 451 * @adapter: board private structure
452 * @basename: device basename
476 * 453 *
477 * Allocates MSI-X vectors for tx and rx handling, and requests 454 * Allocates MSI-X vectors for tx and rx handling, and requests
478 * interrupts from the kernel. 455 * interrupts from the kernel.
@@ -705,7 +682,7 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
705 682
706 f = i40evf_find_vlan(adapter, vlan); 683 f = i40evf_find_vlan(adapter, vlan);
707 if (!f) { 684 if (!f) {
708 f = kzalloc(sizeof(*f), GFP_ATOMIC); 685 f = kzalloc(sizeof(*f), GFP_KERNEL);
709 if (!f) 686 if (!f)
710 goto clearout; 687 goto clearout;
711 688
@@ -745,6 +722,7 @@ static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
745/** 722/**
746 * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device 723 * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
747 * @netdev: network device struct 724 * @netdev: network device struct
725 * @proto: unused protocol data
748 * @vid: VLAN tag 726 * @vid: VLAN tag
749 **/ 727 **/
750static int i40evf_vlan_rx_add_vid(struct net_device *netdev, 728static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
@@ -762,6 +740,7 @@ static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
762/** 740/**
763 * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device 741 * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
764 * @netdev: network device struct 742 * @netdev: network device struct
743 * @proto: unused protocol data
765 * @vid: VLAN tag 744 * @vid: VLAN tag
766 **/ 745 **/
767static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, 746static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
@@ -3160,7 +3139,7 @@ static int i40evf_set_features(struct net_device *netdev,
3160/** 3139/**
3161 * i40evf_features_check - Validate encapsulated packet conforms to limits 3140 * i40evf_features_check - Validate encapsulated packet conforms to limits
3162 * @skb: skb buff 3141 * @skb: skb buff
3163 * @netdev: This physical port's netdev 3142 * @dev: This physical port's netdev
3164 * @features: Offload features that the stack believes apply 3143 * @features: Offload features that the stack believes apply
3165 **/ 3144 **/
3166static netdev_features_t i40evf_features_check(struct sk_buff *skb, 3145static netdev_features_t i40evf_features_check(struct sk_buff *skb,
@@ -3378,6 +3357,24 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
3378 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) 3357 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3379 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 3358 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3380 3359
3360 /* Do not turn on offloads when they are requested to be turned off.
3361 * TSO needs minimum 576 bytes to work correctly.
3362 */
3363 if (netdev->wanted_features) {
3364 if (!(netdev->wanted_features & NETIF_F_TSO) ||
3365 netdev->mtu < 576)
3366 netdev->features &= ~NETIF_F_TSO;
3367 if (!(netdev->wanted_features & NETIF_F_TSO6) ||
3368 netdev->mtu < 576)
3369 netdev->features &= ~NETIF_F_TSO6;
3370 if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
3371 netdev->features &= ~NETIF_F_TSO_ECN;
3372 if (!(netdev->wanted_features & NETIF_F_GRO))
3373 netdev->features &= ~NETIF_F_GRO;
3374 if (!(netdev->wanted_features & NETIF_F_GSO))
3375 netdev->features &= ~NETIF_F_GSO;
3376 }
3377
3381 adapter->vsi.id = adapter->vsi_res->vsi_id; 3378 adapter->vsi.id = adapter->vsi_res->vsi_id;
3382 3379
3383 adapter->vsi.back = adapter; 3380 adapter->vsi.back = adapter;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 26a59890532f..565677de5ba3 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3 *
4 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
5 * Copyright(c) 2013 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#include "i40evf.h" 4#include "i40evf.h"
29#include "i40e_prototype.h" 5#include "i40e_prototype.h"
@@ -179,8 +155,7 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
179 155
180/** 156/**
181 * i40evf_get_vf_config 157 * i40evf_get_vf_config
182 * @hw: pointer to the hardware structure 158 * @adapter: private adapter structure
183 * @len: length of buffer
184 * 159 *
185 * Get VF configuration from PF and populate hw structure. Must be called after 160 * Get VF configuration from PF and populate hw structure. Must be called after
186 * admin queue is initialized. Busy waits until response is received from PF, 161 * admin queue is initialized. Busy waits until response is received from PF,
@@ -423,8 +398,6 @@ int i40evf_request_queues(struct i40evf_adapter *adapter, int num)
423/** 398/**
424 * i40evf_add_ether_addrs 399 * i40evf_add_ether_addrs
425 * @adapter: adapter structure 400 * @adapter: adapter structure
426 * @addrs: the MAC address filters to add (contiguous)
427 * @count: number of filters
428 * 401 *
429 * Request that the PF add one or more addresses to our filters. 402 * Request that the PF add one or more addresses to our filters.
430 **/ 403 **/
@@ -497,8 +470,6 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
497/** 470/**
498 * i40evf_del_ether_addrs 471 * i40evf_del_ether_addrs
499 * @adapter: adapter structure 472 * @adapter: adapter structure
500 * @addrs: the MAC address filters to remove (contiguous)
501 * @count: number of filtes
502 * 473 *
503 * Request that the PF remove one or more addresses from our filters. 474 * Request that the PF remove one or more addresses from our filters.
504 **/ 475 **/
@@ -571,8 +542,6 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
571/** 542/**
572 * i40evf_add_vlans 543 * i40evf_add_vlans
573 * @adapter: adapter structure 544 * @adapter: adapter structure
574 * @vlans: the VLANs to add
575 * @count: number of VLANs
576 * 545 *
577 * Request that the PF add one or more VLAN filters to our VSI. 546 * Request that the PF add one or more VLAN filters to our VSI.
578 **/ 547 **/
@@ -643,8 +612,6 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
643/** 612/**
644 * i40evf_del_vlans 613 * i40evf_del_vlans
645 * @adapter: adapter structure 614 * @adapter: adapter structure
646 * @vlans: the VLANs to remove
647 * @count: number of VLANs
648 * 615 *
649 * Request that the PF remove one or more VLAN filters from our VSI. 616 * Request that the PF remove one or more VLAN filters from our VSI.
650 **/ 617 **/
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index c48583e98ac1..394c1e0656b9 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -1,31 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2################################################################################ 2# Copyright(c) 1999 - 2018 Intel Corporation.
3#
4# Intel 82575 PCI-Express Ethernet Linux driver
5# Copyright(c) 1999 - 2014 Intel Corporation.
6#
7# This program is free software; you can redistribute it and/or modify it
8# under the terms and conditions of the GNU General Public License,
9# version 2, as published by the Free Software Foundation.
10#
11# This program is distributed in the hope it will be useful, but WITHOUT
12# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14# more details.
15#
16# You should have received a copy of the GNU General Public License along with
17# this program; if not, see <http://www.gnu.org/licenses/>.
18#
19# The full GNU General Public License is included in this distribution in
20# the file called "COPYING".
21#
22# Contact Information:
23# Linux NICS <linux.nics@intel.com>
24# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26#
27################################################################################
28
29# 3#
30# Makefile for the Intel(R) 82575 PCI-Express ethernet driver 4# Makefile for the Intel(R) 82575 PCI-Express ethernet driver
31# 5#
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index dd9b6cac220d..b13b42e5a1d9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,26 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
19 *
20 * Contact Information:
21 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 */
24 3
25/* e1000_82575 4/* e1000_82575
26 * e1000_82576 5 * e1000_82576
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index e53ebe97d709..6ad775b1a4c5 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,26 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2014 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
19 *
20 * Contact Information:
21 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 */
24 3
25#ifndef _E1000_82575_H_ 4#ifndef _E1000_82575_H_
26#define _E1000_82575_H_ 5#define _E1000_82575_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index d3d1d868e7ba..252440a418dc 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,26 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2014 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
19 *
20 * Contact Information:
21 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 */
24 3
25#ifndef _E1000_DEFINES_H_ 4#ifndef _E1000_DEFINES_H_
26#define _E1000_DEFINES_H_ 5#define _E1000_DEFINES_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index ff835e1e853d..5d87957b2627 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,25 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2014 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 *
16 * The full GNU General Public License is included in this distribution in
17 * the file called "COPYING".
18 *
19 * Contact Information:
20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 */
23 3
24#ifndef _E1000_HW_H_ 4#ifndef _E1000_HW_H_
25#define _E1000_HW_H_ 5#define _E1000_HW_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 6f548247e6d8..c54ebedca6da 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,26 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2014 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
19 *
20 * Contact Information:
21 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 */
24 3
25/* e1000_i210 4/* e1000_i210
26 * e1000_i211 5 * e1000_i211
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 56f015ccb206..5c437fdc49ee 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,26 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2014 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
19 *
20 * Contact Information:
21 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 */
24 3
25#ifndef _E1000_I210_H_ 4#ifndef _E1000_I210_H_
26#define _E1000_I210_H_ 5#define _E1000_I210_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 298afa0d9159..79ee0a747260 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,26 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2014 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
19 *
20 * Contact Information:
21 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 */
24 3
25#include <linux/if_ether.h> 4#include <linux/if_ether.h>
26#include <linux/delay.h> 5#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 04d80c765aee..6e110f28f922 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,26 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2014 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
19 *
20 * Contact Information:
21 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 */
24 3
25#ifndef _E1000_MAC_H_ 4#ifndef _E1000_MAC_H_
26#define _E1000_MAC_H_ 5#define _E1000_MAC_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index ef42f1689b3b..46debd991bfe 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,26 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2014 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
19 *
20 * Contact Information:
21 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 */
24 3
25#include "e1000_mbx.h" 4#include "e1000_mbx.h"
26 5
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index 4f0ecd28354d..178e60ec71d4 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,26 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2014 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
19 *
20 * Contact Information:
21 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 */
24 3
25#ifndef _E1000_MBX_H_ 4#ifndef _E1000_MBX_H_
26#define _E1000_MBX_H_ 5#define _E1000_MBX_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index e4596f151cd4..09f4dcb09632 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,25 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2014 Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 *
16 * The full GNU General Public License is included in this distribution in
17 * the file called "COPYING".
18 *
19 * Contact Information:
20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 */
23 3
24#include <linux/if_ether.h> 4#include <linux/if_ether.h>
25#include <linux/delay.h> 5#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index dde68cd54a53..091cddf4ada8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,26 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2014 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
19 *
20 * Contact Information:
21 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 */
24 3
25#ifndef _E1000_NVM_H_ 4#ifndef _E1000_NVM_H_
26#define _E1000_NVM_H_ 5#define _E1000_NVM_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 4ec61243da82..2be0e762ec69 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,26 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2015 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
19 *
20 * Contact Information:
21 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 */
24 3
25#include <linux/if_ether.h> 4#include <linux/if_ether.h>
26#include <linux/delay.h> 5#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 856d2cda0643..5894e4b1d0a8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,26 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2014 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
19 *
20 * Contact Information:
21 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 */
24 3
25#ifndef _E1000_PHY_H_ 4#ifndef _E1000_PHY_H_
26#define _E1000_PHY_H_ 5#define _E1000_PHY_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index e8fa8c6530e0..0ad737d2f289 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,26 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2014 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
19 *
20 * Contact Information:
21 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 */
24 3
25#ifndef _E1000_REGS_H_ 4#ifndef _E1000_REGS_H_
26#define _E1000_REGS_H_ 5#define _E1000_REGS_H_
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 990a7fb32e4e..9643b5b3d444 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,26 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2014 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
19 *
20 * Contact Information:
21 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 */
24 3
25/* Linux PRO/1000 Ethernet Driver main header file */ 4/* Linux PRO/1000 Ethernet Driver main header file */
26 5
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 6697c273ab59..2d798499d35e 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,26 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2014 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
19 *
20 * Contact Information:
21 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 */
24 3
25/* ethtool support for igb */ 4/* ethtool support for igb */
26 5
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index bebe43b3a836..3b83747b2700 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -1,26 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2014 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
19 *
20 * Contact Information:
21 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 */
24 3
25#include "igb.h" 4#include "igb.h"
26#include "e1000_82575.h" 5#include "e1000_82575.h"
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 3ce43207a35f..78574c06635b 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,26 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Intel(R) Gigabit Ethernet Linux driver 2/* Copyright(c) 2007 - 2018 Intel Corporation. */
3 * Copyright(c) 2007-2014 Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
19 *
20 * Contact Information:
21 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 */
24 3
25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26 5
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 7454b9895a65..9f4d700e09df 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -1,21 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0+ 1// SPDX-License-Identifier: GPL-2.0+
2/* PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580 2/* Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com> */
3 * 3
4 * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19#include <linux/module.h> 4#include <linux/module.h>
20#include <linux/device.h> 5#include <linux/device.h>
21#include <linux/pci.h> 6#include <linux/pci.h>
diff --git a/drivers/net/ethernet/intel/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile
index efe29dae384a..afd3e36eae75 100644
--- a/drivers/net/ethernet/intel/igbvf/Makefile
+++ b/drivers/net/ethernet/intel/igbvf/Makefile
@@ -1,31 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2################################################################################ 2# Copyright(c) 2009 - 2018 Intel Corporation.
3#
4# Intel(R) 82576 Virtual Function Linux driver
5# Copyright(c) 2009 - 2012 Intel Corporation.
6#
7# This program is free software; you can redistribute it and/or modify it
8# under the terms and conditions of the GNU General Public License,
9# version 2, as published by the Free Software Foundation.
10#
11# This program is distributed in the hope it will be useful, but WITHOUT
12# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14# more details.
15#
16# You should have received a copy of the GNU General Public License along with
17# this program; if not, write to the Free Software Foundation, Inc.,
18# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19#
20# The full GNU General Public License is included in this distribution in
21# the file called "COPYING".
22#
23# Contact Information:
24# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26#
27################################################################################
28
29# 3#
30# Makefile for the Intel(R) 82576 VF ethernet driver 4# Makefile for the Intel(R) 82576 VF ethernet driver
31# 5#
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index 04bcfec0641b..4437f832412d 100644
--- a/drivers/net/ethernet/intel/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel(R) 82576 Virtual Function Linux driver
5 Copyright(c) 1999 - 2012 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, see <http://www.gnu.org/licenses/>.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27 3
28#ifndef _E1000_DEFINES_H_ 4#ifndef _E1000_DEFINES_H_
29#define _E1000_DEFINES_H_ 5#define _E1000_DEFINES_H_
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index ca39e3cccaeb..3ae358b35227 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2009 - 2018 Intel Corporation. */
3
4 Intel(R) 82576 Virtual Function Linux driver
5 Copyright(c) 2009 - 2012 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, see <http://www.gnu.org/licenses/>.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27 3
28/* ethtool support for igbvf */ 4/* ethtool support for igbvf */
29 5
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index f5bf248e22eb..eee26a3be90b 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2009 - 2018 Intel Corporation. */
3
4 Intel(R) 82576 Virtual Function Linux driver
5 Copyright(c) 2009 - 2012 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, see <http://www.gnu.org/licenses/>.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27 3
28/* Linux PRO/1000 Ethernet Driver main header file */ 4/* Linux PRO/1000 Ethernet Driver main header file */
29 5
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
index 9195884096f8..163e5838f7c2 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.c
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2009 - 2018 Intel Corporation. */
3
4 Intel(R) 82576 Virtual Function Linux driver
5 Copyright(c) 2009 - 2012 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, see <http://www.gnu.org/licenses/>.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27 3
28#include "mbx.h" 4#include "mbx.h"
29 5
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h
index 479b062fe9ee..e5b31818d565 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.h
+++ b/drivers/net/ethernet/intel/igbvf/mbx.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel(R) 82576 Virtual Function Linux driver
5 Copyright(c) 1999 - 2012 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, see <http://www.gnu.org/licenses/>.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27 3
28#ifndef _E1000_MBX_H_ 4#ifndef _E1000_MBX_H_
29#define _E1000_MBX_H_ 5#define _E1000_MBX_H_
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index e2b7502f1953..f818f060e5a7 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2009 - 2018 Intel Corporation. */
3
4 Intel(R) 82576 Virtual Function Linux driver
5 Copyright(c) 2009 - 2012 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, see <http://www.gnu.org/licenses/>.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27 3
28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29 5
diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h
index 614e52409f11..625a309a3355 100644
--- a/drivers/net/ethernet/intel/igbvf/regs.h
+++ b/drivers/net/ethernet/intel/igbvf/regs.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2009 - 2018 Intel Corporation. */
3
4 Intel(R) 82576 Virtual Function Linux driver
5 Copyright(c) 2009 - 2012 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, see <http://www.gnu.org/licenses/>.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27 3
28#ifndef _E1000_REGS_H_ 4#ifndef _E1000_REGS_H_
29#define _E1000_REGS_H_ 5#define _E1000_REGS_H_
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index bfe8d8297b2e..b8ba3f94c363 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -1,29 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 2009 - 2018 Intel Corporation. */
3
4 Intel(R) 82576 Virtual Function Linux driver
5 Copyright(c) 2009 - 2012 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, see <http://www.gnu.org/licenses/>.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27 3
28#include "vf.h" 4#include "vf.h"
29 5
diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
index 193b50026246..c71b0d7dbcee 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.h
+++ b/drivers/net/ethernet/intel/igbvf/vf.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2009 - 2018 Intel Corporation. */
3
4 Intel(R) 82576 Virtual Function Linux driver
5 Copyright(c) 2009 - 2012 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, see <http://www.gnu.org/licenses/>.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27 3
28#ifndef _E1000_VF_H_ 4#ifndef _E1000_VF_H_
29#define _E1000_VF_H_ 5#define _E1000_VF_H_
diff --git a/drivers/net/ethernet/intel/ixgb/Makefile b/drivers/net/ethernet/intel/ixgb/Makefile
index 1b42dd554dd2..2433e9300a33 100644
--- a/drivers/net/ethernet/intel/ixgb/Makefile
+++ b/drivers/net/ethernet/intel/ixgb/Makefile
@@ -1,33 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2################################################################################
3#
4# Intel PRO/10GbE Linux driver
5# Copyright(c) 1999 - 2008 Intel Corporation. 2# Copyright(c) 1999 - 2008 Intel Corporation.
6# 3#
7# This program is free software; you can redistribute it and/or modify it
8# under the terms and conditions of the GNU General Public License,
9# version 2, as published by the Free Software Foundation.
10#
11# This program is distributed in the hope it will be useful, but WITHOUT
12# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14# more details.
15#
16# You should have received a copy of the GNU General Public License along with
17# this program; if not, write to the Free Software Foundation, Inc.,
18# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19#
20# The full GNU General Public License is included in this distribution in
21# the file called "COPYING".
22#
23# Contact Information:
24# Linux NICS <linux.nics@intel.com>
25# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27#
28################################################################################
29
30#
31# Makefile for the Intel(R) PRO/10GbE ethernet driver 4# Makefile for the Intel(R) PRO/10GbE ethernet driver
32# 5#
33 6
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index 92022841755f..e85271b68410 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -1,31 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2008 Intel Corporation. */
3
4 Intel PRO/10GbE Linux driver
5 Copyright(c) 1999 - 2008 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#ifndef _IXGB_H_ 4#ifndef _IXGB_H_
31#define _IXGB_H_ 5#define _IXGB_H_
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
index eca216b9b859..129286fc1634 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
@@ -1,30 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2008 Intel Corporation. */
3 Intel PRO/10GbE Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 3
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 5
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h
index 475297a810fe..3ee0a09e5d0a 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h
@@ -1,31 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2008 Intel Corporation. */
3
4 Intel PRO/10GbE Linux driver
5 Copyright(c) 1999 - 2008 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#ifndef _IXGB_EE_H_ 4#ifndef _IXGB_EE_H_
31#define _IXGB_EE_H_ 5#define _IXGB_EE_H_
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index d10a0d242dda..43744bf0fc1c 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -1,30 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2008 Intel Corporation. */
3 Intel PRO/10GbE Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 3
29/* ethtool support for ixgb */ 4/* ethtool support for ixgb */
30 5
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
index bf9a220f71fb..cbaa933ef30d 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
@@ -1,30 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2008 Intel Corporation. */
3 Intel PRO/10GbE Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 3
29/* ixgb_hw.c 4/* ixgb_hw.c
30 * Shared functions for accessing and configuring the adapter 5 * Shared functions for accessing and configuring the adapter
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
index 19f36d87ef61..6064583095da 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
@@ -1,31 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2008 Intel Corporation. */
3
4 Intel PRO/10GbE Linux driver
5 Copyright(c) 1999 - 2008 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#ifndef _IXGB_HW_H_ 4#ifndef _IXGB_HW_H_
31#define _IXGB_HW_H_ 5#define _IXGB_HW_H_
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
index 24e849902d60..9695b8215f01 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
@@ -1,31 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2008 Intel Corporation. */
3
4 Intel PRO/10GbE Linux driver
5 Copyright(c) 1999 - 2008 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#ifndef _IXGB_IDS_H_ 4#ifndef _IXGB_IDS_H_
31#define _IXGB_IDS_H_ 5#define _IXGB_IDS_H_
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 2353c383f0a7..62f2173bc20e 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1,30 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2008 Intel Corporation. */
3 Intel PRO/10GbE Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 3
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 5
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h
index b1710379192e..7bd54efa698d 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h
@@ -1,31 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2008 Intel Corporation. */
3
4 Intel PRO/10GbE Linux driver
5 Copyright(c) 1999 - 2008 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30/* glue for the OS independent part of ixgb 4/* glue for the OS independent part of ixgb
31 * includes register access macros 5 * includes register access macros
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_param.c b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
index 04a60640ddda..f0cadd532c53 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_param.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
@@ -1,30 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2008 Intel Corporation. */
3 Intel PRO/10GbE Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 3
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 5
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 4cd96c88cb5d..5414685189ce 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -1,32 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2################################################################################ 2# Copyright(c) 1999 - 2018 Intel Corporation.
3#
4# Intel 10 Gigabit PCI Express Linux driver
5# Copyright(c) 1999 - 2013 Intel Corporation.
6#
7# This program is free software; you can redistribute it and/or modify it
8# under the terms and conditions of the GNU General Public License,
9# version 2, as published by the Free Software Foundation.
10#
11# This program is distributed in the hope it will be useful, but WITHOUT
12# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14# more details.
15#
16# You should have received a copy of the GNU General Public License along with
17# this program; if not, write to the Free Software Foundation, Inc.,
18# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19#
20# The full GNU General Public License is included in this distribution in
21# the file called "COPYING".
22#
23# Contact Information:
24# Linux NICS <linux.nics@intel.com>
25# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27#
28################################################################################
29
30# 3#
31# Makefile for the Intel(R) 10GbE PCI Express ethernet driver 4# Makefile for the Intel(R) 10GbE PCI Express ethernet driver
32# 5#
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 8fccca57cd6a..fc534e91c6b2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,31 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 10 Gigabit PCI Express Linux driver
5 Copyright(c) 1999 - 2016 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#ifndef _IXGBE_H_ 4#ifndef _IXGBE_H_
31#define _IXGBE_H_ 5#define _IXGBE_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index cb0fe5fedb33..eee277c1bedf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,31 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 10 Gigabit PCI Express Linux driver
5 Copyright(c) 1999 - 2016 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#include <linux/pci.h> 4#include <linux/pci.h>
31#include <linux/delay.h> 5#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 66a74f4651e8..42f63b943ea0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,31 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 10 Gigabit PCI Express Linux driver
5 Copyright(c) 1999 - 2016 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#include <linux/pci.h> 4#include <linux/pci.h>
31#include <linux/delay.h> 5#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 633be93f3dbb..8d038837f72b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,31 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 10 Gigabit PCI Express Linux driver
5 Copyright(c) 1999 - 2016 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#include <linux/pci.h> 4#include <linux/pci.h>
31#include <linux/delay.h> 5#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 2b311382167a..4b531e8ae38a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,31 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 10 Gigabit PCI Express Linux driver
5 Copyright(c) 1999 - 2016 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#ifndef _IXGBE_COMMON_H_ 4#ifndef _IXGBE_COMMON_H_
31#define _IXGBE_COMMON_H_ 5#define _IXGBE_COMMON_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index aaea8282bfd2..d26cea5b43bd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -1,31 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2016 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29 3
30#include "ixgbe.h" 4#include "ixgbe.h"
31#include "ixgbe_type.h" 5#include "ixgbe_type.h"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index 73b6362d4327..60cd5863bf5e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -1,31 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 10 Gigabit PCI Express Linux driver
5 Copyright(c) 1999 - 2013 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#ifndef _DCB_CONFIG_H_ 4#ifndef _DCB_CONFIG_H_
31#define _DCB_CONFIG_H_ 5#define _DCB_CONFIG_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 085130626330..379ae747cdce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -1,31 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 10 Gigabit PCI Express Linux driver
5 Copyright(c) 1999 - 2013 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#include "ixgbe.h" 4#include "ixgbe.h"
31#include "ixgbe_type.h" 5#include "ixgbe_type.h"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
index 7edce607f901..fdca41abb44c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
@@ -1,31 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 10 Gigabit PCI Express Linux driver
5 Copyright(c) 1999 - 2013 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#ifndef _DCB_82598_CONFIG_H_ 4#ifndef _DCB_82598_CONFIG_H_
31#define _DCB_82598_CONFIG_H_ 5#define _DCB_82598_CONFIG_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 1eed6811e914..7948849840a5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -1,30 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 3
29#include "ixgbe.h" 4#include "ixgbe.h"
30#include "ixgbe_type.h" 5#include "ixgbe_type.h"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index fa030f0abc18..c6f084883cab 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -1,31 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 10 Gigabit PCI Express Linux driver
5 Copyright(c) 1999 - 2013 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#ifndef _DCB_82599_CONFIG_H_ 4#ifndef _DCB_82599_CONFIG_H_
31#define _DCB_82599_CONFIG_H_ 5#define _DCB_82599_CONFIG_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index b33f3f87e4b1..c00332d2e02a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -1,30 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2014 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 3
29#include "ixgbe.h" 4#include "ixgbe.h"
30#include <linux/dcbnl.h> 5#include <linux/dcbnl.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index ad54080488ee..55fe8114fe99 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -1,30 +1,6 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 1999 - 2018 Intel Corporation. */
2 3
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28#include <linux/debugfs.h> 4#include <linux/debugfs.h>
29#include <linux/module.h> 5#include <linux/module.h>
30 6
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index c0e6ab42e0e1..bdd179c29ea4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,30 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2016 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 3
29/* ethtool support for ixgbe */ 4/* ethtool support for ixgbe */
30 5
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 7a09a40e4472..b5219e024f70 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -1,30 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2014 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 3
29#include "ixgbe.h" 4#include "ixgbe.h"
30#include <linux/if_ether.h> 5#include <linux/if_ether.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index cf1919901514..724f5382329f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -1,31 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 10 Gigabit PCI Express Linux driver
5 Copyright(c) 1999 - 2013 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#ifndef _IXGBE_FCOE_H 4#ifndef _IXGBE_FCOE_H
31#define _IXGBE_FCOE_H 5#define _IXGBE_FCOE_H
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 68af127987bc..41af2b81e960 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -1,29 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 * 2/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */
3 * Intel 10 Gigabit PCI Express Linux driver
4 * Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 * Contact Information:
22 * Linux NICS <linux.nics@intel.com>
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#include "ixgbe.h" 4#include "ixgbe.h"
29#include <net/xfrm.h> 5#include <net/xfrm.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
index 4f099f516645..9ef7faadda69 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
@@ -1,30 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */
3
4 Intel 10 Gigabit PCI Express Linux driver
5 Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program. If not, see <http://www.gnu.org/licenses/>.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 3
29#ifndef _IXGBE_IPSEC_H_ 4#ifndef _IXGBE_IPSEC_H_
30#define _IXGBE_IPSEC_H_ 5#define _IXGBE_IPSEC_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index ed4cbe94c355..893a9206e718 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,30 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2016 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 3
29#include "ixgbe.h" 4#include "ixgbe.h"
30#include "ixgbe_sriov.h" 5#include "ixgbe_sriov.h"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index b6e5cea84949..6652b201df5b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,30 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2016 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 3
29#include <linux/types.h> 4#include <linux/types.h>
30#include <linux/module.h> 5#include <linux/module.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index a0cb84381cd0..5679293e53f7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,30 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2016 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 3
29#include <linux/pci.h> 4#include <linux/pci.h>
30#include <linux/delay.h> 5#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index c4628b663590..e085b6520dac 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -1,31 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 10 Gigabit PCI Express Linux driver
5 Copyright(c) 1999 - 2016 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#ifndef _IXGBE_MBX_H_ 4#ifndef _IXGBE_MBX_H_
31#define _IXGBE_MBX_H_ 5#define _IXGBE_MBX_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
index 72446644f9fa..fcae520647ce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 *
4 * Intel 10 Gigabit PCI Express Linux drive
5 * Copyright(c) 2016 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27 3
28#ifndef _IXGBE_MODEL_H_ 4#ifndef _IXGBE_MODEL_H_
29#define _IXGBE_MODEL_H_ 5#define _IXGBE_MODEL_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 91bde90f9265..919a7af84b42 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,30 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2014 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 3
29#include <linux/pci.h> 4#include <linux/pci.h>
30#include <linux/delay.h> 5#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index d6a7e77348c5..64e44e01c973 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,31 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 10 Gigabit PCI Express Linux driver
5 Copyright(c) 1999 - 2016 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#ifndef _IXGBE_PHY_H_ 4#ifndef _IXGBE_PHY_H_
31#define _IXGBE_PHY_H_ 5#define _IXGBE_PHY_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index f6cc9166082a..b3e0d8bb5cbd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1,30 +1,6 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 1999 - 2018 Intel Corporation. */
2 3
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2016 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28#include "ixgbe.h" 4#include "ixgbe.h"
29#include <linux/ptp_classify.h> 5#include <linux/ptp_classify.h>
30#include <linux/clocksource.h> 6#include <linux/clocksource.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index bfc4171cd3f9..2649c06d877b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1,30 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2015 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 3
29#include <linux/types.h> 4#include <linux/types.h>
30#include <linux/module.h> 5#include <linux/module.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index e30d1f07e891..3ec21923c89c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -1,31 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 10 Gigabit PCI Express Linux driver
5 Copyright(c) 1999 - 2013 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#ifndef _IXGBE_SRIOV_H_ 4#ifndef _IXGBE_SRIOV_H_
31#define _IXGBE_SRIOV_H_ 5#define _IXGBE_SRIOV_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index 24766e125592..204844288c16 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -1,30 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 3
29#include "ixgbe.h" 4#include "ixgbe.h"
30#include "ixgbe_common.h" 5#include "ixgbe_common.h"
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 2daa81e6e9b2..e8ed37749ab1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,31 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 10 Gigabit PCI Express Linux driver
5 Copyright(c) 1999 - 2016 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
26 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27
28*******************************************************************************/
29 3
30#ifndef _IXGBE_TYPE_H_ 4#ifndef _IXGBE_TYPE_H_
31#define _IXGBE_TYPE_H_ 5#define _IXGBE_TYPE_H_
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index b8c5fd2a2115..de563cfd294d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,30 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2016 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 3
29#include <linux/pci.h> 4#include <linux/pci.h>
30#include <linux/delay.h> 5#include <linux/delay.h>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index 182d640e9f7a..e246c0d2a427 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -1,27 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 *
4 * Intel 10 Gigabit PCI Express Linux driver
5 * Copyright(c) 1999 - 2014 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * The full GNU General Public License is included in this distribution in
17 * the file called "COPYING".
18 *
19 * Contact Information:
20 * Linux NICS <linux.nics@intel.com>
21 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 *
24 *****************************************************************************/
25 3
26#include "ixgbe_type.h" 4#include "ixgbe_type.h"
27 5
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 3123267dfba9..959a37599a6b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,26 +1,6 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 * 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 * Intel 10 Gigabit PCI Express Linux driver 3
4 * Copyright(c) 1999 - 2016 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING".
17 *
18 * Contact Information:
19 * Linux NICS <linux.nics@intel.com>
20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 *
23 ******************************************************************************/
24#include "ixgbe_x540.h" 4#include "ixgbe_x540.h"
25#include "ixgbe_type.h" 5#include "ixgbe_type.h"
26#include "ixgbe_common.h" 6#include "ixgbe_common.h"
diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile
index bb47814cfa90..aba1e6a37a6a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/Makefile
+++ b/drivers/net/ethernet/intel/ixgbevf/Makefile
@@ -1,31 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2################################################################################ 2# Copyright(c) 1999 - 2018 Intel Corporation.
3#
4# Intel 82599 Virtual Function driver
5# Copyright(c) 1999 - 2012 Intel Corporation.
6#
7# This program is free software; you can redistribute it and/or modify it
8# under the terms and conditions of the GNU General Public License,
9# version 2, as published by the Free Software Foundation.
10#
11# This program is distributed in the hope it will be useful, but WITHOUT
12# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14# more details.
15#
16# You should have received a copy of the GNU General Public License along with
17# this program; if not, write to the Free Software Foundation, Inc.,
18# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19#
20# The full GNU General Public License is included in this distribution in
21# the file called "COPYING".
22#
23# Contact Information:
24# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26#
27################################################################################
28
29# 3#
30# Makefile for the Intel(R) 82599 VF ethernet driver 4# Makefile for the Intel(R) 82599 VF ethernet driver
31# 5#
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 71c828842b11..700d8eb2f6f8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 82599 Virtual Function driver
5 Copyright(c) 1999 - 2015 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, see <http://www.gnu.org/licenses/>.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27 3
28#ifndef _IXGBEVF_DEFINES_H_ 4#ifndef _IXGBEVF_DEFINES_H_
29#define _IXGBEVF_DEFINES_H_ 5#define _IXGBEVF_DEFINES_H_
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 8e7d6c6f5c92..e7813d76527c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -1,28 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2018 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, see <http://www.gnu.org/licenses/>.
17
18 The full GNU General Public License is included in this distribution in
19 the file called "COPYING".
20
21 Contact Information:
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 3
27/* ethtool support for ixgbevf */ 4/* ethtool support for ixgbevf */
28 5
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 447ce1d5e0e3..70c75681495f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 82599 Virtual Function driver
5 Copyright(c) 1999 - 2018 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, see <http://www.gnu.org/licenses/>.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27 3
28#ifndef _IXGBEVF_H_ 4#ifndef _IXGBEVF_H_
29#define _IXGBEVF_H_ 5#define _IXGBEVF_H_
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index e3d04f226d57..e91c3d1a43ce 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,28 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2018 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, see <http://www.gnu.org/licenses/>.
17
18 The full GNU General Public License is included in this distribution in
19 the file called "COPYING".
20
21 Contact Information:
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 3
27/****************************************************************************** 4/******************************************************************************
28 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code 5 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index 2819abc454c7..6bc1953263b9 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -1,28 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2015 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, see <http://www.gnu.org/licenses/>.
17
18 The full GNU General Public License is included in this distribution in
19 the file called "COPYING".
20
21 Contact Information:
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 3
27#include "mbx.h" 4#include "mbx.h"
28#include "ixgbevf.h" 5#include "ixgbevf.h"
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 5ec947fe3d09..bfd9ae150808 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 82599 Virtual Function driver
5 Copyright(c) 1999 - 2015 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, see <http://www.gnu.org/licenses/>.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27 3
28#ifndef _IXGBE_MBX_H_ 4#ifndef _IXGBE_MBX_H_
29#define _IXGBE_MBX_H_ 5#define _IXGBE_MBX_H_
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index 278f73980501..68d16ae5b65a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 82599 Virtual Function driver
5 Copyright(c) 1999 - 2015 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, see <http://www.gnu.org/licenses/>.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27 3
28#ifndef _IXGBEVF_REGS_H_ 4#ifndef _IXGBEVF_REGS_H_
29#define _IXGBEVF_REGS_H_ 5#define _IXGBEVF_REGS_H_
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 38d3a327c1bc..bf0577e819e1 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -1,28 +1,5 @@
1/******************************************************************************* 1// SPDX-License-Identifier: GPL-2.0
2 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2015 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, see <http://www.gnu.org/licenses/>.
17
18 The full GNU General Public License is included in this distribution in
19 the file called "COPYING".
20
21 Contact Information:
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25*******************************************************************************/
26 3
27#include "vf.h" 4#include "vf.h"
28#include "ixgbevf.h" 5#include "ixgbevf.h"
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 194fbdaa4519..d1e9e306653b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,29 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/******************************************************************************* 2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4 Intel 82599 Virtual Function driver
5 Copyright(c) 1999 - 2015 Intel Corporation.
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, see <http://www.gnu.org/licenses/>.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27 3
28#ifndef __IXGBE_VF_H__ 4#ifndef __IXGBE_VF_H__
29#define __IXGBE_VF_H__ 5#define __IXGBE_VF_H__
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 4202f9b5b966..6f410235987c 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -942,6 +942,7 @@ struct mvpp2 {
942 struct clk *pp_clk; 942 struct clk *pp_clk;
943 struct clk *gop_clk; 943 struct clk *gop_clk;
944 struct clk *mg_clk; 944 struct clk *mg_clk;
945 struct clk *mg_core_clk;
945 struct clk *axi_clk; 946 struct clk *axi_clk;
946 947
947 /* List of pointers to port structures */ 948 /* List of pointers to port structures */
@@ -8768,18 +8769,27 @@ static int mvpp2_probe(struct platform_device *pdev)
8768 err = clk_prepare_enable(priv->mg_clk); 8769 err = clk_prepare_enable(priv->mg_clk);
8769 if (err < 0) 8770 if (err < 0)
8770 goto err_gop_clk; 8771 goto err_gop_clk;
8772
8773 priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
8774 if (IS_ERR(priv->mg_core_clk)) {
8775 priv->mg_core_clk = NULL;
8776 } else {
8777 err = clk_prepare_enable(priv->mg_core_clk);
8778 if (err < 0)
8779 goto err_mg_clk;
8780 }
8771 } 8781 }
8772 8782
8773 priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk"); 8783 priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
8774 if (IS_ERR(priv->axi_clk)) { 8784 if (IS_ERR(priv->axi_clk)) {
8775 err = PTR_ERR(priv->axi_clk); 8785 err = PTR_ERR(priv->axi_clk);
8776 if (err == -EPROBE_DEFER) 8786 if (err == -EPROBE_DEFER)
8777 goto err_gop_clk; 8787 goto err_mg_core_clk;
8778 priv->axi_clk = NULL; 8788 priv->axi_clk = NULL;
8779 } else { 8789 } else {
8780 err = clk_prepare_enable(priv->axi_clk); 8790 err = clk_prepare_enable(priv->axi_clk);
8781 if (err < 0) 8791 if (err < 0)
8782 goto err_gop_clk; 8792 goto err_mg_core_clk;
8783 } 8793 }
8784 8794
8785 /* Get system's tclk rate */ 8795 /* Get system's tclk rate */
@@ -8793,7 +8803,7 @@ static int mvpp2_probe(struct platform_device *pdev)
8793 if (priv->hw_version == MVPP22) { 8803 if (priv->hw_version == MVPP22) {
8794 err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); 8804 err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
8795 if (err) 8805 if (err)
8796 goto err_mg_clk; 8806 goto err_axi_clk;
8797 /* Sadly, the BM pools all share the same register to 8807 /* Sadly, the BM pools all share the same register to
8798 * store the high 32 bits of their address. So they 8808 * store the high 32 bits of their address. So they
8799 * must all have the same high 32 bits, which forces 8809 * must all have the same high 32 bits, which forces
@@ -8801,14 +8811,14 @@ static int mvpp2_probe(struct platform_device *pdev)
8801 */ 8811 */
8802 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 8812 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
8803 if (err) 8813 if (err)
8804 goto err_mg_clk; 8814 goto err_axi_clk;
8805 } 8815 }
8806 8816
8807 /* Initialize network controller */ 8817 /* Initialize network controller */
8808 err = mvpp2_init(pdev, priv); 8818 err = mvpp2_init(pdev, priv);
8809 if (err < 0) { 8819 if (err < 0) {
8810 dev_err(&pdev->dev, "failed to initialize controller\n"); 8820 dev_err(&pdev->dev, "failed to initialize controller\n");
8811 goto err_mg_clk; 8821 goto err_axi_clk;
8812 } 8822 }
8813 8823
8814 /* Initialize ports */ 8824 /* Initialize ports */
@@ -8821,7 +8831,7 @@ static int mvpp2_probe(struct platform_device *pdev)
8821 if (priv->port_count == 0) { 8831 if (priv->port_count == 0) {
8822 dev_err(&pdev->dev, "no ports enabled\n"); 8832 dev_err(&pdev->dev, "no ports enabled\n");
8823 err = -ENODEV; 8833 err = -ENODEV;
8824 goto err_mg_clk; 8834 goto err_axi_clk;
8825 } 8835 }
8826 8836
8827 /* Statistics must be gathered regularly because some of them (like 8837 /* Statistics must be gathered regularly because some of them (like
@@ -8849,8 +8859,13 @@ err_port_probe:
8849 mvpp2_port_remove(priv->port_list[i]); 8859 mvpp2_port_remove(priv->port_list[i]);
8850 i++; 8860 i++;
8851 } 8861 }
8852err_mg_clk: 8862err_axi_clk:
8853 clk_disable_unprepare(priv->axi_clk); 8863 clk_disable_unprepare(priv->axi_clk);
8864
8865err_mg_core_clk:
8866 if (priv->hw_version == MVPP22)
8867 clk_disable_unprepare(priv->mg_core_clk);
8868err_mg_clk:
8854 if (priv->hw_version == MVPP22) 8869 if (priv->hw_version == MVPP22)
8855 clk_disable_unprepare(priv->mg_clk); 8870 clk_disable_unprepare(priv->mg_clk);
8856err_gop_clk: 8871err_gop_clk:
@@ -8897,6 +8912,7 @@ static int mvpp2_remove(struct platform_device *pdev)
8897 return 0; 8912 return 0;
8898 8913
8899 clk_disable_unprepare(priv->axi_clk); 8914 clk_disable_unprepare(priv->axi_clk);
8915 clk_disable_unprepare(priv->mg_core_clk);
8900 clk_disable_unprepare(priv->mg_clk); 8916 clk_disable_unprepare(priv->mg_clk);
8901 clk_disable_unprepare(priv->pp_clk); 8917 clk_disable_unprepare(priv->pp_clk);
8902 clk_disable_unprepare(priv->gop_clk); 8918 clk_disable_unprepare(priv->gop_clk);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index efc55feddc5c..9f54ccbddea7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -593,30 +593,25 @@ static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
593} 593}
594 594
595#if IS_ENABLED(CONFIG_IPV6) 595#if IS_ENABLED(CONFIG_IPV6)
596/* In IPv6 packets, besides subtracting the pseudo header checksum, 596/* In IPv6 packets, hw_checksum lacks 6 bytes from IPv6 header:
597 * we also compute/add the IP header checksum which 597 * 4 first bytes : priority, version, flow_lbl
598 * is not added by the HW. 598 * and 2 additional bytes : nexthdr, hop_limit.
599 */ 599 */
600static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, 600static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
601 struct ipv6hdr *ipv6h) 601 struct ipv6hdr *ipv6h)
602{ 602{
603 __u8 nexthdr = ipv6h->nexthdr; 603 __u8 nexthdr = ipv6h->nexthdr;
604 __wsum csum_pseudo_hdr = 0; 604 __wsum temp;
605 605
606 if (unlikely(nexthdr == IPPROTO_FRAGMENT || 606 if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
607 nexthdr == IPPROTO_HOPOPTS || 607 nexthdr == IPPROTO_HOPOPTS ||
608 nexthdr == IPPROTO_SCTP)) 608 nexthdr == IPPROTO_SCTP))
609 return -1; 609 return -1;
610 hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr));
611 610
612 csum_pseudo_hdr = csum_partial(&ipv6h->saddr, 611 /* priority, version, flow_lbl */
613 sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); 612 temp = csum_add(hw_checksum, *(__wsum *)ipv6h);
614 csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); 613 /* nexthdr and hop_limit */
615 csum_pseudo_hdr = csum_add(csum_pseudo_hdr, 614 skb->csum = csum_add(temp, (__force __wsum)*(__be16 *)&ipv6h->nexthdr);
616 (__force __wsum)htons(nexthdr));
617
618 skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
619 skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
620 return 0; 615 return 0;
621} 616}
622#endif 617#endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 6b6853773848..0227786308af 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -694,7 +694,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
694 u16 rings_p_up = priv->num_tx_rings_p_up; 694 u16 rings_p_up = priv->num_tx_rings_p_up;
695 695
696 if (netdev_get_num_tc(dev)) 696 if (netdev_get_num_tc(dev))
697 return skb_tx_hash(dev, skb); 697 return fallback(dev, skb);
698 698
699 return fallback(dev, skb) % rings_p_up; 699 return fallback(dev, skb) % rings_p_up;
700} 700}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index bfef69235d71..211578ffc70d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1317,7 +1317,7 @@ static int mlx4_mf_unbond(struct mlx4_dev *dev)
1317 1317
1318 ret = mlx4_unbond_fs_rules(dev); 1318 ret = mlx4_unbond_fs_rules(dev);
1319 if (ret) 1319 if (ret)
1320 mlx4_warn(dev, "multifunction unbond for flow rules failedi (%d)\n", ret); 1320 mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
1321 ret1 = mlx4_unbond_mac_table(dev); 1321 ret1 = mlx4_unbond_mac_table(dev);
1322 if (ret1) { 1322 if (ret1) {
1323 mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1); 1323 mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 12257034131e..ee6684779d11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -86,3 +86,14 @@ config MLX5_EN_IPSEC
86 Build support for IPsec cryptography-offload accelaration in the NIC. 86 Build support for IPsec cryptography-offload accelaration in the NIC.
87 Note: Support for hardware with this capability needs to be selected 87 Note: Support for hardware with this capability needs to be selected
88 for this option to become available. 88 for this option to become available.
89
90config MLX5_EN_TLS
91 bool "TLS cryptography-offload accelaration"
92 depends on MLX5_CORE_EN
93 depends on TLS_DEVICE
94 depends on MLX5_ACCEL
95 default n
96 ---help---
97 Build support for TLS cryptography-offload accelaration in the NIC.
98 Note: Support for hardware with this capability needs to be selected
99 for this option to become available.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index c805769d92a9..a7135f5d5cf6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -8,10 +8,10 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
8 fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \ 8 fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \
9 diag/fs_tracepoint.o 9 diag/fs_tracepoint.o
10 10
11mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o 11mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o
12 12
13mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ 13mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
14 fpga/ipsec.o 14 fpga/ipsec.o fpga/tls.o
15 15
16mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ 16mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
17 en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \ 17 en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
@@ -28,4 +28,6 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
28mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ 28mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
29 en_accel/ipsec_stats.o 29 en_accel/ipsec_stats.o
30 30
31mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o
32
31CFLAGS_tracepoint.o := -I$(src) 33CFLAGS_tracepoint.o := -I$(src)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
new file mode 100644
index 000000000000..77ac19f38cbe
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
@@ -0,0 +1,71 @@
1/*
2 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/mlx5/device.h>
35
36#include "accel/tls.h"
37#include "mlx5_core.h"
38#include "fpga/tls.h"
39
40int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
41 struct tls_crypto_info *crypto_info,
42 u32 start_offload_tcp_sn, u32 *p_swid)
43{
44 return mlx5_fpga_tls_add_tx_flow(mdev, flow, crypto_info,
45 start_offload_tcp_sn, p_swid);
46}
47
48void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid)
49{
50 mlx5_fpga_tls_del_tx_flow(mdev, swid, GFP_KERNEL);
51}
52
53bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
54{
55 return mlx5_fpga_is_tls_device(mdev);
56}
57
58u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev)
59{
60 return mlx5_fpga_tls_device_caps(mdev);
61}
62
63int mlx5_accel_tls_init(struct mlx5_core_dev *mdev)
64{
65 return mlx5_fpga_tls_init(mdev);
66}
67
68void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev)
69{
70 mlx5_fpga_tls_cleanup(mdev);
71}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
new file mode 100644
index 000000000000..6f9c9f446ecc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
@@ -0,0 +1,86 @@
1/*
2 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef __MLX5_ACCEL_TLS_H__
35#define __MLX5_ACCEL_TLS_H__
36
37#include <linux/mlx5/driver.h>
38#include <linux/tls.h>
39
40#ifdef CONFIG_MLX5_ACCEL
41
42enum {
43 MLX5_ACCEL_TLS_TX = BIT(0),
44 MLX5_ACCEL_TLS_RX = BIT(1),
45 MLX5_ACCEL_TLS_V12 = BIT(2),
46 MLX5_ACCEL_TLS_V13 = BIT(3),
47 MLX5_ACCEL_TLS_LRO = BIT(4),
48 MLX5_ACCEL_TLS_IPV6 = BIT(5),
49 MLX5_ACCEL_TLS_AES_GCM128 = BIT(30),
50 MLX5_ACCEL_TLS_AES_GCM256 = BIT(31),
51};
52
53struct mlx5_ifc_tls_flow_bits {
54 u8 src_port[0x10];
55 u8 dst_port[0x10];
56 union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
57 union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
58 u8 ipv6[0x1];
59 u8 direction_sx[0x1];
60 u8 reserved_at_2[0x1e];
61};
62
63int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
64 struct tls_crypto_info *crypto_info,
65 u32 start_offload_tcp_sn, u32 *p_swid);
66void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid);
67bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
68u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
69int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
70void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);
71
72#else
73
74static inline int
75mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
76 struct tls_crypto_info *crypto_info,
77 u32 start_offload_tcp_sn, u32 *p_swid) { return 0; }
78static inline void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) { }
79static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
80static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
81static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
82static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { }
83
84#endif
85
86#endif /* __MLX5_ACCEL_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 56c748ca2a09..9cc07da09b70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -55,6 +55,9 @@
55 55
56struct page_pool; 56struct page_pool;
57 57
58#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
59#define MLX5E_METADATA_ETHER_LEN 8
60
58#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) 61#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
59 62
60#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) 63#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
@@ -332,6 +335,7 @@ enum {
332 MLX5E_SQ_STATE_RECOVERING, 335 MLX5E_SQ_STATE_RECOVERING,
333 MLX5E_SQ_STATE_IPSEC, 336 MLX5E_SQ_STATE_IPSEC,
334 MLX5E_SQ_STATE_AM, 337 MLX5E_SQ_STATE_AM,
338 MLX5E_SQ_STATE_TLS,
335}; 339};
336 340
337struct mlx5e_sq_wqe_info { 341struct mlx5e_sq_wqe_info {
@@ -797,6 +801,9 @@ struct mlx5e_priv {
797#ifdef CONFIG_MLX5_EN_IPSEC 801#ifdef CONFIG_MLX5_EN_IPSEC
798 struct mlx5e_ipsec *ipsec; 802 struct mlx5e_ipsec *ipsec;
799#endif 803#endif
804#ifdef CONFIG_MLX5_EN_TLS
805 struct mlx5e_tls *tls;
806#endif
800}; 807};
801 808
802struct mlx5e_profile { 809struct mlx5e_profile {
@@ -827,6 +834,8 @@ void mlx5e_build_ptys2ethtool_map(void);
827u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, 834u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
828 void *accel_priv, select_queue_fallback_t fallback); 835 void *accel_priv, select_queue_fallback_t fallback);
829netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev); 836netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
837netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
838 struct mlx5e_tx_wqe *wqe, u16 pi);
830 839
831void mlx5e_completion_event(struct mlx5_core_cq *mcq); 840void mlx5e_completion_event(struct mlx5_core_cq *mcq);
832void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); 841void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
@@ -942,6 +951,18 @@ static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
942 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); 951 MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
943} 952}
944 953
954static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
955 struct mlx5e_tx_wqe **wqe,
956 u16 *pi)
957{
958 struct mlx5_wq_cyc *wq;
959
960 wq = &sq->wq;
961 *pi = sq->pc & wq->sz_m1;
962 *wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
963 memset(*wqe, 0, sizeof(**wqe));
964}
965
945static inline 966static inline
946struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc) 967struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
947{ 968{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
new file mode 100644
index 000000000000..68fcb40a2847
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -0,0 +1,72 @@
1/*
2 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef __MLX5E_EN_ACCEL_H__
35#define __MLX5E_EN_ACCEL_H__
36
37#ifdef CONFIG_MLX5_ACCEL
38
39#include <linux/skbuff.h>
40#include <linux/netdevice.h>
41#include "en_accel/ipsec_rxtx.h"
42#include "en_accel/tls_rxtx.h"
43#include "en.h"
44
45static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
46 struct mlx5e_txqsq *sq,
47 struct net_device *dev,
48 struct mlx5e_tx_wqe **wqe,
49 u16 *pi)
50{
51#ifdef CONFIG_MLX5_EN_TLS
52 if (sq->state & BIT(MLX5E_SQ_STATE_TLS)) {
53 skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi);
54 if (unlikely(!skb))
55 return NULL;
56 }
57#endif
58
59#ifdef CONFIG_MLX5_EN_IPSEC
60 if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) {
61 skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb);
62 if (unlikely(!skb))
63 return NULL;
64 }
65#endif
66
67 return skb;
68}
69
70#endif /* CONFIG_MLX5_ACCEL */
71
72#endif /* __MLX5E_EN_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 1198fc1eba4c..93bf10e6508c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -45,9 +45,6 @@
45#define MLX5E_IPSEC_SADB_RX_BITS 10 45#define MLX5E_IPSEC_SADB_RX_BITS 10
46#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L 46#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
47 47
48#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
49#define MLX5E_METADATA_ETHER_LEN 8
50
51struct mlx5e_priv; 48struct mlx5e_priv;
52 49
53struct mlx5e_ipsec_sw_stats { 50struct mlx5e_ipsec_sw_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
new file mode 100644
index 000000000000..d167845271c3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -0,0 +1,197 @@
1/*
2 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/netdevice.h>
35#include <net/ipv6.h>
36#include "en_accel/tls.h"
37#include "accel/tls.h"
38
/* Fill the firmware tls_flow structure with the socket's IPv4
 * destination/source addresses.  TCP ports are filled in separately by
 * mlx5e_tls_set_flow_tcp_ports().
 */
39static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk)
40{
41 struct inet_sock *inet = inet_sk(sk);
42
43 MLX5_SET(tls_flow, flow, ipv6, 0);
44 memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
45 &inet->inet_daddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
46 memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv4_layout.ipv4),
47 &inet->inet_rcv_saddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
48}
49
/* IPv6 counterpart of mlx5e_tls_set_ipv4_flow(): copy the socket's
 * IPv6 destination/source addresses into the tls_flow structure and
 * mark the flow as IPv6.
 */
50#if IS_ENABLED(CONFIG_IPV6)
51static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk)
52{
53 struct ipv6_pinfo *np = inet6_sk(sk);
54
55 MLX5_SET(tls_flow, flow, ipv6, 1);
56 memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
57 &sk->sk_v6_daddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
58 memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv6_layout.ipv6),
59 &np->saddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
60}
61#endif
62
/* Copy the TCP source/destination ports (already in network byte order
 * in the inet socket) into the tls_flow structure.
 */
63static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk)
64{
65 struct inet_sock *inet = inet_sk(sk);
66
67 memcpy(MLX5_ADDR_OF(tls_flow, flow, src_port), &inet->inet_sport,
68 MLX5_FLD_SZ_BYTES(tls_flow, src_port));
69 memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_port), &inet->inet_dport,
70 MLX5_FLD_SZ_BYTES(tls_flow, dst_port));
71}
72
/* Build the complete tls_flow description (addresses + ports) from the
 * socket's 4-tuple.  IPv4-mapped IPv6 destinations are programmed as
 * plain IPv4; native IPv6 additionally requires the device's
 * MLX5_ACCEL_TLS_IPV6 capability.
 *
 * Return: 0 on success, -EINVAL for unsupported families/capabilities.
 */
73static int mlx5e_tls_set_flow(void *flow, struct sock *sk, u32 caps)
74{
75 switch (sk->sk_family) {
76 case AF_INET:
77 mlx5e_tls_set_ipv4_flow(flow, sk);
78 break;
79#if IS_ENABLED(CONFIG_IPV6)
80 case AF_INET6:
/* v4-mapped address on a dual-stack socket: treat as IPv4. */
81 if (!sk->sk_ipv6only &&
82 ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
83 mlx5e_tls_set_ipv4_flow(flow, sk);
84 break;
85 }
86 if (!(caps & MLX5_ACCEL_TLS_IPV6))
87 goto error_out;
88
89 mlx5e_tls_set_ipv6_flow(flow, sk);
90 break;
91#endif
92 default:
93 goto error_out;
94 }
95
96 mlx5e_tls_set_flow_tcp_ports(flow, sk);
97 return 0;
98error_out:
99 return -EINVAL;
100}
101
/* .tls_dev_add callback (struct tlsdev_ops): install a TX TLS offload
 * context for a connection.  Only TX direction is supported; builds the
 * flow 4-tuple, programs the crypto state into the device, and records
 * the returned software ID (swid) and the next expected TCP sequence in
 * the per-connection offload context.
 *
 * Return: 0 on success, -EINVAL for RX/bad flow, -ENOMEM, or the error
 * from mlx5_accel_tls_add_tx_flow().
 */
102static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
103 enum tls_offload_ctx_dir direction,
104 struct tls_crypto_info *crypto_info,
105 u32 start_offload_tcp_sn)
106{
107 struct mlx5e_priv *priv = netdev_priv(netdev);
108 struct tls_context *tls_ctx = tls_get_ctx(sk);
109 struct mlx5_core_dev *mdev = priv->mdev;
110 u32 caps = mlx5_accel_tls_device_caps(mdev);
111 int ret = -ENOMEM;
112 void *flow;
113
114 if (direction != TLS_OFFLOAD_CTX_DIR_TX)
115 return -EINVAL;
116
117 flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
118 if (!flow)
119 return ret;
120
121 ret = mlx5e_tls_set_flow(flow, sk, caps);
122 if (ret)
123 goto free_flow;
124
/* NOTE(review): always true here — direction != TX already returned
 * -EINVAL above; the inner test is redundant.
 */
125 if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
126 struct mlx5e_tls_offload_context *tx_ctx =
127 mlx5e_get_tls_tx_context(tls_ctx);
128 u32 swid;
129
/* NOTE(review): on success, ownership of 'flow' presumably passes
 * to mlx5_accel_tls_add_tx_flow() — it is not freed here; confirm
 * the accel layer releases it, else this leaks per connection.
 */
130 ret = mlx5_accel_tls_add_tx_flow(mdev, flow, crypto_info,
131 start_offload_tcp_sn, &swid);
132 if (ret < 0)
133 goto free_flow;
134
/* Stored in network byte order so it can be OR-ed straight into
 * the metadata's syndrome_swid field on the TX path.
 */
135 tx_ctx->swid = htonl(swid);
136 tx_ctx->expected_seq = start_offload_tcp_sn;
137 }
138
139 return 0;
140free_flow:
141 kfree(flow);
142 return ret;
143}
144
/* .tls_dev_del callback: tear down the device TX flow identified by the
 * swid stored at add time.  RX direction is not supported and only
 * logged.
 */
145static void mlx5e_tls_del(struct net_device *netdev,
146 struct tls_context *tls_ctx,
147 enum tls_offload_ctx_dir direction)
148{
149 struct mlx5e_priv *priv = netdev_priv(netdev);
150
151 if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
152 u32 swid = ntohl(mlx5e_get_tls_tx_context(tls_ctx)->swid);
153
154 mlx5_accel_tls_del_tx_flow(priv->mdev, swid);
155 } else {
156 netdev_err(netdev, "unsupported direction %d\n", direction);
157 }
158}
159
160static const struct tlsdev_ops mlx5e_tls_ops = {
161 .tls_dev_add = mlx5e_tls_add,
162 .tls_dev_del = mlx5e_tls_del,
163};
164
/* Advertise TLS TX offload (NETIF_F_HW_TLS_TX) and hook up tlsdev_ops
 * on the netdev — only when the underlying device supports TLS accel.
 */
165void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
166{
167 struct net_device *netdev = priv->netdev;
168
169 if (!mlx5_accel_is_tls_device(priv->mdev))
170 return;
171
172 netdev->features |= NETIF_F_HW_TLS_TX;
173 netdev->hw_features |= NETIF_F_HW_TLS_TX;
174 netdev->tlsdev_ops = &mlx5e_tls_ops;
175}
176
/* Allocate the per-priv TLS state (software stats).  Failure leaves
 * priv->tls NULL, which the stats/TX paths treat as "TLS disabled".
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
177int mlx5e_tls_init(struct mlx5e_priv *priv)
178{
179 struct mlx5e_tls *tls = kzalloc(sizeof(*tls), GFP_KERNEL);
180
181 if (!tls)
182 return -ENOMEM;
183
184 priv->tls = tls;
185 return 0;
186}
187
/* Free the per-priv TLS state; safe to call when init failed or was
 * skipped (priv->tls NULL).
 */
188void mlx5e_tls_cleanup(struct mlx5e_priv *priv)
189{
190 struct mlx5e_tls *tls = priv->tls;
191
192 if (!tls)
193 return;
194
195 kfree(tls);
196 priv->tls = NULL;
197}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
new file mode 100644
index 000000000000..b6162178f621
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
@@ -0,0 +1,87 @@
1/*
2 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33#ifndef __MLX5E_TLS_H__
34#define __MLX5E_TLS_H__
35
36#ifdef CONFIG_MLX5_EN_TLS
37
38#include <net/tls.h>
39#include "en.h"
40
41struct mlx5e_tls_sw_stats {
42 atomic64_t tx_tls_drop_metadata;
43 atomic64_t tx_tls_drop_resync_alloc;
44 atomic64_t tx_tls_drop_no_sync_data;
45 atomic64_t tx_tls_drop_bypass_required;
46};
47
48struct mlx5e_tls {
49 struct mlx5e_tls_sw_stats sw_stats;
50};
51
52struct mlx5e_tls_offload_context {
53 struct tls_offload_context base;
54 u32 expected_seq;
55 __be32 swid;
56};
57
/* Map the core TLS offload context to the driver-private wrapper that
 * embeds it.  The BUILD_BUG_ON proves at compile time that the wrapper
 * fits inside the space the TLS core reserves
 * (TLS_OFFLOAD_CONTEXT_SIZE), so 'base' really is our storage.
 */
58static inline struct mlx5e_tls_offload_context *
59mlx5e_get_tls_tx_context(struct tls_context *tls_ctx)
60{
61 BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context) >
62 TLS_OFFLOAD_CONTEXT_SIZE);
63 return container_of(tls_offload_ctx(tls_ctx),
64 struct mlx5e_tls_offload_context,
65 base);
66}
67
68void mlx5e_tls_build_netdev(struct mlx5e_priv *priv);
69int mlx5e_tls_init(struct mlx5e_priv *priv);
70void mlx5e_tls_cleanup(struct mlx5e_priv *priv);
71
72int mlx5e_tls_get_count(struct mlx5e_priv *priv);
73int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
74int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data);
75
76#else
77
78static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) { }
79static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; }
80static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { }
81static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; }
82static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; }
83static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; }
84
85#endif
86
87#endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
new file mode 100644
index 000000000000..ad2790fb5966
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -0,0 +1,278 @@
1/*
2 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include "en_accel/tls.h"
35#include "en_accel/tls_rxtx.h"
36
37#define SYNDROME_OFFLOAD_REQUIRED 32
38#define SYNDROME_SYNC 33
39
40struct sync_info {
41 u64 rcd_sn;
42 s32 sync_len;
43 int nr_frags;
44 skb_frag_t frags[MAX_SKB_FRAGS];
45};
46
47struct mlx5e_tls_metadata {
48 /* One byte of syndrome followed by 3 bytes of swid */
49 __be32 syndrome_swid;
50 __be16 first_seq;
51 /* packet type ID field */
52 __be16 ethertype;
53} __packed;
54
/* Insert the 8-byte mlx5e_tls_metadata pseudo-header between the MAC
 * addresses and the original ethertype: push headroom, slide the two
 * MAC addresses back down, then write MLX5E_METADATA_ETHER_TYPE plus
 * syndrome/swid for the device to consume.
 *
 * 'swid' is already big-endian (stored via htonl at add time) so OR-ing
 * it under the syndrome byte yields the intended one-byte-syndrome +
 * three-byte-swid layout.
 *
 * Return: 0 on success, -ENOMEM if headroom could not be made.
 */
55static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
56{
57 struct mlx5e_tls_metadata *pet;
58 struct ethhdr *eth;
59
60 if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata)))
61 return -ENOMEM;
62
63 eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata));
64 skb->mac_header -= sizeof(struct mlx5e_tls_metadata);
65 pet = (struct mlx5e_tls_metadata *)(eth + 1);
66
/* Move dst+src MAC (2 * ETH_ALEN) to the new start of the frame. */
67 memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata),
68 2 * ETH_ALEN);
69
70 eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
71 pet->syndrome_swid = htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;
72
73 return 0;
74}
75
/* Look up the TLS record containing 'tcp_seq' and collect the record
 * data the device needs to resynchronize: the record sequence number
 * and referenced frags covering the bytes from the record start up to
 * tcp_seq (sync_len).  Frag references taken here are consumed by the
 * resync skb built in mlx5e_tls_handle_ooo().
 *
 * A negative sync_len is only acceptable on the start marker record
 * (data before offload began); otherwise it is an error.
 *
 * Return: 0 on success, -EINVAL if no record covers tcp_seq.
 */
76static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context *context,
77 u32 tcp_seq, struct sync_info *info)
78{
79 int remaining, i = 0, ret = -EINVAL;
80 struct tls_record_info *record;
81 unsigned long flags;
82 s32 sync_size;
83
/* The record list is also updated from the TX softirq path. */
84 spin_lock_irqsave(&context->base.lock, flags);
85 record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn);
86
87 if (unlikely(!record))
88 goto out;
89
90 sync_size = tcp_seq - tls_record_start_seq(record);
91 info->sync_len = sync_size;
92 if (unlikely(sync_size < 0)) {
93 if (tls_record_is_start_marker(record))
94 goto done;
95
96 goto out;
97 }
98
99 remaining = sync_size;
100 while (remaining > 0) {
101 info->frags[i] = record->frags[i];
102 __skb_frag_ref(&info->frags[i]);
103 remaining -= skb_frag_size(&info->frags[i]);
104
/* Trim the last frag so the total equals sync_size exactly. */
105 if (remaining < 0)
106 skb_frag_size_add(&info->frags[i], remaining);
107
108 i++;
109 }
110 info->nr_frags = i;
111done:
112 ret = 0;
113out:
114 spin_unlock_irqrestore(&context->base.lock, flags);
115 return ret;
116}
117
/* Finish building the resync skb: clone headers from the original skb,
 * append the record sequence number, fix IP total length and TCP
 * sequence (rewound to the record start), set up GSO for oversized
 * payloads, and stamp the metadata with the SYNC syndrome and the low
 * 16 bits of the starting sequence.
 *
 * NOTE(review): ip_hdr()/tot_len fixup assumes IPv4 — an IPv6 resync
 * packet would need payload_len instead; confirm IPv6 TX is gated off
 * elsewhere.  first_seq being __be16 deliberately keeps only the low
 * 16 bits of tcp_seq — TODO confirm against the device metadata spec.
 */
118static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
119 struct sk_buff *nskb, u32 tcp_seq,
120 int headln, __be64 rcd_sn)
121{
122 struct mlx5e_tls_metadata *pet;
123 u8 syndrome = SYNDROME_SYNC;
124 struct iphdr *iph;
125 struct tcphdr *th;
126 int data_len, mss;
127
128 nskb->dev = skb->dev;
129 skb_reset_mac_header(nskb);
130 skb_set_network_header(nskb, skb_network_offset(skb));
131 skb_set_transport_header(nskb, skb_transport_offset(skb));
132 memcpy(nskb->data, skb->data, headln);
133 memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn));
134
135 iph = ip_hdr(nskb);
136 iph->tot_len = htons(nskb->len - skb_network_offset(nskb));
137 th = tcp_hdr(nskb);
138 data_len = nskb->len - headln;
/* Rewind the sequence so the resync data lines up with the record. */
139 tcp_seq -= data_len;
140 th->seq = htonl(tcp_seq);
141
142 mss = nskb->dev->mtu - (headln - skb_network_offset(nskb));
143 skb_shinfo(nskb)->gso_size = 0;
144 if (data_len > mss) {
145 skb_shinfo(nskb)->gso_size = mss;
146 skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss);
147 }
148 skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
149
/* Overwrite only the syndrome byte of the already-present metadata. */
150 pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
151 memcpy(pet, &syndrome, sizeof(syndrome));
152 pet->first_seq = htons(tcp_seq);
153
154 /* MLX5 devices don't care about the checksum partial start, offset
155 * and pseudo header
156 */
157 nskb->ip_summed = CHECKSUM_PARTIAL;
158
/* Hint the SQ to ring the doorbell only after the original skb. */
159 nskb->xmit_more = 1;
160 nskb->queue_mapping = skb->queue_mapping;
161}
162
/* Handle a TX skb whose TCP sequence does not match the context's
 * expected sequence (retransmission / OOO).  Fetch the record data that
 * the device needs to resync its crypto state, transmit it first as a
 * dedicated "sync" skb, then hand back the (metadata-prefixed) original
 * skb for normal transmission.
 *
 * Return: the original skb ready for xmit, the unmodified skb when no
 * offload is required (payload predates the record), or NULL after
 * freeing the skb on any drop path (each counted in sw_stats).
 */
163static struct sk_buff *
164mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context,
165 struct mlx5e_txqsq *sq, struct sk_buff *skb,
166 struct mlx5e_tx_wqe **wqe,
167 u16 *pi,
168 struct mlx5e_tls *tls)
169{
170 u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
171 struct sync_info info;
172 struct sk_buff *nskb;
173 int linear_len = 0;
174 int headln;
175 int i;
176
177 sq->stats.tls_ooo++;
178
179 if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) {
180 /* We might get here if a retransmission reaches the driver
181 * after the relevant record is acked.
182 * It should be safe to drop the packet in this case
183 */
184 atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data);
185 goto err_out;
186 }
187
188 if (unlikely(info.sync_len < 0)) {
189 u32 payload;
190
191 headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
192 payload = skb->len - headln;
193 if (likely(payload <= -info.sync_len))
194 /* SKB payload doesn't require offload
195 */
196 return skb;
197
/* Part of the payload needs offload but predates the record —
 * cannot be handled; drop.
 */
198 atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
199 goto err_out;
200 }
201
202 if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
203 atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata);
204 goto err_out;
205 }
206
/* Resync skb: linear part holds cloned headers + 8-byte record SN;
 * record payload is attached as the frags collected above.
 */
207 headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
208 linear_len += headln + sizeof(info.rcd_sn);
209 nskb = alloc_skb(linear_len, GFP_ATOMIC);
210 if (unlikely(!nskb)) {
211 atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc);
212 goto err_out;
213 }
214
215 context->expected_seq = tcp_seq + skb->len - headln;
216 skb_put(nskb, linear_len);
217 for (i = 0; i < info.nr_frags; i++)
218 skb_shinfo(nskb)->frags[i] = info.frags[i];
219
220 skb_shinfo(nskb)->nr_frags = info.nr_frags;
221 nskb->data_len = info.sync_len;
222 nskb->len += info.sync_len;
223 sq->stats.tls_resync_bytes += nskb->len;
224 mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
225 cpu_to_be64(info.rcd_sn));
/* Send the sync skb first, then refresh *wqe/*pi for the caller. */
226 mlx5e_sq_xmit(sq, nskb, *wqe, *pi);
227 mlx5e_sq_fetch_wqe(sq, wqe, pi);
228 return skb;
229
230err_out:
231 dev_kfree_skb_any(skb);
232 return NULL;
233}
234
/* TLS TX entry point, called from mlx5e_accel_handle_tx() for every skb
 * on a TLS-enabled SQ.  Skips non-offloaded sockets, pure ACKs, and
 * connections whose offload belongs to another netdev.  In-sequence
 * packets just get the metadata header; out-of-sequence packets go
 * through the resync path.
 *
 * Return: the skb to transmit (possibly modified), or NULL if it was
 * dropped (already freed).
 */
235struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
236 struct mlx5e_txqsq *sq,
237 struct sk_buff *skb,
238 struct mlx5e_tx_wqe **wqe,
239 u16 *pi)
240{
241 struct mlx5e_priv *priv = netdev_priv(netdev);
242 struct mlx5e_tls_offload_context *context;
243 struct tls_context *tls_ctx;
244 u32 expected_seq;
245 int datalen;
246 u32 skb_seq;
247
248 if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
249 goto out;
250
/* No TLS payload (e.g. bare ACK) — nothing to offload. */
251 datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
252 if (!datalen)
253 goto out;
254
/* Offload state may belong to a different device (e.g. bonding). */
255 tls_ctx = tls_get_ctx(skb->sk);
256 if (unlikely(tls_ctx->netdev != netdev))
257 goto out;
258
259 skb_seq = ntohl(tcp_hdr(skb)->seq);
260 context = mlx5e_get_tls_tx_context(tls_ctx);
261 expected_seq = context->expected_seq;
262
263 if (unlikely(expected_seq != skb_seq)) {
264 skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
265 goto out;
266 }
267
268 if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
269 atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
270 dev_kfree_skb_any(skb);
271 skb = NULL;
272 goto out;
273 }
274
275 context->expected_seq = skb_seq + datalen;
276out:
277 return skb;
278}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
new file mode 100644
index 000000000000..405dfd302225
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef __MLX5E_TLS_RXTX_H__
35#define __MLX5E_TLS_RXTX_H__
36
37#ifdef CONFIG_MLX5_EN_TLS
38
39#include <linux/skbuff.h>
40#include "en.h"
41
42struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
43 struct mlx5e_txqsq *sq,
44 struct sk_buff *skb,
45 struct mlx5e_tx_wqe **wqe,
46 u16 *pi);
47
48#endif /* CONFIG_MLX5_EN_TLS */
49
50#endif /* __MLX5E_TLS_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
new file mode 100644
index 000000000000..01468ec27446
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
@@ -0,0 +1,89 @@
1/*
2 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/ethtool.h>
35#include <net/sock.h>
36
37#include "en.h"
38#include "accel/tls.h"
39#include "fpga/sdk.h"
40#include "en_accel/tls.h"
41
42static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
43 { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_metadata) },
44 { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_resync_alloc) },
45 { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_no_sync_data) },
46 { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) },
47};
48
49#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
50 atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
51
52#define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc)
53
/* Number of TLS ethtool counters exposed; 0 when TLS was never
 * initialized for this netdev.
 */
54int mlx5e_tls_get_count(struct mlx5e_priv *priv)
55{
56 if (!priv->tls)
57 return 0;
58
59 return NUM_TLS_SW_COUNTERS;
60}
61
/* Fill ethtool stat name strings for the TLS software counters.
 * Return: number of strings written (0 when TLS is disabled).
 */
62int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
63{
64 unsigned int i, idx = 0;
65
66 if (!priv->tls)
67 return 0;
68
69 for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
70 strcpy(data + (idx++) * ETH_GSTRING_LEN,
71 mlx5e_tls_sw_stats_desc[i].format);
72
73 return NUM_TLS_SW_COUNTERS;
74}
75
/* Read the atomic64 TLS software counters into the ethtool data array,
 * in the same order as the strings above.
 * Return: number of values written (0 when TLS is disabled).
 */
76int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
77{
78 int i, idx = 0;
79
80 if (!priv->tls)
81 return 0;
82
83 for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
84 data[idx++] =
85 MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
86 mlx5e_tls_sw_stats_desc, i);
87
88 return NUM_TLS_SW_COUNTERS;
89}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 3d46ef48d5b8..c641d5656b2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1007,12 +1007,14 @@ static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
1007 1007
1008 mutex_lock(&priv->state_lock); 1008 mutex_lock(&priv->state_lock);
1009 1009
1010 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
1011 goto out;
1012
1013 new_channels.params = priv->channels.params; 1010 new_channels.params = priv->channels.params;
1014 mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params); 1011 mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);
1015 1012
1013 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
1014 priv->channels.params = new_channels.params;
1015 goto out;
1016 }
1017
1016 /* Skip if tx_min_inline is the same */ 1018 /* Skip if tx_min_inline is the same */
1017 if (new_channels.params.tx_min_inline_mode == 1019 if (new_channels.params.tx_min_inline_mode ==
1018 priv->channels.params.tx_min_inline_mode) 1020 priv->channels.params.tx_min_inline_mode)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index f1fe490ed794..553eb63753ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -42,7 +42,9 @@
42#include "en_rep.h" 42#include "en_rep.h"
43#include "en_accel/ipsec.h" 43#include "en_accel/ipsec.h"
44#include "en_accel/ipsec_rxtx.h" 44#include "en_accel/ipsec_rxtx.h"
45#include "en_accel/tls.h"
45#include "accel/ipsec.h" 46#include "accel/ipsec.h"
47#include "accel/tls.h"
46#include "vxlan.h" 48#include "vxlan.h"
47 49
48struct mlx5e_rq_param { 50struct mlx5e_rq_param {
@@ -1014,6 +1016,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
1014 INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover); 1016 INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
1015 if (MLX5_IPSEC_DEV(c->priv->mdev)) 1017 if (MLX5_IPSEC_DEV(c->priv->mdev))
1016 set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); 1018 set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
1019 if (mlx5_accel_is_tls_device(c->priv->mdev))
1020 set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
1017 1021
1018 param->wq.db_numa_node = cpu_to_node(c->cpu); 1022 param->wq.db_numa_node = cpu_to_node(c->cpu);
1019 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); 1023 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
@@ -4376,6 +4380,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
4376#endif 4380#endif
4377 4381
4378 mlx5e_ipsec_build_netdev(priv); 4382 mlx5e_ipsec_build_netdev(priv);
4383 mlx5e_tls_build_netdev(priv);
4379} 4384}
4380 4385
4381static void mlx5e_create_q_counters(struct mlx5e_priv *priv) 4386static void mlx5e_create_q_counters(struct mlx5e_priv *priv)
@@ -4417,12 +4422,16 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
4417 err = mlx5e_ipsec_init(priv); 4422 err = mlx5e_ipsec_init(priv);
4418 if (err) 4423 if (err)
4419 mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err); 4424 mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
4425 err = mlx5e_tls_init(priv);
4426 if (err)
4427 mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
4420 mlx5e_build_nic_netdev(netdev); 4428 mlx5e_build_nic_netdev(netdev);
4421 mlx5e_vxlan_init(priv); 4429 mlx5e_vxlan_init(priv);
4422} 4430}
4423 4431
4424static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) 4432static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
4425{ 4433{
4434 mlx5e_tls_cleanup(priv);
4426 mlx5e_ipsec_cleanup(priv); 4435 mlx5e_ipsec_cleanup(priv);
4427 mlx5e_vxlan_cleanup(priv); 4436 mlx5e_vxlan_cleanup(priv);
4428} 4437}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index d8f68e4d1018..876c3e4c6193 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -877,13 +877,14 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
877}; 877};
878 878
879static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, 879static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
880 struct mlx5e_params *params) 880 struct mlx5e_params *params, u16 mtu)
881{ 881{
882 u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 882 u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
883 MLX5_CQ_PERIOD_MODE_START_FROM_CQE : 883 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
884 MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 884 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
885 885
886 params->hard_mtu = MLX5E_ETH_HARD_MTU; 886 params->hard_mtu = MLX5E_ETH_HARD_MTU;
887 params->sw_mtu = mtu;
887 params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE; 888 params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
888 params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; 889 params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
889 params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE; 890 params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
@@ -931,7 +932,7 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
931 932
932 priv->channels.params.num_channels = profile->max_nch(mdev); 933 priv->channels.params.num_channels = profile->max_nch(mdev);
933 934
934 mlx5e_build_rep_params(mdev, &priv->channels.params); 935 mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
935 mlx5e_build_rep_netdev(netdev); 936 mlx5e_build_rep_netdev(netdev);
936 937
937 mlx5e_timestamp_init(priv); 938 mlx5e_timestamp_init(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 707976482c09..027f54ac1ca2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -290,7 +290,7 @@ static int mlx5e_test_loopback(struct mlx5e_priv *priv)
290 290
291 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { 291 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
292 netdev_err(priv->netdev, 292 netdev_err(priv->netdev,
293 "\tCan't perform loobpack test while device is down\n"); 293 "\tCan't perform loopback test while device is down\n");
294 return -ENODEV; 294 return -ENODEV;
295 } 295 }
296 296
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index b08c94422907..e17919c0af08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -32,6 +32,7 @@
32 32
33#include "en.h" 33#include "en.h"
34#include "en_accel/ipsec.h" 34#include "en_accel/ipsec.h"
35#include "en_accel/tls.h"
35 36
36static const struct counter_desc sw_stats_desc[] = { 37static const struct counter_desc sw_stats_desc[] = {
37 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) }, 38 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
@@ -43,6 +44,12 @@ static const struct counter_desc sw_stats_desc[] = {
43 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) }, 44 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
44 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) }, 45 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
45 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) }, 46 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
47
48#ifdef CONFIG_MLX5_EN_TLS
49 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
50 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
51#endif
52
46 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) }, 53 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
47 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) }, 54 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
48 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) }, 55 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
@@ -161,6 +168,10 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
161 s->tx_csum_partial_inner += sq_stats->csum_partial_inner; 168 s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
162 s->tx_csum_none += sq_stats->csum_none; 169 s->tx_csum_none += sq_stats->csum_none;
163 s->tx_csum_partial += sq_stats->csum_partial; 170 s->tx_csum_partial += sq_stats->csum_partial;
171#ifdef CONFIG_MLX5_EN_TLS
172 s->tx_tls_ooo += sq_stats->tls_ooo;
173 s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
174#endif
164 } 175 }
165 } 176 }
166 177
@@ -1065,6 +1076,22 @@ static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
1065 mlx5e_ipsec_update_stats(priv); 1076 mlx5e_ipsec_update_stats(priv);
1066} 1077}
1067 1078
1079static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv)
1080{
1081 return mlx5e_tls_get_count(priv);
1082}
1083
1084static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data,
1085 int idx)
1086{
1087 return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
1088}
1089
1090static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
1091{
1092 return idx + mlx5e_tls_get_stats(priv, data + idx);
1093}
1094
1068static const struct counter_desc rq_stats_desc[] = { 1095static const struct counter_desc rq_stats_desc[] = {
1069 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, 1096 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
1070 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, 1097 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
@@ -1268,6 +1295,11 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
1268 .update_stats = mlx5e_grp_ipsec_update_stats, 1295 .update_stats = mlx5e_grp_ipsec_update_stats,
1269 }, 1296 },
1270 { 1297 {
1298 .get_num_stats = mlx5e_grp_tls_get_num_stats,
1299 .fill_strings = mlx5e_grp_tls_fill_strings,
1300 .fill_stats = mlx5e_grp_tls_fill_stats,
1301 },
1302 {
1271 .get_num_stats = mlx5e_grp_channels_get_num_stats, 1303 .get_num_stats = mlx5e_grp_channels_get_num_stats,
1272 .fill_strings = mlx5e_grp_channels_fill_strings, 1304 .fill_strings = mlx5e_grp_channels_fill_strings,
1273 .fill_stats = mlx5e_grp_channels_fill_stats, 1305 .fill_stats = mlx5e_grp_channels_fill_stats,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 53111a2df587..a36e6a87066b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -93,6 +93,11 @@ struct mlx5e_sw_stats {
93 u64 rx_cache_waive; 93 u64 rx_cache_waive;
94 u64 ch_eq_rearm; 94 u64 ch_eq_rearm;
95 95
96#ifdef CONFIG_MLX5_EN_TLS
97 u64 tx_tls_ooo;
98 u64 tx_tls_resync_bytes;
99#endif
100
96 /* Special handling counters */ 101 /* Special handling counters */
97 u64 link_down_events_phy; 102 u64 link_down_events_phy;
98}; 103};
@@ -194,6 +199,10 @@ struct mlx5e_sq_stats {
194 u64 csum_partial_inner; 199 u64 csum_partial_inner;
195 u64 added_vlan_packets; 200 u64 added_vlan_packets;
196 u64 nop; 201 u64 nop;
202#ifdef CONFIG_MLX5_EN_TLS
203 u64 tls_ooo;
204 u64 tls_resync_bytes;
205#endif
197 /* less likely accessed in data path */ 206 /* less likely accessed in data path */
198 u64 csum_none; 207 u64 csum_none;
199 u64 stopped; 208 u64 stopped;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 4197001f9801..3c534fc43400 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1864,7 +1864,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
1864 } 1864 }
1865 1865
1866 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); 1866 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
1867 if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { 1867 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
1868 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
1868 pr_info("can't offload re-write of ip proto %d\n", ip_proto); 1869 pr_info("can't offload re-write of ip proto %d\n", ip_proto);
1869 return false; 1870 return false;
1870 } 1871 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 20297108528a..047614d2eda2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -35,12 +35,21 @@
35#include <net/dsfield.h> 35#include <net/dsfield.h>
36#include "en.h" 36#include "en.h"
37#include "ipoib/ipoib.h" 37#include "ipoib/ipoib.h"
38#include "en_accel/ipsec_rxtx.h" 38#include "en_accel/en_accel.h"
39#include "lib/clock.h" 39#include "lib/clock.h"
40 40
41#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS 41#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
42
43#ifndef CONFIG_MLX5_EN_TLS
42#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\ 44#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
43 MLX5E_SQ_NOPS_ROOM) 45 MLX5E_SQ_NOPS_ROOM)
46#else
47/* TLS offload requires MLX5E_SQ_STOP_ROOM to have
48 * enough room for a resync SKB, a normal SKB and a NOP
49 */
50#define MLX5E_SQ_STOP_ROOM (2 * MLX5_SEND_WQE_MAX_WQEBBS +\
51 MLX5E_SQ_NOPS_ROOM)
52#endif
44 53
45static inline void mlx5e_tx_dma_unmap(struct device *pdev, 54static inline void mlx5e_tx_dma_unmap(struct device *pdev,
46 struct mlx5e_sq_dma *dma) 55 struct mlx5e_sq_dma *dma)
@@ -255,7 +264,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
255 dma_addr = dma_map_single(sq->pdev, skb_data, headlen, 264 dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
256 DMA_TO_DEVICE); 265 DMA_TO_DEVICE);
257 if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) 266 if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
258 return -ENOMEM; 267 goto dma_unmap_wqe_err;
259 268
260 dseg->addr = cpu_to_be64(dma_addr); 269 dseg->addr = cpu_to_be64(dma_addr);
261 dseg->lkey = sq->mkey_be; 270 dseg->lkey = sq->mkey_be;
@@ -273,7 +282,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
273 dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, 282 dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
274 DMA_TO_DEVICE); 283 DMA_TO_DEVICE);
275 if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) 284 if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
276 return -ENOMEM; 285 goto dma_unmap_wqe_err;
277 286
278 dseg->addr = cpu_to_be64(dma_addr); 287 dseg->addr = cpu_to_be64(dma_addr);
279 dseg->lkey = sq->mkey_be; 288 dseg->lkey = sq->mkey_be;
@@ -285,6 +294,10 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
285 } 294 }
286 295
287 return num_dma; 296 return num_dma;
297
298dma_unmap_wqe_err:
299 mlx5e_dma_unmap_wqe_err(sq, num_dma);
300 return -ENOMEM;
288} 301}
289 302
290static inline void 303static inline void
@@ -325,8 +338,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
325 } 338 }
326} 339}
327 340
328static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, 341netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
329 struct mlx5e_tx_wqe *wqe, u16 pi) 342 struct mlx5e_tx_wqe *wqe, u16 pi)
330{ 343{
331 struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; 344 struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
332 345
@@ -380,17 +393,15 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
380 num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, 393 num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
381 (struct mlx5_wqe_data_seg *)cseg + ds_cnt); 394 (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
382 if (unlikely(num_dma < 0)) 395 if (unlikely(num_dma < 0))
383 goto dma_unmap_wqe_err; 396 goto err_drop;
384 397
385 mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, 398 mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
386 num_bytes, num_dma, wi, cseg); 399 num_bytes, num_dma, wi, cseg);
387 400
388 return NETDEV_TX_OK; 401 return NETDEV_TX_OK;
389 402
390dma_unmap_wqe_err: 403err_drop:
391 sq->stats.dropped++; 404 sq->stats.dropped++;
392 mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
393
394 dev_kfree_skb_any(skb); 405 dev_kfree_skb_any(skb);
395 406
396 return NETDEV_TX_OK; 407 return NETDEV_TX_OK;
@@ -399,21 +410,19 @@ dma_unmap_wqe_err:
399netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) 410netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
400{ 411{
401 struct mlx5e_priv *priv = netdev_priv(dev); 412 struct mlx5e_priv *priv = netdev_priv(dev);
402 struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)]; 413 struct mlx5e_tx_wqe *wqe;
403 struct mlx5_wq_cyc *wq = &sq->wq; 414 struct mlx5e_txqsq *sq;
404 u16 pi = sq->pc & wq->sz_m1; 415 u16 pi;
405 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
406 416
407 memset(wqe, 0, sizeof(*wqe)); 417 sq = priv->txq2sq[skb_get_queue_mapping(skb)];
418 mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
408 419
409#ifdef CONFIG_MLX5_EN_IPSEC 420#ifdef CONFIG_MLX5_ACCEL
410 if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) { 421 /* might send skbs and update wqe and pi */
411 skb = mlx5e_ipsec_handle_tx_skb(dev, wqe, skb); 422 skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
412 if (unlikely(!skb)) 423 if (unlikely(!skb))
413 return NETDEV_TX_OK; 424 return NETDEV_TX_OK;
414 }
415#endif 425#endif
416
417 return mlx5e_sq_xmit(sq, skb, wqe, pi); 426 return mlx5e_sq_xmit(sq, skb, wqe, pi);
418} 427}
419 428
@@ -645,17 +654,15 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
645 num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, 654 num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
646 (struct mlx5_wqe_data_seg *)cseg + ds_cnt); 655 (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
647 if (unlikely(num_dma < 0)) 656 if (unlikely(num_dma < 0))
648 goto dma_unmap_wqe_err; 657 goto err_drop;
649 658
650 mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, 659 mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
651 num_bytes, num_dma, wi, cseg); 660 num_bytes, num_dma, wi, cseg);
652 661
653 return NETDEV_TX_OK; 662 return NETDEV_TX_OK;
654 663
655dma_unmap_wqe_err: 664err_drop:
656 sq->stats.dropped++; 665 sq->stats.dropped++;
657 mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
658
659 dev_kfree_skb_any(skb); 666 dev_kfree_skb_any(skb);
660 667
661 return NETDEV_TX_OK; 668 return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
index 82405ed84725..3e2355c8df3f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
@@ -53,6 +53,7 @@ struct mlx5_fpga_device {
53 } conn_res; 53 } conn_res;
54 54
55 struct mlx5_fpga_ipsec *ipsec; 55 struct mlx5_fpga_ipsec *ipsec;
56 struct mlx5_fpga_tls *tls;
56}; 57};
57 58
58#define mlx5_fpga_dbg(__adev, format, ...) \ 59#define mlx5_fpga_dbg(__adev, format, ...) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 0f5da499a223..3c4f1f326e13 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -43,9 +43,6 @@
43#include "fpga/sdk.h" 43#include "fpga/sdk.h"
44#include "fpga/core.h" 44#include "fpga/core.h"
45 45
46#define SBU_QP_QUEUE_SIZE 8
47#define MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC (60 * 1000)
48
49enum mlx5_fpga_ipsec_cmd_status { 46enum mlx5_fpga_ipsec_cmd_status {
50 MLX5_FPGA_IPSEC_CMD_PENDING, 47 MLX5_FPGA_IPSEC_CMD_PENDING,
51 MLX5_FPGA_IPSEC_CMD_SEND_FAIL, 48 MLX5_FPGA_IPSEC_CMD_SEND_FAIL,
@@ -258,7 +255,7 @@ static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
258{ 255{
259 struct mlx5_fpga_ipsec_cmd_context *context = ctx; 256 struct mlx5_fpga_ipsec_cmd_context *context = ctx;
260 unsigned long timeout = 257 unsigned long timeout =
261 msecs_to_jiffies(MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC); 258 msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC);
262 int res; 259 int res;
263 260
264 res = wait_for_completion_timeout(&context->complete, timeout); 261 res = wait_for_completion_timeout(&context->complete, timeout);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
index baa537e54a49..a0573cc2fc9b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
@@ -41,6 +41,8 @@
41 * DOC: Innova SDK 41 * DOC: Innova SDK
42 * This header defines the in-kernel API for Innova FPGA client drivers. 42 * This header defines the in-kernel API for Innova FPGA client drivers.
43 */ 43 */
44#define SBU_QP_QUEUE_SIZE 8
45#define MLX5_FPGA_CMD_TIMEOUT_MSEC (60 * 1000)
44 46
45enum mlx5_fpga_access_type { 47enum mlx5_fpga_access_type {
46 MLX5_FPGA_ACCESS_TYPE_I2C = 0x0, 48 MLX5_FPGA_ACCESS_TYPE_I2C = 0x0,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
new file mode 100644
index 000000000000..21048013826c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -0,0 +1,562 @@
1/*
2 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/mlx5/device.h>
35#include "fpga/tls.h"
36#include "fpga/cmd.h"
37#include "fpga/sdk.h"
38#include "fpga/core.h"
39#include "accel/tls.h"
40
struct mlx5_fpga_tls_command_context;

/* Completion callback invoked when the FPGA answers a command; @resp
 * is NULL when the command could not be sent at all.
 */
typedef void (*mlx5_fpga_tls_command_complete)
	(struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev,
	 struct mlx5_fpga_tls_command_context *ctx,
	 struct mlx5_fpga_dma_buf *resp);

/* State shared between a queued command, its DMA TX completion and the
 * FPGA response handler.
 */
struct mlx5_fpga_tls_command_context {
	struct list_head list;	/* entry in mlx5_fpga_tls.pending_cmds */
	/* There is no guarantee on the order between the TX completion
	 * and the command response.
	 * The TX completion is going to touch cmd->buf even in
	 * the case of successful transmission.
	 * So instead of requiring separate allocations for cmd
	 * and cmd->buf we've decided to use a reference counter
	 */
	refcount_t ref;
	struct mlx5_fpga_dma_buf buf;	/* DMA buffer holding the command */
	mlx5_fpga_tls_command_complete complete;	/* response handler */
};
61
62static void
63mlx5_fpga_tls_put_command_ctx(struct mlx5_fpga_tls_command_context *ctx)
64{
65 if (refcount_dec_and_test(&ctx->ref))
66 kfree(ctx);
67}
68
69static void mlx5_fpga_tls_cmd_complete(struct mlx5_fpga_device *fdev,
70 struct mlx5_fpga_dma_buf *resp)
71{
72 struct mlx5_fpga_conn *conn = fdev->tls->conn;
73 struct mlx5_fpga_tls_command_context *ctx;
74 struct mlx5_fpga_tls *tls = fdev->tls;
75 unsigned long flags;
76
77 spin_lock_irqsave(&tls->pending_cmds_lock, flags);
78 ctx = list_first_entry(&tls->pending_cmds,
79 struct mlx5_fpga_tls_command_context, list);
80 list_del(&ctx->list);
81 spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
82 ctx->complete(conn, fdev, ctx, resp);
83}
84
/* DMA TX-completion callback for a command buffer.
 * Drops the reference that was taken for the in-flight transmission.
 * A non-zero @status means the send failed, so no response will arrive
 * from the FPGA; complete the command right away with a NULL response.
 */
static void mlx5_fpga_cmd_send_complete(struct mlx5_fpga_conn *conn,
					struct mlx5_fpga_device *fdev,
					struct mlx5_fpga_dma_buf *buf,
					u8 status)
{
	struct mlx5_fpga_tls_command_context *ctx =
	    container_of(buf, struct mlx5_fpga_tls_command_context, buf);

	mlx5_fpga_tls_put_command_ctx(ctx);

	if (unlikely(status))
		mlx5_fpga_tls_cmd_complete(fdev, NULL);
}
98
/* Queue @cmd for transmission to the FPGA over the SBU command QP.
 *
 * Sets the refcount to 2: one reference for the DMA TX completion
 * (mlx5_fpga_cmd_send_complete) and one for the response path
 * (@complete).  If the send itself fails, @complete is invoked
 * immediately with a NULL response.
 */
static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev,
				   struct mlx5_fpga_tls_command_context *cmd,
				   mlx5_fpga_tls_command_complete complete)
{
	struct mlx5_fpga_tls *tls = fdev->tls;
	unsigned long flags;
	int ret;

	refcount_set(&cmd->ref, 2);
	cmd->complete = complete;
	cmd->buf.complete = mlx5_fpga_cmd_send_complete;

	spin_lock_irqsave(&tls->pending_cmds_lock, flags);
	/* mlx5_fpga_sbu_conn_sendmsg is called under pending_cmds_lock
	 * to make sure commands are inserted to the tls->pending_cmds list
	 * and the command QP in the same order.
	 */
	ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf);
	if (likely(!ret))
		list_add_tail(&cmd->list, &tls->pending_cmds);
	else
		/* Never queued: report the failure to the handler now */
		complete(tls->conn, fdev, cmd, NULL);
	spin_unlock_irqrestore(&tls->pending_cmds_lock, flags);
}
123
124/* Start of context identifiers range (inclusive) */
125#define SWID_START 0
126/* End of context identifiers range (exclusive) */
127#define SWID_END BIT(24)
128
129static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
130 void *ptr)
131{
132 int ret;
133
134 /* TLS metadata format is 1 byte for syndrome followed
135 * by 3 bytes of swid (software ID)
136 * swid must not exceed 3 bytes.
137 * See tls_rxtx.c:insert_pet() for details
138 */
139 BUILD_BUG_ON((SWID_END - 1) & 0xFF000000);
140
141 idr_preload(GFP_KERNEL);
142 spin_lock_irq(idr_spinlock);
143 ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC);
144 spin_unlock_irq(idr_spinlock);
145 idr_preload_end();
146
147 return ret;
148}
149
150static void mlx5_fpga_tls_release_swid(struct idr *idr,
151 spinlock_t *idr_spinlock, u32 swid)
152{
153 unsigned long flags;
154
155 spin_lock_irqsave(idr_spinlock, flags);
156 idr_remove(idr, swid);
157 spin_unlock_irqrestore(idr_spinlock, flags);
158}
159
/* Command context for CMD_TEARDOWN_STREAM; remembers which swid to
 * release once the FPGA confirms the teardown.
 */
struct mlx5_teardown_stream_context {
	struct mlx5_fpga_tls_command_context cmd;
	u32 swid;
};
164
/* Response handler for a CMD_TEARDOWN_STREAM command.
 * @resp is NULL when the command could not be sent at all.
 */
static void
mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
				  struct mlx5_fpga_device *fdev,
				  struct mlx5_fpga_tls_command_context *cmd,
				  struct mlx5_fpga_dma_buf *resp)
{
	struct mlx5_teardown_stream_context *ctx =
		container_of(cmd, struct mlx5_teardown_stream_context, cmd);

	if (resp) {
		u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);

		if (syndrome)
			mlx5_fpga_err(fdev,
				      "Teardown stream failed with syndrome = %d",
				      syndrome);
		else
			/* HW context destroyed: the swid may be reused */
			mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
						   &fdev->tls->idr_spinlock,
						   ctx->swid);
	}
	/* NOTE(review): on send failure or non-zero syndrome the swid is
	 * never returned to the IDR - presumably to avoid reusing an id
	 * the FPGA may still hold state for; confirm.
	 */
	mlx5_fpga_tls_put_command_ctx(cmd);
}
188
/* Copy the connection-identifying fields from a tls_flow layout into a
 * tls_cmd layout.  The memcpy transfers everything from src_port up to
 * (not including) the ipv6 field; the trailing bit-fields (ipv6,
 * direction_sx) are moved explicitly.
 */
static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd)
{
	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, src_port), flow,
	       MLX5_BYTE_OFF(tls_flow, ipv6));

	MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6));
	MLX5_SET(tls_cmd, cmd, direction_sx,
		 MLX5_GET(tls_flow, flow, direction_sx));
}
198
199void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, void *flow,
200 u32 swid, gfp_t flags)
201{
202 struct mlx5_teardown_stream_context *ctx;
203 struct mlx5_fpga_dma_buf *buf;
204 void *cmd;
205
206 ctx = kzalloc(sizeof(*ctx) + MLX5_TLS_COMMAND_SIZE, flags);
207 if (!ctx)
208 return;
209
210 buf = &ctx->cmd.buf;
211 cmd = (ctx + 1);
212 MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
213 MLX5_SET(tls_cmd, cmd, swid, swid);
214
215 mlx5_fpga_tls_flow_to_cmd(flow, cmd);
216 kfree(flow);
217
218 buf->sg[0].data = cmd;
219 buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
220
221 ctx->swid = swid;
222 mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
223 mlx5_fpga_tls_teardown_completion);
224}
225
226void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid,
227 gfp_t flags)
228{
229 struct mlx5_fpga_tls *tls = mdev->fpga->tls;
230 void *flow;
231
232 rcu_read_lock();
233 flow = idr_find(&tls->tx_idr, swid);
234 rcu_read_unlock();
235
236 if (!flow) {
237 mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
238 swid);
239 return;
240 }
241
242 mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
243}
244
/* Lifecycle states of a setup-stream command; exchanged atomically
 * between the waiting thread and the completion handler so exactly one
 * side performs the final cleanup.
 */
enum mlx5_fpga_setup_stream_status {
	MLX5_FPGA_CMD_PENDING,
	MLX5_FPGA_CMD_SEND_FAILED,
	MLX5_FPGA_CMD_RESPONSE_RECEIVED,
	MLX5_FPGA_CMD_ABANDONED,	/* waiter was killed before completion */
};

struct mlx5_setup_stream_context {
	struct mlx5_fpga_tls_command_context cmd;
	atomic_t status;	/* enum mlx5_fpga_setup_stream_status */
	u32 syndrome;		/* FPGA result code; 0 means success */
	struct completion comp;	/* signalled by the completion handler */
};
258
/* Response handler for a CMD_SETUP_STREAM command.
 *
 * Normally wakes the waiter in mlx5_fpga_tls_setup_stream_cmd().  If
 * that waiter was killed and abandoned the command, this handler owns
 * cleanup instead: when the stream was actually created (zero
 * syndrome) the HW context must be torn down again.
 */
static void
mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn,
			       struct mlx5_fpga_device *fdev,
			       struct mlx5_fpga_tls_command_context *cmd,
			       struct mlx5_fpga_dma_buf *resp)
{
	struct mlx5_setup_stream_context *ctx =
		container_of(cmd, struct mlx5_setup_stream_context, cmd);
	int status = MLX5_FPGA_CMD_SEND_FAILED;
	void *tls_cmd = ctx + 1;

	/* If we failed to send the command, resp == NULL */
	if (resp) {
		ctx->syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
		status = MLX5_FPGA_CMD_RESPONSE_RECEIVED;
	}

	/* Release-exchange pairs with the acquire-exchange in
	 * mlx5_fpga_tls_setup_stream_cmd(); exactly one side observes
	 * the other's final state.
	 */
	status = atomic_xchg_release(&ctx->status, status);
	if (likely(status != MLX5_FPGA_CMD_ABANDONED)) {
		complete(&ctx->comp);
		return;
	}

	mlx5_fpga_err(fdev, "Command was abandoned, syndrome = %u\n",
		      ctx->syndrome);

	if (!ctx->syndrome) {
		/* The process was killed while waiting for the context to be
		 * added, and the add completed successfully.
		 * We need to destroy the HW context, and we can't reuse
		 * the command context because we might not have received
		 * the tx completion yet.
		 */
		mlx5_fpga_tls_del_tx_flow(fdev->mdev,
					  MLX5_GET(tls_cmd, tls_cmd, swid),
					  GFP_ATOMIC);
	}

	mlx5_fpga_tls_put_command_ctx(cmd);
}
299
/* Send a CMD_SETUP_STREAM and wait (killably) for the FPGA response.
 *
 * Return: 0 on success; -EINTR if the waiter was killed before the
 * response arrived (final cleanup is then done by the completion
 * handler); -ENOMEM when the FPGA reported a non-zero syndrome.
 * NOTE(review): -ENOMEM is returned for every non-zero syndrome, not
 * only out-of-memory conditions - confirm this mapping is intended.
 */
static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev,
					  struct mlx5_setup_stream_context *ctx)
{
	struct mlx5_fpga_dma_buf *buf;
	void *cmd = ctx + 1;	/* command payload follows the context */
	int status, ret = 0;

	buf = &ctx->cmd.buf;
	buf->sg[0].data = cmd;
	buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
	MLX5_SET(tls_cmd, cmd, command_type, CMD_SETUP_STREAM);

	init_completion(&ctx->comp);
	atomic_set(&ctx->status, MLX5_FPGA_CMD_PENDING);
	ctx->syndrome = -1;

	mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
			       mlx5_fpga_tls_setup_completion);
	wait_for_completion_killable(&ctx->comp);

	/* Acquire-exchange pairs with the release-exchange in the
	 * completion handler.
	 */
	status = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED);
	if (unlikely(status == MLX5_FPGA_CMD_PENDING))
		/* ctx is going to be released in mlx5_fpga_tls_setup_completion */
		return -EINTR;

	if (unlikely(ctx->syndrome))
		ret = -ENOMEM;

	mlx5_fpga_tls_put_command_ctx(&ctx->cmd);
	return ret;
}
331
/* RX callback on the SBU command QP: every received buffer is handed
 * to the generic command-completion dispatcher.
 */
static void mlx5_fpga_tls_hw_qp_recv_cb(void *cb_arg,
					struct mlx5_fpga_dma_buf *buf)
{
	struct mlx5_fpga_device *fdev = cb_arg;

	mlx5_fpga_tls_cmd_complete(fdev, buf);
}
339
340bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev)
341{
342 if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
343 return false;
344
345 if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
346 MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
347 return false;
348
349 if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
350 MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS)
351 return false;
352
353 if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0)
354 return false;
355
356 return true;
357}
358
/* Query the TLS SBU extended capabilities and translate the reported
 * fields into MLX5_ACCEL_TLS_* flag bits in *p_caps.
 * Returns 0 on success or a negative errno.
 */
static int mlx5_fpga_tls_get_caps(struct mlx5_fpga_device *fdev,
				  u32 *p_caps)
{
	int err, cap_size = MLX5_ST_SZ_BYTES(tls_extended_cap);
	u32 caps = 0;
	void *buf;

	buf = kzalloc(cap_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	err = mlx5_fpga_get_sbu_caps(fdev, cap_size, buf);
	if (err)
		goto out;

	/* Direction / protocol-version / feature capabilities */
	if (MLX5_GET(tls_extended_cap, buf, tx))
		caps |= MLX5_ACCEL_TLS_TX;
	if (MLX5_GET(tls_extended_cap, buf, rx))
		caps |= MLX5_ACCEL_TLS_RX;
	if (MLX5_GET(tls_extended_cap, buf, tls_v12))
		caps |= MLX5_ACCEL_TLS_V12;
	if (MLX5_GET(tls_extended_cap, buf, tls_v13))
		caps |= MLX5_ACCEL_TLS_V13;
	if (MLX5_GET(tls_extended_cap, buf, lro))
		caps |= MLX5_ACCEL_TLS_LRO;
	if (MLX5_GET(tls_extended_cap, buf, ipv6))
		caps |= MLX5_ACCEL_TLS_IPV6;

	/* Supported ciphers */
	if (MLX5_GET(tls_extended_cap, buf, aes_gcm_128))
		caps |= MLX5_ACCEL_TLS_AES_GCM128;
	if (MLX5_GET(tls_extended_cap, buf, aes_gcm_256))
		caps |= MLX5_ACCEL_TLS_AES_GCM256;

	*p_caps = caps;
	err = 0;
out:
	kfree(buf);
	return err;
}
398
399int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
400{
401 struct mlx5_fpga_device *fdev = mdev->fpga;
402 struct mlx5_fpga_conn_attr init_attr = {0};
403 struct mlx5_fpga_conn *conn;
404 struct mlx5_fpga_tls *tls;
405 int err = 0;
406
407 if (!mlx5_fpga_is_tls_device(mdev) || !fdev)
408 return 0;
409
410 tls = kzalloc(sizeof(*tls), GFP_KERNEL);
411 if (!tls)
412 return -ENOMEM;
413
414 err = mlx5_fpga_tls_get_caps(fdev, &tls->caps);
415 if (err)
416 goto error;
417
418 if (!(tls->caps & (MLX5_ACCEL_TLS_TX | MLX5_ACCEL_TLS_V12 |
419 MLX5_ACCEL_TLS_AES_GCM128))) {
420 err = -ENOTSUPP;
421 goto error;
422 }
423
424 init_attr.rx_size = SBU_QP_QUEUE_SIZE;
425 init_attr.tx_size = SBU_QP_QUEUE_SIZE;
426 init_attr.recv_cb = mlx5_fpga_tls_hw_qp_recv_cb;
427 init_attr.cb_arg = fdev;
428 conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
429 if (IS_ERR(conn)) {
430 err = PTR_ERR(conn);
431 mlx5_fpga_err(fdev, "Error creating TLS command connection %d\n",
432 err);
433 goto error;
434 }
435
436 tls->conn = conn;
437 spin_lock_init(&tls->pending_cmds_lock);
438 INIT_LIST_HEAD(&tls->pending_cmds);
439
440 idr_init(&tls->tx_idr);
441 spin_lock_init(&tls->idr_spinlock);
442 fdev->tls = tls;
443 return 0;
444
445error:
446 kfree(tls);
447 return err;
448}
449
450void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev)
451{
452 struct mlx5_fpga_device *fdev = mdev->fpga;
453
454 if (!fdev || !fdev->tls)
455 return;
456
457 mlx5_fpga_sbu_conn_destroy(fdev->tls->conn);
458 kfree(fdev->tls);
459 fdev->tls = NULL;
460}
461
/* Serialize AES-GCM-128 key material from @info into the tls_cmd
 * layout at @cmd: record sequence number, implicit IV (salt), the key
 * (written twice) and the algorithm selector.
 * NOTE(review): @rcd_sn is never read or written here - the record
 * sequence comes straight from crypto_info->rec_seq; confirm the
 * parameter is a placeholder.
 */
static void mlx5_fpga_tls_set_aes_gcm128_ctx(void *cmd,
					     struct tls_crypto_info *info,
					     __be64 *rcd_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *crypto_info =
		(struct tls12_crypto_info_aes_gcm_128 *)info;

	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_rcd_sn), crypto_info->rec_seq,
	       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_implicit_iv),
	       crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key),
	       crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	/* in AES-GCM 128 we need to write the key twice */
	memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key) +
	       TLS_CIPHER_AES_GCM_128_KEY_SIZE,
	       crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	MLX5_SET(tls_cmd, cmd, alg, MLX5_TLS_ALG_AES_GCM_128);
}
484
485static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps,
486 struct tls_crypto_info *crypto_info)
487{
488 __be64 rcd_sn;
489
490 switch (crypto_info->cipher_type) {
491 case TLS_CIPHER_AES_GCM_128:
492 if (!(caps & MLX5_ACCEL_TLS_AES_GCM128))
493 return -EINVAL;
494 mlx5_fpga_tls_set_aes_gcm128_ctx(cmd, crypto_info, &rcd_sn);
495 break;
496 default:
497 return -EINVAL;
498 }
499
500 return 0;
501}
502
/* Allocate and send a CMD_SETUP_STREAM carrying the crypto material
 * and the connection identification for a new offloaded stream.
 * Does not consume @flow.  Returns 0 on success or a negative errno
 * (including -EINTR; see mlx5_fpga_tls_setup_stream_cmd()).
 */
static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
				  struct tls_crypto_info *crypto_info, u32 swid,
				  u32 tcp_sn)
{
	u32 caps = mlx5_fpga_tls_device_caps(mdev);
	struct mlx5_setup_stream_context *ctx;
	int ret = -ENOMEM;
	size_t cmd_size;
	void *cmd;

	/* Single allocation: the context followed by the command payload */
	cmd_size = MLX5_TLS_COMMAND_SIZE + sizeof(*ctx);
	ctx = kzalloc(cmd_size, GFP_KERNEL);
	if (!ctx)
		goto out;

	cmd = ctx + 1;
	ret = mlx5_fpga_tls_set_key_material(cmd, caps, crypto_info);
	if (ret)
		goto free_ctx;

	mlx5_fpga_tls_flow_to_cmd(flow, cmd);

	MLX5_SET(tls_cmd, cmd, swid, swid);
	MLX5_SET(tls_cmd, cmd, tcp_sn, tcp_sn);

	/* Ownership of ctx passes to the command machinery from here */
	return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx);

free_ctx:
	kfree(ctx);
out:
	return ret;
}
535
/* Install a TX TLS offload context for @flow.
 *
 * Allocates a swid, registers @flow in the TX IDR under it, marks the
 * flow as TX (direction_sx = 1) and sends the setup command.  On
 * success *p_swid holds the new id.
 * NOTE(review): -EINTR from the setup falls through to the success
 * path without releasing the swid - presumably because the abandoned-
 * command path in mlx5_fpga_tls_setup_completion() handles teardown in
 * that case; confirm.
 */
int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
			      struct tls_crypto_info *crypto_info,
			      u32 start_offload_tcp_sn, u32 *p_swid)
{
	struct mlx5_fpga_tls *tls = mdev->fpga->tls;
	int ret = -ENOMEM;
	u32 swid;

	ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, &tls->idr_spinlock, flow);
	if (ret < 0)
		return ret;

	swid = ret;
	MLX5_SET(tls_flow, flow, direction_sx, 1);

	ret = mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
				     start_offload_tcp_sn);
	if (ret && ret != -EINTR)
		goto free_swid;

	*p_swid = swid;
	return 0;
free_swid:
	mlx5_fpga_tls_release_swid(&tls->tx_idr, &tls->idr_spinlock, swid);

	return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
new file mode 100644
index 000000000000..800a214e4e49
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h
@@ -0,0 +1,68 @@
1/*
2 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef __MLX5_FPGA_TLS_H__
35#define __MLX5_FPGA_TLS_H__
36
37#include <linux/mlx5/driver.h>
38
39#include <net/tls.h>
40#include "fpga/core.h"
41
/* Per-device TLS offload state, hung off mlx5_fpga_device.tls. */
struct mlx5_fpga_tls {
	struct list_head pending_cmds;	/* commands awaiting FPGA response */
	spinlock_t pending_cmds_lock; /* Protects pending_cmds */
	u32 caps;	/* MLX5_ACCEL_TLS_* capability bits */
	struct mlx5_fpga_conn *conn;	/* SBU command connection */

	struct idr tx_idr;	/* swid -> flow mapping for TX contexts */
	spinlock_t idr_spinlock; /* protects the IDR */
};
51
52int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow,
53 struct tls_crypto_info *crypto_info,
54 u32 start_offload_tcp_sn, u32 *p_swid);
55
56void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid,
57 gfp_t flags);
58
59bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev);
60int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev);
61void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev);
62
/* Capability bits (MLX5_ACCEL_TLS_*) of the TLS SBU; only valid after
 * a successful mlx5_fpga_tls_init() has populated mdev->fpga->tls.
 */
static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev)
{
	return mdev->fpga->tls->caps;
}
67
68#endif /* __MLX5_FPGA_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index de51e7c39bc8..c39c1692e674 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -187,6 +187,7 @@ static void del_sw_ns(struct fs_node *node);
187static void del_sw_hw_rule(struct fs_node *node); 187static void del_sw_hw_rule(struct fs_node *node);
188static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, 188static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
189 struct mlx5_flow_destination *d2); 189 struct mlx5_flow_destination *d2);
190static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
190static struct mlx5_flow_rule * 191static struct mlx5_flow_rule *
191find_flow_rule(struct fs_fte *fte, 192find_flow_rule(struct fs_fte *fte,
192 struct mlx5_flow_destination *dest); 193 struct mlx5_flow_destination *dest);
@@ -481,7 +482,8 @@ static void del_sw_hw_rule(struct fs_node *node)
481 482
482 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER && 483 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
483 --fte->dests_size) { 484 --fte->dests_size) {
484 modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); 485 modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
486 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
485 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; 487 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
486 update_fte = true; 488 update_fte = true;
487 goto out; 489 goto out;
@@ -2351,23 +2353,27 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
2351 2353
2352static int init_root_ns(struct mlx5_flow_steering *steering) 2354static int init_root_ns(struct mlx5_flow_steering *steering)
2353{ 2355{
2356 int err;
2357
2354 steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); 2358 steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
2355 if (!steering->root_ns) 2359 if (!steering->root_ns)
2356 goto cleanup; 2360 return -ENOMEM;
2357 2361
2358 if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node)) 2362 err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
2359 goto cleanup; 2363 if (err)
2364 goto out_err;
2360 2365
2361 set_prio_attrs(steering->root_ns); 2366 set_prio_attrs(steering->root_ns);
2362 2367 err = create_anchor_flow_table(steering);
2363 if (create_anchor_flow_table(steering)) 2368 if (err)
2364 goto cleanup; 2369 goto out_err;
2365 2370
2366 return 0; 2371 return 0;
2367 2372
2368cleanup: 2373out_err:
2369 mlx5_cleanup_fs(steering->dev); 2374 cleanup_root_ns(steering->root_ns);
2370 return -ENOMEM; 2375 steering->root_ns = NULL;
2376 return err;
2371} 2377}
2372 2378
2373static void clean_tree(struct fs_node *node) 2379static void clean_tree(struct fs_node *node)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 63a8ea31601c..b865597630ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -60,6 +60,7 @@
60#include "fpga/core.h" 60#include "fpga/core.h"
61#include "fpga/ipsec.h" 61#include "fpga/ipsec.h"
62#include "accel/ipsec.h" 62#include "accel/ipsec.h"
63#include "accel/tls.h"
63#include "lib/clock.h" 64#include "lib/clock.h"
64 65
65MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); 66MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
@@ -1190,6 +1191,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1190 goto err_ipsec_start; 1191 goto err_ipsec_start;
1191 } 1192 }
1192 1193
1194 err = mlx5_accel_tls_init(dev);
1195 if (err) {
1196 dev_err(&pdev->dev, "TLS device start failed %d\n", err);
1197 goto err_tls_start;
1198 }
1199
1193 err = mlx5_init_fs(dev); 1200 err = mlx5_init_fs(dev);
1194 if (err) { 1201 if (err) {
1195 dev_err(&pdev->dev, "Failed to init flow steering\n"); 1202 dev_err(&pdev->dev, "Failed to init flow steering\n");
@@ -1231,6 +1238,9 @@ err_sriov:
1231 mlx5_cleanup_fs(dev); 1238 mlx5_cleanup_fs(dev);
1232 1239
1233err_fs: 1240err_fs:
1241 mlx5_accel_tls_cleanup(dev);
1242
1243err_tls_start:
1234 mlx5_accel_ipsec_cleanup(dev); 1244 mlx5_accel_ipsec_cleanup(dev);
1235 1245
1236err_ipsec_start: 1246err_ipsec_start:
@@ -1306,6 +1316,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1306 mlx5_sriov_detach(dev); 1316 mlx5_sriov_detach(dev);
1307 mlx5_cleanup_fs(dev); 1317 mlx5_cleanup_fs(dev);
1308 mlx5_accel_ipsec_cleanup(dev); 1318 mlx5_accel_ipsec_cleanup(dev);
1319 mlx5_accel_tls_cleanup(dev);
1309 mlx5_fpga_device_stop(dev); 1320 mlx5_fpga_device_stop(dev);
1310 mlx5_irq_clear_affinity_hints(dev); 1321 mlx5_irq_clear_affinity_hints(dev);
1311 free_comp_eqs(dev); 1322 free_comp_eqs(dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 479511cf79bc..8da91b023b13 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -424,10 +424,15 @@ MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8);
424MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8); 424MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8);
425 425
426/* cmd_mbox_query_aq_cap_log_max_cq_sz 426/* cmd_mbox_query_aq_cap_log_max_cq_sz
427 * Log (base 2) of max CQEs allowed on CQ. 427 * Log (base 2) of the Maximum CQEs allowed in a CQ for CQEv0 and CQEv1.
428 */ 428 */
429MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8); 429MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8);
430 430
431/* cmd_mbox_query_aq_cap_log_max_cqv2_sz
432 * Log (base 2) of the Maximum CQEs allowed in a CQ for CQEv2.
433 */
434MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cqv2_sz, 0x08, 16, 8);
435
431/* cmd_mbox_query_aq_cap_max_num_cqs 436/* cmd_mbox_query_aq_cap_max_num_cqs
432 * Maximum number of CQs. 437 * Maximum number of CQs.
433 */ 438 */
@@ -662,6 +667,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1);
662 */ 667 */
663MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1); 668MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1);
664 669
670/* cmd_mbox_config_set_cqe_version
671 * Capability bit. Setting a bit to 1 configures the profile
672 * according to the mailbox contents.
673 */
674MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_version, 0x08, 0, 1);
675
665/* cmd_mbox_config_profile_max_vepa_channels 676/* cmd_mbox_config_profile_max_vepa_channels
666 * Maximum number of VEPA channels per port (0 through 16) 677 * Maximum number of VEPA channels per port (0 through 16)
667 * 0 - multi-channel VEPA is disabled 678 * 0 - multi-channel VEPA is disabled
@@ -841,6 +852,14 @@ MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
841MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties, 852MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
842 0x60, 0, 8, 0x08, 0x00, false); 853 0x60, 0, 8, 0x08, 0x00, false);
843 854
855/* cmd_mbox_config_profile_cqe_version
856 * CQE version:
857 * 0: CQE version is 0
858 * 1: CQE version is either 1 or 2
859 * CQE ver 1 or 2 is configured by Completion Queue Context field cqe_ver.
860 */
861MLXSW_ITEM32(cmd_mbox, config_profile, cqe_version, 0xB0, 0, 8);
862
844/* ACCESS_REG - Access EMAD Supported Register 863/* ACCESS_REG - Access EMAD Supported Register
845 * ---------------------------------- 864 * ----------------------------------
846 * OpMod == 0 (N/A), INMmod == 0 (N/A) 865 * OpMod == 0 (N/A), INMmod == 0 (N/A)
@@ -1032,11 +1051,15 @@ static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core,
1032 0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE); 1051 0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
1033} 1052}
1034 1053
1035/* cmd_mbox_sw2hw_cq_cv 1054enum mlxsw_cmd_mbox_sw2hw_cq_cqe_ver {
1055 MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1,
1056 MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2,
1057};
1058
1059/* cmd_mbox_sw2hw_cq_cqe_ver
1036 * CQE Version. 1060 * CQE Version.
1037 * 0 - CQE Version 0, 1 - CQE Version 1
1038 */ 1061 */
1039MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4); 1062MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cqe_ver, 0x00, 28, 4);
1040 1063
1041/* cmd_mbox_sw2hw_cq_c_eqn 1064/* cmd_mbox_sw2hw_cq_c_eqn
1042 * Event Queue this CQ reports completion events to. 1065 * Event Queue this CQ reports completion events to.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 3a9381977d6d..db794a1a3a7e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -117,6 +117,7 @@ struct mlxsw_pci_queue {
117 struct { 117 struct {
118 u32 comp_sdq_count; 118 u32 comp_sdq_count;
119 u32 comp_rdq_count; 119 u32 comp_rdq_count;
120 enum mlxsw_pci_cqe_v v;
120 } cq; 121 } cq;
121 struct { 122 struct {
122 u32 ev_cmd_count; 123 u32 ev_cmd_count;
@@ -155,6 +156,8 @@ struct mlxsw_pci {
155 } cmd; 156 } cmd;
156 struct mlxsw_bus_info bus_info; 157 struct mlxsw_bus_info bus_info;
157 const struct pci_device_id *id; 158 const struct pci_device_id *id;
159 enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
160 u8 num_sdq_cqs; /* Number of CQs used for SDQs */
158}; 161};
159 162
160static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q) 163static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@@ -202,24 +205,6 @@ static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
202 return owner_bit != !!(q->consumer_counter & q->count); 205 return owner_bit != !!(q->consumer_counter & q->count);
203} 206}
204 207
205static char *
206mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
207 u32 (*get_elem_owner_func)(const char *))
208{
209 struct mlxsw_pci_queue_elem_info *elem_info;
210 char *elem;
211 bool owner_bit;
212
213 elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
214 elem = elem_info->elem;
215 owner_bit = get_elem_owner_func(elem);
216 if (mlxsw_pci_elem_hw_owned(q, owner_bit))
217 return NULL;
218 q->consumer_counter++;
219 rmb(); /* make sure we read owned bit before the rest of elem */
220 return elem;
221}
222
223static struct mlxsw_pci_queue_type_group * 208static struct mlxsw_pci_queue_type_group *
224mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci, 209mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
225 enum mlxsw_pci_queue_type q_type) 210 enum mlxsw_pci_queue_type q_type)
@@ -494,6 +479,17 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
494 } 479 }
495} 480}
496 481
482static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
483 struct mlxsw_pci_queue *q)
484{
485 q->u.cq.v = mlxsw_pci->max_cqe_ver;
486
487 /* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
488 if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
489 q->num < mlxsw_pci->num_sdq_cqs)
490 q->u.cq.v = MLXSW_PCI_CQE_V1;
491}
492
497static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, 493static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
498 struct mlxsw_pci_queue *q) 494 struct mlxsw_pci_queue *q)
499{ 495{
@@ -505,10 +501,16 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
505 for (i = 0; i < q->count; i++) { 501 for (i = 0; i < q->count; i++) {
506 char *elem = mlxsw_pci_queue_elem_get(q, i); 502 char *elem = mlxsw_pci_queue_elem_get(q, i);
507 503
508 mlxsw_pci_cqe_owner_set(elem, 1); 504 mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
509 } 505 }
510 506
511 mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */ 507 if (q->u.cq.v == MLXSW_PCI_CQE_V1)
508 mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
509 MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
510 else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
511 mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
512 MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);
513
512 mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM); 514 mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
513 mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0); 515 mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
514 mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count)); 516 mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
@@ -559,7 +561,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
559static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, 561static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
560 struct mlxsw_pci_queue *q, 562 struct mlxsw_pci_queue *q,
561 u16 consumer_counter_limit, 563 u16 consumer_counter_limit,
562 char *cqe) 564 enum mlxsw_pci_cqe_v cqe_v, char *cqe)
563{ 565{
564 struct pci_dev *pdev = mlxsw_pci->pdev; 566 struct pci_dev *pdev = mlxsw_pci->pdev;
565 struct mlxsw_pci_queue_elem_info *elem_info; 567 struct mlxsw_pci_queue_elem_info *elem_info;
@@ -579,10 +581,11 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
579 if (q->consumer_counter++ != consumer_counter_limit) 581 if (q->consumer_counter++ != consumer_counter_limit)
580 dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n"); 582 dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
581 583
582 if (mlxsw_pci_cqe_lag_get(cqe)) { 584 if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
583 rx_info.is_lag = true; 585 rx_info.is_lag = true;
584 rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe); 586 rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
585 rx_info.lag_port_index = mlxsw_pci_cqe_lag_port_index_get(cqe); 587 rx_info.lag_port_index =
588 mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
586 } else { 589 } else {
587 rx_info.is_lag = false; 590 rx_info.is_lag = false;
588 rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe); 591 rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
@@ -591,7 +594,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
591 rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe); 594 rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
592 595
593 byte_count = mlxsw_pci_cqe_byte_count_get(cqe); 596 byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
594 if (mlxsw_pci_cqe_crc_get(cqe)) 597 if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
595 byte_count -= ETH_FCS_LEN; 598 byte_count -= ETH_FCS_LEN;
596 skb_put(skb, byte_count); 599 skb_put(skb, byte_count);
597 mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info); 600 mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
@@ -608,7 +611,18 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
608 611
609static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q) 612static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
610{ 613{
611 return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get); 614 struct mlxsw_pci_queue_elem_info *elem_info;
615 char *elem;
616 bool owner_bit;
617
618 elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
619 elem = elem_info->elem;
620 owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
621 if (mlxsw_pci_elem_hw_owned(q, owner_bit))
622 return NULL;
623 q->consumer_counter++;
624 rmb(); /* make sure we read owned bit before the rest of elem */
625 return elem;
612} 626}
613 627
614static void mlxsw_pci_cq_tasklet(unsigned long data) 628static void mlxsw_pci_cq_tasklet(unsigned long data)
@@ -621,8 +635,8 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
621 635
622 while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) { 636 while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
623 u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); 637 u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
624 u8 sendq = mlxsw_pci_cqe_sr_get(cqe); 638 u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
625 u8 dqn = mlxsw_pci_cqe_dqn_get(cqe); 639 u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
626 640
627 if (sendq) { 641 if (sendq) {
628 struct mlxsw_pci_queue *sdq; 642 struct mlxsw_pci_queue *sdq;
@@ -636,7 +650,7 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
636 650
637 rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); 651 rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
638 mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, 652 mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
639 wqe_counter, cqe); 653 wqe_counter, q->u.cq.v, cqe);
640 q->u.cq.comp_rdq_count++; 654 q->u.cq.comp_rdq_count++;
641 } 655 }
642 if (++items == credits) 656 if (++items == credits)
@@ -648,6 +662,18 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
648 } 662 }
649} 663}
650 664
665static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
666{
667 return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
668 MLXSW_PCI_CQE01_COUNT;
669}
670
671static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
672{
673 return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
674 MLXSW_PCI_CQE01_SIZE;
675}
676
651static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, 677static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
652 struct mlxsw_pci_queue *q) 678 struct mlxsw_pci_queue *q)
653{ 679{
@@ -696,7 +722,18 @@ static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
696 722
697static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q) 723static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
698{ 724{
699 return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get); 725 struct mlxsw_pci_queue_elem_info *elem_info;
726 char *elem;
727 bool owner_bit;
728
729 elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
730 elem = elem_info->elem;
731 owner_bit = mlxsw_pci_eqe_owner_get(elem);
732 if (mlxsw_pci_elem_hw_owned(q, owner_bit))
733 return NULL;
734 q->consumer_counter++;
735 rmb(); /* make sure we read owned bit before the rest of elem */
736 return elem;
700} 737}
701 738
702static void mlxsw_pci_eq_tasklet(unsigned long data) 739static void mlxsw_pci_eq_tasklet(unsigned long data)
@@ -749,11 +786,15 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
749struct mlxsw_pci_queue_ops { 786struct mlxsw_pci_queue_ops {
750 const char *name; 787 const char *name;
751 enum mlxsw_pci_queue_type type; 788 enum mlxsw_pci_queue_type type;
789 void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
790 struct mlxsw_pci_queue *q);
752 int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox, 791 int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
753 struct mlxsw_pci_queue *q); 792 struct mlxsw_pci_queue *q);
754 void (*fini)(struct mlxsw_pci *mlxsw_pci, 793 void (*fini)(struct mlxsw_pci *mlxsw_pci,
755 struct mlxsw_pci_queue *q); 794 struct mlxsw_pci_queue *q);
756 void (*tasklet)(unsigned long data); 795 void (*tasklet)(unsigned long data);
796 u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
797 u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
757 u16 elem_count; 798 u16 elem_count;
758 u8 elem_size; 799 u8 elem_size;
759}; 800};
@@ -776,11 +817,12 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
776 817
777static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = { 818static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
778 .type = MLXSW_PCI_QUEUE_TYPE_CQ, 819 .type = MLXSW_PCI_QUEUE_TYPE_CQ,
820 .pre_init = mlxsw_pci_cq_pre_init,
779 .init = mlxsw_pci_cq_init, 821 .init = mlxsw_pci_cq_init,
780 .fini = mlxsw_pci_cq_fini, 822 .fini = mlxsw_pci_cq_fini,
781 .tasklet = mlxsw_pci_cq_tasklet, 823 .tasklet = mlxsw_pci_cq_tasklet,
782 .elem_count = MLXSW_PCI_CQE_COUNT, 824 .elem_count_f = mlxsw_pci_cq_elem_count,
783 .elem_size = MLXSW_PCI_CQE_SIZE 825 .elem_size_f = mlxsw_pci_cq_elem_size
784}; 826};
785 827
786static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = { 828static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
@@ -800,10 +842,15 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
800 int i; 842 int i;
801 int err; 843 int err;
802 844
803 spin_lock_init(&q->lock);
804 q->num = q_num; 845 q->num = q_num;
805 q->count = q_ops->elem_count; 846 if (q_ops->pre_init)
806 q->elem_size = q_ops->elem_size; 847 q_ops->pre_init(mlxsw_pci, q);
848
849 spin_lock_init(&q->lock);
850 q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
851 q_ops->elem_count;
852 q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
853 q_ops->elem_size;
807 q->type = q_ops->type; 854 q->type = q_ops->type;
808 q->pci = mlxsw_pci; 855 q->pci = mlxsw_pci;
809 856
@@ -832,7 +879,7 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
832 879
833 elem_info = mlxsw_pci_queue_elem_info_get(q, i); 880 elem_info = mlxsw_pci_queue_elem_info_get(q, i);
834 elem_info->elem = 881 elem_info->elem =
835 __mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i); 882 __mlxsw_pci_queue_elem_get(q, q->elem_size, i);
836 } 883 }
837 884
838 mlxsw_cmd_mbox_zero(mbox); 885 mlxsw_cmd_mbox_zero(mbox);
@@ -912,6 +959,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
912 u8 rdq_log2sz; 959 u8 rdq_log2sz;
913 u8 num_cqs; 960 u8 num_cqs;
914 u8 cq_log2sz; 961 u8 cq_log2sz;
962 u8 cqv2_log2sz;
915 u8 num_eqs; 963 u8 num_eqs;
916 u8 eq_log2sz; 964 u8 eq_log2sz;
917 int err; 965 int err;
@@ -927,6 +975,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
927 rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox); 975 rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
928 num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox); 976 num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
929 cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox); 977 cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
978 cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
930 num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox); 979 num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
931 eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox); 980 eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
932 981
@@ -938,12 +987,16 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
938 987
939 if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) || 988 if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
940 (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) || 989 (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
941 (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) || 990 (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
991 (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
992 (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
942 (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) { 993 (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
943 dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n"); 994 dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
944 return -EINVAL; 995 return -EINVAL;
945 } 996 }
946 997
998 mlxsw_pci->num_sdq_cqs = num_sdqs;
999
947 err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops, 1000 err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
948 num_eqs); 1001 num_eqs);
949 if (err) { 1002 if (err) {
@@ -1184,6 +1237,11 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
1184 mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i, 1237 mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
1185 &profile->swid_config[i]); 1238 &profile->swid_config[i]);
1186 1239
1240 if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
1241 mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
1242 mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
1243 }
1244
1187 return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox); 1245 return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
1188} 1246}
1189 1247
@@ -1378,6 +1436,21 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
1378 if (err) 1436 if (err)
1379 goto err_query_resources; 1437 goto err_query_resources;
1380 1438
1439 if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
1440 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
1441 mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
1442 else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
1443 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
1444 mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
1445 else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
1446 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
1447 !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
1448 mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
1449 } else {
1450 dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
1451 goto err_cqe_v_check;
1452 }
1453
1381 err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res); 1454 err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
1382 if (err) 1455 if (err)
1383 goto err_config_profile; 1456 goto err_config_profile;
@@ -1400,6 +1473,7 @@ err_request_eq_irq:
1400 mlxsw_pci_aqs_fini(mlxsw_pci); 1473 mlxsw_pci_aqs_fini(mlxsw_pci);
1401err_aqs_init: 1474err_aqs_init:
1402err_config_profile: 1475err_config_profile:
1476err_cqe_v_check:
1403err_query_resources: 1477err_query_resources:
1404err_boardinfo: 1478err_boardinfo:
1405 mlxsw_pci_fw_area_fini(mlxsw_pci); 1479 mlxsw_pci_fw_area_fini(mlxsw_pci);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index fb082ad21b00..963155f6a17a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -82,10 +82,12 @@
82#define MLXSW_PCI_AQ_PAGES 8 82#define MLXSW_PCI_AQ_PAGES 8
83#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES) 83#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
84#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ 84#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
85#define MLXSW_PCI_CQE_SIZE 16 /* 16 bytes per element */ 85#define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */
86#define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */
86#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ 87#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
87#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) 88#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
88#define MLXSW_PCI_CQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE) 89#define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
90#define MLXSW_PCI_CQE2_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE2_SIZE)
89#define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE) 91#define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
90#define MLXSW_PCI_EQE_UPDATE_COUNT 0x80 92#define MLXSW_PCI_EQE_UPDATE_COUNT 0x80
91 93
@@ -126,10 +128,48 @@ MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
126 */ 128 */
127MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false); 129MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
128 130
131enum mlxsw_pci_cqe_v {
132 MLXSW_PCI_CQE_V0,
133 MLXSW_PCI_CQE_V1,
134 MLXSW_PCI_CQE_V2,
135};
136
137#define mlxsw_pci_cqe_item_helpers(name, v0, v1, v2) \
138static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \
139{ \
140 switch (v) { \
141 default: \
142 case MLXSW_PCI_CQE_V0: \
143 return mlxsw_pci_cqe##v0##_##name##_get(cqe); \
144 case MLXSW_PCI_CQE_V1: \
145 return mlxsw_pci_cqe##v1##_##name##_get(cqe); \
146 case MLXSW_PCI_CQE_V2: \
147 return mlxsw_pci_cqe##v2##_##name##_get(cqe); \
148 } \
149} \
150static inline void mlxsw_pci_cqe_##name##_set(enum mlxsw_pci_cqe_v v, \
151 char *cqe, u32 val) \
152{ \
153 switch (v) { \
154 default: \
155 case MLXSW_PCI_CQE_V0: \
156 mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \
157 break; \
158 case MLXSW_PCI_CQE_V1: \
159 mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \
160 break; \
161 case MLXSW_PCI_CQE_V2: \
162 mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \
163 break; \
164 } \
165}
166
129/* pci_cqe_lag 167/* pci_cqe_lag
130 * Packet arrives from a port which is a LAG 168 * Packet arrives from a port which is a LAG
131 */ 169 */
132MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1); 170MLXSW_ITEM32(pci, cqe0, lag, 0x00, 23, 1);
171MLXSW_ITEM32(pci, cqe12, lag, 0x00, 24, 1);
172mlxsw_pci_cqe_item_helpers(lag, 0, 12, 12);
133 173
134/* pci_cqe_system_port/lag_id 174/* pci_cqe_system_port/lag_id
135 * When lag=0: System port on which the packet was received 175 * When lag=0: System port on which the packet was received
@@ -138,8 +178,12 @@ MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
138 * bits [3:0] sub_port on which the packet was received 178 * bits [3:0] sub_port on which the packet was received
139 */ 179 */
140MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16); 180MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
141MLXSW_ITEM32(pci, cqe, lag_id, 0x00, 4, 12); 181MLXSW_ITEM32(pci, cqe0, lag_id, 0x00, 4, 12);
142MLXSW_ITEM32(pci, cqe, lag_port_index, 0x00, 0, 4); 182MLXSW_ITEM32(pci, cqe12, lag_id, 0x00, 0, 16);
183mlxsw_pci_cqe_item_helpers(lag_id, 0, 12, 12);
184MLXSW_ITEM32(pci, cqe0, lag_subport, 0x00, 0, 4);
185MLXSW_ITEM32(pci, cqe12, lag_subport, 0x00, 16, 8);
186mlxsw_pci_cqe_item_helpers(lag_subport, 0, 12, 12);
143 187
144/* pci_cqe_wqe_counter 188/* pci_cqe_wqe_counter
145 * WQE count of the WQEs completed on the associated dqn 189 * WQE count of the WQEs completed on the associated dqn
@@ -162,28 +206,38 @@ MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 9);
162 * Length include CRC. Indicates the length field includes 206 * Length include CRC. Indicates the length field includes
163 * the packet's CRC. 207 * the packet's CRC.
164 */ 208 */
165MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1); 209MLXSW_ITEM32(pci, cqe0, crc, 0x0C, 8, 1);
210MLXSW_ITEM32(pci, cqe12, crc, 0x0C, 9, 1);
211mlxsw_pci_cqe_item_helpers(crc, 0, 12, 12);
166 212
167/* pci_cqe_e 213/* pci_cqe_e
168 * CQE with Error. 214 * CQE with Error.
169 */ 215 */
170MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1); 216MLXSW_ITEM32(pci, cqe0, e, 0x0C, 7, 1);
217MLXSW_ITEM32(pci, cqe12, e, 0x00, 27, 1);
218mlxsw_pci_cqe_item_helpers(e, 0, 12, 12);
171 219
172/* pci_cqe_sr 220/* pci_cqe_sr
173 * 1 - Send Queue 221 * 1 - Send Queue
174 * 0 - Receive Queue 222 * 0 - Receive Queue
175 */ 223 */
176MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1); 224MLXSW_ITEM32(pci, cqe0, sr, 0x0C, 6, 1);
225MLXSW_ITEM32(pci, cqe12, sr, 0x00, 26, 1);
226mlxsw_pci_cqe_item_helpers(sr, 0, 12, 12);
177 227
178/* pci_cqe_dqn 228/* pci_cqe_dqn
179 * Descriptor Queue (DQ) Number. 229 * Descriptor Queue (DQ) Number.
180 */ 230 */
181MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5); 231MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5);
232MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6);
233mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12);
182 234
183/* pci_cqe_owner 235/* pci_cqe_owner
184 * Ownership bit. 236 * Ownership bit.
185 */ 237 */
186MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1); 238MLXSW_ITEM32(pci, cqe01, owner, 0x0C, 0, 1);
239MLXSW_ITEM32(pci, cqe2, owner, 0x1C, 0, 1);
240mlxsw_pci_cqe_item_helpers(owner, 01, 01, 2);
187 241
188/* pci_eqe_event_type 242/* pci_eqe_event_type
189 * Event type. 243 * Event type.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 087aad52c195..fd9299ccec72 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -43,6 +43,9 @@ enum mlxsw_res_id {
43 MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE, 43 MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
44 MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE, 44 MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
45 MLXSW_RES_ID_MAX_TRAP_GROUPS, 45 MLXSW_RES_ID_MAX_TRAP_GROUPS,
46 MLXSW_RES_ID_CQE_V0,
47 MLXSW_RES_ID_CQE_V1,
48 MLXSW_RES_ID_CQE_V2,
46 MLXSW_RES_ID_COUNTER_POOL_SIZE, 49 MLXSW_RES_ID_COUNTER_POOL_SIZE,
47 MLXSW_RES_ID_MAX_SPAN, 50 MLXSW_RES_ID_MAX_SPAN,
48 MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES, 51 MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
@@ -81,6 +84,9 @@ static u16 mlxsw_res_ids[] = {
81 [MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002, 84 [MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
82 [MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003, 85 [MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
83 [MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201, 86 [MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
87 [MLXSW_RES_ID_CQE_V0] = 0x2210,
88 [MLXSW_RES_ID_CQE_V1] = 0x2211,
89 [MLXSW_RES_ID_CQE_V2] = 0x2212,
84 [MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410, 90 [MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410,
85 [MLXSW_RES_ID_MAX_SPAN] = 0x2420, 91 [MLXSW_RES_ID_MAX_SPAN] = 0x2420,
86 [MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443, 92 [MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index ca38a30fbe91..94132f6cec61 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -441,29 +441,29 @@ static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
441 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); 441 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
442} 442}
443 443
444int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 444enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
445 u8 state)
446{ 445{
447 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
448 enum mlxsw_reg_spms_state spms_state;
449 char *spms_pl;
450 int err;
451
452 switch (state) { 446 switch (state) {
453 case BR_STATE_FORWARDING: 447 case BR_STATE_FORWARDING:
454 spms_state = MLXSW_REG_SPMS_STATE_FORWARDING; 448 return MLXSW_REG_SPMS_STATE_FORWARDING;
455 break;
456 case BR_STATE_LEARNING: 449 case BR_STATE_LEARNING:
457 spms_state = MLXSW_REG_SPMS_STATE_LEARNING; 450 return MLXSW_REG_SPMS_STATE_LEARNING;
458 break;
459 case BR_STATE_LISTENING: /* fall-through */ 451 case BR_STATE_LISTENING: /* fall-through */
460 case BR_STATE_DISABLED: /* fall-through */ 452 case BR_STATE_DISABLED: /* fall-through */
461 case BR_STATE_BLOCKING: 453 case BR_STATE_BLOCKING:
462 spms_state = MLXSW_REG_SPMS_STATE_DISCARDING; 454 return MLXSW_REG_SPMS_STATE_DISCARDING;
463 break;
464 default: 455 default:
465 BUG(); 456 BUG();
466 } 457 }
458}
459
460int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
461 u8 state)
462{
463 enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
464 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
465 char *spms_pl;
466 int err;
467 467
468 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); 468 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
469 if (!spms_pl) 469 if (!spms_pl)
@@ -3666,6 +3666,15 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
3666 goto err_lag_init; 3666 goto err_lag_init;
3667 } 3667 }
3668 3668
3669 /* Initialize SPAN before router and switchdev, so that those components
3670 * can call mlxsw_sp_span_respin().
3671 */
3672 err = mlxsw_sp_span_init(mlxsw_sp);
3673 if (err) {
3674 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3675 goto err_span_init;
3676 }
3677
3669 err = mlxsw_sp_switchdev_init(mlxsw_sp); 3678 err = mlxsw_sp_switchdev_init(mlxsw_sp);
3670 if (err) { 3679 if (err) {
3671 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); 3680 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
@@ -3684,15 +3693,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
3684 goto err_afa_init; 3693 goto err_afa_init;
3685 } 3694 }
3686 3695
3687 err = mlxsw_sp_span_init(mlxsw_sp);
3688 if (err) {
3689 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3690 goto err_span_init;
3691 }
3692
3693 /* Initialize router after SPAN is initialized, so that the FIB and
3694 * neighbor event handlers can issue SPAN respin.
3695 */
3696 err = mlxsw_sp_router_init(mlxsw_sp); 3696 err = mlxsw_sp_router_init(mlxsw_sp);
3697 if (err) { 3697 if (err) {
3698 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); 3698 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
@@ -3739,14 +3739,14 @@ err_acl_init:
3739err_netdev_notifier: 3739err_netdev_notifier:
3740 mlxsw_sp_router_fini(mlxsw_sp); 3740 mlxsw_sp_router_fini(mlxsw_sp);
3741err_router_init: 3741err_router_init:
3742 mlxsw_sp_span_fini(mlxsw_sp);
3743err_span_init:
3744 mlxsw_sp_afa_fini(mlxsw_sp); 3742 mlxsw_sp_afa_fini(mlxsw_sp);
3745err_afa_init: 3743err_afa_init:
3746 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3744 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3747err_counter_pool_init: 3745err_counter_pool_init:
3748 mlxsw_sp_switchdev_fini(mlxsw_sp); 3746 mlxsw_sp_switchdev_fini(mlxsw_sp);
3749err_switchdev_init: 3747err_switchdev_init:
3748 mlxsw_sp_span_fini(mlxsw_sp);
3749err_span_init:
3750 mlxsw_sp_lag_fini(mlxsw_sp); 3750 mlxsw_sp_lag_fini(mlxsw_sp);
3751err_lag_init: 3751err_lag_init:
3752 mlxsw_sp_buffers_fini(mlxsw_sp); 3752 mlxsw_sp_buffers_fini(mlxsw_sp);
@@ -3768,10 +3768,10 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
3768 mlxsw_sp_acl_fini(mlxsw_sp); 3768 mlxsw_sp_acl_fini(mlxsw_sp);
3769 unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); 3769 unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
3770 mlxsw_sp_router_fini(mlxsw_sp); 3770 mlxsw_sp_router_fini(mlxsw_sp);
3771 mlxsw_sp_span_fini(mlxsw_sp);
3772 mlxsw_sp_afa_fini(mlxsw_sp); 3771 mlxsw_sp_afa_fini(mlxsw_sp);
3773 mlxsw_sp_counter_pool_fini(mlxsw_sp); 3772 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3774 mlxsw_sp_switchdev_fini(mlxsw_sp); 3773 mlxsw_sp_switchdev_fini(mlxsw_sp);
3774 mlxsw_sp_span_fini(mlxsw_sp);
3775 mlxsw_sp_lag_fini(mlxsw_sp); 3775 mlxsw_sp_lag_fini(mlxsw_sp);
3776 mlxsw_sp_buffers_fini(mlxsw_sp); 3776 mlxsw_sp_buffers_fini(mlxsw_sp);
3777 mlxsw_sp_traps_fini(mlxsw_sp); 3777 mlxsw_sp_traps_fini(mlxsw_sp);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 804d4d2c8031..4a519d8edec8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -364,6 +364,7 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
364int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, 364int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
365 enum mlxsw_reg_qeec_hr hr, u8 index, 365 enum mlxsw_reg_qeec_hr hr, u8 index,
366 u8 next_index, u32 maxrate); 366 u8 next_index, u32 maxrate);
367enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 stp_state);
367int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 368int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
368 u8 state); 369 u8 state);
369int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable); 370int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 8e4edb634b11..8028d221aece 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -5882,24 +5882,24 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
5882 switch (info->family) { 5882 switch (info->family) {
5883 case AF_INET: 5883 case AF_INET:
5884 if (!fib4_rule_default(rule) && !rule->l3mdev) 5884 if (!fib4_rule_default(rule) && !rule->l3mdev)
5885 err = -1; 5885 err = -EOPNOTSUPP;
5886 break; 5886 break;
5887 case AF_INET6: 5887 case AF_INET6:
5888 if (!fib6_rule_default(rule) && !rule->l3mdev) 5888 if (!fib6_rule_default(rule) && !rule->l3mdev)
5889 err = -1; 5889 err = -EOPNOTSUPP;
5890 break; 5890 break;
5891 case RTNL_FAMILY_IPMR: 5891 case RTNL_FAMILY_IPMR:
5892 if (!ipmr_rule_default(rule) && !rule->l3mdev) 5892 if (!ipmr_rule_default(rule) && !rule->l3mdev)
5893 err = -1; 5893 err = -EOPNOTSUPP;
5894 break; 5894 break;
5895 case RTNL_FAMILY_IP6MR: 5895 case RTNL_FAMILY_IP6MR:
5896 if (!ip6mr_rule_default(rule) && !rule->l3mdev) 5896 if (!ip6mr_rule_default(rule) && !rule->l3mdev)
5897 err = -1; 5897 err = -EOPNOTSUPP;
5898 break; 5898 break;
5899 } 5899 }
5900 5900
5901 if (err < 0) 5901 if (err < 0)
5902 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported. Aborting offload"); 5902 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
5903 5903
5904 return err; 5904 return err;
5905} 5905}
@@ -5926,8 +5926,15 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
5926 case FIB_EVENT_RULE_DEL: 5926 case FIB_EVENT_RULE_DEL:
5927 err = mlxsw_sp_router_fib_rule_event(event, info, 5927 err = mlxsw_sp_router_fib_rule_event(event, info,
5928 router->mlxsw_sp); 5928 router->mlxsw_sp);
5929 if (!err) 5929 if (!err || info->extack)
5930 return NOTIFY_DONE; 5930 return notifier_from_errno(err);
5931 break;
5932 case FIB_EVENT_ENTRY_ADD:
5933 if (router->aborted) {
5934 NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
5935 return notifier_from_errno(-EINVAL);
5936 }
5937 break;
5931 } 5938 }
5932 5939
5933 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); 5940 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 65a77708ff61..cd9071ee19ad 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -32,6 +32,7 @@
32 * POSSIBILITY OF SUCH DAMAGE. 32 * POSSIBILITY OF SUCH DAMAGE.
33 */ 33 */
34 34
35#include <linux/if_bridge.h>
35#include <linux/list.h> 36#include <linux/list.h>
36#include <net/arp.h> 37#include <net/arp.h>
37#include <net/gre.h> 38#include <net/gre.h>
@@ -39,8 +40,9 @@
39#include <net/ip6_tunnel.h> 40#include <net/ip6_tunnel.h>
40 41
41#include "spectrum.h" 42#include "spectrum.h"
42#include "spectrum_span.h"
43#include "spectrum_ipip.h" 43#include "spectrum_ipip.h"
44#include "spectrum_span.h"
45#include "spectrum_switchdev.h"
44 46
45int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp) 47int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
46{ 48{
@@ -167,6 +169,72 @@ mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
167 return 0; 169 return 0;
168} 170}
169 171
172static struct net_device *
173mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
174 unsigned char *dmac,
175 u16 *p_vid)
176{
177 struct bridge_vlan_info vinfo;
178 struct net_device *edev;
179 u16 pvid;
180
181 if (WARN_ON(br_vlan_get_pvid(br_dev, &pvid)))
182 return NULL;
183 if (!pvid)
184 return NULL;
185
186 edev = br_fdb_find_port(br_dev, dmac, pvid);
187 if (!edev)
188 return NULL;
189
190 if (br_vlan_get_info(edev, pvid, &vinfo))
191 return NULL;
192 if (!(vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED))
193 *p_vid = pvid;
194 return edev;
195}
196
197static struct net_device *
198mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
199 unsigned char *dmac)
200{
201 return br_fdb_find_port(br_dev, dmac, 0);
202}
203
204static struct net_device *
205mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
206 unsigned char dmac[ETH_ALEN],
207 u16 *p_vid)
208{
209 struct mlxsw_sp_bridge_port *bridge_port;
210 enum mlxsw_reg_spms_state spms_state;
211 struct mlxsw_sp_port *port;
212 struct net_device *dev;
213 u8 stp_state;
214
215 if (br_vlan_enabled(br_dev))
216 dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
217 else
218 dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
219 if (!dev)
220 return NULL;
221
222 port = mlxsw_sp_port_dev_lower_find(dev);
223 if (!port)
224 return NULL;
225
226 bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
227 if (!bridge_port)
228 return NULL;
229
230 stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
231 spms_state = mlxsw_sp_stp_spms_state(stp_state);
232 if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
233 return NULL;
234
235 return dev;
236}
237
170static __maybe_unused int 238static __maybe_unused int
171mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev, 239mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev,
172 union mlxsw_sp_l3addr saddr, 240 union mlxsw_sp_l3addr saddr,
@@ -177,13 +245,22 @@ mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev,
177 struct mlxsw_sp_span_parms *sparmsp) 245 struct mlxsw_sp_span_parms *sparmsp)
178{ 246{
179 unsigned char dmac[ETH_ALEN]; 247 unsigned char dmac[ETH_ALEN];
248 u16 vid = 0;
180 249
181 if (mlxsw_sp_l3addr_is_zero(gw)) 250 if (mlxsw_sp_l3addr_is_zero(gw))
182 gw = daddr; 251 gw = daddr;
183 252
184 if (!l3edev || !mlxsw_sp_port_dev_check(l3edev) || 253 if (!l3edev || mlxsw_sp_span_dmac(tbl, &gw, l3edev, dmac))
185 mlxsw_sp_span_dmac(tbl, &gw, l3edev, dmac)) 254 goto unoffloadable;
186 return mlxsw_sp_span_entry_unoffloadable(sparmsp); 255
256 if (netif_is_bridge_master(l3edev)) {
257 l3edev = mlxsw_sp_span_entry_bridge(l3edev, dmac, &vid);
258 if (!l3edev)
259 goto unoffloadable;
260 }
261
262 if (!mlxsw_sp_port_dev_check(l3edev))
263 goto unoffloadable;
187 264
188 sparmsp->dest_port = netdev_priv(l3edev); 265 sparmsp->dest_port = netdev_priv(l3edev);
189 sparmsp->ttl = ttl; 266 sparmsp->ttl = ttl;
@@ -191,7 +268,11 @@ mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev,
191 memcpy(sparmsp->smac, l3edev->dev_addr, ETH_ALEN); 268 memcpy(sparmsp->smac, l3edev->dev_addr, ETH_ALEN);
192 sparmsp->saddr = saddr; 269 sparmsp->saddr = saddr;
193 sparmsp->daddr = daddr; 270 sparmsp->daddr = daddr;
271 sparmsp->vid = vid;
194 return 0; 272 return 0;
273
274unoffloadable:
275 return mlxsw_sp_span_entry_unoffloadable(sparmsp);
195} 276}
196 277
197#if IS_ENABLED(CONFIG_NET_IPGRE) 278#if IS_ENABLED(CONFIG_NET_IPGRE)
@@ -268,9 +349,10 @@ mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
268 /* Create a new port analayzer entry for local_port. */ 349 /* Create a new port analayzer entry for local_port. */
269 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, 350 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
270 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); 351 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
352 mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
271 mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl, 353 mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
272 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER, 354 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
273 sparms.dmac, false); 355 sparms.dmac, !!sparms.vid);
274 mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl, 356 mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
275 sparms.ttl, sparms.smac, 357 sparms.ttl, sparms.smac,
276 be32_to_cpu(sparms.saddr.addr4), 358 be32_to_cpu(sparms.saddr.addr4),
@@ -368,9 +450,10 @@ mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
368 /* Create a new port analayzer entry for local_port. */ 450 /* Create a new port analayzer entry for local_port. */
369 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, 451 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
370 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); 452 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
453 mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
371 mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl, 454 mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
372 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER, 455 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
373 sparms.dmac, false); 456 sparms.dmac, !!sparms.vid);
374 mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac, 457 mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
375 sparms.saddr.addr6, 458 sparms.saddr.addr6,
376 sparms.daddr.addr6); 459 sparms.daddr.addr6);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 4b87ec20e658..14a6de904db1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -63,6 +63,7 @@ struct mlxsw_sp_span_parms {
63 unsigned char smac[ETH_ALEN]; 63 unsigned char smac[ETH_ALEN];
64 union mlxsw_sp_l3addr daddr; 64 union mlxsw_sp_l3addr daddr;
65 union mlxsw_sp_l3addr saddr; 65 union mlxsw_sp_l3addr saddr;
66 u16 vid;
66}; 67};
67 68
68struct mlxsw_sp_span_entry_ops; 69struct mlxsw_sp_span_entry_ops;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index c11c9a635866..8c9cf8ee9398 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -49,7 +49,9 @@
49#include <linux/netlink.h> 49#include <linux/netlink.h>
50#include <net/switchdev.h> 50#include <net/switchdev.h>
51 51
52#include "spectrum_span.h"
52#include "spectrum_router.h" 53#include "spectrum_router.h"
54#include "spectrum_switchdev.h"
53#include "spectrum.h" 55#include "spectrum.h"
54#include "core.h" 56#include "core.h"
55#include "reg.h" 57#include "reg.h"
@@ -239,7 +241,7 @@ __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
239 return NULL; 241 return NULL;
240} 242}
241 243
242static struct mlxsw_sp_bridge_port * 244struct mlxsw_sp_bridge_port *
243mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge, 245mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
244 struct net_device *brport_dev) 246 struct net_device *brport_dev)
245{ 247{
@@ -922,6 +924,9 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
922 break; 924 break;
923 } 925 }
924 926
927 if (switchdev_trans_ph_commit(trans))
928 mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
929
925 return err; 930 return err;
926} 931}
927 932
@@ -1646,18 +1651,57 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
1646 } 1651 }
1647} 1652}
1648 1653
1654struct mlxsw_sp_span_respin_work {
1655 struct work_struct work;
1656 struct mlxsw_sp *mlxsw_sp;
1657};
1658
1659static void mlxsw_sp_span_respin_work(struct work_struct *work)
1660{
1661 struct mlxsw_sp_span_respin_work *respin_work =
1662 container_of(work, struct mlxsw_sp_span_respin_work, work);
1663
1664 rtnl_lock();
1665 mlxsw_sp_span_respin(respin_work->mlxsw_sp);
1666 rtnl_unlock();
1667 kfree(respin_work);
1668}
1669
1670static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
1671{
1672 struct mlxsw_sp_span_respin_work *respin_work;
1673
1674 respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
1675 if (!respin_work)
1676 return;
1677
1678 INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
1679 respin_work->mlxsw_sp = mlxsw_sp;
1680
1681 mlxsw_core_schedule_work(&respin_work->work);
1682}
1683
1649static int mlxsw_sp_port_obj_add(struct net_device *dev, 1684static int mlxsw_sp_port_obj_add(struct net_device *dev,
1650 const struct switchdev_obj *obj, 1685 const struct switchdev_obj *obj,
1651 struct switchdev_trans *trans) 1686 struct switchdev_trans *trans)
1652{ 1687{
1653 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 1688 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1689 const struct switchdev_obj_port_vlan *vlan;
1654 int err = 0; 1690 int err = 0;
1655 1691
1656 switch (obj->id) { 1692 switch (obj->id) {
1657 case SWITCHDEV_OBJ_ID_PORT_VLAN: 1693 case SWITCHDEV_OBJ_ID_PORT_VLAN:
1658 err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, 1694 vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
1659 SWITCHDEV_OBJ_PORT_VLAN(obj), 1695 err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans);
1660 trans); 1696
1697 if (switchdev_trans_ph_commit(trans)) {
1698 /* The event is emitted before the changes are actually
1699 * applied to the bridge. Therefore schedule the respin
1700 * call for later, so that the respin logic sees the
1701 * updated bridge state.
1702 */
1703 mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
1704 }
1661 break; 1705 break;
1662 case SWITCHDEV_OBJ_ID_PORT_MDB: 1706 case SWITCHDEV_OBJ_ID_PORT_MDB:
1663 err = mlxsw_sp_port_mdb_add(mlxsw_sp_port, 1707 err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
@@ -1718,13 +1762,11 @@ __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1718 struct net_device *dev = mlxsw_sp_port->dev; 1762 struct net_device *dev = mlxsw_sp_port->dev;
1719 int err; 1763 int err;
1720 1764
1721 if (bridge_port->bridge_device->multicast_enabled) { 1765 if (bridge_port->bridge_device->multicast_enabled &&
1722 if (bridge_port->bridge_device->multicast_enabled) { 1766 !bridge_port->mrouter) {
1723 err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, 1767 err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1724 false); 1768 if (err)
1725 if (err) 1769 netdev_err(dev, "Unable to remove port from SMID\n");
1726 netdev_err(dev, "Unable to remove port from SMID\n");
1727 }
1728 } 1770 }
1729 1771
1730 err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); 1772 err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
@@ -1808,6 +1850,8 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
1808 break; 1850 break;
1809 } 1851 }
1810 1852
1853 mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
1854
1811 return err; 1855 return err;
1812} 1856}
1813 1857
@@ -2224,6 +2268,8 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
2224 switch (switchdev_work->event) { 2268 switch (switchdev_work->event) {
2225 case SWITCHDEV_FDB_ADD_TO_DEVICE: 2269 case SWITCHDEV_FDB_ADD_TO_DEVICE:
2226 fdb_info = &switchdev_work->fdb_info; 2270 fdb_info = &switchdev_work->fdb_info;
2271 if (!fdb_info->added_by_user)
2272 break;
2227 err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true); 2273 err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
2228 if (err) 2274 if (err)
2229 break; 2275 break;
@@ -2233,10 +2279,20 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
2233 break; 2279 break;
2234 case SWITCHDEV_FDB_DEL_TO_DEVICE: 2280 case SWITCHDEV_FDB_DEL_TO_DEVICE:
2235 fdb_info = &switchdev_work->fdb_info; 2281 fdb_info = &switchdev_work->fdb_info;
2282 if (!fdb_info->added_by_user)
2283 break;
2236 mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false); 2284 mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
2237 break; 2285 break;
2286 case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
2287 case SWITCHDEV_FDB_DEL_TO_BRIDGE:
2288 /* These events are only used to potentially update an existing
2289 * SPAN mirror.
2290 */
2291 break;
2238 } 2292 }
2239 2293
2294 mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
2295
2240out: 2296out:
2241 rtnl_unlock(); 2297 rtnl_unlock();
2242 kfree(switchdev_work->fdb_info.addr); 2298 kfree(switchdev_work->fdb_info.addr);
@@ -2265,7 +2321,9 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
2265 2321
2266 switch (event) { 2322 switch (event) {
2267 case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ 2323 case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
2268 case SWITCHDEV_FDB_DEL_TO_DEVICE: 2324 case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
2325 case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
2326 case SWITCHDEV_FDB_DEL_TO_BRIDGE:
2269 memcpy(&switchdev_work->fdb_info, ptr, 2327 memcpy(&switchdev_work->fdb_info, ptr,
2270 sizeof(switchdev_work->fdb_info)); 2328 sizeof(switchdev_work->fdb_info));
2271 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); 2329 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
@@ -2297,6 +2355,12 @@ static struct notifier_block mlxsw_sp_switchdev_notifier = {
2297 .notifier_call = mlxsw_sp_switchdev_event, 2355 .notifier_call = mlxsw_sp_switchdev_event,
2298}; 2356};
2299 2357
2358u8
2359mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
2360{
2361 return bridge_port->stp_state;
2362}
2363
2300static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp) 2364static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
2301{ 2365{
2302 struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge; 2366 struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
new file mode 100644
index 000000000000..bc44d5effc28
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
@@ -0,0 +1,43 @@
1/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h
3 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the names of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * Alternatively, this software may be distributed under the terms of the
18 * GNU General Public License ("GPL") version 2 as published by the Free
19 * Software Foundation.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <linux/netdevice.h>
35
36struct mlxsw_sp_bridge;
37struct mlxsw_sp_bridge_port;
38
39struct mlxsw_sp_bridge_port *
40mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
41 struct net_device *brport_dev);
42
43u8 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index b3567a596fc1..80df9a5d4217 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -183,17 +183,21 @@ static int
183nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, 183nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
184 const struct tc_action *action, 184 const struct tc_action *action,
185 struct nfp_fl_pre_tunnel *pre_tun, 185 struct nfp_fl_pre_tunnel *pre_tun,
186 enum nfp_flower_tun_type tun_type) 186 enum nfp_flower_tun_type tun_type,
187 struct net_device *netdev)
187{ 188{
188 size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun); 189 size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
189 struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); 190 struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
190 u32 tmp_set_ip_tun_type_index = 0; 191 u32 tmp_set_ip_tun_type_index = 0;
191 /* Currently support one pre-tunnel so index is always 0. */ 192 /* Currently support one pre-tunnel so index is always 0. */
192 int pretun_idx = 0; 193 int pretun_idx = 0;
194 struct net *net;
193 195
194 if (ip_tun->options_len) 196 if (ip_tun->options_len)
195 return -EOPNOTSUPP; 197 return -EOPNOTSUPP;
196 198
199 net = dev_net(netdev);
200
197 set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL; 201 set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
198 set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ; 202 set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
199 203
@@ -204,6 +208,7 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
204 208
205 set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index); 209 set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
206 set_tun->tun_id = ip_tun->key.tun_id; 210 set_tun->tun_id = ip_tun->key.tun_id;
211 set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
207 212
208 /* Complete pre_tunnel action. */ 213 /* Complete pre_tunnel action. */
209 pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst; 214 pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
@@ -511,7 +516,8 @@ nfp_flower_loop_action(const struct tc_action *a,
511 *a_len += sizeof(struct nfp_fl_pre_tunnel); 516 *a_len += sizeof(struct nfp_fl_pre_tunnel);
512 517
513 set_tun = (void *)&nfp_fl->action_data[*a_len]; 518 set_tun = (void *)&nfp_fl->action_data[*a_len];
514 err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type); 519 err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type,
520 netdev);
515 if (err) 521 if (err)
516 return err; 522 return err;
517 *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun); 523 *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index b6c0fd053a50..bee4367a2c38 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -190,7 +190,10 @@ struct nfp_fl_set_ipv4_udp_tun {
190 __be16 reserved; 190 __be16 reserved;
191 __be64 tun_id __packed; 191 __be64 tun_id __packed;
192 __be32 tun_type_index; 192 __be32 tun_type_index;
193 __be32 extra[3]; 193 __be16 reserved2;
194 u8 ttl;
195 u8 reserved3;
196 __be32 extra[2];
194}; 197};
195 198
196/* Metadata with L2 (1W/4B) 199/* Metadata with L2 (1W/4B)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index ad02592a82b7..a997e34bcec2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -360,7 +360,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
360 } 360 }
361 361
362 SET_NETDEV_DEV(repr, &priv->nn->pdev->dev); 362 SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
363 nfp_net_get_mac_addr(app->pf, port); 363 nfp_net_get_mac_addr(app->pf, repr, port);
364 364
365 cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port); 365 cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
366 err = nfp_repr_init(app, repr, 366 err = nfp_repr_init(app, repr,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
index 2a2f2fbc8850..b9618c37403f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
@@ -69,7 +69,7 @@ int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
69 if (err) 69 if (err)
70 return err < 0 ? err : 0; 70 return err < 0 ? err : 0;
71 71
72 nfp_net_get_mac_addr(app->pf, nn->port); 72 nfp_net_get_mac_addr(app->pf, nn->dp.netdev, nn->port);
73 73
74 return 0; 74 return 0;
75} 75}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index add46e28212b..42211083b51f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -171,7 +171,9 @@ void nfp_net_pci_remove(struct nfp_pf *pf);
171int nfp_hwmon_register(struct nfp_pf *pf); 171int nfp_hwmon_register(struct nfp_pf *pf);
172void nfp_hwmon_unregister(struct nfp_pf *pf); 172void nfp_hwmon_unregister(struct nfp_pf *pf);
173 173
174void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port); 174void
175nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
176 struct nfp_port *port);
175 177
176bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb); 178bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
177 179
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 15fa47f622aa..45cd2092e498 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -67,23 +67,26 @@
67/** 67/**
68 * nfp_net_get_mac_addr() - Get the MAC address. 68 * nfp_net_get_mac_addr() - Get the MAC address.
69 * @pf: NFP PF handle 69 * @pf: NFP PF handle
70 * @netdev: net_device to set MAC address on
70 * @port: NFP port structure 71 * @port: NFP port structure
71 * 72 *
72 * First try to get the MAC address from NSP ETH table. If that 73 * First try to get the MAC address from NSP ETH table. If that
73 * fails generate a random address. 74 * fails generate a random address.
74 */ 75 */
75void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port) 76void
77nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
78 struct nfp_port *port)
76{ 79{
77 struct nfp_eth_table_port *eth_port; 80 struct nfp_eth_table_port *eth_port;
78 81
79 eth_port = __nfp_port_get_eth_port(port); 82 eth_port = __nfp_port_get_eth_port(port);
80 if (!eth_port) { 83 if (!eth_port) {
81 eth_hw_addr_random(port->netdev); 84 eth_hw_addr_random(netdev);
82 return; 85 return;
83 } 86 }
84 87
85 ether_addr_copy(port->netdev->dev_addr, eth_port->mac_addr); 88 ether_addr_copy(netdev->dev_addr, eth_port->mac_addr);
86 ether_addr_copy(port->netdev->perm_addr, eth_port->mac_addr); 89 ether_addr_copy(netdev->perm_addr, eth_port->mac_addr);
87} 90}
88 91
89static struct nfp_eth_table_port * 92static struct nfp_eth_table_port *
@@ -511,16 +514,18 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf)
511 return PTR_ERR(mem); 514 return PTR_ERR(mem);
512 } 515 }
513 516
514 min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1); 517 if (pf->eth_tbl) {
515 pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats", 518 min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
516 "net.macstats", min_size, 519 pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
517 &pf->mac_stats_bar); 520 "net.macstats", min_size,
518 if (IS_ERR(pf->mac_stats_mem)) { 521 &pf->mac_stats_bar);
519 if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) { 522 if (IS_ERR(pf->mac_stats_mem)) {
520 err = PTR_ERR(pf->mac_stats_mem); 523 if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
521 goto err_unmap_ctrl; 524 err = PTR_ERR(pf->mac_stats_mem);
525 goto err_unmap_ctrl;
526 }
527 pf->mac_stats_mem = NULL;
522 } 528 }
523 pf->mac_stats_mem = NULL;
524 } 529 }
525 530
526 pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg", 531 pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 74fc626b1ec1..38502815d681 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -2370,7 +2370,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
2370 u8 flags = 0; 2370 u8 flags = 0;
2371 2371
2372 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { 2372 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
2373 DP_INFO(cdev, "Cannot transmit a checksumed packet\n"); 2373 DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
2374 return -EINVAL; 2374 return -EINVAL;
2375 } 2375 }
2376 2376
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index fb7c2d1562ae..6acfd43c1a4f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -848,7 +848,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
848 848
849 if (!(qp->resp_offloaded)) { 849 if (!(qp->resp_offloaded)) {
850 DP_NOTICE(p_hwfn, 850 DP_NOTICE(p_hwfn,
851 "The responder's qp should be offloded before requester's\n"); 851 "The responder's qp should be offloaded before requester's\n");
852 return -EINVAL; 852 return -EINVAL;
853 } 853 }
854 854
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index d24b47b8e0b2..d118da5a10a2 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2224,7 +2224,7 @@ static void rtl8139_poll_controller(struct net_device *dev)
2224 struct rtl8139_private *tp = netdev_priv(dev); 2224 struct rtl8139_private *tp = netdev_priv(dev);
2225 const int irq = tp->pci_dev->irq; 2225 const int irq = tp->pci_dev->irq;
2226 2226
2227 disable_irq(irq); 2227 disable_irq_nosync(irq);
2228 rtl8139_interrupt(irq, dev); 2228 rtl8139_interrupt(irq, dev);
2229 enable_irq(irq); 2229 enable_irq(irq);
2230} 2230}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index a5d00ee94245..6d99b141a7aa 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -410,13 +410,8 @@ enum rtl8168_8101_registers {
410 CSIAR = 0x68, 410 CSIAR = 0x68,
411#define CSIAR_FLAG 0x80000000 411#define CSIAR_FLAG 0x80000000
412#define CSIAR_WRITE_CMD 0x80000000 412#define CSIAR_WRITE_CMD 0x80000000
413#define CSIAR_BYTE_ENABLE 0x0f 413#define CSIAR_BYTE_ENABLE 0x0000f000
414#define CSIAR_BYTE_ENABLE_SHIFT 12 414#define CSIAR_ADDR_MASK 0x00000fff
415#define CSIAR_ADDR_MASK 0x0fff
416#define CSIAR_FUNC_CARD 0x00000000
417#define CSIAR_FUNC_SDIO 0x00010000
418#define CSIAR_FUNC_NIC 0x00020000
419#define CSIAR_FUNC_NIC2 0x00010000
420 PMCH = 0x6f, 415 PMCH = 0x6f,
421 EPHYAR = 0x80, 416 EPHYAR = 0x80,
422#define EPHYAR_FLAG 0x80000000 417#define EPHYAR_FLAG 0x80000000
@@ -599,6 +594,7 @@ enum rtl_register_content {
599 RxChkSum = (1 << 5), 594 RxChkSum = (1 << 5),
600 PCIDAC = (1 << 4), 595 PCIDAC = (1 << 4),
601 PCIMulRW = (1 << 3), 596 PCIMulRW = (1 << 3),
597#define INTT_MASK GENMASK(1, 0)
602 INTT_0 = 0x0000, // 8168 598 INTT_0 = 0x0000, // 8168
603 INTT_1 = 0x0001, // 8168 599 INTT_1 = 0x0001, // 8168
604 INTT_2 = 0x0002, // 8168 600 INTT_2 = 0x0002, // 8168
@@ -689,6 +685,7 @@ enum rtl_rx_desc_bit {
689}; 685};
690 686
691#define RsvdMask 0x3fffc000 687#define RsvdMask 0x3fffc000
688#define CPCMD_QUIRK_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK)
692 689
693struct TxDesc { 690struct TxDesc {
694 __le32 opts1; 691 __le32 opts1;
@@ -774,21 +771,11 @@ struct rtl8169_private {
774 int (*read)(struct rtl8169_private *, int); 771 int (*read)(struct rtl8169_private *, int);
775 } mdio_ops; 772 } mdio_ops;
776 773
777 struct pll_power_ops {
778 void (*down)(struct rtl8169_private *);
779 void (*up)(struct rtl8169_private *);
780 } pll_power_ops;
781
782 struct jumbo_ops { 774 struct jumbo_ops {
783 void (*enable)(struct rtl8169_private *); 775 void (*enable)(struct rtl8169_private *);
784 void (*disable)(struct rtl8169_private *); 776 void (*disable)(struct rtl8169_private *);
785 } jumbo_ops; 777 } jumbo_ops;
786 778
787 struct csi_ops {
788 void (*write)(struct rtl8169_private *, int, int);
789 u32 (*read)(struct rtl8169_private *, int);
790 } csi_ops;
791
792 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); 779 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
793 int (*get_link_ksettings)(struct net_device *, 780 int (*get_link_ksettings)(struct net_device *,
794 struct ethtool_link_ksettings *); 781 struct ethtool_link_ksettings *);
@@ -1614,23 +1601,8 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
1614 if (options & LinkUp) 1601 if (options & LinkUp)
1615 wolopts |= WAKE_PHY; 1602 wolopts |= WAKE_PHY;
1616 switch (tp->mac_version) { 1603 switch (tp->mac_version) {
1617 case RTL_GIGA_MAC_VER_34: 1604 case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
1618 case RTL_GIGA_MAC_VER_35: 1605 case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
1619 case RTL_GIGA_MAC_VER_36:
1620 case RTL_GIGA_MAC_VER_37:
1621 case RTL_GIGA_MAC_VER_38:
1622 case RTL_GIGA_MAC_VER_40:
1623 case RTL_GIGA_MAC_VER_41:
1624 case RTL_GIGA_MAC_VER_42:
1625 case RTL_GIGA_MAC_VER_43:
1626 case RTL_GIGA_MAC_VER_44:
1627 case RTL_GIGA_MAC_VER_45:
1628 case RTL_GIGA_MAC_VER_46:
1629 case RTL_GIGA_MAC_VER_47:
1630 case RTL_GIGA_MAC_VER_48:
1631 case RTL_GIGA_MAC_VER_49:
1632 case RTL_GIGA_MAC_VER_50:
1633 case RTL_GIGA_MAC_VER_51:
1634 if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2) 1606 if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
1635 wolopts |= WAKE_MAGIC; 1607 wolopts |= WAKE_MAGIC;
1636 break; 1608 break;
@@ -1691,23 +1663,8 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1691 RTL_W8(tp, Cfg9346, Cfg9346_Unlock); 1663 RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
1692 1664
1693 switch (tp->mac_version) { 1665 switch (tp->mac_version) {
1694 case RTL_GIGA_MAC_VER_34: 1666 case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
1695 case RTL_GIGA_MAC_VER_35: 1667 case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
1696 case RTL_GIGA_MAC_VER_36:
1697 case RTL_GIGA_MAC_VER_37:
1698 case RTL_GIGA_MAC_VER_38:
1699 case RTL_GIGA_MAC_VER_40:
1700 case RTL_GIGA_MAC_VER_41:
1701 case RTL_GIGA_MAC_VER_42:
1702 case RTL_GIGA_MAC_VER_43:
1703 case RTL_GIGA_MAC_VER_44:
1704 case RTL_GIGA_MAC_VER_45:
1705 case RTL_GIGA_MAC_VER_46:
1706 case RTL_GIGA_MAC_VER_47:
1707 case RTL_GIGA_MAC_VER_48:
1708 case RTL_GIGA_MAC_VER_49:
1709 case RTL_GIGA_MAC_VER_50:
1710 case RTL_GIGA_MAC_VER_51:
1711 tmp = ARRAY_SIZE(cfg) - 1; 1668 tmp = ARRAY_SIZE(cfg) - 1;
1712 if (wolopts & WAKE_MAGIC) 1669 if (wolopts & WAKE_MAGIC)
1713 rtl_w0w1_eri(tp, 1670 rtl_w0w1_eri(tp,
@@ -1935,12 +1892,14 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1935 return features; 1892 return features;
1936} 1893}
1937 1894
1938static void __rtl8169_set_features(struct net_device *dev, 1895static int rtl8169_set_features(struct net_device *dev,
1939 netdev_features_t features) 1896 netdev_features_t features)
1940{ 1897{
1941 struct rtl8169_private *tp = netdev_priv(dev); 1898 struct rtl8169_private *tp = netdev_priv(dev);
1942 u32 rx_config; 1899 u32 rx_config;
1943 1900
1901 rtl_lock_work(tp);
1902
1944 rx_config = RTL_R32(tp, RxConfig); 1903 rx_config = RTL_R32(tp, RxConfig);
1945 if (features & NETIF_F_RXALL) 1904 if (features & NETIF_F_RXALL)
1946 rx_config |= (AcceptErr | AcceptRunt); 1905 rx_config |= (AcceptErr | AcceptRunt);
@@ -1959,28 +1918,14 @@ static void __rtl8169_set_features(struct net_device *dev,
1959 else 1918 else
1960 tp->cp_cmd &= ~RxVlan; 1919 tp->cp_cmd &= ~RxVlan;
1961 1920
1962 tp->cp_cmd |= RTL_R16(tp, CPlusCmd) & ~(RxVlan | RxChkSum);
1963
1964 RTL_W16(tp, CPlusCmd, tp->cp_cmd); 1921 RTL_W16(tp, CPlusCmd, tp->cp_cmd);
1965 RTL_R16(tp, CPlusCmd); 1922 RTL_R16(tp, CPlusCmd);
1966}
1967 1923
1968static int rtl8169_set_features(struct net_device *dev,
1969 netdev_features_t features)
1970{
1971 struct rtl8169_private *tp = netdev_priv(dev);
1972
1973 features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
1974
1975 rtl_lock_work(tp);
1976 if (features ^ dev->features)
1977 __rtl8169_set_features(dev, features);
1978 rtl_unlock_work(tp); 1924 rtl_unlock_work(tp);
1979 1925
1980 return 0; 1926 return 0;
1981} 1927}
1982 1928
1983
1984static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) 1929static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
1985{ 1930{
1986 return (skb_vlan_tag_present(skb)) ? 1931 return (skb_vlan_tag_present(skb)) ?
@@ -2354,7 +2299,7 @@ static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
2354 if (IS_ERR(ci)) 2299 if (IS_ERR(ci))
2355 return PTR_ERR(ci); 2300 return PTR_ERR(ci);
2356 2301
2357 scale = &ci->scalev[RTL_R16(tp, CPlusCmd) & 3]; 2302 scale = &ci->scalev[tp->cp_cmd & INTT_MASK];
2358 2303
2359 /* read IntrMitigate and adjust according to scale */ 2304 /* read IntrMitigate and adjust according to scale */
2360 for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) { 2305 for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) {
@@ -2453,7 +2398,7 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
2453 2398
2454 RTL_W16(tp, IntrMitigate, swab16(w)); 2399 RTL_W16(tp, IntrMitigate, swab16(w));
2455 2400
2456 tp->cp_cmd = (tp->cp_cmd & ~3) | cp01; 2401 tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01;
2457 RTL_W16(tp, CPlusCmd, tp->cp_cmd); 2402 RTL_W16(tp, CPlusCmd, tp->cp_cmd);
2458 RTL_R16(tp, CPlusCmd); 2403 RTL_R16(tp, CPlusCmd);
2459 2404
@@ -4638,18 +4583,7 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
4638 ops->write = r8168dp_2_mdio_write; 4583 ops->write = r8168dp_2_mdio_write;
4639 ops->read = r8168dp_2_mdio_read; 4584 ops->read = r8168dp_2_mdio_read;
4640 break; 4585 break;
4641 case RTL_GIGA_MAC_VER_40: 4586 case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
4642 case RTL_GIGA_MAC_VER_41:
4643 case RTL_GIGA_MAC_VER_42:
4644 case RTL_GIGA_MAC_VER_43:
4645 case RTL_GIGA_MAC_VER_44:
4646 case RTL_GIGA_MAC_VER_45:
4647 case RTL_GIGA_MAC_VER_46:
4648 case RTL_GIGA_MAC_VER_47:
4649 case RTL_GIGA_MAC_VER_48:
4650 case RTL_GIGA_MAC_VER_49:
4651 case RTL_GIGA_MAC_VER_50:
4652 case RTL_GIGA_MAC_VER_51:
4653 ops->write = r8168g_mdio_write; 4587 ops->write = r8168g_mdio_write;
4654 ops->read = r8168g_mdio_read; 4588 ops->read = r8168g_mdio_read;
4655 break; 4589 break;
@@ -4694,21 +4628,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
4694 case RTL_GIGA_MAC_VER_32: 4628 case RTL_GIGA_MAC_VER_32:
4695 case RTL_GIGA_MAC_VER_33: 4629 case RTL_GIGA_MAC_VER_33:
4696 case RTL_GIGA_MAC_VER_34: 4630 case RTL_GIGA_MAC_VER_34:
4697 case RTL_GIGA_MAC_VER_37: 4631 case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_51:
4698 case RTL_GIGA_MAC_VER_38:
4699 case RTL_GIGA_MAC_VER_39:
4700 case RTL_GIGA_MAC_VER_40:
4701 case RTL_GIGA_MAC_VER_41:
4702 case RTL_GIGA_MAC_VER_42:
4703 case RTL_GIGA_MAC_VER_43:
4704 case RTL_GIGA_MAC_VER_44:
4705 case RTL_GIGA_MAC_VER_45:
4706 case RTL_GIGA_MAC_VER_46:
4707 case RTL_GIGA_MAC_VER_47:
4708 case RTL_GIGA_MAC_VER_48:
4709 case RTL_GIGA_MAC_VER_49:
4710 case RTL_GIGA_MAC_VER_50:
4711 case RTL_GIGA_MAC_VER_51:
4712 RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | 4632 RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
4713 AcceptBroadcast | AcceptMulticast | AcceptMyPhys); 4633 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
4714 break; 4634 break;
@@ -4728,79 +4648,13 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
4728 return true; 4648 return true;
4729} 4649}
4730 4650
4731static void r810x_phy_power_down(struct rtl8169_private *tp)
4732{
4733 rtl_writephy(tp, 0x1f, 0x0000);
4734 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
4735}
4736
4737static void r810x_phy_power_up(struct rtl8169_private *tp)
4738{
4739 rtl_writephy(tp, 0x1f, 0x0000);
4740 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
4741}
4742
4743static void r810x_pll_power_down(struct rtl8169_private *tp)
4744{
4745 if (rtl_wol_pll_power_down(tp))
4746 return;
4747
4748 r810x_phy_power_down(tp);
4749
4750 switch (tp->mac_version) {
4751 case RTL_GIGA_MAC_VER_07:
4752 case RTL_GIGA_MAC_VER_08:
4753 case RTL_GIGA_MAC_VER_09:
4754 case RTL_GIGA_MAC_VER_10:
4755 case RTL_GIGA_MAC_VER_13:
4756 case RTL_GIGA_MAC_VER_16:
4757 break;
4758 default:
4759 RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
4760 break;
4761 }
4762}
4763
4764static void r810x_pll_power_up(struct rtl8169_private *tp)
4765{
4766 r810x_phy_power_up(tp);
4767
4768 switch (tp->mac_version) {
4769 case RTL_GIGA_MAC_VER_07:
4770 case RTL_GIGA_MAC_VER_08:
4771 case RTL_GIGA_MAC_VER_09:
4772 case RTL_GIGA_MAC_VER_10:
4773 case RTL_GIGA_MAC_VER_13:
4774 case RTL_GIGA_MAC_VER_16:
4775 break;
4776 case RTL_GIGA_MAC_VER_47:
4777 case RTL_GIGA_MAC_VER_48:
4778 RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
4779 break;
4780 default:
4781 RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
4782 break;
4783 }
4784}
4785
4786static void r8168_phy_power_up(struct rtl8169_private *tp) 4651static void r8168_phy_power_up(struct rtl8169_private *tp)
4787{ 4652{
4788 rtl_writephy(tp, 0x1f, 0x0000); 4653 rtl_writephy(tp, 0x1f, 0x0000);
4789 switch (tp->mac_version) { 4654 switch (tp->mac_version) {
4790 case RTL_GIGA_MAC_VER_11: 4655 case RTL_GIGA_MAC_VER_11:
4791 case RTL_GIGA_MAC_VER_12: 4656 case RTL_GIGA_MAC_VER_12:
4792 case RTL_GIGA_MAC_VER_17: 4657 case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28:
4793 case RTL_GIGA_MAC_VER_18:
4794 case RTL_GIGA_MAC_VER_19:
4795 case RTL_GIGA_MAC_VER_20:
4796 case RTL_GIGA_MAC_VER_21:
4797 case RTL_GIGA_MAC_VER_22:
4798 case RTL_GIGA_MAC_VER_23:
4799 case RTL_GIGA_MAC_VER_24:
4800 case RTL_GIGA_MAC_VER_25:
4801 case RTL_GIGA_MAC_VER_26:
4802 case RTL_GIGA_MAC_VER_27:
4803 case RTL_GIGA_MAC_VER_28:
4804 case RTL_GIGA_MAC_VER_31: 4658 case RTL_GIGA_MAC_VER_31:
4805 rtl_writephy(tp, 0x0e, 0x0000); 4659 rtl_writephy(tp, 0x0e, 0x0000);
4806 break; 4660 break;
@@ -4823,18 +4677,7 @@ static void r8168_phy_power_down(struct rtl8169_private *tp)
4823 4677
4824 case RTL_GIGA_MAC_VER_11: 4678 case RTL_GIGA_MAC_VER_11:
4825 case RTL_GIGA_MAC_VER_12: 4679 case RTL_GIGA_MAC_VER_12:
4826 case RTL_GIGA_MAC_VER_17: 4680 case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28:
4827 case RTL_GIGA_MAC_VER_18:
4828 case RTL_GIGA_MAC_VER_19:
4829 case RTL_GIGA_MAC_VER_20:
4830 case RTL_GIGA_MAC_VER_21:
4831 case RTL_GIGA_MAC_VER_22:
4832 case RTL_GIGA_MAC_VER_23:
4833 case RTL_GIGA_MAC_VER_24:
4834 case RTL_GIGA_MAC_VER_25:
4835 case RTL_GIGA_MAC_VER_26:
4836 case RTL_GIGA_MAC_VER_27:
4837 case RTL_GIGA_MAC_VER_28:
4838 case RTL_GIGA_MAC_VER_31: 4681 case RTL_GIGA_MAC_VER_31:
4839 rtl_writephy(tp, 0x0e, 0x0200); 4682 rtl_writephy(tp, 0x0e, 0x0200);
4840 default: 4683 default:
@@ -4848,12 +4691,6 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
4848 if (r8168_check_dash(tp)) 4691 if (r8168_check_dash(tp))
4849 return; 4692 return;
4850 4693
4851 if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
4852 tp->mac_version == RTL_GIGA_MAC_VER_24) &&
4853 (RTL_R16(tp, CPlusCmd) & ASF)) {
4854 return;
4855 }
4856
4857 if (tp->mac_version == RTL_GIGA_MAC_VER_32 || 4694 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
4858 tp->mac_version == RTL_GIGA_MAC_VER_33) 4695 tp->mac_version == RTL_GIGA_MAC_VER_33)
4859 rtl_ephy_write(tp, 0x19, 0xff64); 4696 rtl_ephy_write(tp, 0x19, 0xff64);
@@ -4864,16 +4701,15 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
4864 r8168_phy_power_down(tp); 4701 r8168_phy_power_down(tp);
4865 4702
4866 switch (tp->mac_version) { 4703 switch (tp->mac_version) {
4867 case RTL_GIGA_MAC_VER_25: 4704 case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
4868 case RTL_GIGA_MAC_VER_26: 4705 case RTL_GIGA_MAC_VER_37:
4869 case RTL_GIGA_MAC_VER_27: 4706 case RTL_GIGA_MAC_VER_39:
4870 case RTL_GIGA_MAC_VER_28: 4707 case RTL_GIGA_MAC_VER_43:
4871 case RTL_GIGA_MAC_VER_31:
4872 case RTL_GIGA_MAC_VER_32:
4873 case RTL_GIGA_MAC_VER_33:
4874 case RTL_GIGA_MAC_VER_44: 4708 case RTL_GIGA_MAC_VER_44:
4875 case RTL_GIGA_MAC_VER_45: 4709 case RTL_GIGA_MAC_VER_45:
4876 case RTL_GIGA_MAC_VER_46: 4710 case RTL_GIGA_MAC_VER_46:
4711 case RTL_GIGA_MAC_VER_47:
4712 case RTL_GIGA_MAC_VER_48:
4877 case RTL_GIGA_MAC_VER_50: 4713 case RTL_GIGA_MAC_VER_50:
4878 case RTL_GIGA_MAC_VER_51: 4714 case RTL_GIGA_MAC_VER_51:
4879 RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); 4715 RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
@@ -4891,18 +4727,17 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
4891static void r8168_pll_power_up(struct rtl8169_private *tp) 4727static void r8168_pll_power_up(struct rtl8169_private *tp)
4892{ 4728{
4893 switch (tp->mac_version) { 4729 switch (tp->mac_version) {
4894 case RTL_GIGA_MAC_VER_25: 4730 case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
4895 case RTL_GIGA_MAC_VER_26: 4731 case RTL_GIGA_MAC_VER_37:
4896 case RTL_GIGA_MAC_VER_27: 4732 case RTL_GIGA_MAC_VER_39:
4897 case RTL_GIGA_MAC_VER_28: 4733 case RTL_GIGA_MAC_VER_43:
4898 case RTL_GIGA_MAC_VER_31:
4899 case RTL_GIGA_MAC_VER_32:
4900 case RTL_GIGA_MAC_VER_33:
4901 RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80); 4734 RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
4902 break; 4735 break;
4903 case RTL_GIGA_MAC_VER_44: 4736 case RTL_GIGA_MAC_VER_44:
4904 case RTL_GIGA_MAC_VER_45: 4737 case RTL_GIGA_MAC_VER_45:
4905 case RTL_GIGA_MAC_VER_46: 4738 case RTL_GIGA_MAC_VER_46:
4739 case RTL_GIGA_MAC_VER_47:
4740 case RTL_GIGA_MAC_VER_48:
4906 case RTL_GIGA_MAC_VER_50: 4741 case RTL_GIGA_MAC_VER_50:
4907 case RTL_GIGA_MAC_VER_51: 4742 case RTL_GIGA_MAC_VER_51:
4908 RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); 4743 RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
@@ -4919,127 +4754,41 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
4919 r8168_phy_power_up(tp); 4754 r8168_phy_power_up(tp);
4920} 4755}
4921 4756
4922static void rtl_generic_op(struct rtl8169_private *tp,
4923 void (*op)(struct rtl8169_private *))
4924{
4925 if (op)
4926 op(tp);
4927}
4928
4929static void rtl_pll_power_down(struct rtl8169_private *tp) 4757static void rtl_pll_power_down(struct rtl8169_private *tp)
4930{ 4758{
4931 rtl_generic_op(tp, tp->pll_power_ops.down); 4759 switch (tp->mac_version) {
4760 case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
4761 case RTL_GIGA_MAC_VER_13 ... RTL_GIGA_MAC_VER_15:
4762 break;
4763 default:
4764 r8168_pll_power_down(tp);
4765 }
4932} 4766}
4933 4767
4934static void rtl_pll_power_up(struct rtl8169_private *tp) 4768static void rtl_pll_power_up(struct rtl8169_private *tp)
4935{ 4769{
4936 rtl_generic_op(tp, tp->pll_power_ops.up);
4937}
4938
4939static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
4940{
4941 struct pll_power_ops *ops = &tp->pll_power_ops;
4942
4943 switch (tp->mac_version) { 4770 switch (tp->mac_version) {
4944 case RTL_GIGA_MAC_VER_07: 4771 case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
4945 case RTL_GIGA_MAC_VER_08: 4772 case RTL_GIGA_MAC_VER_13 ... RTL_GIGA_MAC_VER_15:
4946 case RTL_GIGA_MAC_VER_09:
4947 case RTL_GIGA_MAC_VER_10:
4948 case RTL_GIGA_MAC_VER_16:
4949 case RTL_GIGA_MAC_VER_29:
4950 case RTL_GIGA_MAC_VER_30:
4951 case RTL_GIGA_MAC_VER_37:
4952 case RTL_GIGA_MAC_VER_39:
4953 case RTL_GIGA_MAC_VER_43:
4954 case RTL_GIGA_MAC_VER_47:
4955 case RTL_GIGA_MAC_VER_48:
4956 ops->down = r810x_pll_power_down;
4957 ops->up = r810x_pll_power_up;
4958 break; 4773 break;
4959
4960 case RTL_GIGA_MAC_VER_11:
4961 case RTL_GIGA_MAC_VER_12:
4962 case RTL_GIGA_MAC_VER_17:
4963 case RTL_GIGA_MAC_VER_18:
4964 case RTL_GIGA_MAC_VER_19:
4965 case RTL_GIGA_MAC_VER_20:
4966 case RTL_GIGA_MAC_VER_21:
4967 case RTL_GIGA_MAC_VER_22:
4968 case RTL_GIGA_MAC_VER_23:
4969 case RTL_GIGA_MAC_VER_24:
4970 case RTL_GIGA_MAC_VER_25:
4971 case RTL_GIGA_MAC_VER_26:
4972 case RTL_GIGA_MAC_VER_27:
4973 case RTL_GIGA_MAC_VER_28:
4974 case RTL_GIGA_MAC_VER_31:
4975 case RTL_GIGA_MAC_VER_32:
4976 case RTL_GIGA_MAC_VER_33:
4977 case RTL_GIGA_MAC_VER_34:
4978 case RTL_GIGA_MAC_VER_35:
4979 case RTL_GIGA_MAC_VER_36:
4980 case RTL_GIGA_MAC_VER_38:
4981 case RTL_GIGA_MAC_VER_40:
4982 case RTL_GIGA_MAC_VER_41:
4983 case RTL_GIGA_MAC_VER_42:
4984 case RTL_GIGA_MAC_VER_44:
4985 case RTL_GIGA_MAC_VER_45:
4986 case RTL_GIGA_MAC_VER_46:
4987 case RTL_GIGA_MAC_VER_49:
4988 case RTL_GIGA_MAC_VER_50:
4989 case RTL_GIGA_MAC_VER_51:
4990 ops->down = r8168_pll_power_down;
4991 ops->up = r8168_pll_power_up;
4992 break;
4993
4994 default: 4774 default:
4995 ops->down = NULL; 4775 r8168_pll_power_up(tp);
4996 ops->up = NULL;
4997 break;
4998 } 4776 }
4999} 4777}
5000 4778
5001static void rtl_init_rxcfg(struct rtl8169_private *tp) 4779static void rtl_init_rxcfg(struct rtl8169_private *tp)
5002{ 4780{
5003 switch (tp->mac_version) { 4781 switch (tp->mac_version) {
5004 case RTL_GIGA_MAC_VER_01: 4782 case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
5005 case RTL_GIGA_MAC_VER_02: 4783 case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
5006 case RTL_GIGA_MAC_VER_03:
5007 case RTL_GIGA_MAC_VER_04:
5008 case RTL_GIGA_MAC_VER_05:
5009 case RTL_GIGA_MAC_VER_06:
5010 case RTL_GIGA_MAC_VER_10:
5011 case RTL_GIGA_MAC_VER_11:
5012 case RTL_GIGA_MAC_VER_12:
5013 case RTL_GIGA_MAC_VER_13:
5014 case RTL_GIGA_MAC_VER_14:
5015 case RTL_GIGA_MAC_VER_15:
5016 case RTL_GIGA_MAC_VER_16:
5017 case RTL_GIGA_MAC_VER_17:
5018 RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); 4784 RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
5019 break; 4785 break;
5020 case RTL_GIGA_MAC_VER_18: 4786 case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
5021 case RTL_GIGA_MAC_VER_19:
5022 case RTL_GIGA_MAC_VER_20:
5023 case RTL_GIGA_MAC_VER_21:
5024 case RTL_GIGA_MAC_VER_22:
5025 case RTL_GIGA_MAC_VER_23:
5026 case RTL_GIGA_MAC_VER_24:
5027 case RTL_GIGA_MAC_VER_34: 4787 case RTL_GIGA_MAC_VER_34:
5028 case RTL_GIGA_MAC_VER_35: 4788 case RTL_GIGA_MAC_VER_35:
5029 RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); 4789 RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
5030 break; 4790 break;
5031 case RTL_GIGA_MAC_VER_40: 4791 case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
5032 case RTL_GIGA_MAC_VER_41:
5033 case RTL_GIGA_MAC_VER_42:
5034 case RTL_GIGA_MAC_VER_43:
5035 case RTL_GIGA_MAC_VER_44:
5036 case RTL_GIGA_MAC_VER_45:
5037 case RTL_GIGA_MAC_VER_46:
5038 case RTL_GIGA_MAC_VER_47:
5039 case RTL_GIGA_MAC_VER_48:
5040 case RTL_GIGA_MAC_VER_49:
5041 case RTL_GIGA_MAC_VER_50:
5042 case RTL_GIGA_MAC_VER_51:
5043 RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); 4792 RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
5044 break; 4793 break;
5045 default: 4794 default:
@@ -5055,16 +4804,20 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
5055 4804
5056static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) 4805static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
5057{ 4806{
5058 RTL_W8(tp, Cfg9346, Cfg9346_Unlock); 4807 if (tp->jumbo_ops.enable) {
5059 rtl_generic_op(tp, tp->jumbo_ops.enable); 4808 RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
5060 RTL_W8(tp, Cfg9346, Cfg9346_Lock); 4809 tp->jumbo_ops.enable(tp);
4810 RTL_W8(tp, Cfg9346, Cfg9346_Lock);
4811 }
5061} 4812}
5062 4813
5063static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) 4814static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
5064{ 4815{
5065 RTL_W8(tp, Cfg9346, Cfg9346_Unlock); 4816 if (tp->jumbo_ops.disable) {
5066 rtl_generic_op(tp, tp->jumbo_ops.disable); 4817 RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
5067 RTL_W8(tp, Cfg9346, Cfg9346_Lock); 4818 tp->jumbo_ops.disable(tp);
4819 RTL_W8(tp, Cfg9346, Cfg9346_Lock);
4820 }
5068} 4821}
5069 4822
5070static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) 4823static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
@@ -5176,18 +4929,7 @@ static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
5176 * No action needed for jumbo frames with 8169. 4929 * No action needed for jumbo frames with 8169.
5177 * No jumbo for 810x at all. 4930 * No jumbo for 810x at all.
5178 */ 4931 */
5179 case RTL_GIGA_MAC_VER_40: 4932 case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
5180 case RTL_GIGA_MAC_VER_41:
5181 case RTL_GIGA_MAC_VER_42:
5182 case RTL_GIGA_MAC_VER_43:
5183 case RTL_GIGA_MAC_VER_44:
5184 case RTL_GIGA_MAC_VER_45:
5185 case RTL_GIGA_MAC_VER_46:
5186 case RTL_GIGA_MAC_VER_47:
5187 case RTL_GIGA_MAC_VER_48:
5188 case RTL_GIGA_MAC_VER_49:
5189 case RTL_GIGA_MAC_VER_50:
5190 case RTL_GIGA_MAC_VER_51:
5191 default: 4933 default:
5192 ops->disable = NULL; 4934 ops->disable = NULL;
5193 ops->enable = NULL; 4935 ops->enable = NULL;
@@ -5273,32 +5015,21 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
5273 5015
5274 rtl_rx_close(tp); 5016 rtl_rx_close(tp);
5275 5017
5276 if (tp->mac_version == RTL_GIGA_MAC_VER_27 || 5018 switch (tp->mac_version) {
5277 tp->mac_version == RTL_GIGA_MAC_VER_28 || 5019 case RTL_GIGA_MAC_VER_27:
5278 tp->mac_version == RTL_GIGA_MAC_VER_31) { 5020 case RTL_GIGA_MAC_VER_28:
5021 case RTL_GIGA_MAC_VER_31:
5279 rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42); 5022 rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
5280 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 || 5023 break;
5281 tp->mac_version == RTL_GIGA_MAC_VER_35 || 5024 case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
5282 tp->mac_version == RTL_GIGA_MAC_VER_36 || 5025 case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
5283 tp->mac_version == RTL_GIGA_MAC_VER_37 ||
5284 tp->mac_version == RTL_GIGA_MAC_VER_38 ||
5285 tp->mac_version == RTL_GIGA_MAC_VER_40 ||
5286 tp->mac_version == RTL_GIGA_MAC_VER_41 ||
5287 tp->mac_version == RTL_GIGA_MAC_VER_42 ||
5288 tp->mac_version == RTL_GIGA_MAC_VER_43 ||
5289 tp->mac_version == RTL_GIGA_MAC_VER_44 ||
5290 tp->mac_version == RTL_GIGA_MAC_VER_45 ||
5291 tp->mac_version == RTL_GIGA_MAC_VER_46 ||
5292 tp->mac_version == RTL_GIGA_MAC_VER_47 ||
5293 tp->mac_version == RTL_GIGA_MAC_VER_48 ||
5294 tp->mac_version == RTL_GIGA_MAC_VER_49 ||
5295 tp->mac_version == RTL_GIGA_MAC_VER_50 ||
5296 tp->mac_version == RTL_GIGA_MAC_VER_51) {
5297 RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); 5026 RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
5298 rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); 5027 rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
5299 } else { 5028 break;
5029 default:
5300 RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); 5030 RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
5301 udelay(100); 5031 udelay(100);
5032 break;
5302 } 5033 }
5303 5034
5304 rtl_hw_reset(tp); 5035 rtl_hw_reset(tp);
@@ -5311,10 +5042,10 @@ static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
5311 (InterFrameGap << TxInterFrameGapShift)); 5042 (InterFrameGap << TxInterFrameGapShift));
5312} 5043}
5313 5044
5314static void rtl_hw_start(struct rtl8169_private *tp) 5045static void rtl_set_rx_max_size(struct rtl8169_private *tp)
5315{ 5046{
5316 tp->hw_start(tp); 5047 /* Low hurts. Let's disable the filtering. */
5317 rtl_irq_enable_all(tp); 5048 RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1);
5318} 5049}
5319 5050
5320static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) 5051static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
@@ -5330,19 +5061,23 @@ static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
5330 RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); 5061 RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
5331} 5062}
5332 5063
5333static u16 rtl_rw_cpluscmd(struct rtl8169_private *tp) 5064static void rtl_hw_start(struct rtl8169_private *tp)
5334{ 5065{
5335 u16 cmd; 5066 RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
5336 5067
5337 cmd = RTL_R16(tp, CPlusCmd); 5068 tp->hw_start(tp);
5338 RTL_W16(tp, CPlusCmd, cmd);
5339 return cmd;
5340}
5341 5069
5342static void rtl_set_rx_max_size(struct rtl8169_private *tp) 5070 rtl_set_rx_max_size(tp);
5343{ 5071 rtl_set_rx_tx_desc_registers(tp);
5344 /* Low hurts. Let's disable the filtering. */ 5072 rtl_set_rx_tx_config_registers(tp);
5345 RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1); 5073 RTL_W8(tp, Cfg9346, Cfg9346_Lock);
5074
5075 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
5076 RTL_R8(tp, IntrMask);
5077 RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
5078 /* no early-rx interrupts */
5079 RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
5080 rtl_irq_enable_all(tp);
5346} 5081}
5347 5082
5348static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version) 5083static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version)
@@ -5424,31 +5159,12 @@ static void rtl_set_rx_mode(struct net_device *dev)
5424 5159
5425static void rtl_hw_start_8169(struct rtl8169_private *tp) 5160static void rtl_hw_start_8169(struct rtl8169_private *tp)
5426{ 5161{
5427 if (tp->mac_version == RTL_GIGA_MAC_VER_05) { 5162 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
5428 RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) | PCIMulRW);
5429 pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); 5163 pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
5430 }
5431
5432 RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
5433 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
5434 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
5435 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
5436 tp->mac_version == RTL_GIGA_MAC_VER_04)
5437 RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
5438
5439 rtl_init_rxcfg(tp);
5440 5164
5441 RTL_W8(tp, EarlyTxThres, NoEarlyTx); 5165 RTL_W8(tp, EarlyTxThres, NoEarlyTx);
5442 5166
5443 rtl_set_rx_max_size(tp); 5167 tp->cp_cmd |= PCIMulRW;
5444
5445 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
5446 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
5447 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
5448 tp->mac_version == RTL_GIGA_MAC_VER_04)
5449 rtl_set_rx_tx_config_registers(tp);
5450
5451 tp->cp_cmd |= rtl_rw_cpluscmd(tp) | PCIMulRW;
5452 5168
5453 if (tp->mac_version == RTL_GIGA_MAC_VER_02 || 5169 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
5454 tp->mac_version == RTL_GIGA_MAC_VER_03) { 5170 tp->mac_version == RTL_GIGA_MAC_VER_03) {
@@ -5467,56 +5183,7 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
5467 */ 5183 */
5468 RTL_W16(tp, IntrMitigate, 0x0000); 5184 RTL_W16(tp, IntrMitigate, 0x0000);
5469 5185
5470 rtl_set_rx_tx_desc_registers(tp);
5471
5472 if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
5473 tp->mac_version != RTL_GIGA_MAC_VER_02 &&
5474 tp->mac_version != RTL_GIGA_MAC_VER_03 &&
5475 tp->mac_version != RTL_GIGA_MAC_VER_04) {
5476 RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
5477 rtl_set_rx_tx_config_registers(tp);
5478 }
5479
5480 RTL_W8(tp, Cfg9346, Cfg9346_Lock);
5481
5482 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
5483 RTL_R8(tp, IntrMask);
5484
5485 RTL_W32(tp, RxMissed, 0); 5186 RTL_W32(tp, RxMissed, 0);
5486
5487 rtl_set_rx_mode(tp->dev);
5488
5489 /* no early-rx interrupts */
5490 RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
5491}
5492
5493static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
5494{
5495 if (tp->csi_ops.write)
5496 tp->csi_ops.write(tp, addr, value);
5497}
5498
5499static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
5500{
5501 return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
5502}
5503
5504static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
5505{
5506 u32 csi;
5507
5508 csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
5509 rtl_csi_write(tp, 0x070c, csi | bits);
5510}
5511
5512static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
5513{
5514 rtl_csi_access_enable(tp, 0x17000000);
5515}
5516
5517static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
5518{
5519 rtl_csi_access_enable(tp, 0x27000000);
5520} 5187}
5521 5188
5522DECLARE_RTL_COND(rtl_csiar_cond) 5189DECLARE_RTL_COND(rtl_csiar_cond)
@@ -5524,101 +5191,55 @@ DECLARE_RTL_COND(rtl_csiar_cond)
5524 return RTL_R32(tp, CSIAR) & CSIAR_FLAG; 5191 return RTL_R32(tp, CSIAR) & CSIAR_FLAG;
5525} 5192}
5526 5193
5527static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value) 5194static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
5528{ 5195{
5196 u32 func = PCI_FUNC(tp->pci_dev->devfn);
5197
5529 RTL_W32(tp, CSIDR, value); 5198 RTL_W32(tp, CSIDR, value);
5530 RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | 5199 RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
5531 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); 5200 CSIAR_BYTE_ENABLE | func << 16);
5532 5201
5533 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); 5202 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
5534} 5203}
5535 5204
5536static u32 r8169_csi_read(struct rtl8169_private *tp, int addr) 5205static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
5537{ 5206{
5538 RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | 5207 u32 func = PCI_FUNC(tp->pci_dev->devfn);
5539 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); 5208
5209 RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 |
5210 CSIAR_BYTE_ENABLE);
5540 5211
5541 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? 5212 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
5542 RTL_R32(tp, CSIDR) : ~0; 5213 RTL_R32(tp, CSIDR) : ~0;
5543} 5214}
5544 5215
5545static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value) 5216static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val)
5546{ 5217{
5547 RTL_W32(tp, CSIDR, value); 5218 struct pci_dev *pdev = tp->pci_dev;
5548 RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | 5219 u32 csi;
5549 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
5550 CSIAR_FUNC_NIC);
5551
5552 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
5553}
5554 5220
5555static u32 r8402_csi_read(struct rtl8169_private *tp, int addr) 5221 /* According to Realtek the value at config space address 0x070f
5556{ 5222 * controls the L0s/L1 entrance latency. We try standard ECAM access
5557 RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC | 5223 * first and if it fails fall back to CSI.
5558 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); 5224 */
5225 if (pdev->cfg_size > 0x070f &&
5226 pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL)
5227 return;
5559 5228
5560 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? 5229 netdev_notice_once(tp->dev,
5561 RTL_R32(tp, CSIDR) : ~0; 5230 "No native access to PCI extended config space, falling back to CSI\n");
5231 csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
5232 rtl_csi_write(tp, 0x070c, csi | val << 24);
5562} 5233}
5563 5234
5564static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value) 5235static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
5565{ 5236{
5566 RTL_W32(tp, CSIDR, value); 5237 rtl_csi_access_enable(tp, 0x17);
5567 RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
5568 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
5569 CSIAR_FUNC_NIC2);
5570
5571 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
5572} 5238}
5573 5239
5574static u32 r8411_csi_read(struct rtl8169_private *tp, int addr) 5240static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
5575{ 5241{
5576 RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 | 5242 rtl_csi_access_enable(tp, 0x27);
5577 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
5578
5579 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
5580 RTL_R32(tp, CSIDR) : ~0;
5581}
5582
5583static void rtl_init_csi_ops(struct rtl8169_private *tp)
5584{
5585 struct csi_ops *ops = &tp->csi_ops;
5586
5587 switch (tp->mac_version) {
5588 case RTL_GIGA_MAC_VER_01:
5589 case RTL_GIGA_MAC_VER_02:
5590 case RTL_GIGA_MAC_VER_03:
5591 case RTL_GIGA_MAC_VER_04:
5592 case RTL_GIGA_MAC_VER_05:
5593 case RTL_GIGA_MAC_VER_06:
5594 case RTL_GIGA_MAC_VER_10:
5595 case RTL_GIGA_MAC_VER_11:
5596 case RTL_GIGA_MAC_VER_12:
5597 case RTL_GIGA_MAC_VER_13:
5598 case RTL_GIGA_MAC_VER_14:
5599 case RTL_GIGA_MAC_VER_15:
5600 case RTL_GIGA_MAC_VER_16:
5601 case RTL_GIGA_MAC_VER_17:
5602 ops->write = NULL;
5603 ops->read = NULL;
5604 break;
5605
5606 case RTL_GIGA_MAC_VER_37:
5607 case RTL_GIGA_MAC_VER_38:
5608 ops->write = r8402_csi_write;
5609 ops->read = r8402_csi_read;
5610 break;
5611
5612 case RTL_GIGA_MAC_VER_44:
5613 ops->write = r8411_csi_write;
5614 ops->read = r8411_csi_read;
5615 break;
5616
5617 default:
5618 ops->write = r8169_csi_write;
5619 ops->read = r8169_csi_read;
5620 break;
5621 }
5622} 5243}
5623 5244
5624struct ephy_info { 5245struct ephy_info {
@@ -5665,22 +5286,12 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
5665 RTL_W8(tp, Config3, data); 5286 RTL_W8(tp, Config3, data);
5666} 5287}
5667 5288
5668#define R8168_CPCMD_QUIRK_MASK (\
5669 EnableBist | \
5670 Mac_dbgo_oe | \
5671 Force_half_dup | \
5672 Force_rxflow_en | \
5673 Force_txflow_en | \
5674 Cxpl_dbg_sel | \
5675 ASF | \
5676 PktCntrDisable | \
5677 Mac_dbgo_sel)
5678
5679static void rtl_hw_start_8168bb(struct rtl8169_private *tp) 5289static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
5680{ 5290{
5681 RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); 5291 RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
5682 5292
5683 RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 5293 tp->cp_cmd &= CPCMD_QUIRK_MASK;
5294 RTL_W16(tp, CPlusCmd, tp->cp_cmd);
5684 5295
5685 if (tp->dev->mtu <= ETH_DATA_LEN) { 5296 if (tp->dev->mtu <= ETH_DATA_LEN) {
5686 rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B | 5297 rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B |
@@ -5708,7 +5319,8 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
5708 5319
5709 rtl_disable_clock_request(tp); 5320 rtl_disable_clock_request(tp);
5710 5321
5711 RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 5322 tp->cp_cmd &= CPCMD_QUIRK_MASK;
5323 RTL_W16(tp, CPlusCmd, tp->cp_cmd);
5712} 5324}
5713 5325
5714static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) 5326static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
@@ -5737,7 +5349,8 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
5737 if (tp->dev->mtu <= ETH_DATA_LEN) 5349 if (tp->dev->mtu <= ETH_DATA_LEN)
5738 rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); 5350 rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
5739 5351
5740 RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 5352 tp->cp_cmd &= CPCMD_QUIRK_MASK;
5353 RTL_W16(tp, CPlusCmd, tp->cp_cmd);
5741} 5354}
5742 5355
5743static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) 5356static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
@@ -5754,7 +5367,8 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
5754 if (tp->dev->mtu <= ETH_DATA_LEN) 5367 if (tp->dev->mtu <= ETH_DATA_LEN)
5755 rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); 5368 rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
5756 5369
5757 RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 5370 tp->cp_cmd &= CPCMD_QUIRK_MASK;
5371 RTL_W16(tp, CPlusCmd, tp->cp_cmd);
5758} 5372}
5759 5373
5760static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) 5374static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
@@ -5811,7 +5425,8 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
5811 if (tp->dev->mtu <= ETH_DATA_LEN) 5425 if (tp->dev->mtu <= ETH_DATA_LEN)
5812 rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); 5426 rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
5813 5427
5814 RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 5428 tp->cp_cmd &= CPCMD_QUIRK_MASK;
5429 RTL_W16(tp, CPlusCmd, tp->cp_cmd);
5815} 5430}
5816 5431
5817static void rtl_hw_start_8168dp(struct rtl8169_private *tp) 5432static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
@@ -6274,14 +5889,10 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
6274 5889
6275static void rtl_hw_start_8168(struct rtl8169_private *tp) 5890static void rtl_hw_start_8168(struct rtl8169_private *tp)
6276{ 5891{
6277 RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
6278
6279 RTL_W8(tp, MaxTxPacketSize, TxPacketMax); 5892 RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
6280 5893
6281 rtl_set_rx_max_size(tp); 5894 tp->cp_cmd &= ~INTT_MASK;
6282 5895 tp->cp_cmd |= PktCntrDisable | INTT_1;
6283 tp->cp_cmd |= RTL_R16(tp, CPlusCmd) | PktCntrDisable | INTT_1;
6284
6285 RTL_W16(tp, CPlusCmd, tp->cp_cmd); 5896 RTL_W16(tp, CPlusCmd, tp->cp_cmd);
6286 5897
6287 RTL_W16(tp, IntrMitigate, 0x5151); 5898 RTL_W16(tp, IntrMitigate, 0x5151);
@@ -6292,12 +5903,6 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
6292 tp->event_slow &= ~RxOverflow; 5903 tp->event_slow &= ~RxOverflow;
6293 } 5904 }
6294 5905
6295 rtl_set_rx_tx_desc_registers(tp);
6296
6297 rtl_set_rx_tx_config_registers(tp);
6298
6299 RTL_R8(tp, IntrMask);
6300
6301 switch (tp->mac_version) { 5906 switch (tp->mac_version) {
6302 case RTL_GIGA_MAC_VER_11: 5907 case RTL_GIGA_MAC_VER_11:
6303 rtl_hw_start_8168bb(tp); 5908 rtl_hw_start_8168bb(tp);
@@ -6401,27 +6006,8 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
6401 tp->dev->name, tp->mac_version); 6006 tp->dev->name, tp->mac_version);
6402 break; 6007 break;
6403 } 6008 }
6404
6405 RTL_W8(tp, Cfg9346, Cfg9346_Lock);
6406
6407 RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
6408
6409 rtl_set_rx_mode(tp->dev);
6410
6411 RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
6412} 6009}
6413 6010
6414#define R810X_CPCMD_QUIRK_MASK (\
6415 EnableBist | \
6416 Mac_dbgo_oe | \
6417 Force_half_dup | \
6418 Force_rxflow_en | \
6419 Force_txflow_en | \
6420 Cxpl_dbg_sel | \
6421 ASF | \
6422 PktCntrDisable | \
6423 Mac_dbgo_sel)
6424
6425static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) 6011static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
6426{ 6012{
6427 static const struct ephy_info e_info_8102e_1[] = { 6013 static const struct ephy_info e_info_8102e_1[] = {
@@ -6555,19 +6141,11 @@ static void rtl_hw_start_8101(struct rtl8169_private *tp)
6555 pcie_capability_set_word(tp->pci_dev, PCI_EXP_DEVCTL, 6141 pcie_capability_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
6556 PCI_EXP_DEVCTL_NOSNOOP_EN); 6142 PCI_EXP_DEVCTL_NOSNOOP_EN);
6557 6143
6558 RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
6559
6560 RTL_W8(tp, MaxTxPacketSize, TxPacketMax); 6144 RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
6561 6145
6562 rtl_set_rx_max_size(tp); 6146 tp->cp_cmd &= CPCMD_QUIRK_MASK;
6563
6564 tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
6565 RTL_W16(tp, CPlusCmd, tp->cp_cmd); 6147 RTL_W16(tp, CPlusCmd, tp->cp_cmd);
6566 6148
6567 rtl_set_rx_tx_desc_registers(tp);
6568
6569 rtl_set_rx_tx_config_registers(tp);
6570
6571 switch (tp->mac_version) { 6149 switch (tp->mac_version) {
6572 case RTL_GIGA_MAC_VER_07: 6150 case RTL_GIGA_MAC_VER_07:
6573 rtl_hw_start_8102e_1(tp); 6151 rtl_hw_start_8102e_1(tp);
@@ -6604,17 +6182,7 @@ static void rtl_hw_start_8101(struct rtl8169_private *tp)
6604 break; 6182 break;
6605 } 6183 }
6606 6184
6607 RTL_W8(tp, Cfg9346, Cfg9346_Lock);
6608
6609 RTL_W16(tp, IntrMitigate, 0x0000); 6185 RTL_W16(tp, IntrMitigate, 0x0000);
6610
6611 RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
6612
6613 rtl_set_rx_mode(tp->dev);
6614
6615 RTL_R8(tp, IntrMask);
6616
6617 RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
6618} 6186}
6619 6187
6620static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) 6188static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
@@ -6942,18 +6510,6 @@ static int msdn_giant_send_check(struct sk_buff *skb)
6942 return ret; 6510 return ret;
6943} 6511}
6944 6512
6945static inline __be16 get_protocol(struct sk_buff *skb)
6946{
6947 __be16 protocol;
6948
6949 if (skb->protocol == htons(ETH_P_8021Q))
6950 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
6951 else
6952 protocol = skb->protocol;
6953
6954 return protocol;
6955}
6956
6957static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp, 6513static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp,
6958 struct sk_buff *skb, u32 *opts) 6514 struct sk_buff *skb, u32 *opts)
6959{ 6515{
@@ -6990,7 +6546,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
6990 return false; 6546 return false;
6991 } 6547 }
6992 6548
6993 switch (get_protocol(skb)) { 6549 switch (vlan_get_protocol(skb)) {
6994 case htons(ETH_P_IP): 6550 case htons(ETH_P_IP):
6995 opts[0] |= TD1_GTSENV4; 6551 opts[0] |= TD1_GTSENV4;
6996 break; 6552 break;
@@ -7022,7 +6578,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
7022 return false; 6578 return false;
7023 } 6579 }
7024 6580
7025 switch (get_protocol(skb)) { 6581 switch (vlan_get_protocol(skb)) {
7026 case htons(ETH_P_IP): 6582 case htons(ETH_P_IP):
7027 opts[1] |= TD1_IPv4_CS; 6583 opts[1] |= TD1_IPv4_CS;
7028 ip_protocol = ip_hdr(skb)->protocol; 6584 ip_protocol = ip_hdr(skb)->protocol;
@@ -7637,8 +7193,6 @@ static int rtl_open(struct net_device *dev)
7637 7193
7638 rtl8169_init_phy(dev, tp); 7194 rtl8169_init_phy(dev, tp);
7639 7195
7640 __rtl8169_set_features(dev, dev->features);
7641
7642 rtl_pll_power_up(tp); 7196 rtl_pll_power_up(tp);
7643 7197
7644 rtl_hw_start(tp); 7198 rtl_hw_start(tp);
@@ -8045,20 +7599,10 @@ static void rtl_hw_init_8168ep(struct rtl8169_private *tp)
8045static void rtl_hw_initialize(struct rtl8169_private *tp) 7599static void rtl_hw_initialize(struct rtl8169_private *tp)
8046{ 7600{
8047 switch (tp->mac_version) { 7601 switch (tp->mac_version) {
8048 case RTL_GIGA_MAC_VER_40: 7602 case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
8049 case RTL_GIGA_MAC_VER_41:
8050 case RTL_GIGA_MAC_VER_42:
8051 case RTL_GIGA_MAC_VER_43:
8052 case RTL_GIGA_MAC_VER_44:
8053 case RTL_GIGA_MAC_VER_45:
8054 case RTL_GIGA_MAC_VER_46:
8055 case RTL_GIGA_MAC_VER_47:
8056 case RTL_GIGA_MAC_VER_48:
8057 rtl_hw_init_8168g(tp); 7603 rtl_hw_init_8168g(tp);
8058 break; 7604 break;
8059 case RTL_GIGA_MAC_VER_49: 7605 case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_51:
8060 case RTL_GIGA_MAC_VER_50:
8061 case RTL_GIGA_MAC_VER_51:
8062 rtl_hw_init_8168ep(tp); 7606 rtl_hw_init_8168ep(tp);
8063 break; 7607 break;
8064 default: 7608 default:
@@ -8141,7 +7685,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8141 /* Identify chip attached to board */ 7685 /* Identify chip attached to board */
8142 rtl8169_get_mac_version(tp, cfg->default_ver); 7686 rtl8169_get_mac_version(tp, cfg->default_ver);
8143 7687
8144 tp->cp_cmd = 0; 7688 tp->cp_cmd = RTL_R16(tp, CPlusCmd);
8145 7689
8146 if ((sizeof(dma_addr_t) > 4) && 7690 if ((sizeof(dma_addr_t) > 4) &&
8147 (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) && 7691 (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) &&
@@ -8174,9 +7718,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8174 pci_set_master(pdev); 7718 pci_set_master(pdev);
8175 7719
8176 rtl_init_mdio_ops(tp); 7720 rtl_init_mdio_ops(tp);
8177 rtl_init_pll_power_ops(tp);
8178 rtl_init_jumbo_ops(tp); 7721 rtl_init_jumbo_ops(tp);
8179 rtl_init_csi_ops(tp);
8180 7722
8181 rtl8169_print_mac_version(tp); 7723 rtl8169_print_mac_version(tp);
8182 7724
@@ -8212,29 +7754,18 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8212 u64_stats_init(&tp->tx_stats.syncp); 7754 u64_stats_init(&tp->tx_stats.syncp);
8213 7755
8214 /* Get MAC address */ 7756 /* Get MAC address */
8215 if (tp->mac_version == RTL_GIGA_MAC_VER_35 || 7757 switch (tp->mac_version) {
8216 tp->mac_version == RTL_GIGA_MAC_VER_36 || 7758 u8 mac_addr[ETH_ALEN] __aligned(4);
8217 tp->mac_version == RTL_GIGA_MAC_VER_37 || 7759 case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38:
8218 tp->mac_version == RTL_GIGA_MAC_VER_38 || 7760 case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
8219 tp->mac_version == RTL_GIGA_MAC_VER_40 ||
8220 tp->mac_version == RTL_GIGA_MAC_VER_41 ||
8221 tp->mac_version == RTL_GIGA_MAC_VER_42 ||
8222 tp->mac_version == RTL_GIGA_MAC_VER_43 ||
8223 tp->mac_version == RTL_GIGA_MAC_VER_44 ||
8224 tp->mac_version == RTL_GIGA_MAC_VER_45 ||
8225 tp->mac_version == RTL_GIGA_MAC_VER_46 ||
8226 tp->mac_version == RTL_GIGA_MAC_VER_47 ||
8227 tp->mac_version == RTL_GIGA_MAC_VER_48 ||
8228 tp->mac_version == RTL_GIGA_MAC_VER_49 ||
8229 tp->mac_version == RTL_GIGA_MAC_VER_50 ||
8230 tp->mac_version == RTL_GIGA_MAC_VER_51) {
8231 u16 mac_addr[3];
8232
8233 *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC); 7761 *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC);
8234 *(u16 *)&mac_addr[2] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC); 7762 *(u16 *)&mac_addr[4] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC);
8235 7763
8236 if (is_valid_ether_addr((u8 *)mac_addr)) 7764 if (is_valid_ether_addr(mac_addr))
8237 rtl_rar_set(tp, (u8 *)mac_addr); 7765 rtl_rar_set(tp, mac_addr);
7766 break;
7767 default:
7768 break;
8238 } 7769 }
8239 for (i = 0; i < ETH_ALEN; i++) 7770 for (i = 0; i < ETH_ALEN; i++)
8240 dev->dev_addr[i] = RTL_R8(tp, MAC0 + i); 7771 dev->dev_addr[i] = RTL_R8(tp, MAC0 + i);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index b6b90a6314e3..5970d9e5ddf1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -442,12 +442,22 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
442static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, 442static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
443 int enum_index) 443 int enum_index)
444{ 444{
445 iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]); 445 u16 offset = mdp->reg_offset[enum_index];
446
447 if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
448 return;
449
450 iowrite32(data, mdp->tsu_addr + offset);
446} 451}
447 452
448static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) 453static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
449{ 454{
450 return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]); 455 u16 offset = mdp->reg_offset[enum_index];
456
457 if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
458 return ~0U;
459
460 return ioread32(mdp->tsu_addr + offset);
451} 461}
452 462
453static void sh_eth_select_mii(struct net_device *ndev) 463static void sh_eth_select_mii(struct net_device *ndev)
@@ -2610,12 +2620,6 @@ static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
2610} 2620}
2611 2621
2612/* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */ 2622/* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */
2613static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2614 int entry)
2615{
2616 return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2617}
2618
2619static u32 sh_eth_tsu_get_post_mask(int entry) 2623static u32 sh_eth_tsu_get_post_mask(int entry)
2620{ 2624{
2621 return 0x0f << (28 - ((entry % 8) * 4)); 2625 return 0x0f << (28 - ((entry % 8) * 4));
@@ -2630,27 +2634,25 @@ static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2630 int entry) 2634 int entry)
2631{ 2635{
2632 struct sh_eth_private *mdp = netdev_priv(ndev); 2636 struct sh_eth_private *mdp = netdev_priv(ndev);
2637 int reg = TSU_POST1 + entry / 8;
2633 u32 tmp; 2638 u32 tmp;
2634 void *reg_offset;
2635 2639
2636 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); 2640 tmp = sh_eth_tsu_read(mdp, reg);
2637 tmp = ioread32(reg_offset); 2641 sh_eth_tsu_write(mdp, tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg);
2638 iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2639} 2642}
2640 2643
2641static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev, 2644static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2642 int entry) 2645 int entry)
2643{ 2646{
2644 struct sh_eth_private *mdp = netdev_priv(ndev); 2647 struct sh_eth_private *mdp = netdev_priv(ndev);
2648 int reg = TSU_POST1 + entry / 8;
2645 u32 post_mask, ref_mask, tmp; 2649 u32 post_mask, ref_mask, tmp;
2646 void *reg_offset;
2647 2650
2648 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2649 post_mask = sh_eth_tsu_get_post_mask(entry); 2651 post_mask = sh_eth_tsu_get_post_mask(entry);
2650 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask; 2652 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2651 2653
2652 tmp = ioread32(reg_offset); 2654 tmp = sh_eth_tsu_read(mdp, reg);
2653 iowrite32(tmp & ~post_mask, reg_offset); 2655 sh_eth_tsu_write(mdp, tmp & ~post_mask, reg);
2654 2656
2655 /* If other port enables, the function returns "true" */ 2657 /* If other port enables, the function returns "true" */
2656 return tmp & ref_mask; 2658 return tmp & ref_mask;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 056cb6093630..152d6948611c 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2783,6 +2783,8 @@ static int rocker_switchdev_event(struct notifier_block *unused,
2783 switch (event) { 2783 switch (event) {
2784 case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ 2784 case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
2785 case SWITCHDEV_FDB_DEL_TO_DEVICE: 2785 case SWITCHDEV_FDB_DEL_TO_DEVICE:
2786 if (!fdb_info->added_by_user)
2787 break;
2786 memcpy(&switchdev_work->fdb_info, ptr, 2788 memcpy(&switchdev_work->fdb_info, ptr,
2787 sizeof(switchdev_work->fdb_info)); 2789 sizeof(switchdev_work->fdb_info));
2788 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); 2790 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 63036d9bf3e6..d90a7b1f4088 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4784,8 +4784,9 @@ expire:
4784 * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that 4784 * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
4785 * the rule is not removed by efx_rps_hash_del() below. 4785 * the rule is not removed by efx_rps_hash_del() below.
4786 */ 4786 */
4787 ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority, 4787 if (ret)
4788 filter_idx, true) == 0; 4788 ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
4789 filter_idx, true) == 0;
4789 /* While we can't safely dereference rule (we dropped the lock), we can 4790 /* While we can't safely dereference rule (we dropped the lock), we can
4790 * still test it for NULL. 4791 * still test it for NULL.
4791 */ 4792 */
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 64a94f242027..d2e254f2f72b 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -839,6 +839,8 @@ static void efx_filter_rfs_work(struct work_struct *data)
839 int rc; 839 int rc;
840 840
841 rc = efx->type->filter_insert(efx, &req->spec, true); 841 rc = efx->type->filter_insert(efx, &req->spec, true);
842 if (rc >= 0)
843 rc %= efx->type->max_rx_ip_filters;
842 if (efx->rps_hash_table) { 844 if (efx->rps_hash_table) {
843 spin_lock_bh(&efx->rps_hash_lock); 845 spin_lock_bh(&efx->rps_hash_lock);
844 rule = efx_rps_hash_find(efx, &req->spec); 846 rule = efx_rps_hash_find(efx, &req->spec);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 7cb794094a70..4ff231df7322 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -18,6 +18,7 @@
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/ioport.h> 19#include <linux/ioport.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/of_device.h>
21#include <linux/of_net.h> 22#include <linux/of_net.h>
22#include <linux/mfd/syscon.h> 23#include <linux/mfd/syscon.h>
23#include <linux/platform_device.h> 24#include <linux/platform_device.h>
@@ -29,6 +30,10 @@
29 30
30#define PRG_ETH0_RGMII_MODE BIT(0) 31#define PRG_ETH0_RGMII_MODE BIT(0)
31 32
33#define PRG_ETH0_EXT_PHY_MODE_MASK GENMASK(2, 0)
34#define PRG_ETH0_EXT_RGMII_MODE 1
35#define PRG_ETH0_EXT_RMII_MODE 4
36
32/* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */ 37/* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
33#define PRG_ETH0_CLK_M250_SEL_SHIFT 4 38#define PRG_ETH0_CLK_M250_SEL_SHIFT 4
34#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4) 39#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
@@ -47,12 +52,20 @@
47 52
48#define MUX_CLK_NUM_PARENTS 2 53#define MUX_CLK_NUM_PARENTS 2
49 54
55struct meson8b_dwmac;
56
57struct meson8b_dwmac_data {
58 int (*set_phy_mode)(struct meson8b_dwmac *dwmac);
59};
60
50struct meson8b_dwmac { 61struct meson8b_dwmac {
51 struct device *dev; 62 struct device *dev;
52 void __iomem *regs; 63 void __iomem *regs;
53 phy_interface_t phy_mode; 64
54 struct clk *rgmii_tx_clk; 65 const struct meson8b_dwmac_data *data;
55 u32 tx_delay_ns; 66 phy_interface_t phy_mode;
67 struct clk *rgmii_tx_clk;
68 u32 tx_delay_ns;
56}; 69};
57 70
58struct meson8b_dwmac_clk_configs { 71struct meson8b_dwmac_clk_configs {
@@ -171,6 +184,59 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
171 return 0; 184 return 0;
172} 185}
173 186
187static int meson8b_set_phy_mode(struct meson8b_dwmac *dwmac)
188{
189 switch (dwmac->phy_mode) {
190 case PHY_INTERFACE_MODE_RGMII:
191 case PHY_INTERFACE_MODE_RGMII_RXID:
192 case PHY_INTERFACE_MODE_RGMII_ID:
193 case PHY_INTERFACE_MODE_RGMII_TXID:
194 /* enable RGMII mode */
195 meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
196 PRG_ETH0_RGMII_MODE,
197 PRG_ETH0_RGMII_MODE);
198 break;
199 case PHY_INTERFACE_MODE_RMII:
200 /* disable RGMII mode -> enables RMII mode */
201 meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
202 PRG_ETH0_RGMII_MODE, 0);
203 break;
204 default:
205 dev_err(dwmac->dev, "fail to set phy-mode %s\n",
206 phy_modes(dwmac->phy_mode));
207 return -EINVAL;
208 }
209
210 return 0;
211}
212
213static int meson_axg_set_phy_mode(struct meson8b_dwmac *dwmac)
214{
215 switch (dwmac->phy_mode) {
216 case PHY_INTERFACE_MODE_RGMII:
217 case PHY_INTERFACE_MODE_RGMII_RXID:
218 case PHY_INTERFACE_MODE_RGMII_ID:
219 case PHY_INTERFACE_MODE_RGMII_TXID:
220 /* enable RGMII mode */
221 meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
222 PRG_ETH0_EXT_PHY_MODE_MASK,
223 PRG_ETH0_EXT_RGMII_MODE);
224 break;
225 case PHY_INTERFACE_MODE_RMII:
226 /* disable RGMII mode -> enables RMII mode */
227 meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
228 PRG_ETH0_EXT_PHY_MODE_MASK,
229 PRG_ETH0_EXT_RMII_MODE);
230 break;
231 default:
232 dev_err(dwmac->dev, "fail to set phy-mode %s\n",
233 phy_modes(dwmac->phy_mode));
234 return -EINVAL;
235 }
236
237 return 0;
238}
239
174static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) 240static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
175{ 241{
176 int ret; 242 int ret;
@@ -188,10 +254,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
188 254
189 case PHY_INTERFACE_MODE_RGMII_ID: 255 case PHY_INTERFACE_MODE_RGMII_ID:
190 case PHY_INTERFACE_MODE_RGMII_TXID: 256 case PHY_INTERFACE_MODE_RGMII_TXID:
191 /* enable RGMII mode */
192 meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
193 PRG_ETH0_RGMII_MODE);
194
195 /* only relevant for RMII mode -> disable in RGMII mode */ 257 /* only relevant for RMII mode -> disable in RGMII mode */
196 meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, 258 meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
197 PRG_ETH0_INVERTED_RMII_CLK, 0); 259 PRG_ETH0_INVERTED_RMII_CLK, 0);
@@ -224,10 +286,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
224 break; 286 break;
225 287
226 case PHY_INTERFACE_MODE_RMII: 288 case PHY_INTERFACE_MODE_RMII:
227 /* disable RGMII mode -> enables RMII mode */
228 meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
229 0);
230
231 /* invert internal clk_rmii_i to generate 25/2.5 tx_rx_clk */ 289 /* invert internal clk_rmii_i to generate 25/2.5 tx_rx_clk */
232 meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, 290 meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
233 PRG_ETH0_INVERTED_RMII_CLK, 291 PRG_ETH0_INVERTED_RMII_CLK,
@@ -274,6 +332,11 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
274 goto err_remove_config_dt; 332 goto err_remove_config_dt;
275 } 333 }
276 334
335 dwmac->data = (const struct meson8b_dwmac_data *)
336 of_device_get_match_data(&pdev->dev);
337 if (!dwmac->data)
338 return -EINVAL;
339
277 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 340 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
278 dwmac->regs = devm_ioremap_resource(&pdev->dev, res); 341 dwmac->regs = devm_ioremap_resource(&pdev->dev, res);
279 if (IS_ERR(dwmac->regs)) { 342 if (IS_ERR(dwmac->regs)) {
@@ -298,6 +361,10 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
298 if (ret) 361 if (ret)
299 goto err_remove_config_dt; 362 goto err_remove_config_dt;
300 363
364 ret = dwmac->data->set_phy_mode(dwmac);
365 if (ret)
366 goto err_remove_config_dt;
367
301 ret = meson8b_init_prg_eth(dwmac); 368 ret = meson8b_init_prg_eth(dwmac);
302 if (ret) 369 if (ret)
303 goto err_remove_config_dt; 370 goto err_remove_config_dt;
@@ -316,10 +383,31 @@ err_remove_config_dt:
316 return ret; 383 return ret;
317} 384}
318 385
386static const struct meson8b_dwmac_data meson8b_dwmac_data = {
387 .set_phy_mode = meson8b_set_phy_mode,
388};
389
390static const struct meson8b_dwmac_data meson_axg_dwmac_data = {
391 .set_phy_mode = meson_axg_set_phy_mode,
392};
393
319static const struct of_device_id meson8b_dwmac_match[] = { 394static const struct of_device_id meson8b_dwmac_match[] = {
320 { .compatible = "amlogic,meson8b-dwmac" }, 395 {
321 { .compatible = "amlogic,meson8m2-dwmac" }, 396 .compatible = "amlogic,meson8b-dwmac",
322 { .compatible = "amlogic,meson-gxbb-dwmac" }, 397 .data = &meson8b_dwmac_data,
398 },
399 {
400 .compatible = "amlogic,meson8m2-dwmac",
401 .data = &meson8b_dwmac_data,
402 },
403 {
404 .compatible = "amlogic,meson-gxbb-dwmac",
405 .data = &meson8b_dwmac_data,
406 },
407 {
408 .compatible = "amlogic,meson-axg-dwmac",
409 .data = &meson_axg_dwmac_data,
410 },
323 { } 411 { }
324}; 412};
325MODULE_DEVICE_TABLE(of, meson8b_dwmac_match); 413MODULE_DEVICE_TABLE(of, meson8b_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0135fd3aa6ef..b935478df55f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2022,7 +2022,11 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2022 tx_channel_count : rx_channel_count; 2022 tx_channel_count : rx_channel_count;
2023 u32 chan; 2023 u32 chan;
2024 bool poll_scheduled = false; 2024 bool poll_scheduled = false;
2025 int status[channels_to_check]; 2025 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2026
2027 /* Make sure we never check beyond our status buffer. */
2028 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2029 channels_to_check = ARRAY_SIZE(status);
2026 2030
2027 /* Each DMA channel can be used for rx and tx simultaneously, yet 2031 /* Each DMA channel can be used for rx and tx simultaneously, yet
2028 * napi_struct is embedded in struct stmmac_rx_queue rather than in a 2032 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 74f828412055..28d893b93d30 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1340,6 +1340,8 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
1340 cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, 1340 cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
1341 HOST_PORT_NUM, ALE_VLAN | 1341 HOST_PORT_NUM, ALE_VLAN |
1342 ALE_SECURE, slave->port_vlan); 1342 ALE_SECURE, slave->port_vlan);
1343 cpsw_ale_control_set(cpsw->ale, slave_port,
1344 ALE_PORT_DROP_UNKNOWN_VLAN, 1);
1343} 1345}
1344 1346
1345static void soft_reset_slave(struct cpsw_slave *slave) 1347static void soft_reset_slave(struct cpsw_slave *slave)
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index e7308958b7a9..d2ee66c259a7 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -652,16 +652,14 @@ static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
652 sync_change_bit(index, net_device->send_section_map); 652 sync_change_bit(index, net_device->send_section_map);
653} 653}
654 654
655static void netvsc_send_tx_complete(struct netvsc_device *net_device, 655static void netvsc_send_tx_complete(struct net_device *ndev,
656 struct vmbus_channel *incoming_channel, 656 struct netvsc_device *net_device,
657 struct hv_device *device, 657 struct vmbus_channel *channel,
658 const struct vmpacket_descriptor *desc, 658 const struct vmpacket_descriptor *desc,
659 int budget) 659 int budget)
660{ 660{
661 struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id; 661 struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
662 struct net_device *ndev = hv_get_drvdata(device);
663 struct net_device_context *ndev_ctx = netdev_priv(ndev); 662 struct net_device_context *ndev_ctx = netdev_priv(ndev);
664 struct vmbus_channel *channel = device->channel;
665 u16 q_idx = 0; 663 u16 q_idx = 0;
666 int queue_sends; 664 int queue_sends;
667 665
@@ -675,7 +673,6 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
675 if (send_index != NETVSC_INVALID_INDEX) 673 if (send_index != NETVSC_INVALID_INDEX)
676 netvsc_free_send_slot(net_device, send_index); 674 netvsc_free_send_slot(net_device, send_index);
677 q_idx = packet->q_idx; 675 q_idx = packet->q_idx;
678 channel = incoming_channel;
679 676
680 tx_stats = &net_device->chan_table[q_idx].tx_stats; 677 tx_stats = &net_device->chan_table[q_idx].tx_stats;
681 678
@@ -705,14 +702,13 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
705 } 702 }
706} 703}
707 704
708static void netvsc_send_completion(struct netvsc_device *net_device, 705static void netvsc_send_completion(struct net_device *ndev,
706 struct netvsc_device *net_device,
709 struct vmbus_channel *incoming_channel, 707 struct vmbus_channel *incoming_channel,
710 struct hv_device *device,
711 const struct vmpacket_descriptor *desc, 708 const struct vmpacket_descriptor *desc,
712 int budget) 709 int budget)
713{ 710{
714 struct nvsp_message *nvsp_packet = hv_pkt_data(desc); 711 const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
715 struct net_device *ndev = hv_get_drvdata(device);
716 712
717 switch (nvsp_packet->hdr.msg_type) { 713 switch (nvsp_packet->hdr.msg_type) {
718 case NVSP_MSG_TYPE_INIT_COMPLETE: 714 case NVSP_MSG_TYPE_INIT_COMPLETE:
@@ -726,8 +722,8 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
726 break; 722 break;
727 723
728 case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE: 724 case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
729 netvsc_send_tx_complete(net_device, incoming_channel, 725 netvsc_send_tx_complete(ndev, net_device, incoming_channel,
730 device, desc, budget); 726 desc, budget);
731 break; 727 break;
732 728
733 default: 729 default:
@@ -1092,12 +1088,11 @@ static void enq_receive_complete(struct net_device *ndev,
1092 1088
1093static int netvsc_receive(struct net_device *ndev, 1089static int netvsc_receive(struct net_device *ndev,
1094 struct netvsc_device *net_device, 1090 struct netvsc_device *net_device,
1095 struct net_device_context *net_device_ctx,
1096 struct hv_device *device,
1097 struct vmbus_channel *channel, 1091 struct vmbus_channel *channel,
1098 const struct vmpacket_descriptor *desc, 1092 const struct vmpacket_descriptor *desc,
1099 struct nvsp_message *nvsp) 1093 const struct nvsp_message *nvsp)
1100{ 1094{
1095 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1101 const struct vmtransfer_page_packet_header *vmxferpage_packet 1096 const struct vmtransfer_page_packet_header *vmxferpage_packet
1102 = container_of(desc, const struct vmtransfer_page_packet_header, d); 1097 = container_of(desc, const struct vmtransfer_page_packet_header, d);
1103 u16 q_idx = channel->offermsg.offer.sub_channel_index; 1098 u16 q_idx = channel->offermsg.offer.sub_channel_index;
@@ -1158,13 +1153,12 @@ static int netvsc_receive(struct net_device *ndev,
1158 return count; 1153 return count;
1159} 1154}
1160 1155
1161static void netvsc_send_table(struct hv_device *hdev, 1156static void netvsc_send_table(struct net_device *ndev,
1162 struct nvsp_message *nvmsg) 1157 const struct nvsp_message *nvmsg)
1163{ 1158{
1164 struct net_device *ndev = hv_get_drvdata(hdev);
1165 struct net_device_context *net_device_ctx = netdev_priv(ndev); 1159 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1166 int i;
1167 u32 count, *tab; 1160 u32 count, *tab;
1161 int i;
1168 1162
1169 count = nvmsg->msg.v5_msg.send_table.count; 1163 count = nvmsg->msg.v5_msg.send_table.count;
1170 if (count != VRSS_SEND_TAB_SIZE) { 1164 if (count != VRSS_SEND_TAB_SIZE) {
@@ -1179,24 +1173,25 @@ static void netvsc_send_table(struct hv_device *hdev,
1179 net_device_ctx->tx_table[i] = tab[i]; 1173 net_device_ctx->tx_table[i] = tab[i];
1180} 1174}
1181 1175
1182static void netvsc_send_vf(struct net_device_context *net_device_ctx, 1176static void netvsc_send_vf(struct net_device *ndev,
1183 struct nvsp_message *nvmsg) 1177 const struct nvsp_message *nvmsg)
1184{ 1178{
1179 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1180
1185 net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; 1181 net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1186 net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; 1182 net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
1187} 1183}
1188 1184
1189static inline void netvsc_receive_inband(struct hv_device *hdev, 1185static void netvsc_receive_inband(struct net_device *ndev,
1190 struct net_device_context *net_device_ctx, 1186 const struct nvsp_message *nvmsg)
1191 struct nvsp_message *nvmsg)
1192{ 1187{
1193 switch (nvmsg->hdr.msg_type) { 1188 switch (nvmsg->hdr.msg_type) {
1194 case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: 1189 case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
1195 netvsc_send_table(hdev, nvmsg); 1190 netvsc_send_table(ndev, nvmsg);
1196 break; 1191 break;
1197 1192
1198 case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: 1193 case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
1199 netvsc_send_vf(net_device_ctx, nvmsg); 1194 netvsc_send_vf(ndev, nvmsg);
1200 break; 1195 break;
1201 } 1196 }
1202} 1197}
@@ -1208,24 +1203,23 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
1208 const struct vmpacket_descriptor *desc, 1203 const struct vmpacket_descriptor *desc,
1209 int budget) 1204 int budget)
1210{ 1205{
1211 struct net_device_context *net_device_ctx = netdev_priv(ndev); 1206 const struct nvsp_message *nvmsg = hv_pkt_data(desc);
1212 struct nvsp_message *nvmsg = hv_pkt_data(desc);
1213 1207
1214 trace_nvsp_recv(ndev, channel, nvmsg); 1208 trace_nvsp_recv(ndev, channel, nvmsg);
1215 1209
1216 switch (desc->type) { 1210 switch (desc->type) {
1217 case VM_PKT_COMP: 1211 case VM_PKT_COMP:
1218 netvsc_send_completion(net_device, channel, device, 1212 netvsc_send_completion(ndev, net_device, channel,
1219 desc, budget); 1213 desc, budget);
1220 break; 1214 break;
1221 1215
1222 case VM_PKT_DATA_USING_XFER_PAGES: 1216 case VM_PKT_DATA_USING_XFER_PAGES:
1223 return netvsc_receive(ndev, net_device, net_device_ctx, 1217 return netvsc_receive(ndev, net_device, channel,
1224 device, channel, desc, nvmsg); 1218 desc, nvmsg);
1225 break; 1219 break;
1226 1220
1227 case VM_PKT_DATA_INBAND: 1221 case VM_PKT_DATA_INBAND:
1228 netvsc_receive_inband(device, net_device_ctx, nvmsg); 1222 netvsc_receive_inband(ndev, nvmsg);
1229 break; 1223 break;
1230 1224
1231 default: 1225 default:
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 5ad130c3da43..0876aec7328c 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -346,10 +346,6 @@ void bcm_phy_get_strings(struct phy_device *phydev, u8 *data)
346} 346}
347EXPORT_SYMBOL_GPL(bcm_phy_get_strings); 347EXPORT_SYMBOL_GPL(bcm_phy_get_strings);
348 348
349#ifndef UINT64_MAX
350#define UINT64_MAX (u64)(~((u64)0))
351#endif
352
353/* Caller is supposed to provide appropriate storage for the library code to 349/* Caller is supposed to provide appropriate storage for the library code to
354 * access the shadow copy 350 * access the shadow copy
355 */ 351 */
@@ -362,7 +358,7 @@ static u64 bcm_phy_get_stat(struct phy_device *phydev, u64 *shadow,
362 358
363 val = phy_read(phydev, stat.reg); 359 val = phy_read(phydev, stat.reg);
364 if (val < 0) { 360 if (val < 0) {
365 ret = UINT64_MAX; 361 ret = U64_MAX;
366 } else { 362 } else {
367 val >>= stat.shift; 363 val >>= stat.shift;
368 val = val & ((1 << stat.bits) - 1); 364 val = val & ((1 << stat.bits) - 1);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 25e2a099b71c..b8f57e9b9379 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1482,9 +1482,6 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data)
1482 } 1482 }
1483} 1483}
1484 1484
1485#ifndef UINT64_MAX
1486#define UINT64_MAX (u64)(~((u64)0))
1487#endif
1488static u64 marvell_get_stat(struct phy_device *phydev, int i) 1485static u64 marvell_get_stat(struct phy_device *phydev, int i)
1489{ 1486{
1490 struct marvell_hw_stat stat = marvell_hw_stats[i]; 1487 struct marvell_hw_stat stat = marvell_hw_stats[i];
@@ -1494,7 +1491,7 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
1494 1491
1495 val = phy_read_paged(phydev, stat.page, stat.reg); 1492 val = phy_read_paged(phydev, stat.page, stat.reg);
1496 if (val < 0) { 1493 if (val < 0) {
1497 ret = UINT64_MAX; 1494 ret = U64_MAX;
1498 } else { 1495 } else {
1499 val = val & ((1 << stat.bits) - 1); 1496 val = val & ((1 << stat.bits) - 1);
1500 priv->stats[i] += val; 1497 priv->stats[i] += val;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index f41b224a9cdb..de31c5170a5b 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -650,9 +650,6 @@ static void kszphy_get_strings(struct phy_device *phydev, u8 *data)
650 } 650 }
651} 651}
652 652
653#ifndef UINT64_MAX
654#define UINT64_MAX (u64)(~((u64)0))
655#endif
656static u64 kszphy_get_stat(struct phy_device *phydev, int i) 653static u64 kszphy_get_stat(struct phy_device *phydev, int i)
657{ 654{
658 struct kszphy_hw_stat stat = kszphy_hw_stats[i]; 655 struct kszphy_hw_stat stat = kszphy_hw_stats[i];
@@ -662,7 +659,7 @@ static u64 kszphy_get_stat(struct phy_device *phydev, int i)
662 659
663 val = phy_read(phydev, stat.reg); 660 val = phy_read(phydev, stat.reg);
664 if (val < 0) { 661 if (val < 0) {
665 ret = UINT64_MAX; 662 ret = U64_MAX;
666 } else { 663 } else {
667 val = val & ((1 << stat.bits) - 1); 664 val = val & ((1 << stat.bits) - 1);
668 priv->stats[i] += val; 665 priv->stats[i] += val;
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index be399d645224..c328208388da 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -168,9 +168,6 @@ static void smsc_get_strings(struct phy_device *phydev, u8 *data)
168 } 168 }
169} 169}
170 170
171#ifndef UINT64_MAX
172#define UINT64_MAX (u64)(~((u64)0))
173#endif
174static u64 smsc_get_stat(struct phy_device *phydev, int i) 171static u64 smsc_get_stat(struct phy_device *phydev, int i)
175{ 172{
176 struct smsc_hw_stat stat = smsc_hw_stats[i]; 173 struct smsc_hw_stat stat = smsc_hw_stats[i];
@@ -179,7 +176,7 @@ static u64 smsc_get_stat(struct phy_device *phydev, int i)
179 176
180 val = phy_read(phydev, stat.reg); 177 val = phy_read(phydev, stat.reg);
181 if (val < 0) 178 if (val < 0)
182 ret = UINT64_MAX; 179 ret = U64_MAX;
183 else 180 else
184 ret = val; 181 ret = val;
185 182
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index f28bd74ac275..418b0904cecb 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -111,6 +111,7 @@ config USB_LAN78XX
111 select MII 111 select MII
112 select PHYLIB 112 select PHYLIB
113 select MICROCHIP_PHY 113 select MICROCHIP_PHY
114 select FIXED_PHY
114 help 115 help
115 This option adds support for Microchip LAN78XX based USB 2 116 This option adds support for Microchip LAN78XX based USB 2
116 & USB 3 10/100/1000 Ethernet adapters. 117 & USB 3 10/100/1000 Ethernet adapters.
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index c59f8afd0d73..91761436709a 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -36,7 +36,7 @@
36#include <linux/irq.h> 36#include <linux/irq.h>
37#include <linux/irqchip/chained_irq.h> 37#include <linux/irqchip/chained_irq.h>
38#include <linux/microchipphy.h> 38#include <linux/microchipphy.h>
39#include <linux/phy.h> 39#include <linux/phy_fixed.h>
40#include <linux/of_mdio.h> 40#include <linux/of_mdio.h>
41#include <linux/of_net.h> 41#include <linux/of_net.h>
42#include "lan78xx.h" 42#include "lan78xx.h"
@@ -44,7 +44,6 @@
44#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" 44#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
45#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices" 45#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
46#define DRIVER_NAME "lan78xx" 46#define DRIVER_NAME "lan78xx"
47#define DRIVER_VERSION "1.0.6"
48 47
49#define TX_TIMEOUT_JIFFIES (5 * HZ) 48#define TX_TIMEOUT_JIFFIES (5 * HZ)
50#define THROTTLE_JIFFIES (HZ / 8) 49#define THROTTLE_JIFFIES (HZ / 8)
@@ -1503,7 +1502,6 @@ static void lan78xx_get_drvinfo(struct net_device *net,
1503 struct lan78xx_net *dev = netdev_priv(net); 1502 struct lan78xx_net *dev = netdev_priv(net);
1504 1503
1505 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver)); 1504 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1506 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1507 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); 1505 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1508} 1506}
1509 1507
@@ -2063,52 +2061,91 @@ static int ksz9031rnx_fixup(struct phy_device *phydev)
2063 return 1; 2061 return 1;
2064} 2062}
2065 2063
2066static int lan78xx_phy_init(struct lan78xx_net *dev) 2064static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2067{ 2065{
2066 u32 buf;
2068 int ret; 2067 int ret;
2069 u32 mii_adv; 2068 struct fixed_phy_status fphy_status = {
2069 .link = 1,
2070 .speed = SPEED_1000,
2071 .duplex = DUPLEX_FULL,
2072 };
2070 struct phy_device *phydev; 2073 struct phy_device *phydev;
2071 2074
2072 phydev = phy_find_first(dev->mdiobus); 2075 phydev = phy_find_first(dev->mdiobus);
2073 if (!phydev) { 2076 if (!phydev) {
2074 netdev_err(dev->net, "no PHY found\n"); 2077 netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2075 return -EIO; 2078 phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1,
2076 } 2079 NULL);
2077 2080 if (IS_ERR(phydev)) {
2078 if ((dev->chipid == ID_REV_CHIP_ID_7800_) || 2081 netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2079 (dev->chipid == ID_REV_CHIP_ID_7850_)) { 2082 return NULL;
2080 phydev->is_internal = true; 2083 }
2081 dev->interface = PHY_INTERFACE_MODE_GMII; 2084 netdev_dbg(dev->net, "Registered FIXED PHY\n");
2082 2085 dev->interface = PHY_INTERFACE_MODE_RGMII;
2083 } else if (dev->chipid == ID_REV_CHIP_ID_7801_) { 2086 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2087 MAC_RGMII_ID_TXC_DELAY_EN_);
2088 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2089 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2090 buf |= HW_CFG_CLK125_EN_;
2091 buf |= HW_CFG_REFCLK25_EN_;
2092 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2093 } else {
2084 if (!phydev->drv) { 2094 if (!phydev->drv) {
2085 netdev_err(dev->net, "no PHY driver found\n"); 2095 netdev_err(dev->net, "no PHY driver found\n");
2086 return -EIO; 2096 return NULL;
2087 } 2097 }
2088
2089 dev->interface = PHY_INTERFACE_MODE_RGMII; 2098 dev->interface = PHY_INTERFACE_MODE_RGMII;
2090
2091 /* external PHY fixup for KSZ9031RNX */ 2099 /* external PHY fixup for KSZ9031RNX */
2092 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0, 2100 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2093 ksz9031rnx_fixup); 2101 ksz9031rnx_fixup);
2094 if (ret < 0) { 2102 if (ret < 0) {
2095 netdev_err(dev->net, "fail to register fixup\n"); 2103 netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2096 return ret; 2104 return NULL;
2097 } 2105 }
2098 /* external PHY fixup for LAN8835 */ 2106 /* external PHY fixup for LAN8835 */
2099 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0, 2107 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2100 lan8835_fixup); 2108 lan8835_fixup);
2101 if (ret < 0) { 2109 if (ret < 0) {
2102 netdev_err(dev->net, "fail to register fixup\n"); 2110 netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2103 return ret; 2111 return NULL;
2104 } 2112 }
2105 /* add more external PHY fixup here if needed */ 2113 /* add more external PHY fixup here if needed */
2106 2114
2107 phydev->is_internal = false; 2115 phydev->is_internal = false;
2108 } else { 2116 }
2109 netdev_err(dev->net, "unknown ID found\n"); 2117 return phydev;
2110 ret = -EIO; 2118}
2111 goto error; 2119
2120static int lan78xx_phy_init(struct lan78xx_net *dev)
2121{
2122 int ret;
2123 u32 mii_adv;
2124 struct phy_device *phydev;
2125
2126 switch (dev->chipid) {
2127 case ID_REV_CHIP_ID_7801_:
2128 phydev = lan7801_phy_init(dev);
2129 if (!phydev) {
2130 netdev_err(dev->net, "lan7801: PHY Init Failed");
2131 return -EIO;
2132 }
2133 break;
2134
2135 case ID_REV_CHIP_ID_7800_:
2136 case ID_REV_CHIP_ID_7850_:
2137 phydev = phy_find_first(dev->mdiobus);
2138 if (!phydev) {
2139 netdev_err(dev->net, "no PHY found\n");
2140 return -EIO;
2141 }
2142 phydev->is_internal = true;
2143 dev->interface = PHY_INTERFACE_MODE_GMII;
2144 break;
2145
2146 default:
2147 netdev_err(dev->net, "Unknown CHIP ID found\n");
2148 return -EIO;
2112 } 2149 }
2113 2150
2114 /* if phyirq is not set, use polling mode in phylib */ 2151 /* if phyirq is not set, use polling mode in phylib */
@@ -2127,6 +2164,16 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
2127 if (ret) { 2164 if (ret) {
2128 netdev_err(dev->net, "can't attach PHY to %s\n", 2165 netdev_err(dev->net, "can't attach PHY to %s\n",
2129 dev->mdiobus->id); 2166 dev->mdiobus->id);
2167 if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2168 if (phy_is_pseudo_fixed_link(phydev)) {
2169 fixed_phy_unregister(phydev);
2170 } else {
2171 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2172 0xfffffff0);
2173 phy_unregister_fixup_for_uid(PHY_LAN8835,
2174 0xfffffff0);
2175 }
2176 }
2130 return -EIO; 2177 return -EIO;
2131 } 2178 }
2132 2179
@@ -2166,12 +2213,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
2166 dev->fc_autoneg = phydev->autoneg; 2213 dev->fc_autoneg = phydev->autoneg;
2167 2214
2168 return 0; 2215 return 0;
2169
2170error:
2171 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2172 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2173
2174 return ret;
2175} 2216}
2176 2217
2177static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size) 2218static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
@@ -3569,6 +3610,7 @@ static void lan78xx_disconnect(struct usb_interface *intf)
3569 struct lan78xx_net *dev; 3610 struct lan78xx_net *dev;
3570 struct usb_device *udev; 3611 struct usb_device *udev;
3571 struct net_device *net; 3612 struct net_device *net;
3613 struct phy_device *phydev;
3572 3614
3573 dev = usb_get_intfdata(intf); 3615 dev = usb_get_intfdata(intf);
3574 usb_set_intfdata(intf, NULL); 3616 usb_set_intfdata(intf, NULL);
@@ -3577,12 +3619,16 @@ static void lan78xx_disconnect(struct usb_interface *intf)
3577 3619
3578 udev = interface_to_usbdev(intf); 3620 udev = interface_to_usbdev(intf);
3579 net = dev->net; 3621 net = dev->net;
3622 phydev = net->phydev;
3580 3623
3581 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); 3624 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3582 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); 3625 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3583 3626
3584 phy_disconnect(net->phydev); 3627 phy_disconnect(net->phydev);
3585 3628
3629 if (phy_is_pseudo_fixed_link(phydev))
3630 fixed_phy_unregister(phydev);
3631
3586 unregister_netdev(net); 3632 unregister_netdev(net);
3587 3633
3588 cancel_delayed_work_sync(&dev->wq); 3634 cancel_delayed_work_sync(&dev->wq);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index c853e7410f5a..42565dd33aa6 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1098,6 +1098,7 @@ static const struct usb_device_id products[] = {
1098 {QMI_FIXED_INTF(0x05c6, 0x9080, 8)}, 1098 {QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
1099 {QMI_FIXED_INTF(0x05c6, 0x9083, 3)}, 1099 {QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
1100 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, 1100 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
1101 {QMI_FIXED_INTF(0x05c6, 0x90b2, 3)}, /* ublox R410M */
1101 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, 1102 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
1102 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, 1103 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
1103 {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */ 1104 {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
@@ -1343,6 +1344,18 @@ static int qmi_wwan_probe(struct usb_interface *intf,
1343 id->driver_info = (unsigned long)&qmi_wwan_info; 1344 id->driver_info = (unsigned long)&qmi_wwan_info;
1344 } 1345 }
1345 1346
1347 /* There are devices where the same interface number can be
1348 * configured as different functions. We should only bind to
1349 * vendor specific functions when matching on interface number
1350 */
1351 if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER &&
1352 desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) {
1353 dev_dbg(&intf->dev,
1354 "Rejecting interface number match for class %02x\n",
1355 desc->bInterfaceClass);
1356 return -ENODEV;
1357 }
1358
1346 /* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */ 1359 /* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */
1347 if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) { 1360 if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) {
1348 dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n"); 1361 dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n");
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 9277f4c2bfeb..94e177d7c9b5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -459,7 +459,7 @@ static void brcmf_fw_free_request(struct brcmf_fw_request *req)
459 kfree(req); 459 kfree(req);
460} 460}
461 461
462static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) 462static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
463{ 463{
464 struct brcmf_fw *fwctx = ctx; 464 struct brcmf_fw *fwctx = ctx;
465 struct brcmf_fw_item *cur; 465 struct brcmf_fw_item *cur;
@@ -498,13 +498,10 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
498 brcmf_dbg(TRACE, "nvram %p len %d\n", nvram, nvram_length); 498 brcmf_dbg(TRACE, "nvram %p len %d\n", nvram, nvram_length);
499 cur->nv_data.data = nvram; 499 cur->nv_data.data = nvram;
500 cur->nv_data.len = nvram_length; 500 cur->nv_data.len = nvram_length;
501 return; 501 return 0;
502 502
503fail: 503fail:
504 brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); 504 return -ENOENT;
505 fwctx->done(fwctx->dev, -ENOENT, NULL);
506 brcmf_fw_free_request(fwctx->req);
507 kfree(fwctx);
508} 505}
509 506
510static int brcmf_fw_request_next_item(struct brcmf_fw *fwctx, bool async) 507static int brcmf_fw_request_next_item(struct brcmf_fw *fwctx, bool async)
@@ -553,20 +550,27 @@ static void brcmf_fw_request_done(const struct firmware *fw, void *ctx)
553 brcmf_dbg(TRACE, "enter: firmware %s %sfound\n", cur->path, 550 brcmf_dbg(TRACE, "enter: firmware %s %sfound\n", cur->path,
554 fw ? "" : "not "); 551 fw ? "" : "not ");
555 552
556 if (fw) { 553 if (!fw)
557 if (cur->type == BRCMF_FW_TYPE_BINARY)
558 cur->binary = fw;
559 else if (cur->type == BRCMF_FW_TYPE_NVRAM)
560 brcmf_fw_request_nvram_done(fw, fwctx);
561 else
562 release_firmware(fw);
563 } else if (cur->type == BRCMF_FW_TYPE_NVRAM) {
564 brcmf_fw_request_nvram_done(NULL, fwctx);
565 } else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL)) {
566 ret = -ENOENT; 554 ret = -ENOENT;
555
556 switch (cur->type) {
557 case BRCMF_FW_TYPE_NVRAM:
558 ret = brcmf_fw_request_nvram_done(fw, fwctx);
559 break;
560 case BRCMF_FW_TYPE_BINARY:
561 cur->binary = fw;
562 break;
563 default:
564 /* something fishy here so bail out early */
565 brcmf_err("unknown fw type: %d\n", cur->type);
566 release_firmware(fw);
567 ret = -EINVAL;
567 goto fail; 568 goto fail;
568 } 569 }
569 570
571 if (ret < 0 && !(cur->flags & BRCMF_FW_REQF_OPTIONAL))
572 goto fail;
573
570 do { 574 do {
571 if (++fwctx->curpos == fwctx->req->n_items) { 575 if (++fwctx->curpos == fwctx->req->n_items) {
572 ret = 0; 576 ret = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 7af3a0f51b77..a17c4a79b8d4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -8,6 +8,7 @@
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 14 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +31,7 @@
30 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 31 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
31 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
32 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 33 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
33 * Copyright(c) 2018 Intel Corporation 34 * Copyright(c) 2018 Intel Corporation
34 * All rights reserved. 35 * All rights reserved.
35 * 36 *
36 * Redistribution and use in source and binary forms, with or without 37 * Redistribution and use in source and binary forms, with or without
@@ -749,13 +750,9 @@ struct iwl_scan_req_umac {
749} __packed; 750} __packed;
750 751
751#define IWL_SCAN_REQ_UMAC_SIZE_V8 sizeof(struct iwl_scan_req_umac) 752#define IWL_SCAN_REQ_UMAC_SIZE_V8 sizeof(struct iwl_scan_req_umac)
752#define IWL_SCAN_REQ_UMAC_SIZE_V7 (sizeof(struct iwl_scan_req_umac) - \ 753#define IWL_SCAN_REQ_UMAC_SIZE_V7 48
753 4 * sizeof(u8)) 754#define IWL_SCAN_REQ_UMAC_SIZE_V6 44
754#define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \ 755#define IWL_SCAN_REQ_UMAC_SIZE_V1 36
755 2 * sizeof(u8) - sizeof(__le16))
756#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \
757 2 * sizeof(__le32) - 2 * sizeof(u8) - \
758 sizeof(__le16))
759 756
760/** 757/**
761 * struct iwl_umac_scan_abort 758 * struct iwl_umac_scan_abort
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 8928613e033e..ca0174680af9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -76,6 +76,7 @@
76#include "iwl-io.h" 76#include "iwl-io.h"
77#include "iwl-csr.h" 77#include "iwl-csr.h"
78#include "fw/acpi.h" 78#include "fw/acpi.h"
79#include "fw/api/nvm-reg.h"
79 80
80/* NVM offsets (in words) definitions */ 81/* NVM offsets (in words) definitions */
81enum nvm_offsets { 82enum nvm_offsets {
@@ -146,8 +147,8 @@ static const u8 iwl_ext_nvm_channels[] = {
146 149, 153, 157, 161, 165, 169, 173, 177, 181 147 149, 153, 157, 161, 165, 169, 173, 177, 181
147}; 148};
148 149
149#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) 150#define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
150#define IWL_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels) 151#define IWL_NVM_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels)
151#define NUM_2GHZ_CHANNELS 14 152#define NUM_2GHZ_CHANNELS 14
152#define NUM_2GHZ_CHANNELS_EXT 14 153#define NUM_2GHZ_CHANNELS_EXT 14
153#define FIRST_2GHZ_HT_MINUS 5 154#define FIRST_2GHZ_HT_MINUS 5
@@ -301,11 +302,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
301 const u8 *nvm_chan; 302 const u8 *nvm_chan;
302 303
303 if (cfg->nvm_type != IWL_NVM_EXT) { 304 if (cfg->nvm_type != IWL_NVM_EXT) {
304 num_of_ch = IWL_NUM_CHANNELS; 305 num_of_ch = IWL_NVM_NUM_CHANNELS;
305 nvm_chan = &iwl_nvm_channels[0]; 306 nvm_chan = &iwl_nvm_channels[0];
306 num_2ghz_channels = NUM_2GHZ_CHANNELS; 307 num_2ghz_channels = NUM_2GHZ_CHANNELS;
307 } else { 308 } else {
308 num_of_ch = IWL_NUM_CHANNELS_EXT; 309 num_of_ch = IWL_NVM_NUM_CHANNELS_EXT;
309 nvm_chan = &iwl_ext_nvm_channels[0]; 310 nvm_chan = &iwl_ext_nvm_channels[0];
310 num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT; 311 num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT;
311 } 312 }
@@ -720,12 +721,12 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
720 if (cfg->nvm_type != IWL_NVM_EXT) 721 if (cfg->nvm_type != IWL_NVM_EXT)
721 data = kzalloc(sizeof(*data) + 722 data = kzalloc(sizeof(*data) +
722 sizeof(struct ieee80211_channel) * 723 sizeof(struct ieee80211_channel) *
723 IWL_NUM_CHANNELS, 724 IWL_NVM_NUM_CHANNELS,
724 GFP_KERNEL); 725 GFP_KERNEL);
725 else 726 else
726 data = kzalloc(sizeof(*data) + 727 data = kzalloc(sizeof(*data) +
727 sizeof(struct ieee80211_channel) * 728 sizeof(struct ieee80211_channel) *
728 IWL_NUM_CHANNELS_EXT, 729 IWL_NVM_NUM_CHANNELS_EXT,
729 GFP_KERNEL); 730 GFP_KERNEL);
730 if (!data) 731 if (!data)
731 return NULL; 732 return NULL;
@@ -842,24 +843,34 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
842 return flags; 843 return flags;
843} 844}
844 845
846struct regdb_ptrs {
847 struct ieee80211_wmm_rule *rule;
848 u32 token;
849};
850
845struct ieee80211_regdomain * 851struct ieee80211_regdomain *
846iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, 852iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
847 int num_of_ch, __le32 *channels, u16 fw_mcc) 853 int num_of_ch, __le32 *channels, u16 fw_mcc,
854 u16 geo_info)
848{ 855{
849 int ch_idx; 856 int ch_idx;
850 u16 ch_flags; 857 u16 ch_flags;
851 u32 reg_rule_flags, prev_reg_rule_flags = 0; 858 u32 reg_rule_flags, prev_reg_rule_flags = 0;
852 const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? 859 const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
853 iwl_ext_nvm_channels : iwl_nvm_channels; 860 iwl_ext_nvm_channels : iwl_nvm_channels;
854 struct ieee80211_regdomain *regd; 861 struct ieee80211_regdomain *regd, *copy_rd;
855 int size_of_regd; 862 int size_of_regd, regd_to_copy, wmms_to_copy;
863 int size_of_wmms = 0;
856 struct ieee80211_reg_rule *rule; 864 struct ieee80211_reg_rule *rule;
865 struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm;
866 struct regdb_ptrs *regdb_ptrs;
857 enum nl80211_band band; 867 enum nl80211_band band;
858 int center_freq, prev_center_freq = 0; 868 int center_freq, prev_center_freq = 0;
859 int valid_rules = 0; 869 int valid_rules = 0, n_wmms = 0;
870 int i;
860 bool new_rule; 871 bool new_rule;
861 int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? 872 int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
862 IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS; 873 IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
863 874
864 if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) 875 if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
865 return ERR_PTR(-EINVAL); 876 return ERR_PTR(-EINVAL);
@@ -875,10 +886,26 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
875 sizeof(struct ieee80211_regdomain) + 886 sizeof(struct ieee80211_regdomain) +
876 num_of_ch * sizeof(struct ieee80211_reg_rule); 887 num_of_ch * sizeof(struct ieee80211_reg_rule);
877 888
878 regd = kzalloc(size_of_regd, GFP_KERNEL); 889 if (geo_info & GEO_WMM_ETSI_5GHZ_INFO)
890 size_of_wmms =
891 num_of_ch * sizeof(struct ieee80211_wmm_rule);
892
893 regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
879 if (!regd) 894 if (!regd)
880 return ERR_PTR(-ENOMEM); 895 return ERR_PTR(-ENOMEM);
881 896
897 regdb_ptrs = kcalloc(num_of_ch, sizeof(*regdb_ptrs), GFP_KERNEL);
898 if (!regdb_ptrs) {
899 copy_rd = ERR_PTR(-ENOMEM);
900 goto out;
901 }
902
903 /* set alpha2 from FW. */
904 regd->alpha2[0] = fw_mcc >> 8;
905 regd->alpha2[1] = fw_mcc & 0xff;
906
907 wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
908
882 for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { 909 for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
883 ch_flags = (u16)__le32_to_cpup(channels + ch_idx); 910 ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
884 band = (ch_idx < NUM_2GHZ_CHANNELS) ? 911 band = (ch_idx < NUM_2GHZ_CHANNELS) ?
@@ -927,14 +954,66 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
927 954
928 iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, 955 iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
929 nvm_chan[ch_idx], ch_flags); 956 nvm_chan[ch_idx], ch_flags);
957
958 if (!(geo_info & GEO_WMM_ETSI_5GHZ_INFO) ||
959 band == NL80211_BAND_2GHZ)
960 continue;
961
962 if (!reg_query_regdb_wmm(regd->alpha2, center_freq,
963 &regdb_ptrs[n_wmms].token, wmm_rule)) {
964 /* Add only new rules */
965 for (i = 0; i < n_wmms; i++) {
966 if (regdb_ptrs[i].token ==
967 regdb_ptrs[n_wmms].token) {
968 rule->wmm_rule = regdb_ptrs[i].rule;
969 break;
970 }
971 }
972 if (i == n_wmms) {
973 rule->wmm_rule = wmm_rule;
974 regdb_ptrs[n_wmms++].rule = wmm_rule;
975 wmm_rule++;
976 }
977 }
930 } 978 }
931 979
932 regd->n_reg_rules = valid_rules; 980 regd->n_reg_rules = valid_rules;
981 regd->n_wmm_rules = n_wmms;
933 982
934 /* set alpha2 from FW. */ 983 /*
935 regd->alpha2[0] = fw_mcc >> 8; 984 * Narrow down regdom for unused regulatory rules to prevent hole
936 regd->alpha2[1] = fw_mcc & 0xff; 985 * between reg rules to wmm rules.
986 */
987 regd_to_copy = sizeof(struct ieee80211_regdomain) +
988 valid_rules * sizeof(struct ieee80211_reg_rule);
989
990 wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms;
991
992 copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL);
993 if (!copy_rd) {
994 copy_rd = ERR_PTR(-ENOMEM);
995 goto out;
996 }
997
998 memcpy(copy_rd, regd, regd_to_copy);
999 memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd,
1000 wmms_to_copy);
1001
1002 d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy);
1003 s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
1004
1005 for (i = 0; i < regd->n_reg_rules; i++) {
1006 if (!regd->reg_rules[i].wmm_rule)
1007 continue;
1008
1009 copy_rd->reg_rules[i].wmm_rule = d_wmm +
1010 (regd->reg_rules[i].wmm_rule - s_wmm) /
1011 sizeof(struct ieee80211_wmm_rule);
1012 }
937 1013
938 return regd; 1014out:
1015 kfree(regdb_ptrs);
1016 kfree(regd);
1017 return copy_rd;
939} 1018}
940IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info); 1019IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index 306736c7a042..3071a23b7606 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -101,12 +101,14 @@ void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
101 * 101 *
102 * This function parses the regulatory channel data received as a 102 * This function parses the regulatory channel data received as a
103 * MCC_UPDATE_CMD command. It returns a newly allocated regulatory domain, 103 * MCC_UPDATE_CMD command. It returns a newly allocated regulatory domain,
104 * to be fed into the regulatory core. An ERR_PTR is returned on error. 104 * to be fed into the regulatory core. In case the geo_info is set handle
105 * accordingly. An ERR_PTR is returned on error.
105 * If not given to the regulatory core, the user is responsible for freeing 106 * If not given to the regulatory core, the user is responsible for freeing
106 * the regdomain returned here with kfree. 107 * the regdomain returned here with kfree.
107 */ 108 */
108struct ieee80211_regdomain * 109struct ieee80211_regdomain *
109iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, 110iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
110 int num_of_ch, __le32 *channels, u16 fw_mcc); 111 int num_of_ch, __le32 *channels, u16 fw_mcc,
112 u16 geo_info);
111 113
112#endif /* __iwl_nvm_parse_h__ */ 114#endif /* __iwl_nvm_parse_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 51b30424575b..90f8c89ea59c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -311,7 +311,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
311 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, 311 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
312 __le32_to_cpu(resp->n_channels), 312 __le32_to_cpu(resp->n_channels),
313 resp->channels, 313 resp->channels,
314 __le16_to_cpu(resp->mcc)); 314 __le16_to_cpu(resp->mcc),
315 __le16_to_cpu(resp->geo_info));
315 /* Store the return source id */ 316 /* Store the return source id */
316 src_id = resp->source_id; 317 src_id = resp->source_id;
317 kfree(resp); 318 kfree(resp);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index 8b6b07a936f5..b026e80940a4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -158,16 +158,6 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
158 158
159static u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv) 159static u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
160{ 160{
161 struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
162
163 /* override ant_num / ant_path */
164 if (mod_params->ant_sel) {
165 rtlpriv->btcoexist.btc_info.ant_num =
166 (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1);
167
168 rtlpriv->btcoexist.btc_info.single_ant_path =
169 (mod_params->ant_sel == 1 ? 0 : 1);
170 }
171 return rtlpriv->btcoexist.btc_info.single_ant_path; 161 return rtlpriv->btcoexist.btc_info.single_ant_path;
172} 162}
173 163
@@ -178,7 +168,6 @@ static u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
178 168
179static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) 169static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
180{ 170{
181 struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
182 u8 num; 171 u8 num;
183 172
184 if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2) 173 if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
@@ -186,10 +175,6 @@ static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
186 else 175 else
187 num = 1; 176 num = 1;
188 177
189 /* override ant_num / ant_path */
190 if (mod_params->ant_sel)
191 num = (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1) + 1;
192
193 return num; 178 return num;
194} 179}
195 180
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index e7bbbc95cdb1..b4f3f91b590e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -848,6 +848,9 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw)
848 return false; 848 return false;
849 } 849 }
850 850
851 if (rtlpriv->cfg->ops->get_btc_status())
852 rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv);
853
851 bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL); 854 bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL);
852 rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3)); 855 rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3));
853 856
@@ -2696,21 +2699,21 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
2696 rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; 2699 rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
2697 rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1); 2700 rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1);
2698 rtlpriv->btcoexist.btc_info.single_ant_path = 2701 rtlpriv->btcoexist.btc_info.single_ant_path =
2699 (value & 0x40); /*0xc3[6]*/ 2702 (value & 0x40 ? ANT_AUX : ANT_MAIN); /*0xc3[6]*/
2700 } else { 2703 } else {
2701 rtlpriv->btcoexist.btc_info.btcoexist = 0; 2704 rtlpriv->btcoexist.btc_info.btcoexist = 0;
2702 rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; 2705 rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
2703 rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; 2706 rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
2704 rtlpriv->btcoexist.btc_info.single_ant_path = 0; 2707 rtlpriv->btcoexist.btc_info.single_ant_path = ANT_MAIN;
2705 } 2708 }
2706 2709
2707 /* override ant_num / ant_path */ 2710 /* override ant_num / ant_path */
2708 if (mod_params->ant_sel) { 2711 if (mod_params->ant_sel) {
2709 rtlpriv->btcoexist.btc_info.ant_num = 2712 rtlpriv->btcoexist.btc_info.ant_num =
2710 (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1); 2713 (mod_params->ant_sel == 1 ? ANT_X1 : ANT_X2);
2711 2714
2712 rtlpriv->btcoexist.btc_info.single_ant_path = 2715 rtlpriv->btcoexist.btc_info.single_ant_path =
2713 (mod_params->ant_sel == 1 ? 0 : 1); 2716 (mod_params->ant_sel == 1 ? ANT_AUX : ANT_MAIN);
2714 } 2717 }
2715} 2718}
2716 2719
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index d27e33960e77..ce1754054a07 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2823,6 +2823,11 @@ enum bt_ant_num {
2823 ANT_X1 = 1, 2823 ANT_X1 = 1,
2824}; 2824};
2825 2825
2826enum bt_ant_path {
2827 ANT_MAIN = 0,
2828 ANT_AUX = 1,
2829};
2830
2826enum bt_co_type { 2831enum bt_co_type {
2827 BT_2WIRE = 0, 2832 BT_2WIRE = 0,
2828 BT_ISSC_3WIRE = 1, 2833 BT_ISSC_3WIRE = 1,
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 84aa9d676375..6da20b9688f7 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -942,7 +942,7 @@ int __init early_init_dt_scan_chosen_stdout(void)
942 int offset; 942 int offset;
943 const char *p, *q, *options = NULL; 943 const char *p, *q, *options = NULL;
944 int l; 944 int l;
945 const struct earlycon_id *match; 945 const struct earlycon_id **p_match;
946 const void *fdt = initial_boot_params; 946 const void *fdt = initial_boot_params;
947 947
948 offset = fdt_path_offset(fdt, "/chosen"); 948 offset = fdt_path_offset(fdt, "/chosen");
@@ -969,7 +969,10 @@ int __init early_init_dt_scan_chosen_stdout(void)
969 return 0; 969 return 0;
970 } 970 }
971 971
972 for (match = __earlycon_table; match < __earlycon_table_end; match++) { 972 for (p_match = __earlycon_table; p_match < __earlycon_table_end;
973 p_match++) {
974 const struct earlycon_id *match = *p_match;
975
973 if (!match->compatible[0]) 976 if (!match->compatible[0])
974 continue; 977 continue;
975 978
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index acba1f56af3e..126cf19e869b 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1263,7 +1263,7 @@ static struct parisc_driver ccio_driver __refdata = {
1263 * I/O Page Directory, the resource map, and initalizing the 1263 * I/O Page Directory, the resource map, and initalizing the
1264 * U2/Uturn chip into virtual mode. 1264 * U2/Uturn chip into virtual mode.
1265 */ 1265 */
1266static void 1266static void __init
1267ccio_ioc_init(struct ioc *ioc) 1267ccio_ioc_init(struct ioc *ioc)
1268{ 1268{
1269 int i; 1269 int i;
diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c
index a6b88c7f6e3e..d2970a009eb5 100644
--- a/drivers/pci/dwc/pcie-kirin.c
+++ b/drivers/pci/dwc/pcie-kirin.c
@@ -486,7 +486,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
486 return ret; 486 return ret;
487 487
488 kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node, 488 kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
489 "reset-gpio", 0); 489 "reset-gpios", 0);
490 if (kirin_pcie->gpio_id_reset < 0) 490 if (kirin_pcie->gpio_id_reset < 0)
491 return -ENODEV; 491 return -ENODEV;
492 492
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index b04d37b3c5de..9abf549631b4 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -29,6 +29,7 @@
29#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5 29#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
30#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11) 30#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11)
31#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12 31#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12
32#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2
32#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0 33#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0
33#define PCIE_CORE_LINK_L0S_ENTRY BIT(0) 34#define PCIE_CORE_LINK_L0S_ENTRY BIT(0)
34#define PCIE_CORE_LINK_TRAINING BIT(5) 35#define PCIE_CORE_LINK_TRAINING BIT(5)
@@ -100,7 +101,8 @@
100#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C) 101#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C)
101#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4) 102#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4)
102#define PCIE_ISR1_FLUSH BIT(5) 103#define PCIE_ISR1_FLUSH BIT(5)
103#define PCIE_ISR1_ALL_MASK GENMASK(5, 4) 104#define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val))
105#define PCIE_ISR1_ALL_MASK GENMASK(11, 4)
104#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50) 106#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50)
105#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54) 107#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
106#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58) 108#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
@@ -172,8 +174,6 @@
172#define PCIE_CONFIG_WR_TYPE0 0xa 174#define PCIE_CONFIG_WR_TYPE0 0xa
173#define PCIE_CONFIG_WR_TYPE1 0xb 175#define PCIE_CONFIG_WR_TYPE1 0xb
174 176
175/* PCI_BDF shifts 8bit, so we need extra 4bit shift */
176#define PCIE_BDF(dev) (dev << 4)
177#define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20) 177#define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20)
178#define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15) 178#define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15)
179#define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12) 179#define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12)
@@ -296,7 +296,8 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
296 reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE | 296 reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
297 (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) | 297 (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
298 PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE | 298 PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
299 PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT; 299 (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
300 PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
300 advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG); 301 advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
301 302
302 /* Program PCIe Control 2 to disable strict ordering */ 303 /* Program PCIe Control 2 to disable strict ordering */
@@ -437,7 +438,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
437 u32 reg; 438 u32 reg;
438 int ret; 439 int ret;
439 440
440 if (PCI_SLOT(devfn) != 0) { 441 if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) {
441 *val = 0xffffffff; 442 *val = 0xffffffff;
442 return PCIBIOS_DEVICE_NOT_FOUND; 443 return PCIBIOS_DEVICE_NOT_FOUND;
443 } 444 }
@@ -456,7 +457,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
456 advk_writel(pcie, reg, PIO_CTRL); 457 advk_writel(pcie, reg, PIO_CTRL);
457 458
458 /* Program the address registers */ 459 /* Program the address registers */
459 reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where); 460 reg = PCIE_CONF_ADDR(bus->number, devfn, where);
460 advk_writel(pcie, reg, PIO_ADDR_LS); 461 advk_writel(pcie, reg, PIO_ADDR_LS);
461 advk_writel(pcie, 0, PIO_ADDR_MS); 462 advk_writel(pcie, 0, PIO_ADDR_MS);
462 463
@@ -491,7 +492,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
491 int offset; 492 int offset;
492 int ret; 493 int ret;
493 494
494 if (PCI_SLOT(devfn) != 0) 495 if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
495 return PCIBIOS_DEVICE_NOT_FOUND; 496 return PCIBIOS_DEVICE_NOT_FOUND;
496 497
497 if (where % size) 498 if (where % size)
@@ -609,9 +610,9 @@ static void advk_pcie_irq_mask(struct irq_data *d)
609 irq_hw_number_t hwirq = irqd_to_hwirq(d); 610 irq_hw_number_t hwirq = irqd_to_hwirq(d);
610 u32 mask; 611 u32 mask;
611 612
612 mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); 613 mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
613 mask |= PCIE_ISR0_INTX_ASSERT(hwirq); 614 mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
614 advk_writel(pcie, mask, PCIE_ISR0_MASK_REG); 615 advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
615} 616}
616 617
617static void advk_pcie_irq_unmask(struct irq_data *d) 618static void advk_pcie_irq_unmask(struct irq_data *d)
@@ -620,9 +621,9 @@ static void advk_pcie_irq_unmask(struct irq_data *d)
620 irq_hw_number_t hwirq = irqd_to_hwirq(d); 621 irq_hw_number_t hwirq = irqd_to_hwirq(d);
621 u32 mask; 622 u32 mask;
622 623
623 mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); 624 mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
624 mask &= ~PCIE_ISR0_INTX_ASSERT(hwirq); 625 mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
625 advk_writel(pcie, mask, PCIE_ISR0_MASK_REG); 626 advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
626} 627}
627 628
628static int advk_pcie_irq_map(struct irq_domain *h, 629static int advk_pcie_irq_map(struct irq_domain *h,
@@ -765,29 +766,35 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie)
765 766
766static void advk_pcie_handle_int(struct advk_pcie *pcie) 767static void advk_pcie_handle_int(struct advk_pcie *pcie)
767{ 768{
768 u32 val, mask, status; 769 u32 isr0_val, isr0_mask, isr0_status;
770 u32 isr1_val, isr1_mask, isr1_status;
769 int i, virq; 771 int i, virq;
770 772
771 val = advk_readl(pcie, PCIE_ISR0_REG); 773 isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
772 mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); 774 isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
773 status = val & ((~mask) & PCIE_ISR0_ALL_MASK); 775 isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);
776
777 isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
778 isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
779 isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);
774 780
775 if (!status) { 781 if (!isr0_status && !isr1_status) {
776 advk_writel(pcie, val, PCIE_ISR0_REG); 782 advk_writel(pcie, isr0_val, PCIE_ISR0_REG);
783 advk_writel(pcie, isr1_val, PCIE_ISR1_REG);
777 return; 784 return;
778 } 785 }
779 786
780 /* Process MSI interrupts */ 787 /* Process MSI interrupts */
781 if (status & PCIE_ISR0_MSI_INT_PENDING) 788 if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
782 advk_pcie_handle_msi(pcie); 789 advk_pcie_handle_msi(pcie);
783 790
784 /* Process legacy interrupts */ 791 /* Process legacy interrupts */
785 for (i = 0; i < PCI_NUM_INTX; i++) { 792 for (i = 0; i < PCI_NUM_INTX; i++) {
786 if (!(status & PCIE_ISR0_INTX_ASSERT(i))) 793 if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
787 continue; 794 continue;
788 795
789 advk_writel(pcie, PCIE_ISR0_INTX_ASSERT(i), 796 advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
790 PCIE_ISR0_REG); 797 PCIE_ISR1_REG);
791 798
792 virq = irq_find_mapping(pcie->irq_domain, i); 799 virq = irq_find_mapping(pcie->irq_domain, i);
793 generic_handle_irq(virq); 800 generic_handle_irq(virq);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 6ace47099fc5..b9a131137e64 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -958,10 +958,11 @@ static int pci_pm_freeze(struct device *dev)
958 * devices should not be touched during freeze/thaw transitions, 958 * devices should not be touched during freeze/thaw transitions,
959 * however. 959 * however.
960 */ 960 */
961 if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) 961 if (!dev_pm_smart_suspend_and_suspended(dev)) {
962 pm_runtime_resume(dev); 962 pm_runtime_resume(dev);
963 pci_dev->state_saved = false;
964 }
963 965
964 pci_dev->state_saved = false;
965 if (pm->freeze) { 966 if (pm->freeze) {
966 int error; 967 int error;
967 968
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e597655a5643..a04197ce767d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -5273,11 +5273,11 @@ void pcie_print_link_status(struct pci_dev *dev)
5273 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width); 5273 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
5274 5274
5275 if (bw_avail >= bw_cap) 5275 if (bw_avail >= bw_cap)
5276 pci_info(dev, "%u.%03u Gb/s available bandwidth (%s x%d link)\n", 5276 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
5277 bw_cap / 1000, bw_cap % 1000, 5277 bw_cap / 1000, bw_cap % 1000,
5278 PCIE_SPEED2STR(speed_cap), width_cap); 5278 PCIE_SPEED2STR(speed_cap), width_cap);
5279 else 5279 else
5280 pci_info(dev, "%u.%03u Gb/s available bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", 5280 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
5281 bw_avail / 1000, bw_avail % 1000, 5281 bw_avail / 1000, bw_avail % 1000,
5282 PCIE_SPEED2STR(speed), width, 5282 PCIE_SPEED2STR(speed), width,
5283 limiting_dev ? pci_name(limiting_dev) : "<unknown>", 5283 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index b3285175f20f..78ccf936d356 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -452,7 +452,6 @@ static int ptp_pch_adjtime(struct ptp_clock_info *ptp, s64 delta)
452static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) 452static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
453{ 453{
454 u64 ns; 454 u64 ns;
455 u32 remainder;
456 unsigned long flags; 455 unsigned long flags;
457 struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); 456 struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
458 struct pch_ts_regs __iomem *regs = pch_dev->regs; 457 struct pch_ts_regs __iomem *regs = pch_dev->regs;
@@ -461,8 +460,7 @@ static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
461 ns = pch_systime_read(regs); 460 ns = pch_systime_read(regs);
462 spin_unlock_irqrestore(&pch_dev->register_lock, flags); 461 spin_unlock_irqrestore(&pch_dev->register_lock, flags);
463 462
464 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); 463 *ts = ns_to_timespec64(ns);
465 ts->tv_nsec = remainder;
466 return 0; 464 return 0;
467} 465}
468 466
@@ -474,8 +472,7 @@ static int ptp_pch_settime(struct ptp_clock_info *ptp,
474 struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); 472 struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
475 struct pch_ts_regs __iomem *regs = pch_dev->regs; 473 struct pch_ts_regs __iomem *regs = pch_dev->regs;
476 474
477 ns = ts->tv_sec * 1000000000ULL; 475 ns = timespec64_to_ns(ts);
478 ns += ts->tv_nsec;
479 476
480 spin_lock_irqsave(&pch_dev->register_lock, flags); 477 spin_lock_irqsave(&pch_dev->register_lock, flags);
481 pch_systime_write(regs, ns); 478 pch_systime_write(regs, ns);
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index 304e891e35fc..60f2250fd96b 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -57,7 +57,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
57 57
58static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) 58static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
59{ 59{
60 long rc = OPAL_BUSY; 60 s64 rc = OPAL_BUSY;
61 int retries = 10; 61 int retries = 10;
62 u32 y_m_d; 62 u32 y_m_d;
63 u64 h_m_s_ms; 63 u64 h_m_s_ms;
@@ -66,13 +66,17 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
66 66
67 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { 67 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
68 rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); 68 rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
69 if (rc == OPAL_BUSY_EVENT) 69 if (rc == OPAL_BUSY_EVENT) {
70 msleep(OPAL_BUSY_DELAY_MS);
70 opal_poll_events(NULL); 71 opal_poll_events(NULL);
71 else if (retries-- && (rc == OPAL_HARDWARE 72 } else if (rc == OPAL_BUSY) {
72 || rc == OPAL_INTERNAL_ERROR)) 73 msleep(OPAL_BUSY_DELAY_MS);
73 msleep(10); 74 } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
74 else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) 75 if (retries--) {
75 break; 76 msleep(10); /* Wait 10ms before retry */
77 rc = OPAL_BUSY; /* go around again */
78 }
79 }
76 } 80 }
77 81
78 if (rc != OPAL_SUCCESS) 82 if (rc != OPAL_SUCCESS)
@@ -87,21 +91,26 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
87 91
88static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm) 92static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
89{ 93{
90 long rc = OPAL_BUSY; 94 s64 rc = OPAL_BUSY;
91 int retries = 10; 95 int retries = 10;
92 u32 y_m_d = 0; 96 u32 y_m_d = 0;
93 u64 h_m_s_ms = 0; 97 u64 h_m_s_ms = 0;
94 98
95 tm_to_opal(tm, &y_m_d, &h_m_s_ms); 99 tm_to_opal(tm, &y_m_d, &h_m_s_ms);
100
96 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { 101 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
97 rc = opal_rtc_write(y_m_d, h_m_s_ms); 102 rc = opal_rtc_write(y_m_d, h_m_s_ms);
98 if (rc == OPAL_BUSY_EVENT) 103 if (rc == OPAL_BUSY_EVENT) {
104 msleep(OPAL_BUSY_DELAY_MS);
99 opal_poll_events(NULL); 105 opal_poll_events(NULL);
100 else if (retries-- && (rc == OPAL_HARDWARE 106 } else if (rc == OPAL_BUSY) {
101 || rc == OPAL_INTERNAL_ERROR)) 107 msleep(OPAL_BUSY_DELAY_MS);
102 msleep(10); 108 } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
103 else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) 109 if (retries--) {
104 break; 110 msleep(10); /* Wait 10ms before retry */
111 rc = OPAL_BUSY; /* go around again */
112 }
113 }
105 } 114 }
106 115
107 return rc == OPAL_SUCCESS ? 0 : -EIO; 116 return rc == OPAL_SUCCESS ? 0 : -EIO;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 62f5f04d8f61..5e963fe0e38d 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -592,13 +592,22 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
592int dasd_alias_add_device(struct dasd_device *device) 592int dasd_alias_add_device(struct dasd_device *device)
593{ 593{
594 struct dasd_eckd_private *private = device->private; 594 struct dasd_eckd_private *private = device->private;
595 struct alias_lcu *lcu; 595 __u8 uaddr = private->uid.real_unit_addr;
596 struct alias_lcu *lcu = private->lcu;
596 unsigned long flags; 597 unsigned long flags;
597 int rc; 598 int rc;
598 599
599 lcu = private->lcu;
600 rc = 0; 600 rc = 0;
601 spin_lock_irqsave(&lcu->lock, flags); 601 spin_lock_irqsave(&lcu->lock, flags);
602 /*
603 * Check if device and lcu type differ. If so, the uac data may be
604 * outdated and needs to be updated.
605 */
606 if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
607 lcu->flags |= UPDATE_PENDING;
608 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
609 "uid type mismatch - trigger rescan");
610 }
602 if (!(lcu->flags & UPDATE_PENDING)) { 611 if (!(lcu->flags & UPDATE_PENDING)) {
603 rc = _add_device_to_lcu(lcu, device, device); 612 rc = _add_device_to_lcu(lcu, device, device);
604 if (rc) 613 if (rc)
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 6652a49a49b1..9029804dcd22 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -452,6 +452,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
452 452
453static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area) 453static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
454{ 454{
455 struct channel_path *chp;
455 struct chp_link link; 456 struct chp_link link;
456 struct chp_id chpid; 457 struct chp_id chpid;
457 int status; 458 int status;
@@ -464,10 +465,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
464 chpid.id = sei_area->rsid; 465 chpid.id = sei_area->rsid;
465 /* allocate a new channel path structure, if needed */ 466 /* allocate a new channel path structure, if needed */
466 status = chp_get_status(chpid); 467 status = chp_get_status(chpid);
467 if (status < 0) 468 if (!status)
468 chp_new(chpid);
469 else if (!status)
470 return; 469 return;
470
471 if (status < 0) {
472 chp_new(chpid);
473 } else {
474 chp = chpid_to_chp(chpid);
475 mutex_lock(&chp->lock);
476 chp_update_desc(chp);
477 mutex_unlock(&chp->lock);
478 }
471 memset(&link, 0, sizeof(struct chp_link)); 479 memset(&link, 0, sizeof(struct chp_link));
472 link.chpid = chpid; 480 link.chpid = chpid;
473 if ((sei_area->vf & 0xc0) != 0) { 481 if ((sei_area->vf & 0xc0) != 0) {
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index ff6963ad6e39..3c800642134e 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -20,12 +20,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
20 int ccode; 20 int ccode;
21 __u8 lpm; 21 __u8 lpm;
22 unsigned long flags; 22 unsigned long flags;
23 int ret;
23 24
24 sch = private->sch; 25 sch = private->sch;
25 26
26 spin_lock_irqsave(sch->lock, flags); 27 spin_lock_irqsave(sch->lock, flags);
27 private->state = VFIO_CCW_STATE_BUSY; 28 private->state = VFIO_CCW_STATE_BUSY;
28 spin_unlock_irqrestore(sch->lock, flags);
29 29
30 orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm); 30 orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
31 31
@@ -38,10 +38,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
38 * Initialize device status information 38 * Initialize device status information
39 */ 39 */
40 sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; 40 sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
41 return 0; 41 ret = 0;
42 break;
42 case 1: /* Status pending */ 43 case 1: /* Status pending */
43 case 2: /* Busy */ 44 case 2: /* Busy */
44 return -EBUSY; 45 ret = -EBUSY;
46 break;
45 case 3: /* Device/path not operational */ 47 case 3: /* Device/path not operational */
46 { 48 {
47 lpm = orb->cmd.lpm; 49 lpm = orb->cmd.lpm;
@@ -51,13 +53,16 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
51 sch->lpm = 0; 53 sch->lpm = 0;
52 54
53 if (cio_update_schib(sch)) 55 if (cio_update_schib(sch))
54 return -ENODEV; 56 ret = -ENODEV;
55 57 else
56 return sch->lpm ? -EACCES : -ENODEV; 58 ret = sch->lpm ? -EACCES : -ENODEV;
59 break;
57 } 60 }
58 default: 61 default:
59 return ccode; 62 ret = ccode;
60 } 63 }
64 spin_unlock_irqrestore(sch->lock, flags);
65 return ret;
61} 66}
62 67
63static void fsm_notoper(struct vfio_ccw_private *private, 68static void fsm_notoper(struct vfio_ccw_private *private,
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 0ee8f33efb54..2d9fe7e4ee40 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1928,6 +1928,8 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char
1928 return -EINVAL; 1928 return -EINVAL;
1929 /* TODO: sanity checks */ 1929 /* TODO: sanity checks */
1930 card->portno = value; 1930 card->portno = value;
1931 if (card->dev)
1932 card->dev->dev_port = card->portno;
1931 1933
1932 return count; 1934 return count;
1933 1935
@@ -2158,6 +2160,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
2158 card->dev = dev; 2160 card->dev = dev;
2159 card->dev->ml_priv = card; 2161 card->dev->ml_priv = card;
2160 card->dev->netdev_ops = &lcs_netdev_ops; 2162 card->dev->netdev_ops = &lcs_netdev_ops;
2163 card->dev->dev_port = card->portno;
2161 memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH); 2164 memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
2162#ifdef CONFIG_IP_MULTICAST 2165#ifdef CONFIG_IP_MULTICAST
2163 if (!lcs_check_multicast_support(card)) 2166 if (!lcs_check_multicast_support(card))
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 78b98b3e7efa..2a5fec55bf60 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -148,6 +148,7 @@ struct qeth_perf_stats {
148 unsigned int tx_csum; 148 unsigned int tx_csum;
149 unsigned int tx_lin; 149 unsigned int tx_lin;
150 unsigned int tx_linfail; 150 unsigned int tx_linfail;
151 unsigned int rx_csum;
151}; 152};
152 153
153/* Routing stuff */ 154/* Routing stuff */
@@ -712,9 +713,6 @@ enum qeth_discipline_id {
712 713
713struct qeth_discipline { 714struct qeth_discipline {
714 const struct device_type *devtype; 715 const struct device_type *devtype;
715 void (*start_poll)(struct ccw_device *, int, unsigned long);
716 qdio_handler_t *input_handler;
717 qdio_handler_t *output_handler;
718 int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done); 716 int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done);
719 int (*recover)(void *ptr); 717 int (*recover)(void *ptr);
720 int (*setup) (struct ccwgroup_device *); 718 int (*setup) (struct ccwgroup_device *);
@@ -780,9 +778,9 @@ struct qeth_card {
780 struct qeth_card_options options; 778 struct qeth_card_options options;
781 779
782 wait_queue_head_t wait_q; 780 wait_queue_head_t wait_q;
783 spinlock_t vlanlock;
784 spinlock_t mclock; 781 spinlock_t mclock;
785 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 782 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
783 struct mutex vid_list_mutex; /* vid_list */
786 struct list_head vid_list; 784 struct list_head vid_list;
787 DECLARE_HASHTABLE(mac_htable, 4); 785 DECLARE_HASHTABLE(mac_htable, 4);
788 DECLARE_HASHTABLE(ip_htable, 4); 786 DECLARE_HASHTABLE(ip_htable, 4);
@@ -867,6 +865,32 @@ static inline int qeth_get_ip_version(struct sk_buff *skb)
867 } 865 }
868} 866}
869 867
868static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb,
869 u8 flags)
870{
871 if ((card->dev->features & NETIF_F_RXCSUM) &&
872 (flags & QETH_HDR_EXT_CSUM_TRANSP_REQ)) {
873 skb->ip_summed = CHECKSUM_UNNECESSARY;
874 if (card->options.performance_stats)
875 card->perf_stats.rx_csum++;
876 } else {
877 skb->ip_summed = CHECKSUM_NONE;
878 }
879}
880
881static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv)
882{
883 *flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
884 if ((ipv == 4 && ip_hdr(skb)->protocol == IPPROTO_UDP) ||
885 (ipv == 6 && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP))
886 *flags |= QETH_HDR_EXT_UDP;
887 if (ipv == 4) {
888 /* some HW requires combined L3+L4 csum offload: */
889 *flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
890 ip_hdr(skb)->check = 0;
891 }
892}
893
870static inline void qeth_put_buffer_pool_entry(struct qeth_card *card, 894static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
871 struct qeth_buffer_pool_entry *entry) 895 struct qeth_buffer_pool_entry *entry)
872{ 896{
@@ -879,6 +903,27 @@ static inline int qeth_is_diagass_supported(struct qeth_card *card,
879 return card->info.diagass_support & (__u32)cmd; 903 return card->info.diagass_support & (__u32)cmd;
880} 904}
881 905
906int qeth_send_simple_setassparms_prot(struct qeth_card *card,
907 enum qeth_ipa_funcs ipa_func,
908 u16 cmd_code, long data,
909 enum qeth_prot_versions prot);
910/* IPv4 variant */
911static inline int qeth_send_simple_setassparms(struct qeth_card *card,
912 enum qeth_ipa_funcs ipa_func,
913 u16 cmd_code, long data)
914{
915 return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code,
916 data, QETH_PROT_IPV4);
917}
918
919static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
920 enum qeth_ipa_funcs ipa_func,
921 u16 cmd_code, long data)
922{
923 return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code,
924 data, QETH_PROT_IPV6);
925}
926
882extern struct qeth_discipline qeth_l2_discipline; 927extern struct qeth_discipline qeth_l2_discipline;
883extern struct qeth_discipline qeth_l3_discipline; 928extern struct qeth_discipline qeth_l3_discipline;
884extern const struct attribute_group *qeth_generic_attr_groups[]; 929extern const struct attribute_group *qeth_generic_attr_groups[];
@@ -921,13 +966,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
921 struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *, 966 struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
922 struct qeth_hdr **); 967 struct qeth_hdr **);
923void qeth_schedule_recovery(struct qeth_card *); 968void qeth_schedule_recovery(struct qeth_card *);
924void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long);
925int qeth_poll(struct napi_struct *napi, int budget); 969int qeth_poll(struct napi_struct *napi, int budget);
926void qeth_qdio_input_handler(struct ccw_device *,
927 unsigned int, unsigned int, int,
928 int, unsigned long);
929void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
930 int, int, int, unsigned long);
931void qeth_clear_ipacmd_list(struct qeth_card *); 970void qeth_clear_ipacmd_list(struct qeth_card *);
932int qeth_qdio_clear_card(struct qeth_card *, int); 971int qeth_qdio_clear_card(struct qeth_card *, int);
933void qeth_clear_working_pool_list(struct qeth_card *); 972void qeth_clear_working_pool_list(struct qeth_card *);
@@ -979,8 +1018,6 @@ int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
979int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot); 1018int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
980void qeth_trace_features(struct qeth_card *); 1019void qeth_trace_features(struct qeth_card *);
981void qeth_close_dev(struct qeth_card *); 1020void qeth_close_dev(struct qeth_card *);
982int qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs,
983 __u16, long);
984int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16, 1021int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
985 long, 1022 long,
986 int (*reply_cb)(struct qeth_card *, 1023 int (*reply_cb)(struct qeth_card *,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index dffd820731f2..06415b6a8f68 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1467,13 +1467,13 @@ static int qeth_setup_card(struct qeth_card *card)
1467 card->lan_online = 0; 1467 card->lan_online = 0;
1468 card->read_or_write_problem = 0; 1468 card->read_or_write_problem = 0;
1469 card->dev = NULL; 1469 card->dev = NULL;
1470 spin_lock_init(&card->vlanlock);
1471 spin_lock_init(&card->mclock); 1470 spin_lock_init(&card->mclock);
1472 spin_lock_init(&card->lock); 1471 spin_lock_init(&card->lock);
1473 spin_lock_init(&card->ip_lock); 1472 spin_lock_init(&card->ip_lock);
1474 spin_lock_init(&card->thread_mask_lock); 1473 spin_lock_init(&card->thread_mask_lock);
1475 mutex_init(&card->conf_mutex); 1474 mutex_init(&card->conf_mutex);
1476 mutex_init(&card->discipline_mutex); 1475 mutex_init(&card->discipline_mutex);
1476 mutex_init(&card->vid_list_mutex);
1477 card->thread_start_mask = 0; 1477 card->thread_start_mask = 0;
1478 card->thread_allowed_mask = 0; 1478 card->thread_allowed_mask = 0;
1479 card->thread_running_mask = 0; 1479 card->thread_running_mask = 0;
@@ -3588,15 +3588,14 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3588 } 3588 }
3589} 3589}
3590 3590
3591void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue, 3591static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
3592 unsigned long card_ptr) 3592 unsigned long card_ptr)
3593{ 3593{
3594 struct qeth_card *card = (struct qeth_card *)card_ptr; 3594 struct qeth_card *card = (struct qeth_card *)card_ptr;
3595 3595
3596 if (card->dev && (card->dev->flags & IFF_UP)) 3596 if (card->dev && (card->dev->flags & IFF_UP))
3597 napi_schedule(&card->napi); 3597 napi_schedule(&card->napi);
3598} 3598}
3599EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
3600 3599
3601int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) 3600int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3602{ 3601{
@@ -3698,9 +3697,10 @@ out:
3698 return; 3697 return;
3699} 3698}
3700 3699
3701void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err, 3700static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3702 unsigned int queue, int first_elem, int count, 3701 unsigned int qdio_err, int queue,
3703 unsigned long card_ptr) 3702 int first_elem, int count,
3703 unsigned long card_ptr)
3704{ 3704{
3705 struct qeth_card *card = (struct qeth_card *)card_ptr; 3705 struct qeth_card *card = (struct qeth_card *)card_ptr;
3706 3706
@@ -3711,14 +3711,12 @@ void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err,
3711 qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count); 3711 qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
3712 else if (qdio_err) 3712 else if (qdio_err)
3713 qeth_schedule_recovery(card); 3713 qeth_schedule_recovery(card);
3714
3715
3716} 3714}
3717EXPORT_SYMBOL_GPL(qeth_qdio_input_handler);
3718 3715
3719void qeth_qdio_output_handler(struct ccw_device *ccwdev, 3716static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3720 unsigned int qdio_error, int __queue, int first_element, 3717 unsigned int qdio_error, int __queue,
3721 int count, unsigned long card_ptr) 3718 int first_element, int count,
3719 unsigned long card_ptr)
3722{ 3720{
3723 struct qeth_card *card = (struct qeth_card *) card_ptr; 3721 struct qeth_card *card = (struct qeth_card *) card_ptr;
3724 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; 3722 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
@@ -3787,7 +3785,6 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3787 card->perf_stats.outbound_handler_time += qeth_get_micros() - 3785 card->perf_stats.outbound_handler_time += qeth_get_micros() -
3788 card->perf_stats.outbound_handler_start_time; 3786 card->perf_stats.outbound_handler_start_time;
3789} 3787}
3790EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
3791 3788
3792/* We cannot use outbound queue 3 for unicast packets on HiperSockets */ 3789/* We cannot use outbound queue 3 for unicast packets on HiperSockets */
3793static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num) 3790static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
@@ -4995,7 +4992,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
4995 goto out_free_in_sbals; 4992 goto out_free_in_sbals;
4996 } 4993 }
4997 for (i = 0; i < card->qdio.no_in_queues; ++i) 4994 for (i = 0; i < card->qdio.no_in_queues; ++i)
4998 queue_start_poll[i] = card->discipline->start_poll; 4995 queue_start_poll[i] = qeth_qdio_start_poll;
4999 4996
5000 qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll); 4997 qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
5001 4998
@@ -5019,8 +5016,8 @@ static int qeth_qdio_establish(struct qeth_card *card)
5019 init_data.qib_param_field = qib_param_field; 5016 init_data.qib_param_field = qib_param_field;
5020 init_data.no_input_qs = card->qdio.no_in_queues; 5017 init_data.no_input_qs = card->qdio.no_in_queues;
5021 init_data.no_output_qs = card->qdio.no_out_queues; 5018 init_data.no_output_qs = card->qdio.no_out_queues;
5022 init_data.input_handler = card->discipline->input_handler; 5019 init_data.input_handler = qeth_qdio_input_handler;
5023 init_data.output_handler = card->discipline->output_handler; 5020 init_data.output_handler = qeth_qdio_output_handler;
5024 init_data.queue_start_poll_array = queue_start_poll; 5021 init_data.queue_start_poll_array = queue_start_poll;
5025 init_data.int_parm = (unsigned long) card; 5022 init_data.int_parm = (unsigned long) card;
5026 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; 5023 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
@@ -5204,6 +5201,11 @@ retriable:
5204 rc = qeth_query_ipassists(card, QETH_PROT_IPV4); 5201 rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5205 if (rc == -ENOMEM) 5202 if (rc == -ENOMEM)
5206 goto out; 5203 goto out;
5204 if (qeth_is_supported(card, IPA_IPV6)) {
5205 rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
5206 if (rc == -ENOMEM)
5207 goto out;
5208 }
5207 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { 5209 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5208 rc = qeth_query_setadapterparms(card); 5210 rc = qeth_query_setadapterparms(card);
5209 if (rc < 0) { 5211 if (rc < 0) {
@@ -5511,26 +5513,26 @@ int qeth_send_setassparms(struct qeth_card *card,
5511} 5513}
5512EXPORT_SYMBOL_GPL(qeth_send_setassparms); 5514EXPORT_SYMBOL_GPL(qeth_send_setassparms);
5513 5515
5514int qeth_send_simple_setassparms(struct qeth_card *card, 5516int qeth_send_simple_setassparms_prot(struct qeth_card *card,
5515 enum qeth_ipa_funcs ipa_func, 5517 enum qeth_ipa_funcs ipa_func,
5516 __u16 cmd_code, long data) 5518 u16 cmd_code, long data,
5519 enum qeth_prot_versions prot)
5517{ 5520{
5518 int rc; 5521 int rc;
5519 int length = 0; 5522 int length = 0;
5520 struct qeth_cmd_buffer *iob; 5523 struct qeth_cmd_buffer *iob;
5521 5524
5522 QETH_CARD_TEXT(card, 4, "simassp4"); 5525 QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
5523 if (data) 5526 if (data)
5524 length = sizeof(__u32); 5527 length = sizeof(__u32);
5525 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, 5528 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
5526 length, QETH_PROT_IPV4);
5527 if (!iob) 5529 if (!iob)
5528 return -ENOMEM; 5530 return -ENOMEM;
5529 rc = qeth_send_setassparms(card, iob, length, data, 5531 rc = qeth_send_setassparms(card, iob, length, data,
5530 qeth_setassparms_cb, NULL); 5532 qeth_setassparms_cb, NULL);
5531 return rc; 5533 return rc;
5532} 5534}
5533EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms); 5535EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
5534 5536
5535static void qeth_unregister_dbf_views(void) 5537static void qeth_unregister_dbf_views(void)
5536{ 5538{
@@ -6008,7 +6010,8 @@ static struct {
6008 {"tx lin"}, 6010 {"tx lin"},
6009 {"tx linfail"}, 6011 {"tx linfail"},
6010 {"cq handler count"}, 6012 {"cq handler count"},
6011 {"cq handler time"} 6013 {"cq handler time"},
6014 {"rx csum"}
6012}; 6015};
6013 6016
6014int qeth_core_get_sset_count(struct net_device *dev, int stringset) 6017int qeth_core_get_sset_count(struct net_device *dev, int stringset)
@@ -6070,6 +6073,7 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
6070 data[35] = card->perf_stats.tx_linfail; 6073 data[35] = card->perf_stats.tx_linfail;
6071 data[36] = card->perf_stats.cq_cnt; 6074 data[36] = card->perf_stats.cq_cnt;
6072 data[37] = card->perf_stats.cq_time; 6075 data[37] = card->perf_stats.cq_time;
6076 data[38] = card->perf_stats.rx_csum;
6073} 6077}
6074EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats); 6078EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
6075 6079
@@ -6326,14 +6330,15 @@ static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
6326static int qeth_ipa_checksum_run_cmd(struct qeth_card *card, 6330static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
6327 enum qeth_ipa_funcs ipa_func, 6331 enum qeth_ipa_funcs ipa_func,
6328 __u16 cmd_code, long data, 6332 __u16 cmd_code, long data,
6329 struct qeth_checksum_cmd *chksum_cb) 6333 struct qeth_checksum_cmd *chksum_cb,
6334 enum qeth_prot_versions prot)
6330{ 6335{
6331 struct qeth_cmd_buffer *iob; 6336 struct qeth_cmd_buffer *iob;
6332 int rc = -ENOMEM; 6337 int rc = -ENOMEM;
6333 6338
6334 QETH_CARD_TEXT(card, 4, "chkdocmd"); 6339 QETH_CARD_TEXT(card, 4, "chkdocmd");
6335 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, 6340 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
6336 sizeof(__u32), QETH_PROT_IPV4); 6341 sizeof(__u32), prot);
6337 if (iob) 6342 if (iob)
6338 rc = qeth_send_setassparms(card, iob, sizeof(__u32), data, 6343 rc = qeth_send_setassparms(card, iob, sizeof(__u32), data,
6339 qeth_ipa_checksum_run_cmd_cb, 6344 qeth_ipa_checksum_run_cmd_cb,
@@ -6341,16 +6346,17 @@ static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
6341 return rc; 6346 return rc;
6342} 6347}
6343 6348
6344static int qeth_send_checksum_on(struct qeth_card *card, int cstype) 6349static int qeth_send_checksum_on(struct qeth_card *card, int cstype,
6350 enum qeth_prot_versions prot)
6345{ 6351{
6346 const __u32 required_features = QETH_IPA_CHECKSUM_IP_HDR | 6352 u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6347 QETH_IPA_CHECKSUM_UDP |
6348 QETH_IPA_CHECKSUM_TCP;
6349 struct qeth_checksum_cmd chksum_cb; 6353 struct qeth_checksum_cmd chksum_cb;
6350 int rc; 6354 int rc;
6351 6355
6356 if (prot == QETH_PROT_IPV4)
6357 required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6352 rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0, 6358 rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0,
6353 &chksum_cb); 6359 &chksum_cb, prot);
6354 if (!rc) { 6360 if (!rc) {
6355 if ((required_features & chksum_cb.supported) != 6361 if ((required_features & chksum_cb.supported) !=
6356 required_features) 6362 required_features)
@@ -6362,37 +6368,42 @@ static int qeth_send_checksum_on(struct qeth_card *card, int cstype)
6362 QETH_CARD_IFNAME(card)); 6368 QETH_CARD_IFNAME(card));
6363 } 6369 }
6364 if (rc) { 6370 if (rc) {
6365 qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0); 6371 qeth_send_simple_setassparms_prot(card, cstype,
6372 IPA_CMD_ASS_STOP, 0, prot);
6366 dev_warn(&card->gdev->dev, 6373 dev_warn(&card->gdev->dev,
6367 "Starting HW checksumming for %s failed, using SW checksumming\n", 6374 "Starting HW IPv%d checksumming for %s failed, using SW checksumming\n",
6368 QETH_CARD_IFNAME(card)); 6375 prot, QETH_CARD_IFNAME(card));
6369 return rc; 6376 return rc;
6370 } 6377 }
6371 rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE, 6378 rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
6372 chksum_cb.supported, &chksum_cb); 6379 chksum_cb.supported, &chksum_cb,
6380 prot);
6373 if (!rc) { 6381 if (!rc) {
6374 if ((required_features & chksum_cb.enabled) != 6382 if ((required_features & chksum_cb.enabled) !=
6375 required_features) 6383 required_features)
6376 rc = -EIO; 6384 rc = -EIO;
6377 } 6385 }
6378 if (rc) { 6386 if (rc) {
6379 qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0); 6387 qeth_send_simple_setassparms_prot(card, cstype,
6388 IPA_CMD_ASS_STOP, 0, prot);
6380 dev_warn(&card->gdev->dev, 6389 dev_warn(&card->gdev->dev,
6381 "Enabling HW checksumming for %s failed, using SW checksumming\n", 6390 "Enabling HW IPv%d checksumming for %s failed, using SW checksumming\n",
6382 QETH_CARD_IFNAME(card)); 6391 prot, QETH_CARD_IFNAME(card));
6383 return rc; 6392 return rc;
6384 } 6393 }
6385 6394
6386 dev_info(&card->gdev->dev, "HW Checksumming (%sbound) enabled\n", 6395 dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
6387 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out"); 6396 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6388 return 0; 6397 return 0;
6389} 6398}
6390 6399
6391static int qeth_set_ipa_csum(struct qeth_card *card, int on, int cstype) 6400static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
6401 enum qeth_prot_versions prot)
6392{ 6402{
6393 int rc = (on) ? qeth_send_checksum_on(card, cstype) 6403 int rc = (on) ? qeth_send_checksum_on(card, cstype, prot)
6394 : qeth_send_simple_setassparms(card, cstype, 6404 : qeth_send_simple_setassparms_prot(card, cstype,
6395 IPA_CMD_ASS_STOP, 0); 6405 IPA_CMD_ASS_STOP, 0,
6406 prot);
6396 return rc ? -EIO : 0; 6407 return rc ? -EIO : 0;
6397} 6408}
6398 6409
@@ -6419,8 +6430,31 @@ static int qeth_set_ipa_tso(struct qeth_card *card, int on)
6419 return rc; 6430 return rc;
6420} 6431}
6421 6432
6422#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO) 6433static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
6434{
6435 int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
6436 int rc_ipv6;
6437
6438 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
6439 rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6440 QETH_PROT_IPV4);
6441 if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6442 /* no/one Offload Assist available, so the rc is trivial */
6443 return rc_ipv4;
6423 6444
6445 rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6446 QETH_PROT_IPV6);
6447
6448 if (on)
6449 /* enable: success if any Assist is active */
6450 return (rc_ipv6) ? rc_ipv4 : 0;
6451
6452 /* disable: failure if any Assist is still active */
6453 return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
6454}
6455
6456#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
6457 NETIF_F_IPV6_CSUM)
6424/** 6458/**
6425 * qeth_recover_features() - Restore device features after recovery 6459 * qeth_recover_features() - Restore device features after recovery
6426 * @dev: the recovering net_device 6460 * @dev: the recovering net_device
@@ -6455,16 +6489,19 @@ int qeth_set_features(struct net_device *dev, netdev_features_t features)
6455 QETH_DBF_HEX(SETUP, 2, &features, sizeof(features)); 6489 QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
6456 6490
6457 if ((changed & NETIF_F_IP_CSUM)) { 6491 if ((changed & NETIF_F_IP_CSUM)) {
6458 rc = qeth_set_ipa_csum(card, 6492 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
6459 features & NETIF_F_IP_CSUM ? 1 : 0, 6493 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
6460 IPA_OUTBOUND_CHECKSUM);
6461 if (rc) 6494 if (rc)
6462 changed ^= NETIF_F_IP_CSUM; 6495 changed ^= NETIF_F_IP_CSUM;
6463 } 6496 }
6464 if ((changed & NETIF_F_RXCSUM)) { 6497 if (changed & NETIF_F_IPV6_CSUM) {
6465 rc = qeth_set_ipa_csum(card, 6498 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
6466 features & NETIF_F_RXCSUM ? 1 : 0, 6499 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
6467 IPA_INBOUND_CHECKSUM); 6500 if (rc)
6501 changed ^= NETIF_F_IPV6_CSUM;
6502 }
6503 if (changed & NETIF_F_RXCSUM) {
6504 rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
6468 if (rc) 6505 if (rc)
6469 changed ^= NETIF_F_RXCSUM; 6506 changed ^= NETIF_F_RXCSUM;
6470 } 6507 }
@@ -6491,7 +6528,10 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
6491 QETH_DBF_TEXT(SETUP, 2, "fixfeat"); 6528 QETH_DBF_TEXT(SETUP, 2, "fixfeat");
6492 if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) 6529 if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
6493 features &= ~NETIF_F_IP_CSUM; 6530 features &= ~NETIF_F_IP_CSUM;
6494 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) 6531 if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
6532 features &= ~NETIF_F_IPV6_CSUM;
6533 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
6534 !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6495 features &= ~NETIF_F_RXCSUM; 6535 features &= ~NETIF_F_RXCSUM;
6496 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) 6536 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
6497 features &= ~NETIF_F_TSO; 6537 features &= ~NETIF_F_TSO;
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index f4d1ec0b8f5a..878e62f35169 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -246,6 +246,8 @@ enum qeth_ipa_funcs {
246 IPA_QUERY_ARP_ASSIST = 0x00040000L, 246 IPA_QUERY_ARP_ASSIST = 0x00040000L,
247 IPA_INBOUND_TSO = 0x00080000L, 247 IPA_INBOUND_TSO = 0x00080000L,
248 IPA_OUTBOUND_TSO = 0x00100000L, 248 IPA_OUTBOUND_TSO = 0x00100000L,
249 IPA_INBOUND_CHECKSUM_V6 = 0x00400000L,
250 IPA_OUTBOUND_CHECKSUM_V6 = 0x00800000L,
249}; 251};
250 252
251/* SETIP/DELIP IPA Command: ***************************************************/ 253/* SETIP/DELIP IPA Command: ***************************************************/
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index ae81534de912..c3f18afb368b 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -144,6 +144,8 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
144 goto out; 144 goto out;
145 } 145 }
146 card->info.portno = portno; 146 card->info.portno = portno;
147 if (card->dev)
148 card->dev->dev_port = portno;
147out: 149out:
148 mutex_unlock(&card->conf_mutex); 150 mutex_unlock(&card->conf_mutex);
149 return rc ? rc : count; 151 return rc ? rc : count;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index b8079f2a65b3..a7cb37da6a21 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -17,7 +17,6 @@
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/etherdevice.h> 19#include <linux/etherdevice.h>
20#include <linux/ip.h>
21#include <linux/list.h> 20#include <linux/list.h>
22#include <linux/hash.h> 21#include <linux/hash.h>
23#include <linux/hashtable.h> 22#include <linux/hashtable.h>
@@ -195,23 +194,6 @@ static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
195 return RTN_UNSPEC; 194 return RTN_UNSPEC;
196} 195}
197 196
198static void qeth_l2_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr,
199 struct sk_buff *skb)
200{
201 struct iphdr *iph = ip_hdr(skb);
202
203 /* tcph->check contains already the pseudo hdr checksum
204 * so just set the header flags
205 */
206 if (iph->protocol == IPPROTO_UDP)
207 hdr->hdr.l2.flags[1] |= QETH_HDR_EXT_UDP;
208 hdr->hdr.l2.flags[1] |= QETH_HDR_EXT_CSUM_TRANSP_REQ |
209 QETH_HDR_EXT_CSUM_HDR_REQ;
210 iph->check = 0;
211 if (card->options.performance_stats)
212 card->perf_stats.tx_csum++;
213}
214
215static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb, 197static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb,
216 int cast_type, unsigned int data_len) 198 int cast_type, unsigned int data_len)
217{ 199{
@@ -297,12 +279,13 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
297static void qeth_l2_process_vlans(struct qeth_card *card) 279static void qeth_l2_process_vlans(struct qeth_card *card)
298{ 280{
299 struct qeth_vlan_vid *id; 281 struct qeth_vlan_vid *id;
282
300 QETH_CARD_TEXT(card, 3, "L2prcvln"); 283 QETH_CARD_TEXT(card, 3, "L2prcvln");
301 spin_lock_bh(&card->vlanlock); 284 mutex_lock(&card->vid_list_mutex);
302 list_for_each_entry(id, &card->vid_list, list) { 285 list_for_each_entry(id, &card->vid_list, list) {
303 qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN); 286 qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN);
304 } 287 }
305 spin_unlock_bh(&card->vlanlock); 288 mutex_unlock(&card->vid_list_mutex);
306} 289}
307 290
308static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, 291static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
@@ -319,7 +302,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
319 QETH_CARD_TEXT(card, 3, "aidREC"); 302 QETH_CARD_TEXT(card, 3, "aidREC");
320 return 0; 303 return 0;
321 } 304 }
322 id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); 305 id = kmalloc(sizeof(*id), GFP_KERNEL);
323 if (id) { 306 if (id) {
324 id->vid = vid; 307 id->vid = vid;
325 rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); 308 rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
@@ -327,9 +310,9 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
327 kfree(id); 310 kfree(id);
328 return rc; 311 return rc;
329 } 312 }
330 spin_lock_bh(&card->vlanlock); 313 mutex_lock(&card->vid_list_mutex);
331 list_add_tail(&id->list, &card->vid_list); 314 list_add_tail(&id->list, &card->vid_list);
332 spin_unlock_bh(&card->vlanlock); 315 mutex_unlock(&card->vid_list_mutex);
333 } else { 316 } else {
334 return -ENOMEM; 317 return -ENOMEM;
335 } 318 }
@@ -348,7 +331,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
348 QETH_CARD_TEXT(card, 3, "kidREC"); 331 QETH_CARD_TEXT(card, 3, "kidREC");
349 return 0; 332 return 0;
350 } 333 }
351 spin_lock_bh(&card->vlanlock); 334 mutex_lock(&card->vid_list_mutex);
352 list_for_each_entry(id, &card->vid_list, list) { 335 list_for_each_entry(id, &card->vid_list, list) {
353 if (id->vid == vid) { 336 if (id->vid == vid) {
354 list_del(&id->list); 337 list_del(&id->list);
@@ -356,7 +339,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
356 break; 339 break;
357 } 340 }
358 } 341 }
359 spin_unlock_bh(&card->vlanlock); 342 mutex_unlock(&card->vid_list_mutex);
360 if (tmpid) { 343 if (tmpid) {
361 rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); 344 rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
362 kfree(tmpid); 345 kfree(tmpid);
@@ -423,15 +406,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
423 switch (hdr->hdr.l2.id) { 406 switch (hdr->hdr.l2.id) {
424 case QETH_HEADER_TYPE_LAYER2: 407 case QETH_HEADER_TYPE_LAYER2:
425 skb->protocol = eth_type_trans(skb, skb->dev); 408 skb->protocol = eth_type_trans(skb, skb->dev);
426 if ((card->dev->features & NETIF_F_RXCSUM) 409 qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]);
427 && ((hdr->hdr.l2.flags[1] &
428 (QETH_HDR_EXT_CSUM_HDR_REQ |
429 QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
430 (QETH_HDR_EXT_CSUM_HDR_REQ |
431 QETH_HDR_EXT_CSUM_TRANSP_REQ)))
432 skb->ip_summed = CHECKSUM_UNNECESSARY;
433 else
434 skb->ip_summed = CHECKSUM_NONE;
435 if (skb->protocol == htons(ETH_P_802_2)) 410 if (skb->protocol == htons(ETH_P_802_2))
436 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; 411 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
437 len = skb->len; 412 len = skb->len;
@@ -464,7 +439,6 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
464static int qeth_l2_request_initial_mac(struct qeth_card *card) 439static int qeth_l2_request_initial_mac(struct qeth_card *card)
465{ 440{
466 int rc = 0; 441 int rc = 0;
467 char vendor_pre[] = {0x02, 0x00, 0x00};
468 442
469 QETH_DBF_TEXT(SETUP, 2, "l2reqmac"); 443 QETH_DBF_TEXT(SETUP, 2, "l2reqmac");
470 QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card)); 444 QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));
@@ -484,16 +458,20 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
484 card->info.type == QETH_CARD_TYPE_OSX || 458 card->info.type == QETH_CARD_TYPE_OSX ||
485 card->info.guestlan) { 459 card->info.guestlan) {
486 rc = qeth_setadpparms_change_macaddr(card); 460 rc = qeth_setadpparms_change_macaddr(card);
487 if (rc) { 461 if (!rc)
488 QETH_DBF_MESSAGE(2, "couldn't get MAC address on " 462 goto out;
489 "device %s: x%x\n", CARD_BUS_ID(card), rc); 463 QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %s: x%x\n",
490 QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc); 464 CARD_BUS_ID(card), rc);
491 return rc; 465 QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
492 } 466 /* fall back once more: */
493 } else {
494 eth_random_addr(card->dev->dev_addr);
495 memcpy(card->dev->dev_addr, vendor_pre, 3);
496 } 467 }
468
469 /* some devices don't support a custom MAC address: */
470 if (card->info.type == QETH_CARD_TYPE_OSM ||
471 card->info.type == QETH_CARD_TYPE_OSX)
472 return (rc) ? rc : -EADDRNOTAVAIL;
473 eth_hw_addr_random(card->dev);
474
497out: 475out:
498 QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, card->dev->addr_len); 476 QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, card->dev->addr_len);
499 return 0; 477 return 0;
@@ -685,7 +663,8 @@ out:
685} 663}
686 664
687static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, 665static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
688 struct qeth_qdio_out_q *queue, int cast_type) 666 struct qeth_qdio_out_q *queue, int cast_type,
667 int ipv)
689{ 668{
690 int push_len = sizeof(struct qeth_hdr); 669 int push_len = sizeof(struct qeth_hdr);
691 unsigned int elements, nr_frags; 670 unsigned int elements, nr_frags;
@@ -723,8 +702,11 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
723 hdr_elements = 1; 702 hdr_elements = 1;
724 } 703 }
725 qeth_l2_fill_header(hdr, skb, cast_type, skb->len - push_len); 704 qeth_l2_fill_header(hdr, skb, cast_type, skb->len - push_len);
726 if (skb->ip_summed == CHECKSUM_PARTIAL) 705 if (skb->ip_summed == CHECKSUM_PARTIAL) {
727 qeth_l2_hdr_csum(card, hdr, skb); 706 qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
707 if (card->options.performance_stats)
708 card->perf_stats.tx_csum++;
709 }
728 710
729 elements = qeth_get_elements_no(card, skb, hdr_elements, 0); 711 elements = qeth_get_elements_no(card, skb, hdr_elements, 0);
730 if (!elements) { 712 if (!elements) {
@@ -776,6 +758,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
776{ 758{
777 struct qeth_card *card = dev->ml_priv; 759 struct qeth_card *card = dev->ml_priv;
778 int cast_type = qeth_l2_get_cast_type(card, skb); 760 int cast_type = qeth_l2_get_cast_type(card, skb);
761 int ipv = qeth_get_ip_version(skb);
779 struct qeth_qdio_out_q *queue; 762 struct qeth_qdio_out_q *queue;
780 int tx_bytes = skb->len; 763 int tx_bytes = skb->len;
781 int rc; 764 int rc;
@@ -783,7 +766,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
783 if (card->qdio.do_prio_queueing || (cast_type && 766 if (card->qdio.do_prio_queueing || (cast_type &&
784 card->info.is_multicast_different)) 767 card->info.is_multicast_different))
785 queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb, 768 queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb,
786 qeth_get_ip_version(skb), cast_type)]; 769 ipv, cast_type)];
787 else 770 else
788 queue = card->qdio.out_qs[card->qdio.default_out_queue]; 771 queue = card->qdio.out_qs[card->qdio.default_out_queue];
789 772
@@ -806,7 +789,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
806 rc = qeth_l2_xmit_iqd(card, skb, queue, cast_type); 789 rc = qeth_l2_xmit_iqd(card, skb, queue, cast_type);
807 break; 790 break;
808 default: 791 default:
809 rc = qeth_l2_xmit_osa(card, skb, queue, cast_type); 792 rc = qeth_l2_xmit_osa(card, skb, queue, cast_type, ipv);
810 } 793 }
811 794
812 if (!rc) { 795 if (!rc) {
@@ -983,6 +966,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
983 card->dev->mtu = card->info.initial_mtu; 966 card->dev->mtu = card->info.initial_mtu;
984 card->dev->min_mtu = 64; 967 card->dev->min_mtu = 64;
985 card->dev->max_mtu = ETH_MAX_MTU; 968 card->dev->max_mtu = ETH_MAX_MTU;
969 card->dev->dev_port = card->info.portno;
986 card->dev->netdev_ops = &qeth_l2_netdev_ops; 970 card->dev->netdev_ops = &qeth_l2_netdev_ops;
987 if (card->info.type == QETH_CARD_TYPE_OSN) { 971 if (card->info.type == QETH_CARD_TYPE_OSN) {
988 card->dev->ethtool_ops = &qeth_l2_osn_ops; 972 card->dev->ethtool_ops = &qeth_l2_osn_ops;
@@ -1011,10 +995,15 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
1011 card->dev->hw_features |= NETIF_F_IP_CSUM; 995 card->dev->hw_features |= NETIF_F_IP_CSUM;
1012 card->dev->vlan_features |= NETIF_F_IP_CSUM; 996 card->dev->vlan_features |= NETIF_F_IP_CSUM;
1013 } 997 }
1014 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) { 998 }
1015 card->dev->hw_features |= NETIF_F_RXCSUM; 999 if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
1016 card->dev->vlan_features |= NETIF_F_RXCSUM; 1000 card->dev->hw_features |= NETIF_F_IPV6_CSUM;
1017 } 1001 card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
1002 }
1003 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM) ||
1004 qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) {
1005 card->dev->hw_features |= NETIF_F_RXCSUM;
1006 card->dev->vlan_features |= NETIF_F_RXCSUM;
1018 } 1007 }
1019 1008
1020 card->info.broadcast_capable = 1; 1009 card->info.broadcast_capable = 1;
@@ -1315,9 +1304,6 @@ static int qeth_l2_control_event(struct qeth_card *card,
1315 1304
1316struct qeth_discipline qeth_l2_discipline = { 1305struct qeth_discipline qeth_l2_discipline = {
1317 .devtype = &qeth_l2_devtype, 1306 .devtype = &qeth_l2_devtype,
1318 .start_poll = qeth_qdio_start_poll,
1319 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
1320 .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
1321 .process_rx_buffer = qeth_l2_process_inbound_buffer, 1307 .process_rx_buffer = qeth_l2_process_inbound_buffer,
1322 .recover = qeth_l2_recover, 1308 .recover = qeth_l2_recover,
1323 .setup = qeth_l2_probe_device, 1309 .setup = qeth_l2_probe_device,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index c1a16a74aa83..e7fa479adf47 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -735,22 +735,6 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card)
735 return rc; 735 return rc;
736} 736}
737 737
738static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
739 enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
740{
741 int rc;
742 struct qeth_cmd_buffer *iob;
743
744 QETH_CARD_TEXT(card, 4, "simassp6");
745 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
746 0, QETH_PROT_IPV6);
747 if (!iob)
748 return -ENOMEM;
749 rc = qeth_send_setassparms(card, iob, 0, 0,
750 qeth_setassparms_cb, NULL);
751 return rc;
752}
753
754static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card) 738static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
755{ 739{
756 int rc; 740 int rc;
@@ -851,14 +835,6 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
851 835
852 QETH_CARD_TEXT(card, 3, "softipv6"); 836 QETH_CARD_TEXT(card, 3, "softipv6");
853 837
854 rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
855 if (rc) {
856 dev_err(&card->gdev->dev,
857 "Activating IPv6 support for %s failed\n",
858 QETH_CARD_IFNAME(card));
859 return rc;
860 }
861
862 if (card->info.type == QETH_CARD_TYPE_IQD) 838 if (card->info.type == QETH_CARD_TYPE_IQD)
863 goto out; 839 goto out;
864 840
@@ -870,16 +846,16 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
870 QETH_CARD_IFNAME(card)); 846 QETH_CARD_IFNAME(card));
871 return rc; 847 return rc;
872 } 848 }
873 rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6, 849 rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6,
874 IPA_CMD_ASS_START); 850 IPA_CMD_ASS_START, 0);
875 if (rc) { 851 if (rc) {
876 dev_err(&card->gdev->dev, 852 dev_err(&card->gdev->dev,
877 "Activating IPv6 support for %s failed\n", 853 "Activating IPv6 support for %s failed\n",
878 QETH_CARD_IFNAME(card)); 854 QETH_CARD_IFNAME(card));
879 return rc; 855 return rc;
880 } 856 }
881 rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU, 857 rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU,
882 IPA_CMD_ASS_START); 858 IPA_CMD_ASS_START, 0);
883 if (rc) { 859 if (rc) {
884 dev_warn(&card->gdev->dev, 860 dev_warn(&card->gdev->dev,
885 "Enabling the passthrough mode for %s failed\n", 861 "Enabling the passthrough mode for %s failed\n",
@@ -1293,91 +1269,6 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
1293 in6_dev_put(in6_dev); 1269 in6_dev_put(in6_dev);
1294} 1270}
1295 1271
1296static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
1297 unsigned short vid)
1298{
1299 struct in_device *in_dev;
1300 struct in_ifaddr *ifa;
1301 struct qeth_ipaddr *addr;
1302 struct net_device *netdev;
1303
1304 QETH_CARD_TEXT(card, 4, "frvaddr4");
1305
1306 netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid);
1307 if (!netdev)
1308 return;
1309 in_dev = in_dev_get(netdev);
1310 if (!in_dev)
1311 return;
1312
1313 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
1314 if (!addr)
1315 goto out;
1316
1317 spin_lock_bh(&card->ip_lock);
1318
1319 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1320 addr->u.a4.addr = be32_to_cpu(ifa->ifa_address);
1321 addr->u.a4.mask = be32_to_cpu(ifa->ifa_mask);
1322 addr->type = QETH_IP_TYPE_NORMAL;
1323 qeth_l3_delete_ip(card, addr);
1324 }
1325
1326 spin_unlock_bh(&card->ip_lock);
1327
1328 kfree(addr);
1329out:
1330 in_dev_put(in_dev);
1331}
1332
1333static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
1334 unsigned short vid)
1335{
1336 struct inet6_dev *in6_dev;
1337 struct inet6_ifaddr *ifa;
1338 struct qeth_ipaddr *addr;
1339 struct net_device *netdev;
1340
1341 QETH_CARD_TEXT(card, 4, "frvaddr6");
1342
1343 netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid);
1344 if (!netdev)
1345 return;
1346
1347 in6_dev = in6_dev_get(netdev);
1348 if (!in6_dev)
1349 return;
1350
1351 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
1352 if (!addr)
1353 goto out;
1354
1355 spin_lock_bh(&card->ip_lock);
1356
1357 list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
1358 memcpy(&addr->u.a6.addr, &ifa->addr,
1359 sizeof(struct in6_addr));
1360 addr->u.a6.pfxlen = ifa->prefix_len;
1361 addr->type = QETH_IP_TYPE_NORMAL;
1362 qeth_l3_delete_ip(card, addr);
1363 }
1364
1365 spin_unlock_bh(&card->ip_lock);
1366
1367 kfree(addr);
1368out:
1369 in6_dev_put(in6_dev);
1370}
1371
1372static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
1373 unsigned short vid)
1374{
1375 rcu_read_lock();
1376 qeth_l3_free_vlan_addresses4(card, vid);
1377 qeth_l3_free_vlan_addresses6(card, vid);
1378 rcu_read_unlock();
1379}
1380
1381static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, 1272static int qeth_l3_vlan_rx_add_vid(struct net_device *dev,
1382 __be16 proto, u16 vid) 1273 __be16 proto, u16 vid)
1383{ 1274{
@@ -1398,8 +1289,6 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
1398 QETH_CARD_TEXT(card, 3, "kidREC"); 1289 QETH_CARD_TEXT(card, 3, "kidREC");
1399 return 0; 1290 return 0;
1400 } 1291 }
1401 /* unregister IP addresses of vlan device */
1402 qeth_l3_free_vlan_addresses(card, vid);
1403 clear_bit(vid, card->active_vlans); 1292 clear_bit(vid, card->active_vlans);
1404 qeth_l3_set_rx_mode(dev); 1293 qeth_l3_set_rx_mode(dev);
1405 return 0; 1294 return 0;
@@ -1454,17 +1343,7 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
1454 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); 1343 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
1455 } 1344 }
1456 1345
1457 if (card->dev->features & NETIF_F_RXCSUM) { 1346 qeth_rx_csum(card, skb, hdr->hdr.l3.ext_flags);
1458 if ((hdr->hdr.l3.ext_flags &
1459 (QETH_HDR_EXT_CSUM_HDR_REQ |
1460 QETH_HDR_EXT_CSUM_TRANSP_REQ)) ==
1461 (QETH_HDR_EXT_CSUM_HDR_REQ |
1462 QETH_HDR_EXT_CSUM_TRANSP_REQ))
1463 skb->ip_summed = CHECKSUM_UNNECESSARY;
1464 else
1465 skb->ip_summed = CHECKSUM_NONE;
1466 } else
1467 skb->ip_summed = CHECKSUM_NONE;
1468} 1347}
1469 1348
1470static int qeth_l3_process_inbound_buffer(struct qeth_card *card, 1349static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
@@ -2210,23 +2089,6 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2210 rcu_read_unlock(); 2089 rcu_read_unlock();
2211} 2090}
2212 2091
2213static void qeth_l3_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr,
2214 struct sk_buff *skb)
2215{
2216 struct iphdr *iph = ip_hdr(skb);
2217
2218 /* tcph->check contains already the pseudo hdr checksum
2219 * so just set the header flags
2220 */
2221 if (iph->protocol == IPPROTO_UDP)
2222 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP;
2223 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ |
2224 QETH_HDR_EXT_CSUM_HDR_REQ;
2225 iph->check = 0;
2226 if (card->options.performance_stats)
2227 card->perf_stats.tx_csum++;
2228}
2229
2230static void qeth_tso_fill_header(struct qeth_card *card, 2092static void qeth_tso_fill_header(struct qeth_card *card,
2231 struct qeth_hdr *qhdr, struct sk_buff *skb) 2093 struct qeth_hdr *qhdr, struct sk_buff *skb)
2232{ 2094{
@@ -2418,8 +2280,11 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
2418 } 2280 }
2419 } 2281 }
2420 2282
2421 if (skb->ip_summed == CHECKSUM_PARTIAL) 2283 if (new_skb->ip_summed == CHECKSUM_PARTIAL) {
2422 qeth_l3_hdr_csum(card, hdr, new_skb); 2284 qeth_tx_csum(new_skb, &hdr->hdr.l3.ext_flags, ipv);
2285 if (card->options.performance_stats)
2286 card->perf_stats.tx_csum++;
2287 }
2423 } 2288 }
2424 2289
2425 elements = use_tso ? 2290 elements = use_tso ?
@@ -2620,28 +2485,32 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
2620 (card->info.link_type == QETH_LINK_TYPE_HSTR)) { 2485 (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
2621 pr_info("qeth_l3: ignoring TR device\n"); 2486 pr_info("qeth_l3: ignoring TR device\n");
2622 return -ENODEV; 2487 return -ENODEV;
2623 } else { 2488 }
2624 card->dev = alloc_etherdev(0); 2489
2625 if (!card->dev) 2490 card->dev = alloc_etherdev(0);
2626 return -ENODEV; 2491 if (!card->dev)
2627 card->dev->netdev_ops = &qeth_l3_osa_netdev_ops; 2492 return -ENODEV;
2628 2493 card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
2629 /*IPv6 address autoconfiguration stuff*/ 2494
2630 qeth_l3_get_unique_id(card); 2495 /*IPv6 address autoconfiguration stuff*/
2631 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) 2496 qeth_l3_get_unique_id(card);
2632 card->dev->dev_id = card->info.unique_id & 2497 if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
2633 0xffff; 2498 card->dev->dev_id = card->info.unique_id & 0xffff;
2634 2499
2635 card->dev->hw_features |= NETIF_F_SG; 2500 card->dev->hw_features |= NETIF_F_SG;
2636 card->dev->vlan_features |= NETIF_F_SG; 2501 card->dev->vlan_features |= NETIF_F_SG;
2637 2502
2638 if (!card->info.guestlan) { 2503 if (!card->info.guestlan) {
2639 card->dev->features |= NETIF_F_SG; 2504 card->dev->features |= NETIF_F_SG;
2640 card->dev->hw_features |= NETIF_F_TSO | 2505 card->dev->hw_features |= NETIF_F_TSO |
2641 NETIF_F_RXCSUM | NETIF_F_IP_CSUM; 2506 NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
2642 card->dev->vlan_features |= NETIF_F_TSO | 2507 card->dev->vlan_features |= NETIF_F_TSO |
2643 NETIF_F_RXCSUM | NETIF_F_IP_CSUM; 2508 NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
2644 } 2509 }
2510
2511 if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
2512 card->dev->hw_features |= NETIF_F_IPV6_CSUM;
2513 card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
2645 } 2514 }
2646 } else if (card->info.type == QETH_CARD_TYPE_IQD) { 2515 } else if (card->info.type == QETH_CARD_TYPE_IQD) {
2647 card->dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, 2516 card->dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN,
@@ -2663,6 +2532,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
2663 card->dev->mtu = card->info.initial_mtu; 2532 card->dev->mtu = card->info.initial_mtu;
2664 card->dev->min_mtu = 64; 2533 card->dev->min_mtu = 64;
2665 card->dev->max_mtu = ETH_MAX_MTU; 2534 card->dev->max_mtu = ETH_MAX_MTU;
2535 card->dev->dev_port = card->info.portno;
2666 card->dev->ethtool_ops = &qeth_l3_ethtool_ops; 2536 card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
2667 card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | 2537 card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
2668 NETIF_F_HW_VLAN_CTAG_RX | 2538 NETIF_F_HW_VLAN_CTAG_RX |
@@ -2960,9 +2830,6 @@ static int qeth_l3_control_event(struct qeth_card *card,
2960 2830
2961struct qeth_discipline qeth_l3_discipline = { 2831struct qeth_discipline qeth_l3_discipline = {
2962 .devtype = &qeth_l3_devtype, 2832 .devtype = &qeth_l3_devtype,
2963 .start_poll = qeth_qdio_start_poll,
2964 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
2965 .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
2966 .process_rx_buffer = qeth_l3_process_inbound_buffer, 2833 .process_rx_buffer = qeth_l3_process_inbound_buffer,
2967 .recover = qeth_l3_recover, 2834 .recover = qeth_l3_recover,
2968 .setup = qeth_l3_probe_device, 2835 .setup = qeth_l3_probe_device,
diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c
index c44d7c7ffc92..1754f55e2fac 100644
--- a/drivers/sbus/char/oradax.c
+++ b/drivers/sbus/char/oradax.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * This program is free software: you can redistribute it and/or modify 4 * This program is free software: you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation, either version 3 of the License, or 6 * the Free Software Foundation, either version 2 of the License, or
7 * (at your option) any later version. 7 * (at your option) any later version.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, 9 * This program is distributed in the hope that it will be useful,
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index abddde11982b..98597b59c12a 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -296,7 +296,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
296 "Number of Abort FW Timeouts: %lld\n" 296 "Number of Abort FW Timeouts: %lld\n"
297 "Number of Abort IO NOT Found: %lld\n" 297 "Number of Abort IO NOT Found: %lld\n"
298 298
299 "Abord issued times: \n" 299 "Abort issued times: \n"
300 " < 6 sec : %lld\n" 300 " < 6 sec : %lld\n"
301 " 6 sec - 20 sec : %lld\n" 301 " 6 sec - 20 sec : %lld\n"
302 " 20 sec - 30 sec : %lld\n" 302 " 20 sec - 30 sec : %lld\n"
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index edb7be786c65..9e8de1462593 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -291,7 +291,7 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
291 * Note: We have not moved the current phy_index so we will actually 291 * Note: We have not moved the current phy_index so we will actually
292 * compare the startting phy with itself. 292 * compare the startting phy with itself.
293 * This is expected and required to add the phy to the port. */ 293 * This is expected and required to add the phy to the port. */
294 while (phy_index < SCI_MAX_PHYS) { 294 for (; phy_index < SCI_MAX_PHYS; phy_index++) {
295 if ((phy_mask & (1 << phy_index)) == 0) 295 if ((phy_mask & (1 << phy_index)) == 0)
296 continue; 296 continue;
297 sci_phy_get_sas_address(&ihost->phys[phy_index], 297 sci_phy_get_sas_address(&ihost->phys[phy_index],
@@ -311,7 +311,6 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
311 &ihost->phys[phy_index]); 311 &ihost->phys[phy_index]);
312 312
313 assigned_phy_mask |= (1 << phy_index); 313 assigned_phy_mask |= (1 << phy_index);
314 phy_index++;
315 } 314 }
316 315
317 } 316 }
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index ce97cde3b41c..f4d988dd1e9d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1124,12 +1124,12 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
1124 goto fail_fw_init; 1124 goto fail_fw_init;
1125 } 1125 }
1126 1126
1127 ret = 0; 1127 return 0;
1128 1128
1129fail_fw_init: 1129fail_fw_init:
1130 dev_err(&instance->pdev->dev, 1130 dev_err(&instance->pdev->dev,
1131 "Init cmd return status %s for SCSI host %d\n", 1131 "Init cmd return status FAILED for SCSI host %d\n",
1132 ret ? "FAILED" : "SUCCESS", instance->host->host_no); 1132 instance->host->host_no);
1133 1133
1134 return ret; 1134 return ret;
1135} 1135}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 9ef5e3b810f6..656c98e116a9 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -234,11 +234,13 @@ static const char *sdebug_version_date = "20180128";
234#define F_INV_OP 0x200 234#define F_INV_OP 0x200
235#define F_FAKE_RW 0x400 235#define F_FAKE_RW 0x400
236#define F_M_ACCESS 0x800 /* media access */ 236#define F_M_ACCESS 0x800 /* media access */
237#define F_LONG_DELAY 0x1000 237#define F_SSU_DELAY 0x1000
238#define F_SYNC_DELAY 0x2000
238 239
239#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR) 240#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
240#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW) 241#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
241#define FF_SA (F_SA_HIGH | F_SA_LOW) 242#define FF_SA (F_SA_HIGH | F_SA_LOW)
243#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
242 244
243#define SDEBUG_MAX_PARTS 4 245#define SDEBUG_MAX_PARTS 4
244 246
@@ -510,7 +512,7 @@ static const struct opcode_info_t release_iarr[] = {
510}; 512};
511 513
512static const struct opcode_info_t sync_cache_iarr[] = { 514static const struct opcode_info_t sync_cache_iarr[] = {
513 {0, 0x91, 0, F_LONG_DELAY | F_M_ACCESS, resp_sync_cache, NULL, 515 {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
514 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 516 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
515 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */ 517 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
516}; 518};
@@ -553,7 +555,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
553 resp_write_dt0, write_iarr, /* WRITE(16) */ 555 resp_write_dt0, write_iarr, /* WRITE(16) */
554 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 556 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
555 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, 557 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
556 {0, 0x1b, 0, F_LONG_DELAY, resp_start_stop, NULL,/* START STOP UNIT */ 558 {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
557 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, 559 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
558 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN, 560 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
559 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */ 561 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
@@ -606,7 +608,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
606 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */ 608 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
607 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 609 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
608 0, 0, 0, 0, 0} }, 610 0, 0, 0, 0, 0} },
609 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_LONG_DELAY | F_M_ACCESS, 611 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
610 resp_sync_cache, sync_cache_iarr, 612 resp_sync_cache, sync_cache_iarr,
611 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 613 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
612 0, 0, 0, 0} }, /* SYNC_CACHE (10) */ 614 0, 0, 0, 0} }, /* SYNC_CACHE (10) */
@@ -667,6 +669,7 @@ static bool sdebug_strict = DEF_STRICT;
667static bool sdebug_any_injecting_opt; 669static bool sdebug_any_injecting_opt;
668static bool sdebug_verbose; 670static bool sdebug_verbose;
669static bool have_dif_prot; 671static bool have_dif_prot;
672static bool write_since_sync;
670static bool sdebug_statistics = DEF_STATISTICS; 673static bool sdebug_statistics = DEF_STATISTICS;
671 674
672static unsigned int sdebug_store_sectors; 675static unsigned int sdebug_store_sectors;
@@ -1607,6 +1610,7 @@ static int resp_start_stop(struct scsi_cmnd *scp,
1607{ 1610{
1608 unsigned char *cmd = scp->cmnd; 1611 unsigned char *cmd = scp->cmnd;
1609 int power_cond, stop; 1612 int power_cond, stop;
1613 bool changing;
1610 1614
1611 power_cond = (cmd[4] & 0xf0) >> 4; 1615 power_cond = (cmd[4] & 0xf0) >> 4;
1612 if (power_cond) { 1616 if (power_cond) {
@@ -1614,8 +1618,12 @@ static int resp_start_stop(struct scsi_cmnd *scp,
1614 return check_condition_result; 1618 return check_condition_result;
1615 } 1619 }
1616 stop = !(cmd[4] & 1); 1620 stop = !(cmd[4] & 1);
1621 changing = atomic_read(&devip->stopped) == !stop;
1617 atomic_xchg(&devip->stopped, stop); 1622 atomic_xchg(&devip->stopped, stop);
1618 return (cmd[1] & 0x1) ? SDEG_RES_IMMED_MASK : 0; /* check IMMED bit */ 1623 if (!changing || cmd[1] & 0x1) /* state unchanged or IMMED set */
1624 return SDEG_RES_IMMED_MASK;
1625 else
1626 return 0;
1619} 1627}
1620 1628
1621static sector_t get_sdebug_capacity(void) 1629static sector_t get_sdebug_capacity(void)
@@ -2473,6 +2481,7 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
2473 if (do_write) { 2481 if (do_write) {
2474 sdb = scsi_out(scmd); 2482 sdb = scsi_out(scmd);
2475 dir = DMA_TO_DEVICE; 2483 dir = DMA_TO_DEVICE;
2484 write_since_sync = true;
2476 } else { 2485 } else {
2477 sdb = scsi_in(scmd); 2486 sdb = scsi_in(scmd);
2478 dir = DMA_FROM_DEVICE; 2487 dir = DMA_FROM_DEVICE;
@@ -3583,6 +3592,7 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
3583static int resp_sync_cache(struct scsi_cmnd *scp, 3592static int resp_sync_cache(struct scsi_cmnd *scp,
3584 struct sdebug_dev_info *devip) 3593 struct sdebug_dev_info *devip)
3585{ 3594{
3595 int res = 0;
3586 u64 lba; 3596 u64 lba;
3587 u32 num_blocks; 3597 u32 num_blocks;
3588 u8 *cmd = scp->cmnd; 3598 u8 *cmd = scp->cmnd;
@@ -3598,7 +3608,11 @@ static int resp_sync_cache(struct scsi_cmnd *scp,
3598 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); 3608 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3599 return check_condition_result; 3609 return check_condition_result;
3600 } 3610 }
3601 return (cmd[1] & 0x2) ? SDEG_RES_IMMED_MASK : 0; /* check IMMED bit */ 3611 if (!write_since_sync || cmd[1] & 0x2)
3612 res = SDEG_RES_IMMED_MASK;
3613 else /* delay if write_since_sync and IMMED clear */
3614 write_since_sync = false;
3615 return res;
3602} 3616}
3603 3617
3604#define RL_BUCKET_ELEMS 8 3618#define RL_BUCKET_ELEMS 8
@@ -5777,13 +5791,14 @@ fini:
5777 return schedule_resp(scp, devip, errsts, pfp, 0, 0); 5791 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
5778 else if ((sdebug_jdelay || sdebug_ndelay) && (flags & F_LONG_DELAY)) { 5792 else if ((sdebug_jdelay || sdebug_ndelay) && (flags & F_LONG_DELAY)) {
5779 /* 5793 /*
5780 * If any delay is active, want F_LONG_DELAY to be at least 1 5794 * If any delay is active, for F_SSU_DELAY want at least 1
5781 * second and if sdebug_jdelay>0 want a long delay of that 5795 * second and if sdebug_jdelay>0 want a long delay of that
5782 * many seconds. 5796 * many seconds; for F_SYNC_DELAY want 1/20 of that.
5783 */ 5797 */
5784 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay; 5798 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
5799 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
5785 5800
5786 jdelay = mult_frac(USER_HZ * jdelay, HZ, USER_HZ); 5801 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
5787 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0); 5802 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
5788 } else 5803 } else
5789 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay, 5804 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index f4b52b44b966..65f6c94f2e9b 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2322,6 +2322,12 @@ iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
2322 return nlmsg_multicast(nls, skb, 0, group, gfp); 2322 return nlmsg_multicast(nls, skb, 0, group, gfp);
2323} 2323}
2324 2324
2325static int
2326iscsi_unicast_skb(struct sk_buff *skb, u32 portid)
2327{
2328 return nlmsg_unicast(nls, skb, portid);
2329}
2330
2325int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 2331int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
2326 char *data, uint32_t data_size) 2332 char *data, uint32_t data_size)
2327{ 2333{
@@ -2524,14 +2530,11 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
2524EXPORT_SYMBOL_GPL(iscsi_ping_comp_event); 2530EXPORT_SYMBOL_GPL(iscsi_ping_comp_event);
2525 2531
2526static int 2532static int
2527iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi, 2533iscsi_if_send_reply(u32 portid, int type, void *payload, int size)
2528 void *payload, int size)
2529{ 2534{
2530 struct sk_buff *skb; 2535 struct sk_buff *skb;
2531 struct nlmsghdr *nlh; 2536 struct nlmsghdr *nlh;
2532 int len = nlmsg_total_size(size); 2537 int len = nlmsg_total_size(size);
2533 int flags = multi ? NLM_F_MULTI : 0;
2534 int t = done ? NLMSG_DONE : type;
2535 2538
2536 skb = alloc_skb(len, GFP_ATOMIC); 2539 skb = alloc_skb(len, GFP_ATOMIC);
2537 if (!skb) { 2540 if (!skb) {
@@ -2539,10 +2542,9 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
2539 return -ENOMEM; 2542 return -ENOMEM;
2540 } 2543 }
2541 2544
2542 nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0); 2545 nlh = __nlmsg_put(skb, 0, 0, type, (len - sizeof(*nlh)), 0);
2543 nlh->nlmsg_flags = flags;
2544 memcpy(nlmsg_data(nlh), payload, size); 2546 memcpy(nlmsg_data(nlh), payload, size);
2545 return iscsi_multicast_skb(skb, group, GFP_ATOMIC); 2547 return iscsi_unicast_skb(skb, portid);
2546} 2548}
2547 2549
2548static int 2550static int
@@ -3470,6 +3472,7 @@ static int
3470iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) 3472iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
3471{ 3473{
3472 int err = 0; 3474 int err = 0;
3475 u32 portid;
3473 struct iscsi_uevent *ev = nlmsg_data(nlh); 3476 struct iscsi_uevent *ev = nlmsg_data(nlh);
3474 struct iscsi_transport *transport = NULL; 3477 struct iscsi_transport *transport = NULL;
3475 struct iscsi_internal *priv; 3478 struct iscsi_internal *priv;
@@ -3490,10 +3493,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
3490 if (!try_module_get(transport->owner)) 3493 if (!try_module_get(transport->owner))
3491 return -EINVAL; 3494 return -EINVAL;
3492 3495
3496 portid = NETLINK_CB(skb).portid;
3497
3493 switch (nlh->nlmsg_type) { 3498 switch (nlh->nlmsg_type) {
3494 case ISCSI_UEVENT_CREATE_SESSION: 3499 case ISCSI_UEVENT_CREATE_SESSION:
3495 err = iscsi_if_create_session(priv, ep, ev, 3500 err = iscsi_if_create_session(priv, ep, ev,
3496 NETLINK_CB(skb).portid, 3501 portid,
3497 ev->u.c_session.initial_cmdsn, 3502 ev->u.c_session.initial_cmdsn,
3498 ev->u.c_session.cmds_max, 3503 ev->u.c_session.cmds_max,
3499 ev->u.c_session.queue_depth); 3504 ev->u.c_session.queue_depth);
@@ -3506,7 +3511,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
3506 } 3511 }
3507 3512
3508 err = iscsi_if_create_session(priv, ep, ev, 3513 err = iscsi_if_create_session(priv, ep, ev,
3509 NETLINK_CB(skb).portid, 3514 portid,
3510 ev->u.c_bound_session.initial_cmdsn, 3515 ev->u.c_bound_session.initial_cmdsn,
3511 ev->u.c_bound_session.cmds_max, 3516 ev->u.c_bound_session.cmds_max,
3512 ev->u.c_bound_session.queue_depth); 3517 ev->u.c_bound_session.queue_depth);
@@ -3664,6 +3669,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
3664static void 3669static void
3665iscsi_if_rx(struct sk_buff *skb) 3670iscsi_if_rx(struct sk_buff *skb)
3666{ 3671{
3672 u32 portid = NETLINK_CB(skb).portid;
3673
3667 mutex_lock(&rx_queue_mutex); 3674 mutex_lock(&rx_queue_mutex);
3668 while (skb->len >= NLMSG_HDRLEN) { 3675 while (skb->len >= NLMSG_HDRLEN) {
3669 int err; 3676 int err;
@@ -3699,8 +3706,8 @@ iscsi_if_rx(struct sk_buff *skb)
3699 break; 3706 break;
3700 if (ev->type == ISCSI_UEVENT_GET_CHAP && !err) 3707 if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
3701 break; 3708 break;
3702 err = iscsi_if_send_reply(group, nlh->nlmsg_seq, 3709 err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
3703 nlh->nlmsg_type, 0, 0, ev, sizeof(*ev)); 3710 ev, sizeof(*ev));
3704 } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH); 3711 } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
3705 skb_pull(skb, rlen); 3712 skb_pull(skb, rlen);
3706 } 3713 }
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a6201e696ab9..9421d9877730 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2121,6 +2121,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
2121 break; /* standby */ 2121 break; /* standby */
2122 if (sshdr.asc == 4 && sshdr.ascq == 0xc) 2122 if (sshdr.asc == 4 && sshdr.ascq == 0xc)
2123 break; /* unavailable */ 2123 break; /* unavailable */
2124 if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
2125 break; /* sanitize in progress */
2124 /* 2126 /*
2125 * Issue command to spin up drive when not ready 2127 * Issue command to spin up drive when not ready
2126 */ 2128 */
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 41df75eea57b..210407cd2341 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -400,8 +400,10 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
400 * 400 *
401 * Check that all zones of the device are equal. The last zone can however 401 * Check that all zones of the device are equal. The last zone can however
402 * be smaller. The zone size must also be a power of two number of LBAs. 402 * be smaller. The zone size must also be a power of two number of LBAs.
403 *
404 * Returns the zone size in bytes upon success or an error code upon failure.
403 */ 405 */
404static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) 406static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
405{ 407{
406 u64 zone_blocks = 0; 408 u64 zone_blocks = 0;
407 sector_t block = 0; 409 sector_t block = 0;
@@ -412,8 +414,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
412 int ret; 414 int ret;
413 u8 same; 415 u8 same;
414 416
415 sdkp->zone_blocks = 0;
416
417 /* Get a buffer */ 417 /* Get a buffer */
418 buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL); 418 buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
419 if (!buf) 419 if (!buf)
@@ -445,16 +445,17 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
445 445
446 /* Parse zone descriptors */ 446 /* Parse zone descriptors */
447 while (rec < buf + buf_len) { 447 while (rec < buf + buf_len) {
448 zone_blocks = get_unaligned_be64(&rec[8]); 448 u64 this_zone_blocks = get_unaligned_be64(&rec[8]);
449 if (sdkp->zone_blocks == 0) { 449
450 sdkp->zone_blocks = zone_blocks; 450 if (zone_blocks == 0) {
451 } else if (zone_blocks != sdkp->zone_blocks && 451 zone_blocks = this_zone_blocks;
452 (block + zone_blocks < sdkp->capacity 452 } else if (this_zone_blocks != zone_blocks &&
453 || zone_blocks > sdkp->zone_blocks)) { 453 (block + this_zone_blocks < sdkp->capacity
454 zone_blocks = 0; 454 || this_zone_blocks > zone_blocks)) {
455 this_zone_blocks = 0;
455 goto out; 456 goto out;
456 } 457 }
457 block += zone_blocks; 458 block += this_zone_blocks;
458 rec += 64; 459 rec += 64;
459 } 460 }
460 461
@@ -467,8 +468,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
467 468
468 } while (block < sdkp->capacity); 469 } while (block < sdkp->capacity);
469 470
470 zone_blocks = sdkp->zone_blocks;
471
472out: 471out:
473 if (!zone_blocks) { 472 if (!zone_blocks) {
474 if (sdkp->first_scan) 473 if (sdkp->first_scan)
@@ -488,8 +487,7 @@ out:
488 "Zone size too large\n"); 487 "Zone size too large\n");
489 ret = -ENODEV; 488 ret = -ENODEV;
490 } else { 489 } else {
491 sdkp->zone_blocks = zone_blocks; 490 ret = zone_blocks;
492 sdkp->zone_shift = ilog2(zone_blocks);
493 } 491 }
494 492
495out_free: 493out_free:
@@ -500,15 +498,14 @@ out_free:
500 498
501/** 499/**
502 * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone). 500 * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
503 * @sdkp: The disk of the bitmap 501 * @nr_zones: Number of zones to allocate space for.
502 * @numa_node: NUMA node to allocate the memory from.
504 */ 503 */
505static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp) 504static inline unsigned long *
505sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node)
506{ 506{
507 struct request_queue *q = sdkp->disk->queue; 507 return kzalloc_node(BITS_TO_LONGS(nr_zones) * sizeof(unsigned long),
508 508 GFP_KERNEL, numa_node);
509 return kzalloc_node(BITS_TO_LONGS(sdkp->nr_zones)
510 * sizeof(unsigned long),
511 GFP_KERNEL, q->node);
512} 509}
513 510
514/** 511/**
@@ -516,6 +513,7 @@ static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
516 * @sdkp: disk used 513 * @sdkp: disk used
517 * @buf: report reply buffer 514 * @buf: report reply buffer
518 * @buflen: length of @buf 515 * @buflen: length of @buf
516 * @zone_shift: logarithm base 2 of the number of blocks in a zone
519 * @seq_zones_bitmap: bitmap of sequential zones to set 517 * @seq_zones_bitmap: bitmap of sequential zones to set
520 * 518 *
521 * Parse reported zone descriptors in @buf to identify sequential zones and 519 * Parse reported zone descriptors in @buf to identify sequential zones and
@@ -525,7 +523,7 @@ static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
525 * Return the LBA after the last zone reported. 523 * Return the LBA after the last zone reported.
526 */ 524 */
527static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf, 525static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
528 unsigned int buflen, 526 unsigned int buflen, u32 zone_shift,
529 unsigned long *seq_zones_bitmap) 527 unsigned long *seq_zones_bitmap)
530{ 528{
531 sector_t lba, next_lba = sdkp->capacity; 529 sector_t lba, next_lba = sdkp->capacity;
@@ -544,7 +542,7 @@ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
544 if (type != ZBC_ZONE_TYPE_CONV && 542 if (type != ZBC_ZONE_TYPE_CONV &&
545 cond != ZBC_ZONE_COND_READONLY && 543 cond != ZBC_ZONE_COND_READONLY &&
546 cond != ZBC_ZONE_COND_OFFLINE) 544 cond != ZBC_ZONE_COND_OFFLINE)
547 set_bit(lba >> sdkp->zone_shift, seq_zones_bitmap); 545 set_bit(lba >> zone_shift, seq_zones_bitmap);
548 next_lba = lba + get_unaligned_be64(&rec[8]); 546 next_lba = lba + get_unaligned_be64(&rec[8]);
549 rec += 64; 547 rec += 64;
550 } 548 }
@@ -553,12 +551,16 @@ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
553} 551}
554 552
555/** 553/**
556 * sd_zbc_setup_seq_zones_bitmap - Initialize the disk seq zone bitmap. 554 * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap.
557 * @sdkp: target disk 555 * @sdkp: target disk
556 * @zone_shift: logarithm base 2 of the number of blocks in a zone
557 * @nr_zones: number of zones to set up a seq zone bitmap for
558 * 558 *
559 * Allocate a zone bitmap and initialize it by identifying sequential zones. 559 * Allocate a zone bitmap and initialize it by identifying sequential zones.
560 */ 560 */
561static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp) 561static unsigned long *
562sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift,
563 u32 nr_zones)
562{ 564{
563 struct request_queue *q = sdkp->disk->queue; 565 struct request_queue *q = sdkp->disk->queue;
564 unsigned long *seq_zones_bitmap; 566 unsigned long *seq_zones_bitmap;
@@ -566,9 +568,9 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
566 unsigned char *buf; 568 unsigned char *buf;
567 int ret = -ENOMEM; 569 int ret = -ENOMEM;
568 570
569 seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(sdkp); 571 seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node);
570 if (!seq_zones_bitmap) 572 if (!seq_zones_bitmap)
571 return -ENOMEM; 573 return ERR_PTR(-ENOMEM);
572 574
573 buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL); 575 buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
574 if (!buf) 576 if (!buf)
@@ -579,7 +581,7 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
579 if (ret) 581 if (ret)
580 goto out; 582 goto out;
581 lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 583 lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
582 seq_zones_bitmap); 584 zone_shift, seq_zones_bitmap);
583 } 585 }
584 586
585 if (lba != sdkp->capacity) { 587 if (lba != sdkp->capacity) {
@@ -591,12 +593,9 @@ out:
591 kfree(buf); 593 kfree(buf);
592 if (ret) { 594 if (ret) {
593 kfree(seq_zones_bitmap); 595 kfree(seq_zones_bitmap);
594 return ret; 596 return ERR_PTR(ret);
595 } 597 }
596 598 return seq_zones_bitmap;
597 q->seq_zones_bitmap = seq_zones_bitmap;
598
599 return 0;
600} 599}
601 600
602static void sd_zbc_cleanup(struct scsi_disk *sdkp) 601static void sd_zbc_cleanup(struct scsi_disk *sdkp)
@@ -612,44 +611,64 @@ static void sd_zbc_cleanup(struct scsi_disk *sdkp)
612 q->nr_zones = 0; 611 q->nr_zones = 0;
613} 612}
614 613
615static int sd_zbc_setup(struct scsi_disk *sdkp) 614static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
616{ 615{
617 struct request_queue *q = sdkp->disk->queue; 616 struct request_queue *q = sdkp->disk->queue;
617 u32 zone_shift = ilog2(zone_blocks);
618 u32 nr_zones;
618 int ret; 619 int ret;
619 620
620 /* READ16/WRITE16 is mandatory for ZBC disks */
621 sdkp->device->use_16_for_rw = 1;
622 sdkp->device->use_10_for_rw = 0;
623
624 /* chunk_sectors indicates the zone size */ 621 /* chunk_sectors indicates the zone size */
625 blk_queue_chunk_sectors(sdkp->disk->queue, 622 blk_queue_chunk_sectors(q,
626 logical_to_sectors(sdkp->device, sdkp->zone_blocks)); 623 logical_to_sectors(sdkp->device, zone_blocks));
627 sdkp->nr_zones = 624 nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;
628 round_up(sdkp->capacity, sdkp->zone_blocks) >> sdkp->zone_shift;
629 625
630 /* 626 /*
631 * Initialize the device request queue information if the number 627 * Initialize the device request queue information if the number
632 * of zones changed. 628 * of zones changed.
633 */ 629 */
634 if (sdkp->nr_zones != q->nr_zones) { 630 if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) {
635 631 unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
636 sd_zbc_cleanup(sdkp); 632 size_t zone_bitmap_size;
637 633
638 q->nr_zones = sdkp->nr_zones; 634 if (nr_zones) {
639 if (sdkp->nr_zones) { 635 seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones,
640 q->seq_zones_wlock = sd_zbc_alloc_zone_bitmap(sdkp); 636 q->node);
641 if (!q->seq_zones_wlock) { 637 if (!seq_zones_wlock) {
642 ret = -ENOMEM; 638 ret = -ENOMEM;
643 goto err; 639 goto err;
644 } 640 }
645 641
646 ret = sd_zbc_setup_seq_zones_bitmap(sdkp); 642 seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp,
647 if (ret) { 643 zone_shift, nr_zones);
648 sd_zbc_cleanup(sdkp); 644 if (IS_ERR(seq_zones_bitmap)) {
645 ret = PTR_ERR(seq_zones_bitmap);
646 kfree(seq_zones_wlock);
649 goto err; 647 goto err;
650 } 648 }
651 } 649 }
652 650 zone_bitmap_size = BITS_TO_LONGS(nr_zones) *
651 sizeof(unsigned long);
652 blk_mq_freeze_queue(q);
653 if (q->nr_zones != nr_zones) {
654 /* READ16/WRITE16 is mandatory for ZBC disks */
655 sdkp->device->use_16_for_rw = 1;
656 sdkp->device->use_10_for_rw = 0;
657
658 sdkp->zone_blocks = zone_blocks;
659 sdkp->zone_shift = zone_shift;
660 sdkp->nr_zones = nr_zones;
661 q->nr_zones = nr_zones;
662 swap(q->seq_zones_wlock, seq_zones_wlock);
663 swap(q->seq_zones_bitmap, seq_zones_bitmap);
664 } else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap,
665 zone_bitmap_size) != 0) {
666 memcpy(q->seq_zones_bitmap, seq_zones_bitmap,
667 zone_bitmap_size);
668 }
669 blk_mq_unfreeze_queue(q);
670 kfree(seq_zones_wlock);
671 kfree(seq_zones_bitmap);
653 } 672 }
654 673
655 return 0; 674 return 0;
@@ -661,6 +680,7 @@ err:
661 680
662int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) 681int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
663{ 682{
683 int64_t zone_blocks;
664 int ret; 684 int ret;
665 685
666 if (!sd_is_zoned(sdkp)) 686 if (!sd_is_zoned(sdkp))
@@ -697,12 +717,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
697 * Check zone size: only devices with a constant zone size (except 717 * Check zone size: only devices with a constant zone size (except
698 * an eventual last runt zone) that is a power of 2 are supported. 718 * an eventual last runt zone) that is a power of 2 are supported.
699 */ 719 */
700 ret = sd_zbc_check_zone_size(sdkp); 720 zone_blocks = sd_zbc_check_zone_size(sdkp);
701 if (ret) 721 ret = -EFBIG;
722 if (zone_blocks != (u32)zone_blocks)
723 goto err;
724 ret = zone_blocks;
725 if (ret < 0)
702 goto err; 726 goto err;
703 727
704 /* The drive satisfies the kernel restrictions: set it up */ 728 /* The drive satisfies the kernel restrictions: set it up */
705 ret = sd_zbc_setup(sdkp); 729 ret = sd_zbc_setup(sdkp, zone_blocks);
706 if (ret) 730 if (ret)
707 goto err; 731 goto err;
708 732
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 8c51d628b52e..a2ec0bc9e9fa 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1722,11 +1722,14 @@ static int storvsc_probe(struct hv_device *device,
1722 max_targets = STORVSC_MAX_TARGETS; 1722 max_targets = STORVSC_MAX_TARGETS;
1723 max_channels = STORVSC_MAX_CHANNELS; 1723 max_channels = STORVSC_MAX_CHANNELS;
1724 /* 1724 /*
1725 * On Windows8 and above, we support sub-channels for storage. 1725 * On Windows8 and above, we support sub-channels for storage
1726 * on SCSI and FC controllers.
1726 * The number of sub-channels offerred is based on the number of 1727 * The number of sub-channels offerred is based on the number of
1727 * VCPUs in the guest. 1728 * VCPUs in the guest.
1728 */ 1729 */
1729 max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel); 1730 if (!dev_is_ide)
1731 max_sub_channels =
1732 (num_cpus - 1) / storvsc_vcpus_per_sub_channel;
1730 } 1733 }
1731 1734
1732 scsi_driver.can_queue = (max_outstanding_req_per_channel * 1735 scsi_driver.can_queue = (max_outstanding_req_per_channel *
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index c5b1bf1cadcb..00e79057f870 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -276,6 +276,35 @@ static inline void ufshcd_remove_non_printable(char *val)
276 *val = ' '; 276 *val = ' ';
277} 277}
278 278
279static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
280 const char *str)
281{
282 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
283
284 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
285}
286
287static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
288 const char *str)
289{
290 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
291
292 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
293}
294
295static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
296 const char *str)
297{
298 struct utp_task_req_desc *descp;
299 struct utp_upiu_task_req *task_req;
300 int off = (int)tag - hba->nutrs;
301
302 descp = &hba->utmrdl_base_addr[off];
303 task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
304 trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
305 &task_req->input_param1);
306}
307
279static void ufshcd_add_command_trace(struct ufs_hba *hba, 308static void ufshcd_add_command_trace(struct ufs_hba *hba,
280 unsigned int tag, const char *str) 309 unsigned int tag, const char *str)
281{ 310{
@@ -285,6 +314,9 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
285 struct ufshcd_lrb *lrbp; 314 struct ufshcd_lrb *lrbp;
286 int transfer_len = -1; 315 int transfer_len = -1;
287 316
317 /* trace UPIU also */
318 ufshcd_add_cmd_upiu_trace(hba, tag, str);
319
288 if (!trace_ufshcd_command_enabled()) 320 if (!trace_ufshcd_command_enabled())
289 return; 321 return;
290 322
@@ -2550,6 +2582,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2550 2582
2551 hba->dev_cmd.complete = &wait; 2583 hba->dev_cmd.complete = &wait;
2552 2584
2585 ufshcd_add_query_upiu_trace(hba, tag, "query_send");
2553 /* Make sure descriptors are ready before ringing the doorbell */ 2586 /* Make sure descriptors are ready before ringing the doorbell */
2554 wmb(); 2587 wmb();
2555 spin_lock_irqsave(hba->host->host_lock, flags); 2588 spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2559,6 +2592,9 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2559 2592
2560 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); 2593 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2561 2594
2595 ufshcd_add_query_upiu_trace(hba, tag,
2596 err ? "query_complete_err" : "query_complete");
2597
2562out_put_tag: 2598out_put_tag:
2563 ufshcd_put_dev_cmd_tag(hba, tag); 2599 ufshcd_put_dev_cmd_tag(hba, tag);
2564 wake_up(&hba->dev_cmd.tag_wq); 2600 wake_up(&hba->dev_cmd.tag_wq);
@@ -5443,11 +5479,14 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5443 5479
5444 spin_unlock_irqrestore(host->host_lock, flags); 5480 spin_unlock_irqrestore(host->host_lock, flags);
5445 5481
5482 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
5483
5446 /* wait until the task management command is completed */ 5484 /* wait until the task management command is completed */
5447 err = wait_event_timeout(hba->tm_wq, 5485 err = wait_event_timeout(hba->tm_wq,
5448 test_bit(free_slot, &hba->tm_condition), 5486 test_bit(free_slot, &hba->tm_condition),
5449 msecs_to_jiffies(TM_CMD_TIMEOUT)); 5487 msecs_to_jiffies(TM_CMD_TIMEOUT));
5450 if (!err) { 5488 if (!err) {
5489 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
5451 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", 5490 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5452 __func__, tm_function); 5491 __func__, tm_function);
5453 if (ufshcd_clear_tm_cmd(hba, free_slot)) 5492 if (ufshcd_clear_tm_cmd(hba, free_slot))
@@ -5456,6 +5495,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5456 err = -ETIMEDOUT; 5495 err = -ETIMEDOUT;
5457 } else { 5496 } else {
5458 err = ufshcd_task_req_compl(hba, free_slot, tm_response); 5497 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
5498 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
5459 } 5499 }
5460 5500
5461 clear_bit(free_slot, &hba->tm_condition); 5501 clear_bit(free_slot, &hba->tm_condition);
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
index 884419c37e84..457ea1f8db30 100644
--- a/drivers/slimbus/messaging.c
+++ b/drivers/slimbus/messaging.c
@@ -183,7 +183,7 @@ static u16 slim_slicesize(int code)
183 0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7 183 0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7
184 }; 184 };
185 185
186 clamp(code, 1, (int)ARRAY_SIZE(sizetocode)); 186 code = clamp(code, 1, (int)ARRAY_SIZE(sizetocode));
187 187
188 return sizetocode[code - 1]; 188 return sizetocode[code - 1];
189} 189}
diff --git a/drivers/soc/bcm/raspberrypi-power.c b/drivers/soc/bcm/raspberrypi-power.c
index fe96a8b956fb..f7ed1187518b 100644
--- a/drivers/soc/bcm/raspberrypi-power.c
+++ b/drivers/soc/bcm/raspberrypi-power.c
@@ -45,7 +45,7 @@ struct rpi_power_domains {
45struct rpi_power_domain_packet { 45struct rpi_power_domain_packet {
46 u32 domain; 46 u32 domain;
47 u32 on; 47 u32 on;
48} __packet; 48};
49 49
50/* 50/*
51 * Asks the firmware to enable or disable power on a specific power 51 * Asks the firmware to enable or disable power on a specific power
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 6b5300ca44a6..885f5fcead77 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -1390,7 +1390,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
1390 } 1390 }
1391 1391
1392 if (hif_drv->usr_conn_req.ies) { 1392 if (hif_drv->usr_conn_req.ies) {
1393 conn_info.req_ies = kmemdup(conn_info.req_ies, 1393 conn_info.req_ies = kmemdup(hif_drv->usr_conn_req.ies,
1394 hif_drv->usr_conn_req.ies_len, 1394 hif_drv->usr_conn_req.ies_len,
1395 GFP_KERNEL); 1395 GFP_KERNEL);
1396 if (conn_info.req_ies) 1396 if (conn_info.req_ies)
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 07c814c42648..60429011292a 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -427,8 +427,8 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
427{ 427{
428 struct se_device *dev = cmd->se_dev; 428 struct se_device *dev = cmd->se_dev;
429 struct scatterlist *sg = &cmd->t_data_sg[0]; 429 struct scatterlist *sg = &cmd->t_data_sg[0];
430 unsigned char *buf, zero = 0x00, *p = &zero; 430 unsigned char *buf, *not_zero;
431 int rc, ret; 431 int ret;
432 432
433 buf = kmap(sg_page(sg)) + sg->offset; 433 buf = kmap(sg_page(sg)) + sg->offset;
434 if (!buf) 434 if (!buf)
@@ -437,10 +437,10 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
437 * Fall back to block_execute_write_same() slow-path if 437 * Fall back to block_execute_write_same() slow-path if
438 * incoming WRITE_SAME payload does not contain zeros. 438 * incoming WRITE_SAME payload does not contain zeros.
439 */ 439 */
440 rc = memcmp(buf, p, cmd->data_length); 440 not_zero = memchr_inv(buf, 0x00, cmd->data_length);
441 kunmap(sg_page(sg)); 441 kunmap(sg_page(sg));
442 442
443 if (rc) 443 if (not_zero)
444 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 444 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
445 445
446 ret = blkdev_issue_zeroout(bdev, 446 ret = blkdev_issue_zeroout(bdev,
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 0d99b242e82e..6cb933ecc084 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -890,6 +890,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
890 bytes = min(bytes, data_len); 890 bytes = min(bytes, data_len);
891 891
892 if (!bio) { 892 if (!bio) {
893new_bio:
893 nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); 894 nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
894 nr_pages -= nr_vecs; 895 nr_pages -= nr_vecs;
895 /* 896 /*
@@ -931,6 +932,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
931 * be allocated with pscsi_get_bio() above. 932 * be allocated with pscsi_get_bio() above.
932 */ 933 */
933 bio = NULL; 934 bio = NULL;
935 goto new_bio;
934 } 936 }
935 937
936 data_len -= bytes; 938 data_len -= bytes;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 3b3e1f6632d7..1dbe27c9946c 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -121,6 +121,9 @@ struct gsm_dlci {
121 struct mutex mutex; 121 struct mutex mutex;
122 122
123 /* Link layer */ 123 /* Link layer */
124 int mode;
125#define DLCI_MODE_ABM 0 /* Normal Asynchronous Balanced Mode */
126#define DLCI_MODE_ADM 1 /* Asynchronous Disconnected Mode */
124 spinlock_t lock; /* Protects the internal state */ 127 spinlock_t lock; /* Protects the internal state */
125 struct timer_list t1; /* Retransmit timer for SABM and UA */ 128 struct timer_list t1; /* Retransmit timer for SABM and UA */
126 int retries; 129 int retries;
@@ -1364,7 +1367,13 @@ retry:
1364 ctrl->data = data; 1367 ctrl->data = data;
1365 ctrl->len = clen; 1368 ctrl->len = clen;
1366 gsm->pending_cmd = ctrl; 1369 gsm->pending_cmd = ctrl;
1367 gsm->cretries = gsm->n2; 1370
1371 /* If DLCI0 is in ADM mode skip retries, it won't respond */
1372 if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
1373 gsm->cretries = 1;
1374 else
1375 gsm->cretries = gsm->n2;
1376
1368 mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100); 1377 mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
1369 gsm_control_transmit(gsm, ctrl); 1378 gsm_control_transmit(gsm, ctrl);
1370 spin_unlock_irqrestore(&gsm->control_lock, flags); 1379 spin_unlock_irqrestore(&gsm->control_lock, flags);
@@ -1472,6 +1481,7 @@ static void gsm_dlci_t1(struct timer_list *t)
1472 if (debug & 8) 1481 if (debug & 8)
1473 pr_info("DLCI %d opening in ADM mode.\n", 1482 pr_info("DLCI %d opening in ADM mode.\n",
1474 dlci->addr); 1483 dlci->addr);
1484 dlci->mode = DLCI_MODE_ADM;
1475 gsm_dlci_open(dlci); 1485 gsm_dlci_open(dlci);
1476 } else { 1486 } else {
1477 gsm_dlci_close(dlci); 1487 gsm_dlci_close(dlci);
@@ -2861,11 +2871,22 @@ static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
2861static int gsm_carrier_raised(struct tty_port *port) 2871static int gsm_carrier_raised(struct tty_port *port)
2862{ 2872{
2863 struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); 2873 struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
2874 struct gsm_mux *gsm = dlci->gsm;
2875
2864 /* Not yet open so no carrier info */ 2876 /* Not yet open so no carrier info */
2865 if (dlci->state != DLCI_OPEN) 2877 if (dlci->state != DLCI_OPEN)
2866 return 0; 2878 return 0;
2867 if (debug & 2) 2879 if (debug & 2)
2868 return 1; 2880 return 1;
2881
2882 /*
2883 * Basic mode with control channel in ADM mode may not respond
2884 * to CMD_MSC at all and modem_rx is empty.
2885 */
2886 if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM &&
2887 !dlci->modem_rx)
2888 return 1;
2889
2869 return dlci->modem_rx & TIOCM_CD; 2890 return dlci->modem_rx & TIOCM_CD;
2870} 2891}
2871 2892
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index a24278380fec..22683393a0f2 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -169,7 +169,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match)
169 */ 169 */
170int __init setup_earlycon(char *buf) 170int __init setup_earlycon(char *buf)
171{ 171{
172 const struct earlycon_id *match; 172 const struct earlycon_id **p_match;
173 173
174 if (!buf || !buf[0]) 174 if (!buf || !buf[0])
175 return -EINVAL; 175 return -EINVAL;
@@ -177,7 +177,9 @@ int __init setup_earlycon(char *buf)
177 if (early_con.flags & CON_ENABLED) 177 if (early_con.flags & CON_ENABLED)
178 return -EALREADY; 178 return -EALREADY;
179 179
180 for (match = __earlycon_table; match < __earlycon_table_end; match++) { 180 for (p_match = __earlycon_table; p_match < __earlycon_table_end;
181 p_match++) {
182 const struct earlycon_id *match = *p_match;
181 size_t len = strlen(match->name); 183 size_t len = strlen(match->name);
182 184
183 if (strncmp(buf, match->name, len)) 185 if (strncmp(buf, match->name, len))
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 91f3a1a5cb7f..c2fc6bef7a6f 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -316,7 +316,7 @@ static u32 imx_uart_readl(struct imx_port *sport, u32 offset)
316 * differ from the value that was last written. As it only 316 * differ from the value that was last written. As it only
317 * clears after being set, reread conditionally. 317 * clears after being set, reread conditionally.
318 */ 318 */
319 if (sport->ucr2 & UCR2_SRST) 319 if (!(sport->ucr2 & UCR2_SRST))
320 sport->ucr2 = readl(sport->port.membase + offset); 320 sport->ucr2 = readl(sport->port.membase + offset);
321 return sport->ucr2; 321 return sport->ucr2;
322 break; 322 break;
@@ -1833,6 +1833,11 @@ static int imx_uart_rs485_config(struct uart_port *port,
1833 rs485conf->flags &= ~SER_RS485_ENABLED; 1833 rs485conf->flags &= ~SER_RS485_ENABLED;
1834 1834
1835 if (rs485conf->flags & SER_RS485_ENABLED) { 1835 if (rs485conf->flags & SER_RS485_ENABLED) {
1836 /* Enable receiver if low-active RTS signal is requested */
1837 if (sport->have_rtscts && !sport->have_rtsgpio &&
1838 !(rs485conf->flags & SER_RS485_RTS_ON_SEND))
1839 rs485conf->flags |= SER_RS485_RX_DURING_TX;
1840
1836 /* disable transmitter */ 1841 /* disable transmitter */
1837 ucr2 = imx_uart_readl(sport, UCR2); 1842 ucr2 = imx_uart_readl(sport, UCR2);
1838 if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND) 1843 if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
@@ -2265,6 +2270,18 @@ static int imx_uart_probe(struct platform_device *pdev)
2265 (!sport->have_rtscts && !sport->have_rtsgpio)) 2270 (!sport->have_rtscts && !sport->have_rtsgpio))
2266 dev_err(&pdev->dev, "no RTS control, disabling rs485\n"); 2271 dev_err(&pdev->dev, "no RTS control, disabling rs485\n");
2267 2272
2273 /*
2274 * If using the i.MX UART RTS/CTS control then the RTS (CTS_B)
2275 * signal cannot be set low during transmission in case the
2276 * receiver is off (limitation of the i.MX UART IP).
2277 */
2278 if (sport->port.rs485.flags & SER_RS485_ENABLED &&
2279 sport->have_rtscts && !sport->have_rtsgpio &&
2280 (!(sport->port.rs485.flags & SER_RS485_RTS_ON_SEND) &&
2281 !(sport->port.rs485.flags & SER_RS485_RX_DURING_TX)))
2282 dev_err(&pdev->dev,
2283 "low-active RTS not possible when receiver is off, enabling receiver\n");
2284
2268 imx_uart_rs485_config(&sport->port, &sport->port.rs485); 2285 imx_uart_rs485_config(&sport->port, &sport->port.rs485);
2269 2286
2270 /* Disable interrupts before requesting them */ 2287 /* Disable interrupts before requesting them */
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 750e5645dc85..f503fab1e268 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -495,7 +495,6 @@ static void mvebu_uart_set_termios(struct uart_port *port,
495 termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR); 495 termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
496 termios->c_cflag &= CREAD | CBAUD; 496 termios->c_cflag &= CREAD | CBAUD;
497 termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD); 497 termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
498 termios->c_lflag = old->c_lflag;
499 } 498 }
500 499
501 spin_unlock_irqrestore(&port->lock, flags); 500 spin_unlock_irqrestore(&port->lock, flags);
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 65ff669373d4..a1b3eb04cb32 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1022,6 +1022,7 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
1022 struct qcom_geni_serial_port *port; 1022 struct qcom_geni_serial_port *port;
1023 struct uart_port *uport; 1023 struct uart_port *uport;
1024 struct resource *res; 1024 struct resource *res;
1025 int irq;
1025 1026
1026 if (pdev->dev.of_node) 1027 if (pdev->dev.of_node)
1027 line = of_alias_get_id(pdev->dev.of_node, "serial"); 1028 line = of_alias_get_id(pdev->dev.of_node, "serial");
@@ -1061,11 +1062,12 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
1061 port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS; 1062 port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
1062 port->tx_fifo_width = DEF_FIFO_WIDTH_BITS; 1063 port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
1063 1064
1064 uport->irq = platform_get_irq(pdev, 0); 1065 irq = platform_get_irq(pdev, 0);
1065 if (uport->irq < 0) { 1066 if (irq < 0) {
1066 dev_err(&pdev->dev, "Failed to get IRQ %d\n", uport->irq); 1067 dev_err(&pdev->dev, "Failed to get IRQ %d\n", irq);
1067 return uport->irq; 1068 return irq;
1068 } 1069 }
1070 uport->irq = irq;
1069 1071
1070 uport->private_data = &qcom_geni_console_driver; 1072 uport->private_data = &qcom_geni_console_driver;
1071 platform_set_drvdata(pdev, port); 1073 platform_set_drvdata(pdev, port);
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index abcb4d09a2d8..bd72dd843338 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1181,7 +1181,7 @@ static int __init cdns_early_console_setup(struct earlycon_device *device,
1181 /* only set baud if specified on command line - otherwise 1181 /* only set baud if specified on command line - otherwise
1182 * assume it has been initialized by a boot loader. 1182 * assume it has been initialized by a boot loader.
1183 */ 1183 */
1184 if (device->baud) { 1184 if (port->uartclk && device->baud) {
1185 u32 cd = 0, bdiv = 0; 1185 u32 cd = 0, bdiv = 0;
1186 u32 mr; 1186 u32 mr;
1187 int div8; 1187 int div8;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 63114ea35ec1..7c838b90a31d 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2816,7 +2816,10 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
2816 2816
2817 kref_init(&tty->kref); 2817 kref_init(&tty->kref);
2818 tty->magic = TTY_MAGIC; 2818 tty->magic = TTY_MAGIC;
2819 tty_ldisc_init(tty); 2819 if (tty_ldisc_init(tty)) {
2820 kfree(tty);
2821 return NULL;
2822 }
2820 tty->session = NULL; 2823 tty->session = NULL;
2821 tty->pgrp = NULL; 2824 tty->pgrp = NULL;
2822 mutex_init(&tty->legacy_mutex); 2825 mutex_init(&tty->legacy_mutex);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 050f4d650891..fb7329ab2b37 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -176,12 +176,11 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
176 return ERR_CAST(ldops); 176 return ERR_CAST(ldops);
177 } 177 }
178 178
179 ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL); 179 /*
180 if (ld == NULL) { 180 * There is no way to handle allocation failure of only 16 bytes.
181 put_ldops(ldops); 181 * Let's simplify error handling and save more memory.
182 return ERR_PTR(-ENOMEM); 182 */
183 } 183 ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL);
184
185 ld->ops = ldops; 184 ld->ops = ldops;
186 ld->tty = tty; 185 ld->tty = tty;
187 186
@@ -527,19 +526,16 @@ static int tty_ldisc_failto(struct tty_struct *tty, int ld)
527static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) 526static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
528{ 527{
529 /* There is an outstanding reference here so this is safe */ 528 /* There is an outstanding reference here so this is safe */
530 old = tty_ldisc_get(tty, old->ops->num); 529 if (tty_ldisc_failto(tty, old->ops->num) < 0) {
531 WARN_ON(IS_ERR(old)); 530 const char *name = tty_name(tty);
532 tty->ldisc = old; 531
533 tty_set_termios_ldisc(tty, old->ops->num); 532 pr_warn("Falling back ldisc for %s.\n", name);
534 if (tty_ldisc_open(tty, old) < 0) {
535 tty_ldisc_put(old);
536 /* The traditional behaviour is to fall back to N_TTY, we 533 /* The traditional behaviour is to fall back to N_TTY, we
537 want to avoid falling back to N_NULL unless we have no 534 want to avoid falling back to N_NULL unless we have no
538 choice to avoid the risk of breaking anything */ 535 choice to avoid the risk of breaking anything */
539 if (tty_ldisc_failto(tty, N_TTY) < 0 && 536 if (tty_ldisc_failto(tty, N_TTY) < 0 &&
540 tty_ldisc_failto(tty, N_NULL) < 0) 537 tty_ldisc_failto(tty, N_NULL) < 0)
541 panic("Couldn't open N_NULL ldisc for %s.", 538 panic("Couldn't open N_NULL ldisc for %s.", name);
542 tty_name(tty));
543 } 539 }
544} 540}
545 541
@@ -824,12 +820,13 @@ EXPORT_SYMBOL_GPL(tty_ldisc_release);
824 * the tty structure is not completely set up when this call is made. 820 * the tty structure is not completely set up when this call is made.
825 */ 821 */
826 822
827void tty_ldisc_init(struct tty_struct *tty) 823int tty_ldisc_init(struct tty_struct *tty)
828{ 824{
829 struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY); 825 struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
830 if (IS_ERR(ld)) 826 if (IS_ERR(ld))
831 panic("n_tty: init_tty"); 827 return PTR_ERR(ld);
832 tty->ldisc = ld; 828 tty->ldisc = ld;
829 return 0;
833} 830}
834 831
835/** 832/**
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index f695a7e8c314..c690d100adcd 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -19,7 +19,7 @@
19 * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \ 19 * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
20 * > /sys/bus/vmbus/drivers/uio_hv_generic/bind 20 * > /sys/bus/vmbus/drivers/uio_hv_generic/bind
21 */ 21 */
22 22#define DEBUG 1
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24 24
25#include <linux/device.h> 25#include <linux/device.h>
@@ -94,10 +94,11 @@ hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
94 */ 94 */
95static void hv_uio_channel_cb(void *context) 95static void hv_uio_channel_cb(void *context)
96{ 96{
97 struct hv_uio_private_data *pdata = context; 97 struct vmbus_channel *chan = context;
98 struct hv_device *dev = pdata->device; 98 struct hv_device *hv_dev = chan->device_obj;
99 struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
99 100
100 dev->channel->inbound.ring_buffer->interrupt_mask = 1; 101 chan->inbound.ring_buffer->interrupt_mask = 1;
101 virt_mb(); 102 virt_mb();
102 103
103 uio_event_notify(&pdata->info); 104 uio_event_notify(&pdata->info);
@@ -121,78 +122,46 @@ static void hv_uio_rescind(struct vmbus_channel *channel)
121 uio_event_notify(&pdata->info); 122 uio_event_notify(&pdata->info);
122} 123}
123 124
124/* 125/* Sysfs API to allow mmap of the ring buffers
125 * Handle fault when looking for sub channel ring buffer 126 * The ring buffer is allocated as contiguous memory by vmbus_open
126 * Subchannel ring buffer is same as resource 0 which is main ring buffer
127 * This is derived from uio_vma_fault
128 */ 127 */
129static int hv_uio_vma_fault(struct vm_fault *vmf)
130{
131 struct vm_area_struct *vma = vmf->vma;
132 void *ring_buffer = vma->vm_private_data;
133 struct page *page;
134 void *addr;
135
136 addr = ring_buffer + (vmf->pgoff << PAGE_SHIFT);
137 page = virt_to_page(addr);
138 get_page(page);
139 vmf->page = page;
140 return 0;
141}
142
143static const struct vm_operations_struct hv_uio_vm_ops = {
144 .fault = hv_uio_vma_fault,
145};
146
147/* Sysfs API to allow mmap of the ring buffers */
148static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj, 128static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
149 struct bin_attribute *attr, 129 struct bin_attribute *attr,
150 struct vm_area_struct *vma) 130 struct vm_area_struct *vma)
151{ 131{
152 struct vmbus_channel *channel 132 struct vmbus_channel *channel
153 = container_of(kobj, struct vmbus_channel, kobj); 133 = container_of(kobj, struct vmbus_channel, kobj);
154 unsigned long requested_pages, actual_pages; 134 struct hv_device *dev = channel->primary_channel->device_obj;
155 135 u16 q_idx = channel->offermsg.offer.sub_channel_index;
156 if (vma->vm_end < vma->vm_start)
157 return -EINVAL;
158
159 /* only allow 0 for now */
160 if (vma->vm_pgoff > 0)
161 return -EINVAL;
162 136
163 requested_pages = vma_pages(vma); 137 dev_dbg(&dev->device, "mmap channel %u pages %#lx at %#lx\n",
164 actual_pages = 2 * HV_RING_SIZE; 138 q_idx, vma_pages(vma), vma->vm_pgoff);
165 if (requested_pages > actual_pages)
166 return -EINVAL;
167 139
168 vma->vm_private_data = channel->ringbuffer_pages; 140 return vm_iomap_memory(vma, virt_to_phys(channel->ringbuffer_pages),
169 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 141 channel->ringbuffer_pagecount << PAGE_SHIFT);
170 vma->vm_ops = &hv_uio_vm_ops;
171 return 0;
172} 142}
173 143
174static struct bin_attribute ring_buffer_bin_attr __ro_after_init = { 144static const struct bin_attribute ring_buffer_bin_attr = {
175 .attr = { 145 .attr = {
176 .name = "ring", 146 .name = "ring",
177 .mode = 0600, 147 .mode = 0600,
178 /* size is set at init time */
179 }, 148 },
149 .size = 2 * HV_RING_SIZE * PAGE_SIZE,
180 .mmap = hv_uio_ring_mmap, 150 .mmap = hv_uio_ring_mmap,
181}; 151};
182 152
183/* Callback from VMBUS subystem when new channel created. */ 153/* Callback from VMBUS subsystem when new channel created. */
184static void 154static void
185hv_uio_new_channel(struct vmbus_channel *new_sc) 155hv_uio_new_channel(struct vmbus_channel *new_sc)
186{ 156{
187 struct hv_device *hv_dev = new_sc->primary_channel->device_obj; 157 struct hv_device *hv_dev = new_sc->primary_channel->device_obj;
188 struct device *device = &hv_dev->device; 158 struct device *device = &hv_dev->device;
189 struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
190 const size_t ring_bytes = HV_RING_SIZE * PAGE_SIZE; 159 const size_t ring_bytes = HV_RING_SIZE * PAGE_SIZE;
191 int ret; 160 int ret;
192 161
193 /* Create host communication ring */ 162 /* Create host communication ring */
194 ret = vmbus_open(new_sc, ring_bytes, ring_bytes, NULL, 0, 163 ret = vmbus_open(new_sc, ring_bytes, ring_bytes, NULL, 0,
195 hv_uio_channel_cb, pdata); 164 hv_uio_channel_cb, new_sc);
196 if (ret) { 165 if (ret) {
197 dev_err(device, "vmbus_open subchannel failed: %d\n", ret); 166 dev_err(device, "vmbus_open subchannel failed: %d\n", ret);
198 return; 167 return;
@@ -234,7 +203,7 @@ hv_uio_probe(struct hv_device *dev,
234 203
235 ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE, 204 ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE,
236 HV_RING_SIZE * PAGE_SIZE, NULL, 0, 205 HV_RING_SIZE * PAGE_SIZE, NULL, 0,
237 hv_uio_channel_cb, pdata); 206 hv_uio_channel_cb, dev->channel);
238 if (ret) 207 if (ret)
239 goto fail; 208 goto fail;
240 209
@@ -326,6 +295,11 @@ hv_uio_probe(struct hv_device *dev,
326 vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind); 295 vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
327 vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel); 296 vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel);
328 297
298 ret = sysfs_create_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
299 if (ret)
300 dev_notice(&dev->device,
301 "sysfs create ring bin file failed; %d\n", ret);
302
329 hv_set_drvdata(dev, pdata); 303 hv_set_drvdata(dev, pdata);
330 304
331 return 0; 305 return 0;
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 75f7fb151f71..987fc5ba6321 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -207,5 +207,6 @@ config USB_ULPI_BUS
207 207
208config USB_ROLE_SWITCH 208config USB_ROLE_SWITCH
209 tristate 209 tristate
210 select USB_COMMON
210 211
211endif # USB_SUPPORT 212endif # USB_SUPPORT
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 777036ae6367..0a42c5df3c0f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2262,7 +2262,8 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
2262 hcd->state = HC_STATE_SUSPENDED; 2262 hcd->state = HC_STATE_SUSPENDED;
2263 2263
2264 if (!PMSG_IS_AUTO(msg)) 2264 if (!PMSG_IS_AUTO(msg))
2265 usb_phy_roothub_power_off(hcd->phy_roothub); 2265 usb_phy_roothub_suspend(hcd->self.sysdev,
2266 hcd->phy_roothub);
2266 2267
2267 /* Did we race with a root-hub wakeup event? */ 2268 /* Did we race with a root-hub wakeup event? */
2268 if (rhdev->do_remote_wakeup) { 2269 if (rhdev->do_remote_wakeup) {
@@ -2302,7 +2303,8 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
2302 } 2303 }
2303 2304
2304 if (!PMSG_IS_AUTO(msg)) { 2305 if (!PMSG_IS_AUTO(msg)) {
2305 status = usb_phy_roothub_power_on(hcd->phy_roothub); 2306 status = usb_phy_roothub_resume(hcd->self.sysdev,
2307 hcd->phy_roothub);
2306 if (status) 2308 if (status)
2307 return status; 2309 return status;
2308 } 2310 }
@@ -2344,7 +2346,7 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
2344 } 2346 }
2345 } else { 2347 } else {
2346 hcd->state = old_state; 2348 hcd->state = old_state;
2347 usb_phy_roothub_power_off(hcd->phy_roothub); 2349 usb_phy_roothub_suspend(hcd->self.sysdev, hcd->phy_roothub);
2348 dev_dbg(&rhdev->dev, "bus %s fail, err %d\n", 2350 dev_dbg(&rhdev->dev, "bus %s fail, err %d\n",
2349 "resume", status); 2351 "resume", status);
2350 if (status != -ESHUTDOWN) 2352 if (status != -ESHUTDOWN)
@@ -2377,6 +2379,7 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
2377 2379
2378 spin_lock_irqsave (&hcd_root_hub_lock, flags); 2380 spin_lock_irqsave (&hcd_root_hub_lock, flags);
2379 if (hcd->rh_registered) { 2381 if (hcd->rh_registered) {
2382 pm_wakeup_event(&hcd->self.root_hub->dev, 0);
2380 set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags); 2383 set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
2381 queue_work(pm_wq, &hcd->wakeup_work); 2384 queue_work(pm_wq, &hcd->wakeup_work);
2382 } 2385 }
@@ -2758,12 +2761,16 @@ int usb_add_hcd(struct usb_hcd *hcd,
2758 } 2761 }
2759 2762
2760 if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) { 2763 if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
2761 hcd->phy_roothub = usb_phy_roothub_init(hcd->self.sysdev); 2764 hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
2762 if (IS_ERR(hcd->phy_roothub)) { 2765 if (IS_ERR(hcd->phy_roothub)) {
2763 retval = PTR_ERR(hcd->phy_roothub); 2766 retval = PTR_ERR(hcd->phy_roothub);
2764 goto err_phy_roothub_init; 2767 goto err_phy_roothub_alloc;
2765 } 2768 }
2766 2769
2770 retval = usb_phy_roothub_init(hcd->phy_roothub);
2771 if (retval)
2772 goto err_phy_roothub_alloc;
2773
2767 retval = usb_phy_roothub_power_on(hcd->phy_roothub); 2774 retval = usb_phy_roothub_power_on(hcd->phy_roothub);
2768 if (retval) 2775 if (retval)
2769 goto err_usb_phy_roothub_power_on; 2776 goto err_usb_phy_roothub_power_on;
@@ -2936,7 +2943,7 @@ err_create_buf:
2936 usb_phy_roothub_power_off(hcd->phy_roothub); 2943 usb_phy_roothub_power_off(hcd->phy_roothub);
2937err_usb_phy_roothub_power_on: 2944err_usb_phy_roothub_power_on:
2938 usb_phy_roothub_exit(hcd->phy_roothub); 2945 usb_phy_roothub_exit(hcd->phy_roothub);
2939err_phy_roothub_init: 2946err_phy_roothub_alloc:
2940 if (hcd->remove_phy && hcd->usb_phy) { 2947 if (hcd->remove_phy && hcd->usb_phy) {
2941 usb_phy_shutdown(hcd->usb_phy); 2948 usb_phy_shutdown(hcd->usb_phy);
2942 usb_put_phy(hcd->usb_phy); 2949 usb_put_phy(hcd->usb_phy);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index f6ea16e9f6bb..aa9968d90a48 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -653,12 +653,17 @@ void usb_wakeup_notification(struct usb_device *hdev,
653 unsigned int portnum) 653 unsigned int portnum)
654{ 654{
655 struct usb_hub *hub; 655 struct usb_hub *hub;
656 struct usb_port *port_dev;
656 657
657 if (!hdev) 658 if (!hdev)
658 return; 659 return;
659 660
660 hub = usb_hub_to_struct_hub(hdev); 661 hub = usb_hub_to_struct_hub(hdev);
661 if (hub) { 662 if (hub) {
663 port_dev = hub->ports[portnum - 1];
664 if (port_dev && port_dev->child)
665 pm_wakeup_event(&port_dev->child->dev, 0);
666
662 set_bit(portnum, hub->wakeup_bits); 667 set_bit(portnum, hub->wakeup_bits);
663 kick_hub_wq(hub); 668 kick_hub_wq(hub);
664 } 669 }
@@ -3434,8 +3439,11 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
3434 3439
3435 /* Skip the initial Clear-Suspend step for a remote wakeup */ 3440 /* Skip the initial Clear-Suspend step for a remote wakeup */
3436 status = hub_port_status(hub, port1, &portstatus, &portchange); 3441 status = hub_port_status(hub, port1, &portstatus, &portchange);
3437 if (status == 0 && !port_is_suspended(hub, portstatus)) 3442 if (status == 0 && !port_is_suspended(hub, portstatus)) {
3443 if (portchange & USB_PORT_STAT_C_SUSPEND)
3444 pm_wakeup_event(&udev->dev, 0);
3438 goto SuspendCleared; 3445 goto SuspendCleared;
3446 }
3439 3447
3440 /* see 7.1.7.7; affects power usage, but not budgeting */ 3448 /* see 7.1.7.7; affects power usage, but not budgeting */
3441 if (hub_is_superspeed(hub->hdev)) 3449 if (hub_is_superspeed(hub->hdev))
diff --git a/drivers/usb/core/phy.c b/drivers/usb/core/phy.c
index 09b7c43c0ea4..9879767452a2 100644
--- a/drivers/usb/core/phy.c
+++ b/drivers/usb/core/phy.c
@@ -19,19 +19,6 @@ struct usb_phy_roothub {
19 struct list_head list; 19 struct list_head list;
20}; 20};
21 21
22static struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev)
23{
24 struct usb_phy_roothub *roothub_entry;
25
26 roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL);
27 if (!roothub_entry)
28 return ERR_PTR(-ENOMEM);
29
30 INIT_LIST_HEAD(&roothub_entry->list);
31
32 return roothub_entry;
33}
34
35static int usb_phy_roothub_add_phy(struct device *dev, int index, 22static int usb_phy_roothub_add_phy(struct device *dev, int index,
36 struct list_head *list) 23 struct list_head *list)
37{ 24{
@@ -45,9 +32,11 @@ static int usb_phy_roothub_add_phy(struct device *dev, int index,
45 return PTR_ERR(phy); 32 return PTR_ERR(phy);
46 } 33 }
47 34
48 roothub_entry = usb_phy_roothub_alloc(dev); 35 roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL);
49 if (IS_ERR(roothub_entry)) 36 if (!roothub_entry)
50 return PTR_ERR(roothub_entry); 37 return -ENOMEM;
38
39 INIT_LIST_HEAD(&roothub_entry->list);
51 40
52 roothub_entry->phy = phy; 41 roothub_entry->phy = phy;
53 42
@@ -56,28 +45,44 @@ static int usb_phy_roothub_add_phy(struct device *dev, int index,
56 return 0; 45 return 0;
57} 46}
58 47
59struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev) 48struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev)
60{ 49{
61 struct usb_phy_roothub *phy_roothub; 50 struct usb_phy_roothub *phy_roothub;
62 struct usb_phy_roothub *roothub_entry;
63 struct list_head *head;
64 int i, num_phys, err; 51 int i, num_phys, err;
65 52
53 if (!IS_ENABLED(CONFIG_GENERIC_PHY))
54 return NULL;
55
66 num_phys = of_count_phandle_with_args(dev->of_node, "phys", 56 num_phys = of_count_phandle_with_args(dev->of_node, "phys",
67 "#phy-cells"); 57 "#phy-cells");
68 if (num_phys <= 0) 58 if (num_phys <= 0)
69 return NULL; 59 return NULL;
70 60
71 phy_roothub = usb_phy_roothub_alloc(dev); 61 phy_roothub = devm_kzalloc(dev, sizeof(*phy_roothub), GFP_KERNEL);
72 if (IS_ERR(phy_roothub)) 62 if (!phy_roothub)
73 return phy_roothub; 63 return ERR_PTR(-ENOMEM);
64
65 INIT_LIST_HEAD(&phy_roothub->list);
74 66
75 for (i = 0; i < num_phys; i++) { 67 for (i = 0; i < num_phys; i++) {
76 err = usb_phy_roothub_add_phy(dev, i, &phy_roothub->list); 68 err = usb_phy_roothub_add_phy(dev, i, &phy_roothub->list);
77 if (err) 69 if (err)
78 goto err_out; 70 return ERR_PTR(err);
79 } 71 }
80 72
73 return phy_roothub;
74}
75EXPORT_SYMBOL_GPL(usb_phy_roothub_alloc);
76
77int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub)
78{
79 struct usb_phy_roothub *roothub_entry;
80 struct list_head *head;
81 int err;
82
83 if (!phy_roothub)
84 return 0;
85
81 head = &phy_roothub->list; 86 head = &phy_roothub->list;
82 87
83 list_for_each_entry(roothub_entry, head, list) { 88 list_for_each_entry(roothub_entry, head, list) {
@@ -86,14 +91,13 @@ struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev)
86 goto err_exit_phys; 91 goto err_exit_phys;
87 } 92 }
88 93
89 return phy_roothub; 94 return 0;
90 95
91err_exit_phys: 96err_exit_phys:
92 list_for_each_entry_continue_reverse(roothub_entry, head, list) 97 list_for_each_entry_continue_reverse(roothub_entry, head, list)
93 phy_exit(roothub_entry->phy); 98 phy_exit(roothub_entry->phy);
94 99
95err_out: 100 return err;
96 return ERR_PTR(err);
97} 101}
98EXPORT_SYMBOL_GPL(usb_phy_roothub_init); 102EXPORT_SYMBOL_GPL(usb_phy_roothub_init);
99 103
@@ -111,7 +115,7 @@ int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub)
111 list_for_each_entry(roothub_entry, head, list) { 115 list_for_each_entry(roothub_entry, head, list) {
112 err = phy_exit(roothub_entry->phy); 116 err = phy_exit(roothub_entry->phy);
113 if (err) 117 if (err)
114 ret = ret; 118 ret = err;
115 } 119 }
116 120
117 return ret; 121 return ret;
@@ -156,3 +160,38 @@ void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub)
156 phy_power_off(roothub_entry->phy); 160 phy_power_off(roothub_entry->phy);
157} 161}
158EXPORT_SYMBOL_GPL(usb_phy_roothub_power_off); 162EXPORT_SYMBOL_GPL(usb_phy_roothub_power_off);
163
164int usb_phy_roothub_suspend(struct device *controller_dev,
165 struct usb_phy_roothub *phy_roothub)
166{
167 usb_phy_roothub_power_off(phy_roothub);
168
169 /* keep the PHYs initialized so the device can wake up the system */
170 if (device_may_wakeup(controller_dev))
171 return 0;
172
173 return usb_phy_roothub_exit(phy_roothub);
174}
175EXPORT_SYMBOL_GPL(usb_phy_roothub_suspend);
176
177int usb_phy_roothub_resume(struct device *controller_dev,
178 struct usb_phy_roothub *phy_roothub)
179{
180 int err;
181
182 /* if the device can't wake up the system _exit was called */
183 if (!device_may_wakeup(controller_dev)) {
184 err = usb_phy_roothub_init(phy_roothub);
185 if (err)
186 return err;
187 }
188
189 err = usb_phy_roothub_power_on(phy_roothub);
190
191 /* undo _init if _power_on failed */
192 if (err && !device_may_wakeup(controller_dev))
193 usb_phy_roothub_exit(phy_roothub);
194
195 return err;
196}
197EXPORT_SYMBOL_GPL(usb_phy_roothub_resume);
diff --git a/drivers/usb/core/phy.h b/drivers/usb/core/phy.h
index 6fde59bfbff8..88a3c037e9df 100644
--- a/drivers/usb/core/phy.h
+++ b/drivers/usb/core/phy.h
@@ -1,7 +1,27 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * USB roothub wrapper
4 *
5 * Copyright (C) 2018 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
6 */
7
8#ifndef __USB_CORE_PHY_H_
9#define __USB_CORE_PHY_H_
10
11struct device;
1struct usb_phy_roothub; 12struct usb_phy_roothub;
2 13
3struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev); 14struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev);
15
16int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub);
4int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub); 17int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub);
5 18
6int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub); 19int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub);
7void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub); 20void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub);
21
22int usb_phy_roothub_suspend(struct device *controller_dev,
23 struct usb_phy_roothub *phy_roothub);
24int usb_phy_roothub_resume(struct device *controller_dev,
25 struct usb_phy_roothub *phy_roothub);
26
27#endif /* __USB_CORE_PHY_H_ */
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 920f48a49a87..c55def2f1320 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -186,6 +186,9 @@ static const struct usb_device_id usb_quirk_list[] = {
186 { USB_DEVICE(0x03f0, 0x0701), .driver_info = 186 { USB_DEVICE(0x03f0, 0x0701), .driver_info =
187 USB_QUIRK_STRING_FETCH_255 }, 187 USB_QUIRK_STRING_FETCH_255 },
188 188
189 /* HP v222w 16GB Mini USB Drive */
190 { USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
191
189 /* Creative SB Audigy 2 NX */ 192 /* Creative SB Audigy 2 NX */
190 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, 193 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
191 194
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
index 48779c44c361..eb494ec547e8 100644
--- a/drivers/usb/host/xhci-dbgtty.c
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -320,9 +320,11 @@ int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci)
320 320
321void xhci_dbc_tty_unregister_driver(void) 321void xhci_dbc_tty_unregister_driver(void)
322{ 322{
323 tty_unregister_driver(dbc_tty_driver); 323 if (dbc_tty_driver) {
324 put_tty_driver(dbc_tty_driver); 324 tty_unregister_driver(dbc_tty_driver);
325 dbc_tty_driver = NULL; 325 put_tty_driver(dbc_tty_driver);
326 dbc_tty_driver = NULL;
327 }
326} 328}
327 329
328static void dbc_rx_push(unsigned long _port) 330static void dbc_rx_push(unsigned long _port)
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index f17b7eab66cf..85ffda85f8ab 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -126,7 +126,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
126 if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) 126 if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
127 xhci->quirks |= XHCI_AMD_PLL_FIX; 127 xhci->quirks |= XHCI_AMD_PLL_FIX;
128 128
129 if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43bb) 129 if (pdev->vendor == PCI_VENDOR_ID_AMD &&
130 (pdev->device == 0x15e0 ||
131 pdev->device == 0x15e1 ||
132 pdev->device == 0x43bb))
130 xhci->quirks |= XHCI_SUSPEND_DELAY; 133 xhci->quirks |= XHCI_SUSPEND_DELAY;
131 134
132 if (pdev->vendor == PCI_VENDOR_ID_AMD) 135 if (pdev->vendor == PCI_VENDOR_ID_AMD)
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index df327dcc2bac..c1b22fc64e38 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -157,6 +157,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
157 struct resource *res; 157 struct resource *res;
158 struct usb_hcd *hcd; 158 struct usb_hcd *hcd;
159 struct clk *clk; 159 struct clk *clk;
160 struct clk *reg_clk;
160 int ret; 161 int ret;
161 int irq; 162 int irq;
162 163
@@ -226,17 +227,27 @@ static int xhci_plat_probe(struct platform_device *pdev)
226 hcd->rsrc_len = resource_size(res); 227 hcd->rsrc_len = resource_size(res);
227 228
228 /* 229 /*
229 * Not all platforms have a clk so it is not an error if the 230 * Not all platforms have clks so it is not an error if the
230 * clock does not exists. 231 * clock do not exist.
231 */ 232 */
233 reg_clk = devm_clk_get(&pdev->dev, "reg");
234 if (!IS_ERR(reg_clk)) {
235 ret = clk_prepare_enable(reg_clk);
236 if (ret)
237 goto put_hcd;
238 } else if (PTR_ERR(reg_clk) == -EPROBE_DEFER) {
239 ret = -EPROBE_DEFER;
240 goto put_hcd;
241 }
242
232 clk = devm_clk_get(&pdev->dev, NULL); 243 clk = devm_clk_get(&pdev->dev, NULL);
233 if (!IS_ERR(clk)) { 244 if (!IS_ERR(clk)) {
234 ret = clk_prepare_enable(clk); 245 ret = clk_prepare_enable(clk);
235 if (ret) 246 if (ret)
236 goto put_hcd; 247 goto disable_reg_clk;
237 } else if (PTR_ERR(clk) == -EPROBE_DEFER) { 248 } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
238 ret = -EPROBE_DEFER; 249 ret = -EPROBE_DEFER;
239 goto put_hcd; 250 goto disable_reg_clk;
240 } 251 }
241 252
242 xhci = hcd_to_xhci(hcd); 253 xhci = hcd_to_xhci(hcd);
@@ -252,6 +263,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
252 device_wakeup_enable(hcd->self.controller); 263 device_wakeup_enable(hcd->self.controller);
253 264
254 xhci->clk = clk; 265 xhci->clk = clk;
266 xhci->reg_clk = reg_clk;
255 xhci->main_hcd = hcd; 267 xhci->main_hcd = hcd;
256 xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev, 268 xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
257 dev_name(&pdev->dev), hcd); 269 dev_name(&pdev->dev), hcd);
@@ -320,8 +332,10 @@ put_usb3_hcd:
320 usb_put_hcd(xhci->shared_hcd); 332 usb_put_hcd(xhci->shared_hcd);
321 333
322disable_clk: 334disable_clk:
323 if (!IS_ERR(clk)) 335 clk_disable_unprepare(clk);
324 clk_disable_unprepare(clk); 336
337disable_reg_clk:
338 clk_disable_unprepare(reg_clk);
325 339
326put_hcd: 340put_hcd:
327 usb_put_hcd(hcd); 341 usb_put_hcd(hcd);
@@ -338,6 +352,7 @@ static int xhci_plat_remove(struct platform_device *dev)
338 struct usb_hcd *hcd = platform_get_drvdata(dev); 352 struct usb_hcd *hcd = platform_get_drvdata(dev);
339 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 353 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
340 struct clk *clk = xhci->clk; 354 struct clk *clk = xhci->clk;
355 struct clk *reg_clk = xhci->reg_clk;
341 356
342 xhci->xhc_state |= XHCI_STATE_REMOVING; 357 xhci->xhc_state |= XHCI_STATE_REMOVING;
343 358
@@ -347,8 +362,8 @@ static int xhci_plat_remove(struct platform_device *dev)
347 usb_remove_hcd(hcd); 362 usb_remove_hcd(hcd);
348 usb_put_hcd(xhci->shared_hcd); 363 usb_put_hcd(xhci->shared_hcd);
349 364
350 if (!IS_ERR(clk)) 365 clk_disable_unprepare(clk);
351 clk_disable_unprepare(clk); 366 clk_disable_unprepare(reg_clk);
352 usb_put_hcd(hcd); 367 usb_put_hcd(hcd);
353 368
354 pm_runtime_set_suspended(&dev->dev); 369 pm_runtime_set_suspended(&dev->dev);
@@ -420,7 +435,6 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
420static struct platform_driver usb_xhci_driver = { 435static struct platform_driver usb_xhci_driver = {
421 .probe = xhci_plat_probe, 436 .probe = xhci_plat_probe,
422 .remove = xhci_plat_remove, 437 .remove = xhci_plat_remove,
423 .shutdown = usb_hcd_platform_shutdown,
424 .driver = { 438 .driver = {
425 .name = "xhci-hcd", 439 .name = "xhci-hcd",
426 .pm = &xhci_plat_pm_ops, 440 .pm = &xhci_plat_pm_ops,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 05c909b04f14..6dfc4867dbcf 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1729,8 +1729,9 @@ struct xhci_hcd {
1729 int page_shift; 1729 int page_shift;
1730 /* msi-x vectors */ 1730 /* msi-x vectors */
1731 int msix_count; 1731 int msix_count;
1732 /* optional clock */ 1732 /* optional clocks */
1733 struct clk *clk; 1733 struct clk *clk;
1734 struct clk *reg_clk;
1734 /* data structures */ 1735 /* data structures */
1735 struct xhci_device_context_array *dcbaa; 1736 struct xhci_device_context_array *dcbaa;
1736 struct xhci_ring *cmd_ring; 1737 struct xhci_ring *cmd_ring;
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 05a679d5e3a2..6a60bc0490c5 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -451,7 +451,6 @@ static int dsps_musb_init(struct musb *musb)
451 if (!rev) 451 if (!rev)
452 return -ENODEV; 452 return -ENODEV;
453 453
454 usb_phy_init(musb->xceiv);
455 if (IS_ERR(musb->phy)) { 454 if (IS_ERR(musb->phy)) {
456 musb->phy = NULL; 455 musb->phy = NULL;
457 } else { 456 } else {
@@ -501,7 +500,6 @@ static int dsps_musb_exit(struct musb *musb)
501 struct dsps_glue *glue = dev_get_drvdata(dev->parent); 500 struct dsps_glue *glue = dev_get_drvdata(dev->parent);
502 501
503 del_timer_sync(&musb->dev_timer); 502 del_timer_sync(&musb->dev_timer);
504 usb_phy_shutdown(musb->xceiv);
505 phy_power_off(musb->phy); 503 phy_power_off(musb->phy);
506 phy_exit(musb->phy); 504 phy_exit(musb->phy);
507 debugfs_remove_recursive(glue->dbgfs_root); 505 debugfs_remove_recursive(glue->dbgfs_root);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 3a8451a15f7f..4fa372c845e1 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2754,6 +2754,7 @@ int musb_host_setup(struct musb *musb, int power_budget)
2754 hcd->self.otg_port = 1; 2754 hcd->self.otg_port = 1;
2755 musb->xceiv->otg->host = &hcd->self; 2755 musb->xceiv->otg->host = &hcd->self;
2756 hcd->power_budget = 2 * (power_budget ? : 250); 2756 hcd->power_budget = 2 * (power_budget ? : 250);
2757 hcd->skip_phy_initialization = 1;
2757 2758
2758 ret = usb_add_hcd(hcd, 0, 0); 2759 ret = usb_add_hcd(hcd, 0, 0);
2759 if (ret < 0) 2760 if (ret < 0)
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index a646820f5a78..533f127c30ad 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -62,6 +62,7 @@ config USB_SERIAL_SIMPLE
62 - Fundamental Software dongle. 62 - Fundamental Software dongle.
63 - Google USB serial devices 63 - Google USB serial devices
64 - HP4x calculators 64 - HP4x calculators
65 - Libtransistor USB console
65 - a number of Motorola phones 66 - a number of Motorola phones
66 - Motorola Tetra devices 67 - Motorola Tetra devices
67 - Novatel Wireless GPS receivers 68 - Novatel Wireless GPS receivers
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index de1e759dd512..eb6c26cbe579 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -214,6 +214,7 @@ static const struct usb_device_id id_table[] = {
214 { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ 214 { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
215 { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ 215 { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
216 { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */ 216 { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
217 { USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */
217 { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ 218 { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
218 { } /* Terminating Entry */ 219 { } /* Terminating Entry */
219}; 220};
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 87202ad5a50d..7ea221d42dba 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1898,7 +1898,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
1898 return ftdi_jtag_probe(serial); 1898 return ftdi_jtag_probe(serial);
1899 1899
1900 if (udev->product && 1900 if (udev->product &&
1901 (!strcmp(udev->product, "BeagleBone/XDS100V2") || 1901 (!strcmp(udev->product, "Arrow USB Blaster") ||
1902 !strcmp(udev->product, "BeagleBone/XDS100V2") ||
1902 !strcmp(udev->product, "SNAP Connect E10"))) 1903 !strcmp(udev->product, "SNAP Connect E10")))
1903 return ftdi_jtag_probe(serial); 1904 return ftdi_jtag_probe(serial);
1904 1905
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 4ef79e29cb26..40864c2bd9dc 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -63,6 +63,11 @@ DEVICE(flashloader, FLASHLOADER_IDS);
63 0x01) } 63 0x01) }
64DEVICE(google, GOOGLE_IDS); 64DEVICE(google, GOOGLE_IDS);
65 65
66/* Libtransistor USB console */
67#define LIBTRANSISTOR_IDS() \
68 { USB_DEVICE(0x1209, 0x8b00) }
69DEVICE(libtransistor, LIBTRANSISTOR_IDS);
70
66/* ViVOpay USB Serial Driver */ 71/* ViVOpay USB Serial Driver */
67#define VIVOPAY_IDS() \ 72#define VIVOPAY_IDS() \
68 { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */ 73 { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */
@@ -110,6 +115,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
110 &funsoft_device, 115 &funsoft_device,
111 &flashloader_device, 116 &flashloader_device,
112 &google_device, 117 &google_device,
118 &libtransistor_device,
113 &vivopay_device, 119 &vivopay_device,
114 &moto_modem_device, 120 &moto_modem_device,
115 &motorola_tetra_device, 121 &motorola_tetra_device,
@@ -126,6 +132,7 @@ static const struct usb_device_id id_table[] = {
126 FUNSOFT_IDS(), 132 FUNSOFT_IDS(),
127 FLASHLOADER_IDS(), 133 FLASHLOADER_IDS(),
128 GOOGLE_IDS(), 134 GOOGLE_IDS(),
135 LIBTRANSISTOR_IDS(),
129 VIVOPAY_IDS(), 136 VIVOPAY_IDS(),
130 MOTO_IDS(), 137 MOTO_IDS(),
131 MOTOROLA_TETRA_IDS(), 138 MOTOROLA_TETRA_IDS(),
diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
index b57891c1fd31..7afbea512207 100644
--- a/drivers/usb/typec/ucsi/Makefile
+++ b/drivers/usb/typec/ucsi/Makefile
@@ -5,6 +5,6 @@ obj-$(CONFIG_TYPEC_UCSI) += typec_ucsi.o
5 5
6typec_ucsi-y := ucsi.o 6typec_ucsi-y := ucsi.o
7 7
8typec_ucsi-$(CONFIG_FTRACE) += trace.o 8typec_ucsi-$(CONFIG_TRACING) += trace.o
9 9
10obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o 10obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index bf0977fbd100..bd5cca5632b3 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -28,7 +28,7 @@
28 * difficult to estimate the time it takes for the system to process the command 28 * difficult to estimate the time it takes for the system to process the command
29 * before it is actually passed to the PPM. 29 * before it is actually passed to the PPM.
30 */ 30 */
31#define UCSI_TIMEOUT_MS 1000 31#define UCSI_TIMEOUT_MS 5000
32 32
33/* 33/*
34 * UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests 34 * UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index c31c8402a0c5..d41d0cdeec0f 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -186,7 +186,12 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
186 if (!bid) 186 if (!bid)
187 return -ENODEV; 187 return -ENODEV;
188 188
189 /* device_attach() callers should hold parent lock for USB */
190 if (bid->udev->dev.parent)
191 device_lock(bid->udev->dev.parent);
189 ret = device_attach(&bid->udev->dev); 192 ret = device_attach(&bid->udev->dev);
193 if (bid->udev->dev.parent)
194 device_unlock(bid->udev->dev.parent);
190 if (ret < 0) { 195 if (ret < 0) {
191 dev_err(&bid->udev->dev, "rebind failed\n"); 196 dev_err(&bid->udev->dev, "rebind failed\n");
192 return ret; 197 return ret;
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index 473fb8a87289..bf8afe9b5883 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -243,7 +243,7 @@ enum usbip_side {
243#define VUDC_EVENT_ERROR_USB (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) 243#define VUDC_EVENT_ERROR_USB (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
244#define VUDC_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) 244#define VUDC_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
245 245
246#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_BYE) 246#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
247#define VDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET) 247#define VDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
248#define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET) 248#define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
249#define VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) 249#define VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
index 5b4c0864ad92..5d88917c9631 100644
--- a/drivers/usb/usbip/usbip_event.c
+++ b/drivers/usb/usbip/usbip_event.c
@@ -91,10 +91,6 @@ static void event_handler(struct work_struct *work)
91 unset_event(ud, USBIP_EH_UNUSABLE); 91 unset_event(ud, USBIP_EH_UNUSABLE);
92 } 92 }
93 93
94 /* Stop the error handler. */
95 if (ud->event & USBIP_EH_BYE)
96 usbip_dbg_eh("removed %p\n", ud);
97
98 wake_up(&ud->eh_waitq); 94 wake_up(&ud->eh_waitq);
99 } 95 }
100} 96}
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 20e3d4609583..d11f3f8dad40 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -354,6 +354,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
354 usbip_dbg_vhci_rh(" ClearHubFeature\n"); 354 usbip_dbg_vhci_rh(" ClearHubFeature\n");
355 break; 355 break;
356 case ClearPortFeature: 356 case ClearPortFeature:
357 if (rhport < 0)
358 goto error;
357 switch (wValue) { 359 switch (wValue) {
358 case USB_PORT_FEAT_SUSPEND: 360 case USB_PORT_FEAT_SUSPEND:
359 if (hcd->speed == HCD_USB3) { 361 if (hcd->speed == HCD_USB3) {
@@ -511,11 +513,16 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
511 goto error; 513 goto error;
512 } 514 }
513 515
516 if (rhport < 0)
517 goto error;
518
514 vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND; 519 vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND;
515 break; 520 break;
516 case USB_PORT_FEAT_POWER: 521 case USB_PORT_FEAT_POWER:
517 usbip_dbg_vhci_rh( 522 usbip_dbg_vhci_rh(
518 " SetPortFeature: USB_PORT_FEAT_POWER\n"); 523 " SetPortFeature: USB_PORT_FEAT_POWER\n");
524 if (rhport < 0)
525 goto error;
519 if (hcd->speed == HCD_USB3) 526 if (hcd->speed == HCD_USB3)
520 vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER; 527 vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER;
521 else 528 else
@@ -524,6 +531,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
524 case USB_PORT_FEAT_BH_PORT_RESET: 531 case USB_PORT_FEAT_BH_PORT_RESET:
525 usbip_dbg_vhci_rh( 532 usbip_dbg_vhci_rh(
526 " SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n"); 533 " SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n");
534 if (rhport < 0)
535 goto error;
527 /* Applicable only for USB3.0 hub */ 536 /* Applicable only for USB3.0 hub */
528 if (hcd->speed != HCD_USB3) { 537 if (hcd->speed != HCD_USB3) {
529 pr_err("USB_PORT_FEAT_BH_PORT_RESET req not " 538 pr_err("USB_PORT_FEAT_BH_PORT_RESET req not "
@@ -534,6 +543,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
534 case USB_PORT_FEAT_RESET: 543 case USB_PORT_FEAT_RESET:
535 usbip_dbg_vhci_rh( 544 usbip_dbg_vhci_rh(
536 " SetPortFeature: USB_PORT_FEAT_RESET\n"); 545 " SetPortFeature: USB_PORT_FEAT_RESET\n");
546 if (rhport < 0)
547 goto error;
537 /* if it's already enabled, disable */ 548 /* if it's already enabled, disable */
538 if (hcd->speed == HCD_USB3) { 549 if (hcd->speed == HCD_USB3) {
539 vhci_hcd->port_status[rhport] = 0; 550 vhci_hcd->port_status[rhport] = 0;
@@ -554,6 +565,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
554 default: 565 default:
555 usbip_dbg_vhci_rh(" SetPortFeature: default %d\n", 566 usbip_dbg_vhci_rh(" SetPortFeature: default %d\n",
556 wValue); 567 wValue);
568 if (rhport < 0)
569 goto error;
557 if (hcd->speed == HCD_USB3) { 570 if (hcd->speed == HCD_USB3) {
558 if ((vhci_hcd->port_status[rhport] & 571 if ((vhci_hcd->port_status[rhport] &
559 USB_SS_PORT_STAT_POWER) != 0) { 572 USB_SS_PORT_STAT_POWER) != 0) {
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 190dbf8cfcb5..2f3856a95856 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -114,7 +114,7 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
114 } 114 }
115 115
116out: 116out:
117 kfree(req); 117 vbg_req_free(req, sizeof(*req));
118 kfree(pages); 118 kfree(pages);
119} 119}
120 120
@@ -144,7 +144,7 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
144 144
145 rc = vbg_req_perform(gdev, req); 145 rc = vbg_req_perform(gdev, req);
146 146
147 kfree(req); 147 vbg_req_free(req, sizeof(*req));
148 148
149 if (rc < 0) { 149 if (rc < 0) {
150 vbg_err("%s error: %d\n", __func__, rc); 150 vbg_err("%s error: %d\n", __func__, rc);
@@ -214,8 +214,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
214 ret = vbg_status_code_to_errno(rc); 214 ret = vbg_status_code_to_errno(rc);
215 215
216out_free: 216out_free:
217 kfree(req2); 217 vbg_req_free(req2, sizeof(*req2));
218 kfree(req1); 218 vbg_req_free(req1, sizeof(*req1));
219 return ret; 219 return ret;
220} 220}
221 221
@@ -245,7 +245,7 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
245 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */ 245 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
246 rc = VINF_SUCCESS; 246 rc = VINF_SUCCESS;
247 247
248 kfree(req); 248 vbg_req_free(req, sizeof(*req));
249 249
250 return vbg_status_code_to_errno(rc); 250 return vbg_status_code_to_errno(rc);
251} 251}
@@ -431,7 +431,7 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
431 rc = vbg_req_perform(gdev, req); 431 rc = vbg_req_perform(gdev, req);
432 do_div(req->interval_ns, 1000000); /* ns -> ms */ 432 do_div(req->interval_ns, 1000000); /* ns -> ms */
433 gdev->heartbeat_interval_ms = req->interval_ns; 433 gdev->heartbeat_interval_ms = req->interval_ns;
434 kfree(req); 434 vbg_req_free(req, sizeof(*req));
435 435
436 return vbg_status_code_to_errno(rc); 436 return vbg_status_code_to_errno(rc);
437} 437}
@@ -454,12 +454,6 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
454 if (ret < 0) 454 if (ret < 0)
455 return ret; 455 return ret;
456 456
457 /*
458 * Preallocate the request to use it from the timer callback because:
459 * 1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL
460 * and the timer callback runs at DISPATCH_LEVEL;
461 * 2) avoid repeated allocations.
462 */
463 gdev->guest_heartbeat_req = vbg_req_alloc( 457 gdev->guest_heartbeat_req = vbg_req_alloc(
464 sizeof(*gdev->guest_heartbeat_req), 458 sizeof(*gdev->guest_heartbeat_req),
465 VMMDEVREQ_GUEST_HEARTBEAT); 459 VMMDEVREQ_GUEST_HEARTBEAT);
@@ -481,8 +475,8 @@ static void vbg_heartbeat_exit(struct vbg_dev *gdev)
481{ 475{
482 del_timer_sync(&gdev->heartbeat_timer); 476 del_timer_sync(&gdev->heartbeat_timer);
483 vbg_heartbeat_host_config(gdev, false); 477 vbg_heartbeat_host_config(gdev, false);
484 kfree(gdev->guest_heartbeat_req); 478 vbg_req_free(gdev->guest_heartbeat_req,
485 479 sizeof(*gdev->guest_heartbeat_req));
486} 480}
487 481
488/** 482/**
@@ -543,7 +537,7 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
543 if (rc < 0) 537 if (rc < 0)
544 vbg_err("%s error, rc: %d\n", __func__, rc); 538 vbg_err("%s error, rc: %d\n", __func__, rc);
545 539
546 kfree(req); 540 vbg_req_free(req, sizeof(*req));
547 return vbg_status_code_to_errno(rc); 541 return vbg_status_code_to_errno(rc);
548} 542}
549 543
@@ -617,7 +611,7 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
617 611
618out: 612out:
619 mutex_unlock(&gdev->session_mutex); 613 mutex_unlock(&gdev->session_mutex);
620 kfree(req); 614 vbg_req_free(req, sizeof(*req));
621 615
622 return ret; 616 return ret;
623} 617}
@@ -642,7 +636,7 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
642 if (rc < 0) 636 if (rc < 0)
643 vbg_err("%s error, rc: %d\n", __func__, rc); 637 vbg_err("%s error, rc: %d\n", __func__, rc);
644 638
645 kfree(req); 639 vbg_req_free(req, sizeof(*req));
646 return vbg_status_code_to_errno(rc); 640 return vbg_status_code_to_errno(rc);
647} 641}
648 642
@@ -712,7 +706,7 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
712 706
713out: 707out:
714 mutex_unlock(&gdev->session_mutex); 708 mutex_unlock(&gdev->session_mutex);
715 kfree(req); 709 vbg_req_free(req, sizeof(*req));
716 710
717 return ret; 711 return ret;
718} 712}
@@ -733,8 +727,10 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
733 727
734 rc = vbg_req_perform(gdev, req); 728 rc = vbg_req_perform(gdev, req);
735 ret = vbg_status_code_to_errno(rc); 729 ret = vbg_status_code_to_errno(rc);
736 if (ret) 730 if (ret) {
731 vbg_err("%s error: %d\n", __func__, rc);
737 goto out; 732 goto out;
733 }
738 734
739 snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u", 735 snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
740 req->major, req->minor, req->build, req->revision); 736 req->major, req->minor, req->build, req->revision);
@@ -749,7 +745,7 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
749 } 745 }
750 746
751out: 747out:
752 kfree(req); 748 vbg_req_free(req, sizeof(*req));
753 return ret; 749 return ret;
754} 750}
755 751
@@ -847,11 +843,16 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
847 return 0; 843 return 0;
848 844
849err_free_reqs: 845err_free_reqs:
850 kfree(gdev->mouse_status_req); 846 vbg_req_free(gdev->mouse_status_req,
851 kfree(gdev->ack_events_req); 847 sizeof(*gdev->mouse_status_req));
852 kfree(gdev->cancel_req); 848 vbg_req_free(gdev->ack_events_req,
853 kfree(gdev->mem_balloon.change_req); 849 sizeof(*gdev->ack_events_req));
854 kfree(gdev->mem_balloon.get_req); 850 vbg_req_free(gdev->cancel_req,
851 sizeof(*gdev->cancel_req));
852 vbg_req_free(gdev->mem_balloon.change_req,
853 sizeof(*gdev->mem_balloon.change_req));
854 vbg_req_free(gdev->mem_balloon.get_req,
855 sizeof(*gdev->mem_balloon.get_req));
855 return ret; 856 return ret;
856} 857}
857 858
@@ -872,11 +873,16 @@ void vbg_core_exit(struct vbg_dev *gdev)
872 vbg_reset_host_capabilities(gdev); 873 vbg_reset_host_capabilities(gdev);
873 vbg_core_set_mouse_status(gdev, 0); 874 vbg_core_set_mouse_status(gdev, 0);
874 875
875 kfree(gdev->mouse_status_req); 876 vbg_req_free(gdev->mouse_status_req,
876 kfree(gdev->ack_events_req); 877 sizeof(*gdev->mouse_status_req));
877 kfree(gdev->cancel_req); 878 vbg_req_free(gdev->ack_events_req,
878 kfree(gdev->mem_balloon.change_req); 879 sizeof(*gdev->ack_events_req));
879 kfree(gdev->mem_balloon.get_req); 880 vbg_req_free(gdev->cancel_req,
881 sizeof(*gdev->cancel_req));
882 vbg_req_free(gdev->mem_balloon.change_req,
883 sizeof(*gdev->mem_balloon.change_req));
884 vbg_req_free(gdev->mem_balloon.get_req,
885 sizeof(*gdev->mem_balloon.get_req));
880} 886}
881 887
882/** 888/**
@@ -1415,7 +1421,7 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
1415 req->flags = dump->u.in.flags; 1421 req->flags = dump->u.in.flags;
1416 dump->hdr.rc = vbg_req_perform(gdev, req); 1422 dump->hdr.rc = vbg_req_perform(gdev, req);
1417 1423
1418 kfree(req); 1424 vbg_req_free(req, sizeof(*req));
1419 return 0; 1425 return 0;
1420} 1426}
1421 1427
@@ -1513,7 +1519,7 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
1513 if (rc < 0) 1519 if (rc < 0)
1514 vbg_err("%s error, rc: %d\n", __func__, rc); 1520 vbg_err("%s error, rc: %d\n", __func__, rc);
1515 1521
1516 kfree(req); 1522 vbg_req_free(req, sizeof(*req));
1517 return vbg_status_code_to_errno(rc); 1523 return vbg_status_code_to_errno(rc);
1518} 1524}
1519 1525
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
index 6c784bf4fa6d..7ad9ec45bfa9 100644
--- a/drivers/virt/vboxguest/vboxguest_core.h
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -171,4 +171,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
171 171
172void vbg_linux_mouse_event(struct vbg_dev *gdev); 172void vbg_linux_mouse_event(struct vbg_dev *gdev);
173 173
174/* Private (non exported) functions form vboxguest_utils.c */
175void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
176void vbg_req_free(void *req, size_t len);
177int vbg_req_perform(struct vbg_dev *gdev, void *req);
178int vbg_hgcm_call32(
179 struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
180 struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
181 int *vbox_status);
182
174#endif 183#endif
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 82e280d38cc2..398d22693234 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -87,6 +87,7 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
87 struct vbg_session *session = filp->private_data; 87 struct vbg_session *session = filp->private_data;
88 size_t returned_size, size; 88 size_t returned_size, size;
89 struct vbg_ioctl_hdr hdr; 89 struct vbg_ioctl_hdr hdr;
90 bool is_vmmdev_req;
90 int ret = 0; 91 int ret = 0;
91 void *buf; 92 void *buf;
92 93
@@ -106,8 +107,17 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
106 if (size > SZ_16M) 107 if (size > SZ_16M)
107 return -E2BIG; 108 return -E2BIG;
108 109
109 /* __GFP_DMA32 because IOCTL_VMMDEV_REQUEST passes this to the host */ 110 /*
110 buf = kmalloc(size, GFP_KERNEL | __GFP_DMA32); 111 * IOCTL_VMMDEV_REQUEST needs the buffer to be below 4G to avoid
112 * the need for a bounce-buffer and another copy later on.
113 */
114 is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) ||
115 req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
116
117 if (is_vmmdev_req)
118 buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
119 else
120 buf = kmalloc(size, GFP_KERNEL);
111 if (!buf) 121 if (!buf)
112 return -ENOMEM; 122 return -ENOMEM;
113 123
@@ -132,7 +142,10 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
132 ret = -EFAULT; 142 ret = -EFAULT;
133 143
134out: 144out:
135 kfree(buf); 145 if (is_vmmdev_req)
146 vbg_req_free(buf, size);
147 else
148 kfree(buf);
136 149
137 return ret; 150 return ret;
138} 151}
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index 0f0dab8023cf..bf4474214b4d 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -65,8 +65,9 @@ VBG_LOG(vbg_debug, pr_debug);
65void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type) 65void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
66{ 66{
67 struct vmmdev_request_header *req; 67 struct vmmdev_request_header *req;
68 int order = get_order(PAGE_ALIGN(len));
68 69
69 req = kmalloc(len, GFP_KERNEL | __GFP_DMA32); 70 req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
70 if (!req) 71 if (!req)
71 return NULL; 72 return NULL;
72 73
@@ -82,6 +83,14 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
82 return req; 83 return req;
83} 84}
84 85
86void vbg_req_free(void *req, size_t len)
87{
88 if (!req)
89 return;
90
91 free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
92}
93
85/* Note this function returns a VBox status code, not a negative errno!! */ 94/* Note this function returns a VBox status code, not a negative errno!! */
86int vbg_req_perform(struct vbg_dev *gdev, void *req) 95int vbg_req_perform(struct vbg_dev *gdev, void *req)
87{ 96{
@@ -137,7 +146,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
137 rc = hgcm_connect->header.result; 146 rc = hgcm_connect->header.result;
138 } 147 }
139 148
140 kfree(hgcm_connect); 149 vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));
141 150
142 *vbox_status = rc; 151 *vbox_status = rc;
143 return 0; 152 return 0;
@@ -166,7 +175,7 @@ int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
166 if (rc >= 0) 175 if (rc >= 0)
167 rc = hgcm_disconnect->header.result; 176 rc = hgcm_disconnect->header.result;
168 177
169 kfree(hgcm_disconnect); 178 vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));
170 179
171 *vbox_status = rc; 180 *vbox_status = rc;
172 return 0; 181 return 0;
@@ -623,7 +632,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
623 } 632 }
624 633
625 if (!leak_it) 634 if (!leak_it)
626 kfree(call); 635 vbg_req_free(call, size);
627 636
628free_bounce_bufs: 637free_bounce_bufs:
629 if (bounce_bufs) { 638 if (bounce_bufs) {
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 7e72348639e4..315f7e63e7cc 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -228,7 +228,15 @@ static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
228 228
229static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci) 229static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci)
230{ 230{
231 return (ci->i_max_files || ci->i_max_bytes); 231 bool ret = false;
232 spin_lock(&ci->i_ceph_lock);
233 if ((ci->i_max_files || ci->i_max_bytes) &&
234 ci->i_vino.snap == CEPH_NOSNAP &&
235 ci->i_snap_realm &&
236 ci->i_snap_realm->ino == ci->i_vino.ino)
237 ret = true;
238 spin_unlock(&ci->i_ceph_lock);
239 return ret;
232} 240}
233 241
234static size_t ceph_vxattrcb_quota(struct ceph_inode_info *ci, char *val, 242static size_t ceph_vxattrcb_quota(struct ceph_inode_info *ci, char *val,
@@ -1008,14 +1016,19 @@ int __ceph_setxattr(struct inode *inode, const char *name,
1008 char *newval = NULL; 1016 char *newval = NULL;
1009 struct ceph_inode_xattr *xattr = NULL; 1017 struct ceph_inode_xattr *xattr = NULL;
1010 int required_blob_size; 1018 int required_blob_size;
1019 bool check_realm = false;
1011 bool lock_snap_rwsem = false; 1020 bool lock_snap_rwsem = false;
1012 1021
1013 if (ceph_snap(inode) != CEPH_NOSNAP) 1022 if (ceph_snap(inode) != CEPH_NOSNAP)
1014 return -EROFS; 1023 return -EROFS;
1015 1024
1016 vxattr = ceph_match_vxattr(inode, name); 1025 vxattr = ceph_match_vxattr(inode, name);
1017 if (vxattr && vxattr->readonly) 1026 if (vxattr) {
1018 return -EOPNOTSUPP; 1027 if (vxattr->readonly)
1028 return -EOPNOTSUPP;
1029 if (value && !strncmp(vxattr->name, "ceph.quota", 10))
1030 check_realm = true;
1031 }
1019 1032
1020 /* pass any unhandled ceph.* xattrs through to the MDS */ 1033 /* pass any unhandled ceph.* xattrs through to the MDS */
1021 if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN)) 1034 if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
@@ -1109,6 +1122,15 @@ do_sync_unlocked:
1109 err = -EBUSY; 1122 err = -EBUSY;
1110 } else { 1123 } else {
1111 err = ceph_sync_setxattr(inode, name, value, size, flags); 1124 err = ceph_sync_setxattr(inode, name, value, size, flags);
1125 if (err >= 0 && check_realm) {
1126 /* check if snaprealm was created for quota inode */
1127 spin_lock(&ci->i_ceph_lock);
1128 if ((ci->i_max_files || ci->i_max_bytes) &&
1129 !(ci->i_snap_realm &&
1130 ci->i_snap_realm->ino == ci->i_vino.ino))
1131 err = -EOPNOTSUPP;
1132 spin_unlock(&ci->i_ceph_lock);
1133 }
1112 } 1134 }
1113out: 1135out:
1114 ceph_free_cap_flush(prealloc_cf); 1136 ceph_free_cap_flush(prealloc_cf);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 6d3e40d7029c..1529a088383d 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -455,6 +455,9 @@ cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
455 server->sign = true; 455 server->sign = true;
456 } 456 }
457 457
458 if (cifs_rdma_enabled(server) && server->sign)
459 cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled");
460
458 return 0; 461 return 0;
459} 462}
460 463
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index e8830f076a7f..a5aa158d535a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2959,6 +2959,22 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
2959 } 2959 }
2960 } 2960 }
2961 2961
2962 if (volume_info->seal) {
2963 if (ses->server->vals->protocol_id == 0) {
2964 cifs_dbg(VFS,
2965 "SMB3 or later required for encryption\n");
2966 rc = -EOPNOTSUPP;
2967 goto out_fail;
2968 } else if (tcon->ses->server->capabilities &
2969 SMB2_GLOBAL_CAP_ENCRYPTION)
2970 tcon->seal = true;
2971 else {
2972 cifs_dbg(VFS, "Encryption is not supported on share\n");
2973 rc = -EOPNOTSUPP;
2974 goto out_fail;
2975 }
2976 }
2977
2962 /* 2978 /*
2963 * BB Do we need to wrap session_mutex around this TCon call and Unix 2979 * BB Do we need to wrap session_mutex around this TCon call and Unix
2964 * SetFS as we do on SessSetup and reconnect? 2980 * SetFS as we do on SessSetup and reconnect?
@@ -3007,22 +3023,6 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
3007 tcon->use_resilient = true; 3023 tcon->use_resilient = true;
3008 } 3024 }
3009 3025
3010 if (volume_info->seal) {
3011 if (ses->server->vals->protocol_id == 0) {
3012 cifs_dbg(VFS,
3013 "SMB3 or later required for encryption\n");
3014 rc = -EOPNOTSUPP;
3015 goto out_fail;
3016 } else if (tcon->ses->server->capabilities &
3017 SMB2_GLOBAL_CAP_ENCRYPTION)
3018 tcon->seal = true;
3019 else {
3020 cifs_dbg(VFS, "Encryption is not supported on share\n");
3021 rc = -EOPNOTSUPP;
3022 goto out_fail;
3023 }
3024 }
3025
3026 /* 3026 /*
3027 * We can have only one retry value for a connection to a share so for 3027 * We can have only one retry value for a connection to a share so for
3028 * resources mounted more than once to the same server share the last 3028 * resources mounted more than once to the same server share the last
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 38ebf3f357d2..b76b85881dcc 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -252,9 +252,14 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
252 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE; 252 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
253 wsize = min_t(unsigned int, wsize, server->max_write); 253 wsize = min_t(unsigned int, wsize, server->max_write);
254#ifdef CONFIG_CIFS_SMB_DIRECT 254#ifdef CONFIG_CIFS_SMB_DIRECT
255 if (server->rdma) 255 if (server->rdma) {
256 wsize = min_t(unsigned int, 256 if (server->sign)
257 wsize = min_t(unsigned int,
258 wsize, server->smbd_conn->max_fragmented_send_size);
259 else
260 wsize = min_t(unsigned int,
257 wsize, server->smbd_conn->max_readwrite_size); 261 wsize, server->smbd_conn->max_readwrite_size);
262 }
258#endif 263#endif
259 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) 264 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
260 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE); 265 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
@@ -272,9 +277,14 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
272 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE; 277 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
273 rsize = min_t(unsigned int, rsize, server->max_read); 278 rsize = min_t(unsigned int, rsize, server->max_read);
274#ifdef CONFIG_CIFS_SMB_DIRECT 279#ifdef CONFIG_CIFS_SMB_DIRECT
275 if (server->rdma) 280 if (server->rdma) {
276 rsize = min_t(unsigned int, 281 if (server->sign)
282 rsize = min_t(unsigned int,
283 rsize, server->smbd_conn->max_fragmented_recv_size);
284 else
285 rsize = min_t(unsigned int,
277 rsize, server->smbd_conn->max_readwrite_size); 286 rsize, server->smbd_conn->max_readwrite_size);
287 }
278#endif 288#endif
279 289
280 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) 290 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 0f044c4a2dc9..60db51bae0e3 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -383,10 +383,10 @@ static void
383build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt) 383build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
384{ 384{
385 pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES; 385 pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
386 pneg_ctxt->DataLength = cpu_to_le16(6); 386 pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + le16 cipher */
387 pneg_ctxt->CipherCount = cpu_to_le16(2); 387 pneg_ctxt->CipherCount = cpu_to_le16(1);
388 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM; 388/* pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;*/ /* not supported yet */
389 pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM; 389 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_CCM;
390} 390}
391 391
392static void 392static void
@@ -444,6 +444,7 @@ static int decode_encrypt_ctx(struct TCP_Server_Info *server,
444 return -EINVAL; 444 return -EINVAL;
445 } 445 }
446 server->cipher_type = ctxt->Ciphers[0]; 446 server->cipher_type = ctxt->Ciphers[0];
447 server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
447 return 0; 448 return 0;
448} 449}
449 450
@@ -2590,7 +2591,7 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
2590 * If we want to do a RDMA write, fill in and append 2591 * If we want to do a RDMA write, fill in and append
2591 * smbd_buffer_descriptor_v1 to the end of read request 2592 * smbd_buffer_descriptor_v1 to the end of read request
2592 */ 2593 */
2593 if (server->rdma && rdata && 2594 if (server->rdma && rdata && !server->sign &&
2594 rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) { 2595 rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {
2595 2596
2596 struct smbd_buffer_descriptor_v1 *v1; 2597 struct smbd_buffer_descriptor_v1 *v1;
@@ -2968,7 +2969,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
2968 * If we want to do a server RDMA read, fill in and append 2969 * If we want to do a server RDMA read, fill in and append
2969 * smbd_buffer_descriptor_v1 to the end of write request 2970 * smbd_buffer_descriptor_v1 to the end of write request
2970 */ 2971 */
2971 if (server->rdma && wdata->bytes >= 2972 if (server->rdma && !server->sign && wdata->bytes >=
2972 server->smbd_conn->rdma_readwrite_threshold) { 2973 server->smbd_conn->rdma_readwrite_threshold) {
2973 2974
2974 struct smbd_buffer_descriptor_v1 *v1; 2975 struct smbd_buffer_descriptor_v1 *v1;
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 6093e5142b2b..d28f358022c5 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -297,7 +297,7 @@ struct smb2_encryption_neg_context {
297 __le16 DataLength; 297 __le16 DataLength;
298 __le32 Reserved; 298 __le32 Reserved;
299 __le16 CipherCount; /* AES-128-GCM and AES-128-CCM */ 299 __le16 CipherCount; /* AES-128-GCM and AES-128-CCM */
300 __le16 Ciphers[2]; /* Ciphers[0] since only one used now */ 300 __le16 Ciphers[1]; /* Ciphers[0] since only one used now */
301} __packed; 301} __packed;
302 302
303struct smb2_negotiate_rsp { 303struct smb2_negotiate_rsp {
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index 87817ddcc096..c62f7c95683c 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -2086,7 +2086,7 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
2086 int start, i, j; 2086 int start, i, j;
2087 int max_iov_size = 2087 int max_iov_size =
2088 info->max_send_size - sizeof(struct smbd_data_transfer); 2088 info->max_send_size - sizeof(struct smbd_data_transfer);
2089 struct kvec iov[SMBDIRECT_MAX_SGE]; 2089 struct kvec *iov;
2090 int rc; 2090 int rc;
2091 2091
2092 info->smbd_send_pending++; 2092 info->smbd_send_pending++;
@@ -2096,32 +2096,20 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
2096 } 2096 }
2097 2097
2098 /* 2098 /*
2099 * This usually means a configuration error 2099 * Skip the RFC1002 length defined in MS-SMB2 section 2.1
2100 * We use RDMA read/write for packet size > rdma_readwrite_threshold 2100 * It is used only for TCP transport in the iov[0]
2101 * as long as it's properly configured we should never get into this
2102 * situation
2103 */
2104 if (rqst->rq_nvec + rqst->rq_npages > SMBDIRECT_MAX_SGE) {
2105 log_write(ERR, "maximum send segment %x exceeding %x\n",
2106 rqst->rq_nvec + rqst->rq_npages, SMBDIRECT_MAX_SGE);
2107 rc = -EINVAL;
2108 goto done;
2109 }
2110
2111 /*
2112 * Remove the RFC1002 length defined in MS-SMB2 section 2.1
2113 * It is used only for TCP transport
2114 * In future we may want to add a transport layer under protocol 2101 * In future we may want to add a transport layer under protocol
2115 * layer so this will only be issued to TCP transport 2102 * layer so this will only be issued to TCP transport
2116 */ 2103 */
2117 iov[0].iov_base = (char *)rqst->rq_iov[0].iov_base + 4; 2104
2118 iov[0].iov_len = rqst->rq_iov[0].iov_len - 4; 2105 if (rqst->rq_iov[0].iov_len != 4) {
2119 buflen += iov[0].iov_len; 2106 log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
2107 return -EINVAL;
2108 }
2109 iov = &rqst->rq_iov[1];
2120 2110
2121 /* total up iov array first */ 2111 /* total up iov array first */
2122 for (i = 1; i < rqst->rq_nvec; i++) { 2112 for (i = 0; i < rqst->rq_nvec-1; i++) {
2123 iov[i].iov_base = rqst->rq_iov[i].iov_base;
2124 iov[i].iov_len = rqst->rq_iov[i].iov_len;
2125 buflen += iov[i].iov_len; 2113 buflen += iov[i].iov_len;
2126 } 2114 }
2127 2115
@@ -2198,14 +2186,14 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
2198 goto done; 2186 goto done;
2199 } 2187 }
2200 i++; 2188 i++;
2201 if (i == rqst->rq_nvec) 2189 if (i == rqst->rq_nvec-1)
2202 break; 2190 break;
2203 } 2191 }
2204 start = i; 2192 start = i;
2205 buflen = 0; 2193 buflen = 0;
2206 } else { 2194 } else {
2207 i++; 2195 i++;
2208 if (i == rqst->rq_nvec) { 2196 if (i == rqst->rq_nvec-1) {
2209 /* send out all remaining vecs */ 2197 /* send out all remaining vecs */
2210 remaining_data_length -= buflen; 2198 remaining_data_length -= buflen;
2211 log_write(INFO, 2199 log_write(INFO,
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 8f6f25918229..927226a2122f 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -753,7 +753,7 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
753 goto out; 753 goto out;
754 754
755#ifdef CONFIG_CIFS_SMB311 755#ifdef CONFIG_CIFS_SMB311
756 if (ses->status == CifsNew) 756 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
757 smb311_update_preauth_hash(ses, rqst->rq_iov+1, 757 smb311_update_preauth_hash(ses, rqst->rq_iov+1,
758 rqst->rq_nvec-1); 758 rqst->rq_nvec-1);
759#endif 759#endif
@@ -798,7 +798,7 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
798 *resp_buf_type = CIFS_SMALL_BUFFER; 798 *resp_buf_type = CIFS_SMALL_BUFFER;
799 799
800#ifdef CONFIG_CIFS_SMB311 800#ifdef CONFIG_CIFS_SMB311
801 if (ses->status == CifsNew) { 801 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
802 struct kvec iov = { 802 struct kvec iov = {
803 .iov_base = buf + 4, 803 .iov_base = buf + 4,
804 .iov_len = get_rfc1002_length(buf) 804 .iov_len = get_rfc1002_length(buf)
@@ -834,8 +834,11 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
834 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) { 834 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
835 new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1), 835 new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1),
836 GFP_KERNEL); 836 GFP_KERNEL);
837 if (!new_iov) 837 if (!new_iov) {
838 /* otherwise cifs_send_recv below sets resp_buf_type */
839 *resp_buf_type = CIFS_NO_BUFFER;
838 return -ENOMEM; 840 return -ENOMEM;
841 }
839 } else 842 } else
840 new_iov = s_iov; 843 new_iov = s_iov;
841 844
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index a33d8fb1bf2a..508b905d744d 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -321,6 +321,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
321 struct ext4_sb_info *sbi = EXT4_SB(sb); 321 struct ext4_sb_info *sbi = EXT4_SB(sb);
322 ext4_grpblk_t offset; 322 ext4_grpblk_t offset;
323 ext4_grpblk_t next_zero_bit; 323 ext4_grpblk_t next_zero_bit;
324 ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb);
324 ext4_fsblk_t blk; 325 ext4_fsblk_t blk;
325 ext4_fsblk_t group_first_block; 326 ext4_fsblk_t group_first_block;
326 327
@@ -338,7 +339,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
338 /* check whether block bitmap block number is set */ 339 /* check whether block bitmap block number is set */
339 blk = ext4_block_bitmap(sb, desc); 340 blk = ext4_block_bitmap(sb, desc);
340 offset = blk - group_first_block; 341 offset = blk - group_first_block;
341 if (offset < 0 || EXT4_B2C(sbi, offset) >= sb->s_blocksize || 342 if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
342 !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) 343 !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
343 /* bad block bitmap */ 344 /* bad block bitmap */
344 return blk; 345 return blk;
@@ -346,7 +347,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
346 /* check whether the inode bitmap block number is set */ 347 /* check whether the inode bitmap block number is set */
347 blk = ext4_inode_bitmap(sb, desc); 348 blk = ext4_inode_bitmap(sb, desc);
348 offset = blk - group_first_block; 349 offset = blk - group_first_block;
349 if (offset < 0 || EXT4_B2C(sbi, offset) >= sb->s_blocksize || 350 if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
350 !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) 351 !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
351 /* bad block bitmap */ 352 /* bad block bitmap */
352 return blk; 353 return blk;
@@ -354,8 +355,8 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
354 /* check whether the inode table block number is set */ 355 /* check whether the inode table block number is set */
355 blk = ext4_inode_table(sb, desc); 356 blk = ext4_inode_table(sb, desc);
356 offset = blk - group_first_block; 357 offset = blk - group_first_block;
357 if (offset < 0 || EXT4_B2C(sbi, offset) >= sb->s_blocksize || 358 if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
358 EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= sb->s_blocksize) 359 EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit)
359 return blk; 360 return blk;
360 next_zero_bit = ext4_find_next_zero_bit(bh->b_data, 361 next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
361 EXT4_B2C(sbi, offset + sbi->s_itb_per_group), 362 EXT4_B2C(sbi, offset + sbi->s_itb_per_group),
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0a7315961bac..c969275ce3ee 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5329,8 +5329,9 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5329 stop = le32_to_cpu(extent->ee_block); 5329 stop = le32_to_cpu(extent->ee_block);
5330 5330
5331 /* 5331 /*
5332 * In case of left shift, Don't start shifting extents until we make 5332 * For left shifts, make sure the hole on the left is big enough to
5333 * sure the hole is big enough to accommodate the shift. 5333 * accommodate the shift. For right shifts, make sure the last extent
5334 * won't be shifted beyond EXT_MAX_BLOCKS.
5334 */ 5335 */
5335 if (SHIFT == SHIFT_LEFT) { 5336 if (SHIFT == SHIFT_LEFT) {
5336 path = ext4_find_extent(inode, start - 1, &path, 5337 path = ext4_find_extent(inode, start - 1, &path,
@@ -5350,9 +5351,14 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5350 5351
5351 if ((start == ex_start && shift > ex_start) || 5352 if ((start == ex_start && shift > ex_start) ||
5352 (shift > start - ex_end)) { 5353 (shift > start - ex_end)) {
5353 ext4_ext_drop_refs(path); 5354 ret = -EINVAL;
5354 kfree(path); 5355 goto out;
5355 return -EINVAL; 5356 }
5357 } else {
5358 if (shift > EXT_MAX_BLOCKS -
5359 (stop + ext4_ext_get_actual_len(extent))) {
5360 ret = -EINVAL;
5361 goto out;
5356 } 5362 }
5357 } 5363 }
5358 5364
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 185f7e61f4cf..eb104e8476f0 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -5886,5 +5886,6 @@ static void __exit ext4_exit_fs(void)
5886MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); 5886MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
5887MODULE_DESCRIPTION("Fourth Extended Filesystem"); 5887MODULE_DESCRIPTION("Fourth Extended Filesystem");
5888MODULE_LICENSE("GPL"); 5888MODULE_LICENSE("GPL");
5889MODULE_SOFTDEP("pre: crc32c");
5889module_init(ext4_init_fs) 5890module_init(ext4_init_fs)
5890module_exit(ext4_exit_fs) 5891module_exit(ext4_exit_fs)
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index ac311037d7a5..8aa453784402 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -532,6 +532,7 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
532 */ 532 */
533 ret = start_this_handle(journal, handle, GFP_NOFS); 533 ret = start_this_handle(journal, handle, GFP_NOFS);
534 if (ret < 0) { 534 if (ret < 0) {
535 handle->h_journal = journal;
535 jbd2_journal_free_reserved(handle); 536 jbd2_journal_free_reserved(handle);
536 return ret; 537 return ret;
537 } 538 }
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index ce4a34a2751d..35a124400d60 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -511,7 +511,14 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
511 if (args->flags & ATTR_CREATE) 511 if (args->flags & ATTR_CREATE)
512 return retval; 512 return retval;
513 retval = xfs_attr_shortform_remove(args); 513 retval = xfs_attr_shortform_remove(args);
514 ASSERT(retval == 0); 514 if (retval)
515 return retval;
516 /*
517 * Since we have removed the old attr, clear ATTR_REPLACE so
518 * that the leaf format add routine won't trip over the attr
519 * not being around.
520 */
521 args->flags &= ~ATTR_REPLACE;
515 } 522 }
516 523
517 if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX || 524 if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX ||
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 6a7c2f03ea11..040eeda8426f 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -725,12 +725,16 @@ xfs_bmap_extents_to_btree(
725 *logflagsp = 0; 725 *logflagsp = 0;
726 if ((error = xfs_alloc_vextent(&args))) { 726 if ((error = xfs_alloc_vextent(&args))) {
727 xfs_iroot_realloc(ip, -1, whichfork); 727 xfs_iroot_realloc(ip, -1, whichfork);
728 ASSERT(ifp->if_broot == NULL);
729 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
728 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 730 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
729 return error; 731 return error;
730 } 732 }
731 733
732 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) { 734 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
733 xfs_iroot_realloc(ip, -1, whichfork); 735 xfs_iroot_realloc(ip, -1, whichfork);
736 ASSERT(ifp->if_broot == NULL);
737 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
734 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 738 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
735 return -ENOSPC; 739 return -ENOSPC;
736 } 740 }
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index ef68b1de006a..1201107eabc6 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -466,6 +466,8 @@ xfs_dinode_verify(
466 return __this_address; 466 return __this_address;
467 if (di_size > XFS_DFORK_DSIZE(dip, mp)) 467 if (di_size > XFS_DFORK_DSIZE(dip, mp))
468 return __this_address; 468 return __this_address;
469 if (dip->di_nextents)
470 return __this_address;
469 /* fall through */ 471 /* fall through */
470 case XFS_DINODE_FMT_EXTENTS: 472 case XFS_DINODE_FMT_EXTENTS:
471 case XFS_DINODE_FMT_BTREE: 473 case XFS_DINODE_FMT_BTREE:
@@ -484,12 +486,31 @@ xfs_dinode_verify(
484 if (XFS_DFORK_Q(dip)) { 486 if (XFS_DFORK_Q(dip)) {
485 switch (dip->di_aformat) { 487 switch (dip->di_aformat) {
486 case XFS_DINODE_FMT_LOCAL: 488 case XFS_DINODE_FMT_LOCAL:
489 if (dip->di_anextents)
490 return __this_address;
491 /* fall through */
487 case XFS_DINODE_FMT_EXTENTS: 492 case XFS_DINODE_FMT_EXTENTS:
488 case XFS_DINODE_FMT_BTREE: 493 case XFS_DINODE_FMT_BTREE:
489 break; 494 break;
490 default: 495 default:
491 return __this_address; 496 return __this_address;
492 } 497 }
498 } else {
499 /*
500 * If there is no fork offset, this may be a freshly-made inode
501 * in a new disk cluster, in which case di_aformat is zeroed.
502 * Otherwise, such an inode must be in EXTENTS format; this goes
503 * for freed inodes as well.
504 */
505 switch (dip->di_aformat) {
506 case 0:
507 case XFS_DINODE_FMT_EXTENTS:
508 break;
509 default:
510 return __this_address;
511 }
512 if (dip->di_anextents)
513 return __this_address;
493 } 514 }
494 515
495 /* only version 3 or greater inodes are extensively verified here */ 516 /* only version 3 or greater inodes are extensively verified here */
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 299aee4b7b0b..eed073cc4778 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -778,22 +778,26 @@ xfs_file_fallocate(
778 if (error) 778 if (error)
779 goto out_unlock; 779 goto out_unlock;
780 } else if (mode & FALLOC_FL_INSERT_RANGE) { 780 } else if (mode & FALLOC_FL_INSERT_RANGE) {
781 unsigned int blksize_mask = i_blocksize(inode) - 1; 781 unsigned int blksize_mask = i_blocksize(inode) - 1;
782 loff_t isize = i_size_read(inode);
782 783
783 new_size = i_size_read(inode) + len;
784 if (offset & blksize_mask || len & blksize_mask) { 784 if (offset & blksize_mask || len & blksize_mask) {
785 error = -EINVAL; 785 error = -EINVAL;
786 goto out_unlock; 786 goto out_unlock;
787 } 787 }
788 788
789 /* check the new inode size does not wrap through zero */ 789 /*
790 if (new_size > inode->i_sb->s_maxbytes) { 790 * New inode size must not exceed ->s_maxbytes, accounting for
791 * possible signed overflow.
792 */
793 if (inode->i_sb->s_maxbytes - isize < len) {
791 error = -EFBIG; 794 error = -EFBIG;
792 goto out_unlock; 795 goto out_unlock;
793 } 796 }
797 new_size = isize + len;
794 798
795 /* Offset should be less than i_size */ 799 /* Offset should be less than i_size */
796 if (offset >= i_size_read(inode)) { 800 if (offset >= isize) {
797 error = -EINVAL; 801 error = -EINVAL;
798 goto out_unlock; 802 goto out_unlock;
799 } 803 }
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 278841c75b97..af240573e482 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -188,7 +188,7 @@
188#endif 188#endif
189 189
190#ifdef CONFIG_SERIAL_EARLYCON 190#ifdef CONFIG_SERIAL_EARLYCON
191#define EARLYCON_TABLE() STRUCT_ALIGN(); \ 191#define EARLYCON_TABLE() . = ALIGN(8); \
192 VMLINUX_SYMBOL(__earlycon_table) = .; \ 192 VMLINUX_SYMBOL(__earlycon_table) = .; \
193 KEEP(*(__earlycon_table)) \ 193 KEEP(*(__earlycon_table)) \
194 VMLINUX_SYMBOL(__earlycon_table_end) = .; 194 VMLINUX_SYMBOL(__earlycon_table_end) = .;
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
index e518e4e3dfb5..4b1548129fa2 100644
--- a/include/kvm/arm_psci.h
+++ b/include/kvm/arm_psci.h
@@ -37,10 +37,15 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
37 * Our PSCI implementation stays the same across versions from 37 * Our PSCI implementation stays the same across versions from
38 * v0.2 onward, only adding the few mandatory functions (such 38 * v0.2 onward, only adding the few mandatory functions (such
39 * as FEATURES with 1.0) that are required by newer 39 * as FEATURES with 1.0) that are required by newer
40 * revisions. It is thus safe to return the latest. 40 * revisions. It is thus safe to return the latest, unless
41 * userspace has instructed us otherwise.
41 */ 42 */
42 if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) 43 if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
44 if (vcpu->kvm->arch.psci_version)
45 return vcpu->kvm->arch.psci_version;
46
43 return KVM_ARM_PSCI_LATEST; 47 return KVM_ARM_PSCI_LATEST;
48 }
44 49
45 return KVM_ARM_PSCI_0_1; 50 return KVM_ARM_PSCI_0_1;
46} 51}
@@ -48,4 +53,11 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
48 53
49int kvm_hvc_call_handler(struct kvm_vcpu *vcpu); 54int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
50 55
56struct kvm_one_reg;
57
58int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
59int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
60int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
61int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
62
51#endif /* __KVM_ARM_PSCI_H__ */ 63#endif /* __KVM_ARM_PSCI_H__ */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e3986f4b3461..ebc34a5686dc 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -9,6 +9,9 @@
9struct blk_mq_tags; 9struct blk_mq_tags;
10struct blk_flush_queue; 10struct blk_flush_queue;
11 11
12/**
13 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
14 */
12struct blk_mq_hw_ctx { 15struct blk_mq_hw_ctx {
13 struct { 16 struct {
14 spinlock_t lock; 17 spinlock_t lock;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9af3e0f430bc..5c4eee043191 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -605,6 +605,11 @@ struct request_queue {
605 * initialized by the low level device driver (e.g. scsi/sd.c). 605 * initialized by the low level device driver (e.g. scsi/sd.c).
606 * Stacking drivers (device mappers) may or may not initialize 606 * Stacking drivers (device mappers) may or may not initialize
607 * these fields. 607 * these fields.
608 *
609 * Reads of this information must be protected with blk_queue_enter() /
610 * blk_queue_exit(). Modifying this information is only allowed while
611 * no requests are being processed. See also blk_mq_freeze_queue() and
612 * blk_mq_unfreeze_queue().
608 */ 613 */
609 unsigned int nr_zones; 614 unsigned int nr_zones;
610 unsigned long *seq_zones_bitmap; 615 unsigned long *seq_zones_bitmap;
@@ -737,6 +742,7 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
737#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags) 742#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
738#define blk_queue_preempt_only(q) \ 743#define blk_queue_preempt_only(q) \
739 test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags) 744 test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
745#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
740 746
741extern int blk_set_preempt_only(struct request_queue *q); 747extern int blk_set_preempt_only(struct request_queue *q);
742extern void blk_clear_preempt_only(struct request_queue *q); 748extern void blk_clear_preempt_only(struct request_queue *q);
diff --git a/include/linux/device.h b/include/linux/device.h
index 0059b99e1f25..477956990f5e 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -256,7 +256,9 @@ enum probe_type {
256 * automatically. 256 * automatically.
257 * @pm: Power management operations of the device which matched 257 * @pm: Power management operations of the device which matched
258 * this driver. 258 * this driver.
259 * @coredump: Called through sysfs to initiate a device coredump. 259 * @coredump: Called when sysfs entry is written to. The device driver
260 * is expected to call the dev_coredump API resulting in a
261 * uevent.
260 * @p: Driver core's private data, no one other than the driver 262 * @p: Driver core's private data, no one other than the driver
261 * core can touch this. 263 * core can touch this.
262 * 264 *
@@ -288,7 +290,7 @@ struct device_driver {
288 const struct attribute_group **groups; 290 const struct attribute_group **groups;
289 291
290 const struct dev_pm_ops *pm; 292 const struct dev_pm_ops *pm;
291 int (*coredump) (struct device *dev); 293 void (*coredump) (struct device *dev);
292 294
293 struct driver_private *p; 295 struct driver_private *p;
294}; 296};
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index b32cd2062f18..f8a2245b70ac 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -312,6 +312,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
312 * by kernel. Returns a negative error code or zero. 312 * by kernel. Returns a negative error code or zero.
313 * @get_fecparam: Get the network device Forward Error Correction parameters. 313 * @get_fecparam: Get the network device Forward Error Correction parameters.
314 * @set_fecparam: Set the network device Forward Error Correction parameters. 314 * @set_fecparam: Set the network device Forward Error Correction parameters.
315 * @get_ethtool_phy_stats: Return extended statistics about the PHY device.
316 * This is only useful if the device maintains PHY statistics and
317 * cannot use the standard PHY library helpers.
315 * 318 *
316 * All operations are optional (i.e. the function pointer may be set 319 * All operations are optional (i.e. the function pointer may be set
317 * to %NULL) and callers must take this into account. Callers must 320 * to %NULL) and callers must take this into account. Callers must
@@ -407,5 +410,7 @@ struct ethtool_ops {
407 struct ethtool_fecparam *); 410 struct ethtool_fecparam *);
408 int (*set_fecparam)(struct net_device *, 411 int (*set_fecparam)(struct net_device *,
409 struct ethtool_fecparam *); 412 struct ethtool_fecparam *);
413 void (*get_ethtool_phy_stats)(struct net_device *,
414 struct ethtool_stats *, u64 *);
410}; 415};
411#endif /* _LINUX_ETHTOOL_H */ 416#endif /* _LINUX_ETHTOOL_H */
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index e0c95c9f1e29..e64c0294f50b 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -217,12 +217,10 @@ struct fsnotify_mark_connector {
217 union { /* Object pointer [lock] */ 217 union { /* Object pointer [lock] */
218 struct inode *inode; 218 struct inode *inode;
219 struct vfsmount *mnt; 219 struct vfsmount *mnt;
220 };
221 union {
222 struct hlist_head list;
223 /* Used listing heads to free after srcu period expires */ 220 /* Used listing heads to free after srcu period expires */
224 struct fsnotify_mark_connector *destroy_next; 221 struct fsnotify_mark_connector *destroy_next;
225 }; 222 };
223 struct hlist_head list;
226}; 224};
227 225
228/* 226/*
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index a2656c3ebe81..3892e9c8b2de 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -161,9 +161,11 @@ struct hrtimer_clock_base {
161enum hrtimer_base_type { 161enum hrtimer_base_type {
162 HRTIMER_BASE_MONOTONIC, 162 HRTIMER_BASE_MONOTONIC,
163 HRTIMER_BASE_REALTIME, 163 HRTIMER_BASE_REALTIME,
164 HRTIMER_BASE_BOOTTIME,
164 HRTIMER_BASE_TAI, 165 HRTIMER_BASE_TAI,
165 HRTIMER_BASE_MONOTONIC_SOFT, 166 HRTIMER_BASE_MONOTONIC_SOFT,
166 HRTIMER_BASE_REALTIME_SOFT, 167 HRTIMER_BASE_REALTIME_SOFT,
168 HRTIMER_BASE_BOOTTIME_SOFT,
167 HRTIMER_BASE_TAI_SOFT, 169 HRTIMER_BASE_TAI_SOFT,
168 HRTIMER_MAX_CLOCK_BASES, 170 HRTIMER_MAX_CLOCK_BASES,
169}; 171};
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 02639ebea2f0..585d27182425 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -93,11 +93,39 @@ static inline bool br_multicast_router(const struct net_device *dev)
93 93
94#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) 94#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
95bool br_vlan_enabled(const struct net_device *dev); 95bool br_vlan_enabled(const struct net_device *dev);
96int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid);
97int br_vlan_get_info(const struct net_device *dev, u16 vid,
98 struct bridge_vlan_info *p_vinfo);
96#else 99#else
97static inline bool br_vlan_enabled(const struct net_device *dev) 100static inline bool br_vlan_enabled(const struct net_device *dev)
98{ 101{
99 return false; 102 return false;
100} 103}
104
105static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
106{
107 return -1;
108}
109
110static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
111 struct bridge_vlan_info *p_vinfo)
112{
113 return -1;
114}
115#endif
116
117#if IS_ENABLED(CONFIG_BRIDGE)
118struct net_device *br_fdb_find_port(const struct net_device *br_dev,
119 const unsigned char *addr,
120 __u16 vid);
121#else
122static inline struct net_device *
123br_fdb_find_port(const struct net_device *br_dev,
124 const unsigned char *addr,
125 __u16 vid)
126{
127 return NULL;
128}
101#endif 129#endif
102 130
103#endif 131#endif
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 767d193c269a..2a156c5dfadd 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1284,25 +1284,19 @@ enum {
1284}; 1284};
1285 1285
1286static inline const struct cpumask * 1286static inline const struct cpumask *
1287mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector) 1287mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
1288{ 1288{
1289 const struct cpumask *mask;
1290 struct irq_desc *desc; 1289 struct irq_desc *desc;
1291 unsigned int irq; 1290 unsigned int irq;
1292 int eqn; 1291 int eqn;
1293 int err; 1292 int err;
1294 1293
1295 err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq); 1294 err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
1296 if (err) 1295 if (err)
1297 return NULL; 1296 return NULL;
1298 1297
1299 desc = irq_to_desc(irq); 1298 desc = irq_to_desc(irq);
1300#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK 1299 return desc->affinity_hint;
1301 mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
1302#else
1303 mask = desc->irq_common_data.affinity;
1304#endif
1305 return mask;
1306} 1300}
1307 1301
1308#endif /* MLX5_DRIVER_H */ 1302#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 1aad455538f4..b8918a1da11f 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -356,22 +356,6 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits {
356 u8 reserved_at_6[0x1a]; 356 u8 reserved_at_6[0x1a];
357}; 357};
358 358
359struct mlx5_ifc_ipv4_layout_bits {
360 u8 reserved_at_0[0x60];
361
362 u8 ipv4[0x20];
363};
364
365struct mlx5_ifc_ipv6_layout_bits {
366 u8 ipv6[16][0x8];
367};
368
369union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
370 struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
371 struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
372 u8 reserved_at_0[0x80];
373};
374
375struct mlx5_ifc_fte_match_set_lyr_2_4_bits { 359struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
376 u8 smac_47_16[0x20]; 360 u8 smac_47_16[0x20];
377 361
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
index ec052491ba3d..193091537cb6 100644
--- a/include/linux/mlx5/mlx5_ifc_fpga.h
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -32,12 +32,29 @@
32#ifndef MLX5_IFC_FPGA_H 32#ifndef MLX5_IFC_FPGA_H
33#define MLX5_IFC_FPGA_H 33#define MLX5_IFC_FPGA_H
34 34
35struct mlx5_ifc_ipv4_layout_bits {
36 u8 reserved_at_0[0x60];
37
38 u8 ipv4[0x20];
39};
40
41struct mlx5_ifc_ipv6_layout_bits {
42 u8 ipv6[16][0x8];
43};
44
45union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
46 struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
47 struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
48 u8 reserved_at_0[0x80];
49};
50
35enum { 51enum {
36 MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX = 0x2c9, 52 MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX = 0x2c9,
37}; 53};
38 54
39enum { 55enum {
40 MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2, 56 MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2,
57 MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS = 0x3,
41}; 58};
42 59
43struct mlx5_ifc_fpga_shell_caps_bits { 60struct mlx5_ifc_fpga_shell_caps_bits {
@@ -370,6 +387,27 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits {
370 u8 reserved_at_40[0x40]; 387 u8 reserved_at_40[0x40];
371}; 388};
372 389
390struct mlx5_ifc_tls_extended_cap_bits {
391 u8 aes_gcm_128[0x1];
392 u8 aes_gcm_256[0x1];
393 u8 reserved_at_2[0x1e];
394 u8 reserved_at_20[0x20];
395 u8 context_capacity_total[0x20];
396 u8 context_capacity_rx[0x20];
397 u8 context_capacity_tx[0x20];
398 u8 reserved_at_a0[0x10];
399 u8 tls_counter_size[0x10];
400 u8 tls_counters_addr_low[0x20];
401 u8 tls_counters_addr_high[0x20];
402 u8 rx[0x1];
403 u8 tx[0x1];
404 u8 tls_v12[0x1];
405 u8 tls_v13[0x1];
406 u8 lro[0x1];
407 u8 ipv6[0x1];
408 u8 reserved_at_106[0x1a];
409};
410
373struct mlx5_ifc_ipsec_extended_cap_bits { 411struct mlx5_ifc_ipsec_extended_cap_bits {
374 u8 encapsulation[0x20]; 412 u8 encapsulation[0x20];
375 413
@@ -519,4 +557,43 @@ struct mlx5_ifc_fpga_ipsec_sa {
519 __be16 reserved2; 557 __be16 reserved2;
520} __packed; 558} __packed;
521 559
560enum fpga_tls_cmds {
561 CMD_SETUP_STREAM = 0x1001,
562 CMD_TEARDOWN_STREAM = 0x1002,
563};
564
565#define MLX5_TLS_1_2 (0)
566
567#define MLX5_TLS_ALG_AES_GCM_128 (0)
568#define MLX5_TLS_ALG_AES_GCM_256 (1)
569
570struct mlx5_ifc_tls_cmd_bits {
571 u8 command_type[0x20];
572 u8 ipv6[0x1];
573 u8 direction_sx[0x1];
574 u8 tls_version[0x2];
575 u8 reserved[0x1c];
576 u8 swid[0x20];
577 u8 src_port[0x10];
578 u8 dst_port[0x10];
579 union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
580 union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
581 u8 tls_rcd_sn[0x40];
582 u8 tcp_sn[0x20];
583 u8 tls_implicit_iv[0x20];
584 u8 tls_xor_iv[0x40];
585 u8 encryption_key[0x100];
586 u8 alg[4];
587 u8 reserved2[0x1c];
588 u8 reserved3[0x4a0];
589};
590
591struct mlx5_ifc_tls_resp_bits {
592 u8 syndrome[0x20];
593 u8 stream_id[0x20];
594 u8 reserverd[0x40];
595};
596
597#define MLX5_TLS_COMMAND_SIZE (0x100)
598
522#endif /* MLX5_IFC_FPGA_H */ 599#endif /* MLX5_IFC_FPGA_H */
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index b63fa457febd..3529683f691e 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -85,6 +85,7 @@ struct flchip {
85 unsigned int write_suspended:1; 85 unsigned int write_suspended:1;
86 unsigned int erase_suspended:1; 86 unsigned int erase_suspended:1;
87 unsigned long in_progress_block_addr; 87 unsigned long in_progress_block_addr;
88 unsigned long in_progress_block_mask;
88 89
89 struct mutex mutex; 90 struct mutex mutex;
90 wait_queue_head_t wq; /* Wait on here when we're waiting for the chip 91 wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index fe2f3b30960e..c87c3a3453c1 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -78,6 +78,7 @@ enum {
78 NETIF_F_HW_ESP_BIT, /* Hardware ESP transformation offload */ 78 NETIF_F_HW_ESP_BIT, /* Hardware ESP transformation offload */
79 NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */ 79 NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */
80 NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */ 80 NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
81 NETIF_F_HW_TLS_TX_BIT, /* Hardware TLS TX offload */
81 82
82 NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */ 83 NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */
83 NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */ 84 NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */
@@ -149,6 +150,7 @@ enum {
149#define NETIF_F_RX_UDP_TUNNEL_PORT __NETIF_F(RX_UDP_TUNNEL_PORT) 150#define NETIF_F_RX_UDP_TUNNEL_PORT __NETIF_F(RX_UDP_TUNNEL_PORT)
150#define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD) 151#define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD)
151#define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4) 152#define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4)
153#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
152 154
153#define for_each_netdev_feature(mask_addr, bit) \ 155#define for_each_netdev_feature(mask_addr, bit) \
154 for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) 156 for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a30435118530..03ed492c4e14 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -865,6 +865,26 @@ struct xfrmdev_ops {
865}; 865};
866#endif 866#endif
867 867
868#if IS_ENABLED(CONFIG_TLS_DEVICE)
869enum tls_offload_ctx_dir {
870 TLS_OFFLOAD_CTX_DIR_RX,
871 TLS_OFFLOAD_CTX_DIR_TX,
872};
873
874struct tls_crypto_info;
875struct tls_context;
876
877struct tlsdev_ops {
878 int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
879 enum tls_offload_ctx_dir direction,
880 struct tls_crypto_info *crypto_info,
881 u32 start_offload_tcp_sn);
882 void (*tls_dev_del)(struct net_device *netdev,
883 struct tls_context *ctx,
884 enum tls_offload_ctx_dir direction);
885};
886#endif
887
868struct dev_ifalias { 888struct dev_ifalias {
869 struct rcu_head rcuhead; 889 struct rcu_head rcuhead;
870 char ifalias[]; 890 char ifalias[];
@@ -1750,6 +1770,10 @@ struct net_device {
1750 const struct xfrmdev_ops *xfrmdev_ops; 1770 const struct xfrmdev_ops *xfrmdev_ops;
1751#endif 1771#endif
1752 1772
1773#if IS_ENABLED(CONFIG_TLS_DEVICE)
1774 const struct tlsdev_ops *tlsdev_ops;
1775#endif
1776
1753 const struct header_ops *header_ops; 1777 const struct header_ops *header_ops;
1754 1778
1755 unsigned int flags; 1779 unsigned int flags;
@@ -3214,19 +3238,6 @@ static inline int netif_set_xps_queue(struct net_device *dev,
3214} 3238}
3215#endif 3239#endif
3216 3240
3217u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
3218 unsigned int num_tx_queues);
3219
3220/*
3221 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
3222 * as a distribution range limit for the returned value.
3223 */
3224static inline u16 skb_tx_hash(const struct net_device *dev,
3225 struct sk_buff *skb)
3226{
3227 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
3228}
3229
3230/** 3241/**
3231 * netif_is_multiqueue - test if device has multiple transmit queues 3242 * netif_is_multiqueue - test if device has multiple transmit queues
3232 * @dev: network device 3243 * @dev: network device
diff --git a/include/linux/netfilter/nf_osf.h b/include/linux/netfilter/nf_osf.h
new file mode 100644
index 000000000000..a2b39602e87d
--- /dev/null
+++ b/include/linux/netfilter/nf_osf.h
@@ -0,0 +1,27 @@
1#include <uapi/linux/netfilter/nf_osf.h>
2
3/* Initial window size option state machine: multiple of mss, mtu or
4 * plain numeric value. Can also be made as plain numeric value which
5 * is not a multiple of specified value.
6 */
7enum nf_osf_window_size_options {
8 OSF_WSS_PLAIN = 0,
9 OSF_WSS_MSS,
10 OSF_WSS_MTU,
11 OSF_WSS_MODULO,
12 OSF_WSS_MAX,
13};
14
15enum osf_fmatch_states {
16 /* Packet does not match the fingerprint */
17 FMATCH_WRONG = 0,
18 /* Packet matches the fingerprint */
19 FMATCH_OK,
20 /* Options do not match the fingerprint, but header does */
21 FMATCH_OPT_WRONG,
22};
23
24bool nf_osf_match(const struct sk_buff *skb, u_int8_t family,
25 int hooknum, struct net_device *in, struct net_device *out,
26 const struct nf_osf_info *info, struct net *net,
27 const struct list_head *nf_osf_fingers);
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 0773b5a032f1..c6935be7c6ca 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -17,10 +17,6 @@
17#include <linux/if_ether.h> 17#include <linux/if_ether.h>
18#include <uapi/linux/netfilter_bridge/ebtables.h> 18#include <uapi/linux/netfilter_bridge/ebtables.h>
19 19
20/* return values for match() functions */
21#define EBT_MATCH 0
22#define EBT_NOMATCH 1
23
24struct ebt_match { 20struct ebt_match {
25 struct list_head list; 21 struct list_head list;
26 const char name[EBT_FUNCTION_MAXNAMELEN]; 22 const char name[EBT_FUNCTION_MAXNAMELEN];
diff --git a/include/linux/phy.h b/include/linux/phy.h
index f0b5870a6d40..073235e70442 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -1068,6 +1068,52 @@ int __init mdio_bus_init(void);
1068void mdio_bus_exit(void); 1068void mdio_bus_exit(void);
1069#endif 1069#endif
1070 1070
1071/* Inline function for use within net/core/ethtool.c (built-in) */
1072static inline int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data)
1073{
1074 if (!phydev->drv)
1075 return -EIO;
1076
1077 mutex_lock(&phydev->lock);
1078 phydev->drv->get_strings(phydev, data);
1079 mutex_unlock(&phydev->lock);
1080
1081 return 0;
1082}
1083
1084static inline int phy_ethtool_get_sset_count(struct phy_device *phydev)
1085{
1086 int ret;
1087
1088 if (!phydev->drv)
1089 return -EIO;
1090
1091 if (phydev->drv->get_sset_count &&
1092 phydev->drv->get_strings &&
1093 phydev->drv->get_stats) {
1094 mutex_lock(&phydev->lock);
1095 ret = phydev->drv->get_sset_count(phydev);
1096 mutex_unlock(&phydev->lock);
1097
1098 return ret;
1099 }
1100
1101 return -EOPNOTSUPP;
1102}
1103
1104static inline int phy_ethtool_get_stats(struct phy_device *phydev,
1105 struct ethtool_stats *stats, u64 *data)
1106{
1107 if (!phydev->drv)
1108 return -EIO;
1109
1110 mutex_lock(&phydev->lock);
1111 phydev->drv->get_stats(phydev, stats, data);
1112 mutex_unlock(&phydev->lock);
1113
1114 return 0;
1115}
1116
1071extern struct bus_type mdio_bus_type; 1117extern struct bus_type mdio_bus_type;
1072 1118
1073struct mdio_board_info { 1119struct mdio_board_info {
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 1d356105f25a..b4c9fda9d833 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -351,10 +351,10 @@ struct earlycon_id {
351 char name[16]; 351 char name[16];
352 char compatible[128]; 352 char compatible[128];
353 int (*setup)(struct earlycon_device *, const char *options); 353 int (*setup)(struct earlycon_device *, const char *options);
354} __aligned(32); 354};
355 355
356extern const struct earlycon_id __earlycon_table[]; 356extern const struct earlycon_id *__earlycon_table[];
357extern const struct earlycon_id __earlycon_table_end[]; 357extern const struct earlycon_id *__earlycon_table_end[];
358 358
359#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE) 359#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
360#define EARLYCON_USED_OR_UNUSED __used 360#define EARLYCON_USED_OR_UNUSED __used
@@ -362,12 +362,19 @@ extern const struct earlycon_id __earlycon_table_end[];
362#define EARLYCON_USED_OR_UNUSED __maybe_unused 362#define EARLYCON_USED_OR_UNUSED __maybe_unused
363#endif 363#endif
364 364
365#define OF_EARLYCON_DECLARE(_name, compat, fn) \ 365#define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id) \
366 static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \ 366 static const struct earlycon_id unique_id \
367 EARLYCON_USED_OR_UNUSED __section(__earlycon_table) \ 367 EARLYCON_USED_OR_UNUSED __initconst \
368 = { .name = __stringify(_name), \ 368 = { .name = __stringify(_name), \
369 .compatible = compat, \ 369 .compatible = compat, \
370 .setup = fn } 370 .setup = fn }; \
371 static const struct earlycon_id EARLYCON_USED_OR_UNUSED \
372 __section(__earlycon_table) \
373 * const __PASTE(__p, unique_id) = &unique_id
374
375#define OF_EARLYCON_DECLARE(_name, compat, fn) \
376 _OF_EARLYCON_DECLARE(_name, compat, fn, \
377 __UNIQUE_ID(__earlycon_##_name))
371 378
372#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn) 379#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn)
373 380
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a4a5c0c5cba8..908d66e55b14 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1034,6 +1034,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
1034struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); 1034struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
1035int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); 1035int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
1036struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); 1036struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
1037void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
1037struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority); 1038struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
1038struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, 1039struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1039 gfp_t gfp_mask, bool fclone); 1040 gfp_t gfp_mask, bool fclone);
diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h
index e8f0f852968f..c0c5c5b73dc0 100644
--- a/include/linux/stringhash.h
+++ b/include/linux/stringhash.h
@@ -50,9 +50,9 @@ partial_name_hash(unsigned long c, unsigned long prevhash)
50 * losing bits). This also has the property (wanted by the dcache) 50 * losing bits). This also has the property (wanted by the dcache)
51 * that the msbits make a good hash table index. 51 * that the msbits make a good hash table index.
52 */ 52 */
53static inline unsigned long end_name_hash(unsigned long hash) 53static inline unsigned int end_name_hash(unsigned long hash)
54{ 54{
55 return __hash_32((unsigned int)hash); 55 return hash_long(hash, 32);
56} 56}
57 57
58/* 58/*
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 20585d5c4e1c..807776928cb8 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -228,7 +228,7 @@ struct tcp_sock {
228 unused:2; 228 unused:2;
229 u8 nonagle : 4,/* Disable Nagle algorithm? */ 229 u8 nonagle : 4,/* Disable Nagle algorithm? */
230 thin_lto : 1,/* Use linear timeouts for thin streams */ 230 thin_lto : 1,/* Use linear timeouts for thin streams */
231 unused1 : 1, 231 recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */
232 repair : 1, 232 repair : 1,
233 frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */ 233 frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */
234 u8 repair_queue; 234 u8 repair_queue;
diff --git a/include/linux/ti-emif-sram.h b/include/linux/ti-emif-sram.h
index 45bc6b376492..53604b087f2c 100644
--- a/include/linux/ti-emif-sram.h
+++ b/include/linux/ti-emif-sram.h
@@ -60,6 +60,81 @@ struct ti_emif_pm_functions {
60 u32 abort_sr; 60 u32 abort_sr;
61} __packed __aligned(8); 61} __packed __aligned(8);
62 62
63static inline void ti_emif_asm_offsets(void)
64{
65 DEFINE(EMIF_SDCFG_VAL_OFFSET,
66 offsetof(struct emif_regs_amx3, emif_sdcfg_val));
67 DEFINE(EMIF_TIMING1_VAL_OFFSET,
68 offsetof(struct emif_regs_amx3, emif_timing1_val));
69 DEFINE(EMIF_TIMING2_VAL_OFFSET,
70 offsetof(struct emif_regs_amx3, emif_timing2_val));
71 DEFINE(EMIF_TIMING3_VAL_OFFSET,
72 offsetof(struct emif_regs_amx3, emif_timing3_val));
73 DEFINE(EMIF_REF_CTRL_VAL_OFFSET,
74 offsetof(struct emif_regs_amx3, emif_ref_ctrl_val));
75 DEFINE(EMIF_ZQCFG_VAL_OFFSET,
76 offsetof(struct emif_regs_amx3, emif_zqcfg_val));
77 DEFINE(EMIF_PMCR_VAL_OFFSET,
78 offsetof(struct emif_regs_amx3, emif_pmcr_val));
79 DEFINE(EMIF_PMCR_SHDW_VAL_OFFSET,
80 offsetof(struct emif_regs_amx3, emif_pmcr_shdw_val));
81 DEFINE(EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET,
82 offsetof(struct emif_regs_amx3, emif_rd_wr_level_ramp_ctrl));
83 DEFINE(EMIF_RD_WR_EXEC_THRESH_OFFSET,
84 offsetof(struct emif_regs_amx3, emif_rd_wr_exec_thresh));
85 DEFINE(EMIF_COS_CONFIG_OFFSET,
86 offsetof(struct emif_regs_amx3, emif_cos_config));
87 DEFINE(EMIF_PRIORITY_TO_COS_MAPPING_OFFSET,
88 offsetof(struct emif_regs_amx3, emif_priority_to_cos_mapping));
89 DEFINE(EMIF_CONNECT_ID_SERV_1_MAP_OFFSET,
90 offsetof(struct emif_regs_amx3, emif_connect_id_serv_1_map));
91 DEFINE(EMIF_CONNECT_ID_SERV_2_MAP_OFFSET,
92 offsetof(struct emif_regs_amx3, emif_connect_id_serv_2_map));
93 DEFINE(EMIF_OCP_CONFIG_VAL_OFFSET,
94 offsetof(struct emif_regs_amx3, emif_ocp_config_val));
95 DEFINE(EMIF_LPDDR2_NVM_TIM_OFFSET,
96 offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim));
97 DEFINE(EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET,
98 offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim_shdw));
99 DEFINE(EMIF_DLL_CALIB_CTRL_VAL_OFFSET,
100 offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val));
101 DEFINE(EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET,
102 offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val_shdw));
103 DEFINE(EMIF_DDR_PHY_CTLR_1_OFFSET,
104 offsetof(struct emif_regs_amx3, emif_ddr_phy_ctlr_1));
105 DEFINE(EMIF_EXT_PHY_CTRL_VALS_OFFSET,
106 offsetof(struct emif_regs_amx3, emif_ext_phy_ctrl_vals));
107 DEFINE(EMIF_REGS_AMX3_SIZE, sizeof(struct emif_regs_amx3));
108
109 BLANK();
110
111 DEFINE(EMIF_PM_BASE_ADDR_VIRT_OFFSET,
112 offsetof(struct ti_emif_pm_data, ti_emif_base_addr_virt));
113 DEFINE(EMIF_PM_BASE_ADDR_PHYS_OFFSET,
114 offsetof(struct ti_emif_pm_data, ti_emif_base_addr_phys));
115 DEFINE(EMIF_PM_CONFIG_OFFSET,
116 offsetof(struct ti_emif_pm_data, ti_emif_sram_config));
117 DEFINE(EMIF_PM_REGS_VIRT_OFFSET,
118 offsetof(struct ti_emif_pm_data, regs_virt));
119 DEFINE(EMIF_PM_REGS_PHYS_OFFSET,
120 offsetof(struct ti_emif_pm_data, regs_phys));
121 DEFINE(EMIF_PM_DATA_SIZE, sizeof(struct ti_emif_pm_data));
122
123 BLANK();
124
125 DEFINE(EMIF_PM_SAVE_CONTEXT_OFFSET,
126 offsetof(struct ti_emif_pm_functions, save_context));
127 DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET,
128 offsetof(struct ti_emif_pm_functions, restore_context));
129 DEFINE(EMIF_PM_ENTER_SR_OFFSET,
130 offsetof(struct ti_emif_pm_functions, enter_sr));
131 DEFINE(EMIF_PM_EXIT_SR_OFFSET,
132 offsetof(struct ti_emif_pm_functions, exit_sr));
133 DEFINE(EMIF_PM_ABORT_SR_OFFSET,
134 offsetof(struct ti_emif_pm_functions, abort_sr));
135 DEFINE(EMIF_PM_FUNCTIONS_SIZE, sizeof(struct ti_emif_pm_functions));
136}
137
63struct gen_pool; 138struct gen_pool;
64 139
65int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst); 140int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst);
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 4b3dca173e89..7acb953298a7 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -52,7 +52,6 @@ struct tk_read_base {
52 * @offs_real: Offset clock monotonic -> clock realtime 52 * @offs_real: Offset clock monotonic -> clock realtime
53 * @offs_boot: Offset clock monotonic -> clock boottime 53 * @offs_boot: Offset clock monotonic -> clock boottime
54 * @offs_tai: Offset clock monotonic -> clock tai 54 * @offs_tai: Offset clock monotonic -> clock tai
55 * @time_suspended: Accumulated suspend time
56 * @tai_offset: The current UTC to TAI offset in seconds 55 * @tai_offset: The current UTC to TAI offset in seconds
57 * @clock_was_set_seq: The sequence number of clock was set events 56 * @clock_was_set_seq: The sequence number of clock was set events
58 * @cs_was_changed_seq: The sequence number of clocksource change events 57 * @cs_was_changed_seq: The sequence number of clocksource change events
@@ -95,7 +94,6 @@ struct timekeeper {
95 ktime_t offs_real; 94 ktime_t offs_real;
96 ktime_t offs_boot; 95 ktime_t offs_boot;
97 ktime_t offs_tai; 96 ktime_t offs_tai;
98 ktime_t time_suspended;
99 s32 tai_offset; 97 s32 tai_offset;
100 unsigned int clock_was_set_seq; 98 unsigned int clock_was_set_seq;
101 u8 cs_was_changed_seq; 99 u8 cs_was_changed_seq;
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 9737fbec7019..588a0e4b1ab9 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -33,25 +33,20 @@ extern void ktime_get_ts64(struct timespec64 *ts);
33extern time64_t ktime_get_seconds(void); 33extern time64_t ktime_get_seconds(void);
34extern time64_t __ktime_get_real_seconds(void); 34extern time64_t __ktime_get_real_seconds(void);
35extern time64_t ktime_get_real_seconds(void); 35extern time64_t ktime_get_real_seconds(void);
36extern void ktime_get_active_ts64(struct timespec64 *ts);
37 36
38extern int __getnstimeofday64(struct timespec64 *tv); 37extern int __getnstimeofday64(struct timespec64 *tv);
39extern void getnstimeofday64(struct timespec64 *tv); 38extern void getnstimeofday64(struct timespec64 *tv);
40extern void getboottime64(struct timespec64 *ts); 39extern void getboottime64(struct timespec64 *ts);
41 40
42#define ktime_get_real_ts64(ts) getnstimeofday64(ts) 41#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
43
44/* Clock BOOTTIME compatibility wrappers */
45static inline void get_monotonic_boottime64(struct timespec64 *ts)
46{
47 ktime_get_ts64(ts);
48}
49 42
50/* 43/*
51 * ktime_t based interfaces 44 * ktime_t based interfaces
52 */ 45 */
46
53enum tk_offsets { 47enum tk_offsets {
54 TK_OFFS_REAL, 48 TK_OFFS_REAL,
49 TK_OFFS_BOOT,
55 TK_OFFS_TAI, 50 TK_OFFS_TAI,
56 TK_OFFS_MAX, 51 TK_OFFS_MAX,
57}; 52};
@@ -62,10 +57,6 @@ extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
62extern ktime_t ktime_get_raw(void); 57extern ktime_t ktime_get_raw(void);
63extern u32 ktime_get_resolution_ns(void); 58extern u32 ktime_get_resolution_ns(void);
64 59
65/* Clock BOOTTIME compatibility wrappers */
66static inline ktime_t ktime_get_boottime(void) { return ktime_get(); }
67static inline u64 ktime_get_boot_ns(void) { return ktime_get(); }
68
69/** 60/**
70 * ktime_get_real - get the real (wall-) time in ktime_t format 61 * ktime_get_real - get the real (wall-) time in ktime_t format
71 */ 62 */
@@ -75,6 +66,17 @@ static inline ktime_t ktime_get_real(void)
75} 66}
76 67
77/** 68/**
69 * ktime_get_boottime - Returns monotonic time since boot in ktime_t format
70 *
71 * This is similar to CLOCK_MONTONIC/ktime_get, but also includes the
72 * time spent in suspend.
73 */
74static inline ktime_t ktime_get_boottime(void)
75{
76 return ktime_get_with_offset(TK_OFFS_BOOT);
77}
78
79/**
78 * ktime_get_clocktai - Returns the TAI time of day in ktime_t format 80 * ktime_get_clocktai - Returns the TAI time of day in ktime_t format
79 */ 81 */
80static inline ktime_t ktime_get_clocktai(void) 82static inline ktime_t ktime_get_clocktai(void)
@@ -100,6 +102,11 @@ static inline u64 ktime_get_real_ns(void)
100 return ktime_to_ns(ktime_get_real()); 102 return ktime_to_ns(ktime_get_real());
101} 103}
102 104
105static inline u64 ktime_get_boot_ns(void)
106{
107 return ktime_to_ns(ktime_get_boottime());
108}
109
103static inline u64 ktime_get_tai_ns(void) 110static inline u64 ktime_get_tai_ns(void)
104{ 111{
105 return ktime_to_ns(ktime_get_clocktai()); 112 return ktime_to_ns(ktime_get_clocktai());
@@ -112,11 +119,17 @@ static inline u64 ktime_get_raw_ns(void)
112 119
113extern u64 ktime_get_mono_fast_ns(void); 120extern u64 ktime_get_mono_fast_ns(void);
114extern u64 ktime_get_raw_fast_ns(void); 121extern u64 ktime_get_raw_fast_ns(void);
122extern u64 ktime_get_boot_fast_ns(void);
115extern u64 ktime_get_real_fast_ns(void); 123extern u64 ktime_get_real_fast_ns(void);
116 124
117/* 125/*
118 * timespec64 interfaces utilizing the ktime based ones 126 * timespec64 interfaces utilizing the ktime based ones
119 */ 127 */
128static inline void get_monotonic_boottime64(struct timespec64 *ts)
129{
130 *ts = ktime_to_timespec64(ktime_get_boottime());
131}
132
120static inline void timekeeping_clocktai64(struct timespec64 *ts) 133static inline void timekeeping_clocktai64(struct timespec64 *ts)
121{ 134{
122 *ts = ktime_to_timespec64(ktime_get_clocktai()); 135 *ts = ktime_to_timespec64(ktime_get_clocktai());
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 47f8af22f216..1dd587ba6d88 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -701,7 +701,7 @@ extern int tty_unregister_ldisc(int disc);
701extern int tty_set_ldisc(struct tty_struct *tty, int disc); 701extern int tty_set_ldisc(struct tty_struct *tty, int disc);
702extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); 702extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
703extern void tty_ldisc_release(struct tty_struct *tty); 703extern void tty_ldisc_release(struct tty_struct *tty);
704extern void tty_ldisc_init(struct tty_struct *tty); 704extern int __must_check tty_ldisc_init(struct tty_struct *tty);
705extern void tty_ldisc_deinit(struct tty_struct *tty); 705extern void tty_ldisc_deinit(struct tty_struct *tty);
706extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, 706extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
707 char *f, int count); 707 char *f, int count);
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 07ee0f84a46c..a27604f99ed0 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -112,20 +112,6 @@ u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
112#endif 112#endif
113} 113}
114 114
115static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp)
116{
117#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
118 raw_write_seqcount_begin(&syncp->seq);
119#endif
120}
121
122static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp)
123{
124#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
125 raw_write_seqcount_end(&syncp->seq);
126#endif
127}
128
129static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp) 115static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
130{ 116{
131#if BITS_PER_LONG==32 && defined(CONFIG_SMP) 117#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
index c71def6b310f..a240ed2a0372 100644
--- a/include/linux/vbox_utils.h
+++ b/include/linux/vbox_utils.h
@@ -24,24 +24,6 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
24#define vbg_debug pr_debug 24#define vbg_debug pr_debug
25#endif 25#endif
26 26
27/**
28 * Allocate memory for generic request and initialize the request header.
29 *
30 * Return: the allocated memory
31 * @len: Size of memory block required for the request.
32 * @req_type: The generic request type.
33 */
34void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
35
36/**
37 * Perform a generic request.
38 *
39 * Return: VBox status code
40 * @gdev: The Guest extension device.
41 * @req: Pointer to the request structure.
42 */
43int vbg_req_perform(struct vbg_dev *gdev, void *req);
44
45int vbg_hgcm_connect(struct vbg_dev *gdev, 27int vbg_hgcm_connect(struct vbg_dev *gdev,
46 struct vmmdev_hgcm_service_location *loc, 28 struct vmmdev_hgcm_service_location *loc,
47 u32 *client_id, int *vbox_status); 29 u32 *client_id, int *vbox_status);
@@ -52,11 +34,6 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
52 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms, 34 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
53 u32 parm_count, int *vbox_status); 35 u32 parm_count, int *vbox_status);
54 36
55int vbg_hgcm_call32(
56 struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
57 struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
58 int *vbox_status);
59
60/** 37/**
61 * Convert a VirtualBox status code to a standard Linux kernel return value. 38 * Convert a VirtualBox status code to a standard Linux kernel return value.
62 * Return: 0 or negative errno value. 39 * Return: 0 or negative errno value.
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 988c7355bc22..fa1b5da2804e 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -157,6 +157,9 @@ int virtio_device_freeze(struct virtio_device *dev);
157int virtio_device_restore(struct virtio_device *dev); 157int virtio_device_restore(struct virtio_device *dev);
158#endif 158#endif
159 159
160#define virtio_device_for_each_vq(vdev, vq) \
161 list_for_each_entry(vq, &vdev->vqs, list)
162
160/** 163/**
161 * virtio_driver - operations for a virtio I/O driver 164 * virtio_driver - operations for a virtio I/O driver
162 * @driver: underlying device driver (populate name and owner). 165 * @driver: underlying device driver (populate name and owner).
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 60fb4ec8ba61..462e9741b210 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -356,10 +356,13 @@ struct dsa_switch_ops {
356 /* 356 /*
357 * ethtool hardware statistics. 357 * ethtool hardware statistics.
358 */ 358 */
359 void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data); 359 void (*get_strings)(struct dsa_switch *ds, int port,
360 u32 stringset, uint8_t *data);
360 void (*get_ethtool_stats)(struct dsa_switch *ds, 361 void (*get_ethtool_stats)(struct dsa_switch *ds,
361 int port, uint64_t *data); 362 int port, uint64_t *data);
362 int (*get_sset_count)(struct dsa_switch *ds, int port); 363 int (*get_sset_count)(struct dsa_switch *ds, int port, int sset);
364 void (*get_ethtool_phy_stats)(struct dsa_switch *ds,
365 int port, uint64_t *data);
363 366
364 /* 367 /*
365 * ethtool Wake-on-LAN 368 * ethtool Wake-on-LAN
@@ -588,4 +591,9 @@ static inline int call_dsa_notifiers(unsigned long val, struct net_device *dev,
588#define BRCM_TAG_GET_PORT(v) ((v) >> 8) 591#define BRCM_TAG_GET_PORT(v) ((v) >> 8)
589#define BRCM_TAG_GET_QUEUE(v) ((v) & 0xff) 592#define BRCM_TAG_GET_QUEUE(v) ((v) & 0xff)
590 593
594
595int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data);
596int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data);
597int dsa_port_get_phy_sset_count(struct dsa_port *dp);
598
591#endif 599#endif
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index b68fea022a82..2ab6667275df 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -77,6 +77,7 @@ struct inet_connection_sock_af_ops {
77 * @icsk_af_ops Operations which are AF_INET{4,6} specific 77 * @icsk_af_ops Operations which are AF_INET{4,6} specific
78 * @icsk_ulp_ops Pluggable ULP control hook 78 * @icsk_ulp_ops Pluggable ULP control hook
79 * @icsk_ulp_data ULP private data 79 * @icsk_ulp_data ULP private data
80 * @icsk_clean_acked Clean acked data hook
80 * @icsk_listen_portaddr_node hash to the portaddr listener hashtable 81 * @icsk_listen_portaddr_node hash to the portaddr listener hashtable
81 * @icsk_ca_state: Congestion control state 82 * @icsk_ca_state: Congestion control state
82 * @icsk_retransmits: Number of unrecovered [RTO] timeouts 83 * @icsk_retransmits: Number of unrecovered [RTO] timeouts
@@ -102,6 +103,7 @@ struct inet_connection_sock {
102 const struct inet_connection_sock_af_ops *icsk_af_ops; 103 const struct inet_connection_sock_af_ops *icsk_af_ops;
103 const struct tcp_ulp_ops *icsk_ulp_ops; 104 const struct tcp_ulp_ops *icsk_ulp_ops;
104 void *icsk_ulp_data; 105 void *icsk_ulp_data;
106 void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
105 struct hlist_node icsk_listen_portaddr_node; 107 struct hlist_node icsk_listen_portaddr_node;
106 unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu); 108 unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
107 __u8 icsk_ca_state:6, 109 __u8 icsk_ca_state:6,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 1af450d4e923..a3ec08d05756 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -135,7 +135,7 @@ struct fib6_nh {
135 135
136struct fib6_info { 136struct fib6_info {
137 struct fib6_table *fib6_table; 137 struct fib6_table *fib6_table;
138 struct fib6_info __rcu *rt6_next; 138 struct fib6_info __rcu *fib6_next;
139 struct fib6_node __rcu *fib6_node; 139 struct fib6_node __rcu *fib6_node;
140 140
141 /* Multipath routes: 141 /* Multipath routes:
@@ -192,11 +192,11 @@ struct rt6_info {
192 192
193#define for_each_fib6_node_rt_rcu(fn) \ 193#define for_each_fib6_node_rt_rcu(fn) \
194 for (rt = rcu_dereference((fn)->leaf); rt; \ 194 for (rt = rcu_dereference((fn)->leaf); rt; \
195 rt = rcu_dereference(rt->rt6_next)) 195 rt = rcu_dereference(rt->fib6_next))
196 196
197#define for_each_fib6_walker_rt(w) \ 197#define for_each_fib6_walker_rt(w) \
198 for (rt = (w)->leaf; rt; \ 198 for (rt = (w)->leaf; rt; \
199 rt = rcu_dereference_protected(rt->rt6_next, 1)) 199 rt = rcu_dereference_protected(rt->fib6_next, 1))
200 200
201static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst) 201static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
202{ 202{
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 8df4ff798b04..4cf1ef935ed9 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -279,6 +279,27 @@ static inline bool rt6_duplicate_nexthop(struct fib6_info *a, struct fib6_info *
279 !lwtunnel_cmp_encap(a->fib6_nh.nh_lwtstate, b->fib6_nh.nh_lwtstate); 279 !lwtunnel_cmp_encap(a->fib6_nh.nh_lwtstate, b->fib6_nh.nh_lwtstate);
280} 280}
281 281
282static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
283{
284 struct inet6_dev *idev;
285 unsigned int mtu;
286
287 if (dst_metric_locked(dst, RTAX_MTU)) {
288 mtu = dst_metric_raw(dst, RTAX_MTU);
289 if (mtu)
290 return mtu;
291 }
292
293 mtu = IPV6_MIN_MTU;
294 rcu_read_lock();
295 idev = __in6_dev_get(dst->dev);
296 if (idev)
297 mtu = idev->cnf.mtu6;
298 rcu_read_unlock();
299
300 return mtu;
301}
302
282struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw, 303struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
283 struct net_device *dev, struct sk_buff *skb, 304 struct net_device *dev, struct sk_buff *skb,
284 const void *daddr); 305 const void *daddr);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index eb0bec043c96..0ac795b41ab8 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -668,6 +668,7 @@ struct ip_vs_dest {
668 volatile unsigned int flags; /* dest status flags */ 668 volatile unsigned int flags; /* dest status flags */
669 atomic_t conn_flags; /* flags to copy to conn */ 669 atomic_t conn_flags; /* flags to copy to conn */
670 atomic_t weight; /* server weight */ 670 atomic_t weight; /* server weight */
671 atomic_t last_weight; /* server latest weight */
671 672
672 refcount_t refcnt; /* reference counter */ 673 refcount_t refcnt; /* reference counter */
673 struct ip_vs_stats stats; /* statistics */ 674 struct ip_vs_stats stats; /* statistics */
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 0a872a7c33c8..798558fd1681 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -960,8 +960,6 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
960 &inet6_sk(sk)->cork); 960 &inet6_sk(sk)->cork);
961} 961}
962 962
963unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst);
964
965int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, 963int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
966 struct flowi6 *fl6); 964 struct flowi6 *fl6);
967struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, 965struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h
index ebd869473603..cd24be4c4a99 100644
--- a/include/net/netfilter/ipv4/nf_nat_masquerade.h
+++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h
@@ -6,7 +6,7 @@
6 6
7unsigned int 7unsigned int
8nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, 8nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
9 const struct nf_nat_range *range, 9 const struct nf_nat_range2 *range,
10 const struct net_device *out); 10 const struct net_device *out);
11 11
12void nf_nat_masquerade_ipv4_register_notifier(void); 12void nf_nat_masquerade_ipv4_register_notifier(void);
diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h
index 1ed4f2631ed6..0c3b5ebf0bb8 100644
--- a/include/net/netfilter/ipv6/nf_nat_masquerade.h
+++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h
@@ -3,7 +3,7 @@
3#define _NF_NAT_MASQUERADE_IPV6_H_ 3#define _NF_NAT_MASQUERADE_IPV6_H_
4 4
5unsigned int 5unsigned int
6nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, 6nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
7 const struct net_device *out); 7 const struct net_device *out);
8void nf_nat_masquerade_ipv6_register_notifier(void); 8void nf_nat_masquerade_ipv6_register_notifier(void);
9void nf_nat_masquerade_ipv6_unregister_notifier(void); 9void nf_nat_masquerade_ipv6_unregister_notifier(void);
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 833752dd0c58..ba9fa4592f2b 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -6,6 +6,7 @@
6#include <linux/netdevice.h> 6#include <linux/netdevice.h>
7#include <linux/rhashtable.h> 7#include <linux/rhashtable.h>
8#include <linux/rcupdate.h> 8#include <linux/rcupdate.h>
9#include <linux/netfilter/nf_conntrack_tuple_common.h>
9#include <net/dst.h> 10#include <net/dst.h>
10 11
11struct nf_flowtable; 12struct nf_flowtable;
@@ -13,25 +14,24 @@ struct nf_flowtable;
13struct nf_flowtable_type { 14struct nf_flowtable_type {
14 struct list_head list; 15 struct list_head list;
15 int family; 16 int family;
16 void (*gc)(struct work_struct *work); 17 int (*init)(struct nf_flowtable *ft);
17 void (*free)(struct nf_flowtable *ft); 18 void (*free)(struct nf_flowtable *ft);
18 const struct rhashtable_params *params;
19 nf_hookfn *hook; 19 nf_hookfn *hook;
20 struct module *owner; 20 struct module *owner;
21}; 21};
22 22
23struct nf_flowtable { 23struct nf_flowtable {
24 struct list_head list;
24 struct rhashtable rhashtable; 25 struct rhashtable rhashtable;
25 const struct nf_flowtable_type *type; 26 const struct nf_flowtable_type *type;
26 struct delayed_work gc_work; 27 struct delayed_work gc_work;
27}; 28};
28 29
29enum flow_offload_tuple_dir { 30enum flow_offload_tuple_dir {
30 FLOW_OFFLOAD_DIR_ORIGINAL, 31 FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL,
31 FLOW_OFFLOAD_DIR_REPLY, 32 FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY,
32 __FLOW_OFFLOAD_DIR_MAX = FLOW_OFFLOAD_DIR_REPLY, 33 FLOW_OFFLOAD_DIR_MAX = IP_CT_DIR_MAX
33}; 34};
34#define FLOW_OFFLOAD_DIR_MAX (__FLOW_OFFLOAD_DIR_MAX + 1)
35 35
36struct flow_offload_tuple { 36struct flow_offload_tuple {
37 union { 37 union {
@@ -55,6 +55,8 @@ struct flow_offload_tuple {
55 55
56 int oifidx; 56 int oifidx;
57 57
58 u16 mtu;
59
58 struct dst_entry *dst_cache; 60 struct dst_entry *dst_cache;
59}; 61};
60 62
@@ -66,6 +68,7 @@ struct flow_offload_tuple_rhash {
66#define FLOW_OFFLOAD_SNAT 0x1 68#define FLOW_OFFLOAD_SNAT 0x1
67#define FLOW_OFFLOAD_DNAT 0x2 69#define FLOW_OFFLOAD_DNAT 0x2
68#define FLOW_OFFLOAD_DYING 0x4 70#define FLOW_OFFLOAD_DYING 0x4
71#define FLOW_OFFLOAD_TEARDOWN 0x8
69 72
70struct flow_offload { 73struct flow_offload {
71 struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX]; 74 struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
@@ -98,11 +101,14 @@ int nf_flow_table_iterate(struct nf_flowtable *flow_table,
98 101
99void nf_flow_table_cleanup(struct net *net, struct net_device *dev); 102void nf_flow_table_cleanup(struct net *net, struct net_device *dev);
100 103
104int nf_flow_table_init(struct nf_flowtable *flow_table);
101void nf_flow_table_free(struct nf_flowtable *flow_table); 105void nf_flow_table_free(struct nf_flowtable *flow_table);
102void nf_flow_offload_work_gc(struct work_struct *work);
103extern const struct rhashtable_params nf_flow_offload_rhash_params;
104 106
105void flow_offload_dead(struct flow_offload *flow); 107void flow_offload_teardown(struct flow_offload *flow);
108static inline void flow_offload_dead(struct flow_offload *flow)
109{
110 flow->flags |= FLOW_OFFLOAD_DYING;
111}
106 112
107int nf_flow_snat_port(const struct flow_offload *flow, 113int nf_flow_snat_port(const struct flow_offload *flow,
108 struct sk_buff *skb, unsigned int thoff, 114 struct sk_buff *skb, unsigned int thoff,
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index 207a467e7ca6..da3d601cadee 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -39,7 +39,7 @@ struct nf_conn_nat {
39 39
40/* Set up the info structure to map into this range. */ 40/* Set up the info structure to map into this range. */
41unsigned int nf_nat_setup_info(struct nf_conn *ct, 41unsigned int nf_nat_setup_info(struct nf_conn *ct,
42 const struct nf_nat_range *range, 42 const struct nf_nat_range2 *range,
43 enum nf_nat_manip_type maniptype); 43 enum nf_nat_manip_type maniptype);
44 44
45extern unsigned int nf_nat_alloc_null_binding(struct nf_conn *ct, 45extern unsigned int nf_nat_alloc_null_binding(struct nf_conn *ct,
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
index ce7c2b4e64bb..8bad2560576f 100644
--- a/include/net/netfilter/nf_nat_l3proto.h
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -7,7 +7,7 @@ struct nf_nat_l3proto {
7 u8 l3proto; 7 u8 l3proto;
8 8
9 bool (*in_range)(const struct nf_conntrack_tuple *t, 9 bool (*in_range)(const struct nf_conntrack_tuple *t,
10 const struct nf_nat_range *range); 10 const struct nf_nat_range2 *range);
11 11
12 u32 (*secure_port)(const struct nf_conntrack_tuple *t, __be16); 12 u32 (*secure_port)(const struct nf_conntrack_tuple *t, __be16);
13 13
@@ -33,7 +33,7 @@ struct nf_nat_l3proto {
33 struct flowi *fl); 33 struct flowi *fl);
34 34
35 int (*nlattr_to_range)(struct nlattr *tb[], 35 int (*nlattr_to_range)(struct nlattr *tb[],
36 struct nf_nat_range *range); 36 struct nf_nat_range2 *range);
37}; 37};
38 38
39int nf_nat_l3proto_register(const struct nf_nat_l3proto *); 39int nf_nat_l3proto_register(const struct nf_nat_l3proto *);
@@ -48,30 +48,26 @@ unsigned int nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
48 const struct nf_hook_state *state, 48 const struct nf_hook_state *state,
49 unsigned int (*do_chain)(void *priv, 49 unsigned int (*do_chain)(void *priv,
50 struct sk_buff *skb, 50 struct sk_buff *skb,
51 const struct nf_hook_state *state, 51 const struct nf_hook_state *state));
52 struct nf_conn *ct));
53 52
54unsigned int nf_nat_ipv4_out(void *priv, struct sk_buff *skb, 53unsigned int nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
55 const struct nf_hook_state *state, 54 const struct nf_hook_state *state,
56 unsigned int (*do_chain)(void *priv, 55 unsigned int (*do_chain)(void *priv,
57 struct sk_buff *skb, 56 struct sk_buff *skb,
58 const struct nf_hook_state *state, 57 const struct nf_hook_state *state));
59 struct nf_conn *ct));
60 58
61unsigned int nf_nat_ipv4_local_fn(void *priv, 59unsigned int nf_nat_ipv4_local_fn(void *priv,
62 struct sk_buff *skb, 60 struct sk_buff *skb,
63 const struct nf_hook_state *state, 61 const struct nf_hook_state *state,
64 unsigned int (*do_chain)(void *priv, 62 unsigned int (*do_chain)(void *priv,
65 struct sk_buff *skb, 63 struct sk_buff *skb,
66 const struct nf_hook_state *state, 64 const struct nf_hook_state *state));
67 struct nf_conn *ct));
68 65
69unsigned int nf_nat_ipv4_fn(void *priv, struct sk_buff *skb, 66unsigned int nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
70 const struct nf_hook_state *state, 67 const struct nf_hook_state *state,
71 unsigned int (*do_chain)(void *priv, 68 unsigned int (*do_chain)(void *priv,
72 struct sk_buff *skb, 69 struct sk_buff *skb,
73 const struct nf_hook_state *state, 70 const struct nf_hook_state *state));
74 struct nf_conn *ct));
75 71
76int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct, 72int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
77 enum ip_conntrack_info ctinfo, 73 enum ip_conntrack_info ctinfo,
@@ -81,29 +77,25 @@ unsigned int nf_nat_ipv6_in(void *priv, struct sk_buff *skb,
81 const struct nf_hook_state *state, 77 const struct nf_hook_state *state,
82 unsigned int (*do_chain)(void *priv, 78 unsigned int (*do_chain)(void *priv,
83 struct sk_buff *skb, 79 struct sk_buff *skb,
84 const struct nf_hook_state *state, 80 const struct nf_hook_state *state));
85 struct nf_conn *ct));
86 81
87unsigned int nf_nat_ipv6_out(void *priv, struct sk_buff *skb, 82unsigned int nf_nat_ipv6_out(void *priv, struct sk_buff *skb,
88 const struct nf_hook_state *state, 83 const struct nf_hook_state *state,
89 unsigned int (*do_chain)(void *priv, 84 unsigned int (*do_chain)(void *priv,
90 struct sk_buff *skb, 85 struct sk_buff *skb,
91 const struct nf_hook_state *state, 86 const struct nf_hook_state *state));
92 struct nf_conn *ct));
93 87
94unsigned int nf_nat_ipv6_local_fn(void *priv, 88unsigned int nf_nat_ipv6_local_fn(void *priv,
95 struct sk_buff *skb, 89 struct sk_buff *skb,
96 const struct nf_hook_state *state, 90 const struct nf_hook_state *state,
97 unsigned int (*do_chain)(void *priv, 91 unsigned int (*do_chain)(void *priv,
98 struct sk_buff *skb, 92 struct sk_buff *skb,
99 const struct nf_hook_state *state, 93 const struct nf_hook_state *state));
100 struct nf_conn *ct));
101 94
102unsigned int nf_nat_ipv6_fn(void *priv, struct sk_buff *skb, 95unsigned int nf_nat_ipv6_fn(void *priv, struct sk_buff *skb,
103 const struct nf_hook_state *state, 96 const struct nf_hook_state *state,
104 unsigned int (*do_chain)(void *priv, 97 unsigned int (*do_chain)(void *priv,
105 struct sk_buff *skb, 98 struct sk_buff *skb,
106 const struct nf_hook_state *state, 99 const struct nf_hook_state *state));
107 struct nf_conn *ct));
108 100
109#endif /* _NF_NAT_L3PROTO_H */ 101#endif /* _NF_NAT_L3PROTO_H */
diff --git a/include/net/netfilter/nf_nat_l4proto.h b/include/net/netfilter/nf_nat_l4proto.h
index 67835ff8a2d9..b4d6b29bca62 100644
--- a/include/net/netfilter/nf_nat_l4proto.h
+++ b/include/net/netfilter/nf_nat_l4proto.h
@@ -34,12 +34,12 @@ struct nf_nat_l4proto {
34 */ 34 */
35 void (*unique_tuple)(const struct nf_nat_l3proto *l3proto, 35 void (*unique_tuple)(const struct nf_nat_l3proto *l3proto,
36 struct nf_conntrack_tuple *tuple, 36 struct nf_conntrack_tuple *tuple,
37 const struct nf_nat_range *range, 37 const struct nf_nat_range2 *range,
38 enum nf_nat_manip_type maniptype, 38 enum nf_nat_manip_type maniptype,
39 const struct nf_conn *ct); 39 const struct nf_conn *ct);
40 40
41 int (*nlattr_to_range)(struct nlattr *tb[], 41 int (*nlattr_to_range)(struct nlattr *tb[],
42 struct nf_nat_range *range); 42 struct nf_nat_range2 *range);
43}; 43};
44 44
45/* Protocol registration. */ 45/* Protocol registration. */
@@ -72,11 +72,11 @@ bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
72 72
73void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto, 73void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
74 struct nf_conntrack_tuple *tuple, 74 struct nf_conntrack_tuple *tuple,
75 const struct nf_nat_range *range, 75 const struct nf_nat_range2 *range,
76 enum nf_nat_manip_type maniptype, 76 enum nf_nat_manip_type maniptype,
77 const struct nf_conn *ct, u16 *rover); 77 const struct nf_conn *ct, u16 *rover);
78 78
79int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[], 79int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
80 struct nf_nat_range *range); 80 struct nf_nat_range2 *range);
81 81
82#endif /*_NF_NAT_L4PROTO_H*/ 82#endif /*_NF_NAT_L4PROTO_H*/
diff --git a/include/net/netfilter/nf_nat_redirect.h b/include/net/netfilter/nf_nat_redirect.h
index 5ddabb08c472..c129aacc8ae8 100644
--- a/include/net/netfilter/nf_nat_redirect.h
+++ b/include/net/netfilter/nf_nat_redirect.h
@@ -7,7 +7,7 @@ nf_nat_redirect_ipv4(struct sk_buff *skb,
7 const struct nf_nat_ipv4_multi_range_compat *mr, 7 const struct nf_nat_ipv4_multi_range_compat *mr,
8 unsigned int hooknum); 8 unsigned int hooknum);
9unsigned int 9unsigned int
10nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, 10nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
11 unsigned int hooknum); 11 unsigned int hooknum);
12 12
13#endif /* _NF_NAT_REDIRECT_H_ */ 13#endif /* _NF_NAT_REDIRECT_H_ */
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index cd368d1b8cb8..435c9e3b9181 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -275,23 +275,6 @@ struct nft_set_estimate {
275 enum nft_set_class space; 275 enum nft_set_class space;
276}; 276};
277 277
278/**
279 * struct nft_set_type - nf_tables set type
280 *
281 * @select_ops: function to select nft_set_ops
282 * @ops: default ops, used when no select_ops functions is present
283 * @list: used internally
284 * @owner: module reference
285 */
286struct nft_set_type {
287 const struct nft_set_ops *(*select_ops)(const struct nft_ctx *,
288 const struct nft_set_desc *desc,
289 u32 flags);
290 const struct nft_set_ops *ops;
291 struct list_head list;
292 struct module *owner;
293};
294
295struct nft_set_ext; 278struct nft_set_ext;
296struct nft_expr; 279struct nft_expr;
297 280
@@ -310,7 +293,6 @@ struct nft_expr;
310 * @init: initialize private data of new set instance 293 * @init: initialize private data of new set instance
311 * @destroy: destroy private data of set instance 294 * @destroy: destroy private data of set instance
312 * @elemsize: element private size 295 * @elemsize: element private size
313 * @features: features supported by the implementation
314 */ 296 */
315struct nft_set_ops { 297struct nft_set_ops {
316 bool (*lookup)(const struct net *net, 298 bool (*lookup)(const struct net *net,
@@ -361,9 +343,23 @@ struct nft_set_ops {
361 void (*destroy)(const struct nft_set *set); 343 void (*destroy)(const struct nft_set *set);
362 344
363 unsigned int elemsize; 345 unsigned int elemsize;
346};
347
348/**
349 * struct nft_set_type - nf_tables set type
350 *
351 * @ops: set ops for this type
352 * @list: used internally
353 * @owner: module reference
354 * @features: features supported by the implementation
355 */
356struct nft_set_type {
357 const struct nft_set_ops ops;
358 struct list_head list;
359 struct module *owner;
364 u32 features; 360 u32 features;
365 const struct nft_set_type *type;
366}; 361};
362#define to_set_type(o) container_of(o, struct nft_set_type, ops)
367 363
368int nft_register_set(struct nft_set_type *type); 364int nft_register_set(struct nft_set_type *type);
369void nft_unregister_set(struct nft_set_type *type); 365void nft_unregister_set(struct nft_set_type *type);
@@ -589,7 +585,7 @@ static inline u64 *nft_set_ext_timeout(const struct nft_set_ext *ext)
589 return nft_set_ext(ext, NFT_SET_EXT_TIMEOUT); 585 return nft_set_ext(ext, NFT_SET_EXT_TIMEOUT);
590} 586}
591 587
592static inline unsigned long *nft_set_ext_expiration(const struct nft_set_ext *ext) 588static inline u64 *nft_set_ext_expiration(const struct nft_set_ext *ext)
593{ 589{
594 return nft_set_ext(ext, NFT_SET_EXT_EXPIRATION); 590 return nft_set_ext(ext, NFT_SET_EXT_EXPIRATION);
595} 591}
@@ -607,7 +603,7 @@ static inline struct nft_expr *nft_set_ext_expr(const struct nft_set_ext *ext)
607static inline bool nft_set_elem_expired(const struct nft_set_ext *ext) 603static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
608{ 604{
609 return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) && 605 return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) &&
610 time_is_before_eq_jiffies(*nft_set_ext_expiration(ext)); 606 time_is_before_eq_jiffies64(*nft_set_ext_expiration(ext));
611} 607}
612 608
613static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set, 609static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set,
@@ -1015,9 +1011,9 @@ static inline void *nft_obj_data(const struct nft_object *obj)
1015 1011
1016#define nft_expr_obj(expr) *((struct nft_object **)nft_expr_priv(expr)) 1012#define nft_expr_obj(expr) *((struct nft_object **)nft_expr_priv(expr))
1017 1013
1018struct nft_object *nf_tables_obj_lookup(const struct nft_table *table, 1014struct nft_object *nft_obj_lookup(const struct nft_table *table,
1019 const struct nlattr *nla, u32 objtype, 1015 const struct nlattr *nla, u32 objtype,
1020 u8 genmask); 1016 u8 genmask);
1021 1017
1022void nft_obj_notify(struct net *net, struct nft_table *table, 1018void nft_obj_notify(struct net *net, struct nft_table *table,
1023 struct nft_object *obj, u32 portid, u32 seq, 1019 struct nft_object *obj, u32 portid, u32 seq,
@@ -1106,12 +1102,9 @@ struct nft_flowtable {
1106 struct nf_flowtable data; 1102 struct nf_flowtable data;
1107}; 1103};
1108 1104
1109struct nft_flowtable *nf_tables_flowtable_lookup(const struct nft_table *table, 1105struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table,
1110 const struct nlattr *nla, 1106 const struct nlattr *nla,
1111 u8 genmask); 1107 u8 genmask);
1112void nft_flow_table_iterate(struct net *net,
1113 void (*iter)(struct nf_flowtable *flowtable, void *data),
1114 void *data);
1115 1108
1116void nft_register_flowtable_type(struct nf_flowtable_type *type); 1109void nft_register_flowtable_type(struct nf_flowtable_type *type);
1117void nft_unregister_flowtable_type(struct nf_flowtable_type *type); 1110void nft_unregister_flowtable_type(struct nf_flowtable_type *type);
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index ea5aab568be8..cd6915b6c054 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -10,6 +10,9 @@ extern struct nft_expr_type nft_byteorder_type;
10extern struct nft_expr_type nft_payload_type; 10extern struct nft_expr_type nft_payload_type;
11extern struct nft_expr_type nft_dynset_type; 11extern struct nft_expr_type nft_dynset_type;
12extern struct nft_expr_type nft_range_type; 12extern struct nft_expr_type nft_range_type;
13extern struct nft_expr_type nft_meta_type;
14extern struct nft_expr_type nft_rt_type;
15extern struct nft_expr_type nft_exthdr_type;
13 16
14int nf_tables_core_module_init(void); 17int nf_tables_core_module_init(void);
15void nf_tables_core_module_exit(void); 18void nf_tables_core_module_exit(void);
diff --git a/include/net/netfilter/nfnetlink_log.h b/include/net/netfilter/nfnetlink_log.h
index 612cfb63ac68..ea32a7d3cf1b 100644
--- a/include/net/netfilter/nfnetlink_log.h
+++ b/include/net/netfilter/nfnetlink_log.h
@@ -1,18 +1 @@
1/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _KER_NFNETLINK_LOG_H
3#define _KER_NFNETLINK_LOG_H
4
5void
6nfulnl_log_packet(struct net *net,
7 u_int8_t pf,
8 unsigned int hooknum,
9 const struct sk_buff *skb,
10 const struct net_device *in,
11 const struct net_device *out,
12 const struct nf_loginfo *li_user,
13 const char *prefix);
14
15#define NFULNL_COPY_DISABLED 0xff
16
17#endif /* _KER_NFNETLINK_LOG_H */
18
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
deleted file mode 100644
index 5c69e9b09388..000000000000
--- a/include/net/netfilter/nft_meta.h
+++ /dev/null
@@ -1,44 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _NFT_META_H_
3#define _NFT_META_H_
4
5struct nft_meta {
6 enum nft_meta_keys key:8;
7 union {
8 enum nft_registers dreg:8;
9 enum nft_registers sreg:8;
10 };
11};
12
13extern const struct nla_policy nft_meta_policy[];
14
15int nft_meta_get_init(const struct nft_ctx *ctx,
16 const struct nft_expr *expr,
17 const struct nlattr * const tb[]);
18
19int nft_meta_set_init(const struct nft_ctx *ctx,
20 const struct nft_expr *expr,
21 const struct nlattr * const tb[]);
22
23int nft_meta_get_dump(struct sk_buff *skb,
24 const struct nft_expr *expr);
25
26int nft_meta_set_dump(struct sk_buff *skb,
27 const struct nft_expr *expr);
28
29void nft_meta_get_eval(const struct nft_expr *expr,
30 struct nft_regs *regs,
31 const struct nft_pktinfo *pkt);
32
33void nft_meta_set_eval(const struct nft_expr *expr,
34 struct nft_regs *regs,
35 const struct nft_pktinfo *pkt);
36
37void nft_meta_set_destroy(const struct nft_ctx *ctx,
38 const struct nft_expr *expr);
39
40int nft_meta_set_validate(const struct nft_ctx *ctx,
41 const struct nft_expr *expr,
42 const struct nft_data **data);
43
44#endif
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 20ff237c5eb2..86f034b524d4 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -254,11 +254,10 @@ enum { SCTP_ARBITRARY_COOKIE_ECHO_LEN = 200 };
254#define SCTP_TSN_MAP_SIZE 4096 254#define SCTP_TSN_MAP_SIZE 4096
255 255
256/* We will not record more than this many duplicate TSNs between two 256/* We will not record more than this many duplicate TSNs between two
257 * SACKs. The minimum PMTU is 576. Remove all the headers and there 257 * SACKs. The minimum PMTU is 512. Remove all the headers and there
258 * is enough room for 131 duplicate reports. Round down to the 258 * is enough room for 117 duplicate reports. Round down to the
259 * nearest power of 2. 259 * nearest power of 2.
260 */ 260 */
261enum { SCTP_MIN_PMTU = 576 };
262enum { SCTP_MAX_DUP_TSNS = 16 }; 261enum { SCTP_MAX_DUP_TSNS = 16 };
263enum { SCTP_MAX_GABS = 16 }; 262enum { SCTP_MAX_GABS = 16 };
264 263
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 28b996d63490..f66d44350007 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -428,32 +428,6 @@ static inline int sctp_list_single_entry(struct list_head *head)
428 return (head->next != head) && (head->next == head->prev); 428 return (head->next != head) && (head->next == head->prev);
429} 429}
430 430
431/* Break down data chunks at this point. */
432static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
433{
434 struct sctp_sock *sp = sctp_sk(asoc->base.sk);
435 struct sctp_af *af = sp->pf->af;
436 int frag = pmtu;
437
438 frag -= af->ip_options_len(asoc->base.sk);
439 frag -= af->net_header_len;
440 frag -= sizeof(struct sctphdr) + sctp_datachk_len(&asoc->stream);
441
442 if (asoc->user_frag)
443 frag = min_t(int, frag, asoc->user_frag);
444
445 frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN -
446 sctp_datachk_len(&asoc->stream)));
447
448 return frag;
449}
450
451static inline void sctp_assoc_pending_pmtu(struct sctp_association *asoc)
452{
453 sctp_assoc_sync_pmtu(asoc);
454 asoc->pmtu_pending = 0;
455}
456
457static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk) 431static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
458{ 432{
459 return !list_empty(&chunk->list); 433 return !list_empty(&chunk->list);
@@ -607,17 +581,29 @@ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *
607 return t->dst; 581 return t->dst;
608} 582}
609 583
610static inline bool sctp_transport_pmtu_check(struct sctp_transport *t) 584/* Calculate max payload size given a MTU, or the total overhead if
585 * given MTU is zero
586 */
587static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp,
588 __u32 mtu, __u32 extra)
611{ 589{
612 __u32 pmtu = max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)), 590 __u32 overhead = sizeof(struct sctphdr) + extra;
613 SCTP_DEFAULT_MINSEGMENT);
614 591
615 if (t->pathmtu == pmtu) 592 if (sp)
616 return true; 593 overhead += sp->pf->af->net_header_len;
594 else
595 overhead += sizeof(struct ipv6hdr);
617 596
618 t->pathmtu = pmtu; 597 if (WARN_ON_ONCE(mtu && mtu <= overhead))
598 mtu = overhead;
619 599
620 return false; 600 return mtu ? mtu - overhead : overhead;
601}
602
603static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
604{
605 return SCTP_TRUNC4(max_t(__u32, dst_mtu(dst),
606 SCTP_DEFAULT_MINSEGMENT));
621} 607}
622 608
623#endif /* __net_sctp_h__ */ 609#endif /* __net_sctp_h__ */
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index f4b657478a30..5ef1bad81ef5 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -215,7 +215,7 @@ struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
215struct sctp_chunk *sctp_make_shutdown_complete( 215struct sctp_chunk *sctp_make_shutdown_complete(
216 const struct sctp_association *asoc, 216 const struct sctp_association *asoc,
217 const struct sctp_chunk *chunk); 217 const struct sctp_chunk *chunk);
218void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause, size_t paylen); 218int sctp_init_cause(struct sctp_chunk *chunk, __be16 cause, size_t paylen);
219struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc, 219struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc,
220 const struct sctp_chunk *chunk, 220 const struct sctp_chunk *chunk,
221 const size_t hint); 221 const size_t hint);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 05594b248e52..ebf809eed33a 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -2097,6 +2097,8 @@ int sctp_assoc_update(struct sctp_association *old,
2097 2097
2098__u32 sctp_association_get_next_tsn(struct sctp_association *); 2098__u32 sctp_association_get_next_tsn(struct sctp_association *);
2099 2099
2100void sctp_assoc_update_frag_point(struct sctp_association *asoc);
2101void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu);
2100void sctp_assoc_sync_pmtu(struct sctp_association *asoc); 2102void sctp_assoc_sync_pmtu(struct sctp_association *asoc);
2101void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int); 2103void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
2102void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int); 2104void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
diff --git a/include/net/sock.h b/include/net/sock.h
index 74d725fdbe0f..3c568b36ee36 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -481,6 +481,11 @@ struct sock {
481 void (*sk_error_report)(struct sock *sk); 481 void (*sk_error_report)(struct sock *sk);
482 int (*sk_backlog_rcv)(struct sock *sk, 482 int (*sk_backlog_rcv)(struct sock *sk,
483 struct sk_buff *skb); 483 struct sk_buff *skb);
484#ifdef CONFIG_SOCK_VALIDATE_XMIT
485 struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
486 struct net_device *dev,
487 struct sk_buff *skb);
488#endif
484 void (*sk_destruct)(struct sock *sk); 489 void (*sk_destruct)(struct sock *sk);
485 struct sock_reuseport __rcu *sk_reuseport_cb; 490 struct sock_reuseport __rcu *sk_reuseport_cb;
486 struct rcu_head sk_rcu; 491 struct rcu_head sk_rcu;
@@ -2332,6 +2337,22 @@ static inline bool sk_fullsock(const struct sock *sk)
2332 return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV); 2337 return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
2333} 2338}
2334 2339
2340/* Checks if this SKB belongs to an HW offloaded socket
2341 * and whether any SW fallbacks are required based on dev.
2342 */
2343static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
2344 struct net_device *dev)
2345{
2346#ifdef CONFIG_SOCK_VALIDATE_XMIT
2347 struct sock *sk = skb->sk;
2348
2349 if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb)
2350 skb = sk->sk_validate_xmit_skb(sk, dev, skb);
2351#endif
2352
2353 return skb;
2354}
2355
2335/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV 2356/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV
2336 * SYNACK messages can be attached to either ones (depending on SYNCOOKIE) 2357 * SYNACK messages can be attached to either ones (depending on SYNCOOKIE)
2337 */ 2358 */
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 39bc855d7fee..d574ce63bf22 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -155,6 +155,7 @@ struct switchdev_notifier_fdb_info {
155 struct switchdev_notifier_info info; /* must be first */ 155 struct switchdev_notifier_info info; /* must be first */
156 const unsigned char *addr; 156 const unsigned char *addr;
157 u16 vid; 157 u16 vid;
158 bool added_by_user;
158}; 159};
159 160
160static inline struct net_device * 161static inline struct net_device *
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 833154e3df17..cf803fe0fb86 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2105,4 +2105,12 @@ static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2105#if IS_ENABLED(CONFIG_SMC) 2105#if IS_ENABLED(CONFIG_SMC)
2106extern struct static_key_false tcp_have_smc; 2106extern struct static_key_false tcp_have_smc;
2107#endif 2107#endif
2108
2109#if IS_ENABLED(CONFIG_TLS_DEVICE)
2110void clean_acked_data_enable(struct inet_connection_sock *icsk,
2111 void (*cad)(struct sock *sk, u32 ack_seq));
2112void clean_acked_data_disable(struct inet_connection_sock *icsk);
2113
2114#endif
2115
2108#endif /* _TCP_H */ 2116#endif /* _TCP_H */
diff --git a/include/net/tls.h b/include/net/tls.h
index 3da8e13a6d96..ee78f339b4b3 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -83,21 +83,10 @@ struct tls_device {
83 void (*unhash)(struct tls_device *device, struct sock *sk); 83 void (*unhash)(struct tls_device *device, struct sock *sk);
84}; 84};
85 85
86struct tls_sw_context { 86struct tls_sw_context_tx {
87 struct crypto_aead *aead_send; 87 struct crypto_aead *aead_send;
88 struct crypto_aead *aead_recv;
89 struct crypto_wait async_wait; 88 struct crypto_wait async_wait;
90 89
91 /* Receive context */
92 struct strparser strp;
93 void (*saved_data_ready)(struct sock *sk);
94 unsigned int (*sk_poll)(struct file *file, struct socket *sock,
95 struct poll_table_struct *wait);
96 struct sk_buff *recv_pkt;
97 u8 control;
98 bool decrypted;
99
100 /* Sending context */
101 char aad_space[TLS_AAD_SPACE_SIZE]; 90 char aad_space[TLS_AAD_SPACE_SIZE];
102 91
103 unsigned int sg_plaintext_size; 92 unsigned int sg_plaintext_size;
@@ -114,6 +103,50 @@ struct tls_sw_context {
114 struct scatterlist sg_aead_out[2]; 103 struct scatterlist sg_aead_out[2];
115}; 104};
116 105
106struct tls_sw_context_rx {
107 struct crypto_aead *aead_recv;
108 struct crypto_wait async_wait;
109
110 struct strparser strp;
111 void (*saved_data_ready)(struct sock *sk);
112 unsigned int (*sk_poll)(struct file *file, struct socket *sock,
113 struct poll_table_struct *wait);
114 struct sk_buff *recv_pkt;
115 u8 control;
116 bool decrypted;
117};
118
119struct tls_record_info {
120 struct list_head list;
121 u32 end_seq;
122 int len;
123 int num_frags;
124 skb_frag_t frags[MAX_SKB_FRAGS];
125};
126
127struct tls_offload_context {
128 struct crypto_aead *aead_send;
129 spinlock_t lock; /* protects records list */
130 struct list_head records_list;
131 struct tls_record_info *open_record;
132 struct tls_record_info *retransmit_hint;
133 u64 hint_record_sn;
134 u64 unacked_record_sn;
135
136 struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
137 void (*sk_destruct)(struct sock *sk);
138 u8 driver_state[];
139 /* The TLS layer reserves room for driver specific state
140 * Currently the belief is that there is not enough
141 * driver specific state to justify another layer of indirection
142 */
143#define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *)))
144};
145
146#define TLS_OFFLOAD_CONTEXT_SIZE \
147 (ALIGN(sizeof(struct tls_offload_context), sizeof(void *)) + \
148 TLS_DRIVER_STATE_SIZE)
149
117enum { 150enum {
118 TLS_PENDING_CLOSED_RECORD 151 TLS_PENDING_CLOSED_RECORD
119}; 152};
@@ -138,9 +171,15 @@ struct tls_context {
138 struct tls12_crypto_info_aes_gcm_128 crypto_recv_aes_gcm_128; 171 struct tls12_crypto_info_aes_gcm_128 crypto_recv_aes_gcm_128;
139 }; 172 };
140 173
141 void *priv_ctx; 174 struct list_head list;
175 struct net_device *netdev;
176 refcount_t refcount;
177
178 void *priv_ctx_tx;
179 void *priv_ctx_rx;
142 180
143 u8 conf:3; 181 u8 tx_conf:3;
182 u8 rx_conf:3;
144 183
145 struct cipher_context tx; 184 struct cipher_context tx;
146 struct cipher_context rx; 185 struct cipher_context rx;
@@ -148,6 +187,7 @@ struct tls_context {
148 struct scatterlist *partially_sent_record; 187 struct scatterlist *partially_sent_record;
149 u16 partially_sent_offset; 188 u16 partially_sent_offset;
150 unsigned long flags; 189 unsigned long flags;
190 bool in_tcp_sendpages;
151 191
152 u16 pending_open_record_frags; 192 u16 pending_open_record_frags;
153 int (*push_pending_record)(struct sock *sk, int flags); 193 int (*push_pending_record)(struct sock *sk, int flags);
@@ -177,7 +217,8 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
177int tls_sw_sendpage(struct sock *sk, struct page *page, 217int tls_sw_sendpage(struct sock *sk, struct page *page,
178 int offset, size_t size, int flags); 218 int offset, size_t size, int flags);
179void tls_sw_close(struct sock *sk, long timeout); 219void tls_sw_close(struct sock *sk, long timeout);
180void tls_sw_free_resources(struct sock *sk); 220void tls_sw_free_resources_tx(struct sock *sk);
221void tls_sw_free_resources_rx(struct sock *sk);
181int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, 222int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
182 int nonblock, int flags, int *addr_len); 223 int nonblock, int flags, int *addr_len);
183unsigned int tls_sw_poll(struct file *file, struct socket *sock, 224unsigned int tls_sw_poll(struct file *file, struct socket *sock,
@@ -186,9 +227,28 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
186 struct pipe_inode_info *pipe, 227 struct pipe_inode_info *pipe,
187 size_t len, unsigned int flags); 228 size_t len, unsigned int flags);
188 229
189void tls_sk_destruct(struct sock *sk, struct tls_context *ctx); 230int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
190void tls_icsk_clean_acked(struct sock *sk); 231int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
232int tls_device_sendpage(struct sock *sk, struct page *page,
233 int offset, size_t size, int flags);
234void tls_device_sk_destruct(struct sock *sk);
235void tls_device_init(void);
236void tls_device_cleanup(void);
191 237
238struct tls_record_info *tls_get_record(struct tls_offload_context *context,
239 u32 seq, u64 *p_record_sn);
240
241static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
242{
243 return rec->len == 0;
244}
245
246static inline u32 tls_record_start_seq(struct tls_record_info *rec)
247{
248 return rec->end_seq - rec->len;
249}
250
251void tls_sk_destruct(struct sock *sk, struct tls_context *ctx);
192int tls_push_sg(struct sock *sk, struct tls_context *ctx, 252int tls_push_sg(struct sock *sk, struct tls_context *ctx,
193 struct scatterlist *sg, u16 first_offset, 253 struct scatterlist *sg, u16 first_offset,
194 int flags); 254 int flags);
@@ -225,6 +285,13 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
225 return tls_ctx->pending_open_record_frags; 285 return tls_ctx->pending_open_record_frags;
226} 286}
227 287
288static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
289{
290 return sk_fullsock(sk) &&
291 /* matches smp_store_release in tls_set_device_offload */
292 smp_load_acquire(&sk->sk_destruct) == &tls_device_sk_destruct;
293}
294
228static inline void tls_err_abort(struct sock *sk, int err) 295static inline void tls_err_abort(struct sock *sk, int err)
229{ 296{
230 sk->sk_err = err; 297 sk->sk_err = err;
@@ -297,16 +364,22 @@ static inline struct tls_context *tls_get_ctx(const struct sock *sk)
297 return icsk->icsk_ulp_data; 364 return icsk->icsk_ulp_data;
298} 365}
299 366
300static inline struct tls_sw_context *tls_sw_ctx( 367static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
301 const struct tls_context *tls_ctx) 368 const struct tls_context *tls_ctx)
302{ 369{
303 return (struct tls_sw_context *)tls_ctx->priv_ctx; 370 return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
371}
372
373static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
374 const struct tls_context *tls_ctx)
375{
376 return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
304} 377}
305 378
306static inline struct tls_offload_context *tls_offload_ctx( 379static inline struct tls_offload_context *tls_offload_ctx(
307 const struct tls_context *tls_ctx) 380 const struct tls_context *tls_ctx)
308{ 381{
309 return (struct tls_offload_context *)tls_ctx->priv_ctx; 382 return (struct tls_offload_context *)tls_ctx->priv_ctx_tx;
310} 383}
311 384
312int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg, 385int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
@@ -314,4 +387,12 @@ int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
314void tls_register_device(struct tls_device *device); 387void tls_register_device(struct tls_device *device);
315void tls_unregister_device(struct tls_device *device); 388void tls_unregister_device(struct tls_device *device);
316 389
390struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
391 struct net_device *dev,
392 struct sk_buff *skb);
393
394int tls_sw_fallback_init(struct sock *sk,
395 struct tls_offload_context *offload_ctx,
396 struct tls_crypto_info *crypto_info);
397
317#endif /* _TLS_OFFLOAD_H */ 398#endif /* _TLS_OFFLOAD_H */
diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
index 04e0679767f6..e03bd9d41fa8 100644
--- a/include/scsi/scsi_dbg.h
+++ b/include/scsi/scsi_dbg.h
@@ -11,8 +11,6 @@ struct scsi_sense_hdr;
11extern void scsi_print_command(struct scsi_cmnd *); 11extern void scsi_print_command(struct scsi_cmnd *);
12extern size_t __scsi_format_command(char *, size_t, 12extern size_t __scsi_format_command(char *, size_t,
13 const unsigned char *, size_t); 13 const unsigned char *, size_t);
14extern void scsi_show_extd_sense(const struct scsi_device *, const char *,
15 unsigned char, unsigned char);
16extern void scsi_print_sense_hdr(const struct scsi_device *, const char *, 14extern void scsi_print_sense_hdr(const struct scsi_device *, const char *,
17 const struct scsi_sense_hdr *); 15 const struct scsi_sense_hdr *);
18extern void scsi_print_sense(const struct scsi_cmnd *); 16extern void scsi_print_sense(const struct scsi_cmnd *);
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index 50df5b28d2c9..8ee8991aa099 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -143,13 +143,13 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
143static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag, 143static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag,
144 void *data, size_t len) 144 void *data, size_t len)
145{ 145{
146 return 0; 146 return -ENOSYS;
147} 147}
148 148
149static inline int rpi_firmware_property_list(struct rpi_firmware *fw, 149static inline int rpi_firmware_property_list(struct rpi_firmware *fw,
150 void *data, size_t tag_size) 150 void *data, size_t tag_size)
151{ 151{
152 return 0; 152 return -ENOSYS;
153} 153}
154 154
155static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node) 155static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
diff --git a/include/sound/control.h b/include/sound/control.h
index ca13a44ae9d4..6011a58d3e20 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/wait.h> 25#include <linux/wait.h>
26#include <linux/nospec.h>
26#include <sound/asound.h> 27#include <sound/asound.h>
27 28
28#define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data) 29#define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data)
@@ -148,12 +149,14 @@ int snd_ctl_get_preferred_subdevice(struct snd_card *card, int type);
148 149
149static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id) 150static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
150{ 151{
151 return id->numid - kctl->id.numid; 152 unsigned int ioff = id->numid - kctl->id.numid;
153 return array_index_nospec(ioff, kctl->count);
152} 154}
153 155
154static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id) 156static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
155{ 157{
156 return id->index - kctl->id.index; 158 unsigned int ioff = id->index - kctl->id.index;
159 return array_index_nospec(ioff, kctl->count);
157} 160}
158 161
159static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id) 162static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
diff --git a/include/trace/events/initcall.h b/include/trace/events/initcall.h
index 8d6cf10d27c9..eb903c3f195f 100644
--- a/include/trace/events/initcall.h
+++ b/include/trace/events/initcall.h
@@ -31,7 +31,11 @@ TRACE_EVENT(initcall_start,
31 TP_ARGS(func), 31 TP_ARGS(func),
32 32
33 TP_STRUCT__entry( 33 TP_STRUCT__entry(
34 __field(initcall_t, func) 34 /*
35 * Use field_struct to avoid is_signed_type()
36 * comparison of a function pointer
37 */
38 __field_struct(initcall_t, func)
35 ), 39 ),
36 40
37 TP_fast_assign( 41 TP_fast_assign(
@@ -48,8 +52,12 @@ TRACE_EVENT(initcall_finish,
48 TP_ARGS(func, ret), 52 TP_ARGS(func, ret),
49 53
50 TP_STRUCT__entry( 54 TP_STRUCT__entry(
51 __field(initcall_t, func) 55 /*
52 __field(int, ret) 56 * Use field_struct to avoid is_signed_type()
57 * comparison of a function pointer
58 */
59 __field_struct(initcall_t, func)
60 __field(int, ret)
53 ), 61 ),
54 62
55 TP_fast_assign( 63 TP_fast_assign(
diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h
index bf6f82673492..f8260e5c79ad 100644
--- a/include/trace/events/ufs.h
+++ b/include/trace/events/ufs.h
@@ -257,6 +257,33 @@ TRACE_EVENT(ufshcd_command,
257 ) 257 )
258); 258);
259 259
260TRACE_EVENT(ufshcd_upiu,
261 TP_PROTO(const char *dev_name, const char *str, void *hdr, void *tsf),
262
263 TP_ARGS(dev_name, str, hdr, tsf),
264
265 TP_STRUCT__entry(
266 __string(dev_name, dev_name)
267 __string(str, str)
268 __array(unsigned char, hdr, 12)
269 __array(unsigned char, tsf, 16)
270 ),
271
272 TP_fast_assign(
273 __assign_str(dev_name, dev_name);
274 __assign_str(str, str);
275 memcpy(__entry->hdr, hdr, sizeof(__entry->hdr));
276 memcpy(__entry->tsf, tsf, sizeof(__entry->tsf));
277 ),
278
279 TP_printk(
280 "%s: %s: HDR:%s, CDB:%s",
281 __get_str(str), __get_str(dev_name),
282 __print_hex(__entry->hdr, sizeof(__entry->hdr)),
283 __print_hex(__entry->tsf, sizeof(__entry->tsf))
284 )
285);
286
260#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */ 287#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */
261 288
262/* This part must be outside protection */ 289/* This part must be outside protection */
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index 2f057a494d93..9a761bc6a251 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -25,6 +25,8 @@ DECLARE_EVENT_CLASS(workqueue_work,
25 TP_printk("work struct %p", __entry->work) 25 TP_printk("work struct %p", __entry->work)
26); 26);
27 27
28struct pool_workqueue;
29
28/** 30/**
29 * workqueue_queue_work - called when a work gets queued 31 * workqueue_queue_work - called when a work gets queued
30 * @req_cpu: the requested cpu 32 * @req_cpu: the requested cpu
diff --git a/include/uapi/linux/cn_proc.h b/include/uapi/linux/cn_proc.h
index 68ff25414700..db210625cee8 100644
--- a/include/uapi/linux/cn_proc.h
+++ b/include/uapi/linux/cn_proc.h
@@ -116,12 +116,16 @@ struct proc_event {
116 struct coredump_proc_event { 116 struct coredump_proc_event {
117 __kernel_pid_t process_pid; 117 __kernel_pid_t process_pid;
118 __kernel_pid_t process_tgid; 118 __kernel_pid_t process_tgid;
119 __kernel_pid_t parent_pid;
120 __kernel_pid_t parent_tgid;
119 } coredump; 121 } coredump;
120 122
121 struct exit_proc_event { 123 struct exit_proc_event {
122 __kernel_pid_t process_pid; 124 __kernel_pid_t process_pid;
123 __kernel_pid_t process_tgid; 125 __kernel_pid_t process_tgid;
124 __u32 exit_code, exit_signal; 126 __u32 exit_code, exit_signal;
127 __kernel_pid_t parent_pid;
128 __kernel_pid_t parent_tgid;
125 } exit; 129 } exit;
126 130
127 } event_data; 131 } event_data;
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 1065006c9bf5..b02c41e53d56 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -676,6 +676,13 @@ struct kvm_ioeventfd {
676 __u8 pad[36]; 676 __u8 pad[36];
677}; 677};
678 678
679#define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0)
680#define KVM_X86_DISABLE_EXITS_HTL (1 << 1)
681#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
682#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \
683 KVM_X86_DISABLE_EXITS_HTL | \
684 KVM_X86_DISABLE_EXITS_PAUSE)
685
679/* for KVM_ENABLE_CAP */ 686/* for KVM_ENABLE_CAP */
680struct kvm_enable_cap { 687struct kvm_enable_cap {
681 /* in */ 688 /* in */
diff --git a/include/uapi/linux/netfilter/nf_nat.h b/include/uapi/linux/netfilter/nf_nat.h
index a33000da7229..4a95c0db14d4 100644
--- a/include/uapi/linux/netfilter/nf_nat.h
+++ b/include/uapi/linux/netfilter/nf_nat.h
@@ -10,6 +10,7 @@
10#define NF_NAT_RANGE_PROTO_RANDOM (1 << 2) 10#define NF_NAT_RANGE_PROTO_RANDOM (1 << 2)
11#define NF_NAT_RANGE_PERSISTENT (1 << 3) 11#define NF_NAT_RANGE_PERSISTENT (1 << 3)
12#define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4) 12#define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4)
13#define NF_NAT_RANGE_PROTO_OFFSET (1 << 5)
13 14
14#define NF_NAT_RANGE_PROTO_RANDOM_ALL \ 15#define NF_NAT_RANGE_PROTO_RANDOM_ALL \
15 (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY) 16 (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY)
@@ -17,7 +18,7 @@
17#define NF_NAT_RANGE_MASK \ 18#define NF_NAT_RANGE_MASK \
18 (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \ 19 (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \
19 NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \ 20 NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \
20 NF_NAT_RANGE_PROTO_RANDOM_FULLY) 21 NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET)
21 22
22struct nf_nat_ipv4_range { 23struct nf_nat_ipv4_range {
23 unsigned int flags; 24 unsigned int flags;
@@ -40,4 +41,13 @@ struct nf_nat_range {
40 union nf_conntrack_man_proto max_proto; 41 union nf_conntrack_man_proto max_proto;
41}; 42};
42 43
44struct nf_nat_range2 {
45 unsigned int flags;
46 union nf_inet_addr min_addr;
47 union nf_inet_addr max_addr;
48 union nf_conntrack_man_proto min_proto;
49 union nf_conntrack_man_proto max_proto;
50 union nf_conntrack_man_proto base_proto;
51};
52
43#endif /* _NETFILTER_NF_NAT_H */ 53#endif /* _NETFILTER_NF_NAT_H */
diff --git a/include/uapi/linux/netfilter/nf_osf.h b/include/uapi/linux/netfilter/nf_osf.h
new file mode 100644
index 000000000000..45376eae31ef
--- /dev/null
+++ b/include/uapi/linux/netfilter/nf_osf.h
@@ -0,0 +1,90 @@
1#ifndef _NF_OSF_H
2#define _NF_OSF_H
3
4#define MAXGENRELEN 32
5
6#define NF_OSF_GENRE (1 << 0)
7#define NF_OSF_TTL (1 << 1)
8#define NF_OSF_LOG (1 << 2)
9#define NF_OSF_INVERT (1 << 3)
10
11#define NF_OSF_LOGLEVEL_ALL 0 /* log all matched fingerprints */
12#define NF_OSF_LOGLEVEL_FIRST 1 /* log only the first matced fingerprint */
13#define NF_OSF_LOGLEVEL_ALL_KNOWN 2 /* do not log unknown packets */
14
15#define NF_OSF_TTL_TRUE 0 /* True ip and fingerprint TTL comparison */
16
17/* Do not compare ip and fingerprint TTL at all */
18#define NF_OSF_TTL_NOCHECK 2
19
20/* Wildcard MSS (kind of).
21 * It is used to implement a state machine for the different wildcard values
22 * of the MSS and window sizes.
23 */
24struct nf_osf_wc {
25 __u32 wc;
26 __u32 val;
27};
28
29/* This struct represents IANA options
30 * http://www.iana.org/assignments/tcp-parameters
31 */
32struct nf_osf_opt {
33 __u16 kind, length;
34 struct nf_osf_wc wc;
35};
36
37struct nf_osf_info {
38 char genre[MAXGENRELEN];
39 __u32 len;
40 __u32 flags;
41 __u32 loglevel;
42 __u32 ttl;
43};
44
45struct nf_osf_user_finger {
46 struct nf_osf_wc wss;
47
48 __u8 ttl, df;
49 __u16 ss, mss;
50 __u16 opt_num;
51
52 char genre[MAXGENRELEN];
53 char version[MAXGENRELEN];
54 char subtype[MAXGENRELEN];
55
56 /* MAX_IPOPTLEN is maximum if all options are NOPs or EOLs */
57 struct nf_osf_opt opt[MAX_IPOPTLEN];
58};
59
60struct nf_osf_finger {
61 struct rcu_head rcu_head;
62 struct list_head finger_entry;
63 struct nf_osf_user_finger finger;
64};
65
66struct nf_osf_nlmsg {
67 struct nf_osf_user_finger f;
68 struct iphdr ip;
69 struct tcphdr tcp;
70};
71
72/* Defines for IANA option kinds */
73enum iana_options {
74 OSFOPT_EOL = 0, /* End of options */
75 OSFOPT_NOP, /* NOP */
76 OSFOPT_MSS, /* Maximum segment size */
77 OSFOPT_WSO, /* Window scale option */
78 OSFOPT_SACKP, /* SACK permitted */
79 OSFOPT_SACK, /* SACK */
80 OSFOPT_ECHO,
81 OSFOPT_ECHOREPLY,
82 OSFOPT_TS, /* Timestamp option */
83 OSFOPT_POCP, /* Partial Order Connection Permitted */
84 OSFOPT_POSP, /* Partial Order Service Profile */
85
86 /* Others are not used in the current OSF */
87 OSFOPT_EMPTY = 255,
88};
89
90#endif /* _NF_OSF_H */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 6a3d653d5b27..ce031cf72288 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -831,7 +831,9 @@ enum nft_rt_keys {
831 NFT_RT_NEXTHOP4, 831 NFT_RT_NEXTHOP4,
832 NFT_RT_NEXTHOP6, 832 NFT_RT_NEXTHOP6,
833 NFT_RT_TCPMSS, 833 NFT_RT_TCPMSS,
834 __NFT_RT_MAX
834}; 835};
836#define NFT_RT_MAX (__NFT_RT_MAX - 1)
835 837
836/** 838/**
837 * enum nft_hash_types - nf_tables hash expression types 839 * enum nft_hash_types - nf_tables hash expression types
@@ -949,7 +951,9 @@ enum nft_ct_keys {
949 NFT_CT_DST_IP, 951 NFT_CT_DST_IP,
950 NFT_CT_SRC_IP6, 952 NFT_CT_SRC_IP6,
951 NFT_CT_DST_IP6, 953 NFT_CT_DST_IP6,
954 __NFT_CT_MAX
952}; 955};
956#define NFT_CT_MAX (__NFT_CT_MAX - 1)
953 957
954/** 958/**
955 * enum nft_ct_attributes - nf_tables ct expression netlink attributes 959 * enum nft_ct_attributes - nf_tables ct expression netlink attributes
@@ -1450,6 +1454,8 @@ enum nft_trace_types {
1450 * @NFTA_NG_MODULUS: maximum counter value (NLA_U32) 1454 * @NFTA_NG_MODULUS: maximum counter value (NLA_U32)
1451 * @NFTA_NG_TYPE: operation type (NLA_U32) 1455 * @NFTA_NG_TYPE: operation type (NLA_U32)
1452 * @NFTA_NG_OFFSET: offset to be added to the counter (NLA_U32) 1456 * @NFTA_NG_OFFSET: offset to be added to the counter (NLA_U32)
1457 * @NFTA_NG_SET_NAME: name of the map to lookup (NLA_STRING)
1458 * @NFTA_NG_SET_ID: id of the map (NLA_U32)
1453 */ 1459 */
1454enum nft_ng_attributes { 1460enum nft_ng_attributes {
1455 NFTA_NG_UNSPEC, 1461 NFTA_NG_UNSPEC,
@@ -1457,6 +1463,8 @@ enum nft_ng_attributes {
1457 NFTA_NG_MODULUS, 1463 NFTA_NG_MODULUS,
1458 NFTA_NG_TYPE, 1464 NFTA_NG_TYPE,
1459 NFTA_NG_OFFSET, 1465 NFTA_NG_OFFSET,
1466 NFTA_NG_SET_NAME,
1467 NFTA_NG_SET_ID,
1460 __NFTA_NG_MAX 1468 __NFTA_NG_MAX
1461}; 1469};
1462#define NFTA_NG_MAX (__NFTA_NG_MAX - 1) 1470#define NFTA_NG_MAX (__NFTA_NG_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index 77987111cab0..1d41810d17e2 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -262,6 +262,7 @@ enum ctattr_stats_cpu {
262enum ctattr_stats_global { 262enum ctattr_stats_global {
263 CTA_STATS_GLOBAL_UNSPEC, 263 CTA_STATS_GLOBAL_UNSPEC,
264 CTA_STATS_GLOBAL_ENTRIES, 264 CTA_STATS_GLOBAL_ENTRIES,
265 CTA_STATS_GLOBAL_MAX_ENTRIES,
265 __CTA_STATS_GLOBAL_MAX, 266 __CTA_STATS_GLOBAL_MAX,
266}; 267};
267#define CTA_STATS_GLOBAL_MAX (__CTA_STATS_GLOBAL_MAX - 1) 268#define CTA_STATS_GLOBAL_MAX (__CTA_STATS_GLOBAL_MAX - 1)
diff --git a/include/uapi/linux/netfilter/xt_osf.h b/include/uapi/linux/netfilter/xt_osf.h
index dad197e2ab99..72956eceeb09 100644
--- a/include/uapi/linux/netfilter/xt_osf.h
+++ b/include/uapi/linux/netfilter/xt_osf.h
@@ -23,101 +23,29 @@
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/ip.h> 24#include <linux/ip.h>
25#include <linux/tcp.h> 25#include <linux/tcp.h>
26#include <linux/netfilter/nf_osf.h>
26 27
27#define MAXGENRELEN 32 28#define XT_OSF_GENRE NF_OSF_GENRE
29#define XT_OSF_INVERT NF_OSF_INVERT
28 30
29#define XT_OSF_GENRE (1<<0) 31#define XT_OSF_TTL NF_OSF_TTL
30#define XT_OSF_TTL (1<<1) 32#define XT_OSF_LOG NF_OSF_LOG
31#define XT_OSF_LOG (1<<2)
32#define XT_OSF_INVERT (1<<3)
33 33
34#define XT_OSF_LOGLEVEL_ALL 0 /* log all matched fingerprints */ 34#define XT_OSF_LOGLEVEL_ALL NF_OSF_LOGLEVEL_ALL
35#define XT_OSF_LOGLEVEL_FIRST 1 /* log only the first matced fingerprint */ 35#define XT_OSF_LOGLEVEL_FIRST NF_OSF_LOGLEVEL_FIRST
36#define XT_OSF_LOGLEVEL_ALL_KNOWN 2 /* do not log unknown packets */ 36#define XT_OSF_LOGLEVEL_ALL_KNOWN NF_OSF_LOGLEVEL_ALL_KNOWN
37 37
38#define XT_OSF_TTL_TRUE 0 /* True ip and fingerprint TTL comparison */ 38#define XT_OSF_TTL_TRUE NF_OSF_TTL_TRUE
39#define XT_OSF_TTL_LESS 1 /* Check if ip TTL is less than fingerprint one */ 39#define XT_OSF_TTL_NOCHECK NF_OSF_TTL_NOCHECK
40#define XT_OSF_TTL_NOCHECK 2 /* Do not compare ip and fingerprint TTL at all */
41 40
42struct xt_osf_info { 41#define XT_OSF_TTL_LESS 1 /* Check if ip TTL is less than fingerprint one */
43 char genre[MAXGENRELEN];
44 __u32 len;
45 __u32 flags;
46 __u32 loglevel;
47 __u32 ttl;
48};
49
50/*
51 * Wildcard MSS (kind of).
52 * It is used to implement a state machine for the different wildcard values
53 * of the MSS and window sizes.
54 */
55struct xt_osf_wc {
56 __u32 wc;
57 __u32 val;
58};
59
60/*
61 * This struct represents IANA options
62 * http://www.iana.org/assignments/tcp-parameters
63 */
64struct xt_osf_opt {
65 __u16 kind, length;
66 struct xt_osf_wc wc;
67};
68
69struct xt_osf_user_finger {
70 struct xt_osf_wc wss;
71
72 __u8 ttl, df;
73 __u16 ss, mss;
74 __u16 opt_num;
75
76 char genre[MAXGENRELEN];
77 char version[MAXGENRELEN];
78 char subtype[MAXGENRELEN];
79 42
80 /* MAX_IPOPTLEN is maximum if all options are NOPs or EOLs */ 43#define xt_osf_wc nf_osf_wc
81 struct xt_osf_opt opt[MAX_IPOPTLEN]; 44#define xt_osf_opt nf_osf_opt
82}; 45#define xt_osf_info nf_osf_info
83 46#define xt_osf_user_finger nf_osf_user_finger
84struct xt_osf_nlmsg { 47#define xt_osf_finger nf_osf_finger
85 struct xt_osf_user_finger f; 48#define xt_osf_nlmsg nf_osf_nlmsg
86 struct iphdr ip;
87 struct tcphdr tcp;
88};
89
90/* Defines for IANA option kinds */
91
92enum iana_options {
93 OSFOPT_EOL = 0, /* End of options */
94 OSFOPT_NOP, /* NOP */
95 OSFOPT_MSS, /* Maximum segment size */
96 OSFOPT_WSO, /* Window scale option */
97 OSFOPT_SACKP, /* SACK permitted */
98 OSFOPT_SACK, /* SACK */
99 OSFOPT_ECHO,
100 OSFOPT_ECHOREPLY,
101 OSFOPT_TS, /* Timestamp option */
102 OSFOPT_POCP, /* Partial Order Connection Permitted */
103 OSFOPT_POSP, /* Partial Order Service Profile */
104
105 /* Others are not used in the current OSF */
106 OSFOPT_EMPTY = 255,
107};
108
109/*
110 * Initial window size option state machine: multiple of mss, mtu or
111 * plain numeric value. Can also be made as plain numeric value which
112 * is not a multiple of specified value.
113 */
114enum xt_osf_window_size_options {
115 OSF_WSS_PLAIN = 0,
116 OSF_WSS_MSS,
117 OSF_WSS_MTU,
118 OSF_WSS_MODULO,
119 OSF_WSS_MAX,
120};
121 49
122/* 50/*
123 * Add/remove fingerprint from the kernel. 51 * Add/remove fingerprint from the kernel.
diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h
index 0c7dc8315013..3b86c14ea49d 100644
--- a/include/uapi/linux/netfilter_bridge/ebtables.h
+++ b/include/uapi/linux/netfilter_bridge/ebtables.h
@@ -191,6 +191,12 @@ struct ebt_entry {
191 unsigned char elems[0] __attribute__ ((aligned (__alignof__(struct ebt_replace)))); 191 unsigned char elems[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
192}; 192};
193 193
194static __inline__ struct ebt_entry_target *
195ebt_get_target(struct ebt_entry *e)
196{
197 return (void *)e + e->target_offset;
198}
199
194/* {g,s}etsockopt numbers */ 200/* {g,s}etsockopt numbers */
195#define EBT_BASE_CTL 128 201#define EBT_BASE_CTL 128
196 202
diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_srh.h b/include/uapi/linux/netfilter_ipv6/ip6t_srh.h
index f3cc0ef514a7..54ed83360dac 100644
--- a/include/uapi/linux/netfilter_ipv6/ip6t_srh.h
+++ b/include/uapi/linux/netfilter_ipv6/ip6t_srh.h
@@ -17,7 +17,10 @@
17#define IP6T_SRH_LAST_GT 0x0100 17#define IP6T_SRH_LAST_GT 0x0100
18#define IP6T_SRH_LAST_LT 0x0200 18#define IP6T_SRH_LAST_LT 0x0200
19#define IP6T_SRH_TAG 0x0400 19#define IP6T_SRH_TAG 0x0400
20#define IP6T_SRH_MASK 0x07FF 20#define IP6T_SRH_PSID 0x0800
21#define IP6T_SRH_NSID 0x1000
22#define IP6T_SRH_LSID 0x2000
23#define IP6T_SRH_MASK 0x3FFF
21 24
22/* Values for "mt_invflags" field in struct ip6t_srh */ 25/* Values for "mt_invflags" field in struct ip6t_srh */
23#define IP6T_SRH_INV_NEXTHDR 0x0001 26#define IP6T_SRH_INV_NEXTHDR 0x0001
@@ -31,7 +34,10 @@
31#define IP6T_SRH_INV_LAST_GT 0x0100 34#define IP6T_SRH_INV_LAST_GT 0x0100
32#define IP6T_SRH_INV_LAST_LT 0x0200 35#define IP6T_SRH_INV_LAST_LT 0x0200
33#define IP6T_SRH_INV_TAG 0x0400 36#define IP6T_SRH_INV_TAG 0x0400
34#define IP6T_SRH_INV_MASK 0x07FF 37#define IP6T_SRH_INV_PSID 0x0800
38#define IP6T_SRH_INV_NSID 0x1000
39#define IP6T_SRH_INV_LSID 0x2000
40#define IP6T_SRH_INV_MASK 0x3FFF
35 41
36/** 42/**
37 * struct ip6t_srh - SRH match options 43 * struct ip6t_srh - SRH match options
@@ -54,4 +60,37 @@ struct ip6t_srh {
54 __u16 mt_invflags; 60 __u16 mt_invflags;
55}; 61};
56 62
63/**
64 * struct ip6t_srh1 - SRH match options (revision 1)
65 * @ next_hdr: Next header field of SRH
66 * @ hdr_len: Extension header length field of SRH
67 * @ segs_left: Segments left field of SRH
68 * @ last_entry: Last entry field of SRH
69 * @ tag: Tag field of SRH
70 * @ psid_addr: Address of previous SID in SRH SID list
71 * @ nsid_addr: Address of NEXT SID in SRH SID list
72 * @ lsid_addr: Address of LAST SID in SRH SID list
73 * @ psid_msk: Mask of previous SID in SRH SID list
74 * @ nsid_msk: Mask of next SID in SRH SID list
75 * @ lsid_msk: MAsk of last SID in SRH SID list
76 * @ mt_flags: match options
77 * @ mt_invflags: Invert the sense of match options
78 */
79
80struct ip6t_srh1 {
81 __u8 next_hdr;
82 __u8 hdr_len;
83 __u8 segs_left;
84 __u8 last_entry;
85 __u16 tag;
86 struct in6_addr psid_addr;
87 struct in6_addr nsid_addr;
88 struct in6_addr lsid_addr;
89 struct in6_addr psid_msk;
90 struct in6_addr nsid_msk;
91 struct in6_addr lsid_msk;
92 __u16 mt_flags;
93 __u16 mt_invflags;
94};
95
57#endif /*_IP6T_SRH_H*/ 96#endif /*_IP6T_SRH_H*/
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 0f272818a4d2..6b58371b1f0d 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -780,24 +780,6 @@ enum {
780 NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5, 780 NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5,
781}; 781};
782 782
783/* proc/sys/net/irda */
784enum {
785 NET_IRDA_DISCOVERY=1,
786 NET_IRDA_DEVNAME=2,
787 NET_IRDA_DEBUG=3,
788 NET_IRDA_FAST_POLL=4,
789 NET_IRDA_DISCOVERY_SLOTS=5,
790 NET_IRDA_DISCOVERY_TIMEOUT=6,
791 NET_IRDA_SLOT_TIMEOUT=7,
792 NET_IRDA_MAX_BAUD_RATE=8,
793 NET_IRDA_MIN_TX_TURN_TIME=9,
794 NET_IRDA_MAX_TX_DATA_SIZE=10,
795 NET_IRDA_MAX_TX_WINDOW=11,
796 NET_IRDA_MAX_NOREPLY_TIME=12,
797 NET_IRDA_WARN_NOREPLY_TIME=13,
798 NET_IRDA_LAP_KEEPALIVE_TIME=14,
799};
800
801 783
802/* CTL_FS names: */ 784/* CTL_FS names: */
803enum 785enum
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 379b08700a54..29eb659aa77a 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -122,6 +122,10 @@ enum {
122#define TCP_MD5SIG_EXT 32 /* TCP MD5 Signature with extensions */ 122#define TCP_MD5SIG_EXT 32 /* TCP MD5 Signature with extensions */
123#define TCP_FASTOPEN_KEY 33 /* Set the key for Fast Open (cookie) */ 123#define TCP_FASTOPEN_KEY 33 /* Set the key for Fast Open (cookie) */
124#define TCP_FASTOPEN_NO_COOKIE 34 /* Enable TFO without a TFO cookie */ 124#define TCP_FASTOPEN_NO_COOKIE 34 /* Enable TFO without a TFO cookie */
125#define TCP_ZEROCOPY_RECEIVE 35
126#define TCP_INQ 36 /* Notify bytes available to read as a cmsg on read */
127
128#define TCP_CM_INQ TCP_INQ
125 129
126struct tcp_repair_opt { 130struct tcp_repair_opt {
127 __u32 opt_code; 131 __u32 opt_code;
@@ -276,4 +280,11 @@ struct tcp_diag_md5sig {
276 __u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN]; 280 __u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN];
277}; 281};
278 282
283/* setsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, ...) */
284
285struct tcp_zerocopy_receive {
286 __u64 address; /* in: address of mapping */
287 __u32 length; /* in/out: number of bytes to map/mapped */
288 __u32 recv_skip_hint; /* out: amount of bytes to skip */
289};
279#endif /* _UAPI_LINUX_TCP_H */ 290#endif /* _UAPI_LINUX_TCP_H */
diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h
index 16a296612ba4..4c0338ea308a 100644
--- a/include/uapi/linux/time.h
+++ b/include/uapi/linux/time.h
@@ -73,7 +73,6 @@ struct __kernel_old_timeval {
73 */ 73 */
74#define CLOCK_SGI_CYCLE 10 74#define CLOCK_SGI_CYCLE 10
75#define CLOCK_TAI 11 75#define CLOCK_TAI 11
76#define CLOCK_MONOTONIC_ACTIVE 12
77 76
78#define MAX_CLOCKS 16 77#define MAX_CLOCKS 16
79#define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC) 78#define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC)
diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h
index bf6d28677cfe..6b2fd4d9655f 100644
--- a/include/uapi/linux/tipc.h
+++ b/include/uapi/linux/tipc.h
@@ -209,16 +209,16 @@ struct tipc_group_req {
209 * The string formatting for each name element is: 209 * The string formatting for each name element is:
210 * media: media 210 * media: media
211 * interface: media:interface name 211 * interface: media:interface name
212 * link: Z.C.N:interface-Z.C.N:interface 212 * link: node:interface-node:interface
213 *
214 */ 213 */
215 214#define TIPC_NODEID_LEN 16
216#define TIPC_MAX_MEDIA_NAME 16 215#define TIPC_MAX_MEDIA_NAME 16
217#define TIPC_MAX_IF_NAME 16 216#define TIPC_MAX_IF_NAME 16
218#define TIPC_MAX_BEARER_NAME 32 217#define TIPC_MAX_BEARER_NAME 32
219#define TIPC_MAX_LINK_NAME 68 218#define TIPC_MAX_LINK_NAME 68
220 219
221#define SIOCGETLINKNAME SIOCPROTOPRIVATE 220#define SIOCGETLINKNAME SIOCPROTOPRIVATE
221#define SIOCGETNODEID (SIOCPROTOPRIVATE + 1)
222 222
223struct tipc_sioc_ln_req { 223struct tipc_sioc_ln_req {
224 __u32 peer; 224 __u32 peer;
@@ -226,6 +226,10 @@ struct tipc_sioc_ln_req {
226 char linkname[TIPC_MAX_LINK_NAME]; 226 char linkname[TIPC_MAX_LINK_NAME];
227}; 227};
228 228
229struct tipc_sioc_nodeid_req {
230 __u32 peer;
231 char node_id[TIPC_NODEID_LEN];
232};
229 233
230/* The macros and functions below are deprecated: 234/* The macros and functions below are deprecated:
231 */ 235 */
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index 40297a3181ed..13b8cb563892 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -57,6 +57,21 @@ struct virtio_balloon_config {
57#define VIRTIO_BALLOON_S_HTLB_PGFAIL 9 /* Hugetlb page allocation failures */ 57#define VIRTIO_BALLOON_S_HTLB_PGFAIL 9 /* Hugetlb page allocation failures */
58#define VIRTIO_BALLOON_S_NR 10 58#define VIRTIO_BALLOON_S_NR 10
59 59
60#define VIRTIO_BALLOON_S_NAMES_WITH_PREFIX(VIRTIO_BALLOON_S_NAMES_prefix) { \
61 VIRTIO_BALLOON_S_NAMES_prefix "swap-in", \
62 VIRTIO_BALLOON_S_NAMES_prefix "swap-out", \
63 VIRTIO_BALLOON_S_NAMES_prefix "major-faults", \
64 VIRTIO_BALLOON_S_NAMES_prefix "minor-faults", \
65 VIRTIO_BALLOON_S_NAMES_prefix "free-memory", \
66 VIRTIO_BALLOON_S_NAMES_prefix "total-memory", \
67 VIRTIO_BALLOON_S_NAMES_prefix "available-memory", \
68 VIRTIO_BALLOON_S_NAMES_prefix "disk-caches", \
69 VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-allocations", \
70 VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-failures" \
71}
72
73#define VIRTIO_BALLOON_S_NAMES VIRTIO_BALLOON_S_NAMES_WITH_PREFIX("")
74
60/* 75/*
61 * Memory statistics structure. 76 * Memory statistics structure.
62 * Driver fills an array of these structures and passes to device. 77 * Driver fills an array of these structures and passes to device.
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 634415c7fbcd..098eca568c2b 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -326,6 +326,9 @@ retry:
326 if (ret > 0) { 326 if (ret > 0) {
327 if (apply) 327 if (apply)
328 apply_bytes -= ret; 328 apply_bytes -= ret;
329
330 sg->offset += ret;
331 sg->length -= ret;
329 size -= ret; 332 size -= ret;
330 offset += ret; 333 offset += ret;
331 if (uncharge) 334 if (uncharge)
@@ -333,8 +336,6 @@ retry:
333 goto retry; 336 goto retry;
334 } 337 }
335 338
336 sg->length = size;
337 sg->offset = offset;
338 return ret; 339 return ret;
339 } 340 }
340 341
@@ -392,7 +393,8 @@ static void return_mem_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
392 } while (i != md->sg_end); 393 } while (i != md->sg_end);
393} 394}
394 395
395static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md) 396static void free_bytes_sg(struct sock *sk, int bytes,
397 struct sk_msg_buff *md, bool charge)
396{ 398{
397 struct scatterlist *sg = md->sg_data; 399 struct scatterlist *sg = md->sg_data;
398 int i = md->sg_start, free; 400 int i = md->sg_start, free;
@@ -402,11 +404,13 @@ static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
402 if (bytes < free) { 404 if (bytes < free) {
403 sg[i].length -= bytes; 405 sg[i].length -= bytes;
404 sg[i].offset += bytes; 406 sg[i].offset += bytes;
405 sk_mem_uncharge(sk, bytes); 407 if (charge)
408 sk_mem_uncharge(sk, bytes);
406 break; 409 break;
407 } 410 }
408 411
409 sk_mem_uncharge(sk, sg[i].length); 412 if (charge)
413 sk_mem_uncharge(sk, sg[i].length);
410 put_page(sg_page(&sg[i])); 414 put_page(sg_page(&sg[i]));
411 bytes -= sg[i].length; 415 bytes -= sg[i].length;
412 sg[i].length = 0; 416 sg[i].length = 0;
@@ -417,6 +421,7 @@ static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
417 if (i == MAX_SKB_FRAGS) 421 if (i == MAX_SKB_FRAGS)
418 i = 0; 422 i = 0;
419 } 423 }
424 md->sg_start = i;
420} 425}
421 426
422static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md) 427static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
@@ -575,10 +580,10 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
575 struct sk_msg_buff *md, 580 struct sk_msg_buff *md,
576 int flags) 581 int flags)
577{ 582{
583 bool ingress = !!(md->flags & BPF_F_INGRESS);
578 struct smap_psock *psock; 584 struct smap_psock *psock;
579 struct scatterlist *sg; 585 struct scatterlist *sg;
580 int i, err, free = 0; 586 int err = 0;
581 bool ingress = !!(md->flags & BPF_F_INGRESS);
582 587
583 sg = md->sg_data; 588 sg = md->sg_data;
584 589
@@ -606,16 +611,8 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
606out_rcu: 611out_rcu:
607 rcu_read_unlock(); 612 rcu_read_unlock();
608out: 613out:
609 i = md->sg_start; 614 free_bytes_sg(NULL, send, md, false);
610 while (sg[i].length) { 615 return err;
611 free += sg[i].length;
612 put_page(sg_page(&sg[i]));
613 sg[i].length = 0;
614 i++;
615 if (i == MAX_SKB_FRAGS)
616 i = 0;
617 }
618 return free;
619} 616}
620 617
621static inline void bpf_md_init(struct smap_psock *psock) 618static inline void bpf_md_init(struct smap_psock *psock)
@@ -700,19 +697,26 @@ more_data:
700 err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags); 697 err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags);
701 lock_sock(sk); 698 lock_sock(sk);
702 699
700 if (unlikely(err < 0)) {
701 free_start_sg(sk, m);
702 psock->sg_size = 0;
703 if (!cork)
704 *copied -= send;
705 } else {
706 psock->sg_size -= send;
707 }
708
703 if (cork) { 709 if (cork) {
704 free_start_sg(sk, m); 710 free_start_sg(sk, m);
711 psock->sg_size = 0;
705 kfree(m); 712 kfree(m);
706 m = NULL; 713 m = NULL;
714 err = 0;
707 } 715 }
708 if (unlikely(err))
709 *copied -= err;
710 else
711 psock->sg_size -= send;
712 break; 716 break;
713 case __SK_DROP: 717 case __SK_DROP:
714 default: 718 default:
715 free_bytes_sg(sk, send, m); 719 free_bytes_sg(sk, send, m, true);
716 apply_bytes_dec(psock, send); 720 apply_bytes_dec(psock, send);
717 *copied -= send; 721 *copied -= send;
718 psock->sg_size -= send; 722 psock->sg_size -= send;
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index ce6848e46e94..1725b902983f 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -491,7 +491,7 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
491 if (!uprobe) 491 if (!uprobe)
492 return NULL; 492 return NULL;
493 493
494 uprobe->inode = igrab(inode); 494 uprobe->inode = inode;
495 uprobe->offset = offset; 495 uprobe->offset = offset;
496 init_rwsem(&uprobe->register_rwsem); 496 init_rwsem(&uprobe->register_rwsem);
497 init_rwsem(&uprobe->consumer_rwsem); 497 init_rwsem(&uprobe->consumer_rwsem);
@@ -502,7 +502,6 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
502 if (cur_uprobe) { 502 if (cur_uprobe) {
503 kfree(uprobe); 503 kfree(uprobe);
504 uprobe = cur_uprobe; 504 uprobe = cur_uprobe;
505 iput(inode);
506 } 505 }
507 506
508 return uprobe; 507 return uprobe;
@@ -701,7 +700,6 @@ static void delete_uprobe(struct uprobe *uprobe)
701 rb_erase(&uprobe->rb_node, &uprobes_tree); 700 rb_erase(&uprobe->rb_node, &uprobes_tree);
702 spin_unlock(&uprobes_treelock); 701 spin_unlock(&uprobes_treelock);
703 RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */ 702 RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
704 iput(uprobe->inode);
705 put_uprobe(uprobe); 703 put_uprobe(uprobe);
706} 704}
707 705
@@ -873,7 +871,8 @@ static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *u
873 * tuple). Creation refcount stops uprobe_unregister from freeing the 871 * tuple). Creation refcount stops uprobe_unregister from freeing the
874 * @uprobe even before the register operation is complete. Creation 872 * @uprobe even before the register operation is complete. Creation
875 * refcount is released when the last @uc for the @uprobe 873 * refcount is released when the last @uc for the @uprobe
876 * unregisters. 874 * unregisters. Caller of uprobe_register() is required to keep @inode
875 * (and the containing mount) referenced.
877 * 876 *
878 * Return errno if it cannot successully install probes 877 * Return errno if it cannot successully install probes
879 * else return 0 (success) 878 * else return 0 (success)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 102160ff5c66..ea619021d901 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2428,7 +2428,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2428 struct kprobe_blacklist_entry *ent = 2428 struct kprobe_blacklist_entry *ent =
2429 list_entry(v, struct kprobe_blacklist_entry, list); 2429 list_entry(v, struct kprobe_blacklist_entry, list);
2430 2430
2431 seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr, 2431 seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2432 (void *)ent->end_addr, (void *)ent->start_addr); 2432 (void *)ent->end_addr, (void *)ent->start_addr);
2433 return 0; 2433 return 0;
2434} 2434}
diff --git a/kernel/module.c b/kernel/module.c
index a6e43a5806a1..ce8066b88178 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1472,7 +1472,8 @@ static ssize_t module_sect_show(struct module_attribute *mattr,
1472{ 1472{
1473 struct module_sect_attr *sattr = 1473 struct module_sect_attr *sattr =
1474 container_of(mattr, struct module_sect_attr, mattr); 1474 container_of(mattr, struct module_sect_attr, mattr);
1475 return sprintf(buf, "0x%pK\n", (void *)sattr->address); 1475 return sprintf(buf, "0x%px\n", kptr_restrict < 2 ?
1476 (void *)sattr->address : NULL);
1476} 1477}
1477 1478
1478static void free_sect_attrs(struct module_sect_attrs *sect_attrs) 1479static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index e8c0dab4fd65..07148b497451 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -704,24 +704,6 @@ static const struct bin_table bin_net_netfilter_table[] = {
704 {} 704 {}
705}; 705};
706 706
707static const struct bin_table bin_net_irda_table[] = {
708 { CTL_INT, NET_IRDA_DISCOVERY, "discovery" },
709 { CTL_STR, NET_IRDA_DEVNAME, "devname" },
710 { CTL_INT, NET_IRDA_DEBUG, "debug" },
711 { CTL_INT, NET_IRDA_FAST_POLL, "fast_poll_increase" },
712 { CTL_INT, NET_IRDA_DISCOVERY_SLOTS, "discovery_slots" },
713 { CTL_INT, NET_IRDA_DISCOVERY_TIMEOUT, "discovery_timeout" },
714 { CTL_INT, NET_IRDA_SLOT_TIMEOUT, "slot_timeout" },
715 { CTL_INT, NET_IRDA_MAX_BAUD_RATE, "max_baud_rate" },
716 { CTL_INT, NET_IRDA_MIN_TX_TURN_TIME, "min_tx_turn_time" },
717 { CTL_INT, NET_IRDA_MAX_TX_DATA_SIZE, "max_tx_data_size" },
718 { CTL_INT, NET_IRDA_MAX_TX_WINDOW, "max_tx_window" },
719 { CTL_INT, NET_IRDA_MAX_NOREPLY_TIME, "max_noreply_time" },
720 { CTL_INT, NET_IRDA_WARN_NOREPLY_TIME, "warn_noreply_time" },
721 { CTL_INT, NET_IRDA_LAP_KEEPALIVE_TIME, "lap_keepalive_time" },
722 {}
723};
724
725static const struct bin_table bin_net_table[] = { 707static const struct bin_table bin_net_table[] = {
726 { CTL_DIR, NET_CORE, "core", bin_net_core_table }, 708 { CTL_DIR, NET_CORE, "core", bin_net_core_table },
727 /* NET_ETHER not used */ 709 /* NET_ETHER not used */
@@ -743,7 +725,7 @@ static const struct bin_table bin_net_table[] = {
743 { CTL_DIR, NET_LLC, "llc", bin_net_llc_table }, 725 { CTL_DIR, NET_LLC, "llc", bin_net_llc_table },
744 { CTL_DIR, NET_NETFILTER, "netfilter", bin_net_netfilter_table }, 726 { CTL_DIR, NET_NETFILTER, "netfilter", bin_net_netfilter_table },
745 /* NET_DCCP "dccp" no longer used */ 727 /* NET_DCCP "dccp" no longer used */
746 { CTL_DIR, NET_IRDA, "irda", bin_net_irda_table }, 728 /* NET_IRDA "irda" no longer used */
747 { CTL_INT, 2089, "nf_conntrack_max" }, 729 { CTL_INT, 2089, "nf_conntrack_max" },
748 {} 730 {}
749}; 731};
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index eda1210ce50f..14e858753d76 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -91,6 +91,11 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
91 .get_time = &ktime_get_real, 91 .get_time = &ktime_get_real,
92 }, 92 },
93 { 93 {
94 .index = HRTIMER_BASE_BOOTTIME,
95 .clockid = CLOCK_BOOTTIME,
96 .get_time = &ktime_get_boottime,
97 },
98 {
94 .index = HRTIMER_BASE_TAI, 99 .index = HRTIMER_BASE_TAI,
95 .clockid = CLOCK_TAI, 100 .clockid = CLOCK_TAI,
96 .get_time = &ktime_get_clocktai, 101 .get_time = &ktime_get_clocktai,
@@ -106,6 +111,11 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
106 .get_time = &ktime_get_real, 111 .get_time = &ktime_get_real,
107 }, 112 },
108 { 113 {
114 .index = HRTIMER_BASE_BOOTTIME_SOFT,
115 .clockid = CLOCK_BOOTTIME,
116 .get_time = &ktime_get_boottime,
117 },
118 {
109 .index = HRTIMER_BASE_TAI_SOFT, 119 .index = HRTIMER_BASE_TAI_SOFT,
110 .clockid = CLOCK_TAI, 120 .clockid = CLOCK_TAI,
111 .get_time = &ktime_get_clocktai, 121 .get_time = &ktime_get_clocktai,
@@ -119,7 +129,7 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
119 129
120 [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME, 130 [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
121 [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC, 131 [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
122 [CLOCK_BOOTTIME] = HRTIMER_BASE_MONOTONIC, 132 [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
123 [CLOCK_TAI] = HRTIMER_BASE_TAI, 133 [CLOCK_TAI] = HRTIMER_BASE_TAI,
124}; 134};
125 135
@@ -571,12 +581,14 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_
571static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) 581static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
572{ 582{
573 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset; 583 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
584 ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
574 ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset; 585 ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
575 586
576 ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq, 587 ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
577 offs_real, offs_tai); 588 offs_real, offs_boot, offs_tai);
578 589
579 base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real; 590 base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
591 base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
580 base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai; 592 base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;
581 593
582 return now; 594 return now;
diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
index e0dbae98db9d..69a937c3cd81 100644
--- a/kernel/time/posix-stubs.c
+++ b/kernel/time/posix-stubs.c
@@ -83,8 +83,6 @@ int do_clock_gettime(clockid_t which_clock, struct timespec64 *tp)
83 case CLOCK_BOOTTIME: 83 case CLOCK_BOOTTIME:
84 get_monotonic_boottime64(tp); 84 get_monotonic_boottime64(tp);
85 break; 85 break;
86 case CLOCK_MONOTONIC_ACTIVE:
87 ktime_get_active_ts64(tp);
88 default: 86 default:
89 return -EINVAL; 87 return -EINVAL;
90 } 88 }
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index b6899b5060bd..10b7186d0638 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -252,16 +252,15 @@ static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *
252 return 0; 252 return 0;
253} 253}
254 254
255static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp) 255static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
256{ 256{
257 timekeeping_clocktai64(tp); 257 get_monotonic_boottime64(tp);
258 return 0; 258 return 0;
259} 259}
260 260
261static int posix_get_monotonic_active(clockid_t which_clock, 261static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
262 struct timespec64 *tp)
263{ 262{
264 ktime_get_active_ts64(tp); 263 timekeeping_clocktai64(tp);
265 return 0; 264 return 0;
266} 265}
267 266
@@ -1317,9 +1316,19 @@ static const struct k_clock clock_tai = {
1317 .timer_arm = common_hrtimer_arm, 1316 .timer_arm = common_hrtimer_arm,
1318}; 1317};
1319 1318
1320static const struct k_clock clock_monotonic_active = { 1319static const struct k_clock clock_boottime = {
1321 .clock_getres = posix_get_hrtimer_res, 1320 .clock_getres = posix_get_hrtimer_res,
1322 .clock_get = posix_get_monotonic_active, 1321 .clock_get = posix_get_boottime,
1322 .nsleep = common_nsleep,
1323 .timer_create = common_timer_create,
1324 .timer_set = common_timer_set,
1325 .timer_get = common_timer_get,
1326 .timer_del = common_timer_del,
1327 .timer_rearm = common_hrtimer_rearm,
1328 .timer_forward = common_hrtimer_forward,
1329 .timer_remaining = common_hrtimer_remaining,
1330 .timer_try_to_cancel = common_hrtimer_try_to_cancel,
1331 .timer_arm = common_hrtimer_arm,
1323}; 1332};
1324 1333
1325static const struct k_clock * const posix_clocks[] = { 1334static const struct k_clock * const posix_clocks[] = {
@@ -1330,11 +1339,10 @@ static const struct k_clock * const posix_clocks[] = {
1330 [CLOCK_MONOTONIC_RAW] = &clock_monotonic_raw, 1339 [CLOCK_MONOTONIC_RAW] = &clock_monotonic_raw,
1331 [CLOCK_REALTIME_COARSE] = &clock_realtime_coarse, 1340 [CLOCK_REALTIME_COARSE] = &clock_realtime_coarse,
1332 [CLOCK_MONOTONIC_COARSE] = &clock_monotonic_coarse, 1341 [CLOCK_MONOTONIC_COARSE] = &clock_monotonic_coarse,
1333 [CLOCK_BOOTTIME] = &clock_monotonic, 1342 [CLOCK_BOOTTIME] = &clock_boottime,
1334 [CLOCK_REALTIME_ALARM] = &alarm_clock, 1343 [CLOCK_REALTIME_ALARM] = &alarm_clock,
1335 [CLOCK_BOOTTIME_ALARM] = &alarm_clock, 1344 [CLOCK_BOOTTIME_ALARM] = &alarm_clock,
1336 [CLOCK_TAI] = &clock_tai, 1345 [CLOCK_TAI] = &clock_tai,
1337 [CLOCK_MONOTONIC_ACTIVE] = &clock_monotonic_active,
1338}; 1346};
1339 1347
1340static const struct k_clock *clockid_to_kclock(const clockid_t id) 1348static const struct k_clock *clockid_to_kclock(const clockid_t id)
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 099572ca4a8f..49edc1c4f3e6 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -419,19 +419,6 @@ void tick_suspend_local(void)
419 clockevents_shutdown(td->evtdev); 419 clockevents_shutdown(td->evtdev);
420} 420}
421 421
422static void tick_forward_next_period(void)
423{
424 ktime_t delta, now = ktime_get();
425 u64 n;
426
427 delta = ktime_sub(now, tick_next_period);
428 n = ktime_divns(delta, tick_period);
429 tick_next_period += n * tick_period;
430 if (tick_next_period < now)
431 tick_next_period += tick_period;
432 tick_sched_forward_next_period();
433}
434
435/** 422/**
436 * tick_resume_local - Resume the local tick device 423 * tick_resume_local - Resume the local tick device
437 * 424 *
@@ -444,8 +431,6 @@ void tick_resume_local(void)
444 struct tick_device *td = this_cpu_ptr(&tick_cpu_device); 431 struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
445 bool broadcast = tick_resume_check_broadcast(); 432 bool broadcast = tick_resume_check_broadcast();
446 433
447 tick_forward_next_period();
448
449 clockevents_tick_resume(td->evtdev); 434 clockevents_tick_resume(td->evtdev);
450 if (!broadcast) { 435 if (!broadcast) {
451 if (td->mode == TICKDEV_MODE_PERIODIC) 436 if (td->mode == TICKDEV_MODE_PERIODIC)
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 21efab7485ca..e277284c2831 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -141,12 +141,6 @@ static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
141static inline bool tick_broadcast_oneshot_available(void) { return tick_oneshot_possible(); } 141static inline bool tick_broadcast_oneshot_available(void) { return tick_oneshot_possible(); }
142#endif /* !(BROADCAST && ONESHOT) */ 142#endif /* !(BROADCAST && ONESHOT) */
143 143
144#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
145extern void tick_sched_forward_next_period(void);
146#else
147static inline void tick_sched_forward_next_period(void) { }
148#endif
149
150/* NO_HZ_FULL internal */ 144/* NO_HZ_FULL internal */
151#ifdef CONFIG_NO_HZ_FULL 145#ifdef CONFIG_NO_HZ_FULL
152extern void tick_nohz_init(void); 146extern void tick_nohz_init(void);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 646645e981f9..da9455a6b42b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -52,15 +52,6 @@ struct tick_sched *tick_get_tick_sched(int cpu)
52static ktime_t last_jiffies_update; 52static ktime_t last_jiffies_update;
53 53
54/* 54/*
55 * Called after resume. Make sure that jiffies are not fast forwarded due to
56 * clock monotonic being forwarded by the suspended time.
57 */
58void tick_sched_forward_next_period(void)
59{
60 last_jiffies_update = tick_next_period;
61}
62
63/*
64 * Must be called with interrupts disabled ! 55 * Must be called with interrupts disabled !
65 */ 56 */
66static void tick_do_update_jiffies64(ktime_t now) 57static void tick_do_update_jiffies64(ktime_t now)
@@ -804,12 +795,12 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
804 return; 795 return;
805 } 796 }
806 797
807 hrtimer_set_expires(&ts->sched_timer, tick); 798 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
808 799 hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED);
809 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) 800 } else {
810 hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED); 801 hrtimer_set_expires(&ts->sched_timer, tick);
811 else
812 tick_program_event(tick, 1); 802 tick_program_event(tick, 1);
803 }
813} 804}
814 805
815static void tick_nohz_retain_tick(struct tick_sched *ts) 806static void tick_nohz_retain_tick(struct tick_sched *ts)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index dcf7f20fcd12..49cbceef5deb 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -138,12 +138,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
138 138
139static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) 139static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
140{ 140{
141 /* Update both bases so mono and raw stay coupled. */ 141 tk->offs_boot = ktime_add(tk->offs_boot, delta);
142 tk->tkr_mono.base += delta;
143 tk->tkr_raw.base += delta;
144
145 /* Accumulate time spent in suspend */
146 tk->time_suspended += delta;
147} 142}
148 143
149/* 144/*
@@ -473,6 +468,36 @@ u64 ktime_get_raw_fast_ns(void)
473} 468}
474EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns); 469EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
475 470
471/**
472 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
473 *
474 * To keep it NMI safe since we're accessing from tracing, we're not using a
475 * separate timekeeper with updates to monotonic clock and boot offset
476 * protected with seqlocks. This has the following minor side effects:
477 *
478 * (1) Its possible that a timestamp be taken after the boot offset is updated
479 * but before the timekeeper is updated. If this happens, the new boot offset
480 * is added to the old timekeeping making the clock appear to update slightly
481 * earlier:
482 * CPU 0 CPU 1
483 * timekeeping_inject_sleeptime64()
484 * __timekeeping_inject_sleeptime(tk, delta);
485 * timestamp();
486 * timekeeping_update(tk, TK_CLEAR_NTP...);
487 *
488 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
489 * partially updated. Since the tk->offs_boot update is a rare event, this
490 * should be a rare occurrence which postprocessing should be able to handle.
491 */
492u64 notrace ktime_get_boot_fast_ns(void)
493{
494 struct timekeeper *tk = &tk_core.timekeeper;
495
496 return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
497}
498EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
499
500
476/* 501/*
477 * See comment for __ktime_get_fast_ns() vs. timestamp ordering 502 * See comment for __ktime_get_fast_ns() vs. timestamp ordering
478 */ 503 */
@@ -764,6 +789,7 @@ EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
764 789
765static ktime_t *offsets[TK_OFFS_MAX] = { 790static ktime_t *offsets[TK_OFFS_MAX] = {
766 [TK_OFFS_REAL] = &tk_core.timekeeper.offs_real, 791 [TK_OFFS_REAL] = &tk_core.timekeeper.offs_real,
792 [TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot,
767 [TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai, 793 [TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai,
768}; 794};
769 795
@@ -861,39 +887,6 @@ void ktime_get_ts64(struct timespec64 *ts)
861EXPORT_SYMBOL_GPL(ktime_get_ts64); 887EXPORT_SYMBOL_GPL(ktime_get_ts64);
862 888
863/** 889/**
864 * ktime_get_active_ts64 - Get the active non-suspended monotonic clock
865 * @ts: pointer to timespec variable
866 *
867 * The function calculates the monotonic clock from the realtime clock and
868 * the wall_to_monotonic offset, subtracts the accumulated suspend time and
869 * stores the result in normalized timespec64 format in the variable
870 * pointed to by @ts.
871 */
872void ktime_get_active_ts64(struct timespec64 *ts)
873{
874 struct timekeeper *tk = &tk_core.timekeeper;
875 struct timespec64 tomono, tsusp;
876 u64 nsec, nssusp;
877 unsigned int seq;
878
879 WARN_ON(timekeeping_suspended);
880
881 do {
882 seq = read_seqcount_begin(&tk_core.seq);
883 ts->tv_sec = tk->xtime_sec;
884 nsec = timekeeping_get_ns(&tk->tkr_mono);
885 tomono = tk->wall_to_monotonic;
886 nssusp = tk->time_suspended;
887 } while (read_seqcount_retry(&tk_core.seq, seq));
888
889 ts->tv_sec += tomono.tv_sec;
890 ts->tv_nsec = 0;
891 timespec64_add_ns(ts, nsec + tomono.tv_nsec);
892 tsusp = ns_to_timespec64(nssusp);
893 *ts = timespec64_sub(*ts, tsusp);
894}
895
896/**
897 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC 890 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
898 * 891 *
899 * Returns the seconds portion of CLOCK_MONOTONIC with a single non 892 * Returns the seconds portion of CLOCK_MONOTONIC with a single non
@@ -1593,6 +1586,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
1593 return; 1586 return;
1594 } 1587 }
1595 tk_xtime_add(tk, delta); 1588 tk_xtime_add(tk, delta);
1589 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
1596 tk_update_sleep_time(tk, timespec64_to_ktime(*delta)); 1590 tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
1597 tk_debug_account_sleep_time(delta); 1591 tk_debug_account_sleep_time(delta);
1598} 1592}
@@ -2125,7 +2119,7 @@ out:
2125void getboottime64(struct timespec64 *ts) 2119void getboottime64(struct timespec64 *ts)
2126{ 2120{
2127 struct timekeeper *tk = &tk_core.timekeeper; 2121 struct timekeeper *tk = &tk_core.timekeeper;
2128 ktime_t t = ktime_sub(tk->offs_real, tk->time_suspended); 2122 ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
2129 2123
2130 *ts = ktime_to_timespec64(t); 2124 *ts = ktime_to_timespec64(t);
2131} 2125}
@@ -2188,6 +2182,7 @@ void do_timer(unsigned long ticks)
2188 * ktime_get_update_offsets_now - hrtimer helper 2182 * ktime_get_update_offsets_now - hrtimer helper
2189 * @cwsseq: pointer to check and store the clock was set sequence number 2183 * @cwsseq: pointer to check and store the clock was set sequence number
2190 * @offs_real: pointer to storage for monotonic -> realtime offset 2184 * @offs_real: pointer to storage for monotonic -> realtime offset
2185 * @offs_boot: pointer to storage for monotonic -> boottime offset
2191 * @offs_tai: pointer to storage for monotonic -> clock tai offset 2186 * @offs_tai: pointer to storage for monotonic -> clock tai offset
2192 * 2187 *
2193 * Returns current monotonic time and updates the offsets if the 2188 * Returns current monotonic time and updates the offsets if the
@@ -2197,7 +2192,7 @@ void do_timer(unsigned long ticks)
2197 * Called from hrtimer_interrupt() or retrigger_next_event() 2192 * Called from hrtimer_interrupt() or retrigger_next_event()
2198 */ 2193 */
2199ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real, 2194ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
2200 ktime_t *offs_tai) 2195 ktime_t *offs_boot, ktime_t *offs_tai)
2201{ 2196{
2202 struct timekeeper *tk = &tk_core.timekeeper; 2197 struct timekeeper *tk = &tk_core.timekeeper;
2203 unsigned int seq; 2198 unsigned int seq;
@@ -2214,6 +2209,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
2214 if (*cwsseq != tk->clock_was_set_seq) { 2209 if (*cwsseq != tk->clock_was_set_seq) {
2215 *cwsseq = tk->clock_was_set_seq; 2210 *cwsseq = tk->clock_was_set_seq;
2216 *offs_real = tk->offs_real; 2211 *offs_real = tk->offs_real;
2212 *offs_boot = tk->offs_boot;
2217 *offs_tai = tk->offs_tai; 2213 *offs_tai = tk->offs_tai;
2218 } 2214 }
2219 2215
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
index 79b67f5e0343..7a9b4eb7a1d5 100644
--- a/kernel/time/timekeeping.h
+++ b/kernel/time/timekeeping.h
@@ -6,6 +6,7 @@
6 */ 6 */
7extern ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, 7extern ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq,
8 ktime_t *offs_real, 8 ktime_t *offs_real,
9 ktime_t *offs_boot,
9 ktime_t *offs_tai); 10 ktime_t *offs_tai);
10 11
11extern int timekeeping_valid_for_hres(void); 12extern int timekeeping_valid_for_hres(void);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index dfbcf9ee1447..414d7210b2ec 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1165,7 +1165,7 @@ static struct {
1165 { trace_clock, "perf", 1 }, 1165 { trace_clock, "perf", 1 },
1166 { ktime_get_mono_fast_ns, "mono", 1 }, 1166 { ktime_get_mono_fast_ns, "mono", 1 },
1167 { ktime_get_raw_fast_ns, "mono_raw", 1 }, 1167 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1168 { ktime_get_mono_fast_ns, "boot", 1 }, 1168 { ktime_get_boot_fast_ns, "boot", 1 },
1169 ARCH_TRACE_CLOCKS 1169 ARCH_TRACE_CLOCKS
1170}; 1170};
1171 1171
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index e954ae3d82c0..e3a658bac10f 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -356,7 +356,7 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
356 __field( unsigned int, seqnum ) 356 __field( unsigned int, seqnum )
357 ), 357 ),
358 358
359 F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llunmi-ts:%llu\tnmi-count:%u\n", 359 F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llu\tnmi-ts:%llu\tnmi-count:%u\n",
360 __entry->seqnum, 360 __entry->seqnum,
361 __entry->tv_sec, 361 __entry->tv_sec,
362 __entry->tv_nsec, 362 __entry->tv_nsec,
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 9b4716bb8bb0..1f951b3df60c 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1499,14 +1499,14 @@ static int process_preds(struct trace_event_call *call,
1499 return ret; 1499 return ret;
1500 } 1500 }
1501 1501
1502 if (!nr_preds) { 1502 if (!nr_preds)
1503 prog = NULL; 1503 return -EINVAL;
1504 } else { 1504
1505 prog = predicate_parse(filter_string, nr_parens, nr_preds, 1505 prog = predicate_parse(filter_string, nr_parens, nr_preds,
1506 parse_pred, call, pe); 1506 parse_pred, call, pe);
1507 if (IS_ERR(prog)) 1507 if (IS_ERR(prog))
1508 return PTR_ERR(prog); 1508 return PTR_ERR(prog);
1509 } 1509
1510 rcu_assign_pointer(filter->prog, prog); 1510 rcu_assign_pointer(filter->prog, prog);
1511 return 0; 1511 return 0;
1512} 1512}
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 0d7b3ffbecc2..b9061ed59bbd 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -2466,6 +2466,7 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
2466 else if (strcmp(modifier, "usecs") == 0) 2466 else if (strcmp(modifier, "usecs") == 0)
2467 *flags |= HIST_FIELD_FL_TIMESTAMP_USECS; 2467 *flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
2468 else { 2468 else {
2469 hist_err("Invalid field modifier: ", modifier);
2469 field = ERR_PTR(-EINVAL); 2470 field = ERR_PTR(-EINVAL);
2470 goto out; 2471 goto out;
2471 } 2472 }
@@ -2481,6 +2482,7 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
2481 else { 2482 else {
2482 field = trace_find_event_field(file->event_call, field_name); 2483 field = trace_find_event_field(file->event_call, field_name);
2483 if (!field || !field->size) { 2484 if (!field || !field->size) {
2485 hist_err("Couldn't find field: ", field_name);
2484 field = ERR_PTR(-EINVAL); 2486 field = ERR_PTR(-EINVAL);
2485 goto out; 2487 goto out;
2486 } 2488 }
@@ -4913,6 +4915,16 @@ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
4913 seq_printf(m, "%s", field_name); 4915 seq_printf(m, "%s", field_name);
4914 } else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP) 4916 } else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
4915 seq_puts(m, "common_timestamp"); 4917 seq_puts(m, "common_timestamp");
4918
4919 if (hist_field->flags) {
4920 if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
4921 !(hist_field->flags & HIST_FIELD_FL_EXPR)) {
4922 const char *flags = get_hist_field_flags(hist_field);
4923
4924 if (flags)
4925 seq_printf(m, ".%s", flags);
4926 }
4927 }
4916} 4928}
4917 4929
4918static int event_hist_trigger_print(struct seq_file *m, 4930static int event_hist_trigger_print(struct seq_file *m,
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 34fd0e0ec51d..ac892878dbe6 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -55,6 +55,7 @@ struct trace_uprobe {
55 struct list_head list; 55 struct list_head list;
56 struct trace_uprobe_filter filter; 56 struct trace_uprobe_filter filter;
57 struct uprobe_consumer consumer; 57 struct uprobe_consumer consumer;
58 struct path path;
58 struct inode *inode; 59 struct inode *inode;
59 char *filename; 60 char *filename;
60 unsigned long offset; 61 unsigned long offset;
@@ -289,7 +290,7 @@ static void free_trace_uprobe(struct trace_uprobe *tu)
289 for (i = 0; i < tu->tp.nr_args; i++) 290 for (i = 0; i < tu->tp.nr_args; i++)
290 traceprobe_free_probe_arg(&tu->tp.args[i]); 291 traceprobe_free_probe_arg(&tu->tp.args[i]);
291 292
292 iput(tu->inode); 293 path_put(&tu->path);
293 kfree(tu->tp.call.class->system); 294 kfree(tu->tp.call.class->system);
294 kfree(tu->tp.call.name); 295 kfree(tu->tp.call.name);
295 kfree(tu->filename); 296 kfree(tu->filename);
@@ -363,7 +364,6 @@ end:
363static int create_trace_uprobe(int argc, char **argv) 364static int create_trace_uprobe(int argc, char **argv)
364{ 365{
365 struct trace_uprobe *tu; 366 struct trace_uprobe *tu;
366 struct inode *inode;
367 char *arg, *event, *group, *filename; 367 char *arg, *event, *group, *filename;
368 char buf[MAX_EVENT_NAME_LEN]; 368 char buf[MAX_EVENT_NAME_LEN];
369 struct path path; 369 struct path path;
@@ -371,7 +371,6 @@ static int create_trace_uprobe(int argc, char **argv)
371 bool is_delete, is_return; 371 bool is_delete, is_return;
372 int i, ret; 372 int i, ret;
373 373
374 inode = NULL;
375 ret = 0; 374 ret = 0;
376 is_delete = false; 375 is_delete = false;
377 is_return = false; 376 is_return = false;
@@ -437,21 +436,16 @@ static int create_trace_uprobe(int argc, char **argv)
437 } 436 }
438 /* Find the last occurrence, in case the path contains ':' too. */ 437 /* Find the last occurrence, in case the path contains ':' too. */
439 arg = strrchr(argv[1], ':'); 438 arg = strrchr(argv[1], ':');
440 if (!arg) { 439 if (!arg)
441 ret = -EINVAL; 440 return -EINVAL;
442 goto fail_address_parse;
443 }
444 441
445 *arg++ = '\0'; 442 *arg++ = '\0';
446 filename = argv[1]; 443 filename = argv[1];
447 ret = kern_path(filename, LOOKUP_FOLLOW, &path); 444 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
448 if (ret) 445 if (ret)
449 goto fail_address_parse; 446 return ret;
450
451 inode = igrab(d_real_inode(path.dentry));
452 path_put(&path);
453 447
454 if (!inode || !S_ISREG(inode->i_mode)) { 448 if (!d_is_reg(path.dentry)) {
455 ret = -EINVAL; 449 ret = -EINVAL;
456 goto fail_address_parse; 450 goto fail_address_parse;
457 } 451 }
@@ -490,7 +484,7 @@ static int create_trace_uprobe(int argc, char **argv)
490 goto fail_address_parse; 484 goto fail_address_parse;
491 } 485 }
492 tu->offset = offset; 486 tu->offset = offset;
493 tu->inode = inode; 487 tu->path = path;
494 tu->filename = kstrdup(filename, GFP_KERNEL); 488 tu->filename = kstrdup(filename, GFP_KERNEL);
495 489
496 if (!tu->filename) { 490 if (!tu->filename) {
@@ -558,7 +552,7 @@ error:
558 return ret; 552 return ret;
559 553
560fail_address_parse: 554fail_address_parse:
561 iput(inode); 555 path_put(&path);
562 556
563 pr_info("Failed to parse address or file.\n"); 557 pr_info("Failed to parse address or file.\n");
564 558
@@ -922,6 +916,7 @@ probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
922 goto err_flags; 916 goto err_flags;
923 917
924 tu->consumer.filter = filter; 918 tu->consumer.filter = filter;
919 tu->inode = d_real_inode(tu->path.dentry);
925 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); 920 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
926 if (ret) 921 if (ret)
927 goto err_buffer; 922 goto err_buffer;
@@ -967,6 +962,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
967 WARN_ON(!uprobe_filter_is_empty(&tu->filter)); 962 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
968 963
969 uprobe_unregister(tu->inode, tu->offset, &tu->consumer); 964 uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
965 tu->inode = NULL;
970 tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE; 966 tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;
971 967
972 uprobe_buffer_disable(); 968 uprobe_buffer_disable();
@@ -1337,7 +1333,6 @@ struct trace_event_call *
1337create_local_trace_uprobe(char *name, unsigned long offs, bool is_return) 1333create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
1338{ 1334{
1339 struct trace_uprobe *tu; 1335 struct trace_uprobe *tu;
1340 struct inode *inode;
1341 struct path path; 1336 struct path path;
1342 int ret; 1337 int ret;
1343 1338
@@ -1345,11 +1340,8 @@ create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
1345 if (ret) 1340 if (ret)
1346 return ERR_PTR(ret); 1341 return ERR_PTR(ret);
1347 1342
1348 inode = igrab(d_inode(path.dentry)); 1343 if (!d_is_reg(path.dentry)) {
1349 path_put(&path); 1344 path_put(&path);
1350
1351 if (!inode || !S_ISREG(inode->i_mode)) {
1352 iput(inode);
1353 return ERR_PTR(-EINVAL); 1345 return ERR_PTR(-EINVAL);
1354 } 1346 }
1355 1347
@@ -1364,11 +1356,12 @@ create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
1364 if (IS_ERR(tu)) { 1356 if (IS_ERR(tu)) {
1365 pr_info("Failed to allocate trace_uprobe.(%d)\n", 1357 pr_info("Failed to allocate trace_uprobe.(%d)\n",
1366 (int)PTR_ERR(tu)); 1358 (int)PTR_ERR(tu));
1359 path_put(&path);
1367 return ERR_CAST(tu); 1360 return ERR_CAST(tu);
1368 } 1361 }
1369 1362
1370 tu->offset = offs; 1363 tu->offset = offs;
1371 tu->inode = inode; 1364 tu->path = path;
1372 tu->filename = kstrdup(name, GFP_KERNEL); 1365 tu->filename = kstrdup(name, GFP_KERNEL);
1373 init_trace_event_call(tu, &tu->tp.call); 1366 init_trace_event_call(tu, &tu->tp.call);
1374 1367
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 671b13457387..1e37da2e0c25 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -207,7 +207,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
207 lockdep_is_held(&tracepoints_mutex)); 207 lockdep_is_held(&tracepoints_mutex));
208 old = func_add(&tp_funcs, func, prio); 208 old = func_add(&tp_funcs, func, prio);
209 if (IS_ERR(old)) { 209 if (IS_ERR(old)) {
210 WARN_ON_ONCE(1); 210 WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
211 return PTR_ERR(old); 211 return PTR_ERR(old);
212 } 212 }
213 213
@@ -239,7 +239,7 @@ static int tracepoint_remove_func(struct tracepoint *tp,
239 lockdep_is_held(&tracepoints_mutex)); 239 lockdep_is_held(&tracepoints_mutex));
240 old = func_remove(&tp_funcs, func); 240 old = func_remove(&tp_funcs, func);
241 if (IS_ERR(old)) { 241 if (IS_ERR(old)) {
242 WARN_ON_ONCE(1); 242 WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
243 return PTR_ERR(old); 243 return PTR_ERR(old);
244 } 244 }
245 245
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index c0bba30fef0a..bbfb229aa067 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -84,7 +84,8 @@ again:
84 __free_pages(page, page_order); 84 __free_pages(page, page_order);
85 page = NULL; 85 page = NULL;
86 86
87 if (dev->coherent_dma_mask < DMA_BIT_MASK(32) && 87 if (IS_ENABLED(CONFIG_ZONE_DMA) &&
88 dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
88 !(gfp & GFP_DMA)) { 89 !(gfp & GFP_DMA)) {
89 gfp = (gfp & ~GFP_DMA32) | GFP_DMA; 90 gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
90 goto again; 91 goto again;
diff --git a/lib/errseq.c b/lib/errseq.c
index df782418b333..81f9e33aa7e7 100644
--- a/lib/errseq.c
+++ b/lib/errseq.c
@@ -111,27 +111,22 @@ EXPORT_SYMBOL(errseq_set);
111 * errseq_sample() - Grab current errseq_t value. 111 * errseq_sample() - Grab current errseq_t value.
112 * @eseq: Pointer to errseq_t to be sampled. 112 * @eseq: Pointer to errseq_t to be sampled.
113 * 113 *
114 * This function allows callers to sample an errseq_t value, marking it as 114 * This function allows callers to initialise their errseq_t variable.
115 * "seen" if required. 115 * If the error has been "seen", new callers will not see an old error.
116 * If there is an unseen error in @eseq, the caller of this function will
117 * see it the next time it checks for an error.
116 * 118 *
119 * Context: Any context.
117 * Return: The current errseq value. 120 * Return: The current errseq value.
118 */ 121 */
119errseq_t errseq_sample(errseq_t *eseq) 122errseq_t errseq_sample(errseq_t *eseq)
120{ 123{
121 errseq_t old = READ_ONCE(*eseq); 124 errseq_t old = READ_ONCE(*eseq);
122 errseq_t new = old;
123 125
124 /* 126 /* If nobody has seen this error yet, then we can be the first. */
125 * For the common case of no errors ever having been set, we can skip 127 if (!(old & ERRSEQ_SEEN))
126 * marking the SEEN bit. Once an error has been set, the value will 128 old = 0;
127 * never go back to zero. 129 return old;
128 */
129 if (old != 0) {
130 new |= ERRSEQ_SEEN;
131 if (old != new)
132 cmpxchg(eseq, old, new);
133 }
134 return new;
135} 130}
136EXPORT_SYMBOL(errseq_sample); 131EXPORT_SYMBOL(errseq_sample);
137 132
diff --git a/lib/kobject.c b/lib/kobject.c
index e1d1f290bf35..18989b5b3b56 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -233,13 +233,12 @@ static int kobject_add_internal(struct kobject *kobj)
233 233
234 /* be noisy on error issues */ 234 /* be noisy on error issues */
235 if (error == -EEXIST) 235 if (error == -EEXIST)
236 WARN(1, 236 pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
237 "%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n", 237 __func__, kobject_name(kobj));
238 __func__, kobject_name(kobj));
239 else 238 else
240 WARN(1, "%s failed for %s (error: %d parent: %s)\n", 239 pr_err("%s failed for %s (error: %d parent: %s)\n",
241 __func__, kobject_name(kobj), error, 240 __func__, kobject_name(kobj), error,
242 parent ? kobject_name(parent) : "'none'"); 241 parent ? kobject_name(parent) : "'none'");
243 } else 242 } else
244 kobj->state_in_sysfs = 1; 243 kobj->state_in_sysfs = 1;
245 244
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 15ea216a67ce..63d0816ab23b 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -22,6 +22,7 @@
22#include <linux/socket.h> 22#include <linux/socket.h>
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <linux/netlink.h> 24#include <linux/netlink.h>
25#include <linux/uidgid.h>
25#include <linux/uuid.h> 26#include <linux/uuid.h>
26#include <linux/ctype.h> 27#include <linux/ctype.h>
27#include <net/sock.h> 28#include <net/sock.h>
@@ -231,30 +232,6 @@ out:
231 return r; 232 return r;
232} 233}
233 234
234#ifdef CONFIG_NET
235static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
236{
237 struct kobject *kobj = data, *ksobj;
238 const struct kobj_ns_type_operations *ops;
239
240 ops = kobj_ns_ops(kobj);
241 if (!ops && kobj->kset) {
242 ksobj = &kobj->kset->kobj;
243 if (ksobj->parent != NULL)
244 ops = kobj_ns_ops(ksobj->parent);
245 }
246
247 if (ops && ops->netlink_ns && kobj->ktype->namespace) {
248 const void *sock_ns, *ns;
249 ns = kobj->ktype->namespace(kobj);
250 sock_ns = ops->netlink_ns(dsk);
251 return sock_ns != ns;
252 }
253
254 return 0;
255}
256#endif
257
258#ifdef CONFIG_UEVENT_HELPER 235#ifdef CONFIG_UEVENT_HELPER
259static int kobj_usermode_filter(struct kobject *kobj) 236static int kobj_usermode_filter(struct kobject *kobj)
260{ 237{
@@ -296,15 +273,44 @@ static void cleanup_uevent_env(struct subprocess_info *info)
296} 273}
297#endif 274#endif
298 275
299static int kobject_uevent_net_broadcast(struct kobject *kobj, 276#ifdef CONFIG_NET
300 struct kobj_uevent_env *env, 277static struct sk_buff *alloc_uevent_skb(struct kobj_uevent_env *env,
301 const char *action_string, 278 const char *action_string,
302 const char *devpath) 279 const char *devpath)
303{ 280{
304 int retval = 0; 281 struct netlink_skb_parms *parms;
305#if defined(CONFIG_NET) 282 struct sk_buff *skb = NULL;
283 char *scratch;
284 size_t len;
285
286 /* allocate message with maximum possible size */
287 len = strlen(action_string) + strlen(devpath) + 2;
288 skb = alloc_skb(len + env->buflen, GFP_KERNEL);
289 if (!skb)
290 return NULL;
291
292 /* add header */
293 scratch = skb_put(skb, len);
294 sprintf(scratch, "%s@%s", action_string, devpath);
295
296 skb_put_data(skb, env->buf, env->buflen);
297
298 parms = &NETLINK_CB(skb);
299 parms->creds.uid = GLOBAL_ROOT_UID;
300 parms->creds.gid = GLOBAL_ROOT_GID;
301 parms->dst_group = 1;
302 parms->portid = 0;
303
304 return skb;
305}
306
307static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env,
308 const char *action_string,
309 const char *devpath)
310{
306 struct sk_buff *skb = NULL; 311 struct sk_buff *skb = NULL;
307 struct uevent_sock *ue_sk; 312 struct uevent_sock *ue_sk;
313 int retval = 0;
308 314
309 /* send netlink message */ 315 /* send netlink message */
310 list_for_each_entry(ue_sk, &uevent_sock_list, list) { 316 list_for_each_entry(ue_sk, &uevent_sock_list, list) {
@@ -314,37 +320,99 @@ static int kobject_uevent_net_broadcast(struct kobject *kobj,
314 continue; 320 continue;
315 321
316 if (!skb) { 322 if (!skb) {
317 /* allocate message with the maximum possible size */
318 size_t len = strlen(action_string) + strlen(devpath) + 2;
319 char *scratch;
320
321 retval = -ENOMEM; 323 retval = -ENOMEM;
322 skb = alloc_skb(len + env->buflen, GFP_KERNEL); 324 skb = alloc_uevent_skb(env, action_string, devpath);
323 if (!skb) 325 if (!skb)
324 continue; 326 continue;
325
326 /* add header */
327 scratch = skb_put(skb, len);
328 sprintf(scratch, "%s@%s", action_string, devpath);
329
330 skb_put_data(skb, env->buf, env->buflen);
331
332 NETLINK_CB(skb).dst_group = 1;
333 } 327 }
334 328
335 retval = netlink_broadcast_filtered(uevent_sock, skb_get(skb), 329 retval = netlink_broadcast(uevent_sock, skb_get(skb), 0, 1,
336 0, 1, GFP_KERNEL, 330 GFP_KERNEL);
337 kobj_bcast_filter,
338 kobj);
339 /* ENOBUFS should be handled in userspace */ 331 /* ENOBUFS should be handled in userspace */
340 if (retval == -ENOBUFS || retval == -ESRCH) 332 if (retval == -ENOBUFS || retval == -ESRCH)
341 retval = 0; 333 retval = 0;
342 } 334 }
343 consume_skb(skb); 335 consume_skb(skb);
344#endif 336
345 return retval; 337 return retval;
346} 338}
347 339
340static int uevent_net_broadcast_tagged(struct sock *usk,
341 struct kobj_uevent_env *env,
342 const char *action_string,
343 const char *devpath)
344{
345 struct user_namespace *owning_user_ns = sock_net(usk)->user_ns;
346 struct sk_buff *skb = NULL;
347 int ret = 0;
348
349 skb = alloc_uevent_skb(env, action_string, devpath);
350 if (!skb)
351 return -ENOMEM;
352
353 /* fix credentials */
354 if (owning_user_ns != &init_user_ns) {
355 struct netlink_skb_parms *parms = &NETLINK_CB(skb);
356 kuid_t root_uid;
357 kgid_t root_gid;
358
359 /* fix uid */
360 root_uid = make_kuid(owning_user_ns, 0);
361 if (uid_valid(root_uid))
362 parms->creds.uid = root_uid;
363
364 /* fix gid */
365 root_gid = make_kgid(owning_user_ns, 0);
366 if (gid_valid(root_gid))
367 parms->creds.gid = root_gid;
368 }
369
370 ret = netlink_broadcast(usk, skb, 0, 1, GFP_KERNEL);
371 /* ENOBUFS should be handled in userspace */
372 if (ret == -ENOBUFS || ret == -ESRCH)
373 ret = 0;
374
375 return ret;
376}
377#endif
378
379static int kobject_uevent_net_broadcast(struct kobject *kobj,
380 struct kobj_uevent_env *env,
381 const char *action_string,
382 const char *devpath)
383{
384 int ret = 0;
385
386#ifdef CONFIG_NET
387 const struct kobj_ns_type_operations *ops;
388 const struct net *net = NULL;
389
390 ops = kobj_ns_ops(kobj);
391 if (!ops && kobj->kset) {
392 struct kobject *ksobj = &kobj->kset->kobj;
393 if (ksobj->parent != NULL)
394 ops = kobj_ns_ops(ksobj->parent);
395 }
396
397 /* kobjects currently only carry network namespace tags and they
398 * are the only tag relevant here since we want to decide which
399 * network namespaces to broadcast the uevent into.
400 */
401 if (ops && ops->netlink_ns && kobj->ktype->namespace)
402 if (ops->type == KOBJ_NS_TYPE_NET)
403 net = kobj->ktype->namespace(kobj);
404
405 if (!net)
406 ret = uevent_net_broadcast_untagged(env, action_string,
407 devpath);
408 else
409 ret = uevent_net_broadcast_tagged(net->uevent_sock->sk, env,
410 action_string, devpath);
411#endif
412
413 return ret;
414}
415
348static void zap_modalias_env(struct kobj_uevent_env *env) 416static void zap_modalias_env(struct kobj_uevent_env *env)
349{ 417{
350 static const char modalias_prefix[] = "MODALIAS="; 418 static const char modalias_prefix[] = "MODALIAS=";
@@ -703,9 +771,13 @@ static int uevent_net_init(struct net *net)
703 771
704 net->uevent_sock = ue_sk; 772 net->uevent_sock = ue_sk;
705 773
706 mutex_lock(&uevent_sock_mutex); 774 /* Restrict uevents to initial user namespace. */
707 list_add_tail(&ue_sk->list, &uevent_sock_list); 775 if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) {
708 mutex_unlock(&uevent_sock_mutex); 776 mutex_lock(&uevent_sock_mutex);
777 list_add_tail(&ue_sk->list, &uevent_sock_list);
778 mutex_unlock(&uevent_sock_mutex);
779 }
780
709 return 0; 781 return 0;
710} 782}
711 783
@@ -713,9 +785,11 @@ static void uevent_net_exit(struct net *net)
713{ 785{
714 struct uevent_sock *ue_sk = net->uevent_sock; 786 struct uevent_sock *ue_sk = net->uevent_sock;
715 787
716 mutex_lock(&uevent_sock_mutex); 788 if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) {
717 list_del(&ue_sk->list); 789 mutex_lock(&uevent_sock_mutex);
718 mutex_unlock(&uevent_sock_mutex); 790 list_del(&ue_sk->list);
791 mutex_unlock(&uevent_sock_mutex);
792 }
719 793
720 netlink_kernel_release(ue_sk->sk); 794 netlink_kernel_release(ue_sk->sk);
721 kfree(ue_sk); 795 kfree(ue_sk);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index fece57566d45..12fbaa445637 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -737,7 +737,7 @@ out_unmap:
737 swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE, 737 swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
738 DMA_ATTR_SKIP_CPU_SYNC); 738 DMA_ATTR_SKIP_CPU_SYNC);
739out_warn: 739out_warn:
740 if ((attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) { 740 if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
741 dev_warn(dev, 741 dev_warn(dev,
742 "swiotlb: coherent allocation failed, size=%zu\n", 742 "swiotlb: coherent allocation failed, size=%zu\n",
743 size); 743 size);
diff --git a/mm/mmap.c b/mm/mmap.c
index 188f195883b9..9d5968d1e8e3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -100,11 +100,20 @@ pgprot_t protection_map[16] __ro_after_init = {
100 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 100 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
101}; 101};
102 102
103#ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
104static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
105{
106 return prot;
107}
108#endif
109
103pgprot_t vm_get_page_prot(unsigned long vm_flags) 110pgprot_t vm_get_page_prot(unsigned long vm_flags)
104{ 111{
105 return __pgprot(pgprot_val(protection_map[vm_flags & 112 pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
106 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) | 113 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
107 pgprot_val(arch_vm_get_page_prot(vm_flags))); 114 pgprot_val(arch_vm_get_page_prot(vm_flags)));
115
116 return arch_filter_pgprot(ret);
108} 117}
109EXPORT_SYMBOL(vm_get_page_prot); 118EXPORT_SYMBOL(vm_get_page_prot);
110 119
diff --git a/net/Kconfig b/net/Kconfig
index 86471a1c1ed4..df8d45ef47d8 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -408,6 +408,9 @@ config GRO_CELLS
408 bool 408 bool
409 default n 409 default n
410 410
411config SOCK_VALIDATE_XMIT
412 bool
413
411config NET_DEVLINK 414config NET_DEVLINK
412 tristate "Network physical/parent device Netlink interface" 415 tristate "Network physical/parent device Netlink interface"
413 help 416 help
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 671d13c10f6f..b0a0b82e2d91 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -34,6 +34,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
34 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 34 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
35 struct net_bridge_port *p; 35 struct net_bridge_port *p;
36 struct net_bridge *br; 36 struct net_bridge *br;
37 bool notified = false;
37 bool changed_addr; 38 bool changed_addr;
38 int err; 39 int err;
39 40
@@ -67,7 +68,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
67 break; 68 break;
68 69
69 case NETDEV_CHANGE: 70 case NETDEV_CHANGE:
70 br_port_carrier_check(p); 71 br_port_carrier_check(p, &notified);
71 break; 72 break;
72 73
73 case NETDEV_FEAT_CHANGE: 74 case NETDEV_FEAT_CHANGE:
@@ -76,8 +77,10 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
76 77
77 case NETDEV_DOWN: 78 case NETDEV_DOWN:
78 spin_lock_bh(&br->lock); 79 spin_lock_bh(&br->lock);
79 if (br->dev->flags & IFF_UP) 80 if (br->dev->flags & IFF_UP) {
80 br_stp_disable_port(p); 81 br_stp_disable_port(p);
82 notified = true;
83 }
81 spin_unlock_bh(&br->lock); 84 spin_unlock_bh(&br->lock);
82 break; 85 break;
83 86
@@ -85,6 +88,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
85 if (netif_running(br->dev) && netif_oper_up(dev)) { 88 if (netif_running(br->dev) && netif_oper_up(dev)) {
86 spin_lock_bh(&br->lock); 89 spin_lock_bh(&br->lock);
87 br_stp_enable_port(p); 90 br_stp_enable_port(p);
91 notified = true;
88 spin_unlock_bh(&br->lock); 92 spin_unlock_bh(&br->lock);
89 } 93 }
90 break; 94 break;
@@ -110,8 +114,8 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
110 } 114 }
111 115
112 /* Events that may cause spanning tree to refresh */ 116 /* Events that may cause spanning tree to refresh */
113 if (event == NETDEV_CHANGEADDR || event == NETDEV_UP || 117 if (!notified && (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
114 event == NETDEV_CHANGE || event == NETDEV_DOWN) 118 event == NETDEV_CHANGE || event == NETDEV_DOWN))
115 br_ifinfo_notify(RTM_NEWLINK, NULL, p); 119 br_ifinfo_notify(RTM_NEWLINK, NULL, p);
116 120
117 return NOTIFY_DONE; 121 return NOTIFY_DONE;
@@ -141,7 +145,7 @@ static int br_switchdev_event(struct notifier_block *unused,
141 case SWITCHDEV_FDB_ADD_TO_BRIDGE: 145 case SWITCHDEV_FDB_ADD_TO_BRIDGE:
142 fdb_info = ptr; 146 fdb_info = ptr;
143 err = br_fdb_external_learn_add(br, p, fdb_info->addr, 147 err = br_fdb_external_learn_add(br, p, fdb_info->addr,
144 fdb_info->vid); 148 fdb_info->vid, false);
145 if (err) { 149 if (err) {
146 err = notifier_from_errno(err); 150 err = notifier_from_errno(err);
147 break; 151 break;
@@ -152,7 +156,7 @@ static int br_switchdev_event(struct notifier_block *unused,
152 case SWITCHDEV_FDB_DEL_TO_BRIDGE: 156 case SWITCHDEV_FDB_DEL_TO_BRIDGE:
153 fdb_info = ptr; 157 fdb_info = ptr;
154 err = br_fdb_external_learn_del(br, p, fdb_info->addr, 158 err = br_fdb_external_learn_del(br, p, fdb_info->addr,
155 fdb_info->vid); 159 fdb_info->vid, false);
156 if (err) 160 if (err)
157 err = notifier_from_errno(err); 161 err = notifier_from_errno(err);
158 break; 162 break;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index d9e69e4514be..b19e3104afd6 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -40,7 +40,7 @@ static struct kmem_cache *br_fdb_cache __read_mostly;
40static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 40static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
41 const unsigned char *addr, u16 vid); 41 const unsigned char *addr, u16 vid);
42static void fdb_notify(struct net_bridge *br, 42static void fdb_notify(struct net_bridge *br,
43 const struct net_bridge_fdb_entry *, int); 43 const struct net_bridge_fdb_entry *, int, bool);
44 44
45int __init br_fdb_init(void) 45int __init br_fdb_init(void)
46{ 46{
@@ -121,6 +121,28 @@ static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
121 return fdb; 121 return fdb;
122} 122}
123 123
124struct net_device *br_fdb_find_port(const struct net_device *br_dev,
125 const unsigned char *addr,
126 __u16 vid)
127{
128 struct net_bridge_fdb_entry *f;
129 struct net_device *dev = NULL;
130 struct net_bridge *br;
131
132 ASSERT_RTNL();
133
134 if (!netif_is_bridge_master(br_dev))
135 return NULL;
136
137 br = netdev_priv(br_dev);
138 f = br_fdb_find(br, addr, vid);
139 if (f && f->dst)
140 dev = f->dst->dev;
141
142 return dev;
143}
144EXPORT_SYMBOL_GPL(br_fdb_find_port);
145
124struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br, 146struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
125 const unsigned char *addr, 147 const unsigned char *addr,
126 __u16 vid) 148 __u16 vid)
@@ -173,7 +195,8 @@ static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
173 } 195 }
174} 196}
175 197
176static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f) 198static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
199 bool swdev_notify)
177{ 200{
178 trace_fdb_delete(br, f); 201 trace_fdb_delete(br, f);
179 202
@@ -183,7 +206,7 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
183 hlist_del_init_rcu(&f->fdb_node); 206 hlist_del_init_rcu(&f->fdb_node);
184 rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode, 207 rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode,
185 br_fdb_rht_params); 208 br_fdb_rht_params);
186 fdb_notify(br, f, RTM_DELNEIGH); 209 fdb_notify(br, f, RTM_DELNEIGH, swdev_notify);
187 call_rcu(&f->rcu, fdb_rcu_free); 210 call_rcu(&f->rcu, fdb_rcu_free);
188} 211}
189 212
@@ -219,7 +242,7 @@ static void fdb_delete_local(struct net_bridge *br,
219 return; 242 return;
220 } 243 }
221 244
222 fdb_delete(br, f); 245 fdb_delete(br, f, true);
223} 246}
224 247
225void br_fdb_find_delete_local(struct net_bridge *br, 248void br_fdb_find_delete_local(struct net_bridge *br,
@@ -334,7 +357,7 @@ void br_fdb_cleanup(struct work_struct *work)
334 } else { 357 } else {
335 spin_lock_bh(&br->hash_lock); 358 spin_lock_bh(&br->hash_lock);
336 if (!hlist_unhashed(&f->fdb_node)) 359 if (!hlist_unhashed(&f->fdb_node))
337 fdb_delete(br, f); 360 fdb_delete(br, f, true);
338 spin_unlock_bh(&br->hash_lock); 361 spin_unlock_bh(&br->hash_lock);
339 } 362 }
340 } 363 }
@@ -354,7 +377,7 @@ void br_fdb_flush(struct net_bridge *br)
354 spin_lock_bh(&br->hash_lock); 377 spin_lock_bh(&br->hash_lock);
355 hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) { 378 hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
356 if (!f->is_static) 379 if (!f->is_static)
357 fdb_delete(br, f); 380 fdb_delete(br, f, true);
358 } 381 }
359 spin_unlock_bh(&br->hash_lock); 382 spin_unlock_bh(&br->hash_lock);
360} 383}
@@ -383,7 +406,7 @@ void br_fdb_delete_by_port(struct net_bridge *br,
383 if (f->is_local) 406 if (f->is_local)
384 fdb_delete_local(br, p, f); 407 fdb_delete_local(br, p, f);
385 else 408 else
386 fdb_delete(br, f); 409 fdb_delete(br, f, true);
387 } 410 }
388 spin_unlock_bh(&br->hash_lock); 411 spin_unlock_bh(&br->hash_lock);
389} 412}
@@ -509,7 +532,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
509 return 0; 532 return 0;
510 br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n", 533 br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
511 source ? source->dev->name : br->dev->name, addr, vid); 534 source ? source->dev->name : br->dev->name, addr, vid);
512 fdb_delete(br, fdb); 535 fdb_delete(br, fdb, true);
513 } 536 }
514 537
515 fdb = fdb_create(br, source, addr, vid, 1, 1); 538 fdb = fdb_create(br, source, addr, vid, 1, 1);
@@ -517,7 +540,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
517 return -ENOMEM; 540 return -ENOMEM;
518 541
519 fdb_add_hw_addr(br, addr); 542 fdb_add_hw_addr(br, addr);
520 fdb_notify(br, fdb, RTM_NEWNEIGH); 543 fdb_notify(br, fdb, RTM_NEWNEIGH, true);
521 return 0; 544 return 0;
522} 545}
523 546
@@ -572,7 +595,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
572 fdb->added_by_user = 1; 595 fdb->added_by_user = 1;
573 if (unlikely(fdb_modified)) { 596 if (unlikely(fdb_modified)) {
574 trace_br_fdb_update(br, source, addr, vid, added_by_user); 597 trace_br_fdb_update(br, source, addr, vid, added_by_user);
575 fdb_notify(br, fdb, RTM_NEWNEIGH); 598 fdb_notify(br, fdb, RTM_NEWNEIGH, true);
576 } 599 }
577 } 600 }
578 } else { 601 } else {
@@ -583,7 +606,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
583 fdb->added_by_user = 1; 606 fdb->added_by_user = 1;
584 trace_br_fdb_update(br, source, addr, vid, 607 trace_br_fdb_update(br, source, addr, vid,
585 added_by_user); 608 added_by_user);
586 fdb_notify(br, fdb, RTM_NEWNEIGH); 609 fdb_notify(br, fdb, RTM_NEWNEIGH, true);
587 } 610 }
588 /* else we lose race and someone else inserts 611 /* else we lose race and someone else inserts
589 * it first, don't bother updating 612 * it first, don't bother updating
@@ -665,13 +688,15 @@ static inline size_t fdb_nlmsg_size(void)
665} 688}
666 689
667static void fdb_notify(struct net_bridge *br, 690static void fdb_notify(struct net_bridge *br,
668 const struct net_bridge_fdb_entry *fdb, int type) 691 const struct net_bridge_fdb_entry *fdb, int type,
692 bool swdev_notify)
669{ 693{
670 struct net *net = dev_net(br->dev); 694 struct net *net = dev_net(br->dev);
671 struct sk_buff *skb; 695 struct sk_buff *skb;
672 int err = -ENOBUFS; 696 int err = -ENOBUFS;
673 697
674 br_switchdev_fdb_notify(fdb, type); 698 if (swdev_notify)
699 br_switchdev_fdb_notify(fdb, type);
675 700
676 skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC); 701 skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
677 if (skb == NULL) 702 if (skb == NULL)
@@ -810,7 +835,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
810 fdb->used = jiffies; 835 fdb->used = jiffies;
811 if (modified) { 836 if (modified) {
812 fdb->updated = jiffies; 837 fdb->updated = jiffies;
813 fdb_notify(br, fdb, RTM_NEWNEIGH); 838 fdb_notify(br, fdb, RTM_NEWNEIGH, true);
814 } 839 }
815 840
816 return 0; 841 return 0;
@@ -834,7 +859,7 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
834 rcu_read_unlock(); 859 rcu_read_unlock();
835 local_bh_enable(); 860 local_bh_enable();
836 } else if (ndm->ndm_flags & NTF_EXT_LEARNED) { 861 } else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
837 err = br_fdb_external_learn_add(br, p, addr, vid); 862 err = br_fdb_external_learn_add(br, p, addr, vid, true);
838 } else { 863 } else {
839 spin_lock_bh(&br->hash_lock); 864 spin_lock_bh(&br->hash_lock);
840 err = fdb_add_entry(br, p, addr, ndm->ndm_state, 865 err = fdb_add_entry(br, p, addr, ndm->ndm_state,
@@ -923,7 +948,7 @@ static int fdb_delete_by_addr_and_port(struct net_bridge *br,
923 if (!fdb || fdb->dst != p) 948 if (!fdb || fdb->dst != p)
924 return -ENOENT; 949 return -ENOENT;
925 950
926 fdb_delete(br, fdb); 951 fdb_delete(br, fdb, true);
927 952
928 return 0; 953 return 0;
929} 954}
@@ -1043,7 +1068,8 @@ void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
1043} 1068}
1044 1069
1045int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, 1070int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
1046 const unsigned char *addr, u16 vid) 1071 const unsigned char *addr, u16 vid,
1072 bool swdev_notify)
1047{ 1073{
1048 struct net_bridge_fdb_entry *fdb; 1074 struct net_bridge_fdb_entry *fdb;
1049 bool modified = false; 1075 bool modified = false;
@@ -1061,7 +1087,7 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
1061 goto err_unlock; 1087 goto err_unlock;
1062 } 1088 }
1063 fdb->added_by_external_learn = 1; 1089 fdb->added_by_external_learn = 1;
1064 fdb_notify(br, fdb, RTM_NEWNEIGH); 1090 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
1065 } else { 1091 } else {
1066 fdb->updated = jiffies; 1092 fdb->updated = jiffies;
1067 1093
@@ -1080,7 +1106,7 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
1080 } 1106 }
1081 1107
1082 if (modified) 1108 if (modified)
1083 fdb_notify(br, fdb, RTM_NEWNEIGH); 1109 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
1084 } 1110 }
1085 1111
1086err_unlock: 1112err_unlock:
@@ -1090,7 +1116,8 @@ err_unlock:
1090} 1116}
1091 1117
1092int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p, 1118int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
1093 const unsigned char *addr, u16 vid) 1119 const unsigned char *addr, u16 vid,
1120 bool swdev_notify)
1094{ 1121{
1095 struct net_bridge_fdb_entry *fdb; 1122 struct net_bridge_fdb_entry *fdb;
1096 int err = 0; 1123 int err = 0;
@@ -1099,7 +1126,7 @@ int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
1099 1126
1100 fdb = br_fdb_find(br, addr, vid); 1127 fdb = br_fdb_find(br, addr, vid);
1101 if (fdb && fdb->added_by_external_learn) 1128 if (fdb && fdb->added_by_external_learn)
1102 fdb_delete(br, fdb); 1129 fdb_delete(br, fdb, swdev_notify);
1103 else 1130 else
1104 err = -ENOENT; 1131 err = -ENOENT;
1105 1132
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index b4eed113d2ec..7a7fd672ccf2 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -274,8 +274,7 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
274 struct net_bridge_port *port, *lport, *rport; 274 struct net_bridge_port *port, *lport, *rport;
275 275
276 lport = p ? p->port : NULL; 276 lport = p ? p->port : NULL;
277 rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) : 277 rport = hlist_entry_safe(rp, struct net_bridge_port, rlist);
278 NULL;
279 278
280 if ((unsigned long)lport > (unsigned long)rport) { 279 if ((unsigned long)lport > (unsigned long)rport) {
281 port = lport; 280 port = lport;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 82c1a6f430b3..05e42d86882d 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -64,7 +64,7 @@ static int port_cost(struct net_device *dev)
64 64
65 65
66/* Check for port carrier transitions. */ 66/* Check for port carrier transitions. */
67void br_port_carrier_check(struct net_bridge_port *p) 67void br_port_carrier_check(struct net_bridge_port *p, bool *notified)
68{ 68{
69 struct net_device *dev = p->dev; 69 struct net_device *dev = p->dev;
70 struct net_bridge *br = p->br; 70 struct net_bridge *br = p->br;
@@ -73,16 +73,21 @@ void br_port_carrier_check(struct net_bridge_port *p)
73 netif_running(dev) && netif_oper_up(dev)) 73 netif_running(dev) && netif_oper_up(dev))
74 p->path_cost = port_cost(dev); 74 p->path_cost = port_cost(dev);
75 75
76 *notified = false;
76 if (!netif_running(br->dev)) 77 if (!netif_running(br->dev))
77 return; 78 return;
78 79
79 spin_lock_bh(&br->lock); 80 spin_lock_bh(&br->lock);
80 if (netif_running(dev) && netif_oper_up(dev)) { 81 if (netif_running(dev) && netif_oper_up(dev)) {
81 if (p->state == BR_STATE_DISABLED) 82 if (p->state == BR_STATE_DISABLED) {
82 br_stp_enable_port(p); 83 br_stp_enable_port(p);
84 *notified = true;
85 }
83 } else { 86 } else {
84 if (p->state != BR_STATE_DISABLED) 87 if (p->state != BR_STATE_DISABLED) {
85 br_stp_disable_port(p); 88 br_stp_disable_port(p);
89 *notified = true;
90 }
86 } 91 }
87 spin_unlock_bh(&br->lock); 92 spin_unlock_bh(&br->lock);
88} 93}
@@ -518,8 +523,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
518 return -ELOOP; 523 return -ELOOP;
519 } 524 }
520 525
521 /* Device is already being bridged */ 526 /* Device has master upper dev */
522 if (br_port_exists(dev)) 527 if (netdev_master_upper_dev_get(dev))
523 return -EBUSY; 528 return -EBUSY;
524 529
525 /* No bridging devices that dislike that (e.g. wireless) */ 530 /* No bridging devices that dislike that (e.g. wireless) */
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index a7cb3ece5031..742f40aefdaf 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -553,9 +553,11 @@ int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
553int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p); 553int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
554void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p); 554void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
555int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, 555int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
556 const unsigned char *addr, u16 vid); 556 const unsigned char *addr, u16 vid,
557 bool swdev_notify);
557int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p, 558int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
558 const unsigned char *addr, u16 vid); 559 const unsigned char *addr, u16 vid,
560 bool swdev_notify);
559void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p, 561void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
560 const unsigned char *addr, u16 vid); 562 const unsigned char *addr, u16 vid);
561 563
@@ -573,7 +575,7 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb,
573 enum br_pkt_type pkt_type, bool local_rcv, bool local_orig); 575 enum br_pkt_type pkt_type, bool local_rcv, bool local_orig);
574 576
575/* br_if.c */ 577/* br_if.c */
576void br_port_carrier_check(struct net_bridge_port *p); 578void br_port_carrier_check(struct net_bridge_port *p, bool *notified);
577int br_add_bridge(struct net *net, const char *name); 579int br_add_bridge(struct net *net, const char *name);
578int br_del_bridge(struct net *net, const char *name); 580int br_del_bridge(struct net *net, const char *name);
579int br_add_if(struct net_bridge *br, struct net_device *dev, 581int br_add_if(struct net_bridge *br, struct net_device *dev,
@@ -594,11 +596,22 @@ static inline bool br_rx_handler_check_rcu(const struct net_device *dev)
594 return rcu_dereference(dev->rx_handler) == br_handle_frame; 596 return rcu_dereference(dev->rx_handler) == br_handle_frame;
595} 597}
596 598
599static inline bool br_rx_handler_check_rtnl(const struct net_device *dev)
600{
601 return rcu_dereference_rtnl(dev->rx_handler) == br_handle_frame;
602}
603
597static inline struct net_bridge_port *br_port_get_check_rcu(const struct net_device *dev) 604static inline struct net_bridge_port *br_port_get_check_rcu(const struct net_device *dev)
598{ 605{
599 return br_rx_handler_check_rcu(dev) ? br_port_get_rcu(dev) : NULL; 606 return br_rx_handler_check_rcu(dev) ? br_port_get_rcu(dev) : NULL;
600} 607}
601 608
609static inline struct net_bridge_port *
610br_port_get_check_rtnl(const struct net_device *dev)
611{
612 return br_rx_handler_check_rtnl(dev) ? br_port_get_rtnl_rcu(dev) : NULL;
613}
614
602/* br_ioctl.c */ 615/* br_ioctl.c */
603int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 616int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
604int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, 617int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd,
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index ee775f4ff76c..35474d49555d 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -102,13 +102,15 @@ int br_switchdev_set_port_flag(struct net_bridge_port *p,
102 102
103static void 103static void
104br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac, 104br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
105 u16 vid, struct net_device *dev) 105 u16 vid, struct net_device *dev,
106 bool added_by_user)
106{ 107{
107 struct switchdev_notifier_fdb_info info; 108 struct switchdev_notifier_fdb_info info;
108 unsigned long notifier_type; 109 unsigned long notifier_type;
109 110
110 info.addr = mac; 111 info.addr = mac;
111 info.vid = vid; 112 info.vid = vid;
113 info.added_by_user = added_by_user;
112 notifier_type = adding ? SWITCHDEV_FDB_ADD_TO_DEVICE : SWITCHDEV_FDB_DEL_TO_DEVICE; 114 notifier_type = adding ? SWITCHDEV_FDB_ADD_TO_DEVICE : SWITCHDEV_FDB_DEL_TO_DEVICE;
113 call_switchdev_notifiers(notifier_type, dev, &info.info); 115 call_switchdev_notifiers(notifier_type, dev, &info.info);
114} 116}
@@ -116,19 +118,21 @@ br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
116void 118void
117br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type) 119br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
118{ 120{
119 if (!fdb->added_by_user || !fdb->dst) 121 if (!fdb->dst)
120 return; 122 return;
121 123
122 switch (type) { 124 switch (type) {
123 case RTM_DELNEIGH: 125 case RTM_DELNEIGH:
124 br_switchdev_fdb_call_notifiers(false, fdb->key.addr.addr, 126 br_switchdev_fdb_call_notifiers(false, fdb->key.addr.addr,
125 fdb->key.vlan_id, 127 fdb->key.vlan_id,
126 fdb->dst->dev); 128 fdb->dst->dev,
129 fdb->added_by_user);
127 break; 130 break;
128 case RTM_NEWNEIGH: 131 case RTM_NEWNEIGH:
129 br_switchdev_fdb_call_notifiers(true, fdb->key.addr.addr, 132 br_switchdev_fdb_call_notifiers(true, fdb->key.addr.addr,
130 fdb->key.vlan_id, 133 fdb->key.vlan_id,
131 fdb->dst->dev); 134 fdb->dst->dev,
135 fdb->added_by_user);
132 break; 136 break;
133 } 137 }
134} 138}
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 9896f4975353..df37a5137c25 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -1149,3 +1149,42 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v,
1149 stats->tx_packets += txpackets; 1149 stats->tx_packets += txpackets;
1150 } 1150 }
1151} 1151}
1152
1153int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
1154{
1155 struct net_bridge_vlan_group *vg;
1156
1157 ASSERT_RTNL();
1158 if (netif_is_bridge_master(dev))
1159 vg = br_vlan_group(netdev_priv(dev));
1160 else
1161 return -EINVAL;
1162
1163 *p_pvid = br_get_pvid(vg);
1164 return 0;
1165}
1166EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
1167
1168int br_vlan_get_info(const struct net_device *dev, u16 vid,
1169 struct bridge_vlan_info *p_vinfo)
1170{
1171 struct net_bridge_vlan_group *vg;
1172 struct net_bridge_vlan *v;
1173 struct net_bridge_port *p;
1174
1175 ASSERT_RTNL();
1176 p = br_port_get_check_rtnl(dev);
1177 if (p)
1178 vg = nbp_vlan_group(p);
1179 else
1180 return -EINVAL;
1181
1182 v = br_vlan_find(vg, vid);
1183 if (!v)
1184 return -ENOENT;
1185
1186 p_vinfo->vid = vid;
1187 p_vinfo->flags = v->flags;
1188 return 0;
1189}
1190EXPORT_SYMBOL_GPL(br_vlan_get_info);
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index f212447794bd..9a0159aebe1a 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -8,13 +8,6 @@ menuconfig NF_TABLES_BRIDGE
8 bool "Ethernet Bridge nf_tables support" 8 bool "Ethernet Bridge nf_tables support"
9 9
10if NF_TABLES_BRIDGE 10if NF_TABLES_BRIDGE
11
12config NFT_BRIDGE_META
13 tristate "Netfilter nf_table bridge meta support"
14 depends on NFT_META
15 help
16 Add support for bridge dedicated meta key.
17
18config NFT_BRIDGE_REJECT 11config NFT_BRIDGE_REJECT
19 tristate "Netfilter nf_tables bridge reject support" 12 tristate "Netfilter nf_tables bridge reject support"
20 depends on NFT_REJECT && NFT_REJECT_IPV4 && NFT_REJECT_IPV6 13 depends on NFT_REJECT && NFT_REJECT_IPV4 && NFT_REJECT_IPV6
diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile
index 4bc758dd4a8c..9b868861f21a 100644
--- a/net/bridge/netfilter/Makefile
+++ b/net/bridge/netfilter/Makefile
@@ -3,7 +3,6 @@
3# Makefile for the netfilter modules for Link Layer filtering on a bridge. 3# Makefile for the netfilter modules for Link Layer filtering on a bridge.
4# 4#
5 5
6obj-$(CONFIG_NFT_BRIDGE_META) += nft_meta_bridge.o
7obj-$(CONFIG_NFT_BRIDGE_REJECT) += nft_reject_bridge.o 6obj-$(CONFIG_NFT_BRIDGE_REJECT) += nft_reject_bridge.o
8 7
9# packet logging 8# packet logging
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 28a4c3490359..b286ed5596c3 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -101,7 +101,7 @@ ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
101{ 101{
102 par->match = m->u.match; 102 par->match = m->u.match;
103 par->matchinfo = m->data; 103 par->matchinfo = m->data;
104 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH; 104 return !m->u.match->match(skb, par);
105} 105}
106 106
107static inline int 107static inline int
@@ -177,6 +177,12 @@ struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
177 return (void *)entry + entry->next_offset; 177 return (void *)entry + entry->next_offset;
178} 178}
179 179
180static inline const struct ebt_entry_target *
181ebt_get_target_c(const struct ebt_entry *e)
182{
183 return ebt_get_target((struct ebt_entry *)e);
184}
185
180/* Do some firewalling */ 186/* Do some firewalling */
181unsigned int ebt_do_table(struct sk_buff *skb, 187unsigned int ebt_do_table(struct sk_buff *skb,
182 const struct nf_hook_state *state, 188 const struct nf_hook_state *state,
@@ -230,8 +236,7 @@ unsigned int ebt_do_table(struct sk_buff *skb,
230 */ 236 */
231 EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar); 237 EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
232 238
233 t = (struct ebt_entry_target *) 239 t = ebt_get_target_c(point);
234 (((char *)point) + point->target_offset);
235 /* standard target */ 240 /* standard target */
236 if (!t->u.target->target) 241 if (!t->u.target->target)
237 verdict = ((struct ebt_standard_target *)t)->verdict; 242 verdict = ((struct ebt_standard_target *)t)->verdict;
@@ -343,6 +348,16 @@ find_table_lock(struct net *net, const char *name, int *error,
343 "ebtable_", error, mutex); 348 "ebtable_", error, mutex);
344} 349}
345 350
351static inline void ebt_free_table_info(struct ebt_table_info *info)
352{
353 int i;
354
355 if (info->chainstack) {
356 for_each_possible_cpu(i)
357 vfree(info->chainstack[i]);
358 vfree(info->chainstack);
359 }
360}
346static inline int 361static inline int
347ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par, 362ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
348 unsigned int *cnt) 363 unsigned int *cnt)
@@ -627,7 +642,7 @@ ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
627 return 1; 642 return 1;
628 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL); 643 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
629 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL); 644 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
630 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); 645 t = ebt_get_target(e);
631 646
632 par.net = net; 647 par.net = net;
633 par.target = t->u.target; 648 par.target = t->u.target;
@@ -706,7 +721,7 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
706 ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j); 721 ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
707 if (ret != 0) 722 if (ret != 0)
708 goto cleanup_watchers; 723 goto cleanup_watchers;
709 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); 724 t = ebt_get_target(e);
710 gap = e->next_offset - e->target_offset; 725 gap = e->next_offset - e->target_offset;
711 726
712 target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0); 727 target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
@@ -779,8 +794,7 @@ static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack
779 if (pos == nentries) 794 if (pos == nentries)
780 continue; 795 continue;
781 } 796 }
782 t = (struct ebt_entry_target *) 797 t = ebt_get_target_c(e);
783 (((char *)e) + e->target_offset);
784 if (strcmp(t->u.name, EBT_STANDARD_TARGET)) 798 if (strcmp(t->u.name, EBT_STANDARD_TARGET))
785 goto letscontinue; 799 goto letscontinue;
786 if (e->target_offset + sizeof(struct ebt_standard_target) > 800 if (e->target_offset + sizeof(struct ebt_standard_target) >
@@ -975,7 +989,7 @@ static void get_counters(const struct ebt_counter *oldcounters,
975static int do_replace_finish(struct net *net, struct ebt_replace *repl, 989static int do_replace_finish(struct net *net, struct ebt_replace *repl,
976 struct ebt_table_info *newinfo) 990 struct ebt_table_info *newinfo)
977{ 991{
978 int ret, i; 992 int ret;
979 struct ebt_counter *counterstmp = NULL; 993 struct ebt_counter *counterstmp = NULL;
980 /* used to be able to unlock earlier */ 994 /* used to be able to unlock earlier */
981 struct ebt_table_info *table; 995 struct ebt_table_info *table;
@@ -1051,13 +1065,8 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
1051 ebt_cleanup_entry, net, NULL); 1065 ebt_cleanup_entry, net, NULL);
1052 1066
1053 vfree(table->entries); 1067 vfree(table->entries);
1054 if (table->chainstack) { 1068 ebt_free_table_info(table);
1055 for_each_possible_cpu(i)
1056 vfree(table->chainstack[i]);
1057 vfree(table->chainstack);
1058 }
1059 vfree(table); 1069 vfree(table);
1060
1061 vfree(counterstmp); 1070 vfree(counterstmp);
1062 1071
1063#ifdef CONFIG_AUDIT 1072#ifdef CONFIG_AUDIT
@@ -1078,11 +1087,7 @@ free_iterate:
1078free_counterstmp: 1087free_counterstmp:
1079 vfree(counterstmp); 1088 vfree(counterstmp);
1080 /* can be initialized in translate_table() */ 1089 /* can be initialized in translate_table() */
1081 if (newinfo->chainstack) { 1090 ebt_free_table_info(newinfo);
1082 for_each_possible_cpu(i)
1083 vfree(newinfo->chainstack[i]);
1084 vfree(newinfo->chainstack);
1085 }
1086 return ret; 1091 return ret;
1087} 1092}
1088 1093
@@ -1147,8 +1152,6 @@ free_newinfo:
1147 1152
1148static void __ebt_unregister_table(struct net *net, struct ebt_table *table) 1153static void __ebt_unregister_table(struct net *net, struct ebt_table *table)
1149{ 1154{
1150 int i;
1151
1152 mutex_lock(&ebt_mutex); 1155 mutex_lock(&ebt_mutex);
1153 list_del(&table->list); 1156 list_del(&table->list);
1154 mutex_unlock(&ebt_mutex); 1157 mutex_unlock(&ebt_mutex);
@@ -1157,11 +1160,7 @@ static void __ebt_unregister_table(struct net *net, struct ebt_table *table)
1157 if (table->private->nentries) 1160 if (table->private->nentries)
1158 module_put(table->me); 1161 module_put(table->me);
1159 vfree(table->private->entries); 1162 vfree(table->private->entries);
1160 if (table->private->chainstack) { 1163 ebt_free_table_info(table->private);
1161 for_each_possible_cpu(i)
1162 vfree(table->private->chainstack[i]);
1163 vfree(table->private->chainstack);
1164 }
1165 vfree(table->private); 1164 vfree(table->private);
1166 kfree(table); 1165 kfree(table);
1167} 1166}
@@ -1263,11 +1262,7 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
1263free_unlock: 1262free_unlock:
1264 mutex_unlock(&ebt_mutex); 1263 mutex_unlock(&ebt_mutex);
1265free_chainstack: 1264free_chainstack:
1266 if (newinfo->chainstack) { 1265 ebt_free_table_info(newinfo);
1267 for_each_possible_cpu(i)
1268 vfree(newinfo->chainstack[i]);
1269 vfree(newinfo->chainstack);
1270 }
1271 vfree(newinfo->entries); 1266 vfree(newinfo->entries);
1272free_newinfo: 1267free_newinfo:
1273 vfree(newinfo); 1268 vfree(newinfo);
@@ -1405,7 +1400,7 @@ static inline int ebt_entry_to_user(struct ebt_entry *e, const char *base,
1405 return -EFAULT; 1400 return -EFAULT;
1406 1401
1407 hlp = ubase + (((char *)e + e->target_offset) - base); 1402 hlp = ubase + (((char *)e + e->target_offset) - base);
1408 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); 1403 t = ebt_get_target_c(e);
1409 1404
1410 ret = EBT_MATCH_ITERATE(e, ebt_match_to_user, base, ubase); 1405 ret = EBT_MATCH_ITERATE(e, ebt_match_to_user, base, ubase);
1411 if (ret != 0) 1406 if (ret != 0)
@@ -1746,7 +1741,7 @@ static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
1746 return ret; 1741 return ret;
1747 target_offset = e->target_offset - (origsize - *size); 1742 target_offset = e->target_offset - (origsize - *size);
1748 1743
1749 t = (struct ebt_entry_target *) ((char *) e + e->target_offset); 1744 t = ebt_get_target(e);
1750 1745
1751 ret = compat_target_to_user(t, dstptr, size); 1746 ret = compat_target_to_user(t, dstptr, size);
1752 if (ret) 1747 if (ret)
@@ -1794,7 +1789,7 @@ static int compat_calc_entry(const struct ebt_entry *e,
1794 EBT_MATCH_ITERATE(e, compat_calc_match, &off); 1789 EBT_MATCH_ITERATE(e, compat_calc_match, &off);
1795 EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off); 1790 EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
1796 1791
1797 t = (const struct ebt_entry_target *) ((char *) e + e->target_offset); 1792 t = ebt_get_target_c(e);
1798 1793
1799 off += xt_compat_target_offset(t->u.target); 1794 off += xt_compat_target_offset(t->u.target);
1800 off += ebt_compat_entry_padsize(); 1795 off += ebt_compat_entry_padsize();
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
deleted file mode 100644
index bb63c9aed55d..000000000000
--- a/net/bridge/netfilter/nft_meta_bridge.c
+++ /dev/null
@@ -1,135 +0,0 @@
1/*
2 * Copyright (c) 2014 Intel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 */
9
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/netlink.h>
14#include <linux/netfilter.h>
15#include <linux/netfilter/nf_tables.h>
16#include <net/netfilter/nf_tables.h>
17#include <net/netfilter/nft_meta.h>
18
19#include "../br_private.h"
20
21static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
22 struct nft_regs *regs,
23 const struct nft_pktinfo *pkt)
24{
25 const struct nft_meta *priv = nft_expr_priv(expr);
26 const struct net_device *in = nft_in(pkt), *out = nft_out(pkt);
27 u32 *dest = &regs->data[priv->dreg];
28 const struct net_bridge_port *p;
29
30 switch (priv->key) {
31 case NFT_META_BRI_IIFNAME:
32 if (in == NULL || (p = br_port_get_rcu(in)) == NULL)
33 goto err;
34 break;
35 case NFT_META_BRI_OIFNAME:
36 if (out == NULL || (p = br_port_get_rcu(out)) == NULL)
37 goto err;
38 break;
39 default:
40 goto out;
41 }
42
43 strncpy((char *)dest, p->br->dev->name, IFNAMSIZ);
44 return;
45out:
46 return nft_meta_get_eval(expr, regs, pkt);
47err:
48 regs->verdict.code = NFT_BREAK;
49}
50
51static int nft_meta_bridge_get_init(const struct nft_ctx *ctx,
52 const struct nft_expr *expr,
53 const struct nlattr * const tb[])
54{
55 struct nft_meta *priv = nft_expr_priv(expr);
56 unsigned int len;
57
58 priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
59 switch (priv->key) {
60 case NFT_META_BRI_IIFNAME:
61 case NFT_META_BRI_OIFNAME:
62 len = IFNAMSIZ;
63 break;
64 default:
65 return nft_meta_get_init(ctx, expr, tb);
66 }
67
68 priv->dreg = nft_parse_register(tb[NFTA_META_DREG]);
69 return nft_validate_register_store(ctx, priv->dreg, NULL,
70 NFT_DATA_VALUE, len);
71}
72
73static struct nft_expr_type nft_meta_bridge_type;
74static const struct nft_expr_ops nft_meta_bridge_get_ops = {
75 .type = &nft_meta_bridge_type,
76 .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
77 .eval = nft_meta_bridge_get_eval,
78 .init = nft_meta_bridge_get_init,
79 .dump = nft_meta_get_dump,
80};
81
82static const struct nft_expr_ops nft_meta_bridge_set_ops = {
83 .type = &nft_meta_bridge_type,
84 .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
85 .eval = nft_meta_set_eval,
86 .init = nft_meta_set_init,
87 .destroy = nft_meta_set_destroy,
88 .dump = nft_meta_set_dump,
89 .validate = nft_meta_set_validate,
90};
91
92static const struct nft_expr_ops *
93nft_meta_bridge_select_ops(const struct nft_ctx *ctx,
94 const struct nlattr * const tb[])
95{
96 if (tb[NFTA_META_KEY] == NULL)
97 return ERR_PTR(-EINVAL);
98
99 if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG])
100 return ERR_PTR(-EINVAL);
101
102 if (tb[NFTA_META_DREG])
103 return &nft_meta_bridge_get_ops;
104
105 if (tb[NFTA_META_SREG])
106 return &nft_meta_bridge_set_ops;
107
108 return ERR_PTR(-EINVAL);
109}
110
111static struct nft_expr_type nft_meta_bridge_type __read_mostly = {
112 .family = NFPROTO_BRIDGE,
113 .name = "meta",
114 .select_ops = nft_meta_bridge_select_ops,
115 .policy = nft_meta_policy,
116 .maxattr = NFTA_META_MAX,
117 .owner = THIS_MODULE,
118};
119
120static int __init nft_meta_bridge_module_init(void)
121{
122 return nft_register_expr(&nft_meta_bridge_type);
123}
124
125static void __exit nft_meta_bridge_module_exit(void)
126{
127 nft_unregister_expr(&nft_meta_bridge_type);
128}
129
130module_init(nft_meta_bridge_module_init);
131module_exit(nft_meta_bridge_module_exit);
132
133MODULE_LICENSE("GPL");
134MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
135MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "meta");
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index fcb40c12b1f8..3b3d33ea9ed8 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2569,6 +2569,11 @@ static int try_write(struct ceph_connection *con)
2569 int ret = 1; 2569 int ret = 1;
2570 2570
2571 dout("try_write start %p state %lu\n", con, con->state); 2571 dout("try_write start %p state %lu\n", con, con->state);
2572 if (con->state != CON_STATE_PREOPEN &&
2573 con->state != CON_STATE_CONNECTING &&
2574 con->state != CON_STATE_NEGOTIATING &&
2575 con->state != CON_STATE_OPEN)
2576 return 0;
2572 2577
2573more: 2578more:
2574 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes); 2579 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
@@ -2594,6 +2599,8 @@ more:
2594 } 2599 }
2595 2600
2596more_kvec: 2601more_kvec:
2602 BUG_ON(!con->sock);
2603
2597 /* kvec data queued? */ 2604 /* kvec data queued? */
2598 if (con->out_kvec_left) { 2605 if (con->out_kvec_left) {
2599 ret = write_partial_kvec(con); 2606 ret = write_partial_kvec(con);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index b3dac24412d3..21ac6e3b96bb 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -209,6 +209,14 @@ static void reopen_session(struct ceph_mon_client *monc)
209 __open_session(monc); 209 __open_session(monc);
210} 210}
211 211
212static void un_backoff(struct ceph_mon_client *monc)
213{
214 monc->hunt_mult /= 2; /* reduce by 50% */
215 if (monc->hunt_mult < 1)
216 monc->hunt_mult = 1;
217 dout("%s hunt_mult now %d\n", __func__, monc->hunt_mult);
218}
219
212/* 220/*
213 * Reschedule delayed work timer. 221 * Reschedule delayed work timer.
214 */ 222 */
@@ -963,6 +971,7 @@ static void delayed_work(struct work_struct *work)
963 if (!monc->hunting) { 971 if (!monc->hunting) {
964 ceph_con_keepalive(&monc->con); 972 ceph_con_keepalive(&monc->con);
965 __validate_auth(monc); 973 __validate_auth(monc);
974 un_backoff(monc);
966 } 975 }
967 976
968 if (is_auth && 977 if (is_auth &&
@@ -1123,9 +1132,8 @@ static void finish_hunting(struct ceph_mon_client *monc)
1123 dout("%s found mon%d\n", __func__, monc->cur_mon); 1132 dout("%s found mon%d\n", __func__, monc->cur_mon);
1124 monc->hunting = false; 1133 monc->hunting = false;
1125 monc->had_a_connection = true; 1134 monc->had_a_connection = true;
1126 monc->hunt_mult /= 2; /* reduce by 50% */ 1135 un_backoff(monc);
1127 if (monc->hunt_mult < 1) 1136 __schedule_delayed(monc);
1128 monc->hunt_mult = 1;
1129 } 1137 }
1130} 1138}
1131 1139
diff --git a/net/compat.c b/net/compat.c
index 5ae7437d3853..7242cce5631b 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -377,7 +377,8 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
377 optname == SO_ATTACH_REUSEPORT_CBPF) 377 optname == SO_ATTACH_REUSEPORT_CBPF)
378 return do_set_attach_filter(sock, level, optname, 378 return do_set_attach_filter(sock, level, optname,
379 optval, optlen); 379 optval, optlen);
380 if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO) 380 if (!COMPAT_USE_64BIT_TIME &&
381 (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
381 return do_set_sock_timeout(sock, level, optname, optval, optlen); 382 return do_set_sock_timeout(sock, level, optname, optval, optlen);
382 383
383 return sock_setsockopt(sock, level, optname, optval, optlen); 384 return sock_setsockopt(sock, level, optname, optval, optlen);
@@ -448,7 +449,8 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
448static int compat_sock_getsockopt(struct socket *sock, int level, int optname, 449static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
449 char __user *optval, int __user *optlen) 450 char __user *optval, int __user *optlen)
450{ 451{
451 if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO) 452 if (!COMPAT_USE_64BIT_TIME &&
453 (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
452 return do_get_sock_timeout(sock, level, optname, optval, optlen); 454 return do_get_sock_timeout(sock, level, optname, optval, optlen);
453 return sock_getsockopt(sock, level, optname, optval, optlen); 455 return sock_getsockopt(sock, level, optname, optval, optlen);
454} 456}
diff --git a/net/core/dev.c b/net/core/dev.c
index d3fdc86516e8..29bf39174900 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1587,7 +1587,7 @@ const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1587 N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN) 1587 N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
1588 N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO) 1588 N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1589 N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO) 1589 N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1590 }; 1590 }
1591#undef N 1591#undef N
1592 return "UNKNOWN_NETDEV_EVENT"; 1592 return "UNKNOWN_NETDEV_EVENT";
1593} 1593}
@@ -2615,17 +2615,16 @@ EXPORT_SYMBOL(netif_device_attach);
2615 * Returns a Tx hash based on the given packet descriptor a Tx queues' number 2615 * Returns a Tx hash based on the given packet descriptor a Tx queues' number
2616 * to be used as a distribution range. 2616 * to be used as a distribution range.
2617 */ 2617 */
2618u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb, 2618static u16 skb_tx_hash(const struct net_device *dev, struct sk_buff *skb)
2619 unsigned int num_tx_queues)
2620{ 2619{
2621 u32 hash; 2620 u32 hash;
2622 u16 qoffset = 0; 2621 u16 qoffset = 0;
2623 u16 qcount = num_tx_queues; 2622 u16 qcount = dev->real_num_tx_queues;
2624 2623
2625 if (skb_rx_queue_recorded(skb)) { 2624 if (skb_rx_queue_recorded(skb)) {
2626 hash = skb_get_rx_queue(skb); 2625 hash = skb_get_rx_queue(skb);
2627 while (unlikely(hash >= num_tx_queues)) 2626 while (unlikely(hash >= qcount))
2628 hash -= num_tx_queues; 2627 hash -= qcount;
2629 return hash; 2628 return hash;
2630 } 2629 }
2631 2630
@@ -2638,7 +2637,6 @@ u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2638 2637
2639 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; 2638 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2640} 2639}
2641EXPORT_SYMBOL(__skb_tx_hash);
2642 2640
2643static void skb_warn_bad_offload(const struct sk_buff *skb) 2641static void skb_warn_bad_offload(const struct sk_buff *skb)
2644{ 2642{
@@ -3114,6 +3112,10 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
3114 if (unlikely(!skb)) 3112 if (unlikely(!skb))
3115 goto out_null; 3113 goto out_null;
3116 3114
3115 skb = sk_validate_xmit_skb(skb, dev);
3116 if (unlikely(!skb))
3117 goto out_null;
3118
3117 if (netif_needs_gso(skb, features)) { 3119 if (netif_needs_gso(skb, features)) {
3118 struct sk_buff *segs; 3120 struct sk_buff *segs;
3119 3121
@@ -7922,6 +7924,8 @@ int register_netdevice(struct net_device *dev)
7922 int ret; 7924 int ret;
7923 struct net *net = dev_net(dev); 7925 struct net *net = dev_net(dev);
7924 7926
7927 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
7928 NETDEV_FEATURE_COUNT);
7925 BUG_ON(dev_boot_phase); 7929 BUG_ON(dev_boot_phase);
7926 ASSERT_RTNL(); 7930 ASSERT_RTNL();
7927 7931
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 4650fd6d678c..c15075dc7572 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -110,6 +110,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
110 [NETIF_F_HW_ESP_TX_CSUM_BIT] = "esp-tx-csum-hw-offload", 110 [NETIF_F_HW_ESP_TX_CSUM_BIT] = "esp-tx-csum-hw-offload",
111 [NETIF_F_RX_UDP_TUNNEL_PORT_BIT] = "rx-udp_tunnel-port-offload", 111 [NETIF_F_RX_UDP_TUNNEL_PORT_BIT] = "rx-udp_tunnel-port-offload",
112 [NETIF_F_HW_TLS_RECORD_BIT] = "tls-hw-record", 112 [NETIF_F_HW_TLS_RECORD_BIT] = "tls-hw-record",
113 [NETIF_F_HW_TLS_TX_BIT] = "tls-hw-tx-offload",
113}; 114};
114 115
115static const char 116static const char
@@ -211,23 +212,6 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
211 return ret; 212 return ret;
212} 213}
213 214
214static int phy_get_sset_count(struct phy_device *phydev)
215{
216 int ret;
217
218 if (phydev->drv->get_sset_count &&
219 phydev->drv->get_strings &&
220 phydev->drv->get_stats) {
221 mutex_lock(&phydev->lock);
222 ret = phydev->drv->get_sset_count(phydev);
223 mutex_unlock(&phydev->lock);
224
225 return ret;
226 }
227
228 return -EOPNOTSUPP;
229}
230
231static int __ethtool_get_sset_count(struct net_device *dev, int sset) 215static int __ethtool_get_sset_count(struct net_device *dev, int sset)
232{ 216{
233 const struct ethtool_ops *ops = dev->ethtool_ops; 217 const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -244,12 +228,9 @@ static int __ethtool_get_sset_count(struct net_device *dev, int sset)
244 if (sset == ETH_SS_PHY_TUNABLES) 228 if (sset == ETH_SS_PHY_TUNABLES)
245 return ARRAY_SIZE(phy_tunable_strings); 229 return ARRAY_SIZE(phy_tunable_strings);
246 230
247 if (sset == ETH_SS_PHY_STATS) { 231 if (sset == ETH_SS_PHY_STATS && dev->phydev &&
248 if (dev->phydev) 232 !ops->get_ethtool_phy_stats)
249 return phy_get_sset_count(dev->phydev); 233 return phy_ethtool_get_sset_count(dev->phydev);
250 else
251 return -EOPNOTSUPP;
252 }
253 234
254 if (ops->get_sset_count && ops->get_strings) 235 if (ops->get_sset_count && ops->get_strings)
255 return ops->get_sset_count(dev, sset); 236 return ops->get_sset_count(dev, sset);
@@ -272,17 +253,10 @@ static void __ethtool_get_strings(struct net_device *dev,
272 memcpy(data, tunable_strings, sizeof(tunable_strings)); 253 memcpy(data, tunable_strings, sizeof(tunable_strings));
273 else if (stringset == ETH_SS_PHY_TUNABLES) 254 else if (stringset == ETH_SS_PHY_TUNABLES)
274 memcpy(data, phy_tunable_strings, sizeof(phy_tunable_strings)); 255 memcpy(data, phy_tunable_strings, sizeof(phy_tunable_strings));
275 else if (stringset == ETH_SS_PHY_STATS) { 256 else if (stringset == ETH_SS_PHY_STATS && dev->phydev &&
276 struct phy_device *phydev = dev->phydev; 257 !ops->get_ethtool_phy_stats)
277 258 phy_ethtool_get_strings(dev->phydev, data);
278 if (phydev) { 259 else
279 mutex_lock(&phydev->lock);
280 phydev->drv->get_strings(phydev, data);
281 mutex_unlock(&phydev->lock);
282 } else {
283 return;
284 }
285 } else
286 /* ops->get_strings is valid because checked earlier */ 260 /* ops->get_strings is valid because checked earlier */
287 ops->get_strings(dev, stringset, data); 261 ops->get_strings(dev, stringset, data);
288} 262}
@@ -1033,6 +1007,11 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
1033 info_size = sizeof(info); 1007 info_size = sizeof(info);
1034 if (copy_from_user(&info, useraddr, info_size)) 1008 if (copy_from_user(&info, useraddr, info_size))
1035 return -EFAULT; 1009 return -EFAULT;
1010 /* Since malicious users may modify the original data,
1011 * we need to check whether FLOW_RSS is still requested.
1012 */
1013 if (!(info.flow_type & FLOW_RSS))
1014 return -EINVAL;
1036 } 1015 }
1037 1016
1038 if (info.cmd == ETHTOOL_GRXCLSRLALL) { 1017 if (info.cmd == ETHTOOL_GRXCLSRLALL) {
@@ -1994,15 +1973,19 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
1994 1973
1995static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) 1974static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
1996{ 1975{
1997 struct ethtool_stats stats; 1976 const struct ethtool_ops *ops = dev->ethtool_ops;
1998 struct phy_device *phydev = dev->phydev; 1977 struct phy_device *phydev = dev->phydev;
1978 struct ethtool_stats stats;
1999 u64 *data; 1979 u64 *data;
2000 int ret, n_stats; 1980 int ret, n_stats;
2001 1981
2002 if (!phydev) 1982 if (!phydev && (!ops->get_ethtool_phy_stats || !ops->get_sset_count))
2003 return -EOPNOTSUPP; 1983 return -EOPNOTSUPP;
2004 1984
2005 n_stats = phy_get_sset_count(phydev); 1985 if (dev->phydev && !ops->get_ethtool_phy_stats)
1986 n_stats = phy_ethtool_get_sset_count(dev->phydev);
1987 else
1988 n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
2006 if (n_stats < 0) 1989 if (n_stats < 0)
2007 return n_stats; 1990 return n_stats;
2008 if (n_stats > S32_MAX / sizeof(u64)) 1991 if (n_stats > S32_MAX / sizeof(u64))
@@ -2017,9 +2000,13 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
2017 if (n_stats && !data) 2000 if (n_stats && !data)
2018 return -ENOMEM; 2001 return -ENOMEM;
2019 2002
2020 mutex_lock(&phydev->lock); 2003 if (dev->phydev && !ops->get_ethtool_phy_stats) {
2021 phydev->drv->get_stats(phydev, &stats, data); 2004 ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
2022 mutex_unlock(&phydev->lock); 2005 if (ret < 0)
2006 return ret;
2007 } else {
2008 ops->get_ethtool_phy_stats(dev, &stats, data);
2009 }
2023 2010
2024 ret = -EFAULT; 2011 ret = -EFAULT;
2025 if (copy_to_user(useraddr, &stats, sizeof(stats))) 2012 if (copy_to_user(useraddr, &stats, sizeof(stats)))
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c647cfe114e0..c642304f178c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1305,7 +1305,7 @@ static void skb_headers_offset_update(struct sk_buff *skb, int off)
1305 skb->inner_mac_header += off; 1305 skb->inner_mac_header += off;
1306} 1306}
1307 1307
1308static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) 1308void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
1309{ 1309{
1310 __copy_skb_header(new, old); 1310 __copy_skb_header(new, old);
1311 1311
@@ -1313,6 +1313,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
1313 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 1313 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
1314 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 1314 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
1315} 1315}
1316EXPORT_SYMBOL(skb_copy_header);
1316 1317
1317static inline int skb_alloc_rx_flag(const struct sk_buff *skb) 1318static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
1318{ 1319{
@@ -1355,7 +1356,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
1355 1356
1356 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); 1357 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
1357 1358
1358 copy_skb_header(n, skb); 1359 skb_copy_header(n, skb);
1359 return n; 1360 return n;
1360} 1361}
1361EXPORT_SYMBOL(skb_copy); 1362EXPORT_SYMBOL(skb_copy);
@@ -1419,7 +1420,7 @@ struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1419 skb_clone_fraglist(n); 1420 skb_clone_fraglist(n);
1420 } 1421 }
1421 1422
1422 copy_skb_header(n, skb); 1423 skb_copy_header(n, skb);
1423out: 1424out:
1424 return n; 1425 return n;
1425} 1426}
@@ -1599,7 +1600,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1599 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 1600 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
1600 skb->len + head_copy_len)); 1601 skb->len + head_copy_len));
1601 1602
1602 copy_skb_header(n, skb); 1603 skb_copy_header(n, skb);
1603 1604
1604 skb_headers_offset_update(n, newheadroom - oldheadroom); 1605 skb_headers_offset_update(n, newheadroom - oldheadroom);
1605 1606
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 92d016e87816..385f153fe031 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -126,6 +126,16 @@ static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
126 DCCPF_SEQ_WMAX)); 126 DCCPF_SEQ_WMAX));
127} 127}
128 128
129static void dccp_tasklet_schedule(struct sock *sk)
130{
131 struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
132
133 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
134 sock_hold(sk);
135 __tasklet_schedule(t);
136 }
137}
138
129static void ccid2_hc_tx_rto_expire(struct timer_list *t) 139static void ccid2_hc_tx_rto_expire(struct timer_list *t)
130{ 140{
131 struct ccid2_hc_tx_sock *hc = from_timer(hc, t, tx_rtotimer); 141 struct ccid2_hc_tx_sock *hc = from_timer(hc, t, tx_rtotimer);
@@ -166,7 +176,7 @@ static void ccid2_hc_tx_rto_expire(struct timer_list *t)
166 176
167 /* if we were blocked before, we may now send cwnd=1 packet */ 177 /* if we were blocked before, we may now send cwnd=1 packet */
168 if (sender_was_blocked) 178 if (sender_was_blocked)
169 tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); 179 dccp_tasklet_schedule(sk);
170 /* restart backed-off timer */ 180 /* restart backed-off timer */
171 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); 181 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
172out: 182out:
@@ -706,7 +716,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
706done: 716done:
707 /* check if incoming Acks allow pending packets to be sent */ 717 /* check if incoming Acks allow pending packets to be sent */
708 if (sender_was_blocked && !ccid2_cwnd_network_limited(hc)) 718 if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
709 tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); 719 dccp_tasklet_schedule(sk);
710 dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks); 720 dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
711} 721}
712 722
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index b50a8732ff43..1501a20a94ca 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -232,6 +232,7 @@ static void dccp_write_xmitlet(unsigned long data)
232 else 232 else
233 dccp_write_xmit(sk); 233 dccp_write_xmit(sk);
234 bh_unlock_sock(sk); 234 bh_unlock_sock(sk);
235 sock_put(sk);
235} 236}
236 237
237static void dccp_write_xmit_timer(struct timer_list *t) 238static void dccp_write_xmit_timer(struct timer_list *t)
@@ -240,7 +241,6 @@ static void dccp_write_xmit_timer(struct timer_list *t)
240 struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk; 241 struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk;
241 242
242 dccp_write_xmitlet((unsigned long)sk); 243 dccp_write_xmitlet((unsigned long)sk);
243 sock_put(sk);
244} 244}
245 245
246void dccp_init_xmit_timers(struct sock *sk) 246void dccp_init_xmit_timers(struct sock *sk)
diff --git a/net/dsa/master.c b/net/dsa/master.c
index 90e6df0351eb..c90ee3227dea 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -22,7 +22,7 @@ static void dsa_master_get_ethtool_stats(struct net_device *dev,
22 int port = cpu_dp->index; 22 int port = cpu_dp->index;
23 int count = 0; 23 int count = 0;
24 24
25 if (ops && ops->get_sset_count && ops->get_ethtool_stats) { 25 if (ops->get_sset_count && ops->get_ethtool_stats) {
26 count = ops->get_sset_count(dev, ETH_SS_STATS); 26 count = ops->get_sset_count(dev, ETH_SS_STATS);
27 ops->get_ethtool_stats(dev, stats, data); 27 ops->get_ethtool_stats(dev, stats, data);
28 } 28 }
@@ -31,6 +31,32 @@ static void dsa_master_get_ethtool_stats(struct net_device *dev,
31 ds->ops->get_ethtool_stats(ds, port, data + count); 31 ds->ops->get_ethtool_stats(ds, port, data + count);
32} 32}
33 33
34static void dsa_master_get_ethtool_phy_stats(struct net_device *dev,
35 struct ethtool_stats *stats,
36 uint64_t *data)
37{
38 struct dsa_port *cpu_dp = dev->dsa_ptr;
39 const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
40 struct dsa_switch *ds = cpu_dp->ds;
41 int port = cpu_dp->index;
42 int count = 0;
43
44 if (dev->phydev && !ops->get_ethtool_phy_stats) {
45 count = phy_ethtool_get_sset_count(dev->phydev);
46 if (count >= 0)
47 phy_ethtool_get_stats(dev->phydev, stats, data);
48 } else if (ops->get_sset_count && ops->get_ethtool_phy_stats) {
49 count = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
50 ops->get_ethtool_phy_stats(dev, stats, data);
51 }
52
53 if (count < 0)
54 count = 0;
55
56 if (ds->ops->get_ethtool_phy_stats)
57 ds->ops->get_ethtool_phy_stats(ds, port, data + count);
58}
59
34static int dsa_master_get_sset_count(struct net_device *dev, int sset) 60static int dsa_master_get_sset_count(struct net_device *dev, int sset)
35{ 61{
36 struct dsa_port *cpu_dp = dev->dsa_ptr; 62 struct dsa_port *cpu_dp = dev->dsa_ptr;
@@ -38,11 +64,17 @@ static int dsa_master_get_sset_count(struct net_device *dev, int sset)
38 struct dsa_switch *ds = cpu_dp->ds; 64 struct dsa_switch *ds = cpu_dp->ds;
39 int count = 0; 65 int count = 0;
40 66
41 if (ops && ops->get_sset_count) 67 if (sset == ETH_SS_PHY_STATS && dev->phydev &&
42 count += ops->get_sset_count(dev, sset); 68 !ops->get_ethtool_phy_stats)
69 count = phy_ethtool_get_sset_count(dev->phydev);
70 else if (ops->get_sset_count)
71 count = ops->get_sset_count(dev, sset);
72
73 if (count < 0)
74 count = 0;
43 75
44 if (sset == ETH_SS_STATS && ds->ops->get_sset_count) 76 if (ds->ops->get_sset_count)
45 count += ds->ops->get_sset_count(ds, cpu_dp->index); 77 count += ds->ops->get_sset_count(ds, cpu_dp->index, sset);
46 78
47 return count; 79 return count;
48} 80}
@@ -64,19 +96,28 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
64 /* We do not want to be NULL-terminated, since this is a prefix */ 96 /* We do not want to be NULL-terminated, since this is a prefix */
65 pfx[sizeof(pfx) - 1] = '_'; 97 pfx[sizeof(pfx) - 1] = '_';
66 98
67 if (ops && ops->get_sset_count && ops->get_strings) { 99 if (stringset == ETH_SS_PHY_STATS && dev->phydev &&
68 mcount = ops->get_sset_count(dev, ETH_SS_STATS); 100 !ops->get_ethtool_phy_stats) {
101 mcount = phy_ethtool_get_sset_count(dev->phydev);
102 if (mcount < 0)
103 mcount = 0;
104 else
105 phy_ethtool_get_strings(dev->phydev, data);
106 } else if (ops->get_sset_count && ops->get_strings) {
107 mcount = ops->get_sset_count(dev, stringset);
108 if (mcount < 0)
109 mcount = 0;
69 ops->get_strings(dev, stringset, data); 110 ops->get_strings(dev, stringset, data);
70 } 111 }
71 112
72 if (stringset == ETH_SS_STATS && ds->ops->get_strings) { 113 if (ds->ops->get_strings) {
73 ndata = data + mcount * len; 114 ndata = data + mcount * len;
74 /* This function copies ETH_GSTRINGS_LEN bytes, we will mangle 115 /* This function copies ETH_GSTRINGS_LEN bytes, we will mangle
75 * the output after to prepend our CPU port prefix we 116 * the output after to prepend our CPU port prefix we
76 * constructed earlier 117 * constructed earlier
77 */ 118 */
78 ds->ops->get_strings(ds, port, ndata); 119 ds->ops->get_strings(ds, port, stringset, ndata);
79 count = ds->ops->get_sset_count(ds, port); 120 count = ds->ops->get_sset_count(ds, port, stringset);
80 for (i = 0; i < count; i++) { 121 for (i = 0; i < count; i++) {
81 memmove(ndata + (i * len + sizeof(pfx)), 122 memmove(ndata + (i * len + sizeof(pfx)),
82 ndata + i * len, len - sizeof(pfx)); 123 ndata + i * len, len - sizeof(pfx));
@@ -102,6 +143,7 @@ static int dsa_master_ethtool_setup(struct net_device *dev)
102 ops->get_sset_count = dsa_master_get_sset_count; 143 ops->get_sset_count = dsa_master_get_sset_count;
103 ops->get_ethtool_stats = dsa_master_get_ethtool_stats; 144 ops->get_ethtool_stats = dsa_master_get_ethtool_stats;
104 ops->get_strings = dsa_master_get_strings; 145 ops->get_strings = dsa_master_get_strings;
146 ops->get_ethtool_phy_stats = dsa_master_get_ethtool_phy_stats;
105 147
106 dev->ethtool_ops = ops; 148 dev->ethtool_ops = ops;
107 149
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 7acc1169d75e..2413beb995be 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -273,25 +273,38 @@ int dsa_port_vlan_del(struct dsa_port *dp,
273 return 0; 273 return 0;
274} 274}
275 275
276static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable) 276static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
277{ 277{
278 struct device_node *port_dn = dp->dn;
279 struct device_node *phy_dn; 278 struct device_node *phy_dn;
280 struct dsa_switch *ds = dp->ds;
281 struct phy_device *phydev; 279 struct phy_device *phydev;
282 int port = dp->index;
283 int err = 0;
284 280
285 phy_dn = of_parse_phandle(port_dn, "phy-handle", 0); 281 phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
286 if (!phy_dn) 282 if (!phy_dn)
287 return 0; 283 return NULL;
288 284
289 phydev = of_phy_find_device(phy_dn); 285 phydev = of_phy_find_device(phy_dn);
290 if (!phydev) { 286 if (!phydev) {
291 err = -EPROBE_DEFER; 287 of_node_put(phy_dn);
292 goto err_put_of; 288 return ERR_PTR(-EPROBE_DEFER);
293 } 289 }
294 290
291 return phydev;
292}
293
294static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
295{
296 struct dsa_switch *ds = dp->ds;
297 struct phy_device *phydev;
298 int port = dp->index;
299 int err = 0;
300
301 phydev = dsa_port_get_phy_device(dp);
302 if (!phydev)
303 return 0;
304
305 if (IS_ERR(phydev))
306 return PTR_ERR(phydev);
307
295 if (enable) { 308 if (enable) {
296 err = genphy_config_init(phydev); 309 err = genphy_config_init(phydev);
297 if (err < 0) 310 if (err < 0)
@@ -317,8 +330,6 @@ static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
317 330
318err_put_dev: 331err_put_dev:
319 put_device(&phydev->mdio.dev); 332 put_device(&phydev->mdio.dev);
320err_put_of:
321 of_node_put(phy_dn);
322 return err; 333 return err;
323} 334}
324 335
@@ -372,3 +383,60 @@ void dsa_port_link_unregister_of(struct dsa_port *dp)
372 else 383 else
373 dsa_port_setup_phy_of(dp, false); 384 dsa_port_setup_phy_of(dp, false);
374} 385}
386
387int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
388{
389 struct phy_device *phydev;
390 int ret = -EOPNOTSUPP;
391
392 if (of_phy_is_fixed_link(dp->dn))
393 return ret;
394
395 phydev = dsa_port_get_phy_device(dp);
396 if (IS_ERR_OR_NULL(phydev))
397 return ret;
398
399 ret = phy_ethtool_get_strings(phydev, data);
400 put_device(&phydev->mdio.dev);
401
402 return ret;
403}
404EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);
405
406int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
407{
408 struct phy_device *phydev;
409 int ret = -EOPNOTSUPP;
410
411 if (of_phy_is_fixed_link(dp->dn))
412 return ret;
413
414 phydev = dsa_port_get_phy_device(dp);
415 if (IS_ERR_OR_NULL(phydev))
416 return ret;
417
418 ret = phy_ethtool_get_stats(phydev, NULL, data);
419 put_device(&phydev->mdio.dev);
420
421 return ret;
422}
423EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);
424
425int dsa_port_get_phy_sset_count(struct dsa_port *dp)
426{
427 struct phy_device *phydev;
428 int ret = -EOPNOTSUPP;
429
430 if (of_phy_is_fixed_link(dp->dn))
431 return ret;
432
433 phydev = dsa_port_get_phy_device(dp);
434 if (IS_ERR_OR_NULL(phydev))
435 return ret;
436
437 ret = phy_ethtool_get_sset_count(phydev);
438 put_device(&phydev->mdio.dev);
439
440 return ret;
441}
442EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 18561af7a8f1..c287f1ef964c 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -560,7 +560,8 @@ static void dsa_slave_get_strings(struct net_device *dev,
560 strncpy(data + 2 * len, "rx_packets", len); 560 strncpy(data + 2 * len, "rx_packets", len);
561 strncpy(data + 3 * len, "rx_bytes", len); 561 strncpy(data + 3 * len, "rx_bytes", len);
562 if (ds->ops->get_strings) 562 if (ds->ops->get_strings)
563 ds->ops->get_strings(ds, dp->index, data + 4 * len); 563 ds->ops->get_strings(ds, dp->index, stringset,
564 data + 4 * len);
564 } 565 }
565} 566}
566 567
@@ -605,7 +606,7 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
605 606
606 count = 4; 607 count = 4;
607 if (ds->ops->get_sset_count) 608 if (ds->ops->get_sset_count)
608 count += ds->ops->get_sset_count(ds, dp->index); 609 count += ds->ops->get_sset_count(ds, dp->index, sset);
609 610
610 return count; 611 return count;
611 } 612 }
@@ -1440,6 +1441,7 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused,
1440 unsigned long event, void *ptr) 1441 unsigned long event, void *ptr)
1441{ 1442{
1442 struct net_device *dev = switchdev_notifier_info_to_dev(ptr); 1443 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
1444 struct switchdev_notifier_fdb_info *fdb_info = ptr;
1443 struct dsa_switchdev_event_work *switchdev_work; 1445 struct dsa_switchdev_event_work *switchdev_work;
1444 1446
1445 if (!dsa_slave_dev_check(dev)) 1447 if (!dsa_slave_dev_check(dev))
@@ -1457,8 +1459,10 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused,
1457 switch (event) { 1459 switch (event) {
1458 case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ 1460 case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
1459 case SWITCHDEV_FDB_DEL_TO_DEVICE: 1461 case SWITCHDEV_FDB_DEL_TO_DEVICE:
1462 if (!fdb_info->added_by_user)
1463 break;
1460 if (dsa_slave_switchdev_fdb_work_init(switchdev_work, 1464 if (dsa_slave_switchdev_fdb_work_init(switchdev_work,
1461 ptr)) 1465 fdb_info))
1462 goto err_fdb_work_init; 1466 goto err_fdb_work_init;
1463 dev_hold(dev); 1467 dev_hold(dev);
1464 break; 1468 break;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 3ebf599cebae..b403499fdabe 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -994,7 +994,9 @@ const struct proto_ops inet_stream_ops = {
994 .getsockopt = sock_common_getsockopt, 994 .getsockopt = sock_common_getsockopt,
995 .sendmsg = inet_sendmsg, 995 .sendmsg = inet_sendmsg,
996 .recvmsg = inet_recvmsg, 996 .recvmsg = inet_recvmsg,
997#ifdef CONFIG_MMU
997 .mmap = tcp_mmap, 998 .mmap = tcp_mmap,
999#endif
998 .sendpage = inet_sendpage, 1000 .sendpage = inet_sendpage,
999 .splice_read = tcp_splice_read, 1001 .splice_read = tcp_splice_read,
1000 .read_sock = tcp_read_sock, 1002 .read_sock = tcp_read_sock,
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 9c169bb2444d..dfe5b22f6ed4 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -578,6 +578,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
578 int tunnel_hlen; 578 int tunnel_hlen;
579 int version; 579 int version;
580 __be16 df; 580 __be16 df;
581 int nhoff;
581 582
582 tun_info = skb_tunnel_info(skb); 583 tun_info = skb_tunnel_info(skb);
583 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) || 584 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
@@ -605,6 +606,11 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
605 truncate = true; 606 truncate = true;
606 } 607 }
607 608
609 nhoff = skb_network_header(skb) - skb_mac_header(skb);
610 if (skb->protocol == htons(ETH_P_IP) &&
611 (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
612 truncate = true;
613
608 if (version == 1) { 614 if (version == 1) {
609 erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)), 615 erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
610 ntohl(md->u.index), truncate, true); 616 ntohl(md->u.index), truncate, true);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 44b308d93ec2..444f125f3974 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -300,7 +300,7 @@ ipt_do_table(struct sk_buff *skb,
300 counter = xt_get_this_cpu_counter(&e->counters); 300 counter = xt_get_this_cpu_counter(&e->counters);
301 ADD_COUNTER(*counter, skb->len, 1); 301 ADD_COUNTER(*counter, skb->len, 1);
302 302
303 t = ipt_get_target(e); 303 t = ipt_get_target_c(e);
304 WARN_ON(!t->u.kernel.target); 304 WARN_ON(!t->u.kernel.target);
305 305
306#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) 306#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index a03e4e7ef5f9..ce1512b02cb2 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -47,7 +47,7 @@ static int masquerade_tg_check(const struct xt_tgchk_param *par)
47static unsigned int 47static unsigned int
48masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par) 48masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
49{ 49{
50 struct nf_nat_range range; 50 struct nf_nat_range2 range;
51 const struct nf_nat_ipv4_multi_range_compat *mr; 51 const struct nf_nat_ipv4_multi_range_compat *mr;
52 52
53 mr = par->targinfo; 53 mr = par->targinfo;
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index 0f7255cc65ee..529d89ec31e8 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -33,8 +33,7 @@ static const struct xt_table nf_nat_ipv4_table = {
33 33
34static unsigned int iptable_nat_do_chain(void *priv, 34static unsigned int iptable_nat_do_chain(void *priv,
35 struct sk_buff *skb, 35 struct sk_buff *skb,
36 const struct nf_hook_state *state, 36 const struct nf_hook_state *state)
37 struct nf_conn *ct)
38{ 37{
39 return ipt_do_table(skb, state, state->net->ipv4.nat_table); 38 return ipt_do_table(skb, state, state->net->ipv4.nat_table);
40} 39}
diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c
index 0cd46bffa469..e1e56d7123d2 100644
--- a/net/ipv4/netfilter/nf_flow_table_ipv4.c
+++ b/net/ipv4/netfilter/nf_flow_table_ipv4.c
@@ -2,265 +2,12 @@
2#include <linux/init.h> 2#include <linux/init.h>
3#include <linux/module.h> 3#include <linux/module.h>
4#include <linux/netfilter.h> 4#include <linux/netfilter.h>
5#include <linux/rhashtable.h>
6#include <linux/ip.h>
7#include <linux/netdevice.h>
8#include <net/ip.h>
9#include <net/neighbour.h>
10#include <net/netfilter/nf_flow_table.h> 5#include <net/netfilter/nf_flow_table.h>
11#include <net/netfilter/nf_tables.h> 6#include <net/netfilter/nf_tables.h>
12/* For layer 4 checksum field offset. */
13#include <linux/tcp.h>
14#include <linux/udp.h>
15
16static int nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
17 __be32 addr, __be32 new_addr)
18{
19 struct tcphdr *tcph;
20
21 if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
22 skb_try_make_writable(skb, thoff + sizeof(*tcph)))
23 return -1;
24
25 tcph = (void *)(skb_network_header(skb) + thoff);
26 inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
27
28 return 0;
29}
30
31static int nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
32 __be32 addr, __be32 new_addr)
33{
34 struct udphdr *udph;
35
36 if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
37 skb_try_make_writable(skb, thoff + sizeof(*udph)))
38 return -1;
39
40 udph = (void *)(skb_network_header(skb) + thoff);
41 if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
42 inet_proto_csum_replace4(&udph->check, skb, addr,
43 new_addr, true);
44 if (!udph->check)
45 udph->check = CSUM_MANGLED_0;
46 }
47
48 return 0;
49}
50
51static int nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
52 unsigned int thoff, __be32 addr,
53 __be32 new_addr)
54{
55 switch (iph->protocol) {
56 case IPPROTO_TCP:
57 if (nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr) < 0)
58 return NF_DROP;
59 break;
60 case IPPROTO_UDP:
61 if (nf_flow_nat_ip_udp(skb, thoff, addr, new_addr) < 0)
62 return NF_DROP;
63 break;
64 }
65
66 return 0;
67}
68
69static int nf_flow_snat_ip(const struct flow_offload *flow, struct sk_buff *skb,
70 struct iphdr *iph, unsigned int thoff,
71 enum flow_offload_tuple_dir dir)
72{
73 __be32 addr, new_addr;
74
75 switch (dir) {
76 case FLOW_OFFLOAD_DIR_ORIGINAL:
77 addr = iph->saddr;
78 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
79 iph->saddr = new_addr;
80 break;
81 case FLOW_OFFLOAD_DIR_REPLY:
82 addr = iph->daddr;
83 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
84 iph->daddr = new_addr;
85 break;
86 default:
87 return -1;
88 }
89 csum_replace4(&iph->check, addr, new_addr);
90
91 return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
92}
93
94static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
95 struct iphdr *iph, unsigned int thoff,
96 enum flow_offload_tuple_dir dir)
97{
98 __be32 addr, new_addr;
99
100 switch (dir) {
101 case FLOW_OFFLOAD_DIR_ORIGINAL:
102 addr = iph->daddr;
103 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
104 iph->daddr = new_addr;
105 break;
106 case FLOW_OFFLOAD_DIR_REPLY:
107 addr = iph->saddr;
108 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
109 iph->saddr = new_addr;
110 break;
111 default:
112 return -1;
113 }
114 csum_replace4(&iph->check, addr, new_addr);
115
116 return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
117}
118
119static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
120 enum flow_offload_tuple_dir dir)
121{
122 struct iphdr *iph = ip_hdr(skb);
123 unsigned int thoff = iph->ihl * 4;
124
125 if (flow->flags & FLOW_OFFLOAD_SNAT &&
126 (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
127 nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
128 return -1;
129 if (flow->flags & FLOW_OFFLOAD_DNAT &&
130 (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
131 nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
132 return -1;
133
134 return 0;
135}
136
137static bool ip_has_options(unsigned int thoff)
138{
139 return thoff != sizeof(struct iphdr);
140}
141
142static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
143 struct flow_offload_tuple *tuple)
144{
145 struct flow_ports *ports;
146 unsigned int thoff;
147 struct iphdr *iph;
148
149 if (!pskb_may_pull(skb, sizeof(*iph)))
150 return -1;
151
152 iph = ip_hdr(skb);
153 thoff = iph->ihl * 4;
154
155 if (ip_is_fragment(iph) ||
156 unlikely(ip_has_options(thoff)))
157 return -1;
158
159 if (iph->protocol != IPPROTO_TCP &&
160 iph->protocol != IPPROTO_UDP)
161 return -1;
162
163 thoff = iph->ihl * 4;
164 if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
165 return -1;
166
167 ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
168
169 tuple->src_v4.s_addr = iph->saddr;
170 tuple->dst_v4.s_addr = iph->daddr;
171 tuple->src_port = ports->source;
172 tuple->dst_port = ports->dest;
173 tuple->l3proto = AF_INET;
174 tuple->l4proto = iph->protocol;
175 tuple->iifidx = dev->ifindex;
176
177 return 0;
178}
179
180/* Based on ip_exceeds_mtu(). */
181static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
182{
183 if (skb->len <= mtu)
184 return false;
185
186 if ((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0)
187 return false;
188
189 if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
190 return false;
191
192 return true;
193}
194
195static bool nf_flow_exceeds_mtu(struct sk_buff *skb, const struct rtable *rt)
196{
197 u32 mtu;
198
199 mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
200 if (__nf_flow_exceeds_mtu(skb, mtu))
201 return true;
202
203 return false;
204}
205
206unsigned int
207nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
208 const struct nf_hook_state *state)
209{
210 struct flow_offload_tuple_rhash *tuplehash;
211 struct nf_flowtable *flow_table = priv;
212 struct flow_offload_tuple tuple = {};
213 enum flow_offload_tuple_dir dir;
214 struct flow_offload *flow;
215 struct net_device *outdev;
216 const struct rtable *rt;
217 struct iphdr *iph;
218 __be32 nexthop;
219
220 if (skb->protocol != htons(ETH_P_IP))
221 return NF_ACCEPT;
222
223 if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
224 return NF_ACCEPT;
225
226 tuplehash = flow_offload_lookup(flow_table, &tuple);
227 if (tuplehash == NULL)
228 return NF_ACCEPT;
229
230 outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
231 if (!outdev)
232 return NF_ACCEPT;
233
234 dir = tuplehash->tuple.dir;
235 flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
236
237 rt = (const struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
238 if (unlikely(nf_flow_exceeds_mtu(skb, rt)))
239 return NF_ACCEPT;
240
241 if (skb_try_make_writable(skb, sizeof(*iph)))
242 return NF_DROP;
243
244 if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
245 nf_flow_nat_ip(flow, skb, dir) < 0)
246 return NF_DROP;
247
248 flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
249 iph = ip_hdr(skb);
250 ip_decrease_ttl(iph);
251
252 skb->dev = outdev;
253 nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
254 neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
255
256 return NF_STOLEN;
257}
258EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);
259 7
260static struct nf_flowtable_type flowtable_ipv4 = { 8static struct nf_flowtable_type flowtable_ipv4 = {
261 .family = NFPROTO_IPV4, 9 .family = NFPROTO_IPV4,
262 .params = &nf_flow_offload_rhash_params, 10 .init = nf_flow_table_init,
263 .gc = nf_flow_offload_work_gc,
264 .free = nf_flow_table_free, 11 .free = nf_flow_table_free,
265 .hook = nf_flow_offload_ip_hook, 12 .hook = nf_flow_offload_ip_hook,
266 .owner = THIS_MODULE, 13 .owner = THIS_MODULE,
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index ac8342dcb55e..4e6b53ab6c33 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -395,7 +395,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
395static void ip_nat_q931_expect(struct nf_conn *new, 395static void ip_nat_q931_expect(struct nf_conn *new,
396 struct nf_conntrack_expect *this) 396 struct nf_conntrack_expect *this)
397{ 397{
398 struct nf_nat_range range; 398 struct nf_nat_range2 range;
399 399
400 if (this->tuple.src.u3.ip != 0) { /* Only accept calls from GK */ 400 if (this->tuple.src.u3.ip != 0) { /* Only accept calls from GK */
401 nf_nat_follow_master(new, this); 401 nf_nat_follow_master(new, this);
@@ -497,7 +497,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
497static void ip_nat_callforwarding_expect(struct nf_conn *new, 497static void ip_nat_callforwarding_expect(struct nf_conn *new,
498 struct nf_conntrack_expect *this) 498 struct nf_conntrack_expect *this)
499{ 499{
500 struct nf_nat_range range; 500 struct nf_nat_range2 range;
501 501
502 /* This must be a fresh one. */ 502 /* This must be a fresh one. */
503 BUG_ON(new->status & IPS_NAT_DONE_MASK); 503 BUG_ON(new->status & IPS_NAT_DONE_MASK);
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index f7ff6a364d7b..325e02956bf5 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -63,7 +63,7 @@ static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
63#endif /* CONFIG_XFRM */ 63#endif /* CONFIG_XFRM */
64 64
65static bool nf_nat_ipv4_in_range(const struct nf_conntrack_tuple *t, 65static bool nf_nat_ipv4_in_range(const struct nf_conntrack_tuple *t,
66 const struct nf_nat_range *range) 66 const struct nf_nat_range2 *range)
67{ 67{
68 return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) && 68 return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
69 ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip); 69 ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);
@@ -143,7 +143,7 @@ static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
143 143
144#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 144#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
145static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[], 145static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
146 struct nf_nat_range *range) 146 struct nf_nat_range2 *range)
147{ 147{
148 if (tb[CTA_NAT_V4_MINIP]) { 148 if (tb[CTA_NAT_V4_MINIP]) {
149 range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]); 149 range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
@@ -246,8 +246,7 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
246 const struct nf_hook_state *state, 246 const struct nf_hook_state *state,
247 unsigned int (*do_chain)(void *priv, 247 unsigned int (*do_chain)(void *priv,
248 struct sk_buff *skb, 248 struct sk_buff *skb,
249 const struct nf_hook_state *state, 249 const struct nf_hook_state *state))
250 struct nf_conn *ct))
251{ 250{
252 struct nf_conn *ct; 251 struct nf_conn *ct;
253 enum ip_conntrack_info ctinfo; 252 enum ip_conntrack_info ctinfo;
@@ -285,7 +284,7 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
285 if (!nf_nat_initialized(ct, maniptype)) { 284 if (!nf_nat_initialized(ct, maniptype)) {
286 unsigned int ret; 285 unsigned int ret;
287 286
288 ret = do_chain(priv, skb, state, ct); 287 ret = do_chain(priv, skb, state);
289 if (ret != NF_ACCEPT) 288 if (ret != NF_ACCEPT)
290 return ret; 289 return ret;
291 290
@@ -326,8 +325,7 @@ nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
326 const struct nf_hook_state *state, 325 const struct nf_hook_state *state,
327 unsigned int (*do_chain)(void *priv, 326 unsigned int (*do_chain)(void *priv,
328 struct sk_buff *skb, 327 struct sk_buff *skb,
329 const struct nf_hook_state *state, 328 const struct nf_hook_state *state))
330 struct nf_conn *ct))
331{ 329{
332 unsigned int ret; 330 unsigned int ret;
333 __be32 daddr = ip_hdr(skb)->daddr; 331 __be32 daddr = ip_hdr(skb)->daddr;
@@ -346,8 +344,7 @@ nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
346 const struct nf_hook_state *state, 344 const struct nf_hook_state *state,
347 unsigned int (*do_chain)(void *priv, 345 unsigned int (*do_chain)(void *priv,
348 struct sk_buff *skb, 346 struct sk_buff *skb,
349 const struct nf_hook_state *state, 347 const struct nf_hook_state *state))
350 struct nf_conn *ct))
351{ 348{
352#ifdef CONFIG_XFRM 349#ifdef CONFIG_XFRM
353 const struct nf_conn *ct; 350 const struct nf_conn *ct;
@@ -383,8 +380,7 @@ nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
383 const struct nf_hook_state *state, 380 const struct nf_hook_state *state,
384 unsigned int (*do_chain)(void *priv, 381 unsigned int (*do_chain)(void *priv,
385 struct sk_buff *skb, 382 struct sk_buff *skb,
386 const struct nf_hook_state *state, 383 const struct nf_hook_state *state))
387 struct nf_conn *ct))
388{ 384{
389 const struct nf_conn *ct; 385 const struct nf_conn *ct;
390 enum ip_conntrack_info ctinfo; 386 enum ip_conntrack_info ctinfo;
diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
index 0c366aad89cb..f538c5001547 100644
--- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
@@ -24,13 +24,13 @@
24 24
25unsigned int 25unsigned int
26nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, 26nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
27 const struct nf_nat_range *range, 27 const struct nf_nat_range2 *range,
28 const struct net_device *out) 28 const struct net_device *out)
29{ 29{
30 struct nf_conn *ct; 30 struct nf_conn *ct;
31 struct nf_conn_nat *nat; 31 struct nf_conn_nat *nat;
32 enum ip_conntrack_info ctinfo; 32 enum ip_conntrack_info ctinfo;
33 struct nf_nat_range newrange; 33 struct nf_nat_range2 newrange;
34 const struct rtable *rt; 34 const struct rtable *rt;
35 __be32 newsrc, nh; 35 __be32 newsrc, nh;
36 36
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 8a69363b4884..5d259a12e25f 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -48,7 +48,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
48 struct nf_conntrack_tuple t = {}; 48 struct nf_conntrack_tuple t = {};
49 const struct nf_ct_pptp_master *ct_pptp_info; 49 const struct nf_ct_pptp_master *ct_pptp_info;
50 const struct nf_nat_pptp *nat_pptp_info; 50 const struct nf_nat_pptp *nat_pptp_info;
51 struct nf_nat_range range; 51 struct nf_nat_range2 range;
52 struct nf_conn_nat *nat; 52 struct nf_conn_nat *nat;
53 53
54 nat = nf_ct_nat_ext_add(ct); 54 nat = nf_ct_nat_ext_add(ct);
diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c
index edf05002d674..00fda6331ce5 100644
--- a/net/ipv4/netfilter/nf_nat_proto_gre.c
+++ b/net/ipv4/netfilter/nf_nat_proto_gre.c
@@ -41,7 +41,7 @@ MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
41static void 41static void
42gre_unique_tuple(const struct nf_nat_l3proto *l3proto, 42gre_unique_tuple(const struct nf_nat_l3proto *l3proto,
43 struct nf_conntrack_tuple *tuple, 43 struct nf_conntrack_tuple *tuple,
44 const struct nf_nat_range *range, 44 const struct nf_nat_range2 *range,
45 enum nf_nat_manip_type maniptype, 45 enum nf_nat_manip_type maniptype,
46 const struct nf_conn *ct) 46 const struct nf_conn *ct)
47{ 47{
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c
index 7b98baa13ede..6d7cf1d79baf 100644
--- a/net/ipv4/netfilter/nf_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c
@@ -30,7 +30,7 @@ icmp_in_range(const struct nf_conntrack_tuple *tuple,
30static void 30static void
31icmp_unique_tuple(const struct nf_nat_l3proto *l3proto, 31icmp_unique_tuple(const struct nf_nat_l3proto *l3proto,
32 struct nf_conntrack_tuple *tuple, 32 struct nf_conntrack_tuple *tuple,
33 const struct nf_nat_range *range, 33 const struct nf_nat_range2 *range,
34 enum nf_nat_manip_type maniptype, 34 enum nf_nat_manip_type maniptype,
35 const struct nf_conn *ct) 35 const struct nf_conn *ct)
36{ 36{
diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
index b5464a3f253b..285baccfbdea 100644
--- a/net/ipv4/netfilter/nft_chain_nat_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
@@ -28,8 +28,7 @@
28 28
29static unsigned int nft_nat_do_chain(void *priv, 29static unsigned int nft_nat_do_chain(void *priv,
30 struct sk_buff *skb, 30 struct sk_buff *skb,
31 const struct nf_hook_state *state, 31 const struct nf_hook_state *state)
32 struct nf_conn *ct)
33{ 32{
34 struct nft_pktinfo pkt; 33 struct nft_pktinfo pkt;
35 34
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
index f18677277119..f1193e1e928a 100644
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
@@ -21,7 +21,7 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr,
21 const struct nft_pktinfo *pkt) 21 const struct nft_pktinfo *pkt)
22{ 22{
23 struct nft_masq *priv = nft_expr_priv(expr); 23 struct nft_masq *priv = nft_expr_priv(expr);
24 struct nf_nat_range range; 24 struct nf_nat_range2 range;
25 25
26 memset(&range, 0, sizeof(range)); 26 memset(&range, 0, sizeof(range));
27 range.flags = priv->flags; 27 range.flags = priv->flags;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ccb25d80f679..1412a7baf0b9 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -709,7 +709,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
709 fnhe->fnhe_gw = gw; 709 fnhe->fnhe_gw = gw;
710 fnhe->fnhe_pmtu = pmtu; 710 fnhe->fnhe_pmtu = pmtu;
711 fnhe->fnhe_mtu_locked = lock; 711 fnhe->fnhe_mtu_locked = lock;
712 fnhe->fnhe_expires = expires; 712 fnhe->fnhe_expires = max(1UL, expires);
713 713
714 /* Exception created; mark the cached routes for the nexthop 714 /* Exception created; mark the cached routes for the nexthop
715 * stale, so anyone caching it rechecks if this exception 715 * stale, so anyone caching it rechecks if this exception
@@ -1297,6 +1297,36 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
1297 return mtu - lwtunnel_headroom(dst->lwtstate, mtu); 1297 return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
1298} 1298}
1299 1299
1300static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
1301{
1302 struct fnhe_hash_bucket *hash;
1303 struct fib_nh_exception *fnhe, __rcu **fnhe_p;
1304 u32 hval = fnhe_hashfun(daddr);
1305
1306 spin_lock_bh(&fnhe_lock);
1307
1308 hash = rcu_dereference_protected(nh->nh_exceptions,
1309 lockdep_is_held(&fnhe_lock));
1310 hash += hval;
1311
1312 fnhe_p = &hash->chain;
1313 fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
1314 while (fnhe) {
1315 if (fnhe->fnhe_daddr == daddr) {
1316 rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
1317 fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
1318 fnhe_flush_routes(fnhe);
1319 kfree_rcu(fnhe, rcu);
1320 break;
1321 }
1322 fnhe_p = &fnhe->fnhe_next;
1323 fnhe = rcu_dereference_protected(fnhe->fnhe_next,
1324 lockdep_is_held(&fnhe_lock));
1325 }
1326
1327 spin_unlock_bh(&fnhe_lock);
1328}
1329
1300static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr) 1330static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
1301{ 1331{
1302 struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions); 1332 struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
@@ -1310,8 +1340,14 @@ static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
1310 1340
1311 for (fnhe = rcu_dereference(hash[hval].chain); fnhe; 1341 for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1312 fnhe = rcu_dereference(fnhe->fnhe_next)) { 1342 fnhe = rcu_dereference(fnhe->fnhe_next)) {
1313 if (fnhe->fnhe_daddr == daddr) 1343 if (fnhe->fnhe_daddr == daddr) {
1344 if (fnhe->fnhe_expires &&
1345 time_after(jiffies, fnhe->fnhe_expires)) {
1346 ip_del_fnhe(nh, daddr);
1347 break;
1348 }
1314 return fnhe; 1349 return fnhe;
1350 }
1315 } 1351 }
1316 return NULL; 1352 return NULL;
1317} 1353}
@@ -1636,36 +1672,6 @@ static void ip_handle_martian_source(struct net_device *dev,
1636#endif 1672#endif
1637} 1673}
1638 1674
1639static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
1640{
1641 struct fnhe_hash_bucket *hash;
1642 struct fib_nh_exception *fnhe, __rcu **fnhe_p;
1643 u32 hval = fnhe_hashfun(daddr);
1644
1645 spin_lock_bh(&fnhe_lock);
1646
1647 hash = rcu_dereference_protected(nh->nh_exceptions,
1648 lockdep_is_held(&fnhe_lock));
1649 hash += hval;
1650
1651 fnhe_p = &hash->chain;
1652 fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
1653 while (fnhe) {
1654 if (fnhe->fnhe_daddr == daddr) {
1655 rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
1656 fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
1657 fnhe_flush_routes(fnhe);
1658 kfree_rcu(fnhe, rcu);
1659 break;
1660 }
1661 fnhe_p = &fnhe->fnhe_next;
1662 fnhe = rcu_dereference_protected(fnhe->fnhe_next,
1663 lockdep_is_held(&fnhe_lock));
1664 }
1665
1666 spin_unlock_bh(&fnhe_lock);
1667}
1668
1669/* called in rcu_read_lock() section */ 1675/* called in rcu_read_lock() section */
1670static int __mkroute_input(struct sk_buff *skb, 1676static int __mkroute_input(struct sk_buff *skb,
1671 const struct fib_result *res, 1677 const struct fib_result *res,
@@ -1719,20 +1725,10 @@ static int __mkroute_input(struct sk_buff *skb,
1719 1725
1720 fnhe = find_exception(&FIB_RES_NH(*res), daddr); 1726 fnhe = find_exception(&FIB_RES_NH(*res), daddr);
1721 if (do_cache) { 1727 if (do_cache) {
1722 if (fnhe) { 1728 if (fnhe)
1723 rth = rcu_dereference(fnhe->fnhe_rth_input); 1729 rth = rcu_dereference(fnhe->fnhe_rth_input);
1724 if (rth && rth->dst.expires && 1730 else
1725 time_after(jiffies, rth->dst.expires)) { 1731 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
1726 ip_del_fnhe(&FIB_RES_NH(*res), daddr);
1727 fnhe = NULL;
1728 } else {
1729 goto rt_cache;
1730 }
1731 }
1732
1733 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
1734
1735rt_cache:
1736 if (rt_cache_valid(rth)) { 1732 if (rt_cache_valid(rth)) {
1737 skb_dst_set_noref(skb, &rth->dst); 1733 skb_dst_set_noref(skb, &rth->dst);
1738 goto out; 1734 goto out;
@@ -2216,39 +2212,31 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2216 * the loopback interface and the IP_PKTINFO ipi_ifindex will 2212 * the loopback interface and the IP_PKTINFO ipi_ifindex will
2217 * be set to the loopback interface as well. 2213 * be set to the loopback interface as well.
2218 */ 2214 */
2219 fi = NULL; 2215 do_cache = false;
2220 } 2216 }
2221 2217
2222 fnhe = NULL; 2218 fnhe = NULL;
2223 do_cache &= fi != NULL; 2219 do_cache &= fi != NULL;
2224 if (do_cache) { 2220 if (fi) {
2225 struct rtable __rcu **prth; 2221 struct rtable __rcu **prth;
2226 struct fib_nh *nh = &FIB_RES_NH(*res); 2222 struct fib_nh *nh = &FIB_RES_NH(*res);
2227 2223
2228 fnhe = find_exception(nh, fl4->daddr); 2224 fnhe = find_exception(nh, fl4->daddr);
2225 if (!do_cache)
2226 goto add;
2229 if (fnhe) { 2227 if (fnhe) {
2230 prth = &fnhe->fnhe_rth_output; 2228 prth = &fnhe->fnhe_rth_output;
2231 rth = rcu_dereference(*prth); 2229 } else {
2232 if (rth && rth->dst.expires && 2230 if (unlikely(fl4->flowi4_flags &
2233 time_after(jiffies, rth->dst.expires)) { 2231 FLOWI_FLAG_KNOWN_NH &&
2234 ip_del_fnhe(nh, fl4->daddr); 2232 !(nh->nh_gw &&
2235 fnhe = NULL; 2233 nh->nh_scope == RT_SCOPE_LINK))) {
2236 } else { 2234 do_cache = false;
2237 goto rt_cache; 2235 goto add;
2238 } 2236 }
2237 prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
2239 } 2238 }
2240
2241 if (unlikely(fl4->flowi4_flags &
2242 FLOWI_FLAG_KNOWN_NH &&
2243 !(nh->nh_gw &&
2244 nh->nh_scope == RT_SCOPE_LINK))) {
2245 do_cache = false;
2246 goto add;
2247 }
2248 prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
2249 rth = rcu_dereference(*prth); 2239 rth = rcu_dereference(*prth);
2250
2251rt_cache:
2252 if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst)) 2240 if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
2253 return rth; 2241 return rth;
2254 } 2242 }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index dfd090ea54ad..62b776f90037 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -697,7 +697,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
697{ 697{
698 return skb->len < size_goal && 698 return skb->len < size_goal &&
699 sock_net(sk)->ipv4.sysctl_tcp_autocorking && 699 sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
700 skb != tcp_write_queue_head(sk) && 700 !tcp_rtx_queue_empty(sk) &&
701 refcount_read(&sk->sk_wmem_alloc) > skb->truesize; 701 refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
702} 702}
703 703
@@ -1204,7 +1204,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
1204 uarg->zerocopy = 0; 1204 uarg->zerocopy = 0;
1205 } 1205 }
1206 1206
1207 if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) { 1207 if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
1208 !tp->repair) {
1208 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size); 1209 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
1209 if (err == -EINPROGRESS && copied_syn > 0) 1210 if (err == -EINPROGRESS && copied_syn > 0)
1210 goto out; 1211 goto out;
@@ -1726,118 +1727,113 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
1726} 1727}
1727EXPORT_SYMBOL(tcp_set_rcvlowat); 1728EXPORT_SYMBOL(tcp_set_rcvlowat);
1728 1729
1729/* When user wants to mmap X pages, we first need to perform the mapping 1730#ifdef CONFIG_MMU
1730 * before freeing any skbs in receive queue, otherwise user would be unable 1731static const struct vm_operations_struct tcp_vm_ops = {
1731 * to fallback to standard recvmsg(). This happens if some data in the 1732};
1732 * requested block is not exactly fitting in a page. 1733
1733 *
1734 * We only support order-0 pages for the moment.
1735 * mmap() on TCP is very strict, there is no point
1736 * trying to accommodate with pathological layouts.
1737 */
1738int tcp_mmap(struct file *file, struct socket *sock, 1734int tcp_mmap(struct file *file, struct socket *sock,
1739 struct vm_area_struct *vma) 1735 struct vm_area_struct *vma)
1740{ 1736{
1741 unsigned long size = vma->vm_end - vma->vm_start; 1737 if (vma->vm_flags & (VM_WRITE | VM_EXEC))
1742 unsigned int nr_pages = size >> PAGE_SHIFT; 1738 return -EPERM;
1743 struct page **pages_array = NULL; 1739 vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
1744 u32 seq, len, offset, nr = 0; 1740
1745 struct sock *sk = sock->sk; 1741 /* Instruct vm_insert_page() to not down_read(mmap_sem) */
1746 const skb_frag_t *frags; 1742 vma->vm_flags |= VM_MIXEDMAP;
1743
1744 vma->vm_ops = &tcp_vm_ops;
1745 return 0;
1746}
1747EXPORT_SYMBOL(tcp_mmap);
1748
1749static int tcp_zerocopy_receive(struct sock *sk,
1750 struct tcp_zerocopy_receive *zc)
1751{
1752 unsigned long address = (unsigned long)zc->address;
1753 const skb_frag_t *frags = NULL;
1754 u32 length = 0, seq, offset;
1755 struct vm_area_struct *vma;
1756 struct sk_buff *skb = NULL;
1747 struct tcp_sock *tp; 1757 struct tcp_sock *tp;
1748 struct sk_buff *skb;
1749 int ret; 1758 int ret;
1750 1759
1751 if (vma->vm_pgoff || !nr_pages) 1760 if (address & (PAGE_SIZE - 1) || address != zc->address)
1752 return -EINVAL; 1761 return -EINVAL;
1753 1762
1754 if (vma->vm_flags & VM_WRITE)
1755 return -EPERM;
1756 /* TODO: Maybe the following is not needed if pages are COW */
1757 vma->vm_flags &= ~VM_MAYWRITE;
1758
1759 lock_sock(sk);
1760
1761 ret = -ENOTCONN;
1762 if (sk->sk_state == TCP_LISTEN) 1763 if (sk->sk_state == TCP_LISTEN)
1763 goto out; 1764 return -ENOTCONN;
1764 1765
1765 sock_rps_record_flow(sk); 1766 sock_rps_record_flow(sk);
1766 1767
1767 if (tcp_inq(sk) < size) { 1768 down_read(&current->mm->mmap_sem);
1768 ret = sock_flag(sk, SOCK_DONE) ? -EIO : -EAGAIN; 1769
1770 ret = -EINVAL;
1771 vma = find_vma(current->mm, address);
1772 if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops)
1769 goto out; 1773 goto out;
1770 } 1774 zc->length = min_t(unsigned long, zc->length, vma->vm_end - address);
1775
1771 tp = tcp_sk(sk); 1776 tp = tcp_sk(sk);
1772 seq = tp->copied_seq; 1777 seq = tp->copied_seq;
1773 /* Abort if urgent data is in the area */ 1778 zc->length = min_t(u32, zc->length, tcp_inq(sk));
1774 if (unlikely(tp->urg_data)) { 1779 zc->length &= ~(PAGE_SIZE - 1);
1775 u32 urg_offset = tp->urg_seq - seq;
1776 1780
1777 ret = -EINVAL; 1781 zap_page_range(vma, address, zc->length);
1778 if (urg_offset < size) 1782
1779 goto out; 1783 zc->recv_skip_hint = 0;
1780 } 1784 ret = 0;
1781 ret = -ENOMEM; 1785 while (length + PAGE_SIZE <= zc->length) {
1782 pages_array = kvmalloc_array(nr_pages, sizeof(struct page *), 1786 if (zc->recv_skip_hint < PAGE_SIZE) {
1783 GFP_KERNEL); 1787 if (skb) {
1784 if (!pages_array) 1788 skb = skb->next;
1785 goto out; 1789 offset = seq - TCP_SKB_CB(skb)->seq;
1786 skb = tcp_recv_skb(sk, seq, &offset); 1790 } else {
1787 ret = -EINVAL; 1791 skb = tcp_recv_skb(sk, seq, &offset);
1788skb_start: 1792 }
1789 /* We do not support anything not in page frags */ 1793
1790 offset -= skb_headlen(skb); 1794 zc->recv_skip_hint = skb->len - offset;
1791 if ((int)offset < 0) 1795 offset -= skb_headlen(skb);
1792 goto out; 1796 if ((int)offset < 0 || skb_has_frag_list(skb))
1793 if (skb_has_frag_list(skb)) 1797 break;
1794 goto out; 1798 frags = skb_shinfo(skb)->frags;
1795 len = skb->data_len - offset; 1799 while (offset) {
1796 frags = skb_shinfo(skb)->frags; 1800 if (frags->size > offset)
1797 while (offset) { 1801 goto out;
1798 if (frags->size > offset) 1802 offset -= frags->size;
1799 goto out; 1803 frags++;
1800 offset -= frags->size; 1804 }
1801 frags++;
1802 }
1803 while (nr < nr_pages) {
1804 if (len) {
1805 if (len < PAGE_SIZE)
1806 goto out;
1807 if (frags->size != PAGE_SIZE || frags->page_offset)
1808 goto out;
1809 pages_array[nr++] = skb_frag_page(frags);
1810 frags++;
1811 len -= PAGE_SIZE;
1812 seq += PAGE_SIZE;
1813 continue;
1814 } 1805 }
1815 skb = skb->next; 1806 if (frags->size != PAGE_SIZE || frags->page_offset)
1816 offset = seq - TCP_SKB_CB(skb)->seq; 1807 break;
1817 goto skb_start; 1808 ret = vm_insert_page(vma, address + length,
1818 } 1809 skb_frag_page(frags));
1819 /* OK, we have a full set of pages ready to be inserted into vma */
1820 for (nr = 0; nr < nr_pages; nr++) {
1821 ret = vm_insert_page(vma, vma->vm_start + (nr << PAGE_SHIFT),
1822 pages_array[nr]);
1823 if (ret) 1810 if (ret)
1824 goto out; 1811 break;
1812 length += PAGE_SIZE;
1813 seq += PAGE_SIZE;
1814 zc->recv_skip_hint -= PAGE_SIZE;
1815 frags++;
1825 } 1816 }
1826 /* operation is complete, we can 'consume' all skbs */
1827 tp->copied_seq = seq;
1828 tcp_rcv_space_adjust(sk);
1829
1830 /* Clean up data we have read: This will do ACK frames. */
1831 tcp_recv_skb(sk, seq, &offset);
1832 tcp_cleanup_rbuf(sk, size);
1833
1834 ret = 0;
1835out: 1817out:
1836 release_sock(sk); 1818 up_read(&current->mm->mmap_sem);
1837 kvfree(pages_array); 1819 if (length) {
1820 tp->copied_seq = seq;
1821 tcp_rcv_space_adjust(sk);
1822
1823 /* Clean up data we have read: This will do ACK frames. */
1824 tcp_recv_skb(sk, seq, &offset);
1825 tcp_cleanup_rbuf(sk, length);
1826 ret = 0;
1827 if (length == zc->length)
1828 zc->recv_skip_hint = 0;
1829 } else {
1830 if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE))
1831 ret = -EIO;
1832 }
1833 zc->length = length;
1838 return ret; 1834 return ret;
1839} 1835}
1840EXPORT_SYMBOL(tcp_mmap); 1836#endif
1841 1837
1842static void tcp_update_recv_tstamps(struct sk_buff *skb, 1838static void tcp_update_recv_tstamps(struct sk_buff *skb,
1843 struct scm_timestamping *tss) 1839 struct scm_timestamping *tss)
@@ -1894,6 +1890,22 @@ static void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
1894 } 1890 }
1895} 1891}
1896 1892
1893static int tcp_inq_hint(struct sock *sk)
1894{
1895 const struct tcp_sock *tp = tcp_sk(sk);
1896 u32 copied_seq = READ_ONCE(tp->copied_seq);
1897 u32 rcv_nxt = READ_ONCE(tp->rcv_nxt);
1898 int inq;
1899
1900 inq = rcv_nxt - copied_seq;
1901 if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) {
1902 lock_sock(sk);
1903 inq = tp->rcv_nxt - tp->copied_seq;
1904 release_sock(sk);
1905 }
1906 return inq;
1907}
1908
1897/* 1909/*
1898 * This routine copies from a sock struct into the user buffer. 1910 * This routine copies from a sock struct into the user buffer.
1899 * 1911 *
@@ -1910,13 +1922,14 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1910 u32 peek_seq; 1922 u32 peek_seq;
1911 u32 *seq; 1923 u32 *seq;
1912 unsigned long used; 1924 unsigned long used;
1913 int err; 1925 int err, inq;
1914 int target; /* Read at least this many bytes */ 1926 int target; /* Read at least this many bytes */
1915 long timeo; 1927 long timeo;
1916 struct sk_buff *skb, *last; 1928 struct sk_buff *skb, *last;
1917 u32 urg_hole = 0; 1929 u32 urg_hole = 0;
1918 struct scm_timestamping tss; 1930 struct scm_timestamping tss;
1919 bool has_tss = false; 1931 bool has_tss = false;
1932 bool has_cmsg;
1920 1933
1921 if (unlikely(flags & MSG_ERRQUEUE)) 1934 if (unlikely(flags & MSG_ERRQUEUE))
1922 return inet_recv_error(sk, msg, len, addr_len); 1935 return inet_recv_error(sk, msg, len, addr_len);
@@ -1931,6 +1944,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1931 if (sk->sk_state == TCP_LISTEN) 1944 if (sk->sk_state == TCP_LISTEN)
1932 goto out; 1945 goto out;
1933 1946
1947 has_cmsg = tp->recvmsg_inq;
1934 timeo = sock_rcvtimeo(sk, nonblock); 1948 timeo = sock_rcvtimeo(sk, nonblock);
1935 1949
1936 /* Urgent data needs to be handled specially. */ 1950 /* Urgent data needs to be handled specially. */
@@ -2117,6 +2131,7 @@ skip_copy:
2117 if (TCP_SKB_CB(skb)->has_rxtstamp) { 2131 if (TCP_SKB_CB(skb)->has_rxtstamp) {
2118 tcp_update_recv_tstamps(skb, &tss); 2132 tcp_update_recv_tstamps(skb, &tss);
2119 has_tss = true; 2133 has_tss = true;
2134 has_cmsg = true;
2120 } 2135 }
2121 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 2136 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
2122 goto found_fin_ok; 2137 goto found_fin_ok;
@@ -2136,13 +2151,20 @@ skip_copy:
2136 * on connected socket. I was just happy when found this 8) --ANK 2151 * on connected socket. I was just happy when found this 8) --ANK
2137 */ 2152 */
2138 2153
2139 if (has_tss)
2140 tcp_recv_timestamp(msg, sk, &tss);
2141
2142 /* Clean up data we have read: This will do ACK frames. */ 2154 /* Clean up data we have read: This will do ACK frames. */
2143 tcp_cleanup_rbuf(sk, copied); 2155 tcp_cleanup_rbuf(sk, copied);
2144 2156
2145 release_sock(sk); 2157 release_sock(sk);
2158
2159 if (has_cmsg) {
2160 if (has_tss)
2161 tcp_recv_timestamp(msg, sk, &tss);
2162 if (tp->recvmsg_inq) {
2163 inq = tcp_inq_hint(sk);
2164 put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq);
2165 }
2166 }
2167
2146 return copied; 2168 return copied;
2147 2169
2148out: 2170out:
@@ -2812,7 +2834,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2812 case TCP_REPAIR_QUEUE: 2834 case TCP_REPAIR_QUEUE:
2813 if (!tp->repair) 2835 if (!tp->repair)
2814 err = -EPERM; 2836 err = -EPERM;
2815 else if (val < TCP_QUEUES_NR) 2837 else if ((unsigned int)val < TCP_QUEUES_NR)
2816 tp->repair_queue = val; 2838 tp->repair_queue = val;
2817 else 2839 else
2818 err = -EINVAL; 2840 err = -EINVAL;
@@ -3011,6 +3033,12 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
3011 tp->notsent_lowat = val; 3033 tp->notsent_lowat = val;
3012 sk->sk_write_space(sk); 3034 sk->sk_write_space(sk);
3013 break; 3035 break;
3036 case TCP_INQ:
3037 if (val > 1 || val < 0)
3038 err = -EINVAL;
3039 else
3040 tp->recvmsg_inq = val;
3041 break;
3014 default: 3042 default:
3015 err = -ENOPROTOOPT; 3043 err = -ENOPROTOOPT;
3016 break; 3044 break;
@@ -3436,6 +3464,9 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
3436 case TCP_NOTSENT_LOWAT: 3464 case TCP_NOTSENT_LOWAT:
3437 val = tp->notsent_lowat; 3465 val = tp->notsent_lowat;
3438 break; 3466 break;
3467 case TCP_INQ:
3468 val = tp->recvmsg_inq;
3469 break;
3439 case TCP_SAVE_SYN: 3470 case TCP_SAVE_SYN:
3440 val = tp->save_syn; 3471 val = tp->save_syn;
3441 break; 3472 break;
@@ -3472,6 +3503,25 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
3472 } 3503 }
3473 return 0; 3504 return 0;
3474 } 3505 }
3506#ifdef CONFIG_MMU
3507 case TCP_ZEROCOPY_RECEIVE: {
3508 struct tcp_zerocopy_receive zc;
3509 int err;
3510
3511 if (get_user(len, optlen))
3512 return -EFAULT;
3513 if (len != sizeof(zc))
3514 return -EINVAL;
3515 if (copy_from_user(&zc, optval, len))
3516 return -EFAULT;
3517 lock_sock(sk);
3518 err = tcp_zerocopy_receive(sk, &zc);
3519 release_sock(sk);
3520 if (!err && copy_to_user(optval, &zc, len))
3521 err = -EFAULT;
3522 return err;
3523 }
3524#endif
3475 default: 3525 default:
3476 return -ENOPROTOOPT; 3526 return -ENOPROTOOPT;
3477 } 3527 }
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 158d105e76da..58e2f479ffb4 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -806,7 +806,9 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
806 } 806 }
807 } 807 }
808 } 808 }
809 bbr->idle_restart = 0; 809 /* Restart after idle ends only once we process a new S/ACK for data */
810 if (rs->delivered > 0)
811 bbr->idle_restart = 0;
810} 812}
811 813
812static void bbr_update_model(struct sock *sk, const struct rate_sample *rs) 814static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8acbe5fd2098..b188e0d75edd 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -111,6 +111,25 @@ int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
111#define REXMIT_LOST 1 /* retransmit packets marked lost */ 111#define REXMIT_LOST 1 /* retransmit packets marked lost */
112#define REXMIT_NEW 2 /* FRTO-style transmit of unsent/new packets */ 112#define REXMIT_NEW 2 /* FRTO-style transmit of unsent/new packets */
113 113
114#if IS_ENABLED(CONFIG_TLS_DEVICE)
115static DEFINE_STATIC_KEY_FALSE(clean_acked_data_enabled);
116
117void clean_acked_data_enable(struct inet_connection_sock *icsk,
118 void (*cad)(struct sock *sk, u32 ack_seq))
119{
120 icsk->icsk_clean_acked = cad;
121 static_branch_inc(&clean_acked_data_enabled);
122}
123EXPORT_SYMBOL_GPL(clean_acked_data_enable);
124
125void clean_acked_data_disable(struct inet_connection_sock *icsk)
126{
127 static_branch_dec(&clean_acked_data_enabled);
128 icsk->icsk_clean_acked = NULL;
129}
130EXPORT_SYMBOL_GPL(clean_acked_data_disable);
131#endif
132
114static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb, 133static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,
115 unsigned int len) 134 unsigned int len)
116{ 135{
@@ -3560,6 +3579,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3560 if (after(ack, prior_snd_una)) { 3579 if (after(ack, prior_snd_una)) {
3561 flag |= FLAG_SND_UNA_ADVANCED; 3580 flag |= FLAG_SND_UNA_ADVANCED;
3562 icsk->icsk_retransmits = 0; 3581 icsk->icsk_retransmits = 0;
3582
3583#if IS_ENABLED(CONFIG_TLS_DEVICE)
3584 if (static_branch_unlikely(&clean_acked_data_enabled))
3585 if (icsk->icsk_clean_acked)
3586 icsk->icsk_clean_acked(sk, ack);
3587#endif
3563 } 3588 }
3564 3589
3565 prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una; 3590 prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 95feffb6d53f..d07c0dcc99aa 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -229,11 +229,9 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
229 } 229 }
230 } 230 }
231 231
232 if (mss > (1 << *rcv_wscale)) { 232 if (!init_rcv_wnd) /* Use default unless specified otherwise */
233 if (!init_rcv_wnd) /* Use default unless specified otherwise */ 233 init_rcv_wnd = tcp_default_init_rwnd(mss);
234 init_rcv_wnd = tcp_default_init_rwnd(mss); 234 *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
235 *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
236 }
237 235
238 /* Set the clamp no higher than max representable value */ 236 /* Set the clamp no higher than max representable value */
239 (*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp); 237 (*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 794aeafeb782..dd3102a37ef9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -786,11 +786,14 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
786 return -EINVAL; 786 return -EINVAL;
787 if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) 787 if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
788 return -EINVAL; 788 return -EINVAL;
789 if (sk->sk_no_check_tx)
790 return -EINVAL;
789 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite) 791 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite)
790 return -EIO; 792 return -EIO;
791 793
792 skb_shinfo(skb)->gso_size = cork->gso_size; 794 skb_shinfo(skb)->gso_size = cork->gso_size;
793 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; 795 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
796 goto csum_partial;
794 } 797 }
795 798
796 if (is_udplite) /* UDP-Lite */ 799 if (is_udplite) /* UDP-Lite */
@@ -802,6 +805,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
802 goto send; 805 goto send;
803 806
804 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ 807 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
808csum_partial:
805 809
806 udp4_hwcsum(skb, fl4->saddr, fl4->daddr); 810 udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
807 goto send; 811 goto send;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index dc5158cba66e..006257092f06 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -223,6 +223,7 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
223 csum_replace2(&uh->check, htons(mss), 223 csum_replace2(&uh->check, htons(mss),
224 htons(seg->len - hdrlen - sizeof(*uh))); 224 htons(seg->len - hdrlen - sizeof(*uh)));
225 225
226 uh->check = ~uh->check;
226 seg->destructor = sock_wfree; 227 seg->destructor = sock_wfree;
227 seg->sk = sk; 228 seg->sk = sk;
228 sum_truesize += seg->truesize; 229 sum_truesize += seg->truesize;
@@ -247,7 +248,6 @@ static struct sk_buff *__udp4_gso_segment(struct sk_buff *gso_skb,
247 udp_v4_check(sizeof(struct udphdr) + mss, 248 udp_v4_check(sizeof(struct udphdr) + mss,
248 iph->saddr, iph->daddr, 0)); 249 iph->saddr, iph->daddr, 0));
249} 250}
250EXPORT_SYMBOL_GPL(__udp4_gso_segment);
251 251
252static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, 252static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
253 netdev_features_t features) 253 netdev_features_t features)
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 36d622c477b1..d0af96e0d109 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -578,7 +578,9 @@ const struct proto_ops inet6_stream_ops = {
578 .getsockopt = sock_common_getsockopt, /* ok */ 578 .getsockopt = sock_common_getsockopt, /* ok */
579 .sendmsg = inet_sendmsg, /* ok */ 579 .sendmsg = inet_sendmsg, /* ok */
580 .recvmsg = inet_recvmsg, /* ok */ 580 .recvmsg = inet_recvmsg, /* ok */
581#ifdef CONFIG_MMU
581 .mmap = tcp_mmap, 582 .mmap = tcp_mmap,
583#endif
582 .sendpage = inet_sendpage, 584 .sendpage = inet_sendpage,
583 .sendmsg_locked = tcp_sendmsg_locked, 585 .sendmsg_locked = tcp_sendmsg_locked,
584 .sendpage_locked = tcp_sendpage_locked, 586 .sendpage_locked = tcp_sendpage_locked,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 6421c893466e..f0a4262a4789 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -945,7 +945,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
945 ins = &fn->leaf; 945 ins = &fn->leaf;
946 946
947 for (iter = leaf; iter; 947 for (iter = leaf; iter;
948 iter = rcu_dereference_protected(iter->rt6_next, 948 iter = rcu_dereference_protected(iter->fib6_next,
949 lockdep_is_held(&rt->fib6_table->tb6_lock))) { 949 lockdep_is_held(&rt->fib6_table->tb6_lock))) {
950 /* 950 /*
951 * Search for duplicates 951 * Search for duplicates
@@ -1002,7 +1002,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
1002 break; 1002 break;
1003 1003
1004next_iter: 1004next_iter:
1005 ins = &iter->rt6_next; 1005 ins = &iter->fib6_next;
1006 } 1006 }
1007 1007
1008 if (fallback_ins && !found) { 1008 if (fallback_ins && !found) {
@@ -1031,7 +1031,7 @@ next_iter:
1031 &sibling->fib6_siblings); 1031 &sibling->fib6_siblings);
1032 break; 1032 break;
1033 } 1033 }
1034 sibling = rcu_dereference_protected(sibling->rt6_next, 1034 sibling = rcu_dereference_protected(sibling->fib6_next,
1035 lockdep_is_held(&rt->fib6_table->tb6_lock)); 1035 lockdep_is_held(&rt->fib6_table->tb6_lock));
1036 } 1036 }
1037 /* For each sibling in the list, increment the counter of 1037 /* For each sibling in the list, increment the counter of
@@ -1065,7 +1065,7 @@ add:
1065 if (err) 1065 if (err)
1066 return err; 1066 return err;
1067 1067
1068 rcu_assign_pointer(rt->rt6_next, iter); 1068 rcu_assign_pointer(rt->fib6_next, iter);
1069 atomic_inc(&rt->fib6_ref); 1069 atomic_inc(&rt->fib6_ref);
1070 rcu_assign_pointer(rt->fib6_node, fn); 1070 rcu_assign_pointer(rt->fib6_node, fn);
1071 rcu_assign_pointer(*ins, rt); 1071 rcu_assign_pointer(*ins, rt);
@@ -1096,7 +1096,7 @@ add:
1096 1096
1097 atomic_inc(&rt->fib6_ref); 1097 atomic_inc(&rt->fib6_ref);
1098 rcu_assign_pointer(rt->fib6_node, fn); 1098 rcu_assign_pointer(rt->fib6_node, fn);
1099 rt->rt6_next = iter->rt6_next; 1099 rt->fib6_next = iter->fib6_next;
1100 rcu_assign_pointer(*ins, rt); 1100 rcu_assign_pointer(*ins, rt);
1101 if (!info->skip_notify) 1101 if (!info->skip_notify)
1102 inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE); 1102 inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
@@ -1113,14 +1113,14 @@ add:
1113 1113
1114 if (nsiblings) { 1114 if (nsiblings) {
1115 /* Replacing an ECMP route, remove all siblings */ 1115 /* Replacing an ECMP route, remove all siblings */
1116 ins = &rt->rt6_next; 1116 ins = &rt->fib6_next;
1117 iter = rcu_dereference_protected(*ins, 1117 iter = rcu_dereference_protected(*ins,
1118 lockdep_is_held(&rt->fib6_table->tb6_lock)); 1118 lockdep_is_held(&rt->fib6_table->tb6_lock));
1119 while (iter) { 1119 while (iter) {
1120 if (iter->fib6_metric > rt->fib6_metric) 1120 if (iter->fib6_metric > rt->fib6_metric)
1121 break; 1121 break;
1122 if (rt6_qualify_for_ecmp(iter)) { 1122 if (rt6_qualify_for_ecmp(iter)) {
1123 *ins = iter->rt6_next; 1123 *ins = iter->fib6_next;
1124 iter->fib6_node = NULL; 1124 iter->fib6_node = NULL;
1125 fib6_purge_rt(iter, fn, info->nl_net); 1125 fib6_purge_rt(iter, fn, info->nl_net);
1126 if (rcu_access_pointer(fn->rr_ptr) == iter) 1126 if (rcu_access_pointer(fn->rr_ptr) == iter)
@@ -1129,7 +1129,7 @@ add:
1129 nsiblings--; 1129 nsiblings--;
1130 info->nl_net->ipv6.rt6_stats->fib_rt_entries--; 1130 info->nl_net->ipv6.rt6_stats->fib_rt_entries--;
1131 } else { 1131 } else {
1132 ins = &iter->rt6_next; 1132 ins = &iter->fib6_next;
1133 } 1133 }
1134 iter = rcu_dereference_protected(*ins, 1134 iter = rcu_dereference_protected(*ins,
1135 lockdep_is_held(&rt->fib6_table->tb6_lock)); 1135 lockdep_is_held(&rt->fib6_table->tb6_lock));
@@ -1712,7 +1712,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
1712 RT6_TRACE("fib6_del_route\n"); 1712 RT6_TRACE("fib6_del_route\n");
1713 1713
1714 /* Unlink it */ 1714 /* Unlink it */
1715 *rtp = rt->rt6_next; 1715 *rtp = rt->fib6_next;
1716 rt->fib6_node = NULL; 1716 rt->fib6_node = NULL;
1717 net->ipv6.rt6_stats->fib_rt_entries--; 1717 net->ipv6.rt6_stats->fib_rt_entries--;
1718 net->ipv6.rt6_stats->fib_discarded_routes++; 1718 net->ipv6.rt6_stats->fib_discarded_routes++;
@@ -1741,7 +1741,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
1741 FOR_WALKERS(net, w) { 1741 FOR_WALKERS(net, w) {
1742 if (w->state == FWS_C && w->leaf == rt) { 1742 if (w->state == FWS_C && w->leaf == rt) {
1743 RT6_TRACE("walker %p adjusted by delroute\n", w); 1743 RT6_TRACE("walker %p adjusted by delroute\n", w);
1744 w->leaf = rcu_dereference_protected(rt->rt6_next, 1744 w->leaf = rcu_dereference_protected(rt->fib6_next,
1745 lockdep_is_held(&table->tb6_lock)); 1745 lockdep_is_held(&table->tb6_lock));
1746 if (!w->leaf) 1746 if (!w->leaf)
1747 w->state = FWS_U; 1747 w->state = FWS_U;
@@ -1795,7 +1795,7 @@ int fib6_del(struct fib6_info *rt, struct nl_info *info)
1795 fib6_del_route(table, fn, rtp, info); 1795 fib6_del_route(table, fn, rtp, info);
1796 return 0; 1796 return 0;
1797 } 1797 }
1798 rtp_next = &cur->rt6_next; 1798 rtp_next = &cur->fib6_next;
1799 } 1799 }
1800 return -ENOENT; 1800 return -ENOENT;
1801} 1801}
@@ -2279,7 +2279,7 @@ static int ipv6_route_yield(struct fib6_walker *w)
2279 2279
2280 do { 2280 do {
2281 iter->w.leaf = rcu_dereference_protected( 2281 iter->w.leaf = rcu_dereference_protected(
2282 iter->w.leaf->rt6_next, 2282 iter->w.leaf->fib6_next,
2283 lockdep_is_held(&iter->tbl->tb6_lock)); 2283 lockdep_is_held(&iter->tbl->tb6_lock));
2284 iter->skip--; 2284 iter->skip--;
2285 if (!iter->skip && iter->w.leaf) 2285 if (!iter->skip && iter->w.leaf)
@@ -2345,7 +2345,7 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2345 if (!v) 2345 if (!v)
2346 goto iter_table; 2346 goto iter_table;
2347 2347
2348 n = rcu_dereference_bh(((struct fib6_info *)v)->rt6_next); 2348 n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next);
2349 if (n) { 2349 if (n) {
2350 ++*pos; 2350 ++*pos;
2351 return n; 2351 return n;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 69727bc168cb..04c69e0c84b3 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -807,7 +807,7 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
807} 807}
808 808
809/** 809/**
810 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own 810 * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
811 * @t: the outgoing tunnel device 811 * @t: the outgoing tunnel device
812 * @hdr: IPv6 header from the incoming packet 812 * @hdr: IPv6 header from the incoming packet
813 * 813 *
@@ -896,6 +896,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
896 struct flowi6 fl6; 896 struct flowi6 fl6;
897 int err = -EINVAL; 897 int err = -EINVAL;
898 __u32 mtu; 898 __u32 mtu;
899 int nhoff;
899 900
900 if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr)) 901 if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
901 goto tx_err; 902 goto tx_err;
@@ -908,6 +909,11 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
908 truncate = true; 909 truncate = true;
909 } 910 }
910 911
912 nhoff = skb_network_header(skb) - skb_mac_header(skb);
913 if (skb->protocol == htons(ETH_P_IP) &&
914 (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
915 truncate = true;
916
911 if (skb_cow_head(skb, dev->needed_headroom)) 917 if (skb_cow_head(skb, dev->needed_headroom))
912 goto tx_err; 918 goto tx_err;
913 919
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index dfd8af41824e..7f4493080df6 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -383,28 +383,6 @@ static inline int ip6_forward_finish(struct net *net, struct sock *sk,
383 return dst_output(net, sk, skb); 383 return dst_output(net, sk, skb);
384} 384}
385 385
386unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
387{
388 unsigned int mtu;
389 struct inet6_dev *idev;
390
391 if (dst_metric_locked(dst, RTAX_MTU)) {
392 mtu = dst_metric_raw(dst, RTAX_MTU);
393 if (mtu)
394 return mtu;
395 }
396
397 mtu = IPV6_MIN_MTU;
398 rcu_read_lock();
399 idev = __in6_dev_get(dst->dev);
400 if (idev)
401 mtu = idev->cnf.mtu6;
402 rcu_read_unlock();
403
404 return mtu;
405}
406EXPORT_SYMBOL_GPL(ip6_dst_mtu_forward);
407
408static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) 386static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
409{ 387{
410 if (skb->len <= mtu) 388 if (skb->len <= mtu)
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 65c9e1a58305..7097bbf95843 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -528,7 +528,6 @@ static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
528 .family = NFPROTO_IPV6, 528 .family = NFPROTO_IPV6,
529 }; 529 };
530 530
531 t = ip6t_get_target(e);
532 return xt_check_target(&par, t->u.target_size - sizeof(*t), 531 return xt_check_target(&par, t->u.target_size - sizeof(*t),
533 e->ipv6.proto, 532 e->ipv6.proto,
534 e->ipv6.invflags & IP6T_INV_PROTO); 533 e->ipv6.invflags & IP6T_INV_PROTO);
diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
index 92c0047e7e33..491f808e356a 100644
--- a/net/ipv6/netfilter/ip6t_MASQUERADE.c
+++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c
@@ -29,7 +29,7 @@ masquerade_tg6(struct sk_buff *skb, const struct xt_action_param *par)
29 29
30static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par) 30static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par)
31{ 31{
32 const struct nf_nat_range *range = par->targinfo; 32 const struct nf_nat_range2 *range = par->targinfo;
33 33
34 if (range->flags & NF_NAT_RANGE_MAP_IPS) 34 if (range->flags & NF_NAT_RANGE_MAP_IPS)
35 return -EINVAL; 35 return -EINVAL;
diff --git a/net/ipv6/netfilter/ip6t_srh.c b/net/ipv6/netfilter/ip6t_srh.c
index 33719d5560c8..1059894a6f4c 100644
--- a/net/ipv6/netfilter/ip6t_srh.c
+++ b/net/ipv6/netfilter/ip6t_srh.c
@@ -117,6 +117,130 @@ static bool srh_mt6(const struct sk_buff *skb, struct xt_action_param *par)
117 return true; 117 return true;
118} 118}
119 119
120static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
121{
122 int hdrlen, psidoff, nsidoff, lsidoff, srhoff = 0;
123 const struct ip6t_srh1 *srhinfo = par->matchinfo;
124 struct in6_addr *psid, *nsid, *lsid;
125 struct in6_addr _psid, _nsid, _lsid;
126 struct ipv6_sr_hdr *srh;
127 struct ipv6_sr_hdr _srh;
128
129 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
130 return false;
131 srh = skb_header_pointer(skb, srhoff, sizeof(_srh), &_srh);
132 if (!srh)
133 return false;
134
135 hdrlen = ipv6_optlen(srh);
136 if (skb->len - srhoff < hdrlen)
137 return false;
138
139 if (srh->type != IPV6_SRCRT_TYPE_4)
140 return false;
141
142 if (srh->segments_left > srh->first_segment)
143 return false;
144
145 /* Next Header matching */
146 if (srhinfo->mt_flags & IP6T_SRH_NEXTHDR)
147 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_NEXTHDR,
148 !(srh->nexthdr == srhinfo->next_hdr)))
149 return false;
150
151 /* Header Extension Length matching */
152 if (srhinfo->mt_flags & IP6T_SRH_LEN_EQ)
153 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LEN_EQ,
154 !(srh->hdrlen == srhinfo->hdr_len)))
155 return false;
156 if (srhinfo->mt_flags & IP6T_SRH_LEN_GT)
157 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LEN_GT,
158 !(srh->hdrlen > srhinfo->hdr_len)))
159 return false;
160 if (srhinfo->mt_flags & IP6T_SRH_LEN_LT)
161 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LEN_LT,
162 !(srh->hdrlen < srhinfo->hdr_len)))
163 return false;
164
165 /* Segments Left matching */
166 if (srhinfo->mt_flags & IP6T_SRH_SEGS_EQ)
167 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_SEGS_EQ,
168 !(srh->segments_left == srhinfo->segs_left)))
169 return false;
170 if (srhinfo->mt_flags & IP6T_SRH_SEGS_GT)
171 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_SEGS_GT,
172 !(srh->segments_left > srhinfo->segs_left)))
173 return false;
174 if (srhinfo->mt_flags & IP6T_SRH_SEGS_LT)
175 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_SEGS_LT,
176 !(srh->segments_left < srhinfo->segs_left)))
177 return false;
178
179 /**
180 * Last Entry matching
181 * Last_Entry field was introduced in revision 6 of the SRH draft.
182 * It was called First_Segment in the previous revision
183 */
184 if (srhinfo->mt_flags & IP6T_SRH_LAST_EQ)
185 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LAST_EQ,
186 !(srh->first_segment == srhinfo->last_entry)))
187 return false;
188 if (srhinfo->mt_flags & IP6T_SRH_LAST_GT)
189 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LAST_GT,
190 !(srh->first_segment > srhinfo->last_entry)))
191 return false;
192 if (srhinfo->mt_flags & IP6T_SRH_LAST_LT)
193 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LAST_LT,
194 !(srh->first_segment < srhinfo->last_entry)))
195 return false;
196
197 /**
198 * Tag matchig
199 * Tag field was introduced in revision 6 of the SRH draft
200 */
201 if (srhinfo->mt_flags & IP6T_SRH_TAG)
202 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_TAG,
203 !(srh->tag == srhinfo->tag)))
204 return false;
205
206 /* Previous SID matching */
207 if (srhinfo->mt_flags & IP6T_SRH_PSID) {
208 if (srh->segments_left == srh->first_segment)
209 return false;
210 psidoff = srhoff + sizeof(struct ipv6_sr_hdr) +
211 ((srh->segments_left + 1) * sizeof(struct in6_addr));
212 psid = skb_header_pointer(skb, psidoff, sizeof(_psid), &_psid);
213 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_PSID,
214 ipv6_masked_addr_cmp(psid, &srhinfo->psid_msk,
215 &srhinfo->psid_addr)))
216 return false;
217 }
218
219 /* Next SID matching */
220 if (srhinfo->mt_flags & IP6T_SRH_NSID) {
221 if (srh->segments_left == 0)
222 return false;
223 nsidoff = srhoff + sizeof(struct ipv6_sr_hdr) +
224 ((srh->segments_left - 1) * sizeof(struct in6_addr));
225 nsid = skb_header_pointer(skb, nsidoff, sizeof(_nsid), &_nsid);
226 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_NSID,
227 ipv6_masked_addr_cmp(nsid, &srhinfo->nsid_msk,
228 &srhinfo->nsid_addr)))
229 return false;
230 }
231
232 /* Last SID matching */
233 if (srhinfo->mt_flags & IP6T_SRH_LSID) {
234 lsidoff = srhoff + sizeof(struct ipv6_sr_hdr);
235 lsid = skb_header_pointer(skb, lsidoff, sizeof(_lsid), &_lsid);
236 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LSID,
237 ipv6_masked_addr_cmp(lsid, &srhinfo->lsid_msk,
238 &srhinfo->lsid_addr)))
239 return false;
240 }
241 return true;
242}
243
120static int srh_mt6_check(const struct xt_mtchk_param *par) 244static int srh_mt6_check(const struct xt_mtchk_param *par)
121{ 245{
122 const struct ip6t_srh *srhinfo = par->matchinfo; 246 const struct ip6t_srh *srhinfo = par->matchinfo;
@@ -136,23 +260,54 @@ static int srh_mt6_check(const struct xt_mtchk_param *par)
136 return 0; 260 return 0;
137} 261}
138 262
139static struct xt_match srh_mt6_reg __read_mostly = { 263static int srh1_mt6_check(const struct xt_mtchk_param *par)
140 .name = "srh", 264{
141 .family = NFPROTO_IPV6, 265 const struct ip6t_srh1 *srhinfo = par->matchinfo;
142 .match = srh_mt6, 266
143 .matchsize = sizeof(struct ip6t_srh), 267 if (srhinfo->mt_flags & ~IP6T_SRH_MASK) {
144 .checkentry = srh_mt6_check, 268 pr_info_ratelimited("unknown srh match flags %X\n",
145 .me = THIS_MODULE, 269 srhinfo->mt_flags);
270 return -EINVAL;
271 }
272
273 if (srhinfo->mt_invflags & ~IP6T_SRH_INV_MASK) {
274 pr_info_ratelimited("unknown srh invflags %X\n",
275 srhinfo->mt_invflags);
276 return -EINVAL;
277 }
278
279 return 0;
280}
281
282static struct xt_match srh_mt6_reg[] __read_mostly = {
283 {
284 .name = "srh",
285 .revision = 0,
286 .family = NFPROTO_IPV6,
287 .match = srh_mt6,
288 .matchsize = sizeof(struct ip6t_srh),
289 .checkentry = srh_mt6_check,
290 .me = THIS_MODULE,
291 },
292 {
293 .name = "srh",
294 .revision = 1,
295 .family = NFPROTO_IPV6,
296 .match = srh1_mt6,
297 .matchsize = sizeof(struct ip6t_srh1),
298 .checkentry = srh1_mt6_check,
299 .me = THIS_MODULE,
300 }
146}; 301};
147 302
148static int __init srh_mt6_init(void) 303static int __init srh_mt6_init(void)
149{ 304{
150 return xt_register_match(&srh_mt6_reg); 305 return xt_register_matches(srh_mt6_reg, ARRAY_SIZE(srh_mt6_reg));
151} 306}
152 307
153static void __exit srh_mt6_exit(void) 308static void __exit srh_mt6_exit(void)
154{ 309{
155 xt_unregister_match(&srh_mt6_reg); 310 xt_unregister_matches(srh_mt6_reg, ARRAY_SIZE(srh_mt6_reg));
156} 311}
157 312
158module_init(srh_mt6_init); 313module_init(srh_mt6_init);
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 47306e45a80a..2bf554e18af8 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -35,8 +35,7 @@ static const struct xt_table nf_nat_ipv6_table = {
35 35
36static unsigned int ip6table_nat_do_chain(void *priv, 36static unsigned int ip6table_nat_do_chain(void *priv,
37 struct sk_buff *skb, 37 struct sk_buff *skb,
38 const struct nf_hook_state *state, 38 const struct nf_hook_state *state)
39 struct nf_conn *ct)
40{ 39{
41 return ip6t_do_table(skb, state, state->net->ipv6.ip6table_nat); 40 return ip6t_do_table(skb, state, state->net->ipv6.ip6table_nat);
42} 41}
diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c
index 207cb35569b1..c511d206bf9b 100644
--- a/net/ipv6/netfilter/nf_flow_table_ipv6.c
+++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c
@@ -3,256 +3,12 @@
3#include <linux/module.h> 3#include <linux/module.h>
4#include <linux/netfilter.h> 4#include <linux/netfilter.h>
5#include <linux/rhashtable.h> 5#include <linux/rhashtable.h>
6#include <linux/ipv6.h>
7#include <linux/netdevice.h>
8#include <net/ipv6.h>
9#include <net/ip6_route.h>
10#include <net/neighbour.h>
11#include <net/netfilter/nf_flow_table.h> 6#include <net/netfilter/nf_flow_table.h>
12#include <net/netfilter/nf_tables.h> 7#include <net/netfilter/nf_tables.h>
13/* For layer 4 checksum field offset. */
14#include <linux/tcp.h>
15#include <linux/udp.h>
16
17static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
18 struct in6_addr *addr,
19 struct in6_addr *new_addr)
20{
21 struct tcphdr *tcph;
22
23 if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
24 skb_try_make_writable(skb, thoff + sizeof(*tcph)))
25 return -1;
26
27 tcph = (void *)(skb_network_header(skb) + thoff);
28 inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
29 new_addr->s6_addr32, true);
30
31 return 0;
32}
33
34static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
35 struct in6_addr *addr,
36 struct in6_addr *new_addr)
37{
38 struct udphdr *udph;
39
40 if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
41 skb_try_make_writable(skb, thoff + sizeof(*udph)))
42 return -1;
43
44 udph = (void *)(skb_network_header(skb) + thoff);
45 if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
46 inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
47 new_addr->s6_addr32, true);
48 if (!udph->check)
49 udph->check = CSUM_MANGLED_0;
50 }
51
52 return 0;
53}
54
55static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
56 unsigned int thoff, struct in6_addr *addr,
57 struct in6_addr *new_addr)
58{
59 switch (ip6h->nexthdr) {
60 case IPPROTO_TCP:
61 if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0)
62 return NF_DROP;
63 break;
64 case IPPROTO_UDP:
65 if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0)
66 return NF_DROP;
67 break;
68 }
69
70 return 0;
71}
72
73static int nf_flow_snat_ipv6(const struct flow_offload *flow,
74 struct sk_buff *skb, struct ipv6hdr *ip6h,
75 unsigned int thoff,
76 enum flow_offload_tuple_dir dir)
77{
78 struct in6_addr addr, new_addr;
79
80 switch (dir) {
81 case FLOW_OFFLOAD_DIR_ORIGINAL:
82 addr = ip6h->saddr;
83 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
84 ip6h->saddr = new_addr;
85 break;
86 case FLOW_OFFLOAD_DIR_REPLY:
87 addr = ip6h->daddr;
88 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
89 ip6h->daddr = new_addr;
90 break;
91 default:
92 return -1;
93 }
94
95 return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
96}
97
98static int nf_flow_dnat_ipv6(const struct flow_offload *flow,
99 struct sk_buff *skb, struct ipv6hdr *ip6h,
100 unsigned int thoff,
101 enum flow_offload_tuple_dir dir)
102{
103 struct in6_addr addr, new_addr;
104
105 switch (dir) {
106 case FLOW_OFFLOAD_DIR_ORIGINAL:
107 addr = ip6h->daddr;
108 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
109 ip6h->daddr = new_addr;
110 break;
111 case FLOW_OFFLOAD_DIR_REPLY:
112 addr = ip6h->saddr;
113 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
114 ip6h->saddr = new_addr;
115 break;
116 default:
117 return -1;
118 }
119
120 return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
121}
122
123static int nf_flow_nat_ipv6(const struct flow_offload *flow,
124 struct sk_buff *skb,
125 enum flow_offload_tuple_dir dir)
126{
127 struct ipv6hdr *ip6h = ipv6_hdr(skb);
128 unsigned int thoff = sizeof(*ip6h);
129
130 if (flow->flags & FLOW_OFFLOAD_SNAT &&
131 (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
132 nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
133 return -1;
134 if (flow->flags & FLOW_OFFLOAD_DNAT &&
135 (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
136 nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
137 return -1;
138
139 return 0;
140}
141
142static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
143 struct flow_offload_tuple *tuple)
144{
145 struct flow_ports *ports;
146 struct ipv6hdr *ip6h;
147 unsigned int thoff;
148
149 if (!pskb_may_pull(skb, sizeof(*ip6h)))
150 return -1;
151
152 ip6h = ipv6_hdr(skb);
153
154 if (ip6h->nexthdr != IPPROTO_TCP &&
155 ip6h->nexthdr != IPPROTO_UDP)
156 return -1;
157
158 thoff = sizeof(*ip6h);
159 if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
160 return -1;
161
162 ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
163
164 tuple->src_v6 = ip6h->saddr;
165 tuple->dst_v6 = ip6h->daddr;
166 tuple->src_port = ports->source;
167 tuple->dst_port = ports->dest;
168 tuple->l3proto = AF_INET6;
169 tuple->l4proto = ip6h->nexthdr;
170 tuple->iifidx = dev->ifindex;
171
172 return 0;
173}
174
175/* Based on ip_exceeds_mtu(). */
176static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
177{
178 if (skb->len <= mtu)
179 return false;
180
181 if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
182 return false;
183
184 return true;
185}
186
187static bool nf_flow_exceeds_mtu(struct sk_buff *skb, const struct rt6_info *rt)
188{
189 u32 mtu;
190
191 mtu = ip6_dst_mtu_forward(&rt->dst);
192 if (__nf_flow_exceeds_mtu(skb, mtu))
193 return true;
194
195 return false;
196}
197
198unsigned int
199nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
200 const struct nf_hook_state *state)
201{
202 struct flow_offload_tuple_rhash *tuplehash;
203 struct nf_flowtable *flow_table = priv;
204 struct flow_offload_tuple tuple = {};
205 enum flow_offload_tuple_dir dir;
206 struct flow_offload *flow;
207 struct net_device *outdev;
208 struct in6_addr *nexthop;
209 struct ipv6hdr *ip6h;
210 struct rt6_info *rt;
211
212 if (skb->protocol != htons(ETH_P_IPV6))
213 return NF_ACCEPT;
214
215 if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
216 return NF_ACCEPT;
217
218 tuplehash = flow_offload_lookup(flow_table, &tuple);
219 if (tuplehash == NULL)
220 return NF_ACCEPT;
221
222 outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
223 if (!outdev)
224 return NF_ACCEPT;
225
226 dir = tuplehash->tuple.dir;
227 flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
228
229 rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
230 if (unlikely(nf_flow_exceeds_mtu(skb, rt)))
231 return NF_ACCEPT;
232
233 if (skb_try_make_writable(skb, sizeof(*ip6h)))
234 return NF_DROP;
235
236 if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
237 nf_flow_nat_ipv6(flow, skb, dir) < 0)
238 return NF_DROP;
239
240 flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
241 ip6h = ipv6_hdr(skb);
242 ip6h->hop_limit--;
243
244 skb->dev = outdev;
245 nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
246 neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
247
248 return NF_STOLEN;
249}
250EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);
251 8
252static struct nf_flowtable_type flowtable_ipv6 = { 9static struct nf_flowtable_type flowtable_ipv6 = {
253 .family = NFPROTO_IPV6, 10 .family = NFPROTO_IPV6,
254 .params = &nf_flow_offload_rhash_params, 11 .init = nf_flow_table_init,
255 .gc = nf_flow_offload_work_gc,
256 .free = nf_flow_table_free, 12 .free = nf_flow_table_free,
257 .hook = nf_flow_offload_ipv6_hook, 13 .hook = nf_flow_offload_ipv6_hook,
258 .owner = THIS_MODULE, 14 .owner = THIS_MODULE,
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index 6b7f075f811f..f1582b6f9588 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -62,7 +62,7 @@ static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
62#endif 62#endif
63 63
64static bool nf_nat_ipv6_in_range(const struct nf_conntrack_tuple *t, 64static bool nf_nat_ipv6_in_range(const struct nf_conntrack_tuple *t,
65 const struct nf_nat_range *range) 65 const struct nf_nat_range2 *range)
66{ 66{
67 return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 && 67 return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
68 ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0; 68 ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
@@ -151,7 +151,7 @@ static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
151 151
152#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 152#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
153static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[], 153static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
154 struct nf_nat_range *range) 154 struct nf_nat_range2 *range)
155{ 155{
156 if (tb[CTA_NAT_V6_MINIP]) { 156 if (tb[CTA_NAT_V6_MINIP]) {
157 nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP], 157 nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP],
@@ -257,8 +257,7 @@ nf_nat_ipv6_fn(void *priv, struct sk_buff *skb,
257 const struct nf_hook_state *state, 257 const struct nf_hook_state *state,
258 unsigned int (*do_chain)(void *priv, 258 unsigned int (*do_chain)(void *priv,
259 struct sk_buff *skb, 259 struct sk_buff *skb,
260 const struct nf_hook_state *state, 260 const struct nf_hook_state *state))
261 struct nf_conn *ct))
262{ 261{
263 struct nf_conn *ct; 262 struct nf_conn *ct;
264 enum ip_conntrack_info ctinfo; 263 enum ip_conntrack_info ctinfo;
@@ -303,7 +302,7 @@ nf_nat_ipv6_fn(void *priv, struct sk_buff *skb,
303 if (!nf_nat_initialized(ct, maniptype)) { 302 if (!nf_nat_initialized(ct, maniptype)) {
304 unsigned int ret; 303 unsigned int ret;
305 304
306 ret = do_chain(priv, skb, state, ct); 305 ret = do_chain(priv, skb, state);
307 if (ret != NF_ACCEPT) 306 if (ret != NF_ACCEPT)
308 return ret; 307 return ret;
309 308
@@ -343,8 +342,7 @@ nf_nat_ipv6_in(void *priv, struct sk_buff *skb,
343 const struct nf_hook_state *state, 342 const struct nf_hook_state *state,
344 unsigned int (*do_chain)(void *priv, 343 unsigned int (*do_chain)(void *priv,
345 struct sk_buff *skb, 344 struct sk_buff *skb,
346 const struct nf_hook_state *state, 345 const struct nf_hook_state *state))
347 struct nf_conn *ct))
348{ 346{
349 unsigned int ret; 347 unsigned int ret;
350 struct in6_addr daddr = ipv6_hdr(skb)->daddr; 348 struct in6_addr daddr = ipv6_hdr(skb)->daddr;
@@ -363,8 +361,7 @@ nf_nat_ipv6_out(void *priv, struct sk_buff *skb,
363 const struct nf_hook_state *state, 361 const struct nf_hook_state *state,
364 unsigned int (*do_chain)(void *priv, 362 unsigned int (*do_chain)(void *priv,
365 struct sk_buff *skb, 363 struct sk_buff *skb,
366 const struct nf_hook_state *state, 364 const struct nf_hook_state *state))
367 struct nf_conn *ct))
368{ 365{
369#ifdef CONFIG_XFRM 366#ifdef CONFIG_XFRM
370 const struct nf_conn *ct; 367 const struct nf_conn *ct;
@@ -400,8 +397,7 @@ nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
400 const struct nf_hook_state *state, 397 const struct nf_hook_state *state,
401 unsigned int (*do_chain)(void *priv, 398 unsigned int (*do_chain)(void *priv,
402 struct sk_buff *skb, 399 struct sk_buff *skb,
403 const struct nf_hook_state *state, 400 const struct nf_hook_state *state))
404 struct nf_conn *ct))
405{ 401{
406 const struct nf_conn *ct; 402 const struct nf_conn *ct;
407 enum ip_conntrack_info ctinfo; 403 enum ip_conntrack_info ctinfo;
diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
index 98f61fcb9108..9dfc2b90c362 100644
--- a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
@@ -26,14 +26,14 @@
26static atomic_t v6_worker_count; 26static atomic_t v6_worker_count;
27 27
28unsigned int 28unsigned int
29nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, 29nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
30 const struct net_device *out) 30 const struct net_device *out)
31{ 31{
32 enum ip_conntrack_info ctinfo; 32 enum ip_conntrack_info ctinfo;
33 struct nf_conn_nat *nat; 33 struct nf_conn_nat *nat;
34 struct in6_addr src; 34 struct in6_addr src;
35 struct nf_conn *ct; 35 struct nf_conn *ct;
36 struct nf_nat_range newrange; 36 struct nf_nat_range2 newrange;
37 37
38 ct = nf_ct_get(skb, &ctinfo); 38 ct = nf_ct_get(skb, &ctinfo);
39 WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || 39 WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
diff --git a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
index 57593b00c5b4..d9bf42ba44fa 100644
--- a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
@@ -32,7 +32,7 @@ icmpv6_in_range(const struct nf_conntrack_tuple *tuple,
32static void 32static void
33icmpv6_unique_tuple(const struct nf_nat_l3proto *l3proto, 33icmpv6_unique_tuple(const struct nf_nat_l3proto *l3proto,
34 struct nf_conntrack_tuple *tuple, 34 struct nf_conntrack_tuple *tuple,
35 const struct nf_nat_range *range, 35 const struct nf_nat_range2 *range,
36 enum nf_nat_manip_type maniptype, 36 enum nf_nat_manip_type maniptype,
37 const struct nf_conn *ct) 37 const struct nf_conn *ct)
38{ 38{
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
index 3557b114446c..100a6bd1046a 100644
--- a/net/ipv6/netfilter/nft_chain_nat_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
@@ -26,8 +26,7 @@
26 26
27static unsigned int nft_nat_do_chain(void *priv, 27static unsigned int nft_nat_do_chain(void *priv,
28 struct sk_buff *skb, 28 struct sk_buff *skb,
29 const struct nf_hook_state *state, 29 const struct nf_hook_state *state)
30 struct nf_conn *ct)
31{ 30{
32 struct nft_pktinfo pkt; 31 struct nft_pktinfo pkt;
33 32
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
index 4146536e9c15..dd0122f3cffe 100644
--- a/net/ipv6/netfilter/nft_masq_ipv6.c
+++ b/net/ipv6/netfilter/nft_masq_ipv6.c
@@ -22,7 +22,7 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr,
22 const struct nft_pktinfo *pkt) 22 const struct nft_pktinfo *pkt)
23{ 23{
24 struct nft_masq *priv = nft_expr_priv(expr); 24 struct nft_masq *priv = nft_expr_priv(expr);
25 struct nf_nat_range range; 25 struct nf_nat_range2 range;
26 26
27 memset(&range, 0, sizeof(range)); 27 memset(&range, 0, sizeof(range));
28 range.flags = priv->flags; 28 range.flags = priv->flags;
diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c
index a27e424f690d..74269865acc8 100644
--- a/net/ipv6/netfilter/nft_redir_ipv6.c
+++ b/net/ipv6/netfilter/nft_redir_ipv6.c
@@ -22,7 +22,7 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
22 const struct nft_pktinfo *pkt) 22 const struct nft_pktinfo *pkt)
23{ 23{
24 struct nft_redir *priv = nft_expr_priv(expr); 24 struct nft_redir *priv = nft_expr_priv(expr);
25 struct nf_nat_range range; 25 struct nf_nat_range2 range;
26 26
27 memset(&range, 0, sizeof(range)); 27 memset(&range, 0, sizeof(range));
28 if (priv->sreg_proto_min) { 28 if (priv->sreg_proto_min) {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7ee0a34fba46..daa3662da0ee 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -468,7 +468,7 @@ static inline struct fib6_info *rt6_device_match(struct net *net,
468 !(rt->fib6_nh.nh_flags & RTNH_F_DEAD)) 468 !(rt->fib6_nh.nh_flags & RTNH_F_DEAD))
469 return rt; 469 return rt;
470 470
471 for (sprt = rt; sprt; sprt = rcu_dereference(sprt->rt6_next)) { 471 for (sprt = rt; sprt; sprt = rcu_dereference(sprt->fib6_next)) {
472 const struct net_device *dev = sprt->fib6_nh.nh_dev; 472 const struct net_device *dev = sprt->fib6_nh.nh_dev;
473 473
474 if (sprt->fib6_nh.nh_flags & RTNH_F_DEAD) 474 if (sprt->fib6_nh.nh_flags & RTNH_F_DEAD)
@@ -696,7 +696,7 @@ static struct fib6_info *find_rr_leaf(struct fib6_node *fn,
696 696
697 match = NULL; 697 match = NULL;
698 cont = NULL; 698 cont = NULL;
699 for (rt = rr_head; rt; rt = rcu_dereference(rt->rt6_next)) { 699 for (rt = rr_head; rt; rt = rcu_dereference(rt->fib6_next)) {
700 if (rt->fib6_metric != metric) { 700 if (rt->fib6_metric != metric) {
701 cont = rt; 701 cont = rt;
702 break; 702 break;
@@ -706,7 +706,7 @@ static struct fib6_info *find_rr_leaf(struct fib6_node *fn,
706 } 706 }
707 707
708 for (rt = leaf; rt && rt != rr_head; 708 for (rt = leaf; rt && rt != rr_head;
709 rt = rcu_dereference(rt->rt6_next)) { 709 rt = rcu_dereference(rt->fib6_next)) {
710 if (rt->fib6_metric != metric) { 710 if (rt->fib6_metric != metric) {
711 cont = rt; 711 cont = rt;
712 break; 712 break;
@@ -718,7 +718,7 @@ static struct fib6_info *find_rr_leaf(struct fib6_node *fn,
718 if (match || !cont) 718 if (match || !cont)
719 return match; 719 return match;
720 720
721 for (rt = cont; rt; rt = rcu_dereference(rt->rt6_next)) 721 for (rt = cont; rt; rt = rcu_dereference(rt->fib6_next))
722 match = find_match(rt, oif, strict, &mpri, match, do_rr); 722 match = find_match(rt, oif, strict, &mpri, match, do_rr);
723 723
724 return match; 724 return match;
@@ -756,7 +756,7 @@ static struct fib6_info *rt6_select(struct net *net, struct fib6_node *fn,
756 &do_rr); 756 &do_rr);
757 757
758 if (do_rr) { 758 if (do_rr) {
759 struct fib6_info *next = rcu_dereference(rt0->rt6_next); 759 struct fib6_info *next = rcu_dereference(rt0->fib6_next);
760 760
761 /* no entries matched; do round-robin */ 761 /* no entries matched; do round-robin */
762 if (!next || next->fib6_metric != rt0->fib6_metric) 762 if (!next || next->fib6_metric != rt0->fib6_metric)
@@ -1932,11 +1932,16 @@ static void ip6_multipath_l3_keys(const struct sk_buff *skb,
1932 const struct ipv6hdr *inner_iph; 1932 const struct ipv6hdr *inner_iph;
1933 const struct icmp6hdr *icmph; 1933 const struct icmp6hdr *icmph;
1934 struct ipv6hdr _inner_iph; 1934 struct ipv6hdr _inner_iph;
1935 struct icmp6hdr _icmph;
1935 1936
1936 if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6)) 1937 if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
1937 goto out; 1938 goto out;
1938 1939
1939 icmph = icmp6_hdr(skb); 1940 icmph = skb_header_pointer(skb, skb_transport_offset(skb),
1941 sizeof(_icmph), &_icmph);
1942 if (!icmph)
1943 goto out;
1944
1940 if (icmph->icmp6_type != ICMPV6_DEST_UNREACH && 1945 if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
1941 icmph->icmp6_type != ICMPV6_PKT_TOOBIG && 1946 icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
1942 icmph->icmp6_type != ICMPV6_TIME_EXCEED && 1947 icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
@@ -3776,7 +3781,7 @@ static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
3776 if (iter->fib6_metric == rt->fib6_metric && 3781 if (iter->fib6_metric == rt->fib6_metric &&
3777 rt6_qualify_for_ecmp(iter)) 3782 rt6_qualify_for_ecmp(iter))
3778 return iter; 3783 return iter;
3779 iter = rcu_dereference_protected(iter->rt6_next, 3784 iter = rcu_dereference_protected(iter->fib6_next,
3780 lockdep_is_held(&rt->fib6_table->tb6_lock)); 3785 lockdep_is_held(&rt->fib6_table->tb6_lock));
3781 } 3786 }
3782 3787
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 9898926ce30d..eab39bd91548 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -127,6 +127,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
127 return err; 127 return err;
128 128
129 inner_hdr = ipv6_hdr(skb); 129 inner_hdr = ipv6_hdr(skb);
130 flowlabel = seg6_make_flowlabel(net, skb, inner_hdr);
130 131
131 skb_push(skb, tot_len); 132 skb_push(skb, tot_len);
132 skb_reset_network_header(skb); 133 skb_reset_network_header(skb);
@@ -138,7 +139,6 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
138 * decapsulation will overwrite inner hlim with outer hlim 139 * decapsulation will overwrite inner hlim with outer hlim
139 */ 140 */
140 141
141 flowlabel = seg6_make_flowlabel(net, skb, inner_hdr);
142 if (skb->protocol == htons(ETH_P_IPV6)) { 142 if (skb->protocol == htons(ETH_P_IPV6)) {
143 ip6_flow_hdr(hdr, ip6_tclass(ip6_flowinfo(inner_hdr)), 143 ip6_flow_hdr(hdr, ip6_tclass(ip6_flowinfo(inner_hdr)),
144 flowlabel); 144 flowlabel);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 6acfdd3e442b..a34e28ac03a7 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1051,11 +1051,14 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1051 return -EINVAL; 1051 return -EINVAL;
1052 if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) 1052 if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
1053 return -EINVAL; 1053 return -EINVAL;
1054 if (udp_sk(sk)->no_check6_tx)
1055 return -EINVAL;
1054 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite) 1056 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite)
1055 return -EIO; 1057 return -EIO;
1056 1058
1057 skb_shinfo(skb)->gso_size = cork->gso_size; 1059 skb_shinfo(skb)->gso_size = cork->gso_size;
1058 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; 1060 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
1061 goto csum_partial;
1059 } 1062 }
1060 1063
1061 if (is_udplite) 1064 if (is_udplite)
@@ -1064,6 +1067,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1064 skb->ip_summed = CHECKSUM_NONE; 1067 skb->ip_summed = CHECKSUM_NONE;
1065 goto send; 1068 goto send;
1066 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ 1069 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1070csum_partial:
1067 udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len); 1071 udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
1068 goto send; 1072 goto send;
1069 } else 1073 } else
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 7f1e842ef05a..e87686f7d63c 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -57,6 +57,10 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
57 57
58static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd) 58static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
59{ 59{
60 /* Drop reference taken during previous invocation */
61 if (pd->session)
62 l2tp_session_dec_refcount(pd->session);
63
60 pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx); 64 pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx);
61 pd->session_idx++; 65 pd->session_idx++;
62 66
@@ -105,11 +109,16 @@ static void l2tp_dfs_seq_stop(struct seq_file *p, void *v)
105 if (!pd || pd == SEQ_START_TOKEN) 109 if (!pd || pd == SEQ_START_TOKEN)
106 return; 110 return;
107 111
108 /* Drop reference taken by last invocation of l2tp_dfs_next_tunnel() */ 112 /* Drop reference taken by last invocation of l2tp_dfs_next_session()
113 * or l2tp_dfs_next_tunnel().
114 */
115 if (pd->session) {
116 l2tp_session_dec_refcount(pd->session);
117 pd->session = NULL;
118 }
109 if (pd->tunnel) { 119 if (pd->tunnel) {
110 l2tp_tunnel_dec_refcount(pd->tunnel); 120 l2tp_tunnel_dec_refcount(pd->tunnel);
111 pd->tunnel = NULL; 121 pd->tunnel = NULL;
112 pd->session = NULL;
113 } 122 }
114} 123}
115 124
@@ -250,13 +259,10 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
250 goto out; 259 goto out;
251 } 260 }
252 261
253 /* Show the tunnel or session context */ 262 if (!pd->session)
254 if (!pd->session) {
255 l2tp_dfs_seq_tunnel_show(m, pd->tunnel); 263 l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
256 } else { 264 else
257 l2tp_dfs_seq_session_show(m, pd->session); 265 l2tp_dfs_seq_session_show(m, pd->session);
258 l2tp_session_dec_refcount(pd->session);
259 }
260 266
261out: 267out:
262 return 0; 268 return 0;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 1fd9e145076a..f951c768dcf2 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1576,6 +1576,10 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
1576 1576
1577static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd) 1577static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
1578{ 1578{
1579 /* Drop reference taken during previous invocation */
1580 if (pd->session)
1581 l2tp_session_dec_refcount(pd->session);
1582
1579 pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx); 1583 pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx);
1580 pd->session_idx++; 1584 pd->session_idx++;
1581 1585
@@ -1624,11 +1628,16 @@ static void pppol2tp_seq_stop(struct seq_file *p, void *v)
1624 if (!pd || pd == SEQ_START_TOKEN) 1628 if (!pd || pd == SEQ_START_TOKEN)
1625 return; 1629 return;
1626 1630
1627 /* Drop reference taken by last invocation of pppol2tp_next_tunnel() */ 1631 /* Drop reference taken by last invocation of pppol2tp_next_session()
1632 * or pppol2tp_next_tunnel().
1633 */
1634 if (pd->session) {
1635 l2tp_session_dec_refcount(pd->session);
1636 pd->session = NULL;
1637 }
1628 if (pd->tunnel) { 1638 if (pd->tunnel) {
1629 l2tp_tunnel_dec_refcount(pd->tunnel); 1639 l2tp_tunnel_dec_refcount(pd->tunnel);
1630 pd->tunnel = NULL; 1640 pd->tunnel = NULL;
1631 pd->session = NULL;
1632 } 1641 }
1633} 1642}
1634 1643
@@ -1723,14 +1732,10 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v)
1723 goto out; 1732 goto out;
1724 } 1733 }
1725 1734
1726 /* Show the tunnel or session context. 1735 if (!pd->session)
1727 */
1728 if (!pd->session) {
1729 pppol2tp_seq_tunnel_show(m, pd->tunnel); 1736 pppol2tp_seq_tunnel_show(m, pd->tunnel);
1730 } else { 1737 else
1731 pppol2tp_seq_session_show(m, pd->session); 1738 pppol2tp_seq_session_show(m, pd->session);
1732 l2tp_session_dec_refcount(pd->session);
1733 }
1734 1739
1735out: 1740out:
1736 return 0; 1741 return 0;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 44d8a55e9721..e57c9d479503 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -444,6 +444,9 @@ config NETFILTER_SYNPROXY
444 444
445endif # NF_CONNTRACK 445endif # NF_CONNTRACK
446 446
447config NF_OSF
448 tristate 'Passive OS fingerprint infrastructure'
449
447config NF_TABLES 450config NF_TABLES
448 select NETFILTER_NETLINK 451 select NETFILTER_NETLINK
449 tristate "Netfilter nf_tables support" 452 tristate "Netfilter nf_tables support"
@@ -474,24 +477,6 @@ config NF_TABLES_NETDEV
474 help 477 help
475 This option enables support for the "netdev" table. 478 This option enables support for the "netdev" table.
476 479
477config NFT_EXTHDR
478 tristate "Netfilter nf_tables exthdr module"
479 help
480 This option adds the "exthdr" expression that you can use to match
481 IPv6 extension headers and tcp options.
482
483config NFT_META
484 tristate "Netfilter nf_tables meta module"
485 help
486 This option adds the "meta" expression that you can use to match and
487 to set packet metainformation such as the packet mark.
488
489config NFT_RT
490 tristate "Netfilter nf_tables routing module"
491 help
492 This option adds the "rt" expression that you can use to match
493 packet routing information such as the packet nexthop.
494
495config NFT_NUMGEN 480config NFT_NUMGEN
496 tristate "Netfilter nf_tables number generator module" 481 tristate "Netfilter nf_tables number generator module"
497 help 482 help
@@ -667,8 +652,7 @@ endif # NF_TABLES
667 652
668config NF_FLOW_TABLE_INET 653config NF_FLOW_TABLE_INET
669 tristate "Netfilter flow table mixed IPv4/IPv6 module" 654 tristate "Netfilter flow table mixed IPv4/IPv6 module"
670 depends on NF_FLOW_TABLE_IPV4 655 depends on NF_FLOW_TABLE
671 depends on NF_FLOW_TABLE_IPV6
672 help 656 help
673 This option adds the flow table mixed IPv4/IPv6 support. 657 This option adds the flow table mixed IPv4/IPv6 support.
674 658
@@ -1378,6 +1362,7 @@ config NETFILTER_XT_MATCH_NFACCT
1378config NETFILTER_XT_MATCH_OSF 1362config NETFILTER_XT_MATCH_OSF
1379 tristate '"osf" Passive OS fingerprint match' 1363 tristate '"osf" Passive OS fingerprint match'
1380 depends on NETFILTER_ADVANCED && NETFILTER_NETLINK 1364 depends on NETFILTER_ADVANCED && NETFILTER_NETLINK
1365 select NF_OSF
1381 help 1366 help
1382 This option selects the Passive OS Fingerprinting match module 1367 This option selects the Passive OS Fingerprinting match module
1383 that allows to passively match the remote operating system by 1368 that allows to passively match the remote operating system by
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index fd32bd2c9521..1aa710b5d384 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -76,13 +76,10 @@ obj-$(CONFIG_NF_DUP_NETDEV) += nf_dup_netdev.o
76nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \ 76nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \
77 nf_tables_trace.o nft_immediate.o nft_cmp.o nft_range.o \ 77 nf_tables_trace.o nft_immediate.o nft_cmp.o nft_range.o \
78 nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \ 78 nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \
79 nft_dynset.o 79 nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o
80 80
81obj-$(CONFIG_NF_TABLES) += nf_tables.o 81obj-$(CONFIG_NF_TABLES) += nf_tables.o
82obj-$(CONFIG_NFT_COMPAT) += nft_compat.o 82obj-$(CONFIG_NFT_COMPAT) += nft_compat.o
83obj-$(CONFIG_NFT_EXTHDR) += nft_exthdr.o
84obj-$(CONFIG_NFT_META) += nft_meta.o
85obj-$(CONFIG_NFT_RT) += nft_rt.o
86obj-$(CONFIG_NFT_NUMGEN) += nft_numgen.o 83obj-$(CONFIG_NFT_NUMGEN) += nft_numgen.o
87obj-$(CONFIG_NFT_CT) += nft_ct.o 84obj-$(CONFIG_NFT_CT) += nft_ct.o
88obj-$(CONFIG_NFT_FLOW_OFFLOAD) += nft_flow_offload.o 85obj-$(CONFIG_NFT_FLOW_OFFLOAD) += nft_flow_offload.o
@@ -104,6 +101,7 @@ obj-$(CONFIG_NFT_HASH) += nft_hash.o
104obj-$(CONFIG_NFT_FIB) += nft_fib.o 101obj-$(CONFIG_NFT_FIB) += nft_fib.o
105obj-$(CONFIG_NFT_FIB_INET) += nft_fib_inet.o 102obj-$(CONFIG_NFT_FIB_INET) += nft_fib_inet.o
106obj-$(CONFIG_NFT_FIB_NETDEV) += nft_fib_netdev.o 103obj-$(CONFIG_NFT_FIB_NETDEV) += nft_fib_netdev.o
104obj-$(CONFIG_NF_OSF) += nf_osf.o
107 105
108# nf_tables netdev 106# nf_tables netdev
109obj-$(CONFIG_NFT_DUP_NETDEV) += nft_dup_netdev.o 107obj-$(CONFIG_NFT_DUP_NETDEV) += nft_dup_netdev.o
@@ -111,6 +109,8 @@ obj-$(CONFIG_NFT_FWD_NETDEV) += nft_fwd_netdev.o
111 109
112# flow table infrastructure 110# flow table infrastructure
113obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_table.o 111obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_table.o
112nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o
113
114obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o 114obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o
115 115
116# generic X tables 116# generic X tables
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index b32fb0dbe237..05dc1b77e466 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -225,6 +225,25 @@ config IP_VS_SH
225 If you want to compile it in kernel, say Y. To compile it as a 225 If you want to compile it in kernel, say Y. To compile it as a
226 module, choose M here. If unsure, say N. 226 module, choose M here. If unsure, say N.
227 227
228config IP_VS_MH
229 tristate "maglev hashing scheduling"
230 ---help---
231 The maglev consistent hashing scheduling algorithm provides the
232 Google's Maglev hashing algorithm as a IPVS scheduler. It assigns
233 network connections to the servers through looking up a statically
234 assigned special hash table called the lookup table. Maglev hashing
235 is to assign a preference list of all the lookup table positions
236 to each destination.
237
238 Through this operation, The maglev hashing gives an almost equal
239 share of the lookup table to each of the destinations and provides
240 minimal disruption by using the lookup table. When the set of
241 destinations changes, a connection will likely be sent to the same
242 destination as it was before.
243
244 If you want to compile it in kernel, say Y. To compile it as a
245 module, choose M here. If unsure, say N.
246
228config IP_VS_SED 247config IP_VS_SED
229 tristate "shortest expected delay scheduling" 248 tristate "shortest expected delay scheduling"
230 ---help--- 249 ---help---
@@ -266,6 +285,24 @@ config IP_VS_SH_TAB_BITS
266 needs to be large enough to effectively fit all the destinations 285 needs to be large enough to effectively fit all the destinations
267 multiplied by their respective weights. 286 multiplied by their respective weights.
268 287
288comment 'IPVS MH scheduler'
289
290config IP_VS_MH_TAB_INDEX
291 int "IPVS maglev hashing table index of size (the prime numbers)"
292 range 8 17
293 default 12
294 ---help---
295 The maglev hashing scheduler maps source IPs to destinations
296 stored in a hash table. This table is assigned by a preference
297 list of the positions to each destination until all slots in
298 the table are filled. The index determines the prime for size of
299 the table as 251, 509, 1021, 2039, 4093, 8191, 16381, 32749,
300 65521 or 131071. When using weights to allow destinations to
301 receive more connections, the table is assigned an amount
302 proportional to the weights specified. The table needs to be large
303 enough to effectively fit all the destinations multiplied by their
304 respective weights.
305
269comment 'IPVS application helper' 306comment 'IPVS application helper'
270 307
271config IP_VS_FTP 308config IP_VS_FTP
diff --git a/net/netfilter/ipvs/Makefile b/net/netfilter/ipvs/Makefile
index c552993fa4b9..bfce2677fda2 100644
--- a/net/netfilter/ipvs/Makefile
+++ b/net/netfilter/ipvs/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_IP_VS_LBLC) += ip_vs_lblc.o
33obj-$(CONFIG_IP_VS_LBLCR) += ip_vs_lblcr.o 33obj-$(CONFIG_IP_VS_LBLCR) += ip_vs_lblcr.o
34obj-$(CONFIG_IP_VS_DH) += ip_vs_dh.o 34obj-$(CONFIG_IP_VS_DH) += ip_vs_dh.o
35obj-$(CONFIG_IP_VS_SH) += ip_vs_sh.o 35obj-$(CONFIG_IP_VS_SH) += ip_vs_sh.o
36obj-$(CONFIG_IP_VS_MH) += ip_vs_mh.o
36obj-$(CONFIG_IP_VS_SED) += ip_vs_sed.o 37obj-$(CONFIG_IP_VS_SED) += ip_vs_sed.o
37obj-$(CONFIG_IP_VS_NQ) += ip_vs_nq.o 38obj-$(CONFIG_IP_VS_NQ) += ip_vs_nq.o
38 39
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index f36098887ad0..d4f68d0f7df7 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -821,6 +821,10 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
821 if (add && udest->af != svc->af) 821 if (add && udest->af != svc->af)
822 ipvs->mixed_address_family_dests++; 822 ipvs->mixed_address_family_dests++;
823 823
824 /* keep the last_weight with latest non-0 weight */
825 if (add || udest->weight != 0)
826 atomic_set(&dest->last_weight, udest->weight);
827
824 /* set the weight and the flags */ 828 /* set the weight and the flags */
825 atomic_set(&dest->weight, udest->weight); 829 atomic_set(&dest->weight, udest->weight);
826 conn_flags = udest->conn_flags & IP_VS_CONN_F_DEST_MASK; 830 conn_flags = udest->conn_flags & IP_VS_CONN_F_DEST_MASK;
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index 75f798f8e83b..07459e71d907 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -43,6 +43,7 @@
43#include <linux/module.h> 43#include <linux/module.h>
44#include <linux/kernel.h> 44#include <linux/kernel.h>
45#include <linux/skbuff.h> 45#include <linux/skbuff.h>
46#include <linux/hash.h>
46 47
47#include <net/ip_vs.h> 48#include <net/ip_vs.h>
48 49
@@ -81,7 +82,7 @@ static inline unsigned int ip_vs_dh_hashkey(int af, const union nf_inet_addr *ad
81 addr_fold = addr->ip6[0]^addr->ip6[1]^ 82 addr_fold = addr->ip6[0]^addr->ip6[1]^
82 addr->ip6[2]^addr->ip6[3]; 83 addr->ip6[2]^addr->ip6[3];
83#endif 84#endif
84 return (ntohl(addr_fold)*2654435761UL) & IP_VS_DH_TAB_MASK; 85 return hash_32(ntohl(addr_fold), IP_VS_DH_TAB_BITS);
85} 86}
86 87
87 88
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 3057e453bf31..b9f375e6dc93 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -48,6 +48,7 @@
48#include <linux/kernel.h> 48#include <linux/kernel.h>
49#include <linux/skbuff.h> 49#include <linux/skbuff.h>
50#include <linux/jiffies.h> 50#include <linux/jiffies.h>
51#include <linux/hash.h>
51 52
52/* for sysctl */ 53/* for sysctl */
53#include <linux/fs.h> 54#include <linux/fs.h>
@@ -160,7 +161,7 @@ ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr)
160 addr_fold = addr->ip6[0]^addr->ip6[1]^ 161 addr_fold = addr->ip6[0]^addr->ip6[1]^
161 addr->ip6[2]^addr->ip6[3]; 162 addr->ip6[2]^addr->ip6[3];
162#endif 163#endif
163 return (ntohl(addr_fold)*2654435761UL) & IP_VS_LBLC_TAB_MASK; 164 return hash_32(ntohl(addr_fold), IP_VS_LBLC_TAB_BITS);
164} 165}
165 166
166 167
@@ -371,6 +372,7 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
371 tbl->counter = 1; 372 tbl->counter = 1;
372 tbl->dead = false; 373 tbl->dead = false;
373 tbl->svc = svc; 374 tbl->svc = svc;
375 atomic_set(&tbl->entries, 0);
374 376
375 /* 377 /*
376 * Hook periodic timer for garbage collection 378 * Hook periodic timer for garbage collection
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 92adc04557ed..542c4949937a 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -47,6 +47,7 @@
47#include <linux/jiffies.h> 47#include <linux/jiffies.h>
48#include <linux/list.h> 48#include <linux/list.h>
49#include <linux/slab.h> 49#include <linux/slab.h>
50#include <linux/hash.h>
50 51
51/* for sysctl */ 52/* for sysctl */
52#include <linux/fs.h> 53#include <linux/fs.h>
@@ -323,7 +324,7 @@ ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
323 addr_fold = addr->ip6[0]^addr->ip6[1]^ 324 addr_fold = addr->ip6[0]^addr->ip6[1]^
324 addr->ip6[2]^addr->ip6[3]; 325 addr->ip6[2]^addr->ip6[3];
325#endif 326#endif
326 return (ntohl(addr_fold)*2654435761UL) & IP_VS_LBLCR_TAB_MASK; 327 return hash_32(ntohl(addr_fold), IP_VS_LBLCR_TAB_BITS);
327} 328}
328 329
329 330
@@ -534,6 +535,7 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
534 tbl->counter = 1; 535 tbl->counter = 1;
535 tbl->dead = false; 536 tbl->dead = false;
536 tbl->svc = svc; 537 tbl->svc = svc;
538 atomic_set(&tbl->entries, 0);
537 539
538 /* 540 /*
539 * Hook periodic timer for garbage collection 541 * Hook periodic timer for garbage collection
diff --git a/net/netfilter/ipvs/ip_vs_mh.c b/net/netfilter/ipvs/ip_vs_mh.c
new file mode 100644
index 000000000000..0f795b186eb3
--- /dev/null
+++ b/net/netfilter/ipvs/ip_vs_mh.c
@@ -0,0 +1,540 @@
1// SPDX-License-Identifier: GPL-2.0
2/* IPVS: Maglev Hashing scheduling module
3 *
4 * Authors: Inju Song <inju.song@navercorp.com>
5 *
6 */
7
8/* The mh algorithm is to assign a preference list of all the lookup
9 * table positions to each destination and populate the table with
10 * the most-preferred position of destinations. Then it is to select
11 * destination with the hash key of source IP address through looking
12 * up a the lookup table.
13 *
14 * The algorithm is detailed in:
15 * [3.4 Consistent Hasing]
16https://www.usenix.org/system/files/conference/nsdi16/nsdi16-paper-eisenbud.pdf
17 *
18 */
19
20#define KMSG_COMPONENT "IPVS"
21#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
22
23#include <linux/ip.h>
24#include <linux/slab.h>
25#include <linux/module.h>
26#include <linux/kernel.h>
27#include <linux/skbuff.h>
28
29#include <net/ip_vs.h>
30
31#include <linux/siphash.h>
32#include <linux/bitops.h>
33#include <linux/gcd.h>
34
35#define IP_VS_SVC_F_SCHED_MH_FALLBACK IP_VS_SVC_F_SCHED1 /* MH fallback */
36#define IP_VS_SVC_F_SCHED_MH_PORT IP_VS_SVC_F_SCHED2 /* MH use port */
37
38struct ip_vs_mh_lookup {
39 struct ip_vs_dest __rcu *dest; /* real server (cache) */
40};
41
42struct ip_vs_mh_dest_setup {
43 unsigned int offset; /* starting offset */
44 unsigned int skip; /* skip */
45 unsigned int perm; /* next_offset */
46 int turns; /* weight / gcd() and rshift */
47};
48
49/* Available prime numbers for MH table */
50static int primes[] = {251, 509, 1021, 2039, 4093,
51 8191, 16381, 32749, 65521, 131071};
52
53/* For IPVS MH entry hash table */
54#ifndef CONFIG_IP_VS_MH_TAB_INDEX
55#define CONFIG_IP_VS_MH_TAB_INDEX 12
56#endif
57#define IP_VS_MH_TAB_BITS (CONFIG_IP_VS_MH_TAB_INDEX / 2)
58#define IP_VS_MH_TAB_INDEX (CONFIG_IP_VS_MH_TAB_INDEX - 8)
59#define IP_VS_MH_TAB_SIZE primes[IP_VS_MH_TAB_INDEX]
60
61struct ip_vs_mh_state {
62 struct rcu_head rcu_head;
63 struct ip_vs_mh_lookup *lookup;
64 struct ip_vs_mh_dest_setup *dest_setup;
65 hsiphash_key_t hash1, hash2;
66 int gcd;
67 int rshift;
68};
69
70static inline void generate_hash_secret(hsiphash_key_t *hash1,
71 hsiphash_key_t *hash2)
72{
73 hash1->key[0] = 2654435761UL;
74 hash1->key[1] = 2654435761UL;
75
76 hash2->key[0] = 2654446892UL;
77 hash2->key[1] = 2654446892UL;
78}
79
80/* Helper function to determine if server is unavailable */
81static inline bool is_unavailable(struct ip_vs_dest *dest)
82{
83 return atomic_read(&dest->weight) <= 0 ||
84 dest->flags & IP_VS_DEST_F_OVERLOAD;
85}
86
87/* Returns hash value for IPVS MH entry */
88static inline unsigned int
89ip_vs_mh_hashkey(int af, const union nf_inet_addr *addr,
90 __be16 port, hsiphash_key_t *key, unsigned int offset)
91{
92 unsigned int v;
93 __be32 addr_fold = addr->ip;
94
95#ifdef CONFIG_IP_VS_IPV6
96 if (af == AF_INET6)
97 addr_fold = addr->ip6[0] ^ addr->ip6[1] ^
98 addr->ip6[2] ^ addr->ip6[3];
99#endif
100 v = (offset + ntohs(port) + ntohl(addr_fold));
101 return hsiphash(&v, sizeof(v), key);
102}
103
104/* Reset all the hash buckets of the specified table. */
105static void ip_vs_mh_reset(struct ip_vs_mh_state *s)
106{
107 int i;
108 struct ip_vs_mh_lookup *l;
109 struct ip_vs_dest *dest;
110
111 l = &s->lookup[0];
112 for (i = 0; i < IP_VS_MH_TAB_SIZE; i++) {
113 dest = rcu_dereference_protected(l->dest, 1);
114 if (dest) {
115 ip_vs_dest_put(dest);
116 RCU_INIT_POINTER(l->dest, NULL);
117 }
118 l++;
119 }
120}
121
122static int ip_vs_mh_permutate(struct ip_vs_mh_state *s,
123 struct ip_vs_service *svc)
124{
125 struct list_head *p;
126 struct ip_vs_mh_dest_setup *ds;
127 struct ip_vs_dest *dest;
128 int lw;
129
130 /* If gcd is smaller then 1, number of dests or
131 * all last_weight of dests are zero. So, skip
132 * permutation for the dests.
133 */
134 if (s->gcd < 1)
135 return 0;
136
137 /* Set dest_setup for the dests permutation */
138 p = &svc->destinations;
139 ds = &s->dest_setup[0];
140 while ((p = p->next) != &svc->destinations) {
141 dest = list_entry(p, struct ip_vs_dest, n_list);
142
143 ds->offset = ip_vs_mh_hashkey(svc->af, &dest->addr,
144 dest->port, &s->hash1, 0) %
145 IP_VS_MH_TAB_SIZE;
146 ds->skip = ip_vs_mh_hashkey(svc->af, &dest->addr,
147 dest->port, &s->hash2, 0) %
148 (IP_VS_MH_TAB_SIZE - 1) + 1;
149 ds->perm = ds->offset;
150
151 lw = atomic_read(&dest->last_weight);
152 ds->turns = ((lw / s->gcd) >> s->rshift) ? : (lw != 0);
153 ds++;
154 }
155
156 return 0;
157}
158
159static int ip_vs_mh_populate(struct ip_vs_mh_state *s,
160 struct ip_vs_service *svc)
161{
162 int n, c, dt_count;
163 unsigned long *table;
164 struct list_head *p;
165 struct ip_vs_mh_dest_setup *ds;
166 struct ip_vs_dest *dest, *new_dest;
167
168 /* If gcd is smaller then 1, number of dests or
169 * all last_weight of dests are zero. So, skip
170 * the population for the dests and reset lookup table.
171 */
172 if (s->gcd < 1) {
173 ip_vs_mh_reset(s);
174 return 0;
175 }
176
177 table = kcalloc(BITS_TO_LONGS(IP_VS_MH_TAB_SIZE),
178 sizeof(unsigned long), GFP_KERNEL);
179 if (!table)
180 return -ENOMEM;
181
182 p = &svc->destinations;
183 n = 0;
184 dt_count = 0;
185 while (n < IP_VS_MH_TAB_SIZE) {
186 if (p == &svc->destinations)
187 p = p->next;
188
189 ds = &s->dest_setup[0];
190 while (p != &svc->destinations) {
191 /* Ignore added server with zero weight */
192 if (ds->turns < 1) {
193 p = p->next;
194 ds++;
195 continue;
196 }
197
198 c = ds->perm;
199 while (test_bit(c, table)) {
200 /* Add skip, mod IP_VS_MH_TAB_SIZE */
201 ds->perm += ds->skip;
202 if (ds->perm >= IP_VS_MH_TAB_SIZE)
203 ds->perm -= IP_VS_MH_TAB_SIZE;
204 c = ds->perm;
205 }
206
207 __set_bit(c, table);
208
209 dest = rcu_dereference_protected(s->lookup[c].dest, 1);
210 new_dest = list_entry(p, struct ip_vs_dest, n_list);
211 if (dest != new_dest) {
212 if (dest)
213 ip_vs_dest_put(dest);
214 ip_vs_dest_hold(new_dest);
215 RCU_INIT_POINTER(s->lookup[c].dest, new_dest);
216 }
217
218 if (++n == IP_VS_MH_TAB_SIZE)
219 goto out;
220
221 if (++dt_count >= ds->turns) {
222 dt_count = 0;
223 p = p->next;
224 ds++;
225 }
226 }
227 }
228
229out:
230 kfree(table);
231 return 0;
232}
233
234/* Get ip_vs_dest associated with supplied parameters. */
235static inline struct ip_vs_dest *
236ip_vs_mh_get(struct ip_vs_service *svc, struct ip_vs_mh_state *s,
237 const union nf_inet_addr *addr, __be16 port)
238{
239 unsigned int hash = ip_vs_mh_hashkey(svc->af, addr, port, &s->hash1, 0)
240 % IP_VS_MH_TAB_SIZE;
241 struct ip_vs_dest *dest = rcu_dereference(s->lookup[hash].dest);
242
243 return (!dest || is_unavailable(dest)) ? NULL : dest;
244}
245
246/* As ip_vs_mh_get, but with fallback if selected server is unavailable */
247static inline struct ip_vs_dest *
248ip_vs_mh_get_fallback(struct ip_vs_service *svc, struct ip_vs_mh_state *s,
249 const union nf_inet_addr *addr, __be16 port)
250{
251 unsigned int offset, roffset;
252 unsigned int hash, ihash;
253 struct ip_vs_dest *dest;
254
255 /* First try the dest it's supposed to go to */
256 ihash = ip_vs_mh_hashkey(svc->af, addr, port,
257 &s->hash1, 0) % IP_VS_MH_TAB_SIZE;
258 dest = rcu_dereference(s->lookup[ihash].dest);
259 if (!dest)
260 return NULL;
261 if (!is_unavailable(dest))
262 return dest;
263
264 IP_VS_DBG_BUF(6, "MH: selected unavailable server %s:%u, reselecting",
265 IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port));
266
267 /* If the original dest is unavailable, loop around the table
268 * starting from ihash to find a new dest
269 */
270 for (offset = 0; offset < IP_VS_MH_TAB_SIZE; offset++) {
271 roffset = (offset + ihash) % IP_VS_MH_TAB_SIZE;
272 hash = ip_vs_mh_hashkey(svc->af, addr, port, &s->hash1,
273 roffset) % IP_VS_MH_TAB_SIZE;
274 dest = rcu_dereference(s->lookup[hash].dest);
275 if (!dest)
276 break;
277 if (!is_unavailable(dest))
278 return dest;
279 IP_VS_DBG_BUF(6,
280 "MH: selected unavailable server %s:%u (offset %u), reselecting",
281 IP_VS_DBG_ADDR(dest->af, &dest->addr),
282 ntohs(dest->port), roffset);
283 }
284
285 return NULL;
286}
287
288/* Assign all the hash buckets of the specified table with the service. */
289static int ip_vs_mh_reassign(struct ip_vs_mh_state *s,
290 struct ip_vs_service *svc)
291{
292 int ret;
293
294 if (svc->num_dests > IP_VS_MH_TAB_SIZE)
295 return -EINVAL;
296
297 if (svc->num_dests >= 1) {
298 s->dest_setup = kcalloc(svc->num_dests,
299 sizeof(struct ip_vs_mh_dest_setup),
300 GFP_KERNEL);
301 if (!s->dest_setup)
302 return -ENOMEM;
303 }
304
305 ip_vs_mh_permutate(s, svc);
306
307 ret = ip_vs_mh_populate(s, svc);
308 if (ret < 0)
309 goto out;
310
311 IP_VS_DBG_BUF(6, "MH: reassign lookup table of %s:%u\n",
312 IP_VS_DBG_ADDR(svc->af, &svc->addr),
313 ntohs(svc->port));
314
315out:
316 if (svc->num_dests >= 1) {
317 kfree(s->dest_setup);
318 s->dest_setup = NULL;
319 }
320 return ret;
321}
322
323static int ip_vs_mh_gcd_weight(struct ip_vs_service *svc)
324{
325 struct ip_vs_dest *dest;
326 int weight;
327 int g = 0;
328
329 list_for_each_entry(dest, &svc->destinations, n_list) {
330 weight = atomic_read(&dest->last_weight);
331 if (weight > 0) {
332 if (g > 0)
333 g = gcd(weight, g);
334 else
335 g = weight;
336 }
337 }
338 return g;
339}
340
341/* To avoid assigning huge weight for the MH table,
342 * calculate shift value with gcd.
343 */
344static int ip_vs_mh_shift_weight(struct ip_vs_service *svc, int gcd)
345{
346 struct ip_vs_dest *dest;
347 int new_weight, weight = 0;
348 int mw, shift;
349
350 /* If gcd is smaller then 1, number of dests or
351 * all last_weight of dests are zero. So, return
352 * shift value as zero.
353 */
354 if (gcd < 1)
355 return 0;
356
357 list_for_each_entry(dest, &svc->destinations, n_list) {
358 new_weight = atomic_read(&dest->last_weight);
359 if (new_weight > weight)
360 weight = new_weight;
361 }
362
363 /* Because gcd is greater than zero,
364 * the maximum weight and gcd are always greater than zero
365 */
366 mw = weight / gcd;
367
368 /* shift = occupied bits of weight/gcd - MH highest bits */
369 shift = fls(mw) - IP_VS_MH_TAB_BITS;
370 return (shift >= 0) ? shift : 0;
371}
372
373static void ip_vs_mh_state_free(struct rcu_head *head)
374{
375 struct ip_vs_mh_state *s;
376
377 s = container_of(head, struct ip_vs_mh_state, rcu_head);
378 kfree(s->lookup);
379 kfree(s);
380}
381
382static int ip_vs_mh_init_svc(struct ip_vs_service *svc)
383{
384 int ret;
385 struct ip_vs_mh_state *s;
386
387 /* Allocate the MH table for this service */
388 s = kzalloc(sizeof(*s), GFP_KERNEL);
389 if (!s)
390 return -ENOMEM;
391
392 s->lookup = kcalloc(IP_VS_MH_TAB_SIZE, sizeof(struct ip_vs_mh_lookup),
393 GFP_KERNEL);
394 if (!s->lookup) {
395 kfree(s);
396 return -ENOMEM;
397 }
398
399 generate_hash_secret(&s->hash1, &s->hash2);
400 s->gcd = ip_vs_mh_gcd_weight(svc);
401 s->rshift = ip_vs_mh_shift_weight(svc, s->gcd);
402
403 IP_VS_DBG(6,
404 "MH lookup table (memory=%zdbytes) allocated for current service\n",
405 sizeof(struct ip_vs_mh_lookup) * IP_VS_MH_TAB_SIZE);
406
407 /* Assign the lookup table with current dests */
408 ret = ip_vs_mh_reassign(s, svc);
409 if (ret < 0) {
410 ip_vs_mh_reset(s);
411 ip_vs_mh_state_free(&s->rcu_head);
412 return ret;
413 }
414
415 /* No more failures, attach state */
416 svc->sched_data = s;
417 return 0;
418}
419
420static void ip_vs_mh_done_svc(struct ip_vs_service *svc)
421{
422 struct ip_vs_mh_state *s = svc->sched_data;
423
424 /* Got to clean up lookup entry here */
425 ip_vs_mh_reset(s);
426
427 call_rcu(&s->rcu_head, ip_vs_mh_state_free);
428 IP_VS_DBG(6, "MH lookup table (memory=%zdbytes) released\n",
429 sizeof(struct ip_vs_mh_lookup) * IP_VS_MH_TAB_SIZE);
430}
431
432static int ip_vs_mh_dest_changed(struct ip_vs_service *svc,
433 struct ip_vs_dest *dest)
434{
435 struct ip_vs_mh_state *s = svc->sched_data;
436
437 s->gcd = ip_vs_mh_gcd_weight(svc);
438 s->rshift = ip_vs_mh_shift_weight(svc, s->gcd);
439
440 /* Assign the lookup table with the updated service */
441 return ip_vs_mh_reassign(s, svc);
442}
443
444/* Helper function to get port number */
445static inline __be16
446ip_vs_mh_get_port(const struct sk_buff *skb, struct ip_vs_iphdr *iph)
447{
448 __be16 _ports[2], *ports;
449
450 /* At this point we know that we have a valid packet of some kind.
451 * Because ICMP packets are only guaranteed to have the first 8
452 * bytes, let's just grab the ports. Fortunately they're in the
453 * same position for all three of the protocols we care about.
454 */
455 switch (iph->protocol) {
456 case IPPROTO_TCP:
457 case IPPROTO_UDP:
458 case IPPROTO_SCTP:
459 ports = skb_header_pointer(skb, iph->len, sizeof(_ports),
460 &_ports);
461 if (unlikely(!ports))
462 return 0;
463
464 if (likely(!ip_vs_iph_inverse(iph)))
465 return ports[0];
466 else
467 return ports[1];
468 default:
469 return 0;
470 }
471}
472
473/* Maglev Hashing scheduling */
474static struct ip_vs_dest *
475ip_vs_mh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
476 struct ip_vs_iphdr *iph)
477{
478 struct ip_vs_dest *dest;
479 struct ip_vs_mh_state *s;
480 __be16 port = 0;
481 const union nf_inet_addr *hash_addr;
482
483 hash_addr = ip_vs_iph_inverse(iph) ? &iph->daddr : &iph->saddr;
484
485 IP_VS_DBG(6, "%s : Scheduling...\n", __func__);
486
487 if (svc->flags & IP_VS_SVC_F_SCHED_MH_PORT)
488 port = ip_vs_mh_get_port(skb, iph);
489
490 s = (struct ip_vs_mh_state *)svc->sched_data;
491
492 if (svc->flags & IP_VS_SVC_F_SCHED_MH_FALLBACK)
493 dest = ip_vs_mh_get_fallback(svc, s, hash_addr, port);
494 else
495 dest = ip_vs_mh_get(svc, s, hash_addr, port);
496
497 if (!dest) {
498 ip_vs_scheduler_err(svc, "no destination available");
499 return NULL;
500 }
501
502 IP_VS_DBG_BUF(6, "MH: source IP address %s:%u --> server %s:%u\n",
503 IP_VS_DBG_ADDR(svc->af, hash_addr),
504 ntohs(port),
505 IP_VS_DBG_ADDR(dest->af, &dest->addr),
506 ntohs(dest->port));
507
508 return dest;
509}
510
511/* IPVS MH Scheduler structure */
512static struct ip_vs_scheduler ip_vs_mh_scheduler = {
513 .name = "mh",
514 .refcnt = ATOMIC_INIT(0),
515 .module = THIS_MODULE,
516 .n_list = LIST_HEAD_INIT(ip_vs_mh_scheduler.n_list),
517 .init_service = ip_vs_mh_init_svc,
518 .done_service = ip_vs_mh_done_svc,
519 .add_dest = ip_vs_mh_dest_changed,
520 .del_dest = ip_vs_mh_dest_changed,
521 .upd_dest = ip_vs_mh_dest_changed,
522 .schedule = ip_vs_mh_schedule,
523};
524
525static int __init ip_vs_mh_init(void)
526{
527 return register_ip_vs_scheduler(&ip_vs_mh_scheduler);
528}
529
530static void __exit ip_vs_mh_cleanup(void)
531{
532 unregister_ip_vs_scheduler(&ip_vs_mh_scheduler);
533 rcu_barrier();
534}
535
536module_init(ip_vs_mh_init);
537module_exit(ip_vs_mh_cleanup);
538MODULE_DESCRIPTION("Maglev hashing ipvs scheduler");
539MODULE_LICENSE("GPL v2");
540MODULE_AUTHOR("Inju Song <inju.song@navercorp.com>");
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index bcd9b7bde4ee..569631d2b2a1 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -436,7 +436,7 @@ static bool tcp_state_active(int state)
436 return tcp_state_active_table[state]; 436 return tcp_state_active_table[state];
437} 437}
438 438
439static struct tcp_states_t tcp_states [] = { 439static struct tcp_states_t tcp_states[] = {
440/* INPUT */ 440/* INPUT */
441/* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ 441/* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
442/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }}, 442/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }},
@@ -459,7 +459,7 @@ static struct tcp_states_t tcp_states [] = {
459/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }}, 459/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
460}; 460};
461 461
462static struct tcp_states_t tcp_states_dos [] = { 462static struct tcp_states_t tcp_states_dos[] = {
463/* INPUT */ 463/* INPUT */
464/* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ 464/* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
465/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSA }}, 465/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSA }},
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 16aaac6eedc9..1e01c782583a 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -96,7 +96,8 @@ ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr,
96 addr_fold = addr->ip6[0]^addr->ip6[1]^ 96 addr_fold = addr->ip6[0]^addr->ip6[1]^
97 addr->ip6[2]^addr->ip6[3]; 97 addr->ip6[2]^addr->ip6[3];
98#endif 98#endif
99 return (offset + (ntohs(port) + ntohl(addr_fold))*2654435761UL) & 99 return (offset + hash_32(ntohs(port) + ntohl(addr_fold),
100 IP_VS_SH_TAB_BITS)) &
100 IP_VS_SH_TAB_MASK; 101 IP_VS_SH_TAB_MASK;
101} 102}
102 103
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 41ff04ee2554..605441727008 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -186,6 +186,7 @@ unsigned int nf_conntrack_htable_size __read_mostly;
186EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); 186EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
187 187
188unsigned int nf_conntrack_max __read_mostly; 188unsigned int nf_conntrack_max __read_mostly;
189EXPORT_SYMBOL_GPL(nf_conntrack_max);
189seqcount_t nf_conntrack_generation __read_mostly; 190seqcount_t nf_conntrack_generation __read_mostly;
190static unsigned int nf_conntrack_hash_rnd __read_mostly; 191static unsigned int nf_conntrack_hash_rnd __read_mostly;
191 192
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index f0e9a7511e1a..a11c304fb771 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -566,8 +566,7 @@ static const struct nf_conntrack_expect_policy ftp_exp_policy = {
566 .timeout = 5 * 60, 566 .timeout = 5 * 60,
567}; 567};
568 568
569/* don't make this __exit, since it's called from __init ! */ 569static void __exit nf_conntrack_ftp_fini(void)
570static void nf_conntrack_ftp_fini(void)
571{ 570{
572 nf_conntrack_helpers_unregister(ftp, ports_c * 2); 571 nf_conntrack_helpers_unregister(ftp, ports_c * 2);
573 kfree(ftp_buffer); 572 kfree(ftp_buffer);
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 5523acce9d69..4099f4d79bae 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -232,8 +232,6 @@ static int help(struct sk_buff *skb, unsigned int protoff,
232static struct nf_conntrack_helper irc[MAX_PORTS] __read_mostly; 232static struct nf_conntrack_helper irc[MAX_PORTS] __read_mostly;
233static struct nf_conntrack_expect_policy irc_exp_policy; 233static struct nf_conntrack_expect_policy irc_exp_policy;
234 234
235static void nf_conntrack_irc_fini(void);
236
237static int __init nf_conntrack_irc_init(void) 235static int __init nf_conntrack_irc_init(void)
238{ 236{
239 int i, ret; 237 int i, ret;
@@ -276,9 +274,7 @@ static int __init nf_conntrack_irc_init(void)
276 return 0; 274 return 0;
277} 275}
278 276
279/* This function is intentionally _NOT_ defined as __exit, because 277static void __exit nf_conntrack_irc_fini(void)
280 * it is needed by the init function */
281static void nf_conntrack_irc_fini(void)
282{ 278{
283 nf_conntrack_helpers_unregister(irc, ports_c); 279 nf_conntrack_helpers_unregister(irc, ports_c);
284 kfree(irc_buffer); 280 kfree(irc_buffer);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 4c1d0c5bc268..d807b8770be3 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2205,6 +2205,9 @@ ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
2205 if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks))) 2205 if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
2206 goto nla_put_failure; 2206 goto nla_put_failure;
2207 2207
2208 if (nla_put_be32(skb, CTA_STATS_GLOBAL_MAX_ENTRIES, htonl(nf_conntrack_max)))
2209 goto nla_put_failure;
2210
2208 nlmsg_end(skb, nlh); 2211 nlmsg_end(skb, nlh);
2209 return skb->len; 2212 return skb->len;
2210 2213
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index ae457f39d5ce..5072ff96ab33 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -173,8 +173,7 @@ static const struct nf_conntrack_expect_policy sane_exp_policy = {
173 .timeout = 5 * 60, 173 .timeout = 5 * 60,
174}; 174};
175 175
176/* don't make this __exit, since it's called from __init ! */ 176static void __exit nf_conntrack_sane_fini(void)
177static void nf_conntrack_sane_fini(void)
178{ 177{
179 nf_conntrack_helpers_unregister(sane, ports_c * 2); 178 nf_conntrack_helpers_unregister(sane, ports_c * 2);
180 kfree(sane_buffer); 179 kfree(sane_buffer);
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 908e51e2dc2b..c8d2b6688a2a 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1617,7 +1617,7 @@ static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1
1617 }, 1617 },
1618}; 1618};
1619 1619
1620static void nf_conntrack_sip_fini(void) 1620static void __exit nf_conntrack_sip_fini(void)
1621{ 1621{
1622 nf_conntrack_helpers_unregister(sip, ports_c * 4); 1622 nf_conntrack_helpers_unregister(sip, ports_c * 4);
1623} 1623}
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c
index 0ec6779fd5d9..548b673b3625 100644
--- a/net/netfilter/nf_conntrack_tftp.c
+++ b/net/netfilter/nf_conntrack_tftp.c
@@ -104,7 +104,7 @@ static const struct nf_conntrack_expect_policy tftp_exp_policy = {
104 .timeout = 5 * 60, 104 .timeout = 5 * 60,
105}; 105};
106 106
107static void nf_conntrack_tftp_fini(void) 107static void __exit nf_conntrack_tftp_fini(void)
108{ 108{
109 nf_conntrack_helpers_unregister(tftp, ports_c * 2); 109 nf_conntrack_helpers_unregister(tftp, ports_c * 2);
110} 110}
diff --git a/net/netfilter/nf_flow_table.c b/net/netfilter/nf_flow_table_core.c
index ec410cae9307..eb0d1658ac05 100644
--- a/net/netfilter/nf_flow_table.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -4,6 +4,8 @@
4#include <linux/netfilter.h> 4#include <linux/netfilter.h>
5#include <linux/rhashtable.h> 5#include <linux/rhashtable.h>
6#include <linux/netdevice.h> 6#include <linux/netdevice.h>
7#include <net/ip.h>
8#include <net/ip6_route.h>
7#include <net/netfilter/nf_tables.h> 9#include <net/netfilter/nf_tables.h>
8#include <net/netfilter/nf_flow_table.h> 10#include <net/netfilter/nf_flow_table.h>
9#include <net/netfilter/nf_conntrack.h> 11#include <net/netfilter/nf_conntrack.h>
@@ -16,6 +18,43 @@ struct flow_offload_entry {
16 struct rcu_head rcu_head; 18 struct rcu_head rcu_head;
17}; 19};
18 20
21static DEFINE_MUTEX(flowtable_lock);
22static LIST_HEAD(flowtables);
23
24static void
25flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
26 struct nf_flow_route *route,
27 enum flow_offload_tuple_dir dir)
28{
29 struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
30 struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
31 struct dst_entry *dst = route->tuple[dir].dst;
32
33 ft->dir = dir;
34
35 switch (ctt->src.l3num) {
36 case NFPROTO_IPV4:
37 ft->src_v4 = ctt->src.u3.in;
38 ft->dst_v4 = ctt->dst.u3.in;
39 ft->mtu = ip_dst_mtu_maybe_forward(dst, true);
40 break;
41 case NFPROTO_IPV6:
42 ft->src_v6 = ctt->src.u3.in6;
43 ft->dst_v6 = ctt->dst.u3.in6;
44 ft->mtu = ip6_dst_mtu_forward(dst);
45 break;
46 }
47
48 ft->l3proto = ctt->src.l3num;
49 ft->l4proto = ctt->dst.protonum;
50 ft->src_port = ctt->src.u.tcp.port;
51 ft->dst_port = ctt->dst.u.tcp.port;
52
53 ft->iifidx = route->tuple[dir].ifindex;
54 ft->oifidx = route->tuple[!dir].ifindex;
55 ft->dst_cache = dst;
56}
57
19struct flow_offload * 58struct flow_offload *
20flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route) 59flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
21{ 60{
@@ -40,69 +79,12 @@ flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
40 79
41 entry->ct = ct; 80 entry->ct = ct;
42 81
43 switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num) { 82 flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_ORIGINAL);
44 case NFPROTO_IPV4: 83 flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_REPLY);
45 flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4 =
46 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.in;
47 flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4 =
48 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in;
49 flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4 =
50 ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.in;
51 flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4 =
52 ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.in;
53 break;
54 case NFPROTO_IPV6:
55 flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6 =
56 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.in6;
57 flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6 =
58 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6;
59 flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6 =
60 ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.in6;
61 flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6 =
62 ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.in6;
63 break;
64 }
65
66 flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l3proto =
67 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
68 flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto =
69 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
70 flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.l3proto =
71 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
72 flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.l4proto =
73 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
74
75 flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache =
76 route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst;
77 flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache =
78 route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst;
79
80 flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port =
81 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.tcp.port;
82 flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port =
83 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port;
84 flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port =
85 ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.tcp.port;
86 flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port =
87 ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port;
88
89 flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dir =
90 FLOW_OFFLOAD_DIR_ORIGINAL;
91 flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dir =
92 FLOW_OFFLOAD_DIR_REPLY;
93
94 flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.iifidx =
95 route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].ifindex;
96 flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.oifidx =
97 route->tuple[FLOW_OFFLOAD_DIR_REPLY].ifindex;
98 flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.iifidx =
99 route->tuple[FLOW_OFFLOAD_DIR_REPLY].ifindex;
100 flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.oifidx =
101 route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].ifindex;
102 84
103 if (ct->status & IPS_SRC_NAT) 85 if (ct->status & IPS_SRC_NAT)
104 flow->flags |= FLOW_OFFLOAD_SNAT; 86 flow->flags |= FLOW_OFFLOAD_SNAT;
105 else if (ct->status & IPS_DST_NAT) 87 if (ct->status & IPS_DST_NAT)
106 flow->flags |= FLOW_OFFLOAD_DNAT; 88 flow->flags |= FLOW_OFFLOAD_DNAT;
107 89
108 return flow; 90 return flow;
@@ -118,6 +100,43 @@ err_ct_refcnt:
118} 100}
119EXPORT_SYMBOL_GPL(flow_offload_alloc); 101EXPORT_SYMBOL_GPL(flow_offload_alloc);
120 102
103static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
104{
105 tcp->state = TCP_CONNTRACK_ESTABLISHED;
106 tcp->seen[0].td_maxwin = 0;
107 tcp->seen[1].td_maxwin = 0;
108}
109
110static void flow_offload_fixup_ct_state(struct nf_conn *ct)
111{
112 const struct nf_conntrack_l4proto *l4proto;
113 struct net *net = nf_ct_net(ct);
114 unsigned int *timeouts;
115 unsigned int timeout;
116 int l4num;
117
118 l4num = nf_ct_protonum(ct);
119 if (l4num == IPPROTO_TCP)
120 flow_offload_fixup_tcp(&ct->proto.tcp);
121
122 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), l4num);
123 if (!l4proto)
124 return;
125
126 timeouts = l4proto->get_timeouts(net);
127 if (!timeouts)
128 return;
129
130 if (l4num == IPPROTO_TCP)
131 timeout = timeouts[TCP_CONNTRACK_ESTABLISHED];
132 else if (l4num == IPPROTO_UDP)
133 timeout = timeouts[UDP_CT_REPLIED];
134 else
135 return;
136
137 ct->timeout = nfct_time_stamp + timeout;
138}
139
121void flow_offload_free(struct flow_offload *flow) 140void flow_offload_free(struct flow_offload *flow)
122{ 141{
123 struct flow_offload_entry *e; 142 struct flow_offload_entry *e;
@@ -125,17 +144,46 @@ void flow_offload_free(struct flow_offload *flow)
125 dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache); 144 dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
126 dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache); 145 dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
127 e = container_of(flow, struct flow_offload_entry, flow); 146 e = container_of(flow, struct flow_offload_entry, flow);
128 nf_ct_delete(e->ct, 0, 0); 147 if (flow->flags & FLOW_OFFLOAD_DYING)
148 nf_ct_delete(e->ct, 0, 0);
129 nf_ct_put(e->ct); 149 nf_ct_put(e->ct);
130 kfree_rcu(e, rcu_head); 150 kfree_rcu(e, rcu_head);
131} 151}
132EXPORT_SYMBOL_GPL(flow_offload_free); 152EXPORT_SYMBOL_GPL(flow_offload_free);
133 153
134void flow_offload_dead(struct flow_offload *flow) 154static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
155{
156 const struct flow_offload_tuple *tuple = data;
157
158 return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
159}
160
161static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
162{
163 const struct flow_offload_tuple_rhash *tuplehash = data;
164
165 return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
166}
167
168static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
169 const void *ptr)
135{ 170{
136 flow->flags |= FLOW_OFFLOAD_DYING; 171 const struct flow_offload_tuple *tuple = arg->key;
172 const struct flow_offload_tuple_rhash *x = ptr;
173
174 if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
175 return 1;
176
177 return 0;
137} 178}
138EXPORT_SYMBOL_GPL(flow_offload_dead); 179
180static const struct rhashtable_params nf_flow_offload_rhash_params = {
181 .head_offset = offsetof(struct flow_offload_tuple_rhash, node),
182 .hashfn = flow_offload_hash,
183 .obj_hashfn = flow_offload_hash_obj,
184 .obj_cmpfn = flow_offload_hash_cmp,
185 .automatic_shrinking = true,
186};
139 187
140int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow) 188int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
141{ 189{
@@ -143,10 +191,10 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
143 191
144 rhashtable_insert_fast(&flow_table->rhashtable, 192 rhashtable_insert_fast(&flow_table->rhashtable,
145 &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node, 193 &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
146 *flow_table->type->params); 194 nf_flow_offload_rhash_params);
147 rhashtable_insert_fast(&flow_table->rhashtable, 195 rhashtable_insert_fast(&flow_table->rhashtable,
148 &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node, 196 &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
149 *flow_table->type->params); 197 nf_flow_offload_rhash_params);
150 return 0; 198 return 0;
151} 199}
152EXPORT_SYMBOL_GPL(flow_offload_add); 200EXPORT_SYMBOL_GPL(flow_offload_add);
@@ -154,22 +202,51 @@ EXPORT_SYMBOL_GPL(flow_offload_add);
154static void flow_offload_del(struct nf_flowtable *flow_table, 202static void flow_offload_del(struct nf_flowtable *flow_table,
155 struct flow_offload *flow) 203 struct flow_offload *flow)
156{ 204{
205 struct flow_offload_entry *e;
206
157 rhashtable_remove_fast(&flow_table->rhashtable, 207 rhashtable_remove_fast(&flow_table->rhashtable,
158 &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node, 208 &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
159 *flow_table->type->params); 209 nf_flow_offload_rhash_params);
160 rhashtable_remove_fast(&flow_table->rhashtable, 210 rhashtable_remove_fast(&flow_table->rhashtable,
161 &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node, 211 &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
162 *flow_table->type->params); 212 nf_flow_offload_rhash_params);
213
214 e = container_of(flow, struct flow_offload_entry, flow);
215 clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);
163 216
164 flow_offload_free(flow); 217 flow_offload_free(flow);
165} 218}
166 219
220void flow_offload_teardown(struct flow_offload *flow)
221{
222 struct flow_offload_entry *e;
223
224 flow->flags |= FLOW_OFFLOAD_TEARDOWN;
225
226 e = container_of(flow, struct flow_offload_entry, flow);
227 flow_offload_fixup_ct_state(e->ct);
228}
229EXPORT_SYMBOL_GPL(flow_offload_teardown);
230
167struct flow_offload_tuple_rhash * 231struct flow_offload_tuple_rhash *
168flow_offload_lookup(struct nf_flowtable *flow_table, 232flow_offload_lookup(struct nf_flowtable *flow_table,
169 struct flow_offload_tuple *tuple) 233 struct flow_offload_tuple *tuple)
170{ 234{
171 return rhashtable_lookup_fast(&flow_table->rhashtable, tuple, 235 struct flow_offload_tuple_rhash *tuplehash;
172 *flow_table->type->params); 236 struct flow_offload *flow;
237 int dir;
238
239 tuplehash = rhashtable_lookup_fast(&flow_table->rhashtable, tuple,
240 nf_flow_offload_rhash_params);
241 if (!tuplehash)
242 return NULL;
243
244 dir = tuplehash->tuple.dir;
245 flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
246 if (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))
247 return NULL;
248
249 return tuplehash;
173} 250}
174EXPORT_SYMBOL_GPL(flow_offload_lookup); 251EXPORT_SYMBOL_GPL(flow_offload_lookup);
175 252
@@ -216,11 +293,6 @@ static inline bool nf_flow_has_expired(const struct flow_offload *flow)
216 return (__s32)(flow->timeout - (u32)jiffies) <= 0; 293 return (__s32)(flow->timeout - (u32)jiffies) <= 0;
217} 294}
218 295
219static inline bool nf_flow_is_dying(const struct flow_offload *flow)
220{
221 return flow->flags & FLOW_OFFLOAD_DYING;
222}
223
224static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table) 296static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
225{ 297{
226 struct flow_offload_tuple_rhash *tuplehash; 298 struct flow_offload_tuple_rhash *tuplehash;
@@ -248,7 +320,8 @@ static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
248 flow = container_of(tuplehash, struct flow_offload, tuplehash[0]); 320 flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
249 321
250 if (nf_flow_has_expired(flow) || 322 if (nf_flow_has_expired(flow) ||
251 nf_flow_is_dying(flow)) 323 (flow->flags & (FLOW_OFFLOAD_DYING |
324 FLOW_OFFLOAD_TEARDOWN)))
252 flow_offload_del(flow_table, flow); 325 flow_offload_del(flow_table, flow);
253 } 326 }
254out: 327out:
@@ -258,7 +331,7 @@ out:
258 return 1; 331 return 1;
259} 332}
260 333
261void nf_flow_offload_work_gc(struct work_struct *work) 334static void nf_flow_offload_work_gc(struct work_struct *work)
262{ 335{
263 struct nf_flowtable *flow_table; 336 struct nf_flowtable *flow_table;
264 337
@@ -266,42 +339,6 @@ void nf_flow_offload_work_gc(struct work_struct *work)
266 nf_flow_offload_gc_step(flow_table); 339 nf_flow_offload_gc_step(flow_table);
267 queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ); 340 queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
268} 341}
269EXPORT_SYMBOL_GPL(nf_flow_offload_work_gc);
270
271static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
272{
273 const struct flow_offload_tuple *tuple = data;
274
275 return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
276}
277
278static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
279{
280 const struct flow_offload_tuple_rhash *tuplehash = data;
281
282 return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
283}
284
285static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
286 const void *ptr)
287{
288 const struct flow_offload_tuple *tuple = arg->key;
289 const struct flow_offload_tuple_rhash *x = ptr;
290
291 if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
292 return 1;
293
294 return 0;
295}
296
297const struct rhashtable_params nf_flow_offload_rhash_params = {
298 .head_offset = offsetof(struct flow_offload_tuple_rhash, node),
299 .hashfn = flow_offload_hash,
300 .obj_hashfn = flow_offload_hash_obj,
301 .obj_cmpfn = flow_offload_hash_cmp,
302 .automatic_shrinking = true,
303};
304EXPORT_SYMBOL_GPL(nf_flow_offload_rhash_params);
305 342
306static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff, 343static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
307 __be16 port, __be16 new_port) 344 __be16 port, __be16 new_port)
@@ -419,33 +456,69 @@ int nf_flow_dnat_port(const struct flow_offload *flow,
419} 456}
420EXPORT_SYMBOL_GPL(nf_flow_dnat_port); 457EXPORT_SYMBOL_GPL(nf_flow_dnat_port);
421 458
459int nf_flow_table_init(struct nf_flowtable *flowtable)
460{
461 int err;
462
463 INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
464
465 err = rhashtable_init(&flowtable->rhashtable,
466 &nf_flow_offload_rhash_params);
467 if (err < 0)
468 return err;
469
470 queue_delayed_work(system_power_efficient_wq,
471 &flowtable->gc_work, HZ);
472
473 mutex_lock(&flowtable_lock);
474 list_add(&flowtable->list, &flowtables);
475 mutex_unlock(&flowtable_lock);
476
477 return 0;
478}
479EXPORT_SYMBOL_GPL(nf_flow_table_init);
480
422static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data) 481static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
423{ 482{
424 struct net_device *dev = data; 483 struct net_device *dev = data;
425 484
426 if (dev && flow->tuplehash[0].tuple.iifidx != dev->ifindex) 485 if (!dev) {
486 flow_offload_teardown(flow);
427 return; 487 return;
488 }
428 489
429 flow_offload_dead(flow); 490 if (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
491 flow->tuplehash[1].tuple.iifidx == dev->ifindex)
492 flow_offload_dead(flow);
430} 493}
431 494
432static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable, 495static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
433 void *data) 496 struct net_device *dev)
434{ 497{
435 nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, data); 498 nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
436 flush_delayed_work(&flowtable->gc_work); 499 flush_delayed_work(&flowtable->gc_work);
437} 500}
438 501
439void nf_flow_table_cleanup(struct net *net, struct net_device *dev) 502void nf_flow_table_cleanup(struct net *net, struct net_device *dev)
440{ 503{
441 nft_flow_table_iterate(net, nf_flow_table_iterate_cleanup, dev); 504 struct nf_flowtable *flowtable;
505
506 mutex_lock(&flowtable_lock);
507 list_for_each_entry(flowtable, &flowtables, list)
508 nf_flow_table_iterate_cleanup(flowtable, dev);
509 mutex_unlock(&flowtable_lock);
442} 510}
443EXPORT_SYMBOL_GPL(nf_flow_table_cleanup); 511EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);
444 512
445void nf_flow_table_free(struct nf_flowtable *flow_table) 513void nf_flow_table_free(struct nf_flowtable *flow_table)
446{ 514{
515 mutex_lock(&flowtable_lock);
516 list_del(&flow_table->list);
517 mutex_unlock(&flowtable_lock);
518 cancel_delayed_work_sync(&flow_table->gc_work);
447 nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL); 519 nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
448 WARN_ON(!nf_flow_offload_gc_step(flow_table)); 520 WARN_ON(!nf_flow_offload_gc_step(flow_table));
521 rhashtable_destroy(&flow_table->rhashtable);
449} 522}
450EXPORT_SYMBOL_GPL(nf_flow_table_free); 523EXPORT_SYMBOL_GPL(nf_flow_table_free);
451 524
diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
index 375a1881d93d..99771aa7e7ea 100644
--- a/net/netfilter/nf_flow_table_inet.c
+++ b/net/netfilter/nf_flow_table_inet.c
@@ -22,8 +22,7 @@ nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
22 22
23static struct nf_flowtable_type flowtable_inet = { 23static struct nf_flowtable_type flowtable_inet = {
24 .family = NFPROTO_INET, 24 .family = NFPROTO_INET,
25 .params = &nf_flow_offload_rhash_params, 25 .init = nf_flow_table_init,
26 .gc = nf_flow_offload_work_gc,
27 .free = nf_flow_table_free, 26 .free = nf_flow_table_free,
28 .hook = nf_flow_offload_inet_hook, 27 .hook = nf_flow_offload_inet_hook,
29 .owner = THIS_MODULE, 28 .owner = THIS_MODULE,
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
new file mode 100644
index 000000000000..82451b7e0acb
--- /dev/null
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -0,0 +1,487 @@
1#include <linux/kernel.h>
2#include <linux/init.h>
3#include <linux/module.h>
4#include <linux/netfilter.h>
5#include <linux/rhashtable.h>
6#include <linux/ip.h>
7#include <linux/ipv6.h>
8#include <linux/netdevice.h>
9#include <net/ip.h>
10#include <net/ipv6.h>
11#include <net/ip6_route.h>
12#include <net/neighbour.h>
13#include <net/netfilter/nf_flow_table.h>
14/* For layer 4 checksum field offset. */
15#include <linux/tcp.h>
16#include <linux/udp.h>
17
18static int nf_flow_state_check(struct flow_offload *flow, int proto,
19 struct sk_buff *skb, unsigned int thoff)
20{
21 struct tcphdr *tcph;
22
23 if (proto != IPPROTO_TCP)
24 return 0;
25
26 if (!pskb_may_pull(skb, thoff + sizeof(*tcph)))
27 return -1;
28
29 tcph = (void *)(skb_network_header(skb) + thoff);
30 if (unlikely(tcph->fin || tcph->rst)) {
31 flow_offload_teardown(flow);
32 return -1;
33 }
34
35 return 0;
36}
37
38static int nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
39 __be32 addr, __be32 new_addr)
40{
41 struct tcphdr *tcph;
42
43 if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
44 skb_try_make_writable(skb, thoff + sizeof(*tcph)))
45 return -1;
46
47 tcph = (void *)(skb_network_header(skb) + thoff);
48 inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
49
50 return 0;
51}
52
53static int nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
54 __be32 addr, __be32 new_addr)
55{
56 struct udphdr *udph;
57
58 if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
59 skb_try_make_writable(skb, thoff + sizeof(*udph)))
60 return -1;
61
62 udph = (void *)(skb_network_header(skb) + thoff);
63 if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
64 inet_proto_csum_replace4(&udph->check, skb, addr,
65 new_addr, true);
66 if (!udph->check)
67 udph->check = CSUM_MANGLED_0;
68 }
69
70 return 0;
71}
72
73static int nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
74 unsigned int thoff, __be32 addr,
75 __be32 new_addr)
76{
77 switch (iph->protocol) {
78 case IPPROTO_TCP:
79 if (nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr) < 0)
80 return NF_DROP;
81 break;
82 case IPPROTO_UDP:
83 if (nf_flow_nat_ip_udp(skb, thoff, addr, new_addr) < 0)
84 return NF_DROP;
85 break;
86 }
87
88 return 0;
89}
90
91static int nf_flow_snat_ip(const struct flow_offload *flow, struct sk_buff *skb,
92 struct iphdr *iph, unsigned int thoff,
93 enum flow_offload_tuple_dir dir)
94{
95 __be32 addr, new_addr;
96
97 switch (dir) {
98 case FLOW_OFFLOAD_DIR_ORIGINAL:
99 addr = iph->saddr;
100 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
101 iph->saddr = new_addr;
102 break;
103 case FLOW_OFFLOAD_DIR_REPLY:
104 addr = iph->daddr;
105 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
106 iph->daddr = new_addr;
107 break;
108 default:
109 return -1;
110 }
111 csum_replace4(&iph->check, addr, new_addr);
112
113 return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
114}
115
116static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
117 struct iphdr *iph, unsigned int thoff,
118 enum flow_offload_tuple_dir dir)
119{
120 __be32 addr, new_addr;
121
122 switch (dir) {
123 case FLOW_OFFLOAD_DIR_ORIGINAL:
124 addr = iph->daddr;
125 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
126 iph->daddr = new_addr;
127 break;
128 case FLOW_OFFLOAD_DIR_REPLY:
129 addr = iph->saddr;
130 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
131 iph->saddr = new_addr;
132 break;
133 default:
134 return -1;
135 }
136 csum_replace4(&iph->check, addr, new_addr);
137
138 return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
139}
140
141static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
142 unsigned int thoff, enum flow_offload_tuple_dir dir)
143{
144 struct iphdr *iph = ip_hdr(skb);
145
146 if (flow->flags & FLOW_OFFLOAD_SNAT &&
147 (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
148 nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
149 return -1;
150 if (flow->flags & FLOW_OFFLOAD_DNAT &&
151 (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
152 nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
153 return -1;
154
155 return 0;
156}
157
158static bool ip_has_options(unsigned int thoff)
159{
160 return thoff != sizeof(struct iphdr);
161}
162
163static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
164 struct flow_offload_tuple *tuple)
165{
166 struct flow_ports *ports;
167 unsigned int thoff;
168 struct iphdr *iph;
169
170 if (!pskb_may_pull(skb, sizeof(*iph)))
171 return -1;
172
173 iph = ip_hdr(skb);
174 thoff = iph->ihl * 4;
175
176 if (ip_is_fragment(iph) ||
177 unlikely(ip_has_options(thoff)))
178 return -1;
179
180 if (iph->protocol != IPPROTO_TCP &&
181 iph->protocol != IPPROTO_UDP)
182 return -1;
183
184 thoff = iph->ihl * 4;
185 if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
186 return -1;
187
188 ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
189
190 tuple->src_v4.s_addr = iph->saddr;
191 tuple->dst_v4.s_addr = iph->daddr;
192 tuple->src_port = ports->source;
193 tuple->dst_port = ports->dest;
194 tuple->l3proto = AF_INET;
195 tuple->l4proto = iph->protocol;
196 tuple->iifidx = dev->ifindex;
197
198 return 0;
199}
200
201/* Based on ip_exceeds_mtu(). */
202static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
203{
204 if (skb->len <= mtu)
205 return false;
206
207 if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
208 return false;
209
210 return true;
211}
212
213unsigned int
214nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
215 const struct nf_hook_state *state)
216{
217 struct flow_offload_tuple_rhash *tuplehash;
218 struct nf_flowtable *flow_table = priv;
219 struct flow_offload_tuple tuple = {};
220 enum flow_offload_tuple_dir dir;
221 struct flow_offload *flow;
222 struct net_device *outdev;
223 const struct rtable *rt;
224 unsigned int thoff;
225 struct iphdr *iph;
226 __be32 nexthop;
227
228 if (skb->protocol != htons(ETH_P_IP))
229 return NF_ACCEPT;
230
231 if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
232 return NF_ACCEPT;
233
234 tuplehash = flow_offload_lookup(flow_table, &tuple);
235 if (tuplehash == NULL)
236 return NF_ACCEPT;
237
238 outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
239 if (!outdev)
240 return NF_ACCEPT;
241
242 dir = tuplehash->tuple.dir;
243 flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
244 rt = (const struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
245
246 if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)) &&
247 (ip_hdr(skb)->frag_off & htons(IP_DF)) != 0)
248 return NF_ACCEPT;
249
250 if (skb_try_make_writable(skb, sizeof(*iph)))
251 return NF_DROP;
252
253 thoff = ip_hdr(skb)->ihl * 4;
254 if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
255 return NF_ACCEPT;
256
257 if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
258 nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
259 return NF_DROP;
260
261 flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
262 iph = ip_hdr(skb);
263 ip_decrease_ttl(iph);
264
265 skb->dev = outdev;
266 nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
267 neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
268
269 return NF_STOLEN;
270}
271EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);
272
273static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
274 struct in6_addr *addr,
275 struct in6_addr *new_addr)
276{
277 struct tcphdr *tcph;
278
279 if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
280 skb_try_make_writable(skb, thoff + sizeof(*tcph)))
281 return -1;
282
283 tcph = (void *)(skb_network_header(skb) + thoff);
284 inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
285 new_addr->s6_addr32, true);
286
287 return 0;
288}
289
290static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
291 struct in6_addr *addr,
292 struct in6_addr *new_addr)
293{
294 struct udphdr *udph;
295
296 if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
297 skb_try_make_writable(skb, thoff + sizeof(*udph)))
298 return -1;
299
300 udph = (void *)(skb_network_header(skb) + thoff);
301 if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
302 inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
303 new_addr->s6_addr32, true);
304 if (!udph->check)
305 udph->check = CSUM_MANGLED_0;
306 }
307
308 return 0;
309}
310
311static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
312 unsigned int thoff, struct in6_addr *addr,
313 struct in6_addr *new_addr)
314{
315 switch (ip6h->nexthdr) {
316 case IPPROTO_TCP:
317 if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0)
318 return NF_DROP;
319 break;
320 case IPPROTO_UDP:
321 if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0)
322 return NF_DROP;
323 break;
324 }
325
326 return 0;
327}
328
329static int nf_flow_snat_ipv6(const struct flow_offload *flow,
330 struct sk_buff *skb, struct ipv6hdr *ip6h,
331 unsigned int thoff,
332 enum flow_offload_tuple_dir dir)
333{
334 struct in6_addr addr, new_addr;
335
336 switch (dir) {
337 case FLOW_OFFLOAD_DIR_ORIGINAL:
338 addr = ip6h->saddr;
339 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
340 ip6h->saddr = new_addr;
341 break;
342 case FLOW_OFFLOAD_DIR_REPLY:
343 addr = ip6h->daddr;
344 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
345 ip6h->daddr = new_addr;
346 break;
347 default:
348 return -1;
349 }
350
351 return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
352}
353
354static int nf_flow_dnat_ipv6(const struct flow_offload *flow,
355 struct sk_buff *skb, struct ipv6hdr *ip6h,
356 unsigned int thoff,
357 enum flow_offload_tuple_dir dir)
358{
359 struct in6_addr addr, new_addr;
360
361 switch (dir) {
362 case FLOW_OFFLOAD_DIR_ORIGINAL:
363 addr = ip6h->daddr;
364 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
365 ip6h->daddr = new_addr;
366 break;
367 case FLOW_OFFLOAD_DIR_REPLY:
368 addr = ip6h->saddr;
369 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
370 ip6h->saddr = new_addr;
371 break;
372 default:
373 return -1;
374 }
375
376 return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
377}
378
379static int nf_flow_nat_ipv6(const struct flow_offload *flow,
380 struct sk_buff *skb,
381 enum flow_offload_tuple_dir dir)
382{
383 struct ipv6hdr *ip6h = ipv6_hdr(skb);
384 unsigned int thoff = sizeof(*ip6h);
385
386 if (flow->flags & FLOW_OFFLOAD_SNAT &&
387 (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
388 nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
389 return -1;
390 if (flow->flags & FLOW_OFFLOAD_DNAT &&
391 (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
392 nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
393 return -1;
394
395 return 0;
396}
397
398static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
399 struct flow_offload_tuple *tuple)
400{
401 struct flow_ports *ports;
402 struct ipv6hdr *ip6h;
403 unsigned int thoff;
404
405 if (!pskb_may_pull(skb, sizeof(*ip6h)))
406 return -1;
407
408 ip6h = ipv6_hdr(skb);
409
410 if (ip6h->nexthdr != IPPROTO_TCP &&
411 ip6h->nexthdr != IPPROTO_UDP)
412 return -1;
413
414 thoff = sizeof(*ip6h);
415 if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
416 return -1;
417
418 ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
419
420 tuple->src_v6 = ip6h->saddr;
421 tuple->dst_v6 = ip6h->daddr;
422 tuple->src_port = ports->source;
423 tuple->dst_port = ports->dest;
424 tuple->l3proto = AF_INET6;
425 tuple->l4proto = ip6h->nexthdr;
426 tuple->iifidx = dev->ifindex;
427
428 return 0;
429}
430
431unsigned int
432nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
433 const struct nf_hook_state *state)
434{
435 struct flow_offload_tuple_rhash *tuplehash;
436 struct nf_flowtable *flow_table = priv;
437 struct flow_offload_tuple tuple = {};
438 enum flow_offload_tuple_dir dir;
439 struct flow_offload *flow;
440 struct net_device *outdev;
441 struct in6_addr *nexthop;
442 struct ipv6hdr *ip6h;
443 struct rt6_info *rt;
444
445 if (skb->protocol != htons(ETH_P_IPV6))
446 return NF_ACCEPT;
447
448 if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
449 return NF_ACCEPT;
450
451 tuplehash = flow_offload_lookup(flow_table, &tuple);
452 if (tuplehash == NULL)
453 return NF_ACCEPT;
454
455 outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
456 if (!outdev)
457 return NF_ACCEPT;
458
459 dir = tuplehash->tuple.dir;
460 flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
461 rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
462
463 if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
464 return NF_ACCEPT;
465
466 if (nf_flow_state_check(flow, ipv6_hdr(skb)->nexthdr, skb,
467 sizeof(*ip6h)))
468 return NF_ACCEPT;
469
470 if (skb_try_make_writable(skb, sizeof(*ip6h)))
471 return NF_DROP;
472
473 if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
474 nf_flow_nat_ipv6(flow, skb, dir) < 0)
475 return NF_DROP;
476
477 flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
478 ip6h = ipv6_hdr(skb);
479 ip6h->hop_limit--;
480
481 skb->dev = outdev;
482 nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
483 neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
484
485 return NF_STOLEN;
486}
487EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 617693ff9f4c..37b3c9913b08 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -157,7 +157,7 @@ EXPORT_SYMBOL(nf_nat_used_tuple);
157static int in_range(const struct nf_nat_l3proto *l3proto, 157static int in_range(const struct nf_nat_l3proto *l3proto,
158 const struct nf_nat_l4proto *l4proto, 158 const struct nf_nat_l4proto *l4proto,
159 const struct nf_conntrack_tuple *tuple, 159 const struct nf_conntrack_tuple *tuple,
160 const struct nf_nat_range *range) 160 const struct nf_nat_range2 *range)
161{ 161{
162 /* If we are supposed to map IPs, then we must be in the 162 /* If we are supposed to map IPs, then we must be in the
163 * range specified, otherwise let this drag us onto a new src IP. 163 * range specified, otherwise let this drag us onto a new src IP.
@@ -194,7 +194,7 @@ find_appropriate_src(struct net *net,
194 const struct nf_nat_l4proto *l4proto, 194 const struct nf_nat_l4proto *l4proto,
195 const struct nf_conntrack_tuple *tuple, 195 const struct nf_conntrack_tuple *tuple,
196 struct nf_conntrack_tuple *result, 196 struct nf_conntrack_tuple *result,
197 const struct nf_nat_range *range) 197 const struct nf_nat_range2 *range)
198{ 198{
199 unsigned int h = hash_by_src(net, tuple); 199 unsigned int h = hash_by_src(net, tuple);
200 const struct nf_conn *ct; 200 const struct nf_conn *ct;
@@ -224,7 +224,7 @@ find_appropriate_src(struct net *net,
224static void 224static void
225find_best_ips_proto(const struct nf_conntrack_zone *zone, 225find_best_ips_proto(const struct nf_conntrack_zone *zone,
226 struct nf_conntrack_tuple *tuple, 226 struct nf_conntrack_tuple *tuple,
227 const struct nf_nat_range *range, 227 const struct nf_nat_range2 *range,
228 const struct nf_conn *ct, 228 const struct nf_conn *ct,
229 enum nf_nat_manip_type maniptype) 229 enum nf_nat_manip_type maniptype)
230{ 230{
@@ -298,7 +298,7 @@ find_best_ips_proto(const struct nf_conntrack_zone *zone,
298static void 298static void
299get_unique_tuple(struct nf_conntrack_tuple *tuple, 299get_unique_tuple(struct nf_conntrack_tuple *tuple,
300 const struct nf_conntrack_tuple *orig_tuple, 300 const struct nf_conntrack_tuple *orig_tuple,
301 const struct nf_nat_range *range, 301 const struct nf_nat_range2 *range,
302 struct nf_conn *ct, 302 struct nf_conn *ct,
303 enum nf_nat_manip_type maniptype) 303 enum nf_nat_manip_type maniptype)
304{ 304{
@@ -349,9 +349,10 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
349 /* Only bother mapping if it's not already in range and unique */ 349 /* Only bother mapping if it's not already in range and unique */
350 if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) { 350 if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
351 if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) { 351 if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
352 if (l4proto->in_range(tuple, maniptype, 352 if (!(range->flags & NF_NAT_RANGE_PROTO_OFFSET) &&
353 &range->min_proto, 353 l4proto->in_range(tuple, maniptype,
354 &range->max_proto) && 354 &range->min_proto,
355 &range->max_proto) &&
355 (range->min_proto.all == range->max_proto.all || 356 (range->min_proto.all == range->max_proto.all ||
356 !nf_nat_used_tuple(tuple, ct))) 357 !nf_nat_used_tuple(tuple, ct)))
357 goto out; 358 goto out;
@@ -360,7 +361,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
360 } 361 }
361 } 362 }
362 363
363 /* Last change: get protocol to try to obtain unique tuple. */ 364 /* Last chance: get protocol to try to obtain unique tuple. */
364 l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct); 365 l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
365out: 366out:
366 rcu_read_unlock(); 367 rcu_read_unlock();
@@ -381,7 +382,7 @@ EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);
381 382
382unsigned int 383unsigned int
383nf_nat_setup_info(struct nf_conn *ct, 384nf_nat_setup_info(struct nf_conn *ct,
384 const struct nf_nat_range *range, 385 const struct nf_nat_range2 *range,
385 enum nf_nat_manip_type maniptype) 386 enum nf_nat_manip_type maniptype)
386{ 387{
387 struct net *net = nf_ct_net(ct); 388 struct net *net = nf_ct_net(ct);
@@ -459,7 +460,7 @@ __nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
459 (manip == NF_NAT_MANIP_SRC ? 460 (manip == NF_NAT_MANIP_SRC ?
460 ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 : 461 ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
461 ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3); 462 ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
462 struct nf_nat_range range = { 463 struct nf_nat_range2 range = {
463 .flags = NF_NAT_RANGE_MAP_IPS, 464 .flags = NF_NAT_RANGE_MAP_IPS,
464 .min_addr = ip, 465 .min_addr = ip,
465 .max_addr = ip, 466 .max_addr = ip,
@@ -702,7 +703,7 @@ static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
702 703
703static int nfnetlink_parse_nat_proto(struct nlattr *attr, 704static int nfnetlink_parse_nat_proto(struct nlattr *attr,
704 const struct nf_conn *ct, 705 const struct nf_conn *ct,
705 struct nf_nat_range *range) 706 struct nf_nat_range2 *range)
706{ 707{
707 struct nlattr *tb[CTA_PROTONAT_MAX+1]; 708 struct nlattr *tb[CTA_PROTONAT_MAX+1];
708 const struct nf_nat_l4proto *l4proto; 709 const struct nf_nat_l4proto *l4proto;
@@ -730,7 +731,7 @@ static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
730 731
731static int 732static int
732nfnetlink_parse_nat(const struct nlattr *nat, 733nfnetlink_parse_nat(const struct nlattr *nat,
733 const struct nf_conn *ct, struct nf_nat_range *range, 734 const struct nf_conn *ct, struct nf_nat_range2 *range,
734 const struct nf_nat_l3proto *l3proto) 735 const struct nf_nat_l3proto *l3proto)
735{ 736{
736 struct nlattr *tb[CTA_NAT_MAX+1]; 737 struct nlattr *tb[CTA_NAT_MAX+1];
@@ -758,7 +759,7 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
758 enum nf_nat_manip_type manip, 759 enum nf_nat_manip_type manip,
759 const struct nlattr *attr) 760 const struct nlattr *attr)
760{ 761{
761 struct nf_nat_range range; 762 struct nf_nat_range2 range;
762 const struct nf_nat_l3proto *l3proto; 763 const struct nf_nat_l3proto *l3proto;
763 int err; 764 int err;
764 765
diff --git a/net/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c
index 607a373379b4..99606baedda4 100644
--- a/net/netfilter/nf_nat_helper.c
+++ b/net/netfilter/nf_nat_helper.c
@@ -191,7 +191,7 @@ EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
191void nf_nat_follow_master(struct nf_conn *ct, 191void nf_nat_follow_master(struct nf_conn *ct,
192 struct nf_conntrack_expect *exp) 192 struct nf_conntrack_expect *exp)
193{ 193{
194 struct nf_nat_range range; 194 struct nf_nat_range2 range;
195 195
196 /* This must be a fresh one. */ 196 /* This must be a fresh one. */
197 BUG_ON(ct->status & IPS_NAT_DONE_MASK); 197 BUG_ON(ct->status & IPS_NAT_DONE_MASK);
diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c
index 7d7466dbf663..5d849d835561 100644
--- a/net/netfilter/nf_nat_proto_common.c
+++ b/net/netfilter/nf_nat_proto_common.c
@@ -36,7 +36,7 @@ EXPORT_SYMBOL_GPL(nf_nat_l4proto_in_range);
36 36
37void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto, 37void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
38 struct nf_conntrack_tuple *tuple, 38 struct nf_conntrack_tuple *tuple,
39 const struct nf_nat_range *range, 39 const struct nf_nat_range2 *range,
40 enum nf_nat_manip_type maniptype, 40 enum nf_nat_manip_type maniptype,
41 const struct nf_conn *ct, 41 const struct nf_conn *ct,
42 u16 *rover) 42 u16 *rover)
@@ -83,6 +83,8 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
83 : tuple->src.u.all); 83 : tuple->src.u.all);
84 } else if (range->flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY) { 84 } else if (range->flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY) {
85 off = prandom_u32(); 85 off = prandom_u32();
86 } else if (range->flags & NF_NAT_RANGE_PROTO_OFFSET) {
87 off = (ntohs(*portptr) - ntohs(range->base_proto.all));
86 } else { 88 } else {
87 off = *rover; 89 off = *rover;
88 } 90 }
@@ -91,7 +93,8 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
91 *portptr = htons(min + off % range_size); 93 *portptr = htons(min + off % range_size);
92 if (++i != range_size && nf_nat_used_tuple(tuple, ct)) 94 if (++i != range_size && nf_nat_used_tuple(tuple, ct))
93 continue; 95 continue;
94 if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) 96 if (!(range->flags & (NF_NAT_RANGE_PROTO_RANDOM_ALL|
97 NF_NAT_RANGE_PROTO_OFFSET)))
95 *rover = off; 98 *rover = off;
96 return; 99 return;
97 } 100 }
@@ -100,7 +103,7 @@ EXPORT_SYMBOL_GPL(nf_nat_l4proto_unique_tuple);
100 103
101#if IS_ENABLED(CONFIG_NF_CT_NETLINK) 104#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
102int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[], 105int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
103 struct nf_nat_range *range) 106 struct nf_nat_range2 *range)
104{ 107{
105 if (tb[CTA_PROTONAT_PORT_MIN]) { 108 if (tb[CTA_PROTONAT_PORT_MIN]) {
106 range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]); 109 range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
diff --git a/net/netfilter/nf_nat_proto_dccp.c b/net/netfilter/nf_nat_proto_dccp.c
index 269fcd5dc34c..67ea0d83aa5a 100644
--- a/net/netfilter/nf_nat_proto_dccp.c
+++ b/net/netfilter/nf_nat_proto_dccp.c
@@ -23,7 +23,7 @@ static u_int16_t dccp_port_rover;
23static void 23static void
24dccp_unique_tuple(const struct nf_nat_l3proto *l3proto, 24dccp_unique_tuple(const struct nf_nat_l3proto *l3proto,
25 struct nf_conntrack_tuple *tuple, 25 struct nf_conntrack_tuple *tuple,
26 const struct nf_nat_range *range, 26 const struct nf_nat_range2 *range,
27 enum nf_nat_manip_type maniptype, 27 enum nf_nat_manip_type maniptype,
28 const struct nf_conn *ct) 28 const struct nf_conn *ct)
29{ 29{
diff --git a/net/netfilter/nf_nat_proto_sctp.c b/net/netfilter/nf_nat_proto_sctp.c
index c57ee3240b1d..1c5d9b65fbba 100644
--- a/net/netfilter/nf_nat_proto_sctp.c
+++ b/net/netfilter/nf_nat_proto_sctp.c
@@ -17,7 +17,7 @@ static u_int16_t nf_sctp_port_rover;
17static void 17static void
18sctp_unique_tuple(const struct nf_nat_l3proto *l3proto, 18sctp_unique_tuple(const struct nf_nat_l3proto *l3proto,
19 struct nf_conntrack_tuple *tuple, 19 struct nf_conntrack_tuple *tuple,
20 const struct nf_nat_range *range, 20 const struct nf_nat_range2 *range,
21 enum nf_nat_manip_type maniptype, 21 enum nf_nat_manip_type maniptype,
22 const struct nf_conn *ct) 22 const struct nf_conn *ct)
23{ 23{
diff --git a/net/netfilter/nf_nat_proto_tcp.c b/net/netfilter/nf_nat_proto_tcp.c
index 4f8820fc5148..f15fcd475f98 100644
--- a/net/netfilter/nf_nat_proto_tcp.c
+++ b/net/netfilter/nf_nat_proto_tcp.c
@@ -23,7 +23,7 @@ static u16 tcp_port_rover;
23static void 23static void
24tcp_unique_tuple(const struct nf_nat_l3proto *l3proto, 24tcp_unique_tuple(const struct nf_nat_l3proto *l3proto,
25 struct nf_conntrack_tuple *tuple, 25 struct nf_conntrack_tuple *tuple,
26 const struct nf_nat_range *range, 26 const struct nf_nat_range2 *range,
27 enum nf_nat_manip_type maniptype, 27 enum nf_nat_manip_type maniptype,
28 const struct nf_conn *ct) 28 const struct nf_conn *ct)
29{ 29{
diff --git a/net/netfilter/nf_nat_proto_udp.c b/net/netfilter/nf_nat_proto_udp.c
index edd4a77dc09a..5790f70a83b2 100644
--- a/net/netfilter/nf_nat_proto_udp.c
+++ b/net/netfilter/nf_nat_proto_udp.c
@@ -22,7 +22,7 @@ static u16 udp_port_rover;
22static void 22static void
23udp_unique_tuple(const struct nf_nat_l3proto *l3proto, 23udp_unique_tuple(const struct nf_nat_l3proto *l3proto,
24 struct nf_conntrack_tuple *tuple, 24 struct nf_conntrack_tuple *tuple,
25 const struct nf_nat_range *range, 25 const struct nf_nat_range2 *range,
26 enum nf_nat_manip_type maniptype, 26 enum nf_nat_manip_type maniptype,
27 const struct nf_conn *ct) 27 const struct nf_conn *ct)
28{ 28{
@@ -100,7 +100,7 @@ static bool udplite_manip_pkt(struct sk_buff *skb,
100static void 100static void
101udplite_unique_tuple(const struct nf_nat_l3proto *l3proto, 101udplite_unique_tuple(const struct nf_nat_l3proto *l3proto,
102 struct nf_conntrack_tuple *tuple, 102 struct nf_conntrack_tuple *tuple,
103 const struct nf_nat_range *range, 103 const struct nf_nat_range2 *range,
104 enum nf_nat_manip_type maniptype, 104 enum nf_nat_manip_type maniptype,
105 const struct nf_conn *ct) 105 const struct nf_conn *ct)
106{ 106{
diff --git a/net/netfilter/nf_nat_proto_unknown.c b/net/netfilter/nf_nat_proto_unknown.c
index 6e494d584412..c5db3e251232 100644
--- a/net/netfilter/nf_nat_proto_unknown.c
+++ b/net/netfilter/nf_nat_proto_unknown.c
@@ -27,7 +27,7 @@ static bool unknown_in_range(const struct nf_conntrack_tuple *tuple,
27 27
28static void unknown_unique_tuple(const struct nf_nat_l3proto *l3proto, 28static void unknown_unique_tuple(const struct nf_nat_l3proto *l3proto,
29 struct nf_conntrack_tuple *tuple, 29 struct nf_conntrack_tuple *tuple,
30 const struct nf_nat_range *range, 30 const struct nf_nat_range2 *range,
31 enum nf_nat_manip_type maniptype, 31 enum nf_nat_manip_type maniptype,
32 const struct nf_conn *ct) 32 const struct nf_conn *ct)
33{ 33{
diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c
index 25b06b959118..7c4bb0a773ca 100644
--- a/net/netfilter/nf_nat_redirect.c
+++ b/net/netfilter/nf_nat_redirect.c
@@ -36,7 +36,7 @@ nf_nat_redirect_ipv4(struct sk_buff *skb,
36 struct nf_conn *ct; 36 struct nf_conn *ct;
37 enum ip_conntrack_info ctinfo; 37 enum ip_conntrack_info ctinfo;
38 __be32 newdst; 38 __be32 newdst;
39 struct nf_nat_range newrange; 39 struct nf_nat_range2 newrange;
40 40
41 WARN_ON(hooknum != NF_INET_PRE_ROUTING && 41 WARN_ON(hooknum != NF_INET_PRE_ROUTING &&
42 hooknum != NF_INET_LOCAL_OUT); 42 hooknum != NF_INET_LOCAL_OUT);
@@ -82,10 +82,10 @@ EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv4);
82static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT; 82static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
83 83
84unsigned int 84unsigned int
85nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, 85nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
86 unsigned int hooknum) 86 unsigned int hooknum)
87{ 87{
88 struct nf_nat_range newrange; 88 struct nf_nat_range2 newrange;
89 struct in6_addr newdst; 89 struct in6_addr newdst;
90 enum ip_conntrack_info ctinfo; 90 enum ip_conntrack_info ctinfo;
91 struct nf_conn *ct; 91 struct nf_conn *ct;
diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c
index 791fac4fd745..1f3086074981 100644
--- a/net/netfilter/nf_nat_sip.c
+++ b/net/netfilter/nf_nat_sip.c
@@ -316,7 +316,7 @@ static void nf_nat_sip_seq_adjust(struct sk_buff *skb, unsigned int protoff,
316static void nf_nat_sip_expected(struct nf_conn *ct, 316static void nf_nat_sip_expected(struct nf_conn *ct,
317 struct nf_conntrack_expect *exp) 317 struct nf_conntrack_expect *exp)
318{ 318{
319 struct nf_nat_range range; 319 struct nf_nat_range2 range;
320 320
321 /* This must be a fresh one. */ 321 /* This must be a fresh one. */
322 BUG_ON(ct->status & IPS_NAT_DONE_MASK); 322 BUG_ON(ct->status & IPS_NAT_DONE_MASK);
diff --git a/net/netfilter/nf_osf.c b/net/netfilter/nf_osf.c
new file mode 100644
index 000000000000..5ba5c7bef2f9
--- /dev/null
+++ b/net/netfilter/nf_osf.c
@@ -0,0 +1,218 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2#include <linux/module.h>
3#include <linux/kernel.h>
4
5#include <linux/capability.h>
6#include <linux/if.h>
7#include <linux/inetdevice.h>
8#include <linux/ip.h>
9#include <linux/list.h>
10#include <linux/rculist.h>
11#include <linux/skbuff.h>
12#include <linux/slab.h>
13#include <linux/tcp.h>
14
15#include <net/ip.h>
16#include <net/tcp.h>
17
18#include <linux/netfilter/nfnetlink.h>
19#include <linux/netfilter/x_tables.h>
20#include <net/netfilter/nf_log.h>
21#include <linux/netfilter/nf_osf.h>
22
23static inline int nf_osf_ttl(const struct sk_buff *skb,
24 const struct nf_osf_info *info,
25 unsigned char f_ttl)
26{
27 const struct iphdr *ip = ip_hdr(skb);
28
29 if (info->flags & NF_OSF_TTL) {
30 if (info->ttl == NF_OSF_TTL_TRUE)
31 return ip->ttl == f_ttl;
32 if (info->ttl == NF_OSF_TTL_NOCHECK)
33 return 1;
34 else if (ip->ttl <= f_ttl)
35 return 1;
36 else {
37 struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
38 int ret = 0;
39
40 for_ifa(in_dev) {
41 if (inet_ifa_match(ip->saddr, ifa)) {
42 ret = (ip->ttl == f_ttl);
43 break;
44 }
45 }
46 endfor_ifa(in_dev);
47
48 return ret;
49 }
50 }
51
52 return ip->ttl == f_ttl;
53}
54
55bool
56nf_osf_match(const struct sk_buff *skb, u_int8_t family,
57 int hooknum, struct net_device *in, struct net_device *out,
58 const struct nf_osf_info *info, struct net *net,
59 const struct list_head *nf_osf_fingers)
60{
61 const unsigned char *optp = NULL, *_optp = NULL;
62 unsigned int optsize = 0, check_WSS = 0;
63 int fmatch = FMATCH_WRONG, fcount = 0;
64 const struct iphdr *ip = ip_hdr(skb);
65 const struct nf_osf_user_finger *f;
66 unsigned char opts[MAX_IPOPTLEN];
67 const struct nf_osf_finger *kf;
68 u16 window, totlen, mss = 0;
69 const struct tcphdr *tcp;
70 struct tcphdr _tcph;
71 bool df;
72
73 tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph);
74 if (!tcp)
75 return false;
76
77 if (!tcp->syn)
78 return false;
79
80 totlen = ntohs(ip->tot_len);
81 df = ntohs(ip->frag_off) & IP_DF;
82 window = ntohs(tcp->window);
83
84 if (tcp->doff * 4 > sizeof(struct tcphdr)) {
85 optsize = tcp->doff * 4 - sizeof(struct tcphdr);
86
87 _optp = optp = skb_header_pointer(skb, ip_hdrlen(skb) +
88 sizeof(struct tcphdr), optsize, opts);
89 }
90
91 list_for_each_entry_rcu(kf, &nf_osf_fingers[df], finger_entry) {
92 int foptsize, optnum;
93
94 f = &kf->finger;
95
96 if (!(info->flags & NF_OSF_LOG) && strcmp(info->genre, f->genre))
97 continue;
98
99 optp = _optp;
100 fmatch = FMATCH_WRONG;
101
102 if (totlen != f->ss || !nf_osf_ttl(skb, info, f->ttl))
103 continue;
104
105 /*
106 * Should not happen if userspace parser was written correctly.
107 */
108 if (f->wss.wc >= OSF_WSS_MAX)
109 continue;
110
111 /* Check options */
112
113 foptsize = 0;
114 for (optnum = 0; optnum < f->opt_num; ++optnum)
115 foptsize += f->opt[optnum].length;
116
117 if (foptsize > MAX_IPOPTLEN ||
118 optsize > MAX_IPOPTLEN ||
119 optsize != foptsize)
120 continue;
121
122 check_WSS = f->wss.wc;
123
124 for (optnum = 0; optnum < f->opt_num; ++optnum) {
125 if (f->opt[optnum].kind == (*optp)) {
126 __u32 len = f->opt[optnum].length;
127 const __u8 *optend = optp + len;
128
129 fmatch = FMATCH_OK;
130
131 switch (*optp) {
132 case OSFOPT_MSS:
133 mss = optp[3];
134 mss <<= 8;
135 mss |= optp[2];
136
137 mss = ntohs((__force __be16)mss);
138 break;
139 case OSFOPT_TS:
140 break;
141 }
142
143 optp = optend;
144 } else
145 fmatch = FMATCH_OPT_WRONG;
146
147 if (fmatch != FMATCH_OK)
148 break;
149 }
150
151 if (fmatch != FMATCH_OPT_WRONG) {
152 fmatch = FMATCH_WRONG;
153
154 switch (check_WSS) {
155 case OSF_WSS_PLAIN:
156 if (f->wss.val == 0 || window == f->wss.val)
157 fmatch = FMATCH_OK;
158 break;
159 case OSF_WSS_MSS:
160 /*
161 * Some smart modems decrease mangle MSS to
162 * SMART_MSS_2, so we check standard, decreased
163 * and the one provided in the fingerprint MSS
164 * values.
165 */
166#define SMART_MSS_1 1460
167#define SMART_MSS_2 1448
168 if (window == f->wss.val * mss ||
169 window == f->wss.val * SMART_MSS_1 ||
170 window == f->wss.val * SMART_MSS_2)
171 fmatch = FMATCH_OK;
172 break;
173 case OSF_WSS_MTU:
174 if (window == f->wss.val * (mss + 40) ||
175 window == f->wss.val * (SMART_MSS_1 + 40) ||
176 window == f->wss.val * (SMART_MSS_2 + 40))
177 fmatch = FMATCH_OK;
178 break;
179 case OSF_WSS_MODULO:
180 if ((window % f->wss.val) == 0)
181 fmatch = FMATCH_OK;
182 break;
183 }
184 }
185
186 if (fmatch != FMATCH_OK)
187 continue;
188
189 fcount++;
190
191 if (info->flags & NF_OSF_LOG)
192 nf_log_packet(net, family, hooknum, skb,
193 in, out, NULL,
194 "%s [%s:%s] : %pI4:%d -> %pI4:%d hops=%d\n",
195 f->genre, f->version, f->subtype,
196 &ip->saddr, ntohs(tcp->source),
197 &ip->daddr, ntohs(tcp->dest),
198 f->ttl - ip->ttl);
199
200 if ((info->flags & NF_OSF_LOG) &&
201 info->loglevel == NF_OSF_LOGLEVEL_FIRST)
202 break;
203 }
204
205 if (!fcount && (info->flags & NF_OSF_LOG))
206 nf_log_packet(net, family, hooknum, skb, in, out, NULL,
207 "Remote OS is not known: %pI4:%u -> %pI4:%u\n",
208 &ip->saddr, ntohs(tcp->source),
209 &ip->daddr, ntohs(tcp->dest));
210
211 if (fcount)
212 fmatch = FMATCH_OK;
213
214 return fmatch == FMATCH_OK;
215}
216EXPORT_SYMBOL_GPL(nf_osf_match);
217
218MODULE_LICENSE("GPL");
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 04d4e3772584..18bd584fadda 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -386,13 +386,17 @@ static struct nft_table *nft_table_lookup(const struct net *net,
386{ 386{
387 struct nft_table *table; 387 struct nft_table *table;
388 388
389 if (nla == NULL)
390 return ERR_PTR(-EINVAL);
391
389 list_for_each_entry(table, &net->nft.tables, list) { 392 list_for_each_entry(table, &net->nft.tables, list) {
390 if (!nla_strcmp(nla, table->name) && 393 if (!nla_strcmp(nla, table->name) &&
391 table->family == family && 394 table->family == family &&
392 nft_active_genmask(table, genmask)) 395 nft_active_genmask(table, genmask))
393 return table; 396 return table;
394 } 397 }
395 return NULL; 398
399 return ERR_PTR(-ENOENT);
396} 400}
397 401
398static struct nft_table *nft_table_lookup_byhandle(const struct net *net, 402static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
@@ -406,37 +410,6 @@ static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
406 nft_active_genmask(table, genmask)) 410 nft_active_genmask(table, genmask))
407 return table; 411 return table;
408 } 412 }
409 return NULL;
410}
411
412static struct nft_table *nf_tables_table_lookup(const struct net *net,
413 const struct nlattr *nla,
414 u8 family, u8 genmask)
415{
416 struct nft_table *table;
417
418 if (nla == NULL)
419 return ERR_PTR(-EINVAL);
420
421 table = nft_table_lookup(net, nla, family, genmask);
422 if (table != NULL)
423 return table;
424
425 return ERR_PTR(-ENOENT);
426}
427
428static struct nft_table *nf_tables_table_lookup_byhandle(const struct net *net,
429 const struct nlattr *nla,
430 u8 genmask)
431{
432 struct nft_table *table;
433
434 if (nla == NULL)
435 return ERR_PTR(-EINVAL);
436
437 table = nft_table_lookup_byhandle(net, nla, genmask);
438 if (table != NULL)
439 return table;
440 413
441 return ERR_PTR(-ENOENT); 414 return ERR_PTR(-ENOENT);
442} 415}
@@ -608,10 +581,11 @@ static int nf_tables_gettable(struct net *net, struct sock *nlsk,
608 return netlink_dump_start(nlsk, skb, nlh, &c); 581 return netlink_dump_start(nlsk, skb, nlh, &c);
609 } 582 }
610 583
611 table = nf_tables_table_lookup(net, nla[NFTA_TABLE_NAME], family, 584 table = nft_table_lookup(net, nla[NFTA_TABLE_NAME], family, genmask);
612 genmask); 585 if (IS_ERR(table)) {
613 if (IS_ERR(table)) 586 NL_SET_BAD_ATTR(extack, nla[NFTA_TABLE_NAME]);
614 return PTR_ERR(table); 587 return PTR_ERR(table);
588 }
615 589
616 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 590 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
617 if (!skb2) 591 if (!skb2)
@@ -727,21 +701,23 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
727{ 701{
728 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 702 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
729 u8 genmask = nft_genmask_next(net); 703 u8 genmask = nft_genmask_next(net);
730 const struct nlattr *name;
731 struct nft_table *table;
732 int family = nfmsg->nfgen_family; 704 int family = nfmsg->nfgen_family;
705 const struct nlattr *attr;
706 struct nft_table *table;
733 u32 flags = 0; 707 u32 flags = 0;
734 struct nft_ctx ctx; 708 struct nft_ctx ctx;
735 int err; 709 int err;
736 710
737 name = nla[NFTA_TABLE_NAME]; 711 attr = nla[NFTA_TABLE_NAME];
738 table = nf_tables_table_lookup(net, name, family, genmask); 712 table = nft_table_lookup(net, attr, family, genmask);
739 if (IS_ERR(table)) { 713 if (IS_ERR(table)) {
740 if (PTR_ERR(table) != -ENOENT) 714 if (PTR_ERR(table) != -ENOENT)
741 return PTR_ERR(table); 715 return PTR_ERR(table);
742 } else { 716 } else {
743 if (nlh->nlmsg_flags & NLM_F_EXCL) 717 if (nlh->nlmsg_flags & NLM_F_EXCL) {
718 NL_SET_BAD_ATTR(extack, attr);
744 return -EEXIST; 719 return -EEXIST;
720 }
745 if (nlh->nlmsg_flags & NLM_F_REPLACE) 721 if (nlh->nlmsg_flags & NLM_F_REPLACE)
746 return -EOPNOTSUPP; 722 return -EOPNOTSUPP;
747 723
@@ -760,7 +736,7 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
760 if (table == NULL) 736 if (table == NULL)
761 goto err_kzalloc; 737 goto err_kzalloc;
762 738
763 table->name = nla_strdup(name, GFP_KERNEL); 739 table->name = nla_strdup(attr, GFP_KERNEL);
764 if (table->name == NULL) 740 if (table->name == NULL)
765 goto err_strdup; 741 goto err_strdup;
766 742
@@ -883,8 +859,9 @@ static int nf_tables_deltable(struct net *net, struct sock *nlsk,
883{ 859{
884 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 860 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
885 u8 genmask = nft_genmask_next(net); 861 u8 genmask = nft_genmask_next(net);
886 struct nft_table *table;
887 int family = nfmsg->nfgen_family; 862 int family = nfmsg->nfgen_family;
863 const struct nlattr *attr;
864 struct nft_table *table;
888 struct nft_ctx ctx; 865 struct nft_ctx ctx;
889 866
890 nft_ctx_init(&ctx, net, skb, nlh, 0, NULL, NULL, nla); 867 nft_ctx_init(&ctx, net, skb, nlh, 0, NULL, NULL, nla);
@@ -892,16 +869,18 @@ static int nf_tables_deltable(struct net *net, struct sock *nlsk,
892 (!nla[NFTA_TABLE_NAME] && !nla[NFTA_TABLE_HANDLE])) 869 (!nla[NFTA_TABLE_NAME] && !nla[NFTA_TABLE_HANDLE]))
893 return nft_flush(&ctx, family); 870 return nft_flush(&ctx, family);
894 871
895 if (nla[NFTA_TABLE_HANDLE]) 872 if (nla[NFTA_TABLE_HANDLE]) {
896 table = nf_tables_table_lookup_byhandle(net, 873 attr = nla[NFTA_TABLE_HANDLE];
897 nla[NFTA_TABLE_HANDLE], 874 table = nft_table_lookup_byhandle(net, attr, genmask);
898 genmask); 875 } else {
899 else 876 attr = nla[NFTA_TABLE_NAME];
900 table = nf_tables_table_lookup(net, nla[NFTA_TABLE_NAME], 877 table = nft_table_lookup(net, attr, family, genmask);
901 family, genmask); 878 }
902 879
903 if (IS_ERR(table)) 880 if (IS_ERR(table)) {
881 NL_SET_BAD_ATTR(extack, attr);
904 return PTR_ERR(table); 882 return PTR_ERR(table);
883 }
905 884
906 if (nlh->nlmsg_flags & NLM_F_NONREC && 885 if (nlh->nlmsg_flags & NLM_F_NONREC &&
907 table->use > 0) 886 table->use > 0)
@@ -949,8 +928,7 @@ EXPORT_SYMBOL_GPL(nft_unregister_chain_type);
949 */ 928 */
950 929
951static struct nft_chain * 930static struct nft_chain *
952nf_tables_chain_lookup_byhandle(const struct nft_table *table, u64 handle, 931nft_chain_lookup_byhandle(const struct nft_table *table, u64 handle, u8 genmask)
953 u8 genmask)
954{ 932{
955 struct nft_chain *chain; 933 struct nft_chain *chain;
956 934
@@ -963,9 +941,8 @@ nf_tables_chain_lookup_byhandle(const struct nft_table *table, u64 handle,
963 return ERR_PTR(-ENOENT); 941 return ERR_PTR(-ENOENT);
964} 942}
965 943
966static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table, 944static struct nft_chain *nft_chain_lookup(const struct nft_table *table,
967 const struct nlattr *nla, 945 const struct nlattr *nla, u8 genmask)
968 u8 genmask)
969{ 946{
970 struct nft_chain *chain; 947 struct nft_chain *chain;
971 948
@@ -1194,14 +1171,17 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk,
1194 return netlink_dump_start(nlsk, skb, nlh, &c); 1171 return netlink_dump_start(nlsk, skb, nlh, &c);
1195 } 1172 }
1196 1173
1197 table = nf_tables_table_lookup(net, nla[NFTA_CHAIN_TABLE], family, 1174 table = nft_table_lookup(net, nla[NFTA_CHAIN_TABLE], family, genmask);
1198 genmask); 1175 if (IS_ERR(table)) {
1199 if (IS_ERR(table)) 1176 NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TABLE]);
1200 return PTR_ERR(table); 1177 return PTR_ERR(table);
1178 }
1201 1179
1202 chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME], genmask); 1180 chain = nft_chain_lookup(table, nla[NFTA_CHAIN_NAME], genmask);
1203 if (IS_ERR(chain)) 1181 if (IS_ERR(chain)) {
1182 NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_NAME]);
1204 return PTR_ERR(chain); 1183 return PTR_ERR(chain);
1184 }
1205 1185
1206 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 1186 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1207 if (!skb2) 1187 if (!skb2)
@@ -1513,8 +1493,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
1513 nla[NFTA_CHAIN_NAME]) { 1493 nla[NFTA_CHAIN_NAME]) {
1514 struct nft_chain *chain2; 1494 struct nft_chain *chain2;
1515 1495
1516 chain2 = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME], 1496 chain2 = nft_chain_lookup(table, nla[NFTA_CHAIN_NAME], genmask);
1517 genmask);
1518 if (!IS_ERR(chain2)) 1497 if (!IS_ERR(chain2))
1519 return -EEXIST; 1498 return -EEXIST;
1520 } 1499 }
@@ -1564,9 +1543,9 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
1564 struct netlink_ext_ack *extack) 1543 struct netlink_ext_ack *extack)
1565{ 1544{
1566 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1545 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1567 const struct nlattr * uninitialized_var(name);
1568 u8 genmask = nft_genmask_next(net); 1546 u8 genmask = nft_genmask_next(net);
1569 int family = nfmsg->nfgen_family; 1547 int family = nfmsg->nfgen_family;
1548 const struct nlattr *attr;
1570 struct nft_table *table; 1549 struct nft_table *table;
1571 struct nft_chain *chain; 1550 struct nft_chain *chain;
1572 u8 policy = NF_ACCEPT; 1551 u8 policy = NF_ACCEPT;
@@ -1576,36 +1555,46 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
1576 1555
1577 create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false; 1556 create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
1578 1557
1579 table = nf_tables_table_lookup(net, nla[NFTA_CHAIN_TABLE], family, 1558 table = nft_table_lookup(net, nla[NFTA_CHAIN_TABLE], family, genmask);
1580 genmask); 1559 if (IS_ERR(table)) {
1581 if (IS_ERR(table)) 1560 NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TABLE]);
1582 return PTR_ERR(table); 1561 return PTR_ERR(table);
1562 }
1583 1563
1584 chain = NULL; 1564 chain = NULL;
1585 name = nla[NFTA_CHAIN_NAME]; 1565 attr = nla[NFTA_CHAIN_NAME];
1586 1566
1587 if (nla[NFTA_CHAIN_HANDLE]) { 1567 if (nla[NFTA_CHAIN_HANDLE]) {
1588 handle = be64_to_cpu(nla_get_be64(nla[NFTA_CHAIN_HANDLE])); 1568 handle = be64_to_cpu(nla_get_be64(nla[NFTA_CHAIN_HANDLE]));
1589 chain = nf_tables_chain_lookup_byhandle(table, handle, genmask); 1569 chain = nft_chain_lookup_byhandle(table, handle, genmask);
1590 if (IS_ERR(chain)) 1570 if (IS_ERR(chain)) {
1571 NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_HANDLE]);
1591 return PTR_ERR(chain); 1572 return PTR_ERR(chain);
1573 }
1574 attr = nla[NFTA_CHAIN_HANDLE];
1592 } else { 1575 } else {
1593 chain = nf_tables_chain_lookup(table, name, genmask); 1576 chain = nft_chain_lookup(table, attr, genmask);
1594 if (IS_ERR(chain)) { 1577 if (IS_ERR(chain)) {
1595 if (PTR_ERR(chain) != -ENOENT) 1578 if (PTR_ERR(chain) != -ENOENT) {
1579 NL_SET_BAD_ATTR(extack, attr);
1596 return PTR_ERR(chain); 1580 return PTR_ERR(chain);
1581 }
1597 chain = NULL; 1582 chain = NULL;
1598 } 1583 }
1599 } 1584 }
1600 1585
1601 if (nla[NFTA_CHAIN_POLICY]) { 1586 if (nla[NFTA_CHAIN_POLICY]) {
1602 if (chain != NULL && 1587 if (chain != NULL &&
1603 !nft_is_base_chain(chain)) 1588 !nft_is_base_chain(chain)) {
1589 NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_POLICY]);
1604 return -EOPNOTSUPP; 1590 return -EOPNOTSUPP;
1591 }
1605 1592
1606 if (chain == NULL && 1593 if (chain == NULL &&
1607 nla[NFTA_CHAIN_HOOK] == NULL) 1594 nla[NFTA_CHAIN_HOOK] == NULL) {
1595 NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_POLICY]);
1608 return -EOPNOTSUPP; 1596 return -EOPNOTSUPP;
1597 }
1609 1598
1610 policy = ntohl(nla_get_be32(nla[NFTA_CHAIN_POLICY])); 1599 policy = ntohl(nla_get_be32(nla[NFTA_CHAIN_POLICY]));
1611 switch (policy) { 1600 switch (policy) {
@@ -1620,8 +1609,10 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
1620 nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla); 1609 nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
1621 1610
1622 if (chain != NULL) { 1611 if (chain != NULL) {
1623 if (nlh->nlmsg_flags & NLM_F_EXCL) 1612 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1613 NL_SET_BAD_ATTR(extack, attr);
1624 return -EEXIST; 1614 return -EEXIST;
1615 }
1625 if (nlh->nlmsg_flags & NLM_F_REPLACE) 1616 if (nlh->nlmsg_flags & NLM_F_REPLACE)
1626 return -EOPNOTSUPP; 1617 return -EOPNOTSUPP;
1627 1618
@@ -1638,28 +1629,34 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
1638{ 1629{
1639 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1630 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1640 u8 genmask = nft_genmask_next(net); 1631 u8 genmask = nft_genmask_next(net);
1632 int family = nfmsg->nfgen_family;
1633 const struct nlattr *attr;
1641 struct nft_table *table; 1634 struct nft_table *table;
1642 struct nft_chain *chain; 1635 struct nft_chain *chain;
1643 struct nft_rule *rule; 1636 struct nft_rule *rule;
1644 int family = nfmsg->nfgen_family;
1645 struct nft_ctx ctx; 1637 struct nft_ctx ctx;
1646 u64 handle; 1638 u64 handle;
1647 u32 use; 1639 u32 use;
1648 int err; 1640 int err;
1649 1641
1650 table = nf_tables_table_lookup(net, nla[NFTA_CHAIN_TABLE], family, 1642 table = nft_table_lookup(net, nla[NFTA_CHAIN_TABLE], family, genmask);
1651 genmask); 1643 if (IS_ERR(table)) {
1652 if (IS_ERR(table)) 1644 NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TABLE]);
1653 return PTR_ERR(table); 1645 return PTR_ERR(table);
1646 }
1654 1647
1655 if (nla[NFTA_CHAIN_HANDLE]) { 1648 if (nla[NFTA_CHAIN_HANDLE]) {
1656 handle = be64_to_cpu(nla_get_be64(nla[NFTA_CHAIN_HANDLE])); 1649 attr = nla[NFTA_CHAIN_HANDLE];
1657 chain = nf_tables_chain_lookup_byhandle(table, handle, genmask); 1650 handle = be64_to_cpu(nla_get_be64(attr));
1651 chain = nft_chain_lookup_byhandle(table, handle, genmask);
1658 } else { 1652 } else {
1659 chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME], genmask); 1653 attr = nla[NFTA_CHAIN_NAME];
1654 chain = nft_chain_lookup(table, attr, genmask);
1660 } 1655 }
1661 if (IS_ERR(chain)) 1656 if (IS_ERR(chain)) {
1657 NL_SET_BAD_ATTR(extack, attr);
1662 return PTR_ERR(chain); 1658 return PTR_ERR(chain);
1659 }
1663 1660
1664 if (nlh->nlmsg_flags & NLM_F_NONREC && 1661 if (nlh->nlmsg_flags & NLM_F_NONREC &&
1665 chain->use > 0) 1662 chain->use > 0)
@@ -1681,8 +1678,10 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
1681 /* There are rules and elements that are still holding references to us, 1678 /* There are rules and elements that are still holding references to us,
1682 * we cannot do a recursive removal in this case. 1679 * we cannot do a recursive removal in this case.
1683 */ 1680 */
1684 if (use > 0) 1681 if (use > 0) {
1682 NL_SET_BAD_ATTR(extack, attr);
1685 return -EBUSY; 1683 return -EBUSY;
1684 }
1686 1685
1687 return nft_delchain(&ctx); 1686 return nft_delchain(&ctx);
1688} 1687}
@@ -1939,8 +1938,8 @@ void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr)
1939 * Rules 1938 * Rules
1940 */ 1939 */
1941 1940
1942static struct nft_rule *__nf_tables_rule_lookup(const struct nft_chain *chain, 1941static struct nft_rule *__nft_rule_lookup(const struct nft_chain *chain,
1943 u64 handle) 1942 u64 handle)
1944{ 1943{
1945 struct nft_rule *rule; 1944 struct nft_rule *rule;
1946 1945
@@ -1953,13 +1952,13 @@ static struct nft_rule *__nf_tables_rule_lookup(const struct nft_chain *chain,
1953 return ERR_PTR(-ENOENT); 1952 return ERR_PTR(-ENOENT);
1954} 1953}
1955 1954
1956static struct nft_rule *nf_tables_rule_lookup(const struct nft_chain *chain, 1955static struct nft_rule *nft_rule_lookup(const struct nft_chain *chain,
1957 const struct nlattr *nla) 1956 const struct nlattr *nla)
1958{ 1957{
1959 if (nla == NULL) 1958 if (nla == NULL)
1960 return ERR_PTR(-EINVAL); 1959 return ERR_PTR(-EINVAL);
1961 1960
1962 return __nf_tables_rule_lookup(chain, be64_to_cpu(nla_get_be64(nla))); 1961 return __nft_rule_lookup(chain, be64_to_cpu(nla_get_be64(nla)));
1963} 1962}
1964 1963
1965static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = { 1964static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
@@ -2191,18 +2190,23 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
2191 return netlink_dump_start(nlsk, skb, nlh, &c); 2190 return netlink_dump_start(nlsk, skb, nlh, &c);
2192 } 2191 }
2193 2192
2194 table = nf_tables_table_lookup(net, nla[NFTA_RULE_TABLE], family, 2193 table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask);
2195 genmask); 2194 if (IS_ERR(table)) {
2196 if (IS_ERR(table)) 2195 NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_TABLE]);
2197 return PTR_ERR(table); 2196 return PTR_ERR(table);
2197 }
2198 2198
2199 chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN], genmask); 2199 chain = nft_chain_lookup(table, nla[NFTA_RULE_CHAIN], genmask);
2200 if (IS_ERR(chain)) 2200 if (IS_ERR(chain)) {
2201 NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
2201 return PTR_ERR(chain); 2202 return PTR_ERR(chain);
2203 }
2202 2204
2203 rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]); 2205 rule = nft_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
2204 if (IS_ERR(rule)) 2206 if (IS_ERR(rule)) {
2207 NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_HANDLE]);
2205 return PTR_ERR(rule); 2208 return PTR_ERR(rule);
2209 }
2206 2210
2207 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2211 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2208 if (!skb2) 2212 if (!skb2)
@@ -2265,23 +2269,30 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
2265 2269
2266 create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false; 2270 create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
2267 2271
2268 table = nf_tables_table_lookup(net, nla[NFTA_RULE_TABLE], family, 2272 table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask);
2269 genmask); 2273 if (IS_ERR(table)) {
2270 if (IS_ERR(table)) 2274 NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_TABLE]);
2271 return PTR_ERR(table); 2275 return PTR_ERR(table);
2276 }
2272 2277
2273 chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN], genmask); 2278 chain = nft_chain_lookup(table, nla[NFTA_RULE_CHAIN], genmask);
2274 if (IS_ERR(chain)) 2279 if (IS_ERR(chain)) {
2280 NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
2275 return PTR_ERR(chain); 2281 return PTR_ERR(chain);
2282 }
2276 2283
2277 if (nla[NFTA_RULE_HANDLE]) { 2284 if (nla[NFTA_RULE_HANDLE]) {
2278 handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE])); 2285 handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
2279 rule = __nf_tables_rule_lookup(chain, handle); 2286 rule = __nft_rule_lookup(chain, handle);
2280 if (IS_ERR(rule)) 2287 if (IS_ERR(rule)) {
2288 NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_HANDLE]);
2281 return PTR_ERR(rule); 2289 return PTR_ERR(rule);
2290 }
2282 2291
2283 if (nlh->nlmsg_flags & NLM_F_EXCL) 2292 if (nlh->nlmsg_flags & NLM_F_EXCL) {
2293 NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_HANDLE]);
2284 return -EEXIST; 2294 return -EEXIST;
2295 }
2285 if (nlh->nlmsg_flags & NLM_F_REPLACE) 2296 if (nlh->nlmsg_flags & NLM_F_REPLACE)
2286 old_rule = rule; 2297 old_rule = rule;
2287 else 2298 else
@@ -2300,9 +2311,11 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
2300 return -EOPNOTSUPP; 2311 return -EOPNOTSUPP;
2301 2312
2302 pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION])); 2313 pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
2303 old_rule = __nf_tables_rule_lookup(chain, pos_handle); 2314 old_rule = __nft_rule_lookup(chain, pos_handle);
2304 if (IS_ERR(old_rule)) 2315 if (IS_ERR(old_rule)) {
2316 NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]);
2305 return PTR_ERR(old_rule); 2317 return PTR_ERR(old_rule);
2318 }
2306 } 2319 }
2307 2320
2308 nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla); 2321 nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
@@ -2440,32 +2453,37 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk,
2440 int family = nfmsg->nfgen_family, err = 0; 2453 int family = nfmsg->nfgen_family, err = 0;
2441 struct nft_ctx ctx; 2454 struct nft_ctx ctx;
2442 2455
2443 table = nf_tables_table_lookup(net, nla[NFTA_RULE_TABLE], family, 2456 table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask);
2444 genmask); 2457 if (IS_ERR(table)) {
2445 if (IS_ERR(table)) 2458 NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_TABLE]);
2446 return PTR_ERR(table); 2459 return PTR_ERR(table);
2460 }
2447 2461
2448 if (nla[NFTA_RULE_CHAIN]) { 2462 if (nla[NFTA_RULE_CHAIN]) {
2449 chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN], 2463 chain = nft_chain_lookup(table, nla[NFTA_RULE_CHAIN], genmask);
2450 genmask); 2464 if (IS_ERR(chain)) {
2451 if (IS_ERR(chain)) 2465 NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
2452 return PTR_ERR(chain); 2466 return PTR_ERR(chain);
2467 }
2453 } 2468 }
2454 2469
2455 nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla); 2470 nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
2456 2471
2457 if (chain) { 2472 if (chain) {
2458 if (nla[NFTA_RULE_HANDLE]) { 2473 if (nla[NFTA_RULE_HANDLE]) {
2459 rule = nf_tables_rule_lookup(chain, 2474 rule = nft_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
2460 nla[NFTA_RULE_HANDLE]); 2475 if (IS_ERR(rule)) {
2461 if (IS_ERR(rule)) 2476 NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_HANDLE]);
2462 return PTR_ERR(rule); 2477 return PTR_ERR(rule);
2478 }
2463 2479
2464 err = nft_delrule(&ctx, rule); 2480 err = nft_delrule(&ctx, rule);
2465 } else if (nla[NFTA_RULE_ID]) { 2481 } else if (nla[NFTA_RULE_ID]) {
2466 rule = nft_rule_lookup_byid(net, nla[NFTA_RULE_ID]); 2482 rule = nft_rule_lookup_byid(net, nla[NFTA_RULE_ID]);
2467 if (IS_ERR(rule)) 2483 if (IS_ERR(rule)) {
2484 NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_ID]);
2468 return PTR_ERR(rule); 2485 return PTR_ERR(rule);
2486 }
2469 2487
2470 err = nft_delrule(&ctx, rule); 2488 err = nft_delrule(&ctx, rule);
2471 } else { 2489 } else {
@@ -2510,14 +2528,12 @@ void nft_unregister_set(struct nft_set_type *type)
2510EXPORT_SYMBOL_GPL(nft_unregister_set); 2528EXPORT_SYMBOL_GPL(nft_unregister_set);
2511 2529
2512#define NFT_SET_FEATURES (NFT_SET_INTERVAL | NFT_SET_MAP | \ 2530#define NFT_SET_FEATURES (NFT_SET_INTERVAL | NFT_SET_MAP | \
2513 NFT_SET_TIMEOUT | NFT_SET_OBJECT) 2531 NFT_SET_TIMEOUT | NFT_SET_OBJECT | \
2532 NFT_SET_EVAL)
2514 2533
2515static bool nft_set_ops_candidate(const struct nft_set_ops *ops, u32 flags) 2534static bool nft_set_ops_candidate(const struct nft_set_type *type, u32 flags)
2516{ 2535{
2517 if ((flags & NFT_SET_EVAL) && !ops->update) 2536 return (flags & type->features) == (flags & NFT_SET_FEATURES);
2518 return false;
2519
2520 return (flags & ops->features) == (flags & NFT_SET_FEATURES);
2521} 2537}
2522 2538
2523/* 2539/*
@@ -2554,14 +2570,9 @@ nft_select_set_ops(const struct nft_ctx *ctx,
2554 best.space = ~0; 2570 best.space = ~0;
2555 2571
2556 list_for_each_entry(type, &nf_tables_set_types, list) { 2572 list_for_each_entry(type, &nf_tables_set_types, list) {
2557 if (!type->select_ops) 2573 ops = &type->ops;
2558 ops = type->ops;
2559 else
2560 ops = type->select_ops(ctx, desc, flags);
2561 if (!ops)
2562 continue;
2563 2574
2564 if (!nft_set_ops_candidate(ops, flags)) 2575 if (!nft_set_ops_candidate(type, flags))
2565 continue; 2576 continue;
2566 if (!ops->estimate(desc, flags, &est)) 2577 if (!ops->estimate(desc, flags, &est))
2567 continue; 2578 continue;
@@ -2592,7 +2603,7 @@ nft_select_set_ops(const struct nft_ctx *ctx,
2592 if (!try_module_get(type->owner)) 2603 if (!try_module_get(type->owner))
2593 continue; 2604 continue;
2594 if (bops != NULL) 2605 if (bops != NULL)
2595 module_put(bops->type->owner); 2606 module_put(to_set_type(bops)->owner);
2596 2607
2597 bops = ops; 2608 bops = ops;
2598 best = est; 2609 best = est;
@@ -2633,6 +2644,7 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net,
2633 const struct sk_buff *skb, 2644 const struct sk_buff *skb,
2634 const struct nlmsghdr *nlh, 2645 const struct nlmsghdr *nlh,
2635 const struct nlattr * const nla[], 2646 const struct nlattr * const nla[],
2647 struct netlink_ext_ack *extack,
2636 u8 genmask) 2648 u8 genmask)
2637{ 2649{
2638 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2650 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
@@ -2640,18 +2652,20 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net,
2640 struct nft_table *table = NULL; 2652 struct nft_table *table = NULL;
2641 2653
2642 if (nla[NFTA_SET_TABLE] != NULL) { 2654 if (nla[NFTA_SET_TABLE] != NULL) {
2643 table = nf_tables_table_lookup(net, nla[NFTA_SET_TABLE], 2655 table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family,
2644 family, genmask); 2656 genmask);
2645 if (IS_ERR(table)) 2657 if (IS_ERR(table)) {
2658 NL_SET_BAD_ATTR(extack, nla[NFTA_SET_TABLE]);
2646 return PTR_ERR(table); 2659 return PTR_ERR(table);
2660 }
2647 } 2661 }
2648 2662
2649 nft_ctx_init(ctx, net, skb, nlh, family, table, NULL, nla); 2663 nft_ctx_init(ctx, net, skb, nlh, family, table, NULL, nla);
2650 return 0; 2664 return 0;
2651} 2665}
2652 2666
2653static struct nft_set *nf_tables_set_lookup(const struct nft_table *table, 2667static struct nft_set *nft_set_lookup(const struct nft_table *table,
2654 const struct nlattr *nla, u8 genmask) 2668 const struct nlattr *nla, u8 genmask)
2655{ 2669{
2656 struct nft_set *set; 2670 struct nft_set *set;
2657 2671
@@ -2666,14 +2680,12 @@ static struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
2666 return ERR_PTR(-ENOENT); 2680 return ERR_PTR(-ENOENT);
2667} 2681}
2668 2682
2669static struct nft_set *nf_tables_set_lookup_byhandle(const struct nft_table *table, 2683static struct nft_set *nft_set_lookup_byhandle(const struct nft_table *table,
2670 const struct nlattr *nla, u8 genmask) 2684 const struct nlattr *nla,
2685 u8 genmask)
2671{ 2686{
2672 struct nft_set *set; 2687 struct nft_set *set;
2673 2688
2674 if (nla == NULL)
2675 return ERR_PTR(-EINVAL);
2676
2677 list_for_each_entry(set, &table->sets, list) { 2689 list_for_each_entry(set, &table->sets, list) {
2678 if (be64_to_cpu(nla_get_be64(nla)) == set->handle && 2690 if (be64_to_cpu(nla_get_be64(nla)) == set->handle &&
2679 nft_active_genmask(set, genmask)) 2691 nft_active_genmask(set, genmask))
@@ -2682,9 +2694,8 @@ static struct nft_set *nf_tables_set_lookup_byhandle(const struct nft_table *tab
2682 return ERR_PTR(-ENOENT); 2694 return ERR_PTR(-ENOENT);
2683} 2695}
2684 2696
2685static struct nft_set *nf_tables_set_lookup_byid(const struct net *net, 2697static struct nft_set *nft_set_lookup_byid(const struct net *net,
2686 const struct nlattr *nla, 2698 const struct nlattr *nla, u8 genmask)
2687 u8 genmask)
2688{ 2699{
2689 struct nft_trans *trans; 2700 struct nft_trans *trans;
2690 u32 id = ntohl(nla_get_be32(nla)); 2701 u32 id = ntohl(nla_get_be32(nla));
@@ -2708,12 +2719,12 @@ struct nft_set *nft_set_lookup_global(const struct net *net,
2708{ 2719{
2709 struct nft_set *set; 2720 struct nft_set *set;
2710 2721
2711 set = nf_tables_set_lookup(table, nla_set_name, genmask); 2722 set = nft_set_lookup(table, nla_set_name, genmask);
2712 if (IS_ERR(set)) { 2723 if (IS_ERR(set)) {
2713 if (!nla_set_id) 2724 if (!nla_set_id)
2714 return set; 2725 return set;
2715 2726
2716 set = nf_tables_set_lookup_byid(net, nla_set_id, genmask); 2727 set = nft_set_lookup_byid(net, nla_set_id, genmask);
2717 } 2728 }
2718 return set; 2729 return set;
2719} 2730}
@@ -2773,6 +2784,27 @@ cont:
2773 return 0; 2784 return 0;
2774} 2785}
2775 2786
2787static int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result)
2788{
2789 u64 ms = be64_to_cpu(nla_get_be64(nla));
2790 u64 max = (u64)(~((u64)0));
2791
2792 max = div_u64(max, NSEC_PER_MSEC);
2793 if (ms >= max)
2794 return -ERANGE;
2795
2796 ms *= NSEC_PER_MSEC;
2797 *result = nsecs_to_jiffies64(ms);
2798 return 0;
2799}
2800
2801static u64 nf_jiffies64_to_msecs(u64 input)
2802{
2803 u64 ms = jiffies64_to_nsecs(input);
2804
2805 return cpu_to_be64(div_u64(ms, NSEC_PER_MSEC));
2806}
2807
2776static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, 2808static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
2777 const struct nft_set *set, u16 event, u16 flags) 2809 const struct nft_set *set, u16 event, u16 flags)
2778{ 2810{
@@ -2820,7 +2852,7 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
2820 2852
2821 if (set->timeout && 2853 if (set->timeout &&
2822 nla_put_be64(skb, NFTA_SET_TIMEOUT, 2854 nla_put_be64(skb, NFTA_SET_TIMEOUT,
2823 cpu_to_be64(jiffies_to_msecs(set->timeout)), 2855 nf_jiffies64_to_msecs(set->timeout),
2824 NFTA_SET_PAD)) 2856 NFTA_SET_PAD))
2825 goto nla_put_failure; 2857 goto nla_put_failure;
2826 if (set->gc_int && 2858 if (set->gc_int &&
@@ -2958,7 +2990,8 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
2958 int err; 2990 int err;
2959 2991
2960 /* Verify existence before starting dump */ 2992 /* Verify existence before starting dump */
2961 err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla, genmask); 2993 err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla, extack,
2994 genmask);
2962 if (err < 0) 2995 if (err < 0)
2963 return err; 2996 return err;
2964 2997
@@ -2985,7 +3018,7 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
2985 if (!nla[NFTA_SET_TABLE]) 3018 if (!nla[NFTA_SET_TABLE])
2986 return -EINVAL; 3019 return -EINVAL;
2987 3020
2988 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME], genmask); 3021 set = nft_set_lookup(ctx.table, nla[NFTA_SET_NAME], genmask);
2989 if (IS_ERR(set)) 3022 if (IS_ERR(set))
2990 return PTR_ERR(set); 3023 return PTR_ERR(set);
2991 3024
@@ -3115,8 +3148,10 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
3115 if (nla[NFTA_SET_TIMEOUT] != NULL) { 3148 if (nla[NFTA_SET_TIMEOUT] != NULL) {
3116 if (!(flags & NFT_SET_TIMEOUT)) 3149 if (!(flags & NFT_SET_TIMEOUT))
3117 return -EINVAL; 3150 return -EINVAL;
3118 timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64( 3151
3119 nla[NFTA_SET_TIMEOUT]))); 3152 err = nf_msecs_to_jiffies64(nla[NFTA_SET_TIMEOUT], &timeout);
3153 if (err)
3154 return err;
3120 } 3155 }
3121 gc_int = 0; 3156 gc_int = 0;
3122 if (nla[NFTA_SET_GC_INTERVAL] != NULL) { 3157 if (nla[NFTA_SET_GC_INTERVAL] != NULL) {
@@ -3137,22 +3172,28 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
3137 3172
3138 create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false; 3173 create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
3139 3174
3140 table = nf_tables_table_lookup(net, nla[NFTA_SET_TABLE], family, 3175 table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family, genmask);
3141 genmask); 3176 if (IS_ERR(table)) {
3142 if (IS_ERR(table)) 3177 NL_SET_BAD_ATTR(extack, nla[NFTA_SET_TABLE]);
3143 return PTR_ERR(table); 3178 return PTR_ERR(table);
3179 }
3144 3180
3145 nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla); 3181 nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
3146 3182
3147 set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME], genmask); 3183 set = nft_set_lookup(table, nla[NFTA_SET_NAME], genmask);
3148 if (IS_ERR(set)) { 3184 if (IS_ERR(set)) {
3149 if (PTR_ERR(set) != -ENOENT) 3185 if (PTR_ERR(set) != -ENOENT) {
3186 NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
3150 return PTR_ERR(set); 3187 return PTR_ERR(set);
3188 }
3151 } else { 3189 } else {
3152 if (nlh->nlmsg_flags & NLM_F_EXCL) 3190 if (nlh->nlmsg_flags & NLM_F_EXCL) {
3191 NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
3153 return -EEXIST; 3192 return -EEXIST;
3193 }
3154 if (nlh->nlmsg_flags & NLM_F_REPLACE) 3194 if (nlh->nlmsg_flags & NLM_F_REPLACE)
3155 return -EOPNOTSUPP; 3195 return -EOPNOTSUPP;
3196
3156 return 0; 3197 return 0;
3157 } 3198 }
3158 3199
@@ -3229,14 +3270,14 @@ err3:
3229err2: 3270err2:
3230 kvfree(set); 3271 kvfree(set);
3231err1: 3272err1:
3232 module_put(ops->type->owner); 3273 module_put(to_set_type(ops)->owner);
3233 return err; 3274 return err;
3234} 3275}
3235 3276
3236static void nft_set_destroy(struct nft_set *set) 3277static void nft_set_destroy(struct nft_set *set)
3237{ 3278{
3238 set->ops->destroy(set); 3279 set->ops->destroy(set);
3239 module_put(set->ops->type->owner); 3280 module_put(to_set_type(set->ops)->owner);
3240 kfree(set->name); 3281 kfree(set->name);
3241 kvfree(set); 3282 kvfree(set);
3242} 3283}
@@ -3255,6 +3296,7 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
3255{ 3296{
3256 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 3297 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3257 u8 genmask = nft_genmask_next(net); 3298 u8 genmask = nft_genmask_next(net);
3299 const struct nlattr *attr;
3258 struct nft_set *set; 3300 struct nft_set *set;
3259 struct nft_ctx ctx; 3301 struct nft_ctx ctx;
3260 int err; 3302 int err;
@@ -3264,20 +3306,28 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
3264 if (nla[NFTA_SET_TABLE] == NULL) 3306 if (nla[NFTA_SET_TABLE] == NULL)
3265 return -EINVAL; 3307 return -EINVAL;
3266 3308
3267 err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla, genmask); 3309 err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla, extack,
3310 genmask);
3268 if (err < 0) 3311 if (err < 0)
3269 return err; 3312 return err;
3270 3313
3271 if (nla[NFTA_SET_HANDLE]) 3314 if (nla[NFTA_SET_HANDLE]) {
3272 set = nf_tables_set_lookup_byhandle(ctx.table, nla[NFTA_SET_HANDLE], genmask); 3315 attr = nla[NFTA_SET_HANDLE];
3273 else 3316 set = nft_set_lookup_byhandle(ctx.table, attr, genmask);
3274 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME], genmask); 3317 } else {
3275 if (IS_ERR(set)) 3318 attr = nla[NFTA_SET_NAME];
3276 return PTR_ERR(set); 3319 set = nft_set_lookup(ctx.table, attr, genmask);
3320 }
3277 3321
3322 if (IS_ERR(set)) {
3323 NL_SET_BAD_ATTR(extack, attr);
3324 return PTR_ERR(set);
3325 }
3278 if (!list_empty(&set->bindings) || 3326 if (!list_empty(&set->bindings) ||
3279 (nlh->nlmsg_flags & NLM_F_NONREC && atomic_read(&set->nelems) > 0)) 3327 (nlh->nlmsg_flags & NLM_F_NONREC && atomic_read(&set->nelems) > 0)) {
3328 NL_SET_BAD_ATTR(extack, attr);
3280 return -EBUSY; 3329 return -EBUSY;
3330 }
3281 3331
3282 return nft_delset(&ctx, set); 3332 return nft_delset(&ctx, set);
3283} 3333}
@@ -3367,8 +3417,8 @@ const struct nft_set_ext_type nft_set_ext_types[] = {
3367 .align = __alignof__(u64), 3417 .align = __alignof__(u64),
3368 }, 3418 },
3369 [NFT_SET_EXT_EXPIRATION] = { 3419 [NFT_SET_EXT_EXPIRATION] = {
3370 .len = sizeof(unsigned long), 3420 .len = sizeof(u64),
3371 .align = __alignof__(unsigned long), 3421 .align = __alignof__(u64),
3372 }, 3422 },
3373 [NFT_SET_EXT_USERDATA] = { 3423 [NFT_SET_EXT_USERDATA] = {
3374 .len = sizeof(struct nft_userdata), 3424 .len = sizeof(struct nft_userdata),
@@ -3405,16 +3455,19 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, struct net *net,
3405 const struct sk_buff *skb, 3455 const struct sk_buff *skb,
3406 const struct nlmsghdr *nlh, 3456 const struct nlmsghdr *nlh,
3407 const struct nlattr * const nla[], 3457 const struct nlattr * const nla[],
3458 struct netlink_ext_ack *extack,
3408 u8 genmask) 3459 u8 genmask)
3409{ 3460{
3410 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 3461 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3411 int family = nfmsg->nfgen_family; 3462 int family = nfmsg->nfgen_family;
3412 struct nft_table *table; 3463 struct nft_table *table;
3413 3464
3414 table = nf_tables_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], 3465 table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family,
3415 family, genmask); 3466 genmask);
3416 if (IS_ERR(table)) 3467 if (IS_ERR(table)) {
3468 NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_TABLE]);
3417 return PTR_ERR(table); 3469 return PTR_ERR(table);
3470 }
3418 3471
3419 nft_ctx_init(ctx, net, skb, nlh, family, table, NULL, nla); 3472 nft_ctx_init(ctx, net, skb, nlh, family, table, NULL, nla);
3420 return 0; 3473 return 0;
@@ -3458,22 +3511,21 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
3458 3511
3459 if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) && 3512 if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
3460 nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT, 3513 nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
3461 cpu_to_be64(jiffies_to_msecs( 3514 nf_jiffies64_to_msecs(*nft_set_ext_timeout(ext)),
3462 *nft_set_ext_timeout(ext))),
3463 NFTA_SET_ELEM_PAD)) 3515 NFTA_SET_ELEM_PAD))
3464 goto nla_put_failure; 3516 goto nla_put_failure;
3465 3517
3466 if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) { 3518 if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
3467 unsigned long expires, now = jiffies; 3519 u64 expires, now = get_jiffies_64();
3468 3520
3469 expires = *nft_set_ext_expiration(ext); 3521 expires = *nft_set_ext_expiration(ext);
3470 if (time_before(now, expires)) 3522 if (time_before64(now, expires))
3471 expires -= now; 3523 expires -= now;
3472 else 3524 else
3473 expires = 0; 3525 expires = 0;
3474 3526
3475 if (nla_put_be64(skb, NFTA_SET_ELEM_EXPIRATION, 3527 if (nla_put_be64(skb, NFTA_SET_ELEM_EXPIRATION,
3476 cpu_to_be64(jiffies_to_msecs(expires)), 3528 nf_jiffies64_to_msecs(expires),
3477 NFTA_SET_ELEM_PAD)) 3529 NFTA_SET_ELEM_PAD))
3478 goto nla_put_failure; 3530 goto nla_put_failure;
3479 } 3531 }
@@ -3744,12 +3796,12 @@ static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
3744 struct nft_ctx ctx; 3796 struct nft_ctx ctx;
3745 int rem, err = 0; 3797 int rem, err = 0;
3746 3798
3747 err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, genmask); 3799 err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, extack,
3800 genmask);
3748 if (err < 0) 3801 if (err < 0)
3749 return err; 3802 return err;
3750 3803
3751 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], 3804 set = nft_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
3752 genmask);
3753 if (IS_ERR(set)) 3805 if (IS_ERR(set))
3754 return PTR_ERR(set); 3806 return PTR_ERR(set);
3755 3807
@@ -3848,7 +3900,7 @@ void *nft_set_elem_init(const struct nft_set *set,
3848 memcpy(nft_set_ext_data(ext), data, set->dlen); 3900 memcpy(nft_set_ext_data(ext), data, set->dlen);
3849 if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) 3901 if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION))
3850 *nft_set_ext_expiration(ext) = 3902 *nft_set_ext_expiration(ext) =
3851 jiffies + timeout; 3903 get_jiffies_64() + timeout;
3852 if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT)) 3904 if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT))
3853 *nft_set_ext_timeout(ext) = timeout; 3905 *nft_set_ext_timeout(ext) = timeout;
3854 3906
@@ -3935,8 +3987,10 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
3935 if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) { 3987 if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) {
3936 if (!(set->flags & NFT_SET_TIMEOUT)) 3988 if (!(set->flags & NFT_SET_TIMEOUT))
3937 return -EINVAL; 3989 return -EINVAL;
3938 timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64( 3990 err = nf_msecs_to_jiffies64(nla[NFTA_SET_ELEM_TIMEOUT],
3939 nla[NFTA_SET_ELEM_TIMEOUT]))); 3991 &timeout);
3992 if (err)
3993 return err;
3940 } else if (set->flags & NFT_SET_TIMEOUT) { 3994 } else if (set->flags & NFT_SET_TIMEOUT) {
3941 timeout = set->timeout; 3995 timeout = set->timeout;
3942 } 3996 }
@@ -3961,8 +4015,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
3961 err = -EINVAL; 4015 err = -EINVAL;
3962 goto err2; 4016 goto err2;
3963 } 4017 }
3964 obj = nf_tables_obj_lookup(ctx->table, nla[NFTA_SET_ELEM_OBJREF], 4018 obj = nft_obj_lookup(ctx->table, nla[NFTA_SET_ELEM_OBJREF],
3965 set->objtype, genmask); 4019 set->objtype, genmask);
3966 if (IS_ERR(obj)) { 4020 if (IS_ERR(obj)) {
3967 err = PTR_ERR(obj); 4021 err = PTR_ERR(obj);
3968 goto err2; 4022 goto err2;
@@ -4099,7 +4153,8 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
4099 if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) 4153 if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
4100 return -EINVAL; 4154 return -EINVAL;
4101 4155
4102 err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, genmask); 4156 err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, extack,
4157 genmask);
4103 if (err < 0) 4158 if (err < 0)
4104 return err; 4159 return err;
4105 4160
@@ -4287,12 +4342,12 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
4287 struct nft_ctx ctx; 4342 struct nft_ctx ctx;
4288 int rem, err = 0; 4343 int rem, err = 0;
4289 4344
4290 err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, genmask); 4345 err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, extack,
4346 genmask);
4291 if (err < 0) 4347 if (err < 0)
4292 return err; 4348 return err;
4293 4349
4294 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], 4350 set = nft_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
4295 genmask);
4296 if (IS_ERR(set)) 4351 if (IS_ERR(set))
4297 return PTR_ERR(set); 4352 return PTR_ERR(set);
4298 if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT) 4353 if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
@@ -4380,9 +4435,9 @@ void nft_unregister_obj(struct nft_object_type *obj_type)
4380} 4435}
4381EXPORT_SYMBOL_GPL(nft_unregister_obj); 4436EXPORT_SYMBOL_GPL(nft_unregister_obj);
4382 4437
4383struct nft_object *nf_tables_obj_lookup(const struct nft_table *table, 4438struct nft_object *nft_obj_lookup(const struct nft_table *table,
4384 const struct nlattr *nla, 4439 const struct nlattr *nla, u32 objtype,
4385 u32 objtype, u8 genmask) 4440 u8 genmask)
4386{ 4441{
4387 struct nft_object *obj; 4442 struct nft_object *obj;
4388 4443
@@ -4394,11 +4449,11 @@ struct nft_object *nf_tables_obj_lookup(const struct nft_table *table,
4394 } 4449 }
4395 return ERR_PTR(-ENOENT); 4450 return ERR_PTR(-ENOENT);
4396} 4451}
4397EXPORT_SYMBOL_GPL(nf_tables_obj_lookup); 4452EXPORT_SYMBOL_GPL(nft_obj_lookup);
4398 4453
4399static struct nft_object *nf_tables_obj_lookup_byhandle(const struct nft_table *table, 4454static struct nft_object *nft_obj_lookup_byhandle(const struct nft_table *table,
4400 const struct nlattr *nla, 4455 const struct nlattr *nla,
4401 u32 objtype, u8 genmask) 4456 u32 objtype, u8 genmask)
4402{ 4457{
4403 struct nft_object *obj; 4458 struct nft_object *obj;
4404 4459
@@ -4542,22 +4597,25 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
4542 !nla[NFTA_OBJ_DATA]) 4597 !nla[NFTA_OBJ_DATA])
4543 return -EINVAL; 4598 return -EINVAL;
4544 4599
4545 table = nf_tables_table_lookup(net, nla[NFTA_OBJ_TABLE], family, 4600 table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask);
4546 genmask); 4601 if (IS_ERR(table)) {
4547 if (IS_ERR(table)) 4602 NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_TABLE]);
4548 return PTR_ERR(table); 4603 return PTR_ERR(table);
4604 }
4549 4605
4550 objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE])); 4606 objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
4551 obj = nf_tables_obj_lookup(table, nla[NFTA_OBJ_NAME], objtype, genmask); 4607 obj = nft_obj_lookup(table, nla[NFTA_OBJ_NAME], objtype, genmask);
4552 if (IS_ERR(obj)) { 4608 if (IS_ERR(obj)) {
4553 err = PTR_ERR(obj); 4609 err = PTR_ERR(obj);
4554 if (err != -ENOENT) 4610 if (err != -ENOENT) {
4611 NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_NAME]);
4555 return err; 4612 return err;
4556 4613 }
4557 } else { 4614 } else {
4558 if (nlh->nlmsg_flags & NLM_F_EXCL) 4615 if (nlh->nlmsg_flags & NLM_F_EXCL) {
4616 NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_NAME]);
4559 return -EEXIST; 4617 return -EEXIST;
4560 4618 }
4561 return 0; 4619 return 0;
4562 } 4620 }
4563 4621
@@ -4768,15 +4826,18 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
4768 !nla[NFTA_OBJ_TYPE]) 4826 !nla[NFTA_OBJ_TYPE])
4769 return -EINVAL; 4827 return -EINVAL;
4770 4828
4771 table = nf_tables_table_lookup(net, nla[NFTA_OBJ_TABLE], family, 4829 table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask);
4772 genmask); 4830 if (IS_ERR(table)) {
4773 if (IS_ERR(table)) 4831 NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_TABLE]);
4774 return PTR_ERR(table); 4832 return PTR_ERR(table);
4833 }
4775 4834
4776 objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE])); 4835 objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
4777 obj = nf_tables_obj_lookup(table, nla[NFTA_OBJ_NAME], objtype, genmask); 4836 obj = nft_obj_lookup(table, nla[NFTA_OBJ_NAME], objtype, genmask);
4778 if (IS_ERR(obj)) 4837 if (IS_ERR(obj)) {
4838 NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_NAME]);
4779 return PTR_ERR(obj); 4839 return PTR_ERR(obj);
4840 }
4780 4841
4781 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 4842 skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
4782 if (!skb2) 4843 if (!skb2)
@@ -4815,6 +4876,7 @@ static int nf_tables_delobj(struct net *net, struct sock *nlsk,
4815 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 4876 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
4816 u8 genmask = nft_genmask_next(net); 4877 u8 genmask = nft_genmask_next(net);
4817 int family = nfmsg->nfgen_family; 4878 int family = nfmsg->nfgen_family;
4879 const struct nlattr *attr;
4818 struct nft_table *table; 4880 struct nft_table *table;
4819 struct nft_object *obj; 4881 struct nft_object *obj;
4820 struct nft_ctx ctx; 4882 struct nft_ctx ctx;
@@ -4824,22 +4886,29 @@ static int nf_tables_delobj(struct net *net, struct sock *nlsk,
4824 (!nla[NFTA_OBJ_NAME] && !nla[NFTA_OBJ_HANDLE])) 4886 (!nla[NFTA_OBJ_NAME] && !nla[NFTA_OBJ_HANDLE]))
4825 return -EINVAL; 4887 return -EINVAL;
4826 4888
4827 table = nf_tables_table_lookup(net, nla[NFTA_OBJ_TABLE], family, 4889 table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask);
4828 genmask); 4890 if (IS_ERR(table)) {
4829 if (IS_ERR(table)) 4891 NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_TABLE]);
4830 return PTR_ERR(table); 4892 return PTR_ERR(table);
4893 }
4831 4894
4832 objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE])); 4895 objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
4833 if (nla[NFTA_OBJ_HANDLE]) 4896 if (nla[NFTA_OBJ_HANDLE]) {
4834 obj = nf_tables_obj_lookup_byhandle(table, nla[NFTA_OBJ_HANDLE], 4897 attr = nla[NFTA_OBJ_HANDLE];
4835 objtype, genmask); 4898 obj = nft_obj_lookup_byhandle(table, attr, objtype, genmask);
4836 else 4899 } else {
4837 obj = nf_tables_obj_lookup(table, nla[NFTA_OBJ_NAME], 4900 attr = nla[NFTA_OBJ_NAME];
4838 objtype, genmask); 4901 obj = nft_obj_lookup(table, attr, objtype, genmask);
4839 if (IS_ERR(obj)) 4902 }
4903
4904 if (IS_ERR(obj)) {
4905 NL_SET_BAD_ATTR(extack, attr);
4840 return PTR_ERR(obj); 4906 return PTR_ERR(obj);
4841 if (obj->use > 0) 4907 }
4908 if (obj->use > 0) {
4909 NL_SET_BAD_ATTR(extack, attr);
4842 return -EBUSY; 4910 return -EBUSY;
4911 }
4843 4912
4844 nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla); 4913 nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
4845 4914
@@ -4910,9 +4979,8 @@ static const struct nla_policy nft_flowtable_policy[NFTA_FLOWTABLE_MAX + 1] = {
4910 [NFTA_FLOWTABLE_HANDLE] = { .type = NLA_U64 }, 4979 [NFTA_FLOWTABLE_HANDLE] = { .type = NLA_U64 },
4911}; 4980};
4912 4981
4913struct nft_flowtable *nf_tables_flowtable_lookup(const struct nft_table *table, 4982struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table,
4914 const struct nlattr *nla, 4983 const struct nlattr *nla, u8 genmask)
4915 u8 genmask)
4916{ 4984{
4917 struct nft_flowtable *flowtable; 4985 struct nft_flowtable *flowtable;
4918 4986
@@ -4923,11 +4991,11 @@ struct nft_flowtable *nf_tables_flowtable_lookup(const struct nft_table *table,
4923 } 4991 }
4924 return ERR_PTR(-ENOENT); 4992 return ERR_PTR(-ENOENT);
4925} 4993}
4926EXPORT_SYMBOL_GPL(nf_tables_flowtable_lookup); 4994EXPORT_SYMBOL_GPL(nft_flowtable_lookup);
4927 4995
4928static struct nft_flowtable * 4996static struct nft_flowtable *
4929nf_tables_flowtable_lookup_byhandle(const struct nft_table *table, 4997nft_flowtable_lookup_byhandle(const struct nft_table *table,
4930 const struct nlattr *nla, u8 genmask) 4998 const struct nlattr *nla, u8 genmask)
4931{ 4999{
4932 struct nft_flowtable *flowtable; 5000 struct nft_flowtable *flowtable;
4933 5001
@@ -5026,7 +5094,7 @@ static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx,
5026 flowtable->ops[i].pf = NFPROTO_NETDEV; 5094 flowtable->ops[i].pf = NFPROTO_NETDEV;
5027 flowtable->ops[i].hooknum = hooknum; 5095 flowtable->ops[i].hooknum = hooknum;
5028 flowtable->ops[i].priority = priority; 5096 flowtable->ops[i].priority = priority;
5029 flowtable->ops[i].priv = &flowtable->data.rhashtable; 5097 flowtable->ops[i].priv = &flowtable->data;
5030 flowtable->ops[i].hook = flowtable->data.type->hook; 5098 flowtable->ops[i].hook = flowtable->data.type->hook;
5031 flowtable->ops[i].dev = dev_array[i]; 5099 flowtable->ops[i].dev = dev_array[i];
5032 flowtable->dev_name[i] = kstrdup(dev_array[i]->name, 5100 flowtable->dev_name[i] = kstrdup(dev_array[i]->name,
@@ -5067,23 +5135,6 @@ static const struct nf_flowtable_type *nft_flowtable_type_get(u8 family)
5067 return ERR_PTR(-ENOENT); 5135 return ERR_PTR(-ENOENT);
5068} 5136}
5069 5137
5070void nft_flow_table_iterate(struct net *net,
5071 void (*iter)(struct nf_flowtable *flowtable, void *data),
5072 void *data)
5073{
5074 struct nft_flowtable *flowtable;
5075 const struct nft_table *table;
5076
5077 nfnl_lock(NFNL_SUBSYS_NFTABLES);
5078 list_for_each_entry(table, &net->nft.tables, list) {
5079 list_for_each_entry(flowtable, &table->flowtables, list) {
5080 iter(&flowtable->data, data);
5081 }
5082 }
5083 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
5084}
5085EXPORT_SYMBOL_GPL(nft_flow_table_iterate);
5086
5087static void nft_unregister_flowtable_net_hooks(struct net *net, 5138static void nft_unregister_flowtable_net_hooks(struct net *net,
5088 struct nft_flowtable *flowtable) 5139 struct nft_flowtable *flowtable)
5089{ 5140{
@@ -5117,20 +5168,26 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
5117 !nla[NFTA_FLOWTABLE_HOOK]) 5168 !nla[NFTA_FLOWTABLE_HOOK])
5118 return -EINVAL; 5169 return -EINVAL;
5119 5170
5120 table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], 5171 table = nft_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], family,
5121 family, genmask); 5172 genmask);
5122 if (IS_ERR(table)) 5173 if (IS_ERR(table)) {
5174 NL_SET_BAD_ATTR(extack, nla[NFTA_FLOWTABLE_TABLE]);
5123 return PTR_ERR(table); 5175 return PTR_ERR(table);
5176 }
5124 5177
5125 flowtable = nf_tables_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME], 5178 flowtable = nft_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME],
5126 genmask); 5179 genmask);
5127 if (IS_ERR(flowtable)) { 5180 if (IS_ERR(flowtable)) {
5128 err = PTR_ERR(flowtable); 5181 err = PTR_ERR(flowtable);
5129 if (err != -ENOENT) 5182 if (err != -ENOENT) {
5183 NL_SET_BAD_ATTR(extack, nla[NFTA_FLOWTABLE_NAME]);
5130 return err; 5184 return err;
5185 }
5131 } else { 5186 } else {
5132 if (nlh->nlmsg_flags & NLM_F_EXCL) 5187 if (nlh->nlmsg_flags & NLM_F_EXCL) {
5188 NL_SET_BAD_ATTR(extack, nla[NFTA_FLOWTABLE_NAME]);
5133 return -EEXIST; 5189 return -EEXIST;
5190 }
5134 5191
5135 return 0; 5192 return 0;
5136 } 5193 }
@@ -5157,14 +5214,14 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
5157 } 5214 }
5158 5215
5159 flowtable->data.type = type; 5216 flowtable->data.type = type;
5160 err = rhashtable_init(&flowtable->data.rhashtable, type->params); 5217 err = type->init(&flowtable->data);
5161 if (err < 0) 5218 if (err < 0)
5162 goto err3; 5219 goto err3;
5163 5220
5164 err = nf_tables_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK], 5221 err = nf_tables_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK],
5165 flowtable); 5222 flowtable);
5166 if (err < 0) 5223 if (err < 0)
5167 goto err3; 5224 goto err4;
5168 5225
5169 for (i = 0; i < flowtable->ops_len; i++) { 5226 for (i = 0; i < flowtable->ops_len; i++) {
5170 if (!flowtable->ops[i].dev) 5227 if (!flowtable->ops[i].dev)
@@ -5178,37 +5235,35 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
5178 if (flowtable->ops[i].dev == ft->ops[k].dev && 5235 if (flowtable->ops[i].dev == ft->ops[k].dev &&
5179 flowtable->ops[i].pf == ft->ops[k].pf) { 5236 flowtable->ops[i].pf == ft->ops[k].pf) {
5180 err = -EBUSY; 5237 err = -EBUSY;
5181 goto err4; 5238 goto err5;
5182 } 5239 }
5183 } 5240 }
5184 } 5241 }
5185 5242
5186 err = nf_register_net_hook(net, &flowtable->ops[i]); 5243 err = nf_register_net_hook(net, &flowtable->ops[i]);
5187 if (err < 0) 5244 if (err < 0)
5188 goto err4; 5245 goto err5;
5189 } 5246 }
5190 5247
5191 err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable); 5248 err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
5192 if (err < 0) 5249 if (err < 0)
5193 goto err5; 5250 goto err6;
5194
5195 INIT_DEFERRABLE_WORK(&flowtable->data.gc_work, type->gc);
5196 queue_delayed_work(system_power_efficient_wq,
5197 &flowtable->data.gc_work, HZ);
5198 5251
5199 list_add_tail_rcu(&flowtable->list, &table->flowtables); 5252 list_add_tail_rcu(&flowtable->list, &table->flowtables);
5200 table->use++; 5253 table->use++;
5201 5254
5202 return 0; 5255 return 0;
5203err5: 5256err6:
5204 i = flowtable->ops_len; 5257 i = flowtable->ops_len;
5205err4: 5258err5:
5206 for (k = i - 1; k >= 0; k--) { 5259 for (k = i - 1; k >= 0; k--) {
5207 kfree(flowtable->dev_name[k]); 5260 kfree(flowtable->dev_name[k]);
5208 nf_unregister_net_hook(net, &flowtable->ops[k]); 5261 nf_unregister_net_hook(net, &flowtable->ops[k]);
5209 } 5262 }
5210 5263
5211 kfree(flowtable->ops); 5264 kfree(flowtable->ops);
5265err4:
5266 flowtable->data.type->free(&flowtable->data);
5212err3: 5267err3:
5213 module_put(type->owner); 5268 module_put(type->owner);
5214err2: 5269err2:
@@ -5228,6 +5283,7 @@ static int nf_tables_delflowtable(struct net *net, struct sock *nlsk,
5228 u8 genmask = nft_genmask_next(net); 5283 u8 genmask = nft_genmask_next(net);
5229 int family = nfmsg->nfgen_family; 5284 int family = nfmsg->nfgen_family;
5230 struct nft_flowtable *flowtable; 5285 struct nft_flowtable *flowtable;
5286 const struct nlattr *attr;
5231 struct nft_table *table; 5287 struct nft_table *table;
5232 struct nft_ctx ctx; 5288 struct nft_ctx ctx;
5233 5289
@@ -5236,23 +5292,29 @@ static int nf_tables_delflowtable(struct net *net, struct sock *nlsk,
5236 !nla[NFTA_FLOWTABLE_HANDLE])) 5292 !nla[NFTA_FLOWTABLE_HANDLE]))
5237 return -EINVAL; 5293 return -EINVAL;
5238 5294
5239 table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], 5295 table = nft_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], family,
5240 family, genmask); 5296 genmask);
5241 if (IS_ERR(table)) 5297 if (IS_ERR(table)) {
5298 NL_SET_BAD_ATTR(extack, nla[NFTA_FLOWTABLE_TABLE]);
5242 return PTR_ERR(table); 5299 return PTR_ERR(table);
5300 }
5243 5301
5244 if (nla[NFTA_FLOWTABLE_HANDLE]) 5302 if (nla[NFTA_FLOWTABLE_HANDLE]) {
5245 flowtable = nf_tables_flowtable_lookup_byhandle(table, 5303 attr = nla[NFTA_FLOWTABLE_HANDLE];
5246 nla[NFTA_FLOWTABLE_HANDLE], 5304 flowtable = nft_flowtable_lookup_byhandle(table, attr, genmask);
5247 genmask); 5305 } else {
5248 else 5306 attr = nla[NFTA_FLOWTABLE_NAME];
5249 flowtable = nf_tables_flowtable_lookup(table, 5307 flowtable = nft_flowtable_lookup(table, attr, genmask);
5250 nla[NFTA_FLOWTABLE_NAME], 5308 }
5251 genmask); 5309
5252 if (IS_ERR(flowtable)) 5310 if (IS_ERR(flowtable)) {
5253 return PTR_ERR(flowtable); 5311 NL_SET_BAD_ATTR(extack, attr);
5254 if (flowtable->use > 0) 5312 return PTR_ERR(flowtable);
5313 }
5314 if (flowtable->use > 0) {
5315 NL_SET_BAD_ATTR(extack, attr);
5255 return -EBUSY; 5316 return -EBUSY;
5317 }
5256 5318
5257 nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla); 5319 nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla);
5258 5320
@@ -5433,13 +5495,13 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
5433 if (!nla[NFTA_FLOWTABLE_NAME]) 5495 if (!nla[NFTA_FLOWTABLE_NAME])
5434 return -EINVAL; 5496 return -EINVAL;
5435 5497
5436 table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], 5498 table = nft_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], family,
5437 family, genmask); 5499 genmask);
5438 if (IS_ERR(table)) 5500 if (IS_ERR(table))
5439 return PTR_ERR(table); 5501 return PTR_ERR(table);
5440 5502
5441 flowtable = nf_tables_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME], 5503 flowtable = nft_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME],
5442 genmask); 5504 genmask);
5443 if (IS_ERR(flowtable)) 5505 if (IS_ERR(flowtable))
5444 return PTR_ERR(flowtable); 5506 return PTR_ERR(flowtable);
5445 5507
@@ -5492,11 +5554,9 @@ err:
5492 5554
5493static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable) 5555static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
5494{ 5556{
5495 cancel_delayed_work_sync(&flowtable->data.gc_work);
5496 kfree(flowtable->ops); 5557 kfree(flowtable->ops);
5497 kfree(flowtable->name); 5558 kfree(flowtable->name);
5498 flowtable->data.type->free(&flowtable->data); 5559 flowtable->data.type->free(&flowtable->data);
5499 rhashtable_destroy(&flowtable->data.rhashtable);
5500 module_put(flowtable->data.type->owner); 5560 module_put(flowtable->data.type->owner);
5501} 5561}
5502 5562
@@ -6410,8 +6470,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
6410 case NFT_GOTO: 6470 case NFT_GOTO:
6411 if (!tb[NFTA_VERDICT_CHAIN]) 6471 if (!tb[NFTA_VERDICT_CHAIN])
6412 return -EINVAL; 6472 return -EINVAL;
6413 chain = nf_tables_chain_lookup(ctx->table, 6473 chain = nft_chain_lookup(ctx->table, tb[NFTA_VERDICT_CHAIN],
6414 tb[NFTA_VERDICT_CHAIN], genmask); 6474 genmask);
6415 if (IS_ERR(chain)) 6475 if (IS_ERR(chain))
6416 return PTR_ERR(chain); 6476 return PTR_ERR(chain);
6417 if (nft_is_base_chain(chain)) 6477 if (nft_is_base_chain(chain))
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index dfd0bf3810d2..9cf47c4cb9d5 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -251,6 +251,9 @@ static struct nft_expr_type *nft_basic_types[] = {
251 &nft_payload_type, 251 &nft_payload_type,
252 &nft_dynset_type, 252 &nft_dynset_type,
253 &nft_range_type, 253 &nft_range_type,
254 &nft_meta_type,
255 &nft_rt_type,
256 &nft_exthdr_type,
254}; 257};
255 258
256int __init nf_tables_core_module_init(void) 259int __init nf_tables_core_module_init(void)
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 7b46aa4c478d..e5cc4d9b9ce7 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -37,7 +37,6 @@
37#include <net/sock.h> 37#include <net/sock.h>
38#include <net/netfilter/nf_log.h> 38#include <net/netfilter/nf_log.h>
39#include <net/netns/generic.h> 39#include <net/netns/generic.h>
40#include <net/netfilter/nfnetlink_log.h>
41 40
42#include <linux/atomic.h> 41#include <linux/atomic.h>
43#include <linux/refcount.h> 42#include <linux/refcount.h>
@@ -47,6 +46,7 @@
47#include "../bridge/br_private.h" 46#include "../bridge/br_private.h"
48#endif 47#endif
49 48
49#define NFULNL_COPY_DISABLED 0xff
50#define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE 50#define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE
51#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ 51#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */
52#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ 52#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */
@@ -618,7 +618,7 @@ static const struct nf_loginfo default_loginfo = {
618}; 618};
619 619
620/* log handler for internal netfilter logging api */ 620/* log handler for internal netfilter logging api */
621void 621static void
622nfulnl_log_packet(struct net *net, 622nfulnl_log_packet(struct net *net,
623 u_int8_t pf, 623 u_int8_t pf,
624 unsigned int hooknum, 624 unsigned int hooknum,
@@ -633,7 +633,7 @@ nfulnl_log_packet(struct net *net,
633 struct nfulnl_instance *inst; 633 struct nfulnl_instance *inst;
634 const struct nf_loginfo *li; 634 const struct nf_loginfo *li;
635 unsigned int qthreshold; 635 unsigned int qthreshold;
636 unsigned int plen; 636 unsigned int plen = 0;
637 struct nfnl_log_net *log = nfnl_log_pernet(net); 637 struct nfnl_log_net *log = nfnl_log_pernet(net);
638 const struct nfnl_ct_hook *nfnl_ct = NULL; 638 const struct nfnl_ct_hook *nfnl_ct = NULL;
639 struct nf_conn *ct = NULL; 639 struct nf_conn *ct = NULL;
@@ -648,7 +648,6 @@ nfulnl_log_packet(struct net *net,
648 if (!inst) 648 if (!inst)
649 return; 649 return;
650 650
651 plen = 0;
652 if (prefix) 651 if (prefix)
653 plen = strlen(prefix) + 1; 652 plen = strlen(prefix) + 1;
654 653
@@ -760,7 +759,6 @@ alloc_failure:
760 /* FIXME: statistics */ 759 /* FIXME: statistics */
761 goto unlock_and_release; 760 goto unlock_and_release;
762} 761}
763EXPORT_SYMBOL_GPL(nfulnl_log_packet);
764 762
765static int 763static int
766nfulnl_rcv_nl_event(struct notifier_block *this, 764nfulnl_rcv_nl_event(struct notifier_block *this,
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 04863fad05dd..b07a3fd9eeea 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -36,7 +36,7 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
36 u64 timeout; 36 u64 timeout;
37 void *elem; 37 void *elem;
38 38
39 if (set->size && !atomic_add_unless(&set->nelems, 1, set->size)) 39 if (!atomic_add_unless(&set->nelems, 1, set->size))
40 return NULL; 40 return NULL;
41 41
42 timeout = priv->timeout ? : set->timeout; 42 timeout = priv->timeout ? : set->timeout;
@@ -81,7 +81,7 @@ static void nft_dynset_eval(const struct nft_expr *expr,
81 if (priv->op == NFT_DYNSET_OP_UPDATE && 81 if (priv->op == NFT_DYNSET_OP_UPDATE &&
82 nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) { 82 nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
83 timeout = priv->timeout ? : set->timeout; 83 timeout = priv->timeout ? : set->timeout;
84 *nft_set_ext_expiration(ext) = jiffies + timeout; 84 *nft_set_ext_expiration(ext) = get_jiffies_64() + timeout;
85 } 85 }
86 86
87 if (sexpr != NULL) 87 if (sexpr != NULL)
@@ -216,6 +216,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
216 if (err < 0) 216 if (err < 0)
217 goto err1; 217 goto err1;
218 218
219 if (set->size == 0)
220 set->size = 0xffff;
221
219 priv->set = set; 222 priv->set = set;
220 return 0; 223 return 0;
221 224
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index 47ec1046ad11..a940c9fd9045 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -10,11 +10,10 @@
10 10
11#include <asm/unaligned.h> 11#include <asm/unaligned.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/netlink.h> 13#include <linux/netlink.h>
16#include <linux/netfilter.h> 14#include <linux/netfilter.h>
17#include <linux/netfilter/nf_tables.h> 15#include <linux/netfilter/nf_tables.h>
16#include <net/netfilter/nf_tables_core.h>
18#include <net/netfilter/nf_tables.h> 17#include <net/netfilter/nf_tables.h>
19#include <net/tcp.h> 18#include <net/tcp.h>
20 19
@@ -353,7 +352,6 @@ static int nft_exthdr_dump_set(struct sk_buff *skb, const struct nft_expr *expr)
353 return nft_exthdr_dump_common(skb, priv); 352 return nft_exthdr_dump_common(skb, priv);
354} 353}
355 354
356static struct nft_expr_type nft_exthdr_type;
357static const struct nft_expr_ops nft_exthdr_ipv6_ops = { 355static const struct nft_expr_ops nft_exthdr_ipv6_ops = {
358 .type = &nft_exthdr_type, 356 .type = &nft_exthdr_type,
359 .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)), 357 .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
@@ -407,27 +405,10 @@ nft_exthdr_select_ops(const struct nft_ctx *ctx,
407 return ERR_PTR(-EOPNOTSUPP); 405 return ERR_PTR(-EOPNOTSUPP);
408} 406}
409 407
410static struct nft_expr_type nft_exthdr_type __read_mostly = { 408struct nft_expr_type nft_exthdr_type __read_mostly = {
411 .name = "exthdr", 409 .name = "exthdr",
412 .select_ops = nft_exthdr_select_ops, 410 .select_ops = nft_exthdr_select_ops,
413 .policy = nft_exthdr_policy, 411 .policy = nft_exthdr_policy,
414 .maxattr = NFTA_EXTHDR_MAX, 412 .maxattr = NFTA_EXTHDR_MAX,
415 .owner = THIS_MODULE, 413 .owner = THIS_MODULE,
416}; 414};
417
418static int __init nft_exthdr_module_init(void)
419{
420 return nft_register_expr(&nft_exthdr_type);
421}
422
423static void __exit nft_exthdr_module_exit(void)
424{
425 nft_unregister_expr(&nft_exthdr_type);
426}
427
428module_init(nft_exthdr_module_init);
429module_exit(nft_exthdr_module_exit);
430
431MODULE_LICENSE("GPL");
432MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
433MODULE_ALIAS_NFT_EXPR("exthdr");
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index b65829b2be22..d6bab8c3cbb0 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -142,9 +142,8 @@ static int nft_flow_offload_init(const struct nft_ctx *ctx,
142 if (!tb[NFTA_FLOW_TABLE_NAME]) 142 if (!tb[NFTA_FLOW_TABLE_NAME])
143 return -EINVAL; 143 return -EINVAL;
144 144
145 flowtable = nf_tables_flowtable_lookup(ctx->table, 145 flowtable = nft_flowtable_lookup(ctx->table, tb[NFTA_FLOW_TABLE_NAME],
146 tb[NFTA_FLOW_TABLE_NAME], 146 genmask);
147 genmask);
148 if (IS_ERR(flowtable)) 147 if (IS_ERR(flowtable))
149 return PTR_ERR(flowtable); 148 return PTR_ERR(flowtable);
150 149
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 24f2f7567ddb..e235c17f1b8b 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -97,7 +97,7 @@ static int nft_jhash_init(const struct nft_ctx *ctx,
97 priv->len = len; 97 priv->len = len;
98 98
99 priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS])); 99 priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
100 if (priv->modulus <= 1) 100 if (priv->modulus < 1)
101 return -ERANGE; 101 return -ERANGE;
102 102
103 if (priv->offset + priv->modulus - 1 < priv->offset) 103 if (priv->offset + priv->modulus - 1 < priv->offset)
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 8fb91940e2e7..5348bd058c88 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net> 2 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
3 * Copyright (c) 2014 Intel Corporation
4 * Author: Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>
3 * 5 *
4 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
@@ -9,8 +11,6 @@
9 */ 11 */
10 12
11#include <linux/kernel.h> 13#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/module.h>
14#include <linux/netlink.h> 14#include <linux/netlink.h>
15#include <linux/netfilter.h> 15#include <linux/netfilter.h>
16#include <linux/netfilter/nf_tables.h> 16#include <linux/netfilter/nf_tables.h>
@@ -24,21 +24,35 @@
24#include <net/tcp_states.h> /* for TCP_TIME_WAIT */ 24#include <net/tcp_states.h> /* for TCP_TIME_WAIT */
25#include <net/netfilter/nf_tables.h> 25#include <net/netfilter/nf_tables.h>
26#include <net/netfilter/nf_tables_core.h> 26#include <net/netfilter/nf_tables_core.h>
27#include <net/netfilter/nft_meta.h>
28 27
29#include <uapi/linux/netfilter_bridge.h> /* NF_BR_PRE_ROUTING */ 28#include <uapi/linux/netfilter_bridge.h> /* NF_BR_PRE_ROUTING */
30 29
30struct nft_meta {
31 enum nft_meta_keys key:8;
32 union {
33 enum nft_registers dreg:8;
34 enum nft_registers sreg:8;
35 };
36};
37
31static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state); 38static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state);
32 39
33void nft_meta_get_eval(const struct nft_expr *expr, 40#ifdef CONFIG_NF_TABLES_BRIDGE
34 struct nft_regs *regs, 41#include "../bridge/br_private.h"
35 const struct nft_pktinfo *pkt) 42#endif
43
44static void nft_meta_get_eval(const struct nft_expr *expr,
45 struct nft_regs *regs,
46 const struct nft_pktinfo *pkt)
36{ 47{
37 const struct nft_meta *priv = nft_expr_priv(expr); 48 const struct nft_meta *priv = nft_expr_priv(expr);
38 const struct sk_buff *skb = pkt->skb; 49 const struct sk_buff *skb = pkt->skb;
39 const struct net_device *in = nft_in(pkt), *out = nft_out(pkt); 50 const struct net_device *in = nft_in(pkt), *out = nft_out(pkt);
40 struct sock *sk; 51 struct sock *sk;
41 u32 *dest = &regs->data[priv->dreg]; 52 u32 *dest = &regs->data[priv->dreg];
53#ifdef CONFIG_NF_TABLES_BRIDGE
54 const struct net_bridge_port *p;
55#endif
42 56
43 switch (priv->key) { 57 switch (priv->key) {
44 case NFT_META_LEN: 58 case NFT_META_LEN:
@@ -215,6 +229,18 @@ void nft_meta_get_eval(const struct nft_expr *expr,
215 nft_reg_store8(dest, !!skb->sp); 229 nft_reg_store8(dest, !!skb->sp);
216 break; 230 break;
217#endif 231#endif
232#ifdef CONFIG_NF_TABLES_BRIDGE
233 case NFT_META_BRI_IIFNAME:
234 if (in == NULL || (p = br_port_get_rcu(in)) == NULL)
235 goto err;
236 strncpy((char *)dest, p->br->dev->name, IFNAMSIZ);
237 return;
238 case NFT_META_BRI_OIFNAME:
239 if (out == NULL || (p = br_port_get_rcu(out)) == NULL)
240 goto err;
241 strncpy((char *)dest, p->br->dev->name, IFNAMSIZ);
242 return;
243#endif
218 default: 244 default:
219 WARN_ON(1); 245 WARN_ON(1);
220 goto err; 246 goto err;
@@ -224,11 +250,10 @@ void nft_meta_get_eval(const struct nft_expr *expr,
224err: 250err:
225 regs->verdict.code = NFT_BREAK; 251 regs->verdict.code = NFT_BREAK;
226} 252}
227EXPORT_SYMBOL_GPL(nft_meta_get_eval);
228 253
229void nft_meta_set_eval(const struct nft_expr *expr, 254static void nft_meta_set_eval(const struct nft_expr *expr,
230 struct nft_regs *regs, 255 struct nft_regs *regs,
231 const struct nft_pktinfo *pkt) 256 const struct nft_pktinfo *pkt)
232{ 257{
233 const struct nft_meta *meta = nft_expr_priv(expr); 258 const struct nft_meta *meta = nft_expr_priv(expr);
234 struct sk_buff *skb = pkt->skb; 259 struct sk_buff *skb = pkt->skb;
@@ -258,18 +283,16 @@ void nft_meta_set_eval(const struct nft_expr *expr,
258 WARN_ON(1); 283 WARN_ON(1);
259 } 284 }
260} 285}
261EXPORT_SYMBOL_GPL(nft_meta_set_eval);
262 286
263const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = { 287static const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = {
264 [NFTA_META_DREG] = { .type = NLA_U32 }, 288 [NFTA_META_DREG] = { .type = NLA_U32 },
265 [NFTA_META_KEY] = { .type = NLA_U32 }, 289 [NFTA_META_KEY] = { .type = NLA_U32 },
266 [NFTA_META_SREG] = { .type = NLA_U32 }, 290 [NFTA_META_SREG] = { .type = NLA_U32 },
267}; 291};
268EXPORT_SYMBOL_GPL(nft_meta_policy);
269 292
270int nft_meta_get_init(const struct nft_ctx *ctx, 293static int nft_meta_get_init(const struct nft_ctx *ctx,
271 const struct nft_expr *expr, 294 const struct nft_expr *expr,
272 const struct nlattr * const tb[]) 295 const struct nlattr * const tb[])
273{ 296{
274 struct nft_meta *priv = nft_expr_priv(expr); 297 struct nft_meta *priv = nft_expr_priv(expr);
275 unsigned int len; 298 unsigned int len;
@@ -318,6 +341,14 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
318 len = sizeof(u8); 341 len = sizeof(u8);
319 break; 342 break;
320#endif 343#endif
344#ifdef CONFIG_NF_TABLES_BRIDGE
345 case NFT_META_BRI_IIFNAME:
346 case NFT_META_BRI_OIFNAME:
347 if (ctx->family != NFPROTO_BRIDGE)
348 return -EOPNOTSUPP;
349 len = IFNAMSIZ;
350 break;
351#endif
321 default: 352 default:
322 return -EOPNOTSUPP; 353 return -EOPNOTSUPP;
323 } 354 }
@@ -326,7 +357,6 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
326 return nft_validate_register_store(ctx, priv->dreg, NULL, 357 return nft_validate_register_store(ctx, priv->dreg, NULL,
327 NFT_DATA_VALUE, len); 358 NFT_DATA_VALUE, len);
328} 359}
329EXPORT_SYMBOL_GPL(nft_meta_get_init);
330 360
331static int nft_meta_get_validate(const struct nft_ctx *ctx, 361static int nft_meta_get_validate(const struct nft_ctx *ctx,
332 const struct nft_expr *expr, 362 const struct nft_expr *expr,
@@ -360,9 +390,9 @@ static int nft_meta_get_validate(const struct nft_ctx *ctx,
360#endif 390#endif
361} 391}
362 392
363int nft_meta_set_validate(const struct nft_ctx *ctx, 393static int nft_meta_set_validate(const struct nft_ctx *ctx,
364 const struct nft_expr *expr, 394 const struct nft_expr *expr,
365 const struct nft_data **data) 395 const struct nft_data **data)
366{ 396{
367 struct nft_meta *priv = nft_expr_priv(expr); 397 struct nft_meta *priv = nft_expr_priv(expr);
368 unsigned int hooks; 398 unsigned int hooks;
@@ -388,11 +418,10 @@ int nft_meta_set_validate(const struct nft_ctx *ctx,
388 418
389 return nft_chain_validate_hooks(ctx->chain, hooks); 419 return nft_chain_validate_hooks(ctx->chain, hooks);
390} 420}
391EXPORT_SYMBOL_GPL(nft_meta_set_validate);
392 421
393int nft_meta_set_init(const struct nft_ctx *ctx, 422static int nft_meta_set_init(const struct nft_ctx *ctx,
394 const struct nft_expr *expr, 423 const struct nft_expr *expr,
395 const struct nlattr * const tb[]) 424 const struct nlattr * const tb[])
396{ 425{
397 struct nft_meta *priv = nft_expr_priv(expr); 426 struct nft_meta *priv = nft_expr_priv(expr);
398 unsigned int len; 427 unsigned int len;
@@ -424,10 +453,9 @@ int nft_meta_set_init(const struct nft_ctx *ctx,
424 453
425 return 0; 454 return 0;
426} 455}
427EXPORT_SYMBOL_GPL(nft_meta_set_init);
428 456
429int nft_meta_get_dump(struct sk_buff *skb, 457static int nft_meta_get_dump(struct sk_buff *skb,
430 const struct nft_expr *expr) 458 const struct nft_expr *expr)
431{ 459{
432 const struct nft_meta *priv = nft_expr_priv(expr); 460 const struct nft_meta *priv = nft_expr_priv(expr);
433 461
@@ -440,10 +468,8 @@ int nft_meta_get_dump(struct sk_buff *skb,
440nla_put_failure: 468nla_put_failure:
441 return -1; 469 return -1;
442} 470}
443EXPORT_SYMBOL_GPL(nft_meta_get_dump);
444 471
445int nft_meta_set_dump(struct sk_buff *skb, 472static int nft_meta_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
446 const struct nft_expr *expr)
447{ 473{
448 const struct nft_meta *priv = nft_expr_priv(expr); 474 const struct nft_meta *priv = nft_expr_priv(expr);
449 475
@@ -457,19 +483,16 @@ int nft_meta_set_dump(struct sk_buff *skb,
457nla_put_failure: 483nla_put_failure:
458 return -1; 484 return -1;
459} 485}
460EXPORT_SYMBOL_GPL(nft_meta_set_dump);
461 486
462void nft_meta_set_destroy(const struct nft_ctx *ctx, 487static void nft_meta_set_destroy(const struct nft_ctx *ctx,
463 const struct nft_expr *expr) 488 const struct nft_expr *expr)
464{ 489{
465 const struct nft_meta *priv = nft_expr_priv(expr); 490 const struct nft_meta *priv = nft_expr_priv(expr);
466 491
467 if (priv->key == NFT_META_NFTRACE) 492 if (priv->key == NFT_META_NFTRACE)
468 static_branch_dec(&nft_trace_enabled); 493 static_branch_dec(&nft_trace_enabled);
469} 494}
470EXPORT_SYMBOL_GPL(nft_meta_set_destroy);
471 495
472static struct nft_expr_type nft_meta_type;
473static const struct nft_expr_ops nft_meta_get_ops = { 496static const struct nft_expr_ops nft_meta_get_ops = {
474 .type = &nft_meta_type, 497 .type = &nft_meta_type,
475 .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)), 498 .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
@@ -508,27 +531,10 @@ nft_meta_select_ops(const struct nft_ctx *ctx,
508 return ERR_PTR(-EINVAL); 531 return ERR_PTR(-EINVAL);
509} 532}
510 533
511static struct nft_expr_type nft_meta_type __read_mostly = { 534struct nft_expr_type nft_meta_type __read_mostly = {
512 .name = "meta", 535 .name = "meta",
513 .select_ops = nft_meta_select_ops, 536 .select_ops = nft_meta_select_ops,
514 .policy = nft_meta_policy, 537 .policy = nft_meta_policy,
515 .maxattr = NFTA_META_MAX, 538 .maxattr = NFTA_META_MAX,
516 .owner = THIS_MODULE, 539 .owner = THIS_MODULE,
517}; 540};
518
519static int __init nft_meta_module_init(void)
520{
521 return nft_register_expr(&nft_meta_type);
522}
523
524static void __exit nft_meta_module_exit(void)
525{
526 nft_unregister_expr(&nft_meta_type);
527}
528
529module_init(nft_meta_module_init);
530module_exit(nft_meta_module_exit);
531
532MODULE_LICENSE("GPL");
533MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
534MODULE_ALIAS_NFT_EXPR("meta");
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index 1f36954c2ba9..c15807d10b91 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -43,7 +43,7 @@ static void nft_nat_eval(const struct nft_expr *expr,
43 const struct nft_nat *priv = nft_expr_priv(expr); 43 const struct nft_nat *priv = nft_expr_priv(expr);
44 enum ip_conntrack_info ctinfo; 44 enum ip_conntrack_info ctinfo;
45 struct nf_conn *ct = nf_ct_get(pkt->skb, &ctinfo); 45 struct nf_conn *ct = nf_ct_get(pkt->skb, &ctinfo);
46 struct nf_nat_range range; 46 struct nf_nat_range2 range;
47 47
48 memset(&range, 0, sizeof(range)); 48 memset(&range, 0, sizeof(range));
49 if (priv->sreg_addr_min) { 49 if (priv->sreg_addr_min) {
diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c
index 5a3a52c71545..8a64db8f2e69 100644
--- a/net/netfilter/nft_numgen.c
+++ b/net/netfilter/nft_numgen.c
@@ -24,13 +24,11 @@ struct nft_ng_inc {
24 u32 modulus; 24 u32 modulus;
25 atomic_t counter; 25 atomic_t counter;
26 u32 offset; 26 u32 offset;
27 struct nft_set *map;
27}; 28};
28 29
29static void nft_ng_inc_eval(const struct nft_expr *expr, 30static u32 nft_ng_inc_gen(struct nft_ng_inc *priv)
30 struct nft_regs *regs,
31 const struct nft_pktinfo *pkt)
32{ 31{
33 struct nft_ng_inc *priv = nft_expr_priv(expr);
34 u32 nval, oval; 32 u32 nval, oval;
35 33
36 do { 34 do {
@@ -38,7 +36,36 @@ static void nft_ng_inc_eval(const struct nft_expr *expr,
38 nval = (oval + 1 < priv->modulus) ? oval + 1 : 0; 36 nval = (oval + 1 < priv->modulus) ? oval + 1 : 0;
39 } while (atomic_cmpxchg(&priv->counter, oval, nval) != oval); 37 } while (atomic_cmpxchg(&priv->counter, oval, nval) != oval);
40 38
41 regs->data[priv->dreg] = nval + priv->offset; 39 return nval + priv->offset;
40}
41
42static void nft_ng_inc_eval(const struct nft_expr *expr,
43 struct nft_regs *regs,
44 const struct nft_pktinfo *pkt)
45{
46 struct nft_ng_inc *priv = nft_expr_priv(expr);
47
48 regs->data[priv->dreg] = nft_ng_inc_gen(priv);
49}
50
51static void nft_ng_inc_map_eval(const struct nft_expr *expr,
52 struct nft_regs *regs,
53 const struct nft_pktinfo *pkt)
54{
55 struct nft_ng_inc *priv = nft_expr_priv(expr);
56 const struct nft_set *map = priv->map;
57 const struct nft_set_ext *ext;
58 u32 result;
59 bool found;
60
61 result = nft_ng_inc_gen(priv);
62 found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
63
64 if (!found)
65 return;
66
67 nft_data_copy(&regs->data[priv->dreg],
68 nft_set_ext_data(ext), map->dlen);
42} 69}
43 70
44static const struct nla_policy nft_ng_policy[NFTA_NG_MAX + 1] = { 71static const struct nla_policy nft_ng_policy[NFTA_NG_MAX + 1] = {
@@ -46,6 +73,9 @@ static const struct nla_policy nft_ng_policy[NFTA_NG_MAX + 1] = {
46 [NFTA_NG_MODULUS] = { .type = NLA_U32 }, 73 [NFTA_NG_MODULUS] = { .type = NLA_U32 },
47 [NFTA_NG_TYPE] = { .type = NLA_U32 }, 74 [NFTA_NG_TYPE] = { .type = NLA_U32 },
48 [NFTA_NG_OFFSET] = { .type = NLA_U32 }, 75 [NFTA_NG_OFFSET] = { .type = NLA_U32 },
76 [NFTA_NG_SET_NAME] = { .type = NLA_STRING,
77 .len = NFT_SET_MAXNAMELEN - 1 },
78 [NFTA_NG_SET_ID] = { .type = NLA_U32 },
49}; 79};
50 80
51static int nft_ng_inc_init(const struct nft_ctx *ctx, 81static int nft_ng_inc_init(const struct nft_ctx *ctx,
@@ -71,6 +101,25 @@ static int nft_ng_inc_init(const struct nft_ctx *ctx,
71 NFT_DATA_VALUE, sizeof(u32)); 101 NFT_DATA_VALUE, sizeof(u32));
72} 102}
73 103
104static int nft_ng_inc_map_init(const struct nft_ctx *ctx,
105 const struct nft_expr *expr,
106 const struct nlattr * const tb[])
107{
108 struct nft_ng_inc *priv = nft_expr_priv(expr);
109 u8 genmask = nft_genmask_next(ctx->net);
110
111 nft_ng_inc_init(ctx, expr, tb);
112
113 priv->map = nft_set_lookup_global(ctx->net, ctx->table,
114 tb[NFTA_NG_SET_NAME],
115 tb[NFTA_NG_SET_ID], genmask);
116
117 if (IS_ERR(priv->map))
118 return PTR_ERR(priv->map);
119
120 return 0;
121}
122
74static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg, 123static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg,
75 u32 modulus, enum nft_ng_types type, u32 offset) 124 u32 modulus, enum nft_ng_types type, u32 offset)
76{ 125{
@@ -97,6 +146,22 @@ static int nft_ng_inc_dump(struct sk_buff *skb, const struct nft_expr *expr)
97 priv->offset); 146 priv->offset);
98} 147}
99 148
149static int nft_ng_inc_map_dump(struct sk_buff *skb,
150 const struct nft_expr *expr)
151{
152 const struct nft_ng_inc *priv = nft_expr_priv(expr);
153
154 if (nft_ng_dump(skb, priv->dreg, priv->modulus,
155 NFT_NG_INCREMENTAL, priv->offset) ||
156 nla_put_string(skb, NFTA_NG_SET_NAME, priv->map->name))
157 goto nla_put_failure;
158
159 return 0;
160
161nla_put_failure:
162 return -1;
163}
164
100struct nft_ng_random { 165struct nft_ng_random {
101 enum nft_registers dreg:8; 166 enum nft_registers dreg:8;
102 u32 modulus; 167 u32 modulus;
@@ -156,6 +221,14 @@ static const struct nft_expr_ops nft_ng_inc_ops = {
156 .dump = nft_ng_inc_dump, 221 .dump = nft_ng_inc_dump,
157}; 222};
158 223
224static const struct nft_expr_ops nft_ng_inc_map_ops = {
225 .type = &nft_ng_type,
226 .size = NFT_EXPR_SIZE(sizeof(struct nft_ng_inc)),
227 .eval = nft_ng_inc_map_eval,
228 .init = nft_ng_inc_map_init,
229 .dump = nft_ng_inc_map_dump,
230};
231
159static const struct nft_expr_ops nft_ng_random_ops = { 232static const struct nft_expr_ops nft_ng_random_ops = {
160 .type = &nft_ng_type, 233 .type = &nft_ng_type,
161 .size = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)), 234 .size = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
@@ -178,6 +251,8 @@ nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
178 251
179 switch (type) { 252 switch (type) {
180 case NFT_NG_INCREMENTAL: 253 case NFT_NG_INCREMENTAL:
254 if (tb[NFTA_NG_SET_NAME])
255 return &nft_ng_inc_map_ops;
181 return &nft_ng_inc_ops; 256 return &nft_ng_inc_ops;
182 case NFT_NG_RANDOM: 257 case NFT_NG_RANDOM:
183 return &nft_ng_random_ops; 258 return &nft_ng_random_ops;
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 0b02407773ad..cdf348f751ec 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -38,8 +38,8 @@ static int nft_objref_init(const struct nft_ctx *ctx,
38 return -EINVAL; 38 return -EINVAL;
39 39
40 objtype = ntohl(nla_get_be32(tb[NFTA_OBJREF_IMM_TYPE])); 40 objtype = ntohl(nla_get_be32(tb[NFTA_OBJREF_IMM_TYPE]));
41 obj = nf_tables_obj_lookup(ctx->table, tb[NFTA_OBJREF_IMM_NAME], objtype, 41 obj = nft_obj_lookup(ctx->table, tb[NFTA_OBJREF_IMM_NAME], objtype,
42 genmask); 42 genmask);
43 if (IS_ERR(obj)) 43 if (IS_ERR(obj))
44 return -ENOENT; 44 return -ENOENT;
45 45
diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c
index 11a2071b6dd4..76dba9f6b6f6 100644
--- a/net/netfilter/nft_rt.c
+++ b/net/netfilter/nft_rt.c
@@ -7,8 +7,6 @@
7 */ 7 */
8 8
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/netlink.h> 10#include <linux/netlink.h>
13#include <linux/netfilter.h> 11#include <linux/netfilter.h>
14#include <linux/netfilter/nf_tables.h> 12#include <linux/netfilter/nf_tables.h>
@@ -179,7 +177,6 @@ static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *exp
179 return nft_chain_validate_hooks(ctx->chain, hooks); 177 return nft_chain_validate_hooks(ctx->chain, hooks);
180} 178}
181 179
182static struct nft_expr_type nft_rt_type;
183static const struct nft_expr_ops nft_rt_get_ops = { 180static const struct nft_expr_ops nft_rt_get_ops = {
184 .type = &nft_rt_type, 181 .type = &nft_rt_type,
185 .size = NFT_EXPR_SIZE(sizeof(struct nft_rt)), 182 .size = NFT_EXPR_SIZE(sizeof(struct nft_rt)),
@@ -189,27 +186,10 @@ static const struct nft_expr_ops nft_rt_get_ops = {
189 .validate = nft_rt_validate, 186 .validate = nft_rt_validate,
190}; 187};
191 188
192static struct nft_expr_type nft_rt_type __read_mostly = { 189struct nft_expr_type nft_rt_type __read_mostly = {
193 .name = "rt", 190 .name = "rt",
194 .ops = &nft_rt_get_ops, 191 .ops = &nft_rt_get_ops,
195 .policy = nft_rt_policy, 192 .policy = nft_rt_policy,
196 .maxattr = NFTA_RT_MAX, 193 .maxattr = NFTA_RT_MAX,
197 .owner = THIS_MODULE, 194 .owner = THIS_MODULE,
198}; 195};
199
200static int __init nft_rt_module_init(void)
201{
202 return nft_register_expr(&nft_rt_type);
203}
204
205static void __exit nft_rt_module_exit(void)
206{
207 nft_unregister_expr(&nft_rt_type);
208}
209
210module_init(nft_rt_module_init);
211module_exit(nft_rt_module_exit);
212
213MODULE_LICENSE("GPL");
214MODULE_AUTHOR("Anders K. Pedersen <akp@cohaesio.com>");
215MODULE_ALIAS_NFT_EXPR("rt");
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
index 45fb2752fb63..d6626e01c7ee 100644
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -296,27 +296,23 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
296 return true; 296 return true;
297} 297}
298 298
299static struct nft_set_type nft_bitmap_type;
300static struct nft_set_ops nft_bitmap_ops __read_mostly = {
301 .type = &nft_bitmap_type,
302 .privsize = nft_bitmap_privsize,
303 .elemsize = offsetof(struct nft_bitmap_elem, ext),
304 .estimate = nft_bitmap_estimate,
305 .init = nft_bitmap_init,
306 .destroy = nft_bitmap_destroy,
307 .insert = nft_bitmap_insert,
308 .remove = nft_bitmap_remove,
309 .deactivate = nft_bitmap_deactivate,
310 .flush = nft_bitmap_flush,
311 .activate = nft_bitmap_activate,
312 .lookup = nft_bitmap_lookup,
313 .walk = nft_bitmap_walk,
314 .get = nft_bitmap_get,
315};
316
317static struct nft_set_type nft_bitmap_type __read_mostly = { 299static struct nft_set_type nft_bitmap_type __read_mostly = {
318 .ops = &nft_bitmap_ops,
319 .owner = THIS_MODULE, 300 .owner = THIS_MODULE,
301 .ops = {
302 .privsize = nft_bitmap_privsize,
303 .elemsize = offsetof(struct nft_bitmap_elem, ext),
304 .estimate = nft_bitmap_estimate,
305 .init = nft_bitmap_init,
306 .destroy = nft_bitmap_destroy,
307 .insert = nft_bitmap_insert,
308 .remove = nft_bitmap_remove,
309 .deactivate = nft_bitmap_deactivate,
310 .flush = nft_bitmap_flush,
311 .activate = nft_bitmap_activate,
312 .lookup = nft_bitmap_lookup,
313 .walk = nft_bitmap_walk,
314 .get = nft_bitmap_get,
315 },
320}; 316};
321 317
322static int __init nft_bitmap_module_init(void) 318static int __init nft_bitmap_module_init(void)
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index fc9c6d5d64cd..dbf1f4ad077c 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -605,6 +605,12 @@ static void nft_hash_destroy(const struct nft_set *set)
605static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features, 605static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
606 struct nft_set_estimate *est) 606 struct nft_set_estimate *est)
607{ 607{
608 if (!desc->size)
609 return false;
610
611 if (desc->klen == 4)
612 return false;
613
608 est->size = sizeof(struct nft_hash) + 614 est->size = sizeof(struct nft_hash) +
609 nft_hash_buckets(desc->size) * sizeof(struct hlist_head) + 615 nft_hash_buckets(desc->size) * sizeof(struct hlist_head) +
610 desc->size * sizeof(struct nft_hash_elem); 616 desc->size * sizeof(struct nft_hash_elem);
@@ -614,91 +620,100 @@ static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
614 return true; 620 return true;
615} 621}
616 622
617static struct nft_set_type nft_hash_type; 623static bool nft_hash_fast_estimate(const struct nft_set_desc *desc, u32 features,
618static struct nft_set_ops nft_rhash_ops __read_mostly = { 624 struct nft_set_estimate *est)
619 .type = &nft_hash_type, 625{
620 .privsize = nft_rhash_privsize, 626 if (!desc->size)
621 .elemsize = offsetof(struct nft_rhash_elem, ext), 627 return false;
622 .estimate = nft_rhash_estimate,
623 .init = nft_rhash_init,
624 .destroy = nft_rhash_destroy,
625 .insert = nft_rhash_insert,
626 .activate = nft_rhash_activate,
627 .deactivate = nft_rhash_deactivate,
628 .flush = nft_rhash_flush,
629 .remove = nft_rhash_remove,
630 .lookup = nft_rhash_lookup,
631 .update = nft_rhash_update,
632 .walk = nft_rhash_walk,
633 .get = nft_rhash_get,
634 .features = NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
635};
636 628
637static struct nft_set_ops nft_hash_ops __read_mostly = { 629 if (desc->klen != 4)
638 .type = &nft_hash_type, 630 return false;
639 .privsize = nft_hash_privsize,
640 .elemsize = offsetof(struct nft_hash_elem, ext),
641 .estimate = nft_hash_estimate,
642 .init = nft_hash_init,
643 .destroy = nft_hash_destroy,
644 .insert = nft_hash_insert,
645 .activate = nft_hash_activate,
646 .deactivate = nft_hash_deactivate,
647 .flush = nft_hash_flush,
648 .remove = nft_hash_remove,
649 .lookup = nft_hash_lookup,
650 .walk = nft_hash_walk,
651 .get = nft_hash_get,
652 .features = NFT_SET_MAP | NFT_SET_OBJECT,
653};
654 631
655static struct nft_set_ops nft_hash_fast_ops __read_mostly = { 632 est->size = sizeof(struct nft_hash) +
656 .type = &nft_hash_type, 633 nft_hash_buckets(desc->size) * sizeof(struct hlist_head) +
657 .privsize = nft_hash_privsize, 634 desc->size * sizeof(struct nft_hash_elem);
658 .elemsize = offsetof(struct nft_hash_elem, ext), 635 est->lookup = NFT_SET_CLASS_O_1;
659 .estimate = nft_hash_estimate, 636 est->space = NFT_SET_CLASS_O_N;
660 .init = nft_hash_init,
661 .destroy = nft_hash_destroy,
662 .insert = nft_hash_insert,
663 .activate = nft_hash_activate,
664 .deactivate = nft_hash_deactivate,
665 .flush = nft_hash_flush,
666 .remove = nft_hash_remove,
667 .lookup = nft_hash_lookup_fast,
668 .walk = nft_hash_walk,
669 .get = nft_hash_get,
670 .features = NFT_SET_MAP | NFT_SET_OBJECT,
671};
672
673static const struct nft_set_ops *
674nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc,
675 u32 flags)
676{
677 if (desc->size && !(flags & (NFT_SET_EVAL | NFT_SET_TIMEOUT))) {
678 switch (desc->klen) {
679 case 4:
680 return &nft_hash_fast_ops;
681 default:
682 return &nft_hash_ops;
683 }
684 }
685 637
686 return &nft_rhash_ops; 638 return true;
687} 639}
688 640
641static struct nft_set_type nft_rhash_type __read_mostly = {
642 .owner = THIS_MODULE,
643 .features = NFT_SET_MAP | NFT_SET_OBJECT |
644 NFT_SET_TIMEOUT | NFT_SET_EVAL,
645 .ops = {
646 .privsize = nft_rhash_privsize,
647 .elemsize = offsetof(struct nft_rhash_elem, ext),
648 .estimate = nft_rhash_estimate,
649 .init = nft_rhash_init,
650 .destroy = nft_rhash_destroy,
651 .insert = nft_rhash_insert,
652 .activate = nft_rhash_activate,
653 .deactivate = nft_rhash_deactivate,
654 .flush = nft_rhash_flush,
655 .remove = nft_rhash_remove,
656 .lookup = nft_rhash_lookup,
657 .update = nft_rhash_update,
658 .walk = nft_rhash_walk,
659 .get = nft_rhash_get,
660 },
661};
662
689static struct nft_set_type nft_hash_type __read_mostly = { 663static struct nft_set_type nft_hash_type __read_mostly = {
690 .select_ops = nft_hash_select_ops,
691 .owner = THIS_MODULE, 664 .owner = THIS_MODULE,
665 .features = NFT_SET_MAP | NFT_SET_OBJECT,
666 .ops = {
667 .privsize = nft_hash_privsize,
668 .elemsize = offsetof(struct nft_hash_elem, ext),
669 .estimate = nft_hash_estimate,
670 .init = nft_hash_init,
671 .destroy = nft_hash_destroy,
672 .insert = nft_hash_insert,
673 .activate = nft_hash_activate,
674 .deactivate = nft_hash_deactivate,
675 .flush = nft_hash_flush,
676 .remove = nft_hash_remove,
677 .lookup = nft_hash_lookup,
678 .walk = nft_hash_walk,
679 .get = nft_hash_get,
680 },
681};
682
683static struct nft_set_type nft_hash_fast_type __read_mostly = {
684 .owner = THIS_MODULE,
685 .features = NFT_SET_MAP | NFT_SET_OBJECT,
686 .ops = {
687 .privsize = nft_hash_privsize,
688 .elemsize = offsetof(struct nft_hash_elem, ext),
689 .estimate = nft_hash_fast_estimate,
690 .init = nft_hash_init,
691 .destroy = nft_hash_destroy,
692 .insert = nft_hash_insert,
693 .activate = nft_hash_activate,
694 .deactivate = nft_hash_deactivate,
695 .flush = nft_hash_flush,
696 .remove = nft_hash_remove,
697 .lookup = nft_hash_lookup_fast,
698 .walk = nft_hash_walk,
699 .get = nft_hash_get,
700 },
692}; 701};
693 702
694static int __init nft_hash_module_init(void) 703static int __init nft_hash_module_init(void)
695{ 704{
696 return nft_register_set(&nft_hash_type); 705 if (nft_register_set(&nft_hash_fast_type) ||
706 nft_register_set(&nft_hash_type) ||
707 nft_register_set(&nft_rhash_type))
708 return 1;
709 return 0;
697} 710}
698 711
699static void __exit nft_hash_module_exit(void) 712static void __exit nft_hash_module_exit(void)
700{ 713{
714 nft_unregister_set(&nft_rhash_type);
701 nft_unregister_set(&nft_hash_type); 715 nft_unregister_set(&nft_hash_type);
716 nft_unregister_set(&nft_hash_fast_type);
702} 717}
703 718
704module_init(nft_hash_module_init); 719module_init(nft_hash_module_init);
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index e6f08bc5f359..22c57d7612c4 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -393,28 +393,24 @@ static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
393 return true; 393 return true;
394} 394}
395 395
396static struct nft_set_type nft_rbtree_type;
397static struct nft_set_ops nft_rbtree_ops __read_mostly = {
398 .type = &nft_rbtree_type,
399 .privsize = nft_rbtree_privsize,
400 .elemsize = offsetof(struct nft_rbtree_elem, ext),
401 .estimate = nft_rbtree_estimate,
402 .init = nft_rbtree_init,
403 .destroy = nft_rbtree_destroy,
404 .insert = nft_rbtree_insert,
405 .remove = nft_rbtree_remove,
406 .deactivate = nft_rbtree_deactivate,
407 .flush = nft_rbtree_flush,
408 .activate = nft_rbtree_activate,
409 .lookup = nft_rbtree_lookup,
410 .walk = nft_rbtree_walk,
411 .get = nft_rbtree_get,
412 .features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT,
413};
414
415static struct nft_set_type nft_rbtree_type __read_mostly = { 396static struct nft_set_type nft_rbtree_type __read_mostly = {
416 .ops = &nft_rbtree_ops,
417 .owner = THIS_MODULE, 397 .owner = THIS_MODULE,
398 .features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT,
399 .ops = {
400 .privsize = nft_rbtree_privsize,
401 .elemsize = offsetof(struct nft_rbtree_elem, ext),
402 .estimate = nft_rbtree_estimate,
403 .init = nft_rbtree_init,
404 .destroy = nft_rbtree_destroy,
405 .insert = nft_rbtree_insert,
406 .remove = nft_rbtree_remove,
407 .deactivate = nft_rbtree_deactivate,
408 .flush = nft_rbtree_flush,
409 .activate = nft_rbtree_activate,
410 .lookup = nft_rbtree_lookup,
411 .walk = nft_rbtree_walk,
412 .get = nft_rbtree_get,
413 },
418}; 414};
419 415
420static int __init nft_rbtree_module_init(void) 416static int __init nft_rbtree_module_init(void)
diff --git a/net/netfilter/xt_NETMAP.c b/net/netfilter/xt_NETMAP.c
index 58aa9dd3c5b7..1d437875e15a 100644
--- a/net/netfilter/xt_NETMAP.c
+++ b/net/netfilter/xt_NETMAP.c
@@ -21,8 +21,8 @@
21static unsigned int 21static unsigned int
22netmap_tg6(struct sk_buff *skb, const struct xt_action_param *par) 22netmap_tg6(struct sk_buff *skb, const struct xt_action_param *par)
23{ 23{
24 const struct nf_nat_range *range = par->targinfo; 24 const struct nf_nat_range2 *range = par->targinfo;
25 struct nf_nat_range newrange; 25 struct nf_nat_range2 newrange;
26 struct nf_conn *ct; 26 struct nf_conn *ct;
27 enum ip_conntrack_info ctinfo; 27 enum ip_conntrack_info ctinfo;
28 union nf_inet_addr new_addr, netmask; 28 union nf_inet_addr new_addr, netmask;
@@ -56,7 +56,7 @@ netmap_tg6(struct sk_buff *skb, const struct xt_action_param *par)
56 56
57static int netmap_tg6_checkentry(const struct xt_tgchk_param *par) 57static int netmap_tg6_checkentry(const struct xt_tgchk_param *par)
58{ 58{
59 const struct nf_nat_range *range = par->targinfo; 59 const struct nf_nat_range2 *range = par->targinfo;
60 60
61 if (!(range->flags & NF_NAT_RANGE_MAP_IPS)) 61 if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
62 return -EINVAL; 62 return -EINVAL;
@@ -75,7 +75,7 @@ netmap_tg4(struct sk_buff *skb, const struct xt_action_param *par)
75 enum ip_conntrack_info ctinfo; 75 enum ip_conntrack_info ctinfo;
76 __be32 new_ip, netmask; 76 __be32 new_ip, netmask;
77 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; 77 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
78 struct nf_nat_range newrange; 78 struct nf_nat_range2 newrange;
79 79
80 WARN_ON(xt_hooknum(par) != NF_INET_PRE_ROUTING && 80 WARN_ON(xt_hooknum(par) != NF_INET_PRE_ROUTING &&
81 xt_hooknum(par) != NF_INET_POST_ROUTING && 81 xt_hooknum(par) != NF_INET_POST_ROUTING &&
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
index c7f8958cea4a..1ed0cac585c4 100644
--- a/net/netfilter/xt_NFLOG.c
+++ b/net/netfilter/xt_NFLOG.c
@@ -13,7 +13,6 @@
13#include <linux/netfilter/x_tables.h> 13#include <linux/netfilter/x_tables.h>
14#include <linux/netfilter/xt_NFLOG.h> 14#include <linux/netfilter/xt_NFLOG.h>
15#include <net/netfilter/nf_log.h> 15#include <net/netfilter/nf_log.h>
16#include <net/netfilter/nfnetlink_log.h>
17 16
18MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); 17MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
19MODULE_DESCRIPTION("Xtables: packet logging to netlink using NFLOG"); 18MODULE_DESCRIPTION("Xtables: packet logging to netlink using NFLOG");
@@ -37,8 +36,9 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
37 if (info->flags & XT_NFLOG_F_COPY_LEN) 36 if (info->flags & XT_NFLOG_F_COPY_LEN)
38 li.u.ulog.flags |= NF_LOG_F_COPY_LEN; 37 li.u.ulog.flags |= NF_LOG_F_COPY_LEN;
39 38
40 nfulnl_log_packet(net, xt_family(par), xt_hooknum(par), skb, 39 nf_log_packet(net, xt_family(par), xt_hooknum(par), skb, xt_in(par),
41 xt_in(par), xt_out(par), &li, info->prefix); 40 xt_out(par), &li, "%s", info->prefix);
41
42 return XT_CONTINUE; 42 return XT_CONTINUE;
43} 43}
44 44
@@ -50,7 +50,13 @@ static int nflog_tg_check(const struct xt_tgchk_param *par)
50 return -EINVAL; 50 return -EINVAL;
51 if (info->prefix[sizeof(info->prefix) - 1] != '\0') 51 if (info->prefix[sizeof(info->prefix) - 1] != '\0')
52 return -EINVAL; 52 return -EINVAL;
53 return 0; 53
54 return nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);
55}
56
57static void nflog_tg_destroy(const struct xt_tgdtor_param *par)
58{
59 nf_logger_put(par->family, NF_LOG_TYPE_ULOG);
54} 60}
55 61
56static struct xt_target nflog_tg_reg __read_mostly = { 62static struct xt_target nflog_tg_reg __read_mostly = {
@@ -58,6 +64,7 @@ static struct xt_target nflog_tg_reg __read_mostly = {
58 .revision = 0, 64 .revision = 0,
59 .family = NFPROTO_UNSPEC, 65 .family = NFPROTO_UNSPEC,
60 .checkentry = nflog_tg_check, 66 .checkentry = nflog_tg_check,
67 .destroy = nflog_tg_destroy,
61 .target = nflog_tg, 68 .target = nflog_tg,
62 .targetsize = sizeof(struct xt_nflog_info), 69 .targetsize = sizeof(struct xt_nflog_info),
63 .me = THIS_MODULE, 70 .me = THIS_MODULE,
diff --git a/net/netfilter/xt_REDIRECT.c b/net/netfilter/xt_REDIRECT.c
index 98a4c6d4f1cb..5ce9461e979c 100644
--- a/net/netfilter/xt_REDIRECT.c
+++ b/net/netfilter/xt_REDIRECT.c
@@ -36,7 +36,7 @@ redirect_tg6(struct sk_buff *skb, const struct xt_action_param *par)
36 36
37static int redirect_tg6_checkentry(const struct xt_tgchk_param *par) 37static int redirect_tg6_checkentry(const struct xt_tgchk_param *par)
38{ 38{
39 const struct nf_nat_range *range = par->targinfo; 39 const struct nf_nat_range2 *range = par->targinfo;
40 40
41 if (range->flags & NF_NAT_RANGE_MAP_IPS) 41 if (range->flags & NF_NAT_RANGE_MAP_IPS)
42 return -EINVAL; 42 return -EINVAL;
diff --git a/net/netfilter/xt_nat.c b/net/netfilter/xt_nat.c
index bdb689cdc829..8af9707f8789 100644
--- a/net/netfilter/xt_nat.c
+++ b/net/netfilter/xt_nat.c
@@ -37,11 +37,12 @@ static void xt_nat_destroy(const struct xt_tgdtor_param *par)
37 nf_ct_netns_put(par->net, par->family); 37 nf_ct_netns_put(par->net, par->family);
38} 38}
39 39
40static void xt_nat_convert_range(struct nf_nat_range *dst, 40static void xt_nat_convert_range(struct nf_nat_range2 *dst,
41 const struct nf_nat_ipv4_range *src) 41 const struct nf_nat_ipv4_range *src)
42{ 42{
43 memset(&dst->min_addr, 0, sizeof(dst->min_addr)); 43 memset(&dst->min_addr, 0, sizeof(dst->min_addr));
44 memset(&dst->max_addr, 0, sizeof(dst->max_addr)); 44 memset(&dst->max_addr, 0, sizeof(dst->max_addr));
45 memset(&dst->base_proto, 0, sizeof(dst->base_proto));
45 46
46 dst->flags = src->flags; 47 dst->flags = src->flags;
47 dst->min_addr.ip = src->min_ip; 48 dst->min_addr.ip = src->min_ip;
@@ -54,7 +55,7 @@ static unsigned int
54xt_snat_target_v0(struct sk_buff *skb, const struct xt_action_param *par) 55xt_snat_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
55{ 56{
56 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; 57 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
57 struct nf_nat_range range; 58 struct nf_nat_range2 range;
58 enum ip_conntrack_info ctinfo; 59 enum ip_conntrack_info ctinfo;
59 struct nf_conn *ct; 60 struct nf_conn *ct;
60 61
@@ -71,7 +72,7 @@ static unsigned int
71xt_dnat_target_v0(struct sk_buff *skb, const struct xt_action_param *par) 72xt_dnat_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
72{ 73{
73 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; 74 const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
74 struct nf_nat_range range; 75 struct nf_nat_range2 range;
75 enum ip_conntrack_info ctinfo; 76 enum ip_conntrack_info ctinfo;
76 struct nf_conn *ct; 77 struct nf_conn *ct;
77 78
@@ -86,7 +87,8 @@ xt_dnat_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
86static unsigned int 87static unsigned int
87xt_snat_target_v1(struct sk_buff *skb, const struct xt_action_param *par) 88xt_snat_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
88{ 89{
89 const struct nf_nat_range *range = par->targinfo; 90 const struct nf_nat_range *range_v1 = par->targinfo;
91 struct nf_nat_range2 range;
90 enum ip_conntrack_info ctinfo; 92 enum ip_conntrack_info ctinfo;
91 struct nf_conn *ct; 93 struct nf_conn *ct;
92 94
@@ -95,13 +97,49 @@ xt_snat_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
95 (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || 97 (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
96 ctinfo == IP_CT_RELATED_REPLY))); 98 ctinfo == IP_CT_RELATED_REPLY)));
97 99
98 return nf_nat_setup_info(ct, range, NF_NAT_MANIP_SRC); 100 memcpy(&range, range_v1, sizeof(*range_v1));
101 memset(&range.base_proto, 0, sizeof(range.base_proto));
102
103 return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
99} 104}
100 105
101static unsigned int 106static unsigned int
102xt_dnat_target_v1(struct sk_buff *skb, const struct xt_action_param *par) 107xt_dnat_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
103{ 108{
104 const struct nf_nat_range *range = par->targinfo; 109 const struct nf_nat_range *range_v1 = par->targinfo;
110 struct nf_nat_range2 range;
111 enum ip_conntrack_info ctinfo;
112 struct nf_conn *ct;
113
114 ct = nf_ct_get(skb, &ctinfo);
115 WARN_ON(!(ct != NULL &&
116 (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)));
117
118 memcpy(&range, range_v1, sizeof(*range_v1));
119 memset(&range.base_proto, 0, sizeof(range.base_proto));
120
121 return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
122}
123
124static unsigned int
125xt_snat_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
126{
127 const struct nf_nat_range2 *range = par->targinfo;
128 enum ip_conntrack_info ctinfo;
129 struct nf_conn *ct;
130
131 ct = nf_ct_get(skb, &ctinfo);
132 WARN_ON(!(ct != NULL &&
133 (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
134 ctinfo == IP_CT_RELATED_REPLY)));
135
136 return nf_nat_setup_info(ct, range, NF_NAT_MANIP_SRC);
137}
138
139static unsigned int
140xt_dnat_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
141{
142 const struct nf_nat_range2 *range = par->targinfo;
105 enum ip_conntrack_info ctinfo; 143 enum ip_conntrack_info ctinfo;
106 struct nf_conn *ct; 144 struct nf_conn *ct;
107 145
@@ -163,6 +201,28 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = {
163 (1 << NF_INET_LOCAL_OUT), 201 (1 << NF_INET_LOCAL_OUT),
164 .me = THIS_MODULE, 202 .me = THIS_MODULE,
165 }, 203 },
204 {
205 .name = "SNAT",
206 .revision = 2,
207 .checkentry = xt_nat_checkentry,
208 .destroy = xt_nat_destroy,
209 .target = xt_snat_target_v2,
210 .targetsize = sizeof(struct nf_nat_range2),
211 .table = "nat",
212 .hooks = (1 << NF_INET_POST_ROUTING) |
213 (1 << NF_INET_LOCAL_IN),
214 .me = THIS_MODULE,
215 },
216 {
217 .name = "DNAT",
218 .revision = 2,
219 .target = xt_dnat_target_v2,
220 .targetsize = sizeof(struct nf_nat_range2),
221 .table = "nat",
222 .hooks = (1 << NF_INET_PRE_ROUTING) |
223 (1 << NF_INET_LOCAL_OUT),
224 .me = THIS_MODULE,
225 },
166}; 226};
167 227
168static int __init xt_nat_init(void) 228static int __init xt_nat_init(void)
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index a34f314a8c23..9cfef73b4107 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -37,21 +37,6 @@
37#include <net/netfilter/nf_log.h> 37#include <net/netfilter/nf_log.h>
38#include <linux/netfilter/xt_osf.h> 38#include <linux/netfilter/xt_osf.h>
39 39
40struct xt_osf_finger {
41 struct rcu_head rcu_head;
42 struct list_head finger_entry;
43 struct xt_osf_user_finger finger;
44};
45
46enum osf_fmatch_states {
47 /* Packet does not match the fingerprint */
48 FMATCH_WRONG = 0,
49 /* Packet matches the fingerprint */
50 FMATCH_OK,
51 /* Options do not match the fingerprint, but header does */
52 FMATCH_OPT_WRONG,
53};
54
55/* 40/*
56 * Indexed by dont-fragment bit. 41 * Indexed by dont-fragment bit.
57 * It is the only constant value in the fingerprint. 42 * It is the only constant value in the fingerprint.
@@ -164,200 +149,17 @@ static const struct nfnetlink_subsystem xt_osf_nfnetlink = {
164 .cb = xt_osf_nfnetlink_callbacks, 149 .cb = xt_osf_nfnetlink_callbacks,
165}; 150};
166 151
167static inline int xt_osf_ttl(const struct sk_buff *skb, const struct xt_osf_info *info,
168 unsigned char f_ttl)
169{
170 const struct iphdr *ip = ip_hdr(skb);
171
172 if (info->flags & XT_OSF_TTL) {
173 if (info->ttl == XT_OSF_TTL_TRUE)
174 return ip->ttl == f_ttl;
175 if (info->ttl == XT_OSF_TTL_NOCHECK)
176 return 1;
177 else if (ip->ttl <= f_ttl)
178 return 1;
179 else {
180 struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
181 int ret = 0;
182
183 for_ifa(in_dev) {
184 if (inet_ifa_match(ip->saddr, ifa)) {
185 ret = (ip->ttl == f_ttl);
186 break;
187 }
188 }
189 endfor_ifa(in_dev);
190
191 return ret;
192 }
193 }
194
195 return ip->ttl == f_ttl;
196}
197
198static bool 152static bool
199xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p) 153xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
200{ 154{
201 const struct xt_osf_info *info = p->matchinfo; 155 const struct xt_osf_info *info = p->matchinfo;
202 const struct iphdr *ip = ip_hdr(skb);
203 const struct tcphdr *tcp;
204 struct tcphdr _tcph;
205 int fmatch = FMATCH_WRONG, fcount = 0;
206 unsigned int optsize = 0, check_WSS = 0;
207 u16 window, totlen, mss = 0;
208 bool df;
209 const unsigned char *optp = NULL, *_optp = NULL;
210 unsigned char opts[MAX_IPOPTLEN];
211 const struct xt_osf_finger *kf;
212 const struct xt_osf_user_finger *f;
213 struct net *net = xt_net(p); 156 struct net *net = xt_net(p);
214 157
215 if (!info) 158 if (!info)
216 return false; 159 return false;
217 160
218 tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph); 161 return nf_osf_match(skb, xt_family(p), xt_hooknum(p), xt_in(p),
219 if (!tcp) 162 xt_out(p), info, net, xt_osf_fingers);
220 return false;
221
222 if (!tcp->syn)
223 return false;
224
225 totlen = ntohs(ip->tot_len);
226 df = ntohs(ip->frag_off) & IP_DF;
227 window = ntohs(tcp->window);
228
229 if (tcp->doff * 4 > sizeof(struct tcphdr)) {
230 optsize = tcp->doff * 4 - sizeof(struct tcphdr);
231
232 _optp = optp = skb_header_pointer(skb, ip_hdrlen(skb) +
233 sizeof(struct tcphdr), optsize, opts);
234 }
235
236 list_for_each_entry_rcu(kf, &xt_osf_fingers[df], finger_entry) {
237 int foptsize, optnum;
238
239 f = &kf->finger;
240
241 if (!(info->flags & XT_OSF_LOG) && strcmp(info->genre, f->genre))
242 continue;
243
244 optp = _optp;
245 fmatch = FMATCH_WRONG;
246
247 if (totlen != f->ss || !xt_osf_ttl(skb, info, f->ttl))
248 continue;
249
250 /*
251 * Should not happen if userspace parser was written correctly.
252 */
253 if (f->wss.wc >= OSF_WSS_MAX)
254 continue;
255
256 /* Check options */
257
258 foptsize = 0;
259 for (optnum = 0; optnum < f->opt_num; ++optnum)
260 foptsize += f->opt[optnum].length;
261
262 if (foptsize > MAX_IPOPTLEN ||
263 optsize > MAX_IPOPTLEN ||
264 optsize != foptsize)
265 continue;
266
267 check_WSS = f->wss.wc;
268
269 for (optnum = 0; optnum < f->opt_num; ++optnum) {
270 if (f->opt[optnum].kind == (*optp)) {
271 __u32 len = f->opt[optnum].length;
272 const __u8 *optend = optp + len;
273
274 fmatch = FMATCH_OK;
275
276 switch (*optp) {
277 case OSFOPT_MSS:
278 mss = optp[3];
279 mss <<= 8;
280 mss |= optp[2];
281
282 mss = ntohs((__force __be16)mss);
283 break;
284 case OSFOPT_TS:
285 break;
286 }
287
288 optp = optend;
289 } else
290 fmatch = FMATCH_OPT_WRONG;
291
292 if (fmatch != FMATCH_OK)
293 break;
294 }
295
296 if (fmatch != FMATCH_OPT_WRONG) {
297 fmatch = FMATCH_WRONG;
298
299 switch (check_WSS) {
300 case OSF_WSS_PLAIN:
301 if (f->wss.val == 0 || window == f->wss.val)
302 fmatch = FMATCH_OK;
303 break;
304 case OSF_WSS_MSS:
305 /*
306 * Some smart modems decrease mangle MSS to
307 * SMART_MSS_2, so we check standard, decreased
308 * and the one provided in the fingerprint MSS
309 * values.
310 */
311#define SMART_MSS_1 1460
312#define SMART_MSS_2 1448
313 if (window == f->wss.val * mss ||
314 window == f->wss.val * SMART_MSS_1 ||
315 window == f->wss.val * SMART_MSS_2)
316 fmatch = FMATCH_OK;
317 break;
318 case OSF_WSS_MTU:
319 if (window == f->wss.val * (mss + 40) ||
320 window == f->wss.val * (SMART_MSS_1 + 40) ||
321 window == f->wss.val * (SMART_MSS_2 + 40))
322 fmatch = FMATCH_OK;
323 break;
324 case OSF_WSS_MODULO:
325 if ((window % f->wss.val) == 0)
326 fmatch = FMATCH_OK;
327 break;
328 }
329 }
330
331 if (fmatch != FMATCH_OK)
332 continue;
333
334 fcount++;
335
336 if (info->flags & XT_OSF_LOG)
337 nf_log_packet(net, xt_family(p), xt_hooknum(p), skb,
338 xt_in(p), xt_out(p), NULL,
339 "%s [%s:%s] : %pI4:%d -> %pI4:%d hops=%d\n",
340 f->genre, f->version, f->subtype,
341 &ip->saddr, ntohs(tcp->source),
342 &ip->daddr, ntohs(tcp->dest),
343 f->ttl - ip->ttl);
344
345 if ((info->flags & XT_OSF_LOG) &&
346 info->loglevel == XT_OSF_LOGLEVEL_FIRST)
347 break;
348 }
349
350 if (!fcount && (info->flags & XT_OSF_LOG))
351 nf_log_packet(net, xt_family(p), xt_hooknum(p), skb, xt_in(p),
352 xt_out(p), NULL,
353 "Remote OS is not known: %pI4:%u -> %pI4:%u\n",
354 &ip->saddr, ntohs(tcp->source),
355 &ip->daddr, ntohs(tcp->dest));
356
357 if (fcount)
358 fmatch = FMATCH_OK;
359
360 return fmatch == FMATCH_OK;
361} 163}
362 164
363static struct xt_match xt_osf_match = { 165static struct xt_match xt_osf_match = {
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index c5904f629091..02fc343feb66 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -72,7 +72,7 @@ struct ovs_conntrack_info {
72 struct md_mark mark; 72 struct md_mark mark;
73 struct md_labels labels; 73 struct md_labels labels;
74#ifdef CONFIG_NF_NAT_NEEDED 74#ifdef CONFIG_NF_NAT_NEEDED
75 struct nf_nat_range range; /* Only present for SRC NAT and DST NAT. */ 75 struct nf_nat_range2 range; /* Only present for SRC NAT and DST NAT. */
76#endif 76#endif
77}; 77};
78 78
@@ -710,7 +710,7 @@ static bool skb_nfct_cached(struct net *net,
710 */ 710 */
711static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, 711static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
712 enum ip_conntrack_info ctinfo, 712 enum ip_conntrack_info ctinfo,
713 const struct nf_nat_range *range, 713 const struct nf_nat_range2 *range,
714 enum nf_nat_manip_type maniptype) 714 enum nf_nat_manip_type maniptype)
715{ 715{
716 int hooknum, nh_off, err = NF_ACCEPT; 716 int hooknum, nh_off, err = NF_ACCEPT;
diff --git a/net/qrtr/Kconfig b/net/qrtr/Kconfig
index 326fd97444f5..1944834d225c 100644
--- a/net/qrtr/Kconfig
+++ b/net/qrtr/Kconfig
@@ -21,4 +21,11 @@ config QRTR_SMD
21 Say Y here to support SMD based ipcrouter channels. SMD is the 21 Say Y here to support SMD based ipcrouter channels. SMD is the
22 most common transport for IPC Router. 22 most common transport for IPC Router.
23 23
24config QRTR_TUN
25 tristate "TUN device for Qualcomm IPC Router"
26 ---help---
27 Say Y here to expose a character device that allows user space to
28 implement endpoints of QRTR, for purpose of tunneling data to other
29 hosts or testing purposes.
30
24endif # QRTR 31endif # QRTR
diff --git a/net/qrtr/Makefile b/net/qrtr/Makefile
index ab09e40f7c74..be012bfd3e52 100644
--- a/net/qrtr/Makefile
+++ b/net/qrtr/Makefile
@@ -2,3 +2,5 @@ obj-$(CONFIG_QRTR) := qrtr.o
2 2
3obj-$(CONFIG_QRTR_SMD) += qrtr-smd.o 3obj-$(CONFIG_QRTR_SMD) += qrtr-smd.o
4qrtr-smd-y := smd.o 4qrtr-smd-y := smd.o
5obj-$(CONFIG_QRTR_TUN) += qrtr-tun.o
6qrtr-tun-y := tun.o
diff --git a/net/qrtr/tun.c b/net/qrtr/tun.c
new file mode 100644
index 000000000000..ccff1e544c21
--- /dev/null
+++ b/net/qrtr/tun.c
@@ -0,0 +1,161 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Linaro Ltd */
3
4#include <linux/miscdevice.h>
5#include <linux/module.h>
6#include <linux/poll.h>
7#include <linux/skbuff.h>
8#include <linux/uaccess.h>
9
10#include "qrtr.h"
11
12struct qrtr_tun {
13 struct qrtr_endpoint ep;
14
15 struct sk_buff_head queue;
16 wait_queue_head_t readq;
17};
18
19static int qrtr_tun_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
20{
21 struct qrtr_tun *tun = container_of(ep, struct qrtr_tun, ep);
22
23 skb_queue_tail(&tun->queue, skb);
24
25 /* wake up any blocking processes, waiting for new data */
26 wake_up_interruptible(&tun->readq);
27
28 return 0;
29}
30
31static int qrtr_tun_open(struct inode *inode, struct file *filp)
32{
33 struct qrtr_tun *tun;
34
35 tun = kzalloc(sizeof(*tun), GFP_KERNEL);
36 if (!tun)
37 return -ENOMEM;
38
39 skb_queue_head_init(&tun->queue);
40 init_waitqueue_head(&tun->readq);
41
42 tun->ep.xmit = qrtr_tun_send;
43
44 filp->private_data = tun;
45
46 return qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO);
47}
48
49static ssize_t qrtr_tun_read_iter(struct kiocb *iocb, struct iov_iter *to)
50{
51 struct file *filp = iocb->ki_filp;
52 struct qrtr_tun *tun = filp->private_data;
53 struct sk_buff *skb;
54 int count;
55
56 while (!(skb = skb_dequeue(&tun->queue))) {
57 if (filp->f_flags & O_NONBLOCK)
58 return -EAGAIN;
59
60 /* Wait until we get data or the endpoint goes away */
61 if (wait_event_interruptible(tun->readq,
62 !skb_queue_empty(&tun->queue)))
63 return -ERESTARTSYS;
64 }
65
66 count = min_t(size_t, iov_iter_count(to), skb->len);
67 if (copy_to_iter(skb->data, count, to) != count)
68 count = -EFAULT;
69
70 kfree_skb(skb);
71
72 return count;
73}
74
75static ssize_t qrtr_tun_write_iter(struct kiocb *iocb, struct iov_iter *from)
76{
77 struct file *filp = iocb->ki_filp;
78 struct qrtr_tun *tun = filp->private_data;
79 size_t len = iov_iter_count(from);
80 ssize_t ret;
81 void *kbuf;
82
83 kbuf = kzalloc(len, GFP_KERNEL);
84 if (!kbuf)
85 return -ENOMEM;
86
87 if (!copy_from_iter_full(kbuf, len, from))
88 return -EFAULT;
89
90 ret = qrtr_endpoint_post(&tun->ep, kbuf, len);
91
92 return ret < 0 ? ret : len;
93}
94
95static __poll_t qrtr_tun_poll(struct file *filp, poll_table *wait)
96{
97 struct qrtr_tun *tun = filp->private_data;
98 __poll_t mask = 0;
99
100 poll_wait(filp, &tun->readq, wait);
101
102 if (!skb_queue_empty(&tun->queue))
103 mask |= EPOLLIN | EPOLLRDNORM;
104
105 return mask;
106}
107
108static int qrtr_tun_release(struct inode *inode, struct file *filp)
109{
110 struct qrtr_tun *tun = filp->private_data;
111 struct sk_buff *skb;
112
113 qrtr_endpoint_unregister(&tun->ep);
114
115 /* Discard all SKBs */
116 while (!skb_queue_empty(&tun->queue)) {
117 skb = skb_dequeue(&tun->queue);
118 kfree_skb(skb);
119 }
120
121 kfree(tun);
122
123 return 0;
124}
125
126static const struct file_operations qrtr_tun_ops = {
127 .owner = THIS_MODULE,
128 .open = qrtr_tun_open,
129 .poll = qrtr_tun_poll,
130 .read_iter = qrtr_tun_read_iter,
131 .write_iter = qrtr_tun_write_iter,
132 .release = qrtr_tun_release,
133};
134
135static struct miscdevice qrtr_tun_miscdev = {
136 MISC_DYNAMIC_MINOR,
137 "qrtr-tun",
138 &qrtr_tun_ops,
139};
140
141static int __init qrtr_tun_init(void)
142{
143 int ret;
144
145 ret = misc_register(&qrtr_tun_miscdev);
146 if (ret)
147 pr_err("failed to register Qualcomm IPC Router tun device\n");
148
149 return ret;
150}
151
152static void __exit qrtr_tun_exit(void)
153{
154 misc_deregister(&qrtr_tun_miscdev);
155}
156
157module_init(qrtr_tun_init);
158module_exit(qrtr_tun_exit);
159
160MODULE_DESCRIPTION("Qualcomm IPC Router TUN device");
161MODULE_LICENSE("GPL v2");
diff --git a/net/rds/recv.c b/net/rds/recv.c
index de50e2126e40..dc67458b52f0 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -558,6 +558,7 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
558 struct rds_cmsg_rx_trace t; 558 struct rds_cmsg_rx_trace t;
559 int i, j; 559 int i, j;
560 560
561 memset(&t, 0, sizeof(t));
561 inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock(); 562 inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
562 t.rx_traces = rs->rs_rx_traces; 563 t.rx_traces = rs->rs_rx_traces;
563 for (i = 0; i < rs->rs_rx_traces; i++) { 564 for (i = 0; i < rs->rs_rx_traces; i++) {
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 7e28b2ce1437..526a8e491626 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -648,6 +648,11 @@ static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index,
648 return tcf_idr_search(tn, a, index); 648 return tcf_idr_search(tn, a, index);
649} 649}
650 650
651static size_t tcf_csum_get_fill_size(const struct tc_action *act)
652{
653 return nla_total_size(sizeof(struct tc_csum));
654}
655
651static struct tc_action_ops act_csum_ops = { 656static struct tc_action_ops act_csum_ops = {
652 .kind = "csum", 657 .kind = "csum",
653 .type = TCA_ACT_CSUM, 658 .type = TCA_ACT_CSUM,
@@ -658,6 +663,7 @@ static struct tc_action_ops act_csum_ops = {
658 .cleanup = tcf_csum_cleanup, 663 .cleanup = tcf_csum_cleanup,
659 .walk = tcf_csum_walker, 664 .walk = tcf_csum_walker,
660 .lookup = tcf_csum_search, 665 .lookup = tcf_csum_search,
666 .get_fill_size = tcf_csum_get_fill_size,
661 .size = sizeof(struct tcf_csum), 667 .size = sizeof(struct tcf_csum),
662}; 668};
663 669
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index d964e60c730e..eacaaf803914 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -61,16 +61,18 @@ struct fl_flow_mask_range {
61struct fl_flow_mask { 61struct fl_flow_mask {
62 struct fl_flow_key key; 62 struct fl_flow_key key;
63 struct fl_flow_mask_range range; 63 struct fl_flow_mask_range range;
64 struct rcu_head rcu; 64 struct rhash_head ht_node;
65 struct rhashtable ht;
66 struct rhashtable_params filter_ht_params;
67 struct flow_dissector dissector;
68 struct list_head filters;
69 struct rcu_head rcu;
70 struct list_head list;
65}; 71};
66 72
67struct cls_fl_head { 73struct cls_fl_head {
68 struct rhashtable ht; 74 struct rhashtable ht;
69 struct fl_flow_mask mask; 75 struct list_head masks;
70 struct flow_dissector dissector;
71 bool mask_assigned;
72 struct list_head filters;
73 struct rhashtable_params ht_params;
74 union { 76 union {
75 struct work_struct work; 77 struct work_struct work;
76 struct rcu_head rcu; 78 struct rcu_head rcu;
@@ -79,6 +81,7 @@ struct cls_fl_head {
79}; 81};
80 82
81struct cls_fl_filter { 83struct cls_fl_filter {
84 struct fl_flow_mask *mask;
82 struct rhash_head ht_node; 85 struct rhash_head ht_node;
83 struct fl_flow_key mkey; 86 struct fl_flow_key mkey;
84 struct tcf_exts exts; 87 struct tcf_exts exts;
@@ -94,6 +97,13 @@ struct cls_fl_filter {
94 struct net_device *hw_dev; 97 struct net_device *hw_dev;
95}; 98};
96 99
100static const struct rhashtable_params mask_ht_params = {
101 .key_offset = offsetof(struct fl_flow_mask, key),
102 .key_len = sizeof(struct fl_flow_key),
103 .head_offset = offsetof(struct fl_flow_mask, ht_node),
104 .automatic_shrinking = true,
105};
106
97static unsigned short int fl_mask_range(const struct fl_flow_mask *mask) 107static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
98{ 108{
99 return mask->range.end - mask->range.start; 109 return mask->range.end - mask->range.start;
@@ -103,13 +113,19 @@ static void fl_mask_update_range(struct fl_flow_mask *mask)
103{ 113{
104 const u8 *bytes = (const u8 *) &mask->key; 114 const u8 *bytes = (const u8 *) &mask->key;
105 size_t size = sizeof(mask->key); 115 size_t size = sizeof(mask->key);
106 size_t i, first = 0, last = size - 1; 116 size_t i, first = 0, last;
107 117
108 for (i = 0; i < sizeof(mask->key); i++) { 118 for (i = 0; i < size; i++) {
119 if (bytes[i]) {
120 first = i;
121 break;
122 }
123 }
124 last = first;
125 for (i = size - 1; i != first; i--) {
109 if (bytes[i]) { 126 if (bytes[i]) {
110 if (!first && i)
111 first = i;
112 last = i; 127 last = i;
128 break;
113 } 129 }
114 } 130 }
115 mask->range.start = rounddown(first, sizeof(long)); 131 mask->range.start = rounddown(first, sizeof(long));
@@ -140,12 +156,11 @@ static void fl_clear_masked_range(struct fl_flow_key *key,
140 memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask)); 156 memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
141} 157}
142 158
143static struct cls_fl_filter *fl_lookup(struct cls_fl_head *head, 159static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
144 struct fl_flow_key *mkey) 160 struct fl_flow_key *mkey)
145{ 161{
146 return rhashtable_lookup_fast(&head->ht, 162 return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
147 fl_key_get_start(mkey, &head->mask), 163 mask->filter_ht_params);
148 head->ht_params);
149} 164}
150 165
151static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp, 166static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@@ -153,28 +168,28 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
153{ 168{
154 struct cls_fl_head *head = rcu_dereference_bh(tp->root); 169 struct cls_fl_head *head = rcu_dereference_bh(tp->root);
155 struct cls_fl_filter *f; 170 struct cls_fl_filter *f;
171 struct fl_flow_mask *mask;
156 struct fl_flow_key skb_key; 172 struct fl_flow_key skb_key;
157 struct fl_flow_key skb_mkey; 173 struct fl_flow_key skb_mkey;
158 174
159 if (!atomic_read(&head->ht.nelems)) 175 list_for_each_entry_rcu(mask, &head->masks, list) {
160 return -1; 176 fl_clear_masked_range(&skb_key, mask);
161
162 fl_clear_masked_range(&skb_key, &head->mask);
163 177
164 skb_key.indev_ifindex = skb->skb_iif; 178 skb_key.indev_ifindex = skb->skb_iif;
165 /* skb_flow_dissect() does not set n_proto in case an unknown protocol, 179 /* skb_flow_dissect() does not set n_proto in case an unknown
166 * so do it rather here. 180 * protocol, so do it rather here.
167 */ 181 */
168 skb_key.basic.n_proto = skb->protocol; 182 skb_key.basic.n_proto = skb->protocol;
169 skb_flow_dissect_tunnel_info(skb, &head->dissector, &skb_key); 183 skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
170 skb_flow_dissect(skb, &head->dissector, &skb_key, 0); 184 skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
171 185
172 fl_set_masked_key(&skb_mkey, &skb_key, &head->mask); 186 fl_set_masked_key(&skb_mkey, &skb_key, mask);
173 187
174 f = fl_lookup(head, &skb_mkey); 188 f = fl_lookup(mask, &skb_mkey);
175 if (f && !tc_skip_sw(f->flags)) { 189 if (f && !tc_skip_sw(f->flags)) {
176 *res = f->res; 190 *res = f->res;
177 return tcf_exts_exec(skb, &f->exts, res); 191 return tcf_exts_exec(skb, &f->exts, res);
192 }
178 } 193 }
179 return -1; 194 return -1;
180} 195}
@@ -187,11 +202,28 @@ static int fl_init(struct tcf_proto *tp)
187 if (!head) 202 if (!head)
188 return -ENOBUFS; 203 return -ENOBUFS;
189 204
190 INIT_LIST_HEAD_RCU(&head->filters); 205 INIT_LIST_HEAD_RCU(&head->masks);
191 rcu_assign_pointer(tp->root, head); 206 rcu_assign_pointer(tp->root, head);
192 idr_init(&head->handle_idr); 207 idr_init(&head->handle_idr);
193 208
194 return 0; 209 return rhashtable_init(&head->ht, &mask_ht_params);
210}
211
212static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
213 bool async)
214{
215 if (!list_empty(&mask->filters))
216 return false;
217
218 rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
219 rhashtable_destroy(&mask->ht);
220 list_del_rcu(&mask->list);
221 if (async)
222 kfree_rcu(mask, rcu);
223 else
224 kfree(mask);
225
226 return true;
195} 227}
196 228
197static void __fl_destroy_filter(struct cls_fl_filter *f) 229static void __fl_destroy_filter(struct cls_fl_filter *f)
@@ -234,8 +266,6 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
234} 266}
235 267
236static int fl_hw_replace_filter(struct tcf_proto *tp, 268static int fl_hw_replace_filter(struct tcf_proto *tp,
237 struct flow_dissector *dissector,
238 struct fl_flow_key *mask,
239 struct cls_fl_filter *f, 269 struct cls_fl_filter *f,
240 struct netlink_ext_ack *extack) 270 struct netlink_ext_ack *extack)
241{ 271{
@@ -247,8 +277,8 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
247 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack); 277 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
248 cls_flower.command = TC_CLSFLOWER_REPLACE; 278 cls_flower.command = TC_CLSFLOWER_REPLACE;
249 cls_flower.cookie = (unsigned long) f; 279 cls_flower.cookie = (unsigned long) f;
250 cls_flower.dissector = dissector; 280 cls_flower.dissector = &f->mask->dissector;
251 cls_flower.mask = mask; 281 cls_flower.mask = &f->mask->key;
252 cls_flower.key = &f->mkey; 282 cls_flower.key = &f->mkey;
253 cls_flower.exts = &f->exts; 283 cls_flower.exts = &f->exts;
254 cls_flower.classid = f->res.classid; 284 cls_flower.classid = f->res.classid;
@@ -283,28 +313,31 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
283 &cls_flower, false); 313 &cls_flower, false);
284} 314}
285 315
286static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f, 316static bool __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
287 struct netlink_ext_ack *extack) 317 struct netlink_ext_ack *extack)
288{ 318{
289 struct cls_fl_head *head = rtnl_dereference(tp->root); 319 struct cls_fl_head *head = rtnl_dereference(tp->root);
320 bool async = tcf_exts_get_net(&f->exts);
321 bool last;
290 322
291 idr_remove(&head->handle_idr, f->handle); 323 idr_remove(&head->handle_idr, f->handle);
292 list_del_rcu(&f->list); 324 list_del_rcu(&f->list);
325 last = fl_mask_put(head, f->mask, async);
293 if (!tc_skip_hw(f->flags)) 326 if (!tc_skip_hw(f->flags))
294 fl_hw_destroy_filter(tp, f, extack); 327 fl_hw_destroy_filter(tp, f, extack);
295 tcf_unbind_filter(tp, &f->res); 328 tcf_unbind_filter(tp, &f->res);
296 if (tcf_exts_get_net(&f->exts)) 329 if (async)
297 call_rcu(&f->rcu, fl_destroy_filter); 330 call_rcu(&f->rcu, fl_destroy_filter);
298 else 331 else
299 __fl_destroy_filter(f); 332 __fl_destroy_filter(f);
333
334 return last;
300} 335}
301 336
302static void fl_destroy_sleepable(struct work_struct *work) 337static void fl_destroy_sleepable(struct work_struct *work)
303{ 338{
304 struct cls_fl_head *head = container_of(work, struct cls_fl_head, 339 struct cls_fl_head *head = container_of(work, struct cls_fl_head,
305 work); 340 work);
306 if (head->mask_assigned)
307 rhashtable_destroy(&head->ht);
308 kfree(head); 341 kfree(head);
309 module_put(THIS_MODULE); 342 module_put(THIS_MODULE);
310} 343}
@@ -320,10 +353,15 @@ static void fl_destroy_rcu(struct rcu_head *rcu)
320static void fl_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack) 353static void fl_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
321{ 354{
322 struct cls_fl_head *head = rtnl_dereference(tp->root); 355 struct cls_fl_head *head = rtnl_dereference(tp->root);
356 struct fl_flow_mask *mask, *next_mask;
323 struct cls_fl_filter *f, *next; 357 struct cls_fl_filter *f, *next;
324 358
325 list_for_each_entry_safe(f, next, &head->filters, list) 359 list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
326 __fl_delete(tp, f, extack); 360 list_for_each_entry_safe(f, next, &mask->filters, list) {
361 if (__fl_delete(tp, f, extack))
362 break;
363 }
364 }
327 idr_destroy(&head->handle_idr); 365 idr_destroy(&head->handle_idr);
328 366
329 __module_get(THIS_MODULE); 367 __module_get(THIS_MODULE);
@@ -715,14 +753,14 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
715 return ret; 753 return ret;
716} 754}
717 755
718static bool fl_mask_eq(struct fl_flow_mask *mask1, 756static void fl_mask_copy(struct fl_flow_mask *dst,
719 struct fl_flow_mask *mask2) 757 struct fl_flow_mask *src)
720{ 758{
721 const long *lmask1 = fl_key_get_start(&mask1->key, mask1); 759 const void *psrc = fl_key_get_start(&src->key, src);
722 const long *lmask2 = fl_key_get_start(&mask2->key, mask2); 760 void *pdst = fl_key_get_start(&dst->key, src);
723 761
724 return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) && 762 memcpy(pdst, psrc, fl_mask_range(src));
725 !memcmp(lmask1, lmask2, fl_mask_range(mask1)); 763 dst->range = src->range;
726} 764}
727 765
728static const struct rhashtable_params fl_ht_params = { 766static const struct rhashtable_params fl_ht_params = {
@@ -731,14 +769,13 @@ static const struct rhashtable_params fl_ht_params = {
731 .automatic_shrinking = true, 769 .automatic_shrinking = true,
732}; 770};
733 771
734static int fl_init_hashtable(struct cls_fl_head *head, 772static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
735 struct fl_flow_mask *mask)
736{ 773{
737 head->ht_params = fl_ht_params; 774 mask->filter_ht_params = fl_ht_params;
738 head->ht_params.key_len = fl_mask_range(mask); 775 mask->filter_ht_params.key_len = fl_mask_range(mask);
739 head->ht_params.key_offset += mask->range.start; 776 mask->filter_ht_params.key_offset += mask->range.start;
740 777
741 return rhashtable_init(&head->ht, &head->ht_params); 778 return rhashtable_init(&mask->ht, &mask->filter_ht_params);
742} 779}
743 780
744#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member) 781#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
@@ -761,8 +798,7 @@ static int fl_init_hashtable(struct cls_fl_head *head,
761 FL_KEY_SET(keys, cnt, id, member); \ 798 FL_KEY_SET(keys, cnt, id, member); \
762 } while(0); 799 } while(0);
763 800
764static void fl_init_dissector(struct cls_fl_head *head, 801static void fl_init_dissector(struct fl_flow_mask *mask)
765 struct fl_flow_mask *mask)
766{ 802{
767 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX]; 803 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
768 size_t cnt = 0; 804 size_t cnt = 0;
@@ -802,31 +838,66 @@ static void fl_init_dissector(struct cls_fl_head *head,
802 FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt, 838 FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
803 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp); 839 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
804 840
805 skb_flow_dissector_init(&head->dissector, keys, cnt); 841 skb_flow_dissector_init(&mask->dissector, keys, cnt);
842}
843
844static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
845 struct fl_flow_mask *mask)
846{
847 struct fl_flow_mask *newmask;
848 int err;
849
850 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
851 if (!newmask)
852 return ERR_PTR(-ENOMEM);
853
854 fl_mask_copy(newmask, mask);
855
856 err = fl_init_mask_hashtable(newmask);
857 if (err)
858 goto errout_free;
859
860 fl_init_dissector(newmask);
861
862 INIT_LIST_HEAD_RCU(&newmask->filters);
863
864 err = rhashtable_insert_fast(&head->ht, &newmask->ht_node,
865 mask_ht_params);
866 if (err)
867 goto errout_destroy;
868
869 list_add_tail_rcu(&newmask->list, &head->masks);
870
871 return newmask;
872
873errout_destroy:
874 rhashtable_destroy(&newmask->ht);
875errout_free:
876 kfree(newmask);
877
878 return ERR_PTR(err);
806} 879}
807 880
808static int fl_check_assign_mask(struct cls_fl_head *head, 881static int fl_check_assign_mask(struct cls_fl_head *head,
882 struct cls_fl_filter *fnew,
883 struct cls_fl_filter *fold,
809 struct fl_flow_mask *mask) 884 struct fl_flow_mask *mask)
810{ 885{
811 int err; 886 struct fl_flow_mask *newmask;
812 887
813 if (head->mask_assigned) { 888 fnew->mask = rhashtable_lookup_fast(&head->ht, mask, mask_ht_params);
814 if (!fl_mask_eq(&head->mask, mask)) 889 if (!fnew->mask) {
890 if (fold)
815 return -EINVAL; 891 return -EINVAL;
816 else
817 return 0;
818 }
819 892
820 /* Mask is not assigned yet. So assign it and init hashtable 893 newmask = fl_create_new_mask(head, mask);
821 * according to that. 894 if (IS_ERR(newmask))
822 */ 895 return PTR_ERR(newmask);
823 err = fl_init_hashtable(head, mask);
824 if (err)
825 return err;
826 memcpy(&head->mask, mask, sizeof(head->mask));
827 head->mask_assigned = true;
828 896
829 fl_init_dissector(head, mask); 897 fnew->mask = newmask;
898 } else if (fold && fold->mask == fnew->mask) {
899 return -EINVAL;
900 }
830 901
831 return 0; 902 return 0;
832} 903}
@@ -924,30 +995,26 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
924 if (err) 995 if (err)
925 goto errout_idr; 996 goto errout_idr;
926 997
927 err = fl_check_assign_mask(head, &mask); 998 err = fl_check_assign_mask(head, fnew, fold, &mask);
928 if (err) 999 if (err)
929 goto errout_idr; 1000 goto errout_idr;
930 1001
931 if (!tc_skip_sw(fnew->flags)) { 1002 if (!tc_skip_sw(fnew->flags)) {
932 if (!fold && fl_lookup(head, &fnew->mkey)) { 1003 if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) {
933 err = -EEXIST; 1004 err = -EEXIST;
934 goto errout_idr; 1005 goto errout_mask;
935 } 1006 }
936 1007
937 err = rhashtable_insert_fast(&head->ht, &fnew->ht_node, 1008 err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
938 head->ht_params); 1009 fnew->mask->filter_ht_params);
939 if (err) 1010 if (err)
940 goto errout_idr; 1011 goto errout_mask;
941 } 1012 }
942 1013
943 if (!tc_skip_hw(fnew->flags)) { 1014 if (!tc_skip_hw(fnew->flags)) {
944 err = fl_hw_replace_filter(tp, 1015 err = fl_hw_replace_filter(tp, fnew, extack);
945 &head->dissector,
946 &mask.key,
947 fnew,
948 extack);
949 if (err) 1016 if (err)
950 goto errout_idr; 1017 goto errout_mask;
951 } 1018 }
952 1019
953 if (!tc_in_hw(fnew->flags)) 1020 if (!tc_in_hw(fnew->flags))
@@ -955,8 +1022,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
955 1022
956 if (fold) { 1023 if (fold) {
957 if (!tc_skip_sw(fold->flags)) 1024 if (!tc_skip_sw(fold->flags))
958 rhashtable_remove_fast(&head->ht, &fold->ht_node, 1025 rhashtable_remove_fast(&fold->mask->ht,
959 head->ht_params); 1026 &fold->ht_node,
1027 fold->mask->filter_ht_params);
960 if (!tc_skip_hw(fold->flags)) 1028 if (!tc_skip_hw(fold->flags))
961 fl_hw_destroy_filter(tp, fold, NULL); 1029 fl_hw_destroy_filter(tp, fold, NULL);
962 } 1030 }
@@ -970,12 +1038,15 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
970 tcf_exts_get_net(&fold->exts); 1038 tcf_exts_get_net(&fold->exts);
971 call_rcu(&fold->rcu, fl_destroy_filter); 1039 call_rcu(&fold->rcu, fl_destroy_filter);
972 } else { 1040 } else {
973 list_add_tail_rcu(&fnew->list, &head->filters); 1041 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
974 } 1042 }
975 1043
976 kfree(tb); 1044 kfree(tb);
977 return 0; 1045 return 0;
978 1046
1047errout_mask:
1048 fl_mask_put(head, fnew->mask, false);
1049
979errout_idr: 1050errout_idr:
980 if (fnew->handle) 1051 if (fnew->handle)
981 idr_remove(&head->handle_idr, fnew->handle); 1052 idr_remove(&head->handle_idr, fnew->handle);
@@ -994,10 +1065,10 @@ static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
994 struct cls_fl_filter *f = arg; 1065 struct cls_fl_filter *f = arg;
995 1066
996 if (!tc_skip_sw(f->flags)) 1067 if (!tc_skip_sw(f->flags))
997 rhashtable_remove_fast(&head->ht, &f->ht_node, 1068 rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
998 head->ht_params); 1069 f->mask->filter_ht_params);
999 __fl_delete(tp, f, extack); 1070 __fl_delete(tp, f, extack);
1000 *last = list_empty(&head->filters); 1071 *last = list_empty(&head->masks);
1001 return 0; 1072 return 0;
1002} 1073}
1003 1074
@@ -1005,16 +1076,19 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
1005{ 1076{
1006 struct cls_fl_head *head = rtnl_dereference(tp->root); 1077 struct cls_fl_head *head = rtnl_dereference(tp->root);
1007 struct cls_fl_filter *f; 1078 struct cls_fl_filter *f;
1008 1079 struct fl_flow_mask *mask;
1009 list_for_each_entry_rcu(f, &head->filters, list) { 1080
1010 if (arg->count < arg->skip) 1081 list_for_each_entry_rcu(mask, &head->masks, list) {
1011 goto skip; 1082 list_for_each_entry_rcu(f, &mask->filters, list) {
1012 if (arg->fn(tp, f, arg) < 0) { 1083 if (arg->count < arg->skip)
1013 arg->stop = 1; 1084 goto skip;
1014 break; 1085 if (arg->fn(tp, f, arg) < 0) {
1015 } 1086 arg->stop = 1;
1087 break;
1088 }
1016skip: 1089skip:
1017 arg->count++; 1090 arg->count++;
1091 }
1018 } 1092 }
1019} 1093}
1020 1094
@@ -1150,7 +1224,6 @@ static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
1150static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, 1224static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
1151 struct sk_buff *skb, struct tcmsg *t) 1225 struct sk_buff *skb, struct tcmsg *t)
1152{ 1226{
1153 struct cls_fl_head *head = rtnl_dereference(tp->root);
1154 struct cls_fl_filter *f = fh; 1227 struct cls_fl_filter *f = fh;
1155 struct nlattr *nest; 1228 struct nlattr *nest;
1156 struct fl_flow_key *key, *mask; 1229 struct fl_flow_key *key, *mask;
@@ -1169,7 +1242,7 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
1169 goto nla_put_failure; 1242 goto nla_put_failure;
1170 1243
1171 key = &f->key; 1244 key = &f->key;
1172 mask = &head->mask.key; 1245 mask = &f->mask->key;
1173 1246
1174 if (mask->indev_ifindex) { 1247 if (mask->indev_ifindex) {
1175 struct net_device *dev; 1248 struct net_device *dev;
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index a366e4c9413a..4808713c73b9 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -128,6 +128,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f)
128 return f->next == &detached; 128 return f->next == &detached;
129} 129}
130 130
131static bool fq_flow_is_throttled(const struct fq_flow *f)
132{
133 return f->next == &throttled;
134}
135
136static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
137{
138 if (head->first)
139 head->last->next = flow;
140 else
141 head->first = flow;
142 head->last = flow;
143 flow->next = NULL;
144}
145
146static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
147{
148 rb_erase(&f->rate_node, &q->delayed);
149 q->throttled_flows--;
150 fq_flow_add_tail(&q->old_flows, f);
151}
152
131static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f) 153static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
132{ 154{
133 struct rb_node **p = &q->delayed.rb_node, *parent = NULL; 155 struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
@@ -155,15 +177,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
155 177
156static struct kmem_cache *fq_flow_cachep __read_mostly; 178static struct kmem_cache *fq_flow_cachep __read_mostly;
157 179
158static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
159{
160 if (head->first)
161 head->last->next = flow;
162 else
163 head->first = flow;
164 head->last = flow;
165 flow->next = NULL;
166}
167 180
168/* limit number of collected flows per round */ 181/* limit number of collected flows per round */
169#define FQ_GC_MAX 8 182#define FQ_GC_MAX 8
@@ -267,6 +280,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
267 f->socket_hash != sk->sk_hash)) { 280 f->socket_hash != sk->sk_hash)) {
268 f->credit = q->initial_quantum; 281 f->credit = q->initial_quantum;
269 f->socket_hash = sk->sk_hash; 282 f->socket_hash = sk->sk_hash;
283 if (fq_flow_is_throttled(f))
284 fq_flow_unset_throttled(q, f);
270 f->time_next_packet = 0ULL; 285 f->time_next_packet = 0ULL;
271 } 286 }
272 return f; 287 return f;
@@ -438,9 +453,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
438 q->time_next_delayed_flow = f->time_next_packet; 453 q->time_next_delayed_flow = f->time_next_packet;
439 break; 454 break;
440 } 455 }
441 rb_erase(p, &q->delayed); 456 fq_flow_unset_throttled(q, f);
442 q->throttled_flows--;
443 fq_flow_add_tail(&q->old_flows, f);
444 } 457 }
445} 458}
446 459
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index a8f3b088fcb2..039fdb862b17 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -652,33 +652,20 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
652 */ 652 */
653 peer->param_flags = asoc->param_flags; 653 peer->param_flags = asoc->param_flags;
654 654
655 sctp_transport_route(peer, NULL, sp);
656
657 /* Initialize the pmtu of the transport. */ 655 /* Initialize the pmtu of the transport. */
658 if (peer->param_flags & SPP_PMTUD_DISABLE) { 656 sctp_transport_route(peer, NULL, sp);
659 if (asoc->pathmtu)
660 peer->pathmtu = asoc->pathmtu;
661 else
662 peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
663 }
664 657
665 /* If this is the first transport addr on this association, 658 /* If this is the first transport addr on this association,
666 * initialize the association PMTU to the peer's PMTU. 659 * initialize the association PMTU to the peer's PMTU.
667 * If not and the current association PMTU is higher than the new 660 * If not and the current association PMTU is higher than the new
668 * peer's PMTU, reset the association PMTU to the new peer's PMTU. 661 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
669 */ 662 */
670 if (asoc->pathmtu) 663 sctp_assoc_set_pmtu(asoc, asoc->pathmtu ?
671 asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu); 664 min_t(int, peer->pathmtu, asoc->pathmtu) :
672 else 665 peer->pathmtu);
673 asoc->pathmtu = peer->pathmtu;
674
675 pr_debug("%s: association:%p PMTU set to %d\n", __func__, asoc,
676 asoc->pathmtu);
677 666
678 peer->pmtu_pending = 0; 667 peer->pmtu_pending = 0;
679 668
680 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
681
682 /* The asoc->peer.port might not be meaningful yet, but 669 /* The asoc->peer.port might not be meaningful yet, but
683 * initialize the packet structure anyway. 670 * initialize the packet structure anyway.
684 */ 671 */
@@ -1381,6 +1368,31 @@ sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
1381 } 1368 }
1382} 1369}
1383 1370
1371void sctp_assoc_update_frag_point(struct sctp_association *asoc)
1372{
1373 int frag = sctp_mtu_payload(sctp_sk(asoc->base.sk), asoc->pathmtu,
1374 sctp_datachk_len(&asoc->stream));
1375
1376 if (asoc->user_frag)
1377 frag = min_t(int, frag, asoc->user_frag);
1378
1379 frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN -
1380 sctp_datachk_len(&asoc->stream));
1381
1382 asoc->frag_point = SCTP_TRUNC4(frag);
1383}
1384
1385void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu)
1386{
1387 if (asoc->pathmtu != pmtu) {
1388 asoc->pathmtu = pmtu;
1389 sctp_assoc_update_frag_point(asoc);
1390 }
1391
1392 pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
1393 asoc->pathmtu, asoc->frag_point);
1394}
1395
1384/* Update the association's pmtu and frag_point by going through all the 1396/* Update the association's pmtu and frag_point by going through all the
1385 * transports. This routine is called when a transport's PMTU has changed. 1397 * transports. This routine is called when a transport's PMTU has changed.
1386 */ 1398 */
@@ -1393,24 +1405,16 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
1393 return; 1405 return;
1394 1406
1395 /* Get the lowest pmtu of all the transports. */ 1407 /* Get the lowest pmtu of all the transports. */
1396 list_for_each_entry(t, &asoc->peer.transport_addr_list, 1408 list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
1397 transports) {
1398 if (t->pmtu_pending && t->dst) { 1409 if (t->pmtu_pending && t->dst) {
1399 sctp_transport_update_pmtu( 1410 sctp_transport_update_pmtu(t, sctp_dst_mtu(t->dst));
1400 t, SCTP_TRUNC4(dst_mtu(t->dst)));
1401 t->pmtu_pending = 0; 1411 t->pmtu_pending = 0;
1402 } 1412 }
1403 if (!pmtu || (t->pathmtu < pmtu)) 1413 if (!pmtu || (t->pathmtu < pmtu))
1404 pmtu = t->pathmtu; 1414 pmtu = t->pathmtu;
1405 } 1415 }
1406 1416
1407 if (pmtu) { 1417 sctp_assoc_set_pmtu(asoc, pmtu);
1408 asoc->pathmtu = pmtu;
1409 asoc->frag_point = sctp_frag_point(asoc, pmtu);
1410 }
1411
1412 pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
1413 asoc->pathmtu, asoc->frag_point);
1414} 1418}
1415 1419
1416/* Should we send a SACK to update our peer? */ 1420/* Should we send a SACK to update our peer? */
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index be296d633e95..79daa98208c3 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -172,8 +172,6 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
172 struct list_head *pos, *temp; 172 struct list_head *pos, *temp;
173 struct sctp_chunk *chunk; 173 struct sctp_chunk *chunk;
174 struct sctp_datamsg *msg; 174 struct sctp_datamsg *msg;
175 struct sctp_sock *sp;
176 struct sctp_af *af;
177 int err; 175 int err;
178 176
179 msg = sctp_datamsg_new(GFP_KERNEL); 177 msg = sctp_datamsg_new(GFP_KERNEL);
@@ -192,12 +190,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
192 /* This is the biggest possible DATA chunk that can fit into 190 /* This is the biggest possible DATA chunk that can fit into
193 * the packet 191 * the packet
194 */ 192 */
195 sp = sctp_sk(asoc->base.sk); 193 max_data = asoc->frag_point;
196 af = sp->pf->af;
197 max_data = asoc->pathmtu - af->net_header_len -
198 sizeof(struct sctphdr) - sctp_datachk_len(&asoc->stream) -
199 af->ip_options_len(asoc->base.sk);
200 max_data = SCTP_TRUNC4(max_data);
201 194
202 /* If the the peer requested that we authenticate DATA chunks 195 /* If the the peer requested that we authenticate DATA chunks
203 * we need to account for bundling of the AUTH chunks along with 196 * we need to account for bundling of the AUTH chunks along with
@@ -222,9 +215,6 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
222 } 215 }
223 } 216 }
224 217
225 /* Check what's our max considering the above */
226 max_data = min_t(size_t, max_data, asoc->frag_point);
227
228 /* Set first_len and then account for possible bundles on first frag */ 218 /* Set first_len and then account for possible bundles on first frag */
229 first_len = max_data; 219 first_len = max_data;
230 220
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 23ebc5318edc..eb93ffe2408b 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -217,7 +217,7 @@ new_skb:
217 skb_pull(chunk->skb, sizeof(*ch)); 217 skb_pull(chunk->skb, sizeof(*ch));
218 chunk->subh.v = NULL; /* Subheader is no longer valid. */ 218 chunk->subh.v = NULL; /* Subheader is no longer valid. */
219 219
220 if (chunk->chunk_end + sizeof(*ch) < skb_tail_pointer(chunk->skb)) { 220 if (chunk->chunk_end + sizeof(*ch) <= skb_tail_pointer(chunk->skb)) {
221 /* This is not a singleton */ 221 /* This is not a singleton */
222 chunk->singleton = 0; 222 chunk->singleton = 0;
223 } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) { 223 } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 2e3f7b75a8ec..42247110d842 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -895,6 +895,9 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
895 if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2)) 895 if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
896 return 1; 896 return 1;
897 897
898 if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET)
899 return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr;
900
898 return __sctp_v6_cmp_addr(addr1, addr2); 901 return __sctp_v6_cmp_addr(addr1, addr2);
899} 902}
900 903
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 690d8557bb7b..e672dee302c7 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -90,8 +90,8 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
90{ 90{
91 struct sctp_transport *tp = packet->transport; 91 struct sctp_transport *tp = packet->transport;
92 struct sctp_association *asoc = tp->asoc; 92 struct sctp_association *asoc = tp->asoc;
93 struct sctp_sock *sp = NULL;
93 struct sock *sk; 94 struct sock *sk;
94 size_t overhead = sizeof(struct ipv6hdr) + sizeof(struct sctphdr);
95 95
96 pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag); 96 pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
97 packet->vtag = vtag; 97 packet->vtag = vtag;
@@ -102,28 +102,20 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
102 102
103 /* set packet max_size with pathmtu, then calculate overhead */ 103 /* set packet max_size with pathmtu, then calculate overhead */
104 packet->max_size = tp->pathmtu; 104 packet->max_size = tp->pathmtu;
105
105 if (asoc) { 106 if (asoc) {
106 struct sctp_sock *sp = sctp_sk(asoc->base.sk); 107 sk = asoc->base.sk;
107 struct sctp_af *af = sp->pf->af; 108 sp = sctp_sk(sk);
108
109 overhead = af->net_header_len +
110 af->ip_options_len(asoc->base.sk);
111 overhead += sizeof(struct sctphdr);
112 packet->overhead = overhead;
113 packet->size = overhead;
114 } else {
115 packet->overhead = overhead;
116 packet->size = overhead;
117 return;
118 } 109 }
110 packet->overhead = sctp_mtu_payload(sp, 0, 0);
111 packet->size = packet->overhead;
112
113 if (!asoc)
114 return;
119 115
120 /* update dst or transport pathmtu if in need */ 116 /* update dst or transport pathmtu if in need */
121 sk = asoc->base.sk;
122 if (!sctp_transport_dst_check(tp)) { 117 if (!sctp_transport_dst_check(tp)) {
123 sctp_transport_route(tp, NULL, sctp_sk(sk)); 118 sctp_transport_route(tp, NULL, sp);
124 if (asoc->param_flags & SPP_PMTUD_ENABLE)
125 sctp_assoc_sync_pmtu(asoc);
126 } else if (!sctp_transport_pmtu_check(tp)) {
127 if (asoc->param_flags & SPP_PMTUD_ENABLE) 119 if (asoc->param_flags & SPP_PMTUD_ENABLE)
128 sctp_assoc_sync_pmtu(asoc); 120 sctp_assoc_sync_pmtu(asoc);
129 } 121 }
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index db93eabd6ef5..4d7b3ccea078 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -81,8 +81,6 @@ static int sctp_process_param(struct sctp_association *asoc,
81 gfp_t gfp); 81 gfp_t gfp);
82static void *sctp_addto_param(struct sctp_chunk *chunk, int len, 82static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
83 const void *data); 83 const void *data);
84static void *sctp_addto_chunk_fixed(struct sctp_chunk *, int len,
85 const void *data);
86 84
87/* Control chunk destructor */ 85/* Control chunk destructor */
88static void sctp_control_release_owner(struct sk_buff *skb) 86static void sctp_control_release_owner(struct sk_buff *skb)
@@ -154,12 +152,11 @@ static const struct sctp_paramhdr prsctp_param = {
154 cpu_to_be16(sizeof(struct sctp_paramhdr)), 152 cpu_to_be16(sizeof(struct sctp_paramhdr)),
155}; 153};
156 154
157/* A helper to initialize an op error inside a 155/* A helper to initialize an op error inside a provided chunk, as most
158 * provided chunk, as most cause codes will be embedded inside an 156 * cause codes will be embedded inside an abort chunk.
159 * abort chunk.
160 */ 157 */
161void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, 158int sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
162 size_t paylen) 159 size_t paylen)
163{ 160{
164 struct sctp_errhdr err; 161 struct sctp_errhdr err;
165 __u16 len; 162 __u16 len;
@@ -167,33 +164,16 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
167 /* Cause code constants are now defined in network order. */ 164 /* Cause code constants are now defined in network order. */
168 err.cause = cause_code; 165 err.cause = cause_code;
169 len = sizeof(err) + paylen; 166 len = sizeof(err) + paylen;
170 err.length = htons(len); 167 err.length = htons(len);
171 chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(err), &err);
172}
173
174/* A helper to initialize an op error inside a
175 * provided chunk, as most cause codes will be embedded inside an
176 * abort chunk. Differs from sctp_init_cause in that it won't oops
177 * if there isn't enough space in the op error chunk
178 */
179static int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
180 size_t paylen)
181{
182 struct sctp_errhdr err;
183 __u16 len;
184
185 /* Cause code constants are now defined in network order. */
186 err.cause = cause_code;
187 len = sizeof(err) + paylen;
188 err.length = htons(len);
189 168
190 if (skb_tailroom(chunk->skb) < len) 169 if (skb_tailroom(chunk->skb) < len)
191 return -ENOSPC; 170 return -ENOSPC;
192 171
193 chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk, sizeof(err), &err); 172 chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(err), &err);
194 173
195 return 0; 174 return 0;
196} 175}
176
197/* 3.3.2 Initiation (INIT) (1) 177/* 3.3.2 Initiation (INIT) (1)
198 * 178 *
199 * This chunk is used to initiate a SCTP association between two 179 * This chunk is used to initiate a SCTP association between two
@@ -1257,20 +1237,26 @@ nodata:
1257 return retval; 1237 return retval;
1258} 1238}
1259 1239
1260/* Create an Operation Error chunk of a fixed size, 1240/* Create an Operation Error chunk of a fixed size, specifically,
1261 * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT) 1241 * min(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT) - overheads.
1262 * This is a helper function to allocate an error chunk for 1242 * This is a helper function to allocate an error chunk for for those
1263 * for those invalid parameter codes in which we may not want 1243 * invalid parameter codes in which we may not want to report all the
1264 * to report all the errors, if the incoming chunk is large 1244 * errors, if the incoming chunk is large. If it can't fit in a single
1245 * packet, we ignore it.
1265 */ 1246 */
1266static inline struct sctp_chunk *sctp_make_op_error_fixed( 1247static inline struct sctp_chunk *sctp_make_op_error_limited(
1267 const struct sctp_association *asoc, 1248 const struct sctp_association *asoc,
1268 const struct sctp_chunk *chunk) 1249 const struct sctp_chunk *chunk)
1269{ 1250{
1270 size_t size = asoc ? asoc->pathmtu : 0; 1251 size_t size = SCTP_DEFAULT_MAXSEGMENT;
1252 struct sctp_sock *sp = NULL;
1271 1253
1272 if (!size) 1254 if (asoc) {
1273 size = SCTP_DEFAULT_MAXSEGMENT; 1255 size = min_t(size_t, size, asoc->pathmtu);
1256 sp = sctp_sk(asoc->base.sk);
1257 }
1258
1259 size = sctp_mtu_payload(sp, size, sizeof(struct sctp_errhdr));
1274 1260
1275 return sctp_make_op_error_space(asoc, chunk, size); 1261 return sctp_make_op_error_space(asoc, chunk, size);
1276} 1262}
@@ -1522,18 +1508,6 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
1522 return target; 1508 return target;
1523} 1509}
1524 1510
1525/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient
1526 * space in the chunk
1527 */
1528static void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
1529 int len, const void *data)
1530{
1531 if (skb_tailroom(chunk->skb) >= len)
1532 return sctp_addto_chunk(chunk, len, data);
1533 else
1534 return NULL;
1535}
1536
1537/* Append bytes from user space to the end of a chunk. Will panic if 1511/* Append bytes from user space to the end of a chunk. Will panic if
1538 * chunk is not big enough. 1512 * chunk is not big enough.
1539 * Returns a kernel err value. 1513 * Returns a kernel err value.
@@ -1828,6 +1802,9 @@ no_hmac:
1828 kt = ktime_get_real(); 1802 kt = ktime_get_real();
1829 1803
1830 if (!asoc && ktime_before(bear_cookie->expiration, kt)) { 1804 if (!asoc && ktime_before(bear_cookie->expiration, kt)) {
1805 suseconds_t usecs = ktime_to_us(ktime_sub(kt, bear_cookie->expiration));
1806 __be32 n = htonl(usecs);
1807
1831 /* 1808 /*
1832 * Section 3.3.10.3 Stale Cookie Error (3) 1809 * Section 3.3.10.3 Stale Cookie Error (3)
1833 * 1810 *
@@ -1836,17 +1813,12 @@ no_hmac:
1836 * Stale Cookie Error: Indicates the receipt of a valid State 1813 * Stale Cookie Error: Indicates the receipt of a valid State
1837 * Cookie that has expired. 1814 * Cookie that has expired.
1838 */ 1815 */
1839 len = ntohs(chunk->chunk_hdr->length); 1816 *errp = sctp_make_op_error(asoc, chunk,
1840 *errp = sctp_make_op_error_space(asoc, chunk, len); 1817 SCTP_ERROR_STALE_COOKIE, &n,
1841 if (*errp) { 1818 sizeof(n), 0);
1842 suseconds_t usecs = ktime_to_us(ktime_sub(kt, bear_cookie->expiration)); 1819 if (*errp)
1843 __be32 n = htonl(usecs);
1844
1845 sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
1846 sizeof(n));
1847 sctp_addto_chunk(*errp, sizeof(n), &n);
1848 *error = -SCTP_IERROR_STALE_COOKIE; 1820 *error = -SCTP_IERROR_STALE_COOKIE;
1849 } else 1821 else
1850 *error = -SCTP_IERROR_NOMEM; 1822 *error = -SCTP_IERROR_NOMEM;
1851 1823
1852 goto fail; 1824 goto fail;
@@ -1997,12 +1969,8 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
1997 if (*errp) 1969 if (*errp)
1998 sctp_chunk_free(*errp); 1970 sctp_chunk_free(*errp);
1999 1971
2000 *errp = sctp_make_op_error_space(asoc, chunk, len); 1972 *errp = sctp_make_op_error(asoc, chunk, SCTP_ERROR_DNS_FAILED,
2001 1973 param.v, len, 0);
2002 if (*errp) {
2003 sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
2004 sctp_addto_chunk(*errp, len, param.v);
2005 }
2006 1974
2007 /* Stop processing this chunk. */ 1975 /* Stop processing this chunk. */
2008 return 0; 1976 return 0;
@@ -2127,23 +2095,23 @@ static enum sctp_ierror sctp_process_unk_param(
2127 /* Make an ERROR chunk, preparing enough room for 2095 /* Make an ERROR chunk, preparing enough room for
2128 * returning multiple unknown parameters. 2096 * returning multiple unknown parameters.
2129 */ 2097 */
2130 if (NULL == *errp) 2098 if (!*errp) {
2131 *errp = sctp_make_op_error_fixed(asoc, chunk); 2099 *errp = sctp_make_op_error_limited(asoc, chunk);
2132 2100 if (!*errp) {
2133 if (*errp) { 2101 /* If there is no memory for generating the
2134 if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, 2102 * ERROR report as specified, an ABORT will be
2135 SCTP_PAD4(ntohs(param.p->length)))) 2103 * triggered to the peer and the association
2136 sctp_addto_chunk_fixed(*errp, 2104 * won't be established.
2137 SCTP_PAD4(ntohs(param.p->length)), 2105 */
2138 param.v); 2106 retval = SCTP_IERROR_NOMEM;
2139 } else { 2107 break;
2140 /* If there is no memory for generating the ERROR 2108 }
2141 * report as specified, an ABORT will be triggered
2142 * to the peer and the association won't be
2143 * established.
2144 */
2145 retval = SCTP_IERROR_NOMEM;
2146 } 2109 }
2110
2111 if (!sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
2112 ntohs(param.p->length)))
2113 sctp_addto_chunk(*errp, ntohs(param.p->length),
2114 param.v);
2147 break; 2115 break;
2148 default: 2116 default:
2149 break; 2117 break;
@@ -2219,10 +2187,10 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
2219 * MUST be aborted. The ABORT chunk SHOULD contain the error 2187 * MUST be aborted. The ABORT chunk SHOULD contain the error
2220 * cause 'Protocol Violation'. 2188 * cause 'Protocol Violation'.
2221 */ 2189 */
2222 if (SCTP_AUTH_RANDOM_LENGTH != 2190 if (SCTP_AUTH_RANDOM_LENGTH != ntohs(param.p->length) -
2223 ntohs(param.p->length) - sizeof(struct sctp_paramhdr)) { 2191 sizeof(struct sctp_paramhdr)) {
2224 sctp_process_inv_paramlength(asoc, param.p, 2192 sctp_process_inv_paramlength(asoc, param.p,
2225 chunk, err_chunk); 2193 chunk, err_chunk);
2226 retval = SCTP_IERROR_ABORT; 2194 retval = SCTP_IERROR_ABORT;
2227 } 2195 }
2228 break; 2196 break;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index dd0594a10961..28c070e187c2 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1794,6 +1794,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
1794 GFP_ATOMIC)) 1794 GFP_ATOMIC))
1795 goto nomem; 1795 goto nomem;
1796 1796
1797 if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC))
1798 goto nomem;
1799
1797 /* Make sure no new addresses are being added during the 1800 /* Make sure no new addresses are being added during the
1798 * restart. Though this is a pretty complicated attack 1801 * restart. Though this is a pretty complicated attack
1799 * since you'd have to get inside the cookie. 1802 * since you'd have to get inside the cookie.
@@ -1906,6 +1909,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_b(
1906 GFP_ATOMIC)) 1909 GFP_ATOMIC))
1907 goto nomem; 1910 goto nomem;
1908 1911
1912 if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC))
1913 goto nomem;
1914
1909 /* Update the content of current association. */ 1915 /* Update the content of current association. */
1910 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); 1916 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
1911 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 1917 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
@@ -2050,7 +2056,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_d(
2050 } 2056 }
2051 } 2057 }
2052 2058
2053 repl = sctp_make_cookie_ack(new_asoc, chunk); 2059 repl = sctp_make_cookie_ack(asoc, chunk);
2054 if (!repl) 2060 if (!repl)
2055 goto nomem; 2061 goto nomem;
2056 2062
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 80835ac26d2c..1b4593b842b0 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -644,16 +644,15 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
644 644
645 list_for_each_entry(trans, 645 list_for_each_entry(trans,
646 &asoc->peer.transport_addr_list, transports) { 646 &asoc->peer.transport_addr_list, transports) {
647 /* Clear the source and route cache */
648 sctp_transport_dst_release(trans);
649 trans->cwnd = min(4*asoc->pathmtu, max_t(__u32, 647 trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
650 2*asoc->pathmtu, 4380)); 648 2*asoc->pathmtu, 4380));
651 trans->ssthresh = asoc->peer.i.a_rwnd; 649 trans->ssthresh = asoc->peer.i.a_rwnd;
652 trans->rto = asoc->rto_initial; 650 trans->rto = asoc->rto_initial;
653 sctp_max_rto(asoc, trans); 651 sctp_max_rto(asoc, trans);
654 trans->rtt = trans->srtt = trans->rttvar = 0; 652 trans->rtt = trans->srtt = trans->rttvar = 0;
653 /* Clear the source and route cache */
655 sctp_transport_route(trans, NULL, 654 sctp_transport_route(trans, NULL,
656 sctp_sk(asoc->base.sk)); 655 sctp_sk(asoc->base.sk));
657 } 656 }
658 } 657 }
659 retval = sctp_send_asconf(asoc, chunk); 658 retval = sctp_send_asconf(asoc, chunk);
@@ -896,7 +895,6 @@ skip_mkasconf:
896 */ 895 */
897 list_for_each_entry(transport, &asoc->peer.transport_addr_list, 896 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
898 transports) { 897 transports) {
899 sctp_transport_dst_release(transport);
900 sctp_transport_route(transport, NULL, 898 sctp_transport_route(transport, NULL,
901 sctp_sk(asoc->base.sk)); 899 sctp_sk(asoc->base.sk));
902 } 900 }
@@ -1895,6 +1893,7 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
1895 struct sctp_sndrcvinfo *sinfo) 1893 struct sctp_sndrcvinfo *sinfo)
1896{ 1894{
1897 struct sock *sk = asoc->base.sk; 1895 struct sock *sk = asoc->base.sk;
1896 struct sctp_sock *sp = sctp_sk(sk);
1898 struct net *net = sock_net(sk); 1897 struct net *net = sock_net(sk);
1899 struct sctp_datamsg *datamsg; 1898 struct sctp_datamsg *datamsg;
1900 bool wait_connect = false; 1899 bool wait_connect = false;
@@ -1913,13 +1912,16 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
1913 goto err; 1912 goto err;
1914 } 1913 }
1915 1914
1916 if (sctp_sk(sk)->disable_fragments && msg_len > asoc->frag_point) { 1915 if (sp->disable_fragments && msg_len > asoc->frag_point) {
1917 err = -EMSGSIZE; 1916 err = -EMSGSIZE;
1918 goto err; 1917 goto err;
1919 } 1918 }
1920 1919
1921 if (asoc->pmtu_pending) 1920 if (asoc->pmtu_pending) {
1922 sctp_assoc_pending_pmtu(asoc); 1921 if (sp->param_flags & SPP_PMTUD_ENABLE)
1922 sctp_assoc_sync_pmtu(asoc);
1923 asoc->pmtu_pending = 0;
1924 }
1923 1925
1924 if (sctp_wspace(asoc) < msg_len) 1926 if (sctp_wspace(asoc) < msg_len)
1925 sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc)); 1927 sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
@@ -1936,7 +1938,7 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
1936 if (err) 1938 if (err)
1937 goto err; 1939 goto err;
1938 1940
1939 if (sctp_sk(sk)->strm_interleave) { 1941 if (sp->strm_interleave) {
1940 timeo = sock_sndtimeo(sk, 0); 1942 timeo = sock_sndtimeo(sk, 0);
1941 err = sctp_wait_for_connect(asoc, &timeo); 1943 err = sctp_wait_for_connect(asoc, &timeo);
1942 if (err) 1944 if (err)
@@ -2539,7 +2541,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2539 trans->pathmtu = params->spp_pathmtu; 2541 trans->pathmtu = params->spp_pathmtu;
2540 sctp_assoc_sync_pmtu(asoc); 2542 sctp_assoc_sync_pmtu(asoc);
2541 } else if (asoc) { 2543 } else if (asoc) {
2542 asoc->pathmtu = params->spp_pathmtu; 2544 sctp_assoc_set_pmtu(asoc, params->spp_pathmtu);
2543 } else { 2545 } else {
2544 sp->pathmtu = params->spp_pathmtu; 2546 sp->pathmtu = params->spp_pathmtu;
2545 } 2547 }
@@ -3209,7 +3211,6 @@ static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsign
3209static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) 3211static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
3210{ 3212{
3211 struct sctp_sock *sp = sctp_sk(sk); 3213 struct sctp_sock *sp = sctp_sk(sk);
3212 struct sctp_af *af = sp->pf->af;
3213 struct sctp_assoc_value params; 3214 struct sctp_assoc_value params;
3214 struct sctp_association *asoc; 3215 struct sctp_association *asoc;
3215 int val; 3216 int val;
@@ -3231,30 +3232,24 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
3231 return -EINVAL; 3232 return -EINVAL;
3232 } 3233 }
3233 3234
3235 asoc = sctp_id2assoc(sk, params.assoc_id);
3236
3234 if (val) { 3237 if (val) {
3235 int min_len, max_len; 3238 int min_len, max_len;
3239 __u16 datasize = asoc ? sctp_datachk_len(&asoc->stream) :
3240 sizeof(struct sctp_data_chunk);
3236 3241
3237 min_len = SCTP_DEFAULT_MINSEGMENT - af->net_header_len; 3242 min_len = sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT,
3238 min_len -= af->ip_options_len(sk); 3243 datasize);
3239 min_len -= sizeof(struct sctphdr) + 3244 max_len = SCTP_MAX_CHUNK_LEN - datasize;
3240 sizeof(struct sctp_data_chunk);
3241
3242 max_len = SCTP_MAX_CHUNK_LEN - sizeof(struct sctp_data_chunk);
3243 3245
3244 if (val < min_len || val > max_len) 3246 if (val < min_len || val > max_len)
3245 return -EINVAL; 3247 return -EINVAL;
3246 } 3248 }
3247 3249
3248 asoc = sctp_id2assoc(sk, params.assoc_id);
3249 if (asoc) { 3250 if (asoc) {
3250 if (val == 0) {
3251 val = asoc->pathmtu - af->net_header_len;
3252 val -= af->ip_options_len(sk);
3253 val -= sizeof(struct sctphdr) +
3254 sctp_datachk_len(&asoc->stream);
3255 }
3256 asoc->user_frag = val; 3251 asoc->user_frag = val;
3257 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); 3252 sctp_assoc_update_frag_point(asoc);
3258 } else { 3253 } else {
3259 if (params.assoc_id && sctp_style(sk, UDP)) 3254 if (params.assoc_id && sctp_style(sk, UDP))
3260 return -EINVAL; 3255 return -EINVAL;
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index f799043abec9..f1f1d1b232ba 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -240,6 +240,8 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
240 240
241 new->out = NULL; 241 new->out = NULL;
242 new->in = NULL; 242 new->in = NULL;
243 new->outcnt = 0;
244 new->incnt = 0;
243} 245}
244 246
245static int sctp_send_reconf(struct sctp_association *asoc, 247static int sctp_send_reconf(struct sctp_association *asoc,
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 47f82bd794d9..4a95e260b674 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -242,9 +242,18 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
242 &transport->fl, sk); 242 &transport->fl, sk);
243 } 243 }
244 244
245 if (transport->dst) { 245 if (transport->param_flags & SPP_PMTUD_DISABLE) {
246 transport->pathmtu = SCTP_TRUNC4(dst_mtu(transport->dst)); 246 struct sctp_association *asoc = transport->asoc;
247 } else 247
248 if (!transport->pathmtu && asoc && asoc->pathmtu)
249 transport->pathmtu = asoc->pathmtu;
250 if (transport->pathmtu)
251 return;
252 }
253
254 if (transport->dst)
255 transport->pathmtu = sctp_dst_mtu(transport->dst);
256 else
248 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 257 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
249} 258}
250 259
@@ -290,6 +299,7 @@ void sctp_transport_route(struct sctp_transport *transport,
290 struct sctp_association *asoc = transport->asoc; 299 struct sctp_association *asoc = transport->asoc;
291 struct sctp_af *af = transport->af_specific; 300 struct sctp_af *af = transport->af_specific;
292 301
302 sctp_transport_dst_release(transport);
293 af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt)); 303 af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt));
294 304
295 if (saddr) 305 if (saddr)
@@ -297,21 +307,14 @@ void sctp_transport_route(struct sctp_transport *transport,
297 else 307 else
298 af->get_saddr(opt, transport, &transport->fl); 308 af->get_saddr(opt, transport, &transport->fl);
299 309
300 if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) { 310 sctp_transport_pmtu(transport, sctp_opt2sk(opt));
301 return;
302 }
303 if (transport->dst) {
304 transport->pathmtu = SCTP_TRUNC4(dst_mtu(transport->dst));
305 311
306 /* Initialize sk->sk_rcv_saddr, if the transport is the 312 /* Initialize sk->sk_rcv_saddr, if the transport is the
307 * association's active path for getsockname(). 313 * association's active path for getsockname().
308 */ 314 */
309 if (asoc && (!asoc->peer.primary_path || 315 if (transport->dst && asoc &&
310 (transport == asoc->peer.active_path))) 316 (!asoc->peer.primary_path || transport == asoc->peer.active_path))
311 opt->pf->to_sk_saddr(&transport->saddr, 317 opt->pf->to_sk_saddr(&transport->saddr, asoc->base.sk);
312 asoc->base.sk);
313 } else
314 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
315} 318}
316 319
317/* Hold a reference to a transport. */ 320/* Hold a reference to a transport. */
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 4470501374bf..17688a02035b 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -29,6 +29,7 @@
29#include <net/sock.h> 29#include <net/sock.h>
30#include <net/tcp.h> 30#include <net/tcp.h>
31#include <net/smc.h> 31#include <net/smc.h>
32#include <asm/ioctls.h>
32 33
33#include "smc.h" 34#include "smc.h"
34#include "smc_clc.h" 35#include "smc_clc.h"
@@ -292,8 +293,20 @@ static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
292 smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC); 293 smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
293} 294}
294 295
296/* register a new rmb */
297static int smc_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc)
298{
299 /* register memory region for new rmb */
300 if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) {
301 rmb_desc->regerr = 1;
302 return -EFAULT;
303 }
304 return 0;
305}
306
295static int smc_clnt_conf_first_link(struct smc_sock *smc) 307static int smc_clnt_conf_first_link(struct smc_sock *smc)
296{ 308{
309 struct net *net = sock_net(smc->clcsock->sk);
297 struct smc_link_group *lgr = smc->conn.lgr; 310 struct smc_link_group *lgr = smc->conn.lgr;
298 struct smc_link *link; 311 struct smc_link *link;
299 int rest; 312 int rest;
@@ -321,9 +334,7 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc)
321 334
322 smc_wr_remember_qp_attr(link); 335 smc_wr_remember_qp_attr(link);
323 336
324 rc = smc_wr_reg_send(link, 337 if (smc_reg_rmb(link, smc->conn.rmb_desc))
325 smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]);
326 if (rc)
327 return SMC_CLC_DECL_INTERR; 338 return SMC_CLC_DECL_INTERR;
328 339
329 /* send CONFIRM LINK response over RoCE fabric */ 340 /* send CONFIRM LINK response over RoCE fabric */
@@ -353,7 +364,7 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc)
353 if (rc < 0) 364 if (rc < 0)
354 return SMC_CLC_DECL_TCL; 365 return SMC_CLC_DECL_TCL;
355 366
356 link->state = SMC_LNK_ACTIVE; 367 smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);
357 368
358 return 0; 369 return 0;
359} 370}
@@ -391,6 +402,9 @@ static int smc_connect_rdma(struct smc_sock *smc)
391 402
392 sock_hold(&smc->sk); /* sock put in passive closing */ 403 sock_hold(&smc->sk); /* sock put in passive closing */
393 404
405 if (smc->use_fallback)
406 goto out_connected;
407
394 if (!tcp_sk(smc->clcsock->sk)->syn_smc) { 408 if (!tcp_sk(smc->clcsock->sk)->syn_smc) {
395 /* peer has not signalled SMC-capability */ 409 /* peer has not signalled SMC-capability */
396 smc->use_fallback = true; 410 smc->use_fallback = true;
@@ -473,13 +487,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
473 goto decline_rdma_unlock; 487 goto decline_rdma_unlock;
474 } 488 }
475 } else { 489 } else {
476 struct smc_buf_desc *buf_desc = smc->conn.rmb_desc; 490 if (!smc->conn.rmb_desc->reused) {
477 491 if (smc_reg_rmb(link, smc->conn.rmb_desc)) {
478 if (!buf_desc->reused) {
479 /* register memory region for new rmb */
480 rc = smc_wr_reg_send(link,
481 buf_desc->mr_rx[SMC_SINGLE_LINK]);
482 if (rc) {
483 reason_code = SMC_CLC_DECL_INTERR; 492 reason_code = SMC_CLC_DECL_INTERR;
484 goto decline_rdma_unlock; 493 goto decline_rdma_unlock;
485 } 494 }
@@ -712,6 +721,7 @@ void smc_close_non_accepted(struct sock *sk)
712 721
713static int smc_serv_conf_first_link(struct smc_sock *smc) 722static int smc_serv_conf_first_link(struct smc_sock *smc)
714{ 723{
724 struct net *net = sock_net(smc->clcsock->sk);
715 struct smc_link_group *lgr = smc->conn.lgr; 725 struct smc_link_group *lgr = smc->conn.lgr;
716 struct smc_link *link; 726 struct smc_link *link;
717 int rest; 727 int rest;
@@ -719,9 +729,7 @@ static int smc_serv_conf_first_link(struct smc_sock *smc)
719 729
720 link = &lgr->lnk[SMC_SINGLE_LINK]; 730 link = &lgr->lnk[SMC_SINGLE_LINK];
721 731
722 rc = smc_wr_reg_send(link, 732 if (smc_reg_rmb(link, smc->conn.rmb_desc))
723 smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]);
724 if (rc)
725 return SMC_CLC_DECL_INTERR; 733 return SMC_CLC_DECL_INTERR;
726 734
727 /* send CONFIRM LINK request to client over the RoCE fabric */ 735 /* send CONFIRM LINK request to client over the RoCE fabric */
@@ -766,7 +774,7 @@ static int smc_serv_conf_first_link(struct smc_sock *smc)
766 return rc; 774 return rc;
767 } 775 }
768 776
769 link->state = SMC_LNK_ACTIVE; 777 smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);
770 778
771 return 0; 779 return 0;
772} 780}
@@ -790,6 +798,9 @@ static void smc_listen_work(struct work_struct *work)
790 int rc = 0; 798 int rc = 0;
791 u8 ibport; 799 u8 ibport;
792 800
801 if (new_smc->use_fallback)
802 goto out_connected;
803
793 /* check if peer is smc capable */ 804 /* check if peer is smc capable */
794 if (!tcp_sk(newclcsock->sk)->syn_smc) { 805 if (!tcp_sk(newclcsock->sk)->syn_smc) {
795 new_smc->use_fallback = true; 806 new_smc->use_fallback = true;
@@ -854,13 +865,8 @@ static void smc_listen_work(struct work_struct *work)
854 smc_rx_init(new_smc); 865 smc_rx_init(new_smc);
855 866
856 if (local_contact != SMC_FIRST_CONTACT) { 867 if (local_contact != SMC_FIRST_CONTACT) {
857 struct smc_buf_desc *buf_desc = new_smc->conn.rmb_desc; 868 if (!new_smc->conn.rmb_desc->reused) {
858 869 if (smc_reg_rmb(link, new_smc->conn.rmb_desc)) {
859 if (!buf_desc->reused) {
860 /* register memory region for new rmb */
861 rc = smc_wr_reg_send(link,
862 buf_desc->mr_rx[SMC_SINGLE_LINK]);
863 if (rc) {
864 reason_code = SMC_CLC_DECL_INTERR; 870 reason_code = SMC_CLC_DECL_INTERR;
865 goto decline_rdma_unlock; 871 goto decline_rdma_unlock;
866 } 872 }
@@ -968,7 +974,7 @@ static void smc_tcp_listen_work(struct work_struct *work)
968 continue; 974 continue;
969 975
970 new_smc->listen_smc = lsmc; 976 new_smc->listen_smc = lsmc;
971 new_smc->use_fallback = false; /* assume rdma capability first*/ 977 new_smc->use_fallback = lsmc->use_fallback;
972 sock_hold(lsk); /* sock_put in smc_listen_work */ 978 sock_hold(lsk); /* sock_put in smc_listen_work */
973 INIT_WORK(&new_smc->smc_listen_work, smc_listen_work); 979 INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
974 smc_copy_sock_settings_to_smc(new_smc); 980 smc_copy_sock_settings_to_smc(new_smc);
@@ -1004,7 +1010,8 @@ static int smc_listen(struct socket *sock, int backlog)
1004 * them to the clc socket -- copy smc socket options to clc socket 1010 * them to the clc socket -- copy smc socket options to clc socket
1005 */ 1011 */
1006 smc_copy_sock_settings_to_clc(smc); 1012 smc_copy_sock_settings_to_clc(smc);
1007 tcp_sk(smc->clcsock->sk)->syn_smc = 1; 1013 if (!smc->use_fallback)
1014 tcp_sk(smc->clcsock->sk)->syn_smc = 1;
1008 1015
1009 rc = kernel_listen(smc->clcsock, backlog); 1016 rc = kernel_listen(smc->clcsock, backlog);
1010 if (rc) 1017 if (rc)
@@ -1037,6 +1044,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
1037 1044
1038 if (lsmc->sk.sk_state != SMC_LISTEN) { 1045 if (lsmc->sk.sk_state != SMC_LISTEN) {
1039 rc = -EINVAL; 1046 rc = -EINVAL;
1047 release_sock(sk);
1040 goto out; 1048 goto out;
1041 } 1049 }
1042 1050
@@ -1064,9 +1072,29 @@ static int smc_accept(struct socket *sock, struct socket *new_sock,
1064 1072
1065 if (!rc) 1073 if (!rc)
1066 rc = sock_error(nsk); 1074 rc = sock_error(nsk);
1075 release_sock(sk);
1076 if (rc)
1077 goto out;
1078
1079 if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
1080 /* wait till data arrives on the socket */
1081 timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
1082 MSEC_PER_SEC);
1083 if (smc_sk(nsk)->use_fallback) {
1084 struct sock *clcsk = smc_sk(nsk)->clcsock->sk;
1085
1086 lock_sock(clcsk);
1087 if (skb_queue_empty(&clcsk->sk_receive_queue))
1088 sk_wait_data(clcsk, &timeo, NULL);
1089 release_sock(clcsk);
1090 } else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
1091 lock_sock(nsk);
1092 smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
1093 release_sock(nsk);
1094 }
1095 }
1067 1096
1068out: 1097out:
1069 release_sock(sk);
1070 sock_put(sk); /* sock_hold above */ 1098 sock_put(sk); /* sock_hold above */
1071 return rc; 1099 return rc;
1072} 1100}
@@ -1097,6 +1125,16 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1097 (sk->sk_state != SMC_APPCLOSEWAIT1) && 1125 (sk->sk_state != SMC_APPCLOSEWAIT1) &&
1098 (sk->sk_state != SMC_INIT)) 1126 (sk->sk_state != SMC_INIT))
1099 goto out; 1127 goto out;
1128
1129 if (msg->msg_flags & MSG_FASTOPEN) {
1130 if (sk->sk_state == SMC_INIT) {
1131 smc->use_fallback = true;
1132 } else {
1133 rc = -EINVAL;
1134 goto out;
1135 }
1136 }
1137
1100 if (smc->use_fallback) 1138 if (smc->use_fallback)
1101 rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len); 1139 rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
1102 else 1140 else
@@ -1125,10 +1163,12 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1125 goto out; 1163 goto out;
1126 } 1164 }
1127 1165
1128 if (smc->use_fallback) 1166 if (smc->use_fallback) {
1129 rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags); 1167 rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
1130 else 1168 } else {
1131 rc = smc_rx_recvmsg(smc, msg, len, flags); 1169 msg->msg_namelen = 0;
1170 rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
1171 }
1132 1172
1133out: 1173out:
1134 release_sock(sk); 1174 release_sock(sk);
@@ -1166,13 +1206,15 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
1166 /* delegate to CLC child sock */ 1206 /* delegate to CLC child sock */
1167 release_sock(sk); 1207 release_sock(sk);
1168 mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); 1208 mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
1169 /* if non-blocking connect finished ... */
1170 lock_sock(sk); 1209 lock_sock(sk);
1171 if ((sk->sk_state == SMC_INIT) && (mask & EPOLLOUT)) { 1210 sk->sk_err = smc->clcsock->sk->sk_err;
1172 sk->sk_err = smc->clcsock->sk->sk_err; 1211 if (sk->sk_err) {
1173 if (sk->sk_err) { 1212 mask |= EPOLLERR;
1174 mask |= EPOLLERR; 1213 } else {
1175 } else { 1214 /* if non-blocking connect finished ... */
1215 if (sk->sk_state == SMC_INIT &&
1216 mask & EPOLLOUT &&
1217 smc->clcsock->sk->sk_state != TCP_CLOSE) {
1176 rc = smc_connect_rdma(smc); 1218 rc = smc_connect_rdma(smc);
1177 if (rc < 0) 1219 if (rc < 0)
1178 mask |= EPOLLERR; 1220 mask |= EPOLLERR;
@@ -1274,14 +1316,64 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
1274{ 1316{
1275 struct sock *sk = sock->sk; 1317 struct sock *sk = sock->sk;
1276 struct smc_sock *smc; 1318 struct smc_sock *smc;
1319 int val, rc;
1277 1320
1278 smc = smc_sk(sk); 1321 smc = smc_sk(sk);
1279 1322
1280 /* generic setsockopts reaching us here always apply to the 1323 /* generic setsockopts reaching us here always apply to the
1281 * CLC socket 1324 * CLC socket
1282 */ 1325 */
1283 return smc->clcsock->ops->setsockopt(smc->clcsock, level, optname, 1326 rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
1284 optval, optlen); 1327 optval, optlen);
1328 if (smc->clcsock->sk->sk_err) {
1329 sk->sk_err = smc->clcsock->sk->sk_err;
1330 sk->sk_error_report(sk);
1331 }
1332 if (rc)
1333 return rc;
1334
1335 if (optlen < sizeof(int))
1336 return rc;
1337 get_user(val, (int __user *)optval);
1338
1339 lock_sock(sk);
1340 switch (optname) {
1341 case TCP_ULP:
1342 case TCP_FASTOPEN:
1343 case TCP_FASTOPEN_CONNECT:
1344 case TCP_FASTOPEN_KEY:
1345 case TCP_FASTOPEN_NO_COOKIE:
1346 /* option not supported by SMC */
1347 if (sk->sk_state == SMC_INIT) {
1348 smc->use_fallback = true;
1349 } else {
1350 if (!smc->use_fallback)
1351 rc = -EINVAL;
1352 }
1353 break;
1354 case TCP_NODELAY:
1355 if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
1356 if (val)
1357 mod_delayed_work(system_wq, &smc->conn.tx_work,
1358 0);
1359 }
1360 break;
1361 case TCP_CORK:
1362 if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
1363 if (!val)
1364 mod_delayed_work(system_wq, &smc->conn.tx_work,
1365 0);
1366 }
1367 break;
1368 case TCP_DEFER_ACCEPT:
1369 smc->sockopt_defer_accept = val;
1370 break;
1371 default:
1372 break;
1373 }
1374 release_sock(sk);
1375
1376 return rc;
1285} 1377}
1286 1378
1287static int smc_getsockopt(struct socket *sock, int level, int optname, 1379static int smc_getsockopt(struct socket *sock, int level, int optname,
@@ -1299,12 +1391,38 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
1299 unsigned long arg) 1391 unsigned long arg)
1300{ 1392{
1301 struct smc_sock *smc; 1393 struct smc_sock *smc;
1394 int answ;
1302 1395
1303 smc = smc_sk(sock->sk); 1396 smc = smc_sk(sock->sk);
1304 if (smc->use_fallback) 1397 if (smc->use_fallback) {
1398 if (!smc->clcsock)
1399 return -EBADF;
1305 return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg); 1400 return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
1306 else 1401 }
1307 return sock_no_ioctl(sock, cmd, arg); 1402 switch (cmd) {
1403 case SIOCINQ: /* same as FIONREAD */
1404 if (smc->sk.sk_state == SMC_LISTEN)
1405 return -EINVAL;
1406 answ = atomic_read(&smc->conn.bytes_to_rcv);
1407 break;
1408 case SIOCOUTQ:
1409 /* output queue size (not send + not acked) */
1410 if (smc->sk.sk_state == SMC_LISTEN)
1411 return -EINVAL;
1412 answ = smc->conn.sndbuf_size -
1413 atomic_read(&smc->conn.sndbuf_space);
1414 break;
1415 case SIOCOUTQNSD:
1416 /* output queue size (not send only) */
1417 if (smc->sk.sk_state == SMC_LISTEN)
1418 return -EINVAL;
1419 answ = smc_tx_prepared_sends(&smc->conn);
1420 break;
1421 default:
1422 return -ENOIOCTLCMD;
1423 }
1424
1425 return put_user(answ, (int __user *)arg);
1308} 1426}
1309 1427
1310static ssize_t smc_sendpage(struct socket *sock, struct page *page, 1428static ssize_t smc_sendpage(struct socket *sock, struct page *page,
@@ -1316,8 +1434,11 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,
1316 1434
1317 smc = smc_sk(sk); 1435 smc = smc_sk(sk);
1318 lock_sock(sk); 1436 lock_sock(sk);
1319 if (sk->sk_state != SMC_ACTIVE) 1437 if (sk->sk_state != SMC_ACTIVE) {
1438 release_sock(sk);
1320 goto out; 1439 goto out;
1440 }
1441 release_sock(sk);
1321 if (smc->use_fallback) 1442 if (smc->use_fallback)
1322 rc = kernel_sendpage(smc->clcsock, page, offset, 1443 rc = kernel_sendpage(smc->clcsock, page, offset,
1323 size, flags); 1444 size, flags);
@@ -1325,13 +1446,18 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,
1325 rc = sock_no_sendpage(sock, page, offset, size, flags); 1446 rc = sock_no_sendpage(sock, page, offset, size, flags);
1326 1447
1327out: 1448out:
1328 release_sock(sk);
1329 return rc; 1449 return rc;
1330} 1450}
1331 1451
1452/* Map the affected portions of the rmbe into an spd, note the number of bytes
1453 * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
1454 * updates till whenever a respective page has been fully processed.
1455 * Note that subsequent recv() calls have to wait till all splice() processing
1456 * completed.
1457 */
1332static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos, 1458static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
1333 struct pipe_inode_info *pipe, size_t len, 1459 struct pipe_inode_info *pipe, size_t len,
1334 unsigned int flags) 1460 unsigned int flags)
1335{ 1461{
1336 struct sock *sk = sock->sk; 1462 struct sock *sk = sock->sk;
1337 struct smc_sock *smc; 1463 struct smc_sock *smc;
@@ -1339,16 +1465,34 @@ static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
1339 1465
1340 smc = smc_sk(sk); 1466 smc = smc_sk(sk);
1341 lock_sock(sk); 1467 lock_sock(sk);
1342 if ((sk->sk_state != SMC_ACTIVE) && (sk->sk_state != SMC_CLOSED)) 1468
1469 if (sk->sk_state == SMC_INIT ||
1470 sk->sk_state == SMC_LISTEN ||
1471 sk->sk_state == SMC_CLOSED)
1472 goto out;
1473
1474 if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
1475 rc = 0;
1343 goto out; 1476 goto out;
1477 }
1478
1344 if (smc->use_fallback) { 1479 if (smc->use_fallback) {
1345 rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos, 1480 rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
1346 pipe, len, flags); 1481 pipe, len, flags);
1347 } else { 1482 } else {
1348 rc = -EOPNOTSUPP; 1483 if (*ppos) {
1484 rc = -ESPIPE;
1485 goto out;
1486 }
1487 if (flags & SPLICE_F_NONBLOCK)
1488 flags = MSG_DONTWAIT;
1489 else
1490 flags = 0;
1491 rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
1349 } 1492 }
1350out: 1493out:
1351 release_sock(sk); 1494 release_sock(sk);
1495
1352 return rc; 1496 return rc;
1353} 1497}
1354 1498
diff --git a/net/smc/smc.h b/net/smc/smc.h
index e4829a2f46ba..ec209cd48d42 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -164,6 +164,9 @@ struct smc_connection {
164 atomic_t bytes_to_rcv; /* arrived data, 164 atomic_t bytes_to_rcv; /* arrived data,
165 * not yet received 165 * not yet received
166 */ 166 */
167 atomic_t splice_pending; /* number of spliced bytes
168 * pending processing
169 */
167#ifndef KERNEL_HAS_ATOMIC64 170#ifndef KERNEL_HAS_ATOMIC64
168 spinlock_t acurs_lock; /* protect cursors */ 171 spinlock_t acurs_lock; /* protect cursors */
169#endif 172#endif
@@ -180,6 +183,10 @@ struct smc_sock { /* smc sock container */
180 struct list_head accept_q; /* sockets to be accepted */ 183 struct list_head accept_q; /* sockets to be accepted */
181 spinlock_t accept_q_lock; /* protects accept_q */ 184 spinlock_t accept_q_lock; /* protects accept_q */
182 bool use_fallback; /* fallback to tcp */ 185 bool use_fallback; /* fallback to tcp */
186 int sockopt_defer_accept;
187 /* sockopt TCP_DEFER_ACCEPT
188 * value
189 */
183 u8 wait_close_tx_prepared : 1; 190 u8 wait_close_tx_prepared : 1;
184 /* shutdown wr or close 191 /* shutdown wr or close
185 * started, waiting for unsent 192 * started, waiting for unsent
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index b42395d24cba..42ad57365eca 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -82,7 +82,7 @@ static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
82 sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE, 82 sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE,
83 "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)"); 83 "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)");
84 BUILD_BUG_ON_MSG( 84 BUILD_BUG_ON_MSG(
85 offsetof(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE, 85 sizeof(struct smc_cdc_msg) != SMC_WR_TX_SIZE,
86 "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()"); 86 "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
87 BUILD_BUG_ON_MSG( 87 BUILD_BUG_ON_MSG(
88 sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE, 88 sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
index ab240b37ad11..d2012fd22100 100644
--- a/net/smc/smc_cdc.h
+++ b/net/smc/smc_cdc.h
@@ -48,7 +48,7 @@ struct smc_cdc_msg {
48 struct smc_cdc_producer_flags prod_flags; 48 struct smc_cdc_producer_flags prod_flags;
49 struct smc_cdc_conn_state_flags conn_state_flags; 49 struct smc_cdc_conn_state_flags conn_state_flags;
50 u8 reserved[18]; 50 u8 reserved[18];
51} __aligned(8); 51} __packed; /* format defined in RFC7609 */
52 52
53static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn) 53static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn)
54{ 54{
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index f44f6803f7ff..9c74844e2008 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -32,6 +32,9 @@
32 32
33static u32 smc_lgr_num; /* unique link group number */ 33static u32 smc_lgr_num; /* unique link group number */
34 34
35static void smc_buf_free(struct smc_buf_desc *buf_desc, struct smc_link *lnk,
36 bool is_rmb);
37
35static void smc_lgr_schedule_free_work(struct smc_link_group *lgr) 38static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
36{ 39{
37 /* client link group creation always follows the server link group 40 /* client link group creation always follows the server link group
@@ -234,9 +237,22 @@ static void smc_buf_unuse(struct smc_connection *conn)
234 conn->sndbuf_size = 0; 237 conn->sndbuf_size = 0;
235 } 238 }
236 if (conn->rmb_desc) { 239 if (conn->rmb_desc) {
237 conn->rmb_desc->reused = true; 240 if (!conn->rmb_desc->regerr) {
238 conn->rmb_desc->used = 0; 241 conn->rmb_desc->reused = 1;
239 conn->rmbe_size = 0; 242 conn->rmb_desc->used = 0;
243 conn->rmbe_size = 0;
244 } else {
245 /* buf registration failed, reuse not possible */
246 struct smc_link_group *lgr = conn->lgr;
247 struct smc_link *lnk;
248
249 write_lock_bh(&lgr->rmbs_lock);
250 list_del(&conn->rmb_desc->list);
251 write_unlock_bh(&lgr->rmbs_lock);
252
253 lnk = &lgr->lnk[SMC_SINGLE_LINK];
254 smc_buf_free(conn->rmb_desc, lnk, true);
255 }
240 } 256 }
241} 257}
242 258
@@ -274,8 +290,8 @@ static void smc_buf_free(struct smc_buf_desc *buf_desc, struct smc_link *lnk,
274 DMA_TO_DEVICE); 290 DMA_TO_DEVICE);
275 } 291 }
276 sg_free_table(&buf_desc->sgt[SMC_SINGLE_LINK]); 292 sg_free_table(&buf_desc->sgt[SMC_SINGLE_LINK]);
277 if (buf_desc->cpu_addr) 293 if (buf_desc->pages)
278 free_pages((unsigned long)buf_desc->cpu_addr, buf_desc->order); 294 __free_pages(buf_desc->pages, buf_desc->order);
279 kfree(buf_desc); 295 kfree(buf_desc);
280} 296}
281 297
@@ -310,6 +326,7 @@ static void smc_lgr_free_bufs(struct smc_link_group *lgr)
310/* remove a link group */ 326/* remove a link group */
311void smc_lgr_free(struct smc_link_group *lgr) 327void smc_lgr_free(struct smc_link_group *lgr)
312{ 328{
329 smc_llc_link_flush(&lgr->lnk[SMC_SINGLE_LINK]);
313 smc_lgr_free_bufs(lgr); 330 smc_lgr_free_bufs(lgr);
314 smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]); 331 smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
315 kfree(lgr); 332 kfree(lgr);
@@ -332,6 +349,7 @@ void smc_lgr_terminate(struct smc_link_group *lgr)
332 struct rb_node *node; 349 struct rb_node *node;
333 350
334 smc_lgr_forget(lgr); 351 smc_lgr_forget(lgr);
352 smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
335 353
336 write_lock_bh(&lgr->conns_lock); 354 write_lock_bh(&lgr->conns_lock);
337 node = rb_first(&lgr->conns_all); 355 node = rb_first(&lgr->conns_all);
@@ -358,7 +376,8 @@ void smc_lgr_terminate(struct smc_link_group *lgr)
358static int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id) 376static int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id)
359{ 377{
360 struct dst_entry *dst = sk_dst_get(clcsock->sk); 378 struct dst_entry *dst = sk_dst_get(clcsock->sk);
361 int rc = 0; 379 struct net_device *ndev;
380 int i, nest_lvl, rc = 0;
362 381
363 *vlan_id = 0; 382 *vlan_id = 0;
364 if (!dst) { 383 if (!dst) {
@@ -370,8 +389,27 @@ static int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id)
370 goto out_rel; 389 goto out_rel;
371 } 390 }
372 391
373 if (is_vlan_dev(dst->dev)) 392 ndev = dst->dev;
374 *vlan_id = vlan_dev_vlan_id(dst->dev); 393 if (is_vlan_dev(ndev)) {
394 *vlan_id = vlan_dev_vlan_id(ndev);
395 goto out_rel;
396 }
397
398 rtnl_lock();
399 nest_lvl = dev_get_nest_level(ndev);
400 for (i = 0; i < nest_lvl; i++) {
401 struct list_head *lower = &ndev->adj_list.lower;
402
403 if (list_empty(lower))
404 break;
405 lower = lower->next;
406 ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
407 if (is_vlan_dev(ndev)) {
408 *vlan_id = vlan_dev_vlan_id(ndev);
409 break;
410 }
411 }
412 rtnl_unlock();
375 413
376out_rel: 414out_rel:
377 dst_release(dst); 415 dst_release(dst);
@@ -528,16 +566,16 @@ static struct smc_buf_desc *smc_new_buf_create(struct smc_link_group *lgr,
528 if (!buf_desc) 566 if (!buf_desc)
529 return ERR_PTR(-ENOMEM); 567 return ERR_PTR(-ENOMEM);
530 568
531 buf_desc->cpu_addr = 569 buf_desc->order = get_order(bufsize);
532 (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | 570 buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
533 __GFP_NOMEMALLOC | 571 __GFP_NOMEMALLOC | __GFP_COMP |
534 __GFP_NORETRY | __GFP_ZERO, 572 __GFP_NORETRY | __GFP_ZERO,
535 get_order(bufsize)); 573 buf_desc->order);
536 if (!buf_desc->cpu_addr) { 574 if (!buf_desc->pages) {
537 kfree(buf_desc); 575 kfree(buf_desc);
538 return ERR_PTR(-EAGAIN); 576 return ERR_PTR(-EAGAIN);
539 } 577 }
540 buf_desc->order = get_order(bufsize); 578 buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);
541 579
542 /* build the sg table from the pages */ 580 /* build the sg table from the pages */
543 lnk = &lgr->lnk[SMC_SINGLE_LINK]; 581 lnk = &lgr->lnk[SMC_SINGLE_LINK];
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 07e2a393e6d9..fca8624e5e71 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -79,6 +79,7 @@ struct smc_link {
79 dma_addr_t wr_rx_dma_addr; /* DMA address of wr_rx_bufs */ 79 dma_addr_t wr_rx_dma_addr; /* DMA address of wr_rx_bufs */
80 u64 wr_rx_id; /* seq # of last recv WR */ 80 u64 wr_rx_id; /* seq # of last recv WR */
81 u32 wr_rx_cnt; /* number of WR recv buffers */ 81 u32 wr_rx_cnt; /* number of WR recv buffers */
82 unsigned long wr_rx_tstamp; /* jiffies when last buf rx */
82 83
83 struct ib_reg_wr wr_reg; /* WR register memory region */ 84 struct ib_reg_wr wr_reg; /* WR register memory region */
84 wait_queue_head_t wr_reg_wait; /* wait for wr_reg result */ 85 wait_queue_head_t wr_reg_wait; /* wait for wr_reg result */
@@ -101,6 +102,9 @@ struct smc_link {
101 int llc_confirm_resp_rc; /* rc from conf_resp msg */ 102 int llc_confirm_resp_rc; /* rc from conf_resp msg */
102 struct completion llc_add; /* wait for rx of add link */ 103 struct completion llc_add; /* wait for rx of add link */
103 struct completion llc_add_resp; /* wait for rx of add link rsp*/ 104 struct completion llc_add_resp; /* wait for rx of add link rsp*/
105 struct delayed_work llc_testlink_wrk; /* testlink worker */
106 struct completion llc_testlink_resp; /* wait for rx of testlink */
107 int llc_testlink_time; /* testlink interval */
104}; 108};
105 109
106/* For now we just allow one parallel link per link group. The SMC protocol 110/* For now we just allow one parallel link per link group. The SMC protocol
@@ -116,6 +120,7 @@ struct smc_link {
116struct smc_buf_desc { 120struct smc_buf_desc {
117 struct list_head list; 121 struct list_head list;
118 void *cpu_addr; /* virtual address of buffer */ 122 void *cpu_addr; /* virtual address of buffer */
123 struct page *pages;
119 struct sg_table sgt[SMC_LINKS_PER_LGR_MAX];/* virtual buffer */ 124 struct sg_table sgt[SMC_LINKS_PER_LGR_MAX];/* virtual buffer */
120 struct ib_mr *mr_rx[SMC_LINKS_PER_LGR_MAX]; 125 struct ib_mr *mr_rx[SMC_LINKS_PER_LGR_MAX];
121 /* for rmb only: memory region 126 /* for rmb only: memory region
@@ -123,7 +128,8 @@ struct smc_buf_desc {
123 */ 128 */
124 u32 order; /* allocation order */ 129 u32 order; /* allocation order */
125 u32 used; /* currently used / unused */ 130 u32 used; /* currently used / unused */
126 bool reused; /* new created / reused */ 131 u8 reused : 1; /* new created / reused */
132 u8 regerr : 1; /* err during registration */
127}; 133};
128 134
129struct smc_rtoken { /* address/key of remote RMB */ 135struct smc_rtoken { /* address/key of remote RMB */
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index 427b91c1c964..05dd7e6d314d 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -38,17 +38,27 @@ static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
38{ 38{
39 struct smc_sock *smc = smc_sk(sk); 39 struct smc_sock *smc = smc_sk(sk);
40 40
41 r->diag_family = sk->sk_family;
42 if (!smc->clcsock) 41 if (!smc->clcsock)
43 return; 42 return;
44 r->id.idiag_sport = htons(smc->clcsock->sk->sk_num); 43 r->id.idiag_sport = htons(smc->clcsock->sk->sk_num);
45 r->id.idiag_dport = smc->clcsock->sk->sk_dport; 44 r->id.idiag_dport = smc->clcsock->sk->sk_dport;
46 r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if; 45 r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if;
47 sock_diag_save_cookie(sk, r->id.idiag_cookie); 46 sock_diag_save_cookie(sk, r->id.idiag_cookie);
48 memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); 47 if (sk->sk_protocol == SMCPROTO_SMC) {
49 memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); 48 r->diag_family = PF_INET;
50 r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr; 49 memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
51 r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr; 50 memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
51 r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr;
52 r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr;
53#if IS_ENABLED(CONFIG_IPV6)
54 } else if (sk->sk_protocol == SMCPROTO_SMC6) {
55 r->diag_family = PF_INET6;
56 memcpy(&r->id.idiag_src, &smc->clcsock->sk->sk_v6_rcv_saddr,
57 sizeof(smc->clcsock->sk->sk_v6_rcv_saddr));
58 memcpy(&r->id.idiag_dst, &smc->clcsock->sk->sk_v6_daddr,
59 sizeof(smc->clcsock->sk->sk_v6_daddr));
60#endif
61 }
52} 62}
53 63
54static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, 64static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
@@ -153,7 +163,8 @@ errout:
153 return -EMSGSIZE; 163 return -EMSGSIZE;
154} 164}
155 165
156static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) 166static int smc_diag_dump_proto(struct proto *prot, struct sk_buff *skb,
167 struct netlink_callback *cb)
157{ 168{
158 struct net *net = sock_net(skb->sk); 169 struct net *net = sock_net(skb->sk);
159 struct nlattr *bc = NULL; 170 struct nlattr *bc = NULL;
@@ -161,8 +172,8 @@ static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
161 struct sock *sk; 172 struct sock *sk;
162 int rc = 0; 173 int rc = 0;
163 174
164 read_lock(&smc_proto.h.smc_hash->lock); 175 read_lock(&prot->h.smc_hash->lock);
165 head = &smc_proto.h.smc_hash->ht; 176 head = &prot->h.smc_hash->ht;
166 if (hlist_empty(head)) 177 if (hlist_empty(head))
167 goto out; 178 goto out;
168 179
@@ -175,7 +186,17 @@ static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
175 } 186 }
176 187
177out: 188out:
178 read_unlock(&smc_proto.h.smc_hash->lock); 189 read_unlock(&prot->h.smc_hash->lock);
190 return rc;
191}
192
193static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
194{
195 int rc = 0;
196
197 rc = smc_diag_dump_proto(&smc_proto, skb, cb);
198 if (!rc)
199 rc = smc_diag_dump_proto(&smc_proto6, skb, cb);
179 return rc; 200 return rc;
180} 201}
181 202
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index ea4b21981b4b..33b4d856f4c6 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -397,7 +397,8 @@ static void smc_llc_rx_test_link(struct smc_link *link,
397 struct smc_llc_msg_test_link *llc) 397 struct smc_llc_msg_test_link *llc)
398{ 398{
399 if (llc->hd.flags & SMC_LLC_FLAG_RESP) { 399 if (llc->hd.flags & SMC_LLC_FLAG_RESP) {
400 /* unused as long as we don't send this type of msg */ 400 if (link->state == SMC_LNK_ACTIVE)
401 complete(&link->llc_testlink_resp);
401 } else { 402 } else {
402 smc_llc_send_test_link(link, llc->user_data, SMC_LLC_RESP); 403 smc_llc_send_test_link(link, llc->user_data, SMC_LLC_RESP);
403 } 404 }
@@ -502,6 +503,65 @@ static void smc_llc_rx_handler(struct ib_wc *wc, void *buf)
502 } 503 }
503} 504}
504 505
506/***************************** worker ****************************************/
507
508static void smc_llc_testlink_work(struct work_struct *work)
509{
510 struct smc_link *link = container_of(to_delayed_work(work),
511 struct smc_link, llc_testlink_wrk);
512 unsigned long next_interval;
513 struct smc_link_group *lgr;
514 unsigned long expire_time;
515 u8 user_data[16] = { 0 };
516 int rc;
517
518 lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]);
519 if (link->state != SMC_LNK_ACTIVE)
520 return; /* don't reschedule worker */
521 expire_time = link->wr_rx_tstamp + link->llc_testlink_time;
522 if (time_is_after_jiffies(expire_time)) {
523 next_interval = expire_time - jiffies;
524 goto out;
525 }
526 reinit_completion(&link->llc_testlink_resp);
527 smc_llc_send_test_link(link, user_data, SMC_LLC_REQ);
528 /* receive TEST LINK response over RoCE fabric */
529 rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
530 SMC_LLC_WAIT_TIME);
531 if (rc <= 0) {
532 smc_lgr_terminate(lgr);
533 return;
534 }
535 next_interval = link->llc_testlink_time;
536out:
537 schedule_delayed_work(&link->llc_testlink_wrk, next_interval);
538}
539
540void smc_llc_link_active(struct smc_link *link, int testlink_time)
541{
542 init_completion(&link->llc_testlink_resp);
543 INIT_DELAYED_WORK(&link->llc_testlink_wrk, smc_llc_testlink_work);
544 link->state = SMC_LNK_ACTIVE;
545 if (testlink_time) {
546 link->llc_testlink_time = testlink_time * HZ;
547 schedule_delayed_work(&link->llc_testlink_wrk,
548 link->llc_testlink_time);
549 }
550}
551
552/* called in tasklet context */
553void smc_llc_link_inactive(struct smc_link *link)
554{
555 link->state = SMC_LNK_INACTIVE;
556 cancel_delayed_work(&link->llc_testlink_wrk);
557}
558
559/* called in worker context */
560void smc_llc_link_flush(struct smc_link *link)
561{
562 cancel_delayed_work_sync(&link->llc_testlink_wrk);
563}
564
505/***************************** init, exit, misc ******************************/ 565/***************************** init, exit, misc ******************************/
506 566
507static struct smc_wr_rx_handler smc_llc_rx_handlers[] = { 567static struct smc_wr_rx_handler smc_llc_rx_handlers[] = {
diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h
index e4a7d5e234d5..d6e42116485e 100644
--- a/net/smc/smc_llc.h
+++ b/net/smc/smc_llc.h
@@ -44,6 +44,9 @@ int smc_llc_send_delete_link(struct smc_link *link,
44 enum smc_llc_reqresp reqresp); 44 enum smc_llc_reqresp reqresp);
45int smc_llc_send_test_link(struct smc_link *lnk, u8 user_data[16], 45int smc_llc_send_test_link(struct smc_link *lnk, u8 user_data[16],
46 enum smc_llc_reqresp reqresp); 46 enum smc_llc_reqresp reqresp);
47void smc_llc_link_active(struct smc_link *link, int testlink_time);
48void smc_llc_link_inactive(struct smc_link *link);
49void smc_llc_link_flush(struct smc_link *link);
47int smc_llc_init(void) __init; 50int smc_llc_init(void) __init;
48 51
49#endif /* SMC_LLC_H */ 52#endif /* SMC_LLC_H */
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index eff4e0d0bb31..ed45569289f5 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -22,11 +22,10 @@
22#include "smc_tx.h" /* smc_tx_consumer_update() */ 22#include "smc_tx.h" /* smc_tx_consumer_update() */
23#include "smc_rx.h" 23#include "smc_rx.h"
24 24
25/* callback implementation for sk.sk_data_ready() 25/* callback implementation to wakeup consumers blocked with smc_rx_wait().
26 * to wakeup rcvbuf consumers that blocked with smc_rx_wait_data().
27 * indirectly called by smc_cdc_msg_recv_action(). 26 * indirectly called by smc_cdc_msg_recv_action().
28 */ 27 */
29static void smc_rx_data_ready(struct sock *sk) 28static void smc_rx_wake_up(struct sock *sk)
30{ 29{
31 struct socket_wq *wq; 30 struct socket_wq *wq;
32 31
@@ -44,28 +43,140 @@ static void smc_rx_data_ready(struct sock *sk)
44 rcu_read_unlock(); 43 rcu_read_unlock();
45} 44}
46 45
46/* Update consumer cursor
47 * @conn connection to update
48 * @cons consumer cursor
49 * @len number of Bytes consumed
50 */
51static void smc_rx_update_consumer(struct smc_connection *conn,
52 union smc_host_cursor cons, size_t len)
53{
54 smc_curs_add(conn->rmbe_size, &cons, len);
55 smc_curs_write(&conn->local_tx_ctrl.cons, smc_curs_read(&cons, conn),
56 conn);
57 /* send consumer cursor update if required */
58 /* similar to advertising new TCP rcv_wnd if required */
59 smc_tx_consumer_update(conn);
60}
61
62struct smc_spd_priv {
63 struct smc_sock *smc;
64 size_t len;
65};
66
67static void smc_rx_pipe_buf_release(struct pipe_inode_info *pipe,
68 struct pipe_buffer *buf)
69{
70 struct smc_spd_priv *priv = (struct smc_spd_priv *)buf->private;
71 struct smc_sock *smc = priv->smc;
72 struct smc_connection *conn;
73 union smc_host_cursor cons;
74 struct sock *sk = &smc->sk;
75
76 if (sk->sk_state == SMC_CLOSED ||
77 sk->sk_state == SMC_PEERFINCLOSEWAIT ||
78 sk->sk_state == SMC_APPFINCLOSEWAIT)
79 goto out;
80 conn = &smc->conn;
81 lock_sock(sk);
82 smc_curs_write(&cons, smc_curs_read(&conn->local_tx_ctrl.cons, conn),
83 conn);
84 smc_rx_update_consumer(conn, cons, priv->len);
85 release_sock(sk);
86 if (atomic_sub_and_test(priv->len, &conn->splice_pending))
87 smc_rx_wake_up(sk);
88out:
89 kfree(priv);
90 put_page(buf->page);
91 sock_put(sk);
92}
93
94static int smc_rx_pipe_buf_nosteal(struct pipe_inode_info *pipe,
95 struct pipe_buffer *buf)
96{
97 return 1;
98}
99
100static const struct pipe_buf_operations smc_pipe_ops = {
101 .can_merge = 0,
102 .confirm = generic_pipe_buf_confirm,
103 .release = smc_rx_pipe_buf_release,
104 .steal = smc_rx_pipe_buf_nosteal,
105 .get = generic_pipe_buf_get
106};
107
108static void smc_rx_spd_release(struct splice_pipe_desc *spd,
109 unsigned int i)
110{
111 put_page(spd->pages[i]);
112}
113
114static int smc_rx_splice(struct pipe_inode_info *pipe, char *src, size_t len,
115 struct smc_sock *smc)
116{
117 struct splice_pipe_desc spd;
118 struct partial_page partial;
119 struct smc_spd_priv *priv;
120 struct page *page;
121 int bytes;
122
123 page = virt_to_page(smc->conn.rmb_desc->cpu_addr);
124 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
125 if (!priv)
126 return -ENOMEM;
127 priv->len = len;
128 priv->smc = smc;
129 partial.offset = src - (char *)smc->conn.rmb_desc->cpu_addr;
130 partial.len = len;
131 partial.private = (unsigned long)priv;
132
133 spd.nr_pages_max = 1;
134 spd.nr_pages = 1;
135 spd.pages = &page;
136 spd.partial = &partial;
137 spd.ops = &smc_pipe_ops;
138 spd.spd_release = smc_rx_spd_release;
139
140 bytes = splice_to_pipe(pipe, &spd);
141 if (bytes > 0) {
142 sock_hold(&smc->sk);
143 get_page(smc->conn.rmb_desc->pages);
144 atomic_add(bytes, &smc->conn.splice_pending);
145 }
146
147 return bytes;
148}
149
150static int smc_rx_data_available_and_no_splice_pend(struct smc_connection *conn)
151{
152 return atomic_read(&conn->bytes_to_rcv) &&
153 !atomic_read(&conn->splice_pending);
154}
155
47/* blocks rcvbuf consumer until >=len bytes available or timeout or interrupted 156/* blocks rcvbuf consumer until >=len bytes available or timeout or interrupted
48 * @smc smc socket 157 * @smc smc socket
49 * @timeo pointer to max seconds to wait, pointer to value 0 for no timeout 158 * @timeo pointer to max seconds to wait, pointer to value 0 for no timeout
159 * @fcrit add'l criterion to evaluate as function pointer
50 * Returns: 160 * Returns:
51 * 1 if at least 1 byte available in rcvbuf or if socket error/shutdown. 161 * 1 if at least 1 byte available in rcvbuf or if socket error/shutdown.
52 * 0 otherwise (nothing in rcvbuf nor timeout, e.g. interrupted). 162 * 0 otherwise (nothing in rcvbuf nor timeout, e.g. interrupted).
53 */ 163 */
54static int smc_rx_wait_data(struct smc_sock *smc, long *timeo) 164int smc_rx_wait(struct smc_sock *smc, long *timeo,
165 int (*fcrit)(struct smc_connection *conn))
55{ 166{
56 DEFINE_WAIT_FUNC(wait, woken_wake_function); 167 DEFINE_WAIT_FUNC(wait, woken_wake_function);
57 struct smc_connection *conn = &smc->conn; 168 struct smc_connection *conn = &smc->conn;
58 struct sock *sk = &smc->sk; 169 struct sock *sk = &smc->sk;
59 int rc; 170 int rc;
60 171
61 if (atomic_read(&conn->bytes_to_rcv)) 172 if (fcrit(conn))
62 return 1; 173 return 1;
63 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 174 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
64 add_wait_queue(sk_sleep(sk), &wait); 175 add_wait_queue(sk_sleep(sk), &wait);
65 rc = sk_wait_event(sk, timeo, 176 rc = sk_wait_event(sk, timeo,
66 sk->sk_err || 177 sk->sk_err ||
67 sk->sk_shutdown & RCV_SHUTDOWN || 178 sk->sk_shutdown & RCV_SHUTDOWN ||
68 atomic_read(&conn->bytes_to_rcv) || 179 fcrit(conn) ||
69 smc_cdc_rxed_any_close_or_senddone(conn), 180 smc_cdc_rxed_any_close_or_senddone(conn),
70 &wait); 181 &wait);
71 remove_wait_queue(sk_sleep(sk), &wait); 182 remove_wait_queue(sk_sleep(sk), &wait);
@@ -73,19 +184,25 @@ static int smc_rx_wait_data(struct smc_sock *smc, long *timeo)
73 return rc; 184 return rc;
74} 185}
75 186
76/* rcvbuf consumer: main API called by socket layer. 187/* smc_rx_recvmsg - receive data from RMBE
77 * called under sk lock. 188 * @msg: copy data to receive buffer
189 * @pipe: copy data to pipe if set - indicates splice() call
190 *
191 * rcvbuf consumer: main API called by socket layer.
192 * Called under sk lock.
78 */ 193 */
79int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len, 194int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
80 int flags) 195 struct pipe_inode_info *pipe, size_t len, int flags)
81{ 196{
82 size_t copylen, read_done = 0, read_remaining = len; 197 size_t copylen, read_done = 0, read_remaining = len;
83 size_t chunk_len, chunk_off, chunk_len_sum; 198 size_t chunk_len, chunk_off, chunk_len_sum;
84 struct smc_connection *conn = &smc->conn; 199 struct smc_connection *conn = &smc->conn;
200 int (*func)(struct smc_connection *conn);
85 union smc_host_cursor cons; 201 union smc_host_cursor cons;
86 int readable, chunk; 202 int readable, chunk;
87 char *rcvbuf_base; 203 char *rcvbuf_base;
88 struct sock *sk; 204 struct sock *sk;
205 int splbytes;
89 long timeo; 206 long timeo;
90 int target; /* Read at least these many bytes */ 207 int target; /* Read at least these many bytes */
91 int rc; 208 int rc;
@@ -101,37 +218,32 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len,
101 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 218 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
102 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 219 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
103 220
104 msg->msg_namelen = 0;
105 /* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */ 221 /* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */
106 rcvbuf_base = conn->rmb_desc->cpu_addr; 222 rcvbuf_base = conn->rmb_desc->cpu_addr;
107 223
108 do { /* while (read_remaining) */ 224 do { /* while (read_remaining) */
109 if (read_done >= target) 225 if (read_done >= target || (pipe && read_done))
110 break; 226 break;
111 227
112 if (atomic_read(&conn->bytes_to_rcv)) 228 if (atomic_read(&conn->bytes_to_rcv))
113 goto copy; 229 goto copy;
114 230
231 if (sk->sk_shutdown & RCV_SHUTDOWN ||
232 smc_cdc_rxed_any_close_or_senddone(conn) ||
233 conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
234 break;
235
115 if (read_done) { 236 if (read_done) {
116 if (sk->sk_err || 237 if (sk->sk_err ||
117 sk->sk_state == SMC_CLOSED || 238 sk->sk_state == SMC_CLOSED ||
118 sk->sk_shutdown & RCV_SHUTDOWN ||
119 !timeo || 239 !timeo ||
120 signal_pending(current) || 240 signal_pending(current))
121 smc_cdc_rxed_any_close_or_senddone(conn) ||
122 conn->local_tx_ctrl.conn_state_flags.
123 peer_conn_abort)
124 break; 241 break;
125 } else { 242 } else {
126 if (sk->sk_err) { 243 if (sk->sk_err) {
127 read_done = sock_error(sk); 244 read_done = sock_error(sk);
128 break; 245 break;
129 } 246 }
130 if (sk->sk_shutdown & RCV_SHUTDOWN ||
131 smc_cdc_rxed_any_close_or_senddone(conn) ||
132 conn->local_tx_ctrl.conn_state_flags.
133 peer_conn_abort)
134 break;
135 if (sk->sk_state == SMC_CLOSED) { 247 if (sk->sk_state == SMC_CLOSED) {
136 if (!sock_flag(sk, SOCK_DONE)) { 248 if (!sock_flag(sk, SOCK_DONE)) {
137 /* This occurs when user tries to read 249 /* This occurs when user tries to read
@@ -150,20 +262,33 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len,
150 return -EAGAIN; 262 return -EAGAIN;
151 } 263 }
152 264
153 if (!atomic_read(&conn->bytes_to_rcv)) { 265 if (!smc_rx_data_available(conn)) {
154 smc_rx_wait_data(smc, &timeo); 266 smc_rx_wait(smc, &timeo, smc_rx_data_available);
155 continue; 267 continue;
156 } 268 }
157 269
158copy: 270copy:
159 /* initialize variables for 1st iteration of subsequent loop */ 271 /* initialize variables for 1st iteration of subsequent loop */
160 /* could be just 1 byte, even after smc_rx_wait_data above */ 272 /* could be just 1 byte, even after waiting on data above */
161 readable = atomic_read(&conn->bytes_to_rcv); 273 readable = atomic_read(&conn->bytes_to_rcv);
274 splbytes = atomic_read(&conn->splice_pending);
275 if (!readable || (msg && splbytes)) {
276 if (splbytes)
277 func = smc_rx_data_available_and_no_splice_pend;
278 else
279 func = smc_rx_data_available;
280 smc_rx_wait(smc, &timeo, func);
281 continue;
282 }
283
162 /* not more than what user space asked for */ 284 /* not more than what user space asked for */
163 copylen = min_t(size_t, read_remaining, readable); 285 copylen = min_t(size_t, read_remaining, readable);
164 smc_curs_write(&cons, 286 smc_curs_write(&cons,
165 smc_curs_read(&conn->local_tx_ctrl.cons, conn), 287 smc_curs_read(&conn->local_tx_ctrl.cons, conn),
166 conn); 288 conn);
289 /* subsequent splice() calls pick up where previous left */
290 if (splbytes)
291 smc_curs_add(conn->rmbe_size, &cons, splbytes);
167 /* determine chunks where to read from rcvbuf */ 292 /* determine chunks where to read from rcvbuf */
168 /* either unwrapped case, or 1st chunk of wrapped case */ 293 /* either unwrapped case, or 1st chunk of wrapped case */
169 chunk_len = min_t(size_t, 294 chunk_len = min_t(size_t,
@@ -173,9 +298,16 @@ copy:
173 smc_rmb_sync_sg_for_cpu(conn); 298 smc_rmb_sync_sg_for_cpu(conn);
174 for (chunk = 0; chunk < 2; chunk++) { 299 for (chunk = 0; chunk < 2; chunk++) {
175 if (!(flags & MSG_TRUNC)) { 300 if (!(flags & MSG_TRUNC)) {
176 rc = memcpy_to_msg(msg, rcvbuf_base + chunk_off, 301 if (msg) {
177 chunk_len); 302 rc = memcpy_to_msg(msg, rcvbuf_base +
178 if (rc) { 303 chunk_off,
304 chunk_len);
305 } else {
306 rc = smc_rx_splice(pipe, rcvbuf_base +
307 chunk_off, chunk_len,
308 smc);
309 }
310 if (rc < 0) {
179 if (!read_done) 311 if (!read_done)
180 read_done = -EFAULT; 312 read_done = -EFAULT;
181 smc_rmb_sync_sg_for_device(conn); 313 smc_rmb_sync_sg_for_device(conn);
@@ -196,18 +328,13 @@ copy:
196 328
197 /* update cursors */ 329 /* update cursors */
198 if (!(flags & MSG_PEEK)) { 330 if (!(flags & MSG_PEEK)) {
199 smc_curs_add(conn->rmbe_size, &cons, copylen);
200 /* increased in recv tasklet smc_cdc_msg_rcv() */ 331 /* increased in recv tasklet smc_cdc_msg_rcv() */
201 smp_mb__before_atomic(); 332 smp_mb__before_atomic();
202 atomic_sub(copylen, &conn->bytes_to_rcv); 333 atomic_sub(copylen, &conn->bytes_to_rcv);
203 /* guarantee 0 <= bytes_to_rcv <= rmbe_size */ 334 /* guarantee 0 <= bytes_to_rcv <= rmbe_size */
204 smp_mb__after_atomic(); 335 smp_mb__after_atomic();
205 smc_curs_write(&conn->local_tx_ctrl.cons, 336 if (msg)
206 smc_curs_read(&cons, conn), 337 smc_rx_update_consumer(conn, cons, copylen);
207 conn);
208 /* send consumer cursor update if required */
209 /* similar to advertising new TCP rcv_wnd if required */
210 smc_tx_consumer_update(conn);
211 } 338 }
212 } while (read_remaining); 339 } while (read_remaining);
213out: 340out:
@@ -217,5 +344,6 @@ out:
217/* Initialize receive properties on connection establishment. NB: not __init! */ 344/* Initialize receive properties on connection establishment. NB: not __init! */
218void smc_rx_init(struct smc_sock *smc) 345void smc_rx_init(struct smc_sock *smc)
219{ 346{
220 smc->sk.sk_data_ready = smc_rx_data_ready; 347 smc->sk.sk_data_ready = smc_rx_wake_up;
348 atomic_set(&smc->conn.splice_pending, 0);
221} 349}
diff --git a/net/smc/smc_rx.h b/net/smc/smc_rx.h
index 3a32b59bf06c..db823c97d824 100644
--- a/net/smc/smc_rx.h
+++ b/net/smc/smc_rx.h
@@ -18,7 +18,14 @@
18#include "smc.h" 18#include "smc.h"
19 19
20void smc_rx_init(struct smc_sock *smc); 20void smc_rx_init(struct smc_sock *smc);
21int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len, 21
22 int flags); 22int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
23 struct pipe_inode_info *pipe, size_t len, int flags);
24int smc_rx_wait(struct smc_sock *smc, long *timeo,
25 int (*fcrit)(struct smc_connection *conn));
26static inline int smc_rx_data_available(struct smc_connection *conn)
27{
28 return atomic_read(&conn->bytes_to_rcv);
29}
23 30
24#endif /* SMC_RX_H */ 31#endif /* SMC_RX_H */
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 72f004c9c9b1..58dfe0bd9d60 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -19,6 +19,7 @@
19#include <linux/sched/signal.h> 19#include <linux/sched/signal.h>
20 20
21#include <net/sock.h> 21#include <net/sock.h>
22#include <net/tcp.h>
22 23
23#include "smc.h" 24#include "smc.h"
24#include "smc_wr.h" 25#include "smc_wr.h"
@@ -26,6 +27,7 @@
26#include "smc_tx.h" 27#include "smc_tx.h"
27 28
28#define SMC_TX_WORK_DELAY HZ 29#define SMC_TX_WORK_DELAY HZ
30#define SMC_TX_CORK_DELAY (HZ >> 2) /* 250 ms */
29 31
30/***************************** sndbuf producer *******************************/ 32/***************************** sndbuf producer *******************************/
31 33
@@ -115,6 +117,13 @@ static int smc_tx_wait_memory(struct smc_sock *smc, int flags)
115 return rc; 117 return rc;
116} 118}
117 119
120static bool smc_tx_is_corked(struct smc_sock *smc)
121{
122 struct tcp_sock *tp = tcp_sk(smc->clcsock->sk);
123
124 return (tp->nonagle & TCP_NAGLE_CORK) ? true : false;
125}
126
118/* sndbuf producer: main API called by socket layer. 127/* sndbuf producer: main API called by socket layer.
119 * called under sock lock. 128 * called under sock lock.
120 */ 129 */
@@ -209,7 +218,16 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
209 /* since we just produced more new data into sndbuf, 218 /* since we just produced more new data into sndbuf,
210 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC 219 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
211 */ 220 */
212 smc_tx_sndbuf_nonempty(conn); 221 if ((msg->msg_flags & MSG_MORE || smc_tx_is_corked(smc)) &&
222 (atomic_read(&conn->sndbuf_space) >
223 (conn->sndbuf_size >> 1)))
224 /* for a corked socket defer the RDMA writes if there
225 * is still sufficient sndbuf_space available
226 */
227 schedule_delayed_work(&conn->tx_work,
228 SMC_TX_CORK_DELAY);
229 else
230 smc_tx_sndbuf_nonempty(conn);
213 } /* while (msg_data_left(msg)) */ 231 } /* while (msg_data_left(msg)) */
214 232
215 return send_done; 233 return send_done;
@@ -409,8 +427,8 @@ int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
409 } 427 }
410 rc = 0; 428 rc = 0;
411 if (conn->alert_token_local) /* connection healthy */ 429 if (conn->alert_token_local) /* connection healthy */
412 schedule_delayed_work(&conn->tx_work, 430 mod_delayed_work(system_wq, &conn->tx_work,
413 SMC_TX_WORK_DELAY); 431 SMC_TX_WORK_DELAY);
414 } 432 }
415 goto out_unlock; 433 goto out_unlock;
416 } 434 }
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 1b8af23e6e2b..cc7c1bb60fe8 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -376,6 +376,7 @@ static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
376 for (i = 0; i < num; i++) { 376 for (i = 0; i < num; i++) {
377 link = wc[i].qp->qp_context; 377 link = wc[i].qp->qp_context;
378 if (wc[i].status == IB_WC_SUCCESS) { 378 if (wc[i].status == IB_WC_SUCCESS) {
379 link->wr_rx_tstamp = jiffies;
379 smc_wr_rx_demultiplex(&wc[i]); 380 smc_wr_rx_demultiplex(&wc[i]);
380 smc_wr_rx_post(link); /* refill WR RX */ 381 smc_wr_rx_post(link); /* refill WR RX */
381 } else { 382 } else {
diff --git a/net/tipc/node.c b/net/tipc/node.c
index e9c52e1416c5..b0b840084f0a 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -195,6 +195,27 @@ int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
195 return mtu; 195 return mtu;
196} 196}
197 197
198bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
199{
200 u8 *own_id = tipc_own_id(net);
201 struct tipc_node *n;
202
203 if (!own_id)
204 return true;
205
206 if (addr == tipc_own_addr(net)) {
207 memcpy(id, own_id, TIPC_NODEID_LEN);
208 return true;
209 }
210 n = tipc_node_find(net, addr);
211 if (!n)
212 return false;
213
214 memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
215 tipc_node_put(n);
216 return true;
217}
218
198u16 tipc_node_get_capabilities(struct net *net, u32 addr) 219u16 tipc_node_get_capabilities(struct net *net, u32 addr)
199{ 220{
200 struct tipc_node *n; 221 struct tipc_node *n;
@@ -2250,7 +2271,7 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
2250 2271
2251 rtnl_lock(); 2272 rtnl_lock();
2252 for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) { 2273 for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
2253 err = __tipc_nl_add_monitor(net, &msg, prev_bearer); 2274 err = __tipc_nl_add_monitor(net, &msg, bearer_id);
2254 if (err) 2275 if (err)
2255 break; 2276 break;
2256 } 2277 }
diff --git a/net/tipc/node.h b/net/tipc/node.h
index bb271a37c93f..846c8f240872 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -60,6 +60,7 @@ enum {
60#define INVALID_BEARER_ID -1 60#define INVALID_BEARER_ID -1
61 61
62void tipc_node_stop(struct net *net); 62void tipc_node_stop(struct net *net);
63bool tipc_node_get_id(struct net *net, u32 addr, u8 *id);
63u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr); 64u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr);
64void tipc_node_check_dest(struct net *net, u32 onode, u8 *peer_id128, 65void tipc_node_check_dest(struct net *net, u32 onode, u8 *peer_id128,
65 struct tipc_bearer *bearer, 66 struct tipc_bearer *bearer,
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 252a52ae0893..c4992002fd68 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2973,7 +2973,8 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2973 2973
2974static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 2974static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2975{ 2975{
2976 struct sock *sk = sock->sk; 2976 struct net *net = sock_net(sock->sk);
2977 struct tipc_sioc_nodeid_req nr = {0};
2977 struct tipc_sioc_ln_req lnr; 2978 struct tipc_sioc_ln_req lnr;
2978 void __user *argp = (void __user *)arg; 2979 void __user *argp = (void __user *)arg;
2979 2980
@@ -2981,7 +2982,7 @@ static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2981 case SIOCGETLINKNAME: 2982 case SIOCGETLINKNAME:
2982 if (copy_from_user(&lnr, argp, sizeof(lnr))) 2983 if (copy_from_user(&lnr, argp, sizeof(lnr)))
2983 return -EFAULT; 2984 return -EFAULT;
2984 if (!tipc_node_get_linkname(sock_net(sk), 2985 if (!tipc_node_get_linkname(net,
2985 lnr.bearer_id & 0xffff, lnr.peer, 2986 lnr.bearer_id & 0xffff, lnr.peer,
2986 lnr.linkname, TIPC_MAX_LINK_NAME)) { 2987 lnr.linkname, TIPC_MAX_LINK_NAME)) {
2987 if (copy_to_user(argp, &lnr, sizeof(lnr))) 2988 if (copy_to_user(argp, &lnr, sizeof(lnr)))
@@ -2989,6 +2990,14 @@ static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2989 return 0; 2990 return 0;
2990 } 2991 }
2991 return -EADDRNOTAVAIL; 2992 return -EADDRNOTAVAIL;
2993 case SIOCGETNODEID:
2994 if (copy_from_user(&nr, argp, sizeof(nr)))
2995 return -EFAULT;
2996 if (!tipc_node_get_id(net, nr.peer, nr.node_id))
2997 return -EADDRNOTAVAIL;
2998 if (copy_to_user(argp, &nr, sizeof(nr)))
2999 return -EFAULT;
3000 return 0;
2992 default: 3001 default:
2993 return -ENOIOCTLCMD; 3002 return -ENOIOCTLCMD;
2994 } 3003 }
diff --git a/net/tls/Kconfig b/net/tls/Kconfig
index 89b8745a986f..73f05ece53d0 100644
--- a/net/tls/Kconfig
+++ b/net/tls/Kconfig
@@ -14,3 +14,13 @@ config TLS
14 encryption handling of the TLS protocol to be done in-kernel. 14 encryption handling of the TLS protocol to be done in-kernel.
15 15
16 If unsure, say N. 16 If unsure, say N.
17
18config TLS_DEVICE
19 bool "Transport Layer Security HW offload"
20 depends on TLS
21 select SOCK_VALIDATE_XMIT
22 default n
23 help
24 Enable kernel support for HW offload of the TLS protocol.
25
26 If unsure, say N.
diff --git a/net/tls/Makefile b/net/tls/Makefile
index a930fd1c4f7b..4d6b728a67d0 100644
--- a/net/tls/Makefile
+++ b/net/tls/Makefile
@@ -5,3 +5,5 @@
5obj-$(CONFIG_TLS) += tls.o 5obj-$(CONFIG_TLS) += tls.o
6 6
7tls-y := tls_main.o tls_sw.o 7tls-y := tls_main.o tls_sw.o
8
9tls-$(CONFIG_TLS_DEVICE) += tls_device.o tls_device_fallback.o
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
new file mode 100644
index 000000000000..ac45d6224e41
--- /dev/null
+++ b/net/tls/tls_device.c
@@ -0,0 +1,764 @@
1/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
2 *
3 * This software is available to you under a choice of one of two
4 * licenses. You may choose to be licensed under the terms of the GNU
5 * General Public License (GPL) Version 2, available from the file
6 * COPYING in the main directory of this source tree, or the
7 * OpenIB.org BSD license below:
8 *
9 * Redistribution and use in source and binary forms, with or
10 * without modification, are permitted provided that the following
11 * conditions are met:
12 *
13 * - Redistributions of source code must retain the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer.
16 *
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials
20 * provided with the distribution.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * SOFTWARE.
30 */
31
32#include <crypto/aead.h>
33#include <linux/highmem.h>
34#include <linux/module.h>
35#include <linux/netdevice.h>
36#include <net/dst.h>
37#include <net/inet_connection_sock.h>
38#include <net/tcp.h>
39#include <net/tls.h>
40
41/* device_offload_lock is used to synchronize tls_dev_add
42 * against NETDEV_DOWN notifications.
43 */
44static DECLARE_RWSEM(device_offload_lock);
45
46static void tls_device_gc_task(struct work_struct *work);
47
48static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
49static LIST_HEAD(tls_device_gc_list);
50static LIST_HEAD(tls_device_list);
51static DEFINE_SPINLOCK(tls_device_lock);
52
53static void tls_device_free_ctx(struct tls_context *ctx)
54{
55 struct tls_offload_context *offload_ctx = tls_offload_ctx(ctx);
56
57 kfree(offload_ctx);
58 kfree(ctx);
59}
60
61static void tls_device_gc_task(struct work_struct *work)
62{
63 struct tls_context *ctx, *tmp;
64 unsigned long flags;
65 LIST_HEAD(gc_list);
66
67 spin_lock_irqsave(&tls_device_lock, flags);
68 list_splice_init(&tls_device_gc_list, &gc_list);
69 spin_unlock_irqrestore(&tls_device_lock, flags);
70
71 list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
72 struct net_device *netdev = ctx->netdev;
73
74 if (netdev) {
75 netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
76 TLS_OFFLOAD_CTX_DIR_TX);
77 dev_put(netdev);
78 }
79
80 list_del(&ctx->list);
81 tls_device_free_ctx(ctx);
82 }
83}
84
85static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
86{
87 unsigned long flags;
88
89 spin_lock_irqsave(&tls_device_lock, flags);
90 list_move_tail(&ctx->list, &tls_device_gc_list);
91
92 /* schedule_work inside the spinlock
93 * to make sure tls_device_down waits for that work.
94 */
95 schedule_work(&tls_device_gc_work);
96
97 spin_unlock_irqrestore(&tls_device_lock, flags);
98}
99
100/* We assume that the socket is already connected */
101static struct net_device *get_netdev_for_sock(struct sock *sk)
102{
103 struct dst_entry *dst = sk_dst_get(sk);
104 struct net_device *netdev = NULL;
105
106 if (likely(dst)) {
107 netdev = dst->dev;
108 dev_hold(netdev);
109 }
110
111 dst_release(dst);
112
113 return netdev;
114}
115
116static void destroy_record(struct tls_record_info *record)
117{
118 int nr_frags = record->num_frags;
119 skb_frag_t *frag;
120
121 while (nr_frags-- > 0) {
122 frag = &record->frags[nr_frags];
123 __skb_frag_unref(frag);
124 }
125 kfree(record);
126}
127
128static void delete_all_records(struct tls_offload_context *offload_ctx)
129{
130 struct tls_record_info *info, *temp;
131
132 list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
133 list_del(&info->list);
134 destroy_record(info);
135 }
136
137 offload_ctx->retransmit_hint = NULL;
138}
139
140static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
141{
142 struct tls_context *tls_ctx = tls_get_ctx(sk);
143 struct tls_record_info *info, *temp;
144 struct tls_offload_context *ctx;
145 u64 deleted_records = 0;
146 unsigned long flags;
147
148 if (!tls_ctx)
149 return;
150
151 ctx = tls_offload_ctx(tls_ctx);
152
153 spin_lock_irqsave(&ctx->lock, flags);
154 info = ctx->retransmit_hint;
155 if (info && !before(acked_seq, info->end_seq)) {
156 ctx->retransmit_hint = NULL;
157 list_del(&info->list);
158 destroy_record(info);
159 deleted_records++;
160 }
161
162 list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
163 if (before(acked_seq, info->end_seq))
164 break;
165 list_del(&info->list);
166
167 destroy_record(info);
168 deleted_records++;
169 }
170
171 ctx->unacked_record_sn += deleted_records;
172 spin_unlock_irqrestore(&ctx->lock, flags);
173}
174
/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx);

	/* Free the record still being filled, if any, then all queued ones. */
	if (ctx->open_record)
		destroy_record(ctx->open_record);

	delete_all_records(ctx);
	crypto_free_aead(ctx->aead_send);
	/* Chain to the destructor that was installed before offload. */
	ctx->sk_destruct(sk);
	clean_acked_data_disable(inet_csk(sk));

	/* The context may still be referenced by the NETDEV_DOWN path;
	 * only the last reference queues the actual destruction.
	 */
	if (refcount_dec_and_test(&tls_ctx->refcount))
		tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL(tls_device_sk_destruct);
196
/* Append @size bytes taken from @pfrag to the open record, coalescing with
 * the record's last fragment when the new data is contiguous with it in the
 * same page.  Consumes @size bytes of the page frag (advances its offset)
 * and takes a page reference only when a new fragment is started.
 */
static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (frag->page.p == pfrag->page &&
	    frag->page_offset + frag->size == pfrag->offset) {
		/* Contiguous with the previous fragment: just grow it. */
		frag->size += size;
	} else {
		++frag;
		frag->page.p = pfrag->page;
		frag->page_offset = pfrag->offset;
		frag->size = size;
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}
219
/* Close the open record and hand it to the TCP stack for transmission.
 *
 * Fills in the TLS header (prepend) and a placeholder auth tag, queues the
 * record on the offload context for possible retransmission, advances the
 * record sequence number, and pushes the record's fragments as a scatterlist
 * through tls_push_sg().
 *
 * NOTE(review): @pfrag is currently unused here; it is kept to match the
 * caller's calling convention.
 */
static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context *offload_ctx,
			   struct tls_record_info *record,
			   struct page_frag *pfrag,
			   int flags,
			   unsigned char record_type)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct page_frag dummy_tag_frag;
	skb_frag_t *frag;
	int i;

	/* fill prepend */
	frag = &record->frags[0];
	tls_fill_prepend(ctx,
			 skb_frag_address(frag),
			 record->len - ctx->tx.prepend_size,
			 record_type);

	/* HW doesn't care about the data in the tag, because it fills it. */
	dummy_tag_frag.page = skb_frag_page(frag);
	dummy_tag_frag.offset = 0;

	tls_append_frag(record, &dummy_tag_frag, ctx->tx.tag_size);
	record->end_seq = tp->write_seq + record->len;
	spin_lock_irq(&offload_ctx->lock);
	list_add_tail(&record->list, &offload_ctx->records_list);
	spin_unlock_irq(&offload_ctx->lock);
	offload_ctx->open_record = NULL;
	set_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
	tls_advance_record_sn(sk, &ctx->tx);

	/* Build the TX scatterlist; each fragment gets an extra page ref and
	 * is charged to the socket's send buffer accounting.
	 */
	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    frag->size, frag->page_offset);
		sk_mem_charge(sk, frag->size);
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}
266
/* Allocate a fresh TX record and reserve its first fragment for the TLS
 * header (@prepend_size bytes) from @pfrag, which must have enough room.
 * On success the record becomes the offload context's open record.
 *
 * Returns 0 or -ENOMEM.
 */
static int tls_create_new_record(struct tls_offload_context *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	__skb_frag_set_page(frag, pfrag->page);
	frag->page_offset = pfrag->offset;
	skb_frag_size_set(frag, prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}
291
/* Make sure there is an open record and at least some room in the socket's
 * page frag to copy payload into.
 *
 * If no record is open, refill the page frag for the header and open a new
 * record; on memory pressure signal the stack and return -ENOMEM so the
 * caller can wait for memory.  Returns 0 when the caller may proceed.
 */
static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			sk->sk_prot->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		/* Header consumed part of the frag; if payload room remains
		 * we are done, otherwise fall through and refill.
		 */
		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}
320
/* Copy @size bytes from @msg_iter into TX records and push completed
 * records to the transport.
 *
 * Payload is staged in the socket's page frag, appended to the open record,
 * and the record is pushed once it is full (max record length or fragment
 * limit) or the message ends without MSG_MORE.  A partially filled record
 * may be left open across calls when the caller indicated more data is
 * coming.
 *
 * Returns the number of bytes consumed (if any were), otherwise a negative
 * errno.  Non-data record types are never sent partially: on error the open
 * record is destroyed instead.
 */
static int tls_push_data(struct sock *sk,
			 struct iov_iter *msg_iter,
			 size_t size, int flags,
			 unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx);
	int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
	int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
	struct tls_record_info *record = ctx->open_record;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	int copy, rc = 0;
	bool done = false;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	if (sk->sk_err)
		return -sk->sk_err;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	rc = tls_complete_pending_work(sk, tls_ctx, flags, &timeo);
	if (rc < 0)
		return rc;

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      tls_ctx->tx.prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag,
				       tls_ctx->tx.prepend_size);
		if (rc) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending partial
				 * record with type !=
				 * application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > tls_ctx->tx.prepend_size) {
				/* Data record already holds payload: push
				 * what we have before reporting the error.
				 */
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;
		/* Bounded by remaining message, frag room and record room. */
		copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
		copy = min_t(size_t, copy, (max_open_record_len - record->len));

		if (copy_from_iter_nocache(page_address(pfrag->page) +
					   pfrag->offset,
					   copy, msg_iter) != copy) {
			rc = -EFAULT;
			goto handle_error;
		}
		tls_append_frag(record, pfrag, copy);

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (more) {
				/* Caller promised more data: keep the record
				 * open and remember its fragment count.
				 */
				tls_ctx->pending_open_record_frags =
						record->num_frags;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     pfrag,
					     tls_push_record_flags,
					     record_type);
			if (rc < 0)
				break;
		}
	} while (!done);

	/* Partial progress wins over a late error. */
	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}
428
429int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
430{
431 unsigned char record_type = TLS_RECORD_TYPE_DATA;
432 int rc;
433
434 lock_sock(sk);
435
436 if (unlikely(msg->msg_controllen)) {
437 rc = tls_proccess_cmsg(sk, msg, &record_type);
438 if (rc)
439 goto out;
440 }
441
442 rc = tls_push_data(sk, &msg->msg_iter, size,
443 msg->msg_flags, record_type);
444
445out:
446 release_sock(sk);
447 return rc;
448}
449
450int tls_device_sendpage(struct sock *sk, struct page *page,
451 int offset, size_t size, int flags)
452{
453 struct iov_iter msg_iter;
454 char *kaddr = kmap(page);
455 struct kvec iov;
456 int rc;
457
458 if (flags & MSG_SENDPAGE_NOTLAST)
459 flags |= MSG_MORE;
460
461 lock_sock(sk);
462
463 if (flags & MSG_OOB) {
464 rc = -ENOTSUPP;
465 goto out;
466 }
467
468 iov.iov_base = kaddr + offset;
469 iov.iov_len = size;
470 iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
471 rc = tls_push_data(sk, &msg_iter, size,
472 flags, TLS_RECORD_TYPE_DATA);
473 kunmap(page);
474
475out:
476 release_sock(sk);
477 return rc;
478}
479
/* Find the TX record containing TCP sequence number @seq.
 *
 * Starts from the cached retransmit hint when it is still relevant,
 * otherwise walks from the head of the records list.  On success stores the
 * record's TLS sequence number in @p_record_sn, updates the hint to the
 * furthest record seen, and returns the record; returns NULL if @seq is
 * past all queued records.  Caller must hold @context->lock.
 */
struct tls_record_info *tls_get_record(struct tls_offload_context *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry(&context->records_list,
					struct tls_record_info, list);
		record_sn = context->unacked_record_sn;
	}

	list_for_each_entry_from(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			/* Only move the hint forward, never backward. */
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			return info;
		}
		record_sn++;
	}

	return NULL;
}
EXPORT_SYMBOL(tls_get_record);
514
/* Flush the currently open record by pushing zero additional bytes of
 * payload through tls_push_data() with an empty iterator.
 */
static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter msg_iter;

	iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}
522
/* Enable TX TLS record offload for @sk on its bound net device.
 *
 * Builds the offload context from @ctx->crypto_send (only AES-GCM-128 is
 * supported), initializes the software-fallback AEAD used when packets are
 * rerouted through a non-offloading device, seeds the records list with a
 * zero-length start marker, and installs the hardware context through the
 * driver's tls_dev_add() hook.  On success the socket's destructor and
 * validate_xmit hooks are replaced.
 *
 * Returns 0 or a negative errno; every failure path unwinds exactly the
 * state allocated before it (see the cascading labels at the end).
 */
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context *offload_ctx;
	struct tls_crypto_info *crypto_info;
	struct net_device *netdev;
	char *iv, *rec_seq;
	struct sk_buff *skb;
	int rc = -EINVAL;
	__be64 rcd_sn;

	if (!ctx)
		goto out;

	if (ctx->priv_ctx_tx) {
		rc = -EEXIST;
		goto out;
	}

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record) {
		rc = -ENOMEM;
		goto out;
	}

	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE, GFP_KERNEL);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	crypto_info = &ctx->crypto_send;
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	ctx->tx.prepend_size = TLS_HEADER_SIZE + nonce_size;
	ctx->tx.tag_size = tag_size;
	ctx->tx.overhead_size = ctx->tx.prepend_size + ctx->tx.tag_size;
	ctx->tx.iv_size = iv_size;
	/* IV buffer also holds the implicit salt in front of the per-record
	 * explicit nonce.
	 */
	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto free_offload_ctx;
	}

	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

	ctx->tx.rec_seq_size = rec_seq_size;
	ctx->tx.rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}
	memcpy(ctx->tx.rec_seq, rec_seq, rec_seq_size);

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_rec_seq;

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;

	INIT_LIST_HEAD(&offload_ctx->records_list);
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;
	offload_ctx->sk_destruct = sk->sk_destruct;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last skb in the write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	refcount_set(&ctx->refcount, 1);

	/* We support starting offload on multiple sockets
	 * concurrently, so we only need a read lock here.
	 * This lock must precede get_netdev_for_sock to prevent races between
	 * NETDEV_DOWN and setsockopt.
	 */
	down_read(&device_offload_lock);
	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto release_lock;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -ENOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 */
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_netdev;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send,
					     tcp_sk(sk)->write_seq);
	if (rc)
		goto release_netdev;

	ctx->netdev = netdev;

	spin_lock_irq(&tls_device_lock);
	list_add_tail(&ctx->list, &tls_device_list);
	spin_unlock_irq(&tls_device_lock);

	sk->sk_validate_xmit_skb = tls_validate_xmit_skb;
	/* following this assignment tls_is_sk_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_destruct,
			  &tls_device_sk_destruct);
	up_read(&device_offload_lock);
	goto out;

release_netdev:
	dev_put(netdev);
release_lock:
	up_read(&device_offload_lock);
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_rec_seq:
	kfree(ctx->tx.rec_seq);
free_iv:
	kfree(ctx->tx.iv);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
out:
	return rc;
}
690
/* NETDEV_DOWN handler: tear down the hardware TLS state of every offloaded
 * socket bound to @netdev.
 *
 * Takes the write side of device_offload_lock to fence out concurrent
 * tls_set_device_offload() calls, moves the affected contexts to a private
 * list under tls_device_lock, then deletes the HW contexts outside the
 * spinlock (tls_dev_del may sleep).
 */
static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		/* Skip contexts already being destroyed (refcount hit 0). */
		if (ctx->netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
						TLS_OFFLOAD_CTX_DIR_TX);
		ctx->netdev = NULL;
		dev_put(netdev);
		list_del_init(&ctx->list);

		/* Drop the reference taken above; may be the last one. */
		if (refcount_dec_and_test(&ctx->refcount))
			tls_device_free_ctx(ctx);
	}

	up_write(&device_offload_lock);

	/* Wait for pending deferred context destruction to finish. */
	flush_work(&tls_device_gc_work);

	return NOTIFY_DONE;
}
727
728static int tls_dev_event(struct notifier_block *this, unsigned long event,
729 void *ptr)
730{
731 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
732
733 if (!(dev->features & NETIF_F_HW_TLS_TX))
734 return NOTIFY_DONE;
735
736 switch (event) {
737 case NETDEV_REGISTER:
738 case NETDEV_FEAT_CHANGE:
739 if (dev->tlsdev_ops &&
740 dev->tlsdev_ops->tls_dev_add &&
741 dev->tlsdev_ops->tls_dev_del)
742 return NOTIFY_DONE;
743 else
744 return NOTIFY_BAD;
745 case NETDEV_DOWN:
746 return tls_device_down(dev);
747 }
748 return NOTIFY_DONE;
749}
750
/* Notifier block registered at module init to track net device events. */
static struct notifier_block tls_dev_notifier = {
	.notifier_call = tls_dev_event,
};
754
/* Module init: hook into netdevice notifications.
 * NOTE(review): the return value of register_netdevice_notifier() is
 * ignored here; failure would leave TLS device events unhandled.
 */
void __init tls_device_init(void)
{
	register_netdevice_notifier(&tls_dev_notifier);
}
759
/* Module exit: unregister the notifier and drain any deferred context
 * destruction still queued on the GC work item.
 */
void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	flush_work(&tls_device_gc_work);
}
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
new file mode 100644
index 000000000000..748914abdb60
--- /dev/null
+++ b/net/tls/tls_device_fallback.c
@@ -0,0 +1,450 @@
1/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
2 *
3 * This software is available to you under a choice of one of two
4 * licenses. You may choose to be licensed under the terms of the GNU
5 * General Public License (GPL) Version 2, available from the file
6 * COPYING in the main directory of this source tree, or the
7 * OpenIB.org BSD license below:
8 *
9 * Redistribution and use in source and binary forms, with or
10 * without modification, are permitted provided that the following
11 * conditions are met:
12 *
13 * - Redistributions of source code must retain the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer.
16 *
17 * - Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials
20 * provided with the distribution.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * SOFTWARE.
30 */
31
32#include <net/tls.h>
33#include <crypto/aead.h>
34#include <crypto/scatterwalk.h>
35#include <net/ip6_checksum.h>
36
/* Set @sg to cover the remainder of the scatter walk's current entry
 * (from the walk position to the entry's end) and chain it to the rest of
 * the walk's scatterlist, so crypto can consume the unwalked tail.
 */
static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
{
	struct scatterlist *src = walk->sg;
	int diff = walk->offset - src->offset;

	sg_set_page(sg, sg_page(src),
		    src->length - diff, walk->offset);

	scatterwalk_crypto_chain(sg, sg_next(src), 0, 2);
}
47
/* Re-encrypt one TLS record in software for the device-offload fallback.
 *
 * Consumes the record header (and explicit nonce) from @in, copies it
 * through to @out, builds the AAD and per-record IV from it, then runs
 * AES-GCM over the remaining payload directly between the two walks.
 * @*in_len is decremented by the bytes consumed; it reaching zero stops the
 * caller's loop.  Returns 0 or a crypto_aead_encrypt() error.
 */
static int tls_enc_record(struct aead_request *aead_req,
			  struct crypto_aead *aead, char *aad,
			  char *iv, __be64 rcd_sn,
			  struct scatter_walk *in,
			  struct scatter_walk *out, int *in_len)
{
	unsigned char buf[TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE];
	struct scatterlist sg_in[3];
	struct scatterlist sg_out[3];
	u16 len;
	int rc;

	len = min_t(int, *in_len, ARRAY_SIZE(buf));

	/* Read the header+nonce from the input and mirror it to the output
	 * unchanged (the header is sent in the clear).
	 */
	scatterwalk_copychunks(buf, in, len, 0);
	scatterwalk_copychunks(buf, out, len, 1);

	*in_len -= len;
	if (!*in_len)
		return 0;

	scatterwalk_pagedone(in, 0, 1);
	scatterwalk_pagedone(out, 1, 1);

	/* TLS record length field is big-endian at header bytes 3-4. */
	len = buf[4] | (buf[3] << 8);
	len -= TLS_CIPHER_AES_GCM_128_IV_SIZE;

	tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE,
		     (char *)&rcd_sn, sizeof(rcd_sn), buf[0]);

	/* Per-record IV = implicit salt (already in iv) + explicit nonce. */
	memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE,
	       TLS_CIPHER_AES_GCM_128_IV_SIZE);

	sg_init_table(sg_in, ARRAY_SIZE(sg_in));
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
	sg_set_buf(sg_in, aad, TLS_AAD_SPACE_SIZE);
	sg_set_buf(sg_out, aad, TLS_AAD_SPACE_SIZE);
	chain_to_walk(sg_in + 1, in);
	chain_to_walk(sg_out + 1, out);

	*in_len -= len;
	if (*in_len < 0) {
		*in_len += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		/* the input buffer doesn't contain the entire record.
		 * trim len accordingly. The resulting authentication tag
		 * will contain garbage, but we don't care, so we won't
		 * include any of it in the output skb
		 * Note that we assume the output buffer length
		 * is larger than input buffer length + tag size
		 */
		if (*in_len < 0)
			len += *in_len;

		*in_len = 0;
	}

	if (*in_len) {
		/* Skip over the record body so the walks point at the next
		 * record header on the following iteration.
		 */
		scatterwalk_copychunks(NULL, in, len, 2);
		scatterwalk_pagedone(in, 0, 1);
		scatterwalk_copychunks(NULL, out, len, 2);
		scatterwalk_pagedone(out, 1, 1);
	}

	len -= TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);

	rc = crypto_aead_encrypt(aead_req);

	return rc;
}
118
/* Bind @aead_req to the transform and set the fixed TLS AAD length. */
static void tls_init_aead_request(struct aead_request *aead_req,
				  struct crypto_aead *aead)
{
	aead_request_set_tfm(aead_req, aead);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
}
125
126static struct aead_request *tls_alloc_aead_request(struct crypto_aead *aead,
127 gfp_t flags)
128{
129 unsigned int req_size = sizeof(struct aead_request) +
130 crypto_aead_reqsize(aead);
131 struct aead_request *aead_req;
132
133 aead_req = kzalloc(req_size, flags);
134 if (aead_req)
135 tls_init_aead_request(aead_req, aead);
136 return aead_req;
137}
138
/* Encrypt every TLS record contained in @sg_in into @sg_out, incrementing
 * the record sequence number per record, until @len bytes are consumed or
 * an encryption step fails.  Returns the last tls_enc_record() result.
 */
static int tls_enc_records(struct aead_request *aead_req,
			   struct crypto_aead *aead, struct scatterlist *sg_in,
			   struct scatterlist *sg_out, char *aad, char *iv,
			   u64 rcd_sn, int len)
{
	struct scatter_walk out, in;
	int rc;

	scatterwalk_start(&in, sg_in);
	scatterwalk_start(&out, sg_out);

	do {
		rc = tls_enc_record(aead_req, aead, aad, iv,
				    cpu_to_be64(rcd_sn), &in, &out, &len);
		rcd_sn++;

	} while (rc == 0 && len);

	scatterwalk_done(&in, 0, 0);
	scatterwalk_done(&out, 1, 0);

	return rc;
}
162
/* Can't use icsk->icsk_af_ops->send_check here because the ip addresses
 * might have been changed by NAT.
 */
static void update_chksum(struct sk_buff *skb, int headln)
{
	struct tcphdr *th = tcp_hdr(skb);
	int datalen = skb->len - headln;
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;

	/* We only changed the payload so if we are using partial we don't
	 * need to update anything.
	 */
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		return;

	/* Recompute only the pseudo-header checksum and let the device (or
	 * the stack) finish the TCP checksum over the new payload.
	 */
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);

	if (skb->sk->sk_family == AF_INET6) {
		ipv6h = ipv6_hdr(skb);
		th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					     datalen, IPPROTO_TCP, 0);
	} else {
		iph = ip_hdr(skb);
		th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
					       IPPROTO_TCP, 0);
	}
}
193
/* Finish building the fallback skb @nskb from the original @skb: copy the
 * header metadata and the protocol headers (@headln bytes), fix the
 * checksum fields, and transfer socket ownership (destructor, sk, and
 * wmem accounting) from @skb to @nskb.
 */
static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
{
	skb_copy_header(nskb, skb);

	skb_put(nskb, skb->len);
	memcpy(nskb->data, skb->data, headln);
	update_chksum(nskb, headln);

	nskb->destructor = skb->destructor;
	nskb->sk = skb->sk;
	skb->destructor = NULL;
	skb->sk = NULL;
	/* Account for the size difference on the owning socket. */
	refcount_add(nskb->truesize - skb->truesize,
		     &nskb->sk->sk_wmem_alloc);
}
209
/* This function may be called after the user socket is already
 * closed so make sure we don't use anything freed during
 * tls_sk_proto_close here
 */

/* Build the encryption input scatterlist for a retransmitted skb.
 *
 * Locates the TX record containing the skb's TCP sequence, references the
 * record fragments covering the bytes between the record start and the skb
 * (@*sync_size bytes, @*resync_sgs entries — the caller must drop these
 * page refs), then appends the skb's own payload via skb_to_sgvec().
 * Returns 0 on success or -EINVAL when the record is gone (already acked)
 * or the sgvec mapping fails.
 */
static int fill_sg_in(struct scatterlist *sg_in,
		      struct sk_buff *skb,
		      struct tls_offload_context *ctx,
		      u64 *rcd_sn,
		      s32 *sync_size,
		      int *resync_sgs)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int payload_len = skb->len - tcp_payload_offset;
	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
	struct tls_record_info *record;
	unsigned long flags;
	int remaining;
	int i;

	spin_lock_irqsave(&ctx->lock, flags);
	record = tls_get_record(ctx, tcp_seq, rcd_sn);
	if (!record) {
		spin_unlock_irqrestore(&ctx->lock, flags);
		WARN(1, "Record not found for seq %u\n", tcp_seq);
		return -EINVAL;
	}

	*sync_size = tcp_seq - tls_record_start_seq(record);
	if (*sync_size < 0) {
		int is_start_marker = tls_record_is_start_marker(record);

		spin_unlock_irqrestore(&ctx->lock, flags);
		/* This should only occur if the relevant record was
		 * already acked. In that case it should be ok
		 * to drop the packet and avoid retransmission.
		 *
		 * There is a corner case where the packet contains
		 * both an acked and a non-acked record.
		 * We currently don't handle that case and rely
		 * on TCP to retransmit a packet that doesn't contain
		 * already acked payload.
		 */
		if (!is_start_marker)
			*sync_size = 0;
		return -EINVAL;
	}

	remaining = *sync_size;
	for (i = 0; remaining > 0; i++) {
		skb_frag_t *frag = &record->frags[i];

		__skb_frag_ref(frag);
		sg_set_page(sg_in + i, skb_frag_page(frag),
			    skb_frag_size(frag), frag->page_offset);

		remaining -= skb_frag_size(frag);

		/* Trim the last fragment so the total equals sync_size. */
		if (remaining < 0)
			sg_in[i].length += remaining;
	}
	*resync_sgs = i;

	spin_unlock_irqrestore(&ctx->lock, flags);
	if (skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset, payload_len) < 0)
		return -EINVAL;

	return 0;
}
279
/* Build the 3-entry encryption output scatterlist: a scratch buffer for the
 * re-encrypted sync bytes, the new skb's payload area, and scratch space
 * for the authentication tag (whose value is discarded).
 * NOTE(review): @buf and @tls_ctx are currently unused parameters.
 */
static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
			struct tls_context *tls_ctx,
			struct sk_buff *nskb,
			int tcp_payload_offset,
			int payload_len,
			int sync_size,
			void *dummy_buf)
{
	sg_set_buf(&sg_out[0], dummy_buf, sync_size);
	sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
	/* Add room for authentication tag produced by crypto */
	dummy_buf += sync_size;
	sg_set_buf(&sg_out[2], dummy_buf, TLS_CIPHER_AES_GCM_128_TAG_SIZE);
}
294
/* Produce a software-encrypted copy of @skb for the fallback path.
 *
 * Allocates scratch space (salt+IV, AAD, sync bytes, tag), a new skb sized
 * like the original, encrypts the record stream described by @sg_in into
 * the new skb via tls_enc_records(), and transfers ownership with
 * complete_skb().  Returns the new skb or NULL on any failure; the scratch
 * buffer and the AEAD request are always freed before returning.
 */
static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
				   struct scatterlist sg_out[3],
				   struct scatterlist *sg_in,
				   struct sk_buff *skb,
				   s32 sync_size, u64 rcd_sn)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx);
	int payload_len = skb->len - tcp_payload_offset;
	void *buf, *iv, *aad, *dummy_buf;
	struct aead_request *aead_req;
	struct sk_buff *nskb = NULL;
	int buf_len;

	aead_req = tls_alloc_aead_request(ctx->aead_send, GFP_ATOMIC);
	if (!aead_req)
		return NULL;

	/* One contiguous scratch allocation, carved up below. */
	buf_len = TLS_CIPHER_AES_GCM_128_SALT_SIZE +
		  TLS_CIPHER_AES_GCM_128_IV_SIZE +
		  TLS_AAD_SPACE_SIZE +
		  sync_size +
		  TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (!buf)
		goto free_req;

	iv = buf;
	memcpy(iv, tls_ctx->crypto_send_aes_gcm_128.salt,
	       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
	      TLS_CIPHER_AES_GCM_128_IV_SIZE;
	dummy_buf = aad + TLS_AAD_SPACE_SIZE;

	nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
	if (!nskb)
		goto free_buf;

	skb_reserve(nskb, skb_headroom(skb));

	fill_sg_out(sg_out, buf, tls_ctx, nskb, tcp_payload_offset,
		    payload_len, sync_size, dummy_buf);

	if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv,
			    rcd_sn, sync_size + payload_len) < 0)
		goto free_nskb;

	complete_skb(nskb, skb, tcp_payload_offset);

	/* validate_xmit_skb_list assumes that if the skb wasn't segmented
	 * nskb->prev will point to the skb itself
	 */
	nskb->prev = nskb;

free_buf:
	kfree(buf);
free_req:
	kfree(aead_req);
	return nskb;
free_nskb:
	kfree_skb(nskb);
	nskb = NULL;
	goto free_buf;
}
359
/* Software-encrypt an skb that cannot be sent through the offloading
 * device (e.g. rerouted retransmission).
 *
 * Builds the input scatterlist (record fragments preceding the skb plus the
 * skb payload), encrypts into a fresh skb, and returns it.  Always consumes
 * the original @skb.  Returns NULL on failure, or the original skb (with an
 * extra reference) when the packet predates the TLS state and should pass
 * through unmodified.
 */
static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context *ctx = tls_offload_ctx(tls_ctx);
	int payload_len = skb->len - tcp_payload_offset;
	struct scatterlist *sg_in, sg_out[3];
	struct sk_buff *nskb = NULL;
	int sg_in_max_elements;
	int resync_sgs = 0;
	s32 sync_size = 0;
	u64 rcd_sn;

	/* worst case is:
	 * MAX_SKB_FRAGS in tls_record_info
	 * MAX_SKB_FRAGS + 1 in SKB head and frags.
	 */
	sg_in_max_elements = 2 * MAX_SKB_FRAGS + 1;

	/* Pure ACKs etc. carry no TLS payload; send as-is. */
	if (!payload_len)
		return skb;

	sg_in = kmalloc_array(sg_in_max_elements, sizeof(*sg_in), GFP_ATOMIC);
	if (!sg_in)
		goto free_orig;

	sg_init_table(sg_in, sg_in_max_elements);
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));

	if (fill_sg_in(sg_in, skb, ctx, &rcd_sn, &sync_size, &resync_sgs)) {
		/* bypass packets before kernel TLS socket option was set */
		if (sync_size < 0 && payload_len <= -sync_size)
			nskb = skb_get(skb);
		goto put_sg;
	}

	nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);

put_sg:
	/* Drop the record-fragment page refs taken by fill_sg_in(). */
	while (resync_sgs)
		put_page(sg_page(&sg_in[--resync_sgs]));
	kfree(sg_in);
free_orig:
	kfree_skb(skb);
	return nskb;
}
406
407struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
408 struct net_device *dev,
409 struct sk_buff *skb)
410{
411 if (dev == tls_get_ctx(sk)->netdev)
412 return skb;
413
414 return tls_sw_fallback(sk, skb);
415}
416
/* Set up the synchronous AES-GCM AEAD transform used by the software
 * fallback path, keyed from @crypto_info (AES-GCM-128).
 *
 * Returns 0 or a negative errno; on failure no transform is left behind.
 */
int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context *offload_ctx,
			 struct tls_crypto_info *crypto_info)
{
	const u8 *key;
	int rc;

	/* CRYPTO_ALG_ASYNC as the mask requests a synchronous transform,
	 * since the fallback runs in xmit context and cannot sleep.
	 */
	offload_ctx->aead_send =
	    crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(offload_ctx->aead_send)) {
		rc = PTR_ERR(offload_ctx->aead_send);
		pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
		offload_ctx->aead_send = NULL;
		goto err_out;
	}

	key = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->key;

	rc = crypto_aead_setkey(offload_ctx->aead_send, key,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(offload_ctx->aead_send,
				     TLS_CIPHER_AES_GCM_128_TAG_SIZE);
	if (rc)
		goto free_aead;

	return 0;
free_aead:
	crypto_free_aead(offload_ctx->aead_send);
err_out:
	return rc;
}
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 0d379970960e..4b57ddd72f34 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -51,12 +51,12 @@ enum {
51 TLSV6, 51 TLSV6,
52 TLS_NUM_PROTS, 52 TLS_NUM_PROTS,
53}; 53};
54
55enum { 54enum {
56 TLS_BASE, 55 TLS_BASE,
57 TLS_SW_TX, 56 TLS_SW,
58 TLS_SW_RX, 57#ifdef CONFIG_TLS_DEVICE
59 TLS_SW_RXTX, 58 TLS_HW,
59#endif
60 TLS_HW_RECORD, 60 TLS_HW_RECORD,
61 TLS_NUM_CONFIG, 61 TLS_NUM_CONFIG,
62}; 62};
@@ -65,14 +65,14 @@ static struct proto *saved_tcpv6_prot;
65static DEFINE_MUTEX(tcpv6_prot_mutex); 65static DEFINE_MUTEX(tcpv6_prot_mutex);
66static LIST_HEAD(device_list); 66static LIST_HEAD(device_list);
67static DEFINE_MUTEX(device_mutex); 67static DEFINE_MUTEX(device_mutex);
68static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG]; 68static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
69static struct proto_ops tls_sw_proto_ops; 69static struct proto_ops tls_sw_proto_ops;
70 70
71static inline void update_sk_prot(struct sock *sk, struct tls_context *ctx) 71static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
72{ 72{
73 int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4; 73 int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
74 74
75 sk->sk_prot = &tls_prots[ip_ver][ctx->conf]; 75 sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
76} 76}
77 77
78int wait_on_pending_writer(struct sock *sk, long *timeo) 78int wait_on_pending_writer(struct sock *sk, long *timeo)
@@ -114,6 +114,7 @@ int tls_push_sg(struct sock *sk,
114 size = sg->length - offset; 114 size = sg->length - offset;
115 offset += sg->offset; 115 offset += sg->offset;
116 116
117 ctx->in_tcp_sendpages = true;
117 while (1) { 118 while (1) {
118 if (sg_is_last(sg)) 119 if (sg_is_last(sg))
119 sendpage_flags = flags; 120 sendpage_flags = flags;
@@ -148,6 +149,8 @@ retry:
148 } 149 }
149 150
150 clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags); 151 clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
152 ctx->in_tcp_sendpages = false;
153 ctx->sk_write_space(sk);
151 154
152 return 0; 155 return 0;
153} 156}
@@ -217,6 +220,10 @@ static void tls_write_space(struct sock *sk)
217{ 220{
218 struct tls_context *ctx = tls_get_ctx(sk); 221 struct tls_context *ctx = tls_get_ctx(sk);
219 222
223 /* We are already sending pages, ignore notification */
224 if (ctx->in_tcp_sendpages)
225 return;
226
220 if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) { 227 if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
221 gfp_t sk_allocation = sk->sk_allocation; 228 gfp_t sk_allocation = sk->sk_allocation;
222 int rc; 229 int rc;
@@ -245,10 +252,10 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
245 lock_sock(sk); 252 lock_sock(sk);
246 sk_proto_close = ctx->sk_proto_close; 253 sk_proto_close = ctx->sk_proto_close;
247 254
248 if (ctx->conf == TLS_HW_RECORD) 255 if (ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD)
249 goto skip_tx_cleanup; 256 goto skip_tx_cleanup;
250 257
251 if (ctx->conf == TLS_BASE) { 258 if (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE) {
252 kfree(ctx); 259 kfree(ctx);
253 ctx = NULL; 260 ctx = NULL;
254 goto skip_tx_cleanup; 261 goto skip_tx_cleanup;
@@ -270,15 +277,26 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
270 } 277 }
271 } 278 }
272 279
273 kfree(ctx->tx.rec_seq); 280 /* We need these for tls_sw_fallback handling of other packets */
274 kfree(ctx->tx.iv); 281 if (ctx->tx_conf == TLS_SW) {
275 kfree(ctx->rx.rec_seq); 282 kfree(ctx->tx.rec_seq);
276 kfree(ctx->rx.iv); 283 kfree(ctx->tx.iv);
284 tls_sw_free_resources_tx(sk);
285 }
286
287 if (ctx->rx_conf == TLS_SW) {
288 kfree(ctx->rx.rec_seq);
289 kfree(ctx->rx.iv);
290 tls_sw_free_resources_rx(sk);
291 }
277 292
278 if (ctx->conf == TLS_SW_TX || 293#ifdef CONFIG_TLS_DEVICE
279 ctx->conf == TLS_SW_RX || 294 if (ctx->tx_conf != TLS_HW) {
280 ctx->conf == TLS_SW_RXTX) { 295#else
281 tls_sw_free_resources(sk); 296 {
297#endif
298 kfree(ctx);
299 ctx = NULL;
282 } 300 }
283 301
284skip_tx_cleanup: 302skip_tx_cleanup:
@@ -287,7 +305,8 @@ skip_tx_cleanup:
287 /* free ctx for TLS_HW_RECORD, used by tcp_set_state 305 /* free ctx for TLS_HW_RECORD, used by tcp_set_state
288 * for sk->sk_prot->unhash [tls_hw_unhash] 306 * for sk->sk_prot->unhash [tls_hw_unhash]
289 */ 307 */
290 if (ctx && ctx->conf == TLS_HW_RECORD) 308 if (ctx && ctx->tx_conf == TLS_HW_RECORD &&
309 ctx->rx_conf == TLS_HW_RECORD)
291 kfree(ctx); 310 kfree(ctx);
292} 311}
293 312
@@ -441,25 +460,29 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
441 goto err_crypto_info; 460 goto err_crypto_info;
442 } 461 }
443 462
444 /* currently SW is default, we will have ethtool in future */
445 if (tx) { 463 if (tx) {
446 rc = tls_set_sw_offload(sk, ctx, 1); 464#ifdef CONFIG_TLS_DEVICE
447 if (ctx->conf == TLS_SW_RX) 465 rc = tls_set_device_offload(sk, ctx);
448 conf = TLS_SW_RXTX; 466 conf = TLS_HW;
449 else 467 if (rc) {
450 conf = TLS_SW_TX; 468#else
469 {
470#endif
471 rc = tls_set_sw_offload(sk, ctx, 1);
472 conf = TLS_SW;
473 }
451 } else { 474 } else {
452 rc = tls_set_sw_offload(sk, ctx, 0); 475 rc = tls_set_sw_offload(sk, ctx, 0);
453 if (ctx->conf == TLS_SW_TX) 476 conf = TLS_SW;
454 conf = TLS_SW_RXTX;
455 else
456 conf = TLS_SW_RX;
457 } 477 }
458 478
459 if (rc) 479 if (rc)
460 goto err_crypto_info; 480 goto err_crypto_info;
461 481
462 ctx->conf = conf; 482 if (tx)
483 ctx->tx_conf = conf;
484 else
485 ctx->rx_conf = conf;
463 update_sk_prot(sk, ctx); 486 update_sk_prot(sk, ctx);
464 if (tx) { 487 if (tx) {
465 ctx->sk_write_space = sk->sk_write_space; 488 ctx->sk_write_space = sk->sk_write_space;
@@ -535,7 +558,8 @@ static int tls_hw_prot(struct sock *sk)
535 ctx->hash = sk->sk_prot->hash; 558 ctx->hash = sk->sk_prot->hash;
536 ctx->unhash = sk->sk_prot->unhash; 559 ctx->unhash = sk->sk_prot->unhash;
537 ctx->sk_proto_close = sk->sk_prot->close; 560 ctx->sk_proto_close = sk->sk_prot->close;
538 ctx->conf = TLS_HW_RECORD; 561 ctx->rx_conf = TLS_HW_RECORD;
562 ctx->tx_conf = TLS_HW_RECORD;
539 update_sk_prot(sk, ctx); 563 update_sk_prot(sk, ctx);
540 rc = 1; 564 rc = 1;
541 break; 565 break;
@@ -579,29 +603,40 @@ static int tls_hw_hash(struct sock *sk)
579 return err; 603 return err;
580} 604}
581 605
582static void build_protos(struct proto *prot, struct proto *base) 606static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
607 struct proto *base)
583{ 608{
584 prot[TLS_BASE] = *base; 609 prot[TLS_BASE][TLS_BASE] = *base;
585 prot[TLS_BASE].setsockopt = tls_setsockopt; 610 prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
586 prot[TLS_BASE].getsockopt = tls_getsockopt; 611 prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
587 prot[TLS_BASE].close = tls_sk_proto_close; 612 prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;
588 613
589 prot[TLS_SW_TX] = prot[TLS_BASE]; 614 prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
590 prot[TLS_SW_TX].sendmsg = tls_sw_sendmsg; 615 prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
591 prot[TLS_SW_TX].sendpage = tls_sw_sendpage; 616 prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;
592 617
593 prot[TLS_SW_RX] = prot[TLS_BASE]; 618 prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
594 prot[TLS_SW_RX].recvmsg = tls_sw_recvmsg; 619 prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
595 prot[TLS_SW_RX].close = tls_sk_proto_close; 620 prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;
596 621
597 prot[TLS_SW_RXTX] = prot[TLS_SW_TX]; 622 prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
598 prot[TLS_SW_RXTX].recvmsg = tls_sw_recvmsg; 623 prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
599 prot[TLS_SW_RXTX].close = tls_sk_proto_close; 624 prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;
600 625
601 prot[TLS_HW_RECORD] = *base; 626#ifdef CONFIG_TLS_DEVICE
602 prot[TLS_HW_RECORD].hash = tls_hw_hash; 627 prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
603 prot[TLS_HW_RECORD].unhash = tls_hw_unhash; 628 prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
604 prot[TLS_HW_RECORD].close = tls_sk_proto_close; 629 prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;
630
631 prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
632 prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
633 prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;
634#endif
635
636 prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
637 prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
638 prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
639 prot[TLS_HW_RECORD][TLS_HW_RECORD].close = tls_sk_proto_close;
605} 640}
606 641
607static int tls_init(struct sock *sk) 642static int tls_init(struct sock *sk)
@@ -632,7 +667,7 @@ static int tls_init(struct sock *sk)
632 ctx->getsockopt = sk->sk_prot->getsockopt; 667 ctx->getsockopt = sk->sk_prot->getsockopt;
633 ctx->sk_proto_close = sk->sk_prot->close; 668 ctx->sk_proto_close = sk->sk_prot->close;
634 669
635 /* Build IPv6 TLS whenever the address of tcpv6_prot changes */ 670 /* Build IPv6 TLS whenever the address of tcpv6 _prot changes */
636 if (ip_ver == TLSV6 && 671 if (ip_ver == TLSV6 &&
637 unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) { 672 unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
638 mutex_lock(&tcpv6_prot_mutex); 673 mutex_lock(&tcpv6_prot_mutex);
@@ -643,7 +678,8 @@ static int tls_init(struct sock *sk)
643 mutex_unlock(&tcpv6_prot_mutex); 678 mutex_unlock(&tcpv6_prot_mutex);
644 } 679 }
645 680
646 ctx->conf = TLS_BASE; 681 ctx->tx_conf = TLS_BASE;
682 ctx->rx_conf = TLS_BASE;
647 update_sk_prot(sk, ctx); 683 update_sk_prot(sk, ctx);
648out: 684out:
649 return rc; 685 return rc;
@@ -681,6 +717,9 @@ static int __init tls_register(void)
681 tls_sw_proto_ops.poll = tls_sw_poll; 717 tls_sw_proto_ops.poll = tls_sw_poll;
682 tls_sw_proto_ops.splice_read = tls_sw_splice_read; 718 tls_sw_proto_ops.splice_read = tls_sw_splice_read;
683 719
720#ifdef CONFIG_TLS_DEVICE
721 tls_device_init();
722#endif
684 tcp_register_ulp(&tcp_tls_ulp_ops); 723 tcp_register_ulp(&tcp_tls_ulp_ops);
685 724
686 return 0; 725 return 0;
@@ -689,6 +728,9 @@ static int __init tls_register(void)
689static void __exit tls_unregister(void) 728static void __exit tls_unregister(void)
690{ 729{
691 tcp_unregister_ulp(&tcp_tls_ulp_ops); 730 tcp_unregister_ulp(&tcp_tls_ulp_ops);
731#ifdef CONFIG_TLS_DEVICE
732 tls_device_cleanup();
733#endif
692} 734}
693 735
694module_init(tls_register); 736module_init(tls_register);
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 6ed1c02cfc94..5c3909c311f1 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -52,7 +52,7 @@ static int tls_do_decryption(struct sock *sk,
52 gfp_t flags) 52 gfp_t flags)
53{ 53{
54 struct tls_context *tls_ctx = tls_get_ctx(sk); 54 struct tls_context *tls_ctx = tls_get_ctx(sk);
55 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 55 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
56 struct strp_msg *rxm = strp_msg(skb); 56 struct strp_msg *rxm = strp_msg(skb);
57 struct aead_request *aead_req; 57 struct aead_request *aead_req;
58 58
@@ -122,7 +122,7 @@ out:
122static void trim_both_sgl(struct sock *sk, int target_size) 122static void trim_both_sgl(struct sock *sk, int target_size)
123{ 123{
124 struct tls_context *tls_ctx = tls_get_ctx(sk); 124 struct tls_context *tls_ctx = tls_get_ctx(sk);
125 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 125 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
126 126
127 trim_sg(sk, ctx->sg_plaintext_data, 127 trim_sg(sk, ctx->sg_plaintext_data,
128 &ctx->sg_plaintext_num_elem, 128 &ctx->sg_plaintext_num_elem,
@@ -141,7 +141,7 @@ static void trim_both_sgl(struct sock *sk, int target_size)
141static int alloc_encrypted_sg(struct sock *sk, int len) 141static int alloc_encrypted_sg(struct sock *sk, int len)
142{ 142{
143 struct tls_context *tls_ctx = tls_get_ctx(sk); 143 struct tls_context *tls_ctx = tls_get_ctx(sk);
144 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 144 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
145 int rc = 0; 145 int rc = 0;
146 146
147 rc = sk_alloc_sg(sk, len, 147 rc = sk_alloc_sg(sk, len,
@@ -155,7 +155,7 @@ static int alloc_encrypted_sg(struct sock *sk, int len)
155static int alloc_plaintext_sg(struct sock *sk, int len) 155static int alloc_plaintext_sg(struct sock *sk, int len)
156{ 156{
157 struct tls_context *tls_ctx = tls_get_ctx(sk); 157 struct tls_context *tls_ctx = tls_get_ctx(sk);
158 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 158 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
159 int rc = 0; 159 int rc = 0;
160 160
161 rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0, 161 rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0,
@@ -181,7 +181,7 @@ static void free_sg(struct sock *sk, struct scatterlist *sg,
181static void tls_free_both_sg(struct sock *sk) 181static void tls_free_both_sg(struct sock *sk)
182{ 182{
183 struct tls_context *tls_ctx = tls_get_ctx(sk); 183 struct tls_context *tls_ctx = tls_get_ctx(sk);
184 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 184 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
185 185
186 free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem, 186 free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
187 &ctx->sg_encrypted_size); 187 &ctx->sg_encrypted_size);
@@ -191,7 +191,7 @@ static void tls_free_both_sg(struct sock *sk)
191} 191}
192 192
193static int tls_do_encryption(struct tls_context *tls_ctx, 193static int tls_do_encryption(struct tls_context *tls_ctx,
194 struct tls_sw_context *ctx, size_t data_len, 194 struct tls_sw_context_tx *ctx, size_t data_len,
195 gfp_t flags) 195 gfp_t flags)
196{ 196{
197 unsigned int req_size = sizeof(struct aead_request) + 197 unsigned int req_size = sizeof(struct aead_request) +
@@ -227,7 +227,7 @@ static int tls_push_record(struct sock *sk, int flags,
227 unsigned char record_type) 227 unsigned char record_type)
228{ 228{
229 struct tls_context *tls_ctx = tls_get_ctx(sk); 229 struct tls_context *tls_ctx = tls_get_ctx(sk);
230 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 230 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
231 int rc; 231 int rc;
232 232
233 sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1); 233 sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
@@ -339,7 +339,7 @@ static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
339 int bytes) 339 int bytes)
340{ 340{
341 struct tls_context *tls_ctx = tls_get_ctx(sk); 341 struct tls_context *tls_ctx = tls_get_ctx(sk);
342 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 342 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
343 struct scatterlist *sg = ctx->sg_plaintext_data; 343 struct scatterlist *sg = ctx->sg_plaintext_data;
344 int copy, i, rc = 0; 344 int copy, i, rc = 0;
345 345
@@ -367,7 +367,7 @@ out:
367int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 367int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
368{ 368{
369 struct tls_context *tls_ctx = tls_get_ctx(sk); 369 struct tls_context *tls_ctx = tls_get_ctx(sk);
370 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 370 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
371 int ret = 0; 371 int ret = 0;
372 int required_size; 372 int required_size;
373 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 373 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
@@ -522,7 +522,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
522 int offset, size_t size, int flags) 522 int offset, size_t size, int flags)
523{ 523{
524 struct tls_context *tls_ctx = tls_get_ctx(sk); 524 struct tls_context *tls_ctx = tls_get_ctx(sk);
525 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 525 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
526 int ret = 0; 526 int ret = 0;
527 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 527 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
528 bool eor; 528 bool eor;
@@ -636,7 +636,7 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
636 long timeo, int *err) 636 long timeo, int *err)
637{ 637{
638 struct tls_context *tls_ctx = tls_get_ctx(sk); 638 struct tls_context *tls_ctx = tls_get_ctx(sk);
639 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 639 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
640 struct sk_buff *skb; 640 struct sk_buff *skb;
641 DEFINE_WAIT_FUNC(wait, woken_wake_function); 641 DEFINE_WAIT_FUNC(wait, woken_wake_function);
642 642
@@ -674,7 +674,7 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
674 struct scatterlist *sgout) 674 struct scatterlist *sgout)
675{ 675{
676 struct tls_context *tls_ctx = tls_get_ctx(sk); 676 struct tls_context *tls_ctx = tls_get_ctx(sk);
677 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 677 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
678 char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + MAX_IV_SIZE]; 678 char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + MAX_IV_SIZE];
679 struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2]; 679 struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2];
680 struct scatterlist *sgin = &sgin_arr[0]; 680 struct scatterlist *sgin = &sgin_arr[0];
@@ -723,7 +723,7 @@ static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
723 unsigned int len) 723 unsigned int len)
724{ 724{
725 struct tls_context *tls_ctx = tls_get_ctx(sk); 725 struct tls_context *tls_ctx = tls_get_ctx(sk);
726 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 726 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
727 struct strp_msg *rxm = strp_msg(skb); 727 struct strp_msg *rxm = strp_msg(skb);
728 728
729 if (len < rxm->full_len) { 729 if (len < rxm->full_len) {
@@ -749,7 +749,7 @@ int tls_sw_recvmsg(struct sock *sk,
749 int *addr_len) 749 int *addr_len)
750{ 750{
751 struct tls_context *tls_ctx = tls_get_ctx(sk); 751 struct tls_context *tls_ctx = tls_get_ctx(sk);
752 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 752 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
753 unsigned char control; 753 unsigned char control;
754 struct strp_msg *rxm; 754 struct strp_msg *rxm;
755 struct sk_buff *skb; 755 struct sk_buff *skb;
@@ -869,7 +869,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
869 size_t len, unsigned int flags) 869 size_t len, unsigned int flags)
870{ 870{
871 struct tls_context *tls_ctx = tls_get_ctx(sock->sk); 871 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
872 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 872 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
873 struct strp_msg *rxm = NULL; 873 struct strp_msg *rxm = NULL;
874 struct sock *sk = sock->sk; 874 struct sock *sk = sock->sk;
875 struct sk_buff *skb; 875 struct sk_buff *skb;
@@ -922,7 +922,7 @@ unsigned int tls_sw_poll(struct file *file, struct socket *sock,
922 unsigned int ret; 922 unsigned int ret;
923 struct sock *sk = sock->sk; 923 struct sock *sk = sock->sk;
924 struct tls_context *tls_ctx = tls_get_ctx(sk); 924 struct tls_context *tls_ctx = tls_get_ctx(sk);
925 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 925 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
926 926
927 /* Grab POLLOUT and POLLHUP from the underlying socket */ 927 /* Grab POLLOUT and POLLHUP from the underlying socket */
928 ret = ctx->sk_poll(file, sock, wait); 928 ret = ctx->sk_poll(file, sock, wait);
@@ -938,7 +938,7 @@ unsigned int tls_sw_poll(struct file *file, struct socket *sock,
938static int tls_read_size(struct strparser *strp, struct sk_buff *skb) 938static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
939{ 939{
940 struct tls_context *tls_ctx = tls_get_ctx(strp->sk); 940 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
941 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 941 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
942 char header[tls_ctx->rx.prepend_size]; 942 char header[tls_ctx->rx.prepend_size];
943 struct strp_msg *rxm = strp_msg(skb); 943 struct strp_msg *rxm = strp_msg(skb);
944 size_t cipher_overhead; 944 size_t cipher_overhead;
@@ -987,7 +987,7 @@ read_failure:
987static void tls_queue(struct strparser *strp, struct sk_buff *skb) 987static void tls_queue(struct strparser *strp, struct sk_buff *skb)
988{ 988{
989 struct tls_context *tls_ctx = tls_get_ctx(strp->sk); 989 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
990 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 990 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
991 struct strp_msg *rxm; 991 struct strp_msg *rxm;
992 992
993 rxm = strp_msg(skb); 993 rxm = strp_msg(skb);
@@ -1003,18 +1003,28 @@ static void tls_queue(struct strparser *strp, struct sk_buff *skb)
1003static void tls_data_ready(struct sock *sk) 1003static void tls_data_ready(struct sock *sk)
1004{ 1004{
1005 struct tls_context *tls_ctx = tls_get_ctx(sk); 1005 struct tls_context *tls_ctx = tls_get_ctx(sk);
1006 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 1006 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1007 1007
1008 strp_data_ready(&ctx->strp); 1008 strp_data_ready(&ctx->strp);
1009} 1009}
1010 1010
1011void tls_sw_free_resources(struct sock *sk) 1011void tls_sw_free_resources_tx(struct sock *sk)
1012{ 1012{
1013 struct tls_context *tls_ctx = tls_get_ctx(sk); 1013 struct tls_context *tls_ctx = tls_get_ctx(sk);
1014 struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); 1014 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1015 1015
1016 if (ctx->aead_send) 1016 if (ctx->aead_send)
1017 crypto_free_aead(ctx->aead_send); 1017 crypto_free_aead(ctx->aead_send);
1018 tls_free_both_sg(sk);
1019
1020 kfree(ctx);
1021}
1022
1023void tls_sw_free_resources_rx(struct sock *sk)
1024{
1025 struct tls_context *tls_ctx = tls_get_ctx(sk);
1026 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1027
1018 if (ctx->aead_recv) { 1028 if (ctx->aead_recv) {
1019 if (ctx->recv_pkt) { 1029 if (ctx->recv_pkt) {
1020 kfree_skb(ctx->recv_pkt); 1030 kfree_skb(ctx->recv_pkt);
@@ -1030,10 +1040,7 @@ void tls_sw_free_resources(struct sock *sk)
1030 lock_sock(sk); 1040 lock_sock(sk);
1031 } 1041 }
1032 1042
1033 tls_free_both_sg(sk);
1034
1035 kfree(ctx); 1043 kfree(ctx);
1036 kfree(tls_ctx);
1037} 1044}
1038 1045
1039int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) 1046int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
@@ -1041,7 +1048,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
1041 char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE]; 1048 char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
1042 struct tls_crypto_info *crypto_info; 1049 struct tls_crypto_info *crypto_info;
1043 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info; 1050 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
1044 struct tls_sw_context *sw_ctx; 1051 struct tls_sw_context_tx *sw_ctx_tx = NULL;
1052 struct tls_sw_context_rx *sw_ctx_rx = NULL;
1045 struct cipher_context *cctx; 1053 struct cipher_context *cctx;
1046 struct crypto_aead **aead; 1054 struct crypto_aead **aead;
1047 struct strp_callbacks cb; 1055 struct strp_callbacks cb;
@@ -1054,27 +1062,32 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
1054 goto out; 1062 goto out;
1055 } 1063 }
1056 1064
1057 if (!ctx->priv_ctx) { 1065 if (tx) {
1058 sw_ctx = kzalloc(sizeof(*sw_ctx), GFP_KERNEL); 1066 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
1059 if (!sw_ctx) { 1067 if (!sw_ctx_tx) {
1060 rc = -ENOMEM; 1068 rc = -ENOMEM;
1061 goto out; 1069 goto out;
1062 } 1070 }
1063 crypto_init_wait(&sw_ctx->async_wait); 1071 crypto_init_wait(&sw_ctx_tx->async_wait);
1072 ctx->priv_ctx_tx = sw_ctx_tx;
1064 } else { 1073 } else {
1065 sw_ctx = ctx->priv_ctx; 1074 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
1075 if (!sw_ctx_rx) {
1076 rc = -ENOMEM;
1077 goto out;
1078 }
1079 crypto_init_wait(&sw_ctx_rx->async_wait);
1080 ctx->priv_ctx_rx = sw_ctx_rx;
1066 } 1081 }
1067 1082
1068 ctx->priv_ctx = (struct tls_offload_context *)sw_ctx;
1069
1070 if (tx) { 1083 if (tx) {
1071 crypto_info = &ctx->crypto_send; 1084 crypto_info = &ctx->crypto_send;
1072 cctx = &ctx->tx; 1085 cctx = &ctx->tx;
1073 aead = &sw_ctx->aead_send; 1086 aead = &sw_ctx_tx->aead_send;
1074 } else { 1087 } else {
1075 crypto_info = &ctx->crypto_recv; 1088 crypto_info = &ctx->crypto_recv;
1076 cctx = &ctx->rx; 1089 cctx = &ctx->rx;
1077 aead = &sw_ctx->aead_recv; 1090 aead = &sw_ctx_rx->aead_recv;
1078 } 1091 }
1079 1092
1080 switch (crypto_info->cipher_type) { 1093 switch (crypto_info->cipher_type) {
@@ -1121,22 +1134,24 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
1121 } 1134 }
1122 memcpy(cctx->rec_seq, rec_seq, rec_seq_size); 1135 memcpy(cctx->rec_seq, rec_seq, rec_seq_size);
1123 1136
1124 if (tx) { 1137 if (sw_ctx_tx) {
1125 sg_init_table(sw_ctx->sg_encrypted_data, 1138 sg_init_table(sw_ctx_tx->sg_encrypted_data,
1126 ARRAY_SIZE(sw_ctx->sg_encrypted_data)); 1139 ARRAY_SIZE(sw_ctx_tx->sg_encrypted_data));
1127 sg_init_table(sw_ctx->sg_plaintext_data, 1140 sg_init_table(sw_ctx_tx->sg_plaintext_data,
1128 ARRAY_SIZE(sw_ctx->sg_plaintext_data)); 1141 ARRAY_SIZE(sw_ctx_tx->sg_plaintext_data));
1129 1142
1130 sg_init_table(sw_ctx->sg_aead_in, 2); 1143 sg_init_table(sw_ctx_tx->sg_aead_in, 2);
1131 sg_set_buf(&sw_ctx->sg_aead_in[0], sw_ctx->aad_space, 1144 sg_set_buf(&sw_ctx_tx->sg_aead_in[0], sw_ctx_tx->aad_space,
1132 sizeof(sw_ctx->aad_space)); 1145 sizeof(sw_ctx_tx->aad_space));
1133 sg_unmark_end(&sw_ctx->sg_aead_in[1]); 1146 sg_unmark_end(&sw_ctx_tx->sg_aead_in[1]);
1134 sg_chain(sw_ctx->sg_aead_in, 2, sw_ctx->sg_plaintext_data); 1147 sg_chain(sw_ctx_tx->sg_aead_in, 2,
1135 sg_init_table(sw_ctx->sg_aead_out, 2); 1148 sw_ctx_tx->sg_plaintext_data);
1136 sg_set_buf(&sw_ctx->sg_aead_out[0], sw_ctx->aad_space, 1149 sg_init_table(sw_ctx_tx->sg_aead_out, 2);
1137 sizeof(sw_ctx->aad_space)); 1150 sg_set_buf(&sw_ctx_tx->sg_aead_out[0], sw_ctx_tx->aad_space,
1138 sg_unmark_end(&sw_ctx->sg_aead_out[1]); 1151 sizeof(sw_ctx_tx->aad_space));
1139 sg_chain(sw_ctx->sg_aead_out, 2, sw_ctx->sg_encrypted_data); 1152 sg_unmark_end(&sw_ctx_tx->sg_aead_out[1]);
1153 sg_chain(sw_ctx_tx->sg_aead_out, 2,
1154 sw_ctx_tx->sg_encrypted_data);
1140 } 1155 }
1141 1156
1142 if (!*aead) { 1157 if (!*aead) {
@@ -1161,22 +1176,22 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
1161 if (rc) 1176 if (rc)
1162 goto free_aead; 1177 goto free_aead;
1163 1178
1164 if (!tx) { 1179 if (sw_ctx_rx) {
1165 /* Set up strparser */ 1180 /* Set up strparser */
1166 memset(&cb, 0, sizeof(cb)); 1181 memset(&cb, 0, sizeof(cb));
1167 cb.rcv_msg = tls_queue; 1182 cb.rcv_msg = tls_queue;
1168 cb.parse_msg = tls_read_size; 1183 cb.parse_msg = tls_read_size;
1169 1184
1170 strp_init(&sw_ctx->strp, sk, &cb); 1185 strp_init(&sw_ctx_rx->strp, sk, &cb);
1171 1186
1172 write_lock_bh(&sk->sk_callback_lock); 1187 write_lock_bh(&sk->sk_callback_lock);
1173 sw_ctx->saved_data_ready = sk->sk_data_ready; 1188 sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
1174 sk->sk_data_ready = tls_data_ready; 1189 sk->sk_data_ready = tls_data_ready;
1175 write_unlock_bh(&sk->sk_callback_lock); 1190 write_unlock_bh(&sk->sk_callback_lock);
1176 1191
1177 sw_ctx->sk_poll = sk->sk_socket->ops->poll; 1192 sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;
1178 1193
1179 strp_check_rcv(&sw_ctx->strp); 1194 strp_check_rcv(&sw_ctx_rx->strp);
1180 } 1195 }
1181 1196
1182 goto out; 1197 goto out;
@@ -1188,11 +1203,16 @@ free_rec_seq:
1188 kfree(cctx->rec_seq); 1203 kfree(cctx->rec_seq);
1189 cctx->rec_seq = NULL; 1204 cctx->rec_seq = NULL;
1190free_iv: 1205free_iv:
1191 kfree(ctx->tx.iv); 1206 kfree(cctx->iv);
1192 ctx->tx.iv = NULL; 1207 cctx->iv = NULL;
1193free_priv: 1208free_priv:
1194 kfree(ctx->priv_ctx); 1209 if (tx) {
1195 ctx->priv_ctx = NULL; 1210 kfree(ctx->priv_ctx_tx);
1211 ctx->priv_ctx_tx = NULL;
1212 } else {
1213 kfree(ctx->priv_ctx_rx);
1214 ctx->priv_ctx_rx = NULL;
1215 }
1196out: 1216out:
1197 return rc; 1217 return rc;
1198} 1218}
diff --git a/sound/core/control.c b/sound/core/control.c
index 69734b0eafd0..9aa15bfc7936 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -1492,7 +1492,7 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
1492 int op_flag) 1492 int op_flag)
1493{ 1493{
1494 struct snd_ctl_tlv header; 1494 struct snd_ctl_tlv header;
1495 unsigned int *container; 1495 unsigned int __user *container;
1496 unsigned int container_size; 1496 unsigned int container_size;
1497 struct snd_kcontrol *kctl; 1497 struct snd_kcontrol *kctl;
1498 struct snd_ctl_elem_id id; 1498 struct snd_ctl_elem_id id;
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index b719d0bd833e..06d7c40af570 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -27,10 +27,11 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
27 s32 __user *src) 27 s32 __user *src)
28{ 28{
29 snd_pcm_sframes_t delay; 29 snd_pcm_sframes_t delay;
30 int err;
30 31
31 delay = snd_pcm_delay(substream); 32 err = snd_pcm_delay(substream, &delay);
32 if (delay < 0) 33 if (err)
33 return delay; 34 return err;
34 if (put_user(delay, src)) 35 if (put_user(delay, src))
35 return -EFAULT; 36 return -EFAULT;
36 return 0; 37 return 0;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 35ffccea94c3..0e875d5a9e86 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2692,7 +2692,8 @@ static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
2692 return err; 2692 return err;
2693} 2693}
2694 2694
2695static snd_pcm_sframes_t snd_pcm_delay(struct snd_pcm_substream *substream) 2695static int snd_pcm_delay(struct snd_pcm_substream *substream,
2696 snd_pcm_sframes_t *delay)
2696{ 2697{
2697 struct snd_pcm_runtime *runtime = substream->runtime; 2698 struct snd_pcm_runtime *runtime = substream->runtime;
2698 int err; 2699 int err;
@@ -2708,7 +2709,9 @@ static snd_pcm_sframes_t snd_pcm_delay(struct snd_pcm_substream *substream)
2708 n += runtime->delay; 2709 n += runtime->delay;
2709 } 2710 }
2710 snd_pcm_stream_unlock_irq(substream); 2711 snd_pcm_stream_unlock_irq(substream);
2711 return err < 0 ? err : n; 2712 if (!err)
2713 *delay = n;
2714 return err;
2712} 2715}
2713 2716
2714static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream, 2717static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
@@ -2751,6 +2754,7 @@ static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
2751 sync_ptr.s.status.hw_ptr = status->hw_ptr; 2754 sync_ptr.s.status.hw_ptr = status->hw_ptr;
2752 sync_ptr.s.status.tstamp = status->tstamp; 2755 sync_ptr.s.status.tstamp = status->tstamp;
2753 sync_ptr.s.status.suspended_state = status->suspended_state; 2756 sync_ptr.s.status.suspended_state = status->suspended_state;
2757 sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
2754 snd_pcm_stream_unlock_irq(substream); 2758 snd_pcm_stream_unlock_irq(substream);
2755 if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr))) 2759 if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
2756 return -EFAULT; 2760 return -EFAULT;
@@ -2916,11 +2920,13 @@ static int snd_pcm_common_ioctl(struct file *file,
2916 return snd_pcm_hwsync(substream); 2920 return snd_pcm_hwsync(substream);
2917 case SNDRV_PCM_IOCTL_DELAY: 2921 case SNDRV_PCM_IOCTL_DELAY:
2918 { 2922 {
2919 snd_pcm_sframes_t delay = snd_pcm_delay(substream); 2923 snd_pcm_sframes_t delay;
2920 snd_pcm_sframes_t __user *res = arg; 2924 snd_pcm_sframes_t __user *res = arg;
2925 int err;
2921 2926
2922 if (delay < 0) 2927 err = snd_pcm_delay(substream, &delay);
2923 return delay; 2928 if (err)
2929 return err;
2924 if (put_user(delay, res)) 2930 if (put_user(delay, res))
2925 return -EFAULT; 2931 return -EFAULT;
2926 return 0; 2932 return 0;
@@ -3008,13 +3014,7 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
3008 case SNDRV_PCM_IOCTL_DROP: 3014 case SNDRV_PCM_IOCTL_DROP:
3009 return snd_pcm_drop(substream); 3015 return snd_pcm_drop(substream);
3010 case SNDRV_PCM_IOCTL_DELAY: 3016 case SNDRV_PCM_IOCTL_DELAY:
3011 { 3017 return snd_pcm_delay(substream, frames);
3012 result = snd_pcm_delay(substream);
3013 if (result < 0)
3014 return result;
3015 *frames = result;
3016 return 0;
3017 }
3018 default: 3018 default:
3019 return -EINVAL; 3019 return -EINVAL;
3020 } 3020 }
@@ -3234,7 +3234,7 @@ static __poll_t snd_pcm_capture_poll(struct file *file, poll_table * wait)
3234/* 3234/*
3235 * mmap status record 3235 * mmap status record
3236 */ 3236 */
3237static int snd_pcm_mmap_status_fault(struct vm_fault *vmf) 3237static vm_fault_t snd_pcm_mmap_status_fault(struct vm_fault *vmf)
3238{ 3238{
3239 struct snd_pcm_substream *substream = vmf->vma->vm_private_data; 3239 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3240 struct snd_pcm_runtime *runtime; 3240 struct snd_pcm_runtime *runtime;
@@ -3270,7 +3270,7 @@ static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file
3270/* 3270/*
3271 * mmap control record 3271 * mmap control record
3272 */ 3272 */
3273static int snd_pcm_mmap_control_fault(struct vm_fault *vmf) 3273static vm_fault_t snd_pcm_mmap_control_fault(struct vm_fault *vmf)
3274{ 3274{
3275 struct snd_pcm_substream *substream = vmf->vma->vm_private_data; 3275 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3276 struct snd_pcm_runtime *runtime; 3276 struct snd_pcm_runtime *runtime;
@@ -3359,7 +3359,7 @@ snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
3359/* 3359/*
3360 * fault callback for mmapping a RAM page 3360 * fault callback for mmapping a RAM page
3361 */ 3361 */
3362static int snd_pcm_mmap_data_fault(struct vm_fault *vmf) 3362static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
3363{ 3363{
3364 struct snd_pcm_substream *substream = vmf->vma->vm_private_data; 3364 struct snd_pcm_substream *substream = vmf->vma->vm_private_data;
3365 struct snd_pcm_runtime *runtime; 3365 struct snd_pcm_runtime *runtime;
diff --git a/sound/core/seq/oss/seq_oss_event.c b/sound/core/seq/oss/seq_oss_event.c
index c3908862bc8b..86ca584c27b2 100644
--- a/sound/core/seq/oss/seq_oss_event.c
+++ b/sound/core/seq/oss/seq_oss_event.c
@@ -26,6 +26,7 @@
26#include <sound/seq_oss_legacy.h> 26#include <sound/seq_oss_legacy.h>
27#include "seq_oss_readq.h" 27#include "seq_oss_readq.h"
28#include "seq_oss_writeq.h" 28#include "seq_oss_writeq.h"
29#include <linux/nospec.h>
29 30
30 31
31/* 32/*
@@ -287,10 +288,10 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
287{ 288{
288 struct seq_oss_synthinfo *info; 289 struct seq_oss_synthinfo *info;
289 290
290 if (!snd_seq_oss_synth_is_valid(dp, dev)) 291 info = snd_seq_oss_synth_info(dp, dev);
292 if (!info)
291 return -ENXIO; 293 return -ENXIO;
292 294
293 info = &dp->synths[dev];
294 switch (info->arg.event_passing) { 295 switch (info->arg.event_passing) {
295 case SNDRV_SEQ_OSS_PROCESS_EVENTS: 296 case SNDRV_SEQ_OSS_PROCESS_EVENTS:
296 if (! info->ch || ch < 0 || ch >= info->nr_voices) { 297 if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -298,6 +299,7 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
298 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev); 299 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
299 } 300 }
300 301
302 ch = array_index_nospec(ch, info->nr_voices);
301 if (note == 255 && info->ch[ch].note >= 0) { 303 if (note == 255 && info->ch[ch].note >= 0) {
302 /* volume control */ 304 /* volume control */
303 int type; 305 int type;
@@ -347,10 +349,10 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
347{ 349{
348 struct seq_oss_synthinfo *info; 350 struct seq_oss_synthinfo *info;
349 351
350 if (!snd_seq_oss_synth_is_valid(dp, dev)) 352 info = snd_seq_oss_synth_info(dp, dev);
353 if (!info)
351 return -ENXIO; 354 return -ENXIO;
352 355
353 info = &dp->synths[dev];
354 switch (info->arg.event_passing) { 356 switch (info->arg.event_passing) {
355 case SNDRV_SEQ_OSS_PROCESS_EVENTS: 357 case SNDRV_SEQ_OSS_PROCESS_EVENTS:
356 if (! info->ch || ch < 0 || ch >= info->nr_voices) { 358 if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -358,6 +360,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
358 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev); 360 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
359 } 361 }
360 362
363 ch = array_index_nospec(ch, info->nr_voices);
361 if (info->ch[ch].note >= 0) { 364 if (info->ch[ch].note >= 0) {
362 note = info->ch[ch].note; 365 note = info->ch[ch].note;
363 info->ch[ch].vel = 0; 366 info->ch[ch].vel = 0;
@@ -381,7 +384,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
381static int 384static int
382set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev) 385set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev)
383{ 386{
384 if (! snd_seq_oss_synth_is_valid(dp, dev)) 387 if (!snd_seq_oss_synth_info(dp, dev))
385 return -ENXIO; 388 return -ENXIO;
386 389
387 ev->type = type; 390 ev->type = type;
@@ -399,7 +402,7 @@ set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note,
399static int 402static int
400set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev) 403set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev)
401{ 404{
402 if (! snd_seq_oss_synth_is_valid(dp, dev)) 405 if (!snd_seq_oss_synth_info(dp, dev))
403 return -ENXIO; 406 return -ENXIO;
404 407
405 ev->type = type; 408 ev->type = type;
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index b30b2139e3f0..9debd1b8fd28 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -29,6 +29,7 @@
29#include "../seq_lock.h" 29#include "../seq_lock.h"
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/nospec.h>
32 33
33 34
34/* 35/*
@@ -315,6 +316,7 @@ get_mididev(struct seq_oss_devinfo *dp, int dev)
315{ 316{
316 if (dev < 0 || dev >= dp->max_mididev) 317 if (dev < 0 || dev >= dp->max_mididev)
317 return NULL; 318 return NULL;
319 dev = array_index_nospec(dev, dp->max_mididev);
318 return get_mdev(dev); 320 return get_mdev(dev);
319} 321}
320 322
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index cd0e0ebbfdb1..278ebb993122 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -26,6 +26,7 @@
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/nospec.h>
29 30
30/* 31/*
31 * constants 32 * constants
@@ -339,17 +340,13 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp)
339 dp->max_synthdev = 0; 340 dp->max_synthdev = 0;
340} 341}
341 342
342/* 343static struct seq_oss_synthinfo *
343 * check if the specified device is MIDI mapped device 344get_synthinfo_nospec(struct seq_oss_devinfo *dp, int dev)
344 */
345static int
346is_midi_dev(struct seq_oss_devinfo *dp, int dev)
347{ 345{
348 if (dev < 0 || dev >= dp->max_synthdev) 346 if (dev < 0 || dev >= dp->max_synthdev)
349 return 0; 347 return NULL;
350 if (dp->synths[dev].is_midi) 348 dev = array_index_nospec(dev, SNDRV_SEQ_OSS_MAX_SYNTH_DEVS);
351 return 1; 349 return &dp->synths[dev];
352 return 0;
353} 350}
354 351
355/* 352/*
@@ -359,14 +356,20 @@ static struct seq_oss_synth *
359get_synthdev(struct seq_oss_devinfo *dp, int dev) 356get_synthdev(struct seq_oss_devinfo *dp, int dev)
360{ 357{
361 struct seq_oss_synth *rec; 358 struct seq_oss_synth *rec;
362 if (dev < 0 || dev >= dp->max_synthdev) 359 struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
363 return NULL; 360
364 if (! dp->synths[dev].opened) 361 if (!info)
365 return NULL; 362 return NULL;
366 if (dp->synths[dev].is_midi) 363 if (!info->opened)
367 return &midi_synth_dev;
368 if ((rec = get_sdev(dev)) == NULL)
369 return NULL; 364 return NULL;
365 if (info->is_midi) {
366 rec = &midi_synth_dev;
367 snd_use_lock_use(&rec->use_lock);
368 } else {
369 rec = get_sdev(dev);
370 if (!rec)
371 return NULL;
372 }
370 if (! rec->opened) { 373 if (! rec->opened) {
371 snd_use_lock_free(&rec->use_lock); 374 snd_use_lock_free(&rec->use_lock);
372 return NULL; 375 return NULL;
@@ -402,10 +405,8 @@ snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev)
402 struct seq_oss_synth *rec; 405 struct seq_oss_synth *rec;
403 struct seq_oss_synthinfo *info; 406 struct seq_oss_synthinfo *info;
404 407
405 if (snd_BUG_ON(dev < 0 || dev >= dp->max_synthdev)) 408 info = get_synthinfo_nospec(dp, dev);
406 return; 409 if (!info || !info->opened)
407 info = &dp->synths[dev];
408 if (! info->opened)
409 return; 410 return;
410 if (info->sysex) 411 if (info->sysex)
411 info->sysex->len = 0; /* reset sysex */ 412 info->sysex->len = 0; /* reset sysex */
@@ -454,12 +455,14 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
454 const char __user *buf, int p, int c) 455 const char __user *buf, int p, int c)
455{ 456{
456 struct seq_oss_synth *rec; 457 struct seq_oss_synth *rec;
458 struct seq_oss_synthinfo *info;
457 int rc; 459 int rc;
458 460
459 if (dev < 0 || dev >= dp->max_synthdev) 461 info = get_synthinfo_nospec(dp, dev);
462 if (!info)
460 return -ENXIO; 463 return -ENXIO;
461 464
462 if (is_midi_dev(dp, dev)) 465 if (info->is_midi)
463 return 0; 466 return 0;
464 if ((rec = get_synthdev(dp, dev)) == NULL) 467 if ((rec = get_synthdev(dp, dev)) == NULL)
465 return -ENXIO; 468 return -ENXIO;
@@ -467,24 +470,25 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
467 if (rec->oper.load_patch == NULL) 470 if (rec->oper.load_patch == NULL)
468 rc = -ENXIO; 471 rc = -ENXIO;
469 else 472 else
470 rc = rec->oper.load_patch(&dp->synths[dev].arg, fmt, buf, p, c); 473 rc = rec->oper.load_patch(&info->arg, fmt, buf, p, c);
471 snd_use_lock_free(&rec->use_lock); 474 snd_use_lock_free(&rec->use_lock);
472 return rc; 475 return rc;
473} 476}
474 477
475/* 478/*
476 * check if the device is valid synth device 479 * check if the device is valid synth device and return the synth info
477 */ 480 */
478int 481struct seq_oss_synthinfo *
479snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev) 482snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, int dev)
480{ 483{
481 struct seq_oss_synth *rec; 484 struct seq_oss_synth *rec;
485
482 rec = get_synthdev(dp, dev); 486 rec = get_synthdev(dp, dev);
483 if (rec) { 487 if (rec) {
484 snd_use_lock_free(&rec->use_lock); 488 snd_use_lock_free(&rec->use_lock);
485 return 1; 489 return get_synthinfo_nospec(dp, dev);
486 } 490 }
487 return 0; 491 return NULL;
488} 492}
489 493
490 494
@@ -499,16 +503,18 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
499 int i, send; 503 int i, send;
500 unsigned char *dest; 504 unsigned char *dest;
501 struct seq_oss_synth_sysex *sysex; 505 struct seq_oss_synth_sysex *sysex;
506 struct seq_oss_synthinfo *info;
502 507
503 if (! snd_seq_oss_synth_is_valid(dp, dev)) 508 info = snd_seq_oss_synth_info(dp, dev);
509 if (!info)
504 return -ENXIO; 510 return -ENXIO;
505 511
506 sysex = dp->synths[dev].sysex; 512 sysex = info->sysex;
507 if (sysex == NULL) { 513 if (sysex == NULL) {
508 sysex = kzalloc(sizeof(*sysex), GFP_KERNEL); 514 sysex = kzalloc(sizeof(*sysex), GFP_KERNEL);
509 if (sysex == NULL) 515 if (sysex == NULL)
510 return -ENOMEM; 516 return -ENOMEM;
511 dp->synths[dev].sysex = sysex; 517 info->sysex = sysex;
512 } 518 }
513 519
514 send = 0; 520 send = 0;
@@ -553,10 +559,12 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
553int 559int
554snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev) 560snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev)
555{ 561{
556 if (! snd_seq_oss_synth_is_valid(dp, dev)) 562 struct seq_oss_synthinfo *info = snd_seq_oss_synth_info(dp, dev);
563
564 if (!info)
557 return -EINVAL; 565 return -EINVAL;
558 snd_seq_oss_fill_addr(dp, ev, dp->synths[dev].arg.addr.client, 566 snd_seq_oss_fill_addr(dp, ev, info->arg.addr.client,
559 dp->synths[dev].arg.addr.port); 567 info->arg.addr.port);
560 return 0; 568 return 0;
561} 569}
562 570
@@ -568,16 +576,18 @@ int
568snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr) 576snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr)
569{ 577{
570 struct seq_oss_synth *rec; 578 struct seq_oss_synth *rec;
579 struct seq_oss_synthinfo *info;
571 int rc; 580 int rc;
572 581
573 if (is_midi_dev(dp, dev)) 582 info = get_synthinfo_nospec(dp, dev);
583 if (!info || info->is_midi)
574 return -ENXIO; 584 return -ENXIO;
575 if ((rec = get_synthdev(dp, dev)) == NULL) 585 if ((rec = get_synthdev(dp, dev)) == NULL)
576 return -ENXIO; 586 return -ENXIO;
577 if (rec->oper.ioctl == NULL) 587 if (rec->oper.ioctl == NULL)
578 rc = -ENXIO; 588 rc = -ENXIO;
579 else 589 else
580 rc = rec->oper.ioctl(&dp->synths[dev].arg, cmd, addr); 590 rc = rec->oper.ioctl(&info->arg, cmd, addr);
581 snd_use_lock_free(&rec->use_lock); 591 snd_use_lock_free(&rec->use_lock);
582 return rc; 592 return rc;
583} 593}
@@ -589,7 +599,10 @@ snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, u
589int 599int
590snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev) 600snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev)
591{ 601{
592 if (! snd_seq_oss_synth_is_valid(dp, dev) || is_midi_dev(dp, dev)) 602 struct seq_oss_synthinfo *info;
603
604 info = snd_seq_oss_synth_info(dp, dev);
605 if (!info || info->is_midi)
593 return -ENXIO; 606 return -ENXIO;
594 ev->type = SNDRV_SEQ_EVENT_OSS; 607 ev->type = SNDRV_SEQ_EVENT_OSS;
595 memcpy(ev->data.raw8.d, data, 8); 608 memcpy(ev->data.raw8.d, data, 8);
diff --git a/sound/core/seq/oss/seq_oss_synth.h b/sound/core/seq/oss/seq_oss_synth.h
index 74ac55f166b6..a63f9e22974d 100644
--- a/sound/core/seq/oss/seq_oss_synth.h
+++ b/sound/core/seq/oss/seq_oss_synth.h
@@ -37,7 +37,8 @@ void snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp);
37void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev); 37void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev);
38int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt, 38int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
39 const char __user *buf, int p, int c); 39 const char __user *buf, int p, int c);
40int snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev); 40struct seq_oss_synthinfo *snd_seq_oss_synth_info(struct seq_oss_devinfo *dp,
41 int dev);
41int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf, 42int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
42 struct snd_seq_event *ev); 43 struct snd_seq_event *ev);
43int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev); 44int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev);
diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c
index ddcc1a325a61..42920a243328 100644
--- a/sound/drivers/opl3/opl3_synth.c
+++ b/sound/drivers/opl3/opl3_synth.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/export.h> 23#include <linux/export.h>
24#include <linux/nospec.h>
24#include <sound/opl3.h> 25#include <sound/opl3.h>
25#include <sound/asound_fm.h> 26#include <sound/asound_fm.h>
26 27
@@ -448,7 +449,7 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v
448{ 449{
449 unsigned short reg_side; 450 unsigned short reg_side;
450 unsigned char op_offset; 451 unsigned char op_offset;
451 unsigned char voice_offset; 452 unsigned char voice_offset, voice_op;
452 453
453 unsigned short opl3_reg; 454 unsigned short opl3_reg;
454 unsigned char reg_val; 455 unsigned char reg_val;
@@ -473,7 +474,9 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v
473 voice_offset = voice->voice - MAX_OPL2_VOICES; 474 voice_offset = voice->voice - MAX_OPL2_VOICES;
474 } 475 }
475 /* Get register offset of operator */ 476 /* Get register offset of operator */
476 op_offset = snd_opl3_regmap[voice_offset][voice->op]; 477 voice_offset = array_index_nospec(voice_offset, MAX_OPL2_VOICES);
478 voice_op = array_index_nospec(voice->op, 4);
479 op_offset = snd_opl3_regmap[voice_offset][voice_op];
477 480
478 reg_val = 0x00; 481 reg_val = 0x00;
479 /* Set amplitude modulation (tremolo) effect */ 482 /* Set amplitude modulation (tremolo) effect */
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
index 8573289c381e..928a255bfc35 100644
--- a/sound/firewire/dice/dice-stream.c
+++ b/sound/firewire/dice/dice-stream.c
@@ -435,7 +435,7 @@ int snd_dice_stream_init_duplex(struct snd_dice *dice)
435 err = init_stream(dice, AMDTP_IN_STREAM, i); 435 err = init_stream(dice, AMDTP_IN_STREAM, i);
436 if (err < 0) { 436 if (err < 0) {
437 for (; i >= 0; i--) 437 for (; i >= 0; i--)
438 destroy_stream(dice, AMDTP_OUT_STREAM, i); 438 destroy_stream(dice, AMDTP_IN_STREAM, i);
439 goto end; 439 goto end;
440 } 440 }
441 } 441 }
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
index 4ddb4cdd054b..96bb01b6b751 100644
--- a/sound/firewire/dice/dice.c
+++ b/sound/firewire/dice/dice.c
@@ -14,7 +14,7 @@ MODULE_LICENSE("GPL v2");
14#define OUI_WEISS 0x001c6a 14#define OUI_WEISS 0x001c6a
15#define OUI_LOUD 0x000ff2 15#define OUI_LOUD 0x000ff2
16#define OUI_FOCUSRITE 0x00130e 16#define OUI_FOCUSRITE 0x00130e
17#define OUI_TCELECTRONIC 0x001486 17#define OUI_TCELECTRONIC 0x000166
18 18
19#define DICE_CATEGORY_ID 0x04 19#define DICE_CATEGORY_ID 0x04
20#define WEISS_CATEGORY_ID 0x00 20#define WEISS_CATEGORY_ID 0x00
diff --git a/sound/pci/asihpi/hpimsginit.c b/sound/pci/asihpi/hpimsginit.c
index 7eb617175fde..a31a70dccecf 100644
--- a/sound/pci/asihpi/hpimsginit.c
+++ b/sound/pci/asihpi/hpimsginit.c
@@ -23,6 +23,7 @@
23 23
24#include "hpi_internal.h" 24#include "hpi_internal.h"
25#include "hpimsginit.h" 25#include "hpimsginit.h"
26#include <linux/nospec.h>
26 27
27/* The actual message size for each object type */ 28/* The actual message size for each object type */
28static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT; 29static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT;
@@ -39,10 +40,12 @@ static void hpi_init_message(struct hpi_message *phm, u16 object,
39{ 40{
40 u16 size; 41 u16 size;
41 42
42 if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) 43 if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
44 object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
43 size = msg_size[object]; 45 size = msg_size[object];
44 else 46 } else {
45 size = sizeof(*phm); 47 size = sizeof(*phm);
48 }
46 49
47 memset(phm, 0, size); 50 memset(phm, 0, size);
48 phm->size = size; 51 phm->size = size;
@@ -66,10 +69,12 @@ void hpi_init_response(struct hpi_response *phr, u16 object, u16 function,
66{ 69{
67 u16 size; 70 u16 size;
68 71
69 if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) 72 if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
73 object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
70 size = res_size[object]; 74 size = res_size[object];
71 else 75 } else {
72 size = sizeof(*phr); 76 size = sizeof(*phr);
77 }
73 78
74 memset(phr, 0, sizeof(*phr)); 79 memset(phr, 0, sizeof(*phr));
75 phr->size = size; 80 phr->size = size;
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index 5badd08e1d69..b1a2a7ea4172 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -33,6 +33,7 @@
33#include <linux/stringify.h> 33#include <linux/stringify.h>
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/vmalloc.h> 35#include <linux/vmalloc.h>
36#include <linux/nospec.h>
36 37
37#ifdef MODULE_FIRMWARE 38#ifdef MODULE_FIRMWARE
38MODULE_FIRMWARE("asihpi/dsp5000.bin"); 39MODULE_FIRMWARE("asihpi/dsp5000.bin");
@@ -186,7 +187,8 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
186 struct hpi_adapter *pa = NULL; 187 struct hpi_adapter *pa = NULL;
187 188
188 if (hm->h.adapter_index < ARRAY_SIZE(adapters)) 189 if (hm->h.adapter_index < ARRAY_SIZE(adapters))
189 pa = &adapters[hm->h.adapter_index]; 190 pa = &adapters[array_index_nospec(hm->h.adapter_index,
191 ARRAY_SIZE(adapters))];
190 192
191 if (!pa || !pa->adapter || !pa->adapter->type) { 193 if (!pa || !pa->adapter || !pa->adapter->type) {
192 hpi_init_response(&hr->r0, hm->h.object, 194 hpi_init_response(&hr->r0, hm->h.object,
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 57df06e76968..cc009a4a3d1d 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -21,6 +21,7 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/compat.h> 23#include <linux/compat.h>
24#include <linux/nospec.h>
24#include <sound/core.h> 25#include <sound/core.h>
25#include "hda_codec.h" 26#include "hda_codec.h"
26#include "hda_local.h" 27#include "hda_local.h"
@@ -51,7 +52,16 @@ static int get_wcap_ioctl(struct hda_codec *codec,
51 52
52 if (get_user(verb, &arg->verb)) 53 if (get_user(verb, &arg->verb))
53 return -EFAULT; 54 return -EFAULT;
54 res = get_wcaps(codec, verb >> 24); 55 /* open-code get_wcaps(verb>>24) with nospec */
56 verb >>= 24;
57 if (verb < codec->core.start_nid ||
58 verb >= codec->core.start_nid + codec->core.num_nodes) {
59 res = 0;
60 } else {
61 verb -= codec->core.start_nid;
62 verb = array_index_nospec(verb, codec->core.num_nodes);
63 res = codec->wcaps[verb];
64 }
55 if (put_user(res, &arg->res)) 65 if (put_user(res, &arg->res))
56 return -EFAULT; 66 return -EFAULT;
57 return 0; 67 return 0;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index b4f1b6e88305..7d7eb1354eee 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1383,6 +1383,8 @@ static void hdmi_pcm_setup_pin(struct hdmi_spec *spec,
1383 pcm = get_pcm_rec(spec, per_pin->pcm_idx); 1383 pcm = get_pcm_rec(spec, per_pin->pcm_idx);
1384 else 1384 else
1385 return; 1385 return;
1386 if (!pcm->pcm)
1387 return;
1386 if (!test_bit(per_pin->pcm_idx, &spec->pcm_in_use)) 1388 if (!test_bit(per_pin->pcm_idx, &spec->pcm_in_use))
1387 return; 1389 return;
1388 1390
@@ -2151,8 +2153,13 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
2151 int dev, err; 2153 int dev, err;
2152 int pin_idx, pcm_idx; 2154 int pin_idx, pcm_idx;
2153 2155
2154
2155 for (pcm_idx = 0; pcm_idx < spec->pcm_used; pcm_idx++) { 2156 for (pcm_idx = 0; pcm_idx < spec->pcm_used; pcm_idx++) {
2157 if (!get_pcm_rec(spec, pcm_idx)->pcm) {
2158 /* no PCM: mark this for skipping permanently */
2159 set_bit(pcm_idx, &spec->pcm_bitmap);
2160 continue;
2161 }
2162
2156 err = generic_hdmi_build_jack(codec, pcm_idx); 2163 err = generic_hdmi_build_jack(codec, pcm_idx);
2157 if (err < 0) 2164 if (err < 0)
2158 return err; 2165 return err;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index fc77bf7a1544..8c238e51bb5a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -331,6 +331,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
331 /* fallthrough */ 331 /* fallthrough */
332 case 0x10ec0215: 332 case 0x10ec0215:
333 case 0x10ec0233: 333 case 0x10ec0233:
334 case 0x10ec0235:
334 case 0x10ec0236: 335 case 0x10ec0236:
335 case 0x10ec0255: 336 case 0x10ec0255:
336 case 0x10ec0256: 337 case 0x10ec0256:
@@ -6575,6 +6576,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6575 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6576 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6576 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6577 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6577 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 6578 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6579 SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6578 SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 6580 SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6579 SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 6581 SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6580 SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6582 SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
@@ -7160,8 +7162,11 @@ static int patch_alc269(struct hda_codec *codec)
7160 case 0x10ec0298: 7162 case 0x10ec0298:
7161 spec->codec_variant = ALC269_TYPE_ALC298; 7163 spec->codec_variant = ALC269_TYPE_ALC298;
7162 break; 7164 break;
7165 case 0x10ec0235:
7163 case 0x10ec0255: 7166 case 0x10ec0255:
7164 spec->codec_variant = ALC269_TYPE_ALC255; 7167 spec->codec_variant = ALC269_TYPE_ALC255;
7168 spec->shutup = alc256_shutup;
7169 spec->init_hook = alc256_init;
7165 break; 7170 break;
7166 case 0x10ec0236: 7171 case 0x10ec0236:
7167 case 0x10ec0256: 7172 case 0x10ec0256:
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 4c59983158e0..11b5b5e0e058 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -137,6 +137,7 @@
137#include <linux/pci.h> 137#include <linux/pci.h>
138#include <linux/math64.h> 138#include <linux/math64.h>
139#include <linux/io.h> 139#include <linux/io.h>
140#include <linux/nospec.h>
140 141
141#include <sound/core.h> 142#include <sound/core.h>
142#include <sound/control.h> 143#include <sound/control.h>
@@ -5698,40 +5699,43 @@ static int snd_hdspm_channel_info(struct snd_pcm_substream *substream,
5698 struct snd_pcm_channel_info *info) 5699 struct snd_pcm_channel_info *info)
5699{ 5700{
5700 struct hdspm *hdspm = snd_pcm_substream_chip(substream); 5701 struct hdspm *hdspm = snd_pcm_substream_chip(substream);
5702 unsigned int channel = info->channel;
5701 5703
5702 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 5704 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
5703 if (snd_BUG_ON(info->channel >= hdspm->max_channels_out)) { 5705 if (snd_BUG_ON(channel >= hdspm->max_channels_out)) {
5704 dev_info(hdspm->card->dev, 5706 dev_info(hdspm->card->dev,
5705 "snd_hdspm_channel_info: output channel out of range (%d)\n", 5707 "snd_hdspm_channel_info: output channel out of range (%d)\n",
5706 info->channel); 5708 channel);
5707 return -EINVAL; 5709 return -EINVAL;
5708 } 5710 }
5709 5711
5710 if (hdspm->channel_map_out[info->channel] < 0) { 5712 channel = array_index_nospec(channel, hdspm->max_channels_out);
5713 if (hdspm->channel_map_out[channel] < 0) {
5711 dev_info(hdspm->card->dev, 5714 dev_info(hdspm->card->dev,
5712 "snd_hdspm_channel_info: output channel %d mapped out\n", 5715 "snd_hdspm_channel_info: output channel %d mapped out\n",
5713 info->channel); 5716 channel);
5714 return -EINVAL; 5717 return -EINVAL;
5715 } 5718 }
5716 5719
5717 info->offset = hdspm->channel_map_out[info->channel] * 5720 info->offset = hdspm->channel_map_out[channel] *
5718 HDSPM_CHANNEL_BUFFER_BYTES; 5721 HDSPM_CHANNEL_BUFFER_BYTES;
5719 } else { 5722 } else {
5720 if (snd_BUG_ON(info->channel >= hdspm->max_channels_in)) { 5723 if (snd_BUG_ON(channel >= hdspm->max_channels_in)) {
5721 dev_info(hdspm->card->dev, 5724 dev_info(hdspm->card->dev,
5722 "snd_hdspm_channel_info: input channel out of range (%d)\n", 5725 "snd_hdspm_channel_info: input channel out of range (%d)\n",
5723 info->channel); 5726 channel);
5724 return -EINVAL; 5727 return -EINVAL;
5725 } 5728 }
5726 5729
5727 if (hdspm->channel_map_in[info->channel] < 0) { 5730 channel = array_index_nospec(channel, hdspm->max_channels_in);
5731 if (hdspm->channel_map_in[channel] < 0) {
5728 dev_info(hdspm->card->dev, 5732 dev_info(hdspm->card->dev,
5729 "snd_hdspm_channel_info: input channel %d mapped out\n", 5733 "snd_hdspm_channel_info: input channel %d mapped out\n",
5730 info->channel); 5734 channel);
5731 return -EINVAL; 5735 return -EINVAL;
5732 } 5736 }
5733 5737
5734 info->offset = hdspm->channel_map_in[info->channel] * 5738 info->offset = hdspm->channel_map_in[channel] *
5735 HDSPM_CHANNEL_BUFFER_BYTES; 5739 HDSPM_CHANNEL_BUFFER_BYTES;
5736 } 5740 }
5737 5741
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index df648b1d9217..edd765e22377 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -26,6 +26,7 @@
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/nospec.h>
29 30
30#include <sound/core.h> 31#include <sound/core.h>
31#include <sound/control.h> 32#include <sound/control.h>
@@ -2071,9 +2072,10 @@ static int snd_rme9652_channel_info(struct snd_pcm_substream *substream,
2071 if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS)) 2072 if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS))
2072 return -EINVAL; 2073 return -EINVAL;
2073 2074
2074 if ((chn = rme9652->channel_map[info->channel]) < 0) { 2075 chn = rme9652->channel_map[array_index_nospec(info->channel,
2076 RME9652_NCHANNELS)];
2077 if (chn < 0)
2075 return -EINVAL; 2078 return -EINVAL;
2076 }
2077 2079
2078 info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES; 2080 info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES;
2079 info->first = 0; 2081 info->first = 0;
diff --git a/sound/soc/amd/acp-da7219-max98357a.c b/sound/soc/amd/acp-da7219-max98357a.c
index b205c782e494..f41560ecbcd1 100644
--- a/sound/soc/amd/acp-da7219-max98357a.c
+++ b/sound/soc/amd/acp-da7219-max98357a.c
@@ -43,7 +43,7 @@
43#define DUAL_CHANNEL 2 43#define DUAL_CHANNEL 2
44 44
45static struct snd_soc_jack cz_jack; 45static struct snd_soc_jack cz_jack;
46struct clk *da7219_dai_clk; 46static struct clk *da7219_dai_clk;
47 47
48static int cz_da7219_init(struct snd_soc_pcm_runtime *rtd) 48static int cz_da7219_init(struct snd_soc_pcm_runtime *rtd)
49{ 49{
diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
index 80c2a06285bb..12bf24c26818 100644
--- a/sound/soc/codecs/adau17x1.c
+++ b/sound/soc/codecs/adau17x1.c
@@ -502,7 +502,7 @@ static int adau17x1_hw_params(struct snd_pcm_substream *substream,
502 } 502 }
503 503
504 if (adau->sigmadsp) { 504 if (adau->sigmadsp) {
505 ret = adau17x1_setup_firmware(adau, params_rate(params)); 505 ret = adau17x1_setup_firmware(component, params_rate(params));
506 if (ret < 0) 506 if (ret < 0)
507 return ret; 507 return ret;
508 } 508 }
@@ -835,26 +835,40 @@ bool adau17x1_volatile_register(struct device *dev, unsigned int reg)
835} 835}
836EXPORT_SYMBOL_GPL(adau17x1_volatile_register); 836EXPORT_SYMBOL_GPL(adau17x1_volatile_register);
837 837
838int adau17x1_setup_firmware(struct adau *adau, unsigned int rate) 838int adau17x1_setup_firmware(struct snd_soc_component *component,
839 unsigned int rate)
839{ 840{
840 int ret; 841 int ret;
841 int dspsr; 842 int dspsr, dsp_run;
843 struct adau *adau = snd_soc_component_get_drvdata(component);
844 struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
845
846 snd_soc_dapm_mutex_lock(dapm);
842 847
843 ret = regmap_read(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, &dspsr); 848 ret = regmap_read(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, &dspsr);
844 if (ret) 849 if (ret)
845 return ret; 850 goto err;
851
852 ret = regmap_read(adau->regmap, ADAU17X1_DSP_RUN, &dsp_run);
853 if (ret)
854 goto err;
846 855
847 regmap_write(adau->regmap, ADAU17X1_DSP_ENABLE, 1); 856 regmap_write(adau->regmap, ADAU17X1_DSP_ENABLE, 1);
848 regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, 0xf); 857 regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, 0xf);
858 regmap_write(adau->regmap, ADAU17X1_DSP_RUN, 0);
849 859
850 ret = sigmadsp_setup(adau->sigmadsp, rate); 860 ret = sigmadsp_setup(adau->sigmadsp, rate);
851 if (ret) { 861 if (ret) {
852 regmap_write(adau->regmap, ADAU17X1_DSP_ENABLE, 0); 862 regmap_write(adau->regmap, ADAU17X1_DSP_ENABLE, 0);
853 return ret; 863 goto err;
854 } 864 }
855 regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, dspsr); 865 regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, dspsr);
866 regmap_write(adau->regmap, ADAU17X1_DSP_RUN, dsp_run);
856 867
857 return 0; 868err:
869 snd_soc_dapm_mutex_unlock(dapm);
870
871 return ret;
858} 872}
859EXPORT_SYMBOL_GPL(adau17x1_setup_firmware); 873EXPORT_SYMBOL_GPL(adau17x1_setup_firmware);
860 874
diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h
index a7b1cb770814..e6fe87beec07 100644
--- a/sound/soc/codecs/adau17x1.h
+++ b/sound/soc/codecs/adau17x1.h
@@ -68,7 +68,8 @@ int adau17x1_resume(struct snd_soc_component *component);
68 68
69extern const struct snd_soc_dai_ops adau17x1_dai_ops; 69extern const struct snd_soc_dai_ops adau17x1_dai_ops;
70 70
71int adau17x1_setup_firmware(struct adau *adau, unsigned int rate); 71int adau17x1_setup_firmware(struct snd_soc_component *component,
72 unsigned int rate);
72bool adau17x1_has_dsp(struct adau *adau); 73bool adau17x1_has_dsp(struct adau *adau);
73 74
74#define ADAU17X1_CLOCK_CONTROL 0x4000 75#define ADAU17X1_CLOCK_CONTROL 0x4000
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
index 12ee83d52405..b7cf7cce95fe 100644
--- a/sound/soc/codecs/msm8916-wcd-analog.c
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
@@ -1187,7 +1187,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
1187 return irq; 1187 return irq;
1188 } 1188 }
1189 1189
1190 ret = devm_request_irq(dev, irq, pm8916_mbhc_switch_irq_handler, 1190 ret = devm_request_threaded_irq(dev, irq, NULL,
1191 pm8916_mbhc_switch_irq_handler,
1191 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | 1192 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
1192 IRQF_ONESHOT, 1193 IRQF_ONESHOT,
1193 "mbhc switch irq", priv); 1194 "mbhc switch irq", priv);
@@ -1201,7 +1202,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
1201 return irq; 1202 return irq;
1202 } 1203 }
1203 1204
1204 ret = devm_request_irq(dev, irq, mbhc_btn_press_irq_handler, 1205 ret = devm_request_threaded_irq(dev, irq, NULL,
1206 mbhc_btn_press_irq_handler,
1205 IRQF_TRIGGER_RISING | 1207 IRQF_TRIGGER_RISING |
1206 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 1208 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
1207 "mbhc btn press irq", priv); 1209 "mbhc btn press irq", priv);
@@ -1214,7 +1216,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
1214 return irq; 1216 return irq;
1215 } 1217 }
1216 1218
1217 ret = devm_request_irq(dev, irq, mbhc_btn_release_irq_handler, 1219 ret = devm_request_threaded_irq(dev, irq, NULL,
1220 mbhc_btn_release_irq_handler,
1218 IRQF_TRIGGER_RISING | 1221 IRQF_TRIGGER_RISING |
1219 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 1222 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
1220 "mbhc btn release irq", priv); 1223 "mbhc btn release irq", priv);
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index e8a66b03faab..1570b91bf018 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -89,6 +89,7 @@ static const struct reg_default rt5514_reg[] = {
89 {RT5514_PLL3_CALIB_CTRL5, 0x40220012}, 89 {RT5514_PLL3_CALIB_CTRL5, 0x40220012},
90 {RT5514_DELAY_BUF_CTRL1, 0x7fff006a}, 90 {RT5514_DELAY_BUF_CTRL1, 0x7fff006a},
91 {RT5514_DELAY_BUF_CTRL3, 0x00000000}, 91 {RT5514_DELAY_BUF_CTRL3, 0x00000000},
92 {RT5514_ASRC_IN_CTRL1, 0x00000003},
92 {RT5514_DOWNFILTER0_CTRL1, 0x00020c2f}, 93 {RT5514_DOWNFILTER0_CTRL1, 0x00020c2f},
93 {RT5514_DOWNFILTER0_CTRL2, 0x00020c2f}, 94 {RT5514_DOWNFILTER0_CTRL2, 0x00020c2f},
94 {RT5514_DOWNFILTER0_CTRL3, 0x10000362}, 95 {RT5514_DOWNFILTER0_CTRL3, 0x10000362},
@@ -181,6 +182,7 @@ static bool rt5514_readable_register(struct device *dev, unsigned int reg)
181 case RT5514_PLL3_CALIB_CTRL5: 182 case RT5514_PLL3_CALIB_CTRL5:
182 case RT5514_DELAY_BUF_CTRL1: 183 case RT5514_DELAY_BUF_CTRL1:
183 case RT5514_DELAY_BUF_CTRL3: 184 case RT5514_DELAY_BUF_CTRL3:
185 case RT5514_ASRC_IN_CTRL1:
184 case RT5514_DOWNFILTER0_CTRL1: 186 case RT5514_DOWNFILTER0_CTRL1:
185 case RT5514_DOWNFILTER0_CTRL2: 187 case RT5514_DOWNFILTER0_CTRL2:
186 case RT5514_DOWNFILTER0_CTRL3: 188 case RT5514_DOWNFILTER0_CTRL3:
@@ -238,6 +240,7 @@ static bool rt5514_i2c_readable_register(struct device *dev,
238 case RT5514_DSP_MAPPING | RT5514_PLL3_CALIB_CTRL5: 240 case RT5514_DSP_MAPPING | RT5514_PLL3_CALIB_CTRL5:
239 case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL1: 241 case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL1:
240 case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL3: 242 case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL3:
243 case RT5514_DSP_MAPPING | RT5514_ASRC_IN_CTRL1:
241 case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL1: 244 case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL1:
242 case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL2: 245 case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL2:
243 case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL3: 246 case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL3:
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 40a700493f4c..da8fd98c7f51 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -144,6 +144,13 @@ static int fsl_esai_divisor_cal(struct snd_soc_dai *dai, bool tx, u32 ratio,
144 144
145 psr = ratio <= 256 * maxfp ? ESAI_xCCR_xPSR_BYPASS : ESAI_xCCR_xPSR_DIV8; 145 psr = ratio <= 256 * maxfp ? ESAI_xCCR_xPSR_BYPASS : ESAI_xCCR_xPSR_DIV8;
146 146
147 /* Do not loop-search if PM (1 ~ 256) alone can serve the ratio */
148 if (ratio <= 256) {
149 pm = ratio;
150 fp = 1;
151 goto out;
152 }
153
147 /* Set the max fluctuation -- 0.1% of the max devisor */ 154 /* Set the max fluctuation -- 0.1% of the max devisor */
148 savesub = (psr ? 1 : 8) * 256 * maxfp / 1000; 155 savesub = (psr ? 1 : 8) * 256 * maxfp / 1000;
149 156
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 0823b08923b5..89df2d9f63d7 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -217,6 +217,7 @@ struct fsl_ssi_soc_data {
217 * @dai_fmt: DAI configuration this device is currently used with 217 * @dai_fmt: DAI configuration this device is currently used with
218 * @streams: Mask of current active streams: BIT(TX) and BIT(RX) 218 * @streams: Mask of current active streams: BIT(TX) and BIT(RX)
219 * @i2s_net: I2S and Network mode configurations of SCR register 219 * @i2s_net: I2S and Network mode configurations of SCR register
220 * (this is the initial settings based on the DAI format)
220 * @synchronous: Use synchronous mode - both of TX and RX use STCK and SFCK 221 * @synchronous: Use synchronous mode - both of TX and RX use STCK and SFCK
221 * @use_dma: DMA is used or FIQ with stream filter 222 * @use_dma: DMA is used or FIQ with stream filter
222 * @use_dual_fifo: DMA with support for dual FIFO mode 223 * @use_dual_fifo: DMA with support for dual FIFO mode
@@ -829,16 +830,23 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
829 } 830 }
830 831
831 if (!fsl_ssi_is_ac97(ssi)) { 832 if (!fsl_ssi_is_ac97(ssi)) {
833 /*
834 * Keep the ssi->i2s_net intact while having a local variable
835 * to override settings for special use cases. Otherwise, the
836 * ssi->i2s_net will lose the settings for regular use cases.
837 */
838 u8 i2s_net = ssi->i2s_net;
839
832 /* Normal + Network mode to send 16-bit data in 32-bit frames */ 840 /* Normal + Network mode to send 16-bit data in 32-bit frames */
833 if (fsl_ssi_is_i2s_cbm_cfs(ssi) && sample_size == 16) 841 if (fsl_ssi_is_i2s_cbm_cfs(ssi) && sample_size == 16)
834 ssi->i2s_net = SSI_SCR_I2S_MODE_NORMAL | SSI_SCR_NET; 842 i2s_net = SSI_SCR_I2S_MODE_NORMAL | SSI_SCR_NET;
835 843
836 /* Use Normal mode to send mono data at 1st slot of 2 slots */ 844 /* Use Normal mode to send mono data at 1st slot of 2 slots */
837 if (channels == 1) 845 if (channels == 1)
838 ssi->i2s_net = SSI_SCR_I2S_MODE_NORMAL; 846 i2s_net = SSI_SCR_I2S_MODE_NORMAL;
839 847
840 regmap_update_bits(regs, REG_SSI_SCR, 848 regmap_update_bits(regs, REG_SSI_SCR,
841 SSI_SCR_I2S_NET_MASK, ssi->i2s_net); 849 SSI_SCR_I2S_NET_MASK, i2s_net);
842 } 850 }
843 851
844 /* In synchronous mode, the SSI uses STCCR for capture */ 852 /* In synchronous mode, the SSI uses STCCR for capture */
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index ceb105cbd461..addac2a8e52a 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -72,24 +72,28 @@ config SND_SOC_INTEL_BAYTRAIL
72 for Baytrail Chromebooks but this option is now deprecated and is 72 for Baytrail Chromebooks but this option is now deprecated and is
73 not recommended, use SND_SST_ATOM_HIFI2_PLATFORM instead. 73 not recommended, use SND_SST_ATOM_HIFI2_PLATFORM instead.
74 74
75config SND_SST_ATOM_HIFI2_PLATFORM
76 tristate
77 select SND_SOC_COMPRESS
78
75config SND_SST_ATOM_HIFI2_PLATFORM_PCI 79config SND_SST_ATOM_HIFI2_PLATFORM_PCI
76 tristate "PCI HiFi2 (Medfield, Merrifield) Platforms" 80 tristate "PCI HiFi2 (Merrifield) Platforms"
77 depends on X86 && PCI 81 depends on X86 && PCI
78 select SND_SST_IPC_PCI 82 select SND_SST_IPC_PCI
79 select SND_SOC_COMPRESS 83 select SND_SST_ATOM_HIFI2_PLATFORM
80 help 84 help
81 If you have a Intel Medfield or Merrifield/Edison platform, then 85 If you have a Intel Merrifield/Edison platform, then
82 enable this option by saying Y or m. Distros will typically not 86 enable this option by saying Y or m. Distros will typically not
83 enable this option: Medfield devices are not available to 87 enable this option: while Merrifield/Edison can run a mainline
84 developers and while Merrifield/Edison can run a mainline kernel with 88 kernel with limited functionality it will require a firmware file
85 limited functionality it will require a firmware file which 89 which is not in the standard firmware tree
86 is not in the standard firmware tree
87 90
88config SND_SST_ATOM_HIFI2_PLATFORM 91config SND_SST_ATOM_HIFI2_PLATFORM_ACPI
89 tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms" 92 tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms"
93 default ACPI
90 depends on X86 && ACPI 94 depends on X86 && ACPI
91 select SND_SST_IPC_ACPI 95 select SND_SST_IPC_ACPI
92 select SND_SOC_COMPRESS 96 select SND_SST_ATOM_HIFI2_PLATFORM
93 select SND_SOC_ACPI_INTEL_MATCH 97 select SND_SOC_ACPI_INTEL_MATCH
94 select IOSF_MBI 98 select IOSF_MBI
95 help 99 help
diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
index 09db2aec12a3..b2f5d2fa354d 100644
--- a/sound/soc/omap/omap-dmic.c
+++ b/sound/soc/omap/omap-dmic.c
@@ -281,7 +281,7 @@ static int omap_dmic_dai_trigger(struct snd_pcm_substream *substream,
281static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id, 281static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id,
282 unsigned int freq) 282 unsigned int freq)
283{ 283{
284 struct clk *parent_clk; 284 struct clk *parent_clk, *mux;
285 char *parent_clk_name; 285 char *parent_clk_name;
286 int ret = 0; 286 int ret = 0;
287 287
@@ -329,14 +329,21 @@ static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id,
329 return -ENODEV; 329 return -ENODEV;
330 } 330 }
331 331
332 mux = clk_get_parent(dmic->fclk);
333 if (IS_ERR(mux)) {
334 dev_err(dmic->dev, "can't get fck mux parent\n");
335 clk_put(parent_clk);
336 return -ENODEV;
337 }
338
332 mutex_lock(&dmic->mutex); 339 mutex_lock(&dmic->mutex);
333 if (dmic->active) { 340 if (dmic->active) {
334 /* disable clock while reparenting */ 341 /* disable clock while reparenting */
335 pm_runtime_put_sync(dmic->dev); 342 pm_runtime_put_sync(dmic->dev);
336 ret = clk_set_parent(dmic->fclk, parent_clk); 343 ret = clk_set_parent(mux, parent_clk);
337 pm_runtime_get_sync(dmic->dev); 344 pm_runtime_get_sync(dmic->dev);
338 } else { 345 } else {
339 ret = clk_set_parent(dmic->fclk, parent_clk); 346 ret = clk_set_parent(mux, parent_clk);
340 } 347 }
341 mutex_unlock(&dmic->mutex); 348 mutex_unlock(&dmic->mutex);
342 349
@@ -349,6 +356,7 @@ static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id,
349 dmic->fclk_freq = freq; 356 dmic->fclk_freq = freq;
350 357
351err_busy: 358err_busy:
359 clk_put(mux);
352 clk_put(parent_clk); 360 clk_put(parent_clk);
353 361
354 return ret; 362 return ret;
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 6a76688a8ba9..94f081b93258 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -1536,7 +1536,7 @@ static int rsnd_remove(struct platform_device *pdev)
1536 return ret; 1536 return ret;
1537} 1537}
1538 1538
1539static int rsnd_suspend(struct device *dev) 1539static int __maybe_unused rsnd_suspend(struct device *dev)
1540{ 1540{
1541 struct rsnd_priv *priv = dev_get_drvdata(dev); 1541 struct rsnd_priv *priv = dev_get_drvdata(dev);
1542 1542
@@ -1545,7 +1545,7 @@ static int rsnd_suspend(struct device *dev)
1545 return 0; 1545 return 0;
1546} 1546}
1547 1547
1548static int rsnd_resume(struct device *dev) 1548static int __maybe_unused rsnd_resume(struct device *dev)
1549{ 1549{
1550 struct rsnd_priv *priv = dev_get_drvdata(dev); 1550 struct rsnd_priv *priv = dev_get_drvdata(dev);
1551 1551
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index fa27d0fca6dc..986b8b2f90fb 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -513,7 +513,7 @@ static void remove_widget(struct snd_soc_component *comp,
513 */ 513 */
514 if (dobj->widget.kcontrol_type == SND_SOC_TPLG_TYPE_ENUM) { 514 if (dobj->widget.kcontrol_type == SND_SOC_TPLG_TYPE_ENUM) {
515 /* enumerated widget mixer */ 515 /* enumerated widget mixer */
516 for (i = 0; i < w->num_kcontrols; i++) { 516 for (i = 0; w->kcontrols != NULL && i < w->num_kcontrols; i++) {
517 struct snd_kcontrol *kcontrol = w->kcontrols[i]; 517 struct snd_kcontrol *kcontrol = w->kcontrols[i];
518 struct soc_enum *se = 518 struct soc_enum *se =
519 (struct soc_enum *)kcontrol->private_value; 519 (struct soc_enum *)kcontrol->private_value;
@@ -530,7 +530,7 @@ static void remove_widget(struct snd_soc_component *comp,
530 } 530 }
531 } else { 531 } else {
532 /* volume mixer or bytes controls */ 532 /* volume mixer or bytes controls */
533 for (i = 0; i < w->num_kcontrols; i++) { 533 for (i = 0; w->kcontrols != NULL && i < w->num_kcontrols; i++) {
534 struct snd_kcontrol *kcontrol = w->kcontrols[i]; 534 struct snd_kcontrol *kcontrol = w->kcontrols[i];
535 535
536 if (dobj->widget.kcontrol_type 536 if (dobj->widget.kcontrol_type
@@ -1325,8 +1325,10 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
1325 ec->hdr.name); 1325 ec->hdr.name);
1326 1326
1327 kc[i].name = kstrdup(ec->hdr.name, GFP_KERNEL); 1327 kc[i].name = kstrdup(ec->hdr.name, GFP_KERNEL);
1328 if (kc[i].name == NULL) 1328 if (kc[i].name == NULL) {
1329 kfree(se);
1329 goto err_se; 1330 goto err_se;
1331 }
1330 kc[i].private_value = (long)se; 1332 kc[i].private_value = (long)se;
1331 kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER; 1333 kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
1332 kc[i].access = ec->hdr.access; 1334 kc[i].access = ec->hdr.access;
@@ -1442,8 +1444,10 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dbytes_create(
1442 be->hdr.name, be->hdr.access); 1444 be->hdr.name, be->hdr.access);
1443 1445
1444 kc[i].name = kstrdup(be->hdr.name, GFP_KERNEL); 1446 kc[i].name = kstrdup(be->hdr.name, GFP_KERNEL);
1445 if (kc[i].name == NULL) 1447 if (kc[i].name == NULL) {
1448 kfree(sbe);
1446 goto err; 1449 goto err;
1450 }
1447 kc[i].private_value = (long)sbe; 1451 kc[i].private_value = (long)sbe;
1448 kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER; 1452 kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
1449 kc[i].access = be->hdr.access; 1453 kc[i].access = be->hdr.access;
@@ -2576,7 +2580,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
2576 2580
2577 /* match index */ 2581 /* match index */
2578 if (dobj->index != index && 2582 if (dobj->index != index &&
2579 dobj->index != SND_SOC_TPLG_INDEX_ALL) 2583 index != SND_SOC_TPLG_INDEX_ALL)
2580 continue; 2584 continue;
2581 2585
2582 switch (dobj->type) { 2586 switch (dobj->type) {
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 301ad61ed426..344d7b069d59 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1776,7 +1776,8 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
1776 build_feature_ctl(state, _ftr, ch_bits, control, 1776 build_feature_ctl(state, _ftr, ch_bits, control,
1777 &iterm, unitid, ch_read_only); 1777 &iterm, unitid, ch_read_only);
1778 if (uac_v2v3_control_is_readable(master_bits, control)) 1778 if (uac_v2v3_control_is_readable(master_bits, control))
1779 build_feature_ctl(state, _ftr, 0, i, &iterm, unitid, 1779 build_feature_ctl(state, _ftr, 0, control,
1780 &iterm, unitid,
1780 !uac_v2v3_control_is_writeable(master_bits, 1781 !uac_v2v3_control_is_writeable(master_bits,
1781 control)); 1782 control));
1782 } 1783 }
@@ -1859,7 +1860,7 @@ static int parse_audio_input_terminal(struct mixer_build *state, int unitid,
1859 check_input_term(state, d->bTerminalID, &iterm); 1860 check_input_term(state, d->bTerminalID, &iterm);
1860 if (state->mixer->protocol == UAC_VERSION_2) { 1861 if (state->mixer->protocol == UAC_VERSION_2) {
1861 /* Check for jack detection. */ 1862 /* Check for jack detection. */
1862 if (uac_v2v3_control_is_readable(d->bmControls, 1863 if (uac_v2v3_control_is_readable(le16_to_cpu(d->bmControls),
1863 UAC2_TE_CONNECTOR)) { 1864 UAC2_TE_CONNECTOR)) {
1864 build_connector_control(state, &iterm, true); 1865 build_connector_control(state, &iterm, true);
1865 } 1866 }
@@ -2561,7 +2562,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
2561 if (err < 0 && err != -EINVAL) 2562 if (err < 0 && err != -EINVAL)
2562 return err; 2563 return err;
2563 2564
2564 if (uac_v2v3_control_is_readable(desc->bmControls, 2565 if (uac_v2v3_control_is_readable(le16_to_cpu(desc->bmControls),
2565 UAC2_TE_CONNECTOR)) { 2566 UAC2_TE_CONNECTOR)) {
2566 build_connector_control(&state, &state.oterm, 2567 build_connector_control(&state, &state.oterm,
2567 false); 2568 false);
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 9038b2e7df73..eaa03acd4686 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -353,8 +353,11 @@ static struct usbmix_name_map bose_companion5_map[] = {
353/* 353/*
354 * Dell usb dock with ALC4020 codec had a firmware problem where it got 354 * Dell usb dock with ALC4020 codec had a firmware problem where it got
355 * screwed up when zero volume is passed; just skip it as a workaround 355 * screwed up when zero volume is passed; just skip it as a workaround
356 *
357 * Also the extension unit gives an access error, so skip it as well.
356 */ 358 */
357static const struct usbmix_name_map dell_alc4020_map[] = { 359static const struct usbmix_name_map dell_alc4020_map[] = {
360 { 4, NULL }, /* extension unit */
358 { 16, NULL }, 361 { 16, NULL },
359 { 19, NULL }, 362 { 19, NULL },
360 { 0 } 363 { 0 }
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 6a8f5843334e..956be9f7c72a 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -349,7 +349,7 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor
349 * TODO: this conversion is not complete, update it 349 * TODO: this conversion is not complete, update it
350 * after adding UAC3 values to asound.h 350 * after adding UAC3 values to asound.h
351 */ 351 */
352 switch (is->bChPurpose) { 352 switch (is->bChRelationship) {
353 case UAC3_CH_MONO: 353 case UAC3_CH_MONO:
354 map = SNDRV_CHMAP_MONO; 354 map = SNDRV_CHMAP_MONO;
355 break; 355 break;
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index ebcab5c5465d..8082f7b077f1 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -139,7 +139,7 @@ static void usb_stream_hwdep_vm_open(struct vm_area_struct *area)
139 snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count)); 139 snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count));
140} 140}
141 141
142static int usb_stream_hwdep_vm_fault(struct vm_fault *vmf) 142static vm_fault_t usb_stream_hwdep_vm_fault(struct vm_fault *vmf)
143{ 143{
144 unsigned long offset; 144 unsigned long offset;
145 struct page *page; 145 struct page *page;
diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
index d8bd7c99b48c..c1dd9a7b48df 100644
--- a/sound/usb/usx2y/usX2Yhwdep.c
+++ b/sound/usb/usx2y/usX2Yhwdep.c
@@ -31,7 +31,7 @@
31#include "usbusx2y.h" 31#include "usbusx2y.h"
32#include "usX2Yhwdep.h" 32#include "usX2Yhwdep.h"
33 33
34static int snd_us428ctls_vm_fault(struct vm_fault *vmf) 34static vm_fault_t snd_us428ctls_vm_fault(struct vm_fault *vmf)
35{ 35{
36 unsigned long offset; 36 unsigned long offset;
37 struct page * page; 37 struct page * page;
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
index 0d050528a4e1..4fd9276b8e50 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.c
+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
@@ -652,7 +652,7 @@ static void snd_usX2Y_hwdep_pcm_vm_close(struct vm_area_struct *area)
652} 652}
653 653
654 654
655static int snd_usX2Y_hwdep_pcm_vm_fault(struct vm_fault *vmf) 655static vm_fault_t snd_usX2Y_hwdep_pcm_vm_fault(struct vm_fault *vmf)
656{ 656{
657 unsigned long offset; 657 unsigned long offset;
658 void *vaddr; 658 void *vaddr;
diff --git a/tools/bpf/bpf_dbg.c b/tools/bpf/bpf_dbg.c
index 4f254bcc4423..61b9aa5d6415 100644
--- a/tools/bpf/bpf_dbg.c
+++ b/tools/bpf/bpf_dbg.c
@@ -1063,7 +1063,7 @@ static int cmd_load_pcap(char *file)
1063 1063
1064static int cmd_load(char *arg) 1064static int cmd_load(char *arg)
1065{ 1065{
1066 char *subcmd, *cont, *tmp = strdup(arg); 1066 char *subcmd, *cont = NULL, *tmp = strdup(arg);
1067 int ret = CMD_OK; 1067 int ret = CMD_OK;
1068 1068
1069 subcmd = strtok_r(tmp, " ", &cont); 1069 subcmd = strtok_r(tmp, " ", &cont);
@@ -1073,7 +1073,10 @@ static int cmd_load(char *arg)
1073 bpf_reset(); 1073 bpf_reset();
1074 bpf_reset_breakpoints(); 1074 bpf_reset_breakpoints();
1075 1075
1076 ret = cmd_load_bpf(cont); 1076 if (!cont)
1077 ret = CMD_ERR;
1078 else
1079 ret = cmd_load_bpf(cont);
1077 } else if (matches(subcmd, "pcap") == 0) { 1080 } else if (matches(subcmd, "pcap") == 0) {
1078 ret = cmd_load_pcap(cont); 1081 ret = cmd_load_pcap(cont);
1079 } else { 1082 } else {
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt
index 8806ed5f3802..f8d2167cf3e7 100644
--- a/tools/perf/Documentation/perf-mem.txt
+++ b/tools/perf/Documentation/perf-mem.txt
@@ -28,29 +28,46 @@ OPTIONS
28<command>...:: 28<command>...::
29 Any command you can specify in a shell. 29 Any command you can specify in a shell.
30 30
31-i::
32--input=<file>::
33 Input file name.
34
31-f:: 35-f::
32--force:: 36--force::
33 Don't do ownership validation 37 Don't do ownership validation
34 38
35-t:: 39-t::
36--type=:: 40--type=<type>::
37 Select the memory operation type: load or store (default: load,store) 41 Select the memory operation type: load or store (default: load,store)
38 42
39-D:: 43-D::
40--dump-raw-samples=:: 44--dump-raw-samples::
41 Dump the raw decoded samples on the screen in a format that is easy to parse with 45 Dump the raw decoded samples on the screen in a format that is easy to parse with
42 one sample per line. 46 one sample per line.
43 47
44-x:: 48-x::
45--field-separator:: 49--field-separator=<separator>::
46 Specify the field separator used when dump raw samples (-D option). By default, 50 Specify the field separator used when dump raw samples (-D option). By default,
47 The separator is the space character. 51 The separator is the space character.
48 52
49-C:: 53-C::
50--cpu-list:: 54--cpu=<cpu>::
51 Restrict dump of raw samples to those provided via this option. Note that the same 55 Monitor only on the list of CPUs provided. Multiple CPUs can be provided as a
52 option can be passed in record mode. It will be interpreted the same way as perf 56 comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2. Default
53 record. 57 is to monitor all CPUS.
58-U::
59--hide-unresolved::
60 Only display entries resolved to a symbol.
61
62-p::
63--phys-data::
64 Record/Report sample physical addresses
65
66RECORD OPTIONS
67--------------
68-e::
69--event <event>::
70 Event selector. Use 'perf mem record -e list' to list available events.
54 71
55-K:: 72-K::
56--all-kernel:: 73--all-kernel::
@@ -60,12 +77,12 @@ OPTIONS
60--all-user:: 77--all-user::
61 Configure all used events to run in user space. 78 Configure all used events to run in user space.
62 79
63--ldload:: 80-v::
64 Specify desired latency for loads event. 81--verbose::
82 Be more verbose (show counter open errors, etc)
65 83
66-p:: 84--ldlat <n>::
67--phys-data:: 85 Specify desired latency for loads event.
68 Record/Report sample physical addresses
69 86
70In addition, for report all perf report options are valid, and for record 87In addition, for report all perf report options are valid, and for record
71all perf record options. 88all perf record options.
diff --git a/tools/perf/arch/s390/util/auxtrace.c b/tools/perf/arch/s390/util/auxtrace.c
index 6cb48e4cffd9..3afe8256eff2 100644
--- a/tools/perf/arch/s390/util/auxtrace.c
+++ b/tools/perf/arch/s390/util/auxtrace.c
@@ -87,6 +87,7 @@ struct auxtrace_record *auxtrace_record__init(struct perf_evlist *evlist,
87 struct perf_evsel *pos; 87 struct perf_evsel *pos;
88 int diagnose = 0; 88 int diagnose = 0;
89 89
90 *err = 0;
90 if (evlist->nr_entries == 0) 91 if (evlist->nr_entries == 0)
91 return NULL; 92 return NULL;
92 93
diff --git a/tools/perf/arch/s390/util/header.c b/tools/perf/arch/s390/util/header.c
index a4c30f1c70be..163b92f33998 100644
--- a/tools/perf/arch/s390/util/header.c
+++ b/tools/perf/arch/s390/util/header.c
@@ -146,21 +146,3 @@ char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
146 zfree(&buf); 146 zfree(&buf);
147 return buf; 147 return buf;
148} 148}
149
150/*
151 * Compare the cpuid string returned by get_cpuid() function
152 * with the name generated by the jevents file read from
153 * pmu-events/arch/s390/mapfile.csv.
154 *
155 * Parameter mapcpuid is the cpuid as stored in the
156 * pmu-events/arch/s390/mapfile.csv. This is just the type number.
157 * Parameter cpuid is the cpuid returned by function get_cpuid().
158 */
159int strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
160{
161 char *cp = strchr(cpuid, ',');
162
163 if (cp == NULL)
164 return -1;
165 return strncmp(cp + 1, mapcpuid, strlen(mapcpuid));
166}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 147a27e8c937..f17dc601b0f3 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -172,6 +172,7 @@ static bool interval_count;
172static const char *output_name; 172static const char *output_name;
173static int output_fd; 173static int output_fd;
174static int print_free_counters_hint; 174static int print_free_counters_hint;
175static int print_mixed_hw_group_error;
175 176
176struct perf_stat { 177struct perf_stat {
177 bool record; 178 bool record;
@@ -1126,6 +1127,30 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
1126 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); 1127 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
1127} 1128}
1128 1129
1130static bool is_mixed_hw_group(struct perf_evsel *counter)
1131{
1132 struct perf_evlist *evlist = counter->evlist;
1133 u32 pmu_type = counter->attr.type;
1134 struct perf_evsel *pos;
1135
1136 if (counter->nr_members < 2)
1137 return false;
1138
1139 evlist__for_each_entry(evlist, pos) {
1140 /* software events can be part of any hardware group */
1141 if (pos->attr.type == PERF_TYPE_SOFTWARE)
1142 continue;
1143 if (pmu_type == PERF_TYPE_SOFTWARE) {
1144 pmu_type = pos->attr.type;
1145 continue;
1146 }
1147 if (pmu_type != pos->attr.type)
1148 return true;
1149 }
1150
1151 return false;
1152}
1153
1129static void printout(int id, int nr, struct perf_evsel *counter, double uval, 1154static void printout(int id, int nr, struct perf_evsel *counter, double uval,
1130 char *prefix, u64 run, u64 ena, double noise, 1155 char *prefix, u64 run, u64 ena, double noise,
1131 struct runtime_stat *st) 1156 struct runtime_stat *st)
@@ -1178,8 +1203,11 @@ static void printout(int id, int nr, struct perf_evsel *counter, double uval,
1178 counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, 1203 counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
1179 csv_sep); 1204 csv_sep);
1180 1205
1181 if (counter->supported) 1206 if (counter->supported) {
1182 print_free_counters_hint = 1; 1207 print_free_counters_hint = 1;
1208 if (is_mixed_hw_group(counter))
1209 print_mixed_hw_group_error = 1;
1210 }
1183 1211
1184 fprintf(stat_config.output, "%-*s%s", 1212 fprintf(stat_config.output, "%-*s%s",
1185 csv_output ? 0 : unit_width, 1213 csv_output ? 0 : unit_width,
@@ -1256,7 +1284,8 @@ static void uniquify_event_name(struct perf_evsel *counter)
1256 char *new_name; 1284 char *new_name;
1257 char *config; 1285 char *config;
1258 1286
1259 if (!counter->pmu_name || !strncmp(counter->name, counter->pmu_name, 1287 if (counter->uniquified_name ||
1288 !counter->pmu_name || !strncmp(counter->name, counter->pmu_name,
1260 strlen(counter->pmu_name))) 1289 strlen(counter->pmu_name)))
1261 return; 1290 return;
1262 1291
@@ -1274,6 +1303,8 @@ static void uniquify_event_name(struct perf_evsel *counter)
1274 counter->name = new_name; 1303 counter->name = new_name;
1275 } 1304 }
1276 } 1305 }
1306
1307 counter->uniquified_name = true;
1277} 1308}
1278 1309
1279static void collect_all_aliases(struct perf_evsel *counter, 1310static void collect_all_aliases(struct perf_evsel *counter,
@@ -1757,6 +1788,11 @@ static void print_footer(void)
1757" echo 0 > /proc/sys/kernel/nmi_watchdog\n" 1788" echo 0 > /proc/sys/kernel/nmi_watchdog\n"
1758" perf stat ...\n" 1789" perf stat ...\n"
1759" echo 1 > /proc/sys/kernel/nmi_watchdog\n"); 1790" echo 1 > /proc/sys/kernel/nmi_watchdog\n");
1791
1792 if (print_mixed_hw_group_error)
1793 fprintf(output,
1794 "The events in group usually have to be from "
1795 "the same PMU. Try reorganizing the group.\n");
1760} 1796}
1761 1797
1762static void print_counters(struct timespec *ts, int argc, const char **argv) 1798static void print_counters(struct timespec *ts, int argc, const char **argv)
diff --git a/tools/perf/pmu-events/arch/s390/mapfile.csv b/tools/perf/pmu-events/arch/s390/mapfile.csv
index ca7682748a4b..78bcf7f8e206 100644
--- a/tools/perf/pmu-events/arch/s390/mapfile.csv
+++ b/tools/perf/pmu-events/arch/s390/mapfile.csv
@@ -1,6 +1,6 @@
1Family-model,Version,Filename,EventType 1Family-model,Version,Filename,EventType
2209[78],1,cf_z10,core 2^IBM.209[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z10,core
3281[78],1,cf_z196,core 3^IBM.281[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z196,core
4282[78],1,cf_zec12,core 4^IBM.282[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_zec12,core
5296[45],1,cf_z13,core 5^IBM.296[45].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z13,core
63906,3,cf_z14,core 6^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core
diff --git a/tools/perf/tests/attr/test-record-group-sampling b/tools/perf/tests/attr/test-record-group-sampling
index f906b793196f..8a33ca4f9e1f 100644
--- a/tools/perf/tests/attr/test-record-group-sampling
+++ b/tools/perf/tests/attr/test-record-group-sampling
@@ -35,3 +35,6 @@ inherit=0
35# sampling disabled 35# sampling disabled
36sample_freq=0 36sample_freq=0
37sample_period=0 37sample_period=0
38freq=0
39write_backward=0
40sample_id_all=0
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index 1ecc1f0ff84a..016882dbbc16 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -19,12 +19,10 @@ trace_libc_inet_pton_backtrace() {
19 expected[1]=".*inet_pton[[:space:]]\($libc\)$" 19 expected[1]=".*inet_pton[[:space:]]\($libc\)$"
20 case "$(uname -m)" in 20 case "$(uname -m)" in
21 s390x) 21 s390x)
22 eventattr='call-graph=dwarf' 22 eventattr='call-graph=dwarf,max-stack=4'
23 expected[2]="gaih_inet.*[[:space:]]\($libc|inlined\)$" 23 expected[2]="gaih_inet.*[[:space:]]\($libc|inlined\)$"
24 expected[3]="__GI_getaddrinfo[[:space:]]\($libc|inlined\)$" 24 expected[3]="(__GI_)?getaddrinfo[[:space:]]\($libc|inlined\)$"
25 expected[4]="main[[:space:]]\(.*/bin/ping.*\)$" 25 expected[4]="main[[:space:]]\(.*/bin/ping.*\)$"
26 expected[5]="__libc_start_main[[:space:]]\($libc\)$"
27 expected[6]="_start[[:space:]]\(.*/bin/ping.*\)$"
28 ;; 26 ;;
29 *) 27 *)
30 eventattr='max-stack=3' 28 eventattr='max-stack=3'
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 3e87486c28fe..4cd2cf93f726 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -930,8 +930,11 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
930 * than leader in case leader 'leads' the sampling. 930 * than leader in case leader 'leads' the sampling.
931 */ 931 */
932 if ((leader != evsel) && leader->sample_read) { 932 if ((leader != evsel) && leader->sample_read) {
933 attr->sample_freq = 0; 933 attr->freq = 0;
934 attr->sample_period = 0; 934 attr->sample_freq = 0;
935 attr->sample_period = 0;
936 attr->write_backward = 0;
937 attr->sample_id_all = 0;
935 } 938 }
936 939
937 if (opts->no_samples) 940 if (opts->no_samples)
@@ -1922,7 +1925,8 @@ try_fallback:
1922 goto fallback_missing_features; 1925 goto fallback_missing_features;
1923 } else if (!perf_missing_features.group_read && 1926 } else if (!perf_missing_features.group_read &&
1924 evsel->attr.inherit && 1927 evsel->attr.inherit &&
1925 (evsel->attr.read_format & PERF_FORMAT_GROUP)) { 1928 (evsel->attr.read_format & PERF_FORMAT_GROUP) &&
1929 perf_evsel__is_group_leader(evsel)) {
1926 perf_missing_features.group_read = true; 1930 perf_missing_features.group_read = true;
1927 pr_debug2("switching off group read\n"); 1931 pr_debug2("switching off group read\n");
1928 goto fallback_missing_features; 1932 goto fallback_missing_features;
@@ -2754,8 +2758,14 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
2754 (paranoid = perf_event_paranoid()) > 1) { 2758 (paranoid = perf_event_paranoid()) > 1) {
2755 const char *name = perf_evsel__name(evsel); 2759 const char *name = perf_evsel__name(evsel);
2756 char *new_name; 2760 char *new_name;
2761 const char *sep = ":";
2757 2762
2758 if (asprintf(&new_name, "%s%su", name, strchr(name, ':') ? "" : ":") < 0) 2763 /* Is there already the separator in the name. */
2764 if (strchr(name, '/') ||
2765 strchr(name, ':'))
2766 sep = "";
2767
2768 if (asprintf(&new_name, "%s%su", name, sep) < 0)
2759 return false; 2769 return false;
2760 2770
2761 if (evsel->name) 2771 if (evsel->name)
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index d3ee3af618ef..92ec009a292d 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -115,6 +115,7 @@ struct perf_evsel {
115 unsigned int sample_size; 115 unsigned int sample_size;
116 int id_pos; 116 int id_pos;
117 int is_pos; 117 int is_pos;
118 bool uniquified_name;
118 bool snapshot; 119 bool snapshot;
119 bool supported; 120 bool supported;
120 bool needs_swap; 121 bool needs_swap;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 2eca8478e24f..32d50492505d 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1019,13 +1019,6 @@ int machine__load_vmlinux_path(struct machine *machine, enum map_type type)
1019 return ret; 1019 return ret;
1020} 1020}
1021 1021
1022static void map_groups__fixup_end(struct map_groups *mg)
1023{
1024 int i;
1025 for (i = 0; i < MAP__NR_TYPES; ++i)
1026 __map_groups__fixup_end(mg, i);
1027}
1028
1029static char *get_kernel_version(const char *root_dir) 1022static char *get_kernel_version(const char *root_dir)
1030{ 1023{
1031 char version[PATH_MAX]; 1024 char version[PATH_MAX];
@@ -1233,6 +1226,7 @@ int machine__create_kernel_maps(struct machine *machine)
1233{ 1226{
1234 struct dso *kernel = machine__get_kernel(machine); 1227 struct dso *kernel = machine__get_kernel(machine);
1235 const char *name = NULL; 1228 const char *name = NULL;
1229 struct map *map;
1236 u64 addr = 0; 1230 u64 addr = 0;
1237 int ret; 1231 int ret;
1238 1232
@@ -1259,13 +1253,25 @@ int machine__create_kernel_maps(struct machine *machine)
1259 machine__destroy_kernel_maps(machine); 1253 machine__destroy_kernel_maps(machine);
1260 return -1; 1254 return -1;
1261 } 1255 }
1262 machine__set_kernel_mmap(machine, addr, 0); 1256
1257 /* we have a real start address now, so re-order the kmaps */
1258 map = machine__kernel_map(machine);
1259
1260 map__get(map);
1261 map_groups__remove(&machine->kmaps, map);
1262
1263 /* assume it's the last in the kmaps */
1264 machine__set_kernel_mmap(machine, addr, ~0ULL);
1265
1266 map_groups__insert(&machine->kmaps, map);
1267 map__put(map);
1263 } 1268 }
1264 1269
1265 /* 1270 /* update end address of the kernel map using adjacent module address */
1266 * Now that we have all the maps created, just set the ->end of them: 1271 map = map__next(machine__kernel_map(machine));
1267 */ 1272 if (map)
1268 map_groups__fixup_end(&machine->kmaps); 1273 machine__set_kernel_mmap(machine, addr, map->start);
1274
1269 return 0; 1275 return 0;
1270} 1276}
1271 1277
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 7afeb80cc39e..d14464c42714 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -224,15 +224,15 @@ event_def: event_pmu |
224 event_bpf_file 224 event_bpf_file
225 225
226event_pmu: 226event_pmu:
227PE_NAME opt_event_config 227PE_NAME '/' event_config '/'
228{ 228{
229 struct list_head *list, *orig_terms, *terms; 229 struct list_head *list, *orig_terms, *terms;
230 230
231 if (parse_events_copy_term_list($2, &orig_terms)) 231 if (parse_events_copy_term_list($3, &orig_terms))
232 YYABORT; 232 YYABORT;
233 233
234 ALLOC_LIST(list); 234 ALLOC_LIST(list);
235 if (parse_events_add_pmu(_parse_state, list, $1, $2, false)) { 235 if (parse_events_add_pmu(_parse_state, list, $1, $3, false)) {
236 struct perf_pmu *pmu = NULL; 236 struct perf_pmu *pmu = NULL;
237 int ok = 0; 237 int ok = 0;
238 char *pattern; 238 char *pattern;
@@ -262,7 +262,7 @@ PE_NAME opt_event_config
262 if (!ok) 262 if (!ok)
263 YYABORT; 263 YYABORT;
264 } 264 }
265 parse_events_terms__delete($2); 265 parse_events_terms__delete($3);
266 parse_events_terms__delete(orig_terms); 266 parse_events_terms__delete(orig_terms);
267 $$ = list; 267 $$ = list;
268} 268}
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 61a5e5027338..d2fb597c9a8c 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -539,9 +539,10 @@ static bool pmu_is_uncore(const char *name)
539 539
540/* 540/*
541 * PMU CORE devices have different name other than cpu in sysfs on some 541 * PMU CORE devices have different name other than cpu in sysfs on some
542 * platforms. looking for possible sysfs files to identify as core device. 542 * platforms.
543 * Looking for possible sysfs files to identify the arm core device.
543 */ 544 */
544static int is_pmu_core(const char *name) 545static int is_arm_pmu_core(const char *name)
545{ 546{
546 struct stat st; 547 struct stat st;
547 char path[PATH_MAX]; 548 char path[PATH_MAX];
@@ -550,12 +551,6 @@ static int is_pmu_core(const char *name)
550 if (!sysfs) 551 if (!sysfs)
551 return 0; 552 return 0;
552 553
553 /* Look for cpu sysfs (x86 and others) */
554 scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/cpu", sysfs);
555 if ((stat(path, &st) == 0) &&
556 (strncmp(name, "cpu", strlen("cpu")) == 0))
557 return 1;
558
559 /* Look for cpu sysfs (specific to arm) */ 554 /* Look for cpu sysfs (specific to arm) */
560 scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/cpus", 555 scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/cpus",
561 sysfs, name); 556 sysfs, name);
@@ -586,7 +581,7 @@ char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
586 * cpuid string generated on this platform. 581 * cpuid string generated on this platform.
587 * Otherwise return non-zero. 582 * Otherwise return non-zero.
588 */ 583 */
589int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid) 584int strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
590{ 585{
591 regex_t re; 586 regex_t re;
592 regmatch_t pmatch[1]; 587 regmatch_t pmatch[1];
@@ -668,6 +663,7 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
668 struct pmu_events_map *map; 663 struct pmu_events_map *map;
669 struct pmu_event *pe; 664 struct pmu_event *pe;
670 const char *name = pmu->name; 665 const char *name = pmu->name;
666 const char *pname;
671 667
672 map = perf_pmu__find_map(pmu); 668 map = perf_pmu__find_map(pmu);
673 if (!map) 669 if (!map)
@@ -686,11 +682,9 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
686 break; 682 break;
687 } 683 }
688 684
689 if (!is_pmu_core(name)) { 685 if (!is_arm_pmu_core(name)) {
690 /* check for uncore devices */ 686 pname = pe->pmu ? pe->pmu : "cpu";
691 if (pe->pmu == NULL) 687 if (strncmp(pname, name, strlen(pname)))
692 continue;
693 if (strncmp(pe->pmu, name, strlen(pe->pmu)))
694 continue; 688 continue;
695 } 689 }
696 690
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index aa336f0abebc..ed197eef1cfc 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -1200,7 +1200,7 @@ static void test_stacktrace_build_id(void)
1200 1200
1201 assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null") 1201 assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
1202 == 0); 1202 == 0);
1203 assert(system("./urandom_read if=/dev/urandom of=/dev/zero count=4 2> /dev/null") == 0); 1203 assert(system("./urandom_read") == 0);
1204 /* disable stack trace collection */ 1204 /* disable stack trace collection */
1205 key = 0; 1205 key = 0;
1206 val = 1; 1206 val = 1;
@@ -1250,7 +1250,7 @@ static void test_stacktrace_build_id(void)
1250 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); 1250 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
1251 1251
1252 if (CHECK(build_id_matches < 1, "build id match", 1252 if (CHECK(build_id_matches < 1, "build id match",
1253 "Didn't find expected build ID from the map")) 1253 "Didn't find expected build ID from the map\n"))
1254 goto disable_pmu; 1254 goto disable_pmu;
1255 1255
1256 stack_trace_len = PERF_MAX_STACK_DEPTH 1256 stack_trace_len = PERF_MAX_STACK_DEPTH
diff --git a/tools/testing/selftests/firmware/Makefile b/tools/testing/selftests/firmware/Makefile
index 826f38d5dd19..261c81f08606 100644
--- a/tools/testing/selftests/firmware/Makefile
+++ b/tools/testing/selftests/firmware/Makefile
@@ -4,6 +4,7 @@
4all: 4all:
5 5
6TEST_PROGS := fw_run_tests.sh 6TEST_PROGS := fw_run_tests.sh
7TEST_FILES := fw_fallback.sh fw_filesystem.sh fw_lib.sh
7 8
8include ../lib.mk 9include ../lib.mk
9 10
diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh
index 9ea31b57d71a..962d7f4ac627 100755
--- a/tools/testing/selftests/firmware/fw_lib.sh
+++ b/tools/testing/selftests/firmware/fw_lib.sh
@@ -154,11 +154,13 @@ test_finish()
154 if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then 154 if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
155 echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout 155 echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
156 fi 156 fi
157 if [ "$OLD_FWPATH" = "" ]; then
158 OLD_FWPATH=" "
159 fi
160 if [ "$TEST_REQS_FW_SET_CUSTOM_PATH" = "yes" ]; then 157 if [ "$TEST_REQS_FW_SET_CUSTOM_PATH" = "yes" ]; then
161 echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path 158 if [ "$OLD_FWPATH" = "" ]; then
159 # A zero-length write won't work; write a null byte
160 printf '\000' >/sys/module/firmware_class/parameters/path
161 else
162 echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
163 fi
162 fi 164 fi
163 if [ -f $FW ]; then 165 if [ -f $FW ]; then
164 rm -f "$FW" 166 rm -f "$FW"
diff --git a/tools/testing/selftests/firmware/fw_run_tests.sh b/tools/testing/selftests/firmware/fw_run_tests.sh
index 06d638e9dc62..cffdd4eb0a57 100755
--- a/tools/testing/selftests/firmware/fw_run_tests.sh
+++ b/tools/testing/selftests/firmware/fw_run_tests.sh
@@ -66,5 +66,5 @@ if [ -f $FW_FORCE_SYSFS_FALLBACK ]; then
66 run_test_config_0003 66 run_test_config_0003
67else 67else
68 echo "Running basic kernel configuration, working with your config" 68 echo "Running basic kernel configuration, working with your config"
69 run_test 69 run_tests
70fi 70fi
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
index 786dce7e48be..2aabab363cfb 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
@@ -29,7 +29,7 @@ do_reset
29 29
30echo "Test extended error support" 30echo "Test extended error support"
31echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' > events/sched/sched_wakeup/trigger 31echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' > events/sched/sched_wakeup/trigger
32echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger &>/dev/null 32! echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger 2> /dev/null
33if ! grep -q "ERROR:" events/sched/sched_wakeup/hist; then 33if ! grep -q "ERROR:" events/sched/sched_wakeup/hist; then
34 fail "Failed to generate extended error in histogram" 34 fail "Failed to generate extended error in histogram"
35fi 35fi
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
new file mode 100644
index 000000000000..c193dce611a2
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
@@ -0,0 +1,44 @@
1#!/bin/sh
2# description: event trigger - test multiple actions on hist trigger
3
4
5do_reset() {
6 reset_trigger
7 echo > set_event
8 clear_trace
9}
10
11fail() { #msg
12 do_reset
13 echo $1
14 exit_fail
15}
16
17if [ ! -f set_event ]; then
18 echo "event tracing is not supported"
19 exit_unsupported
20fi
21
22if [ ! -f synthetic_events ]; then
23 echo "synthetic event is not supported"
24 exit_unsupported
25fi
26
27clear_synthetic_events
28reset_tracer
29do_reset
30
31echo "Test multiple actions on hist trigger"
32echo 'wakeup_latency u64 lat; pid_t pid' >> synthetic_events
33TRIGGER1=events/sched/sched_wakeup/trigger
34TRIGGER2=events/sched/sched_switch/trigger
35
36echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="cyclictest"' > $TRIGGER1
37echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0 if next_comm=="cyclictest"' >> $TRIGGER2
38echo 'hist:keys=next_pid:onmatch(sched.sched_wakeup).wakeup_latency(sched.sched_switch.$wakeup_lat,next_pid) if next_comm=="cyclictest"' >> $TRIGGER2
39echo 'hist:keys=next_pid:onmatch(sched.sched_wakeup).wakeup_latency(sched.sched_switch.$wakeup_lat,prev_pid) if next_comm=="cyclictest"' >> $TRIGGER2
40echo 'hist:keys=next_pid if next_comm=="cyclictest"' >> $TRIGGER2
41
42do_reset
43
44exit 0
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 195e9d4739a9..c1b1a4dc6a96 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -20,10 +20,10 @@ all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
20 20
21.ONESHELL: 21.ONESHELL:
22define RUN_TESTS 22define RUN_TESTS
23 @export KSFT_TAP_LEVEL=`echo 1`; 23 @export KSFT_TAP_LEVEL=`echo 1`; \
24 @test_num=`echo 0`; 24 test_num=`echo 0`; \
25 @echo "TAP version 13"; 25 echo "TAP version 13"; \
26 @for TEST in $(1); do \ 26 for TEST in $(1); do \
27 BASENAME_TEST=`basename $$TEST`; \ 27 BASENAME_TEST=`basename $$TEST`; \
28 test_num=`echo $$test_num+1 | bc`; \ 28 test_num=`echo $$test_num+1 | bc`; \
29 echo "selftests: $$BASENAME_TEST"; \ 29 echo "selftests: $$BASENAME_TEST"; \
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index df9102ec7b7a..73af45773938 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -7,14 +7,16 @@ CFLAGS += -I../../../../usr/include/
7TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh 7TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh
8TEST_PROGS += fib_tests.sh fib-onlink-tests.sh in_netns.sh pmtu.sh udpgso.sh 8TEST_PROGS += fib_tests.sh fib-onlink-tests.sh in_netns.sh pmtu.sh udpgso.sh
9TEST_PROGS += udpgso_bench.sh 9TEST_PROGS += udpgso_bench.sh
10TEST_GEN_PROGS_EXTENDED := in_netns.sh
10TEST_GEN_FILES = socket 11TEST_GEN_FILES = socket
11TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy 12TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
12TEST_GEN_FILES += tcp_mmap 13TEST_GEN_FILES += tcp_mmap tcp_inq
14TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx
13TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa 15TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
14TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict 16TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict
15TEST_GEN_PROGS += udpgso udpgso_bench_tx udpgso_bench_rx
16 17
17include ../lib.mk 18include ../lib.mk
18 19
19$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma 20$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma
20$(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread 21$(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread
22$(OUTPUT)/tcp_inq: LDFLAGS += -lpthread
diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
index 75d922438bc9..d8313d0438b7 100755
--- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
@@ -1,6 +1,7 @@
1#!/bin/bash 1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0 2# SPDX-License-Identifier: GPL-2.0
3 3
4ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding"
4NUM_NETIFS=4 5NUM_NETIFS=4
5CHECK_TC="yes" 6CHECK_TC="yes"
6source lib.sh 7source lib.sh
@@ -75,14 +76,31 @@ cleanup()
75 vrf_cleanup 76 vrf_cleanup
76} 77}
77 78
79ping_ipv4()
80{
81 ping_test $h1 192.0.2.2
82}
83
84ping_ipv6()
85{
86 ping6_test $h1 2001:db8:1::2
87}
88
89learning()
90{
91 learning_test "br0" $swp1 $h1 $h2
92}
93
94flooding()
95{
96 flood_test $swp2 $h1 $h2
97}
98
78trap cleanup EXIT 99trap cleanup EXIT
79 100
80setup_prepare 101setup_prepare
81setup_wait 102setup_wait
82 103
83ping_test $h1 192.0.2.2 104tests_run
84ping6_test $h1 2001:db8:1::2
85learning_test "br0" $swp1 $h1 $h2
86flood_test $swp2 $h1 $h2
87 105
88exit $EXIT_STATUS 106exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_unaware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_unaware.sh
index 1cddf06f691d..c15c6c85c984 100755
--- a/tools/testing/selftests/net/forwarding/bridge_vlan_unaware.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_vlan_unaware.sh
@@ -1,6 +1,7 @@
1#!/bin/bash 1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0 2# SPDX-License-Identifier: GPL-2.0
3 3
4ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding"
4NUM_NETIFS=4 5NUM_NETIFS=4
5source lib.sh 6source lib.sh
6 7
@@ -73,14 +74,31 @@ cleanup()
73 vrf_cleanup 74 vrf_cleanup
74} 75}
75 76
77ping_ipv4()
78{
79 ping_test $h1 192.0.2.2
80}
81
82ping_ipv6()
83{
84 ping6_test $h1 2001:db8:1::2
85}
86
87learning()
88{
89 learning_test "br0" $swp1 $h1 $h2
90}
91
92flooding()
93{
94 flood_test $swp2 $h1 $h2
95}
96
76trap cleanup EXIT 97trap cleanup EXIT
77 98
78setup_prepare 99setup_prepare
79setup_wait 100setup_wait
80 101
81ping_test $h1 192.0.2.2 102tests_run
82ping6_test $h1 2001:db8:1::2
83learning_test "br0" $swp1 $h1 $h2
84flood_test $swp2 $h1 $h2
85 103
86exit $EXIT_STATUS 104exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 1ac6c62271f3..91041c49655b 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -321,6 +321,25 @@ simple_if_fini()
321 vrf_destroy $vrf_name 321 vrf_destroy $vrf_name
322} 322}
323 323
324tunnel_create()
325{
326 local name=$1; shift
327 local type=$1; shift
328 local local=$1; shift
329 local remote=$1; shift
330
331 ip link add name $name type $type \
332 local $local remote $remote "$@"
333 ip link set dev $name up
334}
335
336tunnel_destroy()
337{
338 local name=$1; shift
339
340 ip link del dev $name
341}
342
324master_name_get() 343master_name_get()
325{ 344{
326 local if_name=$1 345 local if_name=$1
@@ -335,6 +354,15 @@ link_stats_tx_packets_get()
335 ip -j -s link show dev $if_name | jq '.[]["stats64"]["tx"]["packets"]' 354 ip -j -s link show dev $if_name | jq '.[]["stats64"]["tx"]["packets"]'
336} 355}
337 356
357tc_rule_stats_get()
358{
359 local dev=$1; shift
360 local pref=$1; shift
361
362 tc -j -s filter show dev $dev ingress pref $pref |
363 jq '.[1].options.actions[].stats.packets'
364}
365
338mac_get() 366mac_get()
339{ 367{
340 local if_name=$1 368 local if_name=$1
@@ -353,19 +381,33 @@ bridge_ageing_time_get()
353 echo $((ageing_time / 100)) 381 echo $((ageing_time / 100))
354} 382}
355 383
356forwarding_enable() 384declare -A SYSCTL_ORIG
385sysctl_set()
386{
387 local key=$1; shift
388 local value=$1; shift
389
390 SYSCTL_ORIG[$key]=$(sysctl -n $key)
391 sysctl -qw $key=$value
392}
393
394sysctl_restore()
357{ 395{
358 ipv4_fwd=$(sysctl -n net.ipv4.conf.all.forwarding) 396 local key=$1; shift
359 ipv6_fwd=$(sysctl -n net.ipv6.conf.all.forwarding)
360 397
361 sysctl -q -w net.ipv4.conf.all.forwarding=1 398 sysctl -qw $key=${SYSCTL_ORIG["$key"]}
362 sysctl -q -w net.ipv6.conf.all.forwarding=1 399}
400
401forwarding_enable()
402{
403 sysctl_set net.ipv4.conf.all.forwarding 1
404 sysctl_set net.ipv6.conf.all.forwarding 1
363} 405}
364 406
365forwarding_restore() 407forwarding_restore()
366{ 408{
367 sysctl -q -w net.ipv6.conf.all.forwarding=$ipv6_fwd 409 sysctl_restore net.ipv6.conf.all.forwarding
368 sysctl -q -w net.ipv4.conf.all.forwarding=$ipv4_fwd 410 sysctl_restore net.ipv4.conf.all.forwarding
369} 411}
370 412
371tc_offload_check() 413tc_offload_check()
@@ -381,6 +423,83 @@ tc_offload_check()
381 return 0 423 return 0
382} 424}
383 425
426slow_path_trap_install()
427{
428 local dev=$1; shift
429 local direction=$1; shift
430
431 if [ "${tcflags/skip_hw}" != "$tcflags" ]; then
432 # For slow-path testing, we need to install a trap to get to
433 # slow path the packets that would otherwise be switched in HW.
434 tc filter add dev $dev $direction pref 1 \
435 flower skip_sw action trap
436 fi
437}
438
439slow_path_trap_uninstall()
440{
441 local dev=$1; shift
442 local direction=$1; shift
443
444 if [ "${tcflags/skip_hw}" != "$tcflags" ]; then
445 tc filter del dev $dev $direction pref 1 flower skip_sw
446 fi
447}
448
449__icmp_capture_add_del()
450{
451 local add_del=$1; shift
452 local pref=$1; shift
453 local vsuf=$1; shift
454 local tundev=$1; shift
455 local filter=$1; shift
456
457 tc filter $add_del dev "$tundev" ingress \
458 proto ip$vsuf pref $pref \
459 flower ip_proto icmp$vsuf $filter \
460 action pass
461}
462
463icmp_capture_install()
464{
465 __icmp_capture_add_del add 100 "" "$@"
466}
467
468icmp_capture_uninstall()
469{
470 __icmp_capture_add_del del 100 "" "$@"
471}
472
473icmp6_capture_install()
474{
475 __icmp_capture_add_del add 100 v6 "$@"
476}
477
478icmp6_capture_uninstall()
479{
480 __icmp_capture_add_del del 100 v6 "$@"
481}
482
483matchall_sink_create()
484{
485 local dev=$1; shift
486
487 tc qdisc add dev $dev clsact
488 tc filter add dev $dev ingress \
489 pref 10000 \
490 matchall \
491 action drop
492}
493
494tests_run()
495{
496 local current_test
497
498 for current_test in ${TESTS:-$ALL_TESTS}; do
499 $current_test
500 done
501}
502
384############################################################################## 503##############################################################################
385# Tests 504# Tests
386 505
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre.sh b/tools/testing/selftests/net/forwarding/mirror_gre.sh
new file mode 100755
index 000000000000..c6786d1b2b96
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/mirror_gre.sh
@@ -0,0 +1,161 @@
1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0
3
4# This test uses standard topology for testing gretap. See
5# mirror_gre_topo_lib.sh for more details.
6#
7# Test for "tc action mirred egress mirror" when the device to mirror to is a
8# gretap or ip6gretap netdevice. Expect that the packets come out encapsulated,
9# and another gretap / ip6gretap netdevice is then capable of decapsulating the
10# traffic. Test that the payload is what is expected (ICMP ping request or
11# reply, depending on test).
12
13ALL_TESTS="
14 test_gretap
15 test_ip6gretap
16 test_gretap_mac
17 test_ip6gretap_mac
18 test_two_spans
19"
20
21NUM_NETIFS=6
22source lib.sh
23source mirror_lib.sh
24source mirror_gre_lib.sh
25source mirror_gre_topo_lib.sh
26
27setup_prepare()
28{
29 h1=${NETIFS[p1]}
30 swp1=${NETIFS[p2]}
31
32 swp2=${NETIFS[p3]}
33 h2=${NETIFS[p4]}
34
35 swp3=${NETIFS[p5]}
36 h3=${NETIFS[p6]}
37
38 vrf_prepare
39 mirror_gre_topo_create
40
41 ip address add dev $swp3 192.0.2.129/28
42 ip address add dev $h3 192.0.2.130/28
43
44 ip address add dev $swp3 2001:db8:2::1/64
45 ip address add dev $h3 2001:db8:2::2/64
46}
47
48cleanup()
49{
50 pre_cleanup
51
52 ip address del dev $h3 2001:db8:2::2/64
53 ip address del dev $swp3 2001:db8:2::1/64
54
55 ip address del dev $h3 192.0.2.130/28
56 ip address del dev $swp3 192.0.2.129/28
57
58 mirror_gre_topo_destroy
59 vrf_cleanup
60}
61
62test_span_gre_mac()
63{
64 local tundev=$1; shift
65 local direction=$1; shift
66 local prot=$1; shift
67 local what=$1; shift
68
69 local swp3mac=$(mac_get $swp3)
70 local h3mac=$(mac_get $h3)
71
72 RET=0
73
74 mirror_install $swp1 $direction $tundev "matchall $tcflags"
75 tc qdisc add dev $h3 clsact
76 tc filter add dev $h3 ingress pref 77 prot $prot \
77 flower ip_proto 0x2f src_mac $swp3mac dst_mac $h3mac \
78 action pass
79
80 mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 10
81
82 tc filter del dev $h3 ingress pref 77
83 tc qdisc del dev $h3 clsact
84 mirror_uninstall $swp1 $direction
85
86 log_test "$direction $what: envelope MAC ($tcflags)"
87}
88
89test_two_spans()
90{
91 RET=0
92
93 mirror_install $swp1 ingress gt4 "matchall $tcflags"
94 mirror_install $swp1 egress gt6 "matchall $tcflags"
95 quick_test_span_gre_dir gt4 ingress
96 quick_test_span_gre_dir gt6 egress
97
98 mirror_uninstall $swp1 ingress
99 fail_test_span_gre_dir gt4 ingress
100 quick_test_span_gre_dir gt6 egress
101
102 mirror_install $swp1 ingress gt4 "matchall $tcflags"
103 mirror_uninstall $swp1 egress
104 quick_test_span_gre_dir gt4 ingress
105 fail_test_span_gre_dir gt6 egress
106
107 mirror_uninstall $swp1 ingress
108 log_test "two simultaneously configured mirrors ($tcflags)"
109}
110
111test_gretap()
112{
113 full_test_span_gre_dir gt4 ingress 8 0 "mirror to gretap"
114 full_test_span_gre_dir gt4 egress 0 8 "mirror to gretap"
115}
116
117test_ip6gretap()
118{
119 full_test_span_gre_dir gt6 ingress 8 0 "mirror to ip6gretap"
120 full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap"
121}
122
123test_gretap_mac()
124{
125 test_span_gre_mac gt4 ingress ip "mirror to gretap"
126 test_span_gre_mac gt4 egress ip "mirror to gretap"
127}
128
129test_ip6gretap_mac()
130{
131 test_span_gre_mac gt6 ingress ipv6 "mirror to ip6gretap"
132 test_span_gre_mac gt6 egress ipv6 "mirror to ip6gretap"
133}
134
135test_all()
136{
137 slow_path_trap_install $swp1 ingress
138 slow_path_trap_install $swp1 egress
139
140 tests_run
141
142 slow_path_trap_uninstall $swp1 egress
143 slow_path_trap_uninstall $swp1 ingress
144}
145
146trap cleanup EXIT
147
148setup_prepare
149setup_wait
150
151tcflags="skip_hw"
152test_all
153
154if ! tc_offload_check; then
155 echo "WARN: Could not test offloaded functionality"
156else
157 tcflags="skip_sw"
158 test_all
159fi
160
161exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh
new file mode 100755
index 000000000000..360ca133bead
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh
@@ -0,0 +1,226 @@
1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0
3
4# +---------------------+ +---------------------+
5# | H1 | | H2 |
6# | + $h1 | | $h2 + |
7# | | 192.0.2.1/28 | | 192.0.2.2/28 | |
8# +-----|---------------+ +---------------|-----+
9# | |
10# +-----|-------------------------------------------------------------|-----+
11# | SW o--> mirror | |
12# | +---|-------------------------------------------------------------|---+ |
13# | | + $swp1 BR $swp2 + | |
14# | +---------------------------------------------------------------------+ |
15# | |
16# | +---------------------------------------------------------------------+ |
17# | | OL + gt6 (ip6gretap) + gt4 (gretap) | |
18# | | : loc=2001:db8:2::1 : loc=192.0.2.129 | |
19# | | : rem=2001:db8:2::2 : rem=192.0.2.130 | |
20# | | : ttl=100 : ttl=100 | |
21# | | : tos=inherit : tos=inherit | |
22# | +-------------------------:--|-------------------:--|-----------------+ |
23# | : | : | |
24# | +-------------------------:--|-------------------:--|-----------------+ |
25# | | UL : |,---------------------' | |
26# | | + $swp3 : || : | |
27# | | | 192.0.2.129/28 : vv : | |
28# | | | 2001:db8:2::1/64 : + ul (dummy) : | |
29# | +---|---------------------:----------------------:--------------------+ |
30# +-----|---------------------:----------------------:----------------------+
31# | : :
32# +-----|---------------------:----------------------:----------------------+
33# | H3 + $h3 + h3-gt6 (ip6gretap) + h3-gt4 (gretap) |
34# | 192.0.2.130/28 loc=2001:db8:2::2 loc=192.0.2.130 |
35# | 2001:db8:2::2/64 rem=2001:db8:2::1 rem=192.0.2.129 |
36# | ttl=100 ttl=100 |
37# | tos=inherit tos=inherit |
38# | |
39# +-------------------------------------------------------------------------+
40#
41# This tests mirroring to gretap and ip6gretap configured in an overlay /
42# underlay manner, i.e. with a bound dummy device that marks underlay VRF where
43# the encapsulated packed should be routed.
44
45ALL_TESTS="
46 test_gretap
47 test_ip6gretap
48"
49
50NUM_NETIFS=6
51source lib.sh
52source mirror_lib.sh
53source mirror_gre_lib.sh
54
55h1_create()
56{
57 simple_if_init $h1 192.0.2.1/28
58}
59
60h1_destroy()
61{
62 simple_if_fini $h1 192.0.2.1/28
63}
64
65h2_create()
66{
67 simple_if_init $h2 192.0.2.2/28
68}
69
70h2_destroy()
71{
72 simple_if_fini $h2 192.0.2.2/28
73}
74
75h3_create()
76{
77 simple_if_init $h3 192.0.2.130/28 2001:db8:2::2/64
78
79 tunnel_create h3-gt4 gretap 192.0.2.130 192.0.2.129
80 ip link set h3-gt4 vrf v$h3
81 matchall_sink_create h3-gt4
82
83 tunnel_create h3-gt6 ip6gretap 2001:db8:2::2 2001:db8:2::1
84 ip link set h3-gt6 vrf v$h3
85 matchall_sink_create h3-gt6
86}
87
88h3_destroy()
89{
90 tunnel_destroy h3-gt6
91 tunnel_destroy h3-gt4
92
93 simple_if_fini $h3 192.0.2.130/28 2001:db8:2::2/64
94}
95
96switch_create()
97{
98 # Bridge between H1 and H2.
99
100 ip link add name br1 type bridge vlan_filtering 1
101 ip link set dev br1 up
102
103 ip link set dev $swp1 master br1
104 ip link set dev $swp1 up
105
106 ip link set dev $swp2 master br1
107 ip link set dev $swp2 up
108
109 tc qdisc add dev $swp1 clsact
110
111 # Underlay.
112
113 simple_if_init $swp3 192.0.2.129/28 2001:db8:2::1/64
114
115 ip link add name ul type dummy
116 ip link set dev ul master v$swp3
117 ip link set dev ul up
118
119 # Overlay.
120
121 vrf_create vrf-ol
122 ip link set dev vrf-ol up
123
124 tunnel_create gt4 gretap 192.0.2.129 192.0.2.130 \
125 ttl 100 tos inherit dev ul
126 ip link set dev gt4 master vrf-ol
127 ip link set dev gt4 up
128
129 tunnel_create gt6 ip6gretap 2001:db8:2::1 2001:db8:2::2 \
130 ttl 100 tos inherit dev ul allow-localremote
131 ip link set dev gt6 master vrf-ol
132 ip link set dev gt6 up
133}
134
135switch_destroy()
136{
137 vrf_destroy vrf-ol
138
139 tunnel_destroy gt6
140 tunnel_destroy gt4
141
142 simple_if_fini $swp3 192.0.2.129/28 2001:db8:2::1/64
143
144 ip link del dev ul
145
146 tc qdisc del dev $swp1 clsact
147
148 ip link set dev $swp1 down
149 ip link set dev $swp2 down
150 ip link del dev br1
151}
152
153setup_prepare()
154{
155 h1=${NETIFS[p1]}
156 swp1=${NETIFS[p2]}
157
158 swp2=${NETIFS[p3]}
159 h2=${NETIFS[p4]}
160
161 swp3=${NETIFS[p5]}
162 h3=${NETIFS[p6]}
163
164 vrf_prepare
165
166 h1_create
167 h2_create
168 h3_create
169
170 switch_create
171}
172
173cleanup()
174{
175 pre_cleanup
176
177 switch_destroy
178
179 h3_destroy
180 h2_destroy
181 h1_destroy
182
183 vrf_cleanup
184}
185
186test_gretap()
187{
188 full_test_span_gre_dir gt4 ingress 8 0 "mirror to gretap w/ UL"
189 full_test_span_gre_dir gt4 egress 0 8 "mirror to gretap w/ UL"
190}
191
192test_ip6gretap()
193{
194 full_test_span_gre_dir gt6 ingress 8 0 "mirror to ip6gretap w/ UL"
195 full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap w/ UL"
196}
197
198test_all()
199{
200 RET=0
201
202 slow_path_trap_install $swp1 ingress
203 slow_path_trap_install $swp1 egress
204
205 tests_run
206
207 slow_path_trap_uninstall $swp1 egress
208 slow_path_trap_uninstall $swp1 ingress
209}
210
211trap cleanup EXIT
212
213setup_prepare
214setup_wait
215
216tcflags="skip_hw"
217test_all
218
219if ! tc_offload_check; then
220 echo "WARN: Could not test offloaded functionality"
221else
222 tcflags="skip_sw"
223 test_all
224fi
225
226exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
new file mode 100755
index 000000000000..50ab3462af0c
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
@@ -0,0 +1,212 @@
1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0
3
4# This test uses standard topology for testing gretap. See
5# mirror_gre_topo_lib.sh for more details.
6#
7# Test how mirrors to gretap and ip6gretap react to changes to relevant
8# configuration.
9
10ALL_TESTS="
11 test_ttl
12 test_tun_up
13 test_egress_up
14 test_remote_ip
15"
16
17NUM_NETIFS=6
18source lib.sh
19source mirror_lib.sh
20source mirror_gre_lib.sh
21source mirror_gre_topo_lib.sh
22
setup_prepare()
{
	h1=${NETIFS[p1]}
	swp1=${NETIFS[p2]}

	swp2=${NETIFS[p3]}
	h2=${NETIFS[p4]}

	swp3=${NETIFS[p5]}
	h3=${NETIFS[p6]}

	vrf_prepare
	mirror_gre_topo_create

	# This test downs $swp3, which deletes the configured IPv6 address
	# unless this sysctl is set.
	sysctl_set net.ipv6.conf.$swp3.keep_addr_on_down 1

	# Underlay addresses matching the gretap/ip6gretap endpoints from
	# the standard topology.
	ip address add dev $swp3 192.0.2.129/28
	ip address add dev $h3 192.0.2.130/28

	ip address add dev $swp3 2001:db8:2::1/64
	ip address add dev $h3 2001:db8:2::2/64
}
47
cleanup()
{
	pre_cleanup

	# Remove addresses and restore sysctls in the reverse order of
	# setup_prepare.
	ip address del dev $h3 2001:db8:2::2/64
	ip address del dev $swp3 2001:db8:2::1/64

	ip address del dev $h3 192.0.2.130/28
	ip address del dev $swp3 192.0.2.129/28

	sysctl_restore net.ipv6.conf.$swp3.keep_addr_on_down

	mirror_gre_topo_destroy
	vrf_cleanup
}
63
# Check that a change of tunnel TTL is reflected in the encapsulating
# header of mirrored packets.
test_span_gre_ttl()
{
	local tundev=$1 type=$2 prot=$3 what=$4

	RET=0

	# Capture at H3 only mirrored packets whose outer header carries
	# TTL 50.
	mirror_install $swp1 ingress $tundev "matchall $tcflags"
	tc qdisc add dev $h3 clsact
	tc filter add dev $h3 ingress pref 77 prot $prot \
		flower ip_ttl 50 action pass

	# Tunnel still uses TTL 100, so nothing should match.
	mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 0

	# After switching the tunnel to TTL 50, all ten pings should match.
	ip link set dev $tundev type $type ttl 50
	mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 10

	# Restore and clean up.
	ip link set dev $tundev type $type ttl 100
	tc filter del dev $h3 ingress pref 77
	tc qdisc del dev $h3 clsact
	mirror_uninstall $swp1 ingress

	log_test "$what: TTL change ($tcflags)"
}
90
# Check that mirroring stops while the tunnel device is down and resumes
# after it comes back up.
test_span_gre_tun_up()
{
	local tundev=$1 what=$2

	RET=0

	# With the tunnel down, no mirrored traffic should arrive.
	ip link set dev $tundev down
	mirror_install $swp1 ingress $tundev "matchall $tcflags"
	fail_test_span_gre_dir $tundev ingress

	ip link set dev $tundev up

	# Once it is back up, mirroring should work again.
	quick_test_span_gre_dir $tundev ingress
	mirror_uninstall $swp1 ingress

	log_test "$what: tunnel down/up ($tcflags)"
}
109
# Check that mirroring stops while the underlay egress device $swp3 is down
# and resumes after it comes back up and the neighbor is re-resolved.
test_span_gre_egress_up()
{
	local tundev=$1 remote_ip=$2 what=$3

	RET=0

	ip link set dev $swp3 down
	mirror_install $swp1 ingress $tundev "matchall $tcflags"
	fail_test_span_gre_dir $tundev ingress

	# After setting the device up, wait for neighbor to get resolved so that
	# we can expect mirroring to work.
	ip link set dev $swp3 up
	until ip neigh sh dev $swp3 $remote_ip nud reachable | grep -q ^; do
		sleep 1
	done

	quick_test_span_gre_dir $tundev ingress
	mirror_uninstall $swp1 ingress

	log_test "$what: egress down/up ($tcflags)"
}
140
# Check that a change of the tunnel remote address is reflected in where
# mirrored packets are delivered.
test_span_gre_remote_ip()
{
	local tundev=$1 type=$2 correct_ip=$3 wrong_ip=$4 what=$5

	RET=0

	# Point the tunnel at an address nobody listens on; mirroring should
	# then deliver nothing to H3.
	ip link set dev $tundev type $type remote $wrong_ip
	mirror_install $swp1 ingress $tundev "matchall $tcflags"
	fail_test_span_gre_dir $tundev ingress

	# Restore the correct remote and verify mirroring works again.
	ip link set dev $tundev type $type remote $correct_ip
	quick_test_span_gre_dir $tundev ingress
	mirror_uninstall $swp1 ingress

	log_test "$what: remote address change ($tcflags)"
}
161
test_ttl()
{
	# Run the TTL-change test for both tunnel flavors.
	test_span_gre_ttl gt4 gretap ip "mirror to gretap"
	test_span_gre_ttl gt6 ip6gretap ipv6 "mirror to ip6gretap"
}
167
test_tun_up()
{
	# Run the tunnel down/up test for both tunnel flavors.
	test_span_gre_tun_up gt4 "mirror to gretap"
	test_span_gre_tun_up gt6 "mirror to ip6gretap"
}
173
test_egress_up()
{
	# Run the egress down/up test; the second argument is the tunnel
	# remote address whose neighbor entry must be re-resolved.
	test_span_gre_egress_up gt4 192.0.2.130 "mirror to gretap"
	test_span_gre_egress_up gt6 2001:db8:2::2 "mirror to ip6gretap"
}
179
test_remote_ip()
{
	# Arguments: tunnel device, type, correct remote, wrong remote.
	test_span_gre_remote_ip gt4 gretap 192.0.2.130 192.0.2.132 "mirror to gretap"
	test_span_gre_remote_ip gt6 ip6gretap 2001:db8:2::2 2001:db8:2::4 "mirror to ip6gretap"
}
185
test_all()
{
	# Reset the test result before the run, for consistency with the
	# test_all of the other mirror-to-gretap selftests.
	RET=0

	slow_path_trap_install $swp1 ingress
	slow_path_trap_install $swp1 egress

	tests_run

	slow_path_trap_uninstall $swp1 egress
	slow_path_trap_uninstall $swp1 ingress
}
196
trap cleanup EXIT

setup_prepare
setup_wait

# Run all tests in the software datapath first; then, if the driver passes
# tc_offload_check, run them again requiring hardware offload.
tcflags="skip_hw"
test_all

if ! tc_offload_check; then
	echo "WARN: Could not test offloaded functionality"
else
	tcflags="skip_sw"
	test_all
fi

exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh b/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh
new file mode 100755
index 000000000000..2e54407d8954
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh
@@ -0,0 +1,129 @@
1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0
3
4# This test uses standard topology for testing gretap. See
5# mirror_gre_topo_lib.sh for more details.
6#
7# This tests flower-triggered mirroring to gretap and ip6gretap netdevices. The
8# interfaces on H1 and H2 have two addresses each. Flower match on one of the
9# addresses is configured with mirror action. It is expected that when pinging
10# this address, mirroring takes place, whereas when pinging the other one,
11# there's no mirroring.
12
13ALL_TESTS="
14 test_gretap
15 test_ip6gretap
16"
17
18NUM_NETIFS=6
19source lib.sh
20source mirror_lib.sh
21source mirror_gre_lib.sh
22source mirror_gre_topo_lib.sh
23
setup_prepare()
{
	h1=${NETIFS[p1]}
	swp1=${NETIFS[p2]}

	swp2=${NETIFS[p3]}
	h2=${NETIFS[p4]}

	swp3=${NETIFS[p5]}
	h3=${NETIFS[p6]}

	vrf_prepare
	mirror_gre_topo_create

	# Underlay addresses for the tunnel endpoints on H3.
	ip address add dev $swp3 192.0.2.129/28
	ip address add dev $h3 192.0.2.130/28

	ip address add dev $swp3 2001:db8:2::1/64
	ip address add dev $h3 2001:db8:2::2/64

	# Secondary addresses on H1/H2; the flower ACL matches on these so
	# that only some of the traffic is mirrored.
	ip address add dev $h1 192.0.2.3/28
	ip address add dev $h2 192.0.2.4/28
}
47
cleanup()
{
	pre_cleanup

	# Remove addresses in the reverse order of setup_prepare.
	ip address del dev $h2 192.0.2.4/28
	ip address del dev $h1 192.0.2.3/28

	ip address del dev $h3 2001:db8:2::2/64
	ip address del dev $swp3 2001:db8:2::1/64

	ip address del dev $h3 192.0.2.130/28
	ip address del dev $swp3 192.0.2.129/28

	mirror_gre_topo_destroy
	vrf_cleanup
}
64
test_span_gre_dir_acl()
{
	# Like test_span_gre_dir_ips, but pinging between the secondary
	# addresses that the flower ACL matches on.
	test_span_gre_dir_ips "$@" 192.0.2.3 192.0.2.4
}
69
# Install a flower-based mirror matching only on $match_dip, and verify that
# traffic to the matched address is mirrored while other traffic is not.
full_test_span_gre_dir_acl()
{
	local tundev=$1; shift
	local direction=$1; shift
	local forward_type=$1; shift
	local backward_type=$1; shift
	local match_dip=$1; shift
	local what=$1; shift

	# Start from a clean result, like full_test_span_gre_dir_ips does.
	# Otherwise a failure left over from a previous test would be
	# reported against this test's log_test as well.
	RET=0

	mirror_install $swp1 $direction $tundev \
		"protocol ip flower $tcflags dst_ip $match_dip"
	# Traffic between 192.0.2.1/.2 does not match the ACL and must not
	# be mirrored ...
	fail_test_span_gre_dir $tundev $direction
	# ... whereas traffic between the secondary addresses must be.
	test_span_gre_dir_acl "$tundev" "$direction" \
		"$forward_type" "$backward_type"
	mirror_uninstall $swp1 $direction

	log_test "$direction $what ($tcflags)"
}
88
test_gretap()
{
	# 8/0 are ICMP echo request/reply types; the match address selects
	# which flow direction triggers the mirror.
	full_test_span_gre_dir_acl gt4 ingress 8 0 192.0.2.4 "ACL mirror to gretap"
	full_test_span_gre_dir_acl gt4 egress 0 8 192.0.2.3 "ACL mirror to gretap"
}
94
test_ip6gretap()
{
	# Same as test_gretap, but mirroring to the ip6gretap device gt6.
	full_test_span_gre_dir_acl gt6 ingress 8 0 192.0.2.4 "ACL mirror to ip6gretap"
	full_test_span_gre_dir_acl gt6 egress 0 8 192.0.2.3 "ACL mirror to ip6gretap"
}
100
test_all()
{
	RET=0

	# Keep the slow-path traps installed for the duration of the test
	# run, then remove them in reverse order.
	slow_path_trap_install $swp1 ingress
	slow_path_trap_install $swp1 egress

	tests_run

	slow_path_trap_uninstall $swp1 egress
	slow_path_trap_uninstall $swp1 ingress
}
113
trap cleanup EXIT

setup_prepare
setup_wait

# Run all tests in the software datapath first; then, if the driver passes
# tc_offload_check, run them again requiring hardware offload.
tcflags="skip_hw"
test_all

if ! tc_offload_check; then
	echo "WARN: Could not test offloaded functionality"
else
	tcflags="skip_sw"
	test_all
fi

exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
new file mode 100644
index 000000000000..207ffd167dba
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
@@ -0,0 +1,85 @@
1# SPDX-License-Identifier: GPL-2.0
2
# Ping in both directions between $ip1 and $ip2 and check that the ICMP
# capture rule on the receiving tunnel device h3-$tundev sees $expect
# packets each way.
do_test_span_gre_dir_ips()
{
	local expect=$1; shift
	local tundev=$1; shift
	local direction=$1; shift	# accepted but currently unused by the body
	local ip1=$1; shift
	local ip2=$1; shift

	icmp_capture_install h3-$tundev
	mirror_test v$h1 $ip1 $ip2 h3-$tundev 100 $expect
	mirror_test v$h2 $ip2 $ip1 h3-$tundev 100 $expect
	icmp_capture_uninstall h3-$tundev
}
16
quick_test_span_gre_dir_ips()
{
	# Expect all ten pings in each direction to be mirrored.
	do_test_span_gre_dir_ips 10 "$@"
}
21
fail_test_span_gre_dir_ips()
{
	# Expect no packets to be mirrored at all.
	do_test_span_gre_dir_ips 0 "$@"
}
26
# Verify that mirroring works, and that the mirrored packets carry the
# expected ICMP type in each direction.
test_span_gre_dir_ips()
{
	local tundev=$1; shift
	local direction=$1; shift
	local forward_type=$1; shift	# ICMP type expected H1->H2
	local backward_type=$1; shift	# ICMP type expected H2->H1
	local ip1=$1; shift
	local ip2=$1; shift

	# First check that mirroring works at all ...
	quick_test_span_gre_dir_ips "$tundev" "$direction" "$ip1" "$ip2"

	# ... then check the ICMP type of the mirrored packets, per
	# direction.
	icmp_capture_install h3-$tundev "type $forward_type"
	mirror_test v$h1 $ip1 $ip2 h3-$tundev 100 10
	icmp_capture_uninstall h3-$tundev

	icmp_capture_install h3-$tundev "type $backward_type"
	mirror_test v$h2 $ip2 $ip1 h3-$tundev 100 10
	icmp_capture_uninstall h3-$tundev
}
46
# Install a matchall mirror in $direction on $swp1, run the full direction
# test, clean up and report the result.
full_test_span_gre_dir_ips()
{
	local tundev=$1; shift
	local direction=$1; shift
	local forward_type=$1; shift
	local backward_type=$1; shift
	local what=$1; shift
	local ip1=$1; shift
	local ip2=$1; shift

	RET=0

	mirror_install $swp1 $direction $tundev "matchall $tcflags"
	test_span_gre_dir_ips "$tundev" "$direction" "$forward_type" \
		"$backward_type" "$ip1" "$ip2"
	mirror_uninstall $swp1 $direction

	log_test "$direction $what ($tcflags)"
}
66
quick_test_span_gre_dir()
{
	# Convenience wrapper using the standard H1/H2 addresses.
	quick_test_span_gre_dir_ips "$@" 192.0.2.1 192.0.2.2
}
71
fail_test_span_gre_dir()
{
	# Convenience wrapper using the standard H1/H2 addresses.
	fail_test_span_gre_dir_ips "$@" 192.0.2.1 192.0.2.2
}
76
test_span_gre_dir()
{
	# Convenience wrapper using the standard H1/H2 addresses.
	test_span_gre_dir_ips "$@" 192.0.2.1 192.0.2.2
}
81
full_test_span_gre_dir()
{
	# Convenience wrapper using the standard H1/H2 addresses.
	full_test_span_gre_dir_ips "$@" 192.0.2.1 192.0.2.2
}
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_neigh.sh b/tools/testing/selftests/net/forwarding/mirror_gre_neigh.sh
new file mode 100755
index 000000000000..fc0508e40fca
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_neigh.sh
@@ -0,0 +1,115 @@
1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0
3
4# This test uses standard topology for testing gretap. See
5# mirror_gre_topo_lib.sh for more details.
6#
7# Test for mirroring to gretap and ip6gretap, such that the neighbor entry for
8# the tunnel remote address has invalid address at the time that the mirroring
9# is set up. Later on, the neighbor is deleted and it is expected to be
10# reinitialized using the usual ARP process, and the mirroring offload updated.
11
12ALL_TESTS="
13 test_gretap
14 test_ip6gretap
15"
16
17NUM_NETIFS=6
18source lib.sh
19source mirror_lib.sh
20source mirror_gre_lib.sh
21source mirror_gre_topo_lib.sh
22
setup_prepare()
{
	h1=${NETIFS[p1]}
	swp1=${NETIFS[p2]}

	swp2=${NETIFS[p3]}
	h2=${NETIFS[p4]}

	swp3=${NETIFS[p5]}
	h3=${NETIFS[p6]}

	vrf_prepare
	mirror_gre_topo_create

	# Underlay addresses matching the gretap/ip6gretap endpoints from
	# the standard topology.
	ip address add dev $swp3 192.0.2.129/28
	ip address add dev $h3 192.0.2.130/28

	ip address add dev $swp3 2001:db8:2::1/64
	ip address add dev $h3 2001:db8:2::2/64
}
43
cleanup()
{
	pre_cleanup

	# Remove addresses in the reverse order of setup_prepare.
	ip address del dev $h3 2001:db8:2::2/64
	ip address del dev $swp3 2001:db8:2::1/64

	ip address del dev $h3 192.0.2.130/28
	ip address del dev $swp3 192.0.2.129/28

	mirror_gre_topo_destroy
	vrf_cleanup
}
57
# Check that mirroring recovers after the neighbor entry for the tunnel
# remote address is replaced with a bogus one and then deleted again.
test_span_gre_neigh()
{
	local addr=$1; shift
	local tundev=$1; shift
	local direction=$1; shift
	local what=$1; shift

	RET=0

	# With an invalid lladdr for the remote, mirrored packets go nowhere.
	ip neigh replace dev $swp3 $addr lladdr 00:11:22:33:44:55
	mirror_install $swp1 $direction $tundev "matchall $tcflags"
	# Pass $direction through instead of hard-coding "ingress", so the
	# helper is invoked consistently with the direction under test.
	fail_test_span_gre_dir $tundev $direction
	# Deleting the bogus entry lets the usual neighbor resolution kick
	# in, after which mirroring should work again.
	ip neigh del dev $swp3 $addr
	quick_test_span_gre_dir $tundev $direction
	mirror_uninstall $swp1 $direction

	log_test "$direction $what: neighbor change ($tcflags)"
}
76
test_gretap()
{
	# Run the neighbor-change test on the IPv4 tunnel, both directions.
	test_span_gre_neigh 192.0.2.130 gt4 ingress "mirror to gretap"
	test_span_gre_neigh 192.0.2.130 gt4 egress "mirror to gretap"
}
82
test_ip6gretap()
{
	# Run the neighbor-change test on the IPv6 tunnel, both directions.
	test_span_gre_neigh 2001:db8:2::2 gt6 ingress "mirror to ip6gretap"
	test_span_gre_neigh 2001:db8:2::2 gt6 egress "mirror to ip6gretap"
}
88
test_all()
{
	# Reset the test result before the run, for consistency with the
	# test_all of the other mirror-to-gretap selftests.
	RET=0

	slow_path_trap_install $swp1 ingress
	slow_path_trap_install $swp1 egress

	tests_run

	slow_path_trap_uninstall $swp1 egress
	slow_path_trap_uninstall $swp1 ingress
}
99
trap cleanup EXIT

setup_prepare
setup_wait

# Run all tests in the software datapath first; then, if the driver passes
# tc_offload_check, run them again requiring hardware offload.
tcflags="skip_hw"
test_all

if ! tc_offload_check; then
	echo "WARN: Could not test offloaded functionality"
else
	tcflags="skip_sw"
	test_all
fi

exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh b/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh
new file mode 100755
index 000000000000..8fa681eb90e7
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh
@@ -0,0 +1,127 @@
1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0
3
4# This test uses standard topology for testing gretap. See
5# mirror_gre_topo_lib.sh for more details.
6#
7# Test that gretap and ip6gretap mirroring works when the other tunnel endpoint
8# is reachable through a next-hop route (as opposed to directly-attached route).
9
10ALL_TESTS="
11 test_gretap
12 test_ip6gretap
13"
14
15NUM_NETIFS=6
16source lib.sh
17source mirror_lib.sh
18source mirror_gre_lib.sh
19source mirror_gre_topo_lib.sh
20
setup_prepare()
{
	h1=${NETIFS[p1]}
	swp1=${NETIFS[p2]}

	swp2=${NETIFS[p3]}
	h2=${NETIFS[p4]}

	swp3=${NETIFS[p5]}
	h3=${NETIFS[p6]}

	# NOTE(review): presumably rp_filter would drop the mirrored
	# encapsulated traffic arriving at $h3 via the next-hop route --
	# confirm against the mirror_gre_topo_lib.sh topology.
	sysctl_set net.ipv4.conf.all.rp_filter 0
	sysctl_set net.ipv4.conf.$h3.rp_filter 0

	vrf_prepare
	mirror_gre_topo_create

	# Underlay addressing: tunnel endpoints are /32s reachable only via
	# a next-hop route (installed later by test_gretap), as opposed to a
	# directly-attached route.
	ip address add dev $swp3 192.0.2.161/28
	ip address add dev $h3 192.0.2.162/28
	ip address add dev gt4 192.0.2.129/32
	ip address add dev h3-gt4 192.0.2.130/32

	# IPv6 route can't be added after address. Such routes are rejected due
	# to the gateway address having been configured on the local system. It
	# works the other way around though.
	ip address add dev $swp3 2001:db8:4::1/64
	ip -6 route add 2001:db8:2::2/128 via 2001:db8:4::2
	ip address add dev $h3 2001:db8:4::2/64
	ip address add dev gt6 2001:db8:2::1
	ip address add dev h3-gt6 2001:db8:2::2
}
52
cleanup()
{
	pre_cleanup

	# Remove routes and addresses in the reverse order of setup_prepare.
	ip -6 route del 2001:db8:2::2/128 via 2001:db8:4::2
	ip address del dev $h3 2001:db8:4::2/64
	ip address del dev $swp3 2001:db8:4::1/64

	ip address del dev $h3 192.0.2.162/28
	ip address del dev $swp3 192.0.2.161/28

	mirror_gre_topo_destroy
	vrf_cleanup

	sysctl_restore net.ipv4.conf.$h3.rp_filter
	sysctl_restore net.ipv4.conf.all.rp_filter
}
70
test_gretap()
{
	RET=0
	mirror_install $swp1 ingress gt4 "matchall $tcflags"

	# For IPv4, test that there's no mirroring without the route directing
	# the traffic to tunnel remote address. Then add it and test that
	# mirroring starts. For IPv6 we can't test this due to the limitation
	# that routes for locally-specified IPv6 addresses can't be added.
	fail_test_span_gre_dir gt4 ingress

	ip route add 192.0.2.130/32 via 192.0.2.162
	quick_test_span_gre_dir gt4 ingress
	ip route del 192.0.2.130/32 via 192.0.2.162

	mirror_uninstall $swp1 ingress
	log_test "mirror to gre with next-hop remote ($tcflags)"
}
89
test_ip6gretap()
{
	RET=0

	# The IPv6 next-hop route is installed by setup_prepare, so here we
	# only verify that mirroring through it works.
	mirror_install $swp1 ingress gt6 "matchall $tcflags"
	quick_test_span_gre_dir gt6 ingress
	mirror_uninstall $swp1 ingress

	log_test "mirror to ip6gre with next-hop remote ($tcflags)"
}
100
test_all()
{
	# Reset the test result before the run, for consistency with the
	# test_all of the other mirror-to-gretap selftests.
	RET=0

	slow_path_trap_install $swp1 ingress
	slow_path_trap_install $swp1 egress

	tests_run

	slow_path_trap_uninstall $swp1 egress
	slow_path_trap_uninstall $swp1 ingress
}
111
trap cleanup EXIT

setup_prepare
setup_wait

# Run all tests in the software datapath first; then, if the driver passes
# tc_offload_check, run them again requiring hardware offload.
tcflags="skip_hw"
test_all

if ! tc_offload_check; then
	echo "WARN: Could not test offloaded functionality"
else
	tcflags="skip_sw"
	test_all
fi

exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh b/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh
new file mode 100644
index 000000000000..b3ceda2b4197
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh
@@ -0,0 +1,129 @@
1# SPDX-License-Identifier: GPL-2.0
2
3# This is the standard topology for testing mirroring to gretap and ip6gretap
4# netdevices. The tests that use it tweak it in one way or another--importantly,
5# $swp3 and $h3 need to have addresses set up.
6#
7# +---------------------+ +---------------------+
8# | H1 | | H2 |
9# | + $h1 | | $h2 + |
10# | | 192.0.2.1/28 | | 192.0.2.2/28 | |
11# +-----|---------------+ +---------------|-----+
12# | |
13# +-----|-------------------------------------------------------------|-----+
14# | SW o--> mirror | |
15# | +---|-------------------------------------------------------------|---+ |
16# | | + $swp1 BR $swp2 + | |
17# | +---------------------------------------------------------------------+ |
18# | |
19# | + $swp3 + gt6 (ip6gretap) + gt4 (gretap) |
20# | | : loc=2001:db8:2::1 : loc=192.0.2.129 |
21# | | : rem=2001:db8:2::2 : rem=192.0.2.130 |
22# | | : ttl=100 : ttl=100 |
23# | | : tos=inherit : tos=inherit |
24# | | : : |
25# +-----|---------------------:----------------------:----------------------+
26# | : :
27# +-----|---------------------:----------------------:----------------------+
28# | H3 + $h3 + h3-gt6 (ip6gretap) + h3-gt4 (gretap) |
29# | loc=2001:db8:2::2 loc=192.0.2.130 |
30# | rem=2001:db8:2::1 rem=192.0.2.129 |
31# | ttl=100 ttl=100 |
32# | tos=inherit tos=inherit |
33# | |
34# +-------------------------------------------------------------------------+
35
mirror_gre_topo_h1_create()
{
	# H1: plain end station with a single address (see diagram above).
	simple_if_init $h1 192.0.2.1/28
}
40
mirror_gre_topo_h1_destroy()
{
	simple_if_fini $h1 192.0.2.1/28
}
45
mirror_gre_topo_h2_create()
{
	# H2: plain end station with a single address (see diagram above).
	simple_if_init $h2 192.0.2.2/28
}
50
mirror_gre_topo_h2_destroy()
{
	simple_if_fini $h2 192.0.2.2/28
}
55
mirror_gre_topo_h3_create()
{
	# H3 hosts the receiving ends of both tunnels. Each gets a matchall
	# sink rule so that tests can count mirrored packets on it.
	simple_if_init $h3

	tunnel_create h3-gt4 gretap 192.0.2.130 192.0.2.129
	ip link set h3-gt4 vrf v$h3
	matchall_sink_create h3-gt4

	tunnel_create h3-gt6 ip6gretap 2001:db8:2::2 2001:db8:2::1
	ip link set h3-gt6 vrf v$h3
	matchall_sink_create h3-gt6
}
68
mirror_gre_topo_h3_destroy()
{
	# Tear down in the reverse order of mirror_gre_topo_h3_create.
	tunnel_destroy h3-gt6
	tunnel_destroy h3-gt4

	simple_if_fini $h3
}
76
mirror_gre_topo_switch_create()
{
	# $swp3 is the underlay port towards H3.
	ip link set dev $swp3 up

	# Bridge $swp1 and $swp2 so that H1<->H2 traffic flows through the
	# switch and can be mirrored at $swp1.
	ip link add name br1 type bridge vlan_filtering 1
	ip link set dev br1 up

	ip link set dev $swp1 master br1
	ip link set dev $swp1 up

	ip link set dev $swp2 master br1
	ip link set dev $swp2 up

	# Mirror-to tunnel devices; the remote endpoints live on H3 (see
	# diagram above).
	tunnel_create gt4 gretap 192.0.2.129 192.0.2.130 \
		      ttl 100 tos inherit

	tunnel_create gt6 ip6gretap 2001:db8:2::1 2001:db8:2::2 \
		      ttl 100 tos inherit allow-localremote

	# clsact on $swp1 is where tests attach their mirror rules.
	tc qdisc add dev $swp1 clsact
}
98
mirror_gre_topo_switch_destroy()
{
	# Tear down in the reverse order of mirror_gre_topo_switch_create.
	tc qdisc del dev $swp1 clsact

	tunnel_destroy gt6
	tunnel_destroy gt4

	ip link set dev $swp1 down
	ip link set dev $swp2 down
	ip link del dev br1

	ip link set dev $swp3 down
}
112
mirror_gre_topo_create()
{
	# Build the full standard topology: hosts first, then the switch.
	mirror_gre_topo_h1_create
	mirror_gre_topo_h2_create
	mirror_gre_topo_h3_create

	mirror_gre_topo_switch_create
}
121
mirror_gre_topo_destroy()
{
	# Tear down in the reverse order of mirror_gre_topo_create.
	mirror_gre_topo_switch_destroy

	mirror_gre_topo_h3_destroy
	mirror_gre_topo_h2_destroy
	mirror_gre_topo_h1_destroy
}
diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh
new file mode 100644
index 000000000000..e5028a5725e3
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh
@@ -0,0 +1,40 @@
1# SPDX-License-Identifier: GPL-2.0
2
mirror_install()
{
	local from_dev=$1; shift
	local direction=$1; shift
	local to_dev=$1; shift
	local filter=$1; shift

	# Mirror packets matching $filter in the given direction on
	# $from_dev towards $to_dev. pref 1000 identifies the rule so that
	# mirror_uninstall() can later delete exactly this filter.
	tc filter add dev $from_dev $direction \
	   pref 1000 $filter \
	   action mirred egress mirror dev $to_dev
}
14
mirror_uninstall()
{
	local from_dev=$1; shift
	local direction=$1; shift

	# Delete the pref-1000 mirror rule from the device it was installed
	# on. This previously hard-coded $swp1, which only worked by
	# accident because all current callers pass $swp1 as from_dev.
	tc filter del dev $from_dev $direction pref 1000
}
22
# Send ten pings from $vrf_name ($sip -> $dip) and check that the pref-$pref
# capture rule on $dev saw about $expect packets more afterwards.
mirror_test()
{
	local vrf_name=$1; shift
	local sip=$1; shift
	local dip=$1; shift
	local dev=$1; shift
	local pref=$1; shift
	local expect=$1; shift

	local before=$(tc_rule_stats_get $dev $pref)
	ip vrf exec $vrf_name \
		${PING} ${sip:+-I $sip} $dip -c 10 -i 0.1 -w 2 &> /dev/null
	local after=$(tc_rule_stats_get $dev $pref)

	local delta=$((after - before))
	# Tolerate a couple stray extra packets.
	((expect <= delta && delta <= expect + 2))
	check_err $? "Expected to capture $expect packets, got $delta."
}
diff --git a/tools/testing/selftests/net/forwarding/router.sh b/tools/testing/selftests/net/forwarding/router.sh
index cc6a14abfa87..a75cb51cc5bd 100755
--- a/tools/testing/selftests/net/forwarding/router.sh
+++ b/tools/testing/selftests/net/forwarding/router.sh
@@ -1,6 +1,7 @@
1#!/bin/bash 1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0 2# SPDX-License-Identifier: GPL-2.0
3 3
4ALL_TESTS="ping_ipv4 ping_ipv6"
4NUM_NETIFS=4 5NUM_NETIFS=4
5source lib.sh 6source lib.sh
6 7
@@ -114,12 +115,21 @@ cleanup()
114 vrf_cleanup 115 vrf_cleanup
115} 116}
116 117
118ping_ipv4()
119{
120 ping_test $h1 198.51.100.2
121}
122
123ping_ipv6()
124{
125 ping6_test $h1 2001:db8:2::2
126}
127
117trap cleanup EXIT 128trap cleanup EXIT
118 129
119setup_prepare 130setup_prepare
120setup_wait 131setup_wait
121 132
122ping_test $h1 198.51.100.2 133tests_run
123ping6_test $h1 2001:db8:2::2
124 134
125exit $EXIT_STATUS 135exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/router_multipath.sh b/tools/testing/selftests/net/forwarding/router_multipath.sh
index 3bc351008db6..8b6d0fb6d604 100755
--- a/tools/testing/selftests/net/forwarding/router_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/router_multipath.sh
@@ -1,6 +1,7 @@
1#!/bin/bash 1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0 2# SPDX-License-Identifier: GPL-2.0
3 3
4ALL_TESTS="ping_ipv4 ping_ipv6 multipath_test"
4NUM_NETIFS=8 5NUM_NETIFS=8
5source lib.sh 6source lib.sh
6 7
@@ -191,7 +192,7 @@ multipath_eval()
191 diff=$(echo $weights_ratio - $packets_ratio | bc -l) 192 diff=$(echo $weights_ratio - $packets_ratio | bc -l)
192 diff=${diff#-} 193 diff=${diff#-}
193 194
194 test "$(echo "$diff / $weights_ratio > 0.1" | bc -l)" -eq 0 195 test "$(echo "$diff / $weights_ratio > 0.15" | bc -l)" -eq 0
195 check_err $? "Too large discrepancy between expected and measured ratios" 196 check_err $? "Too large discrepancy between expected and measured ratios"
196 log_test "$desc" 197 log_test "$desc"
197 log_info "Expected ratio $weights_ratio Measured ratio $packets_ratio" 198 log_info "Expected ratio $weights_ratio Measured ratio $packets_ratio"
@@ -204,13 +205,11 @@ multipath4_test()
204 local weight_rp13=$3 205 local weight_rp13=$3
205 local t0_rp12 t0_rp13 t1_rp12 t1_rp13 206 local t0_rp12 t0_rp13 t1_rp12 t1_rp13
206 local packets_rp12 packets_rp13 207 local packets_rp12 packets_rp13
207 local hash_policy
208 208
209 # Transmit multiple flows from h1 to h2 and make sure they are 209 # Transmit multiple flows from h1 to h2 and make sure they are
210 # distributed between both multipath links (rp12 and rp13) 210 # distributed between both multipath links (rp12 and rp13)
211 # according to the configured weights. 211 # according to the configured weights.
212 hash_policy=$(sysctl -n net.ipv4.fib_multipath_hash_policy) 212 sysctl_set net.ipv4.fib_multipath_hash_policy 1
213 sysctl -q -w net.ipv4.fib_multipath_hash_policy=1
214 ip route replace 198.51.100.0/24 vrf vrf-r1 \ 213 ip route replace 198.51.100.0/24 vrf vrf-r1 \
215 nexthop via 169.254.2.22 dev $rp12 weight $weight_rp12 \ 214 nexthop via 169.254.2.22 dev $rp12 weight $weight_rp12 \
216 nexthop via 169.254.3.23 dev $rp13 weight $weight_rp13 215 nexthop via 169.254.3.23 dev $rp13 weight $weight_rp13
@@ -232,7 +231,7 @@ multipath4_test()
232 ip route replace 198.51.100.0/24 vrf vrf-r1 \ 231 ip route replace 198.51.100.0/24 vrf vrf-r1 \
233 nexthop via 169.254.2.22 dev $rp12 \ 232 nexthop via 169.254.2.22 dev $rp12 \
234 nexthop via 169.254.3.23 dev $rp13 233 nexthop via 169.254.3.23 dev $rp13
235 sysctl -q -w net.ipv4.fib_multipath_hash_policy=$hash_policy 234 sysctl_restore net.ipv4.fib_multipath_hash_policy
236} 235}
237 236
238multipath6_l4_test() 237multipath6_l4_test()
@@ -242,13 +241,11 @@ multipath6_l4_test()
242 local weight_rp13=$3 241 local weight_rp13=$3
243 local t0_rp12 t0_rp13 t1_rp12 t1_rp13 242 local t0_rp12 t0_rp13 t1_rp12 t1_rp13
244 local packets_rp12 packets_rp13 243 local packets_rp12 packets_rp13
245 local hash_policy
246 244
247 # Transmit multiple flows from h1 to h2 and make sure they are 245 # Transmit multiple flows from h1 to h2 and make sure they are
248 # distributed between both multipath links (rp12 and rp13) 246 # distributed between both multipath links (rp12 and rp13)
249 # according to the configured weights. 247 # according to the configured weights.
250 hash_policy=$(sysctl -n net.ipv6.fib_multipath_hash_policy) 248 sysctl_set net.ipv6.fib_multipath_hash_policy 1
251 sysctl -q -w net.ipv6.fib_multipath_hash_policy=1
252 249
253 ip route replace 2001:db8:2::/64 vrf vrf-r1 \ 250 ip route replace 2001:db8:2::/64 vrf vrf-r1 \
254 nexthop via fe80:2::22 dev $rp12 weight $weight_rp12 \ 251 nexthop via fe80:2::22 dev $rp12 weight $weight_rp12 \
@@ -271,7 +268,7 @@ multipath6_l4_test()
271 nexthop via fe80:2::22 dev $rp12 \ 268 nexthop via fe80:2::22 dev $rp12 \
272 nexthop via fe80:3::23 dev $rp13 269 nexthop via fe80:3::23 dev $rp13
273 270
274 sysctl -q -w net.ipv6.fib_multipath_hash_policy=$hash_policy 271 sysctl_restore net.ipv6.fib_multipath_hash_policy
275} 272}
276 273
277multipath6_test() 274multipath6_test()
@@ -364,13 +361,21 @@ cleanup()
364 vrf_cleanup 361 vrf_cleanup
365} 362}
366 363
364ping_ipv4()
365{
366 ping_test $h1 198.51.100.2
367}
368
369ping_ipv6()
370{
371 ping6_test $h1 2001:db8:2::2
372}
373
367trap cleanup EXIT 374trap cleanup EXIT
368 375
369setup_prepare 376setup_prepare
370setup_wait 377setup_wait
371 378
372ping_test $h1 198.51.100.2 379tests_run
373ping6_test $h1 2001:db8:2::2
374multipath_test
375 380
376exit $EXIT_STATUS 381exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh
index 3a6385ebd5d0..813d02d1939d 100755
--- a/tools/testing/selftests/net/forwarding/tc_actions.sh
+++ b/tools/testing/selftests/net/forwarding/tc_actions.sh
@@ -1,6 +1,8 @@
1#!/bin/bash 1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0 2# SPDX-License-Identifier: GPL-2.0
3 3
4ALL_TESTS="gact_drop_and_ok_test mirred_egress_redirect_test \
5 mirred_egress_mirror_test gact_trap_test"
4NUM_NETIFS=4 6NUM_NETIFS=4
5source tc_common.sh 7source tc_common.sh
6source lib.sh 8source lib.sh
@@ -111,6 +113,10 @@ gact_trap_test()
111{ 113{
112 RET=0 114 RET=0
113 115
116 if [[ "$tcflags" != "skip_sw" ]]; then
117 return 0;
118 fi
119
114 tc filter add dev $swp1 ingress protocol ip pref 1 handle 101 flower \ 120 tc filter add dev $swp1 ingress protocol ip pref 1 handle 101 flower \
115 skip_hw dst_ip 192.0.2.2 action drop 121 skip_hw dst_ip 192.0.2.2 action drop
116 tc filter add dev $swp1 ingress protocol ip pref 3 handle 103 flower \ 122 tc filter add dev $swp1 ingress protocol ip pref 3 handle 103 flower \
@@ -179,24 +185,29 @@ cleanup()
179 ip link set $swp1 address $swp1origmac 185 ip link set $swp1 address $swp1origmac
180} 186}
181 187
188mirred_egress_redirect_test()
189{
190 mirred_egress_test "redirect"
191}
192
193mirred_egress_mirror_test()
194{
195 mirred_egress_test "mirror"
196}
197
182trap cleanup EXIT 198trap cleanup EXIT
183 199
184setup_prepare 200setup_prepare
185setup_wait 201setup_wait
186 202
187gact_drop_and_ok_test 203tests_run
188mirred_egress_test "redirect"
189mirred_egress_test "mirror"
190 204
191tc_offload_check 205tc_offload_check
192if [[ $? -ne 0 ]]; then 206if [[ $? -ne 0 ]]; then
193 log_info "Could not test offloaded functionality" 207 log_info "Could not test offloaded functionality"
194else 208else
195 tcflags="skip_sw" 209 tcflags="skip_sw"
196 gact_drop_and_ok_test 210 tests_run
197 mirred_egress_test "redirect"
198 mirred_egress_test "mirror"
199 gact_trap_test
200fi 211fi
201 212
202exit $EXIT_STATUS 213exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/tc_chains.sh b/tools/testing/selftests/net/forwarding/tc_chains.sh
index 2fd15226974b..d2c783e94df3 100755
--- a/tools/testing/selftests/net/forwarding/tc_chains.sh
+++ b/tools/testing/selftests/net/forwarding/tc_chains.sh
@@ -1,6 +1,7 @@
1#!/bin/bash 1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0 2# SPDX-License-Identifier: GPL-2.0
3 3
4ALL_TESTS="unreachable_chain_test gact_goto_chain_test"
4NUM_NETIFS=2 5NUM_NETIFS=2
5source tc_common.sh 6source tc_common.sh
6source lib.sh 7source lib.sh
@@ -107,16 +108,14 @@ trap cleanup EXIT
107setup_prepare 108setup_prepare
108setup_wait 109setup_wait
109 110
110unreachable_chain_test 111tests_run
111gact_goto_chain_test
112 112
113tc_offload_check 113tc_offload_check
114if [[ $? -ne 0 ]]; then 114if [[ $? -ne 0 ]]; then
115 log_info "Could not test offloaded functionality" 115 log_info "Could not test offloaded functionality"
116else 116else
117 tcflags="skip_sw" 117 tcflags="skip_sw"
118 unreachable_chain_test 118 tests_run
119 gact_goto_chain_test
120fi 119fi
121 120
122exit $EXIT_STATUS 121exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/tc_flower.sh b/tools/testing/selftests/net/forwarding/tc_flower.sh
index 0c54059f1875..20d1077e5a3d 100755
--- a/tools/testing/selftests/net/forwarding/tc_flower.sh
+++ b/tools/testing/selftests/net/forwarding/tc_flower.sh
@@ -1,6 +1,8 @@
1#!/bin/bash 1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0 2# SPDX-License-Identifier: GPL-2.0
3 3
4ALL_TESTS="match_dst_mac_test match_src_mac_test match_dst_ip_test \
5 match_src_ip_test match_ip_flags_test"
4NUM_NETIFS=2 6NUM_NETIFS=2
5source tc_common.sh 7source tc_common.sh
6source lib.sh 8source lib.sh
@@ -245,22 +247,14 @@ trap cleanup EXIT
245setup_prepare 247setup_prepare
246setup_wait 248setup_wait
247 249
248match_dst_mac_test 250tests_run
249match_src_mac_test
250match_dst_ip_test
251match_src_ip_test
252match_ip_flags_test
253 251
254tc_offload_check 252tc_offload_check
255if [[ $? -ne 0 ]]; then 253if [[ $? -ne 0 ]]; then
256 log_info "Could not test offloaded functionality" 254 log_info "Could not test offloaded functionality"
257else 255else
258 tcflags="skip_sw" 256 tcflags="skip_sw"
259 match_dst_mac_test 257 tests_run
260 match_src_mac_test
261 match_dst_ip_test
262 match_src_ip_test
263 match_ip_flags_test
264fi 258fi
265 259
266exit $EXIT_STATUS 260exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/tc_shblocks.sh b/tools/testing/selftests/net/forwarding/tc_shblocks.sh
index 077b98048ef4..b5b917203815 100755
--- a/tools/testing/selftests/net/forwarding/tc_shblocks.sh
+++ b/tools/testing/selftests/net/forwarding/tc_shblocks.sh
@@ -1,6 +1,7 @@
1#!/bin/bash 1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0 2# SPDX-License-Identifier: GPL-2.0
3 3
4ALL_TESTS="shared_block_test"
4NUM_NETIFS=4 5NUM_NETIFS=4
5source tc_common.sh 6source tc_common.sh
6source lib.sh 7source lib.sh
@@ -109,14 +110,14 @@ trap cleanup EXIT
109setup_prepare 110setup_prepare
110setup_wait 111setup_wait
111 112
112shared_block_test 113tests_run
113 114
114tc_offload_check 115tc_offload_check
115if [[ $? -ne 0 ]]; then 116if [[ $? -ne 0 ]]; then
116 log_info "Could not test offloaded functionality" 117 log_info "Could not test offloaded functionality"
117else 118else
118 tcflags="skip_sw" 119 tcflags="skip_sw"
119 shared_block_test 120 tests_run
120fi 121fi
121 122
122exit $EXIT_STATUS 123exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index 1e428781a625..7651fd4d86fe 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -368,7 +368,7 @@ test_pmtu_vti6_link_add_mtu() {
368 368
369 fail=0 369 fail=0
370 370
371 min=1280 371 min=68 # vti6 can carry IPv4 packets too
372 max=$((65535 - 40)) 372 max=$((65535 - 40))
373 # Check invalid values first 373 # Check invalid values first
374 for v in $((min - 1)) $((max + 1)); do 374 for v in $((min - 1)) $((max + 1)); do
@@ -384,7 +384,7 @@ test_pmtu_vti6_link_add_mtu() {
384 done 384 done
385 385
386 # Now check valid values 386 # Now check valid values
387 for v in 1280 1300 $((65535 - 40)); do 387 for v in 68 1280 1300 $((65535 - 40)); do
388 ${ns_a} ip link add vti6_a mtu ${v} type vti6 local ${veth6_a_addr} remote ${veth6_b_addr} key 10 388 ${ns_a} ip link add vti6_a mtu ${v} type vti6 local ${veth6_a_addr} remote ${veth6_b_addr} key 10
389 mtu="$(link_get_mtu "${ns_a}" vti6_a)" 389 mtu="$(link_get_mtu "${ns_a}" vti6_a)"
390 ${ns_a} ip link del vti6_a 390 ${ns_a} ip link del vti6_a
diff --git a/tools/testing/selftests/net/tcp_inq.c b/tools/testing/selftests/net/tcp_inq.c
new file mode 100644
index 000000000000..d044b29ddabc
--- /dev/null
+++ b/tools/testing/selftests/net/tcp_inq.c
@@ -0,0 +1,189 @@
1/*
2 * Copyright 2018 Google Inc.
3 * Author: Soheil Hassas Yeganeh (soheil@google.com)
4 *
5 * Simple example on how to use TCP_INQ and TCP_CM_INQ.
6 *
7 * License (GPLv2):
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 15 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16 * more details.
17 */
18#define _GNU_SOURCE
19
20#include <error.h>
21#include <netinet/in.h>
22#include <netinet/tcp.h>
23#include <pthread.h>
24#include <stdio.h>
25#include <errno.h>
26#include <stdlib.h>
27#include <string.h>
28#include <sys/socket.h>
29#include <unistd.h>
30
31#ifndef TCP_INQ
32#define TCP_INQ 36
33#endif
34
35#ifndef TCP_CM_INQ
36#define TCP_CM_INQ TCP_INQ
37#endif
38
39#define BUF_SIZE 8192
40#define CMSG_SIZE 32
41
42static int family = AF_INET6;
43static socklen_t addr_len = sizeof(struct sockaddr_in6);
44static int port = 4974;
45
46static void setup_loopback_addr(int family, struct sockaddr_storage *sockaddr)
47{
48 struct sockaddr_in6 *addr6 = (void *) sockaddr;
49 struct sockaddr_in *addr4 = (void *) sockaddr;
50
51 switch (family) {
52 case PF_INET:
53 memset(addr4, 0, sizeof(*addr4));
54 addr4->sin_family = AF_INET;
55 addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
56 addr4->sin_port = htons(port);
57 break;
58 case PF_INET6:
59 memset(addr6, 0, sizeof(*addr6));
60 addr6->sin6_family = AF_INET6;
61 addr6->sin6_addr = in6addr_loopback;
62 addr6->sin6_port = htons(port);
63 break;
64 default:
65 error(1, 0, "illegal family");
66 }
67}
68
69void *start_server(void *arg)
70{
71 int server_fd = (int)(unsigned long)arg;
72 struct sockaddr_in addr;
73 socklen_t addrlen = sizeof(addr);
74 char *buf;
75 int fd;
76 int r;
77
78 buf = malloc(BUF_SIZE);
79
80 for (;;) {
81 fd = accept(server_fd, (struct sockaddr *)&addr, &addrlen);
82 if (fd == -1) {
83 perror("accept");
84 break;
85 }
86 do {
87 r = send(fd, buf, BUF_SIZE, 0);
88 } while (r < 0 && errno == EINTR);
89 if (r < 0)
90 perror("send");
91 if (r != BUF_SIZE)
92 fprintf(stderr, "can only send %d bytes\n", r);
93 /* TCP_INQ can overestimate in-queue by one byte if we send
94 * the FIN packet. Sleep for 1 second, so that the client
95 * likely invoked recvmsg().
96 */
97 sleep(1);
98 close(fd);
99 }
100
101 free(buf);
102 close(server_fd);
103 pthread_exit(0);
104}
105
106int main(int argc, char *argv[])
107{
108 struct sockaddr_storage listen_addr, addr;
109 int c, one = 1, inq = -1;
110 pthread_t server_thread;
111 char cmsgbuf[CMSG_SIZE];
112 struct iovec iov[1];
113 struct cmsghdr *cm;
114 struct msghdr msg;
115 int server_fd, fd;
116 char *buf;
117
118 while ((c = getopt(argc, argv, "46p:")) != -1) {
119 switch (c) {
120 case '4':
121 family = PF_INET;
122 addr_len = sizeof(struct sockaddr_in);
123 break;
124 case '6':
125 family = PF_INET6;
126 addr_len = sizeof(struct sockaddr_in6);
127 break;
128 case 'p':
129 port = atoi(optarg);
130 break;
131 }
132 }
133
134 server_fd = socket(family, SOCK_STREAM, 0);
135 if (server_fd < 0)
136 error(1, errno, "server socket");
137 setup_loopback_addr(family, &listen_addr);
138 if (setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR,
139 &one, sizeof(one)) != 0)
140 error(1, errno, "setsockopt(SO_REUSEADDR)");
141 if (bind(server_fd, (const struct sockaddr *)&listen_addr,
142 addr_len) == -1)
143 error(1, errno, "bind");
144 if (listen(server_fd, 128) == -1)
145 error(1, errno, "listen");
146 if (pthread_create(&server_thread, NULL, start_server,
147 (void *)(unsigned long)server_fd) != 0)
148 error(1, errno, "pthread_create");
149
150 fd = socket(family, SOCK_STREAM, 0);
151 if (fd < 0)
152 error(1, errno, "client socket");
153 setup_loopback_addr(family, &addr);
154 if (connect(fd, (const struct sockaddr *)&addr, addr_len) == -1)
155 error(1, errno, "connect");
156 if (setsockopt(fd, SOL_TCP, TCP_INQ, &one, sizeof(one)) != 0)
157 error(1, errno, "setsockopt(TCP_INQ)");
158
159 msg.msg_name = NULL;
160 msg.msg_namelen = 0;
161 msg.msg_iov = iov;
162 msg.msg_iovlen = 1;
163 msg.msg_control = cmsgbuf;
164 msg.msg_controllen = sizeof(cmsgbuf);
165 msg.msg_flags = 0;
166
167 buf = malloc(BUF_SIZE);
168 iov[0].iov_base = buf;
169 iov[0].iov_len = BUF_SIZE / 2;
170
171 if (recvmsg(fd, &msg, 0) != iov[0].iov_len)
172 error(1, errno, "recvmsg");
173 if (msg.msg_flags & MSG_CTRUNC)
174 error(1, 0, "control message is truncated");
175
176 for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
177 if (cm->cmsg_level == SOL_TCP && cm->cmsg_type == TCP_CM_INQ)
178 inq = *((int *) CMSG_DATA(cm));
179
180 if (inq != BUF_SIZE - iov[0].iov_len) {
181 fprintf(stderr, "unexpected inq: %d\n", inq);
182 exit(1);
183 }
184
185 printf("PASSED\n");
186 free(buf);
187 close(fd);
188 return 0;
189}
diff --git a/tools/testing/selftests/net/tcp_mmap.c b/tools/testing/selftests/net/tcp_mmap.c
index dea342fe6f4e..77f762780199 100644
--- a/tools/testing/selftests/net/tcp_mmap.c
+++ b/tools/testing/selftests/net/tcp_mmap.c
@@ -76,9 +76,10 @@
76#include <time.h> 76#include <time.h>
77#include <sys/time.h> 77#include <sys/time.h>
78#include <netinet/in.h> 78#include <netinet/in.h>
79#include <netinet/tcp.h>
80#include <arpa/inet.h> 79#include <arpa/inet.h>
81#include <poll.h> 80#include <poll.h>
81#include <linux/tcp.h>
82#include <assert.h>
82 83
83#ifndef MSG_ZEROCOPY 84#ifndef MSG_ZEROCOPY
84#define MSG_ZEROCOPY 0x4000000 85#define MSG_ZEROCOPY 0x4000000
@@ -134,11 +135,12 @@ void hash_zone(void *zone, unsigned int length)
134void *child_thread(void *arg) 135void *child_thread(void *arg)
135{ 136{
136 unsigned long total_mmap = 0, total = 0; 137 unsigned long total_mmap = 0, total = 0;
138 struct tcp_zerocopy_receive zc;
137 unsigned long delta_usec; 139 unsigned long delta_usec;
138 int flags = MAP_SHARED; 140 int flags = MAP_SHARED;
139 struct timeval t0, t1; 141 struct timeval t0, t1;
140 char *buffer = NULL; 142 char *buffer = NULL;
141 void *oaddr = NULL; 143 void *addr = NULL;
142 double throughput; 144 double throughput;
143 struct rusage ru; 145 struct rusage ru;
144 int lu, fd; 146 int lu, fd;
@@ -153,41 +155,46 @@ void *child_thread(void *arg)
153 perror("malloc"); 155 perror("malloc");
154 goto error; 156 goto error;
155 } 157 }
158 if (zflg) {
159 addr = mmap(NULL, chunk_size, PROT_READ, flags, fd, 0);
160 if (addr == (void *)-1)
161 zflg = 0;
162 }
156 while (1) { 163 while (1) {
157 struct pollfd pfd = { .fd = fd, .events = POLLIN, }; 164 struct pollfd pfd = { .fd = fd, .events = POLLIN, };
158 int sub; 165 int sub;
159 166
160 poll(&pfd, 1, 10000); 167 poll(&pfd, 1, 10000);
161 if (zflg) { 168 if (zflg) {
162 void *naddr; 169 socklen_t zc_len = sizeof(zc);
163 170 int res;
164 naddr = mmap(oaddr, chunk_size, PROT_READ, flags, fd, 0); 171
165 if (naddr == (void *)-1) { 172 zc.address = (__u64)addr;
166 if (errno == EAGAIN) { 173 zc.length = chunk_size;
167 /* That is if SO_RCVLOWAT is buggy */ 174 zc.recv_skip_hint = 0;
168 usleep(1000); 175 res = getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE,
169 continue; 176 &zc, &zc_len);
170 } 177 if (res == -1)
171 if (errno == EINVAL) {
172 flags = MAP_SHARED;
173 oaddr = NULL;
174 goto fallback;
175 }
176 if (errno != EIO)
177 perror("mmap()");
178 break; 178 break;
179
180 if (zc.length) {
181 assert(zc.length <= chunk_size);
182 total_mmap += zc.length;
183 if (xflg)
184 hash_zone(addr, zc.length);
185 total += zc.length;
179 } 186 }
180 total_mmap += chunk_size; 187 if (zc.recv_skip_hint) {
181 if (xflg) 188 assert(zc.recv_skip_hint <= chunk_size);
182 hash_zone(naddr, chunk_size); 189 lu = read(fd, buffer, zc.recv_skip_hint);
183 total += chunk_size; 190 if (lu > 0) {
184 if (!keepflag) { 191 if (xflg)
185 flags |= MAP_FIXED; 192 hash_zone(buffer, lu);
186 oaddr = naddr; 193 total += lu;
194 }
187 } 195 }
188 continue; 196 continue;
189 } 197 }
190fallback:
191 sub = 0; 198 sub = 0;
192 while (sub < chunk_size) { 199 while (sub < chunk_size) {
193 lu = read(fd, buffer + sub, chunk_size - sub); 200 lu = read(fd, buffer + sub, chunk_size - sub);
@@ -228,6 +235,8 @@ end:
228error: 235error:
229 free(buffer); 236 free(buffer);
230 close(fd); 237 close(fd);
238 if (zflg)
239 munmap(addr, chunk_size);
231 pthread_exit(0); 240 pthread_exit(0);
232} 241}
233 242
@@ -371,7 +380,8 @@ int main(int argc, char *argv[])
371 setup_sockaddr(cfg_family, host, &listenaddr); 380 setup_sockaddr(cfg_family, host, &listenaddr);
372 381
373 if (mss && 382 if (mss &&
374 setsockopt(fdlisten, SOL_TCP, TCP_MAXSEG, &mss, sizeof(mss)) == -1) { 383 setsockopt(fdlisten, IPPROTO_TCP, TCP_MAXSEG,
384 &mss, sizeof(mss)) == -1) {
375 perror("setsockopt TCP_MAXSEG"); 385 perror("setsockopt TCP_MAXSEG");
376 exit(1); 386 exit(1);
377 } 387 }
@@ -402,7 +412,7 @@ int main(int argc, char *argv[])
402 setup_sockaddr(cfg_family, host, &addr); 412 setup_sockaddr(cfg_family, host, &addr);
403 413
404 if (mss && 414 if (mss &&
405 setsockopt(fd, SOL_TCP, TCP_MAXSEG, &mss, sizeof(mss)) == -1) { 415 setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss)) == -1) {
406 perror("setsockopt TCP_MAXSEG"); 416 perror("setsockopt TCP_MAXSEG");
407 exit(1); 417 exit(1);
408 } 418 }
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
index 93cf8fea8ae7..3a2f51fc7fd4 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
@@ -398,13 +398,83 @@
398 255 398 255
399 ] 399 ]
400 ], 400 ],
401 "cmdUnderTest": "for i in `seq 1 32`; do cmd=\"action csum tcp continue index $i \"; args=\"$args$cmd\"; done && $TC actions add $args", 401 "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
402 "expExitCode": "255", 402 "expExitCode": "0",
403 "verifyCmd": "$TC actions ls action csum", 403 "verifyCmd": "$TC actions ls action csum",
404 "matchPattern": "^[ \t]+index [0-9]* ref", 404 "matchPattern": "^[ \t]+index [0-9]* ref",
405 "matchCount": "32", 405 "matchCount": "32",
406 "teardown": [ 406 "teardown": [
407 "$TC actions flush action csum" 407 "$TC actions flush action csum"
408 ] 408 ]
409 },
410 {
411 "id": "b4e9",
412 "name": "Delete batch of 32 csum actions",
413 "category": [
414 "actions",
415 "csum"
416 ],
417 "setup": [
418 [
419 "$TC actions flush action csum",
420 0,
421 1,
422 255
423 ],
424 "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\""
425 ],
426 "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"",
427 "expExitCode": "0",
428 "verifyCmd": "$TC actions list action csum",
429 "matchPattern": "^[ \t]+index [0-9]+ ref",
430 "matchCount": "0",
431 "teardown": []
432 },
433 {
434 "id": "0015",
435 "name": "Add batch of 32 csum tcp actions with large cookies",
436 "category": [
437 "actions",
438 "csum"
439 ],
440 "setup": [
441 [
442 "$TC actions flush action csum",
443 0,
444 1,
445 255
446 ]
447 ],
448 "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie aaabbbcccdddeee \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
449 "expExitCode": "0",
450 "verifyCmd": "$TC actions ls action csum",
451 "matchPattern": "^[ \t]+index [0-9]* ref",
452 "matchCount": "32",
453 "teardown": [
454 "$TC actions flush action csum"
455 ]
456 },
457 {
458 "id": "989e",
459 "name": "Delete batch of 32 csum actions with large cookies",
460 "category": [
461 "actions",
462 "csum"
463 ],
464 "setup": [
465 [
466 "$TC actions flush action csum",
467 0,
468 1,
469 255
470 ],
471 "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie aaabbbcccdddeee \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\""
472 ],
473 "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"",
474 "expExitCode": "0",
475 "verifyCmd": "$TC actions list action csum",
476 "matchPattern": "^[ \t]+index [0-9]+ ref",
477 "matchCount": "0",
478 "teardown": []
409 } 479 }
410] 480]
diff --git a/tools/testing/selftests/x86/test_syscall_vdso.c b/tools/testing/selftests/x86/test_syscall_vdso.c
index 40370354d4c1..c9c3281077bc 100644
--- a/tools/testing/selftests/x86/test_syscall_vdso.c
+++ b/tools/testing/selftests/x86/test_syscall_vdso.c
@@ -100,12 +100,19 @@ asm (
100 " shl $32, %r8\n" 100 " shl $32, %r8\n"
101 " orq $0x7f7f7f7f, %r8\n" 101 " orq $0x7f7f7f7f, %r8\n"
102 " movq %r8, %r9\n" 102 " movq %r8, %r9\n"
103 " movq %r8, %r10\n" 103 " incq %r9\n"
104 " movq %r8, %r11\n" 104 " movq %r9, %r10\n"
105 " movq %r8, %r12\n" 105 " incq %r10\n"
106 " movq %r8, %r13\n" 106 " movq %r10, %r11\n"
107 " movq %r8, %r14\n" 107 " incq %r11\n"
108 " movq %r8, %r15\n" 108 " movq %r11, %r12\n"
109 " incq %r12\n"
110 " movq %r12, %r13\n"
111 " incq %r13\n"
112 " movq %r13, %r14\n"
113 " incq %r14\n"
114 " movq %r14, %r15\n"
115 " incq %r15\n"
109 " ret\n" 116 " ret\n"
110 " .code32\n" 117 " .code32\n"
111 " .popsection\n" 118 " .popsection\n"
@@ -128,12 +135,13 @@ int check_regs64(void)
128 int err = 0; 135 int err = 0;
129 int num = 8; 136 int num = 8;
130 uint64_t *r64 = &regs64.r8; 137 uint64_t *r64 = &regs64.r8;
138 uint64_t expected = 0x7f7f7f7f7f7f7f7fULL;
131 139
132 if (!kernel_is_64bit) 140 if (!kernel_is_64bit)
133 return 0; 141 return 0;
134 142
135 do { 143 do {
136 if (*r64 == 0x7f7f7f7f7f7f7f7fULL) 144 if (*r64 == expected++)
137 continue; /* register did not change */ 145 continue; /* register did not change */
138 if (syscall_addr != (long)&int80) { 146 if (syscall_addr != (long)&int80) {
139 /* 147 /*
@@ -147,18 +155,17 @@ int check_regs64(void)
147 continue; 155 continue;
148 } 156 }
149 } else { 157 } else {
150 /* INT80 syscall entrypoint can be used by 158 /*
159 * INT80 syscall entrypoint can be used by
151 * 64-bit programs too, unlike SYSCALL/SYSENTER. 160 * 64-bit programs too, unlike SYSCALL/SYSENTER.
152 * Therefore it must preserve R12+ 161 * Therefore it must preserve R12+
153 * (they are callee-saved registers in 64-bit C ABI). 162 * (they are callee-saved registers in 64-bit C ABI).
154 * 163 *
155 * This was probably historically not intended, 164 * Starting in Linux 4.17 (and any kernel that
156 * but R8..11 are clobbered (cleared to 0). 165 * backports the change), R8..11 are preserved.
157 * IOW: they are the only registers which aren't 166 * Historically (and probably unintentionally), they
158 * preserved across INT80 syscall. 167 * were clobbered or zeroed.
159 */ 168 */
160 if (*r64 == 0 && num <= 11)
161 continue;
162 } 169 }
163 printf("[FAIL]\tR%d has changed:%016llx\n", num, *r64); 170 printf("[FAIL]\tR%d has changed:%016llx\n", num, *r64);
164 err++; 171 err++;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index dba629c5f8ac..a4c1b76240df 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
63static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); 63static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
64static u32 kvm_next_vmid; 64static u32 kvm_next_vmid;
65static unsigned int kvm_vmid_bits __read_mostly; 65static unsigned int kvm_vmid_bits __read_mostly;
66static DEFINE_SPINLOCK(kvm_vmid_lock); 66static DEFINE_RWLOCK(kvm_vmid_lock);
67 67
68static bool vgic_present; 68static bool vgic_present;
69 69
@@ -473,11 +473,16 @@ static void update_vttbr(struct kvm *kvm)
473{ 473{
474 phys_addr_t pgd_phys; 474 phys_addr_t pgd_phys;
475 u64 vmid; 475 u64 vmid;
476 bool new_gen;
476 477
477 if (!need_new_vmid_gen(kvm)) 478 read_lock(&kvm_vmid_lock);
479 new_gen = need_new_vmid_gen(kvm);
480 read_unlock(&kvm_vmid_lock);
481
482 if (!new_gen)
478 return; 483 return;
479 484
480 spin_lock(&kvm_vmid_lock); 485 write_lock(&kvm_vmid_lock);
481 486
482 /* 487 /*
483 * We need to re-check the vmid_gen here to ensure that if another vcpu 488 * We need to re-check the vmid_gen here to ensure that if another vcpu
@@ -485,7 +490,7 @@ static void update_vttbr(struct kvm *kvm)
485 * use the same vmid. 490 * use the same vmid.
486 */ 491 */
487 if (!need_new_vmid_gen(kvm)) { 492 if (!need_new_vmid_gen(kvm)) {
488 spin_unlock(&kvm_vmid_lock); 493 write_unlock(&kvm_vmid_lock);
489 return; 494 return;
490 } 495 }
491 496
@@ -519,7 +524,7 @@ static void update_vttbr(struct kvm *kvm)
519 vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits); 524 vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
520 kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid; 525 kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
521 526
522 spin_unlock(&kvm_vmid_lock); 527 write_unlock(&kvm_vmid_lock);
523} 528}
524 529
525static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) 530static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index 6919352cbf15..c4762bef13c6 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -18,6 +18,7 @@
18#include <linux/arm-smccc.h> 18#include <linux/arm-smccc.h>
19#include <linux/preempt.h> 19#include <linux/preempt.h>
20#include <linux/kvm_host.h> 20#include <linux/kvm_host.h>
21#include <linux/uaccess.h>
21#include <linux/wait.h> 22#include <linux/wait.h>
22 23
23#include <asm/cputype.h> 24#include <asm/cputype.h>
@@ -427,3 +428,62 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
427 smccc_set_retval(vcpu, val, 0, 0, 0); 428 smccc_set_retval(vcpu, val, 0, 0, 0);
428 return 1; 429 return 1;
429} 430}
431
/* Number of firmware pseudo-registers exposed to userspace. */
int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
	/* Only KVM_REG_ARM_PSCI_VERSION is exposed. */
	return 1;
}
436
437int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
438{
439 if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices))
440 return -EFAULT;
441
442 return 0;
443}
444
445int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
446{
447 if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
448 void __user *uaddr = (void __user *)(long)reg->addr;
449 u64 val;
450
451 val = kvm_psci_version(vcpu, vcpu->kvm);
452 if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
453 return -EFAULT;
454
455 return 0;
456 }
457
458 return -EINVAL;
459}
460
461int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
462{
463 if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
464 void __user *uaddr = (void __user *)(long)reg->addr;
465 bool wants_02;
466 u64 val;
467
468 if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
469 return -EFAULT;
470
471 wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
472
473 switch (val) {
474 case KVM_ARM_PSCI_0_1:
475 if (wants_02)
476 return -EINVAL;
477 vcpu->kvm->arch.psci_version = val;
478 return 0;
479 case KVM_ARM_PSCI_0_2:
480 case KVM_ARM_PSCI_1_0:
481 if (!wants_02)
482 return -EINVAL;
483 vcpu->kvm->arch.psci_version = val;
484 return 0;
485 }
486 }
487
488 return -EINVAL;
489}
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index e21e2f49b005..ffc587bf4742 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -14,6 +14,8 @@
14#include <linux/irqchip/arm-gic.h> 14#include <linux/irqchip/arm-gic.h>
15#include <linux/kvm.h> 15#include <linux/kvm.h>
16#include <linux/kvm_host.h> 16#include <linux/kvm_host.h>
17#include <linux/nospec.h>
18
17#include <kvm/iodev.h> 19#include <kvm/iodev.h>
18#include <kvm/arm_vgic.h> 20#include <kvm/arm_vgic.h>
19 21
@@ -324,6 +326,9 @@ static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
324 326
325 if (n > vgic_v3_max_apr_idx(vcpu)) 327 if (n > vgic_v3_max_apr_idx(vcpu))
326 return 0; 328 return 0;
329
330 n = array_index_nospec(n, 4);
331
327 /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */ 332 /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
328 return vgicv3->vgic_ap1r[n]; 333 return vgicv3->vgic_ap1r[n];
329 } 334 }
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index e74baec76361..702936cbe173 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -14,11 +14,13 @@
14 * along with this program. If not, see <http://www.gnu.org/licenses/>. 14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */ 15 */
16 16
17#include <linux/interrupt.h>
18#include <linux/irq.h>
17#include <linux/kvm.h> 19#include <linux/kvm.h>
18#include <linux/kvm_host.h> 20#include <linux/kvm_host.h>
19#include <linux/list_sort.h> 21#include <linux/list_sort.h>
20#include <linux/interrupt.h> 22#include <linux/nospec.h>
21#include <linux/irq.h> 23
22#include <asm/kvm_hyp.h> 24#include <asm/kvm_hyp.h>
23 25
24#include "vgic.h" 26#include "vgic.h"
@@ -101,12 +103,16 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
101 u32 intid) 103 u32 intid)
102{ 104{
103 /* SGIs and PPIs */ 105 /* SGIs and PPIs */
104 if (intid <= VGIC_MAX_PRIVATE) 106 if (intid <= VGIC_MAX_PRIVATE) {
107 intid = array_index_nospec(intid, VGIC_MAX_PRIVATE);
105 return &vcpu->arch.vgic_cpu.private_irqs[intid]; 108 return &vcpu->arch.vgic_cpu.private_irqs[intid];
109 }
106 110
107 /* SPIs */ 111 /* SPIs */
108 if (intid <= VGIC_MAX_SPI) 112 if (intid <= VGIC_MAX_SPI) {
113 intid = array_index_nospec(intid, VGIC_MAX_SPI);
109 return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS]; 114 return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
115 }
110 116
111 /* LPIs */ 117 /* LPIs */
112 if (intid >= VGIC_MIN_LPI) 118 if (intid >= VGIC_MIN_LPI)
@@ -594,6 +600,7 @@ retry:
594 600
595 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { 601 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
596 struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; 602 struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
603 bool target_vcpu_needs_kick = false;
597 604
598 spin_lock(&irq->irq_lock); 605 spin_lock(&irq->irq_lock);
599 606
@@ -664,11 +671,18 @@ retry:
664 list_del(&irq->ap_list); 671 list_del(&irq->ap_list);
665 irq->vcpu = target_vcpu; 672 irq->vcpu = target_vcpu;
666 list_add_tail(&irq->ap_list, &new_cpu->ap_list_head); 673 list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
674 target_vcpu_needs_kick = true;
667 } 675 }
668 676
669 spin_unlock(&irq->irq_lock); 677 spin_unlock(&irq->irq_lock);
670 spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); 678 spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
671 spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags); 679 spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
680
681 if (target_vcpu_needs_kick) {
682 kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
683 kvm_vcpu_kick(target_vcpu);
684 }
685
672 goto retry; 686 goto retry;
673 } 687 }
674 688