author    David S. Miller <davem@davemloft.net>  2017-01-28 10:33:06 -0500
committer David S. Miller <davem@davemloft.net>  2017-01-28 10:33:06 -0500
commit    4e8f2fc1a55d543717efb70e170b09e773d0542b (patch)
tree      30df1d7fc9dfa24fe2916711a17656682c3f7ec9
parent    158f323b9868b59967ad96957c4ca388161be321 (diff)
parent    1b1bc42c1692e9b62756323c675a44cb1a1f9dbd (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Two trivial overlapping-change conflicts in MPLS and mlx5.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- Documentation/devicetree/bindings/net/mediatek-net.txt 2
-rw-r--r-- Documentation/devicetree/bindings/net/phy.txt 5
-rw-r--r-- Documentation/devicetree/bindings/spi/sh-msiof.txt 19
-rw-r--r-- Documentation/filesystems/proc.txt 5
-rw-r--r-- Documentation/power/states.txt 4
-rw-r--r-- MAINTAINERS 34
-rw-r--r-- Makefile 4
-rw-r--r-- arch/arc/Kconfig 2
-rw-r--r-- arch/arc/include/asm/cache.h 9
-rw-r--r-- arch/arc/include/asm/entry-arcv2.h 2
-rw-r--r-- arch/arc/include/asm/module.h 4
-rw-r--r-- arch/arc/include/asm/ptrace.h 2
-rw-r--r-- arch/arc/include/asm/setup.h 1
-rw-r--r-- arch/arc/kernel/intc-arcv2.c 6
-rw-r--r-- arch/arc/kernel/intc-compact.c 4
-rw-r--r-- arch/arc/kernel/mcip.c 4
-rw-r--r-- arch/arc/kernel/module.c 4
-rw-r--r-- arch/arc/mm/cache.c 155
-rw-r--r-- arch/arc/mm/init.c 5
-rw-r--r-- arch/arm/boot/dts/Makefile 1
-rw-r--r-- arch/arm/boot/dts/am335x-icev2.dts 1
-rw-r--r-- arch/arm/boot/dts/bcm-nsp.dtsi 2
-rw-r--r-- arch/arm/boot/dts/da850-evm.dts 1
-rw-r--r-- arch/arm/boot/dts/dra7.dtsi 1
-rw-r--r-- arch/arm/boot/dts/dra72-evm-revc.dts 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi 4
-rw-r--r-- arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi 4
-rw-r--r-- arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts 11
-rw-r--r-- arch/arm/boot/dts/omap5.dtsi 1
-rw-r--r-- arch/arm/boot/dts/qcom-mdm9615.dtsi 2
-rw-r--r-- arch/arm/boot/dts/sun6i-a31-hummingbird.dts 4
-rw-r--r-- arch/arm/boot/dts/sun6i-a31.dtsi 1
-rw-r--r-- arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts 2
-rw-r--r-- arch/arm/configs/multi_v7_defconfig 4
-rw-r--r-- arch/arm/configs/s3c2410_defconfig 6
-rw-r--r-- arch/arm/include/asm/cputype.h 3
-rw-r--r-- arch/arm/include/asm/ftrace.h 18
-rw-r--r-- arch/arm/include/asm/virt.h 5
-rw-r--r-- arch/arm/include/uapi/asm/types.h (renamed from arch/arm/include/asm/types.h) 6
-rw-r--r-- arch/arm/kernel/hw_breakpoint.c 16
-rw-r--r-- arch/arm/kernel/smp_tlb.c 7
-rw-r--r-- arch/arm/kvm/arm.c 3
-rw-r--r-- arch/arm/mach-omap1/dma.c 16
-rw-r--r-- arch/arm/mach-omap2/pdata-quirks.c 2
-rw-r--r-- arch/arm/mach-ux500/pm.c 4
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts 4
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi 2
-rw-r--r-- arch/arm64/boot/dts/exynos/exynos5433.dtsi 2
-rw-r--r-- arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts 2
-rw-r--r-- arch/arm64/boot/dts/xilinx/zynqmp.dtsi 6
-rw-r--r-- arch/arm64/include/asm/memory.h 2
-rw-r--r-- arch/arm64/include/asm/virt.h 9
-rw-r--r-- arch/arm64/include/uapi/asm/ptrace.h 1
-rw-r--r-- arch/arm64/kernel/entry.S 2
-rw-r--r-- arch/arm64/kernel/ptrace.c 16
-rw-r--r-- arch/arm64/kernel/traps.c 28
-rw-r--r-- arch/arm64/mm/init.c 2
-rw-r--r-- arch/frv/include/asm/atomic.h 35
-rw-r--r-- arch/mn10300/include/asm/switch_to.h 2
-rw-r--r-- arch/powerpc/include/asm/book3s/64/hash-4k.h 5
-rw-r--r-- arch/powerpc/include/asm/book3s/64/hash.h 4
-rw-r--r-- arch/powerpc/include/asm/hugetlb.h 14
-rw-r--r-- arch/powerpc/include/asm/nohash/pgtable.h 5
-rw-r--r-- arch/powerpc/include/asm/page.h 3
-rw-r--r-- arch/powerpc/include/asm/perf_event_server.h 1
-rw-r--r-- arch/powerpc/include/asm/pgtable-be-types.h 8
-rw-r--r-- arch/powerpc/include/asm/pgtable-types.h 7
-rw-r--r-- arch/powerpc/include/asm/ppc-opcode.h 10
-rw-r--r-- arch/powerpc/kernel/eeh.c 10
-rw-r--r-- arch/powerpc/kernel/ptrace.c 14
-rw-r--r-- arch/powerpc/mm/hash_utils_64.c 4
-rw-r--r-- arch/powerpc/mm/hugetlbpage-hash64.c 5
-rw-r--r-- arch/powerpc/mm/hugetlbpage.c 31
-rw-r--r-- arch/powerpc/mm/init-common.c 13
-rw-r--r-- arch/powerpc/mm/pgtable-book3s64.c 18
-rw-r--r-- arch/powerpc/perf/core-book3s.c 2
-rw-r--r-- arch/powerpc/perf/power9-events-list.h 2
-rw-r--r-- arch/powerpc/perf/power9-pmu.c 2
-rw-r--r-- arch/powerpc/sysdev/xics/icp-opal.c 31
-rw-r--r-- arch/s390/configs/default_defconfig 27
-rw-r--r-- arch/s390/configs/gcov_defconfig 50
-rw-r--r-- arch/s390/configs/performance_defconfig 33
-rw-r--r-- arch/s390/defconfig 5
-rw-r--r-- arch/s390/include/asm/ctl_reg.h 4
-rw-r--r-- arch/s390/kernel/ptrace.c 8
-rw-r--r-- arch/s390/kvm/kvm-s390.c 4
-rw-r--r-- arch/s390/mm/pgtable.c 7
-rw-r--r-- arch/tile/kernel/ptrace.c 2
-rw-r--r-- arch/x86/events/amd/ibs.c 2
-rw-r--r-- arch/x86/events/intel/core.c 7
-rw-r--r-- arch/x86/kernel/apic/io_apic.c 2
-rw-r--r-- arch/x86/kvm/x86.c 3
-rw-r--r-- arch/x86/pci/acpi.c 10
-rw-r--r-- block/blk-mq.c 1
-rw-r--r-- drivers/acpi/acpica/tbdata.c 9
-rw-r--r-- drivers/acpi/acpica/tbinstal.c 17
-rw-r--r-- drivers/acpi/sleep.c 8
-rw-r--r-- drivers/acpi/video_detect.c 11
-rw-r--r-- drivers/base/memory.c 4
-rw-r--r-- drivers/block/nbd.c 6
-rw-r--r-- drivers/block/xen-blkfront.c 22
-rw-r--r-- drivers/char/virtio_console.c 2
-rw-r--r-- drivers/clk/samsung/clk-exynos5420.c 14
-rw-r--r-- drivers/clocksource/exynos_mct.c 1
-rw-r--r-- drivers/cpufreq/intel_pstate.c 14
-rw-r--r-- drivers/gpio/gpiolib.c 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_virtual.c 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c 34
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dpm.c 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 42
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 27
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c 4
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c 24
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.h 1
-rw-r--r-- drivers/gpu/drm/ast/ast_main.c 157
-rw-r--r-- drivers/gpu/drm/ast/ast_post.c 18
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix_dp_core.c 7
-rw-r--r-- drivers/gpu/drm/cirrus/Kconfig 9
-rw-r--r-- drivers/gpu/drm/drm_atomic.c 12
-rw-r--r-- drivers/gpu/drm/drm_modes.c 7
-rw-r--r-- drivers/gpu/drm/drm_probe_helper.c 12
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_mmu.c 7
-rw-r--r-- drivers/gpu/drm/exynos/exynos5433_drm_decon.c 15
-rw-r--r-- drivers/gpu/drm/i915/gvt/aperture_gm.c 36
-rw-r--r-- drivers/gpu/drm/i915/gvt/cfg_space.c 74
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/execlist.c 66
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.c 81
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.h 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/gvt.c 8
-rw-r--r-- drivers/gpu/drm/i915/gvt/gvt.h 8
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c 103
-rw-r--r-- drivers/gpu/drm/i915/gvt/kvmgt.c 22
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio.c 84
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio.h 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/opregion.c 8
-rw-r--r-- drivers/gpu/drm/i915/gvt/reg.h 3
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c 14
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.h 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/vgpu.c 162
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c 2
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h 5
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c 34
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c 1
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c 1
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c 9
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c 46
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h 2
-rw-r--r-- drivers/gpu/drm/i915/intel_fbdev.c 3
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.c 10
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c 8
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.c 5
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c 6
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h 4
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c 22
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c 5
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.h 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.c 43
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c 7
-rw-r--r-- drivers/gpu/drm/radeon/si.c 25
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.c 12
-rw-r--r-- drivers/gpu/drm/vc4/vc4_crtc.c 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_gem.c 4
-rw-r--r-- drivers/gpu/drm/vc4/vc4_render_cl.c 2
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_fb.c 2
-rw-r--r-- drivers/hid/hid-corsair.c 60
-rw-r--r-- drivers/hid/wacom_sys.c 16
-rw-r--r-- drivers/hid/wacom_wac.c 10
-rw-r--r-- drivers/infiniband/core/cma.c 3
-rw-r--r-- drivers/infiniband/core/umem.c 2
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch_provider.c 11
-rw-r--r-- drivers/infiniband/hw/cxgb4/cm.c 7
-rw-r--r-- drivers/infiniband/hw/cxgb4/cq.c 21
-rw-r--r-- drivers/infiniband/hw/cxgb4/device.c 9
-rw-r--r-- drivers/infiniband/hw/cxgb4/iw_cxgb4.h 24
-rw-r--r-- drivers/infiniband/hw/cxgb4/provider.c 33
-rw-r--r-- drivers/infiniband/hw/cxgb4/qp.c 147
-rw-r--r-- drivers/infiniband/hw/cxgb4/t4.h 2
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_verbs.c 11
-rw-r--r-- drivers/infiniband/hw/nes/nes_verbs.c 12
-rw-r--r-- drivers/infiniband/hw/qedr/main.c 23
-rw-r--r-- drivers/infiniband/hw/qedr/qedr.h 8
-rw-r--r-- drivers/infiniband/hw/qedr/qedr_cm.c 14
-rw-r--r-- drivers/infiniband/hw/qedr/verbs.c 62
-rw-r--r-- drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 4
-rw-r--r-- drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c 2
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_net.c 2
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_qp.c 3
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.c 11
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.h 2
-rw-r--r-- drivers/infiniband/ulp/iser/iser_verbs.c 13
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.c 15
-rw-r--r-- drivers/isdn/hardware/eicon/message.c 3
-rw-r--r-- drivers/media/cec/cec-adap.c 103
-rw-r--r-- drivers/media/dvb-core/dvb_net.c 15
-rw-r--r-- drivers/media/i2c/Kconfig 1
-rw-r--r-- drivers/media/i2c/smiapp/smiapp-core.c 33
-rw-r--r-- drivers/media/i2c/tvp5150.c 56
-rw-r--r-- drivers/media/i2c/tvp5150_reg.h 9
-rw-r--r-- drivers/media/pci/cobalt/cobalt-driver.c 8
-rw-r--r-- drivers/media/pci/cobalt/cobalt-driver.h 2
-rw-r--r-- drivers/media/usb/dvb-usb/pctv452e.c 133
-rw-r--r-- drivers/memstick/core/memstick.c 2
-rw-r--r-- drivers/mmc/host/dw_mmc.c 7
-rw-r--r-- drivers/net/can/c_can/c_can_pci.c 1
-rw-r--r-- drivers/net/can/ti_hecc.c 16
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-common.h 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-dev.c 8
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c 4
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-pci.c 15
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe.h 2
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c 11
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c 80
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c 2
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.c 7
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c 10
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.c 88
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.h 24
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_roce.c 2
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c 112
-rw-r--r-- drivers/net/gtp.c 13
-rw-r--r-- drivers/net/macvtap.c 2
-rw-r--r-- drivers/net/phy/bcm63xx.c 21
-rw-r--r-- drivers/net/phy/dp83848.c 3
-rw-r--r-- drivers/net/phy/marvell.c 2
-rw-r--r-- drivers/net/phy/micrel.c 14
-rw-r--r-- drivers/net/phy/phy.c 15
-rw-r--r-- drivers/net/phy/phy_led_triggers.c 9
-rw-r--r-- drivers/net/tun.c 2
-rw-r--r-- drivers/net/usb/cdc_ether.c 8
-rw-r--r-- drivers/net/usb/qmi_wwan.c 7
-rw-r--r-- drivers/net/usb/r8152.c 34
-rw-r--r-- drivers/net/virtio_net.c 25
-rw-r--r-- drivers/net/vxlan.c 12
-rw-r--r-- drivers/net/xen-netback/interface.c 6
-rw-r--r-- drivers/net/xen-netback/xenbus.c 13
-rw-r--r-- drivers/net/xen-netfront.c 2
-rw-r--r-- drivers/nvdimm/namespace_devs.c 23
-rw-r--r-- drivers/nvdimm/pmem.c 4
-rw-r--r-- drivers/nvme/host/fc.c 6
-rw-r--r-- drivers/nvme/target/configfs.c 1
-rw-r--r-- drivers/nvme/target/core.c 15
-rw-r--r-- drivers/nvme/target/fc.c 36
-rw-r--r-- drivers/nvme/target/nvmet.h 1
-rw-r--r-- drivers/nvme/target/rdma.c 17
-rw-r--r-- drivers/pci/host/pci-xgene-msi.c 2
-rw-r--r-- drivers/pci/host/pcie-designware.c 10
-rw-r--r-- drivers/pci/probe.c 12
-rw-r--r-- drivers/pinctrl/intel/pinctrl-baytrail.c 39
-rw-r--r-- drivers/pinctrl/intel/pinctrl-broxton.c 2
-rw-r--r-- drivers/pinctrl/intel/pinctrl-intel.c 30
-rw-r--r-- drivers/pinctrl/meson/pinctrl-meson-gxbb.c 7
-rw-r--r-- drivers/pinctrl/meson/pinctrl-meson-gxl.c 7
-rw-r--r-- drivers/pinctrl/pinctrl-amd.c 2
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c 2
-rw-r--r-- drivers/platform/x86/ideapad-laptop.c 1
-rw-r--r-- drivers/platform/x86/intel_mid_powerbtn.c 2
-rw-r--r-- drivers/platform/x86/mlx-platform.c 2
-rw-r--r-- drivers/platform/x86/surface3-wmi.c 6
-rw-r--r-- drivers/s390/virtio/virtio_ccw.c 29
-rw-r--r-- drivers/scsi/bfa/bfad_bsg.c 2
-rw-r--r-- drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 7
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c 7
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.h 12
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c 40
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c 21
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h 6
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c 92
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c 37
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx.c 5
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx.h 3
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx2.c 17
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx2.h 17
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c 18
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c 57
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.h 22
-rw-r--r-- drivers/scsi/qla2xxx/qla_tmpl.c 24
-rw-r--r-- drivers/scsi/qla2xxx/tcm_qla2xxx.c 4
-rw-r--r-- drivers/scsi/qla2xxx/tcm_qla2xxx.h 1
-rw-r--r-- drivers/scsi/sd.c 40
-rw-r--r-- drivers/scsi/ses.c 2
-rw-r--r-- drivers/soc/ti/wkup_m3_ipc.c 1
-rw-r--r-- drivers/spi/Kconfig 1
-rw-r--r-- drivers/spi/spi-armada-3700.c 11
-rw-r--r-- drivers/spi/spi-axi-spi-engine.c 3
-rw-r--r-- drivers/spi/spi-davinci.c 4
-rw-r--r-- drivers/spi/spi-dw-mid.c 4
-rw-r--r-- drivers/spi/spi-dw.c 5
-rw-r--r-- drivers/spi/spi-pxa2xx.c 1
-rw-r--r-- drivers/spi/spi-sh-msiof.c 4
-rw-r--r-- drivers/thermal/rockchip_thermal.c 153
-rw-r--r-- drivers/thermal/thermal_core.c 10
-rw-r--r-- drivers/usb/dwc2/core.h 4
-rw-r--r-- drivers/usb/dwc2/gadget.c 18
-rw-r--r-- drivers/usb/dwc2/hcd.c 7
-rw-r--r-- drivers/usb/dwc2/params.c 10
-rw-r--r-- drivers/usb/dwc3/dwc3-exynos.c 4
-rw-r--r-- drivers/usb/gadget/composite.c 2
-rw-r--r-- drivers/usb/gadget/function/f_fs.c 12
-rw-r--r-- drivers/usb/gadget/udc/atmel_usba_udc.c 3
-rw-r--r-- drivers/usb/gadget/udc/atmel_usba_udc.h 1
-rw-r--r-- drivers/usb/host/xhci-plat.c 2
-rw-r--r-- drivers/vfio/vfio_iommu_spapr_tce.c 4
-rw-r--r-- drivers/vhost/scsi.c 4
-rw-r--r-- drivers/vhost/vsock.c 13
-rw-r--r-- drivers/video/fbdev/core/fbcmap.c 26
-rw-r--r-- drivers/virtio/virtio_mmio.c 20
-rw-r--r-- drivers/virtio/virtio_ring.c 7
-rw-r--r-- drivers/xen/platform-pci.c 71
-rw-r--r-- drivers/xen/swiotlb-xen.c 5
-rw-r--r-- fs/Kconfig 1
-rw-r--r-- fs/block_dev.c 6
-rw-r--r-- fs/btrfs/inode.c 26
-rw-r--r-- fs/ceph/caps.c 7
-rw-r--r-- fs/ceph/dir.c 5
-rw-r--r-- fs/ceph/inode.c 3
-rw-r--r-- fs/ceph/mds_client.c 9
-rw-r--r-- fs/dax.c 2
-rw-r--r-- fs/ext2/Kconfig 1
-rw-r--r-- fs/ext4/Kconfig 1
-rw-r--r-- fs/fuse/dev.c 3
-rw-r--r-- fs/fuse/dir.c 2
-rw-r--r-- fs/overlayfs/namei.c 27
-rw-r--r-- fs/proc/base.c 2
-rw-r--r-- fs/romfs/super.c 23
-rw-r--r-- fs/ubifs/Kconfig 2
-rw-r--r-- fs/ubifs/dir.c 58
-rw-r--r-- fs/ubifs/ioctl.c 3
-rw-r--r-- fs/ubifs/journal.c 2
-rw-r--r-- fs/ubifs/tnc.c 25
-rw-r--r-- fs/userfaultfd.c 37
-rw-r--r-- fs/xfs/libxfs/xfs_ag_resv.c 70
-rw-r--r-- fs/xfs/libxfs/xfs_attr.c 6
-rw-r--r-- fs/xfs/libxfs/xfs_bmap.c 48
-rw-r--r-- fs/xfs/libxfs/xfs_bmap.h 6
-rw-r--r-- fs/xfs/libxfs/xfs_dir2.c 39
-rw-r--r-- fs/xfs/libxfs/xfs_dir2.h 8
-rw-r--r-- fs/xfs/libxfs/xfs_ialloc_btree.c 90
-rw-r--r-- fs/xfs/libxfs/xfs_ialloc_btree.h 3
-rw-r--r-- fs/xfs/libxfs/xfs_inode_buf.c 10
-rw-r--r-- fs/xfs/libxfs/xfs_sb.c 2
-rw-r--r-- fs/xfs/xfs_bmap_util.c 28
-rw-r--r-- fs/xfs/xfs_buf.c 1
-rw-r--r-- fs/xfs/xfs_dquot.c 4
-rw-r--r-- fs/xfs/xfs_inode.c 23
-rw-r--r-- fs/xfs/xfs_iomap.c 2
-rw-r--r-- fs/xfs/xfs_iops.c 50
-rw-r--r-- fs/xfs/xfs_linux.h 6
-rw-r--r-- fs/xfs/xfs_mount.h 1
-rw-r--r-- fs/xfs/xfs_qm.c 3
-rw-r--r-- include/drm/drm_atomic.h 2
-rw-r--r-- include/drm/drm_mode_config.h 2
-rw-r--r-- include/kvm/arm_arch_timer.h 1
-rw-r--r-- include/linux/bpf.h 2
-rw-r--r-- include/linux/cpuhotplug.h 2
-rw-r--r-- include/linux/gpio/driver.h 70
-rw-r--r-- include/linux/kernel.h 4
-rw-r--r-- include/linux/memory_hotplug.h 4
-rw-r--r-- include/linux/micrel_phy.h 2
-rw-r--r-- include/linux/mmzone.h 6
-rw-r--r-- include/linux/nmi.h 1
-rw-r--r-- include/linux/phy.h 1
-rw-r--r-- include/linux/phy_led_triggers.h 4
-rw-r--r-- include/linux/rcupdate.h 4
-rw-r--r-- include/linux/suspend.h 2
-rw-r--r-- include/linux/virtio_net.h 6
-rw-r--r-- include/net/ipv6.h 2
-rw-r--r-- include/net/lwtunnel.h 13
-rw-r--r-- include/net/netfilter/nf_tables.h 6
-rw-r--r-- include/net/netfilter/nft_fib.h 6
-rw-r--r-- include/rdma/ib_verbs.h 14
-rw-r--r-- include/scsi/libfc.h 6
-rw-r--r-- include/uapi/linux/cec-funcs.h 10
-rw-r--r-- include/uapi/linux/netfilter/nf_log.h 2
-rw-r--r-- include/uapi/linux/netfilter/nf_tables.h 4
-rw-r--r-- include/uapi/rdma/Kbuild 1
-rw-r--r-- include/uapi/rdma/cxgb3-abi.h 2
-rw-r--r-- kernel/bpf/arraymap.c 18
-rw-r--r-- kernel/bpf/hashtab.c 22
-rw-r--r-- kernel/bpf/stackmap.c 20
-rw-r--r-- kernel/bpf/syscall.c 26
-rw-r--r-- kernel/cpu.c 24
-rw-r--r-- kernel/module.c 2
-rw-r--r-- kernel/panic.c 4
-rw-r--r-- kernel/power/suspend.c 4
-rw-r--r-- kernel/rcu/rcu.h 1
-rw-r--r-- kernel/rcu/tiny.c 4
-rw-r--r-- kernel/rcu/tiny_plugin.h 9
-rw-r--r-- kernel/rcu/tree.c 33
-rw-r--r-- kernel/rcu/tree_exp.h 52
-rw-r--r-- kernel/rcu/tree_plugin.h 2
-rw-r--r-- kernel/rcu/update.c 38
-rw-r--r-- kernel/sysctl.c 1
-rw-r--r-- kernel/ucount.c 14
-rw-r--r-- kernel/watchdog.c 9
-rw-r--r-- kernel/watchdog_hld.c 3
-rw-r--r-- lib/ioremap.c 1
-rw-r--r-- lib/radix-tree.c 2
-rw-r--r-- mm/huge_memory.c 18
-rw-r--r-- mm/memcontrol.c 4
-rw-r--r-- mm/memory_hotplug.c 28
-rw-r--r-- mm/mempolicy.c 2
-rw-r--r-- mm/page_alloc.c 69
-rw-r--r-- mm/slub.c 23
-rw-r--r-- net/batman-adv/fragmentation.c 10
-rw-r--r-- net/bridge/br_netlink.c 33
-rw-r--r-- net/ceph/crypto.c 2
-rw-r--r-- net/core/dev.c 4
-rw-r--r-- net/core/ethtool.c 2
-rw-r--r-- net/core/lwt_bpf.c 1
-rw-r--r-- net/core/lwtunnel.c 66
-rw-r--r-- net/dccp/ipv6.c 4
-rw-r--r-- net/dsa/slave.c 8
-rw-r--r-- net/ipv4/fib_frontend.c 8
-rw-r--r-- net/ipv4/ip_output.c 1
-rw-r--r-- net/ipv4/ip_tunnel_core.c 2
-rw-r--r-- net/ipv4/netfilter/ipt_CLUSTERIP.c 7
-rw-r--r-- net/ipv4/netfilter/ipt_rpfilter.c 8
-rw-r--r-- net/ipv4/netfilter/nf_reject_ipv4.c 2
-rw-r--r-- net/ipv4/netfilter/nft_fib_ipv4.c 15
-rw-r--r-- net/ipv4/tcp_fastopen.c 1
-rw-r--r-- net/ipv4/tcp_input.c 2
-rw-r--r-- net/ipv6/addrconf.c 4
-rw-r--r-- net/ipv6/ila/ila_lwt.c 1
-rw-r--r-- net/ipv6/inet6_connection_sock.c 2
-rw-r--r-- net/ipv6/ip6_gre.c 3
-rw-r--r-- net/ipv6/ip6_output.c 4
-rw-r--r-- net/ipv6/ip6_tunnel.c 36
-rw-r--r-- net/ipv6/netfilter/ip6t_rpfilter.c 8
-rw-r--r-- net/ipv6/netfilter/nf_reject_ipv6.c 3
-rw-r--r-- net/ipv6/netfilter/nft_fib_ipv6.c 13
-rw-r--r-- net/ipv6/route.c 12
-rw-r--r-- net/ipv6/seg6.c 2
-rw-r--r-- net/ipv6/seg6_iptunnel.c 1
-rw-r--r-- net/ipv6/tcp_ipv6.c 4
-rw-r--r-- net/mac80211/rate.c 2
-rw-r--r-- net/mpls/af_mpls.c 48
-rw-r--r-- net/mpls/mpls_iptunnel.c 1
-rw-r--r-- net/netfilter/Kconfig 2
-rw-r--r-- net/netfilter/nf_conntrack_core.c 44
-rw-r--r-- net/netfilter/nf_log.c 1
-rw-r--r-- net/netfilter/nf_tables_api.c 67
-rw-r--r-- net/netfilter/nft_dynset.c 3
-rw-r--r-- net/netfilter/nft_log.c 3
-rw-r--r-- net/netfilter/nft_lookup.c 3
-rw-r--r-- net/netfilter/nft_objref.c 6
-rw-r--r-- net/netfilter/nft_set_hash.c 2
-rw-r--r-- net/netfilter/nft_set_rbtree.c 2
-rw-r--r-- net/packet/af_packet.c 4
-rw-r--r-- net/sctp/ipv6.c 3
-rw-r--r-- net/sctp/offload.c 2
-rw-r--r-- net/sctp/socket.c 6
-rw-r--r-- net/tipc/node.c 9
-rw-r--r-- net/tipc/server.c 48
-rw-r--r-- net/tipc/subscr.c 124
-rw-r--r-- net/tipc/subscr.h 1
-rw-r--r-- net/unix/af_unix.c 27
-rw-r--r-- samples/bpf/tc_l2_redirect_kern.c 1
-rw-r--r-- samples/bpf/xdp_tx_iptunnel_kern.c 1
-rw-r--r-- tools/perf/util/probe-event.c 95
-rw-r--r-- tools/perf/util/probe-finder.c 15
-rw-r--r-- tools/perf/util/probe-finder.h 3
-rw-r--r-- tools/testing/selftests/bpf/test_lru_map.c 53
-rw-r--r-- tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c 2
-rw-r--r-- tools/virtio/ringtest/main.h 12
-rwxr-xr-x tools/virtio/ringtest/run-on-all.sh 5
-rw-r--r-- virt/kvm/arm/arch_timer.c 26
-rw-r--r-- virt/kvm/arm/hyp/timer-sr.c 33
-rw-r--r-- virt/kvm/arm/vgic/vgic-init.c 18
-rw-r--r-- virt/kvm/arm/vgic/vgic-v2.c 2
-rw-r--r-- virt/kvm/arm/vgic/vgic-v3.c 2
485 files changed, 4555 insertions(+), 2689 deletions(-)
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index c010fafc66a8..c7194e87d5f4 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -7,7 +7,7 @@ have dual GMAC each represented by a child node..
 * Ethernet controller node
 
 Required properties:
-- compatible: Should be "mediatek,mt7623-eth"
+- compatible: Should be "mediatek,mt2701-eth"
 - reg: Address and length of the register set for the device
 - interrupts: Should contain the three frame engines interrupts in numeric
 	order. These are fe_int0, fe_int1 and fe_int2.
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index ff1bc4b1bb3b..fb5056b22685 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -19,8 +19,9 @@ Optional Properties:
 	specifications. If neither of these are specified, the default is to
 	assume clause 22.
 
-	If the phy's identifier is known then the list may contain an entry
-	of the form: "ethernet-phy-idAAAA.BBBB" where
+	If the PHY reports an incorrect ID (or none at all) then the
+	"compatible" list may contain an entry with the correct PHY ID in the
+	form: "ethernet-phy-idAAAA.BBBB" where
 	   AAAA - The value of the 16 bit Phy Identifier 1 register as
 	          4 hex digits. This is the chip vendor OUI bits 3:18
 	   BBBB - The value of the 16 bit Phy Identifier 2 register as
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index da6614c63796..dc975064fa27 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -1,17 +1,23 @@
 Renesas MSIOF spi controller
 
 Required properties:
-- compatible           : "renesas,msiof-<soctype>" for SoCs,
-			 "renesas,sh-msiof" for SuperH, or
-			 "renesas,sh-mobile-msiof" for SH Mobile series.
-			 Examples with soctypes are:
-			 "renesas,msiof-r8a7790" (R-Car H2)
+- compatible           : "renesas,msiof-r8a7790" (R-Car H2)
 			 "renesas,msiof-r8a7791" (R-Car M2-W)
 			 "renesas,msiof-r8a7792" (R-Car V2H)
 			 "renesas,msiof-r8a7793" (R-Car M2-N)
 			 "renesas,msiof-r8a7794" (R-Car E2)
 			 "renesas,msiof-r8a7796" (R-Car M3-W)
 			 "renesas,msiof-sh73a0" (SH-Mobile AG5)
+			 "renesas,sh-mobile-msiof" (generic SH-Mobile compatibile device)
+			 "renesas,rcar-gen2-msiof" (generic R-Car Gen2 compatible device)
+			 "renesas,rcar-gen3-msiof" (generic R-Car Gen3 compatible device)
+			 "renesas,sh-msiof" (deprecated)
+
+			 When compatible with the generic version, nodes
+			 must list the SoC-specific version corresponding
+			 to the platform first followed by the generic
+			 version.
+
 - reg                  : A list of offsets and lengths of the register sets for
 			 the device.
 			 If only one register set is present, it is to be used
@@ -61,7 +67,8 @@ Documentation/devicetree/bindings/pinctrl/renesas,*.
 Example:
 
 	msiof0: spi@e6e20000 {
-		compatible = "renesas,msiof-r8a7791";
+		compatible = "renesas,msiof-r8a7791",
+			     "renesas,rcar-gen2-msiof";
 		reg = <0 0xe6e20000 0 0x0064>;
 		interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>;
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 72624a16b792..c94b4675d021 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -212,10 +212,11 @@ asynchronous manner and the value may not be very precise. To see a precise
 snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table.
 It's slow but very precise.
 
-Table 1-2: Contents of the status files (as of 4.1)
+Table 1-2: Contents of the status files (as of 4.8)
 ..............................................................................
  Field                       Content
  Name                        filename of the executable
+ Umask                       file mode creation mask
  State                       state (R is running, S is sleeping, D is sleeping
                              in an uninterruptible wait, Z is zombie,
                              T is traced or stopped)
@@ -226,7 +227,6 @@ Table 1-2: Contents of the status files (as of 4.1)
  TracerPid                   PID of process tracing this process (0 if not)
  Uid                         Real, effective, saved set, and file system UIDs
  Gid                         Real, effective, saved set, and file system GIDs
- Umask                       file mode creation mask
  FDSize                      number of file descriptor slots currently allocated
  Groups                      supplementary group list
  NStgid                      descendant namespace thread group ID hierarchy
@@ -236,6 +236,7 @@ Table 1-2: Contents of the status files (as of 4.1)
  VmPeak                      peak virtual memory size
  VmSize                      total program size
  VmLck                       locked memory size
+ VmPin                       pinned memory size
  VmHWM                       peak resident set size ("high water mark")
  VmRSS                       size of memory portions. It contains the three
                              following parts (VmRSS = RssAnon + RssFile + RssShmem)
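[Editor's note -- illustration only, not part of the patch above: the proc.txt hunks document the Umask and VmPin fields of /proc/<pid>/status. A minimal userspace C sketch for reading them, assuming a kernel new enough (>= 4.8 for Umask) to emit both; on older kernels the loop simply prints nothing:]

/* read_status.c: print the Umask and VmPin lines of our own status file */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* fields are "Name:\tvalue" pairs, one per line */
		if (!strncmp(line, "Umask:", 6) || !strncmp(line, "VmPin:", 6))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}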
diff --git a/Documentation/power/states.txt b/Documentation/power/states.txt
index 8a39ce45d8a0..008ecb588317 100644
--- a/Documentation/power/states.txt
+++ b/Documentation/power/states.txt
@@ -35,9 +35,7 @@ only one way to cause the system to go into the Suspend-To-RAM state (write
 The default suspend mode (ie. the one to be used without writing anything into
 /sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
 "s2idle", but it can be overridden by the value of the "mem_sleep_default"
-parameter in the kernel command line. On some ACPI-based systems, depending on
-the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
-is supported.
+parameter in the kernel command line.
 
 The properties of all of the sleep states are described below.
 
diff --git a/MAINTAINERS b/MAINTAINERS
index d76fccd09266..cc106f71a9b8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -976,6 +976,7 @@ M: Russell King <linux@armlinux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.armlinux.org.uk/
 S:	Maintained
+T:	git git://git.armlinux.org.uk/~rmk/linux-arm.git
 F:	arch/arm/
 
 ARM SUB-ARCHITECTURES
@@ -1153,6 +1154,7 @@ ARM/CLKDEV SUPPORT
 M:	Russell King <linux@armlinux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
+T:	git git://git.armlinux.org.uk/~rmk/linux-arm.git clkdev
 F:	arch/arm/include/asm/clkdev.h
 F:	drivers/clk/clkdev.c
 
@@ -1688,6 +1690,7 @@ M: Krzysztof Kozlowski <krzk@kernel.org>
 R:	Javier Martinez Canillas <javier@osg.samsung.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+Q:	https://patchwork.kernel.org/project/linux-samsung-soc/list/
 S:	Maintained
 F:	arch/arm/boot/dts/s3c*
 F:	arch/arm/boot/dts/s5p*
@@ -3570,7 +3573,7 @@ F: drivers/infiniband/hw/cxgb3/
 F:	include/uapi/rdma/cxgb3-abi.h
 
 CXGB4 ETHERNET DRIVER (CXGB4)
-M:	Hariprasad S <hariprasad@chelsio.com>
+M:	Ganesh Goudar <ganeshgr@chelsio.com>
 L:	netdev@vger.kernel.org
 W:	http://www.chelsio.com
 S:	Supported
@@ -4103,12 +4106,18 @@ F: drivers/gpu/drm/bridge/
 
 DRM DRIVER FOR BOCHS VIRTUAL GPU
 M:	Gerd Hoffmann <kraxel@redhat.com>
-S:	Odd Fixes
+L:	virtualization@lists.linux-foundation.org
+T:	git git://git.kraxel.org/linux drm-qemu
+S:	Maintained
 F:	drivers/gpu/drm/bochs/
 
 DRM DRIVER FOR QEMU'S CIRRUS DEVICE
 M:	Dave Airlie <airlied@redhat.com>
-S:	Odd Fixes
+M:	Gerd Hoffmann <kraxel@redhat.com>
+L:	virtualization@lists.linux-foundation.org
+T:	git git://git.kraxel.org/linux drm-qemu
+S:	Obsolete
+W:	https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
 F:	drivers/gpu/drm/cirrus/
 
 RADEON and AMDGPU DRM DRIVERS
@@ -4150,7 +4159,7 @@ F: Documentation/gpu/i915.rst
 INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 M:	Zhenyu Wang <zhenyuw@linux.intel.com>
 M:	Zhi Wang <zhi.a.wang@intel.com>
-L:	igvt-g-dev@lists.01.org
+L:	intel-gvt-dev@lists.freedesktop.org
 L:	intel-gfx@lists.freedesktop.org
 W:	https://01.org/igvt-g
 T:	git https://github.com/01org/gvt-linux.git
@@ -4301,7 +4310,10 @@ F: Documentation/devicetree/bindings/display/renesas,du.txt
 
 DRM DRIVER FOR QXL VIRTUAL GPU
 M:	Dave Airlie <airlied@redhat.com>
-S:	Odd Fixes
+M:	Gerd Hoffmann <kraxel@redhat.com>
+L:	virtualization@lists.linux-foundation.org
+T:	git git://git.kraxel.org/linux drm-qemu
+S:	Maintained
 F:	drivers/gpu/drm/qxl/
 F:	include/uapi/drm/qxl_drm.h
 
@@ -7703,8 +7715,10 @@ F: drivers/net/dsa/mv88e6xxx/
 F:	Documentation/devicetree/bindings/net/dsa/marvell.txt
 
 MARVELL ARMADA DRM SUPPORT
-M:	Russell King <rmk+kernel@armlinux.org.uk>
+M:	Russell King <linux@armlinux.org.uk>
 S:	Maintained
+T:	git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-devel
+T:	git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-fixes
 F:	drivers/gpu/drm/armada/
 F:	include/uapi/drm/armada_drm.h
 F:	Documentation/devicetree/bindings/display/armada/
@@ -8909,8 +8923,10 @@ S: Supported
 F:	drivers/nfc/nxp-nci
 
 NXP TDA998X DRM DRIVER
-M:	Russell King <rmk+kernel@armlinux.org.uk>
+M:	Russell King <linux@armlinux.org.uk>
 S:	Supported
+T:	git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel
+T:	git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes
 F:	drivers/gpu/drm/i2c/tda998x_drv.c
 F:	include/drm/i2c/tda998x.h
 
@@ -13105,6 +13121,7 @@ M: David Airlie <airlied@linux.ie>
 M:	Gerd Hoffmann <kraxel@redhat.com>
 L:	dri-devel@lists.freedesktop.org
 L:	virtualization@lists.linux-foundation.org
+T:	git git://git.kraxel.org/linux drm-qemu
 S:	Maintained
 F:	drivers/gpu/drm/virtio/
 F:	include/uapi/linux/virtio_gpu.h
@@ -13456,6 +13473,7 @@ F: arch/x86/
 
 X86 PLATFORM DRIVERS
 M:	Darren Hart <dvhart@infradead.org>
+M:	Andy Shevchenko <andy@infradead.org>
 L:	platform-driver-x86@vger.kernel.org
 T:	git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git
 S:	Maintained
@@ -13627,6 +13645,7 @@ F: drivers/net/hamradio/z8530.h
 
 ZBUD COMPRESSED PAGE ALLOCATOR
 M:	Seth Jennings <sjenning@redhat.com>
+M:	Dan Streetman <ddstreet@ieee.org>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	mm/zbud.c
@@ -13682,6 +13701,7 @@ F: Documentation/vm/zsmalloc.txt
 
 ZSWAP COMPRESSED SWAP CACHING
 M:	Seth Jennings <sjenning@redhat.com>
+M:	Dan Streetman <ddstreet@ieee.org>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	mm/zswap.c
diff --git a/Makefile b/Makefile
index 96e2352d10a8..098840012b9b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
-NAME = Roaring Lionus
+EXTRAVERSION = -rc5
+NAME = Anniversary Edition
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index c75d29077e4a..283099c9560a 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -29,7 +29,7 @@ config ARC
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
 	select HAVE_MEMBLOCK
-	select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
+	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_OPROFILE
 	select HAVE_PERF_EVENTS
 	select HANDLE_DOMAIN_IRQ
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index b3410ff6a62d..5008021fba98 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -67,7 +67,7 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_IC_PTAG_HI	0x1F
 
 /* Bit val in IC_CTRL */
-#define IC_CTRL_CACHE_DISABLE	0x1
+#define IC_CTRL_DIS		0x1
 
 /* Data cache related Auxiliary registers */
 #define ARC_REG_DC_BCR		0x72	/* Build Config reg */
@@ -80,8 +80,9 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_DC_PTAG_HI	0x5F
 
 /* Bit val in DC_CTRL */
-#define DC_CTRL_INV_MODE_FLUSH	0x40
-#define DC_CTRL_FLUSH_STATUS	0x100
+#define DC_CTRL_DIS		0x001
+#define DC_CTRL_INV_MODE_FLUSH	0x040
+#define DC_CTRL_FLUSH_STATUS	0x100
 
 /*System-level cache (L2 cache) related Auxiliary registers */
 #define ARC_REG_SLC_CFG		0x901
@@ -92,8 +93,8 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_SLC_RGN_END	0x916
 
 /* Bit val in SLC_CONTROL */
+#define SLC_CTRL_DIS		0x001
 #define SLC_CTRL_IM		0x040
-#define SLC_CTRL_DISABLE	0x001
 #define SLC_CTRL_BUSY		0x100
 #define SLC_CTRL_RGN_OP_INV	0x200
 
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index b5ff87e6f4b7..aee1a77934cf 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -16,6 +16,7 @@
 	;
 	; Now manually save: r12, sp, fp, gp, r25
 
+	PUSH	r30
 	PUSH	r12
 
 	; Saving pt_regs->sp correctly requires some extra work due to the way
@@ -72,6 +73,7 @@
 	POPAX	AUX_USER_SP
 1:
 	POP	r12
+	POP	r30
 
 .endm
 
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
index 6e91d8b339c3..567590ea8f6c 100644
--- a/arch/arc/include/asm/module.h
+++ b/arch/arc/include/asm/module.h
@@ -14,13 +14,13 @@
 
 #include <asm-generic/module.h>
 
-#ifdef CONFIG_ARC_DW2_UNWIND
 struct mod_arch_specific {
+#ifdef CONFIG_ARC_DW2_UNWIND
 	void *unw_info;
 	int unw_sec_idx;
+#endif
 	const char *secstr;
 };
-#endif
 
 #define MODULE_PROC_FAMILY "ARC700"
 
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 69095da1fcfd..47111d565a95 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -84,7 +84,7 @@ struct pt_regs {
 	unsigned long fp;
 	unsigned long sp;	/* user/kernel sp depending on where we came from  */
 
-	unsigned long r12;
+	unsigned long r12, r30;
 
 	/*------- Below list auto saved by h/w -----------*/
 	unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index cb954cdab070..c568a9df82b1 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -31,6 +31,7 @@ extern int root_mountflags, end_mem;
 
 void setup_processor(void);
 void __init setup_arch_memory(void);
+long __init arc_get_mem_sz(void);
 
 /* Helpers used in arc_*_mumbojumbo routines */
 #define IS_AVAIL1(v, s)		((v) ? s : "")
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 994dca7014db..ecef0fb0b66c 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -77,20 +77,20 @@ void arc_init_IRQ(void)
 
 static void arcv2_irq_mask(struct irq_data *data)
 {
-	write_aux_reg(AUX_IRQ_SELECT, data->irq);
+	write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
 	write_aux_reg(AUX_IRQ_ENABLE, 0);
 }
 
 static void arcv2_irq_unmask(struct irq_data *data)
 {
-	write_aux_reg(AUX_IRQ_SELECT, data->irq);
+	write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
 	write_aux_reg(AUX_IRQ_ENABLE, 1);
 }
 
 void arcv2_irq_enable(struct irq_data *data)
 {
 	/* set default priority */
-	write_aux_reg(AUX_IRQ_SELECT, data->irq);
+	write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
 	write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
 
 	/*
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index ce9deb953ca9..8c1fd5c00782 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -57,7 +57,7 @@ static void arc_irq_mask(struct irq_data *data)
 	unsigned int ienb;
 
 	ienb = read_aux_reg(AUX_IENABLE);
-	ienb &= ~(1 << data->irq);
+	ienb &= ~(1 << data->hwirq);
 	write_aux_reg(AUX_IENABLE, ienb);
 }
 
@@ -66,7 +66,7 @@ static void arc_irq_unmask(struct irq_data *data)
 	unsigned int ienb;
 
 	ienb = read_aux_reg(AUX_IENABLE);
-	ienb |= (1 << data->irq);
+	ienb |= (1 << data->hwirq);
 	write_aux_reg(AUX_IENABLE, ienb);
 }
 
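[Editor's note -- a hedged sketch, not part of the patch: both intc fixes above replace data->irq with data->hwirq because the Linux virq is an arbitrary number allocated by the irq domain, while the AUX registers are indexed by the controller's own line number. The toy C program below models that distinction with hypothetical numbers (virq 35 mapped to hardware line 3):]

#include <stdio.h>

/* stand-in for the kernel's struct irq_data fields used above */
struct irq_data { unsigned int irq; unsigned long hwirq; };

int main(void)
{
	struct irq_data data = { .irq = 35, .hwirq = 3 };
	unsigned int ienable = 0;

	ienable |= 1u << data.hwirq;		/* correct: sets bit 3 */
	printf("IENABLE = 0x%x\n", ienable);	/* prints 0x8 */
	/* shifting by data.irq (35) would target a nonexistent bit */
	return 0;
}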
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 560c4afc2af4..9274f8ade8c7 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -10,6 +10,7 @@
 
 #include <linux/smp.h>
 #include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/spinlock.h>
 #include <soc/arc/mcip.h>
 #include <asm/irqflags-arcv2.h>
@@ -221,10 +222,13 @@ static irq_hw_number_t idu_first_hwirq;
 static void idu_cascade_isr(struct irq_desc *desc)
 {
 	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+	struct irq_chip *core_chip = irq_desc_get_chip(desc);
 	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
 	irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
 
+	chained_irq_enter(core_chip, desc);
 	generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
+	chained_irq_exit(core_chip, desc);
 }
 
 static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
index 42e964db2967..3d99a6091332 100644
--- a/arch/arc/kernel/module.c
+++ b/arch/arc/kernel/module.c
@@ -32,8 +32,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 #ifdef CONFIG_ARC_DW2_UNWIND
 	mod->arch.unw_sec_idx = 0;
 	mod->arch.unw_info = NULL;
-	mod->arch.secstr = secstr;
 #endif
+	mod->arch.secstr = secstr;
 	return 0;
 }
 
@@ -113,8 +113,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 
 	}
 
+#ifdef CONFIG_ARC_DW2_UNWIND
 	if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
 		module->arch.unw_sec_idx = tgtsec;
+#endif
 
 	return 0;
 
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index ec86ac0e3321..d408fa21a07c 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -23,7 +23,7 @@
 
 static int l2_line_sz;
 static int ioc_exists;
-int slc_enable = 1, ioc_enable = 0;
+int slc_enable = 1, ioc_enable = 1;
 unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
 unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
 
@@ -271,7 +271,11 @@ void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
 
 /*
  * For ARC700 MMUv3 I-cache and D-cache flushes
- * Also reused for HS38 aliasing I-cache configuration
+ *  - ARC700 programming model requires paddr and vaddr be passed in seperate
+ *    AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
+ *    caches actually alias or not.
+ *  - For HS38, only the aliasing I-cache configuration uses the PTAG reg
+ *    (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
  */
 static inline
 void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
@@ -458,6 +462,21 @@ static inline void __dc_entire_op(const int op)
 	__after_dc_op(op);
 }
 
+static inline void __dc_disable(void)
+{
+	const int r = ARC_REG_DC_CTRL;
+
+	__dc_entire_op(OP_FLUSH_N_INV);
+	write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
+}
+
+static void __dc_enable(void)
+{
+	const int r = ARC_REG_DC_CTRL;
+
+	write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
+}
+
 /* For kernel mappings cache operation: index is same as paddr */
 #define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)
 
@@ -483,6 +502,8 @@ static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
 #else
 
 #define __dc_entire_op(op)
+#define __dc_disable()
+#define __dc_enable()
 #define __dc_line_op(paddr, vaddr, sz, op)
 #define __dc_line_op_k(paddr, sz, op)
 
@@ -597,6 +618,40 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
 #endif
 }
 
+noinline static void slc_entire_op(const int op)
+{
+	unsigned int ctrl, r = ARC_REG_SLC_CTRL;
+
+	ctrl = read_aux_reg(r);
+
+	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
+		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
+	else
+		ctrl |= SLC_CTRL_IM;
+
+	write_aux_reg(r, ctrl);
+
+	write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
+
+	/* Important to wait for flush to complete */
+	while (read_aux_reg(r) & SLC_CTRL_BUSY);
+}
+
+static inline void arc_slc_disable(void)
+{
+	const int r = ARC_REG_SLC_CTRL;
+
+	slc_entire_op(OP_FLUSH_N_INV);
+	write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
+}
+
+static inline void arc_slc_enable(void)
+{
+	const int r = ARC_REG_SLC_CTRL;
+
+	write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
+}
+
 /***********************************************************
  * Exported APIs
  */
@@ -923,21 +978,54 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
 	return 0;
 }
 
-void arc_cache_init(void)
+/*
+ * IO-Coherency (IOC) setup rules:
+ *
+ * 1. Needs to be at system level, so only once by Master core
+ *    Non-Masters need not be accessing caches at that time
+ *    - They are either HALT_ON_RESET and kick started much later or
+ *    - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
+ *      doesn't perturb caches or coherency unit
+ *
+ * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
+ *    otherwise any straggler data might behave strangely post IOC enabling
+ *
+ * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
+ *    Coherency transactions
+ */
+noinline void __init arc_ioc_setup(void)
 {
-	unsigned int __maybe_unused cpu = smp_processor_id();
-	char str[256];
+	unsigned int ap_sz;
 
-	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+	/* Flush + invalidate + disable L1 dcache */
+	__dc_disable();
+
+	/* Flush + invalidate SLC */
+	if (read_aux_reg(ARC_REG_SLC_BCR))
+		slc_entire_op(OP_FLUSH_N_INV);
+
+	/* IOC Aperture start: TDB: handle non default CONFIG_LINUX_LINK_BASE */
+	write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
 
 	/*
-	 * Only master CPU needs to execute rest of function:
-	 *  - Assume SMP so all cores will have same cache config so
-	 *    any geomtry checks will be same for all
-	 *  - IOC setup / dma callbacks only need to be setup once
+	 * IOC Aperture size:
+	 *   decoded as 2 ^ (SIZE + 2) KB: so setting 0x11 implies 512M
+	 * TBD: fix for PGU + 1GB of low mem
+	 * TBD: fix for PAE
 	 */
-	if (cpu)
-		return;
+	ap_sz = order_base_2(arc_get_mem_sz()/1024) - 2;
+	write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, ap_sz);
+
+	write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
+	write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
+
+	/* Re-enable L1 dcache */
+	__dc_enable();
+}
+
+void __init arc_cache_init_master(void)
+{
+	unsigned int __maybe_unused cpu = smp_processor_id();
 
 	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
 		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
@@ -985,30 +1073,14 @@ void arc_cache_init(void)
 		}
 	}
 
-	if (is_isa_arcv2() && l2_line_sz && !slc_enable) {
-
-		/* IM set : flush before invalidate */
-		write_aux_reg(ARC_REG_SLC_CTRL,
-			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);
+	/* Note that SLC disable not formally supported till HS 3.0 */
+	if (is_isa_arcv2() && l2_line_sz && !slc_enable)
+		arc_slc_disable();
 
-		write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
-
-		/* Important to wait for flush to complete */
-		while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
-		write_aux_reg(ARC_REG_SLC_CTRL,
-			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
-	}
+	if (is_isa_arcv2() && ioc_enable)
+		arc_ioc_setup();
 
 	if (is_isa_arcv2() && ioc_enable) {
-		/* IO coherency base - 0x8z */
-		write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
-		/* IO coherency aperture size - 512Mb: 0x8z-0xAz */
-		write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
-		/* Enable partial writes */
-		write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
-		/* Enable IO coherency */
-		write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
-
 		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
 		__dma_cache_inv = __dma_cache_inv_ioc;
 		__dma_cache_wback = __dma_cache_wback_ioc;
@@ -1022,3 +1094,20 @@ void arc_cache_init(void)
 		__dma_cache_wback = __dma_cache_wback_l1;
 	}
 }
+
+void __ref arc_cache_init(void)
+{
+	unsigned int __maybe_unused cpu = smp_processor_id();
+	char str[256];
+
+	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+
+	/*
+	 * Only master CPU needs to execute rest of function:
+	 *  - Assume SMP so all cores will have same cache config so
+	 *    any geomtry checks will be same for all
+	 *  - IOC setup / dma callbacks only need to be setup once
+	 */
+	if (!cpu)
+		arc_cache_init_master();
+}
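[Editor's note -- worked example, not part of the patch: arc_ioc_setup() above derives the IOC aperture size field from the memory size, where the hardware decodes the field as 2^(SIZE + 2) KB. A self-contained userspace C check, under the assumption that the region size is an exact power of two, that the formula reproduces the 0x11 value the old hard-coded 512 MB setup used:]

#include <stdio.h>

/* userspace model of order_base_2(mem_kb) - 2 as used in arc_ioc_setup() */
static unsigned int ioc_ap_size(unsigned long mem_bytes)
{
	unsigned long kb = mem_bytes / 1024;
	unsigned int order = 0;

	while ((1UL << order) < kb)	/* order_base_2() for exact powers */
		order++;
	return order - 2;
}

int main(void)
{
	/* 512 MB = 2^19 KB, so SIZE = 19 - 2 = 17 = 0x11 */
	printf("SIZE field for 512 MB: 0x%x\n", ioc_ap_size(512UL << 20));
	return 0;
}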
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 399e2f223d25..8c9415ed6280 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -40,6 +40,11 @@ struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 #endif
 
+long __init arc_get_mem_sz(void)
+{
+	return low_mem_sz;
+}
+
 /* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
 static int __init setup_mem_sz(char *str)
 {
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 7327250f0bb6..f10fe8526239 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -846,6 +846,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
 	sun8i-a83t-allwinner-h8homlet-v2.dtb \
 	sun8i-a83t-cubietruck-plus.dtb \
 	sun8i-h3-bananapi-m2-plus.dtb \
+	sun8i-h3-nanopi-m1.dtb \
 	sun8i-h3-nanopi-neo.dtb \
 	sun8i-h3-orangepi-2.dtb \
 	sun8i-h3-orangepi-lite.dtb \
diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
index 1463df3b5b19..8ed46f9d79b7 100644
--- a/arch/arm/boot/dts/am335x-icev2.dts
+++ b/arch/arm/boot/dts/am335x-icev2.dts
@@ -170,7 +170,6 @@
 			AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* (G16) mmc0_dat0.mmc0_dat0 */
 			AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* (G17) mmc0_clk.mmc0_clk */
 			AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* (G18) mmc0_cmd.mmc0_cmd */
-			AM33XX_IOPAD(0x960, PIN_INPUT_PULLUP | MUX_MODE5) /* (C15) spi0_cs1.mmc0_sdcd */
 		>;
 	};
 
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index b6142bda661e..15f07f9af3b3 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -160,7 +160,7 @@
160 160
161 axi { 161 axi {
162 compatible = "simple-bus"; 162 compatible = "simple-bus";
163 ranges = <0x00000000 0x18000000 0x0011c40a>; 163 ranges = <0x00000000 0x18000000 0x0011c40c>;
164 #address-cells = <1>; 164 #address-cells = <1>;
165 #size-cells = <1>; 165 #size-cells = <1>;
166 166
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index 41de15fe15a2..78492a0bbbab 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -99,6 +99,7 @@
99 #size-cells = <1>; 99 #size-cells = <1>;
100 compatible = "m25p64"; 100 compatible = "m25p64";
101 spi-max-frequency = <30000000>; 101 spi-max-frequency = <30000000>;
102 m25p,fast-read;
102 reg = <0>; 103 reg = <0>;
103 partition@0 { 104 partition@0 {
104 label = "U-Boot-SPL"; 105 label = "U-Boot-SPL";
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 3a8579cb8dd9..3e1f75026eac 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1378,6 +1378,7 @@
1378 phy-names = "sata-phy"; 1378 phy-names = "sata-phy";
1379 clocks = <&sata_ref_clk>; 1379 clocks = <&sata_ref_clk>;
1380 ti,hwmods = "sata"; 1380 ti,hwmods = "sata";
1381 ports-implemented = <0x1>;
1381 }; 1382 };
1382 1383
1383 rtc: rtc@48838000 { 1384 rtc: rtc@48838000 {
diff --git a/arch/arm/boot/dts/dra72-evm-revc.dts b/arch/arm/boot/dts/dra72-evm-revc.dts
index c3d939c9666c..3f808a47df03 100644
--- a/arch/arm/boot/dts/dra72-evm-revc.dts
+++ b/arch/arm/boot/dts/dra72-evm-revc.dts
@@ -75,6 +75,6 @@
75 ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>; 75 ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
76 ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>; 76 ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>;
77 ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>; 77 ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
78 ti,min-output-imepdance; 78 ti,min-output-impedance;
79 }; 79 };
80}; 80};
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
index 34887a10c5f1..47ba97229a48 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
@@ -319,8 +319,6 @@
319 compatible = "fsl,imx6q-nitrogen6_max-sgtl5000", 319 compatible = "fsl,imx6q-nitrogen6_max-sgtl5000",
320 "fsl,imx-audio-sgtl5000"; 320 "fsl,imx-audio-sgtl5000";
321 model = "imx6q-nitrogen6_max-sgtl5000"; 321 model = "imx6q-nitrogen6_max-sgtl5000";
322 pinctrl-names = "default";
323 pinctrl-0 = <&pinctrl_sgtl5000>;
324 ssi-controller = <&ssi1>; 322 ssi-controller = <&ssi1>;
325 audio-codec = <&codec>; 323 audio-codec = <&codec>;
326 audio-routing = 324 audio-routing =
@@ -402,6 +400,8 @@
402 400
403 codec: sgtl5000@0a { 401 codec: sgtl5000@0a {
404 compatible = "fsl,sgtl5000"; 402 compatible = "fsl,sgtl5000";
403 pinctrl-names = "default";
404 pinctrl-0 = <&pinctrl_sgtl5000>;
405 reg = <0x0a>; 405 reg = <0x0a>;
406 clocks = <&clks IMX6QDL_CLK_CKO>; 406 clocks = <&clks IMX6QDL_CLK_CKO>;
407 VDDA-supply = <&reg_2p5v>; 407 VDDA-supply = <&reg_2p5v>;
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
index d80f21abea62..31d4cc62dbc7 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
@@ -250,8 +250,6 @@
250 compatible = "fsl,imx6q-nitrogen6_som2-sgtl5000", 250 compatible = "fsl,imx6q-nitrogen6_som2-sgtl5000",
251 "fsl,imx-audio-sgtl5000"; 251 "fsl,imx-audio-sgtl5000";
252 model = "imx6q-nitrogen6_som2-sgtl5000"; 252 model = "imx6q-nitrogen6_som2-sgtl5000";
253 pinctrl-names = "default";
254 pinctrl-0 = <&pinctrl_sgtl5000>;
255 ssi-controller = <&ssi1>; 253 ssi-controller = <&ssi1>;
256 audio-codec = <&codec>; 254 audio-codec = <&codec>;
257 audio-routing = 255 audio-routing =
@@ -320,6 +318,8 @@
320 318
321 codec: sgtl5000@0a { 319 codec: sgtl5000@0a {
322 compatible = "fsl,sgtl5000"; 320 compatible = "fsl,sgtl5000";
321 pinctrl-names = "default";
322 pinctrl-0 = <&pinctrl_sgtl5000>;
323 reg = <0x0a>; 323 reg = <0x0a>;
324 clocks = <&clks IMX6QDL_CLK_CKO>; 324 clocks = <&clks IMX6QDL_CLK_CKO>;
325 VDDA-supply = <&reg_2p5v>; 325 VDDA-supply = <&reg_2p5v>;
diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
index da8598402ab8..38faa90007d7 100644
--- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
@@ -158,7 +158,7 @@
158&mmc1 { 158&mmc1 {
159 interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>; 159 interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>;
160 pinctrl-names = "default"; 160 pinctrl-names = "default";
161 pinctrl-0 = <&mmc1_pins &mmc1_cd>; 161 pinctrl-0 = <&mmc1_pins>;
162 wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */ 162 wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */
163 cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>; /* gpio_110 */ 163 cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>; /* gpio_110 */
164 vmmc-supply = <&vmmc1>; 164 vmmc-supply = <&vmmc1>;
@@ -193,7 +193,8 @@
193 OMAP3_CORE1_IOPAD(0x214a, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */ 193 OMAP3_CORE1_IOPAD(0x214a, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */
194 OMAP3_CORE1_IOPAD(0x214c, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */ 194 OMAP3_CORE1_IOPAD(0x214c, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */
195 OMAP3_CORE1_IOPAD(0x214e, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */ 195 OMAP3_CORE1_IOPAD(0x214e, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */
196 OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 sdmmc1_wp*/ 196 OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 */
197 OMAP3_CORE1_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */
197 >; 198 >;
198 }; 199 };
199 200
@@ -242,12 +243,6 @@
242 OMAP3_WKUP_IOPAD(0x2a16, PIN_OUTPUT | PIN_OFF_OUTPUT_LOW | MUX_MODE4) /* sys_boot6.gpio_8 */ 243 OMAP3_WKUP_IOPAD(0x2a16, PIN_OUTPUT | PIN_OFF_OUTPUT_LOW | MUX_MODE4) /* sys_boot6.gpio_8 */
243 >; 244 >;
244 }; 245 };
245
246 mmc1_cd: pinmux_mmc1_cd {
247 pinctrl-single,pins = <
248 OMAP3_WKUP_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */
249 >;
250 };
251}; 246};
252 247
253 248
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 7cd92babc41a..0844737b72b2 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -988,6 +988,7 @@
988 phy-names = "sata-phy"; 988 phy-names = "sata-phy";
989 clocks = <&sata_ref_clk>; 989 clocks = <&sata_ref_clk>;
990 ti,hwmods = "sata"; 990 ti,hwmods = "sata";
991 ports-implemented = <0x1>;
991 }; 992 };
992 993
993 dss: dss@58000000 { 994 dss: dss@58000000 {
diff --git a/arch/arm/boot/dts/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom-mdm9615.dtsi
index 5ae4ec59e6ea..c852b69229c9 100644
--- a/arch/arm/boot/dts/qcom-mdm9615.dtsi
+++ b/arch/arm/boot/dts/qcom-mdm9615.dtsi
@@ -357,7 +357,7 @@
357 }; 357 };
358 358
359 amba { 359 amba {
360 compatible = "arm,amba-bus"; 360 compatible = "simple-bus";
361 #address-cells = <1>; 361 #address-cells = <1>;
362 #size-cells = <1>; 362 #size-cells = <1>;
363 ranges; 363 ranges;
diff --git a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
index 735914f6ae44..7cae328398b1 100644
--- a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
+++ b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
@@ -140,6 +140,10 @@
140 cpu-supply = <&reg_dcdc3>; 140 cpu-supply = <&reg_dcdc3>;
141}; 141};
142 142
143&de {
144 status = "okay";
145};
146
143&ehci0 { 147&ehci0 {
144 status = "okay"; 148 status = "okay";
145}; 149};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 2b26175d55d1..e78faaf9243c 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -234,6 +234,7 @@
234 de: display-engine { 234 de: display-engine {
235 compatible = "allwinner,sun6i-a31-display-engine"; 235 compatible = "allwinner,sun6i-a31-display-engine";
236 allwinner,pipelines = <&fe0>; 236 allwinner,pipelines = <&fe0>;
237 status = "disabled";
237 }; 238 };
238 239
239 soc@01c00000 { 240 soc@01c00000 {
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
index 5ea4915f6d75..10d307408f23 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
@@ -56,7 +56,7 @@
56}; 56};
57 57
58&pio { 58&pio {
59 mmc2_pins_nrst: mmc2@0 { 59 mmc2_pins_nrst: mmc2-rst-pin {
60 allwinner,pins = "PC16"; 60 allwinner,pins = "PC16";
61 allwinner,function = "gpio_out"; 61 allwinner,function = "gpio_out";
62 allwinner,drive = <SUN4I_PINCTRL_10_MA>; 62 allwinner,drive = <SUN4I_PINCTRL_10_MA>;
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 64f4419f14e8..b416abcbacd8 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -472,7 +472,7 @@ CONFIG_MESON_WATCHDOG=y
472CONFIG_DW_WATCHDOG=y 472CONFIG_DW_WATCHDOG=y
473CONFIG_DIGICOLOR_WATCHDOG=y 473CONFIG_DIGICOLOR_WATCHDOG=y
474CONFIG_BCM2835_WDT=y 474CONFIG_BCM2835_WDT=y
475CONFIG_BCM47XX_WATCHDOG=y 475CONFIG_BCM47XX_WDT=y
476CONFIG_BCM7038_WDT=m 476CONFIG_BCM7038_WDT=m
477CONFIG_BCM_KONA_WDT=y 477CONFIG_BCM_KONA_WDT=y
478CONFIG_MFD_ACT8945A=y 478CONFIG_MFD_ACT8945A=y
@@ -894,7 +894,7 @@ CONFIG_BCM2835_MBOX=y
894CONFIG_RASPBERRYPI_FIRMWARE=y 894CONFIG_RASPBERRYPI_FIRMWARE=y
895CONFIG_EFI_VARS=m 895CONFIG_EFI_VARS=m
896CONFIG_EFI_CAPSULE_LOADER=m 896CONFIG_EFI_CAPSULE_LOADER=m
897CONFIG_CONFIG_BCM47XX_NVRAM=y 897CONFIG_BCM47XX_NVRAM=y
898CONFIG_BCM47XX_SPROM=y 898CONFIG_BCM47XX_SPROM=y
899CONFIG_EXT4_FS=y 899CONFIG_EXT4_FS=y
900CONFIG_AUTOFS4_FS=y 900CONFIG_AUTOFS4_FS=y
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 4364040ed696..1e6c48dd7b11 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -86,9 +86,9 @@ CONFIG_IPV6_TUNNEL=m
86CONFIG_NETFILTER=y 86CONFIG_NETFILTER=y
87CONFIG_NF_CONNTRACK=m 87CONFIG_NF_CONNTRACK=m
88CONFIG_NF_CONNTRACK_EVENTS=y 88CONFIG_NF_CONNTRACK_EVENTS=y
89CONFIG_NF_CT_PROTO_DCCP=m 89CONFIG_NF_CT_PROTO_DCCP=y
90CONFIG_NF_CT_PROTO_SCTP=m 90CONFIG_NF_CT_PROTO_SCTP=y
91CONFIG_NF_CT_PROTO_UDPLITE=m 91CONFIG_NF_CT_PROTO_UDPLITE=y
92CONFIG_NF_CONNTRACK_AMANDA=m 92CONFIG_NF_CONNTRACK_AMANDA=m
93CONFIG_NF_CONNTRACK_FTP=m 93CONFIG_NF_CONNTRACK_FTP=m
94CONFIG_NF_CONNTRACK_H323=m 94CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 522b5feb4eaa..b62eaeb147aa 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -94,6 +94,9 @@
94#define ARM_CPU_XSCALE_ARCH_V2 0x4000 94#define ARM_CPU_XSCALE_ARCH_V2 0x4000
95#define ARM_CPU_XSCALE_ARCH_V3 0x6000 95#define ARM_CPU_XSCALE_ARCH_V3 0x6000
96 96
97/* Qualcomm implemented cores */
98#define ARM_CPU_PART_SCORPION 0x510002d0
99
97extern unsigned int processor_id; 100extern unsigned int processor_id;
98 101
99#ifdef CONFIG_CPU_CP15 102#ifdef CONFIG_CPU_CP15
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index bfe2a2f5a644..22b73112b75f 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level)
54 54
55#define ftrace_return_address(n) return_address(n) 55#define ftrace_return_address(n) return_address(n)
56 56
57#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
58
59static inline bool arch_syscall_match_sym_name(const char *sym,
60 const char *name)
61{
62 if (!strcmp(sym, "sys_mmap2"))
63 sym = "sys_mmap_pgoff";
64 else if (!strcmp(sym, "sys_statfs64_wrapper"))
65 sym = "sys_statfs64";
66 else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
67 sym = "sys_fstatfs64";
68 else if (!strcmp(sym, "sys_arm_fadvise64_64"))
69 sym = "sys_fadvise64_64";
70
71 /* Ignore case since sym may start with "SyS" instead of "sys" */
72 return !strcasecmp(sym, name);
73}
74
57#endif /* ifndef __ASSEMBLY__ */ 75#endif /* ifndef __ASSEMBLY__ */
58 76
59#endif /* _ASM_ARM_FTRACE */ 77#endif /* _ASM_ARM_FTRACE */
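
The arch_syscall_match_sym_name() hook added above lets ftrace match ARM's wrapped syscall symbols against their generic names, then compares case-insensitively so "SyS_"-prefixed aliases still match. A standalone sketch of that matching logic (only the mmap2 mapping is reproduced here):

/* gcc match_demo.c && ./a.out */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

static bool match_sym_name(const char *sym, const char *name)
{
	if (!strcmp(sym, "sys_mmap2"))
		sym = "sys_mmap_pgoff";		/* ARM wrapper -> generic name */

	/* ignore case, since sym may start with "SyS" instead of "sys" */
	return !strcasecmp(sym, name);
}

int main(void)
{
	printf("%d\n", match_sym_name("sys_mmap2", "SyS_mmap_pgoff"));	/* 1 */
	printf("%d\n", match_sym_name("SyS_read", "sys_read"));		/* 1 */
	printf("%d\n", match_sym_name("sys_open", "sys_close"));	/* 0 */
	return 0;
}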
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index a2e75b84e2ae..6dae1956c74d 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -80,6 +80,11 @@ static inline bool is_kernel_in_hyp_mode(void)
80 return false; 80 return false;
81} 81}
82 82
83static inline bool has_vhe(void)
84{
85 return false;
86}
87
83/* The section containing the hypervisor idmap text */ 88/* The section containing the hypervisor idmap text */
84extern char __hyp_idmap_text_start[]; 89extern char __hyp_idmap_text_start[];
85extern char __hyp_idmap_text_end[]; 90extern char __hyp_idmap_text_end[];
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/uapi/asm/types.h
index a53cdb8f068c..9435a42f575e 100644
--- a/arch/arm/include/asm/types.h
+++ b/arch/arm/include/uapi/asm/types.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_TYPES_H 1#ifndef _UAPI_ASM_TYPES_H
2#define _ASM_TYPES_H 2#define _UAPI_ASM_TYPES_H
3 3
4#include <asm-generic/int-ll64.h> 4#include <asm-generic/int-ll64.h>
5 5
@@ -37,4 +37,4 @@
37#define __UINTPTR_TYPE__ unsigned long 37#define __UINTPTR_TYPE__ unsigned long
38#endif 38#endif
39 39
40#endif /* _ASM_TYPES_H */ 40#endif /* _UAPI_ASM_TYPES_H */
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 188180b5523d..be3b3fbd382f 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1063,6 +1063,22 @@ static int __init arch_hw_breakpoint_init(void)
1063 return 0; 1063 return 0;
1064 } 1064 }
1065 1065
1066 /*
1067 * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
1068 * whenever a WFI is issued, even if the core is not powered down, in
1069 * violation of the architecture. When DBGPRSR.SPD is set, accesses to
1070 * breakpoint and watchpoint registers are treated as undefined, so
1071 * this results in boot time and runtime failures when these are
1072 * accessed and we unexpectedly take a trap.
1073 *
1074 * It's not clear if/how this can be worked around, so we blacklist
1075 * Scorpion CPUs to avoid these issues.
1076 */
1077 if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
1078 pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
1079 return 0;
1080 }
1081
1066 has_ossr = core_has_os_save_restore(); 1082 has_ossr = core_has_os_save_restore();
1067 1083
1068 /* Determine how many BRPs/WRPs are available. */ 1084 /* Determine how many BRPs/WRPs are available. */
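
The early-out added above keys off the MIDR part number. As a rough model of the comparison (the 0xff00fff0 implementer-plus-part mask is an assumption about how read_cpuid_part() works on 32-bit ARM, not something taken from this hunk):

/* gcc midr_demo.c && ./a.out */
#include <stdint.h>
#include <stdio.h>

#define CPU_PART_MASK		0xff00fff0u	/* implementer [31:24] | part [15:4] */
#define CPU_PART_SCORPION	0x510002d0u	/* ARM_CPU_PART_SCORPION from the patch */

int main(void)
{
	uint32_t midr = 0x510002d5;	/* illustrative MIDR: Scorpion, some revision */

	if ((midr & CPU_PART_MASK) == CPU_PART_SCORPION)
		puts("Scorpion detected: skip HW breakpoint init");
	return 0;
}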
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 22313cb53362..9af0701f7094 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -9,6 +9,7 @@
9 */ 9 */
10#include <linux/preempt.h> 10#include <linux/preempt.h>
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/uaccess.h>
12 13
13#include <asm/smp_plat.h> 14#include <asm/smp_plat.h>
14#include <asm/tlbflush.h> 15#include <asm/tlbflush.h>
@@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg)
40static inline void ipi_flush_tlb_page(void *arg) 41static inline void ipi_flush_tlb_page(void *arg)
41{ 42{
42 struct tlb_args *ta = (struct tlb_args *)arg; 43 struct tlb_args *ta = (struct tlb_args *)arg;
44 unsigned int __ua_flags = uaccess_save_and_enable();
43 45
44 local_flush_tlb_page(ta->ta_vma, ta->ta_start); 46 local_flush_tlb_page(ta->ta_vma, ta->ta_start);
47
48 uaccess_restore(__ua_flags);
45} 49}
46 50
47static inline void ipi_flush_tlb_kernel_page(void *arg) 51static inline void ipi_flush_tlb_kernel_page(void *arg)
@@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg)
54static inline void ipi_flush_tlb_range(void *arg) 58static inline void ipi_flush_tlb_range(void *arg)
55{ 59{
56 struct tlb_args *ta = (struct tlb_args *)arg; 60 struct tlb_args *ta = (struct tlb_args *)arg;
61 unsigned int __ua_flags = uaccess_save_and_enable();
57 62
58 local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); 63 local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
64
65 uaccess_restore(__ua_flags);
59} 66}
60 67
61static inline void ipi_flush_tlb_kernel_range(void *arg) 68static inline void ipi_flush_tlb_kernel_range(void *arg)
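
Each broadcast TLB flush is now bracketed by uaccess_save_and_enable()/uaccess_restore(), so the flush runs with user access enabled and whatever state the interrupted context had is put back afterwards. A toy model of that save/restore discipline (stand-ins only, not the real per-CPU helpers):

/* gcc uaccess_demo.c && ./a.out */
#include <stdio.h>

static unsigned int uaccess_state;	/* stands in for the CPU's uaccess flags */

static unsigned int uaccess_save_and_enable_demo(void)
{
	unsigned int old = uaccess_state;

	uaccess_state = 1;		/* enable user access for the operation */
	return old;
}

static void uaccess_restore_demo(unsigned int old)
{
	uaccess_state = old;		/* restore exactly what the caller had */
}

static void ipi_flush_demo(void)
{
	unsigned int flags = uaccess_save_and_enable_demo();

	printf("flush runs with uaccess=%u\n", uaccess_state);
	uaccess_restore_demo(flags);
}

int main(void)
{
	ipi_flush_demo();
	printf("after: uaccess=%u\n", uaccess_state);	/* back to 0 */
	return 0;
}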
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 11676787ad49..9d7446456e0c 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1099,6 +1099,9 @@ static void cpu_init_hyp_mode(void *dummy)
1099 __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr); 1099 __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
1100 __cpu_init_stage2(); 1100 __cpu_init_stage2();
1101 1101
1102 if (is_kernel_in_hyp_mode())
1103 kvm_timer_init_vhe();
1104
1102 kvm_arm_init_debug(); 1105 kvm_arm_init_debug();
1103} 1106}
1104 1107
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
index f6ba589cd312..c821c1d5610e 100644
--- a/arch/arm/mach-omap1/dma.c
+++ b/arch/arm/mach-omap1/dma.c
@@ -32,7 +32,6 @@
32#include "soc.h" 32#include "soc.h"
33 33
34#define OMAP1_DMA_BASE (0xfffed800) 34#define OMAP1_DMA_BASE (0xfffed800)
35#define OMAP1_LOGICAL_DMA_CH_COUNT 17
36 35
37static u32 enable_1510_mode; 36static u32 enable_1510_mode;
38 37
@@ -348,8 +347,6 @@ static int __init omap1_system_dma_init(void)
348 goto exit_iounmap; 347 goto exit_iounmap;
349 } 348 }
350 349
351 d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
352
353 /* Valid attributes for omap1 plus processors */ 350 /* Valid attributes for omap1 plus processors */
354 if (cpu_is_omap15xx()) 351 if (cpu_is_omap15xx())
355 d->dev_caps = ENABLE_1510_MODE; 352 d->dev_caps = ENABLE_1510_MODE;
@@ -366,13 +363,14 @@ static int __init omap1_system_dma_init(void)
366 d->dev_caps |= CLEAR_CSR_ON_READ; 363 d->dev_caps |= CLEAR_CSR_ON_READ;
367 d->dev_caps |= IS_WORD_16; 364 d->dev_caps |= IS_WORD_16;
368 365
369 if (cpu_is_omap15xx()) 366 /* available logical channels */
370 d->chan_count = 9; 367 if (cpu_is_omap15xx()) {
371 else if (cpu_is_omap16xx() || cpu_is_omap7xx()) { 368 d->lch_count = 9;
372 if (!(d->dev_caps & ENABLE_1510_MODE)) 369 } else {
373 d->chan_count = 16; 370 if (d->dev_caps & ENABLE_1510_MODE)
371 d->lch_count = 9;
374 else 372 else
375 d->chan_count = 9; 373 d->lch_count = 16;
376 } 374 }
377 375
378 p = dma_plat_info; 376 p = dma_plat_info;
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 477910a48448..70c004794880 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -161,7 +161,7 @@ static struct ti_st_plat_data wilink7_pdata = {
161 .nshutdown_gpio = 162, 161 .nshutdown_gpio = 162,
162 .dev_name = "/dev/ttyO1", 162 .dev_name = "/dev/ttyO1",
163 .flow_cntrl = 1, 163 .flow_cntrl = 1,
164 .baud_rate = 300000, 164 .baud_rate = 3000000,
165}; 165};
166 166
167static struct platform_device wl128x_device = { 167static struct platform_device wl128x_device = {
diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
index 8538910db202..a970e7fcba9e 100644
--- a/arch/arm/mach-ux500/pm.c
+++ b/arch/arm/mach-ux500/pm.c
@@ -134,8 +134,8 @@ bool prcmu_pending_irq(void)
134 */ 134 */
135bool prcmu_is_cpu_in_wfi(int cpu) 135bool prcmu_is_cpu_in_wfi(int cpu)
136{ 136{
137 return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : 137 return readl(PRCM_ARM_WFI_STANDBY) &
138 PRCM_ARM_WFI_STANDBY_WFI0; 138 (cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
139} 139}
140 140
141/* 141/*
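
The old return statement was an operator-precedence bug: '&' binds tighter than '?:', so it computed (readl(...) & cpu) and then returned one of the two nonzero mask constants, which made CPU 0 always look like it was in WFI. The fix selects the mask first and only then ANDs it with the register. A compilable demonstration (the mask values are illustrative, not the real PRCM bit positions):

/* gcc prec_demo.c && ./a.out */
#include <stdio.h>

#define WFI0 0x01	/* stand-in for PRCM_ARM_WFI_STANDBY_WFI0 */
#define WFI1 0x02	/* stand-in for PRCM_ARM_WFI_STANDBY_WFI1 */

int main(void)
{
	unsigned int reg = 0;	/* no core is actually in WFI */
	int cpu = 0;

	unsigned int buggy = reg & cpu ? WFI1 : WFI0;	/* == WFI0: falsely "in WFI" */
	unsigned int fixed = reg & (cpu ? WFI1 : WFI0);	/* == 0: correct */

	printf("buggy=%#x fixed=%#x\n", buggy, fixed);
	return 0;
}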
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index 238fbeacd330..5d28e1cdc998 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -137,6 +137,10 @@
137 }; 137 };
138}; 138};
139 139
140&scpi_clocks {
141 status = "disabled";
142};
143
140&uart_AO { 144&uart_AO {
141 status = "okay"; 145 status = "okay";
142 pinctrl-0 = <&uart_ao_a_pins>; 146 pinctrl-0 = <&uart_ao_a_pins>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index 596240c38a9c..b35307321b63 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -55,7 +55,7 @@
55 mboxes = <&mailbox 1 &mailbox 2>; 55 mboxes = <&mailbox 1 &mailbox 2>;
56 shmem = <&cpu_scp_lpri &cpu_scp_hpri>; 56 shmem = <&cpu_scp_lpri &cpu_scp_hpri>;
57 57
58 clocks { 58 scpi_clocks: clocks {
59 compatible = "arm,scpi-clocks"; 59 compatible = "arm,scpi-clocks";
60 60
61 scpi_dvfs: scpi_clocks@0 { 61 scpi_dvfs: scpi_clocks@0 {
diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
index 64226d5ae471..135890cd8a85 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
@@ -1367,7 +1367,7 @@
1367 }; 1367 };
1368 1368
1369 amba { 1369 amba {
1370 compatible = "arm,amba-bus"; 1370 compatible = "simple-bus";
1371 #address-cells = <1>; 1371 #address-cells = <1>;
1372 #size-cells = <1>; 1372 #size-cells = <1>;
1373 ranges; 1373 ranges;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
index 358089687a69..ef1b9e573af0 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
@@ -27,7 +27,7 @@
27 stdout-path = "serial0:115200n8"; 27 stdout-path = "serial0:115200n8";
28 }; 28 };
29 29
30 memory { 30 memory@0 {
31 device_type = "memory"; 31 device_type = "memory";
32 reg = <0x0 0x0 0x0 0x40000000>; 32 reg = <0x0 0x0 0x0 0x40000000>;
33 }; 33 };
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 68a908334c7b..54dc28351c8c 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -72,7 +72,7 @@
72 <1 10 0xf08>; 72 <1 10 0xf08>;
73 }; 73 };
74 74
75 amba_apu { 75 amba_apu: amba_apu@0 {
76 compatible = "simple-bus"; 76 compatible = "simple-bus";
77 #address-cells = <2>; 77 #address-cells = <2>;
78 #size-cells = <1>; 78 #size-cells = <1>;
@@ -175,7 +175,7 @@
175 }; 175 };
176 176
177 i2c0: i2c@ff020000 { 177 i2c0: i2c@ff020000 {
178 compatible = "cdns,i2c-r1p10"; 178 compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
179 status = "disabled"; 179 status = "disabled";
180 interrupt-parent = <&gic>; 180 interrupt-parent = <&gic>;
181 interrupts = <0 17 4>; 181 interrupts = <0 17 4>;
@@ -185,7 +185,7 @@
185 }; 185 };
186 186
187 i2c1: i2c@ff030000 { 187 i2c1: i2c@ff030000 {
188 compatible = "cdns,i2c-r1p10"; 188 compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
189 status = "disabled"; 189 status = "disabled";
190 interrupt-parent = <&gic>; 190 interrupt-parent = <&gic>;
191 interrupts = <0 18 4>; 191 interrupts = <0 18 4>;
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index bfe632808d77..90c39a662379 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -222,7 +222,7 @@ static inline void *phys_to_virt(phys_addr_t x)
222#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 222#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
223#else 223#else
224#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page)) 224#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
225#define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) 225#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
226 226
227#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET)) 227#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
228#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START)) 228#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index fea10736b11f..439f6b5d31f6 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -47,6 +47,7 @@
47#include <asm/ptrace.h> 47#include <asm/ptrace.h>
48#include <asm/sections.h> 48#include <asm/sections.h>
49#include <asm/sysreg.h> 49#include <asm/sysreg.h>
50#include <asm/cpufeature.h>
50 51
51/* 52/*
52 * __boot_cpu_mode records what mode CPUs were booted in. 53 * __boot_cpu_mode records what mode CPUs were booted in.
@@ -80,6 +81,14 @@ static inline bool is_kernel_in_hyp_mode(void)
80 return read_sysreg(CurrentEL) == CurrentEL_EL2; 81 return read_sysreg(CurrentEL) == CurrentEL_EL2;
81} 82}
82 83
84static inline bool has_vhe(void)
85{
86 if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
87 return true;
88
89 return false;
90}
91
83#ifdef CONFIG_ARM64_VHE 92#ifdef CONFIG_ARM64_VHE
84extern void verify_cpu_run_el(void); 93extern void verify_cpu_run_el(void);
85#else 94#else
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index b5c3933ed441..d1ff83dfe5de 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -77,6 +77,7 @@ struct user_fpsimd_state {
77 __uint128_t vregs[32]; 77 __uint128_t vregs[32];
78 __u32 fpsr; 78 __u32 fpsr;
79 __u32 fpcr; 79 __u32 fpcr;
80 __u32 __reserved[2];
80}; 81};
81 82
82struct user_hwdebug_state { 83struct user_hwdebug_state {
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 923841ffe4a9..43512d4d7df2 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -683,7 +683,7 @@ el0_inv:
683 mov x0, sp 683 mov x0, sp
684 mov x1, #BAD_SYNC 684 mov x1, #BAD_SYNC
685 mov x2, x25 685 mov x2, x25
686 bl bad_mode 686 bl bad_el0_sync
687 b ret_to_user 687 b ret_to_user
688ENDPROC(el0_sync) 688ENDPROC(el0_sync)
689 689
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index fc35e06ccaac..a22161ccf447 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -551,6 +551,8 @@ static int hw_break_set(struct task_struct *target,
551 /* (address, ctrl) registers */ 551 /* (address, ctrl) registers */
552 limit = regset->n * regset->size; 552 limit = regset->n * regset->size;
553 while (count && offset < limit) { 553 while (count && offset < limit) {
554 if (count < PTRACE_HBP_ADDR_SZ)
555 return -EINVAL;
554 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr, 556 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
555 offset, offset + PTRACE_HBP_ADDR_SZ); 557 offset, offset + PTRACE_HBP_ADDR_SZ);
556 if (ret) 558 if (ret)
@@ -560,6 +562,8 @@ static int hw_break_set(struct task_struct *target,
560 return ret; 562 return ret;
561 offset += PTRACE_HBP_ADDR_SZ; 563 offset += PTRACE_HBP_ADDR_SZ;
562 564
565 if (!count)
566 break;
563 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 567 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
564 offset, offset + PTRACE_HBP_CTRL_SZ); 568 offset, offset + PTRACE_HBP_CTRL_SZ);
565 if (ret) 569 if (ret)
@@ -596,7 +600,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
596 const void *kbuf, const void __user *ubuf) 600 const void *kbuf, const void __user *ubuf)
597{ 601{
598 int ret; 602 int ret;
599 struct user_pt_regs newregs; 603 struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
600 604
601 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1); 605 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
602 if (ret) 606 if (ret)
@@ -626,7 +630,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
626 const void *kbuf, const void __user *ubuf) 630 const void *kbuf, const void __user *ubuf)
627{ 631{
628 int ret; 632 int ret;
629 struct user_fpsimd_state newstate; 633 struct user_fpsimd_state newstate =
634 target->thread.fpsimd_state.user_fpsimd;
630 635
631 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1); 636 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
632 if (ret) 637 if (ret)
@@ -650,7 +655,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
650 const void *kbuf, const void __user *ubuf) 655 const void *kbuf, const void __user *ubuf)
651{ 656{
652 int ret; 657 int ret;
653 unsigned long tls; 658 unsigned long tls = target->thread.tp_value;
654 659
655 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); 660 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
656 if (ret) 661 if (ret)
@@ -676,7 +681,8 @@ static int system_call_set(struct task_struct *target,
676 unsigned int pos, unsigned int count, 681 unsigned int pos, unsigned int count,
677 const void *kbuf, const void __user *ubuf) 682 const void *kbuf, const void __user *ubuf)
678{ 683{
679 int syscallno, ret; 684 int syscallno = task_pt_regs(target)->syscallno;
685 int ret;
680 686
681 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1); 687 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
682 if (ret) 688 if (ret)
@@ -948,7 +954,7 @@ static int compat_tls_set(struct task_struct *target,
948 const void __user *ubuf) 954 const void __user *ubuf)
949{ 955{
950 int ret; 956 int ret;
951 compat_ulong_t tls; 957 compat_ulong_t tls = target->thread.tp_value;
952 958
953 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); 959 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
954 if (ret) 960 if (ret)
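
All of the *_set() fixes above follow one pattern: seed the on-stack state from the task's current values before user_regset_copyin() runs, so a short write updates only the bytes the caller supplied instead of committing uninitialized stack for the rest (hw_break_set() additionally rejects writes shorter than one address field). A userspace sketch of that read-modify-write idea:

/* gcc regset_demo.c && ./a.out */
#include <stdio.h>
#include <string.h>

struct regs { unsigned long r[4]; };

static struct regs task_regs = { { 1, 2, 3, 4 } };	/* the task's current state */

static void set_regs(const void *buf, size_t count)
{
	struct regs newregs = task_regs;	/* the fix: start from current values */

	memcpy(&newregs, buf, count);		/* partial write from the caller */
	task_regs = newregs;			/* commit the whole struct */
}

int main(void)
{
	unsigned long first = 99;

	set_regs(&first, sizeof(first));	/* update only r[0] */
	printf("%lu %lu %lu %lu\n", task_regs.r[0], task_regs.r[1],
	       task_regs.r[2], task_regs.r[3]);	/* 99 2 3 4 */
	return 0;
}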
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5b830be79c01..659b2e6b6cf7 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -604,17 +604,34 @@ const char *esr_get_class_string(u32 esr)
604} 604}
605 605
606/* 606/*
607 * bad_mode handles the impossible case in the exception vector. 607 * bad_mode handles the impossible case in the exception vector. This is always
608 * fatal.
608 */ 609 */
609asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) 610asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
610{ 611{
611 siginfo_t info;
612 void __user *pc = (void __user *)instruction_pointer(regs);
613 console_verbose(); 612 console_verbose();
614 613
615 pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n", 614 pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
616 handler[reason], smp_processor_id(), esr, 615 handler[reason], smp_processor_id(), esr,
617 esr_get_class_string(esr)); 616 esr_get_class_string(esr));
617
618 die("Oops - bad mode", regs, 0);
619 local_irq_disable();
620 panic("bad mode");
621}
622
623/*
624 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
625 * exceptions taken from EL0. Unlike bad_mode, this returns.
626 */
627asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
628{
629 siginfo_t info;
630 void __user *pc = (void __user *)instruction_pointer(regs);
631 console_verbose();
632
633 pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
634 smp_processor_id(), esr, esr_get_class_string(esr));
618 __show_regs(regs); 635 __show_regs(regs);
619 636
620 info.si_signo = SIGILL; 637 info.si_signo = SIGILL;
@@ -622,7 +639,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
622 info.si_code = ILL_ILLOPC; 639 info.si_code = ILL_ILLOPC;
623 info.si_addr = pc; 640 info.si_addr = pc;
624 641
625 arm64_notify_die("Oops - bad mode", regs, &info, 0); 642 current->thread.fault_address = 0;
643 current->thread.fault_code = 0;
644
645 force_sig_info(info.si_signo, &info, current);
626} 646}
627 647
628void __pte_error(const char *file, int line, unsigned long val) 648void __pte_error(const char *file, int line, unsigned long val)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 716d1226ba69..380ebe705093 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -404,6 +404,8 @@ void __init mem_init(void)
404 if (swiotlb_force == SWIOTLB_FORCE || 404 if (swiotlb_force == SWIOTLB_FORCE ||
405 max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT)) 405 max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
406 swiotlb_init(1); 406 swiotlb_init(1);
407 else
408 swiotlb_force = SWIOTLB_NO_FORCE;
407 409
408 set_max_mapnr(pfn_to_page(max_pfn) - mem_map); 410 set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
409 411
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 1c2a5e264fc7..e93c9494503a 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v)
139#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0) 139#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
140#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) 140#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
141#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0) 141#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0)
142 142#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
143 143
144#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new)) 144#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
145#define atomic_xchg(v, new) (xchg(&(v)->counter, new)) 145#define atomic_xchg(v, new) (xchg(&(v)->counter, new))
@@ -161,6 +161,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
161 return c; 161 return c;
162} 162}
163 163
164static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
165{
166 long long c, old;
167
168 c = atomic64_read(v);
169 for (;;) {
170 if (unlikely(c == u))
171 break;
172 old = atomic64_cmpxchg(v, c, c + i);
173 if (likely(old == c))
174 break;
175 c = old;
176 }
177 return c != u;
178}
179
180static inline long long atomic64_dec_if_positive(atomic64_t *v)
181{
182 long long c, old, dec;
183
184 c = atomic64_read(v);
185 for (;;) {
186 dec = c - 1;
187 if (unlikely(dec < 0))
188 break;
189 old = atomic64_cmpxchg((v), c, dec);
190 if (likely(old == c))
191 break;
192 c = old;
193 }
194 return dec;
195}
196
164#define ATOMIC_OP(op) \ 197#define ATOMIC_OP(op) \
165static inline int atomic_fetch_##op(int i, atomic_t *v) \ 198static inline int atomic_fetch_##op(int i, atomic_t *v) \
166{ \ 199{ \
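
Both new frv helpers are instances of the classic compare-and-swap retry loop: read the counter, compute the new value, try to swap it in, and retry if another CPU raced in between. A userspace rendering with C11 atomics, ending with the atomic64_inc_not_zero() use case the first hunk wires up:

/* gcc -std=c11 casloop_demo.c && ./a.out */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool add_unless(_Atomic long long *v, long long a, long long u)
{
	long long c = atomic_load(v);

	while (c != u) {
		/* try to install c + a; on failure, c is reloaded for the retry */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			return true;
	}
	return false;	/* counter hit the excluded value u */
}

int main(void)
{
	_Atomic long long refcount = 1;

	/* inc_not_zero: take a reference only while one still exists */
	if (add_unless(&refcount, 1, 0))
		printf("got ref, count=%lld\n", (long long)atomic_load(&refcount));
	return 0;
}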
diff --git a/arch/mn10300/include/asm/switch_to.h b/arch/mn10300/include/asm/switch_to.h
index 393d311735c8..67e333aa7629 100644
--- a/arch/mn10300/include/asm/switch_to.h
+++ b/arch/mn10300/include/asm/switch_to.h
@@ -16,7 +16,7 @@
16struct task_struct; 16struct task_struct;
17struct thread_struct; 17struct thread_struct;
18 18
19#if !defined(CONFIG_LAZY_SAVE_FPU) 19#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU)
20struct fpu_state_struct; 20struct fpu_state_struct;
21extern asmlinkage void fpu_save(struct fpu_state_struct *); 21extern asmlinkage void fpu_save(struct fpu_state_struct *);
22#define switch_fpu(prev, next) \ 22#define switch_fpu(prev, next) \
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 1c64bc6330bc..0c4e470571ca 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -36,12 +36,13 @@
36#ifdef CONFIG_HUGETLB_PAGE 36#ifdef CONFIG_HUGETLB_PAGE
37static inline int hash__hugepd_ok(hugepd_t hpd) 37static inline int hash__hugepd_ok(hugepd_t hpd)
38{ 38{
39 unsigned long hpdval = hpd_val(hpd);
39 /* 40 /*
40 * if it is not a pte and has the hugepd shift mask 41 * if it is not a pte and has the hugepd shift mask
41 * set, then it is a hugepd directory pointer 42 * set, then it is a hugepd directory pointer
42 */ 43 */
43 if (!(hpd.pd & _PAGE_PTE) && 44 if (!(hpdval & _PAGE_PTE) &&
44 ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)) 45 ((hpdval & HUGEPD_SHIFT_MASK) != 0))
45 return true; 46 return true;
46 return false; 47 return false;
47} 48}
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index f61cad3de4e6..4c935f7504f7 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -201,6 +201,10 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
201 unsigned long phys); 201 unsigned long phys);
202extern void hash__vmemmap_remove_mapping(unsigned long start, 202extern void hash__vmemmap_remove_mapping(unsigned long start,
203 unsigned long page_size); 203 unsigned long page_size);
204
205int hash__create_section_mapping(unsigned long start, unsigned long end);
206int hash__remove_section_mapping(unsigned long start, unsigned long end);
207
204#endif /* !__ASSEMBLY__ */ 208#endif /* !__ASSEMBLY__ */
205#endif /* __KERNEL__ */ 209#endif /* __KERNEL__ */
206#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */ 210#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index ede215167d1a..7f4025a6c69e 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -21,12 +21,12 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
21 * We have only four bits to encode, MMU page size 21 * We have only four bits to encode, MMU page size
22 */ 22 */
23 BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf); 23 BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
24 return __va(hpd.pd & HUGEPD_ADDR_MASK); 24 return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
25} 25}
26 26
27static inline unsigned int hugepd_mmu_psize(hugepd_t hpd) 27static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
28{ 28{
29 return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2; 29 return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
30} 30}
31 31
32static inline unsigned int hugepd_shift(hugepd_t hpd) 32static inline unsigned int hugepd_shift(hugepd_t hpd)
@@ -52,18 +52,20 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
52{ 52{
53 BUG_ON(!hugepd_ok(hpd)); 53 BUG_ON(!hugepd_ok(hpd));
54#ifdef CONFIG_PPC_8xx 54#ifdef CONFIG_PPC_8xx
55 return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK)); 55 return (pte_t *)__va(hpd_val(hpd) &
56 ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
56#else 57#else
57 return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE); 58 return (pte_t *)((hpd_val(hpd) &
59 ~HUGEPD_SHIFT_MASK) | PD_HUGE);
58#endif 60#endif
59} 61}
60 62
61static inline unsigned int hugepd_shift(hugepd_t hpd) 63static inline unsigned int hugepd_shift(hugepd_t hpd)
62{ 64{
63#ifdef CONFIG_PPC_8xx 65#ifdef CONFIG_PPC_8xx
64 return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17; 66 return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
65#else 67#else
66 return hpd.pd & HUGEPD_SHIFT_MASK; 68 return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
67#endif 69#endif
68} 70}
69 71
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 172849727054..0cd8a3852763 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -227,9 +227,10 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
227static inline int hugepd_ok(hugepd_t hpd) 227static inline int hugepd_ok(hugepd_t hpd)
228{ 228{
229#ifdef CONFIG_PPC_8xx 229#ifdef CONFIG_PPC_8xx
230 return ((hpd.pd & 0x4) != 0); 230 return ((hpd_val(hpd) & 0x4) != 0);
231#else 231#else
232 return (hpd.pd > 0); 232 /* We clear the top bit to indicate hugepd */
233 return ((hpd_val(hpd) & PD_HUGE) == 0);
233#endif 234#endif
234} 235}
235 236
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 56398e7e6100..47120bf2670c 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -294,15 +294,12 @@ extern long long virt_phys_offset;
294#include <asm/pgtable-types.h> 294#include <asm/pgtable-types.h>
295#endif 295#endif
296 296
297typedef struct { signed long pd; } hugepd_t;
298 297
299#ifndef CONFIG_HUGETLB_PAGE 298#ifndef CONFIG_HUGETLB_PAGE
300#define is_hugepd(pdep) (0) 299#define is_hugepd(pdep) (0)
301#define pgd_huge(pgd) (0) 300#define pgd_huge(pgd) (0)
302#endif /* CONFIG_HUGETLB_PAGE */ 301#endif /* CONFIG_HUGETLB_PAGE */
303 302
304#define __hugepd(x) ((hugepd_t) { (x) })
305
306struct page; 303struct page;
307extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg); 304extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
308extern void copy_user_page(void *to, void *from, unsigned long vaddr, 305extern void copy_user_page(void *to, void *from, unsigned long vaddr,
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index e157489ee7a1..ae0a23091a9b 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -65,6 +65,7 @@ struct power_pmu {
65#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */ 65#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */
66#define PPMU_HAS_SIER 0x00000040 /* Has SIER */ 66#define PPMU_HAS_SIER 0x00000040 /* Has SIER */
67#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */ 67#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */
68#define PPMU_NO_SIAR 0x00000100 /* Do not use SIAR */
68 69
69/* 70/*
70 * Values for flags to get_alternatives() 71 * Values for flags to get_alternatives()
diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
index 49c0a5a80efa..9c0f5db5cf46 100644
--- a/arch/powerpc/include/asm/pgtable-be-types.h
+++ b/arch/powerpc/include/asm/pgtable-be-types.h
@@ -104,4 +104,12 @@ static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new)
104 return pmd_raw(old) == prev; 104 return pmd_raw(old) == prev;
105} 105}
106 106
107typedef struct { __be64 pdbe; } hugepd_t;
108#define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) })
109
110static inline unsigned long hpd_val(hugepd_t x)
111{
112 return be64_to_cpu(x.pdbe);
113}
114
107#endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */ 115#endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
index e7f4f3e0fcde..8bd3b13fe2fb 100644
--- a/arch/powerpc/include/asm/pgtable-types.h
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -66,4 +66,11 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
66} 66}
67#endif 67#endif
68 68
69typedef struct { unsigned long pd; } hugepd_t;
70#define __hugepd(x) ((hugepd_t) { (x) })
71static inline unsigned long hpd_val(hugepd_t x)
72{
73 return x.pd;
74}
75
69#endif /* _ASM_POWERPC_PGTABLE_TYPES_H */ 76#endif /* _ASM_POWERPC_PGTABLE_TYPES_H */
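
These hunks are the core of the series: hugepd_t moves out of page.h into the pgtable-types headers so the big-endian build can store the descriptor as __be64 while the generic build keeps a plain unsigned long, and every reader now goes through hpd_val() (writers through __hugepd()) instead of poking .pd directly. A standalone sketch of that typed-wrapper-plus-accessor pattern (the _demo names mark everything as illustrative):

/* gcc hpd_demo.c && ./a.out */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t pd; } hugepd_demo_t;	/* generic (native-endian) variant */
#define __hugepd_demo(x)	((hugepd_demo_t){ (x) })

static inline uint64_t hpd_val_demo(hugepd_demo_t h)
{
	return h.pd;	/* the __be64 variant would byte-swap here instead */
}

int main(void)
{
	hugepd_demo_t h = __hugepd_demo(0x2d0);

	/* callers never touch h.pd; only the accessors know the storage format */
	printf("%#llx\n", (unsigned long long)hpd_val_demo(h));
	return 0;
}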
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index c56ea8c84abb..c4ced1d01d57 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -157,7 +157,7 @@
157#define PPC_INST_MCRXR 0x7c000400 157#define PPC_INST_MCRXR 0x7c000400
158#define PPC_INST_MCRXR_MASK 0xfc0007fe 158#define PPC_INST_MCRXR_MASK 0xfc0007fe
159#define PPC_INST_MFSPR_PVR 0x7c1f42a6 159#define PPC_INST_MFSPR_PVR 0x7c1f42a6
160#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff 160#define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe
161#define PPC_INST_MFTMR 0x7c0002dc 161#define PPC_INST_MFTMR 0x7c0002dc
162#define PPC_INST_MSGSND 0x7c00019c 162#define PPC_INST_MSGSND 0x7c00019c
163#define PPC_INST_MSGCLR 0x7c0001dc 163#define PPC_INST_MSGCLR 0x7c0001dc
@@ -174,13 +174,13 @@
174#define PPC_INST_RFDI 0x4c00004e 174#define PPC_INST_RFDI 0x4c00004e
175#define PPC_INST_RFMCI 0x4c00004c 175#define PPC_INST_RFMCI 0x4c00004c
176#define PPC_INST_MFSPR_DSCR 0x7c1102a6 176#define PPC_INST_MFSPR_DSCR 0x7c1102a6
177#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff 177#define PPC_INST_MFSPR_DSCR_MASK 0xfc1ffffe
178#define PPC_INST_MTSPR_DSCR 0x7c1103a6 178#define PPC_INST_MTSPR_DSCR 0x7c1103a6
179#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff 179#define PPC_INST_MTSPR_DSCR_MASK 0xfc1ffffe
180#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6 180#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
181#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff 181#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1ffffe
182#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6 182#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
183#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff 183#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe
184#define PPC_INST_MFVSRD 0x7c000066 184#define PPC_INST_MFVSRD 0x7c000066
185#define PPC_INST_MTVSRD 0x7c000166 185#define PPC_INST_MTVSRD 0x7c000166
186#define PPC_INST_SLBFEE 0x7c0007a7 186#define PPC_INST_SLBFEE 0x7c0007a7
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 8180bfd7ab93..9de7f79e702b 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -298,9 +298,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
298 * 298 *
299 * For pHyp, we have to enable IO for log retrieval. Otherwise, 299 * For pHyp, we have to enable IO for log retrieval. Otherwise,
300 * 0xFF's is always returned from PCI config space. 300 * 0xFF's is always returned from PCI config space.
301 *
302 * When the @severity is EEH_LOG_PERM, the PE is going to be
303 * removed. Prior to that, the drivers for devices included in
304 * the PE will be closed. The drivers rely on a working IO path
305 * to bring the devices to a quiet state. Otherwise, PCI traffic
306 * from those devices after they are removed is likely to cause
307 * another unexpected EEH error.
301 */ 308 */
302 if (!(pe->type & EEH_PE_PHB)) { 309 if (!(pe->type & EEH_PE_PHB)) {
303 if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG)) 310 if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
311 severity == EEH_LOG_PERM)
304 eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); 312 eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
305 313
306 /* 314 /*
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index e4744ff38a17..925a4ef90559 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -463,6 +463,10 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
463 463
464 flush_fp_to_thread(target); 464 flush_fp_to_thread(target);
465 465
466 for (i = 0; i < 32 ; i++)
467 buf[i] = target->thread.TS_FPR(i);
468 buf[32] = target->thread.fp_state.fpscr;
469
466 /* copy to local buffer then write that out */ 470 /* copy to local buffer then write that out */
467 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1); 471 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
468 if (i) 472 if (i)
@@ -672,6 +676,9 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
672 flush_altivec_to_thread(target); 676 flush_altivec_to_thread(target);
673 flush_vsx_to_thread(target); 677 flush_vsx_to_thread(target);
674 678
679 for (i = 0; i < 32 ; i++)
680 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
681
675 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 682 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
676 buf, 0, 32 * sizeof(double)); 683 buf, 0, 32 * sizeof(double));
677 if (!ret) 684 if (!ret)
@@ -1019,6 +1026,10 @@ static int tm_cfpr_set(struct task_struct *target,
1019 flush_fp_to_thread(target); 1026 flush_fp_to_thread(target);
1020 flush_altivec_to_thread(target); 1027 flush_altivec_to_thread(target);
1021 1028
1029 for (i = 0; i < 32; i++)
1030 buf[i] = target->thread.TS_CKFPR(i);
1031 buf[32] = target->thread.ckfp_state.fpscr;
1032
1022 /* copy to local buffer then write that out */ 1033 /* copy to local buffer then write that out */
1023 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1); 1034 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1024 if (i) 1035 if (i)
@@ -1283,6 +1294,9 @@ static int tm_cvsx_set(struct task_struct *target,
1283 flush_altivec_to_thread(target); 1294 flush_altivec_to_thread(target);
1284 flush_vsx_to_thread(target); 1295 flush_vsx_to_thread(target);
1285 1296
1297 for (i = 0; i < 32 ; i++)
1298 buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1299
1286 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1300 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1287 buf, 0, 32 * sizeof(double)); 1301 buf, 0, 32 * sizeof(double));
1288 if (!ret) 1302 if (!ret)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 80334937e14f..67e19a0821be 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -747,7 +747,7 @@ static unsigned long __init htab_get_table_size(void)
747} 747}
748 748
749#ifdef CONFIG_MEMORY_HOTPLUG 749#ifdef CONFIG_MEMORY_HOTPLUG
750int create_section_mapping(unsigned long start, unsigned long end) 750int hash__create_section_mapping(unsigned long start, unsigned long end)
751{ 751{
752 int rc = htab_bolt_mapping(start, end, __pa(start), 752 int rc = htab_bolt_mapping(start, end, __pa(start),
753 pgprot_val(PAGE_KERNEL), mmu_linear_psize, 753 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
@@ -761,7 +761,7 @@ int create_section_mapping(unsigned long start, unsigned long end)
761 return rc; 761 return rc;
762} 762}
763 763
764int remove_section_mapping(unsigned long start, unsigned long end) 764int hash__remove_section_mapping(unsigned long start, unsigned long end)
765{ 765{
766 int rc = htab_remove_mapping(start, end, mmu_linear_psize, 766 int rc = htab_remove_mapping(start, end, mmu_linear_psize,
767 mmu_kernel_ssize); 767 mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index d5026f3800b6..37b5f91e381b 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -125,11 +125,14 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
125int hugepd_ok(hugepd_t hpd) 125int hugepd_ok(hugepd_t hpd)
126{ 126{
127 bool is_hugepd; 127 bool is_hugepd;
128 unsigned long hpdval;
129
130 hpdval = hpd_val(hpd);
128 131
129 /* 132 /*
130 * We should not find this format in page directory, warn otherwise. 133 * We should not find this format in page directory, warn otherwise.
131 */ 134 */
132 is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)); 135 is_hugepd = (((hpdval & 0x3) == 0x0) && ((hpdval & HUGEPD_SHIFT_MASK) != 0));
133 WARN(is_hugepd, "Found wrong page directory format\n"); 136 WARN(is_hugepd, "Found wrong page directory format\n");
134 return 0; 137 return 0;
135} 138}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 289df38fb7e0..8c3389cbcd12 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -53,7 +53,7 @@ static u64 gpage_freearray[MAX_NUMBER_GPAGES];
53static unsigned nr_gpages; 53static unsigned nr_gpages;
54#endif 54#endif
55 55
56#define hugepd_none(hpd) ((hpd).pd == 0) 56#define hugepd_none(hpd) (hpd_val(hpd) == 0)
57 57
58pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) 58pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
59{ 59{
@@ -103,24 +103,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
103 for (i = 0; i < num_hugepd; i++, hpdp++) { 103 for (i = 0; i < num_hugepd; i++, hpdp++) {
104 if (unlikely(!hugepd_none(*hpdp))) 104 if (unlikely(!hugepd_none(*hpdp)))
105 break; 105 break;
106 else 106 else {
107#ifdef CONFIG_PPC_BOOK3S_64 107#ifdef CONFIG_PPC_BOOK3S_64
108 hpdp->pd = __pa(new) | 108 *hpdp = __hugepd(__pa(new) |
109 (shift_to_mmu_psize(pshift) << 2); 109 (shift_to_mmu_psize(pshift) << 2));
110#elif defined(CONFIG_PPC_8xx) 110#elif defined(CONFIG_PPC_8xx)
111 hpdp->pd = __pa(new) | 111 *hpdp = __hugepd(__pa(new) |
112 (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M : 112 (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
113 _PMD_PAGE_512K) | 113 _PMD_PAGE_512K) | _PMD_PRESENT);
114 _PMD_PRESENT;
115#else 114#else
116 /* We use the old format for PPC_FSL_BOOK3E */ 115 /* We use the old format for PPC_FSL_BOOK3E */
117 hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; 116 *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
118#endif 117#endif
118 }
119 } 119 }
120 /* If we bailed from the for loop early, an error occurred, clean up */ 120 /* If we bailed from the for loop early, an error occurred, clean up */
121 if (i < num_hugepd) { 121 if (i < num_hugepd) {
122 for (i = i - 1 ; i >= 0; i--, hpdp--) 122 for (i = i - 1 ; i >= 0; i--, hpdp--)
123 hpdp->pd = 0; 123 *hpdp = __hugepd(0);
124 kmem_cache_free(cachep, new); 124 kmem_cache_free(cachep, new);
125 } 125 }
126 spin_unlock(&mm->page_table_lock); 126 spin_unlock(&mm->page_table_lock);
@@ -454,7 +454,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
454 return; 454 return;
455 455
456 for (i = 0; i < num_hugepd; i++, hpdp++) 456 for (i = 0; i < num_hugepd; i++, hpdp++)
457 hpdp->pd = 0; 457 *hpdp = __hugepd(0);
458 458
459 if (shift >= pdshift) 459 if (shift >= pdshift)
460 hugepd_free(tlb, hugepte); 460 hugepd_free(tlb, hugepte);
@@ -810,12 +810,8 @@ static int __init hugetlbpage_init(void)
810 * if we have pdshift and shift value same, we don't 810 * if we have pdshift and shift value same, we don't
811 * use pgt cache for hugepd. 811 * use pgt cache for hugepd.
812 */ 812 */
813 if (pdshift > shift) { 813 if (pdshift > shift)
814 pgtable_cache_add(pdshift - shift, NULL); 814 pgtable_cache_add(pdshift - shift, NULL);
815 if (!PGT_CACHE(pdshift - shift))
816 panic("hugetlbpage_init(): could not create "
817 "pgtable cache for %d bit pagesize\n", shift);
818 }
819#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) 815#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
820 else if (!hugepte_cache) { 816 else if (!hugepte_cache) {
821 /* 817 /*
@@ -852,9 +848,6 @@ static int __init hugetlbpage_init(void)
852 else if (mmu_psize_defs[MMU_PAGE_2M].shift) 848 else if (mmu_psize_defs[MMU_PAGE_2M].shift)
853 HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift; 849 HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
854#endif 850#endif
855 else
856 panic("%s: Unable to set default huge page size\n", __func__);
857
858 return 0; 851 return 0;
859} 852}
860 853
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index a175cd82ae8c..f2108c40e697 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -78,8 +78,12 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
78 align = max_t(unsigned long, align, minalign); 78 align = max_t(unsigned long, align, minalign);
79 name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift); 79 name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
80 new = kmem_cache_create(name, table_size, align, 0, ctor); 80 new = kmem_cache_create(name, table_size, align, 0, ctor);
81 if (!new)
82 panic("Could not allocate pgtable cache for order %d", shift);
83
81 kfree(name); 84 kfree(name);
82 pgtable_cache[shift - 1] = new; 85 pgtable_cache[shift - 1] = new;
86
83 pr_debug("Allocated pgtable cache for order %d\n", shift); 87 pr_debug("Allocated pgtable cache for order %d\n", shift);
84} 88}
85 89
@@ -88,7 +92,7 @@ void pgtable_cache_init(void)
88{ 92{
89 pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor); 93 pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
90 94
91 if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE)) 95 if (PMD_CACHE_INDEX && !PGT_CACHE(PMD_CACHE_INDEX))
92 pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor); 96 pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
93 /* 97 /*
94 * In all current configs, when the PUD index exists it's the 98 * In all current configs, when the PUD index exists it's the
@@ -97,11 +101,4 @@ void pgtable_cache_init(void)
97 */ 101 */
98 if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)) 102 if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
99 pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor); 103 pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
100
101 if (!PGT_CACHE(PGD_INDEX_SIZE))
102 panic("Couldn't allocate pgd cache");
103 if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
104 panic("Couldn't allocate pmd pgtable caches");
105 if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
106 panic("Couldn't allocate pud pgtable caches");
107} 104}
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index ebf9782bacf9..653ff6c74ebe 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -126,3 +126,21 @@ void mmu_cleanup_all(void)
 	else if (mmu_hash_ops.hpte_clear_all)
 		mmu_hash_ops.hpte_clear_all();
 }
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int create_section_mapping(unsigned long start, unsigned long end)
+{
+	if (radix_enabled())
+		return -ENODEV;
+
+	return hash__create_section_mapping(start, end);
+}
+
+int remove_section_mapping(unsigned long start, unsigned long end)
+{
+	if (radix_enabled())
+		return -ENODEV;
+
+	return hash__remove_section_mapping(start, end);
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index fd3e4034c04d..270eb9b74e2e 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -295,6 +295,8 @@ static inline void perf_read_regs(struct pt_regs *regs)
 	 */
 	if (TRAP(regs) != 0xf00)
 		use_siar = 0;
+	else if ((ppmu->flags & PPMU_NO_SIAR))
+		use_siar = 0;
 	else if (marked)
 		use_siar = 1;
 	else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h
index 6447dc1c3d89..929b56d47ad9 100644
--- a/arch/powerpc/perf/power9-events-list.h
+++ b/arch/powerpc/perf/power9-events-list.h
@@ -16,7 +16,7 @@ EVENT(PM_CYC, 0x0001e)
 EVENT(PM_ICT_NOSLOT_CYC,		0x100f8)
 EVENT(PM_CMPLU_STALL,			0x1e054)
 EVENT(PM_INST_CMPL,			0x00002)
-EVENT(PM_BRU_CMPL,			0x40060)
+EVENT(PM_BRU_CMPL,			0x10012)
 EVENT(PM_BR_MPRED_CMPL,			0x400f6)
 
 /* All L1 D cache load references counted at finish, gated by reject */
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 346010e8d463..7332634e18c9 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -384,7 +384,7 @@ static struct power_pmu power9_isa207_pmu = {
 	.bhrb_filter_map	= power9_bhrb_filter_map,
 	.get_constraint		= isa207_get_constraint,
 	.disable_pmc		= isa207_disable_pmc,
-	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
+	.flags			= PPMU_NO_SIAR | PPMU_ARCH_207S,
 	.n_generic		= ARRAY_SIZE(power9_generic_events),
 	.generic_events		= power9_generic_events,
 	.cache_events		= &power9_cache_events,
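
The two perf hunks work together: power9_isa207_pmu drops PPMU_HAS_SIER in favour of PPMU_NO_SIAR, and perf_read_regs() then refuses to trust SIAR when that flag is set. A condensed standalone sketch of the resulting priority chain; the bit values and the in_kernel shortcut here are illustrative assumptions, not the kernel's exact logic:

    #include <stdbool.h>
    #include <stdio.h>

    #define PPMU_NO_SIAR            0x1     /* illustrative bit values */
    #define PPMU_NO_CONT_SAMPLING   0x2

    /* Decide whether the sampled-address register can be used. */
    static bool use_siar(unsigned int flags, bool marked, bool in_kernel)
    {
        if (flags & PPMU_NO_SIAR)
            return false;           /* hardware never populates SIAR */
        if (marked)
            return true;            /* marked events always latch SIAR */
        if (flags & PPMU_NO_CONT_SAMPLING)
            return false;
        return !in_kernel;
    }

    int main(void)
    {
        printf("%d\n", use_siar(PPMU_NO_SIAR, true, false));   /* 0 */
        printf("%d\n", use_siar(0, true, true));               /* 1 */
        return 0;
    }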
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index d38e86fd5720..60c57657c772 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -20,6 +20,7 @@
 #include <asm/xics.h>
 #include <asm/io.h>
 #include <asm/opal.h>
+#include <asm/kvm_ppc.h>
 
 static void icp_opal_teardown_cpu(void)
 {
@@ -39,7 +40,26 @@ static void icp_opal_flush_ipi(void)
 	 * Should we be flagging idle loop instead?
 	 * Or creating some task to be scheduled?
 	 */
-	opal_int_eoi((0x00 << 24) | XICS_IPI);
+	if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
+		force_external_irq_replay();
+}
+
+static unsigned int icp_opal_get_xirr(void)
+{
+	unsigned int kvm_xirr;
+	__be32 hw_xirr;
+	int64_t rc;
+
+	/* Handle an interrupt latched by KVM first */
+	kvm_xirr = kvmppc_get_xics_latch();
+	if (kvm_xirr)
+		return kvm_xirr;
+
+	/* Then ask OPAL */
+	rc = opal_int_get_xirr(&hw_xirr, false);
+	if (rc < 0)
+		return 0;
+	return be32_to_cpu(hw_xirr);
 }
 
 static unsigned int icp_opal_get_irq(void)
@@ -47,12 +67,8 @@ static unsigned int icp_opal_get_irq(void)
 	unsigned int xirr;
 	unsigned int vec;
 	unsigned int irq;
-	int64_t rc;
 
-	rc = opal_int_get_xirr(&xirr, false);
-	if (rc < 0)
-		return 0;
-	xirr = be32_to_cpu(xirr);
+	xirr = icp_opal_get_xirr();
 	vec = xirr & 0x00ffffff;
 	if (vec == XICS_IRQ_SPURIOUS)
 		return 0;
@@ -67,7 +83,8 @@ static unsigned int icp_opal_get_irq(void)
 	xics_mask_unknown_vec(vec);
 
 	/* We might learn about it later, so EOI it */
-	opal_int_eoi(xirr);
+	if (opal_int_eoi(xirr) > 0)
+		force_external_irq_replay();
 
 	return 0;
 }
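
Two independent fixes land in icp-opal.c: interrupts latched by KVM are drained before OPAL is consulted, and a positive return from opal_int_eoi() now forces a replay of anything the EOI may have exposed. A small sketch of the "check a fast local source before the slower firmware call" shape; all names here are hypothetical:

    #include <stdio.h>

    static unsigned int latched;    /* stand-in for the per-CPU KVM latch */

    static unsigned int read_hw_source(void)
    {
        return 0;                   /* stand-in for opal_int_get_xirr() */
    }

    /* Prefer the locally latched value; only then ask the slower source. */
    static unsigned int get_pending(void)
    {
        unsigned int v = latched;

        if (v) {
            latched = 0;
            return v;
        }
        return read_hw_source();
    }

    int main(void)
    {
        latched = 0x42;
        printf("0x%x\n", get_pending());    /* 0x42, no firmware call */
        printf("0x%x\n", get_pending());    /* falls through to hardware */
        return 0;
    }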
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index e659daffe368..e00975361fec 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -69,7 +69,7 @@ CONFIG_CMA=y
 CONFIG_CMA_DEBUG=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_MEM_SOFT_DIRTY=y
-CONFIG_ZPOOL=m
+CONFIG_ZSWAP=y
 CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
 CONFIG_ZSMALLOC_STAT=y
@@ -141,8 +141,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
 CONFIG_NF_CONNTRACK_TIMEOUT=y
 CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -159,13 +157,12 @@ CONFIG_NF_TABLES=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -219,7 +216,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
 CONFIG_NETFILTER_XT_MATCH_REALM=m
 CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
 CONFIG_NETFILTER_XT_MATCH_STATE=m
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
 CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -258,7 +254,6 @@ CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
 CONFIG_NF_TABLES_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -436,7 +431,6 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
@@ -480,6 +474,7 @@ CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
 CONFIG_JBD2_DEBUG=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
@@ -592,14 +587,12 @@ CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_LOCKDEP=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
-CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_DEBUG_CREDENTIALS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=300
 CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
 CONFIG_PM_NOTIFIER_ERROR_INJECT=m
 CONFIG_FAULT_INJECTION=y
 CONFIG_FAILSLAB=y
@@ -618,6 +611,7 @@ CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
 CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
@@ -630,6 +624,7 @@ CONFIG_TEST_STRING_HELPERS=y
 CONFIG_TEST_KSTRTOX=y
 CONFIG_DMA_API_DEBUG=y
 CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
@@ -640,16 +635,18 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
 CONFIG_SECURITY_SELINUX_DISABLE=y
 CONFIG_IMA=y
 CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_RSA=m
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -673,11 +670,13 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 95ceac50bc65..f05d2d6e1087 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -12,6 +12,7 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
+# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
@@ -54,8 +55,9 @@ CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
+CONFIG_LIVEPATCH=y
 CONFIG_TUNE_ZEC12=y
-CONFIG_NR_CPUS=256
+CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
@@ -65,6 +67,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
 CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
@@ -136,8 +139,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
 CONFIG_NF_CONNTRACK_TIMEOUT=y
 CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +155,12 @@ CONFIG_NF_TABLES=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +214,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
 CONFIG_NETFILTER_XT_MATCH_REALM=m
 CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
 CONFIG_NETFILTER_XT_MATCH_STATE=m
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
 CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +252,6 @@ CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
 CONFIG_NF_TABLES_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -430,7 +428,6 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
@@ -460,6 +457,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_RAW_DRIVER=m
 CONFIG_HANGCHECK_TIMER=m
 CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
@@ -473,6 +471,7 @@ CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
 CONFIG_JBD2_DEBUG=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
@@ -495,6 +494,7 @@ CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=y
@@ -551,25 +551,27 @@ CONFIG_FRAME_WARN=1024
 CONFIG_UNUSED_SYMBOLS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
-CONFIG_PM_NOTIFIER_ERROR_INJECT=m
 CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-# CONFIG_KPROBE_EVENT is not set
+CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
-CONFIG_RBTREE_TEST=m
-CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_S390_PTDUMP=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
@@ -577,18 +579,25 @@ CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
 CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
 CONFIG_IMA=y
+CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -598,6 +607,7 @@ CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -612,10 +622,13 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
@@ -624,9 +637,6 @@ CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
 CONFIG_CORDIC=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index bc7b176f5795..2cf87343b590 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -65,6 +65,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
 CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
@@ -136,8 +137,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
 CONFIG_NF_CONNTRACK_TIMEOUT=y
 CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +153,12 @@ CONFIG_NF_TABLES=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +212,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
 CONFIG_NETFILTER_XT_MATCH_REALM=m
 CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
 CONFIG_NETFILTER_XT_MATCH_STATE=m
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
 CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +250,6 @@ CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
 CONFIG_NF_TABLES_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -430,7 +426,6 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
@@ -474,6 +469,7 @@ CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
 CONFIG_JBD2_DEBUG=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
@@ -496,6 +492,7 @@ CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=y
@@ -563,12 +560,16 @@ CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
 CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_S390_PTDUMP=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
@@ -576,18 +577,25 @@ CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
 CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
 CONFIG_IMA=y
+CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -597,6 +605,7 @@ CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -611,10 +620,13 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
@@ -623,9 +635,6 @@ CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
 CONFIG_CORDIC=m
631CONFIG_CORDIC=m 640CONFIG_CORDIC=m
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 2d40ef0a6295..d00e368fb5e6 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -38,7 +38,6 @@ CONFIG_JUMP_LABEL=y
 CONFIG_STATIC_KEYS_SELFTEST=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
 CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
@@ -130,8 +129,11 @@ CONFIG_DUMMY=m
 CONFIG_EQUALIZER=m
 CONFIG_TUN=m
 CONFIG_VIRTIO_NET=y
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
+CONFIG_DEVKMEM=y
 CONFIG_RAW_DRIVER=m
 CONFIG_VIRTIO_BALLOON=y
 CONFIG_EXT4_FS=y
@@ -183,7 +185,6 @@ CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_S390_PTDUMP=y
 CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_AUTHENC=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index d7697ab802f6..8e136b88cdf4 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -15,7 +15,9 @@
 	BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
 	asm volatile(							\
 		"	lctlg	%1,%2,%0\n"				\
-		: : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+		:							\
+		: "Q" (*(addrtype *)(&array)), "i" (low), "i" (high)	\
+		: "memory");						\
 }
 
 #define __ctl_store(array, low, high) {					\
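
The __ctl_load() change adds a "memory" clobber so the compiler can neither cache memory values in registers across the control-register load nor reorder surrounding accesses past it. The general pattern, in a standalone sketch:

    #include <stdio.h>

    static int shared;

    int main(void)
    {
        shared = 1;

        /*
         * Without the "memory" clobber the compiler may assume `shared`
         * is unchanged across the asm and reuse a stale register copy;
         * with it, live memory values are written back before the
         * statement and reloaded afterwards.
         */
        asm volatile("" : : : "memory");

        printf("%d\n", shared);
        return 0;
    }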
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 7447ba509c30..12020b55887b 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
 	if (target == current)
 		save_fpu_regs();
 
+	if (MACHINE_HAS_VX)
+		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
+	else
+		memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
+
 	/* If setting FPC, must validate it first. */
 	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
 		u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
 	if (target == current)
 		save_fpu_regs();
 
+	for (i = 0; i < __NUM_VXRS_LOW; i++)
+		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
+
 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
 	if (rc == 0)
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index bec71e902be3..6484a250021e 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -916,7 +916,7 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
-	       S390_ARCH_FAC_LIST_SIZE_BYTE);
+	       sizeof(S390_lowcore.stfle_fac_list));
 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
 		ret = -EFAULT;
 	kfree(mach);
@@ -1437,7 +1437,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	/* Populate the facility mask initially. */
 	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
-	       S390_ARCH_FAC_LIST_SIZE_BYTE);
+	       sizeof(S390_lowcore.stfle_fac_list));
 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
 		if (i < kvm_s390_fac_list_mask_size())
 			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
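
Both kvm-s390 memcpy() calls now bound the copy by sizeof() of the source array rather than a size macro describing the destination, so the copy cannot read past the end of stfle_fac_list if the two sizes ever drift apart. The idiom in miniature, with illustrative sizes:

    #include <stdio.h>
    #include <string.h>

    #define DST_SIZE 64             /* illustrative destination capacity */

    int main(void)
    {
        unsigned long src[4] = { 1, 2, 3, 4 };
        unsigned char dst[DST_SIZE] = { 0 };

        /* sizeof(src) tracks the source definition automatically. */
        memcpy(dst, src, sizeof(src));

        printf("copied %zu bytes\n", sizeof(src));
        return 0;
    }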
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 7a1897c51c54..d56ef26d4681 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -202,7 +202,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
 	return pgste;
 }
 
-static inline void ptep_xchg_commit(struct mm_struct *mm,
+static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
 					unsigned long addr, pte_t *ptep,
 					pgste_t pgste, pte_t old, pte_t new)
 {
@@ -220,6 +220,7 @@ static inline void ptep_xchg_commit(struct mm_struct *mm,
 	} else {
 		*ptep = new;
 	}
+	return old;
 }
 
 pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
@@ -231,7 +232,7 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
 	preempt_disable();
 	pgste = ptep_xchg_start(mm, addr, ptep);
 	old = ptep_flush_direct(mm, addr, ptep);
-	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 	preempt_enable();
 	return old;
 }
@@ -246,7 +247,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 	preempt_disable();
 	pgste = ptep_xchg_start(mm, addr, ptep);
 	old = ptep_flush_lazy(mm, addr, ptep);
-	ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 	preempt_enable();
 	return old;
 }
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index d89b7011667c..e279572824b1 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
 			const void *kbuf, const void __user *ubuf)
 {
 	int ret;
-	struct pt_regs regs;
+	struct pt_regs regs = *task_pt_regs(target);
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
 				 sizeof(regs));
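
This is the same class of fix as the s390 ptrace hunks above: a regset .set() handler that accepts a partial write must first seed its staging buffer with the task's current registers, otherwise user_regset_copyin() leaves the untouched tail as uninitialized stack garbage that is then committed to the task. A standalone sketch of the seed-then-overlay pattern, with types simplified:

    #include <stdio.h>
    #include <string.h>

    struct regs { long r[8]; };

    static struct regs task_regs = { .r = { 1, 2, 3, 4, 5, 6, 7, 8 } };

    /* Apply a partial user write covering only the first `n` registers. */
    static void gpr_set(const long *user_buf, size_t n)
    {
        struct regs staging = task_regs;    /* seed with current state */

        memcpy(staging.r, user_buf, n * sizeof(long));
        task_regs = staging;                /* commit the merged view */
    }

    int main(void)
    {
        long update[2] = { 100, 200 };

        gpr_set(update, 2);
        printf("%ld %ld %ld\n", task_regs.r[0], task_regs.r[1],
               task_regs.r[2]);             /* 100 200 3: tail preserved */
        return 0;
    }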
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 05612a2529c8..496e60391fac 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -1010,7 +1010,7 @@ static __init int amd_ibs_init(void)
 	 * all online cpus.
 	 */
 	cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
-			  "perf/x86/amd/ibs:STARTING",
+			  "perf/x86/amd/ibs:starting",
 			  x86_pmu_amd_ibs_starting_cpu,
 			  x86_pmu_amd_ibs_dying_cpu);
 
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index d611cab214a6..eb1484c86bb4 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3176,13 +3176,16 @@ static void intel_pmu_cpu_starting(int cpu)
 
 	if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
 		for_each_cpu(i, topology_sibling_cpumask(cpu)) {
+			struct cpu_hw_events *sibling;
 			struct intel_excl_cntrs *c;
 
-			c = per_cpu(cpu_hw_events, i).excl_cntrs;
+			sibling = &per_cpu(cpu_hw_events, i);
+			c = sibling->excl_cntrs;
 			if (c && c->core_id == core_id) {
 				cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
 				cpuc->excl_cntrs = c;
-				cpuc->excl_thread_id = 1;
+				if (!sibling->excl_thread_id)
+					cpuc->excl_thread_id = 1;
 				break;
 			}
 		}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 945e512a112a..1e35dd06b090 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1875,6 +1875,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
 	.irq_ack		= irq_chip_ack_parent,
 	.irq_eoi		= ioapic_ack_level,
 	.irq_set_affinity	= ioapic_set_affinity,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.flags			= IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -1886,6 +1887,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
 	.irq_ack		= irq_chip_ack_parent,
 	.irq_eoi		= ioapic_ir_ack_level,
 	.irq_set_affinity	= ioapic_set_affinity,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.flags			= IRQCHIP_SKIP_SET_WAKE,
 };
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 57d8a856cdc5..d153be8929a6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6171,7 +6171,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
 
 	kvm_x86_ops->patch_hypercall(vcpu, instruction);
 
-	return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
+	return emulator_write_emulated(ctxt, rip, instruction, 3,
+		&ctxt->exception);
 }
 
 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 3cd69832d7f4..3961103e9176 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -114,6 +114,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
 			DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
 		},
 	},
+	/* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */
+	{
+		.callback = set_nouse_crs,
+		.ident = "Supermicro X8DTH",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"),
+			DMI_MATCH(DMI_BIOS_VERSION, "2.0a"),
+		},
+	},
 
 	/* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
 	{
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a8e67a155d04..c3400b5444a7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -912,7 +912,6 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
 {
 	LIST_HEAD(rq_list);
-	LIST_HEAD(driver_list);
 
 	if (unlikely(blk_mq_hctx_stopped(hctx)))
 		return;
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 82b0b5710979..b0399e8f6d27 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -852,23 +852,18 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
 
 	ACPI_FUNCTION_TRACE(tb_install_and_load_table);
 
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-
 	/* Install the table and load it into the namespace */
 
 	status = acpi_tb_install_standard_table(address, flags, TRUE,
 						override, &i);
 	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
+		goto exit;
 	}
 
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 	status = acpi_tb_load_table(i, acpi_gbl_root_node);
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 
-unlock_and_exit:
+exit:
 	*table_index = i;
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 	return_ACPI_STATUS(status);
 }
 
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 5fdf251a9f97..01e1b3d63fc0 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -217,6 +217,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 		goto release_and_exit;
 	}
 
+	/* Acquire the table lock */
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
 	if (reload) {
 		/*
 		 * Validate the incoming table signature.
@@ -244,7 +248,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 					  new_table_desc.signature.integer));
 
 			status = AE_BAD_SIGNATURE;
-			goto release_and_exit;
+			goto unlock_and_exit;
 		}
 
 		/* Check if table is already registered */
@@ -279,7 +283,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 				/* Table is still loaded, this is an error */
 
 				status = AE_ALREADY_EXISTS;
-				goto release_and_exit;
+				goto unlock_and_exit;
 			} else {
 				/*
 				 * Table was unloaded, allow it to be reloaded.
@@ -290,6 +294,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 				 * indicate the re-installation.
 				 */
 				acpi_tb_uninstall_table(&new_table_desc);
+				(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 				*table_index = i;
 				return_ACPI_STATUS(AE_OK);
 			}
@@ -303,11 +308,19 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 
 	/* Invoke table handler if present */
 
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 	if (acpi_gbl_table_handler) {
 		(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
 					     new_table_desc.pointer,
 					     acpi_gbl_table_handler_context);
 	}
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+unlock_and_exit:
+
+	/* Release the table lock */
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 
 release_and_exit:
 
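
Taken together, the two ACPICA hunks move ownership of ACPI_MTX_TABLES into acpi_tb_install_standard_table(): the caller no longer juggles acquire/release around the call, and the installer drops the lock while invoking the potentially re-entrant table handler. A sketch of that lock-scope shape, with a plain pthread mutex standing in:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    static void table_event_handler(void)
    {
        /* May call back into table code, so it must run unlocked. */
        printf("handler ran without the lock held\n");
    }

    static int install_table(void)
    {
        pthread_mutex_lock(&table_lock);

        /* ... validate and register the table under the lock ... */

        pthread_mutex_unlock(&table_lock);
        table_event_handler();              /* notify unlocked */
        pthread_mutex_lock(&table_lock);

        /* ... final bookkeeping ... */

        pthread_mutex_unlock(&table_lock);
        return 0;
    }

    int main(void)
    {
        return install_table();
    }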
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 9b6cebe227a0..54abb26b7366 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -674,14 +674,6 @@ static void acpi_sleep_suspend_setup(void)
 		if (acpi_sleep_state_supported(i))
 			sleep_states[i] = 1;
 
-	/*
-	 * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set and
-	 * the default suspend mode was not selected from the command line.
-	 */
-	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0 &&
-	    mem_sleep_default > PM_SUSPEND_MEM)
-		mem_sleep_default = PM_SUSPEND_FREEZE;
-
 	suspend_set_ops(old_suspend_ordering ?
 		&acpi_suspend_ops_old : &acpi_suspend_ops);
 	freeze_set_ops(&acpi_freeze_ops);
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 02ded25c82e4..7f48156cbc0c 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -305,17 +305,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
 		},
 	},
-	{
-	/* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
-	/* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
-	.callback = video_detect_force_native,
-	.ident = "HP Pavilion dv6",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-		DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
-		},
-	},
-
 	{ },
 };
 
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 8ab8ea1253e6..dacb6a8418aa 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -408,14 +408,14 @@ static ssize_t show_valid_zones(struct device *dev,
 		sprintf(buf, "%s", zone->name);
 
 	/* MMOP_ONLINE_KERNEL */
-	zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
+	zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
 	if (zone_shift) {
 		strcat(buf, " ");
 		strcat(buf, (zone + zone_shift)->name);
 	}
 
 	/* MMOP_ONLINE_MOVABLE */
-	zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
+	zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
 	if (zone_shift) {
 		strcat(buf, " ");
 		strcat(buf, (zone + zone_shift)->name);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 50a2020b5b72..9fd06eeb1a17 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -271,7 +271,7 @@ static inline int sock_send_bvec(struct nbd_device *nbd, int index,
 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 {
 	struct request *req = blk_mq_rq_from_pdu(cmd);
-	int result, flags;
+	int result;
 	struct nbd_request request;
 	unsigned long size = blk_rq_bytes(req);
 	struct bio *bio;
@@ -310,7 +310,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	if (type != NBD_CMD_WRITE)
 		return 0;
 
-	flags = 0;
 	bio = req->bio;
 	while (bio) {
 		struct bio *next = bio->bi_next;
@@ -319,9 +318,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 
 		bio_for_each_segment(bvec, bio, iter) {
 			bool is_last = !next && bio_iter_last(bvec, iter);
+			int flags = is_last ? 0 : MSG_MORE;
 
-			if (is_last)
-				flags = MSG_MORE;
 			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
 				cmd, bvec.bv_len);
 			result = sock_send_bvec(nbd, index, &bvec, flags);
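
The nbd fix inverts a broken condition: MSG_MORE tells the socket layer that more data follows, so it belongs on every fragment except the last, while the old code set it only on the last. A minimal sender loop using the corrected logic, with the socket call replaced by a print for brevity:

    #include <stdio.h>

    #define MSG_MORE 0x8000         /* matches the Linux flag value */

    int main(void)
    {
        const char *frags[] = { "hdr", "payload", "trailer" };
        const int n = 3;

        for (int i = 0; i < n; i++) {
            /* Set MSG_MORE on all but the final fragment. */
            int flags = (i == n - 1) ? 0 : MSG_MORE;

            printf("send(%-8s flags=%#x)\n", frags[i], flags);
        }
        return 0;
    }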
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b2bdfa81f929..265f1a7072e9 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -197,13 +197,13 @@ struct blkfront_info
 	/* Number of pages per ring buffer. */
 	unsigned int nr_ring_pages;
 	struct request_queue *rq;
-	unsigned int feature_flush;
-	unsigned int feature_fua;
+	unsigned int feature_flush:1;
+	unsigned int feature_fua:1;
 	unsigned int feature_discard:1;
 	unsigned int feature_secdiscard:1;
+	unsigned int feature_persistent:1;
 	unsigned int discard_granularity;
 	unsigned int discard_alignment;
-	unsigned int feature_persistent:1;
 	/* Number of 4KB segments handled */
 	unsigned int max_indirect_segments;
 	int is_ready;
@@ -2223,7 +2223,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
 	}
 	else
 		grants = info->max_indirect_segments;
-	psegs = grants / GRANTS_PER_PSEG;
+	psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);
 
 	err = fill_grant_buffer(rinfo,
 				(grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
@@ -2323,13 +2323,16 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
 		blkfront_setup_discard(info);
 
 	info->feature_persistent =
-		xenbus_read_unsigned(info->xbdev->otherend,
+		!!xenbus_read_unsigned(info->xbdev->otherend,
 				     "feature-persistent", 0);
 
 	indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
 					"feature-max-indirect-segments", 0);
-	info->max_indirect_segments = min(indirect_segments,
-					  xen_blkif_max_segments);
+	if (indirect_segments > xen_blkif_max_segments)
+		indirect_segments = xen_blkif_max_segments;
+	if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
+		indirect_segments = 0;
+	info->max_indirect_segments = indirect_segments;
 }
 
 /*
@@ -2652,6 +2655,9 @@ static int __init xlblk_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
+	if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
+		xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
 	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
 		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
 			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
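
Among the blkfront changes, the DIV_ROUND_UP() swap is the arithmetic one: truncating division under-allocates the per-request segment pool whenever the grant count is not a multiple of GRANTS_PER_PSEG. The macro, as the kernel defines it, and its effect:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int grants = 17, per_pseg = 8;     /* illustrative counts */

        /* 17 / 8 == 2 truncates and loses a segment's worth of grants. */
        printf("truncated:  %u\n", grants / per_pseg);
        /* DIV_ROUND_UP(17, 8) == 3 covers the remainder. */
        printf("rounded up: %u\n", DIV_ROUND_UP(grants, per_pseg));
        return 0;
    }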
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8b00e79c2683..17857beb4892 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1862,7 +1862,7 @@ static void config_work_handler(struct work_struct *work)
 {
 	struct ports_device *portdev;
 
-	portdev = container_of(work, struct ports_device, control_work);
+	portdev = container_of(work, struct ports_device, config_work);
 	if (!use_multiport(portdev)) {
 		struct virtio_device *vdev;
 		struct port *port;
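
The virtio_console fix is a classic container_of() pitfall: the work item embedded at config_work was being rewound by the offset of control_work, yielding a pointer into the wrong part of the structure. A standalone demonstration of why the member name must match the embedding site, using the classic offsetof form of the macro:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work { int pending; };

    struct device {
        int id;
        struct work control_work;   /* two embedded work items ...   */
        struct work config_work;    /* ... at different offsets      */
    };

    static void config_handler(struct work *w)
    {
        /* Rewind via the member that actually embeds `w`. */
        struct device *dev = container_of(w, struct device, config_work);

        printf("device id: %d\n", dev->id);     /* 7 */
    }

    int main(void)
    {
        struct device dev = { .id = 7 };

        config_handler(&dev.config_work);
        return 0;
    }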
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 8c8b495cbf0d..cdc092a1d9ef 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -586,7 +586,7 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
 	GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
 			GATE_BUS_TOP, 24, 0, 0),
 	GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
-			GATE_BUS_TOP, 27, 0, 0),
+			GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
 };
 
 static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -956,20 +956,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
 	GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0),
 
 	GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys",
-			GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0),
+			GATE_BUS_FSYS0, 9, CLK_IS_CRITICAL, 0),
 	GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2",
 			GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0),
 
 	GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d",
 			GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0),
 	GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d",
-			GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0),
+			GATE_BUS_TOP, 1, CLK_IS_CRITICAL, 0),
 	GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
 			GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
 	GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
 			GATE_BUS_TOP, 5, 0, 0),
 	GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
-			GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0),
+			GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0),
 	GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
 			GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
 	GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
@@ -983,20 +983,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
 	GATE(0, "aclk166", "mout_user_aclk166",
 			GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
 	GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
-			GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0),
+			GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0),
 	GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
 			GATE_BUS_TOP, 16, 0, 0),
 	GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
 			GATE_BUS_TOP, 17, 0, 0),
 	GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
-			GATE_BUS_TOP, 18, 0, 0),
+			GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
 	GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
 			GATE_BUS_TOP, 28, 0, 0),
 	GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m",
 			GATE_BUS_TOP, 29, 0, 0),
 
 	GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1",
-			SRC_MASK_TOP2, 24, 0, 0),
+			SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0),
 
 	GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
 			SRC_MASK_TOP7, 20, 0, 0),
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 4da1dc2278bd..670ff0f25b67 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -495,6 +495,7 @@ static int exynos4_mct_dying_cpu(unsigned int cpu)
 	if (mct_int_type == MCT_INT_SPI) {
 		if (evt->irq != -1)
 			disable_irq_nosync(evt->irq);
+		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
 	} else {
 		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
 	}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f91c25718d16..a54d65aa776d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2005,7 +2005,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 		limits = &performance_limits;
 		perf_limits = limits;
 	}
-	if (policy->max >= policy->cpuinfo.max_freq) {
+	if (policy->max >= policy->cpuinfo.max_freq &&
+	    !limits->no_turbo) {
 		pr_debug("set performance\n");
 		intel_pstate_set_performance_limits(perf_limits);
 		goto out;
@@ -2047,6 +2048,17 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
 		return -EINVAL;
 
+	/* When per-CPU limits are used, sysfs limits are not used */
+	if (!per_cpu_limits) {
+		unsigned int max_freq, min_freq;
+
+		max_freq = policy->cpuinfo.max_freq *
+			limits->max_sysfs_pct / 100;
+		min_freq = policy->cpuinfo.max_freq *
+			limits->min_sysfs_pct / 100;
+		cpufreq_verify_within_limits(policy, min_freq, max_freq);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 86bf3b84ada5..a07ae9e37930 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1723,7 +1723,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
 }
 
 /**
- * _gpiochip_irqchip_add() - adds an irqchip to a gpiochip
+ * gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip
  * @gpiochip: the gpiochip to add the irqchip to
  * @irqchip: the irqchip to add to the gpiochip
  * @first_irq: if not dynamically assigned, the base (first) IRQ to
@@ -1749,13 +1749,13 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
  * the pins on the gpiochip can generate a unique IRQ. Everything else
  * need to be open coded.
  */
-int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
 			  struct irq_chip *irqchip,
 			  unsigned int first_irq,
 			  irq_flow_handler_t handler,
 			  unsigned int type,
 			  bool nested,
 			  struct lock_class_key *lock_key)
 {
 	struct device_node *of_node;
 	bool irq_base_set = false;
@@ -1840,7 +1840,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add);
+EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
 
 #else /* CONFIG_GPIOLIB_IRQCHIP */
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 29d6d84d1c28..41e41f90265d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		}
 		break;
 	}
+
+	if (!(*out_ring && (*out_ring)->adev)) {
+		DRM_ERROR("Ring %d is not initialized on IP %d\n",
+			  ring, ip_type);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9999dc71b998..ccb5e02e7b20 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2512,6 +2512,8 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
 
 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
 	return 0;
 }
@@ -2537,7 +2539,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 				       int32_t hot_y)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *aobj;
 	int ret;
@@ -2578,7 +2579,9 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 	dce_v10_0_lock_cursor(crtc, true);
 
-	if (hot_x != amdgpu_crtc->cursor_hot_x ||
+	if (width != amdgpu_crtc->cursor_width ||
+	    height != amdgpu_crtc->cursor_height ||
+	    hot_x != amdgpu_crtc->cursor_hot_x ||
 	    hot_y != amdgpu_crtc->cursor_hot_y) {
 		int x, y;
 
@@ -2587,16 +2590,10 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 		dce_v10_0_cursor_move_locked(crtc, x, y);
 
-		amdgpu_crtc->cursor_hot_x = hot_x;
-		amdgpu_crtc->cursor_hot_y = hot_y;
-	}
-
-	if (width != amdgpu_crtc->cursor_width ||
-	    height != amdgpu_crtc->cursor_height) {
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (width - 1) << 16 | (height - 1));
 		amdgpu_crtc->cursor_width = width;
 		amdgpu_crtc->cursor_height = height;
+		amdgpu_crtc->cursor_hot_x = hot_x;
+		amdgpu_crtc->cursor_hot_y = hot_y;
 	}
 
 	dce_v10_0_show_cursor(crtc);
@@ -2620,7 +2617,6 @@ unpin:
 static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 
 	if (amdgpu_crtc->cursor_bo) {
 		dce_v10_0_lock_cursor(crtc, true);
@@ -2628,10 +2624,6 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
 		dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
 					     amdgpu_crtc->cursor_y);
 
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (amdgpu_crtc->cursor_width - 1) << 16 |
-		       (amdgpu_crtc->cursor_height - 1));
-
 		dce_v10_0_show_cursor(crtc);
 
 		dce_v10_0_lock_cursor(crtc, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 2006abbbfb62..a7af5b33a5e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2532,6 +2532,8 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
 
 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
 	return 0;
 }
@@ -2557,7 +2559,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 				       int32_t hot_y)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *aobj;
 	int ret;
@@ -2598,7 +2599,9 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 	dce_v11_0_lock_cursor(crtc, true);
 
-	if (hot_x != amdgpu_crtc->cursor_hot_x ||
+	if (width != amdgpu_crtc->cursor_width ||
+	    height != amdgpu_crtc->cursor_height ||
+	    hot_x != amdgpu_crtc->cursor_hot_x ||
 	    hot_y != amdgpu_crtc->cursor_hot_y) {
 		int x, y;
 
@@ -2607,16 +2610,10 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 		dce_v11_0_cursor_move_locked(crtc, x, y);
 
-		amdgpu_crtc->cursor_hot_x = hot_x;
-		amdgpu_crtc->cursor_hot_y = hot_y;
-	}
-
-	if (width != amdgpu_crtc->cursor_width ||
-	    height != amdgpu_crtc->cursor_height) {
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (width - 1) << 16 | (height - 1));
 		amdgpu_crtc->cursor_width = width;
 		amdgpu_crtc->cursor_height = height;
+		amdgpu_crtc->cursor_hot_x = hot_x;
+		amdgpu_crtc->cursor_hot_y = hot_y;
 	}
 
 	dce_v11_0_show_cursor(crtc);
@@ -2640,7 +2637,6 @@ unpin:
 static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 
 	if (amdgpu_crtc->cursor_bo) {
 		dce_v11_0_lock_cursor(crtc, true);
@@ -2648,10 +2644,6 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
 		dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
 					     amdgpu_crtc->cursor_y);
 
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (amdgpu_crtc->cursor_width - 1) << 16 |
-		       (amdgpu_crtc->cursor_height - 1));
-
 		dce_v11_0_show_cursor(crtc);
 
 		dce_v11_0_lock_cursor(crtc, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index b4e4ec630e8c..39df6a50637f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1859,6 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
 	struct amdgpu_device *adev = crtc->dev->dev_private;
 	int xorigin = 0, yorigin = 0;
 
+	int w = amdgpu_crtc->cursor_width;
+
 	amdgpu_crtc->cursor_x = x;
 	amdgpu_crtc->cursor_y = y;
 
@@ -1878,6 +1880,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
 
 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
 	return 0;
 }
@@ -1903,7 +1907,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 				      int32_t hot_y)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *aobj;
 	int ret;
@@ -1944,7 +1947,9 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 	dce_v6_0_lock_cursor(crtc, true);
 
-	if (hot_x != amdgpu_crtc->cursor_hot_x ||
+	if (width != amdgpu_crtc->cursor_width ||
+	    height != amdgpu_crtc->cursor_height ||
+	    hot_x != amdgpu_crtc->cursor_hot_x ||
 	    hot_y != amdgpu_crtc->cursor_hot_y) {
 		int x, y;
 
@@ -1953,16 +1958,10 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 		dce_v6_0_cursor_move_locked(crtc, x, y);
 
-		amdgpu_crtc->cursor_hot_x = hot_x;
-		amdgpu_crtc->cursor_hot_y = hot_y;
-	}
-
-	if (width != amdgpu_crtc->cursor_width ||
-	    height != amdgpu_crtc->cursor_height) {
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (width - 1) << 16 | (height - 1));
 		amdgpu_crtc->cursor_width = width;
 		amdgpu_crtc->cursor_height = height;
+		amdgpu_crtc->cursor_hot_x = hot_x;
+		amdgpu_crtc->cursor_hot_y = hot_y;
 	}
 
 	dce_v6_0_show_cursor(crtc);
@@ -1986,7 +1985,6 @@ unpin:
 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 
 	if (amdgpu_crtc->cursor_bo) {
 		dce_v6_0_lock_cursor(crtc, true);
@@ -1994,10 +1992,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
 		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
 					    amdgpu_crtc->cursor_y);
 
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (amdgpu_crtc->cursor_width - 1) << 16 |
-		       (amdgpu_crtc->cursor_height - 1));
-
 		dce_v6_0_show_cursor(crtc);
 		dce_v6_0_lock_cursor(crtc, false);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 584abe834a3c..28102bb1704d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2363,6 +2363,8 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
 
 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
 	return 0;
 }
@@ -2388,7 +2390,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 				      int32_t hot_y)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *aobj;
 	int ret;
@@ -2429,7 +2430,9 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 	dce_v8_0_lock_cursor(crtc, true);
 
-	if (hot_x != amdgpu_crtc->cursor_hot_x ||
+	if (width != amdgpu_crtc->cursor_width ||
+	    height != amdgpu_crtc->cursor_height ||
+	    hot_x != amdgpu_crtc->cursor_hot_x ||
 	    hot_y != amdgpu_crtc->cursor_hot_y) {
 		int x, y;
 
@@ -2438,16 +2441,10 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 		dce_v8_0_cursor_move_locked(crtc, x, y);
 
-		amdgpu_crtc->cursor_hot_x = hot_x;
-		amdgpu_crtc->cursor_hot_y = hot_y;
-	}
-
-	if (width != amdgpu_crtc->cursor_width ||
-	    height != amdgpu_crtc->cursor_height) {
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (width - 1) << 16 | (height - 1));
 		amdgpu_crtc->cursor_width = width;
 		amdgpu_crtc->cursor_height = height;
+		amdgpu_crtc->cursor_hot_x = hot_x;
+		amdgpu_crtc->cursor_hot_y = hot_y;
 	}
 
 	dce_v8_0_show_cursor(crtc);
@@ -2471,7 +2468,6 @@ unpin:
 static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 
 	if (amdgpu_crtc->cursor_bo) {
 		dce_v8_0_lock_cursor(crtc, true);
@@ -2479,10 +2475,6 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
 		dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
 					    amdgpu_crtc->cursor_y);
 
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (amdgpu_crtc->cursor_width - 1) << 16 |
-		       (amdgpu_crtc->cursor_height - 1));
-
 		dce_v8_0_show_cursor(crtc);
 
 		dce_v8_0_lock_cursor(crtc, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 762f8e82ceb7..e9a176891e13 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =
 
 static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
 {
-	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
-	kfree(amdgpu_encoder->enc_priv);
 	drm_encoder_cleanup(encoder);
-	kfree(amdgpu_encoder);
+	kfree(encoder);
 }
 
 static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 45a573e63d4a..e2b0b1646f99 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -44,6 +44,7 @@ MODULE_FIRMWARE("radeon/tahiti_mc.bin");
 MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
 MODULE_FIRMWARE("radeon/verde_mc.bin");
 MODULE_FIRMWARE("radeon/oland_mc.bin");
+MODULE_FIRMWARE("radeon/si58_mc.bin");
 
 #define MC_SEQ_MISC0__MT__MASK   0xf0000000
 #define MC_SEQ_MISC0__MT__GDDR1  0x10000000
@@ -113,6 +114,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
 	const char *chip_name;
 	char fw_name[30];
 	int err;
+	bool is_58_fw = false;
 
 	DRM_DEBUG("\n");
 
@@ -135,7 +137,14 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
 	default: BUG();
 	}
 
-	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+	/* this memory configuration requires special firmware */
+	if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+		is_58_fw = true;
+
+	if (is_58_fw)
+		snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+	else
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
 	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
 	if (err)
 		goto out;
@@ -463,19 +472,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 	WREG32(mmVM_CONTEXT1_CNTL,
 	       VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
 	       (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
-	       ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) |
-	       VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-	       VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-	       VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-	       VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-	       VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-	       VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-	       VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-	       VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-	       VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-	       VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-	       VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-	       VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
+	       ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
+	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
+		gmc_v6_0_set_fault_enable_default(adev, false);
+	else
+		gmc_v6_0_set_fault_enable_default(adev, true);
 
 	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
 	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -754,7 +755,10 @@ static int gmc_v6_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+	else
+		return 0;
 }
 
 static int gmc_v6_0_sw_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 10bedfac27b8..6e150db8f380 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -64,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin");
 MODULE_FIRMWARE("radeon/oland_k_smc.bin");
 MODULE_FIRMWARE("radeon/hainan_smc.bin");
 MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
 
 union power_info {
 	struct _ATOM_POWERPLAY_INFO info;
@@ -3487,17 +3488,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 		    (adev->pdev->device == 0x6817) ||
 		    (adev->pdev->device == 0x6806))
 			max_mclk = 120000;
-	} else if (adev->asic_type == CHIP_OLAND) {
-		if ((adev->pdev->revision == 0xC7) ||
-		    (adev->pdev->revision == 0x80) ||
-		    (adev->pdev->revision == 0x81) ||
-		    (adev->pdev->revision == 0x83) ||
-		    (adev->pdev->revision == 0x87) ||
-		    (adev->pdev->device == 0x6604) ||
-		    (adev->pdev->device == 0x6605)) {
-			max_sclk = 75000;
-			max_mclk = 80000;
-		}
 	} else if (adev->asic_type == CHIP_HAINAN) {
 		if ((adev->pdev->revision == 0x81) ||
 		    (adev->pdev->revision == 0x83) ||
@@ -3506,7 +3496,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 		    (adev->pdev->device == 0x6665) ||
 		    (adev->pdev->device == 0x6667)) {
 			max_sclk = 75000;
-			max_mclk = 80000;
 		}
 	}
 	/* Apply dpm quirks */
@@ -7713,10 +7702,11 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
 		      ((adev->pdev->device == 0x6660) ||
 		      (adev->pdev->device == 0x6663) ||
 		      (adev->pdev->device == 0x6665) ||
-		      (adev->pdev->device == 0x6667))) ||
-		    ((adev->pdev->revision == 0xc3) &&
-		     (adev->pdev->device == 0x6665)))
+		      (adev->pdev->device == 0x6667))))
 			chip_name = "hainan_k";
+		else if ((adev->pdev->revision == 0xc3) &&
+			 (adev->pdev->device == 0x6665))
+			chip_name = "banks_k_2";
 		else
 			chip_name = "hainan";
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 96444e4d862a..7fb9137dd89b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -40,13 +40,14 @@
 #include "smu/smu_7_0_1_sh_mask.h"
 
 static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
-static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
 static int uvd_v4_2_start(struct amdgpu_device *adev);
 static void uvd_v4_2_stop(struct amdgpu_device *adev);
 static int uvd_v4_2_set_clockgating_state(void *handle,
 				enum amd_clockgating_state state);
+static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
+			     bool sw_mode);
 /**
  * uvd_v4_2_ring_get_rptr - get read pointer
  *
@@ -140,7 +141,8 @@ static int uvd_v4_2_sw_fini(void *handle)
 
 	return r;
 }
-
+static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
+				 bool enable);
 /**
  * uvd_v4_2_hw_init - start and test UVD block
  *
@@ -155,8 +157,7 @@ static int uvd_v4_2_hw_init(void *handle)
 	uint32_t tmp;
 	int r;
 
-	uvd_v4_2_init_cg(adev);
-	uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE);
+	uvd_v4_2_enable_mgcg(adev, true);
 	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
 	r = uvd_v4_2_start(adev);
 	if (r)
@@ -266,11 +267,13 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring = &adev->uvd.ring;
 	uint32_t rb_bufsz;
 	int i, j, r;
-
 	/* disable byte swapping */
 	u32 lmi_swap_cntl = 0;
 	u32 mp_swap_cntl = 0;
 
+	WREG32(mmUVD_CGC_GATE, 0);
+	uvd_v4_2_set_dcm(adev, true);
+
 	uvd_v4_2_mc_resume(adev);
 
 	/* disable interupt */
@@ -406,6 +409,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev)
 
 	/* Unstall UMC and register bus */
 	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+	uvd_v4_2_set_dcm(adev, false);
 }
 
 /**
@@ -619,19 +624,6 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
 	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
 }
 
-static void uvd_v4_2_init_cg(struct amdgpu_device *adev)
-{
-	bool hw_mode = true;
-
-	if (hw_mode) {
-		uvd_v4_2_set_dcm(adev, false);
-	} else {
-		u32 tmp = RREG32(mmUVD_CGC_CTRL);
-		tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
-		WREG32(mmUVD_CGC_CTRL, tmp);
-	}
-}
-
 static bool uvd_v4_2_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -685,17 +677,6 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
 static int uvd_v4_2_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
-	bool gate = false;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
-		return 0;
-
-	if (state == AMD_CG_STATE_GATE)
-		gate = true;
-
-	uvd_v4_2_enable_mgcg(adev, gate);
-
 	return 0;
 }
 
@@ -711,9 +692,6 @@ static int uvd_v4_2_set_powergating_state(void *handle,
 	 */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
-		return 0;
-
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v4_2_stop(adev);
 		return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 5fb0b7f5c065..37ca685e5a9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -43,9 +43,13 @@
 
 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
+#define GRBM_GFX_INDEX__VCE_ALL_PIPE		0x07
+
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
+#define mmGRBM_GFX_INDEX_DEFAULT		0xE0000000
+
 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02
 
 #define VCE_V3_0_FW_SIZE	(384 * 1024)
@@ -54,6 +58,9 @@
 
 #define FW_52_8_3	((52 << 24) | (8 << 16) | (3 << 8))
 
+#define GET_VCE_INSTANCE(i)	((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
+					| GRBM_GFX_INDEX__VCE_ALL_PIPE)
+
 static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
 static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -175,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
 	WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
 
 	data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
-	data &= ~0xffc00000;
+	data &= ~0x3ff;
 	WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
 
 	data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
@@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 		if (adev->vce.harvest_config & (1 << idx))
 			continue;
 
-		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
 		vce_v3_0_mc_resume(adev, idx);
 		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
 
@@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 		}
 	}
 
-	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	return 0;
@@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
 		if (adev->vce.harvest_config & (1 << idx))
 			continue;
 
-		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
 
 		if (adev->asic_type >= CHIP_STONEY)
 			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
@@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
 		vce_v3_0_set_vce_sw_clock_gating(adev, false);
 	}
 
-	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	return 0;
@@ -586,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle)
 	 * VCE team suggest use bit 3--bit 6 for busy status check
 	 */
 	mutex_lock(&adev->grbm_idx_mutex);
-	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
 	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
 	}
-	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
+	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
 	}
-	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	if (srbm_soft_reset) {
@@ -734,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 		if (adev->vce.harvest_config & (1 << i))
 			continue;
 
-		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
 
 		if (enable) {
 			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -753,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 			vce_v3_0_set_vce_sw_clock_gating(adev, enable);
 	}
 
-	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index b0c63c5f54c9..6bb79c94cb9f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 			cgs_set_clockgating_state(
 						hwmgr->device,
 						AMD_IP_BLOCK_TYPE_VCE,
-						AMD_CG_STATE_UNGATE);
+						AMD_CG_STATE_GATE);
 			cgs_set_powergating_state(
 						hwmgr->device,
 						AMD_IP_BLOCK_TYPE_VCE,
@@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 			cgs_set_clockgating_state(
 						hwmgr->device,
 						AMD_IP_BLOCK_TYPE_VCE,
-						AMD_PG_STATE_GATE);
+						AMD_PG_STATE_UNGATE);
 			cz_dpm_update_vce_dpm(hwmgr);
 			cz_enable_disable_vce_dpm(hwmgr, true);
 			return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 4b14f259a147..0fb4e8c8f5e1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1402,14 +1402,22 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
 					cz_hwmgr->vce_dpm.hard_min_clk,
 					PPSMC_MSG_SetEclkHardMin));
 	} else {
-		/*EPR# 419220 -HW limitation to to */
-		cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
-		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-			PPSMC_MSG_SetEclkHardMin,
-			cz_get_eclk_level(hwmgr,
-				cz_hwmgr->vce_dpm.hard_min_clk,
-				PPSMC_MSG_SetEclkHardMin));
-
+		/*Program HardMin based on the vce_arbiter.ecclk */
+		if (hwmgr->vce_arbiter.ecclk == 0) {
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_SetEclkHardMin, 0);
+			/* disable ECLK DPM 0. Otherwise VCE could hang if
+			 * switching SCLK from DPM 0 to 6/7 */
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_SetEclkSoftMin, 1);
+		} else {
+			cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_SetEclkHardMin,
+					cz_get_eclk_level(hwmgr,
+						cz_hwmgr->vce_dpm.hard_min_clk,
+						PPSMC_MSG_SetEclkHardMin));
+		}
 	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 908011d2c8f5..7abda94fc2cf 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -113,6 +113,7 @@ struct ast_private {
 	struct ttm_bo_kmap_obj cache_kmap;
 	int next_cursor;
 	bool support_wide_screen;
+	bool DisableP2A;
 
 	enum ast_tx_chip tx_chip_type;
 	u8 dp501_maxclk;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f75c6421db62..533e762d036d 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 	} else
 		*need_post = false;
 
+	/* Check P2A Access */
+	ast->DisableP2A = true;
+	data = ast_read32(ast, 0xf004);
+	if (data != 0xFFFFFFFF)
+		ast->DisableP2A = false;
+
 	/* Check if we support wide screen */
 	switch (ast->chip) {
 	case AST1180:
@@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 			ast->support_wide_screen = true;
 		else {
 			ast->support_wide_screen = false;
-			/* Read SCU7c (silicon revision register) */
-			ast_write32(ast, 0xf004, 0x1e6e0000);
-			ast_write32(ast, 0xf000, 0x1);
-			data = ast_read32(ast, 0x1207c);
-			data &= 0x300;
-			if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
-				ast->support_wide_screen = true;
-			if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
-				ast->support_wide_screen = true;
+			if (ast->DisableP2A == false) {
+				/* Read SCU7c (silicon revision register) */
+				ast_write32(ast, 0xf004, 0x1e6e0000);
+				ast_write32(ast, 0xf000, 0x1);
+				data = ast_read32(ast, 0x1207c);
+				data &= 0x300;
+				if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+					ast->support_wide_screen = true;
+				if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+					ast->support_wide_screen = true;
+			}
 		}
 		break;
 	}
@@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev)
 	uint32_t data, data2;
 	uint32_t denum, num, div, ref_pll;
 
-	ast_write32(ast, 0xf004, 0x1e6e0000);
-	ast_write32(ast, 0xf000, 0x1);
-
-
-	ast_write32(ast, 0x10000, 0xfc600309);
-
-	do {
-		if (pci_channel_offline(dev->pdev))
-			return -EIO;
-	} while (ast_read32(ast, 0x10000) != 0x01);
-	data = ast_read32(ast, 0x10004);
-
-	if (data & 0x40)
-		ast->dram_bus_width = 16;
-	else
-		ast->dram_bus_width = 32;
-
-	if (ast->chip == AST2300 || ast->chip == AST2400) {
-		switch (data & 0x03) {
-		case 0:
-			ast->dram_type = AST_DRAM_512Mx16;
-			break;
-		default:
-		case 1:
-			ast->dram_type = AST_DRAM_1Gx16;
-			break;
-		case 2:
-			ast->dram_type = AST_DRAM_2Gx16;
-			break;
-		case 3:
-			ast->dram_type = AST_DRAM_4Gx16;
-			break;
-		}
-	} else {
-		switch (data & 0x0c) {
-		case 0:
-		case 4:
-			ast->dram_type = AST_DRAM_512Mx16;
-			break;
-		case 8:
-			if (data & 0x40)
-				ast->dram_type = AST_DRAM_1Gx16;
-			else
-				ast->dram_type = AST_DRAM_512Mx32;
-			break;
-		case 0xc:
-			ast->dram_type = AST_DRAM_1Gx32;
-			break;
-		}
-	}
-
-	data = ast_read32(ast, 0x10120);
-	data2 = ast_read32(ast, 0x10170);
-	if (data2 & 0x2000)
-		ref_pll = 14318;
-	else
-		ref_pll = 12000;
-
-	denum = data & 0x1f;
-	num = (data & 0x3fe0) >> 5;
-	data = (data & 0xc000) >> 14;
-	switch (data) {
-	case 3:
-		div = 0x4;
-		break;
-	case 2:
-	case 1:
-		div = 0x2;
-		break;
-	default:
-		div = 0x1;
-		break;
-	}
-	ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
+	if (ast->DisableP2A)
+	{
+		ast->dram_bus_width = 16;
+		ast->dram_type = AST_DRAM_1Gx16;
+		ast->mclk = 396;
+	}
+	else
+	{
+		ast_write32(ast, 0xf004, 0x1e6e0000);
+		ast_write32(ast, 0xf000, 0x1);
+		data = ast_read32(ast, 0x10004);
+
+		if (data & 0x40)
+			ast->dram_bus_width = 16;
+		else
+			ast->dram_bus_width = 32;
+
+		if (ast->chip == AST2300 || ast->chip == AST2400) {
+			switch (data & 0x03) {
+			case 0:
+				ast->dram_type = AST_DRAM_512Mx16;
+				break;
+			default:
+			case 1:
+				ast->dram_type = AST_DRAM_1Gx16;
+				break;
+			case 2:
+				ast->dram_type = AST_DRAM_2Gx16;
+				break;
+			case 3:
+				ast->dram_type = AST_DRAM_4Gx16;
+				break;
+			}
+		} else {
+			switch (data & 0x0c) {
+			case 0:
+			case 4:
+				ast->dram_type = AST_DRAM_512Mx16;
+				break;
+			case 8:
+				if (data & 0x40)
+					ast->dram_type = AST_DRAM_1Gx16;
+				else
+					ast->dram_type = AST_DRAM_512Mx32;
+				break;
+			case 0xc:
+				ast->dram_type = AST_DRAM_1Gx32;
+				break;
+			}
+		}
+
+		data = ast_read32(ast, 0x10120);
+		data2 = ast_read32(ast, 0x10170);
+		if (data2 & 0x2000)
+			ref_pll = 14318;
+		else
+			ref_pll = 12000;
+
+		denum = data & 0x1f;
+		num = (data & 0x3fe0) >> 5;
+		data = (data & 0xc000) >> 14;
+		switch (data) {
+		case 3:
+			div = 0x4;
+			break;
+		case 2:
+		case 1:
+			div = 0x2;
+			break;
+		default:
+			div = 0x1;
+			break;
+		}
+		ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
+	}
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 810c51d92b99..5331ee1df086 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev)
 	ast_open_key(ast);
 	ast_set_def_ext_reg(dev);
 
-	if (ast->chip == AST2300 || ast->chip == AST2400)
-		ast_init_dram_2300(dev);
-	else
-		ast_init_dram_reg(dev);
+	if (ast->DisableP2A == false)
+	{
+		if (ast->chip == AST2300 || ast->chip == AST2400)
+			ast_init_dram_2300(dev);
+		else
+			ast_init_dram_reg(dev);
 
-	ast_init_3rdtx(dev);
+		ast_init_3rdtx(dev);
+	}
+	else
+	{
+		if (ast->tx_chip_type != AST_TX_NONE)
+			ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);	/* Enable DVO */
+	}
 }
 
 /* AST 2300 DRAM settings */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index eb9bf8786c24..18eefdcbf1ba 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1382,6 +1382,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
 
 	pm_runtime_enable(dev);
 
+	pm_runtime_get_sync(dev);
 	phy_power_on(dp->phy);
 
 	analogix_dp_init_dp(dp);
@@ -1414,9 +1415,15 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
 		goto err_disable_pm_runtime;
 	}
 
+	phy_power_off(dp->phy);
+	pm_runtime_put(dev);
+
 	return 0;
 
 err_disable_pm_runtime:
+
+	phy_power_off(dp->phy);
+	pm_runtime_put(dev);
 	pm_runtime_disable(dev);
 
 	return ret;
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index 04b3c161dfae..7f4cc6e172ab 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -7,3 +7,12 @@ config DRM_CIRRUS_QEMU
 	  This is a KMS driver for emulated cirrus device in qemu.
 	  It is *NOT* intended for real cirrus devices. This requires
 	  the modesetting userspace X.org driver.
+
+	  Cirrus is obsolete, the hardware was designed in the 90ies
+	  and can't keep up with todays needs. More background:
+	  https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
+
+	  Better alternatives are:
+	    - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
+	    - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
+	    - virtio (DRM_VIRTIO_GPU), qemu -vga virtio)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 60697482b94c..50f5cf7b69d1 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -291,15 +291,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
 EXPORT_SYMBOL(drm_atomic_get_crtc_state);
 
 static void set_out_fence_for_crtc(struct drm_atomic_state *state,
-				   struct drm_crtc *crtc, s64 __user *fence_ptr)
+				   struct drm_crtc *crtc, s32 __user *fence_ptr)
 {
 	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
 }
 
-static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
 					  struct drm_crtc *crtc)
 {
-	s64 __user *fence_ptr;
+	s32 __user *fence_ptr;
 
 	fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
 	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
@@ -512,7 +512,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 		state->color_mgmt_changed |= replaced;
 		return ret;
 	} else if (property == config->prop_out_fence_ptr) {
-		s64 __user *fence_ptr = u64_to_user_ptr(val);
+		s32 __user *fence_ptr = u64_to_user_ptr(val);
 
 		if (!fence_ptr)
 			return 0;
@@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb);
  */
 
 struct drm_out_fence_state {
-	s64 __user *out_fence_ptr;
+	s32 __user *out_fence_ptr;
 	struct sync_file *sync_file;
 	int fd;
 };
@@ -1952,7 +1952,7 @@ static int prepare_crtc_signaling(struct drm_device *dev,
 		return 0;
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		u64 __user *fence_ptr;
+		s32 __user *fence_ptr;
 
 		fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
 
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index ac6a35212501..e6b19bc9021a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1460,6 +1460,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
 		return NULL;
 
 	mode->type |= DRM_MODE_TYPE_USERDEF;
+	/* fix up 1368x768: GFT/CVT can't express 1366 width due to alignment */
+	if (cmd->xres == 1366 && mode->hdisplay == 1368) {
+		mode->hdisplay = 1366;
+		mode->hsync_start--;
+		mode->hsync_end--;
+		drm_mode_set_name(mode);
+	}
 	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 	return mode;
 }
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index ac953f037be7..cf8f0128c161 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -143,8 +143,18 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
 	}
 
 	if (dev->mode_config.delayed_event) {
+		/*
+		 * FIXME:
+		 *
+		 * Use short (1s) delay to handle the initial delayed event.
+		 * This delay should not be needed, but Optimus/nouveau will
+		 * fail in a mysterious way if the delayed event is handled as
+		 * soon as possible like it is done in
+		 * drm_helper_probe_single_connector_modes() in case the poll
+		 * was enabled before.
+		 */
 		poll = true;
-		delay = 0;
+		delay = HZ;
 	}
 
 	if (poll)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 169ac96e8f08..fe0e85b41310 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -116,9 +116,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 		struct list_head list;
 		bool found;
 
+		/*
+		 * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
+		 * drm_mm into giving out a low IOVA after address space
+		 * rollover. This needs a proper fix.
+		 */
 		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
 			size, 0, mmu->last_iova, ~0UL,
-			DRM_MM_SEARCH_DEFAULT);
+			mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);
 
 		if (ret != -ENOSPC)
 			break;
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 6ca1f3117fe8..75eeb831ed6a 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -46,7 +46,8 @@ enum decon_flag_bits {
 	BIT_CLKS_ENABLED,
 	BIT_IRQS_ENABLED,
 	BIT_WIN_UPDATED,
-	BIT_SUSPENDED
+	BIT_SUSPENDED,
+	BIT_REQUEST_UPDATE
 };
 
 struct decon_context {
@@ -141,12 +142,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
 		m->crtc_vsync_end = m->crtc_vsync_start + 1;
 	}
 
-	decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0);
-
-	/* enable clock gate */
-	val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F;
-	writel(val, ctx->addr + DECON_CMU);
-
 	if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))
 		decon_setup_trigger(ctx);
 
@@ -315,6 +310,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
 
 	/* window enable */
 	decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
+	set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
 }
 
 static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -327,6 +323,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
327 return; 323 return;
328 324
329 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0); 325 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
326 set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
330} 327}
331 328
332static void decon_atomic_flush(struct exynos_drm_crtc *crtc) 329static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -340,8 +337,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
340 for (i = ctx->first_win; i < WINDOWS_NR; i++) 337 for (i = ctx->first_win; i < WINDOWS_NR; i++)
341 decon_shadow_protect_win(ctx, i, false); 338 decon_shadow_protect_win(ctx, i, false);
342 339
343 /* standalone update */ 340 if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
344 decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); 341 decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
345 342
346 if (ctx->out_type & IFTYPE_I80) 343 if (ctx->out_type & IFTYPE_I80)
347 set_bit(BIT_WIN_UPDATED, &ctx->flags); 344 set_bit(BIT_WIN_UPDATED, &ctx->flags);
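
The decon change is an instance of a common request/flush pattern: producers record that the hardware needs a standalone update, and the flush path triggers it at most once. A generic sketch with the kernel's atomic bitops, using the names from the hunks above (ctx->flags is an unsigned long bit field):

#include <linux/bitops.h>

	/* producer side: plane update and disable paths */
	set_bit(BIT_REQUEST_UPDATE, &ctx->flags);

	/* consumer side: atomic flush fires the update only when requested */
	if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
		decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
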
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 0d41ebc4aea6..f7bce8603958 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -37,13 +37,6 @@
37#include "i915_drv.h" 37#include "i915_drv.h"
38#include "gvt.h" 38#include "gvt.h"
39 39
40#define MB_TO_BYTES(mb) ((mb) << 20ULL)
41#define BYTES_TO_MB(b) ((b) >> 20ULL)
42
43#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
44#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
45#define HOST_FENCE 4
46
47static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) 40static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
48{ 41{
49 struct intel_gvt *gvt = vgpu->gvt; 42 struct intel_gvt *gvt = vgpu->gvt;
@@ -165,6 +158,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
165 POSTING_READ(fence_reg_lo); 158 POSTING_READ(fence_reg_lo);
166} 159}
167 160
161static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
162{
163 int i;
164
165 for (i = 0; i < vgpu_fence_sz(vgpu); i++)
166 intel_vgpu_write_fence(vgpu, i, 0);
167}
168
168static void free_vgpu_fence(struct intel_vgpu *vgpu) 169static void free_vgpu_fence(struct intel_vgpu *vgpu)
169{ 170{
170 struct intel_gvt *gvt = vgpu->gvt; 171 struct intel_gvt *gvt = vgpu->gvt;
@@ -178,9 +179,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
178 intel_runtime_pm_get(dev_priv); 179 intel_runtime_pm_get(dev_priv);
179 180
180 mutex_lock(&dev_priv->drm.struct_mutex); 181 mutex_lock(&dev_priv->drm.struct_mutex);
182 _clear_vgpu_fence(vgpu);
181 for (i = 0; i < vgpu_fence_sz(vgpu); i++) { 183 for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
182 reg = vgpu->fence.regs[i]; 184 reg = vgpu->fence.regs[i];
183 intel_vgpu_write_fence(vgpu, i, 0);
184 list_add_tail(&reg->link, 185 list_add_tail(&reg->link,
185 &dev_priv->mm.fence_list); 186 &dev_priv->mm.fence_list);
186 } 187 }
@@ -208,13 +209,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
208 continue; 209 continue;
209 list_del(pos); 210 list_del(pos);
210 vgpu->fence.regs[i] = reg; 211 vgpu->fence.regs[i] = reg;
211 intel_vgpu_write_fence(vgpu, i, 0);
212 if (++i == vgpu_fence_sz(vgpu)) 212 if (++i == vgpu_fence_sz(vgpu))
213 break; 213 break;
214 } 214 }
215 if (i != vgpu_fence_sz(vgpu)) 215 if (i != vgpu_fence_sz(vgpu))
216 goto out_free_fence; 216 goto out_free_fence;
217 217
218 _clear_vgpu_fence(vgpu);
219
218 mutex_unlock(&dev_priv->drm.struct_mutex); 220 mutex_unlock(&dev_priv->drm.struct_mutex);
219 intel_runtime_pm_put(dev_priv); 221 intel_runtime_pm_put(dev_priv);
220 return 0; 222 return 0;
@@ -314,6 +316,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
314} 316}
315 317
316/** 318/**
319 * intel_vgpu_reset_resource - reset resource state owned by a vGPU
320 * @vgpu: a vGPU
321 *
322 * This function is used to reset resource state owned by a vGPU.
323 *
324 */
325void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
326{
327 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
328
329 intel_runtime_pm_get(dev_priv);
330 _clear_vgpu_fence(vgpu);
331 intel_runtime_pm_put(dev_priv);
332}
333
334/**
317 * intel_alloc_vgpu_resource - allocate HW resource for a vGPU 335 * intel_alloc_vgpu_resource - allocate HW resource for a vGPU
318 * @vgpu: vGPU 336 * @vgpu: vGPU
319 * @param: vGPU creation params 337 * @param: vGPU creation params
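
The net effect of the aperture_gm.c hunks is that every path touching fence registers (allocation, free, and the new intel_vgpu_reset_resource()) funnels through one helper, always inside a runtime-PM wake window. The invariant, in sketch form:

	/* Fence registers must only be written while the device is awake. */
	intel_runtime_pm_get(dev_priv);
	_clear_vgpu_fence(vgpu);	/* writes 0 to every fence of this vGPU */
	intel_runtime_pm_put(dev_priv);
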
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 711c31c8d8b4..4a6a2ed65732 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
282 } 282 }
283 return 0; 283 return 0;
284} 284}
285
286/**
 287	 * intel_vgpu_init_cfg_space - init vGPU configuration space when creating a vGPU
 288	 *
 289	 * @vgpu: a vGPU
 290	 * @primary: whether the vGPU is presented as the primary display adapter
291 *
292 */
293void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
294 bool primary)
295{
296 struct intel_gvt *gvt = vgpu->gvt;
297 const struct intel_gvt_device_info *info = &gvt->device_info;
298 u16 *gmch_ctl;
299 int i;
300
301 memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
302 info->cfg_space_size);
303
304 if (!primary) {
305 vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
306 INTEL_GVT_PCI_CLASS_VGA_OTHER;
307 vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
308 INTEL_GVT_PCI_CLASS_VGA_OTHER;
309 }
310
 311	/* Show the guest that there isn't any stolen memory. */
312 gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
313 *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
314
315 intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
316 gvt_aperture_pa_base(gvt), true);
317
318 vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
319 | PCI_COMMAND_MEMORY
320 | PCI_COMMAND_MASTER);
321 /*
 322	 * Clear the upper 32 bits of the BARs and let the guest assign new values
323 */
324 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
325 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
326 memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
327
328 for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
329 vgpu->cfg_space.bar[i].size = pci_resource_len(
330 gvt->dev_priv->drm.pdev, i * 2);
331 vgpu->cfg_space.bar[i].tracked = false;
332 }
333}
334
335/**
336 * intel_vgpu_reset_cfg_space - reset vGPU configuration space
337 *
338 * @vgpu: a vGPU
339 *
340 */
341void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
342{
343 u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
344 bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
345 INTEL_GVT_PCI_CLASS_VGA_OTHER;
346
347 if (cmd & PCI_COMMAND_MEMORY) {
348 trap_gttmmio(vgpu, false);
349 map_aperture(vgpu, false);
350 }
351
 352	/*
 353	 * Currently we only do such a reset when the vGPU is not
 354	 * owned by any VM, so we simply restore the entire cfg
 355	 * space to its default values.
356 */
357 intel_vgpu_init_cfg_space(vgpu, primary);
358}
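
One subtlety in intel_vgpu_init_cfg_space() above: the graphics device's BARs are 64-bit, so each occupies two PCI resource slots, which is presumably why the size lookup strides by two. Annotated:

	for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
		/* 64-bit BARs take two config-space slots, so BAR i maps
		 * to PCI resource index i * 2 (0, 2, 4, ...). */
		vgpu->cfg_space.bar[i].size =
			pci_resource_len(gvt->dev_priv->drm.pdev, i * 2);
		vgpu->cfg_space.bar[i].tracked = false;
	}
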
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index d26a092c70e8..e4563984cb1e 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -481,7 +481,6 @@ struct parser_exec_state {
481 (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2) 481 (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
482 482
483static unsigned long bypass_scan_mask = 0; 483static unsigned long bypass_scan_mask = 0;
484static bool bypass_batch_buffer_scan = true;
485 484
486/* ring ALL, type = 0 */ 485/* ring ALL, type = 0 */
487static struct sub_op_bits sub_op_mi[] = { 486static struct sub_op_bits sub_op_mi[] = {
@@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
1525{ 1524{
1526 struct intel_gvt *gvt = s->vgpu->gvt; 1525 struct intel_gvt *gvt = s->vgpu->gvt;
1527 1526
1528 if (bypass_batch_buffer_scan)
1529 return 0;
1530
1531 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { 1527 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
1532 /* BDW decides privilege based on address space */ 1528 /* BDW decides privilege based on address space */
1533 if (cmd_val(s, 0) & (1 << 8)) 1529 if (cmd_val(s, 0) & (1 << 8))
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f32bb6f6495c..34083731669d 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload)
364#define get_desc_from_elsp_dwords(ed, i) \ 364#define get_desc_from_elsp_dwords(ed, i) \
365 ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2])) 365 ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
366 366
367
368#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
369#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
370static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
371 unsigned long add, int gmadr_bytes)
372{
373 if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
374 return -1;
375
376 *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
377 BATCH_BUFFER_ADDR_MASK;
378 if (gmadr_bytes == 8) {
379 *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
380 add & BATCH_BUFFER_ADDR_HIGH_MASK;
381 }
382
383 return 0;
384}
385
386static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) 367static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
387{ 368{
388 int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; 369 const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
370 struct intel_shadow_bb_entry *entry_obj;
389 371
390 /* pin the gem object to ggtt */ 372 /* pin the gem object to ggtt */
391 if (!list_empty(&workload->shadow_bb)) { 373 list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
392 struct intel_shadow_bb_entry *entry_obj = 374 struct i915_vma *vma;
393 list_first_entry(&workload->shadow_bb,
394 struct intel_shadow_bb_entry,
395 list);
396 struct intel_shadow_bb_entry *temp;
397 375
398 list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb, 376 vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
399 list) { 377 if (IS_ERR(vma)) {
400 struct i915_vma *vma; 378 gvt_err("Cannot pin\n");
401 379 return;
402 vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
403 4, 0);
404 if (IS_ERR(vma)) {
405 gvt_err("Cannot pin\n");
406 return;
407 }
408
409 /* FIXME: we are not tracking our pinned VMA leaving it
410 * up to the core to fix up the stray pin_count upon
411 * free.
412 */
413
414 /* update the relocate gma with shadow batch buffer*/
415 set_gma_to_bb_cmd(entry_obj,
416 i915_ggtt_offset(vma),
417 gmadr_bytes);
418 } 380 }
381
382 /* FIXME: we are not tracking our pinned VMA leaving it
383 * up to the core to fix up the stray pin_count upon
384 * free.
385 */
386
387 /* update the relocate gma with shadow batch buffer*/
388 entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
389 if (gmadr_bytes == 8)
390 entry_obj->bb_start_cmd_va[2] = 0;
419 } 391 }
420} 392}
421 393
@@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
826 INIT_LIST_HEAD(&vgpu->workload_q_head[i]); 798 INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
827 } 799 }
828 800
829 vgpu->workloads = kmem_cache_create("gvt-g vgpu workload", 801 vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
830 sizeof(struct intel_vgpu_workload), 0, 802 sizeof(struct intel_vgpu_workload), 0,
831 SLAB_HWCACHE_ALIGN, 803 SLAB_HWCACHE_ALIGN,
832 NULL); 804 NULL);
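
Two easy-to-miss points in the execlist.c hunk: the slab cache is renamed because cache names surface as identifiers in /proc/slabinfo and sysfs, where spaces are awkward at best; and with bb_start_cmd_va retyped to u32 * (see the scheduler.h hunk below), the batch-buffer relocation becomes two plain dword stores in place of the removed set_gma_to_bb_cmd() pointer arithmetic:

	/* bb_start_cmd_va points at the MI_BATCH_BUFFER_START command:
	 * dword 1 holds the low 32 bits of the graphics address, dword 2
	 * the high bits when the command carries an 8-byte address. */
	entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
	if (gmadr_bytes == 8)
		entry_obj->bb_start_cmd_va[2] = 0;
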
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 6c5fdf5b2ce2..47dec4acf7ff 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -240,15 +240,8 @@ static inline int get_pse_type(int type)
240static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) 240static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
241{ 241{
242 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; 242 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
243 u64 pte;
244 243
245#ifdef readq 244 return readq(addr);
246 pte = readq(addr);
247#else
248 pte = ioread32(addr);
249 pte |= (u64)ioread32(addr + 4) << 32;
250#endif
251 return pte;
252} 245}
253 246
254static void write_pte64(struct drm_i915_private *dev_priv, 247static void write_pte64(struct drm_i915_private *dev_priv,
@@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv,
256{ 249{
257 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; 250 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
258 251
259#ifdef writeq
260 writeq(pte, addr); 252 writeq(pte, addr);
261#else 253
262 iowrite32((u32)pte, addr);
263 iowrite32(pte >> 32, addr + 4);
264#endif
265 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 254 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
266 POSTING_READ(GFX_FLSH_CNTL_GEN6); 255 POSTING_READ(GFX_FLSH_CNTL_GEN6);
267} 256}
@@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
1380 info->gtt_entry_size; 1369 info->gtt_entry_size;
1381 mem = kzalloc(mm->has_shadow_page_table ? 1370 mem = kzalloc(mm->has_shadow_page_table ?
1382 mm->page_table_entry_size * 2 1371 mm->page_table_entry_size * 2
1383 : mm->page_table_entry_size, 1372 : mm->page_table_entry_size, GFP_KERNEL);
1384 GFP_ATOMIC);
1385 if (!mem) 1373 if (!mem)
1386 return -ENOMEM; 1374 return -ENOMEM;
1387 mm->virtual_page_table = mem; 1375 mm->virtual_page_table = mem;
@@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
1532 struct intel_vgpu_mm *mm; 1520 struct intel_vgpu_mm *mm;
1533 int ret; 1521 int ret;
1534 1522
1535 mm = kzalloc(sizeof(*mm), GFP_ATOMIC); 1523 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
1536 if (!mm) { 1524 if (!mm) {
1537 ret = -ENOMEM; 1525 ret = -ENOMEM;
1538 goto fail; 1526 goto fail;
@@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
1886 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1874 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1887 int page_entry_num = GTT_PAGE_SIZE >> 1875 int page_entry_num = GTT_PAGE_SIZE >>
1888 vgpu->gvt->device_info.gtt_entry_size_shift; 1876 vgpu->gvt->device_info.gtt_entry_size_shift;
1889 struct page *scratch_pt; 1877 void *scratch_pt;
1890 unsigned long mfn; 1878 unsigned long mfn;
1891 int i; 1879 int i;
1892 void *p;
1893 1880
1894 if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) 1881 if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
1895 return -EINVAL; 1882 return -EINVAL;
1896 1883
1897 scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); 1884 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
1898 if (!scratch_pt) { 1885 if (!scratch_pt) {
1899 gvt_err("fail to allocate scratch page\n"); 1886 gvt_err("fail to allocate scratch page\n");
1900 return -ENOMEM; 1887 return -ENOMEM;
1901 } 1888 }
1902 1889
1903 p = kmap_atomic(scratch_pt); 1890 mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
1904 mfn = intel_gvt_hypervisor_virt_to_mfn(p);
1905 if (mfn == INTEL_GVT_INVALID_ADDR) { 1891 if (mfn == INTEL_GVT_INVALID_ADDR) {
1906 gvt_err("fail to translate vaddr:0x%llx\n", (u64)p); 1892 gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
1907 kunmap_atomic(p); 1893 free_page((unsigned long)scratch_pt);
1908 __free_page(scratch_pt);
1909 return -EFAULT; 1894 return -EFAULT;
1910 } 1895 }
1911 gtt->scratch_pt[type].page_mfn = mfn; 1896 gtt->scratch_pt[type].page_mfn = mfn;
1912 gtt->scratch_pt[type].page = scratch_pt; 1897 gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
1913 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", 1898 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
1914 vgpu->id, type, mfn); 1899 vgpu->id, type, mfn);
1915 1900
@@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
1918 * scratch_pt[type] indicate the scratch pt/scratch page used by the 1903 * scratch_pt[type] indicate the scratch pt/scratch page used by the
1919 * 'type' pt. 1904 * 'type' pt.
1920 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by 1905 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
1921 * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self 1906 * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self
1922 * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. 1907 * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn.
1923 */ 1908 */
1924 if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { 1909 if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
@@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
1936 se.val64 |= PPAT_CACHED_INDEX; 1921 se.val64 |= PPAT_CACHED_INDEX;
1937 1922
1938 for (i = 0; i < page_entry_num; i++) 1923 for (i = 0; i < page_entry_num; i++)
1939 ops->set_entry(p, &se, i, false, 0, vgpu); 1924 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
1940 } 1925 }
1941 1926
1942 kunmap_atomic(p);
1943
1944 return 0; 1927 return 0;
1945} 1928}
1946 1929
@@ -2208,7 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
2208int intel_gvt_init_gtt(struct intel_gvt *gvt) 2191int intel_gvt_init_gtt(struct intel_gvt *gvt)
2209{ 2192{
2210 int ret; 2193 int ret;
2211 void *page_addr; 2194 void *page;
2212 2195
2213 gvt_dbg_core("init gtt\n"); 2196 gvt_dbg_core("init gtt\n");
2214 2197
@@ -2221,17 +2204,14 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
2221 return -ENODEV; 2204 return -ENODEV;
2222 } 2205 }
2223 2206
2224 gvt->gtt.scratch_ggtt_page = 2207 page = (void *)get_zeroed_page(GFP_KERNEL);
2225 alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); 2208 if (!page) {
2226 if (!gvt->gtt.scratch_ggtt_page) {
2227 gvt_err("fail to allocate scratch ggtt page\n"); 2209 gvt_err("fail to allocate scratch ggtt page\n");
2228 return -ENOMEM; 2210 return -ENOMEM;
2229 } 2211 }
2212 gvt->gtt.scratch_ggtt_page = virt_to_page(page);
2230 2213
2231 page_addr = page_address(gvt->gtt.scratch_ggtt_page); 2214 gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
2232
2233 gvt->gtt.scratch_ggtt_mfn =
2234 intel_gvt_hypervisor_virt_to_mfn(page_addr);
2235 if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) { 2215 if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
2236 gvt_err("fail to translate scratch ggtt page\n"); 2216 gvt_err("fail to translate scratch ggtt page\n");
2237 __free_page(gvt->gtt.scratch_ggtt_page); 2217 __free_page(gvt->gtt.scratch_ggtt_page);
@@ -2297,3 +2277,30 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
2297 for (offset = 0; offset < num_entries; offset++) 2277 for (offset = 0; offset < num_entries; offset++)
2298 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); 2278 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
2299} 2279}
2280
2281/**
 2282	 * intel_vgpu_reset_gtt - reset all GTT-related state
2283 * @vgpu: a vGPU
2284 * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
2285 *
 2286	 * This function is called from the vfio core to reset all
 2287	 * GTT-related state, including the GGTT, PPGTT and scratch pages.
2288 *
2289 */
2290void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
2291{
2292 int i;
2293
2294 ppgtt_free_all_shadow_page(vgpu);
2295 if (!dmlr)
2296 return;
2297
2298 intel_vgpu_reset_ggtt(vgpu);
2299
2300 /* clear scratch page for security */
2301 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2302 if (vgpu->gtt.scratch_pt[i].page != NULL)
2303 memset(page_address(vgpu->gtt.scratch_pt[i].page),
2304 0, PAGE_SIZE);
2305 }
2306}
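
The gtt.c hunks share one theme: these paths may sleep, so GFP_ATOMIC becomes GFP_KERNEL, and the scratch pages switch from alloc_page() plus kmap_atomic() to get_zeroed_page(), which returns a directly usable kernel virtual address. A condensed sketch of the new allocation shape (logging trimmed):

	void *scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
	unsigned long mfn;

	if (!scratch_pt)
		return -ENOMEM;

	mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
	if (mfn == INTEL_GVT_INVALID_ADDR) {
		free_page((unsigned long)scratch_pt);
		return -EFAULT;
	}

	/* keep the struct page so the existing teardown path still works */
	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
	gtt->scratch_pt[type].page_mfn = mfn;
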
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index b315ab3593ec..f88eb5e89bea 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -208,6 +208,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
208void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); 208void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
209 209
210extern int intel_gvt_init_gtt(struct intel_gvt *gvt); 210extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
211extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
211extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); 212extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
212 213
213extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, 214extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 398877c3d2fd..e6bf5c533fbe 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
201 intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt); 201 intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
202 intel_gvt_clean_vgpu_types(gvt); 202 intel_gvt_clean_vgpu_types(gvt);
203 203
204 idr_destroy(&gvt->vgpu_idr);
205
204 kfree(dev_priv->gvt); 206 kfree(dev_priv->gvt);
205 dev_priv->gvt = NULL; 207 dev_priv->gvt = NULL;
206} 208}
@@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
237 239
238 gvt_dbg_core("init gvt device\n"); 240 gvt_dbg_core("init gvt device\n");
239 241
242 idr_init(&gvt->vgpu_idr);
243
240 mutex_init(&gvt->lock); 244 mutex_init(&gvt->lock);
241 gvt->dev_priv = dev_priv; 245 gvt->dev_priv = dev_priv;
242 246
@@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
244 248
245 ret = intel_gvt_setup_mmio_info(gvt); 249 ret = intel_gvt_setup_mmio_info(gvt);
246 if (ret) 250 if (ret)
247 return ret; 251 goto out_clean_idr;
248 252
249 ret = intel_gvt_load_firmware(gvt); 253 ret = intel_gvt_load_firmware(gvt);
250 if (ret) 254 if (ret)
@@ -313,6 +317,8 @@ out_free_firmware:
313 intel_gvt_free_firmware(gvt); 317 intel_gvt_free_firmware(gvt);
314out_clean_mmio_info: 318out_clean_mmio_info:
315 intel_gvt_clean_mmio_info(gvt); 319 intel_gvt_clean_mmio_info(gvt);
320out_clean_idr:
321 idr_destroy(&gvt->vgpu_idr);
316 kfree(gvt); 322 kfree(gvt);
317 return ret; 323 return ret;
318} 324}
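
The idr changes restore init/teardown symmetry: idr_init() runs before anything that can fail, and idr_destroy() appears both in the error unwind and in intel_gvt_clean_device(). A sketch of the convention (function name hypothetical, unrelated steps elided):

static int init_device_sketch(struct intel_gvt *gvt)
{
	int ret;

	idr_init(&gvt->vgpu_idr);	/* cannot fail; do it first */

	ret = intel_gvt_setup_mmio_info(gvt);
	if (ret)
		goto out_clean_idr;

	return 0;

out_clean_idr:
	idr_destroy(&gvt->vgpu_idr);	/* paired teardown */
	return ret;
}
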
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 0af17016f33f..e227caf5859e 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -323,6 +323,7 @@ struct intel_vgpu_creation_params {
323 323
324int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, 324int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
325 struct intel_vgpu_creation_params *param); 325 struct intel_vgpu_creation_params *param);
326void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
326void intel_vgpu_free_resource(struct intel_vgpu *vgpu); 327void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
327void intel_vgpu_write_fence(struct intel_vgpu *vgpu, 328void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
328 u32 fence, u64 value); 329 u32 fence, u64 value);
@@ -375,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
375struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, 376struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
376 struct intel_vgpu_type *type); 377 struct intel_vgpu_type *type);
377void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); 378void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
379void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
380 unsigned int engine_mask);
378void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); 381void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
379 382
380 383
@@ -411,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
411int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, 414int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
412 unsigned long *g_index); 415 unsigned long *g_index);
413 416
417void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
418 bool primary);
419void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);
420
414int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, 421int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
415 void *p_data, unsigned int bytes); 422 void *p_data, unsigned int bytes);
416 423
@@ -424,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
424int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); 431int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
425 432
426int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); 433int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
427int setup_vgpu_mmio(struct intel_vgpu *vgpu);
428void populate_pvinfo_page(struct intel_vgpu *vgpu); 434void populate_pvinfo_page(struct intel_vgpu *vgpu);
429 435
430struct intel_gvt_ops { 436struct intel_gvt_ops {
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 522809710312..ab2ea157da4c 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
93static int new_mmio_info(struct intel_gvt *gvt, 93static int new_mmio_info(struct intel_gvt *gvt,
94 u32 offset, u32 flags, u32 size, 94 u32 offset, u32 flags, u32 size,
95 u32 addr_mask, u32 ro_mask, u32 device, 95 u32 addr_mask, u32 ro_mask, u32 device,
96 void *read, void *write) 96 int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
97 int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
97{ 98{
98 struct intel_gvt_mmio_info *info, *p; 99 struct intel_gvt_mmio_info *info, *p;
99 u32 start, end, i; 100 u32 start, end, i;
@@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
219 default: 220 default:
220 /*should not hit here*/ 221 /*should not hit here*/
221 gvt_err("invalid forcewake offset 0x%x\n", offset); 222 gvt_err("invalid forcewake offset 0x%x\n", offset);
222 return 1; 223 return -EINVAL;
223 } 224 }
224 } else { 225 } else {
225 ack_reg_offset = FORCEWAKE_ACK_HSW_REG; 226 ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
@@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
230 return 0; 231 return 0;
231} 232}
232 233
233static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
234 void *p_data, unsigned int bytes, unsigned long bitmap)
235{
236 struct intel_gvt_workload_scheduler *scheduler =
237 &vgpu->gvt->scheduler;
238
239 vgpu->resetting = true;
240
241 intel_vgpu_stop_schedule(vgpu);
242 /*
243 * The current_vgpu will set to NULL after stopping the
244 * scheduler when the reset is triggered by current vgpu.
245 */
246 if (scheduler->current_vgpu == NULL) {
247 mutex_unlock(&vgpu->gvt->lock);
248 intel_gvt_wait_vgpu_idle(vgpu);
249 mutex_lock(&vgpu->gvt->lock);
250 }
251
252 intel_vgpu_reset_execlist(vgpu, bitmap);
253
254 /* full GPU reset */
255 if (bitmap == 0xff) {
256 mutex_unlock(&vgpu->gvt->lock);
257 intel_vgpu_clean_gtt(vgpu);
258 mutex_lock(&vgpu->gvt->lock);
259 setup_vgpu_mmio(vgpu);
260 populate_pvinfo_page(vgpu);
261 intel_vgpu_init_gtt(vgpu);
262 }
263
264 vgpu->resetting = false;
265
266 return 0;
267}
268
269static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 234static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
270 void *p_data, unsigned int bytes) 235 void *p_data, unsigned int bytes)
271{ 236{
237 unsigned int engine_mask = 0;
272 u32 data; 238 u32 data;
273 u64 bitmap = 0;
274 239
275 write_vreg(vgpu, offset, p_data, bytes); 240 write_vreg(vgpu, offset, p_data, bytes);
276 data = vgpu_vreg(vgpu, offset); 241 data = vgpu_vreg(vgpu, offset);
277 242
278 if (data & GEN6_GRDOM_FULL) { 243 if (data & GEN6_GRDOM_FULL) {
279 gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id); 244 gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
280 bitmap = 0xff; 245 engine_mask = ALL_ENGINES;
281 } 246 } else {
282 if (data & GEN6_GRDOM_RENDER) { 247 if (data & GEN6_GRDOM_RENDER) {
283 gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); 248 gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
284 bitmap |= (1 << RCS); 249 engine_mask |= (1 << RCS);
285 } 250 }
286 if (data & GEN6_GRDOM_MEDIA) { 251 if (data & GEN6_GRDOM_MEDIA) {
287 gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); 252 gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
288 bitmap |= (1 << VCS); 253 engine_mask |= (1 << VCS);
289 } 254 }
290 if (data & GEN6_GRDOM_BLT) { 255 if (data & GEN6_GRDOM_BLT) {
291 gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); 256 gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
292 bitmap |= (1 << BCS); 257 engine_mask |= (1 << BCS);
293 } 258 }
294 if (data & GEN6_GRDOM_VECS) { 259 if (data & GEN6_GRDOM_VECS) {
295 gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); 260 gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
296 bitmap |= (1 << VECS); 261 engine_mask |= (1 << VECS);
297 } 262 }
298 if (data & GEN8_GRDOM_MEDIA2) { 263 if (data & GEN8_GRDOM_MEDIA2) {
299 gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); 264 gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
300 if (HAS_BSD2(vgpu->gvt->dev_priv)) 265 if (HAS_BSD2(vgpu->gvt->dev_priv))
301 bitmap |= (1 << VCS2); 266 engine_mask |= (1 << VCS2);
267 }
302 } 268 }
303 return handle_device_reset(vgpu, offset, p_data, bytes, bitmap); 269
270 intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
271
272 return 0;
304} 273}
305 274
306static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, 275static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
@@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
974 return 0; 943 return 0;
975} 944}
976 945
977static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 946static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
978 void *p_data, unsigned int bytes) 947 void *p_data, unsigned int bytes)
979{ 948{
980 u32 data; 949 u32 data;
@@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1366static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, 1335static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
1367 unsigned int offset, void *p_data, unsigned int bytes) 1336 unsigned int offset, void *p_data, unsigned int bytes)
1368{ 1337{
1369 int rc = 0;
1370 unsigned int id = 0; 1338 unsigned int id = 0;
1371 1339
1372 write_vreg(vgpu, offset, p_data, bytes); 1340 write_vreg(vgpu, offset, p_data, bytes);
@@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
1389 id = VECS; 1357 id = VECS;
1390 break; 1358 break;
1391 default: 1359 default:
1392 rc = -EINVAL; 1360 return -EINVAL;
1393 break;
1394 } 1361 }
1395 set_bit(id, (void *)vgpu->tlb_handle_pending); 1362 set_bit(id, (void *)vgpu->tlb_handle_pending);
1396 1363
1397 return rc; 1364 return 0;
1398} 1365}
1399 1366
1400static int ring_reset_ctl_write(struct intel_vgpu *vgpu, 1367static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
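
A unifying detail across the handlers.c hunks: new_mmio_info() used to accept its callbacks as void *, which is how sbi_ctl_mmio_write() could return bool (and mul_force_wake_write() return 1) without complaint. Spelling out the function-pointer types lets the compiler enforce the int-returning handler signature. With a hypothetical typedef for readability:

/* hypothetical typedef; the hunk writes the types out inline */
typedef int (*gvt_mmio_func)(struct intel_vgpu *vgpu, unsigned int offset,
			     void *p_data, unsigned int bytes);

static int new_mmio_info(struct intel_gvt *gvt,
			 u32 offset, u32 flags, u32 size,
			 u32 addr_mask, u32 ro_mask, u32 device,
			 gvt_mmio_func read, gvt_mmio_func write);
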
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index faaae07ae487..3f656e3a6e5a 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -230,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
230 return NULL; 230 return NULL;
231} 231}
232 232
233static ssize_t available_instance_show(struct kobject *kobj, struct device *dev, 233static ssize_t available_instances_show(struct kobject *kobj,
234 char *buf) 234 struct device *dev, char *buf)
235{ 235{
236 struct intel_vgpu_type *type; 236 struct intel_vgpu_type *type;
237 unsigned int num = 0; 237 unsigned int num = 0;
@@ -269,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
269 type->fence); 269 type->fence);
270} 270}
271 271
272static MDEV_TYPE_ATTR_RO(available_instance); 272static MDEV_TYPE_ATTR_RO(available_instances);
273static MDEV_TYPE_ATTR_RO(device_api); 273static MDEV_TYPE_ATTR_RO(device_api);
274static MDEV_TYPE_ATTR_RO(description); 274static MDEV_TYPE_ATTR_RO(description);
275 275
276static struct attribute *type_attrs[] = { 276static struct attribute *type_attrs[] = {
277 &mdev_type_attr_available_instance.attr, 277 &mdev_type_attr_available_instances.attr,
278 &mdev_type_attr_device_api.attr, 278 &mdev_type_attr_device_api.attr,
279 &mdev_type_attr_description.attr, 279 &mdev_type_attr_description.attr,
280 NULL, 280 NULL,
@@ -398,6 +398,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
398 struct intel_vgpu_type *type; 398 struct intel_vgpu_type *type;
399 struct device *pdev; 399 struct device *pdev;
400 void *gvt; 400 void *gvt;
401 int ret;
401 402
402 pdev = mdev_parent_dev(mdev); 403 pdev = mdev_parent_dev(mdev);
403 gvt = kdev_to_i915(pdev)->gvt; 404 gvt = kdev_to_i915(pdev)->gvt;
@@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
406 if (!type) { 407 if (!type) {
407 gvt_err("failed to find type %s to create\n", 408 gvt_err("failed to find type %s to create\n",
408 kobject_name(kobj)); 409 kobject_name(kobj));
409 return -EINVAL; 410 ret = -EINVAL;
411 goto out;
410 } 412 }
411 413
412 vgpu = intel_gvt_ops->vgpu_create(gvt, type); 414 vgpu = intel_gvt_ops->vgpu_create(gvt, type);
413 if (IS_ERR_OR_NULL(vgpu)) { 415 if (IS_ERR_OR_NULL(vgpu)) {
414 gvt_err("create intel vgpu failed\n"); 416 ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
415 return -EINVAL; 417 gvt_err("failed to create intel vgpu: %d\n", ret);
418 goto out;
416 } 419 }
417 420
418 INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work); 421 INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
@@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
422 425
423 gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", 426 gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
424 dev_name(mdev_dev(mdev))); 427 dev_name(mdev_dev(mdev)));
425 return 0; 428 ret = 0;
429
430out:
431 return ret;
426} 432}
427 433
428static int intel_vgpu_remove(struct mdev_device *mdev) 434static int intel_vgpu_remove(struct mdev_device *mdev)
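
The attribute rename is not cosmetic: the mediated-device sysfs ABI (Documentation/ABI/testing/sysfs-bus-vfio-mdev) specifies available_instances as the per-type attribute name, so userspace looks for exactly that file. The macro expands roughly as follows (sketch):

/* MDEV_TYPE_ATTR_RO(available_instances) expands to roughly: */
static struct mdev_type_attribute mdev_type_attr_available_instances =
	__ATTR_RO(available_instances);	/* sysfs file name comes from here */
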
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 09c9450a1946..4df078bc5d04 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
125 if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1))) 125 if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
126 goto err; 126 goto err;
127 127
128 mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
129 if (!mmio && !vgpu->mmio.disable_warn_untrack) {
130 gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
131 vgpu->id, offset, bytes, *(u32 *)p_data);
132
133 if (offset == 0x206c) {
134 gvt_err("------------------------------------------\n");
135 gvt_err("vgpu%d: likely triggers a gfx reset\n",
136 vgpu->id);
137 gvt_err("------------------------------------------\n");
138 vgpu->mmio.disable_warn_untrack = true;
139 }
140 }
141
142 if (!intel_gvt_mmio_is_unalign(gvt, offset)) { 128 if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
143 if (WARN_ON(!IS_ALIGNED(offset, bytes))) 129 if (WARN_ON(!IS_ALIGNED(offset, bytes)))
144 goto err; 130 goto err;
145 } 131 }
146 132
133 mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
147 if (mmio) { 134 if (mmio) {
148 if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) { 135 if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
149 if (WARN_ON(offset + bytes > mmio->offset + mmio->size)) 136 if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
@@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
152 goto err; 139 goto err;
153 } 140 }
154 ret = mmio->read(vgpu, offset, p_data, bytes); 141 ret = mmio->read(vgpu, offset, p_data, bytes);
155 } else 142 } else {
156 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); 143 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
157 144
145 if (!vgpu->mmio.disable_warn_untrack) {
146 gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
147 vgpu->id, offset, bytes, *(u32 *)p_data);
148
149 if (offset == 0x206c) {
150 gvt_err("------------------------------------------\n");
151 gvt_err("vgpu%d: likely triggers a gfx reset\n",
152 vgpu->id);
153 gvt_err("------------------------------------------\n");
154 vgpu->mmio.disable_warn_untrack = true;
155 }
156 }
157 }
158
158 if (ret) 159 if (ret)
159 goto err; 160 goto err;
160 161
@@ -302,3 +303,56 @@ err:
302 mutex_unlock(&gvt->lock); 303 mutex_unlock(&gvt->lock);
303 return ret; 304 return ret;
304} 305}
306
307
308/**
309 * intel_vgpu_reset_mmio - reset virtual MMIO space
310 * @vgpu: a vGPU
311 *
312 */
313void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
314{
315 struct intel_gvt *gvt = vgpu->gvt;
316 const struct intel_gvt_device_info *info = &gvt->device_info;
317
318 memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
319 memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
320
321 vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
322
 323	/* set bits 0:2 (Core C-State) to C0 */
324 vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
325}
326
327/**
328 * intel_vgpu_init_mmio - init MMIO space
329 * @vgpu: a vGPU
330 *
331 * Returns:
332 * Zero on success, negative error code if failed
333 */
334int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
335{
336 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
337
338 vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
339 if (!vgpu->mmio.vreg)
340 return -ENOMEM;
341
342 vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
343
344 intel_vgpu_reset_mmio(vgpu);
345
346 return 0;
347}
348
349/**
350 * intel_vgpu_clean_mmio - clean MMIO space
351 * @vgpu: a vGPU
352 *
353 */
354void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
355{
356 vfree(vgpu->mmio.vreg);
357 vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
358}
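
intel_vgpu_init_mmio() above backs both register files with a single vzalloc(): vreg, the guest-visible registers, occupies the first half and sreg, the shadow copy, the second, so the single vfree() in intel_vgpu_clean_mmio() releases both. Schematically:

	/* layout: [ vreg: mmio_size bytes | sreg: mmio_size bytes ] */
	vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
	if (!vgpu->mmio.vreg)
		return -ENOMEM;
	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
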
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 87d5b5e366a3..3bc620f56f35 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
86 *offset; \ 86 *offset; \
87}) 87})
88 88
89int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
90void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
91void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
92
89int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); 93int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
90 94
91int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, 95int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 81cd921770c6..d9fb41ab7119 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
36 vgpu->id)) 36 vgpu->id))
37 return -EINVAL; 37 return -EINVAL;
38 38
39 vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC | 39 vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
40 GFP_DMA32 | __GFP_ZERO, 40 __GFP_ZERO,
41 INTEL_GVT_OPREGION_PORDER); 41 get_order(INTEL_GVT_OPREGION_SIZE));
42 42
43 if (!vgpu_opregion(vgpu)->va) 43 if (!vgpu_opregion(vgpu)->va)
44 return -ENOMEM; 44 return -ENOMEM;
@@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
97 if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { 97 if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
98 map_vgpu_opregion(vgpu, false); 98 map_vgpu_opregion(vgpu, false);
99 free_pages((unsigned long)vgpu_opregion(vgpu)->va, 99 free_pages((unsigned long)vgpu_opregion(vgpu)->va,
100 INTEL_GVT_OPREGION_PORDER); 100 get_order(INTEL_GVT_OPREGION_SIZE));
101 101
102 vgpu_opregion(vgpu)->va = NULL; 102 vgpu_opregion(vgpu)->va = NULL;
103 } 103 }
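
Besides switching to GFP_KERNEL for this sleepable path (nothing here needs GFP_DMA32 either), the opregion fix derives the page order from the size macro, so allocation and free stay in sync if INTEL_GVT_OPREGION_PAGES ever changes. The paired calls:

	/* get_order(2 * PAGE_SIZE) == 1, matching the removed PORDER macro */
	void *va = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(INTEL_GVT_OPREGION_SIZE));

	/* later, on teardown: */
	free_pages((unsigned long)va, get_order(INTEL_GVT_OPREGION_SIZE));
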
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 0dfe789d8f02..fbd023a16f18 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -50,8 +50,7 @@
50#define INTEL_GVT_OPREGION_PARM 0x204 50#define INTEL_GVT_OPREGION_PARM 0x204
51 51
52#define INTEL_GVT_OPREGION_PAGES 2 52#define INTEL_GVT_OPREGION_PAGES 2
53#define INTEL_GVT_OPREGION_PORDER 1 53#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)
54#define INTEL_GVT_OPREGION_SIZE (2 * 4096)
55 54
56#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) 55#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
57 56
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 4db242250235..e91885dffeff 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
350{ 350{
351 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 351 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
352 struct intel_vgpu_workload *workload; 352 struct intel_vgpu_workload *workload;
353 struct intel_vgpu *vgpu;
353 int event; 354 int event;
354 355
355 mutex_lock(&gvt->lock); 356 mutex_lock(&gvt->lock);
356 357
357 workload = scheduler->current_workload[ring_id]; 358 workload = scheduler->current_workload[ring_id];
359 vgpu = workload->vgpu;
358 360
359 if (!workload->status && !workload->vgpu->resetting) { 361 if (!workload->status && !vgpu->resetting) {
360 wait_event(workload->shadow_ctx_status_wq, 362 wait_event(workload->shadow_ctx_status_wq,
361 !atomic_read(&workload->shadow_ctx_active)); 363 !atomic_read(&workload->shadow_ctx_active));
362 364
@@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
364 366
365 for_each_set_bit(event, workload->pending_events, 367 for_each_set_bit(event, workload->pending_events,
366 INTEL_GVT_EVENT_MAX) 368 INTEL_GVT_EVENT_MAX)
367 intel_vgpu_trigger_virtual_event(workload->vgpu, 369 intel_vgpu_trigger_virtual_event(vgpu, event);
368 event);
369 } 370 }
370 371
371 gvt_dbg_sched("ring id %d complete workload %p status %d\n", 372 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
373 374
374 scheduler->current_workload[ring_id] = NULL; 375 scheduler->current_workload[ring_id] = NULL;
375 376
376 atomic_dec(&workload->vgpu->running_workload_num);
377
378 list_del_init(&workload->list); 377 list_del_init(&workload->list);
379 workload->complete(workload); 378 workload->complete(workload);
380 379
380 atomic_dec(&vgpu->running_workload_num);
381 wake_up(&scheduler->workload_complete_wq); 381 wake_up(&scheduler->workload_complete_wq);
382 mutex_unlock(&gvt->lock); 382 mutex_unlock(&gvt->lock);
383} 383}
@@ -459,11 +459,11 @@ complete:
459 gvt_dbg_sched("will complete workload %p\n, status: %d\n", 459 gvt_dbg_sched("will complete workload %p\n, status: %d\n",
460 workload, workload->status); 460 workload, workload->status);
461 461
462 complete_current_workload(gvt, ring_id);
463
464 if (workload->req) 462 if (workload->req)
465 i915_gem_request_put(fetch_and_zero(&workload->req)); 463 i915_gem_request_put(fetch_and_zero(&workload->req));
466 464
465 complete_current_workload(gvt, ring_id);
466
467 if (need_force_wake) 467 if (need_force_wake)
468 intel_uncore_forcewake_put(gvt->dev_priv, 468 intel_uncore_forcewake_put(gvt->dev_priv,
469 FORCEWAKE_ALL); 469 FORCEWAKE_ALL);
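
The scheduler.c reordering closes a use-after-free window: workload->complete(workload) may free the workload, so the vgpu pointer is cached up front and everything that runs after completion touches only the cached pointer. The resulting shape:

	struct intel_vgpu *vgpu = workload->vgpu;	/* cache before completion */

	list_del_init(&workload->list);
	workload->complete(workload);	/* may free 'workload' */

	/* from here on, touch only the cached vgpu pointer */
	atomic_dec(&vgpu->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);
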
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 3b30c28bff51..2833dfa8c9ae 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -113,7 +113,7 @@ struct intel_shadow_bb_entry {
113 struct drm_i915_gem_object *obj; 113 struct drm_i915_gem_object *obj;
114 void *va; 114 void *va;
115 unsigned long len; 115 unsigned long len;
116 void *bb_start_cmd_va; 116 u32 *bb_start_cmd_va;
117}; 117};
118 118
119#define workload_q_head(vgpu, ring_id) \ 119#define workload_q_head(vgpu, ring_id) \
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 536d2b9d5777..7295bc8e12fb 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -35,79 +35,6 @@
35#include "gvt.h" 35#include "gvt.h"
36#include "i915_pvinfo.h" 36#include "i915_pvinfo.h"
37 37
38static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
39{
40 vfree(vgpu->mmio.vreg);
41 vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
42}
43
44int setup_vgpu_mmio(struct intel_vgpu *vgpu)
45{
46 struct intel_gvt *gvt = vgpu->gvt;
47 const struct intel_gvt_device_info *info = &gvt->device_info;
48
49 if (vgpu->mmio.vreg)
50 memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
51 else {
52 vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
53 if (!vgpu->mmio.vreg)
54 return -ENOMEM;
55 }
56
57 vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
58
59 memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
60 memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
61
62 vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
63
64 /* set the bit 0:2(Core C-State ) to C0 */
65 vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
66 return 0;
67}
68
69static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
70 struct intel_vgpu_creation_params *param)
71{
72 struct intel_gvt *gvt = vgpu->gvt;
73 const struct intel_gvt_device_info *info = &gvt->device_info;
74 u16 *gmch_ctl;
75 int i;
76
77 memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
78 info->cfg_space_size);
79
80 if (!param->primary) {
81 vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
82 INTEL_GVT_PCI_CLASS_VGA_OTHER;
83 vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
84 INTEL_GVT_PCI_CLASS_VGA_OTHER;
85 }
86
87 /* Show guest that there isn't any stolen memory.*/
88 gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
89 *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
90
91 intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
92 gvt_aperture_pa_base(gvt), true);
93
94 vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
95 | PCI_COMMAND_MEMORY
96 | PCI_COMMAND_MASTER);
97 /*
98 * Clear the bar upper 32bit and let guest to assign the new value
99 */
100 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
101 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
102 memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
103
104 for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
105 vgpu->cfg_space.bar[i].size = pci_resource_len(
106 gvt->dev_priv->drm.pdev, i * 2);
107 vgpu->cfg_space.bar[i].tracked = false;
108 }
109}
110
111void populate_pvinfo_page(struct intel_vgpu *vgpu) 38void populate_pvinfo_page(struct intel_vgpu *vgpu)
112{ 39{
113 /* setup the ballooning information */ 40 /* setup the ballooning information */
@@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
177 if (low_avail / min_low == 0) 104 if (low_avail / min_low == 0)
178 break; 105 break;
179 gvt->types[i].low_gm_size = min_low; 106 gvt->types[i].low_gm_size = min_low;
180 gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size; 107 gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
181 gvt->types[i].fence = 4; 108 gvt->types[i].fence = 4;
182 gvt->types[i].max_instance = low_avail / min_low; 109 gvt->types[i].max_instance = low_avail / min_low;
183 gvt->types[i].avail_instance = gvt->types[i].max_instance; 110 gvt->types[i].avail_instance = gvt->types[i].max_instance;
@@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
217 */ 144 */
218 low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - 145 low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
219 gvt->gm.vgpu_allocated_low_gm_size; 146 gvt->gm.vgpu_allocated_low_gm_size;
220 high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE - 147 high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
221 gvt->gm.vgpu_allocated_high_gm_size; 148 gvt->gm.vgpu_allocated_high_gm_size;
222 fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - 149 fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
223 gvt->fence.vgpu_allocated_fence_num; 150 gvt->fence.vgpu_allocated_fence_num;
@@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
268 intel_vgpu_clean_gtt(vgpu); 195 intel_vgpu_clean_gtt(vgpu);
269 intel_gvt_hypervisor_detach_vgpu(vgpu); 196 intel_gvt_hypervisor_detach_vgpu(vgpu);
270 intel_vgpu_free_resource(vgpu); 197 intel_vgpu_free_resource(vgpu);
271 clean_vgpu_mmio(vgpu); 198 intel_vgpu_clean_mmio(vgpu);
272 vfree(vgpu); 199 vfree(vgpu);
273 200
274 intel_gvt_update_vgpu_types(gvt); 201 intel_gvt_update_vgpu_types(gvt);
@@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
300 vgpu->gvt = gvt; 227 vgpu->gvt = gvt;
301 bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES); 228 bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
302 229
303 setup_vgpu_cfg_space(vgpu, param); 230 intel_vgpu_init_cfg_space(vgpu, param->primary);
304 231
305 ret = setup_vgpu_mmio(vgpu); 232 ret = intel_vgpu_init_mmio(vgpu);
306 if (ret) 233 if (ret)
307 goto out_free_vgpu; 234 goto out_clean_idr;
308 235
309 ret = intel_vgpu_alloc_resource(vgpu, param); 236 ret = intel_vgpu_alloc_resource(vgpu, param);
310 if (ret) 237 if (ret)
@@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu:
354out_clean_vgpu_resource: 281out_clean_vgpu_resource:
355 intel_vgpu_free_resource(vgpu); 282 intel_vgpu_free_resource(vgpu);
356out_clean_vgpu_mmio: 283out_clean_vgpu_mmio:
357 clean_vgpu_mmio(vgpu); 284 intel_vgpu_clean_mmio(vgpu);
285out_clean_idr:
286 idr_remove(&gvt->vgpu_idr, vgpu->id);
358out_free_vgpu: 287out_free_vgpu:
359 vfree(vgpu); 288 vfree(vgpu);
360 mutex_unlock(&gvt->lock); 289 mutex_unlock(&gvt->lock);
@@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
398} 327}
399 328
400/** 329/**
401 * intel_gvt_reset_vgpu - reset a virtual GPU 330 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
331 * @vgpu: virtual GPU
332 * @dmlr: vGPU Device Model Level Reset or GT Reset
333 * @engine_mask: engines to reset for GT reset
334 *
 335	 * This function is called when the user wants to reset a virtual GPU through
336 * device model reset or GT reset. The caller should hold the gvt lock.
337 *
338 * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
 339	 * the whole vGPU to its default state, as when it was created. This
 340	 * function is required for both functionality and security. The ultimate
 341	 * goal of vGPU FLR is to reuse a vGPU instance across virtual machines;
 342	 * when we assign a vGPU to a virtual machine we must issue such a reset first.
343 *
 344	 * Full GT Reset and Per-Engine GT Reset are soft reset flows for the GPU
 345	 * engines (Render, Blitter, Video, Video Enhancement), as defined by the
 346	 * GPU spec. Unlike the FLR, a GT reset only resets the particular vGPU
 347	 * resources named in the reset request. The guest driver can issue a GT
 348	 * reset by programming the virtual GDRST register to reset specific
 349	 * virtual GPU engines or all engines.
350 *
 351	 * The parameter dmlr identifies whether we will do a DMLR or a GT reset.
 352	 * The parameter engine_mask specifies the engines that need to be
 353	 * reset. If ALL_ENGINES is given for engine_mask, the caller requests
 354	 * a full GT reset in which we reset all virtual GPU engines. For FLR,
 355	 * engine_mask is ignored.
356 */
357void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
358 unsigned int engine_mask)
359{
360 struct intel_gvt *gvt = vgpu->gvt;
361 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
362
363 gvt_dbg_core("------------------------------------------\n");
 364	gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
365 vgpu->id, dmlr, engine_mask);
366 vgpu->resetting = true;
367
368 intel_vgpu_stop_schedule(vgpu);
369 /*
370 * The current_vgpu will set to NULL after stopping the
371 * scheduler when the reset is triggered by current vgpu.
372 */
373 if (scheduler->current_vgpu == NULL) {
374 mutex_unlock(&gvt->lock);
375 intel_gvt_wait_vgpu_idle(vgpu);
376 mutex_lock(&gvt->lock);
377 }
378
379 intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
380
381 /* full GPU reset or device model level reset */
382 if (engine_mask == ALL_ENGINES || dmlr) {
383 intel_vgpu_reset_gtt(vgpu, dmlr);
384 intel_vgpu_reset_resource(vgpu);
385 intel_vgpu_reset_mmio(vgpu);
386 populate_pvinfo_page(vgpu);
387
388 if (dmlr)
389 intel_vgpu_reset_cfg_space(vgpu);
390 }
391
392 vgpu->resetting = false;
393 gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
394 gvt_dbg_core("------------------------------------------\n");
395}
396
397/**
398 * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
402 * @vgpu: virtual GPU 399 * @vgpu: virtual GPU
403 * 400 *
404 * This function is called when user wants to reset a virtual GPU. 401 * This function is called when user wants to reset a virtual GPU.
@@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
406 */ 403 */
407void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) 404void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
408{ 405{
406 mutex_lock(&vgpu->gvt->lock);
407 intel_gvt_reset_vgpu_locked(vgpu, true, 0);
408 mutex_unlock(&vgpu->gvt->lock);
409} 409}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 445fec9c2841..b2c4a0b8a627 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2378,7 +2378,7 @@ static int intel_runtime_suspend(struct device *kdev)
2378 2378
2379 assert_forcewakes_inactive(dev_priv); 2379 assert_forcewakes_inactive(dev_priv);
2380 2380
2381 if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv)) 2381 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
2382 intel_hpd_poll_init(dev_priv); 2382 intel_hpd_poll_init(dev_priv);
2383 2383
2384 DRM_DEBUG_KMS("Device suspended\n"); 2384 DRM_DEBUG_KMS("Device suspended\n");
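
The one-character i915_drv.c fix deserves a second look. No device is both Valley View and Cherry View, so !A || !B was always true and the HPD poll init ran unconditionally; the intended guard is !A && !B. De Morgan's form makes the intent explicit:

	/* was: !IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv), a
	 * tautology, since no device satisfies both predicates at once */
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		intel_hpd_poll_init(dev_priv);
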
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 243224aeabf8..69bc3b0c4390 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1977,6 +1977,11 @@ struct drm_i915_private {
1977 1977
1978 struct i915_frontbuffer_tracking fb_tracking; 1978 struct i915_frontbuffer_tracking fb_tracking;
1979 1979
1980 struct intel_atomic_helper {
1981 struct llist_head free_list;
1982 struct work_struct free_work;
1983 } atomic_helper;
1984
1980 u16 orig_clock; 1985 u16 orig_clock;
1981 1986
1982 bool mchbar_need_disable; 1987 bool mchbar_need_disable;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3dd7fc662859..4b23a7814713 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -595,47 +595,21 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
                      struct drm_i915_gem_pwrite *args,
                      struct drm_file *file)
 {
-        struct drm_device *dev = obj->base.dev;
         void *vaddr = obj->phys_handle->vaddr + args->offset;
         char __user *user_data = u64_to_user_ptr(args->data_ptr);
-        int ret;
 
         /* We manually control the domain here and pretend that it
          * remains coherent i.e. in the GTT domain, like shmem_pwrite.
          */
-        lockdep_assert_held(&obj->base.dev->struct_mutex);
-        ret = i915_gem_object_wait(obj,
-                                   I915_WAIT_INTERRUPTIBLE |
-                                   I915_WAIT_LOCKED |
-                                   I915_WAIT_ALL,
-                                   MAX_SCHEDULE_TIMEOUT,
-                                   to_rps_client(file));
-        if (ret)
-                return ret;
-
         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-        if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-                unsigned long unwritten;
-
-                /* The physical object once assigned is fixed for the lifetime
-                 * of the obj, so we can safely drop the lock and continue
-                 * to access vaddr.
-                 */
-                mutex_unlock(&dev->struct_mutex);
-                unwritten = copy_from_user(vaddr, user_data, args->size);
-                mutex_lock(&dev->struct_mutex);
-                if (unwritten) {
-                        ret = -EFAULT;
-                        goto out;
-                }
-        }
+        if (copy_from_user(vaddr, user_data, args->size))
+                return -EFAULT;
 
         drm_clflush_virt_range(vaddr, args->size);
-        i915_gem_chipset_flush(to_i915(dev));
+        i915_gem_chipset_flush(to_i915(obj->base.dev));
 
-out:
         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
-        return ret;
+        return 0;
 }
 
 void *i915_gem_object_alloc(struct drm_device *dev)
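
Editorial aside on the i915_gem_phys_pwrite() hunk above: copy_from_user() can fault and therefore sleep, which is why the old code first tried the inatomic-nocache variant and dropped struct_mutex around the sleeping copy. Once nothing is held across the copy, the whole dance collapses to one plain call. A minimal sketch of the resulting shape, with illustrative names only (this is not i915 API):

        #include <linux/uaccess.h>
        #include <linux/errno.h>

        /* Sleepable context, no lock held across the copy, so the plain,
         * faulting copy_from_user() is legal and sufficient. */
        static int sketch_write_phys(void *vaddr, const char __user *user_data,
                                     size_t size)
        {
                if (copy_from_user(vaddr, user_data, size))
                        return -EFAULT;
                return 0;
        }
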
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index bd08814b015c..d534a316a16e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -199,6 +199,7 @@ found:
         }
 
         /* Unbinding will emit any required flushes */
+        ret = 0;
         while (!list_empty(&eviction_list)) {
                 vma = list_first_entry(&eviction_list,
                                        struct i915_vma,
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index a792dcb902b5..e924a9516079 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -185,6 +185,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                 return ret;
         }
 
+        trace_i915_vma_bind(vma, bind_flags);
         ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
         if (ret)
                 return ret;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 86ecec5601d4..588470eb8d39 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
         struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
         struct edid *edid;
         struct i2c_adapter *i2c;
+        bool ret = false;
 
         BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
 
@@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
                  */
                 if (!is_digital) {
                         DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
-                        return true;
+                        ret = true;
+                } else {
+                        DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
                 }
-
-                DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
         } else {
                 DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
         }
 
         kfree(edid);
 
-        return false;
+        return ret;
 }
 
 static enum drm_connector_status
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3dc8724df400..77f7b1d849a4 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2585,8 +2585,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
                          * We only keep the x/y offsets, so push all of the
                          * gtt offset into the x/y offsets.
                          */
-                        _intel_adjust_tile_offset(&x, &y, tile_size,
-                                                  tile_width, tile_height, pitch_tiles,
+                        _intel_adjust_tile_offset(&x, &y,
+                                                  tile_width, tile_height,
+                                                  tile_size, pitch_tiles,
                                                   gtt_offset_rotated * tile_size, 0);
 
                 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
@@ -2967,6 +2968,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
         unsigned int rotation = plane_state->base.rotation;
         int ret;
 
+        if (!plane_state->base.visible)
+                return 0;
+
         /* Rotate src coordinates to match rotated GTT view */
         if (drm_rotation_90_or_270(rotation))
                 drm_rect_rotate(&plane_state->base.src,
@@ -6846,6 +6850,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
         }
 
         state = drm_atomic_state_alloc(crtc->dev);
+        if (!state) {
+                DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
+                              crtc->base.id, crtc->name);
+                return;
+        }
+
         state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
 
         /* Everything's already locked, -EDEADLK can't happen. */
@@ -11243,6 +11253,7 @@ found:
         }
 
         old->restore_state = restore_state;
+        drm_atomic_state_put(state);
 
         /* let the connector get through one full cycle before testing */
         intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -14512,8 +14523,14 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
                 break;
 
         case FENCE_FREE:
-                drm_atomic_state_put(&state->base);
-                break;
+                {
+                        struct intel_atomic_helper *helper =
+                                &to_i915(state->base.dev)->atomic_helper;
+
+                        if (llist_add(&state->freed, &helper->free_list))
+                                schedule_work(&helper->free_work);
+                        break;
+                }
         }
 
         return NOTIFY_DONE;
@@ -16392,6 +16409,18 @@ fail:
         drm_modeset_acquire_fini(&ctx);
 }
 
+static void intel_atomic_helper_free_state(struct work_struct *work)
+{
+        struct drm_i915_private *dev_priv =
+                container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+        struct intel_atomic_state *state, *next;
+        struct llist_node *freed;
+
+        freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+        llist_for_each_entry_safe(state, next, freed, freed)
+                drm_atomic_state_put(&state->base);
+}
+
 int intel_modeset_init(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16411,6 +16440,9 @@ int intel_modeset_init(struct drm_device *dev)
 
         dev->mode_config.funcs = &intel_mode_funcs;
 
+        INIT_WORK(&dev_priv->atomic_helper.free_work,
+                  intel_atomic_helper_free_state);
+
         intel_init_quirks(dev);
 
         intel_init_pm(dev_priv);
@@ -17024,7 +17056,8 @@ void intel_display_resume(struct drm_device *dev)
 
         if (ret)
                 DRM_ERROR("Restoring old state failed with %i\n", ret);
-        drm_atomic_state_put(state);
+        if (state)
+                drm_atomic_state_put(state);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -17094,6 +17127,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = to_i915(dev);
 
+        flush_work(&dev_priv->atomic_helper.free_work);
+        WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
+
         intel_disable_gt_powersave(dev_priv);
 
         /*
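
Editor's note on the atomic_helper hunks above: the fence callback pushes each freed state onto a lock-free llist and kicks a worker, which later drains the whole batch. A minimal kernel-style sketch of that deferred-free pattern, detached from i915 (names are illustrative; the llist and workqueue calls are real API):

        #include <linux/llist.h>
        #include <linux/workqueue.h>
        #include <linux/slab.h>

        struct deferred_item {
                struct llist_node node;
                /* payload ... */
        };

        static LLIST_HEAD(free_list);
        static struct work_struct free_work;  /* INIT_WORK(&free_work, free_worker) at init */

        static void free_worker(struct work_struct *work)
        {
                struct deferred_item *item, *next;
                struct llist_node *batch = llist_del_all(&free_list);

                llist_for_each_entry_safe(item, next, batch, node)
                        kfree(item);
        }

        /* Safe from any context; llist_add() returns true only when the
         * list was empty, so the work is scheduled once per batch. */
        static void defer_free(struct deferred_item *item)
        {
                if (llist_add(&item->node, &free_list))
                        schedule_work(&free_work);
        }
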
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cd132c216a67..cd72ae171eeb 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -370,6 +370,8 @@ struct intel_atomic_state {
         struct skl_wm_values wm_results;
 
         struct i915_sw_fence commit_ready;
+
+        struct llist_node freed;
 };
 
 struct intel_plane_state {
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index beb08982dc0b..8cf2d80f2254 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev)
 {
         struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
+        if (!ifbdev)
+                return;
+
         ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
 }
 
747 750
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d4961fa20c73..beabc17e7c8a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -979,18 +979,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
                                                 uint32_t *batch,
                                                 uint32_t index)
 {
-        struct drm_i915_private *dev_priv = engine->i915;
         uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
 
-        /*
-         * WaDisableLSQCROPERFforOCL:kbl
-         * This WA is implemented in skl_init_clock_gating() but since
-         * this batch updates GEN8_L3SQCREG4 with default value we need to
-         * set this bit here to retain the WA during flush.
-         */
-        if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
-                l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
-
         wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
                                    MI_SRM_LRM_GLOBAL_GTT));
         wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index aeb637dc1fdf..91cb4c422ad5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1095,14 +1095,6 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
         WA_SET_BIT_MASKED(HDC_CHICKEN0,
                           HDC_FENCE_DEST_SLM_DISABLE);
 
-        /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
-         * involving this register should also be added to WA batch as required.
-         */
-        if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
-                /* WaDisableLSQCROPERFforOCL:kbl */
-                I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
-                           GEN8_LQSC_RO_PERF_DIS);
-
         /* WaToEnableHwFixForPushConstHWBug:kbl */
         if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
                 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 14ff87686a36..686a580c711a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -345,7 +345,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 {
         struct adreno_platform_config *config = pdev->dev.platform_data;
         struct msm_gpu *gpu = &adreno_gpu->base;
-        struct msm_mmu *mmu;
         int ret;
 
         adreno_gpu->funcs = funcs;
@@ -385,8 +384,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                 return ret;
         }
 
-        mmu = gpu->aspace->mmu;
-        if (mmu) {
+        if (gpu->aspace && gpu->aspace->mmu) {
+                struct msm_mmu *mmu = gpu->aspace->mmu;
                 ret = mmu->funcs->attach(mmu, iommu_ports,
                                 ARRAY_SIZE(iommu_ports));
                 if (ret)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 5f6cd8745dbc..c396d459a9d0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -119,13 +119,7 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
 
 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
-        int i;
         struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-        struct drm_plane *plane;
-        struct drm_plane_state *plane_state;
-
-        for_each_plane_in_state(state, plane, plane_state, i)
-                mdp5_plane_complete_commit(plane, plane_state);
 
         if (mdp5_kms->smp)
                 mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 17b0cc101171..cdfc63d90c7b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -104,8 +104,6 @@ struct mdp5_plane_state {
 
         /* assigned by crtc blender */
         enum mdp_mixer_stage_id stage;
-
-        bool pending : 1;
 };
 #define to_mdp5_plane_state(x) \
                 container_of(x, struct mdp5_plane_state, base)
@@ -232,8 +230,6 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
 void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
 
 uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
-void mdp5_plane_complete_commit(struct drm_plane *plane,
-                                struct drm_plane_state *state);
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary);
 
239 235
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index c099da7bc212..25d9d0a97156 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -179,7 +179,6 @@ mdp5_plane_atomic_print_state(struct drm_printer *p,
         drm_printf(p, "\tzpos=%u\n", pstate->zpos);
         drm_printf(p, "\talpha=%u\n", pstate->alpha);
         drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
-        drm_printf(p, "\tpending=%u\n", pstate->pending);
 }
 
 static void mdp5_plane_reset(struct drm_plane *plane)
@@ -220,8 +219,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
         if (mdp5_state && mdp5_state->base.fb)
                 drm_framebuffer_reference(mdp5_state->base.fb);
 
-        mdp5_state->pending = false;
-
         return &mdp5_state->base;
 }
 
@@ -288,13 +285,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
         DBG("%s: check (%d -> %d)", plane->name,
                         plane_enabled(old_state), plane_enabled(state));
 
-        /* We don't allow faster-than-vblank updates.. if we did add this
-         * some day, we would need to disallow in cases where hwpipe
-         * changes
-         */
-        if (WARN_ON(to_mdp5_plane_state(old_state)->pending))
-                return -EBUSY;
-
         max_width = config->hw->lm.max_width << 16;
         max_height = config->hw->lm.max_height << 16;
 
@@ -370,12 +360,9 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
                 struct drm_plane_state *old_state)
 {
         struct drm_plane_state *state = plane->state;
-        struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
 
         DBG("%s: update", plane->name);
 
-        mdp5_state->pending = true;
-
         if (plane_enabled(state)) {
                 int ret;
 
@@ -851,15 +838,6 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
         return pstate->hwpipe->flush_mask;
 }
 
-/* called after vsync in thread context */
-void mdp5_plane_complete_commit(struct drm_plane *plane,
-                                struct drm_plane_state *state)
-{
-        struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
-
-        pstate->pending = false;
-}
-
 /* initialize plane */
 struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
 {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d8bc59c7e261..8098677a3916 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -294,6 +294,8 @@ put_iova(struct drm_gem_object *obj)
         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
         for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
+                if (!priv->aspace[id])
+                        continue;
                 msm_gem_unmap_vma(priv->aspace[id],
                                 &msm_obj->domain[id], msm_obj->sgt);
         }
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index cef08da1da4e..6a157763dfc3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -411,7 +411,8 @@ nouveau_display_init(struct drm_device *dev)
                 return ret;
 
         /* enable polling for external displays */
-        drm_kms_helper_poll_enable(dev);
+        if (!dev->mode_config.poll_enabled)
+                drm_kms_helper_poll_enable(dev);
 
         /* enable hotplug interrupts */
         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 59348fc41c77..bc85a45f91cd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -773,7 +773,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
         pci_set_master(pdev);
 
         ret = nouveau_do_resume(drm_dev, true);
-        drm_kms_helper_poll_enable(drm_dev);
+
+        if (!drm_dev->mode_config.poll_enabled)
+                drm_kms_helper_poll_enable(drm_dev);
+
         /* do magic */
         nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
         vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 8d5ed5bfdacb..42c1fa53d431 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -165,6 +165,8 @@ struct nouveau_drm {
         struct backlight_device *backlight;
         struct list_head bl_connectors;
         struct work_struct hpd_work;
+        struct work_struct fbcon_work;
+        int fbcon_new_state;
 #ifdef CONFIG_ACPI
         struct notifier_block acpi_nb;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 2f2a3dcd4ad7..fa2d0a978ccc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -470,19 +470,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
         .fb_probe = nouveau_fbcon_create,
 };
 
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+        struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
+        int state = READ_ONCE(drm->fbcon_new_state);
+
+        if (state == FBINFO_STATE_RUNNING)
+                pm_runtime_get_sync(drm->dev->dev);
+
+        console_lock();
+        if (state == FBINFO_STATE_RUNNING)
+                nouveau_fbcon_accel_restore(drm->dev);
+        drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
+        if (state != FBINFO_STATE_RUNNING)
+                nouveau_fbcon_accel_save_disable(drm->dev);
+        console_unlock();
+
+        if (state == FBINFO_STATE_RUNNING) {
+                pm_runtime_mark_last_busy(drm->dev->dev);
+                pm_runtime_put_sync(drm->dev->dev);
+        }
+}
+
 void
 nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 {
         struct nouveau_drm *drm = nouveau_drm(dev);
-        if (drm->fbcon) {
-                console_lock();
-                if (state == FBINFO_STATE_RUNNING)
-                        nouveau_fbcon_accel_restore(dev);
-                drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
-                if (state != FBINFO_STATE_RUNNING)
-                        nouveau_fbcon_accel_save_disable(dev);
-                console_unlock();
-        }
+
+        if (!drm->fbcon)
+                return;
+
+        drm->fbcon_new_state = state;
+        /* Since runtime resume can happen as a result of a sysfs operation,
+         * it's possible we already have the console locked. So handle fbcon
+         * init/deinit from a separate work thread
+         */
+        schedule_work(&drm->fbcon_work);
 }
 
 int
@@ -502,6 +526,7 @@ nouveau_fbcon_init(struct drm_device *dev)
                 return -ENOMEM;
 
         drm->fbcon = fbcon;
+        INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
 
         drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
 
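
Editor's note on the nouveau change above: runtime resume can be triggered from a path that may already hold the console lock, so the fix records the requested state and lets a worker take console_lock() instead. A stripped-down sketch of that hand-off (illustrative names; the console and workqueue calls are real API):

        #include <linux/console.h>
        #include <linux/workqueue.h>

        static int requested_state;
        static struct work_struct state_work;  /* INIT_WORK(&state_work, state_worker) at init */

        static void state_worker(struct work_struct *work)
        {
                int state = READ_ONCE(requested_state);

                console_lock();
                /* ... apply 'state' while the console is locked ... */
                console_unlock();
        }

        static void request_state(int state)
        {
                WRITE_ONCE(requested_state, state);  /* paired with READ_ONCE() above */
                schedule_work(&state_work);
        }
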
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 00ea0002b539..e0c143b865f3 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -366,11 +366,10 @@ static void
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
         /* if we are running in a VM, make sure the device
-         * torn down properly on reboot/shutdown.
-         * unfortunately we can't detect certain
-         * hypervisors so just do this all the time.
+         * torn down properly on reboot/shutdown
          */
-        radeon_pci_remove(pdev);
+        if (radeon_device_is_virtual())
+                radeon_pci_remove(pdev);
 }
 
 static int radeon_pmops_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index e8a38d296855..414776811e71 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -114,6 +114,9 @@ MODULE_FIRMWARE("radeon/hainan_mc.bin");
 MODULE_FIRMWARE("radeon/hainan_rlc.bin");
 MODULE_FIRMWARE("radeon/hainan_smc.bin");
 MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
+
+MODULE_FIRMWARE("radeon/si58_mc.bin");
 
 static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
 static void si_pcie_gen3_enable(struct radeon_device *rdev);
@@ -1650,6 +1653,8 @@ static int si_init_microcode(struct radeon_device *rdev)
         int err;
         int new_fw = 0;
         bool new_smc = false;
+        bool si58_fw = false;
+        bool banks2_fw = false;
 
         DRM_DEBUG("\n");
 
@@ -1727,10 +1732,11 @@ static int si_init_microcode(struct radeon_device *rdev)
                     ((rdev->pdev->device == 0x6660) ||
                      (rdev->pdev->device == 0x6663) ||
                      (rdev->pdev->device == 0x6665) ||
-                     (rdev->pdev->device == 0x6667))) ||
-                    ((rdev->pdev->revision == 0xc3) &&
-                     (rdev->pdev->device == 0x6665)))
+                     (rdev->pdev->device == 0x6667))))
                         new_smc = true;
+                else if ((rdev->pdev->revision == 0xc3) &&
+                         (rdev->pdev->device == 0x6665))
+                        banks2_fw = true;
                 new_chip_name = "hainan";
                 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
                 me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1742,6 +1748,10 @@ static int si_init_microcode(struct radeon_device *rdev)
         default: BUG();
         }
 
+        /* this memory configuration requires special firmware */
+        if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+                si58_fw = true;
+
         DRM_INFO("Loading %s Microcode\n", new_chip_name);
 
         snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
@@ -1845,7 +1855,10 @@ static int si_init_microcode(struct radeon_device *rdev)
                 }
         }
 
-        snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
+        if (si58_fw)
+                snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+        else
+                snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
         err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
         if (err) {
                 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
@@ -1876,7 +1889,9 @@ static int si_init_microcode(struct radeon_device *rdev)
                 }
         }
 
-        if (new_smc)
+        if (banks2_fw)
+                snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
+        else if (new_smc)
                 snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
         else
                 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
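
Editor's note on the si58/banks_k_2 hunks above: the driver now picks a board-specific firmware image when a quirk is detected, otherwise the generic one, and lets request_firmware() report whether the blob exists. A hedged sketch of the idiom (request_firmware() is real kernel API; the flag and fallback are illustrative):

        #include <linux/firmware.h>
        #include <linux/device.h>

        static int sketch_load_mc_fw(struct device *dev, bool special_mem_cfg,
                                     const struct firmware **fw)
        {
                const char *name = special_mem_cfg ? "radeon/si58_mc.bin"
                                                   : "radeon/hainan_mc.bin";

                /* Returns -ENOENT etc. when the blob is not installed,
                 * which is the caller's cue to try a fallback name. */
                return request_firmware(fw, name, dev);
        }
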
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 13ba73fd9b68..2944916f7102 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3008,17 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                          (rdev->pdev->device == 0x6817) ||
                          (rdev->pdev->device == 0x6806))
                         max_mclk = 120000;
-        } else if (rdev->family == CHIP_OLAND) {
-                if ((rdev->pdev->revision == 0xC7) ||
-                    (rdev->pdev->revision == 0x80) ||
-                    (rdev->pdev->revision == 0x81) ||
-                    (rdev->pdev->revision == 0x83) ||
-                    (rdev->pdev->revision == 0x87) ||
-                    (rdev->pdev->device == 0x6604) ||
-                    (rdev->pdev->device == 0x6605)) {
-                        max_sclk = 75000;
-                        max_mclk = 80000;
-                }
         } else if (rdev->family == CHIP_HAINAN) {
                 if ((rdev->pdev->revision == 0x81) ||
                     (rdev->pdev->revision == 0x83) ||
@@ -3027,7 +3016,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                     (rdev->pdev->device == 0x6665) ||
                     (rdev->pdev->device == 0x6667)) {
                         max_sclk = 75000;
-                        max_mclk = 80000;
                 }
         }
         /* Apply dpm quirks */
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index a0fd3e66bc4b..7aadce1f7e7a 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -839,7 +839,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
 
         }
 
-        __drm_atomic_helper_crtc_destroy_state(state);
+        drm_atomic_helper_crtc_destroy_state(crtc, state);
 }
 
 static const struct drm_crtc_funcs vc4_crtc_funcs = {
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index db920771bfb5..ab3016982466 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -594,12 +594,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
                                              args->shader_rec_count);
         struct vc4_bo *bo;
 
-        if (uniforms_offset < shader_rec_offset ||
+        if (shader_rec_offset < args->bin_cl_size ||
+            uniforms_offset < shader_rec_offset ||
             exec_size < uniforms_offset ||
             args->shader_rec_count >= (UINT_MAX /
                                           sizeof(struct vc4_shader_state)) ||
             temp_size < exec_size) {
                 DRM_ERROR("overflow in exec arguments\n");
+                ret = -EINVAL;
                 goto fail;
         }
 
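
Editor's note on the extra comparison added to the vc4 check above: when offsets are built by successive additions of user-controlled sizes, each offset must be compared against the previous one, because a wrapped addition yields a smaller value. A driver-free sketch of the monotonicity check:

        #include <linux/types.h>

        /* true iff region A of a_size bytes at 'base' and the following
         * b_size bytes fit in 'total' without any 32-bit sum wrapping. */
        static bool sketch_layout_ok(u32 base, u32 a_size, u32 b_size, u32 total)
        {
                u32 b_off = base + a_size;      /* may wrap */
                u32 end = b_off + b_size;       /* may wrap */

                return b_off >= base && end >= b_off && end <= total;
        }
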
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 08886a309757..5cdd003605f5 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -461,7 +461,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
         }
 
         ret = vc4_full_res_bounds_check(exec, *obj, surf);
-        if (!ret)
+        if (ret)
                 return ret;
 
         return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index dd21f950e129..cde9f3758106 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -331,7 +331,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
         info->fbops = &virtio_gpufb_ops;
         info->pixmap.flags = FB_PIXMAP_SYSTEM;
 
-        info->screen_base = obj->vmap;
+        info->screen_buffer = obj->vmap;
         info->screen_size = obj->gem_base.size;
         drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
         drm_fb_helper_fill_var(info, &vfbdev->helper,
diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
index 717704e9ae07..c0303f61c26a 100644
--- a/drivers/hid/hid-corsair.c
+++ b/drivers/hid/hid-corsair.c
@@ -148,26 +148,36 @@ static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev)
         struct usb_interface *usbif = to_usb_interface(dev->parent);
         struct usb_device *usbdev = interface_to_usbdev(usbif);
         int brightness;
-        char data[8];
+        char *data;
+
+        data = kmalloc(8, GFP_KERNEL);
+        if (!data)
+                return -ENOMEM;
 
         ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
                               K90_REQUEST_STATUS,
                               USB_DIR_IN | USB_TYPE_VENDOR |
                               USB_RECIP_DEVICE, 0, 0, data, 8,
                               USB_CTRL_SET_TIMEOUT);
-        if (ret < 0) {
+        if (ret < 5) {
                 dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
                          ret);
-                return -EIO;
+                ret = -EIO;
+                goto out;
         }
         brightness = data[4];
         if (brightness < 0 || brightness > 3) {
                 dev_warn(dev,
                          "Read invalid backlight brightness: %02hhx.\n",
                          data[4]);
-                return -EIO;
+                ret = -EIO;
+                goto out;
         }
-        return brightness;
+        ret = brightness;
+out:
+        kfree(data);
+
+        return ret;
 }
 
 static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev)
@@ -253,17 +263,22 @@ static ssize_t k90_show_macro_mode(struct device *dev,
         struct usb_interface *usbif = to_usb_interface(dev->parent);
         struct usb_device *usbdev = interface_to_usbdev(usbif);
         const char *macro_mode;
-        char data[8];
+        char *data;
+
+        data = kmalloc(2, GFP_KERNEL);
+        if (!data)
+                return -ENOMEM;
 
         ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
                               K90_REQUEST_GET_MODE,
                               USB_DIR_IN | USB_TYPE_VENDOR |
                               USB_RECIP_DEVICE, 0, 0, data, 2,
                               USB_CTRL_SET_TIMEOUT);
-        if (ret < 0) {
+        if (ret < 1) {
                 dev_warn(dev, "Failed to get K90 initial mode (error %d).\n",
                          ret);
-                return -EIO;
+                ret = -EIO;
+                goto out;
         }
 
         switch (data[0]) {
@@ -277,10 +292,15 @@ static ssize_t k90_show_macro_mode(struct device *dev,
         default:
                 dev_warn(dev, "K90 in unknown mode: %02hhx.\n",
                          data[0]);
-                return -EIO;
+                ret = -EIO;
+                goto out;
         }
 
-        return snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
+        ret = snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
+out:
+        kfree(data);
+
+        return ret;
 }
 
 static ssize_t k90_store_macro_mode(struct device *dev,
@@ -320,26 +340,36 @@ static ssize_t k90_show_current_profile(struct device *dev,
         struct usb_interface *usbif = to_usb_interface(dev->parent);
         struct usb_device *usbdev = interface_to_usbdev(usbif);
         int current_profile;
-        char data[8];
+        char *data;
+
+        data = kmalloc(8, GFP_KERNEL);
+        if (!data)
+                return -ENOMEM;
 
         ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
                               K90_REQUEST_STATUS,
                               USB_DIR_IN | USB_TYPE_VENDOR |
                               USB_RECIP_DEVICE, 0, 0, data, 8,
                               USB_CTRL_SET_TIMEOUT);
-        if (ret < 0) {
+        if (ret < 8) {
                 dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
                          ret);
-                return -EIO;
+                ret = -EIO;
+                goto out;
         }
         current_profile = data[7];
         if (current_profile < 1 || current_profile > 3) {
                 dev_warn(dev, "Read invalid current profile: %02hhx.\n",
                          data[7]);
-                return -EIO;
+                ret = -EIO;
+                goto out;
         }
 
-        return snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
+        ret = snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
+out:
+        kfree(data);
+
+        return ret;
 }
 
 static ssize_t k90_store_current_profile(struct device *dev,
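
Editor's note on the hid-corsair hunks above: they fix two classic USB pitfalls at once. usb_control_msg() buffers must be heap-allocated, since an on-stack array is not DMA-safe (especially with vmapped stacks), and the positive return value is the byte count actually transferred, so a short read has to be rejected before the buffer is indexed. A condensed sketch (usb_control_msg()/usb_rcvctrlpipe() are real API; the request constant is illustrative):

        #include <linux/usb.h>
        #include <linux/slab.h>

        static int sketch_read_status_byte(struct usb_device *udev, u8 *out)
        {
                char *data;
                int ret;

                data = kmalloc(8, GFP_KERNEL); /* DMA-able, unlike the stack */
                if (!data)
                        return -ENOMEM;

                ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                      0x01 /* illustrative bRequest */,
                                      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                                      0, 0, data, 8, USB_CTRL_SET_TIMEOUT);
                if (ret < 5) {          /* need at least bytes 0..4 */
                        ret = ret < 0 ? ret : -EIO;
                        goto out;
                }
                *out = data[4];
                ret = 0;
        out:
                kfree(data);
                return ret;
        }
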
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index b9779bcbd140..8aeca038cc73 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -740,6 +740,11 @@ static int wacom_add_shared_data(struct hid_device *hdev)
                 return retval;
         }
 
+        if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
+                wacom_wac->shared->touch = hdev;
+        else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
+                wacom_wac->shared->pen = hdev;
+
 out:
         mutex_unlock(&wacom_udev_list_lock);
         return retval;
@@ -2036,10 +2041,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
         if (error)
                 goto fail;
 
-        error = wacom_add_shared_data(hdev);
-        if (error)
-                goto fail;
-
         /*
          * Bamboo Pad has a generic hid handling for the Pen, and we switch it
          * into debug mode for the touch part.
@@ -2080,10 +2081,9 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
 
         wacom_update_name(wacom, wireless ? " (WL)" : "");
 
-        if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
-                wacom_wac->shared->touch = hdev;
-        else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
-                wacom_wac->shared->pen = hdev;
+        error = wacom_add_shared_data(hdev);
+        if (error)
+                goto fail;
 
         if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) &&
             (features->quirks & WACOM_QUIRK_BATTERY)) {
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index b1a9a3ca6d56..0884dc9554fd 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2187,6 +2187,16 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
 
         wacom_report_events(hdev, report);
 
+        /*
+         * Non-input reports may be sent prior to the device being
+         * completely initialized. Since only their events need
+         * to be processed, exit after 'wacom_report_events' has
+         * been called to prevent potential crashes in the report-
+         * processing functions.
+         */
+        if (report->type != HID_INPUT_REPORT)
+                return;
+
         if (WACOM_PAD_FIELD(field)) {
                 wacom_wac_pad_battery_report(hdev, report);
                 if (wacom->wacom_wac.pad_input)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e7dcfac877ca..3e70a9c5d79d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2811,7 +2811,8 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
         if (!src_addr || !src_addr->sa_family) {
                 src_addr = (struct sockaddr *) &id->route.addr.src_addr;
                 src_addr->sa_family = dst_addr->sa_family;
-                if (dst_addr->sa_family == AF_INET6) {
+                if (IS_ENABLED(CONFIG_IPV6) &&
+                    dst_addr->sa_family == AF_INET6) {
                         struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
                         struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
                         src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 1e62a5f0cb28..4609b921f899 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                           IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
 
         if (access & IB_ACCESS_ON_DEMAND) {
+                put_pid(umem->pid);
                 ret = ib_umem_odp_get(context, umem);
                 if (ret) {
                         kfree(umem);
@@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
         page_list = (struct page **) __get_free_page(GFP_KERNEL);
         if (!page_list) {
+                put_pid(umem->pid);
                 kfree(umem);
                 return ERR_PTR(-ENOMEM);
         }
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 9d5fe1853da4..6262dc035f3c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1135,16 +1135,7 @@ static int iwch_query_port(struct ib_device *ibdev,
 
         memset(props, 0, sizeof(struct ib_port_attr));
         props->max_mtu = IB_MTU_4096;
-        if (netdev->mtu >= 4096)
-                props->active_mtu = IB_MTU_4096;
-        else if (netdev->mtu >= 2048)
-                props->active_mtu = IB_MTU_2048;
-        else if (netdev->mtu >= 1024)
-                props->active_mtu = IB_MTU_1024;
-        else if (netdev->mtu >= 512)
-                props->active_mtu = IB_MTU_512;
-        else
-                props->active_mtu = IB_MTU_256;
+        props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
         if (!netif_carrier_ok(netdev))
                 props->state = IB_PORT_DOWN;
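
Editor's note: the iwch and cxgb4 query_port changes replace an open-coded MTU ladder with ib_mtu_int_to_enum(). For reference, a sketch of the mapping the helper encodes, reconstructed from the ladder deleted above (treat the body as an illustration of the semantics, not a copy of the in-tree helper):

        #include <rdma/ib_verbs.h>

        static enum ib_mtu sketch_mtu_int_to_enum(int mtu)
        {
                if (mtu >= 4096)
                        return IB_MTU_4096;
                if (mtu >= 2048)
                        return IB_MTU_2048;
                if (mtu >= 1024)
                        return IB_MTU_1024;
                if (mtu >= 512)
                        return IB_MTU_512;
                return IB_MTU_256;
        }
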
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index f1510cc76d2d..9398143d7c5e 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
         skb_trim(skb, dlen);
         mutex_lock(&ep->com.mutex);
 
-        /* update RX credits */
-        update_rx_credits(ep, dlen);
-
         switch (ep->com.state) {
         case MPA_REQ_SENT:
+                update_rx_credits(ep, dlen);
                 ep->rcv_seq += dlen;
                 disconnect = process_mpa_reply(ep, skb);
                 break;
         case MPA_REQ_WAIT:
+                update_rx_credits(ep, dlen);
                 ep->rcv_seq += dlen;
                 disconnect = process_mpa_request(ep, skb);
                 break;
         case FPDU_MODE: {
                 struct c4iw_qp_attributes attrs;
+
+                update_rx_credits(ep, dlen);
                 BUG_ON(!ep->com.qp);
                 if (status)
                         pr_err("%s Unexpected streaming data." \
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 19c6477af19f..bec82a600d77 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -505,6 +505,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
         }
 
         /*
+         * Special cqe for drain WR completions...
+         */
+        if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+                *cookie = CQE_DRAIN_COOKIE(hw_cqe);
+                *cqe = *hw_cqe;
+                goto skip_cqe;
+        }
+
+        /*
          * Gotta tweak READ completions:
          *      1) the cqe doesn't contain the sq_wptr from the wr.
          *      2) opcode not reflected from the wr.
@@ -753,6 +762,9 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                         c4iw_invalidate_mr(qhp->rhp,
                                            CQE_WRID_FR_STAG(&cqe));
                         break;
+                case C4IW_DRAIN_OPCODE:
+                        wc->opcode = IB_WC_SEND;
+                        break;
                 default:
                         printk(KERN_ERR MOD "Unexpected opcode %d "
                                "in the CQE received for QPID=0x%0x\n",
@@ -817,15 +829,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                 }
         }
 out:
-        if (wq) {
-                if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
-                        if (t4_sq_empty(wq))
-                                complete(&qhp->sq_drained);
-                        if (t4_rq_empty(wq))
-                                complete(&qhp->rq_drained);
-                }
+        if (wq)
                 spin_unlock(&qhp->lock);
-        }
         return ret;
 }
 
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 516b0ae6dc3f..40c0e7b9fc6e 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -846,9 +846,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
                 }
         }
 
+        rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
+        if (!rdev->free_workq) {
+                err = -ENOMEM;
+                goto err_free_status_page;
+        }
+
         rdev->status_page->db_off = 0;
 
         return 0;
+err_free_status_page:
+        free_page((unsigned long)rdev->status_page);
 destroy_ocqp_pool:
         c4iw_ocqp_pool_destroy(rdev);
 destroy_rqtpool:
@@ -862,6 +870,7 @@ destroy_resource:
 
 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
+        destroy_workqueue(rdev->free_workq);
         kfree(rdev->wr_log);
         free_page((unsigned long)rdev->status_page);
         c4iw_pblpool_destroy(rdev);
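
Editor's note on the c4iw_rdev_open() hunk above: it slots a new resource into the usual goto-unwind ladder, where each failure jumps to the label that releases everything acquired so far, in reverse order. A compact sketch of the idiom (create_singlethread_workqueue()/destroy_workqueue() and the page calls are real API; everything else is illustrative):

        #include <linux/workqueue.h>
        #include <linux/gfp.h>

        static int sketch_open(struct workqueue_struct **wq, unsigned long *page)
        {
                int err;

                *page = __get_free_page(GFP_KERNEL);
                if (!*page)
                        return -ENOMEM;

                *wq = create_singlethread_workqueue("sketch_free");
                if (!*wq) {
                        err = -ENOMEM;
                        goto err_free_page;
                }
                return 0;

        err_free_page:
                free_page(*page);
                return err;
        }
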
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 4788e1a46fde..8cd4d054a87e 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -45,6 +45,7 @@
 #include <linux/kref.h>
 #include <linux/timer.h>
 #include <linux/io.h>
+#include <linux/workqueue.h>
 
 #include <asm/byteorder.h>
 
@@ -107,6 +108,7 @@ struct c4iw_dev_ucontext {
         struct list_head qpids;
         struct list_head cqids;
         struct mutex lock;
+        struct kref kref;
 };
 
 enum c4iw_rdev_flags {
@@ -183,6 +185,7 @@ struct c4iw_rdev {
         atomic_t wr_log_idx;
         struct wr_log_entry *wr_log;
         int wr_log_size;
+        struct workqueue_struct *free_workq;
 };
 
 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -480,8 +483,8 @@ struct c4iw_qp {
         wait_queue_head_t wait;
         struct timer_list timer;
         int sq_sig_all;
-        struct completion rq_drained;
-        struct completion sq_drained;
+        struct work_struct free_work;
+        struct c4iw_ucontext *ucontext;
 };
 
 static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -495,6 +498,7 @@ struct c4iw_ucontext {
         u32 key;
         spinlock_t mmap_lock;
         struct list_head mmaps;
+        struct kref kref;
 };
 
 static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
@@ -502,6 +506,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
         return container_of(c, struct c4iw_ucontext, ibucontext);
 }
 
+void _c4iw_free_ucontext(struct kref *kref);
+
+static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
+{
+        kref_put(&ucontext->kref, _c4iw_free_ucontext);
+}
+
+static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
+{
+        kref_get(&ucontext->kref);
+}
+
 struct c4iw_mm_entry {
         struct list_head entry;
         u64 addr;
@@ -615,6 +631,8 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
                 return IB_QPS_ERR;
 }
 
+#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
+
 static inline u32 c4iw_ib_to_tpt_access(int a)
 {
         return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
@@ -997,8 +1015,6 @@ extern int c4iw_wr_log;
 extern int db_fc_threshold;
 extern int db_coalescing_threshold;
 extern int use_dsgl;
-void c4iw_drain_rq(struct ib_qp *qp);
-void c4iw_drain_sq(struct ib_qp *qp);
 void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
 
 #endif
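
Editor's note on the iw_cxgb4.h additions above: the ucontext gains a struct kref so the deferred QP-free worker can pin it beyond ib_dealloc_ucontext(). The general pattern, as a self-contained sketch (kref_init()/kref_get()/kref_put() are real API; the object is illustrative):

        #include <linux/kref.h>
        #include <linux/slab.h>

        struct sketch_ctx {
                struct kref kref;
                /* payload ... */
        };

        static void sketch_ctx_release(struct kref *kref)
        {
                struct sketch_ctx *ctx = container_of(kref, struct sketch_ctx, kref);

                kfree(ctx);
        }

        static struct sketch_ctx *sketch_ctx_alloc(void)
        {
                struct sketch_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

                if (ctx)
                        kref_init(&ctx->kref); /* refcount starts at 1 */
                return ctx;
        }

        static void sketch_ctx_put(struct sketch_ctx *ctx)
        {
                kref_put(&ctx->kref, sketch_ctx_release);
        }
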
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 49b51b7e0fd7..3345e1c312f7 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -93,17 +93,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
         return -ENOSYS;
 }
 
-static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+void _c4iw_free_ucontext(struct kref *kref)
 {
-        struct c4iw_dev *rhp = to_c4iw_dev(context->device);
-        struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+        struct c4iw_ucontext *ucontext;
+        struct c4iw_dev *rhp;
         struct c4iw_mm_entry *mm, *tmp;
 
-        PDBG("%s context %p\n", __func__, context);
+        ucontext = container_of(kref, struct c4iw_ucontext, kref);
+        rhp = to_c4iw_dev(ucontext->ibucontext.device);
+
+        PDBG("%s ucontext %p\n", __func__, ucontext);
         list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
                 kfree(mm);
         c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
         kfree(ucontext);
+}
+
+static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+{
+        struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+
+        PDBG("%s context %p\n", __func__, context);
+        c4iw_put_ucontext(ucontext);
         return 0;
 }
 
@@ -127,6 +138,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
         c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
         INIT_LIST_HEAD(&context->mmaps);
         spin_lock_init(&context->mmap_lock);
+        kref_init(&context->kref);
 
         if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
                 if (!warned++)
@@ -361,16 +373,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
 
         memset(props, 0, sizeof(struct ib_port_attr));
         props->max_mtu = IB_MTU_4096;
-        if (netdev->mtu >= 4096)
-                props->active_mtu = IB_MTU_4096;
-        else if (netdev->mtu >= 2048)
-                props->active_mtu = IB_MTU_2048;
-        else if (netdev->mtu >= 1024)
-                props->active_mtu = IB_MTU_1024;
-        else if (netdev->mtu >= 512)
-                props->active_mtu = IB_MTU_512;
-        else
-                props->active_mtu = IB_MTU_256;
+        props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
         if (!netif_carrier_ok(netdev))
                 props->state = IB_PORT_DOWN;
@@ -607,8 +610,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
         dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
         dev->ibdev.get_port_immutable = c4iw_port_immutable;
         dev->ibdev.get_dev_fw_str = get_dev_fw_str;
-        dev->ibdev.drain_sq = c4iw_drain_sq;
-        dev->ibdev.drain_rq = c4iw_drain_rq;
 
         dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
         if (!dev->ibdev.iwcm)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index cda5542e13a2..04c1c382dedb 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -715,13 +715,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
         return 0;
 }
 
-static void _free_qp(struct kref *kref)
+static void free_qp_work(struct work_struct *work)
+{
+        struct c4iw_ucontext *ucontext;
+        struct c4iw_qp *qhp;
+        struct c4iw_dev *rhp;
+
+        qhp = container_of(work, struct c4iw_qp, free_work);
+        ucontext = qhp->ucontext;
+        rhp = qhp->rhp;
+
+        PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
+        destroy_qp(&rhp->rdev, &qhp->wq,
+                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+
+        if (ucontext)
+                c4iw_put_ucontext(ucontext);
+        kfree(qhp);
+}
+
+static void queue_qp_free(struct kref *kref)
 {
         struct c4iw_qp *qhp;
 
         qhp = container_of(kref, struct c4iw_qp, kref);
         PDBG("%s qhp %p\n", __func__, qhp);
-        kfree(qhp);
+        queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
 }
 
 void c4iw_qp_add_ref(struct ib_qp *qp)
@@ -733,7 +752,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp)
 void c4iw_qp_rem_ref(struct ib_qp *qp)
 {
         PDBG("%s ib_qp %p\n", __func__, qp);
-        kref_put(&to_c4iw_qp(qp)->kref, _free_qp);
+        kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
 }
 
 static void add_to_fc_list(struct list_head *head, struct list_head *entry)
@@ -776,6 +795,64 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
         return 0;
 }
 
+static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+{
+        struct t4_cqe cqe = {};
+        struct c4iw_cq *schp;
+        unsigned long flag;
+        struct t4_cq *cq;
+
+        schp = to_c4iw_cq(qhp->ibqp.send_cq);
+        cq = &schp->cq;
+
+        cqe.u.drain_cookie = wr->wr_id;
+        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+                                 CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+                                 CQE_TYPE_V(1) |
+                                 CQE_SWCQE_V(1) |
+                                 CQE_QPID_V(qhp->wq.sq.qid));
+
+        spin_lock_irqsave(&schp->lock, flag);
+        cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+        cq->sw_queue[cq->sw_pidx] = cqe;
+        t4_swcq_produce(cq);
+        spin_unlock_irqrestore(&schp->lock, flag);
+
+        spin_lock_irqsave(&schp->comp_handler_lock, flag);
+        (*schp->ibcq.comp_handler)(&schp->ibcq,
+                                   schp->ibcq.cq_context);
+        spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+}
+
+static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+        struct t4_cqe cqe = {};
+        struct c4iw_cq *rchp;
+        unsigned long flag;
+        struct t4_cq *cq;
+
+        rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+        cq = &rchp->cq;
+
+        cqe.u.drain_cookie = wr->wr_id;
+        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+                                 CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+                                 CQE_TYPE_V(0) |
+                                 CQE_SWCQE_V(1) |
+                                 CQE_QPID_V(qhp->wq.sq.qid));
+
+        spin_lock_irqsave(&rchp->lock, flag);
+        cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+        cq->sw_queue[cq->sw_pidx] = cqe;
+        t4_swcq_produce(cq);
+        spin_unlock_irqrestore(&rchp->lock, flag);
+
+        spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+        (*rchp->ibcq.comp_handler)(&rchp->ibcq,
+                                   rchp->ibcq.cq_context);
+        spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+}
+
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                    struct ib_send_wr **bad_wr)
 {
@@ -794,8 +871,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
794 spin_lock_irqsave(&qhp->lock, flag); 871 spin_lock_irqsave(&qhp->lock, flag);
795 if (t4_wq_in_error(&qhp->wq)) { 872 if (t4_wq_in_error(&qhp->wq)) {
796 spin_unlock_irqrestore(&qhp->lock, flag); 873 spin_unlock_irqrestore(&qhp->lock, flag);
797 *bad_wr = wr; 874 complete_sq_drain_wr(qhp, wr);
798 return -EINVAL; 875 return err;
799 } 876 }
800 num_wrs = t4_sq_avail(&qhp->wq); 877 num_wrs = t4_sq_avail(&qhp->wq);
801 if (num_wrs == 0) { 878 if (num_wrs == 0) {
@@ -937,8 +1014,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
937 spin_lock_irqsave(&qhp->lock, flag); 1014 spin_lock_irqsave(&qhp->lock, flag);
938 if (t4_wq_in_error(&qhp->wq)) { 1015 if (t4_wq_in_error(&qhp->wq)) {
939 spin_unlock_irqrestore(&qhp->lock, flag); 1016 spin_unlock_irqrestore(&qhp->lock, flag);
940 *bad_wr = wr; 1017 complete_rq_drain_wr(qhp, wr);
941 return -EINVAL; 1018 return err;
942 } 1019 }
943 num_wrs = t4_rq_avail(&qhp->wq); 1020 num_wrs = t4_rq_avail(&qhp->wq);
944 if (num_wrs == 0) { 1021 if (num_wrs == 0) {
@@ -1550,7 +1627,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1550 } 1627 }
1551 break; 1628 break;
1552 case C4IW_QP_STATE_CLOSING: 1629 case C4IW_QP_STATE_CLOSING:
1553 if (!internal) { 1630
1631 /*
1632 * Allow kernel users to move to ERROR for qp draining.
1633 */
1634 if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
1635 C4IW_QP_STATE_ERROR)) {
1554 ret = -EINVAL; 1636 ret = -EINVAL;
1555 goto out; 1637 goto out;
1556 } 1638 }
@@ -1643,7 +1725,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
1643 struct c4iw_dev *rhp; 1725 struct c4iw_dev *rhp;
1644 struct c4iw_qp *qhp; 1726 struct c4iw_qp *qhp;
1645 struct c4iw_qp_attributes attrs; 1727 struct c4iw_qp_attributes attrs;
1646 struct c4iw_ucontext *ucontext;
1647 1728
1648 qhp = to_c4iw_qp(ib_qp); 1729 qhp = to_c4iw_qp(ib_qp);
1649 rhp = qhp->rhp; 1730 rhp = qhp->rhp;
@@ -1663,11 +1744,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
1663 spin_unlock_irq(&rhp->lock); 1744 spin_unlock_irq(&rhp->lock);
1664 free_ird(rhp, qhp->attr.max_ird); 1745 free_ird(rhp, qhp->attr.max_ird);
1665 1746
1666 ucontext = ib_qp->uobject ?
1667 to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
1668 destroy_qp(&rhp->rdev, &qhp->wq,
1669 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1670
1671 c4iw_qp_rem_ref(ib_qp); 1747 c4iw_qp_rem_ref(ib_qp);
1672 1748
1673 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid); 1749 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
@@ -1763,11 +1839,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1763 qhp->attr.max_ird = 0; 1839 qhp->attr.max_ird = 0;
1764 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; 1840 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
1765 spin_lock_init(&qhp->lock); 1841 spin_lock_init(&qhp->lock);
1766 init_completion(&qhp->sq_drained);
1767 init_completion(&qhp->rq_drained);
1768 mutex_init(&qhp->mutex); 1842 mutex_init(&qhp->mutex);
1769 init_waitqueue_head(&qhp->wait); 1843 init_waitqueue_head(&qhp->wait);
1770 kref_init(&qhp->kref); 1844 kref_init(&qhp->kref);
1845 INIT_WORK(&qhp->free_work, free_qp_work);
1771 1846
1772 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); 1847 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
1773 if (ret) 1848 if (ret)
@@ -1854,6 +1929,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1854 ma_sync_key_mm->len = PAGE_SIZE; 1929 ma_sync_key_mm->len = PAGE_SIZE;
1855 insert_mmap(ucontext, ma_sync_key_mm); 1930 insert_mmap(ucontext, ma_sync_key_mm);
1856 } 1931 }
1932
1933 c4iw_get_ucontext(ucontext);
1934 qhp->ucontext = ucontext;
1857 } 1935 }
1858 qhp->ibqp.qp_num = qhp->wq.sq.qid; 1936 qhp->ibqp.qp_num = qhp->wq.sq.qid;
1859 init_timer(&(qhp->timer)); 1937 init_timer(&(qhp->timer));
@@ -1958,40 +2036,3 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1958 init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; 2036 init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
1959 return 0; 2037 return 0;
1960} 2038}
1961
1962static void move_qp_to_err(struct c4iw_qp *qp)
1963{
1964 struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR };
1965
1966 (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1967}
1968
1969void c4iw_drain_sq(struct ib_qp *ibqp)
1970{
1971 struct c4iw_qp *qp = to_c4iw_qp(ibqp);
1972 unsigned long flag;
1973 bool need_to_wait;
1974
1975 move_qp_to_err(qp);
1976 spin_lock_irqsave(&qp->lock, flag);
1977 need_to_wait = !t4_sq_empty(&qp->wq);
1978 spin_unlock_irqrestore(&qp->lock, flag);
1979
1980 if (need_to_wait)
1981 wait_for_completion(&qp->sq_drained);
1982}
1983
1984void c4iw_drain_rq(struct ib_qp *ibqp)
1985{
1986 struct c4iw_qp *qp = to_c4iw_qp(ibqp);
1987 unsigned long flag;
1988 bool need_to_wait;
1989
1990 move_qp_to_err(qp);
1991 spin_lock_irqsave(&qp->lock, flag);
1992 need_to_wait = !t4_rq_empty(&qp->wq);
1993 spin_unlock_irqrestore(&qp->lock, flag);
1994
1995 if (need_to_wait)
1996 wait_for_completion(&qp->rq_drained);
1997}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 862381aa83c8..640d22148a3e 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -179,6 +179,7 @@ struct t4_cqe {
179 __be32 wrid_hi; 179 __be32 wrid_hi;
180 __be32 wrid_low; 180 __be32 wrid_low;
181 } gen; 181 } gen;
182 u64 drain_cookie;
182 } u; 183 } u;
183 __be64 reserved; 184 __be64 reserved;
184 __be64 bits_type_ts; 185 __be64 bits_type_ts;
@@ -238,6 +239,7 @@ struct t4_cqe {
238/* generic accessor macros */ 239/* generic accessor macros */
239#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi)) 240#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi))
240#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low)) 241#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low))
242#define CQE_DRAIN_COOKIE(x) ((x)->u.drain_cookie)
241 243
242/* macros for flit 3 of the cqe */ 244/* macros for flit 3 of the cqe */
243#define CQE_GENBIT_S 63 245#define CQE_GENBIT_S 63
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 29e97df9e1a7..4c000d60d5c6 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -100,16 +100,7 @@ static int i40iw_query_port(struct ib_device *ibdev,
100 memset(props, 0, sizeof(*props)); 100 memset(props, 0, sizeof(*props));
101 101
102 props->max_mtu = IB_MTU_4096; 102 props->max_mtu = IB_MTU_4096;
103 if (netdev->mtu >= 4096) 103 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
104 props->active_mtu = IB_MTU_4096;
105 else if (netdev->mtu >= 2048)
106 props->active_mtu = IB_MTU_2048;
107 else if (netdev->mtu >= 1024)
108 props->active_mtu = IB_MTU_1024;
109 else if (netdev->mtu >= 512)
110 props->active_mtu = IB_MTU_512;
111 else
112 props->active_mtu = IB_MTU_256;
113 104
114 props->lid = 1; 105 props->lid = 1;
115 if (netif_carrier_ok(iwdev->netdev)) 106 if (netif_carrier_ok(iwdev->netdev))
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index aff9fb14768b..5a31f3c6a421 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -478,17 +478,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
478 memset(props, 0, sizeof(*props)); 478 memset(props, 0, sizeof(*props));
479 479
480 props->max_mtu = IB_MTU_4096; 480 props->max_mtu = IB_MTU_4096;
481 481 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
482 if (netdev->mtu >= 4096)
483 props->active_mtu = IB_MTU_4096;
484 else if (netdev->mtu >= 2048)
485 props->active_mtu = IB_MTU_2048;
486 else if (netdev->mtu >= 1024)
487 props->active_mtu = IB_MTU_1024;
488 else if (netdev->mtu >= 512)
489 props->active_mtu = IB_MTU_512;
490 else
491 props->active_mtu = IB_MTU_256;
492 482
493 props->lid = 1; 483 props->lid = 1;
494 props->lmc = 0; 484 props->lmc = 0;
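
The three query_port hunks (cxgb4, i40iw, nes) replace identical open-coded MTU ladders with the new ib_mtu_int_to_enum() helper in include/rdma/ib_verbs.h. As one would expect from the code it replaces, the helper reduces an integer MTU to the nearest enum value at or below it; roughly:

	static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
	{
		if (mtu >= 4096)
			return IB_MTU_4096;
		else if (mtu >= 2048)
			return IB_MTU_2048;
		else if (mtu >= 1024)
			return IB_MTU_1024;
		else if (mtu >= 512)
			return IB_MTU_512;
		else
			return IB_MTU_256;
	}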
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 7b74d09a8217..3ac8aa5ef37d 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -576,8 +576,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
576 return 0; 576 return 0;
577} 577}
578 578
579void qedr_unaffiliated_event(void *context, 579void qedr_unaffiliated_event(void *context, u8 event_code)
580 u8 event_code)
581{ 580{
582 pr_err("unaffiliated event not implemented yet\n"); 581 pr_err("unaffiliated event not implemented yet\n");
583} 582}
@@ -792,6 +791,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
792 if (device_create_file(&dev->ibdev.dev, qedr_attributes[i])) 791 if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
793 goto sysfs_err; 792 goto sysfs_err;
794 793
794 if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
795 qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
796
795 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n"); 797 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
796 return dev; 798 return dev;
797 799
@@ -824,11 +826,10 @@ static void qedr_remove(struct qedr_dev *dev)
824 ib_dealloc_device(&dev->ibdev); 826 ib_dealloc_device(&dev->ibdev);
825} 827}
826 828
827static int qedr_close(struct qedr_dev *dev) 829static void qedr_close(struct qedr_dev *dev)
828{ 830{
829 qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); 831 if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
830 832 qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
831 return 0;
832} 833}
833 834
834static void qedr_shutdown(struct qedr_dev *dev) 835static void qedr_shutdown(struct qedr_dev *dev)
@@ -837,6 +838,12 @@ static void qedr_shutdown(struct qedr_dev *dev)
837 qedr_remove(dev); 838 qedr_remove(dev);
838} 839}
839 840
841static void qedr_open(struct qedr_dev *dev)
842{
843 if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
844 qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
845}
846
840static void qedr_mac_address_change(struct qedr_dev *dev) 847static void qedr_mac_address_change(struct qedr_dev *dev)
841{ 848{
842 union ib_gid *sgid = &dev->sgid_tbl[0]; 849 union ib_gid *sgid = &dev->sgid_tbl[0];
@@ -863,7 +870,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
863 870
864 ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); 871 ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
865 872
866 qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE); 873 qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
867 874
868 if (rc) 875 if (rc)
869 DP_ERR(dev, "Error updating mac filter\n"); 876 DP_ERR(dev, "Error updating mac filter\n");
@@ -877,7 +884,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
877{ 884{
878 switch (event) { 885 switch (event) {
879 case QEDE_UP: 886 case QEDE_UP:
880 qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); 887 qedr_open(dev);
881 break; 888 break;
882 case QEDE_DOWN: 889 case QEDE_DOWN:
883 qedr_close(dev); 890 qedr_close(dev);
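
qedr_open()/qedr_close() above use test_and_set_bit()/test_and_clear_bit() on enet_state so that IB_EVENT_PORT_ACTIVE and IB_EVENT_PORT_ERR fire only on actual link transitions, not on every repeated QEDE_UP/QEDE_DOWN notification. A sketch of that edge-triggered idiom (mydev and the messages are placeholders):

	#include <linux/bitops.h>
	#include <linux/printk.h>

	#define MYDEV_UP_BIT 0	/* analogous to QEDR_ENET_STATE_BIT */

	struct mydev {
		unsigned long state;
	};

	static void mydev_link_up(struct mydev *dev)
	{
		/* act only on the 0 -> 1 transition */
		if (!test_and_set_bit(MYDEV_UP_BIT, &dev->state))
			pr_info("port active\n");
	}

	static void mydev_link_down(struct mydev *dev)
	{
		/* act only on the 1 -> 0 transition */
		if (test_and_clear_bit(MYDEV_UP_BIT, &dev->state))
			pr_info("port error\n");
	}

Both test-and-modify operations are atomic, so concurrent up/down notifications still produce at most one event per transition.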
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 620badd7d4fb..bb32e4792ec9 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -113,6 +113,8 @@ struct qedr_device_attr {
113 struct qed_rdma_events events; 113 struct qed_rdma_events events;
114}; 114};
115 115
116#define QEDR_ENET_STATE_BIT (0)
117
116struct qedr_dev { 118struct qedr_dev {
117 struct ib_device ibdev; 119 struct ib_device ibdev;
118 struct qed_dev *cdev; 120 struct qed_dev *cdev;
@@ -153,6 +155,8 @@ struct qedr_dev {
153 struct qedr_cq *gsi_sqcq; 155 struct qedr_cq *gsi_sqcq;
154 struct qedr_cq *gsi_rqcq; 156 struct qedr_cq *gsi_rqcq;
155 struct qedr_qp *gsi_qp; 157 struct qedr_qp *gsi_qp;
158
159 unsigned long enet_state;
156}; 160};
157 161
158#define QEDR_MAX_SQ_PBL (0x8000) 162#define QEDR_MAX_SQ_PBL (0x8000)
@@ -188,6 +192,7 @@ struct qedr_dev {
188#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000) 192#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000)
189 193
190#define QEDR_MAX_PORT (1) 194#define QEDR_MAX_PORT (1)
195#define QEDR_PORT (1)
191 196
192#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) 197#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
193 198
@@ -251,9 +256,6 @@ struct qedr_cq {
251 256
252 u16 icid; 257 u16 icid;
253 258
254 /* Lock to protect completion handler */
255 spinlock_t comp_handler_lock;
256
257 /* Lock to protect multiple CQs */ 259
258 spinlock_t cq_lock; 260 spinlock_t cq_lock;
259 u8 arm_flags; 261 u8 arm_flags;
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 63890ebb72bd..a9a8d8745d2e 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -87,11 +87,8 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
87 qedr_inc_sw_gsi_cons(&qp->sq); 87 qedr_inc_sw_gsi_cons(&qp->sq);
88 spin_unlock_irqrestore(&qp->q_lock, flags); 88 spin_unlock_irqrestore(&qp->q_lock, flags);
89 89
90 if (cq->ibcq.comp_handler) { 90 if (cq->ibcq.comp_handler)
91 spin_lock_irqsave(&cq->comp_handler_lock, flags);
92 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); 91 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
93 spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
94 }
95} 92}
96 93
97void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, 94void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
@@ -113,11 +110,8 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
113 110
114 spin_unlock_irqrestore(&qp->q_lock, flags); 111 spin_unlock_irqrestore(&qp->q_lock, flags);
115 112
116 if (cq->ibcq.comp_handler) { 113 if (cq->ibcq.comp_handler)
117 spin_lock_irqsave(&cq->comp_handler_lock, flags);
118 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); 114 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
119 spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
120 }
121} 115}
122 116
123static void qedr_destroy_gsi_cq(struct qedr_dev *dev, 117static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
@@ -404,9 +398,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
404 } 398 }
405 399
406 if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h)) 400 if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
407 packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
408 else
409 packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB; 401 packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
402 else
403 packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
410 404
411 packet->roce_mode = roce_mode; 405 packet->roce_mode = roce_mode;
412 memcpy(packet->header.vaddr, ud_header_buffer, header_size); 406 memcpy(packet->header.vaddr, ud_header_buffer, header_size);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 57c8de208077..c7d6c9a783bd 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
471 struct ib_ucontext *context, struct ib_udata *udata) 471 struct ib_ucontext *context, struct ib_udata *udata)
472{ 472{
473 struct qedr_dev *dev = get_qedr_dev(ibdev); 473 struct qedr_dev *dev = get_qedr_dev(ibdev);
474 struct qedr_ucontext *uctx = NULL;
475 struct qedr_alloc_pd_uresp uresp;
476 struct qedr_pd *pd; 474 struct qedr_pd *pd;
477 u16 pd_id; 475 u16 pd_id;
478 int rc; 476 int rc;
@@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
489 if (!pd) 487 if (!pd)
490 return ERR_PTR(-ENOMEM); 488 return ERR_PTR(-ENOMEM);
491 489
492 dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); 490 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
491 if (rc)
492 goto err;
493 493
494 uresp.pd_id = pd_id;
495 pd->pd_id = pd_id; 494 pd->pd_id = pd_id;
496 495
497 if (udata && context) { 496 if (udata && context) {
497 struct qedr_alloc_pd_uresp uresp;
498
499 uresp.pd_id = pd_id;
500
498 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 501 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
499 if (rc) 502 if (rc) {
500 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id); 503 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
501 uctx = get_qedr_ucontext(context); 504 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
502 uctx->pd = pd; 505 goto err;
503 pd->uctx = uctx; 506 }
507
508 pd->uctx = get_qedr_ucontext(context);
509 pd->uctx->pd = pd;
504 } 510 }
505 511
506 return &pd->ibpd; 512 return &pd->ibpd;
513
514err:
515 kfree(pd);
516 return ERR_PTR(rc);
507} 517}
508 518
509int qedr_dealloc_pd(struct ib_pd *ibpd) 519int qedr_dealloc_pd(struct ib_pd *ibpd)
@@ -1600,7 +1610,7 @@ err0:
1600 return ERR_PTR(-EFAULT); 1610 return ERR_PTR(-EFAULT);
1601} 1611}
1602 1612
1603enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) 1613static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
1604{ 1614{
1605 switch (qp_state) { 1615 switch (qp_state) {
1606 case QED_ROCE_QP_STATE_RESET: 1616 case QED_ROCE_QP_STATE_RESET:
@@ -1621,7 +1631,8 @@ enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
1621 return IB_QPS_ERR; 1631 return IB_QPS_ERR;
1622} 1632}
1623 1633
1624enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state) 1634static enum qed_roce_qp_state qedr_get_state_from_ibqp(
1635 enum ib_qp_state qp_state)
1625{ 1636{
1626 switch (qp_state) { 1637 switch (qp_state) {
1627 case IB_QPS_RESET: 1638 case IB_QPS_RESET:
@@ -1657,7 +1668,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
1657 int status = 0; 1668 int status = 0;
1658 1669
1659 if (new_state == qp->state) 1670 if (new_state == qp->state)
1660 return 1; 1671 return 0;
1661 1672
1662 switch (qp->state) { 1673 switch (qp->state) {
1663 case QED_ROCE_QP_STATE_RESET: 1674 case QED_ROCE_QP_STATE_RESET:
@@ -1733,6 +1744,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
1733 /* ERR->XXX */ 1744 /* ERR->XXX */
1734 switch (new_state) { 1745 switch (new_state) {
1735 case QED_ROCE_QP_STATE_RESET: 1746 case QED_ROCE_QP_STATE_RESET:
1747 if ((qp->rq.prod != qp->rq.cons) ||
1748 (qp->sq.prod != qp->sq.cons)) {
1749 DP_NOTICE(dev,
1750 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1751 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1752 qp->sq.cons);
1753 status = -EINVAL;
1754 }
1736 break; 1755 break;
1737 default: 1756 default:
1738 status = -EINVAL; 1757 status = -EINVAL;
@@ -1865,7 +1884,6 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1865 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]); 1884 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
1866 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n", 1885 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
1867 qp_params.remote_mac_addr); 1886 qp_params.remote_mac_addr);
1868;
1869 1887
1870 qp_params.mtu = qp->mtu; 1888 qp_params.mtu = qp->mtu;
1871 qp_params.lb_indication = false; 1889 qp_params.lb_indication = false;
@@ -2016,7 +2034,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
2016 2034
2017 qp_attr->qp_state = qedr_get_ibqp_state(params.state); 2035 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2018 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state); 2036 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2019 qp_attr->path_mtu = iboe_get_mtu(params.mtu); 2037 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2020 qp_attr->path_mig_state = IB_MIG_MIGRATED; 2038 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2021 qp_attr->rq_psn = params.rq_psn; 2039 qp_attr->rq_psn = params.rq_psn;
2022 qp_attr->sq_psn = params.sq_psn; 2040 qp_attr->sq_psn = params.sq_psn;
@@ -2028,7 +2046,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
2028 qp_attr->cap.max_recv_wr = qp->rq.max_wr; 2046 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2029 qp_attr->cap.max_send_sge = qp->sq.max_sges; 2047 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2030 qp_attr->cap.max_recv_sge = qp->rq.max_sges; 2048 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2031 qp_attr->cap.max_inline_data = qp->max_inline_data; 2049 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
2032 qp_init_attr->cap = qp_attr->cap; 2050 qp_init_attr->cap = qp_attr->cap;
2033 2051
2034 memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0], 2052 memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
@@ -2302,7 +2320,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr)
2302 return rc; 2320 return rc;
2303} 2321}
2304 2322
2305struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len) 2323static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2324 int max_page_list_len)
2306{ 2325{
2307 struct qedr_pd *pd = get_qedr_pd(ibpd); 2326 struct qedr_pd *pd = get_qedr_pd(ibpd);
2308 struct qedr_dev *dev = get_qedr_dev(ibpd->device); 2327 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
@@ -2704,7 +2723,7 @@ static int qedr_prepare_reg(struct qedr_qp *qp,
2704 return 0; 2723 return 0;
2705} 2724}
2706 2725
2707enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode) 2726static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
2708{ 2727{
2709 switch (opcode) { 2728 switch (opcode) {
2710 case IB_WR_RDMA_WRITE: 2729 case IB_WR_RDMA_WRITE:
@@ -2729,7 +2748,7 @@ enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
2729 } 2748 }
2730} 2749}
2731 2750
2732inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr) 2751static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
2733{ 2752{
2734 int wq_is_full, err_wr, pbl_is_full; 2753 int wq_is_full, err_wr, pbl_is_full;
2735 struct qedr_dev *dev = qp->dev; 2754 struct qedr_dev *dev = qp->dev;
@@ -2766,7 +2785,7 @@ inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
2766 return true; 2785 return true;
2767} 2786}
2768 2787
2769int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 2788static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2770 struct ib_send_wr **bad_wr) 2789 struct ib_send_wr **bad_wr)
2771{ 2790{
2772 struct qedr_dev *dev = get_qedr_dev(ibqp->device); 2791 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
@@ -3234,9 +3253,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
3234 IB_WC_SUCCESS, 0); 3253 IB_WC_SUCCESS, 0);
3235 break; 3254 break;
3236 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR: 3255 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
3237 DP_ERR(dev, 3256 if (qp->state != QED_ROCE_QP_STATE_ERR)
3238 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n", 3257 DP_ERR(dev,
3239 cq->icid, qp->icid); 3258 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3259 cq->icid, qp->icid);
3240 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons, 3260 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3241 IB_WC_WR_FLUSH_ERR, 1); 3261 IB_WC_WR_FLUSH_ERR, 1);
3242 break; 3262 break;
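
The qedr_alloc_pd() hunk above fixes two things at once: the rdma_alloc_pd() return value is no longer ignored, and every failure path now unwinds through a single err label. A sketch of the resulting shape, with hypothetical names (my_pd, hw_alloc_pd) standing in for the driver's types:

	#include <linux/types.h>
	#include <linux/slab.h>
	#include <linux/err.h>

	struct my_dev;				/* stand-in for qedr_dev */
	struct my_pd { u16 id; };		/* stand-in for qedr_pd */

	int hw_alloc_pd(struct my_dev *dev, u16 *id);	/* hypothetical hw call */

	static struct my_pd *my_alloc_pd(struct my_dev *dev)
	{
		struct my_pd *pd = kzalloc(sizeof(*pd), GFP_KERNEL);
		int rc;

		if (!pd)
			return ERR_PTR(-ENOMEM);

		rc = hw_alloc_pd(dev, &pd->id);	/* return value now checked */
		if (rc)
			goto err;
		return pd;

	err:
		kfree(pd);
		return ERR_PTR(rc);
	}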
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 231a1ce1f4be..bd8fbd3d2032 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -1029,7 +1029,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
1029 if (ret) { 1029 if (ret) {
1030 dev_err(&pdev->dev, "failed to allocate interrupts\n"); 1030 dev_err(&pdev->dev, "failed to allocate interrupts\n");
1031 ret = -ENOMEM; 1031 ret = -ENOMEM;
1032 goto err_netdevice; 1032 goto err_free_cq_ring;
1033 } 1033 }
1034 1034
1035 /* Allocate UAR table. */ 1035 /* Allocate UAR table. */
@@ -1092,8 +1092,6 @@ err_free_uar_table:
1092err_free_intrs: 1092err_free_intrs:
1093 pvrdma_free_irq(dev); 1093 pvrdma_free_irq(dev);
1094 pvrdma_disable_msi_all(dev); 1094 pvrdma_disable_msi_all(dev);
1095err_netdevice:
1096 unregister_netdevice_notifier(&dev->nb_netdev);
1097err_free_cq_ring: 1095err_free_cq_ring:
1098 pvrdma_page_dir_cleanup(dev, &dev->cq_pdir); 1096 pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
1099err_free_async_ring: 1097err_free_async_ring:
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index 54891370d18a..c2aa52638dcb 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -306,7 +306,7 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
306 union pvrdma_cmd_resp rsp; 306 union pvrdma_cmd_resp rsp;
307 struct pvrdma_cmd_create_uc *cmd = &req.create_uc; 307 struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
308 struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp; 308 struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
309 struct pvrdma_alloc_ucontext_resp uresp; 309 struct pvrdma_alloc_ucontext_resp uresp = {0};
310 int ret; 310 int ret;
311 void *ptr; 311 void *ptr;
312 312
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 342e78163613..4abdeb359fb4 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -555,7 +555,7 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev)
555 } 555 }
556 556
557 spin_lock_bh(&dev_list_lock); 557 spin_lock_bh(&dev_list_lock);
558 list_add_tail(&rxe_dev_list, &rxe->list); 558 list_add_tail(&rxe->list, &rxe_dev_list);
559 spin_unlock_bh(&dev_list_lock); 559 spin_unlock_bh(&dev_list_lock);
560 return rxe; 560 return rxe;
561} 561}
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 486d576e55bc..44b2108253bd 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -813,8 +813,7 @@ void rxe_qp_destroy(struct rxe_qp *qp)
813 del_timer_sync(&qp->rnr_nak_timer); 813 del_timer_sync(&qp->rnr_nak_timer);
814 814
815 rxe_cleanup_task(&qp->req.task); 815 rxe_cleanup_task(&qp->req.task);
816 if (qp_type(qp) == IB_QPT_RC) 816 rxe_cleanup_task(&qp->comp.task);
817 rxe_cleanup_task(&qp->comp.task);
818 817
819 /* flush out any receive wr's or pending requests */ 818 /* flush out any receive wr's or pending requests */
820 __rxe_do_task(&qp->req.task); 819 __rxe_do_task(&qp->req.task);
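
The rxe_net.c hunk above fixes swapped arguments: list_add_tail(new, head) takes the new entry first and the list head second, so the original call spliced the global rxe_dev_list head into each device rather than the device into the list. For reference, the correct usage:

	#include <linux/list.h>

	struct item {
		struct list_head entry;
	};

	static LIST_HEAD(item_list);

	static void item_register(struct item *it)
	{
		/* first argument is the new entry, second the list head */
		list_add_tail(&it->entry, &item_list);
	}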
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9104e6b8cac9..e71af717e71b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -651,13 +651,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
651 SHOST_DIX_GUARD_CRC); 651 SHOST_DIX_GUARD_CRC);
652 } 652 }
653 653
654 /*
655 * Limit the sg_tablesize and max_sectors based on the device
656 * max fastreg page list length.
657 */
658 shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
659 ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
660
661 if (iscsi_host_add(shost, 654 if (iscsi_host_add(shost,
662 ib_conn->device->ib_device->dma_device)) { 655 ib_conn->device->ib_device->dma_device)) {
663 mutex_unlock(&iser_conn->state_mutex); 656 mutex_unlock(&iser_conn->state_mutex);
@@ -679,6 +672,10 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
679 max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9; 672 max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
680 shost->max_sectors = min(iser_max_sectors, max_fr_sectors); 673 shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
681 674
675 iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
676 iser_conn, shost->sg_tablesize,
677 shost->max_sectors);
678
682 if (cmds_max > max_cmds) { 679 if (cmds_max > max_cmds) {
683 iser_info("cmds_max changed from %u to %u\n", 680 iser_info("cmds_max changed from %u to %u\n",
684 cmds_max, max_cmds); 681 cmds_max, max_cmds);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 0be6a7c5ddb5..9d0b22ad58c1 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -496,7 +496,6 @@ struct ib_conn {
496 * @rx_descs: rx buffers array (cyclic buffer) 496 * @rx_descs: rx buffers array (cyclic buffer)
497 * @num_rx_descs: number of rx descriptors 497 * @num_rx_descs: number of rx descriptors
498 * @scsi_sg_tablesize: scsi host sg_tablesize 498 * @scsi_sg_tablesize: scsi host sg_tablesize
499 * @scsi_max_sectors: scsi host max sectors
500 */ 499 */
501struct iser_conn { 500struct iser_conn {
502 struct ib_conn ib_conn; 501 struct ib_conn ib_conn;
@@ -519,7 +518,6 @@ struct iser_conn {
519 struct iser_rx_desc *rx_descs; 518 struct iser_rx_desc *rx_descs;
520 u32 num_rx_descs; 519 u32 num_rx_descs;
521 unsigned short scsi_sg_tablesize; 520 unsigned short scsi_sg_tablesize;
522 unsigned int scsi_max_sectors;
523 bool snd_w_inv; 521 bool snd_w_inv;
524}; 522};
525 523
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 8ae7a3beddb7..6a9d1cb548ee 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -707,18 +707,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
707 sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE, 707 sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
708 device->ib_device->attrs.max_fast_reg_page_list_len); 708 device->ib_device->attrs.max_fast_reg_page_list_len);
709 709
710 if (sg_tablesize > sup_sg_tablesize) { 710 iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
711 sg_tablesize = sup_sg_tablesize;
712 iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
713 } else {
714 iser_conn->scsi_max_sectors = max_sectors;
715 }
716
717 iser_conn->scsi_sg_tablesize = sg_tablesize;
718
719 iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
720 iser_conn, iser_conn->scsi_sg_tablesize,
721 iser_conn->scsi_max_sectors);
722} 711}
723 712
724/** 713/**
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 8ddc07123193..79bf48477ddb 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -371,6 +371,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
371 struct srp_fr_desc *d; 371 struct srp_fr_desc *d;
372 struct ib_mr *mr; 372 struct ib_mr *mr;
373 int i, ret = -EINVAL; 373 int i, ret = -EINVAL;
374 enum ib_mr_type mr_type;
374 375
375 if (pool_size <= 0) 376 if (pool_size <= 0)
376 goto err; 377 goto err;
@@ -384,9 +385,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
384 spin_lock_init(&pool->lock); 385 spin_lock_init(&pool->lock);
385 INIT_LIST_HEAD(&pool->free_list); 386 INIT_LIST_HEAD(&pool->free_list);
386 387
388 if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
389 mr_type = IB_MR_TYPE_SG_GAPS;
390 else
391 mr_type = IB_MR_TYPE_MEM_REG;
392
387 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { 393 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
388 mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 394 mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
389 max_page_list_len);
390 if (IS_ERR(mr)) { 395 if (IS_ERR(mr)) {
391 ret = PTR_ERR(mr); 396 ret = PTR_ERR(mr);
392 if (ret == -ENOMEM) 397 if (ret == -ENOMEM)
@@ -3694,6 +3699,12 @@ static int __init srp_init_module(void)
3694 indirect_sg_entries = cmd_sg_entries; 3699 indirect_sg_entries = cmd_sg_entries;
3695 } 3700 }
3696 3701
3702 if (indirect_sg_entries > SG_MAX_SEGMENTS) {
3703 pr_warn("Clamping indirect_sg_entries to %u\n",
3704 SG_MAX_SEGMENTS);
3705 indirect_sg_entries = SG_MAX_SEGMENTS;
3706 }
3707
3697 srp_remove_wq = create_workqueue("srp_remove"); 3708 srp_remove_wq = create_workqueue("srp_remove");
3698 if (!srp_remove_wq) { 3709 if (!srp_remove_wq) {
3699 ret = -ENOMEM; 3710 ret = -ENOMEM;
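
Two independent srp fixes above: srp_create_fr_pool() now asks for IB_MR_TYPE_SG_GAPS memory regions when the device advertises IB_DEVICE_SG_GAPS_REG, and srp_init_module() clamps the user-supplied indirect_sg_entries parameter before it can overflow scatterlist allocations. The clamp is the standard module-parameter validation idiom; a sketch with an illustrative parameter name:

	#include <linux/module.h>
	#include <linux/scatterlist.h>

	static unsigned int nentries = 512;	/* illustrative parameter */
	module_param(nentries, uint, 0444);

	static int __init my_init(void)
	{
		if (nentries > SG_MAX_SEGMENTS) {
			pr_warn("clamping nentries to %u\n", SG_MAX_SEGMENTS);
			nentries = SG_MAX_SEGMENTS;
		}
		return 0;
	}
	module_init(my_init);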
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 5dcfa2913ceb..3b11422b1cce 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -11297,7 +11297,8 @@ static void mixer_notify_update(PLCI *plci, byte others)
11297 ((CAPI_MSG *) msg)->header.ncci = 0; 11297 ((CAPI_MSG *) msg)->header.ncci = 0;
11298 ((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT; 11298 ((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
11299 ((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3; 11299 ((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
11300 PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE); 11300 ((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff;
11301 ((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8;
11301 ((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0; 11302 ((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
11302 w = api_put(notify_plci->appl, (CAPI_MSG *) msg); 11303 w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
11303 if (w != _QUEUE_FULL) 11304 if (w != _QUEUE_FULL)
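
The message.c hunk above stores the 16-bit LI_REQ_SILENT_UPDATE value into two adjacent byte-array slots instead of going through the PUT_WORD() macro. Stripped of the CAPI plumbing, the underlying idiom is a little-endian byte store (put_le16 is an illustrative name, not a driver helper):

	#include <linux/types.h>

	/* serialize a 16-bit value into a byte stream, low byte first */
	static void put_le16(u8 *p, u16 v)
	{
		p[0] = v & 0xff;
		p[1] = v >> 8;
	}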
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 0ea4efb3de66..ebb5e391b800 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -30,8 +30,9 @@
30 30
31#include "cec-priv.h" 31#include "cec-priv.h"
32 32
33static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx); 33static void cec_fill_msg_report_features(struct cec_adapter *adap,
34static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx); 34 struct cec_msg *msg,
35 unsigned int la_idx);
35 36
36/* 37/*
37 * 400 ms is the time it takes for one 16 byte message to be 38 * 400 ms is the time it takes for one 16 byte message to be
@@ -288,10 +289,10 @@ static void cec_data_cancel(struct cec_data *data)
288 289
289 /* Mark it as an error */ 290 /* Mark it as an error */
290 data->msg.tx_ts = ktime_get_ns(); 291 data->msg.tx_ts = ktime_get_ns();
291 data->msg.tx_status = CEC_TX_STATUS_ERROR | 292 data->msg.tx_status |= CEC_TX_STATUS_ERROR |
292 CEC_TX_STATUS_MAX_RETRIES; 293 CEC_TX_STATUS_MAX_RETRIES;
294 data->msg.tx_error_cnt++;
293 data->attempts = 0; 295 data->attempts = 0;
294 data->msg.tx_error_cnt = 1;
295 /* Queue transmitted message for monitoring purposes */ 296 /* Queue transmitted message for monitoring purposes */
296 cec_queue_msg_monitor(data->adap, &data->msg, 1); 297 cec_queue_msg_monitor(data->adap, &data->msg, 1);
297 298
@@ -851,7 +852,7 @@ static const u8 cec_msg_size[256] = {
851 [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED, 852 [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED,
852 [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED, 853 [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED,
853 [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST, 854 [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST,
854 [CEC_MSG_REPORT_CURRENT_LATENCY] = 7 | BCAST, 855 [CEC_MSG_REPORT_CURRENT_LATENCY] = 6 | BCAST,
855 [CEC_MSG_CDC_MESSAGE] = 2 | BCAST, 856 [CEC_MSG_CDC_MESSAGE] = 2 | BCAST,
856}; 857};
857 858
@@ -1250,30 +1251,49 @@ configured:
1250 for (i = 1; i < las->num_log_addrs; i++) 1251 for (i = 1; i < las->num_log_addrs; i++)
1251 las->log_addr[i] = CEC_LOG_ADDR_INVALID; 1252 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1252 } 1253 }
1254 for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
1255 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1253 adap->is_configured = true; 1256 adap->is_configured = true;
1254 adap->is_configuring = false; 1257 adap->is_configuring = false;
1255 cec_post_state_event(adap); 1258 cec_post_state_event(adap);
1256 mutex_unlock(&adap->lock);
1257 1259
1260 /*
1261 * Now post the Report Features and Report Physical Address broadcast
1262 * messages. Note that these are non-blocking transmits, meaning that
1263 * they are just queued up and once adap->lock is unlocked the main
1264 * thread will kick in and start transmitting these.
1265 *
1266 * If after this function is done (but before one or more of these
1267 * messages are actually transmitted) the CEC adapter is unconfigured,
1268 * then any remaining messages will be dropped by the main thread.
1269 */
1258 for (i = 0; i < las->num_log_addrs; i++) { 1270 for (i = 0; i < las->num_log_addrs; i++) {
1271 struct cec_msg msg = {};
1272
1259 if (las->log_addr[i] == CEC_LOG_ADDR_INVALID || 1273 if (las->log_addr[i] == CEC_LOG_ADDR_INVALID ||
1260 (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY)) 1274 (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY))
1261 continue; 1275 continue;
1262 1276
1263 /* 1277 msg.msg[0] = (las->log_addr[i] << 4) | 0x0f;
1264 * Report Features must come first according 1278
1265 * to CEC 2.0 1279 /* Report Features must come first according to CEC 2.0 */
1266 */ 1280 if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED &&
1267 if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED) 1281 adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) {
1268 cec_report_features(adap, i); 1282 cec_fill_msg_report_features(adap, &msg, i);
1269 cec_report_phys_addr(adap, i); 1283 cec_transmit_msg_fh(adap, &msg, NULL, false);
1284 }
1285
1286 /* Report Physical Address */
1287 cec_msg_report_physical_addr(&msg, adap->phys_addr,
1288 las->primary_device_type[i]);
1289 dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
1290 las->log_addr[i],
1291 cec_phys_addr_exp(adap->phys_addr));
1292 cec_transmit_msg_fh(adap, &msg, NULL, false);
1270 } 1293 }
1271 for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
1272 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1273 mutex_lock(&adap->lock);
1274 adap->kthread_config = NULL; 1294 adap->kthread_config = NULL;
1275 mutex_unlock(&adap->lock);
1276 complete(&adap->config_completion); 1295 complete(&adap->config_completion);
1296 mutex_unlock(&adap->lock);
1277 return 0; 1297 return 0;
1278 1298
1279unconfigure: 1299unconfigure:
@@ -1526,52 +1546,32 @@ EXPORT_SYMBOL_GPL(cec_s_log_addrs);
1526 1546
1527/* High-level core CEC message handling */ 1547/* High-level core CEC message handling */
1528 1548
1529/* Transmit the Report Features message */ 1549/* Fill in the Report Features message */
1530static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx) 1550static void cec_fill_msg_report_features(struct cec_adapter *adap,
1551 struct cec_msg *msg,
1552 unsigned int la_idx)
1531{ 1553{
1532 struct cec_msg msg = { };
1533 const struct cec_log_addrs *las = &adap->log_addrs; 1554 const struct cec_log_addrs *las = &adap->log_addrs;
1534 const u8 *features = las->features[la_idx]; 1555 const u8 *features = las->features[la_idx];
1535 bool op_is_dev_features = false; 1556 bool op_is_dev_features = false;
1536 unsigned int idx; 1557 unsigned int idx;
1537 1558
1538 /* This is 2.0 and up only */
1539 if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
1540 return 0;
1541
1542 /* Report Features */ 1559 /* Report Features */
1543 msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f; 1560 msg->msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
1544 msg.len = 4; 1561 msg->len = 4;
1545 msg.msg[1] = CEC_MSG_REPORT_FEATURES; 1562 msg->msg[1] = CEC_MSG_REPORT_FEATURES;
1546 msg.msg[2] = adap->log_addrs.cec_version; 1563 msg->msg[2] = adap->log_addrs.cec_version;
1547 msg.msg[3] = las->all_device_types[la_idx]; 1564 msg->msg[3] = las->all_device_types[la_idx];
1548 1565
1549 /* Write RC Profiles first, then Device Features */ 1566 /* Write RC Profiles first, then Device Features */
1550 for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) { 1567 for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
1551 msg.msg[msg.len++] = features[idx]; 1568 msg->msg[msg->len++] = features[idx];
1552 if ((features[idx] & CEC_OP_FEAT_EXT) == 0) { 1569 if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
1553 if (op_is_dev_features) 1570 if (op_is_dev_features)
1554 break; 1571 break;
1555 op_is_dev_features = true; 1572 op_is_dev_features = true;
1556 } 1573 }
1557 } 1574 }
1558 return cec_transmit_msg(adap, &msg, false);
1559}
1560
1561/* Transmit the Report Physical Address message */
1562static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx)
1563{
1564 const struct cec_log_addrs *las = &adap->log_addrs;
1565 struct cec_msg msg = { };
1566
1567 /* Report Physical Address */
1568 msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
1569 cec_msg_report_physical_addr(&msg, adap->phys_addr,
1570 las->primary_device_type[la_idx]);
1571 dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
1572 las->log_addr[la_idx],
1573 cec_phys_addr_exp(adap->phys_addr));
1574 return cec_transmit_msg(adap, &msg, false);
1575} 1575}
1576 1576
1577/* Transmit the Feature Abort message */ 1577/* Transmit the Feature Abort message */
@@ -1777,9 +1777,10 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
1777 } 1777 }
1778 1778
1779 case CEC_MSG_GIVE_FEATURES: 1779 case CEC_MSG_GIVE_FEATURES:
1780 if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) 1780 if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
1781 return cec_report_features(adap, la_idx); 1781 return cec_feature_abort(adap, msg);
1782 return 0; 1782 cec_fill_msg_report_features(adap, &tx_cec_msg, la_idx);
1783 return cec_transmit_msg(adap, &tx_cec_msg, false);
1783 1784
1784 default: 1785 default:
1785 /* 1786 /*
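
Several cec-adap.c hunks above build broadcast messages by hand via msg.msg[0] = (las->log_addr[i] << 4) | 0x0f. The first byte of a CEC frame packs the initiator logical address into the high nibble and the destination into the low nibble, with 0xf meaning broadcast; a sketch:

	#include <linux/types.h>

	#define CEC_BROADCAST 0x0f	/* destination nibble for broadcast */

	/* initiator in bits 7:4, destination in bits 3:0 */
	static u8 cec_header_byte(u8 initiator, u8 destination)
	{
		return (initiator << 4) | (destination & 0x0f);
	}

CEC_BROADCAST and cec_header_byte are illustrative names; the framework open-codes the expression.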
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index bc5e8cfe7ca2..8f11d7e45993 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -719,6 +719,9 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
719 skb_copy_from_linear_data(h->priv->ule_skb, dest_addr, 719 skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
720 ETH_ALEN); 720 ETH_ALEN);
721 skb_pull(h->priv->ule_skb, ETH_ALEN); 721 skb_pull(h->priv->ule_skb, ETH_ALEN);
722 } else {
723 /* dest_addr buffer is only valid if h->priv->ule_dbit == 0 */
724 eth_zero_addr(dest_addr);
722 } 725 }
723 726
724 /* Handle ULE Extension Headers. */ 727 /* Handle ULE Extension Headers. */
@@ -750,16 +753,8 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
750 if (!h->priv->ule_bridged) { 753 if (!h->priv->ule_bridged) {
751 skb_push(h->priv->ule_skb, ETH_HLEN); 754 skb_push(h->priv->ule_skb, ETH_HLEN);
752 h->ethh = (struct ethhdr *)h->priv->ule_skb->data; 755 h->ethh = (struct ethhdr *)h->priv->ule_skb->data;
753 if (!h->priv->ule_dbit) { 756 memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
754 /* 757 eth_zero_addr(h->ethh->h_source);
755 * dest_addr buffer is only valid if
756 * h->priv->ule_dbit == 0
757 */
758 memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
759 eth_zero_addr(h->ethh->h_source);
760 } else /* zeroize source and dest */
761 memset(h->ethh, 0, ETH_ALEN * 2);
762
763 h->ethh->h_proto = htons(h->priv->ule_sndu_type); 758 h->ethh->h_proto = htons(h->priv->ule_sndu_type);
764 } 759 }
765 /* else: skb is in correct state; nothing to do. */ 760 /* else: skb is in correct state; nothing to do. */
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index b31fa6fae009..b979ea148251 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -655,6 +655,7 @@ config VIDEO_S5K6A3
655config VIDEO_S5K4ECGX 655config VIDEO_S5K4ECGX
656 tristate "Samsung S5K4ECGX sensor support" 656 tristate "Samsung S5K4ECGX sensor support"
657 depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API 657 depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
658 select CRC32
658 ---help--- 659 ---help---
659 This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M 660 This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
660 camera sensor with an embedded SoC image signal processor. 661 camera sensor with an embedded SoC image signal processor.
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 59872b31f832..f4e92bdfe192 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -2741,9 +2741,7 @@ static const struct v4l2_subdev_internal_ops smiapp_internal_ops = {
2741 * I2C Driver 2741 * I2C Driver
2742 */ 2742 */
2743 2743
2744#ifdef CONFIG_PM 2744static int __maybe_unused smiapp_suspend(struct device *dev)
2745
2746static int smiapp_suspend(struct device *dev)
2747{ 2745{
2748 struct i2c_client *client = to_i2c_client(dev); 2746 struct i2c_client *client = to_i2c_client(dev);
2749 struct v4l2_subdev *subdev = i2c_get_clientdata(client); 2747 struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2768,7 +2766,7 @@ static int smiapp_suspend(struct device *dev)
2768 return 0; 2766 return 0;
2769} 2767}
2770 2768
2771static int smiapp_resume(struct device *dev) 2769static int __maybe_unused smiapp_resume(struct device *dev)
2772{ 2770{
2773 struct i2c_client *client = to_i2c_client(dev); 2771 struct i2c_client *client = to_i2c_client(dev);
2774 struct v4l2_subdev *subdev = i2c_get_clientdata(client); 2772 struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2783,13 +2781,6 @@ static int smiapp_resume(struct device *dev)
2783 return rval; 2781 return rval;
2784} 2782}
2785 2783
2786#else
2787
2788#define smiapp_suspend NULL
2789#define smiapp_resume NULL
2790
2791#endif /* CONFIG_PM */
2792
2793static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev) 2784static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
2794{ 2785{
2795 struct smiapp_hwconfig *hwcfg; 2786 struct smiapp_hwconfig *hwcfg;
@@ -2913,13 +2904,9 @@ static int smiapp_probe(struct i2c_client *client,
2913 if (IS_ERR(sensor->xshutdown)) 2904 if (IS_ERR(sensor->xshutdown))
2914 return PTR_ERR(sensor->xshutdown); 2905 return PTR_ERR(sensor->xshutdown);
2915 2906
2916 pm_runtime_enable(&client->dev); 2907 rval = smiapp_power_on(&client->dev);
2917 2908 if (rval < 0)
2918 rval = pm_runtime_get_sync(&client->dev); 2909 return rval;
2919 if (rval < 0) {
2920 rval = -ENODEV;
2921 goto out_power_off;
2922 }
2923 2910
2924 rval = smiapp_identify_module(sensor); 2911 rval = smiapp_identify_module(sensor);
2925 if (rval) { 2912 if (rval) {
@@ -3100,6 +3087,9 @@ static int smiapp_probe(struct i2c_client *client,
3100 if (rval < 0) 3087 if (rval < 0)
3101 goto out_media_entity_cleanup; 3088 goto out_media_entity_cleanup;
3102 3089
3090 pm_runtime_set_active(&client->dev);
3091 pm_runtime_get_noresume(&client->dev);
3092 pm_runtime_enable(&client->dev);
3103 pm_runtime_set_autosuspend_delay(&client->dev, 1000); 3093 pm_runtime_set_autosuspend_delay(&client->dev, 1000);
3104 pm_runtime_use_autosuspend(&client->dev); 3094 pm_runtime_use_autosuspend(&client->dev);
3105 pm_runtime_put_autosuspend(&client->dev); 3095 pm_runtime_put_autosuspend(&client->dev);
@@ -3113,8 +3103,7 @@ out_cleanup:
3113 smiapp_cleanup(sensor); 3103 smiapp_cleanup(sensor);
3114 3104
3115out_power_off: 3105out_power_off:
3116 pm_runtime_put(&client->dev); 3106 smiapp_power_off(&client->dev);
3117 pm_runtime_disable(&client->dev);
3118 3107
3119 return rval; 3108 return rval;
3120} 3109}
@@ -3127,8 +3116,10 @@ static int smiapp_remove(struct i2c_client *client)
3127 3116
3128 v4l2_async_unregister_subdev(subdev); 3117 v4l2_async_unregister_subdev(subdev);
3129 3118
3130 pm_runtime_suspend(&client->dev);
3131 pm_runtime_disable(&client->dev); 3119 pm_runtime_disable(&client->dev);
3120 if (!pm_runtime_status_suspended(&client->dev))
3121 smiapp_power_off(&client->dev);
3122 pm_runtime_set_suspended(&client->dev);
3132 3123
3133 for (i = 0; i < sensor->ssds_used; i++) { 3124 for (i = 0; i < sensor->ssds_used; i++) {
3134 v4l2_device_unregister_subdev(&sensor->ssds[i].sd); 3125 v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
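
The smiapp probe/remove hunks above switch to the recommended runtime-PM bring-up order: power the device on by hand, probe it, and only then tell the PM core the device is active before enabling runtime PM. The old probe relied on pm_runtime_get_sync() to power the sensor up, which cannot work when CONFIG_PM is disabled; powering on by hand removes that dependency. A sketch under the assumption that my_power_on() mirrors smiapp_power_on():

	#include <linux/pm_runtime.h>

	int my_power_on(struct device *dev);	/* hypothetical, like smiapp_power_on() */

	static int my_probe_pm(struct device *dev)
	{
		int ret = my_power_on(dev);	/* power up manually first */

		if (ret < 0)
			return ret;

		/* ... identify and register the now-powered hardware ... */

		pm_runtime_set_active(dev);	/* tell the PM core: already powered */
		pm_runtime_get_noresume(dev);
		pm_runtime_enable(dev);
		pm_runtime_set_autosuspend_delay(dev, 1000);
		pm_runtime_use_autosuspend(dev);
		pm_runtime_put_autosuspend(dev);
		return 0;
	}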
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 3a0fe8cc64e9..48646a7f3fb0 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -291,8 +291,12 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
291 tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode); 291 tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode);
292 tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input); 292 tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input);
293 293
294 /* Svideo should enable YCrCb output and disable GPCL output 294 /*
295 * For Composite and TV, it should be the reverse 295 * Set up the FID/GLCO/VLK/HVLK and INTREQ/GPCL/VBLK output signals. For
296 * S-Video we output the vertical lock (VLK) signal on FID/GLCO/VLK/HVLK
297 * and set INTREQ/GPCL/VBLK to logic 0. For composite we output the
298 * field indicator (FID) signal on FID/GLCO/VLK/HVLK and set
299 * INTREQ/GPCL/VBLK to logic 1.
296 */ 300 */
297 val = tvp5150_read(sd, TVP5150_MISC_CTL); 301 val = tvp5150_read(sd, TVP5150_MISC_CTL);
298 if (val < 0) { 302 if (val < 0) {
@@ -301,9 +305,9 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
301 } 305 }
302 306
303 if (decoder->input == TVP5150_SVIDEO) 307 if (decoder->input == TVP5150_SVIDEO)
304 val = (val & ~0x40) | 0x10; 308 val = (val & ~TVP5150_MISC_CTL_GPCL) | TVP5150_MISC_CTL_HVLK;
305 else 309 else
306 val = (val & ~0x10) | 0x40; 310 val = (val & ~TVP5150_MISC_CTL_HVLK) | TVP5150_MISC_CTL_GPCL;
307 tvp5150_write(sd, TVP5150_MISC_CTL, val); 311 tvp5150_write(sd, TVP5150_MISC_CTL, val);
308}; 312};
309 313
@@ -455,7 +459,12 @@ static const struct i2c_reg_value tvp5150_init_enable[] = {
455 },{ /* Automatic offset and AGC enabled */ 459 },{ /* Automatic offset and AGC enabled */
456 TVP5150_ANAL_CHL_CTL, 0x15 460 TVP5150_ANAL_CHL_CTL, 0x15
457 },{ /* Activate YCrCb output 0x9 or 0xd ? */ 461 },{ /* Activate YCrCb output 0x9 or 0xd ? */
458 TVP5150_MISC_CTL, 0x6f 462 TVP5150_MISC_CTL, TVP5150_MISC_CTL_GPCL |
463 TVP5150_MISC_CTL_INTREQ_OE |
464 TVP5150_MISC_CTL_YCBCR_OE |
465 TVP5150_MISC_CTL_SYNC_OE |
466 TVP5150_MISC_CTL_VBLANK |
467 TVP5150_MISC_CTL_CLOCK_OE,
459 },{ /* Activates video std autodetection for all standards */ 468 },{ /* Activates video std autodetection for all standards */
460 TVP5150_AUTOSW_MSK, 0x0 469 TVP5150_AUTOSW_MSK, 0x0
461 },{ /* Default format: 0x47. For 4:2:2: 0x40 */ 470 },{ /* Default format: 0x47. For 4:2:2: 0x40 */
@@ -861,8 +870,6 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
861 870
862 f = &format->format; 871 f = &format->format;
863 872
864 tvp5150_reset(sd, 0);
865
866 f->width = decoder->rect.width; 873 f->width = decoder->rect.width;
867 f->height = decoder->rect.height / 2; 874 f->height = decoder->rect.height / 2;
868 875
@@ -1051,21 +1058,27 @@ static const struct media_entity_operations tvp5150_sd_media_ops = {
1051static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable) 1058static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable)
1052{ 1059{
1053 struct tvp5150 *decoder = to_tvp5150(sd); 1060 struct tvp5150 *decoder = to_tvp5150(sd);
1054 /* Output format: 8-bit ITU-R BT.656 with embedded syncs */ 1061 int val;
1055 int val = 0x09;
1056
1057 /* Output format: 8-bit 4:2:2 YUV with discrete sync */
1058 if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
1059 val = 0x0d;
1060 1062
1061 /* Initializes TVP5150 to its default values */ 1063 /* Enable or disable the video output signals. */
1062 /* # set PCLK (27MHz) */ 1064 val = tvp5150_read(sd, TVP5150_MISC_CTL);
1063 tvp5150_write(sd, TVP5150_CONF_SHARED_PIN, 0x00); 1065 if (val < 0)
1066 return val;
1067
1068 val &= ~(TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE |
1069 TVP5150_MISC_CTL_CLOCK_OE);
1070
1071 if (enable) {
1072 /*
1073 * Enable the YCbCr and clock outputs. In discrete sync mode
1074 * (non-BT.656) additionally enable the sync outputs.
1075 */
1076 val |= TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_CLOCK_OE;
1077 if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
1078 val |= TVP5150_MISC_CTL_SYNC_OE;
1079 }
1064 1080
1065 if (enable) 1081 tvp5150_write(sd, TVP5150_MISC_CTL, val);
1066 tvp5150_write(sd, TVP5150_MISC_CTL, val);
1067 else
1068 tvp5150_write(sd, TVP5150_MISC_CTL, 0x00);
1069 1082
1070 return 0; 1083 return 0;
1071} 1084}
@@ -1524,7 +1537,6 @@ static int tvp5150_probe(struct i2c_client *c,
1524 res = core->hdl.error; 1537 res = core->hdl.error;
1525 goto err; 1538 goto err;
1526 } 1539 }
1527 v4l2_ctrl_handler_setup(&core->hdl);
1528 1540
1529 /* Default is no cropping */ 1541 /* Default is no cropping */
1530 core->rect.top = 0; 1542 core->rect.top = 0;
@@ -1535,6 +1547,8 @@ static int tvp5150_probe(struct i2c_client *c,
1535 core->rect.left = 0; 1547 core->rect.left = 0;
1536 core->rect.width = TVP5150_H_MAX; 1548 core->rect.width = TVP5150_H_MAX;
1537 1549
1550 tvp5150_reset(sd, 0); /* Calls v4l2_ctrl_handler_setup() */
1551
1538 res = v4l2_async_register_subdev(sd); 1552 res = v4l2_async_register_subdev(sd);
1539 if (res < 0) 1553 if (res < 0)
1540 goto err; 1554 goto err;
diff --git a/drivers/media/i2c/tvp5150_reg.h b/drivers/media/i2c/tvp5150_reg.h
index 25a994944918..30a48c28d05a 100644
--- a/drivers/media/i2c/tvp5150_reg.h
+++ b/drivers/media/i2c/tvp5150_reg.h
@@ -9,6 +9,15 @@
9#define TVP5150_ANAL_CHL_CTL 0x01 /* Analog channel controls */ 9#define TVP5150_ANAL_CHL_CTL 0x01 /* Analog channel controls */
10#define TVP5150_OP_MODE_CTL 0x02 /* Operation mode controls */ 10#define TVP5150_OP_MODE_CTL 0x02 /* Operation mode controls */
11#define TVP5150_MISC_CTL 0x03 /* Miscellaneous controls */ 11#define TVP5150_MISC_CTL 0x03 /* Miscellaneous controls */
12#define TVP5150_MISC_CTL_VBLK_GPCL BIT(7)
13#define TVP5150_MISC_CTL_GPCL BIT(6)
14#define TVP5150_MISC_CTL_INTREQ_OE BIT(5)
15#define TVP5150_MISC_CTL_HVLK BIT(4)
16#define TVP5150_MISC_CTL_YCBCR_OE BIT(3)
17#define TVP5150_MISC_CTL_SYNC_OE BIT(2)
18#define TVP5150_MISC_CTL_VBLANK BIT(1)
19#define TVP5150_MISC_CTL_CLOCK_OE BIT(0)
20
12#define TVP5150_AUTOSW_MSK 0x04 /* Autoswitch mask: TVP5150A / TVP5150AM */ 21#define TVP5150_AUTOSW_MSK 0x04 /* Autoswitch mask: TVP5150A / TVP5150AM */
13 22
14/* Reserved 05h */ 23/* Reserved 05h */
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 979634000597..d5c911c09e2b 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -308,9 +308,7 @@ static void cobalt_pci_iounmap(struct cobalt *cobalt, struct pci_dev *pci_dev)
308static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev) 308static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev)
309{ 309{
310 free_irq(pci_dev->irq, (void *)cobalt); 310 free_irq(pci_dev->irq, (void *)cobalt);
311 311 pci_free_irq_vectors(pci_dev);
312 if (cobalt->msi_enabled)
313 pci_disable_msi(pci_dev);
314} 312}
315 313
316static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev, 314static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
@@ -387,14 +385,12 @@ static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
387 from being generated. */ 385 from being generated. */
388 cobalt_set_interrupt(cobalt, false); 386 cobalt_set_interrupt(cobalt, false);
389 387
390 if (pci_enable_msi_range(pci_dev, 1, 1) < 1) { 388 if (pci_alloc_irq_vectors(pci_dev, 1, 1, PCI_IRQ_MSI) < 1) {
391 cobalt_err("Could not enable MSI\n"); 389 cobalt_err("Could not enable MSI\n");
392 cobalt->msi_enabled = false;
393 ret = -EIO; 390 ret = -EIO;
394 goto err_release; 391 goto err_release;
395 } 392 }
396 msi_config_show(cobalt, pci_dev); 393 msi_config_show(cobalt, pci_dev);
397 cobalt->msi_enabled = true;
398 394
399 /* Register IRQ */ 395 /* Register IRQ */
400 if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED, 396 if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED,
diff --git a/drivers/media/pci/cobalt/cobalt-driver.h b/drivers/media/pci/cobalt/cobalt-driver.h
index ed00dc9d9399..00f773ec359a 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.h
+++ b/drivers/media/pci/cobalt/cobalt-driver.h
@@ -287,8 +287,6 @@ struct cobalt {
287 u32 irq_none; 287 u32 irq_none;
288 u32 irq_full_fifo; 288 u32 irq_full_fifo;
289 289
290 bool msi_enabled;
291
292 /* omnitek dma */ 290 /* omnitek dma */
293 int dma_channels; 291 int dma_channels;
294 int first_fifo_channel; 292 int first_fifo_channel;
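The two cobalt hunks above retire the legacy pci_enable_msi_range()/pci_disable_msi() pair together with the msi_enabled flag that tracked it: pci_alloc_irq_vectors() keeps that state inside the PCI core, and a single pci_free_irq_vectors() undoes it. A sketch of the resulting setup/teardown shape, assuming a single-vector driver; the handler and name string are placeholders, not cobalt's code:

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;	/* placeholder handler */
}

static int setup_irq(struct pci_dev *pdev, void *drvdata)
{
	int ret;

	/* Request exactly one MSI vector; the core now remembers
	 * whether MSI is enabled, so no driver-side flag is needed. */
	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret < 1)
		return -EIO;

	ret = request_irq(pci_irq_vector(pdev, 0), irq_handler,
			  IRQF_SHARED, "sketch", drvdata);
	if (ret)
		pci_free_irq_vectors(pdev);
	return ret;
}

static void teardown_irq(struct pci_dev *pdev, void *drvdata)
{
	free_irq(pci_irq_vector(pdev, 0), drvdata);
	pci_free_irq_vectors(pdev);	/* replaces pci_disable_msi() */
}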
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index 07fa08be9e99..d54ebe7e0215 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -97,14 +97,13 @@ struct pctv452e_state {
97 u8 c; /* transaction counter, wraps around... */ 97 u8 c; /* transaction counter, wraps around... */
98 u8 initialized; /* set to 1 if 0x15 has been sent */ 98 u8 initialized; /* set to 1 if 0x15 has been sent */
99 u16 last_rc_key; 99 u16 last_rc_key;
100
101 unsigned char data[80];
102}; 100};
103 101
104static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data, 102static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
105 unsigned int write_len, unsigned int read_len) 103 unsigned int write_len, unsigned int read_len)
106{ 104{
107 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 105 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
106 u8 *buf;
108 u8 id; 107 u8 id;
109 unsigned int rlen; 108 unsigned int rlen;
110 int ret; 109 int ret;
@@ -114,36 +113,39 @@ static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
114 return -EIO; 113 return -EIO;
115 } 114 }
116 115
117 mutex_lock(&state->ca_mutex); 116 buf = kmalloc(64, GFP_KERNEL);
117 if (!buf)
118 return -ENOMEM;
119
118 id = state->c++; 120 id = state->c++;
119 121
120 state->data[0] = SYNC_BYTE_OUT; 122 buf[0] = SYNC_BYTE_OUT;
121 state->data[1] = id; 123 buf[1] = id;
122 state->data[2] = cmd; 124 buf[2] = cmd;
123 state->data[3] = write_len; 125 buf[3] = write_len;
124 126
125 memcpy(state->data + 4, data, write_len); 127 memcpy(buf + 4, data, write_len);
126 128
127 rlen = (read_len > 0) ? 64 : 0; 129 rlen = (read_len > 0) ? 64 : 0;
128 ret = dvb_usb_generic_rw(d, state->data, 4 + write_len, 130 ret = dvb_usb_generic_rw(d, buf, 4 + write_len,
129 state->data, rlen, /* delay_ms */ 0); 131 buf, rlen, /* delay_ms */ 0);
130 if (0 != ret) 132 if (0 != ret)
131 goto failed; 133 goto failed;
132 134
133 ret = -EIO; 135 ret = -EIO;
134 if (SYNC_BYTE_IN != state->data[0] || id != state->data[1]) 136 if (SYNC_BYTE_IN != buf[0] || id != buf[1])
135 goto failed; 137 goto failed;
136 138
137 memcpy(data, state->data + 4, read_len); 139 memcpy(data, buf + 4, read_len);
138 140
139 mutex_unlock(&state->ca_mutex); 141 kfree(buf);
140 return 0; 142 return 0;
141 143
142failed: 144failed:
143 err("CI error %d; %02X %02X %02X -> %*ph.", 145 err("CI error %d; %02X %02X %02X -> %*ph.",
144 ret, SYNC_BYTE_OUT, id, cmd, 3, state->data); 146 ret, SYNC_BYTE_OUT, id, cmd, 3, buf);
145 147
146 mutex_unlock(&state->ca_mutex); 148 kfree(buf);
147 return ret; 149 return ret;
148} 150}
149 151
@@ -410,53 +412,57 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
410 u8 *rcv_buf, u8 rcv_len) 412 u8 *rcv_buf, u8 rcv_len)
411{ 413{
412 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 414 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
415 u8 *buf;
413 u8 id; 416 u8 id;
414 int ret; 417 int ret;
415 418
416 mutex_lock(&state->ca_mutex); 419 buf = kmalloc(64, GFP_KERNEL);
420 if (!buf)
421 return -ENOMEM;
422
417 id = state->c++; 423 id = state->c++;
418 424
419 ret = -EINVAL; 425 ret = -EINVAL;
420 if (snd_len > 64 - 7 || rcv_len > 64 - 7) 426 if (snd_len > 64 - 7 || rcv_len > 64 - 7)
421 goto failed; 427 goto failed;
422 428
423 state->data[0] = SYNC_BYTE_OUT; 429 buf[0] = SYNC_BYTE_OUT;
424 state->data[1] = id; 430 buf[1] = id;
425 state->data[2] = PCTV_CMD_I2C; 431 buf[2] = PCTV_CMD_I2C;
426 state->data[3] = snd_len + 3; 432 buf[3] = snd_len + 3;
427 state->data[4] = addr << 1; 433 buf[4] = addr << 1;
428 state->data[5] = snd_len; 434 buf[5] = snd_len;
429 state->data[6] = rcv_len; 435 buf[6] = rcv_len;
430 436
431 memcpy(state->data + 7, snd_buf, snd_len); 437 memcpy(buf + 7, snd_buf, snd_len);
432 438
433 ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len, 439 ret = dvb_usb_generic_rw(d, buf, 7 + snd_len,
434 state->data, /* rcv_len */ 64, 440 buf, /* rcv_len */ 64,
435 /* delay_ms */ 0); 441 /* delay_ms */ 0);
436 if (ret < 0) 442 if (ret < 0)
437 goto failed; 443 goto failed;
438 444
439 /* TT USB protocol error. */ 445 /* TT USB protocol error. */
440 ret = -EIO; 446 ret = -EIO;
441 if (SYNC_BYTE_IN != state->data[0] || id != state->data[1]) 447 if (SYNC_BYTE_IN != buf[0] || id != buf[1])
442 goto failed; 448 goto failed;
443 449
444 /* I2C device didn't respond as expected. */ 450 /* I2C device didn't respond as expected. */
445 ret = -EREMOTEIO; 451 ret = -EREMOTEIO;
446 if (state->data[5] < snd_len || state->data[6] < rcv_len) 452 if (buf[5] < snd_len || buf[6] < rcv_len)
447 goto failed; 453 goto failed;
448 454
449 memcpy(rcv_buf, state->data + 7, rcv_len); 455 memcpy(rcv_buf, buf + 7, rcv_len);
450 mutex_unlock(&state->ca_mutex);
451 456
457 kfree(buf);
452 return rcv_len; 458 return rcv_len;
453 459
454failed: 460failed:
455 err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph", 461 err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph",
456 ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len, 462 ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
457 7, state->data); 463 7, buf);
458 464
459 mutex_unlock(&state->ca_mutex); 465 kfree(buf);
460 return ret; 466 return ret;
461} 467}
462 468
@@ -505,7 +511,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
505static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) 511static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
506{ 512{
507 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 513 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
508 u8 *rx; 514 u8 *b0, *rx;
509 int ret; 515 int ret;
510 516
511 info("%s: %d\n", __func__, i); 517 info("%s: %d\n", __func__, i);
@@ -516,11 +522,12 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
516 if (state->initialized) 522 if (state->initialized)
517 return 0; 523 return 0;
518 524
519 rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL); 525 b0 = kmalloc(5 + PCTV_ANSWER_LEN, GFP_KERNEL);
520 if (!rx) 526 if (!b0)
521 return -ENOMEM; 527 return -ENOMEM;
522 528
523 mutex_lock(&state->ca_mutex); 529 rx = b0 + 5;
530
524 /* hmm, where should this go? */ 531 /* hmm, where should this go? */
525 ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE); 532 ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
526 if (ret != 0) 533 if (ret != 0)
@@ -528,66 +535,70 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
528 __func__, ret); 535 __func__, ret);
529 536
530 /* this is a one-time initialization, don't know where to put it */ 537 /* this is a one-time initialization, don't know where to put it */
531 state->data[0] = 0xaa; 538 b0[0] = 0xaa;
532 state->data[1] = state->c++; 539 b0[1] = state->c++;
533 state->data[2] = PCTV_CMD_RESET; 540 b0[2] = PCTV_CMD_RESET;
534 state->data[3] = 1; 541 b0[3] = 1;
535 state->data[4] = 0; 542 b0[4] = 0;
536 /* reset board */ 543 /* reset board */
537 ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0); 544 ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
538 if (ret) 545 if (ret)
539 goto ret; 546 goto ret;
540 547
541 state->data[1] = state->c++; 548 b0[1] = state->c++;
542 state->data[4] = 1; 549 b0[4] = 1;
543 /* reset board (again?) */ 550 /* reset board (again?) */
544 ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0); 551 ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
545 if (ret) 552 if (ret)
546 goto ret; 553 goto ret;
547 554
548 state->initialized = 1; 555 state->initialized = 1;
549 556
550ret: 557ret:
551 mutex_unlock(&state->ca_mutex); 558 kfree(b0);
552 kfree(rx);
553 return ret; 559 return ret;
554} 560}
555 561
556static int pctv452e_rc_query(struct dvb_usb_device *d) 562static int pctv452e_rc_query(struct dvb_usb_device *d)
557{ 563{
558 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 564 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
565 u8 *b, *rx;
559 int ret, i; 566 int ret, i;
560 u8 id; 567 u8 id;
561 568
562 mutex_lock(&state->ca_mutex); 569 b = kmalloc(CMD_BUFFER_SIZE + PCTV_ANSWER_LEN, GFP_KERNEL);
570 if (!b)
571 return -ENOMEM;
572
573 rx = b + CMD_BUFFER_SIZE;
574
563 id = state->c++; 575 id = state->c++;
564 576
565 /* prepare command header */ 577 /* prepare command header */
566 state->data[0] = SYNC_BYTE_OUT; 578 b[0] = SYNC_BYTE_OUT;
567 state->data[1] = id; 579 b[1] = id;
568 state->data[2] = PCTV_CMD_IR; 580 b[2] = PCTV_CMD_IR;
569 state->data[3] = 0; 581 b[3] = 0;
570 582
571 /* send ir request */ 583 /* send ir request */
572 ret = dvb_usb_generic_rw(d, state->data, 4, 584 ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0);
573 state->data, PCTV_ANSWER_LEN, 0);
574 if (ret != 0) 585 if (ret != 0)
575 goto ret; 586 goto ret;
576 587
577 if (debug > 3) { 588 if (debug > 3) {
578 info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data); 589 info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx);
579 for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++) 590 for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++)
580 info(" %02x", state->data[i + 3]); 591 info(" %02x", rx[i+3]);
581 592
582 info("\n"); 593 info("\n");
583 } 594 }
584 595
585 if ((state->data[3] == 9) && (state->data[12] & 0x01)) { 596 if ((rx[3] == 9) && (rx[12] & 0x01)) {
586 /* got a "press" event */ 597 /* got a "press" event */
587 state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]); 598 state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]);
588 if (debug > 2) 599 if (debug > 2)
589 info("%s: cmd=0x%02x sys=0x%02x\n", 600 info("%s: cmd=0x%02x sys=0x%02x\n",
590 __func__, state->data[6], state->data[7]); 601 __func__, rx[6], rx[7]);
591 602
592 rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0); 603 rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0);
593 } else if (state->last_rc_key) { 604 } else if (state->last_rc_key) {
@@ -595,7 +606,7 @@ static int pctv452e_rc_query(struct dvb_usb_device *d)
595 state->last_rc_key = 0; 606 state->last_rc_key = 0;
596 } 607 }
597ret: 608ret:
598 mutex_unlock(&state->ca_mutex); 609 kfree(b);
599 return ret; 610 return ret;
600} 611}
601 612
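Every pctv452e hunk applies the same recipe: the fixed data[80] array embedded in the device state, and the ca_mutex serializing access to it, give way to a buffer kmalloc()ed per call. USB transfer buffers must be DMA-able heap memory rather than driver-global storage shared across calls, and a per-call buffer needs no lock and is freed on every exit path. A condensed sketch of the shape each function now takes; xfer() is a hypothetical stand-in for dvb_usb_generic_rw() and 64 is the device's message size:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/errno.h>

static int xfer(u8 *snd, unsigned int slen, u8 *rcv, unsigned int rlen)
{
	return 0;	/* stand-in for dvb_usb_generic_rw() */
}

static int msg_xfer_sketch(u8 cmd, u8 *data, unsigned int wlen,
			   unsigned int rlen)
{
	u8 *buf;
	int ret;

	buf = kmalloc(64, GFP_KERNEL);	/* DMA-able, private to this call */
	if (!buf)
		return -ENOMEM;

	buf[0] = cmd;
	memcpy(buf + 1, data, wlen);

	ret = xfer(buf, 1 + wlen, buf, rlen);
	if (!ret)
		memcpy(data, buf + 1, rlen);

	kfree(buf);	/* freed on every path; no mutex needed */
	return ret;
}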
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index a0547dbf9806..76382c858c35 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -330,7 +330,7 @@ static int h_memstick_read_dev_id(struct memstick_dev *card,
330 struct ms_id_register id_reg; 330 struct ms_id_register id_reg;
331 331
332 if (!(*mrq)) { 332 if (!(*mrq)) {
333 memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL, 333 memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
334 sizeof(struct ms_id_register)); 334 sizeof(struct ms_id_register));
335 *mrq = &card->current_mrq; 335 *mrq = &card->current_mrq;
336 return 0; 336 return 0;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index b44306b886cb..73db08558e4d 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -3354,10 +3354,11 @@ int dw_mci_runtime_resume(struct device *dev)
3354 3354
3355 if (!slot) 3355 if (!slot)
3356 continue; 3356 continue;
3357 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) { 3357 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
3358 dw_mci_set_ios(slot->mmc, &slot->mmc->ios); 3358 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3359 dw_mci_setup_bus(slot, true); 3359
3360 } 3360 /* Force setup bus to guarantee available clock output */
3361 dw_mci_setup_bus(slot, true);
3361 } 3362 }
3362 3363
3363 /* Now that slots are all setup, we can enable card detect */ 3364 /* Now that slots are all setup, we can enable card detect */
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index 7be393c96b1a..cf7c18947189 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -161,6 +161,7 @@ static int c_can_pci_probe(struct pci_dev *pdev,
161 161
162 dev->irq = pdev->irq; 162 dev->irq = pdev->irq;
163 priv->base = addr; 163 priv->base = addr;
164 priv->device = &pdev->dev;
164 165
165 if (!c_can_pci_data->freq) { 166 if (!c_can_pci_data->freq) {
166 dev_err(&pdev->dev, "no clock frequency defined\n"); 167 dev_err(&pdev->dev, "no clock frequency defined\n");
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 680d1ff07a55..6749b1829469 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -948,7 +948,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
948 netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll, 948 netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
949 HECC_DEF_NAPI_WEIGHT); 949 HECC_DEF_NAPI_WEIGHT);
950 950
951 clk_enable(priv->clk); 951 err = clk_prepare_enable(priv->clk);
952 if (err) {
953 dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
954 goto probe_exit_clk;
955 }
956
952 err = register_candev(ndev); 957 err = register_candev(ndev);
953 if (err) { 958 if (err) {
954 dev_err(&pdev->dev, "register_candev() failed\n"); 959 dev_err(&pdev->dev, "register_candev() failed\n");
@@ -981,7 +986,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
981 struct ti_hecc_priv *priv = netdev_priv(ndev); 986 struct ti_hecc_priv *priv = netdev_priv(ndev);
982 987
983 unregister_candev(ndev); 988 unregister_candev(ndev);
984 clk_disable(priv->clk); 989 clk_disable_unprepare(priv->clk);
985 clk_put(priv->clk); 990 clk_put(priv->clk);
986 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 991 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
987 iounmap(priv->base); 992 iounmap(priv->base);
@@ -1006,7 +1011,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
1006 hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR); 1011 hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
1007 priv->can.state = CAN_STATE_SLEEPING; 1012 priv->can.state = CAN_STATE_SLEEPING;
1008 1013
1009 clk_disable(priv->clk); 1014 clk_disable_unprepare(priv->clk);
1010 1015
1011 return 0; 1016 return 0;
1012} 1017}
@@ -1015,8 +1020,11 @@ static int ti_hecc_resume(struct platform_device *pdev)
1015{ 1020{
1016 struct net_device *dev = platform_get_drvdata(pdev); 1021 struct net_device *dev = platform_get_drvdata(pdev);
1017 struct ti_hecc_priv *priv = netdev_priv(dev); 1022 struct ti_hecc_priv *priv = netdev_priv(dev);
1023 int err;
1018 1024
1019 clk_enable(priv->clk); 1025 err = clk_prepare_enable(priv->clk);
1026 if (err)
1027 return err;
1020 1028
1021 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR); 1029 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
1022 priv->can.state = CAN_STATE_ERROR_ACTIVE; 1030 priv->can.state = CAN_STATE_ERROR_ACTIVE;
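The ti_hecc hunks replace bare clk_enable()/clk_disable() with the clk_prepare_enable()/clk_disable_unprepare() pair and start checking the return value, so probe and resume can fail cleanly instead of touching an unclocked device. A minimal sketch of the pairing, assuming a driver that stores its clock in priv->clk:

#include <linux/clk.h>
#include <linux/device.h>

/* prepare+enable must succeed before the hardware is touched, and
 * every successful call is balanced by disable+unprepare on the
 * remove/suspend path. */
static int power_up(struct device *dev, struct clk *clk)
{
	int err = clk_prepare_enable(clk);

	if (err)
		dev_err(dev, "clk_prepare_enable() failed\n");
	return err;	/* probe/resume propagate this upward */
}

static void power_down(struct clk *clk)
{
	clk_disable_unprepare(clk);
}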
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 5b7ba25e0065..8a280e7d66bd 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -891,6 +891,8 @@
891#define PCS_V1_WINDOW_SELECT 0x03fc 891#define PCS_V1_WINDOW_SELECT 0x03fc
892#define PCS_V2_WINDOW_DEF 0x9060 892#define PCS_V2_WINDOW_DEF 0x9060
893#define PCS_V2_WINDOW_SELECT 0x9064 893#define PCS_V2_WINDOW_SELECT 0x9064
894#define PCS_V2_RV_WINDOW_DEF 0x1060
895#define PCS_V2_RV_WINDOW_SELECT 0x1064
894 896
895/* PCS register entry bit positions and sizes */ 897/* PCS register entry bit positions and sizes */
896#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6 898#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index aaf0350076a9..a7d16db5c4b2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1151,7 +1151,7 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
1151 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); 1151 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
1152 1152
1153 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1153 spin_lock_irqsave(&pdata->xpcs_lock, flags);
1154 XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index); 1154 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
1155 mmd_data = XPCS16_IOREAD(pdata, offset); 1155 mmd_data = XPCS16_IOREAD(pdata, offset);
1156 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1156 spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1157 1157
@@ -1183,7 +1183,7 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
1183 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); 1183 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
1184 1184
1185 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1185 spin_lock_irqsave(&pdata->xpcs_lock, flags);
1186 XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index); 1186 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
1187 XPCS16_IOWRITE(pdata, offset, mmd_data); 1187 XPCS16_IOWRITE(pdata, offset, mmd_data);
1188 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1188 spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1189} 1189}
@@ -3407,8 +3407,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
3407 3407
3408 /* Flush Tx queues */ 3408 /* Flush Tx queues */
3409 ret = xgbe_flush_tx_queues(pdata); 3409 ret = xgbe_flush_tx_queues(pdata);
3410 if (ret) 3410 if (ret) {
3411 netdev_err(pdata->netdev, "error flushing TX queues\n");
3411 return ret; 3412 return ret;
3413 }
3412 3414
3413 /* 3415 /*
3414 * Initialize DMA related features 3416 * Initialize DMA related features
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index f8648e4dbca3..3aa457c8ca21 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1070,7 +1070,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
1070 1070
1071 DBGPR("-->xgbe_start\n"); 1071 DBGPR("-->xgbe_start\n");
1072 1072
1073 hw_if->init(pdata); 1073 ret = hw_if->init(pdata);
1074 if (ret)
1075 return ret;
1074 1076
1075 xgbe_napi_enable(pdata, 1); 1077 xgbe_napi_enable(pdata, 1);
1076 1078
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index e76b7f65b805..c2730f15bd8b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -265,6 +265,7 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
265 struct xgbe_prv_data *pdata; 265 struct xgbe_prv_data *pdata;
266 struct device *dev = &pdev->dev; 266 struct device *dev = &pdev->dev;
267 void __iomem * const *iomap_table; 267 void __iomem * const *iomap_table;
268 struct pci_dev *rdev;
268 unsigned int ma_lo, ma_hi; 269 unsigned int ma_lo, ma_hi;
269 unsigned int reg; 270 unsigned int reg;
270 int bar_mask; 271 int bar_mask;
@@ -326,8 +327,20 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
326 if (netif_msg_probe(pdata)) 327 if (netif_msg_probe(pdata))
327 dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs); 328 dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
328 329
330 /* Set the PCS indirect addressing definition registers */
331 rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
332 if (rdev &&
333 (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
334 pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
335 pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
336 } else {
337 pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
338 pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
339 }
340 pci_dev_put(rdev);
341
329 /* Configure the PCS indirect addressing support */ 342 /* Configure the PCS indirect addressing support */
330 reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF); 343 reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
331 pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET); 344 pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
332 pdata->xpcs_window <<= 6; 345 pdata->xpcs_window <<= 6;
333 pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE); 346 pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f52a9bd05bac..00108815b55e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -955,6 +955,8 @@ struct xgbe_prv_data {
955 955
956 /* XPCS indirect addressing lock */ 956 /* XPCS indirect addressing lock */
957 spinlock_t xpcs_lock; 957 spinlock_t xpcs_lock;
958 unsigned int xpcs_window_def_reg;
959 unsigned int xpcs_window_sel_reg;
958 unsigned int xpcs_window; 960 unsigned int xpcs_window;
959 unsigned int xpcs_window_size; 961 unsigned int xpcs_window_size;
960 unsigned int xpcs_window_mask; 962 unsigned int xpcs_window_mask;
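The xgbe series makes the PCS window registers per-platform: when the root complex at 00:00.0 is the AMD 0x15d0 device (detected with pci_get_domain_bus_and_slot() above), the DEF/SELECT pair lives at 0x1060/0x1064 instead of 0x9060/0x9064, so the MMD accessors read the select register out of pdata rather than hard-coding PCS_V2_WINDOW_SELECT. The mechanism itself is ordinary windowed indirection; a generic sketch, with a flat array standing in for MMIO and the driver's spinlock omitted:

#include <stdint.h>

static uint32_t regs[0x10000 / 4];	/* stand-in for the MMIO region */

/* Reach a large MMD address space through a small window: write the
 * window index into the platform's SELECT register, then access the
 * masked offset inside the window (cf. xgbe_read_mmd_regs_v2()). */
static uint32_t window_read(uint32_t sel_reg, uint32_t window_base,
			    uint32_t window_mask, uint32_t mmd_address)
{
	uint32_t index  = mmd_address & ~window_mask;	/* which window */
	uint32_t offset = window_base + (mmd_address & window_mask);

	regs[sel_reg / 4] = index;	/* XPCS32_IOWRITE(..., sel_reg, index) */
	return regs[offset / 4];	/* XPCS16_IOREAD(..., offset) */
}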
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 4c80e0689db9..391bb5c09a6a 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -685,8 +685,6 @@ static int alx_alloc_rings(struct alx_priv *alx)
685 return -ENOMEM; 685 return -ENOMEM;
686 } 686 }
687 687
688 alx_reinit_rings(alx);
689
690 return 0; 688 return 0;
691} 689}
692 690
@@ -703,7 +701,7 @@ static void alx_free_rings(struct alx_priv *alx)
703 if (alx->qnapi[0] && alx->qnapi[0]->rxq) 701 if (alx->qnapi[0] && alx->qnapi[0]->rxq)
704 kfree(alx->qnapi[0]->rxq->bufs); 702 kfree(alx->qnapi[0]->rxq->bufs);
705 703
706 if (!alx->descmem.virt) 704 if (alx->descmem.virt)
707 dma_free_coherent(&alx->hw.pdev->dev, 705 dma_free_coherent(&alx->hw.pdev->dev,
708 alx->descmem.size, 706 alx->descmem.size,
709 alx->descmem.virt, 707 alx->descmem.virt,
@@ -984,6 +982,7 @@ static int alx_realloc_resources(struct alx_priv *alx)
984 alx_free_rings(alx); 982 alx_free_rings(alx);
985 alx_free_napis(alx); 983 alx_free_napis(alx);
986 alx_disable_advanced_intr(alx); 984 alx_disable_advanced_intr(alx);
985 alx_init_intr(alx, false);
987 986
988 err = alx_alloc_napis(alx); 987 err = alx_alloc_napis(alx);
989 if (err) 988 if (err)
@@ -1241,6 +1240,12 @@ static int __alx_open(struct alx_priv *alx, bool resume)
1241 if (err) 1240 if (err)
1242 goto out_free_rings; 1241 goto out_free_rings;
1243 1242
1243 /* must be called after alx_request_irq because the chip stops working
1244 * if the DMA addresses are copied in alx_init_ring_ptrs twice after a
1245 * failed MSI-X interrupt request
1246 */
1247 alx_reinit_rings(alx);
1248
1244 netif_set_real_num_tx_queues(alx->dev, alx->num_txq); 1249 netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
1245 netif_set_real_num_rx_queues(alx->dev, alx->num_rxq); 1250 netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);
1246 1251
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 3b14d5144228..c483618b57bd 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -913,6 +913,8 @@ static int bcm_enet_open(struct net_device *dev)
913 priv->old_link = 0; 913 priv->old_link = 0;
914 priv->old_duplex = -1; 914 priv->old_duplex = -1;
915 priv->old_pause = -1; 915 priv->old_pause = -1;
916 } else {
917 phydev = NULL;
916 } 918 }
917 919
918 /* mask all interrupts and request them */ 920 /* mask all interrupts and request them */
@@ -1083,7 +1085,7 @@ static int bcm_enet_open(struct net_device *dev)
1083 enet_dmac_writel(priv, priv->dma_chan_int_mask, 1085 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1084 ENETDMAC_IRMASK, priv->tx_chan); 1086 ENETDMAC_IRMASK, priv->tx_chan);
1085 1087
1086 if (priv->has_phy) 1088 if (phydev)
1087 phy_start(phydev); 1089 phy_start(phydev);
1088 else 1090 else
1089 bcm_enet_adjust_link(dev); 1091 bcm_enet_adjust_link(dev);
@@ -1126,7 +1128,7 @@ out_freeirq:
1126 free_irq(dev->irq, dev); 1128 free_irq(dev->irq, dev);
1127 1129
1128out_phy_disconnect: 1130out_phy_disconnect:
1129 if (priv->has_phy) 1131 if (phydev)
1130 phy_disconnect(phydev); 1132 phy_disconnect(phydev);
1131 1133
1132 return ret; 1134 return ret;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 2b46f9b09a03..3d83b9028014 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1096,7 +1096,7 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1096{ 1096{
1097#ifdef CONFIG_INET 1097#ifdef CONFIG_INET
1098 struct tcphdr *th; 1098 struct tcphdr *th;
1099 int len, nw_off, tcp_opt_len; 1099 int len, nw_off, tcp_opt_len = 0;
1100 1100
1101 if (tcp_ts) 1101 if (tcp_ts)
1102 tcp_opt_len = 12; 1102 tcp_opt_len = 12;
@@ -5441,17 +5441,12 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
5441 if ((link_info->support_auto_speeds | diff) != 5441 if ((link_info->support_auto_speeds | diff) !=
5442 link_info->support_auto_speeds) { 5442 link_info->support_auto_speeds) {
5443 /* An advertised speed is no longer supported, so we need to 5443 /* An advertised speed is no longer supported, so we need to
5444 * update the advertisement settings. See bnxt_reset() for 5444 * update the advertisement settings. Caller holds RTNL
5445 * comments about the rtnl_lock() sequence below. 5445 * so we can modify link settings.
5446 */ 5446 */
5447 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5448 rtnl_lock();
5449 link_info->advertising = link_info->support_auto_speeds; 5447 link_info->advertising = link_info->support_auto_speeds;
5450 if (test_bit(BNXT_STATE_OPEN, &bp->state) && 5448 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
5451 (link_info->autoneg & BNXT_AUTONEG_SPEED))
5452 bnxt_hwrm_set_link_setting(bp, true, false); 5449 bnxt_hwrm_set_link_setting(bp, true, false);
5453 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5454 rtnl_unlock();
5455 } 5450 }
5456 return 0; 5451 return 0;
5457} 5452}
@@ -6367,29 +6362,37 @@ bnxt_restart_timer:
6367 mod_timer(&bp->timer, jiffies + bp->current_interval); 6362 mod_timer(&bp->timer, jiffies + bp->current_interval);
6368} 6363}
6369 6364
6370/* Only called from bnxt_sp_task() */ 6365static void bnxt_rtnl_lock_sp(struct bnxt *bp)
6371static void bnxt_reset(struct bnxt *bp, bool silent)
6372{ 6366{
6373 /* bnxt_reset_task() calls bnxt_close_nic() which waits 6367 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
6374 * for BNXT_STATE_IN_SP_TASK to clear. 6368 * set. If the device is being closed, bnxt_close() may be holding
6375 * If there is a parallel dev_close(), bnxt_close() may be holding
6376 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we 6369 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
6377 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 6370 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
6378 */ 6371 */
6379 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6372 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6380 rtnl_lock(); 6373 rtnl_lock();
6381 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 6374}
6382 bnxt_reset_task(bp, silent); 6375
6376static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
6377{
6383 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6378 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6384 rtnl_unlock(); 6379 rtnl_unlock();
6385} 6380}
6386 6381
6382/* Only called from bnxt_sp_task() */
6383static void bnxt_reset(struct bnxt *bp, bool silent)
6384{
6385 bnxt_rtnl_lock_sp(bp);
6386 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6387 bnxt_reset_task(bp, silent);
6388 bnxt_rtnl_unlock_sp(bp);
6389}
6390
6387static void bnxt_cfg_ntp_filters(struct bnxt *); 6391static void bnxt_cfg_ntp_filters(struct bnxt *);
6388 6392
6389static void bnxt_sp_task(struct work_struct *work) 6393static void bnxt_sp_task(struct work_struct *work)
6390{ 6394{
6391 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 6395 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
6392 int rc;
6393 6396
6394 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6397 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6395 smp_mb__after_atomic(); 6398 smp_mb__after_atomic();
@@ -6403,16 +6406,6 @@ static void bnxt_sp_task(struct work_struct *work)
6403 6406
6404 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 6407 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
6405 bnxt_cfg_ntp_filters(bp); 6408 bnxt_cfg_ntp_filters(bp);
6406 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
6407 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
6408 &bp->sp_event))
6409 bnxt_hwrm_phy_qcaps(bp);
6410
6411 rc = bnxt_update_link(bp, true);
6412 if (rc)
6413 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
6414 rc);
6415 }
6416 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 6409 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
6417 bnxt_hwrm_exec_fwd_req(bp); 6410 bnxt_hwrm_exec_fwd_req(bp);
6418 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { 6411 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
@@ -6433,18 +6426,39 @@ static void bnxt_sp_task(struct work_struct *work)
6433 bnxt_hwrm_tunnel_dst_port_free( 6426 bnxt_hwrm_tunnel_dst_port_free(
6434 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 6427 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6435 } 6428 }
6429 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
6430 bnxt_hwrm_port_qstats(bp);
6431
6432 /* The functions below will clear BNXT_STATE_IN_SP_TASK. They must
6433 * be the last functions called before exiting.
6434 */
6435 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
6436 int rc = 0;
6437
6438 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
6439 &bp->sp_event))
6440 bnxt_hwrm_phy_qcaps(bp);
6441
6442 bnxt_rtnl_lock_sp(bp);
6443 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6444 rc = bnxt_update_link(bp, true);
6445 bnxt_rtnl_unlock_sp(bp);
6446 if (rc)
6447 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
6448 rc);
6449 }
6450 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
6451 bnxt_rtnl_lock_sp(bp);
6452 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6453 bnxt_get_port_module_status(bp);
6454 bnxt_rtnl_unlock_sp(bp);
6455 }
6436 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 6456 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
6437 bnxt_reset(bp, false); 6457 bnxt_reset(bp, false);
6438 6458
6439 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 6459 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
6440 bnxt_reset(bp, true); 6460 bnxt_reset(bp, true);
6441 6461
6442 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
6443 bnxt_get_port_module_status(bp);
6444
6445 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
6446 bnxt_hwrm_port_qstats(bp);
6447
6448 smp_mb__before_atomic(); 6462 smp_mb__before_atomic();
6449 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6463 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6450} 6464}
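The bnxt rework funnels a delicate ordering through bnxt_rtnl_lock_sp()/bnxt_rtnl_unlock_sp(): bnxt_close() takes rtnl and then waits for BNXT_STATE_IN_SP_TASK to clear, so the sp task must drop that bit before it blocks on rtnl, and restore it before releasing, or the two paths deadlock. The same shape in portable C, strictly as an analogy:

#include <pthread.h>
#include <stdatomic.h>

/* rtnl plays the big lock, in_sp_task the BNXT_STATE_IN_SP_TASK bit.
 * The closer holds the lock and spins until the flag clears, so the
 * worker clears the flag *before* blocking on the lock. */
static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
static atomic_int in_sp_task = 1;

static void sp_lock(void)	/* cf. bnxt_rtnl_lock_sp() */
{
	atomic_store(&in_sp_task, 0);
	pthread_mutex_lock(&rtnl);
}

static void sp_unlock(void)	/* cf. bnxt_rtnl_unlock_sp() */
{
	atomic_store(&in_sp_task, 1);
	pthread_mutex_unlock(&rtnl);
}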
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index a6e7afa878be..c1b671667920 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2948,7 +2948,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2948 } 2948 }
2949 2949
2950 /* try reuse page */ 2950 /* try reuse page */
2951 if (unlikely(page_count(page) != 1)) 2951 if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
2952 return false; 2952 return false;
2953 2953
2954 /* change offset to the other half */ 2954 /* change offset to the other half */
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index c6ba75c595e0..b618be6d14cd 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1607,8 +1607,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1607 netdev->netdev_ops = &ibmveth_netdev_ops; 1607 netdev->netdev_ops = &ibmveth_netdev_ops;
1608 netdev->ethtool_ops = &netdev_ethtool_ops; 1608 netdev->ethtool_ops = &netdev_ethtool_ops;
1609 SET_NETDEV_DEV(netdev, &dev->dev); 1609 SET_NETDEV_DEV(netdev, &dev->dev);
1610 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | 1610 netdev->hw_features = NETIF_F_SG;
1611 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1611 if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
1612 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1613 NETIF_F_RXCSUM;
1614 }
1612 1615
1613 netdev->features |= netdev->hw_features; 1616 netdev->features |= netdev->hw_features;
1614 1617
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 25ae0c5bce3a..9e757684816d 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2515,7 +2515,7 @@ static int mtk_remove(struct platform_device *pdev)
2515} 2515}
2516 2516
2517const struct of_device_id of_mtk_match[] = { 2517const struct of_device_id of_mtk_match[] = {
2518 { .compatible = "mediatek,mt7623-eth" }, 2518 { .compatible = "mediatek,mt2701-eth" },
2519 {}, 2519 {},
2520}; 2520};
2521MODULE_DEVICE_TABLE(of, of_mtk_match); 2521MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index d9c9f86a30df..d5a9372ed84d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1732,8 +1732,6 @@ static void mlx4_en_get_channels(struct net_device *dev,
1732{ 1732{
1733 struct mlx4_en_priv *priv = netdev_priv(dev); 1733 struct mlx4_en_priv *priv = netdev_priv(dev);
1734 1734
1735 memset(channel, 0, sizeof(*channel));
1736
1737 channel->max_rx = MAX_RX_RINGS; 1735 channel->max_rx = MAX_RX_RINGS;
1738 channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP; 1736 channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
1739 1737
@@ -1752,10 +1750,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
1752 int xdp_count; 1750 int xdp_count;
1753 int err = 0; 1751 int err = 0;
1754 1752
1755 if (channel->other_count || channel->combined_count || 1753 if (!channel->tx_count || !channel->rx_count)
1756 channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
1757 channel->rx_count > MAX_RX_RINGS ||
1758 !channel->tx_count || !channel->rx_count)
1759 return -EINVAL; 1754 return -EINVAL;
1760 1755
1761 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 1756 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 6c1a5cb43f8c..6236ce95b8e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -560,7 +560,6 @@ static int mlx5e_set_channels(struct net_device *dev,
560 struct ethtool_channels *ch) 560 struct ethtool_channels *ch)
561{ 561{
562 struct mlx5e_priv *priv = netdev_priv(dev); 562 struct mlx5e_priv *priv = netdev_priv(dev);
563 int ncv = priv->profile->max_nch(priv->mdev);
564 unsigned int count = ch->combined_count; 563 unsigned int count = ch->combined_count;
565 bool arfs_enabled; 564 bool arfs_enabled;
566 bool was_opened; 565 bool was_opened;
@@ -571,16 +570,6 @@ static int mlx5e_set_channels(struct net_device *dev,
571 __func__); 570 __func__);
572 return -EINVAL; 571 return -EINVAL;
573 } 572 }
574 if (ch->rx_count || ch->tx_count) {
575 netdev_info(dev, "%s: separate rx/tx count not supported\n",
576 __func__);
577 return -EINVAL;
578 }
579 if (count > ncv) {
580 netdev_info(dev, "%s: count (%d) > max (%d)\n",
581 __func__, count, ncv);
582 return -EINVAL;
583 }
584 573
585 if (priv->params.num_channels == count) 574 if (priv->params.num_channels == count)
586 return 0; 575 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 3d2e1a1886a5..fd8dff6acc12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -190,6 +190,9 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
190 return false; 190 return false;
191 } 191 }
192 192
193 if (unlikely(page_is_pfmemalloc(dma_info->page)))
194 return false;
195
193 cache->page_cache[cache->tail] = *dma_info; 196 cache->page_cache[cache->tail] = *dma_info;
194 cache->tail = tail_next; 197 cache->tail = tail_next;
195 return true; 198 return true;
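The gianfar hunk above and this mlx5 one add the same guard: a receive page may be recycled only when the driver holds the sole reference and the page was not handed out from the pfmemalloc emergency reserves, which exist to guarantee forward progress under memory pressure and must be returned to the allocator rather than cached. The combined test, sketched with an illustrative helper name:

#include <linux/mm.h>

static bool can_recycle_rx_page(struct page *page)
{
	if (page_count(page) != 1)	/* the stack still holds a reference */
		return false;
	if (page_is_pfmemalloc(page))	/* emergency-reserve page: give it back */
		return false;
	return true;
}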
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 01d0efa9c5c7..9e494a446b7e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1172,7 +1172,8 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
1172 1172
1173static int 1173static int
1174mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp, 1174mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
1175 struct mlxsw_sp_nexthop_group *nh_grp) 1175 struct mlxsw_sp_nexthop_group *nh_grp,
1176 bool reallocate)
1176{ 1177{
1177 u32 adj_index = nh_grp->adj_index; /* base */ 1178 u32 adj_index = nh_grp->adj_index; /* base */
1178 struct mlxsw_sp_nexthop *nh; 1179 struct mlxsw_sp_nexthop *nh;
@@ -1187,7 +1188,7 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
1187 continue; 1188 continue;
1188 } 1189 }
1189 1190
1190 if (nh->update) { 1191 if (nh->update || reallocate) {
1191 err = mlxsw_sp_nexthop_mac_update(mlxsw_sp, 1192 err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
1192 adj_index, nh); 1193 adj_index, nh);
1193 if (err) 1194 if (err)
@@ -1248,7 +1249,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
1248 /* Nothing was added or removed, so no need to reallocate. Just 1249 /* Nothing was added or removed, so no need to reallocate. Just
1249 * update MAC on existing adjacency indexes. 1250 * update MAC on existing adjacency indexes.
1250 */ 1251 */
1251 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp); 1252 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
1253 false);
1252 if (err) { 1254 if (err) {
1253 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); 1255 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1254 goto set_trap; 1256 goto set_trap;
@@ -1276,7 +1278,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
1276 nh_grp->adj_index_valid = 1; 1278 nh_grp->adj_index_valid = 1;
1277 nh_grp->adj_index = adj_index; 1279 nh_grp->adj_index = adj_index;
1278 nh_grp->ecmp_size = ecmp_size; 1280 nh_grp->ecmp_size = ecmp_size;
1279 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp); 1281 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
1280 if (err) { 1282 if (err) {
1281 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); 1283 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1282 goto set_trap; 1284 goto set_trap;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 05e32f4322eb..02c5d47cfc6d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -320,7 +320,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
320 list_del(&p_pkt->list_entry); 320 list_del(&p_pkt->list_entry);
321 b_last_packet = list_empty(&p_tx->active_descq); 321 b_last_packet = list_empty(&p_tx->active_descq);
322 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); 322 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
323 if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) { 323 if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
324 struct qed_ooo_buffer *p_buffer; 324 struct qed_ooo_buffer *p_buffer;
325 325
326 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; 326 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -332,7 +332,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
332 b_last_frag = 332 b_last_frag =
333 p_tx->cur_completing_bd_idx == p_pkt->bd_used; 333 p_tx->cur_completing_bd_idx == p_pkt->bd_used;
334 tx_frag = p_pkt->bds_set[0].tx_frag; 334 tx_frag = p_pkt->bds_set[0].tx_frag;
335 if (p_ll2_conn->gsi_enable) 335 if (p_ll2_conn->conn.gsi_enable)
336 qed_ll2b_release_tx_gsi_packet(p_hwfn, 336 qed_ll2b_release_tx_gsi_packet(p_hwfn,
337 p_ll2_conn-> 337 p_ll2_conn->
338 my_id, 338 my_id,
@@ -401,7 +401,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
401 401
402 spin_unlock_irqrestore(&p_tx->lock, flags); 402 spin_unlock_irqrestore(&p_tx->lock, flags);
403 tx_frag = p_pkt->bds_set[0].tx_frag; 403 tx_frag = p_pkt->bds_set[0].tx_frag;
404 if (p_ll2_conn->gsi_enable) 404 if (p_ll2_conn->conn.gsi_enable)
405 qed_ll2b_complete_tx_gsi_packet(p_hwfn, 405 qed_ll2b_complete_tx_gsi_packet(p_hwfn,
406 p_ll2_conn->my_id, 406 p_ll2_conn->my_id,
407 p_pkt->cookie, 407 p_pkt->cookie,
@@ -573,7 +573,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
573 573
574 list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); 574 list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
575 575
576 if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) { 576 if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
577 struct qed_ooo_buffer *p_buffer; 577 struct qed_ooo_buffer *p_buffer;
578 578
579 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; 579 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -761,7 +761,7 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
761 rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1, 761 rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
762 p_buffer->vlan, bd_flags, 762 p_buffer->vlan, bd_flags,
763 l4_hdr_offset_w, 763 l4_hdr_offset_w,
764 p_ll2_conn->tx_dest, 0, 764 p_ll2_conn->conn.tx_dest, 0,
765 first_frag, 765 first_frag,
766 p_buffer->packet_length, 766 p_buffer->packet_length,
767 p_buffer, true); 767 p_buffer, true);
@@ -881,7 +881,7 @@ qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
881 u16 buf_idx; 881 u16 buf_idx;
882 int rc = 0; 882 int rc = 0;
883 883
884 if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO) 884 if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
885 return rc; 885 return rc;
886 886
887 if (!rx_num_ooo_buffers) 887 if (!rx_num_ooo_buffers)
@@ -924,7 +924,7 @@ static void
924qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, 924qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
925 struct qed_ll2_info *p_ll2_conn) 925 struct qed_ll2_info *p_ll2_conn)
926{ 926{
927 if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO) 927 if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
928 return; 928 return;
929 929
930 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); 930 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
@@ -936,7 +936,7 @@ static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
936{ 936{
937 struct qed_ooo_buffer *p_buffer; 937 struct qed_ooo_buffer *p_buffer;
938 938
939 if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO) 939 if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
940 return; 940 return;
941 941
942 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); 942 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
@@ -968,23 +968,19 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
968{ 968{
969 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 969 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
970 u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; 970 u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
971 struct qed_ll2_info *ll2_info; 971 struct qed_ll2_conn ll2_info;
972 int rc; 972 int rc;
973 973
974 ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL); 974 ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
975 if (!ll2_info) 975 ll2_info.mtu = params->mtu;
976 return -ENOMEM; 976 ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
977 ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO; 977 ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
978 ll2_info->mtu = params->mtu; 978 ll2_info.tx_tc = OOO_LB_TC;
979 ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets; 979 ll2_info.tx_dest = CORE_TX_DEST_LB;
980 ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping; 980
981 ll2_info->tx_tc = OOO_LB_TC; 981 rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
982 ll2_info->tx_dest = CORE_TX_DEST_LB;
983
984 rc = qed_ll2_acquire_connection(hwfn, ll2_info,
985 QED_LL2_RX_SIZE, QED_LL2_TX_SIZE, 982 QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
986 handle); 983 handle);
987 kfree(ll2_info);
988 if (rc) { 984 if (rc) {
989 DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n"); 985 DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
990 goto out; 986 goto out;
@@ -1029,7 +1025,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
1029 struct qed_ll2_info *p_ll2_conn, 1025 struct qed_ll2_info *p_ll2_conn,
1030 u8 action_on_error) 1026 u8 action_on_error)
1031{ 1027{
1032 enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type; 1028 enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
1033 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; 1029 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
1034 struct core_rx_start_ramrod_data *p_ramrod = NULL; 1030 struct core_rx_start_ramrod_data *p_ramrod = NULL;
1035 struct qed_spq_entry *p_ent = NULL; 1031 struct qed_spq_entry *p_ent = NULL;
@@ -1055,7 +1051,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
1055 p_ramrod->sb_index = p_rx->rx_sb_index; 1051 p_ramrod->sb_index = p_rx->rx_sb_index;
1056 p_ramrod->complete_event_flg = 1; 1052 p_ramrod->complete_event_flg = 1;
1057 1053
1058 p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu); 1054 p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
1059 DMA_REGPAIR_LE(p_ramrod->bd_base, 1055 DMA_REGPAIR_LE(p_ramrod->bd_base,
1060 p_rx->rxq_chain.p_phys_addr); 1056 p_rx->rxq_chain.p_phys_addr);
1061 cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain); 1057 cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
@@ -1063,8 +1059,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
1063 DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, 1059 DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
1064 qed_chain_get_pbl_phys(&p_rx->rcq_chain)); 1060 qed_chain_get_pbl_phys(&p_rx->rcq_chain));
1065 1061
1066 p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg; 1062 p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
1067 p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en; 1063 p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
1068 p_ramrod->queue_id = p_ll2_conn->queue_id; 1064 p_ramrod->queue_id = p_ll2_conn->queue_id;
1069 p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0 1065 p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
1070 : 1; 1066 : 1;
@@ -1079,14 +1075,14 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
1079 } 1075 }
1080 1076
1081 p_ramrod->action_on_error.error_type = action_on_error; 1077 p_ramrod->action_on_error.error_type = action_on_error;
1082 p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable; 1078 p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
1083 return qed_spq_post(p_hwfn, p_ent, NULL); 1079 return qed_spq_post(p_hwfn, p_ent, NULL);
1084} 1080}
1085 1081
1086static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, 1082static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
1087 struct qed_ll2_info *p_ll2_conn) 1083 struct qed_ll2_info *p_ll2_conn)
1088{ 1084{
1089 enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type; 1085 enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
1090 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; 1086 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1091 struct core_tx_start_ramrod_data *p_ramrod = NULL; 1087 struct core_tx_start_ramrod_data *p_ramrod = NULL;
1092 struct qed_spq_entry *p_ent = NULL; 1088 struct qed_spq_entry *p_ent = NULL;
@@ -1098,7 +1094,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
1098 if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) 1094 if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
1099 return 0; 1095 return 0;
1100 1096
1101 if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) 1097 if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
1102 p_ll2_conn->tx_stats_en = 0; 1098 p_ll2_conn->tx_stats_en = 0;
1103 else 1099 else
1104 p_ll2_conn->tx_stats_en = 1; 1100 p_ll2_conn->tx_stats_en = 1;
@@ -1119,7 +1115,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
1119 1115
1120 p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); 1116 p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
1121 p_ramrod->sb_index = p_tx->tx_sb_index; 1117 p_ramrod->sb_index = p_tx->tx_sb_index;
1122 p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu); 1118 p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
1123 p_ramrod->stats_en = p_ll2_conn->tx_stats_en; 1119 p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
1124 p_ramrod->stats_id = p_ll2_conn->tx_stats_id; 1120 p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
1125 1121
@@ -1129,7 +1125,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
1129 p_ramrod->pbl_size = cpu_to_le16(pbl_size); 1125 p_ramrod->pbl_size = cpu_to_le16(pbl_size);
1130 1126
1131 memset(&pq_params, 0, sizeof(pq_params)); 1127 memset(&pq_params, 0, sizeof(pq_params));
1132 pq_params.core.tc = p_ll2_conn->tx_tc; 1128 pq_params.core.tc = p_ll2_conn->conn.tx_tc;
1133 pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params); 1129 pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
1134 p_ramrod->qm_pq_id = cpu_to_le16(pq_id); 1130 p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
1135 1131
@@ -1146,7 +1142,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
1146 DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type); 1142 DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
1147 } 1143 }
1148 1144
1149 p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable; 1145 p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
1150 return qed_spq_post(p_hwfn, p_ent, NULL); 1146 return qed_spq_post(p_hwfn, p_ent, NULL);
1151} 1147}
1152 1148
@@ -1247,7 +1243,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
1247 1243
1248 DP_VERBOSE(p_hwfn, QED_MSG_LL2, 1244 DP_VERBOSE(p_hwfn, QED_MSG_LL2,
1249 "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n", 1245 "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
1250 p_ll2_info->conn_type, rx_num_desc); 1246 p_ll2_info->conn.conn_type, rx_num_desc);
1251 1247
1252out: 1248out:
1253 return rc; 1249 return rc;
@@ -1285,7 +1281,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
1285 1281
1286 DP_VERBOSE(p_hwfn, QED_MSG_LL2, 1282 DP_VERBOSE(p_hwfn, QED_MSG_LL2,
1287 "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n", 1283 "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
1288 p_ll2_info->conn_type, tx_num_desc); 1284 p_ll2_info->conn.conn_type, tx_num_desc);
1289 1285
1290out: 1286out:
1291 if (rc) 1287 if (rc)
@@ -1296,7 +1292,7 @@ out:
1296} 1292}
1297 1293
1298int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, 1294int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
1299 struct qed_ll2_info *p_params, 1295 struct qed_ll2_conn *p_params,
1300 u16 rx_num_desc, 1296 u16 rx_num_desc,
1301 u16 tx_num_desc, 1297 u16 tx_num_desc,
1302 u8 *p_connection_handle) 1298 u8 *p_connection_handle)
@@ -1325,15 +1321,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
1325 if (!p_ll2_info) 1321 if (!p_ll2_info)
1326 return -EBUSY; 1322 return -EBUSY;
1327 1323
1328 p_ll2_info->conn_type = p_params->conn_type; 1324 p_ll2_info->conn = *p_params;
1329 p_ll2_info->mtu = p_params->mtu;
1330 p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
1331 p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
1332 p_ll2_info->tx_tc = p_params->tx_tc;
1333 p_ll2_info->tx_dest = p_params->tx_dest;
1334 p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
1335 p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
1336 p_ll2_info->gsi_enable = p_params->gsi_enable;
1337 1325
1338 rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc); 1326 rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
1339 if (rc) 1327 if (rc)
@@ -1394,9 +1382,9 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
1394 1382
1395 SET_FIELD(action_on_error, 1383 SET_FIELD(action_on_error,
1396 CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, 1384 CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
1397 p_ll2_conn->ai_err_packet_too_big); 1385 p_ll2_conn->conn.ai_err_packet_too_big);
1398 SET_FIELD(action_on_error, 1386 SET_FIELD(action_on_error,
1399 CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf); 1387 CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);
1400 1388
1401 return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error); 1389 return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
1402} 1390}
@@ -1623,7 +1611,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1623 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n", 1611 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
1624 p_ll2->queue_id, 1612 p_ll2->queue_id,
1625 p_ll2->cid, 1613 p_ll2->cid,
1626 p_ll2->conn_type, 1614 p_ll2->conn.conn_type,
1627 prod_idx, 1615 prod_idx,
1628 first_frag_len, 1616 first_frag_len,
1629 num_of_bds, 1617 num_of_bds,
@@ -1699,7 +1687,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
1699 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), 1687 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1700 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n", 1688 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
1701 p_ll2_conn->queue_id, 1689 p_ll2_conn->queue_id,
1702 p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod); 1690 p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
1703} 1691}
1704 1692
1705int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, 1693int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
@@ -1840,7 +1828,7 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1840 qed_ll2_rxq_flush(p_hwfn, connection_handle); 1828 qed_ll2_rxq_flush(p_hwfn, connection_handle);
1841 } 1829 }
1842 1830
1843 if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) 1831 if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
1844 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); 1832 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
1845 1833
1846 return rc; 1834 return rc;
@@ -2016,7 +2004,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
2016 2004
2017static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) 2005static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
2018{ 2006{
2019 struct qed_ll2_info ll2_info; 2007 struct qed_ll2_conn ll2_info;
2020 struct qed_ll2_buffer *buffer, *tmp_buffer; 2008 struct qed_ll2_buffer *buffer, *tmp_buffer;
2021 enum qed_ll2_conn_type conn_type; 2009 enum qed_ll2_conn_type conn_type;
2022 struct qed_ptt *p_ptt; 2010 struct qed_ptt *p_ptt;
@@ -2064,6 +2052,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
2064 2052
2065 /* Prepare the temporary ll2 information */ 2053 /* Prepare the temporary ll2 information */
2066 memset(&ll2_info, 0, sizeof(ll2_info)); 2054 memset(&ll2_info, 0, sizeof(ll2_info));
2055
2067 ll2_info.conn_type = conn_type; 2056 ll2_info.conn_type = conn_type;
2068 ll2_info.mtu = params->mtu; 2057 ll2_info.mtu = params->mtu;
2069 ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets; 2058 ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
@@ -2143,7 +2132,6 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
2143 } 2132 }
2144 2133
2145 ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address); 2134 ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
2146
2147 return 0; 2135 return 0;
2148 2136
2149release_terminate_all: 2137release_terminate_all:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index c7f2975590ee..db3e4fc78e09 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -135,15 +135,8 @@ struct qed_ll2_tx_queue {
135 bool b_completing_packet; 135 bool b_completing_packet;
136}; 136};
137 137
138struct qed_ll2_info { 138struct qed_ll2_conn {
139 /* Lock protecting the state of LL2 */
140 struct mutex mutex;
141 enum qed_ll2_conn_type conn_type; 139 enum qed_ll2_conn_type conn_type;
142 u32 cid;
143 u8 my_id;
144 u8 queue_id;
145 u8 tx_stats_id;
146 bool b_active;
147 u16 mtu; 140 u16 mtu;
148 u8 rx_drop_ttl0_flg; 141 u8 rx_drop_ttl0_flg;
149 u8 rx_vlan_removal_en; 142 u8 rx_vlan_removal_en;
@@ -151,10 +144,21 @@ struct qed_ll2_info {
151 enum core_tx_dest tx_dest; 144 enum core_tx_dest tx_dest;
152 enum core_error_handle ai_err_packet_too_big; 145 enum core_error_handle ai_err_packet_too_big;
153 enum core_error_handle ai_err_no_buf; 146 enum core_error_handle ai_err_no_buf;
147 u8 gsi_enable;
148};
149
150struct qed_ll2_info {
151 /* Lock protecting the state of LL2 */
152 struct mutex mutex;
153 struct qed_ll2_conn conn;
154 u32 cid;
155 u8 my_id;
156 u8 queue_id;
157 u8 tx_stats_id;
158 bool b_active;
154 u8 tx_stats_en; 159 u8 tx_stats_en;
155 struct qed_ll2_rx_queue rx_queue; 160 struct qed_ll2_rx_queue rx_queue;
156 struct qed_ll2_tx_queue tx_queue; 161 struct qed_ll2_tx_queue tx_queue;
157 u8 gsi_enable;
158}; 162};
159 163
160/** 164/**
@@ -172,7 +176,7 @@ struct qed_ll2_info {
172 * @return 0 on success, failure otherwise 176 * @return 0 on success, failure otherwise
173 */ 177 */
174int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, 178int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
175 struct qed_ll2_info *p_params, 179 struct qed_ll2_conn *p_params,
176 u16 rx_num_desc, 180 u16 rx_num_desc,
177 u16 tx_num_desc, 181 u16 tx_num_desc,
178 u8 *p_connection_handle); 182 u8 *p_connection_handle);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index bd4cad2b343b..c3c8c5018e93 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -2632,7 +2632,7 @@ static int qed_roce_ll2_start(struct qed_dev *cdev,
2632{ 2632{
2633 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2633 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2634 struct qed_roce_ll2_info *roce_ll2; 2634 struct qed_roce_ll2_info *roce_ll2;
2635 struct qed_ll2_info ll2_params; 2635 struct qed_ll2_conn ll2_params;
2636 int rc; 2636 int rc;
2637 2637
2638 if (!params) { 2638 if (!params) {
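
The qed_ll2 hunks above pull the per-connection parameters out of struct qed_ll2_info into an embedded struct qed_ll2_conn, so qed_ll2_acquire_connection() copies them with one struct assignment instead of eight field-by-field stores, and callers such as qed_ll2_start() and qed_roce_ll2_start() can build the parameter block on the stack. A minimal userspace sketch of that pattern, with simplified stand-in types rather than the driver's real definitions:

    /* Stand-ins for struct qed_ll2_conn / struct qed_ll2_info. */
    #include <stdio.h>
    #include <string.h>

    struct conn_params {             /* plays the role of qed_ll2_conn */
        int conn_type;
        unsigned short mtu;
        unsigned char gsi_enable;
    };

    struct conn_info {               /* plays the role of qed_ll2_info */
        struct conn_params conn;     /* embedded, passed around whole */
        unsigned int cid;
        int b_active;
    };

    static void acquire(struct conn_info *info, const struct conn_params *params)
    {
        memset(info, 0, sizeof(*info));
        info->conn = *params;        /* one assignment copies every parameter */
        info->b_active = 1;
    }

    int main(void)
    {
        struct conn_params p = { .conn_type = 1, .mtu = 1500, .gsi_enable = 0 };
        struct conn_info info;

        acquire(&info, &p);
        printf("type=%d mtu=%u\n", info.conn.conn_type, (unsigned)info.conn.mtu);
        return 0;
    }
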
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 89ac1e3f6175..301f48755093 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -179,6 +179,49 @@ static struct mdiobb_ops bb_ops = {
179 .get_mdio_data = ravb_get_mdio_data, 179 .get_mdio_data = ravb_get_mdio_data,
180}; 180};
181 181
182/* Free TX skb function for AVB-IP */
183static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
184{
185 struct ravb_private *priv = netdev_priv(ndev);
186 struct net_device_stats *stats = &priv->stats[q];
187 struct ravb_tx_desc *desc;
188 int free_num = 0;
189 int entry;
190 u32 size;
191
192 for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
193 bool txed;
194
195 entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
196 NUM_TX_DESC);
197 desc = &priv->tx_ring[q][entry];
198 txed = desc->die_dt == DT_FEMPTY;
199 if (free_txed_only && !txed)
200 break;
201 /* Descriptor type must be checked before all other reads */
202 dma_rmb();
203 size = le16_to_cpu(desc->ds_tagl) & TX_DS;
204 /* Free the original skb. */
205 if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
206 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
207 size, DMA_TO_DEVICE);
208 /* Last packet descriptor? */
209 if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
210 entry /= NUM_TX_DESC;
211 dev_kfree_skb_any(priv->tx_skb[q][entry]);
212 priv->tx_skb[q][entry] = NULL;
213 if (txed)
214 stats->tx_packets++;
215 }
216 free_num++;
217 }
218 if (txed)
219 stats->tx_bytes += size;
220 desc->die_dt = DT_EEMPTY;
221 }
222 return free_num;
223}
224
182/* Free skb's and DMA buffers for Ethernet AVB */ 225/* Free skb's and DMA buffers for Ethernet AVB */
183static void ravb_ring_free(struct net_device *ndev, int q) 226static void ravb_ring_free(struct net_device *ndev, int q)
184{ 227{
@@ -194,19 +237,21 @@ static void ravb_ring_free(struct net_device *ndev, int q)
194 kfree(priv->rx_skb[q]); 237 kfree(priv->rx_skb[q]);
195 priv->rx_skb[q] = NULL; 238 priv->rx_skb[q] = NULL;
196 239
197 /* Free TX skb ringbuffer */
198 if (priv->tx_skb[q]) {
199 for (i = 0; i < priv->num_tx_ring[q]; i++)
200 dev_kfree_skb(priv->tx_skb[q][i]);
201 }
202 kfree(priv->tx_skb[q]);
203 priv->tx_skb[q] = NULL;
204
205 /* Free aligned TX buffers */ 240 /* Free aligned TX buffers */
206 kfree(priv->tx_align[q]); 241 kfree(priv->tx_align[q]);
207 priv->tx_align[q] = NULL; 242 priv->tx_align[q] = NULL;
208 243
209 if (priv->rx_ring[q]) { 244 if (priv->rx_ring[q]) {
245 for (i = 0; i < priv->num_rx_ring[q]; i++) {
246 struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
247
248 if (!dma_mapping_error(ndev->dev.parent,
249 le32_to_cpu(desc->dptr)))
250 dma_unmap_single(ndev->dev.parent,
251 le32_to_cpu(desc->dptr),
252 PKT_BUF_SZ,
253 DMA_FROM_DEVICE);
254 }
210 ring_size = sizeof(struct ravb_ex_rx_desc) * 255 ring_size = sizeof(struct ravb_ex_rx_desc) *
211 (priv->num_rx_ring[q] + 1); 256 (priv->num_rx_ring[q] + 1);
212 dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], 257 dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
@@ -215,12 +260,20 @@ static void ravb_ring_free(struct net_device *ndev, int q)
215 } 260 }
216 261
217 if (priv->tx_ring[q]) { 262 if (priv->tx_ring[q]) {
263 ravb_tx_free(ndev, q, false);
264
218 ring_size = sizeof(struct ravb_tx_desc) * 265 ring_size = sizeof(struct ravb_tx_desc) *
219 (priv->num_tx_ring[q] * NUM_TX_DESC + 1); 266 (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
220 dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], 267 dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
221 priv->tx_desc_dma[q]); 268 priv->tx_desc_dma[q]);
222 priv->tx_ring[q] = NULL; 269 priv->tx_ring[q] = NULL;
223 } 270 }
271
272 /* Free TX skb ringbuffer.
273 * SKBs are freed by ravb_tx_free() call above.
274 */
275 kfree(priv->tx_skb[q]);
276 priv->tx_skb[q] = NULL;
224} 277}
225 278
226/* Format skb and descriptor buffer for Ethernet AVB */ 279/* Format skb and descriptor buffer for Ethernet AVB */
@@ -431,44 +484,6 @@ static int ravb_dmac_init(struct net_device *ndev)
431 return 0; 484 return 0;
432} 485}
433 486
434/* Free TX skb function for AVB-IP */
435static int ravb_tx_free(struct net_device *ndev, int q)
436{
437 struct ravb_private *priv = netdev_priv(ndev);
438 struct net_device_stats *stats = &priv->stats[q];
439 struct ravb_tx_desc *desc;
440 int free_num = 0;
441 int entry;
442 u32 size;
443
444 for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
445 entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
446 NUM_TX_DESC);
447 desc = &priv->tx_ring[q][entry];
448 if (desc->die_dt != DT_FEMPTY)
449 break;
450 /* Descriptor type must be checked before all other reads */
451 dma_rmb();
452 size = le16_to_cpu(desc->ds_tagl) & TX_DS;
453 /* Free the original skb. */
454 if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
455 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
456 size, DMA_TO_DEVICE);
457 /* Last packet descriptor? */
458 if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
459 entry /= NUM_TX_DESC;
460 dev_kfree_skb_any(priv->tx_skb[q][entry]);
461 priv->tx_skb[q][entry] = NULL;
462 stats->tx_packets++;
463 }
464 free_num++;
465 }
466 stats->tx_bytes += size;
467 desc->die_dt = DT_EEMPTY;
468 }
469 return free_num;
470}
471
472static void ravb_get_tx_tstamp(struct net_device *ndev) 487static void ravb_get_tx_tstamp(struct net_device *ndev)
473{ 488{
474 struct ravb_private *priv = netdev_priv(ndev); 489 struct ravb_private *priv = netdev_priv(ndev);
@@ -902,7 +917,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
902 spin_lock_irqsave(&priv->lock, flags); 917 spin_lock_irqsave(&priv->lock, flags);
903 /* Clear TX interrupt */ 918 /* Clear TX interrupt */
904 ravb_write(ndev, ~mask, TIS); 919 ravb_write(ndev, ~mask, TIS);
905 ravb_tx_free(ndev, q); 920 ravb_tx_free(ndev, q, true);
906 netif_wake_subqueue(ndev, q); 921 netif_wake_subqueue(ndev, q);
907 mmiowb(); 922 mmiowb();
908 spin_unlock_irqrestore(&priv->lock, flags); 923 spin_unlock_irqrestore(&priv->lock, flags);
@@ -1567,7 +1582,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1567 1582
1568 priv->cur_tx[q] += NUM_TX_DESC; 1583 priv->cur_tx[q] += NUM_TX_DESC;
1569 if (priv->cur_tx[q] - priv->dirty_tx[q] > 1584 if (priv->cur_tx[q] - priv->dirty_tx[q] >
1570 (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q)) 1585 (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
1586 !ravb_tx_free(ndev, q, true))
1571 netif_stop_subqueue(ndev, q); 1587 netif_stop_subqueue(ndev, q);
1572 1588
1573exit: 1589exit:
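
The relocated ravb_tx_free() keeps the driver's descriptor-ring arithmetic: each skb spans NUM_TX_DESC consecutive descriptors, the ring index wraps modulo num_tx_ring[q] * NUM_TX_DESC, the skb slot is recovered by dividing the entry by NUM_TX_DESC, and the skb is freed only on its group's last descriptor. A standalone sketch of that index math, with NUM_TX_DESC = 2 as in the driver and an arbitrary ring size:

    #include <stdio.h>

    #define NUM_TX_DESC 2

    int main(void)
    {
        unsigned int num_tx_ring = 4;                 /* ring entries (skbs) */
        unsigned int ring_len = num_tx_ring * NUM_TX_DESC;
        unsigned int dirty_tx;

        for (dirty_tx = 6; dirty_tx < 10; dirty_tx++) {
            unsigned int entry = dirty_tx % ring_len;
            int last = (entry % NUM_TX_DESC == NUM_TX_DESC - 1);

            printf("dirty=%u entry=%u skb_slot=%u free_skb=%s\n",
                   dirty_tx, entry, entry / NUM_TX_DESC,
                   last ? "yes" : "no");
        }
        return 0;
    }
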
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 8b6810bad54b..99d3df788ce8 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -69,7 +69,6 @@ struct gtp_dev {
69 struct socket *sock0; 69 struct socket *sock0;
70 struct socket *sock1u; 70 struct socket *sock1u;
71 71
72 struct net *net;
73 struct net_device *dev; 72 struct net_device *dev;
74 73
75 unsigned int hash_size; 74 unsigned int hash_size;
@@ -316,7 +315,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
316 315
317 netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk); 316 netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
318 317
319 xnet = !net_eq(gtp->net, dev_net(gtp->dev)); 318 xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
320 319
321 switch (udp_sk(sk)->encap_type) { 320 switch (udp_sk(sk)->encap_type) {
322 case UDP_ENCAP_GTP0: 321 case UDP_ENCAP_GTP0:
@@ -612,7 +611,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
612 pktinfo.fl4.saddr, pktinfo.fl4.daddr, 611 pktinfo.fl4.saddr, pktinfo.fl4.daddr,
613 pktinfo.iph->tos, 612 pktinfo.iph->tos,
614 ip4_dst_hoplimit(&pktinfo.rt->dst), 613 ip4_dst_hoplimit(&pktinfo.rt->dst),
615 htons(IP_DF), 614 0,
616 pktinfo.gtph_port, pktinfo.gtph_port, 615 pktinfo.gtph_port, pktinfo.gtph_port,
617 true, false); 616 true, false);
618 break; 617 break;
@@ -658,7 +657,7 @@ static void gtp_link_setup(struct net_device *dev)
658static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); 657static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
659static void gtp_hashtable_free(struct gtp_dev *gtp); 658static void gtp_hashtable_free(struct gtp_dev *gtp);
660static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, 659static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
661 int fd_gtp0, int fd_gtp1, struct net *src_net); 660 int fd_gtp0, int fd_gtp1);
662 661
663static int gtp_newlink(struct net *src_net, struct net_device *dev, 662static int gtp_newlink(struct net *src_net, struct net_device *dev,
664 struct nlattr *tb[], struct nlattr *data[]) 663 struct nlattr *tb[], struct nlattr *data[])
@@ -675,7 +674,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
675 fd0 = nla_get_u32(data[IFLA_GTP_FD0]); 674 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
676 fd1 = nla_get_u32(data[IFLA_GTP_FD1]); 675 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
677 676
678 err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net); 677 err = gtp_encap_enable(dev, gtp, fd0, fd1);
679 if (err < 0) 678 if (err < 0)
680 goto out_err; 679 goto out_err;
681 680
@@ -821,7 +820,7 @@ static void gtp_hashtable_free(struct gtp_dev *gtp)
821} 820}
822 821
823static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, 822static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
824 int fd_gtp0, int fd_gtp1, struct net *src_net) 823 int fd_gtp0, int fd_gtp1)
825{ 824{
826 struct udp_tunnel_sock_cfg tuncfg = {NULL}; 825 struct udp_tunnel_sock_cfg tuncfg = {NULL};
827 struct socket *sock0, *sock1u; 826 struct socket *sock0, *sock1u;
@@ -858,7 +857,6 @@ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
858 857
859 gtp->sock0 = sock0; 858 gtp->sock0 = sock0;
860 gtp->sock1u = sock1u; 859 gtp->sock1u = sock1u;
861 gtp->net = src_net;
862 860
863 tuncfg.sk_user_data = gtp; 861 tuncfg.sk_user_data = gtp;
864 tuncfg.encap_rcv = gtp_encap_recv; 862 tuncfg.encap_rcv = gtp_encap_recv;
@@ -1376,3 +1374,4 @@ MODULE_LICENSE("GPL");
1376MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>"); 1374MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
1377MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic"); 1375MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
1378MODULE_ALIAS_RTNL_LINK("gtp"); 1376MODULE_ALIAS_RTNL_LINK("gtp");
1377MODULE_ALIAS_GENL_FAMILY("gtp");
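
The gtp change drops the cached gtp->net pointer and derives the cross-namespace check from the socket itself: xnet = !net_eq(sock_net(sk), dev_net(gtp->dev)). A toy model of that comparison, with network namespaces reduced to plain ints purely for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    struct sock { int net; };             /* sock_net(sk) */
    struct ndev { int net; };             /* dev_net(dev) */

    static bool net_eq(int a, int b) { return a == b; }

    static bool is_xnet(const struct sock *sk, const struct ndev *dev)
    {
        return !net_eq(sk->net, dev->net);
    }

    int main(void)
    {
        struct sock sk = { .net = 1 };
        struct ndev dev = { .net = 2 };

        printf("xnet=%d\n", is_xnet(&sk, &dev));  /* 1: crosses namespaces */
        sk.net = 2;
        printf("xnet=%d\n", is_xnet(&sk, &dev));  /* 0: same namespace */
        return 0;
    }
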
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 5c26653eceb5..402618565838 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -825,7 +825,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
825 return -EINVAL; 825 return -EINVAL;
826 826
827 if (virtio_net_hdr_from_skb(skb, &vnet_hdr, 827 if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
828 macvtap_is_little_endian(q))) 828 macvtap_is_little_endian(q), true))
829 BUG(); 829 BUG();
830 830
831 if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) != 831 if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index e741bf614c4e..b0492ef2cdaa 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -21,6 +21,23 @@ MODULE_DESCRIPTION("Broadcom 63xx internal PHY driver");
21MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); 21MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
22MODULE_LICENSE("GPL"); 22MODULE_LICENSE("GPL");
23 23
24static int bcm63xx_config_intr(struct phy_device *phydev)
25{
26 int reg, err;
27
28 reg = phy_read(phydev, MII_BCM63XX_IR);
29 if (reg < 0)
30 return reg;
31
32 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
33 reg &= ~MII_BCM63XX_IR_GMASK;
34 else
35 reg |= MII_BCM63XX_IR_GMASK;
36
37 err = phy_write(phydev, MII_BCM63XX_IR, reg);
38 return err;
39}
40
24static int bcm63xx_config_init(struct phy_device *phydev) 41static int bcm63xx_config_init(struct phy_device *phydev)
25{ 42{
26 int reg, err; 43 int reg, err;
@@ -55,7 +72,7 @@ static struct phy_driver bcm63xx_driver[] = {
55 .config_aneg = genphy_config_aneg, 72 .config_aneg = genphy_config_aneg,
56 .read_status = genphy_read_status, 73 .read_status = genphy_read_status,
57 .ack_interrupt = bcm_phy_ack_intr, 74 .ack_interrupt = bcm_phy_ack_intr,
58 .config_intr = bcm_phy_config_intr, 75 .config_intr = bcm63xx_config_intr,
59}, { 76}, {
60 /* same phy as above, with just a different OUI */ 77 /* same phy as above, with just a different OUI */
61 .phy_id = 0x002bdc00, 78 .phy_id = 0x002bdc00,
@@ -67,7 +84,7 @@ static struct phy_driver bcm63xx_driver[] = {
67 .config_aneg = genphy_config_aneg, 84 .config_aneg = genphy_config_aneg,
68 .read_status = genphy_read_status, 85 .read_status = genphy_read_status,
69 .ack_interrupt = bcm_phy_ack_intr, 86 .ack_interrupt = bcm_phy_ack_intr,
70 .config_intr = bcm_phy_config_intr, 87 .config_intr = bcm63xx_config_intr,
71} }; 88} };
72 89
73module_phy_driver(bcm63xx_driver); 90module_phy_driver(bcm63xx_driver);
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 800b39f06279..a10d0e7fc5f7 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -17,6 +17,7 @@
17#include <linux/phy.h> 17#include <linux/phy.h>
18 18
19#define TI_DP83848C_PHY_ID 0x20005ca0 19#define TI_DP83848C_PHY_ID 0x20005ca0
20#define TI_DP83620_PHY_ID 0x20005ce0
20#define NS_DP83848C_PHY_ID 0x20005c90 21#define NS_DP83848C_PHY_ID 0x20005c90
21#define TLK10X_PHY_ID 0x2000a210 22#define TLK10X_PHY_ID 0x2000a210
22#define TI_DP83822_PHY_ID 0x2000a240 23#define TI_DP83822_PHY_ID 0x2000a240
@@ -77,6 +78,7 @@ static int dp83848_config_intr(struct phy_device *phydev)
77static struct mdio_device_id __maybe_unused dp83848_tbl[] = { 78static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
78 { TI_DP83848C_PHY_ID, 0xfffffff0 }, 79 { TI_DP83848C_PHY_ID, 0xfffffff0 },
79 { NS_DP83848C_PHY_ID, 0xfffffff0 }, 80 { NS_DP83848C_PHY_ID, 0xfffffff0 },
81 { TI_DP83620_PHY_ID, 0xfffffff0 },
80 { TLK10X_PHY_ID, 0xfffffff0 }, 82 { TLK10X_PHY_ID, 0xfffffff0 },
81 { TI_DP83822_PHY_ID, 0xfffffff0 }, 83 { TI_DP83822_PHY_ID, 0xfffffff0 },
82 { } 84 { }
@@ -106,6 +108,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
106static struct phy_driver dp83848_driver[] = { 108static struct phy_driver dp83848_driver[] = {
107 DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"), 109 DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
108 DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"), 110 DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
111 DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
109 DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"), 112 DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
110 DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"), 113 DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
111}; 114};
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index b5b73ff4329a..a3e3733813a7 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -2094,6 +2094,8 @@ static struct phy_driver marvell_drivers[] = {
2094 .ack_interrupt = &marvell_ack_interrupt, 2094 .ack_interrupt = &marvell_ack_interrupt,
2095 .config_intr = &marvell_config_intr, 2095 .config_intr = &marvell_config_intr,
2096 .did_interrupt = &m88e1121_did_interrupt, 2096 .did_interrupt = &m88e1121_did_interrupt,
2097 .get_wol = &m88e1318_get_wol,
2098 .set_wol = &m88e1318_set_wol,
2097 .resume = &marvell_resume, 2099 .resume = &marvell_resume,
2098 .suspend = &marvell_suspend, 2100 .suspend = &marvell_suspend,
2099 .get_sset_count = marvell_get_sset_count, 2101 .get_sset_count = marvell_get_sset_count,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 9a77289109b7..e55809c5beb7 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1008,6 +1008,20 @@ static struct phy_driver ksphy_driver[] = {
1008 .get_stats = kszphy_get_stats, 1008 .get_stats = kszphy_get_stats,
1009 .suspend = genphy_suspend, 1009 .suspend = genphy_suspend,
1010 .resume = genphy_resume, 1010 .resume = genphy_resume,
1011}, {
1012 .phy_id = PHY_ID_KSZ8795,
1013 .phy_id_mask = MICREL_PHY_ID_MASK,
1014 .name = "Micrel KSZ8795",
1015 .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
1016 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
1017 .config_init = kszphy_config_init,
1018 .config_aneg = ksz8873mll_config_aneg,
1019 .read_status = ksz8873mll_read_status,
1020 .get_sset_count = kszphy_get_sset_count,
1021 .get_strings = kszphy_get_strings,
1022 .get_stats = kszphy_get_stats,
1023 .suspend = genphy_suspend,
1024 .resume = genphy_resume,
1011} }; 1025} };
1012 1026
1013module_phy_driver(ksphy_driver); 1027module_phy_driver(ksphy_driver);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 48da6e93c3f7..7cc1b7dcfe05 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -29,6 +29,7 @@
29#include <linux/mii.h> 29#include <linux/mii.h>
30#include <linux/ethtool.h> 30#include <linux/ethtool.h>
31#include <linux/phy.h> 31#include <linux/phy.h>
32#include <linux/phy_led_triggers.h>
32#include <linux/timer.h> 33#include <linux/timer.h>
33#include <linux/workqueue.h> 34#include <linux/workqueue.h>
34#include <linux/mdio.h> 35#include <linux/mdio.h>
@@ -649,14 +650,18 @@ void phy_start_machine(struct phy_device *phydev)
649 * phy_trigger_machine - trigger the state machine to run 650 * phy_trigger_machine - trigger the state machine to run
650 * 651 *
651 * @phydev: the phy_device struct 652 * @phydev: the phy_device struct
653 * @sync: indicate whether we should wait for the workqueue cancelation
652 * 654 *
653 * Description: There has been a change in state which requires that the 655 * Description: There has been a change in state which requires that the
654 * state machine runs. 656 * state machine runs.
655 */ 657 */
656 658
657static void phy_trigger_machine(struct phy_device *phydev) 659static void phy_trigger_machine(struct phy_device *phydev, bool sync)
658{ 660{
659 cancel_delayed_work_sync(&phydev->state_queue); 661 if (sync)
662 cancel_delayed_work_sync(&phydev->state_queue);
663 else
664 cancel_delayed_work(&phydev->state_queue);
660 queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0); 665 queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
661} 666}
662 667
@@ -693,7 +698,7 @@ static void phy_error(struct phy_device *phydev)
693 phydev->state = PHY_HALTED; 698 phydev->state = PHY_HALTED;
694 mutex_unlock(&phydev->lock); 699 mutex_unlock(&phydev->lock);
695 700
696 phy_trigger_machine(phydev); 701 phy_trigger_machine(phydev, false);
697} 702}
698 703
699/** 704/**
@@ -840,7 +845,7 @@ void phy_change(struct phy_device *phydev)
840 } 845 }
841 846
842 /* reschedule state queue work to run as soon as possible */ 847 /* reschedule state queue work to run as soon as possible */
843 phy_trigger_machine(phydev); 848 phy_trigger_machine(phydev, true);
844 return; 849 return;
845 850
846ignore: 851ignore:
@@ -942,7 +947,7 @@ void phy_start(struct phy_device *phydev)
942 if (do_resume) 947 if (do_resume)
943 phy_resume(phydev); 948 phy_resume(phydev);
944 949
945 phy_trigger_machine(phydev); 950 phy_trigger_machine(phydev, true);
946} 951}
947EXPORT_SYMBOL(phy_start); 952EXPORT_SYMBOL(phy_start);
948 953
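
The phy.c hunks thread a sync flag through phy_trigger_machine(): callers in process context (phy_start(), phy_change()) may block on cancel_delayed_work_sync(), while phy_error(), which can be reached from the state machine's own path, must use the non-blocking cancel to avoid waiting on itself. A toy model of the control flow, with the workqueue calls replaced by logging stubs:

    #include <stdbool.h>
    #include <stdio.h>

    static void cancel_delayed_work_sync_stub(void) { puts("cancel (wait)"); }
    static void cancel_delayed_work_stub(void)      { puts("cancel (no wait)"); }
    static void queue_delayed_work_stub(void)       { puts("queue state machine"); }

    static void trigger_machine(bool sync)
    {
        if (sync)
            cancel_delayed_work_sync_stub();  /* safe in process context */
        else
            cancel_delayed_work_stub();       /* safe from the work item itself */
        queue_delayed_work_stub();
    }

    int main(void)
    {
        trigger_machine(true);   /* phy_start()/phy_change() */
        trigger_machine(false);  /* phy_error() */
        return 0;
    }
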
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index fa62bdf2f526..94ca42e630bb 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -12,6 +12,7 @@
12 */ 12 */
13#include <linux/leds.h> 13#include <linux/leds.h>
14#include <linux/phy.h> 14#include <linux/phy.h>
15#include <linux/phy_led_triggers.h>
15#include <linux/netdevice.h> 16#include <linux/netdevice.h>
16 17
17static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy, 18static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy,
@@ -102,8 +103,10 @@ int phy_led_triggers_register(struct phy_device *phy)
102 sizeof(struct phy_led_trigger) * 103 sizeof(struct phy_led_trigger) *
103 phy->phy_num_led_triggers, 104 phy->phy_num_led_triggers,
104 GFP_KERNEL); 105 GFP_KERNEL);
105 if (!phy->phy_led_triggers) 106 if (!phy->phy_led_triggers) {
106 return -ENOMEM; 107 err = -ENOMEM;
108 goto out_clear;
109 }
107 110
108 for (i = 0; i < phy->phy_num_led_triggers; i++) { 111 for (i = 0; i < phy->phy_num_led_triggers; i++) {
109 err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i], 112 err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i],
@@ -120,6 +123,8 @@ out_unreg:
120 while (i--) 123 while (i--)
121 phy_led_trigger_unregister(&phy->phy_led_triggers[i]); 124 phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
122 devm_kfree(&phy->mdio.dev, phy->phy_led_triggers); 125 devm_kfree(&phy->mdio.dev, phy->phy_led_triggers);
126out_clear:
127 phy->phy_num_led_triggers = 0;
123 return err; 128 return err;
124} 129}
125EXPORT_SYMBOL_GPL(phy_led_triggers_register); 130EXPORT_SYMBOL_GPL(phy_led_triggers_register);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 13890ac3cb37..8a7d6b905362 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1396,7 +1396,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1396 return -EINVAL; 1396 return -EINVAL;
1397 1397
1398 if (virtio_net_hdr_from_skb(skb, &gso, 1398 if (virtio_net_hdr_from_skb(skb, &gso,
1399 tun_is_little_endian(tun))) { 1399 tun_is_little_endian(tun), true)) {
1400 struct skb_shared_info *sinfo = skb_shinfo(skb); 1400 struct skb_shared_info *sinfo = skb_shinfo(skb);
1401 pr_err("unexpected GSO type: " 1401 pr_err("unexpected GSO type: "
1402 "0x%x, gso_size %d, hdr_len %d\n", 1402 "0x%x, gso_size %d, hdr_len %d\n",
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 620ba8e530b5..f5552aaaa77a 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -531,6 +531,7 @@ static const struct driver_info wwan_info = {
531#define SAMSUNG_VENDOR_ID 0x04e8 531#define SAMSUNG_VENDOR_ID 0x04e8
532#define LENOVO_VENDOR_ID 0x17ef 532#define LENOVO_VENDOR_ID 0x17ef
533#define NVIDIA_VENDOR_ID 0x0955 533#define NVIDIA_VENDOR_ID 0x0955
534#define HP_VENDOR_ID 0x03f0
534 535
535static const struct usb_device_id products[] = { 536static const struct usb_device_id products[] = {
536/* BLACKLIST !! 537/* BLACKLIST !!
@@ -677,6 +678,13 @@ static const struct usb_device_id products[] = {
677 .driver_info = 0, 678 .driver_info = 0,
678}, 679},
679 680
681/* HP lt2523 (Novatel E371) - handled by qmi_wwan */
682{
683 USB_DEVICE_AND_INTERFACE_INFO(HP_VENDOR_ID, 0x421d, USB_CLASS_COMM,
684 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
685 .driver_info = 0,
686},
687
680/* AnyDATA ADU960S - handled by qmi_wwan */ 688/* AnyDATA ADU960S - handled by qmi_wwan */
681{ 689{
682 USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM, 690 USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6fe1cdb0174f..24d5272cdce5 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -654,6 +654,13 @@ static const struct usb_device_id products[] = {
654 USB_CDC_PROTO_NONE), 654 USB_CDC_PROTO_NONE),
655 .driver_info = (unsigned long)&qmi_wwan_info, 655 .driver_info = (unsigned long)&qmi_wwan_info,
656 }, 656 },
657 { /* HP lt2523 (Novatel E371) */
658 USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d,
659 USB_CLASS_COMM,
660 USB_CDC_SUBCLASS_ETHERNET,
661 USB_CDC_PROTO_NONE),
662 .driver_info = (unsigned long)&qmi_wwan_info,
663 },
657 { /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ 664 { /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
658 USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), 665 USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
659 .driver_info = (unsigned long)&qmi_wwan_info, 666 .driver_info = (unsigned long)&qmi_wwan_info,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d59d7737708b..986243c932cc 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
32#define NETNEXT_VERSION "08" 32#define NETNEXT_VERSION "08"
33 33
34/* Information for net */ 34/* Information for net */
35#define NET_VERSION "6" 35#define NET_VERSION "8"
36 36
37#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION 37#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
38#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" 38#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -1936,6 +1936,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
1936 napi_complete(napi); 1936 napi_complete(napi);
1937 if (!list_empty(&tp->rx_done)) 1937 if (!list_empty(&tp->rx_done))
1938 napi_schedule(napi); 1938 napi_schedule(napi);
1939 else if (!skb_queue_empty(&tp->tx_queue) &&
1940 !list_empty(&tp->tx_free))
1941 napi_schedule(napi);
1939 } 1942 }
1940 1943
1941 return work_done; 1944 return work_done;
@@ -3155,10 +3158,13 @@ static void set_carrier(struct r8152 *tp)
3155 if (!netif_carrier_ok(netdev)) { 3158 if (!netif_carrier_ok(netdev)) {
3156 tp->rtl_ops.enable(tp); 3159 tp->rtl_ops.enable(tp);
3157 set_bit(RTL8152_SET_RX_MODE, &tp->flags); 3160 set_bit(RTL8152_SET_RX_MODE, &tp->flags);
3161 netif_stop_queue(netdev);
3158 napi_disable(&tp->napi); 3162 napi_disable(&tp->napi);
3159 netif_carrier_on(netdev); 3163 netif_carrier_on(netdev);
3160 rtl_start_rx(tp); 3164 rtl_start_rx(tp);
3161 napi_enable(&tp->napi); 3165 napi_enable(&tp->napi);
3166 netif_wake_queue(netdev);
3167 netif_info(tp, link, netdev, "carrier on\n");
3162 } 3168 }
3163 } else { 3169 } else {
3164 if (netif_carrier_ok(netdev)) { 3170 if (netif_carrier_ok(netdev)) {
@@ -3166,6 +3172,7 @@ static void set_carrier(struct r8152 *tp)
3166 napi_disable(&tp->napi); 3172 napi_disable(&tp->napi);
3167 tp->rtl_ops.disable(tp); 3173 tp->rtl_ops.disable(tp);
3168 napi_enable(&tp->napi); 3174 napi_enable(&tp->napi);
3175 netif_info(tp, link, netdev, "carrier off\n");
3169 } 3176 }
3170 } 3177 }
3171} 3178}
@@ -3515,12 +3522,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
3515 if (!netif_running(netdev)) 3522 if (!netif_running(netdev))
3516 return 0; 3523 return 0;
3517 3524
3525 netif_stop_queue(netdev);
3518 napi_disable(&tp->napi); 3526 napi_disable(&tp->napi);
3519 clear_bit(WORK_ENABLE, &tp->flags); 3527 clear_bit(WORK_ENABLE, &tp->flags);
3520 usb_kill_urb(tp->intr_urb); 3528 usb_kill_urb(tp->intr_urb);
3521 cancel_delayed_work_sync(&tp->schedule); 3529 cancel_delayed_work_sync(&tp->schedule);
3522 if (netif_carrier_ok(netdev)) { 3530 if (netif_carrier_ok(netdev)) {
3523 netif_stop_queue(netdev);
3524 mutex_lock(&tp->control); 3531 mutex_lock(&tp->control);
3525 tp->rtl_ops.disable(tp); 3532 tp->rtl_ops.disable(tp);
3526 mutex_unlock(&tp->control); 3533 mutex_unlock(&tp->control);
@@ -3545,12 +3552,17 @@ static int rtl8152_post_reset(struct usb_interface *intf)
3545 if (netif_carrier_ok(netdev)) { 3552 if (netif_carrier_ok(netdev)) {
3546 mutex_lock(&tp->control); 3553 mutex_lock(&tp->control);
3547 tp->rtl_ops.enable(tp); 3554 tp->rtl_ops.enable(tp);
3555 rtl_start_rx(tp);
3548 rtl8152_set_rx_mode(netdev); 3556 rtl8152_set_rx_mode(netdev);
3549 mutex_unlock(&tp->control); 3557 mutex_unlock(&tp->control);
3550 netif_wake_queue(netdev);
3551 } 3558 }
3552 3559
3553 napi_enable(&tp->napi); 3560 napi_enable(&tp->napi);
3561 netif_wake_queue(netdev);
3562 usb_submit_urb(tp->intr_urb, GFP_KERNEL);
3563
3564 if (!list_empty(&tp->rx_done))
3565 napi_schedule(&tp->napi);
3554 3566
3555 return 0; 3567 return 0;
3556} 3568}
@@ -3572,6 +3584,8 @@ static bool delay_autosuspend(struct r8152 *tp)
3572 */ 3584 */
3573 if (!sw_linking && tp->rtl_ops.in_nway(tp)) 3585 if (!sw_linking && tp->rtl_ops.in_nway(tp))
3574 return true; 3586 return true;
3587 else if (!skb_queue_empty(&tp->tx_queue))
3588 return true;
3575 else 3589 else
3576 return false; 3590 return false;
3577} 3591}
@@ -3581,10 +3595,15 @@ static int rtl8152_runtime_suspend(struct r8152 *tp)
3581 struct net_device *netdev = tp->netdev; 3595 struct net_device *netdev = tp->netdev;
3582 int ret = 0; 3596 int ret = 0;
3583 3597
3598 set_bit(SELECTIVE_SUSPEND, &tp->flags);
3599 smp_mb__after_atomic();
3600
3584 if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) { 3601 if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
3585 u32 rcr = 0; 3602 u32 rcr = 0;
3586 3603
3587 if (delay_autosuspend(tp)) { 3604 if (delay_autosuspend(tp)) {
3605 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3606 smp_mb__after_atomic();
3588 ret = -EBUSY; 3607 ret = -EBUSY;
3589 goto out1; 3608 goto out1;
3590 } 3609 }
@@ -3601,6 +3620,8 @@ static int rtl8152_runtime_suspend(struct r8152 *tp)
3601 if (!(ocp_data & RXFIFO_EMPTY)) { 3620 if (!(ocp_data & RXFIFO_EMPTY)) {
3602 rxdy_gated_en(tp, false); 3621 rxdy_gated_en(tp, false);
3603 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr); 3622 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
3623 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3624 smp_mb__after_atomic();
3604 ret = -EBUSY; 3625 ret = -EBUSY;
3605 goto out1; 3626 goto out1;
3606 } 3627 }
@@ -3620,8 +3641,6 @@ static int rtl8152_runtime_suspend(struct r8152 *tp)
3620 } 3641 }
3621 } 3642 }
3622 3643
3623 set_bit(SELECTIVE_SUSPEND, &tp->flags);
3624
3625out1: 3644out1:
3626 return ret; 3645 return ret;
3627} 3646}
@@ -3677,12 +3696,15 @@ static int rtl8152_resume(struct usb_interface *intf)
3677 if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) { 3696 if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
3678 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3697 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3679 tp->rtl_ops.autosuspend_en(tp, false); 3698 tp->rtl_ops.autosuspend_en(tp, false);
3680 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3681 napi_disable(&tp->napi); 3699 napi_disable(&tp->napi);
3682 set_bit(WORK_ENABLE, &tp->flags); 3700 set_bit(WORK_ENABLE, &tp->flags);
3683 if (netif_carrier_ok(tp->netdev)) 3701 if (netif_carrier_ok(tp->netdev))
3684 rtl_start_rx(tp); 3702 rtl_start_rx(tp);
3685 napi_enable(&tp->napi); 3703 napi_enable(&tp->napi);
3704 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3705 smp_mb__after_atomic();
3706 if (!list_empty(&tp->rx_done))
3707 napi_schedule(&tp->napi);
3686 } else { 3708 } else {
3687 tp->rtl_ops.up(tp); 3709 tp->rtl_ops.up(tp);
3688 netif_carrier_off(tp->netdev); 3710 netif_carrier_off(tp->netdev);
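
The r8152 suspend hunks move the SELECTIVE_SUSPEND bit set ahead of the queue checks and pair every transition with smp_mb__after_atomic(), so a concurrent transmit path testing the bit observes it before the suspend decision completes. A userspace approximation using C11 atomics; atomic_thread_fence() stands in for the kernel barrier, whose exact semantics are architecture-specific:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool selective_suspend;
    static atomic_int tx_queue_len;

    static int runtime_suspend(void)
    {
        atomic_store(&selective_suspend, true);
        atomic_thread_fence(memory_order_seq_cst); /* smp_mb__after_atomic() */

        if (atomic_load(&tx_queue_len) > 0) {      /* delay_autosuspend() */
            atomic_store(&selective_suspend, false);
            atomic_thread_fence(memory_order_seq_cst);
            return -16;                            /* -EBUSY */
        }
        return 0;
    }

    int main(void)
    {
        atomic_store(&tx_queue_len, 3);
        printf("suspend -> %d\n", runtime_suspend()); /* -16: tx pending */
        atomic_store(&tx_queue_len, 0);
        printf("suspend -> %d\n", runtime_suspend()); /* 0: may suspend */
        return 0;
    }
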
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f9bf94887ff1..bd22cf306a92 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -49,8 +49,16 @@ module_param(gso, bool, 0444);
49 */ 49 */
50DECLARE_EWMA(pkt_len, 1, 64) 50DECLARE_EWMA(pkt_len, 1, 64)
51 51
52/* With mergeable buffers we align buffer address and use the low bits to
53 * encode its true size. Buffer size is up to 1 page so we need to align to
54 * square root of page size to ensure we reserve enough bits to encode the true
55 * size.
56 */
57#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
58
52/* Minimum alignment for mergeable packet buffers. */ 59/* Minimum alignment for mergeable packet buffers. */
53#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256) 60#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
61 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
54 62
55#define VIRTNET_DRIVER_VERSION "1.0.0" 63#define VIRTNET_DRIVER_VERSION "1.0.0"
56 64
@@ -1110,7 +1118,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
1110 hdr = skb_vnet_hdr(skb); 1118 hdr = skb_vnet_hdr(skb);
1111 1119
1112 if (virtio_net_hdr_from_skb(skb, &hdr->hdr, 1120 if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
1113 virtio_is_little_endian(vi->vdev))) 1121 virtio_is_little_endian(vi->vdev), false))
1114 BUG(); 1122 BUG();
1115 1123
1116 if (vi->mergeable_rx_bufs) 1124 if (vi->mergeable_rx_bufs)
@@ -1710,6 +1718,11 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
1710 u16 xdp_qp = 0, curr_qp; 1718 u16 xdp_qp = 0, curr_qp;
1711 int i, err; 1719 int i, err;
1712 1720
1721 if (prog && prog->xdp_adjust_head) {
1722 netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n");
1723 return -EOPNOTSUPP;
1724 }
1725
1713 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || 1726 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
1714 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || 1727 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
1715 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || 1728 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
@@ -1893,8 +1906,12 @@ static void free_receive_page_frags(struct virtnet_info *vi)
1893 put_page(vi->rq[i].alloc_frag.page); 1906 put_page(vi->rq[i].alloc_frag.page);
1894} 1907}
1895 1908
1896static bool is_xdp_queue(struct virtnet_info *vi, int q) 1909static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
1897{ 1910{
1911 /* For small receive mode always use kfree_skb variants */
1912 if (!vi->mergeable_rx_bufs)
1913 return false;
1914
1898 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) 1915 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
1899 return false; 1916 return false;
1900 else if (q < vi->curr_queue_pairs) 1917 else if (q < vi->curr_queue_pairs)
@@ -1911,7 +1928,7 @@ static void free_unused_bufs(struct virtnet_info *vi)
1911 for (i = 0; i < vi->max_queue_pairs; i++) { 1928 for (i = 0; i < vi->max_queue_pairs; i++) {
1912 struct virtqueue *vq = vi->sq[i].vq; 1929 struct virtqueue *vq = vi->sq[i].vq;
1913 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { 1930 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
1914 if (!is_xdp_queue(vi, i)) 1931 if (!is_xdp_raw_buffer_queue(vi, i))
1915 dev_kfree_skb(buf); 1932 dev_kfree_skb(buf);
1916 else 1933 else
1917 put_page(virt_to_head_page(buf)); 1934 put_page(virt_to_head_page(buf));
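
The alignment comment added to virtio_net encodes a small invariant: aligning mergeable buffers to 2^ceil(PAGE_SHIFT/2) frees ceil(PAGE_SHIFT/2) low address bits, while a true size of up to one page, counted in align-sized units, needs only PAGE_SHIFT - ceil(PAGE_SHIFT/2) bits, which never exceeds what was freed. Checking the arithmetic standalone (the 64-byte cache line is an assumption for the demo):

    #include <stdio.h>

    #define L1_CACHE_BYTES 64
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        int page_shifts[] = { 12, 14, 16 };  /* 4K, 16K, 64K pages */

        for (int i = 0; i < 3; i++) {
            int ps = page_shifts[i];
            int min_shift = (ps + 1) / 2;    /* ceil(PAGE_SHIFT / 2) */
            long align = MAX(L1_CACHE_BYTES, 1L << min_shift);

            printf("PAGE_SHIFT=%d min_align=%ld size_bits_needed=%d bits_freed=%d\n",
                   ps, align, ps - min_shift, min_shift);
        }
        return 0;
    }
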
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 19b1653e1bd6..2e48ce22eabf 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2268,7 +2268,7 @@ static void vxlan_cleanup(unsigned long arg)
2268 = container_of(p, struct vxlan_fdb, hlist); 2268 = container_of(p, struct vxlan_fdb, hlist);
2269 unsigned long timeout; 2269 unsigned long timeout;
2270 2270
2271 if (f->state & NUD_PERMANENT) 2271 if (f->state & (NUD_PERMANENT | NUD_NOARP))
2272 continue; 2272 continue;
2273 2273
2274 timeout = f->used + vxlan->cfg.age_interval * HZ; 2274 timeout = f->used + vxlan->cfg.age_interval * HZ;
@@ -2354,7 +2354,7 @@ static int vxlan_open(struct net_device *dev)
2354} 2354}
2355 2355
2356/* Purge the forwarding table */ 2356/* Purge the forwarding table */
2357static void vxlan_flush(struct vxlan_dev *vxlan) 2357static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
2358{ 2358{
2359 unsigned int h; 2359 unsigned int h;
2360 2360
@@ -2364,6 +2364,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
2364 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { 2364 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2365 struct vxlan_fdb *f 2365 struct vxlan_fdb *f
2366 = container_of(p, struct vxlan_fdb, hlist); 2366 = container_of(p, struct vxlan_fdb, hlist);
2367 if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
2368 continue;
2367 /* the all_zeros_mac entry is deleted at vxlan_uninit */ 2369 /* the all_zeros_mac entry is deleted at vxlan_uninit */
2368 if (!is_zero_ether_addr(f->eth_addr)) 2370 if (!is_zero_ether_addr(f->eth_addr))
2369 vxlan_fdb_destroy(vxlan, f); 2371 vxlan_fdb_destroy(vxlan, f);
@@ -2385,7 +2387,7 @@ static int vxlan_stop(struct net_device *dev)
2385 2387
2386 del_timer_sync(&vxlan->age_timer); 2388 del_timer_sync(&vxlan->age_timer);
2387 2389
2388 vxlan_flush(vxlan); 2390 vxlan_flush(vxlan, false);
2389 vxlan_sock_release(vxlan); 2391 vxlan_sock_release(vxlan);
2390 2392
2391 return ret; 2393 return ret;
@@ -2890,7 +2892,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2890 memcpy(&vxlan->cfg, conf, sizeof(*conf)); 2892 memcpy(&vxlan->cfg, conf, sizeof(*conf));
2891 if (!vxlan->cfg.dst_port) { 2893 if (!vxlan->cfg.dst_port) {
2892 if (conf->flags & VXLAN_F_GPE) 2894 if (conf->flags & VXLAN_F_GPE)
2893 vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */ 2895 vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */
2894 else 2896 else
2895 vxlan->cfg.dst_port = default_port; 2897 vxlan->cfg.dst_port = default_port;
2896 } 2898 }
@@ -3058,6 +3060,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
3058 struct vxlan_dev *vxlan = netdev_priv(dev); 3060 struct vxlan_dev *vxlan = netdev_priv(dev);
3059 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 3061 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
3060 3062
3063 vxlan_flush(vxlan, true);
3064
3061 spin_lock(&vn->sock_lock); 3065 spin_lock(&vn->sock_lock);
3062 if (!hlist_unhashed(&vxlan->hlist)) 3066 if (!hlist_unhashed(&vxlan->hlist))
3063 hlist_del_rcu(&vxlan->hlist); 3067 hlist_del_rcu(&vxlan->hlist);
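
Both vxlan hunks apply the same predicate: an FDB entry whose state carries NUD_PERMANENT or NUD_NOARP is static, surviving both aging and an ordinary flush, and only the new do_all path (used from vxlan_dellink()) removes it. The bitmask test in isolation, with flag values taken from the uapi neighbour header of that era:

    #include <stdio.h>

    #define NUD_REACHABLE 0x02
    #define NUD_NOARP     0x40
    #define NUD_PERMANENT 0x80

    static int should_flush(unsigned int state, int do_all)
    {
        if (!do_all && (state & (NUD_PERMANENT | NUD_NOARP)))
            return 0;       /* static entry: keep unless flushing all */
        return 1;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               should_flush(NUD_PERMANENT, 0),   /* 0: kept on vxlan_stop() */
               should_flush(NUD_PERMANENT, 1),   /* 1: dellink flushes all */
               should_flush(NUD_REACHABLE, 0));  /* 1: learned entry ages out */
        return 0;
    }
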
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index e30ffd29b7e9..579521327b03 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -221,18 +221,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
221{ 221{
222 struct xenvif *vif = netdev_priv(dev); 222 struct xenvif *vif = netdev_priv(dev);
223 struct xenvif_queue *queue = NULL; 223 struct xenvif_queue *queue = NULL;
224 unsigned int num_queues = vif->num_queues;
225 unsigned long rx_bytes = 0; 224 unsigned long rx_bytes = 0;
226 unsigned long rx_packets = 0; 225 unsigned long rx_packets = 0;
227 unsigned long tx_bytes = 0; 226 unsigned long tx_bytes = 0;
228 unsigned long tx_packets = 0; 227 unsigned long tx_packets = 0;
229 unsigned int index; 228 unsigned int index;
230 229
230 spin_lock(&vif->lock);
231 if (vif->queues == NULL) 231 if (vif->queues == NULL)
232 goto out; 232 goto out;
233 233
234 /* Aggregate tx and rx stats from each queue */ 234 /* Aggregate tx and rx stats from each queue */
235 for (index = 0; index < num_queues; ++index) { 235 for (index = 0; index < vif->num_queues; ++index) {
236 queue = &vif->queues[index]; 236 queue = &vif->queues[index];
237 rx_bytes += queue->stats.rx_bytes; 237 rx_bytes += queue->stats.rx_bytes;
238 rx_packets += queue->stats.rx_packets; 238 rx_packets += queue->stats.rx_packets;
@@ -241,6 +241,8 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
241 } 241 }
242 242
243out: 243out:
244 spin_unlock(&vif->lock);
245
244 vif->dev->stats.rx_bytes = rx_bytes; 246 vif->dev->stats.rx_bytes = rx_bytes;
245 vif->dev->stats.rx_packets = rx_packets; 247 vif->dev->stats.rx_packets = rx_packets;
246 vif->dev->stats.tx_bytes = tx_bytes; 248 vif->dev->stats.tx_bytes = tx_bytes;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 3124eaec9427..85b742e1c42f 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -493,11 +493,22 @@ static int backend_create_xenvif(struct backend_info *be)
493static void backend_disconnect(struct backend_info *be) 493static void backend_disconnect(struct backend_info *be)
494{ 494{
495 if (be->vif) { 495 if (be->vif) {
496 unsigned int queue_index;
497
496 xen_unregister_watchers(be->vif); 498 xen_unregister_watchers(be->vif);
497#ifdef CONFIG_DEBUG_FS 499#ifdef CONFIG_DEBUG_FS
498 xenvif_debugfs_delif(be->vif); 500 xenvif_debugfs_delif(be->vif);
499#endif /* CONFIG_DEBUG_FS */ 501#endif /* CONFIG_DEBUG_FS */
500 xenvif_disconnect_data(be->vif); 502 xenvif_disconnect_data(be->vif);
503 for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
504 xenvif_deinit_queue(&be->vif->queues[queue_index]);
505
506 spin_lock(&be->vif->lock);
507 vfree(be->vif->queues);
508 be->vif->num_queues = 0;
509 be->vif->queues = NULL;
510 spin_unlock(&be->vif->lock);
511
501 xenvif_disconnect_ctrl(be->vif); 512 xenvif_disconnect_ctrl(be->vif);
502 } 513 }
503} 514}
@@ -1034,6 +1045,8 @@ static void connect(struct backend_info *be)
1034err: 1045err:
1035 if (be->vif->num_queues > 0) 1046 if (be->vif->num_queues > 0)
1036 xenvif_disconnect_data(be->vif); /* Clean up existing queues */ 1047 xenvif_disconnect_data(be->vif); /* Clean up existing queues */
1048 for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
1049 xenvif_deinit_queue(&be->vif->queues[queue_index]);
1037 vfree(be->vif->queues); 1050 vfree(be->vif->queues);
1038 be->vif->queues = NULL; 1051 be->vif->queues = NULL;
1039 be->vif->num_queues = 0; 1052 be->vif->num_queues = 0;
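
Taken together, the interface.c and xenbus.c hunks make vif->lock cover both sides of a race: backend_disconnect() now deinitializes each queue and frees the queue array under the lock, and xenvif_get_stats() reads vif->queues and vif->num_queues under the same lock, so the reader sees either the whole array or NULL, never a freed pointer. The pattern in userspace form, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long *queues;      /* stands in for vif->queues */
    static unsigned int num_queues;

    static unsigned long read_stats(void)
    {
        unsigned long total = 0;

        pthread_mutex_lock(&lock);
        if (queues)
            for (unsigned int i = 0; i < num_queues; i++)
                total += queues[i];
        pthread_mutex_unlock(&lock);
        return total;
    }

    static void disconnect(void)
    {
        pthread_mutex_lock(&lock);
        free(queues);                  /* vfree(be->vif->queues) */
        queues = NULL;
        num_queues = 0;
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        num_queues = 2;
        queues = calloc(num_queues, sizeof(*queues));
        queues[0] = 10; queues[1] = 32;
        printf("stats=%lu\n", read_stats());  /* 42 */
        disconnect();
        printf("stats=%lu\n", read_stats());  /* 0: safe after teardown */
        return 0;
    }
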
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 40f26b69beb1..2c7c29fa268d 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -321,7 +321,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
321 queue->rx.req_prod_pvt = req_prod; 321 queue->rx.req_prod_pvt = req_prod;
322 322
323 /* Not enough requests? Try again later. */ 323 /* Not enough requests? Try again later. */
324 if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) { 324 if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
325 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); 325 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
326 return; 326 return;
327 } 327 }
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 6307088b375f..a518cb1b59d4 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -957,6 +957,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
957{ 957{
958 resource_size_t allocated = 0, available = 0; 958 resource_size_t allocated = 0, available = 0;
959 struct nd_region *nd_region = to_nd_region(dev->parent); 959 struct nd_region *nd_region = to_nd_region(dev->parent);
960 struct nd_namespace_common *ndns = to_ndns(dev);
960 struct nd_mapping *nd_mapping; 961 struct nd_mapping *nd_mapping;
961 struct nvdimm_drvdata *ndd; 962 struct nvdimm_drvdata *ndd;
962 struct nd_label_id label_id; 963 struct nd_label_id label_id;
@@ -964,7 +965,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
964 u8 *uuid = NULL; 965 u8 *uuid = NULL;
965 int rc, i; 966 int rc, i;
966 967
967 if (dev->driver || to_ndns(dev)->claim) 968 if (dev->driver || ndns->claim)
968 return -EBUSY; 969 return -EBUSY;
969 970
970 if (is_namespace_pmem(dev)) { 971 if (is_namespace_pmem(dev)) {
@@ -1034,20 +1035,16 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
1034 1035
1035 nd_namespace_pmem_set_resource(nd_region, nspm, 1036 nd_namespace_pmem_set_resource(nd_region, nspm,
1036 val * nd_region->ndr_mappings); 1037 val * nd_region->ndr_mappings);
1037 } else if (is_namespace_blk(dev)) {
1038 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1039
1040 /*
1041 * Try to delete the namespace if we deleted all of its
1042 * allocation, this is not the seed device for the
1043 * region, and it is not actively claimed by a btt
1044 * instance.
1045 */
1046 if (val == 0 && nd_region->ns_seed != dev
1047 && !nsblk->common.claim)
1048 nd_device_unregister(dev, ND_ASYNC);
1049 } 1038 }
1050 1039
1040 /*
1041 * Try to delete the namespace if we deleted all of its
1042 * allocation, this is not the seed device for the region, and
1043 * it is not actively claimed by a btt instance.
1044 */
1045 if (val == 0 && nd_region->ns_seed != dev && !ndns->claim)
1046 nd_device_unregister(dev, ND_ASYNC);
1047
1051 return rc; 1048 return rc;
1052} 1049}
1053 1050
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7282d7495bf1..5b536be5a12e 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -90,7 +90,9 @@ static int read_pmem(struct page *page, unsigned int off,
90 90
91 rc = memcpy_from_pmem(mem + off, pmem_addr, len); 91 rc = memcpy_from_pmem(mem + off, pmem_addr, len);
92 kunmap_atomic(mem); 92 kunmap_atomic(mem);
93 return rc; 93 if (rc)
94 return -EIO;
95 return 0;
94} 96}
95 97
96static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, 98static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
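
The read_pmem() fix normalizes the return value: memcpy_from_pmem() reports a media error with an implementation-specific nonzero code, but the block-layer caller only understands errnos, so any failure is collapsed to -EIO. The shape of that normalization, with a stub standing in for the pmem copy:

    #include <stdio.h>

    #define EIO 5

    static int copy_from_media(int inject_error) /* memcpy_from_pmem() stand-in */
    {
        return inject_error ? 42 : 0;  /* nonzero means bad blocks were hit */
    }

    static int read_block(int inject_error)
    {
        int rc = copy_from_media(inject_error);

        if (rc)
            return -EIO;               /* callers only test for an errno */
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", read_block(0), read_block(1)); /* 0 -5 */
        return 0;
    }
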
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index fcc9dcfdf675..e65041c640cb 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1663,13 +1663,13 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
1663 return 0; 1663 return 0;
1664 1664
1665 freq->sg_table.sgl = freq->first_sgl; 1665 freq->sg_table.sgl = freq->first_sgl;
1666 ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments, 1666 ret = sg_alloc_table_chained(&freq->sg_table,
1667 freq->sg_table.sgl); 1667 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
1668 if (ret) 1668 if (ret)
1669 return -ENOMEM; 1669 return -ENOMEM;
1670 1670
1671 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); 1671 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
1672 WARN_ON(op->nents > rq->nr_phys_segments); 1672 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
1673 dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; 1673 dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
1674 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, 1674 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
1675 op->nents, dir); 1675 op->nents, dir);
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 6f5074153dcd..be8c800078e2 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -631,6 +631,7 @@ static void nvmet_subsys_release(struct config_item *item)
631{ 631{
632 struct nvmet_subsys *subsys = to_subsys(item); 632 struct nvmet_subsys *subsys = to_subsys(item);
633 633
634 nvmet_subsys_del_ctrls(subsys);
634 nvmet_subsys_put(subsys); 635 nvmet_subsys_put(subsys);
635} 636}
636 637
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b1d66ed655c9..fc5ba2f9e15f 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -200,7 +200,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
200 pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n", 200 pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
201 ctrl->cntlid, ctrl->kato); 201 ctrl->cntlid, ctrl->kato);
202 202
203 ctrl->ops->delete_ctrl(ctrl); 203 nvmet_ctrl_fatal_error(ctrl);
204} 204}
205 205
206static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) 206static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -816,6 +816,9 @@ static void nvmet_ctrl_free(struct kref *ref)
816 list_del(&ctrl->subsys_entry); 816 list_del(&ctrl->subsys_entry);
817 mutex_unlock(&subsys->lock); 817 mutex_unlock(&subsys->lock);
818 818
819 flush_work(&ctrl->async_event_work);
820 cancel_work_sync(&ctrl->fatal_err_work);
821
819 ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid); 822 ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
820 nvmet_subsys_put(subsys); 823 nvmet_subsys_put(subsys);
821 824
@@ -935,6 +938,16 @@ static void nvmet_subsys_free(struct kref *ref)
935 kfree(subsys); 938 kfree(subsys);
936} 939}
937 940
941void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
942{
943 struct nvmet_ctrl *ctrl;
944
945 mutex_lock(&subsys->lock);
946 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
947 ctrl->ops->delete_ctrl(ctrl);
948 mutex_unlock(&subsys->lock);
949}
950
938void nvmet_subsys_put(struct nvmet_subsys *subsys) 951void nvmet_subsys_put(struct nvmet_subsys *subsys)
939{ 952{
940 kref_put(&subsys->ref, nvmet_subsys_free); 953 kref_put(&subsys->ref, nvmet_subsys_free);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 173e842f19c9..ba57f9852bde 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1314,7 +1314,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1314 (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf; 1314 (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1315 struct fcnvme_ls_disconnect_acc *acc = 1315 struct fcnvme_ls_disconnect_acc *acc =
1316 (struct fcnvme_ls_disconnect_acc *)iod->rspbuf; 1316 (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
1317 struct nvmet_fc_tgt_queue *queue; 1317 struct nvmet_fc_tgt_queue *queue = NULL;
1318 struct nvmet_fc_tgt_assoc *assoc; 1318 struct nvmet_fc_tgt_assoc *assoc;
1319 int ret = 0; 1319 int ret = 0;
1320 bool del_assoc = false; 1320 bool del_assoc = false;
@@ -1348,7 +1348,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1348 assoc = nvmet_fc_find_target_assoc(tgtport, 1348 assoc = nvmet_fc_find_target_assoc(tgtport,
1349 be64_to_cpu(rqst->associd.association_id)); 1349 be64_to_cpu(rqst->associd.association_id));
1350 iod->assoc = assoc; 1350 iod->assoc = assoc;
1351 if (!assoc) 1351 if (assoc) {
1352 if (rqst->discon_cmd.scope ==
1353 FCNVME_DISCONN_CONNECTION) {
1354 queue = nvmet_fc_find_target_queue(tgtport,
1355 be64_to_cpu(
1356 rqst->discon_cmd.id));
1357 if (!queue) {
1358 nvmet_fc_tgt_a_put(assoc);
1359 ret = VERR_NO_CONN;
1360 }
1361 }
1362 } else
1352 ret = VERR_NO_ASSOC; 1363 ret = VERR_NO_ASSOC;
1353 } 1364 }
1354 1365
@@ -1373,21 +1384,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1373 FCNVME_LS_DISCONNECT); 1384 FCNVME_LS_DISCONNECT);
1374 1385
1375 1386
1376 if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) { 1387 /* are we to delete a Connection ID (queue) */
1377 queue = nvmet_fc_find_target_queue(tgtport, 1388 if (queue) {
1378 be64_to_cpu(rqst->discon_cmd.id)); 1389 int qid = queue->qid;
1379 if (queue) {
1380 int qid = queue->qid;
1381 1390
1382 nvmet_fc_delete_target_queue(queue); 1391 nvmet_fc_delete_target_queue(queue);
1383 1392
1384 /* release the get taken by find_target_queue */ 1393 /* release the get taken by find_target_queue */
1385 nvmet_fc_tgt_q_put(queue); 1394 nvmet_fc_tgt_q_put(queue);
1386 1395
1387 /* tear association down if io queue terminated */ 1396 /* tear association down if io queue terminated */
1388 if (!qid) 1397 if (!qid)
1389 del_assoc = true; 1398 del_assoc = true;
1390 }
1391 } 1399 }
1392 1400
1393 /* release get taken in nvmet_fc_find_target_assoc */ 1401 /* release get taken in nvmet_fc_find_target_assoc */
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 23d5eb1c944f..cc7ad06b43a7 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -282,6 +282,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
282struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, 282struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
283 enum nvme_subsys_type type); 283 enum nvme_subsys_type type);
284void nvmet_subsys_put(struct nvmet_subsys *subsys); 284void nvmet_subsys_put(struct nvmet_subsys *subsys);
285void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
285 286
286struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid); 287struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
287void nvmet_put_namespace(struct nvmet_ns *ns); 288void nvmet_put_namespace(struct nvmet_ns *ns);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 8c3760a78ac0..60990220bd83 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
438{ 438{
439 struct ib_recv_wr *bad_wr; 439 struct ib_recv_wr *bad_wr;
440 440
441 ib_dma_sync_single_for_device(ndev->device,
442 cmd->sge[0].addr, cmd->sge[0].length,
443 DMA_FROM_DEVICE);
444
441 if (ndev->srq) 445 if (ndev->srq)
442 return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr); 446 return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
443 return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr); 447 return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
@@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
538 first_wr = &rsp->send_wr; 542 first_wr = &rsp->send_wr;
539 543
540 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); 544 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
545
546 ib_dma_sync_single_for_device(rsp->queue->dev->device,
547 rsp->send_sge.addr, rsp->send_sge.length,
548 DMA_TO_DEVICE);
549
541 if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) { 550 if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
542 pr_err("sending cmd response failed\n"); 551 pr_err("sending cmd response failed\n");
543 nvmet_rdma_release_rsp(rsp); 552 nvmet_rdma_release_rsp(rsp);
@@ -698,6 +707,14 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
698 cmd->n_rdma = 0; 707 cmd->n_rdma = 0;
699 cmd->req.port = queue->port; 708 cmd->req.port = queue->port;
700 709
710
711 ib_dma_sync_single_for_cpu(queue->dev->device,
712 cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
713 DMA_FROM_DEVICE);
714 ib_dma_sync_single_for_cpu(queue->dev->device,
715 cmd->send_sge.addr, cmd->send_sge.length,
716 DMA_TO_DEVICE);
717
701 if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, 718 if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
702 &queue->nvme_sq, &nvmet_rdma_ops)) 719 &queue->nvme_sq, &nvmet_rdma_ops))
703 return; 720 return;
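
All three rdma.c hunks enforce the streaming-DMA ownership rule: the CPU may only read or write a mapped buffer between a sync_for_cpu and the matching sync_for_device, after which the buffer belongs to the device again. A hedged sketch using the generic DMA API (the driver uses the equivalent ib_dma_* wrappers, which follow the same contract):

#include <linux/dma-mapping.h>

static void process_rx_buffer(struct device *dev, dma_addr_t addr,
                              size_t len, void *vaddr)
{
    /* device -> CPU: make the freshly DMA'd command visible to us */
    dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);

    /* ... parse the request at vaddr while the CPU owns the buffer ... */

    /* CPU -> device: hand the buffer back before reposting the recv */
    dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}
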
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index 1f38d0836751..f1b633bce525 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -517,7 +517,7 @@ static int xgene_msi_probe(struct platform_device *pdev)
517 517
518 rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online", 518 rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
519 xgene_msi_hwirq_alloc, NULL); 519 xgene_msi_hwirq_alloc, NULL);
520 if (rc) 520 if (rc < 0)
521 goto err_cpuhp; 521 goto err_cpuhp;
522 pci_xgene_online = rc; 522 pci_xgene_online = rc;
523 rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL, 523 rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
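
The check changes because CPUHP_AP_ONLINE_DYN is special: on success cpuhp_setup_state() returns the dynamically allocated state number, a positive value, so the old `if (rc)` treated every successful registration as a failure. Only negative returns are errors. A sketch of the idiom (callback and names are illustrative):

#include <linux/cpuhotplug.h>

static enum cpuhp_state online_state;

static int example_cpu_online(unsigned int cpu)
{
    return 0;       /* per-CPU setup would go here */
}

static int example_register(void)
{
    int rc;

    rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
                           example_cpu_online, NULL);
    if (rc < 0)             /* positive rc is the allocated state, not an error */
        return rc;
    online_state = rc;      /* keep it for cpuhp_remove_state() later */
    return 0;
}
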
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index bed19994c1e9..af8f6e92e885 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -807,11 +807,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
807{ 807{
808 u32 val; 808 u32 val;
809 809
810 /* get iATU unroll support */
811 pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
812 dev_dbg(pp->dev, "iATU unroll: %s\n",
813 pp->iatu_unroll_enabled ? "enabled" : "disabled");
814
815 /* set the number of lanes */ 810 /* set the number of lanes */
816 val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL); 811 val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
817 val &= ~PORT_LINK_MODE_MASK; 812 val &= ~PORT_LINK_MODE_MASK;
@@ -882,6 +877,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
882 * we should not program the ATU here. 877 * we should not program the ATU here.
883 */ 878 */
884 if (!pp->ops->rd_other_conf) { 879 if (!pp->ops->rd_other_conf) {
880 /* get iATU unroll support */
881 pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
882 dev_dbg(pp->dev, "iATU unroll: %s\n",
883 pp->iatu_unroll_enabled ? "enabled" : "disabled");
884
885 dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, 885 dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
886 PCIE_ATU_TYPE_MEM, pp->mem_base, 886 PCIE_ATU_TYPE_MEM, pp->mem_base,
887 pp->mem_bus_addr, pp->mem_size); 887 pp->mem_bus_addr, pp->mem_size);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e164b5c9f0f0..204960e70333 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1169,6 +1169,7 @@ void set_pcie_port_type(struct pci_dev *pdev)
1169 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 1169 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1170 if (!pos) 1170 if (!pos)
1171 return; 1171 return;
1172
1172 pdev->pcie_cap = pos; 1173 pdev->pcie_cap = pos;
1173 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16); 1174 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
1174 pdev->pcie_flags_reg = reg16; 1175 pdev->pcie_flags_reg = reg16;
@@ -1176,13 +1177,14 @@ void set_pcie_port_type(struct pci_dev *pdev)
1176 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; 1177 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
1177 1178
1178 /* 1179 /*
1179 * A Root Port is always the upstream end of a Link. No PCIe 1180 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
1180 * component has two Links. Two Links are connected by a Switch 1181 * of a Link. No PCIe component has two Links. Two Links are
1181 * that has a Port on each Link and internal logic to connect the 1182 * connected by a Switch that has a Port on each Link and internal
1182 * two Ports. 1183 * logic to connect the two Ports.
1183 */ 1184 */
1184 type = pci_pcie_type(pdev); 1185 type = pci_pcie_type(pdev);
1185 if (type == PCI_EXP_TYPE_ROOT_PORT) 1186 if (type == PCI_EXP_TYPE_ROOT_PORT ||
1187 type == PCI_EXP_TYPE_PCIE_BRIDGE)
1186 pdev->has_secondary_link = 1; 1188 pdev->has_secondary_link = 1;
1187 else if (type == PCI_EXP_TYPE_UPSTREAM || 1189 else if (type == PCI_EXP_TYPE_UPSTREAM ||
1188 type == PCI_EXP_TYPE_DOWNSTREAM) { 1190 type == PCI_EXP_TYPE_DOWNSTREAM) {
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 37300634b7d2..c123488266ce 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1092,6 +1092,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
1092 enum pin_config_param param = pinconf_to_config_param(*config); 1092 enum pin_config_param param = pinconf_to_config_param(*config);
1093 void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); 1093 void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
1094 void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); 1094 void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
1095 void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
1095 unsigned long flags; 1096 unsigned long flags;
1096 u32 conf, pull, val, debounce; 1097 u32 conf, pull, val, debounce;
1097 u16 arg = 0; 1098 u16 arg = 0;
@@ -1128,7 +1129,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
1128 return -EINVAL; 1129 return -EINVAL;
1129 1130
1130 raw_spin_lock_irqsave(&vg->lock, flags); 1131 raw_spin_lock_irqsave(&vg->lock, flags);
1131 debounce = readl(byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG)); 1132 debounce = readl(db_reg);
1132 raw_spin_unlock_irqrestore(&vg->lock, flags); 1133 raw_spin_unlock_irqrestore(&vg->lock, flags);
1133 1134
1134 switch (debounce & BYT_DEBOUNCE_PULSE_MASK) { 1135 switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
@@ -1176,6 +1177,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
1176 unsigned int param, arg; 1177 unsigned int param, arg;
1177 void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); 1178 void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
1178 void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); 1179 void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
1180 void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
1179 unsigned long flags; 1181 unsigned long flags;
1180 u32 conf, val, debounce; 1182 u32 conf, val, debounce;
1181 int i, ret = 0; 1183 int i, ret = 0;
@@ -1238,36 +1240,40 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
1238 1240
1239 break; 1241 break;
1240 case PIN_CONFIG_INPUT_DEBOUNCE: 1242 case PIN_CONFIG_INPUT_DEBOUNCE:
1241 debounce = readl(byt_gpio_reg(vg, offset, 1243 debounce = readl(db_reg);
1242 BYT_DEBOUNCE_REG)); 1244 debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
1243 conf &= ~BYT_DEBOUNCE_PULSE_MASK;
1244 1245
1245 switch (arg) { 1246 switch (arg) {
1247 case 0:
1248 conf &= BYT_DEBOUNCE_EN;
1249 break;
1246 case 375: 1250 case 375:
1247 conf |= BYT_DEBOUNCE_PULSE_375US; 1251 debounce |= BYT_DEBOUNCE_PULSE_375US;
1248 break; 1252 break;
1249 case 750: 1253 case 750:
1250 conf |= BYT_DEBOUNCE_PULSE_750US; 1254 debounce |= BYT_DEBOUNCE_PULSE_750US;
1251 break; 1255 break;
1252 case 1500: 1256 case 1500:
1253 conf |= BYT_DEBOUNCE_PULSE_1500US; 1257 debounce |= BYT_DEBOUNCE_PULSE_1500US;
1254 break; 1258 break;
1255 case 3000: 1259 case 3000:
1256 conf |= BYT_DEBOUNCE_PULSE_3MS; 1260 debounce |= BYT_DEBOUNCE_PULSE_3MS;
1257 break; 1261 break;
1258 case 6000: 1262 case 6000:
1259 conf |= BYT_DEBOUNCE_PULSE_6MS; 1263 debounce |= BYT_DEBOUNCE_PULSE_6MS;
1260 break; 1264 break;
1261 case 12000: 1265 case 12000:
1262 conf |= BYT_DEBOUNCE_PULSE_12MS; 1266 debounce |= BYT_DEBOUNCE_PULSE_12MS;
1263 break; 1267 break;
1264 case 24000: 1268 case 24000:
1265 conf |= BYT_DEBOUNCE_PULSE_24MS; 1269 debounce |= BYT_DEBOUNCE_PULSE_24MS;
1266 break; 1270 break;
1267 default: 1271 default:
1268 ret = -EINVAL; 1272 ret = -EINVAL;
1269 } 1273 }
1270 1274
1275 if (!ret)
1276 writel(debounce, db_reg);
1271 break; 1277 break;
1272 default: 1278 default:
1273 ret = -ENOTSUPP; 1279 ret = -ENOTSUPP;
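
The original code accumulated the pulse-width bits into `conf`, the CONF0 shadow, so the debounce setting landed in the wrong register and the debounce register itself was never written. The fix does a read-modify-write of the debounce register and only commits it when the requested interval was valid. The pattern in miniature, with a placeholder for the driver's BYT_DEBOUNCE_PULSE_MASK:

#include <linux/io.h>

#define DEBOUNCE_PULSE_MASK 0x7   /* placeholder for the driver's mask */

static void set_debounce_pulse(void __iomem *db_reg, u32 pulse_bits)
{
    u32 val = readl(db_reg);      /* read the current register value */

    val &= ~DEBOUNCE_PULSE_MASK;  /* clear the old pulse width */
    val |= pulse_bits;            /* select the new one */
    writel(val, db_reg);          /* write the *debounce* register */
}
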
@@ -1617,6 +1623,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
1617 1623
1618static void byt_gpio_irq_init_hw(struct byt_gpio *vg) 1624static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
1619{ 1625{
1626 struct gpio_chip *gc = &vg->chip;
1627 struct device *dev = &vg->pdev->dev;
1620 void __iomem *reg; 1628 void __iomem *reg;
1621 u32 base, value; 1629 u32 base, value;
1622 int i; 1630 int i;
@@ -1638,10 +1646,12 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
1638 } 1646 }
1639 1647
1640 value = readl(reg); 1648 value = readl(reg);
1641 if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) && 1649 if (value & BYT_DIRECT_IRQ_EN) {
1642 !(value & BYT_DIRECT_IRQ_EN)) { 1650 clear_bit(i, gc->irq_valid_mask);
1651 dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i);
1652 } else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) {
1643 byt_gpio_clear_triggering(vg, i); 1653 byt_gpio_clear_triggering(vg, i);
1644 dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i); 1654 dev_dbg(dev, "disabling GPIO %d\n", i);
1645 } 1655 }
1646 } 1656 }
1647 1657
@@ -1680,6 +1690,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
1680 gc->can_sleep = false; 1690 gc->can_sleep = false;
1681 gc->parent = &vg->pdev->dev; 1691 gc->parent = &vg->pdev->dev;
1682 gc->ngpio = vg->soc_data->npins; 1692 gc->ngpio = vg->soc_data->npins;
1693 gc->irq_need_valid_mask = true;
1683 1694
1684#ifdef CONFIG_PM_SLEEP 1695#ifdef CONFIG_PM_SLEEP
1685 vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio, 1696 vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
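
Excluding a pad from the irqchip is a two-step gpiolib pattern in this kernel generation: set gc->irq_need_valid_mask before the chip is added so gpiolib allocates irq_valid_mask, then clear the bits for pins (here, those claimed as direct IRQs) that must never be mapped as GPIO interrupts. A condensed sketch; the predicate is illustrative, a real driver would read the pad's CONF0 register:

#include <linux/gpio/driver.h>

/* illustrative predicate: true when the pad is wired as a direct IRQ */
static bool pin_is_direct_irq(struct gpio_chip *gc, unsigned int i)
{
    return false;
}

static void mask_direct_irq_pins(struct gpio_chip *gc)
{
    unsigned int i;

    /* gc->irq_need_valid_mask was set before the chip was added,
     * so gpiolib has allocated gc->irq_valid_mask for us */
    for (i = 0; i < gc->ngpio; i++)
        if (pin_is_direct_irq(gc, i))
            clear_bit(i, gc->irq_valid_mask);
}
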
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
index 59cb7a6fc5be..901b356b09d7 100644
--- a/drivers/pinctrl/intel/pinctrl-broxton.c
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -19,7 +19,7 @@
19 19
20#define BXT_PAD_OWN 0x020 20#define BXT_PAD_OWN 0x020
21#define BXT_HOSTSW_OWN 0x080 21#define BXT_HOSTSW_OWN 0x080
22#define BXT_PADCFGLOCK 0x090 22#define BXT_PADCFGLOCK 0x060
23#define BXT_GPI_IE 0x110 23#define BXT_GPI_IE 0x110
24 24
25#define BXT_COMMUNITY(s, e) \ 25#define BXT_COMMUNITY(s, e) \
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 1e139672f1af..6df35dcb29ae 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -353,6 +353,21 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
353 return 0; 353 return 0;
354} 354}
355 355
356static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
357{
358 u32 value;
359
360 value = readl(padcfg0);
361 if (input) {
362 value &= ~PADCFG0_GPIORXDIS;
363 value |= PADCFG0_GPIOTXDIS;
364 } else {
365 value &= ~PADCFG0_GPIOTXDIS;
366 value |= PADCFG0_GPIORXDIS;
367 }
368 writel(value, padcfg0);
369}
370
356static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, 371static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
357 struct pinctrl_gpio_range *range, 372 struct pinctrl_gpio_range *range,
358 unsigned pin) 373 unsigned pin)
@@ -375,11 +390,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
375 /* Disable SCI/SMI/NMI generation */ 390 /* Disable SCI/SMI/NMI generation */
376 value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); 391 value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
377 value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI); 392 value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
378 /* Disable TX buffer and enable RX (this will be input) */
379 value &= ~PADCFG0_GPIORXDIS;
380 value |= PADCFG0_GPIOTXDIS;
381 writel(value, padcfg0); 393 writel(value, padcfg0);
382 394
395 /* Disable TX buffer and enable RX (this will be input) */
396 __intel_gpio_set_direction(padcfg0, true);
397
383 raw_spin_unlock_irqrestore(&pctrl->lock, flags); 398 raw_spin_unlock_irqrestore(&pctrl->lock, flags);
384 399
385 return 0; 400 return 0;
@@ -392,18 +407,11 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
392 struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); 407 struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
393 void __iomem *padcfg0; 408 void __iomem *padcfg0;
394 unsigned long flags; 409 unsigned long flags;
395 u32 value;
396 410
397 raw_spin_lock_irqsave(&pctrl->lock, flags); 411 raw_spin_lock_irqsave(&pctrl->lock, flags);
398 412
399 padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); 413 padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
400 414 __intel_gpio_set_direction(padcfg0, input);
401 value = readl(padcfg0);
402 if (input)
403 value |= PADCFG0_GPIOTXDIS;
404 else
405 value &= ~PADCFG0_GPIOTXDIS;
406 writel(value, padcfg0);
407 415
408 raw_spin_unlock_irqrestore(&pctrl->lock, flags); 416 raw_spin_unlock_irqrestore(&pctrl->lock, flags);
409 417
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index c3928aa3fefa..e0bca4df2a2f 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -253,9 +253,8 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
253static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) }; 253static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
254static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) }; 254static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
255static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) }; 255static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
256static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) }; 256static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) };
257static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0), 257static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) };
258 PIN(GPIOAO_5, 0) };
259static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) }; 258static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
260static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) }; 259static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
261 260
@@ -498,7 +497,7 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
498 GPIO_GROUP(GPIOAO_13, 0), 497 GPIO_GROUP(GPIOAO_13, 0),
499 498
500 /* bank AO */ 499 /* bank AO */
501 GROUP(uart_tx_ao_b, 0, 26), 500 GROUP(uart_tx_ao_b, 0, 24),
502 GROUP(uart_rx_ao_b, 0, 25), 501 GROUP(uart_rx_ao_b, 0, 25),
503 GROUP(uart_tx_ao_a, 0, 12), 502 GROUP(uart_tx_ao_a, 0, 12),
504 GROUP(uart_rx_ao_a, 0, 11), 503 GROUP(uart_rx_ao_a, 0, 11),
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 25694f7094c7..b69743b07a1d 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -214,9 +214,8 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
214static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) }; 214static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
215static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) }; 215static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
216static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) }; 216static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
217static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) }; 217static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) };
218static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0), 218static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) };
219 PIN(GPIOAO_5, 0) };
220static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) }; 219static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
221static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) }; 220static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
222 221
@@ -409,7 +408,7 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = {
409 GPIO_GROUP(GPIOAO_9, 0), 408 GPIO_GROUP(GPIOAO_9, 0),
410 409
411 /* bank AO */ 410 /* bank AO */
412 GROUP(uart_tx_ao_b, 0, 26), 411 GROUP(uart_tx_ao_b, 0, 24),
413 GROUP(uart_rx_ao_b, 0, 25), 412 GROUP(uart_rx_ao_b, 0, 25),
414 GROUP(uart_tx_ao_a, 0, 12), 413 GROUP(uart_tx_ao_a, 0, 12),
415 GROUP(uart_rx_ao_a, 0, 11), 414 GROUP(uart_rx_ao_a, 0, 11),
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index c9a146948192..537b52055756 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -202,6 +202,8 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
202 i = 128; 202 i = 128;
203 pin_num = AMD_GPIO_PINS_BANK2 + i; 203 pin_num = AMD_GPIO_PINS_BANK2 + i;
204 break; 204 break;
205 default:
206 return;
205 } 207 }
206 208
207 for (; i < pin_num; i++) { 209 for (; i < pin_num; i++) {
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
index aa8bd9794683..96686336e3a3 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
@@ -561,7 +561,7 @@ static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
561 0, 0, 0, 0}; 561 0, 0, 0, 0};
562static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39, 562static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39,
563 41, 42, 45}; 563 41, 42, 45};
564static const int ether_rmii_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; 564static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1};
565static const unsigned i2c0_pins[] = {63, 64}; 565static const unsigned i2c0_pins[] = {63, 64};
566static const int i2c0_muxvals[] = {0, 0}; 566static const int i2c0_muxvals[] = {0, 0};
567static const unsigned i2c1_pins[] = {65, 66}; 567static const unsigned i2c1_pins[] = {65, 66};
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 410741acb3c9..f46ece2ce3c4 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -813,6 +813,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
813 case 8: 813 case 8:
814 case 7: 814 case 7:
815 case 6: 815 case 6:
816 case 1:
816 ideapad_input_report(priv, vpc_bit); 817 ideapad_input_report(priv, vpc_bit);
817 break; 818 break;
818 case 5: 819 case 5:
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 1fc0de870ff8..361770568ad0 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -77,7 +77,7 @@ static int mfld_pb_probe(struct platform_device *pdev)
77 77
78 input_set_capability(input, EV_KEY, KEY_POWER); 78 input_set_capability(input, EV_KEY, KEY_POWER);
79 79
80 error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0, 80 error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT,
81 DRIVER_NAME, input); 81 DRIVER_NAME, input);
82 if (error) { 82 if (error) {
83 dev_err(&pdev->dev, "Unable to request irq %d for mfld power" 83 dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
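
With a NULL primary handler, request_threaded_irq() requires IRQF_ONESHOT: the core must keep the line masked until the thread handler finishes, otherwise a level-triggered interrupt would fire endlessly, and the genirq sanity check rejects such a request with -EINVAL outright. Sketch of the idiom:

#include <linux/interrupt.h>

static irqreturn_t pb_thread_fn(int irq, void *dev_id)
{
    /* thread context: sleeping (e.g. bus transactions) is allowed */
    return IRQ_HANDLED;
}

static int pb_setup_irq(int irq, void *dev)
{
    /* NULL hardirq handler => IRQF_ONESHOT is mandatory */
    return request_threaded_irq(irq, NULL, pb_thread_fn,
                                IRQF_ONESHOT, "example-pb", dev);
}
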
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 97b4c3a219c0..25f15df5c2d7 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -326,7 +326,7 @@ static int __init mlxplat_init(void)
326 return 0; 326 return 0;
327 327
328fail_platform_mux_register: 328fail_platform_mux_register:
329 for (i--; i > 0 ; i--) 329 while (--i >= 0)
330 platform_device_unregister(priv->pdev_mux[i]); 330 platform_device_unregister(priv->pdev_mux[i]);
331 platform_device_unregister(priv->pdev_i2c); 331 platform_device_unregister(priv->pdev_i2c);
332fail_alloc: 332fail_alloc:
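
The old unwind loop `for (i--; i > 0; i--)` decrements past the failed index but stops before reaching index 0, leaking the first registered mux device; `while (--i >= 0)` visits every index that was actually registered. A standalone demonstration:

#include <stdio.h>

int main(void)
{
    int i, n = 3;               /* registration of entry n failed */

    i = n;
    for (i--; i > 0; i--)       /* buggy: prints 2, 1 -- entry 0 leaks */
        printf("buggy unwind: %d\n", i);

    i = n;
    while (--i >= 0)            /* fixed: prints 2, 1, 0 */
        printf("fixed unwind: %d\n", i);

    return 0;
}
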
diff --git a/drivers/platform/x86/surface3-wmi.c b/drivers/platform/x86/surface3-wmi.c
index cbf4d83a7271..25b176996cb7 100644
--- a/drivers/platform/x86/surface3-wmi.c
+++ b/drivers/platform/x86/surface3-wmi.c
@@ -139,7 +139,7 @@ static acpi_status s3_wmi_attach_spi_device(acpi_handle handle,
139 139
140static int s3_wmi_check_platform_device(struct device *dev, void *data) 140static int s3_wmi_check_platform_device(struct device *dev, void *data)
141{ 141{
142 struct acpi_device *adev, *ts_adev; 142 struct acpi_device *adev, *ts_adev = NULL;
143 acpi_handle handle; 143 acpi_handle handle;
144 acpi_status status; 144 acpi_status status;
145 145
@@ -244,13 +244,11 @@ static int s3_wmi_remove(struct platform_device *device)
244 return 0; 244 return 0;
245} 245}
246 246
247#ifdef CONFIG_PM 247static int __maybe_unused s3_wmi_resume(struct device *dev)
248static int s3_wmi_resume(struct device *dev)
249{ 248{
250 s3_wmi_send_lid_state(); 249 s3_wmi_send_lid_state();
251 return 0; 250 return 0;
252} 251}
253#endif
254static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume); 252static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume);
255 253
256static struct platform_driver s3_wmi_driver = { 254static struct platform_driver s3_wmi_driver = {
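
SIMPLE_DEV_PM_OPS only references its callbacks when CONFIG_PM_SLEEP is enabled, so guarding the resume handler with #ifdef CONFIG_PM tested the wrong symbol and could break the build or trigger unused-function warnings when the two options diverge; __maybe_unused lets the compiler discard the unreferenced function silently. The resulting idiom:

#include <linux/pm.h>

static int __maybe_unused example_resume(struct device *dev)
{
    /* restore whatever state was lost across suspend */
    return 0;
}

/* references example_resume only when CONFIG_PM_SLEEP is set */
static SIMPLE_DEV_PM_OPS(example_pm, NULL, example_resume);
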
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 639ed4e6afd1..070c4da95f48 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -145,6 +145,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
145#define CCW_CMD_WRITE_CONF 0x21 145#define CCW_CMD_WRITE_CONF 0x21
146#define CCW_CMD_WRITE_STATUS 0x31 146#define CCW_CMD_WRITE_STATUS 0x31
147#define CCW_CMD_READ_VQ_CONF 0x32 147#define CCW_CMD_READ_VQ_CONF 0x32
148#define CCW_CMD_READ_STATUS 0x72
148#define CCW_CMD_SET_IND_ADAPTER 0x73 149#define CCW_CMD_SET_IND_ADAPTER 0x73
149#define CCW_CMD_SET_VIRTIO_REV 0x83 150#define CCW_CMD_SET_VIRTIO_REV 0x83
150 151
@@ -160,6 +161,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
160#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000 161#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
161#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000 162#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
162#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000 163#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
164#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000
163#define VIRTIO_CCW_INTPARM_MASK 0xffff0000 165#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
164 166
165static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev) 167static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
@@ -452,7 +454,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
452 * This may happen on device detach. 454 * This may happen on device detach.
453 */ 455 */
454 if (ret && (ret != -ENODEV)) 456 if (ret && (ret != -ENODEV))
455 dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d", 457 dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
456 ret, index); 458 ret, index);
457 459
458 vring_del_virtqueue(vq); 460 vring_del_virtqueue(vq);
@@ -892,6 +894,28 @@ out_free:
892static u8 virtio_ccw_get_status(struct virtio_device *vdev) 894static u8 virtio_ccw_get_status(struct virtio_device *vdev)
893{ 895{
894 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 896 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
897 u8 old_status = *vcdev->status;
898 struct ccw1 *ccw;
899
900 if (vcdev->revision < 1)
901 return *vcdev->status;
902
903 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
904 if (!ccw)
905 return old_status;
906
907 ccw->cmd_code = CCW_CMD_READ_STATUS;
908 ccw->flags = 0;
909 ccw->count = sizeof(*vcdev->status);
910 ccw->cda = (__u32)(unsigned long)vcdev->status;
911 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
 912 /*
 913  * If the channel program failed (should only happen if the device
 914  * was hotunplugged, and then we clean up via the machine check
 915  * handler anyway), vcdev->status was not overwritten and we just
 916  * return the old status, which is fine.
 917  */
918 kfree(ccw);
895 919
896 return *vcdev->status; 920 return *vcdev->status;
897} 921}
@@ -920,7 +944,7 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
920 kfree(ccw); 944 kfree(ccw);
921} 945}
922 946
923static struct virtio_config_ops virtio_ccw_config_ops = { 947static const struct virtio_config_ops virtio_ccw_config_ops = {
924 .get_features = virtio_ccw_get_features, 948 .get_features = virtio_ccw_get_features,
925 .finalize_features = virtio_ccw_finalize_features, 949 .finalize_features = virtio_ccw_finalize_features,
926 .get = virtio_ccw_get_config, 950 .get = virtio_ccw_get_config,
@@ -987,6 +1011,7 @@ static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
987 case VIRTIO_CCW_DOING_READ_CONFIG: 1011 case VIRTIO_CCW_DOING_READ_CONFIG:
988 case VIRTIO_CCW_DOING_WRITE_CONFIG: 1012 case VIRTIO_CCW_DOING_WRITE_CONFIG:
989 case VIRTIO_CCW_DOING_WRITE_STATUS: 1013 case VIRTIO_CCW_DOING_WRITE_STATUS:
1014 case VIRTIO_CCW_DOING_READ_STATUS:
990 case VIRTIO_CCW_DOING_SET_VQ: 1015 case VIRTIO_CCW_DOING_SET_VQ:
991 case VIRTIO_CCW_DOING_SET_IND: 1016 case VIRTIO_CCW_DOING_SET_IND:
992 case VIRTIO_CCW_DOING_SET_CONF_IND: 1017 case VIRTIO_CCW_DOING_SET_CONF_IND:
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index a9a00169ad91..b2e8c0dfc79c 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3363,7 +3363,7 @@ bfad_im_bsg_els_ct_request(struct bsg_job *job)
3363 struct bfad_fcxp *drv_fcxp; 3363 struct bfad_fcxp *drv_fcxp;
3364 struct bfa_fcs_lport_s *fcs_port; 3364 struct bfa_fcs_lport_s *fcs_port;
3365 struct bfa_fcs_rport_s *fcs_rport; 3365 struct bfa_fcs_rport_s *fcs_rport;
3366 struct fc_bsg_request *bsg_request = bsg_request; 3366 struct fc_bsg_request *bsg_request = job->request;
3367 struct fc_bsg_reply *bsg_reply = job->reply; 3367 struct fc_bsg_reply *bsg_reply = job->reply;
3368 uint32_t command_type = bsg_request->msgcode; 3368 uint32_t command_type = bsg_request->msgcode;
3369 unsigned long flags; 3369 unsigned long flags;
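
`struct fc_bsg_request *bsg_request = bsg_request;` initializes the pointer with its own indeterminate value, a pattern once used to silence maybe-uninitialized warnings that here masked a real bug: every dereference read garbage. The fix pulls the pointer from job->request. A reduced demonstration of why self-initialization compiles but is undefined:

struct job { int *request; };

static int broken(struct job *job)
{
    int *req = req;     /* self-init: value is indeterminate */
    (void)job;
    return *req;        /* undefined behaviour at runtime */
}

static int fixed(struct job *job)
{
    int *req = job->request;
    return *req;        /* reads the intended request */
}
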
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 8fb5c54c7dd3..99b747cedbeb 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -46,6 +46,7 @@
46 46
47#define INITIAL_SRP_LIMIT 800 47#define INITIAL_SRP_LIMIT 800
48#define DEFAULT_MAX_SECTORS 256 48#define DEFAULT_MAX_SECTORS 256
 49#define MAX_TXU (1024 * 1024)
49 50
50static uint max_vdma_size = MAX_H_COPY_RDMA; 51static uint max_vdma_size = MAX_H_COPY_RDMA;
51 52
@@ -1391,7 +1392,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
1391 } 1392 }
1392 1393
1393 info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token, 1394 info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
1394 GFP_KERNEL); 1395 GFP_ATOMIC);
1395 if (!info) { 1396 if (!info) {
1396 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", 1397 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1397 iue->target); 1398 iue->target);
@@ -1443,7 +1444,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
1443 info->mad_version = cpu_to_be32(MAD_VERSION_1); 1444 info->mad_version = cpu_to_be32(MAD_VERSION_1);
1444 info->os_type = cpu_to_be32(LINUX); 1445 info->os_type = cpu_to_be32(LINUX);
1445 memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu)); 1446 memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
1446 info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE); 1447 info->port_max_txu[0] = cpu_to_be32(MAX_TXU);
1447 1448
1448 dma_wmb(); 1449 dma_wmb();
1449 rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn, 1450 rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
@@ -1509,7 +1510,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1509 } 1510 }
1510 1511
1511 cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token, 1512 cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
1512 GFP_KERNEL); 1513 GFP_ATOMIC);
1513 if (!cap) { 1514 if (!cap) {
1514 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", 1515 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1515 iue->target); 1516 iue->target);
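
Switching both dma_alloc_coherent() calls to GFP_ATOMIC implies these MAD handlers run in a context that must not sleep (presumably with a spinlock held or interrupts disabled), where GFP_KERNEL's potential blocking is illegal. The general rule, as a sketch:

#include <linux/dma-mapping.h>

static void *alloc_reply_buf(struct device *dev, size_t size,
                             dma_addr_t *handle)
{
    /* caller cannot sleep here, so the allocation must be atomic:
     * GFP_ATOMIC rather than GFP_KERNEL */
    return dma_alloc_coherent(dev, size, handle, GFP_ATOMIC);
}
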
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 236e4e51d161..7b6bd8ed0d0b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3590,12 +3590,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
3590 } else { 3590 } else {
3591 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 3591 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
3592 lpfc_els_free_data(phba, buf_ptr1); 3592 lpfc_els_free_data(phba, buf_ptr1);
3593 elsiocb->context2 = NULL;
3593 } 3594 }
3594 } 3595 }
3595 3596
3596 if (elsiocb->context3) { 3597 if (elsiocb->context3) {
3597 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; 3598 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
3598 lpfc_els_free_bpl(phba, buf_ptr); 3599 lpfc_els_free_bpl(phba, buf_ptr);
3600 elsiocb->context3 = NULL;
3599 } 3601 }
3600 lpfc_sli_release_iocbq(phba, elsiocb); 3602 lpfc_sli_release_iocbq(phba, elsiocb);
3601 return 0; 3603 return 0;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 4faa7672fc1d..a78a3df68f67 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5954,18 +5954,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5954 5954
5955 free_vfi_bmask: 5955 free_vfi_bmask:
5956 kfree(phba->sli4_hba.vfi_bmask); 5956 kfree(phba->sli4_hba.vfi_bmask);
5957 phba->sli4_hba.vfi_bmask = NULL;
5957 free_xri_ids: 5958 free_xri_ids:
5958 kfree(phba->sli4_hba.xri_ids); 5959 kfree(phba->sli4_hba.xri_ids);
5960 phba->sli4_hba.xri_ids = NULL;
5959 free_xri_bmask: 5961 free_xri_bmask:
5960 kfree(phba->sli4_hba.xri_bmask); 5962 kfree(phba->sli4_hba.xri_bmask);
5963 phba->sli4_hba.xri_bmask = NULL;
5961 free_vpi_ids: 5964 free_vpi_ids:
5962 kfree(phba->vpi_ids); 5965 kfree(phba->vpi_ids);
5966 phba->vpi_ids = NULL;
5963 free_vpi_bmask: 5967 free_vpi_bmask:
5964 kfree(phba->vpi_bmask); 5968 kfree(phba->vpi_bmask);
5969 phba->vpi_bmask = NULL;
5965 free_rpi_ids: 5970 free_rpi_ids:
5966 kfree(phba->sli4_hba.rpi_ids); 5971 kfree(phba->sli4_hba.rpi_ids);
5972 phba->sli4_hba.rpi_ids = NULL;
5967 free_rpi_bmask: 5973 free_rpi_bmask:
5968 kfree(phba->sli4_hba.rpi_bmask); 5974 kfree(phba->sli4_hba.rpi_bmask);
5975 phba->sli4_hba.rpi_bmask = NULL;
5969 err_exit: 5976 err_exit:
5970 return rc; 5977 return rc;
5971} 5978}
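
Both lpfc hunks apply the same hardening: clear the pointer immediately after freeing it, so a later cleanup pass (or an error path re-entered during teardown) sees NULL instead of a dangling pointer and cannot free the same buffer twice. The pattern in miniature:

#include <linux/slab.h>

struct ctx { void *bmask; };

static void ctx_release(struct ctx *c)
{
    kfree(c->bmask);    /* kfree(NULL) is a no-op... */
    c->bmask = NULL;    /* ...so a second release is now harmless */
}
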
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 394fe1338d09..dcb33f4fa687 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -393,6 +393,7 @@ struct MPT3SAS_TARGET {
393 * @eedp_enable: eedp support enable bit 393 * @eedp_enable: eedp support enable bit
394 * @eedp_type: 0(type_1), 1(type_2), 2(type_3) 394 * @eedp_type: 0(type_1), 1(type_2), 2(type_3)
395 * @eedp_block_length: block size 395 * @eedp_block_length: block size
396 * @ata_command_pending: SATL passthrough outstanding for device
396 */ 397 */
397struct MPT3SAS_DEVICE { 398struct MPT3SAS_DEVICE {
398 struct MPT3SAS_TARGET *sas_target; 399 struct MPT3SAS_TARGET *sas_target;
@@ -404,6 +405,17 @@ struct MPT3SAS_DEVICE {
404 u8 ignore_delay_remove; 405 u8 ignore_delay_remove;
405 /* Iopriority Command Handling */ 406 /* Iopriority Command Handling */
406 u8 ncq_prio_enable; 407 u8 ncq_prio_enable;
408 /*
409 * Bug workaround for SATL handling: the mpt2/3sas firmware
410 * doesn't return BUSY or TASK_SET_FULL for subsequent
 411 * commands while a SATL pass through is in operation as the
 412 * spec requires; it simply does nothing with them until the
 413 * pass through completes, which can make them time out if
414 * the passthrough is a long executing command (like format or
415 * secure erase). This variable allows us to do the right
416 * thing while a SATL command is pending.
417 */
418 unsigned long ata_command_pending;
407 419
408}; 420};
409 421
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index b5c966e319d3..75f3fce1c867 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3899,9 +3899,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
3899 } 3899 }
3900} 3900}
3901 3901
3902static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd) 3902static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
3903{ 3903{
3904 return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); 3904 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
3905
3906 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
3907 return 0;
3908
3909 if (pending)
3910 return test_and_set_bit(0, &priv->ata_command_pending);
3911
3912 clear_bit(0, &priv->ata_command_pending);
3913 return 0;
3905} 3914}
3906 3915
3907/** 3916/**
@@ -3925,9 +3934,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
3925 if (!scmd) 3934 if (!scmd)
3926 continue; 3935 continue;
3927 count++; 3936 count++;
3928 if (ata_12_16_cmd(scmd)) 3937 _scsih_set_satl_pending(scmd, false);
3929 scsi_internal_device_unblock(scmd->device,
3930 SDEV_RUNNING);
3931 mpt3sas_base_free_smid(ioc, smid); 3938 mpt3sas_base_free_smid(ioc, smid);
3932 scsi_dma_unmap(scmd); 3939 scsi_dma_unmap(scmd);
3933 if (ioc->pci_error_recovery) 3940 if (ioc->pci_error_recovery)
@@ -4063,13 +4070,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4063 if (ioc->logging_level & MPT_DEBUG_SCSI) 4070 if (ioc->logging_level & MPT_DEBUG_SCSI)
4064 scsi_print_command(scmd); 4071 scsi_print_command(scmd);
4065 4072
4066 /*
4067 * Lock the device for any subsequent command until command is
4068 * done.
4069 */
4070 if (ata_12_16_cmd(scmd))
4071 scsi_internal_device_block(scmd->device);
4072
4073 sas_device_priv_data = scmd->device->hostdata; 4073 sas_device_priv_data = scmd->device->hostdata;
4074 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 4074 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
4075 scmd->result = DID_NO_CONNECT << 16; 4075 scmd->result = DID_NO_CONNECT << 16;
@@ -4083,6 +4083,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4083 return 0; 4083 return 0;
4084 } 4084 }
4085 4085
4086 /*
 4087 * Bug workaround for firmware SATL handling. The loop
4088 * is based on atomic operations and ensures consistency
4089 * since we're lockless at this point
4090 */
4091 do {
4092 if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
4093 scmd->result = SAM_STAT_BUSY;
4094 scmd->scsi_done(scmd);
4095 return 0;
4096 }
4097 } while (_scsih_set_satl_pending(scmd, true));
4098
4086 sas_target_priv_data = sas_device_priv_data->sas_target; 4099 sas_target_priv_data = sas_device_priv_data->sas_target;
4087 4100
4088 /* invalid device handle */ 4101 /* invalid device handle */
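
The replacement gate is lockless: test_and_set_bit() atomically sets the per-device flag and returns whether it was already set, and the do/while re-checks so a racing loser returns SAM_STAT_BUSY instead of issuing a second passthrough. A minimal analogue of the two halves:

#include <linux/bitops.h>

/* returns nonzero when another ATA passthrough already owns the gate */
static int satl_gate_enter(unsigned long *pending, bool is_ata_cmd)
{
    if (!is_ata_cmd)
        return 0;                       /* non-ATA commands always pass */
    return test_and_set_bit(0, pending);/* old value: 1 means busy */
}

static void satl_gate_exit(unsigned long *pending)
{
    clear_bit(0, pending);              /* completion reopens the gate */
}

Callers loop exactly as the hunk does: if the bit is observed set, bounce with BUSY; otherwise attempt to take it, and if the take reports it was already set (a lost race), go around and bounce.
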
@@ -4650,8 +4663,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4650 if (scmd == NULL) 4663 if (scmd == NULL)
4651 return 1; 4664 return 1;
4652 4665
4653 if (ata_12_16_cmd(scmd)) 4666 _scsih_set_satl_pending(scmd, false);
4654 scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
4655 4667
4656 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 4668 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4657 4669
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 47eb4d545d13..f201f4099620 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -243,12 +243,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
243 struct qla_hw_data *ha = vha->hw; 243 struct qla_hw_data *ha = vha->hw;
244 ssize_t rval = 0; 244 ssize_t rval = 0;
245 245
246 mutex_lock(&ha->optrom_mutex);
247
246 if (ha->optrom_state != QLA_SREADING) 248 if (ha->optrom_state != QLA_SREADING)
247 return 0; 249 goto out;
248 250
249 mutex_lock(&ha->optrom_mutex);
250 rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, 251 rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
251 ha->optrom_region_size); 252 ha->optrom_region_size);
253
254out:
252 mutex_unlock(&ha->optrom_mutex); 255 mutex_unlock(&ha->optrom_mutex);
253 256
254 return rval; 257 return rval;
@@ -263,14 +266,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
263 struct device, kobj))); 266 struct device, kobj)));
264 struct qla_hw_data *ha = vha->hw; 267 struct qla_hw_data *ha = vha->hw;
265 268
266 if (ha->optrom_state != QLA_SWRITING) 269 mutex_lock(&ha->optrom_mutex);
270
271 if (ha->optrom_state != QLA_SWRITING) {
272 mutex_unlock(&ha->optrom_mutex);
267 return -EINVAL; 273 return -EINVAL;
268 if (off > ha->optrom_region_size) 274 }
275 if (off > ha->optrom_region_size) {
276 mutex_unlock(&ha->optrom_mutex);
269 return -ERANGE; 277 return -ERANGE;
278 }
270 if (off + count > ha->optrom_region_size) 279 if (off + count > ha->optrom_region_size)
271 count = ha->optrom_region_size - off; 280 count = ha->optrom_region_size - off;
272 281
273 mutex_lock(&ha->optrom_mutex);
274 memcpy(&ha->optrom_buffer[off], buf, count); 282 memcpy(&ha->optrom_buffer[off], buf, count);
275 mutex_unlock(&ha->optrom_mutex); 283 mutex_unlock(&ha->optrom_mutex);
276 284
@@ -753,7 +761,6 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
753 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 761 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
754 struct device, kobj))); 762 struct device, kobj)));
755 int type; 763 int type;
756 int rval = 0;
757 port_id_t did; 764 port_id_t did;
758 765
759 type = simple_strtol(buf, NULL, 10); 766 type = simple_strtol(buf, NULL, 10);
@@ -767,7 +774,7 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
767 774
768 ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type); 775 ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
769 776
770 rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did); 777 qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
771 return count; 778 return count;
772} 779}
773 780
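
Both sysfs hunks fix the same check-then-act race: the optrom state was tested before the mutex was taken, so a concurrent state change could slip in between the check and the buffer access. Check and use now sit in one critical section with a single unlock path. A compilable userspace analogue of the pattern:

#include <pthread.h>
#include <string.h>

enum state { IDLE, READING };

struct hw {
    pthread_mutex_t lock;
    enum state state;
    char staging[64];
};

static long read_buf(struct hw *ha, char *buf, size_t count)
{
    long rval = 0;

    pthread_mutex_lock(&ha->lock);  /* take the lock first... */
    if (ha->state != READING)       /* ...then test the state under it */
        goto out;
    if (count > sizeof(ha->staging))
        count = sizeof(ha->staging);
    memcpy(buf, ha->staging, count);
    rval = (long)count;
out:
    pthread_mutex_unlock(&ha->lock);/* single unlock path */
    return rval;
}
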
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index f7df01b76714..5b1287a63c49 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1556,7 +1556,8 @@ typedef struct {
1556struct atio { 1556struct atio {
1557 uint8_t entry_type; /* Entry type. */ 1557 uint8_t entry_type; /* Entry type. */
1558 uint8_t entry_count; /* Entry count. */ 1558 uint8_t entry_count; /* Entry count. */
1559 uint8_t data[58]; 1559 __le16 attr_n_length;
1560 uint8_t data[56];
1560 uint32_t signature; 1561 uint32_t signature;
1561#define ATIO_PROCESSED 0xDEADDEAD /* Signature */ 1562#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
1562}; 1563};
@@ -2732,7 +2733,7 @@ struct isp_operations {
2732#define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7) 2733#define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
2733#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1) 2734#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1)
2734 2735
2735#define QLA_MSIX_DEFAULT 0x00 2736#define QLA_BASE_VECTORS 2 /* default + RSP */
2736#define QLA_MSIX_RSP_Q 0x01 2737#define QLA_MSIX_RSP_Q 0x01
2737#define QLA_ATIO_VECTOR 0x02 2738#define QLA_ATIO_VECTOR 0x02
2738#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03 2739#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03
@@ -2754,7 +2755,6 @@ struct qla_msix_entry {
2754 uint16_t entry; 2755 uint16_t entry;
2755 char name[30]; 2756 char name[30];
2756 void *handle; 2757 void *handle;
2757 struct irq_affinity_notify irq_notify;
2758 int cpuid; 2758 int cpuid;
2759}; 2759};
2760 2760
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 632d5f30386a..7b6317c8c2e9 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1191,7 +1191,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
1191 1191
1192 /* Wait for soft-reset to complete. */ 1192 /* Wait for soft-reset to complete. */
1193 RD_REG_DWORD(&reg->ctrl_status); 1193 RD_REG_DWORD(&reg->ctrl_status);
1194 for (cnt = 0; cnt < 6000000; cnt++) { 1194 for (cnt = 0; cnt < 60; cnt++) {
1195 barrier(); 1195 barrier();
1196 if ((RD_REG_DWORD(&reg->ctrl_status) & 1196 if ((RD_REG_DWORD(&reg->ctrl_status) &
1197 CSRX_ISP_SOFT_RESET) == 0) 1197 CSRX_ISP_SOFT_RESET) == 0)
@@ -1234,7 +1234,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
1234 RD_REG_DWORD(&reg->hccr); 1234 RD_REG_DWORD(&reg->hccr);
1235 1235
1236 RD_REG_WORD(&reg->mailbox0); 1236 RD_REG_WORD(&reg->mailbox0);
1237 for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 && 1237 for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
1238 rval == QLA_SUCCESS; cnt--) { 1238 rval == QLA_SUCCESS; cnt--) {
1239 barrier(); 1239 barrier();
1240 if (cnt) 1240 if (cnt)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 5093ca9b02ec..dc88a09f9043 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -19,10 +19,6 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
19static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); 19static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
20static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 20static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
21 sts_entry_t *); 21 sts_entry_t *);
22static void qla_irq_affinity_notify(struct irq_affinity_notify *,
23 const cpumask_t *);
24static void qla_irq_affinity_release(struct kref *);
25
26 22
27/** 23/**
28 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 24 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -2496,6 +2492,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2496 if (pkt->entry_status & RF_BUSY) 2492 if (pkt->entry_status & RF_BUSY)
2497 res = DID_BUS_BUSY << 16; 2493 res = DID_BUS_BUSY << 16;
2498 2494
2495 if (pkt->entry_type == NOTIFY_ACK_TYPE &&
2496 pkt->handle == QLA_TGT_SKIP_HANDLE)
2497 return;
2498
2499 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2499 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2500 if (sp) { 2500 if (sp) {
2501 sp->done(ha, sp, res); 2501 sp->done(ha, sp, res);
@@ -2572,14 +2572,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2572 if (!vha->flags.online) 2572 if (!vha->flags.online)
2573 return; 2573 return;
2574 2574
2575 if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
2576 /* if kernel does not notify qla of IRQ's CPU change,
2577 * then set it here.
2578 */
2579 rsp->msix->cpuid = smp_processor_id();
2580 ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
2581 }
2582
2583 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2575 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2584 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 2576 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
2585 2577
@@ -3018,13 +3010,20 @@ static struct qla_init_msix_entry qla82xx_msix_entries[] = {
3018static int 3010static int
3019qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 3011qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3020{ 3012{
3021#define MIN_MSIX_COUNT 2
3022 int i, ret; 3013 int i, ret;
3023 struct qla_msix_entry *qentry; 3014 struct qla_msix_entry *qentry;
3024 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3015 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3016 struct irq_affinity desc = {
3017 .pre_vectors = QLA_BASE_VECTORS,
3018 };
3019
3020 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha))
3021 desc.pre_vectors++;
3022
3023 ret = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS,
3024 ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
3025 &desc);
3025 3026
3026 ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
3027 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3028 if (ret < 0) { 3027 if (ret < 0) {
3029 ql_log(ql_log_fatal, vha, 0x00c7, 3028 ql_log(ql_log_fatal, vha, 0x00c7,
3030 "MSI-X: Failed to enable support, " 3029 "MSI-X: Failed to enable support, "
@@ -3069,13 +3068,10 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3069 qentry->have_irq = 0; 3068 qentry->have_irq = 0;
3070 qentry->in_use = 0; 3069 qentry->in_use = 0;
3071 qentry->handle = NULL; 3070 qentry->handle = NULL;
3072 qentry->irq_notify.notify = qla_irq_affinity_notify;
3073 qentry->irq_notify.release = qla_irq_affinity_release;
3074 qentry->cpuid = -1;
3075 } 3071 }
3076 3072
3077 /* Enable MSI-X vectors for the base queue */ 3073 /* Enable MSI-X vectors for the base queue */
3078 for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) { 3074 for (i = 0; i < QLA_BASE_VECTORS; i++) {
3079 qentry = &ha->msix_entries[i]; 3075 qentry = &ha->msix_entries[i];
3080 qentry->handle = rsp; 3076 qentry->handle = rsp;
3081 rsp->msix = qentry; 3077 rsp->msix = qentry;
@@ -3093,18 +3089,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3093 goto msix_register_fail; 3089 goto msix_register_fail;
3094 qentry->have_irq = 1; 3090 qentry->have_irq = 1;
3095 qentry->in_use = 1; 3091 qentry->in_use = 1;
3096
3097 /* Register for CPU affinity notification. */
3098 irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
3099
3100 /* Schedule work (ie. trigger a notification) to read cpu
3101 * mask for this specific irq.
3102 * kref_get is required because
3103 * irq_affinity_notify() will do
3104 * kref_put().
3105 */
3106 kref_get(&qentry->irq_notify.kref);
3107 schedule_work(&qentry->irq_notify.work);
3108 } 3092 }
3109 3093
3110 /* 3094 /*
@@ -3301,49 +3285,3 @@ int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
3301 msix->handle = qpair; 3285 msix->handle = qpair;
3302 return ret; 3286 return ret;
3303} 3287}
3304
3305
3306/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
3307static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
3308 const cpumask_t *mask)
3309{
3310 struct qla_msix_entry *e =
3311 container_of(notify, struct qla_msix_entry, irq_notify);
3312 struct qla_hw_data *ha;
3313 struct scsi_qla_host *base_vha;
3314 struct rsp_que *rsp = e->handle;
3315
3316 /* user is recommended to set mask to just 1 cpu */
3317 e->cpuid = cpumask_first(mask);
3318
3319 ha = rsp->hw;
3320 base_vha = pci_get_drvdata(ha->pdev);
3321
3322 ql_dbg(ql_dbg_init, base_vha, 0xffff,
3323 "%s: host %ld : vector %d cpu %d \n", __func__,
3324 base_vha->host_no, e->vector, e->cpuid);
3325
3326 if (e->have_irq) {
3327 if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
3328 (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
3329 ha->tgt.rspq_vector_cpuid = e->cpuid;
3330 ql_dbg(ql_dbg_init, base_vha, 0xffff,
3331 "%s: host%ld: rspq vector %d cpu %d runtime change\n",
3332 __func__, base_vha->host_no, e->vector, e->cpuid);
3333 }
3334 }
3335}
3336
3337static void qla_irq_affinity_release(struct kref *ref)
3338{
3339 struct irq_affinity_notify *notify =
3340 container_of(ref, struct irq_affinity_notify, kref);
3341 struct qla_msix_entry *e =
3342 container_of(notify, struct qla_msix_entry, irq_notify);
3343 struct rsp_que *rsp = e->handle;
3344 struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);
3345
3346 ql_dbg(ql_dbg_init, base_vha, 0xffff,
3347 "%s: host%ld: vector %d cpu %d\n", __func__,
3348 base_vha->host_no, e->vector, e->cpuid);
3349}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 2819ceb96041..67f64db390b0 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -10,7 +10,7 @@
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/gfp.h> 11#include <linux/gfp.h>
12 12
13struct rom_cmd { 13static struct rom_cmd {
14 uint16_t cmd; 14 uint16_t cmd;
15} rom_cmds[] = { 15} rom_cmds[] = {
16 { MBC_LOAD_RAM }, 16 { MBC_LOAD_RAM },
@@ -101,12 +101,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
101 return QLA_FUNCTION_TIMEOUT; 101 return QLA_FUNCTION_TIMEOUT;
102 } 102 }
103 103
104 /* if PCI error, then avoid mbx processing.*/ 104 /* if PCI error, then avoid mbx processing.*/
105 if (test_bit(PCI_ERR, &base_vha->dpc_flags)) { 105 if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
106 ql_log(ql_log_warn, vha, 0x1191, 106 ql_log(ql_log_warn, vha, 0x1191,
107 "PCI error, exiting.\n"); 107 "PCI error, exiting.\n");
108 return QLA_FUNCTION_TIMEOUT; 108 return QLA_FUNCTION_TIMEOUT;
109 } 109 }
110 110
111 reg = ha->iobase; 111 reg = ha->iobase;
112 io_lock_on = base_vha->flags.init_done; 112 io_lock_on = base_vha->flags.init_done;
@@ -323,20 +323,33 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
323 } 323 }
324 } else { 324 } else {
325 325
326 uint16_t mb0; 326 uint16_t mb[8];
327 uint32_t ictrl; 327 uint32_t ictrl, host_status, hccr;
328 uint16_t w; 328 uint16_t w;
329 329
330 if (IS_FWI2_CAPABLE(ha)) { 330 if (IS_FWI2_CAPABLE(ha)) {
331 mb0 = RD_REG_WORD(&reg->isp24.mailbox0); 331 mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
332 mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
333 mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
334 mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
335 mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
332 ictrl = RD_REG_DWORD(&reg->isp24.ictrl); 336 ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
337 host_status = RD_REG_DWORD(&reg->isp24.host_status);
338 hccr = RD_REG_DWORD(&reg->isp24.hccr);
339
340 ql_log(ql_log_warn, vha, 0x1119,
341 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
342 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
343 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
344 mb[7], host_status, hccr);
345
333 } else { 346 } else {
334 mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0); 347 mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
335 ictrl = RD_REG_WORD(&reg->isp.ictrl); 348 ictrl = RD_REG_WORD(&reg->isp.ictrl);
349 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
350 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
351 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
336 } 352 }
337 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
338 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
339 "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
340 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); 353 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
341 354
342 /* Capture FW dump only, if PCI device active */ 355 /* Capture FW dump only, if PCI device active */
@@ -684,7 +697,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
684 mbx_cmd_t mc; 697 mbx_cmd_t mc;
685 mbx_cmd_t *mcp = &mc; 698 mbx_cmd_t *mcp = &mc;
686 struct qla_hw_data *ha = vha->hw; 699 struct qla_hw_data *ha = vha->hw;
687 int configured_count;
688 700
689 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a, 701 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
690 "Entered %s.\n", __func__); 702 "Entered %s.\n", __func__);
@@ -707,7 +719,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
707 /*EMPTY*/ 719 /*EMPTY*/
708 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval); 720 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
709 } else { 721 } else {
710 configured_count = mcp->mb[11];
711 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, 722 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
712 "Done %s.\n", __func__); 723 "Done %s.\n", __func__);
713 } 724 }
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 54380b434b30..0a1723cc08cf 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -42,6 +42,11 @@ static int qla82xx_crb_table_initialized;
42 (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \ 42 (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
43 QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20) 43 QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
44 44
45const int MD_MIU_TEST_AGT_RDDATA[] = {
46 0x410000A8, 0x410000AC,
47 0x410000B8, 0x410000BC
48};
49
45static void qla82xx_crb_addr_transform_setup(void) 50static void qla82xx_crb_addr_transform_setup(void)
46{ 51{
47 qla82xx_crb_addr_transform(XDMA); 52 qla82xx_crb_addr_transform(XDMA);
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 6201dce3553b..77624eac95a4 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1176,8 +1176,7 @@ struct qla82xx_md_entry_queue {
1176#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 1176#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
1177#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 1177#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
1178 1178
1179static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC, 1179extern const int MD_MIU_TEST_AGT_RDDATA[4];
1180 0x410000B8, 0x410000BC };
1181 1180
1182#define CRB_NIU_XG_PAUSE_CTL_P0 0x1 1181#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
1183#define CRB_NIU_XG_PAUSE_CTL_P1 0x8 1182#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
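
Moving the table out of the header fixes a classic C pitfall: a `static const` array defined in a header gets a separate copy in every translation unit that includes it, plus defined-but-not-used warnings. The array now has exactly one definition in a .c file, and the header carries only a declaration. In miniature:

/* regs.h -- declaration only, safe to include anywhere */
extern const int md_rddata[4];

/* regs.c -- the single definition */
const int md_rddata[4] = {
    0x410000A8, 0x410000AC,
    0x410000B8, 0x410000BC,
};
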
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 007192d7bad8..dc1ec9b61027 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -15,6 +15,23 @@
15 15
16#define TIMEOUT_100_MS 100 16#define TIMEOUT_100_MS 100
17 17
18static const uint32_t qla8044_reg_tbl[] = {
19 QLA8044_PEG_HALT_STATUS1,
20 QLA8044_PEG_HALT_STATUS2,
21 QLA8044_PEG_ALIVE_COUNTER,
22 QLA8044_CRB_DRV_ACTIVE,
23 QLA8044_CRB_DEV_STATE,
24 QLA8044_CRB_DRV_STATE,
25 QLA8044_CRB_DRV_SCRATCH,
26 QLA8044_CRB_DEV_PART_INFO1,
27 QLA8044_CRB_IDC_VER_MAJOR,
28 QLA8044_FW_VER_MAJOR,
29 QLA8044_FW_VER_MINOR,
30 QLA8044_FW_VER_SUB,
31 QLA8044_CMDPEG_STATE,
32 QLA8044_ASIC_TEMP,
33};
34
18/* 8044 Flash Read/Write functions */ 35/* 8044 Flash Read/Write functions */
19uint32_t 36uint32_t
20qla8044_rd_reg(struct qla_hw_data *ha, ulong addr) 37qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
index 02fe3c4cdf55..83c1b7e17c80 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.h
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -535,23 +535,6 @@ enum qla_regs {
535#define CRB_CMDPEG_CHECK_RETRY_COUNT 60 535#define CRB_CMDPEG_CHECK_RETRY_COUNT 60
536#define CRB_CMDPEG_CHECK_DELAY 500 536#define CRB_CMDPEG_CHECK_DELAY 500
537 537
538static const uint32_t qla8044_reg_tbl[] = {
539 QLA8044_PEG_HALT_STATUS1,
540 QLA8044_PEG_HALT_STATUS2,
541 QLA8044_PEG_ALIVE_COUNTER,
542 QLA8044_CRB_DRV_ACTIVE,
543 QLA8044_CRB_DEV_STATE,
544 QLA8044_CRB_DRV_STATE,
545 QLA8044_CRB_DRV_SCRATCH,
546 QLA8044_CRB_DEV_PART_INFO1,
547 QLA8044_CRB_IDC_VER_MAJOR,
548 QLA8044_FW_VER_MAJOR,
549 QLA8044_FW_VER_MINOR,
550 QLA8044_FW_VER_SUB,
551 QLA8044_CMDPEG_STATE,
552 QLA8044_ASIC_TEMP,
553};
554
555/* MiniDump Structures */ 538/* MiniDump Structures */
556 539
557/* Driver_code is for driver to write some info about the entry 540/* Driver_code is for driver to write some info about the entry
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8521cfe302e9..0a000ecf0881 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -466,7 +466,7 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
466 continue; 466 continue;
467 467
468 rsp = ha->rsp_q_map[cnt]; 468 rsp = ha->rsp_q_map[cnt];
469 clear_bit(cnt, ha->req_qid_map); 469 clear_bit(cnt, ha->rsp_qid_map);
470 ha->rsp_q_map[cnt] = NULL; 470 ha->rsp_q_map[cnt] = NULL;
471 spin_unlock_irqrestore(&ha->hardware_lock, flags); 471 spin_unlock_irqrestore(&ha->hardware_lock, flags);
472 qla2x00_free_rsp_que(ha, rsp); 472 qla2x00_free_rsp_que(ha, rsp);
@@ -3662,7 +3662,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3662 sizeof(struct ct6_dsd), 0, 3662 sizeof(struct ct6_dsd), 0,
3663 SLAB_HWCACHE_ALIGN, NULL); 3663 SLAB_HWCACHE_ALIGN, NULL);
3664 if (!ctx_cachep) 3664 if (!ctx_cachep)
3665 goto fail_free_gid_list; 3665 goto fail_free_srb_mempool;
3666 } 3666 }
3667 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, 3667 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
3668 ctx_cachep); 3668 ctx_cachep);
@@ -3815,7 +3815,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3815 ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long), 3815 ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
3816 GFP_KERNEL); 3816 GFP_KERNEL);
3817 if (!ha->loop_id_map) 3817 if (!ha->loop_id_map)
3818 goto fail_async_pd; 3818 goto fail_loop_id_map;
3819 else { 3819 else {
3820 qla2x00_set_reserved_loop_ids(ha); 3820 qla2x00_set_reserved_loop_ids(ha);
3821 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, 3821 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
@@ -3824,6 +3824,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3824 3824
3825 return 0; 3825 return 0;
3826 3826
3827fail_loop_id_map:
3828 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
3827fail_async_pd: 3829fail_async_pd:
3828 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); 3830 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
3829fail_ex_init_cb: 3831fail_ex_init_cb:
@@ -3851,6 +3853,10 @@ fail_free_ms_iocb:
3851 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 3853 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
3852 ha->ms_iocb = NULL; 3854 ha->ms_iocb = NULL;
3853 ha->ms_iocb_dma = 0; 3855 ha->ms_iocb_dma = 0;
3856
3857 if (ha->sns_cmd)
3858 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
3859 ha->sns_cmd, ha->sns_cmd_dma);
3854fail_dma_pool: 3860fail_dma_pool:
3855 if (IS_QLA82XX(ha) || ql2xenabledif) { 3861 if (IS_QLA82XX(ha) || ql2xenabledif) {
3856 dma_pool_destroy(ha->fcp_cmnd_dma_pool); 3862 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
@@ -3868,10 +3874,12 @@ fail_free_nvram:
3868 kfree(ha->nvram); 3874 kfree(ha->nvram);
3869 ha->nvram = NULL; 3875 ha->nvram = NULL;
3870fail_free_ctx_mempool: 3876fail_free_ctx_mempool:
3871 mempool_destroy(ha->ctx_mempool); 3877 if (ha->ctx_mempool)
3878 mempool_destroy(ha->ctx_mempool);
3872 ha->ctx_mempool = NULL; 3879 ha->ctx_mempool = NULL;
3873fail_free_srb_mempool: 3880fail_free_srb_mempool:
3874 mempool_destroy(ha->srb_mempool); 3881 if (ha->srb_mempool)
3882 mempool_destroy(ha->srb_mempool);
3875 ha->srb_mempool = NULL; 3883 ha->srb_mempool = NULL;
3876fail_free_gid_list: 3884fail_free_gid_list:
3877 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 3885 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
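
The qla_os.c error paths above rely on the kernel's goto-unwind convention: each allocation gets a label positioned so that jumping to it releases exactly what was set up before the failing step, which is what the new fail_loop_id_map label and the retargeted gotos restore. A minimal userspace sketch of the convention (demo names; malloc stands in for the driver's pools):

#include <stdlib.h>

/* Each allocation gets a label positioned so that jumping to it frees
 * exactly what was set up before the failing step -- nothing more,
 * nothing less. */
int demo_alloc(void)
{
	char *a, *b, *c;

	a = malloc(16);
	if (!a)
		goto fail;
	b = malloc(16);
	if (!b)
		goto fail_free_a;
	c = malloc(16);
	if (!c)
		goto fail_free_b;	/* not fail_free_a: b would leak */

	free(c);
	free(b);
	free(a);
	return 0;

fail_free_b:
	free(b);
fail_free_a:
	free(a);
fail:
	return -1;
}

int main(void)
{
	return demo_alloc();
}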
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index bff9689f5ca9..e4fda84b959e 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -668,11 +668,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
668{ 668{
669 struct qla_hw_data *ha = vha->hw; 669 struct qla_hw_data *ha = vha->hw;
670 struct qla_tgt_sess *sess = NULL; 670 struct qla_tgt_sess *sess = NULL;
671 uint32_t unpacked_lun, lun = 0;
672 uint16_t loop_id; 671 uint16_t loop_id;
673 int res = 0; 672 int res = 0;
674 struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; 673 struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
675 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
676 unsigned long flags; 674 unsigned long flags;
677 675
678 loop_id = le16_to_cpu(n->u.isp24.nport_handle); 676 loop_id = le16_to_cpu(n->u.isp24.nport_handle);
@@ -725,11 +723,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
725 "loop_id %d)\n", vha->host_no, sess, sess->port_name, 723 "loop_id %d)\n", vha->host_no, sess, sess->port_name,
726 mcmd, loop_id); 724 mcmd, loop_id);
727 725
728 lun = a->u.isp24.fcp_cmnd.lun; 726 return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
729 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
730
731 return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
732 iocb, QLA24XX_MGMT_SEND_NACK);
733} 727}
734 728
735/* ha->tgt.sess_lock supposed to be held on entry */ 729/* ha->tgt.sess_lock supposed to be held on entry */
@@ -3067,7 +3061,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3067 3061
3068 pkt->entry_type = NOTIFY_ACK_TYPE; 3062 pkt->entry_type = NOTIFY_ACK_TYPE;
3069 pkt->entry_count = 1; 3063 pkt->entry_count = 1;
3070 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 3064 pkt->handle = QLA_TGT_SKIP_HANDLE;
3071 3065
3072 nack = (struct nack_to_isp *)pkt; 3066 nack = (struct nack_to_isp *)pkt;
3073 nack->ox_id = ntfy->ox_id; 3067 nack->ox_id = ntfy->ox_id;
@@ -3110,6 +3104,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3110#if 0 /* Todo */ 3104#if 0 /* Todo */
3111 if (rc == -ENOMEM) 3105 if (rc == -ENOMEM)
3112 qlt_alloc_qfull_cmd(vha, imm, 0, 0); 3106 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
3107#else
3108 if (rc) {
3109 }
3113#endif 3110#endif
3114 goto done; 3111 goto done;
3115 } 3112 }
@@ -6457,12 +6454,29 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6457 if (!vha->flags.online) 6454 if (!vha->flags.online)
6458 return; 6455 return;
6459 6456
6460 while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) { 6457 while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
6458 fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
6461 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 6459 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6462 cnt = pkt->u.raw.entry_count; 6460 cnt = pkt->u.raw.entry_count;
6463 6461
6464 qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt, 6462 if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
6465 ha_locked); 6463 /*
6464 * This packet is corrupted. The header + payload
6465 * can not be trusted. There is no point in passing
6466 * it further up.
6467 */
6468 ql_log(ql_log_warn, vha, 0xffff,
6469 "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
6470 pkt->u.isp24.fcp_hdr.s_id,
6471 be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
6472 le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
6473
6474 adjust_corrupted_atio(pkt);
6475 qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0);
6476 } else {
6477 qlt_24xx_atio_pkt_all_vps(vha,
6478 (struct atio_from_isp *)pkt, ha_locked);
6479 }
6466 6480
6467 for (i = 0; i < cnt; i++) { 6481 for (i = 0; i < cnt; i++) {
6468 ha->tgt.atio_ring_index++; 6482 ha->tgt.atio_ring_index++;
@@ -6545,6 +6559,13 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6545 6559
6546 /* Disable Full Login after LIP */ 6560 /* Disable Full Login after LIP */
6547 nv->host_p &= cpu_to_le32(~BIT_10); 6561 nv->host_p &= cpu_to_le32(~BIT_10);
6562
6563 /*
6564 * clear BIT 15 explicitly as we have seen at least
6565 * a couple of instances where this was set and this
6566 * was causing the firmware to not be initialized.
6567 */
6568 nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
6548 /* Enable target PRLI control */ 6569 /* Enable target PRLI control */
6549 nv->firmware_options_2 |= cpu_to_le32(BIT_14); 6570 nv->firmware_options_2 |= cpu_to_le32(BIT_14);
6550 } else { 6571 } else {
@@ -6560,9 +6581,6 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6560 return; 6581 return;
6561 } 6582 }
6562 6583
6563 /* out-of-order frames reassembly */
6564 nv->firmware_options_3 |= BIT_6|BIT_9;
6565
6566 if (ha->tgt.enable_class_2) { 6584 if (ha->tgt.enable_class_2) {
6567 if (vha->flags.init_done) 6585 if (vha->flags.init_done)
6568 fc_host_supported_classes(vha->host) = 6586 fc_host_supported_classes(vha->host) =
@@ -6629,11 +6647,17 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
6629 /* Disable ini mode, if requested */ 6647 /* Disable ini mode, if requested */
6630 if (!qla_ini_mode_enabled(vha)) 6648 if (!qla_ini_mode_enabled(vha))
6631 nv->firmware_options_1 |= cpu_to_le32(BIT_5); 6649 nv->firmware_options_1 |= cpu_to_le32(BIT_5);
6632
6633 /* Disable Full Login after LIP */ 6650 /* Disable Full Login after LIP */
6634 nv->firmware_options_1 &= cpu_to_le32(~BIT_13); 6651 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6635 /* Enable initial LIP */ 6652 /* Enable initial LIP */
6636 nv->firmware_options_1 &= cpu_to_le32(~BIT_9); 6653 nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
6654 /*
6655 * clear BIT 15 explicitly as we have seen at
6656 * least a couple of instances where this was set
6657 * and this was causing the firmware to not be
6658 * initialized.
6659 */
6660 nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
6637 if (ql2xtgt_tape_enable) 6661 if (ql2xtgt_tape_enable)
6638 /* Enable FC tape support */ 6662 /* Enable FC tape support */
6639 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 6663 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
@@ -6658,9 +6682,6 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
6658 return; 6682 return;
6659 } 6683 }
6660 6684
6661 /* out-of-order frames reassembly */
6662 nv->firmware_options_3 |= BIT_6|BIT_9;
6663
6664 if (ha->tgt.enable_class_2) { 6685 if (ha->tgt.enable_class_2) {
6665 if (vha->flags.init_done) 6686 if (vha->flags.init_done)
6666 fc_host_supported_classes(vha->host) = 6687 fc_host_supported_classes(vha->host) =
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index f26c5f60eedd..0824a8164a24 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -427,13 +427,33 @@ struct atio_from_isp {
427 struct { 427 struct {
428 uint8_t entry_type; /* Entry type. */ 428 uint8_t entry_type; /* Entry type. */
429 uint8_t entry_count; /* Entry count. */ 429 uint8_t entry_count; /* Entry count. */
430 uint8_t data[58]; 430 __le16 attr_n_length;
431#define FCP_CMD_LENGTH_MASK 0x0fff
432#define FCP_CMD_LENGTH_MIN 0x38
433 uint8_t data[56];
431 uint32_t signature; 434 uint32_t signature;
432#define ATIO_PROCESSED 0xDEADDEAD /* Signature */ 435#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
433 } raw; 436 } raw;
434 } u; 437 } u;
435} __packed; 438} __packed;
436 439
440static inline int fcpcmd_is_corrupted(struct atio *atio)
441{
442 if (atio->entry_type == ATIO_TYPE7 &&
443 (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) <
444 FCP_CMD_LENGTH_MIN))
445 return 1;
446 else
447 return 0;
448}
449
450/* adjust corrupted atio so we won't trip over the same entry again. */
451static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
452{
453 atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN);
454 atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
455}
456
437#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ 457#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
438 458
439/* 459/*
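
The new attr_n_length field and the two inline helpers above treat the low 12 bits of bytes 2-3 of a type-7 ATIO as the received frame length, and declare the entry corrupt when that length is below the smallest legal FCP_CMND frame (0x38 bytes). A standalone model of the check, assuming a little-endian host so the __le16 load becomes a plain load; the entry-type value and demo_ names are illustrative, and the mask here is applied after the endian conversion, the conventional ordering for a CPU-side compare:

#include <stdint.h>
#include <stdio.h>

#define FCP_CMD_LENGTH_MASK 0x0fff	/* values from the hunk above */
#define FCP_CMD_LENGTH_MIN  0x38
#define DEMO_ATIO_TYPE7     0x06	/* entry-type value illustrative */

struct demo_atio {
	uint8_t  entry_type;
	uint8_t  entry_count;
	uint16_t attr_n_length;		/* low 12 bits: frame length */
};

static int demo_fcpcmd_is_corrupted(const struct demo_atio *atio)
{
	return atio->entry_type == DEMO_ATIO_TYPE7 &&
	       (atio->attr_n_length & FCP_CMD_LENGTH_MASK) <
	       FCP_CMD_LENGTH_MIN;
}

int main(void)
{
	struct demo_atio ok  = { DEMO_ATIO_TYPE7, 1, 0x38 };
	struct demo_atio bad = { DEMO_ATIO_TYPE7, 1, 0x10 };	/* runt */

	printf("ok:  %d\n", demo_fcpcmd_is_corrupted(&ok));	/* 0 */
	printf("bad: %d\n", demo_fcpcmd_is_corrupted(&bad));	/* 1 */
	return 0;
}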
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 36935c9ed669..8a58ef3adab4 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -433,6 +433,18 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
433 count++; 433 count++;
434 } 434 }
435 } 435 }
436 } else if (QLA_TGT_MODE_ENABLED() &&
437 ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
438 struct qla_hw_data *ha = vha->hw;
439 struct atio *atr = ha->tgt.atio_ring;
440
441 if (atr || !buf) {
442 length = ha->tgt.atio_q_length;
443 qla27xx_insert16(0, buf, len);
444 qla27xx_insert16(length, buf, len);
445 qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
446 count++;
447 }
436 } else { 448 } else {
437 ql_dbg(ql_dbg_misc, vha, 0xd026, 449 ql_dbg(ql_dbg_misc, vha, 0xd026,
438 "%s: unknown queue %x\n", __func__, ent->t263.queue_type); 450 "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
@@ -676,6 +688,18 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
676 count++; 688 count++;
677 } 689 }
678 } 690 }
691 } else if (QLA_TGT_MODE_ENABLED() &&
692 ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
693 struct qla_hw_data *ha = vha->hw;
694 struct atio *atr = ha->tgt.atio_ring_ptr;
695
696 if (atr || !buf) {
697 qla27xx_insert16(0, buf, len);
698 qla27xx_insert16(1, buf, len);
699 qla27xx_insert32(ha->tgt.atio_q_in ?
700 readl(ha->tgt.atio_q_in) : 0, buf, len);
701 count++;
702 }
679 } else { 703 } else {
680 ql_dbg(ql_dbg_misc, vha, 0xd02f, 704 ql_dbg(ql_dbg_misc, vha, 0xd02f,
681 "%s: unknown queue %x\n", __func__, ent->t274.queue_type); 705 "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 6643f6fc7795..d925910be761 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1800,7 +1800,7 @@ static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item,
1800{ 1800{
1801 return sprintf(page, 1801 return sprintf(page,
1802 "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on " 1802 "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
1803 UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, 1803 UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
1804 utsname()->machine); 1804 utsname()->machine);
1805} 1805}
1806 1806
@@ -1906,7 +1906,7 @@ static int tcm_qla2xxx_register_configfs(void)
1906 int ret; 1906 int ret;
1907 1907
1908 pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on " 1908 pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
1909 UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, 1909 UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
1910 utsname()->machine); 1910 utsname()->machine);
1911 1911
1912 ret = target_register_template(&tcm_qla2xxx_ops); 1912 ret = target_register_template(&tcm_qla2xxx_ops);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 37e026a4823d..cf8430be183b 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -1,7 +1,6 @@
1#include <target/target_core_base.h> 1#include <target/target_core_base.h>
2#include <linux/btree.h> 2#include <linux/btree.h>
3 3
4#define TCM_QLA2XXX_VERSION "v0.1"
5/* length of ASCII WWPNs including pad */ 4/* length of ASCII WWPNs including pad */
6#define TCM_QLA2XXX_NAMELEN 32 5#define TCM_QLA2XXX_NAMELEN 32
7/* 6/*
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 1fbb1ecf49f2..1f5d92a25a49 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -836,6 +836,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
836 struct bio *bio = rq->bio; 836 struct bio *bio = rq->bio;
837 sector_t sector = blk_rq_pos(rq); 837 sector_t sector = blk_rq_pos(rq);
838 unsigned int nr_sectors = blk_rq_sectors(rq); 838 unsigned int nr_sectors = blk_rq_sectors(rq);
839 unsigned int nr_bytes = blk_rq_bytes(rq);
839 int ret; 840 int ret;
840 841
841 if (sdkp->device->no_write_same) 842 if (sdkp->device->no_write_same)
@@ -868,7 +869,21 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
868 869
869 cmd->transfersize = sdp->sector_size; 870 cmd->transfersize = sdp->sector_size;
870 cmd->allowed = SD_MAX_RETRIES; 871 cmd->allowed = SD_MAX_RETRIES;
871 return scsi_init_io(cmd); 872
873 /*
874 * For WRITE SAME the data transferred via the DATA OUT buffer is
875 * different from the amount of data actually written to the target.
876 *
877 * We set up __data_len to the amount of data transferred via the
878 * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
879 * to transfer a single sector of data first, but then reset it to
880 * the amount of data to be written right after so that the I/O path
881 * knows how much to actually write.
882 */
883 rq->__data_len = sdp->sector_size;
884 ret = scsi_init_io(cmd);
885 rq->__data_len = nr_bytes;
886 return ret;
872} 887}
873 888
874static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd) 889static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
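
The long comment in the WRITE SAME hunk describes a deliberate two-phase use of one field: blk_rq_map_sg() must build an S/G list for the single sector carried in the DATA OUT buffer, while the rest of the I/O path must keep seeing the full number of bytes being written, so __data_len is narrowed around scsi_init_io() and then restored. A toy model of that save/override/restore pattern (all names illustrative):

#include <stdio.h>

struct demo_rq {
	unsigned int data_len;		/* stands in for rq->__data_len */
};

/* scsi_init_io()/blk_rq_map_sg() stand-in: must see one sector. */
static void demo_map_sg(const struct demo_rq *rq)
{
	printf("mapping S/G for %u bytes\n", rq->data_len);
}

int main(void)
{
	struct demo_rq rq = { .data_len = 8 * 512 };	/* 8-sector write */
	unsigned int nr_bytes = rq.data_len;
	unsigned int sector_size = 512;

	rq.data_len = sector_size;	/* DATA OUT carries one sector */
	demo_map_sg(&rq);
	rq.data_len = nr_bytes;		/* I/O path sees the real size */

	printf("completing %u bytes\n", rq.data_len);
	return 0;
}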
@@ -2585,7 +2600,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2585 if (sdp->broken_fua) { 2600 if (sdp->broken_fua) {
2586 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); 2601 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
2587 sdkp->DPOFUA = 0; 2602 sdkp->DPOFUA = 0;
2588 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { 2603 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
2604 !sdkp->device->use_16_for_rw) {
2589 sd_first_printk(KERN_NOTICE, sdkp, 2605 sd_first_printk(KERN_NOTICE, sdkp,
2590 "Uses READ/WRITE(6), disabling FUA\n"); 2606 "Uses READ/WRITE(6), disabling FUA\n");
2591 sdkp->DPOFUA = 0; 2607 sdkp->DPOFUA = 0;
@@ -2768,13 +2784,21 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
2768 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); 2784 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
2769 } 2785 }
2770 2786
2771 sdkp->zoned = (buffer[8] >> 4) & 3; 2787 if (sdkp->device->type == TYPE_ZBC) {
2772 if (sdkp->zoned == 1) 2788 /* Host-managed */
2773 q->limits.zoned = BLK_ZONED_HA;
2774 else if (sdkp->device->type == TYPE_ZBC)
2775 q->limits.zoned = BLK_ZONED_HM; 2789 q->limits.zoned = BLK_ZONED_HM;
2776 else 2790 } else {
2777 q->limits.zoned = BLK_ZONED_NONE; 2791 sdkp->zoned = (buffer[8] >> 4) & 3;
2792 if (sdkp->zoned == 1)
2793 /* Host-aware */
2794 q->limits.zoned = BLK_ZONED_HA;
2795 else
2796 /*
2797 * Treat drive-managed devices as
2798 * regular block devices.
2799 */
2800 q->limits.zoned = BLK_ZONED_NONE;
2801 }
2778 if (blk_queue_is_zoned(q) && sdkp->first_scan) 2802 if (blk_queue_is_zoned(q) && sdkp->first_scan)
2779 sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", 2803 sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
2780 q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware"); 2804 q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
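
The reworked classification above gives the SCSI device type priority over the VPD page's zoned field: TYPE_ZBC means host-managed regardless of the reported bits, otherwise bits 4-5 of byte 8 select host-aware (1), with drive-managed (2) and not-reported (0) both treated as a regular block device. The decision table as a standalone sketch:

#include <stdio.h>

enum demo_zoned { ZONED_NONE, ZONED_HA, ZONED_HM };

/* Device type wins over the VPD "zoned" field; drive-managed and
 * not-reported both fall back to a regular block device. */
static enum demo_zoned classify(int is_zbc_type, unsigned int vpd_byte8)
{
	if (is_zbc_type)
		return ZONED_HM;		/* host-managed */
	if (((vpd_byte8 >> 4) & 3) == 1)
		return ZONED_HA;		/* host-aware */
	return ZONED_NONE;			/* incl. drive-managed */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(1, 0x00),	/* 2: ZBC -> host-managed */
	       classify(0, 0x10),	/* 1: host-aware */
	       classify(0, 0x20));	/* 0: drive-managed -> none */
	return 0;
}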
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 8c9a35c91705..50adabbb5808 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
587 587
588 ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); 588 ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
589 589
590 if (scsi_is_sas_rphy(&sdev->sdev_gendev)) 590 if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent))
591 efd.addr = sas_get_address(sdev); 591 efd.addr = sas_get_address(sdev);
592 592
593 if (efd.addr) { 593 if (efd.addr) {
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 8823cc81ae45..5bb376009d98 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -459,6 +459,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
459 459
460 if (IS_ERR(task)) { 460 if (IS_ERR(task)) {
461 dev_err(dev, "can't create rproc_boot thread\n"); 461 dev_err(dev, "can't create rproc_boot thread\n");
462 ret = PTR_ERR(task);
462 goto err_put_rproc; 463 goto err_put_rproc;
463 } 464 }
464 465
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ec4aa252d6e8..2922a9908302 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -378,6 +378,7 @@ config SPI_FSL_SPI
378config SPI_FSL_DSPI 378config SPI_FSL_DSPI
379 tristate "Freescale DSPI controller" 379 tristate "Freescale DSPI controller"
380 select REGMAP_MMIO 380 select REGMAP_MMIO
381 depends on HAS_DMA
381 depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST 382 depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
382 help 383 help
383 This enables support for the Freescale DSPI controller in master 384 This enables support for the Freescale DSPI controller in master
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index e89da0af45d2..0314c6b9e044 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -800,7 +800,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
800 struct spi_master *master; 800 struct spi_master *master;
801 struct a3700_spi *spi; 801 struct a3700_spi *spi;
802 u32 num_cs = 0; 802 u32 num_cs = 0;
803 int ret = 0; 803 int irq, ret = 0;
804 804
805 master = spi_alloc_master(dev, sizeof(*spi)); 805 master = spi_alloc_master(dev, sizeof(*spi));
806 if (!master) { 806 if (!master) {
@@ -825,7 +825,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
825 master->unprepare_message = a3700_spi_unprepare_message; 825 master->unprepare_message = a3700_spi_unprepare_message;
826 master->set_cs = a3700_spi_set_cs; 826 master->set_cs = a3700_spi_set_cs;
827 master->flags = SPI_MASTER_HALF_DUPLEX; 827 master->flags = SPI_MASTER_HALF_DUPLEX;
828 master->mode_bits |= (SPI_RX_DUAL | SPI_RX_DUAL | 828 master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
829 SPI_RX_QUAD | SPI_TX_QUAD); 829 SPI_RX_QUAD | SPI_TX_QUAD);
830 830
831 platform_set_drvdata(pdev, master); 831 platform_set_drvdata(pdev, master);
@@ -846,12 +846,13 @@ static int a3700_spi_probe(struct platform_device *pdev)
846 goto error; 846 goto error;
847 } 847 }
848 848
849 spi->irq = platform_get_irq(pdev, 0); 849 irq = platform_get_irq(pdev, 0);
850 if (spi->irq < 0) { 850 if (irq < 0) {
851 dev_err(dev, "could not get irq: %d\n", spi->irq); 851 dev_err(dev, "could not get irq: %d\n", irq);
852 ret = -ENXIO; 852 ret = -ENXIO;
853 goto error; 853 goto error;
854 } 854 }
855 spi->irq = irq;
855 856
856 init_completion(&spi->done); 857 init_completion(&spi->done);
857 858
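
The a3700 probe fix routes platform_get_irq() through a local int before storing it, presumably because the driver's irq field is unsigned: a negative errno assigned straight to an unsigned field makes the subsequent "< 0" test always false. A standalone demonstration with a fake getter:

#include <stdio.h>

static int fake_platform_get_irq(void)
{
	return -6;	/* -ENXIO: no interrupt found */
}

struct demo_spi {
	unsigned int irq;
};

int main(void)
{
	struct demo_spi spi;
	int irq;

	spi.irq = fake_platform_get_irq();	/* -6 wraps to a huge value */
	printf("buggy check fires: %d\n", spi.irq < 0);		/* 0 */

	irq = fake_platform_get_irq();
	printf("fixed check fires: %d\n", irq < 0);		/* 1 */
	if (irq >= 0)
		spi.irq = irq;		/* store only once known valid */
	return 0;
}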
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 319225d7e761..6ab4c7700228 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -494,7 +494,8 @@ static int spi_engine_probe(struct platform_device *pdev)
494 SPI_ENGINE_VERSION_MAJOR(version), 494 SPI_ENGINE_VERSION_MAJOR(version),
495 SPI_ENGINE_VERSION_MINOR(version), 495 SPI_ENGINE_VERSION_MINOR(version),
496 SPI_ENGINE_VERSION_PATCH(version)); 496 SPI_ENGINE_VERSION_PATCH(version));
497 return -ENODEV; 497 ret = -ENODEV;
498 goto err_put_master;
498 } 499 }
499 500
500 spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk"); 501 spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index d36c11b73a35..02fb96797ac8 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -646,7 +646,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
646 buf = t->rx_buf; 646 buf = t->rx_buf;
647 t->rx_dma = dma_map_single(&spi->dev, buf, 647 t->rx_dma = dma_map_single(&spi->dev, buf,
648 t->len, DMA_FROM_DEVICE); 648 t->len, DMA_FROM_DEVICE);
649 if (!t->rx_dma) { 649 if (dma_mapping_error(&spi->dev, t->rx_dma)) {
650 ret = -EFAULT; 650 ret = -EFAULT;
651 goto err_rx_map; 651 goto err_rx_map;
652 } 652 }
@@ -660,7 +660,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
660 buf = (void *)t->tx_buf; 660 buf = (void *)t->tx_buf;
661 t->tx_dma = dma_map_single(&spi->dev, buf, 661 t->tx_dma = dma_map_single(&spi->dev, buf,
662 t->len, DMA_TO_DEVICE); 662 t->len, DMA_TO_DEVICE);
663 if (!t->tx_dma) { 663 if (dma_mapping_error(&spi->dev, t->tx_dma)) {
664 ret = -EFAULT; 664 ret = -EFAULT;
665 goto err_tx_map; 665 goto err_tx_map;
666 } 666 }
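
The davinci change above switches to the DMA API's documented failure check: dma_map_single() does not signal failure by returning 0, and 0 can be a valid bus address, so the handle must be tested with dma_mapping_error(). A kernel-context fragment of the correct pairing (not a standalone program):

/*
 * dma_map_single() failures must be detected with dma_mapping_error(),
 * never by comparing the returned handle against 0.
 */
dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, handle))
	return -EFAULT;		/* the davinci paths return -EFAULT too */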
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index e31971f91475..837cb8d0bac6 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -274,11 +274,11 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
274static void mid_spi_dma_stop(struct dw_spi *dws) 274static void mid_spi_dma_stop(struct dw_spi *dws)
275{ 275{
276 if (test_bit(TX_BUSY, &dws->dma_chan_busy)) { 276 if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
277 dmaengine_terminate_all(dws->txchan); 277 dmaengine_terminate_sync(dws->txchan);
278 clear_bit(TX_BUSY, &dws->dma_chan_busy); 278 clear_bit(TX_BUSY, &dws->dma_chan_busy);
279 } 279 }
280 if (test_bit(RX_BUSY, &dws->dma_chan_busy)) { 280 if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
281 dmaengine_terminate_all(dws->rxchan); 281 dmaengine_terminate_sync(dws->rxchan);
282 clear_bit(RX_BUSY, &dws->dma_chan_busy); 282 clear_bit(RX_BUSY, &dws->dma_chan_busy);
283 } 283 }
284} 284}
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index b715a26a9148..054012f87567 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -107,7 +107,10 @@ static const struct file_operations dw_spi_regs_ops = {
107 107
108static int dw_spi_debugfs_init(struct dw_spi *dws) 108static int dw_spi_debugfs_init(struct dw_spi *dws)
109{ 109{
110 dws->debugfs = debugfs_create_dir("dw_spi", NULL); 110 char name[128];
111
112 snprintf(name, 128, "dw_spi-%s", dev_name(&dws->master->dev));
113 dws->debugfs = debugfs_create_dir(name, NULL);
111 if (!dws->debugfs) 114 if (!dws->debugfs)
112 return -ENOMEM; 115 return -ENOMEM;
113 116
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index dd7b5b47291d..d6239fa718be 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1690,6 +1690,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1690 pxa2xx_spi_write(drv_data, SSCR1, tmp); 1690 pxa2xx_spi_write(drv_data, SSCR1, tmp);
1691 tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8); 1691 tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
1692 pxa2xx_spi_write(drv_data, SSCR0, tmp); 1692 pxa2xx_spi_write(drv_data, SSCR0, tmp);
1693 break;
1693 default: 1694 default:
1694 tmp = SSCR1_RxTresh(RX_THRESH_DFLT) | 1695 tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
1695 SSCR1_TxTresh(TX_THRESH_DFLT); 1696 SSCR1_TxTresh(TX_THRESH_DFLT);
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 0012ad02e569..1f00eeb0b5a3 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -973,14 +973,16 @@ static const struct sh_msiof_chipdata r8a779x_data = {
973}; 973};
974 974
975static const struct of_device_id sh_msiof_match[] = { 975static const struct of_device_id sh_msiof_match[] = {
976 { .compatible = "renesas,sh-msiof", .data = &sh_data },
977 { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data }, 976 { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
978 { .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data }, 977 { .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data },
979 { .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data }, 978 { .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data },
980 { .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data }, 979 { .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data },
981 { .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data }, 980 { .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data },
982 { .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data }, 981 { .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data },
982 { .compatible = "renesas,rcar-gen2-msiof", .data = &r8a779x_data },
983 { .compatible = "renesas,msiof-r8a7796", .data = &r8a779x_data }, 983 { .compatible = "renesas,msiof-r8a7796", .data = &r8a779x_data },
984 { .compatible = "renesas,rcar-gen3-msiof", .data = &r8a779x_data },
985 { .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */
984 {}, 986 {},
985}; 987};
986MODULE_DEVICE_TABLE(of, sh_msiof_match); 988MODULE_DEVICE_TABLE(of, sh_msiof_match);
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index b811b0fb61b1..4c7796512453 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -118,12 +118,12 @@ struct rockchip_tsadc_chip {
118 void (*control)(void __iomem *reg, bool on); 118 void (*control)(void __iomem *reg, bool on);
119 119
120 /* Per-sensor methods */ 120 /* Per-sensor methods */
121 int (*get_temp)(struct chip_tsadc_table table, 121 int (*get_temp)(const struct chip_tsadc_table *table,
122 int chn, void __iomem *reg, int *temp); 122 int chn, void __iomem *reg, int *temp);
123 void (*set_alarm_temp)(struct chip_tsadc_table table, 123 int (*set_alarm_temp)(const struct chip_tsadc_table *table,
124 int chn, void __iomem *reg, int temp); 124 int chn, void __iomem *reg, int temp);
125 void (*set_tshut_temp)(struct chip_tsadc_table table, 125 int (*set_tshut_temp)(const struct chip_tsadc_table *table,
126 int chn, void __iomem *reg, int temp); 126 int chn, void __iomem *reg, int temp);
127 void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m); 127 void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
128 128
129 /* Per-table methods */ 129 /* Per-table methods */
@@ -317,6 +317,7 @@ static const struct tsadc_table rk3288_code_table[] = {
317 {3452, 115000}, 317 {3452, 115000},
318 {3437, 120000}, 318 {3437, 120000},
319 {3421, 125000}, 319 {3421, 125000},
320 {0, 125000},
320}; 321};
321 322
322static const struct tsadc_table rk3368_code_table[] = { 323static const struct tsadc_table rk3368_code_table[] = {
@@ -397,59 +398,80 @@ static const struct tsadc_table rk3399_code_table[] = {
397 {TSADCV3_DATA_MASK, 125000}, 398 {TSADCV3_DATA_MASK, 125000},
398}; 399};
399 400
400static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table, 401static u32 rk_tsadcv2_temp_to_code(const struct chip_tsadc_table *table,
401 int temp) 402 int temp)
402{ 403{
403 int high, low, mid; 404 int high, low, mid;
404 u32 error = 0; 405 unsigned long num;
406 unsigned int denom;
407 u32 error = table->data_mask;
405 408
406 low = 0; 409 low = 0;
407 high = table.length - 1; 410 high = (table->length - 1) - 1; /* ignore the last check for table */
408 mid = (high + low) / 2; 411 mid = (high + low) / 2;
409 412
410 /* Return mask code data when the temp is over table range */ 413 /* Return mask code data when the temp is over table range */
411 if (temp < table.id[low].temp || temp > table.id[high].temp) { 414 if (temp < table->id[low].temp || temp > table->id[high].temp)
412 error = table.data_mask;
413 goto exit; 415 goto exit;
414 }
415 416
416 while (low <= high) { 417 while (low <= high) {
417 if (temp == table.id[mid].temp) 418 if (temp == table->id[mid].temp)
418 return table.id[mid].code; 419 return table->id[mid].code;
419 else if (temp < table.id[mid].temp) 420 else if (temp < table->id[mid].temp)
420 high = mid - 1; 421 high = mid - 1;
421 else 422 else
422 low = mid + 1; 423 low = mid + 1;
423 mid = (low + high) / 2; 424 mid = (low + high) / 2;
424 } 425 }
425 426
427 /*
428 * The conversion code granularity provided by the table. Let's
429 * assume that the relationship between temperature and
430 * analog value between 2 table entries is linear and interpolate
431 * to produce less granular result.
432 */
433 num = abs(table->id[mid + 1].code - table->id[mid].code);
434 num *= temp - table->id[mid].temp;
435 denom = table->id[mid + 1].temp - table->id[mid].temp;
436
437 switch (table->mode) {
438 case ADC_DECREMENT:
439 return table->id[mid].code - (num / denom);
440 case ADC_INCREMENT:
441 return table->id[mid].code + (num / denom);
442 default:
443 pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
444 return error;
445 }
446
426exit: 447exit:
427 pr_err("Invalid the conversion, error=%d\n", error); 448 pr_err("%s: invalid temperature, temp=%d error=%d\n",
449 __func__, temp, error);
428 return error; 450 return error;
429} 451}
430 452
431static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, 453static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table,
432 int *temp) 454 u32 code, int *temp)
433{ 455{
434 unsigned int low = 1; 456 unsigned int low = 1;
435 unsigned int high = table.length - 1; 457 unsigned int high = table->length - 1;
436 unsigned int mid = (low + high) / 2; 458 unsigned int mid = (low + high) / 2;
437 unsigned int num; 459 unsigned int num;
438 unsigned long denom; 460 unsigned long denom;
439 461
440 WARN_ON(table.length < 2); 462 WARN_ON(table->length < 2);
441 463
442 switch (table.mode) { 464 switch (table->mode) {
443 case ADC_DECREMENT: 465 case ADC_DECREMENT:
444 code &= table.data_mask; 466 code &= table->data_mask;
445 if (code < table.id[high].code) 467 if (code <= table->id[high].code)
446 return -EAGAIN; /* Incorrect reading */ 468 return -EAGAIN; /* Incorrect reading */
447 469
448 while (low <= high) { 470 while (low <= high) {
449 if (code >= table.id[mid].code && 471 if (code >= table->id[mid].code &&
450 code < table.id[mid - 1].code) 472 code < table->id[mid - 1].code)
451 break; 473 break;
452 else if (code < table.id[mid].code) 474 else if (code < table->id[mid].code)
453 low = mid + 1; 475 low = mid + 1;
454 else 476 else
455 high = mid - 1; 477 high = mid - 1;
@@ -458,15 +480,15 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
458 } 480 }
459 break; 481 break;
460 case ADC_INCREMENT: 482 case ADC_INCREMENT:
461 code &= table.data_mask; 483 code &= table->data_mask;
462 if (code < table.id[low].code) 484 if (code < table->id[low].code)
463 return -EAGAIN; /* Incorrect reading */ 485 return -EAGAIN; /* Incorrect reading */
464 486
465 while (low <= high) { 487 while (low <= high) {
466 if (code <= table.id[mid].code && 488 if (code <= table->id[mid].code &&
467 code > table.id[mid - 1].code) 489 code > table->id[mid - 1].code)
468 break; 490 break;
469 else if (code > table.id[mid].code) 491 else if (code > table->id[mid].code)
470 low = mid + 1; 492 low = mid + 1;
471 else 493 else
472 high = mid - 1; 494 high = mid - 1;
@@ -475,7 +497,8 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
475 } 497 }
476 break; 498 break;
477 default: 499 default:
478 pr_err("Invalid the conversion table\n"); 500 pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
501 return -EINVAL;
479 } 502 }
480 503
481 /* 504 /*
@@ -484,10 +507,10 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
484 * temperature between 2 table entries is linear and interpolate 507 * temperature between 2 table entries is linear and interpolate
485 * to produce less granular result. 508 * to produce less granular result.
486 */ 509 */
487 num = table.id[mid].temp - table.id[mid - 1].temp; 510 num = table->id[mid].temp - table->id[mid - 1].temp;
488 num *= abs(table.id[mid - 1].code - code); 511 num *= abs(table->id[mid - 1].code - code);
489 denom = abs(table.id[mid - 1].code - table.id[mid].code); 512 denom = abs(table->id[mid - 1].code - table->id[mid].code);
490 *temp = table.id[mid - 1].temp + (num / denom); 513 *temp = table->id[mid - 1].temp + (num / denom);
491 514
492 return 0; 515 return 0;
493} 516}
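
Both conversion directions in the rockchip driver now interpolate linearly between the two table rows that bracket the input instead of returning the coarser row value. A standalone sketch of the code-to-temperature direction for an increment-mode table (made-up table values; the driver returns -EAGAIN where this returns -1):

#include <stdio.h>

struct demo_entry { unsigned int code; int temp_mc; };

/* Find the bracketing rows, then interpolate linearly between them.
 * Returns millicelsius. */
static int code_to_temp(const struct demo_entry *t, int len, unsigned int code)
{
	for (int i = 1; i < len; i++) {
		if (code <= t[i].code && code > t[i - 1].code) {
			unsigned int num = (t[i].temp_mc - t[i - 1].temp_mc)
					   * (code - t[i - 1].code);
			unsigned int denom = t[i].code - t[i - 1].code;
			return t[i - 1].temp_mc + (int)(num / denom);
		}
	}
	return -1;	/* out of range */
}

int main(void)
{
	static const struct demo_entry tbl[] = {
		{ 100, 25000 }, { 120, 30000 }, { 140, 35000 },
	};

	/* halfway between codes 120 and 140 -> halfway between temps */
	printf("%d\n", code_to_temp(tbl, 3, 130));	/* 32500 */
	return 0;
}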
@@ -638,7 +661,7 @@ static void rk_tsadcv3_control(void __iomem *regs, bool enable)
638 writel_relaxed(val, regs + TSADCV2_AUTO_CON); 661 writel_relaxed(val, regs + TSADCV2_AUTO_CON);
639} 662}
640 663
641static int rk_tsadcv2_get_temp(struct chip_tsadc_table table, 664static int rk_tsadcv2_get_temp(const struct chip_tsadc_table *table,
642 int chn, void __iomem *regs, int *temp) 665 int chn, void __iomem *regs, int *temp)
643{ 666{
644 u32 val; 667 u32 val;
@@ -648,39 +671,57 @@ static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
648 return rk_tsadcv2_code_to_temp(table, val, temp); 671 return rk_tsadcv2_code_to_temp(table, val, temp);
649} 672}
650 673
651static void rk_tsadcv2_alarm_temp(struct chip_tsadc_table table, 674static int rk_tsadcv2_alarm_temp(const struct chip_tsadc_table *table,
652 int chn, void __iomem *regs, int temp) 675 int chn, void __iomem *regs, int temp)
653{ 676{
654 u32 alarm_value, int_en; 677 u32 alarm_value;
678 u32 int_en, int_clr;
679
680 /*
681 * In some cases, some sensors didn't need the trip points, the
682 * set_trips will pass {-INT_MAX, INT_MAX} to trigger tsadc alarm
683 * in the end, ignore this case and disable the high temperature
684 * interrupt.
685 */
686 if (temp == INT_MAX) {
687 int_clr = readl_relaxed(regs + TSADCV2_INT_EN);
688 int_clr &= ~TSADCV2_INT_SRC_EN(chn);
689 writel_relaxed(int_clr, regs + TSADCV2_INT_EN);
690 return 0;
691 }
655 692
656 /* Make sure the value is valid */ 693 /* Make sure the value is valid */
657 alarm_value = rk_tsadcv2_temp_to_code(table, temp); 694 alarm_value = rk_tsadcv2_temp_to_code(table, temp);
658 if (alarm_value == table.data_mask) 695 if (alarm_value == table->data_mask)
659 return; 696 return -ERANGE;
660 697
661 writel_relaxed(alarm_value & table.data_mask, 698 writel_relaxed(alarm_value & table->data_mask,
662 regs + TSADCV2_COMP_INT(chn)); 699 regs + TSADCV2_COMP_INT(chn));
663 700
664 int_en = readl_relaxed(regs + TSADCV2_INT_EN); 701 int_en = readl_relaxed(regs + TSADCV2_INT_EN);
665 int_en |= TSADCV2_INT_SRC_EN(chn); 702 int_en |= TSADCV2_INT_SRC_EN(chn);
666 writel_relaxed(int_en, regs + TSADCV2_INT_EN); 703 writel_relaxed(int_en, regs + TSADCV2_INT_EN);
704
705 return 0;
667} 706}
668 707
669static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table, 708static int rk_tsadcv2_tshut_temp(const struct chip_tsadc_table *table,
670 int chn, void __iomem *regs, int temp) 709 int chn, void __iomem *regs, int temp)
671{ 710{
672 u32 tshut_value, val; 711 u32 tshut_value, val;
673 712
674 /* Make sure the value is valid */ 713 /* Make sure the value is valid */
675 tshut_value = rk_tsadcv2_temp_to_code(table, temp); 714 tshut_value = rk_tsadcv2_temp_to_code(table, temp);
676 if (tshut_value == table.data_mask) 715 if (tshut_value == table->data_mask)
677 return; 716 return -ERANGE;
678 717
679 writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn)); 718 writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));
680 719
681 /* TSHUT will be valid */ 720 /* TSHUT will be valid */
682 val = readl_relaxed(regs + TSADCV2_AUTO_CON); 721 val = readl_relaxed(regs + TSADCV2_AUTO_CON);
683 writel_relaxed(val | TSADCV2_AUTO_SRC_EN(chn), regs + TSADCV2_AUTO_CON); 722 writel_relaxed(val | TSADCV2_AUTO_SRC_EN(chn), regs + TSADCV2_AUTO_CON);
723
724 return 0;
684} 725}
685 726
686static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs, 727static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
@@ -883,10 +924,8 @@ static int rockchip_thermal_set_trips(void *_sensor, int low, int high)
883 dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n", 924 dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n",
884 __func__, sensor->id, low, high); 925 __func__, sensor->id, low, high);
885 926
886 tsadc->set_alarm_temp(tsadc->table, 927 return tsadc->set_alarm_temp(&tsadc->table,
887 sensor->id, thermal->regs, high); 928 sensor->id, thermal->regs, high);
888
889 return 0;
890} 929}
891 930
892static int rockchip_thermal_get_temp(void *_sensor, int *out_temp) 931static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
@@ -896,7 +935,7 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
896 const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip; 935 const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip;
897 int retval; 936 int retval;
898 937
899 retval = tsadc->get_temp(tsadc->table, 938 retval = tsadc->get_temp(&tsadc->table,
900 sensor->id, thermal->regs, out_temp); 939 sensor->id, thermal->regs, out_temp);
901 dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n", 940 dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n",
902 sensor->id, *out_temp, retval); 941 sensor->id, *out_temp, retval);
@@ -982,8 +1021,12 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
982 int error; 1021 int error;
983 1022
984 tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); 1023 tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode);
985 tsadc->set_tshut_temp(tsadc->table, id, thermal->regs, 1024
1025 error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs,
986 thermal->tshut_temp); 1026 thermal->tshut_temp);
1027 if (error)
1028 dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
1029 __func__, thermal->tshut_temp, error);
987 1030
988 sensor->thermal = thermal; 1031 sensor->thermal = thermal;
989 sensor->id = id; 1032 sensor->id = id;
@@ -1196,9 +1239,13 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
1196 1239
1197 thermal->chip->set_tshut_mode(id, thermal->regs, 1240 thermal->chip->set_tshut_mode(id, thermal->regs,
1198 thermal->tshut_mode); 1241 thermal->tshut_mode);
1199 thermal->chip->set_tshut_temp(thermal->chip->table, 1242
1243 error = thermal->chip->set_tshut_temp(&thermal->chip->table,
1200 id, thermal->regs, 1244 id, thermal->regs,
1201 thermal->tshut_temp); 1245 thermal->tshut_temp);
1246 if (error)
1247 dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
1248 __func__, thermal->tshut_temp, error);
1202 } 1249 }
1203 1250
1204 thermal->chip->control(thermal->regs, true); 1251 thermal->chip->control(thermal->regs, true);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 641faab6e24b..655591316a88 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -799,6 +799,11 @@ static void thermal_release(struct device *dev)
799 if (!strncmp(dev_name(dev), "thermal_zone", 799 if (!strncmp(dev_name(dev), "thermal_zone",
800 sizeof("thermal_zone") - 1)) { 800 sizeof("thermal_zone") - 1)) {
801 tz = to_thermal_zone(dev); 801 tz = to_thermal_zone(dev);
802 kfree(tz->trip_type_attrs);
803 kfree(tz->trip_temp_attrs);
804 kfree(tz->trip_hyst_attrs);
805 kfree(tz->trips_attribute_group.attrs);
806 kfree(tz->device.groups);
802 kfree(tz); 807 kfree(tz);
803 } else if (!strncmp(dev_name(dev), "cooling_device", 808 } else if (!strncmp(dev_name(dev), "cooling_device",
804 sizeof("cooling_device") - 1)) { 809 sizeof("cooling_device") - 1)) {
@@ -1305,10 +1310,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
1305 1310
1306 thermal_zone_device_set_polling(tz, 0); 1311 thermal_zone_device_set_polling(tz, 0);
1307 1312
1308 kfree(tz->trip_type_attrs);
1309 kfree(tz->trip_temp_attrs);
1310 kfree(tz->trip_hyst_attrs);
1311 kfree(tz->trips_attribute_group.attrs);
1312 thermal_set_governor(tz, NULL); 1313 thermal_set_governor(tz, NULL);
1313 1314
1314 thermal_remove_hwmon_sysfs(tz); 1315 thermal_remove_hwmon_sysfs(tz);
@@ -1316,7 +1317,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
1316 idr_destroy(&tz->idr); 1317 idr_destroy(&tz->idr);
1317 mutex_destroy(&tz->lock); 1318 mutex_destroy(&tz->lock);
1318 device_unregister(&tz->device); 1319 device_unregister(&tz->device);
1319 kfree(tz->device.groups);
1320} 1320}
1321EXPORT_SYMBOL_GPL(thermal_zone_device_unregister); 1321EXPORT_SYMBOL_GPL(thermal_zone_device_unregister);
1322 1322
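
The thermal_core hunks move the kfree() calls for the trip attribute arrays out of thermal_zone_device_unregister() and into the device release callback, so the buffers are freed only after the last reference to the zone device is dropped; a sysfs reader can still hold a reference when unregister returns. A toy refcounted model of that ordering (illustrative names):

#include <stdio.h>
#include <stdlib.h>

struct demo_zone {
	int refs;
	char *attrs;		/* stands in for the trip attr arrays */
};

/* Release runs only when the last reference is gone -- the right
 * place to free buffers that open sysfs files may still be using. */
static void demo_release(struct demo_zone *z)
{
	free(z->attrs);		/* moved here from unregister() */
	free(z);
}

static void demo_put(struct demo_zone *z)
{
	if (--z->refs == 0)
		demo_release(z);
}

int main(void)
{
	struct demo_zone *z = malloc(sizeof(*z));

	if (!z)
		return 1;
	z->refs = 2;		/* core + an open sysfs reader */
	z->attrs = malloc(64);

	demo_put(z);		/* unregister: buffers must survive */
	printf("reader still sees attrs at %p\n", (void *)z->attrs);
	demo_put(z);		/* reader closes: everything freed */
	return 0;
}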
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 9548d3e03453..302b8f5f7d27 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -513,8 +513,8 @@ struct dwc2_core_params {
513 /* Gadget parameters */ 513 /* Gadget parameters */
514 bool g_dma; 514 bool g_dma;
515 bool g_dma_desc; 515 bool g_dma_desc;
516 u16 g_rx_fifo_size; 516 u32 g_rx_fifo_size;
517 u16 g_np_tx_fifo_size; 517 u32 g_np_tx_fifo_size;
518 u32 g_tx_fifo_size[MAX_EPS_CHANNELS]; 518 u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
519}; 519};
520 520
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index c55db4aa54d6..77c5fcf3a5bf 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3169,7 +3169,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
3169 /* keep other bits untouched (so e.g. forced modes are not lost) */ 3169 /* keep other bits untouched (so e.g. forced modes are not lost) */
3170 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 3170 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
3171 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | 3171 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
3172 GUSBCFG_HNPCAP); 3172 GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
3173 3173
3174 if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS && 3174 if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
3175 (hsotg->params.speed == DWC2_SPEED_PARAM_FULL || 3175 (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
@@ -3749,8 +3749,8 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
3749 __func__, epctrl, epctrl_reg); 3749 __func__, epctrl, epctrl_reg);
3750 3750
3751 /* Allocate DMA descriptor chain for non-ctrl endpoints */ 3751 /* Allocate DMA descriptor chain for non-ctrl endpoints */
3752 if (using_desc_dma(hsotg)) { 3752 if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
3753 hs_ep->desc_list = dma_alloc_coherent(hsotg->dev, 3753 hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
3754 MAX_DMA_DESC_NUM_GENERIC * 3754 MAX_DMA_DESC_NUM_GENERIC *
3755 sizeof(struct dwc2_dma_desc), 3755 sizeof(struct dwc2_dma_desc),
3756 &hs_ep->desc_list_dma, GFP_ATOMIC); 3756 &hs_ep->desc_list_dma, GFP_ATOMIC);
@@ -3872,7 +3872,7 @@ error1:
3872 3872
3873error2: 3873error2:
3874 if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) { 3874 if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
3875 dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * 3875 dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
3876 sizeof(struct dwc2_dma_desc), 3876 sizeof(struct dwc2_dma_desc),
3877 hs_ep->desc_list, hs_ep->desc_list_dma); 3877 hs_ep->desc_list, hs_ep->desc_list_dma);
3878 hs_ep->desc_list = NULL; 3878 hs_ep->desc_list = NULL;
@@ -3902,14 +3902,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
3902 return -EINVAL; 3902 return -EINVAL;
3903 } 3903 }
3904 3904
3905 /* Remove DMA memory allocated for non-control Endpoints */
3906 if (using_desc_dma(hsotg)) {
3907 dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
3908 sizeof(struct dwc2_dma_desc),
3909 hs_ep->desc_list, hs_ep->desc_list_dma);
3910 hs_ep->desc_list = NULL;
3911 }
3912
3913 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index); 3905 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
3914 3906
3915 spin_lock_irqsave(&hsotg->lock, flags); 3907 spin_lock_irqsave(&hsotg->lock, flags);
@@ -4131,7 +4123,7 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
4131 /* keep other bits untouched (so e.g. forced modes are not lost) */ 4123 /* keep other bits untouched (so e.g. forced modes are not lost) */
4132 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 4124 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
4133 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | 4125 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
4134 GUSBCFG_HNPCAP); 4126 GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
4135 4127
4136 /* set the PLL on, remove the HNP/SRP and set the PHY */ 4128 /* set the PLL on, remove the HNP/SRP and set the PHY */
4137 trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5; 4129 trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
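
The dwc2 gadget hunks switch the descriptor chain to managed DMA memory: dmam_alloc_coherent() registers the buffer with devres so it is released automatically when the device goes away, the explicit free in ep_disable is dropped, and the new !hs_ep->desc_list guard reuses the chain across enable/disable cycles. A kernel-context fragment of the pattern (not a standalone program; size abbreviates the hunk's MAX_DMA_DESC_NUM_GENERIC * sizeof(struct dwc2_dma_desc)):

/*
 * Allocate the descriptor chain once with managed DMA memory and
 * reuse it; devres frees it at device teardown, so the disable path
 * needs no explicit free.
 */
if (!hs_ep->desc_list) {
	hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev, size,
					       &hs_ep->desc_list_dma,
					       GFP_ATOMIC);
	if (!hs_ep->desc_list)
		return -ENOMEM;
}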
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 911c3b36ac06..46d0ad5105e4 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -4367,6 +4367,9 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
4367 if (!HCD_HW_ACCESSIBLE(hcd)) 4367 if (!HCD_HW_ACCESSIBLE(hcd))
4368 goto unlock; 4368 goto unlock;
4369 4369
4370 if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
4371 goto unlock;
4372
4370 if (!hsotg->params.hibernation) 4373 if (!hsotg->params.hibernation)
4371 goto skip_power_saving; 4374 goto skip_power_saving;
4372 4375
@@ -4489,8 +4492,8 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
4489{ 4492{
4490#ifdef VERBOSE_DEBUG 4493#ifdef VERBOSE_DEBUG
4491 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 4494 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4492 char *pipetype; 4495 char *pipetype = NULL;
4493 char *speed; 4496 char *speed = NULL;
4494 4497
4495 dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb); 4498 dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb);
4496 dev_vdbg(hsotg->dev, " Device address: %d\n", 4499 dev_vdbg(hsotg->dev, " Device address: %d\n",
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 11fe68a4627b..bcd1e19b4076 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -385,16 +385,16 @@ static void dwc2_set_param(struct dwc2_hsotg *hsotg, void *param,
385} 385}
386 386
387/** 387/**
388 * dwc2_set_param_u16() - Set a u16 parameter 388 * dwc2_set_param_u32() - Set a u32 parameter
389 * 389 *
390 * See dwc2_set_param(). 390 * See dwc2_set_param().
391 */ 391 */
392static void dwc2_set_param_u16(struct dwc2_hsotg *hsotg, u16 *param, 392static void dwc2_set_param_u32(struct dwc2_hsotg *hsotg, u32 *param,
393 bool lookup, char *property, u16 legacy, 393 bool lookup, char *property, u16 legacy,
394 u16 def, u16 min, u16 max) 394 u16 def, u16 min, u16 max)
395{ 395{
396 dwc2_set_param(hsotg, param, lookup, property, 396 dwc2_set_param(hsotg, param, lookup, property,
397 legacy, def, min, max, 2); 397 legacy, def, min, max, 4);
398} 398}
399 399
400/** 400/**
@@ -1178,12 +1178,12 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
1178 * auto-detect if the hardware does not support the 1178 * auto-detect if the hardware does not support the
1179 * default. 1179 * default.
1180 */ 1180 */
1181 dwc2_set_param_u16(hsotg, &p->g_rx_fifo_size, 1181 dwc2_set_param_u32(hsotg, &p->g_rx_fifo_size,
1182 true, "g-rx-fifo-size", 2048, 1182 true, "g-rx-fifo-size", 2048,
1183 hw->rx_fifo_size, 1183 hw->rx_fifo_size,
1184 16, hw->rx_fifo_size); 1184 16, hw->rx_fifo_size);
1185 1185
1186 dwc2_set_param_u16(hsotg, &p->g_np_tx_fifo_size, 1186 dwc2_set_param_u32(hsotg, &p->g_np_tx_fifo_size,
1187 true, "g-np-tx-fifo-size", 1024, 1187 true, "g-np-tx-fifo-size", 1024,
1188 hw->dev_nperio_tx_fifo_size, 1188 hw->dev_nperio_tx_fifo_size,
1189 16, hw->dev_nperio_tx_fifo_size); 1189 16, hw->dev_nperio_tx_fifo_size);
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index e27899bb5706..e956306d9b0f 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -138,7 +138,8 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
138 exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk"); 138 exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
139 if (IS_ERR(exynos->axius_clk)) { 139 if (IS_ERR(exynos->axius_clk)) {
140 dev_err(dev, "no AXI UpScaler clk specified\n"); 140 dev_err(dev, "no AXI UpScaler clk specified\n");
141 return -ENODEV; 141 ret = -ENODEV;
142 goto axius_clk_err;
142 } 143 }
143 clk_prepare_enable(exynos->axius_clk); 144 clk_prepare_enable(exynos->axius_clk);
144 } else { 145 } else {
@@ -196,6 +197,7 @@ err3:
196 regulator_disable(exynos->vdd33); 197 regulator_disable(exynos->vdd33);
197err2: 198err2:
198 clk_disable_unprepare(exynos->axius_clk); 199 clk_disable_unprepare(exynos->axius_clk);
200axius_clk_err:
199 clk_disable_unprepare(exynos->susp_clk); 201 clk_disable_unprepare(exynos->susp_clk);
200 clk_disable_unprepare(exynos->clk); 202 clk_disable_unprepare(exynos->clk);
201 return ret; 203 return ret;
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 002822d98fda..49d685ad0da9 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2147,7 +2147,7 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
2147 cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); 2147 cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
2148 if (!cdev->os_desc_req->buf) { 2148 if (!cdev->os_desc_req->buf) {
2149 ret = -ENOMEM; 2149 ret = -ENOMEM;
2150 kfree(cdev->os_desc_req); 2150 usb_ep_free_request(ep0, cdev->os_desc_req);
2151 goto end; 2151 goto end;
2152 } 2152 }
2153 cdev->os_desc_req->context = cdev; 2153 cdev->os_desc_req->context = cdev;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 5e746adc8a2d..5490fc51638e 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1806,7 +1806,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
1806 unsigned long flags; 1806 unsigned long flags;
1807 1807
1808 spin_lock_irqsave(&func->ffs->eps_lock, flags); 1808 spin_lock_irqsave(&func->ffs->eps_lock, flags);
1809 do { 1809 while (count--) {
1810 /* pending requests get nuked */ 1810 /* pending requests get nuked */
1811 if (likely(ep->ep)) 1811 if (likely(ep->ep))
1812 usb_ep_disable(ep->ep); 1812 usb_ep_disable(ep->ep);
@@ -1817,7 +1817,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
1817 __ffs_epfile_read_buffer_free(epfile); 1817 __ffs_epfile_read_buffer_free(epfile);
1818 ++epfile; 1818 ++epfile;
1819 } 1819 }
1820 } while (--count); 1820 }
1821 spin_unlock_irqrestore(&func->ffs->eps_lock, flags); 1821 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1822} 1822}
1823 1823
@@ -1831,7 +1831,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
1831 int ret = 0; 1831 int ret = 0;
1832 1832
1833 spin_lock_irqsave(&func->ffs->eps_lock, flags); 1833 spin_lock_irqsave(&func->ffs->eps_lock, flags);
1834 do { 1834 while(count--) {
1835 struct usb_endpoint_descriptor *ds; 1835 struct usb_endpoint_descriptor *ds;
1836 int desc_idx; 1836 int desc_idx;
1837 1837
@@ -1867,7 +1867,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
1867 1867
1868 ++ep; 1868 ++ep;
1869 ++epfile; 1869 ++epfile;
1870 } while (--count); 1870 }
1871 spin_unlock_irqrestore(&func->ffs->eps_lock, flags); 1871 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1872 1872
1873 return ret; 1873 return ret;
@@ -3448,12 +3448,12 @@ static void ffs_func_unbind(struct usb_configuration *c,
3448 3448
3449 /* cleanup after autoconfig */ 3449 /* cleanup after autoconfig */
3450 spin_lock_irqsave(&func->ffs->eps_lock, flags); 3450 spin_lock_irqsave(&func->ffs->eps_lock, flags);
3451 do { 3451 while (count--) {
3452 if (ep->ep && ep->req) 3452 if (ep->ep && ep->req)
3453 usb_ep_free_request(ep->ep, ep->req); 3453 usb_ep_free_request(ep->ep, ep->req);
3454 ep->req = NULL; 3454 ep->req = NULL;
3455 ++ep; 3455 ++ep;
3456 } while (--count); 3456 }
3457 spin_unlock_irqrestore(&func->ffs->eps_lock, flags); 3457 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
3458 kfree(func->eps); 3458 kfree(func->eps);
3459 func->eps = NULL; 3459 func->eps = NULL;
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index f3212db9bc37..12c7687216e6 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1978,7 +1978,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
1978 dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret); 1978 dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
1979 goto err; 1979 goto err;
1980 } 1980 }
1981 ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index); 1981 sprintf(ep->name, "ep%d", ep->index);
1982 ep->ep.name = ep->name;
1982 1983
1983 ep->ep_regs = udc->regs + USBA_EPT_BASE(i); 1984 ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
1984 ep->dma_regs = udc->regs + USBA_DMA_BASE(i); 1985 ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
index 3e1c9d589dfa..b03b2ebfc53a 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
@@ -280,6 +280,7 @@ struct usba_ep {
280 void __iomem *ep_regs; 280 void __iomem *ep_regs;
281 void __iomem *dma_regs; 281 void __iomem *dma_regs;
282 void __iomem *fifo; 282 void __iomem *fifo;
283 char name[8];
283 struct usb_ep ep; 284 struct usb_ep ep;
284 struct usba_udc *udc; 285 struct usba_udc *udc;
285 286
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index ddfab301e366..e5834dd9bcde 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -165,7 +165,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
165 return -ENODEV; 165 return -ENODEV;
166 166
167 /* Try to set 64-bit DMA first */ 167 /* Try to set 64-bit DMA first */
168 if (WARN_ON(!pdev->dev.dma_mask)) 168 if (!pdev->dev.dma_mask)
169 /* Platform did not initialize dma_mask */ 169 /* Platform did not initialize dma_mask */
170 ret = dma_coerce_mask_and_coherent(&pdev->dev, 170 ret = dma_coerce_mask_and_coherent(&pdev->dev,
171 DMA_BIT_MASK(64)); 171 DMA_BIT_MASK(64));
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index c8823578a1b2..128d10282d16 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1270,6 +1270,10 @@ static int tce_iommu_attach_group(void *iommu_data,
1270 /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n", 1270 /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
1271 iommu_group_id(iommu_group), iommu_group); */ 1271 iommu_group_id(iommu_group), iommu_group); */
1272 table_group = iommu_group_get_iommudata(iommu_group); 1272 table_group = iommu_group_get_iommudata(iommu_group);
1273 if (!table_group) {
1274 ret = -ENODEV;
1275 goto unlock_exit;
1276 }
1273 1277
1274 if (tce_groups_attached(container) && (!table_group->ops || 1278 if (tce_groups_attached(container) && (!table_group->ops ||
1275 !table_group->ops->take_ownership || 1279 !table_group->ops->take_ownership ||
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 253310cdaaca..fd6c8b66f06f 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -843,7 +843,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
843 struct iov_iter out_iter, in_iter, prot_iter, data_iter; 843 struct iov_iter out_iter, in_iter, prot_iter, data_iter;
844 u64 tag; 844 u64 tag;
845 u32 exp_data_len, data_direction; 845 u32 exp_data_len, data_direction;
846 unsigned out, in; 846 unsigned int out = 0, in = 0;
847 int head, ret, prot_bytes; 847 int head, ret, prot_bytes;
848 size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp); 848 size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
849 size_t out_size, in_size; 849 size_t out_size, in_size;
@@ -2087,7 +2087,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
2087 NULL, 2087 NULL,
2088}; 2088};
2089 2089
2090static struct target_core_fabric_ops vhost_scsi_ops = { 2090static const struct target_core_fabric_ops vhost_scsi_ops = {
2091 .module = THIS_MODULE, 2091 .module = THIS_MODULE,
2092 .name = "vhost", 2092 .name = "vhost",
2093 .get_fabric_name = vhost_scsi_get_fabric_name, 2093 .get_fabric_name = vhost_scsi_get_fabric_name,
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index bbbf588540ed..ce5e63d2c66a 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -373,6 +373,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
373 373
374static int vhost_vsock_start(struct vhost_vsock *vsock) 374static int vhost_vsock_start(struct vhost_vsock *vsock)
375{ 375{
376 struct vhost_virtqueue *vq;
376 size_t i; 377 size_t i;
377 int ret; 378 int ret;
378 379
@@ -383,19 +384,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
383 goto err; 384 goto err;
384 385
385 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { 386 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
386 struct vhost_virtqueue *vq = &vsock->vqs[i]; 387 vq = &vsock->vqs[i];
387 388
388 mutex_lock(&vq->mutex); 389 mutex_lock(&vq->mutex);
389 390
390 if (!vhost_vq_access_ok(vq)) { 391 if (!vhost_vq_access_ok(vq)) {
391 ret = -EFAULT; 392 ret = -EFAULT;
392 mutex_unlock(&vq->mutex);
393 goto err_vq; 393 goto err_vq;
394 } 394 }
395 395
396 if (!vq->private_data) { 396 if (!vq->private_data) {
397 vq->private_data = vsock; 397 vq->private_data = vsock;
398 vhost_vq_init_access(vq); 398 ret = vhost_vq_init_access(vq);
399 if (ret)
400 goto err_vq;
399 } 401 }
400 402
401 mutex_unlock(&vq->mutex); 403 mutex_unlock(&vq->mutex);
@@ -405,8 +407,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
405 return 0; 407 return 0;
406 408
407err_vq: 409err_vq:
410 vq->private_data = NULL;
411 mutex_unlock(&vq->mutex);
412
408 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { 413 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
409 struct vhost_virtqueue *vq = &vsock->vqs[i]; 414 vq = &vsock->vqs[i];
410 415
411 mutex_lock(&vq->mutex); 416 mutex_lock(&vq->mutex);
412 vq->private_data = NULL; 417 vq->private_data = NULL;
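
Three fixes travel together in the vhost_vsock_start() hunks: vq is hoisted to function scope so the err_vq label can still see it, the vhost_vq_init_access() return value is now checked, and the failing queue is reset and unlocked before the generic rollback loop (the old code jumped to err_vq with the mutex still held). A compilable sketch of the same unwind shape, with pthread mutexes standing in for the vq mutex:

	#include <pthread.h>
	#include <stdio.h>

	#define NQ 4

	struct vq {
		pthread_mutex_t lock;
		void *private_data;
	};

	/* Stand-in for vhost_vq_init_access(): may fail. */
	static int vq_init_access(struct vq *vq, int fail)
	{
		(void)vq;
		return fail ? -1 : 0;
	}

	static int start(struct vq *vqs, void *owner, int fail_at)
	{
		struct vq *vq; /* function scope: err_vq needs it */
		int i, ret = 0;

		for (i = 0; i < NQ; i++) {
			vq = &vqs[i];
			pthread_mutex_lock(&vq->lock);
			vq->private_data = owner;
			ret = vq_init_access(vq, i == fail_at);
			if (ret)
				goto err_vq; /* lock still held here */
			pthread_mutex_unlock(&vq->lock);
		}
		return 0;

	err_vq:
		/* Undo the queue we failed on, then roll the rest back. */
		vq->private_data = NULL;
		pthread_mutex_unlock(&vq->lock);
		for (i = 0; i < NQ; i++) {
			pthread_mutex_lock(&vqs[i].lock);
			vqs[i].private_data = NULL;
			pthread_mutex_unlock(&vqs[i].lock);
		}
		return ret;
	}

	int main(void)
	{
		struct vq vqs[NQ];
		int i;

		for (i = 0; i < NQ; i++) {
			pthread_mutex_init(&vqs[i].lock, NULL);
			vqs[i].private_data = NULL;
		}
		printf("fail at 2 -> %d\n", start(vqs, vqs, 2));   /* -1 */
		printf("no failure -> %d\n", start(vqs, vqs, -1)); /* 0 */
		return 0;
	}
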
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index f89245b8ba8e..68a113594808 100644
--- a/drivers/video/fbdev/core/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
@@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap)
163 163
164int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to) 164int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
165{ 165{
166 int tooff = 0, fromoff = 0; 166 unsigned int tooff = 0, fromoff = 0;
167 int size; 167 size_t size;
168 168
169 if (to->start > from->start) 169 if (to->start > from->start)
170 fromoff = to->start - from->start; 170 fromoff = to->start - from->start;
171 else 171 else
172 tooff = from->start - to->start; 172 tooff = from->start - to->start;
173 size = to->len - tooff; 173 if (fromoff >= from->len || tooff >= to->len)
174 if (size > (int) (from->len - fromoff)) 174 return -EINVAL;
175 size = from->len - fromoff; 175
176 if (size <= 0) 176 size = min_t(size_t, to->len - tooff, from->len - fromoff);
177 if (size == 0)
177 return -EINVAL; 178 return -EINVAL;
178 size *= sizeof(u16); 179 size *= sizeof(u16);
179 180
@@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
187 188
188int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to) 189int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
189{ 190{
190 int tooff = 0, fromoff = 0; 191 unsigned int tooff = 0, fromoff = 0;
191 int size; 192 size_t size;
192 193
193 if (to->start > from->start) 194 if (to->start > from->start)
194 fromoff = to->start - from->start; 195 fromoff = to->start - from->start;
195 else 196 else
196 tooff = from->start - to->start; 197 tooff = from->start - to->start;
197 size = to->len - tooff; 198 if (fromoff >= from->len || tooff >= to->len)
198 if (size > (int) (from->len - fromoff)) 199 return -EINVAL;
199 size = from->len - fromoff; 200
200 if (size <= 0) 201 size = min_t(size_t, to->len - tooff, from->len - fromoff);
202 if (size == 0)
201 return -EINVAL; 203 return -EINVAL;
202 size *= sizeof(u16); 204 size *= sizeof(u16);
203 205
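
The fb_copy_cmap()/fb_cmap_to_user() rewrite above moves the offset math to unsigned types and rejects non-overlapping ranges up front, because the old signed arithmetic let to->len - tooff go negative and the (int) casts hid the wraparound. A self-contained sketch of the clamping logic under the same assumptions:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	static int copy_len(unsigned to_start, unsigned to_len,
			    unsigned from_start, unsigned from_len,
			    size_t *out)
	{
		unsigned tooff = 0, fromoff = 0;
		size_t size;

		if (to_start > from_start)
			fromoff = to_start - from_start;
		else
			tooff = from_start - to_start;

		/* Reject before subtracting: unsigned math cannot go
		 * negative, so bounds must be checked explicitly. */
		if (fromoff >= from_len || tooff >= to_len)
			return -1;

		size = to_len - tooff;
		if (size > from_len - fromoff)
			size = from_len - fromoff;
		if (size == 0)
			return -1;

		*out = size * sizeof(uint16_t);
		return 0;
	}

	int main(void)
	{
		size_t n;

		/* from covers [0,256), to covers [300,316): no overlap */
		printf("%d\n", copy_len(300, 16, 0, 256, &n));  /* -1 */
		/* from covers [0,256), to covers [16,80): 64 entries */
		if (!copy_len(16, 64, 0, 256, &n))
			printf("%zu bytes\n", n);               /* 128 */
		return 0;
	}
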
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index d47a2fcef818..c71fde5fe835 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -59,6 +59,7 @@
59#define pr_fmt(fmt) "virtio-mmio: " fmt 59#define pr_fmt(fmt) "virtio-mmio: " fmt
60 60
61#include <linux/acpi.h> 61#include <linux/acpi.h>
62#include <linux/dma-mapping.h>
62#include <linux/highmem.h> 63#include <linux/highmem.h>
63#include <linux/interrupt.h> 64#include <linux/interrupt.h>
64#include <linux/io.h> 65#include <linux/io.h>
@@ -498,6 +499,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
498 struct virtio_mmio_device *vm_dev; 499 struct virtio_mmio_device *vm_dev;
499 struct resource *mem; 500 struct resource *mem;
500 unsigned long magic; 501 unsigned long magic;
502 int rc;
501 503
502 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 504 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
503 if (!mem) 505 if (!mem)
@@ -547,9 +549,25 @@ static int virtio_mmio_probe(struct platform_device *pdev)
547 } 549 }
548 vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID); 550 vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
549 551
550 if (vm_dev->version == 1) 552 if (vm_dev->version == 1) {
551 writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE); 553 writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
552 554
555 rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
556 /*
557 * In the legacy case, ensure our coherently-allocated virtio
558 * ring will be at an address expressible as a 32-bit PFN.
559 */
560 if (!rc)
561 dma_set_coherent_mask(&pdev->dev,
562 DMA_BIT_MASK(32 + PAGE_SHIFT));
563 } else {
564 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
565 }
566 if (rc)
567 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
568 if (rc)
569 dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
570
553 platform_set_drvdata(pdev, vm_dev); 571 platform_set_drvdata(pdev, vm_dev);
554 572
555 return register_virtio_device(&vm_dev->vdev); 573 return register_virtio_device(&vm_dev->vdev);
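
The probe path above now negotiates DMA masks in a cascade: 64-bit first (legacy devices additionally capping the coherent mask so the ring PFN fits in 32 bits), then a 32-bit fallback, warning only if both fail rather than erroring out. A hedged sketch of the cascade; set_mask() below is a hypothetical stand-in for the kernel's dma_set_mask family and collapses the streaming/coherent distinction:

	#include <stdint.h>
	#include <stdio.h>

	/* (1 << (n-1)) << 1 avoids the undefined 1 << 64 for n == 64. */
	#define DMA_MASK(n) ((((1ULL << ((n) - 1)) << 1)) - 1)
	#define PAGE_SHIFT 12

	/* Hypothetical stand-in: accept masks the platform can satisfy. */
	static int set_mask(uint64_t mask, uint64_t platform_limit)
	{
		return mask <= platform_limit ? 0 : -5 /* -EIO */;
	}

	static void probe(int legacy, uint64_t platform_limit)
	{
		int rc;

		rc = set_mask(DMA_MASK(64), platform_limit);
		/* legacy ring address must fit a 32-bit PFN */
		if (legacy && !rc)
			rc = set_mask(DMA_MASK(32 + PAGE_SHIFT),
				      platform_limit);
		if (rc)
			rc = set_mask(DMA_MASK(32), platform_limit);
		if (rc)
			printf("no usable DMA mask, continuing anyway\n");
		else
			printf("DMA mask configured\n");
	}

	int main(void)
	{
		probe(1, DMA_MASK(32)); /* old platform: 32-bit fallback */
		probe(0, DMA_MASK(64)); /* modern: 64-bit accepted */
		return 0;
	}
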
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 409aeaa49246..7e38ed79c3fc 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -159,6 +159,13 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
159 if (xen_domain()) 159 if (xen_domain())
160 return true; 160 return true;
161 161
162 /*
163 * On ARM-based machines, the DMA ops will do the right thing,
164 * so always use them with legacy devices.
165 */
166 if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
167 return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
168
162 return false; 169 return false;
163} 170}
164 171
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 112ce422dc22..2a165cc8a43c 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -42,6 +42,7 @@
42static unsigned long platform_mmio; 42static unsigned long platform_mmio;
43static unsigned long platform_mmio_alloc; 43static unsigned long platform_mmio_alloc;
44static unsigned long platform_mmiolen; 44static unsigned long platform_mmiolen;
45static uint64_t callback_via;
45 46
46static unsigned long alloc_xen_mmio(unsigned long len) 47static unsigned long alloc_xen_mmio(unsigned long len)
47{ 48{
@@ -54,6 +55,51 @@ static unsigned long alloc_xen_mmio(unsigned long len)
54 return addr; 55 return addr;
55} 56}
56 57
58static uint64_t get_callback_via(struct pci_dev *pdev)
59{
60 u8 pin;
61 int irq;
62
63 irq = pdev->irq;
64 if (irq < 16)
65 return irq; /* ISA IRQ */
66
67 pin = pdev->pin;
68
69 /* We don't know the GSI. Specify the PCI INTx line instead. */
70 return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */
71 ((uint64_t)pci_domain_nr(pdev->bus) << 32) |
72 ((uint64_t)pdev->bus->number << 16) |
73 ((uint64_t)(pdev->devfn & 0xff) << 8) |
74 ((uint64_t)(pin - 1) & 3);
75}
76
77static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
78{
79 xen_hvm_evtchn_do_upcall();
80 return IRQ_HANDLED;
81}
82
83static int xen_allocate_irq(struct pci_dev *pdev)
84{
85 return request_irq(pdev->irq, do_hvm_evtchn_intr,
86 IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
87 "xen-platform-pci", pdev);
88}
89
90static int platform_pci_resume(struct pci_dev *pdev)
91{
92 int err;
93 if (!xen_pv_domain())
94 return 0;
95 err = xen_set_callback_via(callback_via);
96 if (err) {
97 dev_err(&pdev->dev, "platform_pci_resume failure!\n");
98 return err;
99 }
100 return 0;
101}
102
57static int platform_pci_probe(struct pci_dev *pdev, 103static int platform_pci_probe(struct pci_dev *pdev,
58 const struct pci_device_id *ent) 104 const struct pci_device_id *ent)
59{ 105{
@@ -92,6 +138,28 @@ static int platform_pci_probe(struct pci_dev *pdev,
92 platform_mmio = mmio_addr; 138 platform_mmio = mmio_addr;
93 platform_mmiolen = mmio_len; 139 platform_mmiolen = mmio_len;
94 140
141 /*
142 * Xen HVM guests always use the vector callback mechanism.
143 * L1 Dom0 in a nested Xen environment is a PV guest inside an
144 * HVM environment. It needs the platform-pci driver to get
145 * notifications from L0 Xen, but it cannot use the vector callback
146 * as it is not exported by L1 Xen.
147 */
148 if (xen_pv_domain()) {
149 ret = xen_allocate_irq(pdev);
150 if (ret) {
151 dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
152 goto out;
153 }
154 callback_via = get_callback_via(pdev);
155 ret = xen_set_callback_via(callback_via);
156 if (ret) {
157 dev_warn(&pdev->dev, "Unable to set the evtchn callback "
158 "err=%d\n", ret);
159 goto out;
160 }
161 }
162
95 max_nr_gframes = gnttab_max_grant_frames(); 163 max_nr_gframes = gnttab_max_grant_frames();
96 grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); 164 grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
97 ret = gnttab_setup_auto_xlat_frames(grant_frames); 165 ret = gnttab_setup_auto_xlat_frames(grant_frames);
@@ -123,6 +191,9 @@ static struct pci_driver platform_driver = {
123 .name = DRV_NAME, 191 .name = DRV_NAME,
124 .probe = platform_pci_probe, 192 .probe = platform_pci_probe,
125 .id_table = platform_pci_tbl, 193 .id_table = platform_pci_tbl,
194#ifdef CONFIG_PM
195 .resume_early = platform_pci_resume,
196#endif
126}; 197};
127 198
128builtin_pci_driver(platform_driver); 199builtin_pci_driver(platform_driver);
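
get_callback_via() above packs the PCI INTx identity into a single 64-bit callback value: a type tag in the high bits, then the PCI domain, bus, devfn, and the zero-based pin in the low two bits. A worked sketch of the packing; the shift value 56 is assumed from Xen's hvm/params.h and should be treated as an assumption:

	#include <stdint.h>
	#include <stdio.h>

	#define HVM_CALLBACK_VIA_TYPE_SHIFT 56 /* assumed, per Xen headers */

	/* Pack a PCI INTx callback the way get_callback_via() does. */
	static uint64_t pack_intx(uint16_t domain, uint8_t bus,
				  uint8_t devfn, uint8_t pin /* 1..4 */)
	{
		return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) |
		       ((uint64_t)domain << 32) |
		       ((uint64_t)bus << 16) |
		       ((uint64_t)(devfn & 0xff) << 8) |
		       ((uint64_t)(pin - 1) & 3);
	}

	int main(void)
	{
		/* 0000:00:03.0, INTA: devfn = slot << 3 | function */
		printf("via=%#llx\n",
		       (unsigned long long)pack_intx(0, 0, 3 << 3, 1));
		return 0;
	}
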
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index f905d6eeb048..f8afc6dcc29f 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -414,9 +414,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
414 if (map == SWIOTLB_MAP_ERROR) 414 if (map == SWIOTLB_MAP_ERROR)
415 return DMA_ERROR_CODE; 415 return DMA_ERROR_CODE;
416 416
417 dev_addr = xen_phys_to_bus(map);
417 xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT), 418 xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
418 dev_addr, map & ~PAGE_MASK, size, dir, attrs); 419 dev_addr, map & ~PAGE_MASK, size, dir, attrs);
419 dev_addr = xen_phys_to_bus(map);
420 420
421 /* 421 /*
422 * Ensure that the address returned is DMA'ble 422 * Ensure that the address returned is DMA'ble
@@ -575,13 +575,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
575 sg_dma_len(sgl) = 0; 575 sg_dma_len(sgl) = 0;
576 return 0; 576 return 0;
577 } 577 }
578 dev_addr = xen_phys_to_bus(map);
578 xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT), 579 xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
579 dev_addr, 580 dev_addr,
580 map & ~PAGE_MASK, 581 map & ~PAGE_MASK,
581 sg->length, 582 sg->length,
582 dir, 583 dir,
583 attrs); 584 attrs);
584 sg->dma_address = xen_phys_to_bus(map); 585 sg->dma_address = dev_addr;
585 } else { 586 } else {
586 /* we are not interested in the dma_addr returned by 587 /* we are not interested in the dma_addr returned by
587 * xen_dma_map_page, only in the potential cache flushes executed 588 * xen_dma_map_page, only in the potential cache flushes executed
diff --git a/fs/Kconfig b/fs/Kconfig
index c2a377cdda2b..83eab52fb3f6 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -38,6 +38,7 @@ config FS_DAX
38 bool "Direct Access (DAX) support" 38 bool "Direct Access (DAX) support"
39 depends on MMU 39 depends on MMU
40 depends on !(ARM || MIPS || SPARC) 40 depends on !(ARM || MIPS || SPARC)
41 select FS_IOMAP
41 help 42 help
42 Direct Access (DAX) can be used on memory-backed block devices. 43 Direct Access (DAX) can be used on memory-backed block devices.
43 If the block device supports DAX and the filesystem supports DAX, 44 If the block device supports DAX and the filesystem supports DAX,
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 5db5d1340d69..3c47614a4b32 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -331,7 +331,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
331 struct blk_plug plug; 331 struct blk_plug plug;
332 struct blkdev_dio *dio; 332 struct blkdev_dio *dio;
333 struct bio *bio; 333 struct bio *bio;
334 bool is_read = (iov_iter_rw(iter) == READ); 334 bool is_read = (iov_iter_rw(iter) == READ), is_sync;
335 loff_t pos = iocb->ki_pos; 335 loff_t pos = iocb->ki_pos;
336 blk_qc_t qc = BLK_QC_T_NONE; 336 blk_qc_t qc = BLK_QC_T_NONE;
337 int ret; 337 int ret;
@@ -344,7 +344,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
344 bio_get(bio); /* extra ref for the completion handler */ 344 bio_get(bio); /* extra ref for the completion handler */
345 345
346 dio = container_of(bio, struct blkdev_dio, bio); 346 dio = container_of(bio, struct blkdev_dio, bio);
347 dio->is_sync = is_sync_kiocb(iocb); 347 dio->is_sync = is_sync = is_sync_kiocb(iocb);
348 if (dio->is_sync) 348 if (dio->is_sync)
349 dio->waiter = current; 349 dio->waiter = current;
350 else 350 else
@@ -398,7 +398,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
398 } 398 }
399 blk_finish_plug(&plug); 399 blk_finish_plug(&plug);
400 400
401 if (!dio->is_sync) 401 if (!is_sync)
402 return -EIOCBQUEUED; 402 return -EIOCBQUEUED;
403 403
404 for (;;) { 404 for (;;) {
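
The __blkdev_direct_IO() hunks cache dio->is_sync in a local before submission: for async I/O the completion handler can free the dio as soon as the last bio completes, so re-reading dio->is_sync after blk_finish_plug() would be a use-after-free. A minimal sketch of the idiom; the free() inside submit() models the async completion racing ahead:

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct dio {
		bool is_sync;
	};

	/* Stand-in for bio submission: for async I/O the completion
	 * side may free the dio before submit() even returns. */
	static void submit(struct dio *dio)
	{
		if (!dio->is_sync)
			free(dio); /* "completion" already ran */
	}

	static int do_io(bool sync)
	{
		struct dio *dio = malloc(sizeof(*dio));
		bool is_sync;

		if (!dio)
			return -1;
		dio->is_sync = is_sync = sync; /* cache before submitting */
		submit(dio);

		if (!is_sync)      /* NOT dio->is_sync: dio may be gone */
			return 1;  /* -EIOCBQUEUED in the kernel */

		/* sync path: dio is still ours */
		free(dio);
		return 0;
	}

	int main(void)
	{
		printf("%d %d\n", do_io(true), do_io(false));
		return 0;
	}
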
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 4e024260ad71..1e861a063721 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3835,10 +3835,7 @@ cache_acl:
3835 break; 3835 break;
3836 case S_IFDIR: 3836 case S_IFDIR:
3837 inode->i_fop = &btrfs_dir_file_operations; 3837 inode->i_fop = &btrfs_dir_file_operations;
3838 if (root == fs_info->tree_root) 3838 inode->i_op = &btrfs_dir_inode_operations;
3839 inode->i_op = &btrfs_dir_ro_inode_operations;
3840 else
3841 inode->i_op = &btrfs_dir_inode_operations;
3842 break; 3839 break;
3843 case S_IFLNK: 3840 case S_IFLNK:
3844 inode->i_op = &btrfs_symlink_inode_operations; 3841 inode->i_op = &btrfs_symlink_inode_operations;
@@ -4505,8 +4502,19 @@ search_again:
4505 if (found_type > min_type) { 4502 if (found_type > min_type) {
4506 del_item = 1; 4503 del_item = 1;
4507 } else { 4504 } else {
4508 if (item_end < new_size) 4505 if (item_end < new_size) {
4506 /*
4507 * With NO_HOLES mode, for the following mapping
4508 *
4509 * [0-4k][hole][8k-12k]
4510 *
4511 * truncating isize down to 6k would otherwise
4512 * leave isize at 8k.
4513 */
4514 if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
4515 last_size = new_size;
4509 break; 4516 break;
4517 }
4510 if (found_key.offset >= new_size) 4518 if (found_key.offset >= new_size)
4511 del_item = 1; 4519 del_item = 1;
4512 else 4520 else
@@ -5710,6 +5718,7 @@ static struct inode *new_simple_dir(struct super_block *s,
5710 5718
5711 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; 5719 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5712 inode->i_op = &btrfs_dir_ro_inode_operations; 5720 inode->i_op = &btrfs_dir_ro_inode_operations;
5721 inode->i_opflags &= ~IOP_XATTR;
5713 inode->i_fop = &simple_dir_operations; 5722 inode->i_fop = &simple_dir_operations;
5714 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 5723 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5715 inode->i_mtime = current_time(inode); 5724 inode->i_mtime = current_time(inode);
@@ -7215,7 +7224,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
7215 struct extent_map *em = NULL; 7224 struct extent_map *em = NULL;
7216 int ret; 7225 int ret;
7217 7226
7218 down_read(&BTRFS_I(inode)->dio_sem);
7219 if (type != BTRFS_ORDERED_NOCOW) { 7227 if (type != BTRFS_ORDERED_NOCOW) {
7220 em = create_pinned_em(inode, start, len, orig_start, 7228 em = create_pinned_em(inode, start, len, orig_start,
7221 block_start, block_len, orig_block_len, 7229 block_start, block_len, orig_block_len,
@@ -7234,7 +7242,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
7234 em = ERR_PTR(ret); 7242 em = ERR_PTR(ret);
7235 } 7243 }
7236 out: 7244 out:
7237 up_read(&BTRFS_I(inode)->dio_sem);
7238 7245
7239 return em; 7246 return em;
7240} 7247}
@@ -8692,6 +8699,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
8692 dio_data.unsubmitted_oe_range_start = (u64)offset; 8699 dio_data.unsubmitted_oe_range_start = (u64)offset;
8693 dio_data.unsubmitted_oe_range_end = (u64)offset; 8700 dio_data.unsubmitted_oe_range_end = (u64)offset;
8694 current->journal_info = &dio_data; 8701 current->journal_info = &dio_data;
8702 down_read(&BTRFS_I(inode)->dio_sem);
8695 } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, 8703 } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
8696 &BTRFS_I(inode)->runtime_flags)) { 8704 &BTRFS_I(inode)->runtime_flags)) {
8697 inode_dio_end(inode); 8705 inode_dio_end(inode);
@@ -8704,6 +8712,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
8704 iter, btrfs_get_blocks_direct, NULL, 8712 iter, btrfs_get_blocks_direct, NULL,
8705 btrfs_submit_direct, flags); 8713 btrfs_submit_direct, flags);
8706 if (iov_iter_rw(iter) == WRITE) { 8714 if (iov_iter_rw(iter) == WRITE) {
8715 up_read(&BTRFS_I(inode)->dio_sem);
8707 current->journal_info = NULL; 8716 current->journal_info = NULL;
8708 if (ret < 0 && ret != -EIOCBQUEUED) { 8717 if (ret < 0 && ret != -EIOCBQUEUED) {
8709 if (dio_data.reserve) 8718 if (dio_data.reserve)
@@ -9212,6 +9221,7 @@ static int btrfs_truncate(struct inode *inode)
9212 break; 9221 break;
9213 } 9222 }
9214 9223
9224 btrfs_block_rsv_release(fs_info, rsv, -1);
9215 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, 9225 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
9216 rsv, min_size, 0); 9226 rsv, min_size, 0);
9217 BUG_ON(ret); /* shouldn't happen */ 9227 BUG_ON(ret); /* shouldn't happen */
@@ -10579,8 +10589,6 @@ static const struct inode_operations btrfs_dir_inode_operations = {
10579static const struct inode_operations btrfs_dir_ro_inode_operations = { 10589static const struct inode_operations btrfs_dir_ro_inode_operations = {
10580 .lookup = btrfs_lookup, 10590 .lookup = btrfs_lookup,
10581 .permission = btrfs_permission, 10591 .permission = btrfs_permission,
10582 .get_acl = btrfs_get_acl,
10583 .set_acl = btrfs_set_acl,
10584 .update_time = btrfs_update_time, 10592 .update_time = btrfs_update_time,
10585}; 10593};
10586 10594
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index baea866a6751..94fd76d04683 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2591,8 +2591,13 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
2591 add_wait_queue(&ci->i_cap_wq, &wait); 2591 add_wait_queue(&ci->i_cap_wq, &wait);
2592 2592
2593 while (!try_get_cap_refs(ci, need, want, endoff, 2593 while (!try_get_cap_refs(ci, need, want, endoff,
2594 true, &_got, &err)) 2594 true, &_got, &err)) {
2595 if (signal_pending(current)) {
2596 ret = -ERESTARTSYS;
2597 break;
2598 }
2595 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); 2599 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2600 }
2596 2601
2597 remove_wait_queue(&ci->i_cap_wq, &wait); 2602 remove_wait_queue(&ci->i_cap_wq, &wait);
2598 2603
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index d7a93696663b..8ab1fdf0bd49 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1230,7 +1230,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
1230 struct ceph_mds_client *mdsc = 1230 struct ceph_mds_client *mdsc =
1231 ceph_sb_to_client(dir->i_sb)->mdsc; 1231 ceph_sb_to_client(dir->i_sb)->mdsc;
1232 struct ceph_mds_request *req; 1232 struct ceph_mds_request *req;
1233 int op, mask, err; 1233 int op, err;
1234 u32 mask;
1234 1235
1235 if (flags & LOOKUP_RCU) 1236 if (flags & LOOKUP_RCU)
1236 return -ECHILD; 1237 return -ECHILD;
@@ -1245,7 +1246,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
1245 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; 1246 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
1246 if (ceph_security_xattr_wanted(dir)) 1247 if (ceph_security_xattr_wanted(dir))
1247 mask |= CEPH_CAP_XATTR_SHARED; 1248 mask |= CEPH_CAP_XATTR_SHARED;
1248 req->r_args.getattr.mask = mask; 1249 req->r_args.getattr.mask = cpu_to_le32(mask);
1249 1250
1250 err = ceph_mdsc_do_request(mdsc, NULL, req); 1251 err = ceph_mdsc_do_request(mdsc, NULL, req);
1251 switch (err) { 1252 switch (err) {
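
The ceph_d_revalidate() fix types mask as u32 and stores it with cpu_to_le32(), since r_args.getattr.mask is a little-endian wire field; on a big-endian host the raw store would put the bytes in the wrong order. A portable sketch of the conversion, done by hand instead of with the kernel's cpu_to_le32():

	#include <stdint.h>
	#include <stdio.h>

	/* cpu -> little-endian 32-bit, independent of host byte order. */
	static void put_le32(uint8_t out[4], uint32_t v)
	{
		out[0] = v & 0xff;
		out[1] = (v >> 8) & 0xff;
		out[2] = (v >> 16) & 0xff;
		out[3] = (v >> 24) & 0xff;
	}

	int main(void)
	{
		uint8_t wire[4];
		uint32_t mask = 0x00000105; /* illustrative cap bits */

		put_le32(wire, mask);
		printf("%02x %02x %02x %02x\n",
		       wire[0], wire[1], wire[2], wire[3]); /* 05 01 00 00 */
		return 0;
	}
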
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 398e5328b309..5e659d054b40 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -305,7 +305,8 @@ static int frag_tree_split_cmp(const void *l, const void *r)
305{ 305{
306 struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l; 306 struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
307 struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r; 307 struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
308 return ceph_frag_compare(ls->frag, rs->frag); 308 return ceph_frag_compare(le32_to_cpu(ls->frag),
309 le32_to_cpu(rs->frag));
309} 310}
310 311
311static bool is_frag_child(u32 f, struct ceph_inode_frag *frag) 312static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index ec6b35e9f966..c9d2e553a6c4 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -288,12 +288,13 @@ static int parse_reply_info_extra(void **p, void *end,
288 struct ceph_mds_reply_info_parsed *info, 288 struct ceph_mds_reply_info_parsed *info,
289 u64 features) 289 u64 features)
290{ 290{
291 if (info->head->op == CEPH_MDS_OP_GETFILELOCK) 291 u32 op = le32_to_cpu(info->head->op);
292
293 if (op == CEPH_MDS_OP_GETFILELOCK)
292 return parse_reply_info_filelock(p, end, info, features); 294 return parse_reply_info_filelock(p, end, info, features);
293 else if (info->head->op == CEPH_MDS_OP_READDIR || 295 else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
294 info->head->op == CEPH_MDS_OP_LSSNAP)
295 return parse_reply_info_dir(p, end, info, features); 296 return parse_reply_info_dir(p, end, info, features);
296 else if (info->head->op == CEPH_MDS_OP_CREATE) 297 else if (op == CEPH_MDS_OP_CREATE)
297 return parse_reply_info_create(p, end, info, features); 298 return parse_reply_info_create(p, end, info, features);
298 else 299 else
299 return -EIO; 300 return -EIO;
diff --git a/fs/dax.c b/fs/dax.c
index ddcddfeaa03b..3af2da5e64ce 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -990,7 +990,6 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
990} 990}
991EXPORT_SYMBOL_GPL(__dax_zero_page_range); 991EXPORT_SYMBOL_GPL(__dax_zero_page_range);
992 992
993#ifdef CONFIG_FS_IOMAP
994static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos) 993static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
995{ 994{
996 return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9); 995 return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
@@ -1428,4 +1427,3 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
1428} 1427}
1429EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault); 1428EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
1430#endif /* CONFIG_FS_DAX_PMD */ 1429#endif /* CONFIG_FS_DAX_PMD */
1431#endif /* CONFIG_FS_IOMAP */
diff --git a/fs/ext2/Kconfig b/fs/ext2/Kconfig
index 36bea5adcaba..c634874e12d9 100644
--- a/fs/ext2/Kconfig
+++ b/fs/ext2/Kconfig
@@ -1,6 +1,5 @@
1config EXT2_FS 1config EXT2_FS
2 tristate "Second extended fs support" 2 tristate "Second extended fs support"
3 select FS_IOMAP if FS_DAX
4 help 3 help
5 Ext2 is a standard Linux file system for hard disks. 4 Ext2 is a standard Linux file system for hard disks.
6 5
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 7b90691e98c4..e38039fd96ff 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -37,7 +37,6 @@ config EXT4_FS
37 select CRC16 37 select CRC16
38 select CRYPTO 38 select CRYPTO
39 select CRYPTO_CRC32C 39 select CRYPTO_CRC32C
40 select FS_IOMAP if FS_DAX
41 help 40 help
42 This is the next generation of the ext3 filesystem. 41 This is the next generation of the ext3 filesystem.
43 42
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 70ea57c7b6bb..4e06a27ed7f8 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2025,7 +2025,6 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
2025 struct fuse_req *req; 2025 struct fuse_req *req;
2026 req = list_entry(head->next, struct fuse_req, list); 2026 req = list_entry(head->next, struct fuse_req, list);
2027 req->out.h.error = -ECONNABORTED; 2027 req->out.h.error = -ECONNABORTED;
2028 clear_bit(FR_PENDING, &req->flags);
2029 clear_bit(FR_SENT, &req->flags); 2028 clear_bit(FR_SENT, &req->flags);
2030 list_del_init(&req->list); 2029 list_del_init(&req->list);
2031 request_end(fc, req); 2030 request_end(fc, req);
@@ -2103,6 +2102,8 @@ void fuse_abort_conn(struct fuse_conn *fc)
2103 spin_lock(&fiq->waitq.lock); 2102 spin_lock(&fiq->waitq.lock);
2104 fiq->connected = 0; 2103 fiq->connected = 0;
2105 list_splice_init(&fiq->pending, &to_end2); 2104 list_splice_init(&fiq->pending, &to_end2);
2105 list_for_each_entry(req, &to_end2, list)
2106 clear_bit(FR_PENDING, &req->flags);
2106 while (forget_pending(fiq)) 2107 while (forget_pending(fiq))
2107 kfree(dequeue_forget(fiq, 1, NULL)); 2108 kfree(dequeue_forget(fiq, 1, NULL));
2108 wake_up_all_locked(&fiq->waitq); 2109 wake_up_all_locked(&fiq->waitq);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 1f7c732f32b0..811fd8929a18 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -68,7 +68,7 @@ static u64 time_to_jiffies(u64 sec, u32 nsec)
68 if (sec || nsec) { 68 if (sec || nsec) {
69 struct timespec64 ts = { 69 struct timespec64 ts = {
70 sec, 70 sec,
71 max_t(u32, nsec, NSEC_PER_SEC - 1) 71 min_t(u32, nsec, NSEC_PER_SEC - 1)
72 }; 72 };
73 73
74 return get_jiffies_64() + timespec64_to_jiffies(&ts); 74 return get_jiffies_64() + timespec64_to_jiffies(&ts);
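
The one-character fuse fix above is max_t -> min_t: the intent is to clamp an out-of-range nanoseconds field at NSEC_PER_SEC - 1, but max_t() instead raised nearly every nsec value to 999999999 and inflated the entry timeout. A tiny sketch of the corrected clamp:

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000u

	static uint32_t clamp_nsec(uint32_t nsec)
	{
		/* the old max() turned e.g. 500 into 999999999 */
		return nsec < NSEC_PER_SEC - 1 ? nsec : NSEC_PER_SEC - 1;
	}

	int main(void)
	{
		printf("%u\n", clamp_nsec(500));         /* 500 */
		printf("%u\n", clamp_nsec(2000000000u)); /* 999999999 */
		return 0;
	}
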
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 9ad48d9202a9..023bb0b03352 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -154,29 +154,38 @@ out_err:
154static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d, 154static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
155 struct dentry **ret) 155 struct dentry **ret)
156{ 156{
157 const char *s = d->name.name; 157 /* Counting down from the end, since the prefix can change */
158 size_t rem = d->name.len - 1;
158 struct dentry *dentry = NULL; 159 struct dentry *dentry = NULL;
159 int err; 160 int err;
160 161
161 if (*s != '/') 162 if (d->name.name[0] != '/')
162 return ovl_lookup_single(base, d, d->name.name, d->name.len, 163 return ovl_lookup_single(base, d, d->name.name, d->name.len,
163 0, "", ret); 164 0, "", ret);
164 165
165 while (*s++ == '/' && !IS_ERR_OR_NULL(base) && d_can_lookup(base)) { 166 while (!IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
167 const char *s = d->name.name + d->name.len - rem;
166 const char *next = strchrnul(s, '/'); 168 const char *next = strchrnul(s, '/');
167 size_t slen = strlen(s); 169 size_t thislen = next - s;
170 bool end = !next[0];
168 171
169 if (WARN_ON(slen > d->name.len) || 172 /* Verify we did not go off the rails */
170 WARN_ON(strcmp(d->name.name + d->name.len - slen, s))) 173 if (WARN_ON(s[-1] != '/'))
171 return -EIO; 174 return -EIO;
172 175
173 err = ovl_lookup_single(base, d, s, next - s, 176 err = ovl_lookup_single(base, d, s, thislen,
174 d->name.len - slen, next, &base); 177 d->name.len - rem, next, &base);
175 dput(dentry); 178 dput(dentry);
176 if (err) 179 if (err)
177 return err; 180 return err;
178 dentry = base; 181 dentry = base;
179 s = next; 182 if (end)
183 break;
184
185 rem -= thislen + 1;
186
187 if (WARN_ON(rem >= d->name.len))
188 return -EIO;
180 } 189 }
181 *ret = dentry; 190 *ret = dentry;
182 return 0; 191 return 0;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 8e7e61b28f31..87c9a9aacda3 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3179,6 +3179,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
3179 iter.tgid += 1, iter = next_tgid(ns, iter)) { 3179 iter.tgid += 1, iter = next_tgid(ns, iter)) {
3180 char name[PROC_NUMBUF]; 3180 char name[PROC_NUMBUF];
3181 int len; 3181 int len;
3182
3183 cond_resched();
3182 if (!has_pid_permissions(ns, iter.task, 2)) 3184 if (!has_pid_permissions(ns, iter.task, 2))
3183 continue; 3185 continue;
3184 3186
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index d0f8a38dfafa..0186fe6d39f3 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -74,6 +74,7 @@
74#include <linux/highmem.h> 74#include <linux/highmem.h>
75#include <linux/pagemap.h> 75#include <linux/pagemap.h>
76#include <linux/uaccess.h> 76#include <linux/uaccess.h>
77#include <linux/major.h>
77#include "internal.h" 78#include "internal.h"
78 79
79static struct kmem_cache *romfs_inode_cachep; 80static struct kmem_cache *romfs_inode_cachep;
@@ -416,7 +417,22 @@ static void romfs_destroy_inode(struct inode *inode)
416static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf) 417static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
417{ 418{
418 struct super_block *sb = dentry->d_sb; 419 struct super_block *sb = dentry->d_sb;
419 u64 id = huge_encode_dev(sb->s_bdev->bd_dev); 420 u64 id = 0;
421
422 /* When calling huge_encode_dev(),
423 * use sb->s_bdev->bd_dev when,
424 * - CONFIG_ROMFS_ON_BLOCK defined
425 * use sb->s_dev when,
426 * - CONFIG_ROMFS_ON_BLOCK undefined and
427 * - CONFIG_ROMFS_ON_MTD defined
428 * leave id as 0 when,
429 * - CONFIG_ROMFS_ON_BLOCK undefined and
430 * - CONFIG_ROMFS_ON_MTD undefined
431 */
432 if (sb->s_bdev)
433 id = huge_encode_dev(sb->s_bdev->bd_dev);
434 else if (sb->s_dev)
435 id = huge_encode_dev(sb->s_dev);
420 436
421 buf->f_type = ROMFS_MAGIC; 437 buf->f_type = ROMFS_MAGIC;
422 buf->f_namelen = ROMFS_MAXFN; 438 buf->f_namelen = ROMFS_MAXFN;
@@ -489,6 +505,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
489 sb->s_flags |= MS_RDONLY | MS_NOATIME; 505 sb->s_flags |= MS_RDONLY | MS_NOATIME;
490 sb->s_op = &romfs_super_ops; 506 sb->s_op = &romfs_super_ops;
491 507
508#ifdef CONFIG_ROMFS_ON_MTD
509 /* Use same dev ID from the underlying mtdblock device */
510 if (sb->s_mtd)
511 sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
512#endif
492 /* read the image superblock and check it */ 513 /* read the image superblock and check it */
493 rsb = kmalloc(512, GFP_KERNEL); 514 rsb = kmalloc(512, GFP_KERNEL);
494 if (!rsb) 515 if (!rsb)
diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig
index 0a908ae7af13..b0d0623c83ed 100644
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -53,7 +53,7 @@ config UBIFS_ATIME_SUPPORT
53 53
54config UBIFS_FS_ENCRYPTION 54config UBIFS_FS_ENCRYPTION
55 bool "UBIFS Encryption" 55 bool "UBIFS Encryption"
56 depends on UBIFS_FS 56 depends on UBIFS_FS && BLOCK
57 select FS_ENCRYPTION 57 select FS_ENCRYPTION
58 default n 58 default n
59 help 59 help
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 1c5331ac9614..528369f3e472 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -390,16 +390,6 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
390 dbg_gen("dent '%pd', mode %#hx in dir ino %lu", 390 dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
391 dentry, mode, dir->i_ino); 391 dentry, mode, dir->i_ino);
392 392
393 if (ubifs_crypt_is_encrypted(dir)) {
394 err = fscrypt_get_encryption_info(dir);
395 if (err)
396 return err;
397
398 if (!fscrypt_has_encryption_key(dir)) {
399 return -EPERM;
400 }
401 }
402
403 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); 393 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
404 if (err) 394 if (err)
405 return err; 395 return err;
@@ -741,17 +731,9 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
741 ubifs_assert(inode_is_locked(dir)); 731 ubifs_assert(inode_is_locked(dir));
742 ubifs_assert(inode_is_locked(inode)); 732 ubifs_assert(inode_is_locked(inode));
743 733
744 if (ubifs_crypt_is_encrypted(dir)) { 734 if (ubifs_crypt_is_encrypted(dir) &&
745 if (!fscrypt_has_permitted_context(dir, inode)) 735 !fscrypt_has_permitted_context(dir, inode))
746 return -EPERM; 736 return -EPERM;
747
748 err = fscrypt_get_encryption_info(inode);
749 if (err)
750 return err;
751
752 if (!fscrypt_has_encryption_key(inode))
753 return -EPERM;
754 }
755 737
756 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); 738 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
757 if (err) 739 if (err)
@@ -1000,17 +982,6 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
1000 if (err) 982 if (err)
1001 return err; 983 return err;
1002 984
1003 if (ubifs_crypt_is_encrypted(dir)) {
1004 err = fscrypt_get_encryption_info(dir);
1005 if (err)
1006 goto out_budg;
1007
1008 if (!fscrypt_has_encryption_key(dir)) {
1009 err = -EPERM;
1010 goto out_budg;
1011 }
1012 }
1013
1014 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); 985 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
1015 if (err) 986 if (err)
1016 goto out_budg; 987 goto out_budg;
@@ -1096,17 +1067,6 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
1096 return err; 1067 return err;
1097 } 1068 }
1098 1069
1099 if (ubifs_crypt_is_encrypted(dir)) {
1100 err = fscrypt_get_encryption_info(dir);
1101 if (err)
1102 goto out_budg;
1103
1104 if (!fscrypt_has_encryption_key(dir)) {
1105 err = -EPERM;
1106 goto out_budg;
1107 }
1108 }
1109
1110 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); 1070 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
1111 if (err) 1071 if (err)
1112 goto out_budg; 1072 goto out_budg;
@@ -1231,18 +1191,6 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
1231 goto out_inode; 1191 goto out_inode;
1232 } 1192 }
1233 1193
1234 err = fscrypt_get_encryption_info(inode);
1235 if (err) {
1236 kfree(sd);
1237 goto out_inode;
1238 }
1239
1240 if (!fscrypt_has_encryption_key(inode)) {
1241 kfree(sd);
1242 err = -EPERM;
1243 goto out_inode;
1244 }
1245
1246 ostr.name = sd->encrypted_path; 1194 ostr.name = sd->encrypted_path;
1247 ostr.len = disk_link.len; 1195 ostr.len = disk_link.len;
1248 1196
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 78d713644df3..da519ba205f6 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -217,6 +217,9 @@ long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
217 case FS_IOC32_SETFLAGS: 217 case FS_IOC32_SETFLAGS:
218 cmd = FS_IOC_SETFLAGS; 218 cmd = FS_IOC_SETFLAGS;
219 break; 219 break;
220 case FS_IOC_SET_ENCRYPTION_POLICY:
221 case FS_IOC_GET_ENCRYPTION_POLICY:
222 break;
220 default: 223 default:
221 return -ENOIOCTLCMD; 224 return -ENOIOCTLCMD;
222 } 225 }
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index a459211a1c21..294519b98874 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -744,6 +744,7 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
744 744
745 } else { 745 } else {
746 data->compr_size = 0; 746 data->compr_size = 0;
747 out_len = compr_len;
747 } 748 }
748 749
749 dlen = UBIFS_DATA_NODE_SZ + out_len; 750 dlen = UBIFS_DATA_NODE_SZ + out_len;
@@ -1319,6 +1320,7 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
1319 dn->compr_type = cpu_to_le16(compr_type); 1320 dn->compr_type = cpu_to_le16(compr_type);
1320 dn->size = cpu_to_le32(*new_len); 1321 dn->size = cpu_to_le32(*new_len);
1321 *new_len = UBIFS_DATA_NODE_SZ + out_len; 1322 *new_len = UBIFS_DATA_NODE_SZ + out_len;
1323 err = 0;
1322out: 1324out:
1323 kfree(buf); 1325 kfree(buf);
1324 return err; 1326 return err;
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 74ae2de949df..709aa098dd46 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -34,6 +34,11 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include "ubifs.h" 35#include "ubifs.h"
36 36
37static int try_read_node(const struct ubifs_info *c, void *buf, int type,
38 int len, int lnum, int offs);
39static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
40 struct ubifs_zbranch *zbr, void *node);
41
37/* 42/*
38 * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions. 43 * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
39 * @NAME_LESS: name corresponding to the first argument is less than second 44 * @NAME_LESS: name corresponding to the first argument is less than second
@@ -402,7 +407,19 @@ static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
402 return 0; 407 return 0;
403 } 408 }
404 409
405 err = ubifs_tnc_read_node(c, zbr, node); 410 if (c->replaying) {
411 err = fallible_read_node(c, &zbr->key, zbr, node);
412 /*
413 * Return -ENOENT when the node was not found, 0 otherwise.
414 * Negative return codes stay as-is.
415 */
416 if (err == 0)
417 err = -ENOENT;
418 else if (err == 1)
419 err = 0;
420 } else {
421 err = ubifs_tnc_read_node(c, zbr, node);
422 }
406 if (err) 423 if (err)
407 return err; 424 return err;
408 425
@@ -2857,7 +2874,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
2857 if (fname_len(nm) > 0) { 2874 if (fname_len(nm) > 0) {
2858 if (err) { 2875 if (err) {
2859 /* Handle collisions */ 2876 /* Handle collisions */
2860 err = resolve_collision(c, key, &znode, &n, nm); 2877 if (c->replaying)
2878 err = fallible_resolve_collision(c, key, &znode, &n,
2879 nm, 0);
2880 else
2881 err = resolve_collision(c, key, &znode, &n, nm);
2861 dbg_tnc("rc returned %d, znode %p, n %d", 2882 dbg_tnc("rc returned %d, znode %p, n %d",
2862 err, znode, n); 2883 err, znode, n);
2863 if (unlikely(err < 0)) 2884 if (unlikely(err < 0))
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index d96e2f30084b..43953e03c356 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -63,6 +63,7 @@ struct userfaultfd_wait_queue {
63 struct uffd_msg msg; 63 struct uffd_msg msg;
64 wait_queue_t wq; 64 wait_queue_t wq;
65 struct userfaultfd_ctx *ctx; 65 struct userfaultfd_ctx *ctx;
66 bool waken;
66}; 67};
67 68
68struct userfaultfd_wake_range { 69struct userfaultfd_wake_range {
@@ -86,6 +87,12 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
86 if (len && (start > uwq->msg.arg.pagefault.address || 87 if (len && (start > uwq->msg.arg.pagefault.address ||
87 start + len <= uwq->msg.arg.pagefault.address)) 88 start + len <= uwq->msg.arg.pagefault.address))
88 goto out; 89 goto out;
90 WRITE_ONCE(uwq->waken, true);
91 /*
92 * The implicit smp_mb__before_spinlock in try_to_wake_up()
93 * renders uwq->waken visible to other CPUs before the task is
94 * waken.
95 */
89 ret = wake_up_state(wq->private, mode); 96 ret = wake_up_state(wq->private, mode);
90 if (ret) 97 if (ret)
91 /* 98 /*
@@ -264,6 +271,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
264 struct userfaultfd_wait_queue uwq; 271 struct userfaultfd_wait_queue uwq;
265 int ret; 272 int ret;
266 bool must_wait, return_to_userland; 273 bool must_wait, return_to_userland;
274 long blocking_state;
267 275
268 BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); 276 BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
269 277
@@ -334,10 +342,13 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
334 uwq.wq.private = current; 342 uwq.wq.private = current;
335 uwq.msg = userfault_msg(vmf->address, vmf->flags, reason); 343 uwq.msg = userfault_msg(vmf->address, vmf->flags, reason);
336 uwq.ctx = ctx; 344 uwq.ctx = ctx;
345 uwq.waken = false;
337 346
338 return_to_userland = 347 return_to_userland =
339 (vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) == 348 (vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
340 (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE); 349 (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
350 blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
351 TASK_KILLABLE;
341 352
342 spin_lock(&ctx->fault_pending_wqh.lock); 353 spin_lock(&ctx->fault_pending_wqh.lock);
343 /* 354 /*
@@ -350,8 +361,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
350 * following the spin_unlock to happen before the list_add in 361 * following the spin_unlock to happen before the list_add in
351 * __add_wait_queue. 362 * __add_wait_queue.
352 */ 363 */
353 set_current_state(return_to_userland ? TASK_INTERRUPTIBLE : 364 set_current_state(blocking_state);
354 TASK_KILLABLE);
355 spin_unlock(&ctx->fault_pending_wqh.lock); 365 spin_unlock(&ctx->fault_pending_wqh.lock);
356 366
357 must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags, 367 must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
@@ -364,6 +374,29 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
364 wake_up_poll(&ctx->fd_wqh, POLLIN); 374 wake_up_poll(&ctx->fd_wqh, POLLIN);
365 schedule(); 375 schedule();
366 ret |= VM_FAULT_MAJOR; 376 ret |= VM_FAULT_MAJOR;
377
378 /*
378 * False wakeups can originate even from rwsem before
379 * up_read(); however, userfaults will wait either for a
381 * targeted wakeup on the specific uwq waitqueue from
382 * wake_userfault() or for signals or for uffd
383 * release.
384 */
385 while (!READ_ONCE(uwq.waken)) {
386 /*
387 * This needs the full smp_store_mb()
388 * guarantee as the state write must be
389 * visible to other CPUs before reading
390 * uwq.waken from other CPUs.
391 */
392 set_current_state(blocking_state);
393 if (READ_ONCE(uwq.waken) ||
394 READ_ONCE(ctx->released) ||
395 (return_to_userland ? signal_pending(current) :
396 fatal_signal_pending(current)))
397 break;
398 schedule();
399 }
367 } 400 }
368 401
369 __set_current_state(TASK_RUNNING); 402 __set_current_state(TASK_RUNNING);
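
The new handle_userfault() loop tolerates spurious wakeups: it re-arms the blocking state, re-checks uwq.waken along with the release and signal conditions, and only then schedules again, with WRITE_ONCE/READ_ONCE plus the wakeup-side barrier keeping the flag visible across CPUs. A userspace sketch of the same wait-for-targeted-wakeup shape, using C11 atomics and a condvar in place of the kernel primitives:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <unistd.h>

	static atomic_bool waken;
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

	static void *waker(void *arg)
	{
		(void)arg;
		usleep(10000);
		pthread_mutex_lock(&lock);
		atomic_store(&waken, true); /* like WRITE_ONCE then wake */
		pthread_cond_broadcast(&cond);
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, waker, NULL);
		pthread_mutex_lock(&lock);
		/* Loop until the *targeted* wakeup: a spurious condvar
		 * wakeup just re-checks the flag, mirroring uwq.waken. */
		while (!atomic_load(&waken))
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);
		pthread_join(t, NULL);
		puts("woken for real");
		return 0;
	}
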
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index d346d42c54d1..33db69be4832 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -39,6 +39,7 @@
39#include "xfs_rmap_btree.h" 39#include "xfs_rmap_btree.h"
40#include "xfs_btree.h" 40#include "xfs_btree.h"
41#include "xfs_refcount_btree.h" 41#include "xfs_refcount_btree.h"
42#include "xfs_ialloc_btree.h"
42 43
43/* 44/*
44 * Per-AG Block Reservations 45 * Per-AG Block Reservations
@@ -200,22 +201,30 @@ __xfs_ag_resv_init(
200 struct xfs_mount *mp = pag->pag_mount; 201 struct xfs_mount *mp = pag->pag_mount;
201 struct xfs_ag_resv *resv; 202 struct xfs_ag_resv *resv;
202 int error; 203 int error;
204 xfs_extlen_t reserved;
203 205
204 resv = xfs_perag_resv(pag, type);
205 if (used > ask) 206 if (used > ask)
206 ask = used; 207 ask = used;
207 resv->ar_asked = ask; 208 reserved = ask - used;
208 resv->ar_reserved = resv->ar_orig_reserved = ask - used;
209 mp->m_ag_max_usable -= ask;
210 209
211 trace_xfs_ag_resv_init(pag, type, ask); 210 error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true);
212 211 if (error) {
213 error = xfs_mod_fdblocks(mp, -(int64_t)resv->ar_reserved, true);
214 if (error)
215 trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno, 212 trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
216 error, _RET_IP_); 213 error, _RET_IP_);
214 xfs_warn(mp,
215"Per-AG reservation for AG %u failed. Filesystem may run out of space.",
216 pag->pag_agno);
217 return error;
218 }
217 219
218 return error; 220 mp->m_ag_max_usable -= ask;
221
222 resv = xfs_perag_resv(pag, type);
223 resv->ar_asked = ask;
224 resv->ar_reserved = resv->ar_orig_reserved = reserved;
225
226 trace_xfs_ag_resv_init(pag, type, ask);
227 return 0;
219} 228}
220 229
221/* Create a per-AG block reservation. */ 230/* Create a per-AG block reservation. */
@@ -223,6 +232,8 @@ int
223xfs_ag_resv_init( 232xfs_ag_resv_init(
224 struct xfs_perag *pag) 233 struct xfs_perag *pag)
225{ 234{
235 struct xfs_mount *mp = pag->pag_mount;
236 xfs_agnumber_t agno = pag->pag_agno;
226 xfs_extlen_t ask; 237 xfs_extlen_t ask;
227 xfs_extlen_t used; 238 xfs_extlen_t used;
228 int error = 0; 239 int error = 0;
@@ -231,23 +242,45 @@ xfs_ag_resv_init(
231 if (pag->pag_meta_resv.ar_asked == 0) { 242 if (pag->pag_meta_resv.ar_asked == 0) {
232 ask = used = 0; 243 ask = used = 0;
233 244
234 error = xfs_refcountbt_calc_reserves(pag->pag_mount, 245 error = xfs_refcountbt_calc_reserves(mp, agno, &ask, &used);
235 pag->pag_agno, &ask, &used);
236 if (error) 246 if (error)
237 goto out; 247 goto out;
238 248
239 error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA, 249 error = xfs_finobt_calc_reserves(mp, agno, &ask, &used);
240 ask, used);
241 if (error) 250 if (error)
242 goto out; 251 goto out;
252
253 error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
254 ask, used);
255 if (error) {
256 /*
257 * Because we didn't have per-AG reservations when the
258 * finobt feature was added we might not be able to
259 * reserve all needed blocks. Warn and fall back to the
260 * old and potentially buggy code in that case, but
261 * ensure we do have the reservation for the refcountbt.
262 */
263 ask = used = 0;
264
265 mp->m_inotbt_nores = true;
266
267 error = xfs_refcountbt_calc_reserves(mp, agno, &ask,
268 &used);
269 if (error)
270 goto out;
271
272 error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
273 ask, used);
274 if (error)
275 goto out;
276 }
243 } 277 }
244 278
245 /* Create the AGFL metadata reservation */ 279 /* Create the AGFL metadata reservation */
246 if (pag->pag_agfl_resv.ar_asked == 0) { 280 if (pag->pag_agfl_resv.ar_asked == 0) {
247 ask = used = 0; 281 ask = used = 0;
248 282
249 error = xfs_rmapbt_calc_reserves(pag->pag_mount, pag->pag_agno, 283 error = xfs_rmapbt_calc_reserves(mp, agno, &ask, &used);
250 &ask, &used);
251 if (error) 284 if (error)
252 goto out; 285 goto out;
253 286
@@ -256,9 +289,16 @@ xfs_ag_resv_init(
256 goto out; 289 goto out;
257 } 290 }
258 291
292#ifdef DEBUG
293 /* need to read in the AGF for the ASSERT below to work */
294 error = xfs_alloc_pagf_init(pag->pag_mount, NULL, pag->pag_agno, 0);
295 if (error)
296 return error;
297
259 ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved + 298 ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
260 xfs_perag_resv(pag, XFS_AG_RESV_AGFL)->ar_reserved <= 299 xfs_perag_resv(pag, XFS_AG_RESV_AGFL)->ar_reserved <=
261 pag->pagf_freeblks + pag->pagf_flcount); 300 pag->pagf_freeblks + pag->pagf_flcount);
301#endif
262out: 302out:
263 return error; 303 return error;
264} 304}
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index af1ecb19121e..6622d46ddec3 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -131,9 +131,6 @@ xfs_attr_get(
131 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 131 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
132 return -EIO; 132 return -EIO;
133 133
134 if (!xfs_inode_hasattr(ip))
135 return -ENOATTR;
136
137 error = xfs_attr_args_init(&args, ip, name, flags); 134 error = xfs_attr_args_init(&args, ip, name, flags);
138 if (error) 135 if (error)
139 return error; 136 return error;
@@ -392,9 +389,6 @@ xfs_attr_remove(
392 if (XFS_FORCED_SHUTDOWN(dp->i_mount)) 389 if (XFS_FORCED_SHUTDOWN(dp->i_mount))
393 return -EIO; 390 return -EIO;
394 391
395 if (!xfs_inode_hasattr(dp))
396 return -ENOATTR;
397
398 error = xfs_attr_args_init(&args, dp, name, flags); 392 error = xfs_attr_args_init(&args, dp, name, flags);
399 if (error) 393 if (error)
400 return error; 394 return error;
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 44773c9eb957..bfc00de5c6f1 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3629,7 +3629,7 @@ xfs_bmap_btalloc(
3629 align = xfs_get_cowextsz_hint(ap->ip); 3629 align = xfs_get_cowextsz_hint(ap->ip);
3630 else if (xfs_alloc_is_userdata(ap->datatype)) 3630 else if (xfs_alloc_is_userdata(ap->datatype))
3631 align = xfs_get_extsz_hint(ap->ip); 3631 align = xfs_get_extsz_hint(ap->ip);
3632 if (unlikely(align)) { 3632 if (align) {
3633 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, 3633 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
3634 align, 0, ap->eof, 0, ap->conv, 3634 align, 0, ap->eof, 0, ap->conv,
3635 &ap->offset, &ap->length); 3635 &ap->offset, &ap->length);
@@ -3701,7 +3701,7 @@ xfs_bmap_btalloc(
3701 args.minlen = ap->minlen; 3701 args.minlen = ap->minlen;
3702 } 3702 }
3703 /* apply extent size hints if obtained earlier */ 3703 /* apply extent size hints if obtained earlier */
3704 if (unlikely(align)) { 3704 if (align) {
3705 args.prod = align; 3705 args.prod = align;
3706 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod))) 3706 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
3707 args.mod = (xfs_extlen_t)(args.prod - args.mod); 3707 args.mod = (xfs_extlen_t)(args.prod - args.mod);
@@ -4514,8 +4514,6 @@ xfs_bmapi_write(
4514 int n; /* current extent index */ 4514 int n; /* current extent index */
4515 xfs_fileoff_t obno; /* old block number (offset) */ 4515 xfs_fileoff_t obno; /* old block number (offset) */
4516 int whichfork; /* data or attr fork */ 4516 int whichfork; /* data or attr fork */
4517 char inhole; /* current location is hole in file */
4518 char wasdelay; /* old extent was delayed */
4519 4517
4520#ifdef DEBUG 4518#ifdef DEBUG
4521 xfs_fileoff_t orig_bno; /* original block number value */ 4519 xfs_fileoff_t orig_bno; /* original block number value */
@@ -4603,22 +4601,44 @@ xfs_bmapi_write(
4603 bma.firstblock = firstblock; 4601 bma.firstblock = firstblock;
4604 4602
4605 while (bno < end && n < *nmap) { 4603 while (bno < end && n < *nmap) {
4606 inhole = eof || bma.got.br_startoff > bno; 4604 bool need_alloc = false, wasdelay = false;
4607 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
4608 4605
4609 /* 4606 /* in hole or beyond EOF? */
4610 * Make sure we only reflink into a hole. 4607 if (eof || bma.got.br_startoff > bno) {
4611 */ 4608 if (flags & XFS_BMAPI_DELALLOC) {
4612 if (flags & XFS_BMAPI_REMAP) 4609 /*
4613 ASSERT(inhole); 4610 * For the COW fork we can reasonably get a
4614 if (flags & XFS_BMAPI_COWFORK) 4611 * request for converting an extent that races
4615 ASSERT(!inhole); 4612 * with other threads already having converted
4613 * part of it, since converting COW to
4614 * regular blocks is not protected by the
4615 * IOLOCK.
4616 */
4617 ASSERT(flags & XFS_BMAPI_COWFORK);
4618 if (!(flags & XFS_BMAPI_COWFORK)) {
4619 error = -EIO;
4620 goto error0;
4621 }
4622
4623 if (eof || bno >= end)
4624 break;
4625 } else {
4626 need_alloc = true;
4627 }
4628 } else {
4629 /*
4630 * Make sure we only reflink into a hole.
4631 */
4632 ASSERT(!(flags & XFS_BMAPI_REMAP));
4633 if (isnullstartblock(bma.got.br_startblock))
4634 wasdelay = true;
4635 }
4616 4636
4617 /* 4637 /*
4618 * First, deal with the hole before the allocated space 4638 * First, deal with the hole before the allocated space
4619 * that we found, if any. 4639 * that we found, if any.
4620 */ 4640 */
4621 if (inhole || wasdelay) { 4641 if (need_alloc || wasdelay) {
4622 bma.eof = eof; 4642 bma.eof = eof;
4623 bma.conv = !!(flags & XFS_BMAPI_CONVERT); 4643 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4624 bma.wasdel = wasdelay; 4644 bma.wasdel = wasdelay;
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index cecd094404cc..cdef87db5262 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -110,6 +110,9 @@ struct xfs_extent_free_item
110/* Map something in the CoW fork. */ 110/* Map something in the CoW fork. */
111#define XFS_BMAPI_COWFORK 0x200 111#define XFS_BMAPI_COWFORK 0x200
112 112
113/* Only convert delalloc space, don't allocate entirely new extents */
114#define XFS_BMAPI_DELALLOC 0x400
115
113#define XFS_BMAPI_FLAGS \ 116#define XFS_BMAPI_FLAGS \
114 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ 117 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \
115 { XFS_BMAPI_METADATA, "METADATA" }, \ 118 { XFS_BMAPI_METADATA, "METADATA" }, \
@@ -120,7 +123,8 @@ struct xfs_extent_free_item
120 { XFS_BMAPI_CONVERT, "CONVERT" }, \ 123 { XFS_BMAPI_CONVERT, "CONVERT" }, \
121 { XFS_BMAPI_ZERO, "ZERO" }, \ 124 { XFS_BMAPI_ZERO, "ZERO" }, \
122 { XFS_BMAPI_REMAP, "REMAP" }, \ 125 { XFS_BMAPI_REMAP, "REMAP" }, \
123 { XFS_BMAPI_COWFORK, "COWFORK" } 126 { XFS_BMAPI_COWFORK, "COWFORK" }, \
127 { XFS_BMAPI_DELALLOC, "DELALLOC" }
124 128
125 129
126static inline int xfs_bmapi_aflag(int w) 130static inline int xfs_bmapi_aflag(int w)
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index c58d72c220f5..2f389d366e93 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -36,21 +36,29 @@
36struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR }; 36struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR };
37 37
38/* 38/*
39 * @mode, if set, indicates that the type field needs to be set up. 39 * Convert inode mode to directory entry filetype
40 * This uses the transformation from file mode to DT_* as defined in linux/fs.h
41 * for file type specification. This will be propagated into the directory
42 * structure if appropriate for the given operation and filesystem config.
43 */ 40 */
44const unsigned char xfs_mode_to_ftype[S_IFMT >> S_SHIFT] = { 41unsigned char xfs_mode_to_ftype(int mode)
45 [0] = XFS_DIR3_FT_UNKNOWN, 42{
46 [S_IFREG >> S_SHIFT] = XFS_DIR3_FT_REG_FILE, 43 switch (mode & S_IFMT) {
47 [S_IFDIR >> S_SHIFT] = XFS_DIR3_FT_DIR, 44 case S_IFREG:
48 [S_IFCHR >> S_SHIFT] = XFS_DIR3_FT_CHRDEV, 45 return XFS_DIR3_FT_REG_FILE;
49 [S_IFBLK >> S_SHIFT] = XFS_DIR3_FT_BLKDEV, 46 case S_IFDIR:
50 [S_IFIFO >> S_SHIFT] = XFS_DIR3_FT_FIFO, 47 return XFS_DIR3_FT_DIR;
51 [S_IFSOCK >> S_SHIFT] = XFS_DIR3_FT_SOCK, 48 case S_IFCHR:
52 [S_IFLNK >> S_SHIFT] = XFS_DIR3_FT_SYMLINK, 49 return XFS_DIR3_FT_CHRDEV;
53}; 50 case S_IFBLK:
51 return XFS_DIR3_FT_BLKDEV;
52 case S_IFIFO:
53 return XFS_DIR3_FT_FIFO;
54 case S_IFSOCK:
55 return XFS_DIR3_FT_SOCK;
56 case S_IFLNK:
57 return XFS_DIR3_FT_SYMLINK;
58 default:
59 return XFS_DIR3_FT_UNKNOWN;
60 }
61}
54 62
55/* 63/*
56 * ASCII case-insensitive (ie. A-Z) support for directories that was 64 * ASCII case-insensitive (ie. A-Z) support for directories that was
@@ -631,7 +639,8 @@ xfs_dir2_isblock(
631 if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK))) 639 if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK)))
632 return rval; 640 return rval;
633 rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize; 641 rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize;
634 ASSERT(rval == 0 || args->dp->i_d.di_size == args->geo->blksize); 642 if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize)
643 return -EFSCORRUPTED;
635 *vp = rval; 644 *vp = rval;
636 return 0; 645 return 0;
637} 646}
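
A note on the shape of this change: xfs_mode_to_ftype() replaces an indexable lookup table with an exhaustive switch on the S_IFMT bits, so a corrupt mode can no longer index past the end of the table; anything unrecognized collapses to XFS_DIR3_FT_UNKNOWN, which the new callers treat as -EFSCORRUPTED. A minimal userspace sketch of the same shape, assuming illustrative stand-in constants rather than the kernel's XFS_DIR3_FT_* values:

#include <stdio.h>
#include <sys/stat.h>

/* Illustrative stand-ins for the XFS_DIR3_FT_* constants. */
enum ftype { FT_UNKNOWN, FT_REG, FT_DIR, FT_CHRDEV, FT_BLKDEV,
	     FT_FIFO, FT_SOCK, FT_SYMLINK };

static enum ftype mode_to_ftype(int mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:	return FT_REG;
	case S_IFDIR:	return FT_DIR;
	case S_IFCHR:	return FT_CHRDEV;
	case S_IFBLK:	return FT_BLKDEV;
	case S_IFIFO:	return FT_FIFO;
	case S_IFSOCK:	return FT_SOCK;
	case S_IFLNK:	return FT_SYMLINK;
	default:	return FT_UNKNOWN;	/* garbage modes land here */
	}
}

int main(void)
{
	/* A sane directory mode maps cleanly; a bogus mode maps to UNKNOWN. */
	printf("%d %d\n", mode_to_ftype(S_IFDIR | 0755), mode_to_ftype(0xf000));
	return 0;
}
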
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index 0197590fa7d7..d6e6d9d16f6c 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -18,6 +18,9 @@
18#ifndef __XFS_DIR2_H__ 18#ifndef __XFS_DIR2_H__
19#define __XFS_DIR2_H__ 19#define __XFS_DIR2_H__
20 20
21#include "xfs_da_format.h"
22#include "xfs_da_btree.h"
23
21struct xfs_defer_ops; 24struct xfs_defer_ops;
22struct xfs_da_args; 25struct xfs_da_args;
23struct xfs_inode; 26struct xfs_inode;
@@ -32,10 +35,9 @@ struct xfs_dir2_data_unused;
32extern struct xfs_name xfs_name_dotdot; 35extern struct xfs_name xfs_name_dotdot;
33 36
34/* 37/*
35 * directory filetype conversion tables. 38 * Convert inode mode to directory entry filetype
36 */ 39 */
37#define S_SHIFT 12 40extern unsigned char xfs_mode_to_ftype(int mode);
38extern const unsigned char xfs_mode_to_ftype[];
39 41
40/* 42/*
41 * directory operations vector for encode/decode routines 43 * directory operations vector for encode/decode routines
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 0fd086d03d41..7c471881c9a6 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -82,11 +82,12 @@ xfs_finobt_set_root(
82} 82}
83 83
84STATIC int 84STATIC int
85xfs_inobt_alloc_block( 85__xfs_inobt_alloc_block(
86 struct xfs_btree_cur *cur, 86 struct xfs_btree_cur *cur,
87 union xfs_btree_ptr *start, 87 union xfs_btree_ptr *start,
88 union xfs_btree_ptr *new, 88 union xfs_btree_ptr *new,
89 int *stat) 89 int *stat,
90 enum xfs_ag_resv_type resv)
90{ 91{
91 xfs_alloc_arg_t args; /* block allocation args */ 92 xfs_alloc_arg_t args; /* block allocation args */
92 int error; /* error return value */ 93 int error; /* error return value */
@@ -103,6 +104,7 @@ xfs_inobt_alloc_block(
103 args.maxlen = 1; 104 args.maxlen = 1;
104 args.prod = 1; 105 args.prod = 1;
105 args.type = XFS_ALLOCTYPE_NEAR_BNO; 106 args.type = XFS_ALLOCTYPE_NEAR_BNO;
107 args.resv = resv;
106 108
107 error = xfs_alloc_vextent(&args); 109 error = xfs_alloc_vextent(&args);
108 if (error) { 110 if (error) {
@@ -123,6 +125,27 @@ xfs_inobt_alloc_block(
123} 125}
124 126
125STATIC int 127STATIC int
128xfs_inobt_alloc_block(
129 struct xfs_btree_cur *cur,
130 union xfs_btree_ptr *start,
131 union xfs_btree_ptr *new,
132 int *stat)
133{
134 return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
135}
136
137STATIC int
138xfs_finobt_alloc_block(
139 struct xfs_btree_cur *cur,
140 union xfs_btree_ptr *start,
141 union xfs_btree_ptr *new,
142 int *stat)
143{
144 return __xfs_inobt_alloc_block(cur, start, new, stat,
145 XFS_AG_RESV_METADATA);
146}
147
148STATIC int
126xfs_inobt_free_block( 149xfs_inobt_free_block(
127 struct xfs_btree_cur *cur, 150 struct xfs_btree_cur *cur,
128 struct xfs_buf *bp) 151 struct xfs_buf *bp)
@@ -328,7 +351,7 @@ static const struct xfs_btree_ops xfs_finobt_ops = {
328 351
329 .dup_cursor = xfs_inobt_dup_cursor, 352 .dup_cursor = xfs_inobt_dup_cursor,
330 .set_root = xfs_finobt_set_root, 353 .set_root = xfs_finobt_set_root,
331 .alloc_block = xfs_inobt_alloc_block, 354 .alloc_block = xfs_finobt_alloc_block,
332 .free_block = xfs_inobt_free_block, 355 .free_block = xfs_inobt_free_block,
333 .get_minrecs = xfs_inobt_get_minrecs, 356 .get_minrecs = xfs_inobt_get_minrecs,
334 .get_maxrecs = xfs_inobt_get_maxrecs, 357 .get_maxrecs = xfs_inobt_get_maxrecs,
@@ -480,3 +503,64 @@ xfs_inobt_rec_check_count(
480 return 0; 503 return 0;
481} 504}
482#endif /* DEBUG */ 505#endif /* DEBUG */
506
507static xfs_extlen_t
508xfs_inobt_max_size(
509 struct xfs_mount *mp)
510{
511 /* Bail out if we're uninitialized, which can happen in mkfs. */
512 if (mp->m_inobt_mxr[0] == 0)
513 return 0;
514
515 return xfs_btree_calc_size(mp, mp->m_inobt_mnr,
516 (uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
517 XFS_INODES_PER_CHUNK);
518}
519
520static int
521xfs_inobt_count_blocks(
522 struct xfs_mount *mp,
523 xfs_agnumber_t agno,
524 xfs_btnum_t btnum,
525 xfs_extlen_t *tree_blocks)
526{
527 struct xfs_buf *agbp;
528 struct xfs_btree_cur *cur;
529 int error;
530
531 error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
532 if (error)
533 return error;
534
535 cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
536 error = xfs_btree_count_blocks(cur, tree_blocks);
537 xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
538 xfs_buf_relse(agbp);
539
540 return error;
541}
542
543/*
544 * Figure out how many blocks to reserve and how many are used by this btree.
545 */
546int
547xfs_finobt_calc_reserves(
548 struct xfs_mount *mp,
549 xfs_agnumber_t agno,
550 xfs_extlen_t *ask,
551 xfs_extlen_t *used)
552{
553 xfs_extlen_t tree_len = 0;
554 int error;
555
556 if (!xfs_sb_version_hasfinobt(&mp->m_sb))
557 return 0;
558
559 error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
560 if (error)
561 return error;
562
563 *ask += xfs_inobt_max_size(mp);
564 *used += tree_len;
565 return 0;
566}
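
The reservation helper added above follows a common ask/used convention: *ask accumulates the worst-case size the btree could grow to, *used accumulates the blocks it already occupies, and the per-AG reservation code reserves the difference. A hedged sketch of that calling convention in plain C, with invented names and sizes:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical per-AG accounting: accumulate worst case and current usage. */
static int calc_reserves(uint32_t max_size, uint32_t tree_len,
			 uint32_t *ask, uint32_t *used)
{
	*ask += max_size;	/* worst-case blocks the btree could need */
	*used += tree_len;	/* blocks the btree occupies right now */
	return 0;
}

int main(void)
{
	uint32_t ask = 0, used = 0;

	calc_reserves(1024, 37, &ask, &used);	/* e.g. the finobt */
	calc_reserves(2048, 10, &ask, &used);	/* another metadata btree */
	printf("reserve %u more blocks\n", ask - used);
	return 0;
}
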
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.h b/fs/xfs/libxfs/xfs_ialloc_btree.h
index bd88453217ce..aa81e2e63f3f 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.h
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.h
@@ -72,4 +72,7 @@ int xfs_inobt_rec_check_count(struct xfs_mount *,
72#define xfs_inobt_rec_check_count(mp, rec) 0 72#define xfs_inobt_rec_check_count(mp, rec) 0
73#endif /* DEBUG */ 73#endif /* DEBUG */
74 74
75int xfs_finobt_calc_reserves(struct xfs_mount *mp, xfs_agnumber_t agno,
76 xfs_extlen_t *ask, xfs_extlen_t *used);
77
75#endif /* __XFS_IALLOC_BTREE_H__ */ 78#endif /* __XFS_IALLOC_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index dd483e2767f7..d93f9d918cfc 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -29,6 +29,7 @@
29#include "xfs_icache.h" 29#include "xfs_icache.h"
30#include "xfs_trans.h" 30#include "xfs_trans.h"
31#include "xfs_ialloc.h" 31#include "xfs_ialloc.h"
32#include "xfs_dir2.h"
32 33
33/* 34/*
34 * Check that none of the inode's in the buffer have a next 35 * Check that none of the inode's in the buffer have a next
@@ -386,6 +387,7 @@ xfs_dinode_verify(
386 xfs_ino_t ino, 387 xfs_ino_t ino,
387 struct xfs_dinode *dip) 388 struct xfs_dinode *dip)
388{ 389{
390 uint16_t mode;
389 uint16_t flags; 391 uint16_t flags;
390 uint64_t flags2; 392 uint64_t flags2;
391 393
@@ -396,8 +398,12 @@ xfs_dinode_verify(
396 if (be64_to_cpu(dip->di_size) & (1ULL << 63)) 398 if (be64_to_cpu(dip->di_size) & (1ULL << 63))
397 return false; 399 return false;
398 400
399 /* No zero-length symlinks. */ 401 mode = be16_to_cpu(dip->di_mode);
400 if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0) 402 if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
403 return false;
404
405 /* No zero-length symlinks/dirs. */
406 if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
401 return false; 407 return false;
402 408
403 /* only version 3 or greater inodes are extensively verified here */ 409 /* only version 3 or greater inodes are extensively verified here */
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 2580262e4ea0..584ec896a533 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -242,7 +242,7 @@ xfs_mount_validate_sb(
242 sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG || 242 sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG ||
243 sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG || 243 sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
244 sbp->sb_blocksize != (1 << sbp->sb_blocklog) || 244 sbp->sb_blocksize != (1 << sbp->sb_blocklog) ||
245 sbp->sb_dirblklog > XFS_MAX_BLOCKSIZE_LOG || 245 sbp->sb_dirblklog + sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
246 sbp->sb_inodesize < XFS_DINODE_MIN_SIZE || 246 sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
247 sbp->sb_inodesize > XFS_DINODE_MAX_SIZE || 247 sbp->sb_inodesize > XFS_DINODE_MAX_SIZE ||
248 sbp->sb_inodelog < XFS_DINODE_MIN_LOG || 248 sbp->sb_inodelog < XFS_DINODE_MIN_LOG ||
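
The superblock check works because the directory block size is sb_blocksize << sb_dirblklog, i.e. the logs add; validating sb_dirblklog alone still allows the shifted product to exceed the supported maximum. A standalone illustration, with MAX_LOG standing in for XFS_MAX_BLOCKSIZE_LOG:

#include <stdio.h>
#include <stdbool.h>

#define MAX_LOG 16	/* stand-in for XFS_MAX_BLOCKSIZE_LOG (64k) */

/* Dir block size is (1 << blocklog) << dirblklog, so the log sizes add. */
static bool dirblk_valid(unsigned blocklog, unsigned dirblklog)
{
	return blocklog + dirblklog <= MAX_LOG;
}

int main(void)
{
	/* 4k fs blocks (log 12) with dirblklog 3 => 32k dir blocks: fine. */
	printf("%d\n", dirblk_valid(12, 3));
	/* dirblklog 8 alone is <= MAX_LOG, but 12 + 8 = 20 overflows. */
	printf("%d\n", dirblk_valid(12, 8));
	return 0;
}
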
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index b9abce524c33..c1417919ab0a 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -528,7 +528,6 @@ xfs_getbmap(
528 xfs_bmbt_irec_t *map; /* buffer for user's data */ 528 xfs_bmbt_irec_t *map; /* buffer for user's data */
529 xfs_mount_t *mp; /* file system mount point */ 529 xfs_mount_t *mp; /* file system mount point */
530 int nex; /* # of user extents can do */ 530 int nex; /* # of user extents can do */
531 int nexleft; /* # of user extents left */
532 int subnex; /* # of bmapi's can do */ 531 int subnex; /* # of bmapi's can do */
533 int nmap; /* number of map entries */ 532 int nmap; /* number of map entries */
534 struct getbmapx *out; /* output structure */ 533 struct getbmapx *out; /* output structure */
@@ -686,10 +685,8 @@ xfs_getbmap(
686 goto out_free_map; 685 goto out_free_map;
687 } 686 }
688 687
689 nexleft = nex;
690
691 do { 688 do {
692 nmap = (nexleft > subnex) ? subnex : nexleft; 689 nmap = (nex > subnex) ? subnex : nex;
693 error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset), 690 error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
694 XFS_BB_TO_FSB(mp, bmv->bmv_length), 691 XFS_BB_TO_FSB(mp, bmv->bmv_length),
695 map, &nmap, bmapi_flags); 692 map, &nmap, bmapi_flags);
@@ -697,8 +694,8 @@ xfs_getbmap(
697 goto out_free_map; 694 goto out_free_map;
698 ASSERT(nmap <= subnex); 695 ASSERT(nmap <= subnex);
699 696
700 for (i = 0; i < nmap && nexleft && bmv->bmv_length && 697 for (i = 0; i < nmap && bmv->bmv_length &&
701 cur_ext < bmv->bmv_count; i++) { 698 cur_ext < bmv->bmv_count - 1; i++) {
702 out[cur_ext].bmv_oflags = 0; 699 out[cur_ext].bmv_oflags = 0;
703 if (map[i].br_state == XFS_EXT_UNWRITTEN) 700 if (map[i].br_state == XFS_EXT_UNWRITTEN)
704 out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC; 701 out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
@@ -760,16 +757,27 @@ xfs_getbmap(
760 continue; 757 continue;
761 } 758 }
762 759
760 /*
761 * In order to report shared extents accurately,
762 * we report each distinct shared/unshared part
763 * of a single bmbt record using multiple bmap
764 * extents. To make that happen, we iterate the
765 * same map array item multiple times, each
766 * time trimming out the subextent that we just
767 * reported.
768 *
769 * Because of this, we must check the out array
770 * index (cur_ext) directly against bmv_count-1
771 * to avoid overflows.
772 */
763 if (inject_map.br_startblock != NULLFSBLOCK) { 773 if (inject_map.br_startblock != NULLFSBLOCK) {
764 map[i] = inject_map; 774 map[i] = inject_map;
765 i--; 775 i--;
766 } else 776 }
767 nexleft--;
768 bmv->bmv_entries++; 777 bmv->bmv_entries++;
769 cur_ext++; 778 cur_ext++;
770 } 779 }
771 } while (nmap && nexleft && bmv->bmv_length && 780 } while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);
772 cur_ext < bmv->bmv_count);
773 781
774 out_free_map: 782 out_free_map:
775 kmem_free(map); 783 kmem_free(map);
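
One subtlety in the getbmap loop above deserves spelling out: when a single bmbt record must be reported as several sub-extents, the code overwrites map[i] with the trimmed remainder and decrements i so the loop revisits the same slot, which is why counting input records (the removed nexleft) was meaningless and only the output index against bmv_count - 1 terminates reliably. A toy reproduction of that control flow, with an invented split-at-10-units rule:

#include <stdio.h>

struct rec { int start, len; };

int main(void)
{
	struct rec map[] = { { 0, 25 }, { 40, 5 } };
	int n = 2, out = 0, max_out = 8;

	for (int i = 0; i < n && out < max_out; i++) {
		struct rec r = map[i];

		if (r.len > 10) {		/* report a leading sub-extent... */
			printf("out[%d] = [%d,+10]\n", out++, r.start);
			map[i].start += 10;	/* ...and trim the record */
			map[i].len -= 10;
			i--;			/* revisit the same slot */
			continue;
		}
		printf("out[%d] = [%d,+%d]\n", out++, r.start, r.len);
	}
	return 0;
}
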
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 7f0a01f7b592..ac3b4db519df 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -422,6 +422,7 @@ retry:
422out_free_pages: 422out_free_pages:
423 for (i = 0; i < bp->b_page_count; i++) 423 for (i = 0; i < bp->b_page_count; i++)
424 __free_page(bp->b_pages[i]); 424 __free_page(bp->b_pages[i]);
425 bp->b_flags &= ~_XBF_PAGES;
425 return error; 426 return error;
426} 427}
427 428
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 7a30b8f11db7..9d06cc30e875 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -710,6 +710,10 @@ xfs_dq_get_next_id(
710 /* Simple advance */ 710 /* Simple advance */
711 next_id = *id + 1; 711 next_id = *id + 1;
712 712
713 /* If we'd wrap past the max ID, stop */
714 if (next_id < *id)
715 return -ENOENT;
716
713 /* If new ID is within the current chunk, advancing it sufficed */ 717 /* If new ID is within the current chunk, advancing it sufficed */
714 if (next_id % mp->m_quotainfo->qi_dqperchunk) { 718 if (next_id % mp->m_quotainfo->qi_dqperchunk) {
715 *id = next_id; 719 *id = next_id;
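
The added dquot check is the standard unsigned-increment wraparound guard: after next = id + 1, next < id can only hold if the addition wrapped past the type's maximum value, so iteration stops instead of cycling back to 0. Demonstrated standalone:

#include <stdio.h>
#include <stdint.h>

/* Returns 0 and advances *id, or -1 when the id space is exhausted. */
static int next_id(uint32_t *id)
{
	uint32_t next = *id + 1;

	if (next < *id)		/* unsigned wrap: we were at UINT32_MAX */
		return -1;
	*id = next;
	return 0;
}

int main(void)
{
	uint32_t id = UINT32_MAX - 1;

	printf("%d (id=%u)\n", next_id(&id), id);	/* ok: reaches max */
	printf("%d (id=%u)\n", next_id(&id), id);	/* stops: would wrap */
	return 0;
}
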
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index b9557795eb74..de32f0fe47c8 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1792,22 +1792,23 @@ xfs_inactive_ifree(
1792 int error; 1792 int error;
1793 1793
1794 /* 1794 /*
1795 * The ifree transaction might need to allocate blocks for record 1795 * We try to use a per-AG reservation for any block needed by the finobt
1796 * insertion to the finobt. We don't want to fail here at ENOSPC, so 1796 * tree, but as the finobt feature predates the per-AG reservation
1797 * allow ifree to dip into the reserved block pool if necessary. 1797 * support a degraded file system might not have enough space for the
1798 * 1798 * reservation at mount time. In that case try to dip into the reserved
1799 * Freeing large sets of inodes generally means freeing inode chunks, 1799 * pool and pray.
1800 * directory and file data blocks, so this should be relatively safe.
1801 * Only under severe circumstances should it be possible to free enough
1802 * inodes to exhaust the reserve block pool via finobt expansion while
1803 * at the same time not creating free space in the filesystem.
1804 * 1800 *
1805 * Send a warning if the reservation does happen to fail, as the inode 1801 * Send a warning if the reservation does happen to fail, as the inode
1806 * now remains allocated and sits on the unlinked list until the fs is 1802 * now remains allocated and sits on the unlinked list until the fs is
1807 * repaired. 1803 * repaired.
1808 */ 1804 */
1809 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 1805 if (unlikely(mp->m_inotbt_nores)) {
1810 XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp); 1806 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1807 XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1808 &tp);
1809 } else {
1810 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1811 }
1811 if (error) { 1812 if (error) {
1812 if (error == -ENOSPC) { 1813 if (error == -ENOSPC) {
1813 xfs_warn_ratelimited(mp, 1814 xfs_warn_ratelimited(mp,
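
The ifree rework above amounts to a two-tier reservation strategy: normally the per-AG reservation already covers finobt growth, so the transaction asks for zero blocks; only when that reservation could not be established at mount (m_inotbt_nores) does it request blocks with permission to dip into the reserved pool. A schematic of that pattern, with placeholder names and sizes:

#include <stdio.h>
#include <stdbool.h>

/* Placeholder transaction allocator; parameters mimic the two call shapes. */
static int trans_alloc(int res_blocks, bool use_reserve)
{
	printf("alloc: %d blocks, reserve pool %s\n",
	       res_blocks, use_reserve ? "allowed" : "off");
	return 0;
}

static int inactive_ifree(bool no_finobt_resv)
{
	if (no_finobt_resv)	/* degraded: per-AG reservation unavailable */
		return trans_alloc(8, true);
	return trans_alloc(0, false);	/* covered by per-AG reservation */
}

int main(void)
{
	inactive_ifree(false);
	inactive_ifree(true);
	return 0;
}
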
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 0d147428971e..1aa3abd67b36 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -681,7 +681,7 @@ xfs_iomap_write_allocate(
681 xfs_trans_t *tp; 681 xfs_trans_t *tp;
682 int nimaps; 682 int nimaps;
683 int error = 0; 683 int error = 0;
684 int flags = 0; 684 int flags = XFS_BMAPI_DELALLOC;
685 int nres; 685 int nres;
686 686
687 if (whichfork == XFS_COW_FORK) 687 if (whichfork == XFS_COW_FORK)
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 308bebb6dfd2..22c16155f1b4 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -98,12 +98,27 @@ xfs_init_security(
98static void 98static void
99xfs_dentry_to_name( 99xfs_dentry_to_name(
100 struct xfs_name *namep, 100 struct xfs_name *namep,
101 struct dentry *dentry)
102{
103 namep->name = dentry->d_name.name;
104 namep->len = dentry->d_name.len;
105 namep->type = XFS_DIR3_FT_UNKNOWN;
106}
107
108static int
109xfs_dentry_mode_to_name(
110 struct xfs_name *namep,
101 struct dentry *dentry, 111 struct dentry *dentry,
102 int mode) 112 int mode)
103{ 113{
104 namep->name = dentry->d_name.name; 114 namep->name = dentry->d_name.name;
105 namep->len = dentry->d_name.len; 115 namep->len = dentry->d_name.len;
106 namep->type = xfs_mode_to_ftype[(mode & S_IFMT) >> S_SHIFT]; 116 namep->type = xfs_mode_to_ftype(mode);
117
118 if (unlikely(namep->type == XFS_DIR3_FT_UNKNOWN))
119 return -EFSCORRUPTED;
120
121 return 0;
107} 122}
108 123
109STATIC void 124STATIC void
@@ -119,7 +134,7 @@ xfs_cleanup_inode(
119 * xfs_init_security we must back out. 134 * xfs_init_security we must back out.
120 * ENOSPC can hit here, among other things. 135 * ENOSPC can hit here, among other things.
121 */ 136 */
122 xfs_dentry_to_name(&teardown, dentry, 0); 137 xfs_dentry_to_name(&teardown, dentry);
123 138
124 xfs_remove(XFS_I(dir), &teardown, XFS_I(inode)); 139 xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
125} 140}
@@ -154,8 +169,12 @@ xfs_generic_create(
154 if (error) 169 if (error)
155 return error; 170 return error;
156 171
172 /* Verify the mode is valid for the tmpfile case as well */
173 error = xfs_dentry_mode_to_name(&name, dentry, mode);
174 if (unlikely(error))
175 goto out_free_acl;
176
157 if (!tmpfile) { 177 if (!tmpfile) {
158 xfs_dentry_to_name(&name, dentry, mode);
159 error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip); 178 error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
160 } else { 179 } else {
161 error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip); 180 error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
@@ -248,7 +267,7 @@ xfs_vn_lookup(
248 if (dentry->d_name.len >= MAXNAMELEN) 267 if (dentry->d_name.len >= MAXNAMELEN)
249 return ERR_PTR(-ENAMETOOLONG); 268 return ERR_PTR(-ENAMETOOLONG);
250 269
251 xfs_dentry_to_name(&name, dentry, 0); 270 xfs_dentry_to_name(&name, dentry);
252 error = xfs_lookup(XFS_I(dir), &name, &cip, NULL); 271 error = xfs_lookup(XFS_I(dir), &name, &cip, NULL);
253 if (unlikely(error)) { 272 if (unlikely(error)) {
254 if (unlikely(error != -ENOENT)) 273 if (unlikely(error != -ENOENT))
@@ -275,7 +294,7 @@ xfs_vn_ci_lookup(
275 if (dentry->d_name.len >= MAXNAMELEN) 294 if (dentry->d_name.len >= MAXNAMELEN)
276 return ERR_PTR(-ENAMETOOLONG); 295 return ERR_PTR(-ENAMETOOLONG);
277 296
278 xfs_dentry_to_name(&xname, dentry, 0); 297 xfs_dentry_to_name(&xname, dentry);
279 error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name); 298 error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name);
280 if (unlikely(error)) { 299 if (unlikely(error)) {
281 if (unlikely(error != -ENOENT)) 300 if (unlikely(error != -ENOENT))
@@ -310,7 +329,9 @@ xfs_vn_link(
310 struct xfs_name name; 329 struct xfs_name name;
311 int error; 330 int error;
312 331
313 xfs_dentry_to_name(&name, dentry, inode->i_mode); 332 error = xfs_dentry_mode_to_name(&name, dentry, inode->i_mode);
333 if (unlikely(error))
334 return error;
314 335
315 error = xfs_link(XFS_I(dir), XFS_I(inode), &name); 336 error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
316 if (unlikely(error)) 337 if (unlikely(error))
@@ -329,7 +350,7 @@ xfs_vn_unlink(
329 struct xfs_name name; 350 struct xfs_name name;
330 int error; 351 int error;
331 352
332 xfs_dentry_to_name(&name, dentry, 0); 353 xfs_dentry_to_name(&name, dentry);
333 354
334 error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry))); 355 error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry)));
335 if (error) 356 if (error)
@@ -359,7 +380,9 @@ xfs_vn_symlink(
359 380
360 mode = S_IFLNK | 381 mode = S_IFLNK |
361 (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO); 382 (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
362 xfs_dentry_to_name(&name, dentry, mode); 383 error = xfs_dentry_mode_to_name(&name, dentry, mode);
384 if (unlikely(error))
385 goto out;
363 386
364 error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip); 387 error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
365 if (unlikely(error)) 388 if (unlikely(error))
@@ -395,6 +418,7 @@ xfs_vn_rename(
395{ 418{
396 struct inode *new_inode = d_inode(ndentry); 419 struct inode *new_inode = d_inode(ndentry);
397 int omode = 0; 420 int omode = 0;
421 int error;
398 struct xfs_name oname; 422 struct xfs_name oname;
399 struct xfs_name nname; 423 struct xfs_name nname;
400 424
@@ -405,8 +429,14 @@ xfs_vn_rename(
405 if (flags & RENAME_EXCHANGE) 429 if (flags & RENAME_EXCHANGE)
406 omode = d_inode(ndentry)->i_mode; 430 omode = d_inode(ndentry)->i_mode;
407 431
408 xfs_dentry_to_name(&oname, odentry, omode); 432 error = xfs_dentry_mode_to_name(&oname, odentry, omode);
409 xfs_dentry_to_name(&nname, ndentry, d_inode(odentry)->i_mode); 433 if (omode && unlikely(error))
434 return error;
435
436 error = xfs_dentry_mode_to_name(&nname, ndentry,
437 d_inode(odentry)->i_mode);
438 if (unlikely(error))
439 return error;
410 440
411 return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)), 441 return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)),
412 XFS_I(ndir), &nname, 442 XFS_I(ndir), &nname,
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index e467218c0098..7a989de224f4 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -331,11 +331,11 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
331} 331}
332 332
333#define ASSERT_ALWAYS(expr) \ 333#define ASSERT_ALWAYS(expr) \
334 (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) 334 (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
335 335
336#ifdef DEBUG 336#ifdef DEBUG
337#define ASSERT(expr) \ 337#define ASSERT(expr) \
338 (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) 338 (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
339 339
340#ifndef STATIC 340#ifndef STATIC
341# define STATIC noinline 341# define STATIC noinline
@@ -346,7 +346,7 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
346#ifdef XFS_WARN 346#ifdef XFS_WARN
347 347
348#define ASSERT(expr) \ 348#define ASSERT(expr) \
349 (unlikely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__)) 349 (likely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
350 350
351#ifndef STATIC 351#ifndef STATIC
352# define STATIC static noinline 352# define STATIC static noinline
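
The ASSERT change is purely a branch-hint fix: the asserted expression is expected to be true on the hot path, so the hint must be likely(expr); the old unlikely(expr) told the compiler every assertion normally fails. The construct reduced to standalone C using the GCC/Clang builtin that likely() wraps:

#include <stdio.h>
#include <stdlib.h>

static void assfail(const char *expr, const char *file, int line)
{
	fprintf(stderr, "assertion \"%s\" failed: %s:%d\n", expr, file, line);
	abort();
}

/* Hint that expr is normally true; fall into assfail() on the cold path. */
#define ASSERT(expr) \
	(__builtin_expect(!!(expr), 1) ? (void)0 : \
	 assfail(#expr, __FILE__, __LINE__))

int main(void)
{
	int x = 42;

	ASSERT(x == 42);	/* hot path: predicted taken, no output */
	printf("still here\n");
	return 0;
}
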
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 84f785218907..7f351f706b7a 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -140,6 +140,7 @@ typedef struct xfs_mount {
140 int m_fixedfsid[2]; /* unchanged for life of FS */ 140 int m_fixedfsid[2]; /* unchanged for life of FS */
141 uint m_dmevmask; /* DMI events for this FS */ 141 uint m_dmevmask; /* DMI events for this FS */
142 __uint64_t m_flags; /* global mount flags */ 142 __uint64_t m_flags; /* global mount flags */
143 bool m_inotbt_nores; /* no per-AG finobt resv. */
143 int m_ialloc_inos; /* inodes in inode allocation */ 144 int m_ialloc_inos; /* inodes in inode allocation */
144 int m_ialloc_blks; /* blocks in inode allocation */ 145 int m_ialloc_blks; /* blocks in inode allocation */
145 int m_ialloc_min_blks;/* min blocks in sparse inode 146 int m_ialloc_min_blks;/* min blocks in sparse inode
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 45e50ea90769..b669b123287b 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1177,7 +1177,8 @@ xfs_qm_dqusage_adjust(
1177 * the case in all other instances. It's OK that we do this because 1177 * the case in all other instances. It's OK that we do this because
1178 * quotacheck is done only at mount time. 1178 * quotacheck is done only at mount time.
1179 */ 1179 */
1180 error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip); 1180 error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, XFS_ILOCK_EXCL,
1181 &ip);
1181 if (error) { 1182 if (error) {
1182 *res = BULKSTAT_RV_NOTHING; 1183 *res = BULKSTAT_RV_NOTHING;
1183 return error; 1184 return error;
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index d6d241f63b9f..56814e8ae7ea 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -144,7 +144,7 @@ struct __drm_crtcs_state {
144 struct drm_crtc *ptr; 144 struct drm_crtc *ptr;
145 struct drm_crtc_state *state; 145 struct drm_crtc_state *state;
146 struct drm_crtc_commit *commit; 146 struct drm_crtc_commit *commit;
147 s64 __user *out_fence_ptr; 147 s32 __user *out_fence_ptr;
148}; 148};
149 149
150struct __drm_connnectors_state { 150struct __drm_connnectors_state {
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index bf9991b20611..137432386310 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -488,7 +488,7 @@ struct drm_mode_config {
488 /** 488 /**
489 * @prop_out_fence_ptr: Sync File fd pointer representing the 489 * @prop_out_fence_ptr: Sync File fd pointer representing the
490 * outgoing fences for a CRTC. Userspace should provide a pointer to a 490 * outgoing fences for a CRTC. Userspace should provide a pointer to a
491 * value of type s64, and then cast that pointer to u64. 491 * value of type s32, and then cast that pointer to u64.
492 */ 492 */
493 struct drm_property *prop_out_fence_ptr; 493 struct drm_property *prop_out_fence_ptr;
494 /** 494 /**
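
The s64 to s32 change reflects what the pointer actually targets: a sync-file file descriptor, which is an int. Userspace passes the address of an s32 slot, cast to u64, as the property value; a sketch of the userspace side with the atomic-commit plumbing omitted:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t out_fence_fd = -1;	/* the kernel writes the fd here */

	/* Encode the pointer as the u64 property value, per the docs. */
	uint64_t prop_value = (uint64_t)(uintptr_t)&out_fence_fd;

	/* ...submit the atomic commit with prop_value; afterwards: */
	printf("property payload: %#llx, fd slot: %d\n",
	       (unsigned long long)prop_value, out_fence_fd);
	return 0;
}
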
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index b717ed9d2b75..5c970ce67949 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -76,4 +76,5 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
76 76
77void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu); 77void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
78 78
79void kvm_timer_init_vhe(void);
79#endif 80#endif
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5d417eacc519..57d60dc5b600 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -248,6 +248,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
248void bpf_map_put_with_uref(struct bpf_map *map); 248void bpf_map_put_with_uref(struct bpf_map *map);
249void bpf_map_put(struct bpf_map *map); 249void bpf_map_put(struct bpf_map *map);
250int bpf_map_precharge_memlock(u32 pages); 250int bpf_map_precharge_memlock(u32 pages);
251void *bpf_map_area_alloc(size_t size);
252void bpf_map_area_free(void *base);
251 253
252extern int sysctl_unprivileged_bpf_disabled; 254extern int sysctl_unprivileged_bpf_disabled;
253 255
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 20bfefbe7594..d936a0021839 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -74,6 +74,8 @@ enum cpuhp_state {
74 CPUHP_ZCOMP_PREPARE, 74 CPUHP_ZCOMP_PREPARE,
75 CPUHP_TIMERS_DEAD, 75 CPUHP_TIMERS_DEAD,
76 CPUHP_MIPS_SOC_PREPARE, 76 CPUHP_MIPS_SOC_PREPARE,
77 CPUHP_BP_PREPARE_DYN,
78 CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
77 CPUHP_BRINGUP_CPU, 79 CPUHP_BRINGUP_CPU,
78 CPUHP_AP_IDLE_DEAD, 80 CPUHP_AP_IDLE_DEAD,
79 CPUHP_AP_OFFLINE, 81 CPUHP_AP_OFFLINE,
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c2748accea71..e973faba69dc 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -274,37 +274,67 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
274 struct irq_chip *irqchip, 274 struct irq_chip *irqchip,
275 int parent_irq); 275 int parent_irq);
276 276
277int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, 277int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
278 struct irq_chip *irqchip,
279 unsigned int first_irq,
280 irq_flow_handler_t handler,
281 unsigned int type,
282 bool nested,
283 struct lock_class_key *lock_key);
284
285#ifdef CONFIG_LOCKDEP
286
287/*
288 * Lockdep requires that each irqchip instance be created with a
289 * unique key so as to avoid unnecessary warnings. This upfront
290 * boilerplate static inlines provides such a key for each
291 * unique instance.
292 */
293static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
294 struct irq_chip *irqchip,
295 unsigned int first_irq,
296 irq_flow_handler_t handler,
297 unsigned int type)
298{
299 static struct lock_class_key key;
300
301 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
302 handler, type, false, &key);
303}
304
305static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
278 struct irq_chip *irqchip, 306 struct irq_chip *irqchip,
279 unsigned int first_irq, 307 unsigned int first_irq,
280 irq_flow_handler_t handler, 308 irq_flow_handler_t handler,
281 unsigned int type, 309 unsigned int type)
282 bool nested, 310{
283 struct lock_class_key *lock_key); 311
312 static struct lock_class_key key;
313
314 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
315 handler, type, true, &key);
316}
317#else
318static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
319 struct irq_chip *irqchip,
320 unsigned int first_irq,
321 irq_flow_handler_t handler,
322 unsigned int type)
323{
324 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
325 handler, type, false, NULL);
326}
284 327
285/* FIXME: I assume threaded IRQchips do not have the lockdep problem */
286static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, 328static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
287 struct irq_chip *irqchip, 329 struct irq_chip *irqchip,
288 unsigned int first_irq, 330 unsigned int first_irq,
289 irq_flow_handler_t handler, 331 irq_flow_handler_t handler,
290 unsigned int type) 332 unsigned int type)
291{ 333{
292 return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq, 334 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
293 handler, type, true, NULL); 335 handler, type, true, NULL);
294} 336}
295 337#endif /* CONFIG_LOCKDEP */
296#ifdef CONFIG_LOCKDEP
297#define gpiochip_irqchip_add(...) \
298( \
299 ({ \
300 static struct lock_class_key _key; \
301 _gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \
302 }) \
303)
304#else
305#define gpiochip_irqchip_add(...) \
306 _gpiochip_irqchip_add(__VA_ARGS__, false, NULL)
307#endif
308 338
309#endif /* CONFIG_GPIOLIB_IRQCHIP */ 339#endif /* CONFIG_GPIOLIB_IRQCHIP */
310 340
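
The lockdep scheme above leans on a C property worth stating: a static local inside a function persists for the program's lifetime, and because a static inline is instantiated per translation unit, each driver calling gpiochip_irqchip_add() gets its own lock_class_key with no boilerplate; the replaced macro achieved per-call-site keys with a statement expression instead. Both reduced to a single-file demonstration (GCC/Clang extensions assumed):

#include <stdio.h>

struct lock_class_key { int dummy; };

static inline struct lock_class_key *get_key(void)
{
	static struct lock_class_key key;	/* one instance, stable address */
	return &key;
}

/* Old style: a statement expression mints one key per expansion site. */
#define GET_KEY_PER_SITE() \
	({ static struct lock_class_key k; &k; })

int main(void)
{
	printf("inline key: %p %p (same)\n",
	       (void *)get_key(), (void *)get_key());
	printf("per-site keys: %p %p (distinct)\n",
	       (void *)GET_KEY_PER_SITE(), (void *)GET_KEY_PER_SITE());
	return 0;
}
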
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 56aec84237ad..cb09238f6d32 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -514,8 +514,8 @@ extern enum system_states {
514#define TAINT_FLAGS_COUNT 16 514#define TAINT_FLAGS_COUNT 16
515 515
516struct taint_flag { 516struct taint_flag {
517 char true; /* character printed when tainted */ 517 char c_true; /* character printed when tainted */
518 char false; /* character printed when not tainted */ 518 char c_false; /* character printed when not tainted */
519 bool module; /* also show as a per-module taint flag */ 519 bool module; /* also show as a per-module taint flag */
520}; 520};
521 521
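
The c_true/c_false rename is not cosmetic: wherever bool is in scope, true and false are macros expanding to 1 and 0, so a member declared as "char true" preprocesses into "char 1" and the struct stops compiling. The fixed shape, standalone with <stdbool.h>:

#include <stdio.h>
#include <stdbool.h>	/* defines true/false as macros: 1 and 0 */

struct taint_flag {
	char c_true;	/* "char true;" would expand to "char 1;" here */
	char c_false;
	bool module;
};

int main(void)
{
	struct taint_flag f = { 'P', ' ', true };

	printf("%c%c %d\n", f.c_true, f.c_false, f.module);
	return 0;
}
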
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 01033fadea47..c1784c0b4f35 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -284,7 +284,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
284 unsigned long map_offset); 284 unsigned long map_offset);
285extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, 285extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
286 unsigned long pnum); 286 unsigned long pnum);
287extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages, 287extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
288 enum zone_type target); 288 enum zone_type target, int *zone_shift);
289 289
290#endif /* __LINUX_MEMORY_HOTPLUG_H */ 290#endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 257173e0095e..f541da68d1e7 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -35,6 +35,8 @@
35#define PHY_ID_KSZ886X 0x00221430 35#define PHY_ID_KSZ886X 0x00221430
36#define PHY_ID_KSZ8863 0x00221435 36#define PHY_ID_KSZ8863 0x00221435
37 37
38#define PHY_ID_KSZ8795 0x00221550
39
38/* struct phy_device dev_flags definitions */ 40/* struct phy_device dev_flags definitions */
39#define MICREL_PHY_50MHZ_CLK 0x00000001 41#define MICREL_PHY_50MHZ_CLK 0x00000001
40#define MICREL_PHY_FXEN 0x00000002 42#define MICREL_PHY_FXEN 0x00000002
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 36d9896fbc1e..f4aac87adcc3 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -972,12 +972,16 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
972 * @zonelist - The zonelist to search for a suitable zone 972 * @zonelist - The zonelist to search for a suitable zone
973 * @highest_zoneidx - The zone index of the highest zone to return 973 * @highest_zoneidx - The zone index of the highest zone to return
974 * @nodes - An optional nodemask to filter the zonelist with 974 * @nodes - An optional nodemask to filter the zonelist with
975 * @zone - The first suitable zone found is returned via this parameter 975 * @return - Zoneref pointer for the first suitable zone found (see below)
976 * 976 *
977 * This function returns the first zone at or below a given zone index that is 977 * This function returns the first zone at or below a given zone index that is
978 * within the allowed nodemask. The zoneref returned is a cursor that can be 978 * within the allowed nodemask. The zoneref returned is a cursor that can be
979 * used to iterate the zonelist with next_zones_zonelist by advancing it by 979 * used to iterate the zonelist with next_zones_zonelist by advancing it by
980 * one before calling. 980 * one before calling.
981 *
982 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
983 * never NULL). This may happen either genuinely, or due to a concurrent
984 * nodemask update caused by cpuset modification.
981 */ 985 */
982static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, 986static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
983 enum zone_type highest_zoneidx, 987 enum zone_type highest_zoneidx,
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index aacca824a6ae..0a3fadc32693 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -110,6 +110,7 @@ extern int watchdog_user_enabled;
110extern int watchdog_thresh; 110extern int watchdog_thresh;
111extern unsigned long watchdog_enabled; 111extern unsigned long watchdog_enabled;
112extern unsigned long *watchdog_cpumask_bits; 112extern unsigned long *watchdog_cpumask_bits;
113extern atomic_t watchdog_park_in_progress;
113#ifdef CONFIG_SMP 114#ifdef CONFIG_SMP
114extern int sysctl_softlockup_all_cpu_backtrace; 115extern int sysctl_softlockup_all_cpu_backtrace;
115extern int sysctl_hardlockup_all_cpu_backtrace; 116extern int sysctl_hardlockup_all_cpu_backtrace;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 5c9d2529685f..43474f39ef65 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -25,7 +25,6 @@
25#include <linux/timer.h> 25#include <linux/timer.h>
26#include <linux/workqueue.h> 26#include <linux/workqueue.h>
27#include <linux/mod_devicetable.h> 27#include <linux/mod_devicetable.h>
28#include <linux/phy_led_triggers.h>
29 28
30#include <linux/atomic.h> 29#include <linux/atomic.h>
31 30
diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h
index a2daea0a37d2..b37b05bfd1a6 100644
--- a/include/linux/phy_led_triggers.h
+++ b/include/linux/phy_led_triggers.h
@@ -18,11 +18,11 @@ struct phy_device;
18#ifdef CONFIG_LED_TRIGGER_PHY 18#ifdef CONFIG_LED_TRIGGER_PHY
19 19
20#include <linux/leds.h> 20#include <linux/leds.h>
21#include <linux/phy.h>
21 22
22#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10 23#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10
23#define PHY_MII_BUS_ID_SIZE (20 - 3)
24 24
25#define PHY_LINK_LED_TRIGGER_NAME_SIZE (PHY_MII_BUS_ID_SIZE + \ 25#define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
26 FIELD_SIZEOF(struct mdio_device, addr)+\ 26 FIELD_SIZEOF(struct mdio_device, addr)+\
27 PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE) 27 PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE)
28 28
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 321f9ed552a9..01f71e1d2e94 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -444,6 +444,10 @@ bool __rcu_is_watching(void);
444#error "Unknown RCU implementation specified to kernel configuration" 444#error "Unknown RCU implementation specified to kernel configuration"
445#endif 445#endif
446 446
447#define RCU_SCHEDULER_INACTIVE 0
448#define RCU_SCHEDULER_INIT 1
449#define RCU_SCHEDULER_RUNNING 2
450
447/* 451/*
448 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic 452 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
449 * initialization and destruction of rcu_head on the stack. rcu_head structures 453 * initialization and destruction of rcu_head on the stack. rcu_head structures
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 0c729c3c8549..d9718378a8be 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -194,8 +194,6 @@ struct platform_freeze_ops {
194}; 194};
195 195
196#ifdef CONFIG_SUSPEND 196#ifdef CONFIG_SUSPEND
197extern suspend_state_t mem_sleep_default;
198
199/** 197/**
200 * suspend_set_ops - set platform dependent suspend operations 198 * suspend_set_ops - set platform dependent suspend operations
201 * @ops: The new suspend operations to set. 199 * @ops: The new suspend operations to set.
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 56436472ccc7..5209b5ed2a64 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -56,7 +56,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
56 56
57static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, 57static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
58 struct virtio_net_hdr *hdr, 58 struct virtio_net_hdr *hdr,
59 bool little_endian) 59 bool little_endian,
60 bool has_data_valid)
60{ 61{
61 memset(hdr, 0, sizeof(*hdr)); /* no info leak */ 62 memset(hdr, 0, sizeof(*hdr)); /* no info leak */
62 63
@@ -91,6 +92,9 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
91 skb_checksum_start_offset(skb)); 92 skb_checksum_start_offset(skb));
92 hdr->csum_offset = __cpu_to_virtio16(little_endian, 93 hdr->csum_offset = __cpu_to_virtio16(little_endian,
93 skb->csum_offset); 94 skb->csum_offset);
95 } else if (has_data_valid &&
96 skb->ip_summed == CHECKSUM_UNNECESSARY) {
97 hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
94 } /* else everything is zero */ 98 } /* else everything is zero */
95 99
96 return 0; 100 return 0;
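
The new has_data_valid parameter covers the device-to-driver direction: when the host stack has already verified the checksum (CHECKSUM_UNNECESSARY), the header may advertise VIRTIO_NET_HDR_F_DATA_VALID so the peer can skip re-verification, and the else-if ordering guarantees it is never combined with NEEDS_CSUM. The branch condensed into a standalone model with illustrative flag values:

#include <stdio.h>

enum summed { CHECKSUM_NONE, CHECKSUM_PARTIAL, CHECKSUM_UNNECESSARY };

#define F_NEEDS_CSUM	1	/* illustrative values, not the ABI's */
#define F_DATA_VALID	2

static int hdr_flags(enum summed s, int has_data_valid)
{
	if (s == CHECKSUM_PARTIAL)		/* peer must finish the csum */
		return F_NEEDS_CSUM;
	else if (has_data_valid && s == CHECKSUM_UNNECESSARY)
		return F_DATA_VALID;		/* host already verified it */
	return 0;				/* else everything is zero */
}

int main(void)
{
	printf("%d %d %d\n",
	       hdr_flags(CHECKSUM_PARTIAL, 1),
	       hdr_flags(CHECKSUM_UNNECESSARY, 1),
	       hdr_flags(CHECKSUM_UNNECESSARY, 0));
	return 0;
}
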
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 487e57391664..7afe991e900e 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -871,7 +871,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
871 * upper-layer output functions 871 * upper-layer output functions
872 */ 872 */
873int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, 873int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
874 struct ipv6_txoptions *opt, int tclass); 874 __u32 mark, struct ipv6_txoptions *opt, int tclass);
875 875
876int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); 876int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
877 877
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index d4c1c75b8862..73dd87647460 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -44,6 +44,8 @@ struct lwtunnel_encap_ops {
44 int (*get_encap_size)(struct lwtunnel_state *lwtstate); 44 int (*get_encap_size)(struct lwtunnel_state *lwtstate);
45 int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b); 45 int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
46 int (*xmit)(struct sk_buff *skb); 46 int (*xmit)(struct sk_buff *skb);
47
48 struct module *owner;
47}; 49};
48 50
49#ifdef CONFIG_LWTUNNEL 51#ifdef CONFIG_LWTUNNEL
@@ -105,6 +107,8 @@ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
105 unsigned int num); 107 unsigned int num);
106int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op, 108int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
107 unsigned int num); 109 unsigned int num);
110int lwtunnel_valid_encap_type(u16 encap_type);
111int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len);
108int lwtunnel_build_state(struct net_device *dev, u16 encap_type, 112int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
109 struct nlattr *encap, 113 struct nlattr *encap,
110 unsigned int family, const void *cfg, 114 unsigned int family, const void *cfg,
@@ -168,6 +172,15 @@ static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
168 return -EOPNOTSUPP; 172 return -EOPNOTSUPP;
169} 173}
170 174
175static inline int lwtunnel_valid_encap_type(u16 encap_type)
176{
177 return -EOPNOTSUPP;
178}
179static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
180{
181 return -EOPNOTSUPP;
182}
183
171static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type, 184static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
172 struct nlattr *encap, 185 struct nlattr *encap,
173 unsigned int family, const void *cfg, 186 unsigned int family, const void *cfg,
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 924325c46aab..7dfdb517f0be 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -207,9 +207,9 @@ struct nft_set_iter {
207 unsigned int skip; 207 unsigned int skip;
208 int err; 208 int err;
209 int (*fn)(const struct nft_ctx *ctx, 209 int (*fn)(const struct nft_ctx *ctx,
210 const struct nft_set *set, 210 struct nft_set *set,
211 const struct nft_set_iter *iter, 211 const struct nft_set_iter *iter,
212 const struct nft_set_elem *elem); 212 struct nft_set_elem *elem);
213}; 213};
214 214
215/** 215/**
@@ -301,7 +301,7 @@ struct nft_set_ops {
301 void (*remove)(const struct nft_set *set, 301 void (*remove)(const struct nft_set *set,
302 const struct nft_set_elem *elem); 302 const struct nft_set_elem *elem);
303 void (*walk)(const struct nft_ctx *ctx, 303 void (*walk)(const struct nft_ctx *ctx,
304 const struct nft_set *set, 304 struct nft_set *set,
305 struct nft_set_iter *iter); 305 struct nft_set_iter *iter);
306 306
307 unsigned int (*privsize)(const struct nlattr * const nla[]); 307 unsigned int (*privsize)(const struct nlattr * const nla[]);
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
index cbedda077db2..5ceb2205e4e3 100644
--- a/include/net/netfilter/nft_fib.h
+++ b/include/net/netfilter/nft_fib.h
@@ -9,6 +9,12 @@ struct nft_fib {
9 9
10extern const struct nla_policy nft_fib_policy[]; 10extern const struct nla_policy nft_fib_policy[];
11 11
12static inline bool
13nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in)
14{
15 return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
16}
17
12int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr); 18int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr);
13int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr, 19int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
14 const struct nlattr * const tb[]); 20 const struct nlattr * const tb[]);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 958a24d8fae7..b567e4452a47 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -352,6 +352,20 @@ static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
352 } 352 }
353} 353}
354 354
355static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
356{
357 if (mtu >= 4096)
358 return IB_MTU_4096;
359 else if (mtu >= 2048)
360 return IB_MTU_2048;
361 else if (mtu >= 1024)
362 return IB_MTU_1024;
363 else if (mtu >= 512)
364 return IB_MTU_512;
365 else
366 return IB_MTU_256;
367}
368
355enum ib_port_state { 369enum ib_port_state {
356 IB_PORT_NOP = 0, 370 IB_PORT_NOP = 0,
357 IB_PORT_DOWN = 1, 371 IB_PORT_DOWN = 1,
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 96dd0b3f70d7..da5033dd8cbc 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -809,11 +809,11 @@ static inline void fc_set_wwnn(struct fc_lport *lport, u64 wwnn)
809/** 809/**
810 * fc_set_wwpn() - Set the World Wide Port Name of a local port 810 * fc_set_wwpn() - Set the World Wide Port Name of a local port
811 * @lport: The local port whose WWPN is to be set 811 * @lport: The local port whose WWPN is to be set
812 * @wwnn: The new WWPN 812 * @wwpn: The new WWPN
813 */ 813 */
814static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwnn) 814static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwpn)
815{ 815{
816 lport->wwpn = wwnn; 816 lport->wwpn = wwpn;
817} 817}
818 818
819/** 819/**
diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
index 3cbc327801d6..c451eec42a83 100644
--- a/include/uapi/linux/cec-funcs.h
+++ b/include/uapi/linux/cec-funcs.h
@@ -1665,14 +1665,15 @@ static inline void cec_msg_report_current_latency(struct cec_msg *msg,
1665 __u8 audio_out_compensated, 1665 __u8 audio_out_compensated,
1666 __u8 audio_out_delay) 1666 __u8 audio_out_delay)
1667{ 1667{
1668 msg->len = 7; 1668 msg->len = 6;
1669 msg->msg[0] |= 0xf; /* broadcast */ 1669 msg->msg[0] |= 0xf; /* broadcast */
1670 msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY; 1670 msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY;
1671 msg->msg[2] = phys_addr >> 8; 1671 msg->msg[2] = phys_addr >> 8;
1672 msg->msg[3] = phys_addr & 0xff; 1672 msg->msg[3] = phys_addr & 0xff;
1673 msg->msg[4] = video_latency; 1673 msg->msg[4] = video_latency;
1674 msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated; 1674 msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated;
1675 msg->msg[6] = audio_out_delay; 1675 if (audio_out_compensated == 3)
1676 msg->msg[msg->len++] = audio_out_delay;
1676} 1677}
1677 1678
1678static inline void cec_ops_report_current_latency(const struct cec_msg *msg, 1679static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
@@ -1686,7 +1687,10 @@ static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
1686 *video_latency = msg->msg[4]; 1687 *video_latency = msg->msg[4];
1687 *low_latency_mode = (msg->msg[5] >> 2) & 1; 1688 *low_latency_mode = (msg->msg[5] >> 2) & 1;
1688 *audio_out_compensated = msg->msg[5] & 3; 1689 *audio_out_compensated = msg->msg[5] & 3;
1689 *audio_out_delay = msg->msg[6]; 1690 if (*audio_out_compensated == 3 && msg->len >= 7)
1691 *audio_out_delay = msg->msg[6];
1692 else
1693 *audio_out_delay = 0;
1690} 1694}
1691 1695
1692static inline void cec_msg_request_current_latency(struct cec_msg *msg, 1696static inline void cec_msg_request_current_latency(struct cec_msg *msg,
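
The CEC fix encodes an optional trailing operand: audio_out_delay is only present on the wire when audio_out_compensated == 3, so the sender appends it conditionally and the parser must check both the field and the actual message length before touching msg[6]. The encode/decode pair in miniature:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct msg { uint8_t len; uint8_t b[16]; };

static void encode(struct msg *m, uint8_t compensated, uint8_t delay)
{
	memset(m, 0, sizeof(*m));
	m->b[m->len++] = compensated;
	if (compensated == 3)		/* delay operand only if "partial" */
		m->b[m->len++] = delay;
}

static uint8_t decode_delay(const struct msg *m)
{
	/* Guard on both the field and the length: short msgs yield 0. */
	if (m->b[0] == 3 && m->len >= 2)
		return m->b[1];
	return 0;
}

int main(void)
{
	struct msg m;

	encode(&m, 3, 50);
	printf("len=%u delay=%u\n", m.len, decode_delay(&m));
	encode(&m, 1, 50);
	printf("len=%u delay=%u\n", m.len, decode_delay(&m));
	return 0;
}
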
diff --git a/include/uapi/linux/netfilter/nf_log.h b/include/uapi/linux/netfilter/nf_log.h
index 8be21e02387d..d0b5fa91ff54 100644
--- a/include/uapi/linux/netfilter/nf_log.h
+++ b/include/uapi/linux/netfilter/nf_log.h
@@ -9,4 +9,6 @@
9#define NF_LOG_MACDECODE 0x20 /* Decode MAC header */ 9#define NF_LOG_MACDECODE 0x20 /* Decode MAC header */
10#define NF_LOG_MASK 0x2f 10#define NF_LOG_MASK 0x2f
11 11
12#define NF_LOG_PREFIXLEN 128
13
12#endif /* _NETFILTER_NF_LOG_H */ 14#endif /* _NETFILTER_NF_LOG_H */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 881d49e94569..e3f27e09eb2b 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -235,7 +235,7 @@ enum nft_rule_compat_flags {
235/** 235/**
236 * enum nft_rule_compat_attributes - nf_tables rule compat attributes 236 * enum nft_rule_compat_attributes - nf_tables rule compat attributes
237 * 237 *
238 * @NFTA_RULE_COMPAT_PROTO: numerice value of handled protocol (NLA_U32) 238 * @NFTA_RULE_COMPAT_PROTO: numeric value of handled protocol (NLA_U32)
239 * @NFTA_RULE_COMPAT_FLAGS: bitmask of enum nft_rule_compat_flags (NLA_U32) 239 * @NFTA_RULE_COMPAT_FLAGS: bitmask of enum nft_rule_compat_flags (NLA_U32)
240 */ 240 */
241enum nft_rule_compat_attributes { 241enum nft_rule_compat_attributes {
@@ -499,7 +499,7 @@ enum nft_bitwise_attributes {
499 * enum nft_byteorder_ops - nf_tables byteorder operators 499 * enum nft_byteorder_ops - nf_tables byteorder operators
500 * 500 *
501 * @NFT_BYTEORDER_NTOH: network to host operator 501 * @NFT_BYTEORDER_NTOH: network to host operator
502 * @NFT_BYTEORDER_HTON: host to network opertaor 502 * @NFT_BYTEORDER_HTON: host to network operator
503 */ 503 */
504enum nft_byteorder_ops { 504enum nft_byteorder_ops {
505 NFT_BYTEORDER_NTOH, 505 NFT_BYTEORDER_NTOH,
diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild
index 82bdf5626859..bb68cb1b04ed 100644
--- a/include/uapi/rdma/Kbuild
+++ b/include/uapi/rdma/Kbuild
@@ -16,3 +16,4 @@ header-y += nes-abi.h
16header-y += ocrdma-abi.h 16header-y += ocrdma-abi.h
17header-y += hns-abi.h 17header-y += hns-abi.h
18header-y += vmw_pvrdma-abi.h 18header-y += vmw_pvrdma-abi.h
19header-y += qedr-abi.h
diff --git a/include/uapi/rdma/cxgb3-abi.h b/include/uapi/rdma/cxgb3-abi.h
index 48a19bda071b..d24eee12128f 100644
--- a/include/uapi/rdma/cxgb3-abi.h
+++ b/include/uapi/rdma/cxgb3-abi.h
@@ -30,7 +30,7 @@
30 * SOFTWARE. 30 * SOFTWARE.
31 */ 31 */
32#ifndef CXGB3_ABI_USER_H 32#ifndef CXGB3_ABI_USER_H
33#define CXBG3_ABI_USER_H 33#define CXGB3_ABI_USER_H
34 34
35#include <linux/types.h> 35#include <linux/types.h>
36 36
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 229a5d5df977..3d55d95dcf49 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -11,7 +11,6 @@
11 */ 11 */
12#include <linux/bpf.h> 12#include <linux/bpf.h>
13#include <linux/err.h> 13#include <linux/err.h>
14#include <linux/vmalloc.h>
15#include <linux/slab.h> 14#include <linux/slab.h>
16#include <linux/mm.h> 15#include <linux/mm.h>
17#include <linux/filter.h> 16#include <linux/filter.h>
@@ -74,14 +73,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
74 if (array_size >= U32_MAX - PAGE_SIZE) 73 if (array_size >= U32_MAX - PAGE_SIZE)
75 return ERR_PTR(-ENOMEM); 74 return ERR_PTR(-ENOMEM);
76 75
77
78 /* allocate all map elements and zero-initialize them */ 76 /* allocate all map elements and zero-initialize them */
79 array = kzalloc(array_size, GFP_USER | __GFP_NOWARN); 77 array = bpf_map_area_alloc(array_size);
80 if (!array) { 78 if (!array)
81 array = vzalloc(array_size); 79 return ERR_PTR(-ENOMEM);
82 if (!array)
83 return ERR_PTR(-ENOMEM);
84 }
85 80
86 /* copy mandatory map attributes */ 81 /* copy mandatory map attributes */
87 array->map.map_type = attr->map_type; 82 array->map.map_type = attr->map_type;
@@ -97,7 +92,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
97 92
98 if (array_size >= U32_MAX - PAGE_SIZE || 93 if (array_size >= U32_MAX - PAGE_SIZE ||
99 elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) { 94 elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
100 kvfree(array); 95 bpf_map_area_free(array);
101 return ERR_PTR(-ENOMEM); 96 return ERR_PTR(-ENOMEM);
102 } 97 }
103out: 98out:
@@ -262,7 +257,7 @@ static void array_map_free(struct bpf_map *map)
262 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) 257 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
263 bpf_array_free_percpu(array); 258 bpf_array_free_percpu(array);
264 259
265 kvfree(array); 260 bpf_map_area_free(array);
266} 261}
267 262
268static const struct bpf_map_ops array_ops = { 263static const struct bpf_map_ops array_ops = {
@@ -319,7 +314,8 @@ static void fd_array_map_free(struct bpf_map *map)
319 /* make sure it's empty */ 314 /* make sure it's empty */
320 for (i = 0; i < array->map.max_entries; i++) 315 for (i = 0; i < array->map.max_entries; i++)
321 BUG_ON(array->ptrs[i] != NULL); 316 BUG_ON(array->ptrs[i] != NULL);
322 kvfree(array); 317
318 bpf_map_area_free(array);
323} 319}
324 320
325static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key) 321static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3f2bb58952d8..a753bbe7df0a 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -13,7 +13,6 @@
13#include <linux/bpf.h> 13#include <linux/bpf.h>
14#include <linux/jhash.h> 14#include <linux/jhash.h>
15#include <linux/filter.h> 15#include <linux/filter.h>
16#include <linux/vmalloc.h>
17#include "percpu_freelist.h" 16#include "percpu_freelist.h"
18#include "bpf_lru_list.h" 17#include "bpf_lru_list.h"
19 18
@@ -103,7 +102,7 @@ static void htab_free_elems(struct bpf_htab *htab)
103 free_percpu(pptr); 102 free_percpu(pptr);
104 } 103 }
105free_elems: 104free_elems:
106 vfree(htab->elems); 105 bpf_map_area_free(htab->elems);
107} 106}
108 107
109static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, 108static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
@@ -125,7 +124,8 @@ static int prealloc_init(struct bpf_htab *htab)
125{ 124{
126 int err = -ENOMEM, i; 125 int err = -ENOMEM, i;
127 126
128 htab->elems = vzalloc(htab->elem_size * htab->map.max_entries); 127 htab->elems = bpf_map_area_alloc(htab->elem_size *
128 htab->map.max_entries);
129 if (!htab->elems) 129 if (!htab->elems)
130 return -ENOMEM; 130 return -ENOMEM;
131 131
@@ -320,14 +320,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
320 goto free_htab; 320 goto free_htab;
321 321
322 err = -ENOMEM; 322 err = -ENOMEM;
323 htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket), 323 htab->buckets = bpf_map_area_alloc(htab->n_buckets *
324 GFP_USER | __GFP_NOWARN); 324 sizeof(struct bucket));
325 325 if (!htab->buckets)
326 if (!htab->buckets) { 326 goto free_htab;
327 htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
328 if (!htab->buckets)
329 goto free_htab;
330 }
331 327
332 for (i = 0; i < htab->n_buckets; i++) { 328 for (i = 0; i < htab->n_buckets; i++) {
333 INIT_HLIST_HEAD(&htab->buckets[i].head); 329 INIT_HLIST_HEAD(&htab->buckets[i].head);
@@ -354,7 +350,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
354free_extra_elems: 350free_extra_elems:
355 free_percpu(htab->extra_elems); 351 free_percpu(htab->extra_elems);
356free_buckets: 352free_buckets:
357 kvfree(htab->buckets); 353 bpf_map_area_free(htab->buckets);
358free_htab: 354free_htab:
359 kfree(htab); 355 kfree(htab);
360 return ERR_PTR(err); 356 return ERR_PTR(err);
@@ -1014,7 +1010,7 @@ static void htab_map_free(struct bpf_map *map)
1014 prealloc_destroy(htab); 1010 prealloc_destroy(htab);
1015 1011
1016 free_percpu(htab->extra_elems); 1012 free_percpu(htab->extra_elems);
1017 kvfree(htab->buckets); 1013 bpf_map_area_free(htab->buckets);
1018 kfree(htab); 1014 kfree(htab);
1019} 1015}
1020 1016
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 732ae16d12b7..be8519148c25 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -7,7 +7,6 @@
7#include <linux/bpf.h> 7#include <linux/bpf.h>
8#include <linux/jhash.h> 8#include <linux/jhash.h>
9#include <linux/filter.h> 9#include <linux/filter.h>
10#include <linux/vmalloc.h>
11#include <linux/stacktrace.h> 10#include <linux/stacktrace.h>
12#include <linux/perf_event.h> 11#include <linux/perf_event.h>
13#include "percpu_freelist.h" 12#include "percpu_freelist.h"
@@ -32,7 +31,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
32 u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size; 31 u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
33 int err; 32 int err;
34 33
35 smap->elems = vzalloc(elem_size * smap->map.max_entries); 34 smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries);
36 if (!smap->elems) 35 if (!smap->elems)
37 return -ENOMEM; 36 return -ENOMEM;
38 37
@@ -45,7 +44,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
45 return 0; 44 return 0;
46 45
47free_elems: 46free_elems:
48 vfree(smap->elems); 47 bpf_map_area_free(smap->elems);
49 return err; 48 return err;
50} 49}
51 50
@@ -76,12 +75,9 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
76 if (cost >= U32_MAX - PAGE_SIZE) 75 if (cost >= U32_MAX - PAGE_SIZE)
77 return ERR_PTR(-E2BIG); 76 return ERR_PTR(-E2BIG);
78 77
79 smap = kzalloc(cost, GFP_USER | __GFP_NOWARN); 78 smap = bpf_map_area_alloc(cost);
80 if (!smap) { 79 if (!smap)
81 smap = vzalloc(cost); 80 return ERR_PTR(-ENOMEM);
82 if (!smap)
83 return ERR_PTR(-ENOMEM);
84 }
85 81
86 err = -E2BIG; 82 err = -E2BIG;
87 cost += n_buckets * (value_size + sizeof(struct stack_map_bucket)); 83 cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
@@ -112,7 +108,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
112put_buffers: 108put_buffers:
113 put_callchain_buffers(); 109 put_callchain_buffers();
114free_smap: 110free_smap:
115 kvfree(smap); 111 bpf_map_area_free(smap);
116 return ERR_PTR(err); 112 return ERR_PTR(err);
117} 113}
118 114
@@ -262,9 +258,9 @@ static void stack_map_free(struct bpf_map *map)
262 /* wait for bpf programs to complete before freeing stack map */ 258 /* wait for bpf programs to complete before freeing stack map */
263 synchronize_rcu(); 259 synchronize_rcu();
264 260
265 vfree(smap->elems); 261 bpf_map_area_free(smap->elems);
266 pcpu_freelist_destroy(&smap->freelist); 262 pcpu_freelist_destroy(&smap->freelist);
267 kvfree(smap); 263 bpf_map_area_free(smap);
268 put_callchain_buffers(); 264 put_callchain_buffers();
269} 265}
270 266
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 05ad086ab71d..08a4d287226b 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -13,6 +13,8 @@
13#include <linux/bpf_trace.h> 13#include <linux/bpf_trace.h>
14#include <linux/syscalls.h> 14#include <linux/syscalls.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/vmalloc.h>
17#include <linux/mmzone.h>
16#include <linux/anon_inodes.h> 18#include <linux/anon_inodes.h>
17#include <linux/file.h> 19#include <linux/file.h>
18#include <linux/license.h> 20#include <linux/license.h>
@@ -50,6 +52,30 @@ void bpf_register_map_type(struct bpf_map_type_list *tl)
50 list_add(&tl->list_node, &bpf_map_types); 52 list_add(&tl->list_node, &bpf_map_types);
51} 53}
52 54
55void *bpf_map_area_alloc(size_t size)
56{
57 /* We definitely need __GFP_NORETRY, so OOM killer doesn't
58 * trigger under memory pressure as we really just want to
59 * fail instead.
60 */
61 const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
62 void *area;
63
64 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
65 area = kmalloc(size, GFP_USER | flags);
66 if (area != NULL)
67 return area;
68 }
69
70 return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
71 PAGE_KERNEL);
72}
73
74void bpf_map_area_free(void *area)
75{
76 kvfree(area);
77}
78
53int bpf_map_precharge_memlock(u32 pages) 79int bpf_map_precharge_memlock(u32 pages)
54{ 80{
55 struct user_struct *user = get_current_user(); 81 struct user_struct *user = get_current_user();
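
The helper above centralizes the kmalloc-then-vmalloc fallback that hashtab.c and stackmap.c previously open-coded, with __GFP_NORETRY keeping the OOM killer out of the picture under pressure. A minimal sketch of the intended calling convention (the demo_* names are hypothetical; only bpf_map_area_alloc()/bpf_map_area_free() come from the hunks above):

#include <linux/bpf.h>
#include <linux/mm.h>

/* Small requests (up to PAGE_ALLOC_COSTLY_ORDER pages) come from kmalloc,
 * anything larger from vmalloc space; the area is zeroed either way. The
 * caller cannot know which allocator backed it, so the buffer must be
 * released with bpf_map_area_free(), which is kvfree() underneath. */
static void *demo_alloc_table(u32 nr_entries, size_t entry_size)
{
        if (entry_size && nr_entries > SIZE_MAX / entry_size)
                return NULL;            /* guard the multiplication */
        return bpf_map_area_alloc(nr_entries * entry_size);
}

static void demo_free_table(void *table)
{
        bpf_map_area_free(table);       /* never plain kfree()/vfree() */
}
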
diff --git a/kernel/cpu.c b/kernel/cpu.c
index f75c4d031eeb..0a5f630f5c54 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -764,7 +764,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
764{ 764{
765 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 765 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
766 int prev_state, ret = 0; 766 int prev_state, ret = 0;
767 bool hasdied = false;
768 767
769 if (num_online_cpus() == 1) 768 if (num_online_cpus() == 1)
770 return -EBUSY; 769 return -EBUSY;
@@ -809,7 +808,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
809 cpuhp_kick_ap_work(cpu); 808 cpuhp_kick_ap_work(cpu);
810 } 809 }
811 810
812 hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
813out: 811out:
814 cpu_hotplug_done(); 812 cpu_hotplug_done();
815 return ret; 813 return ret;
@@ -1302,10 +1300,24 @@ static int cpuhp_cb_check(enum cpuhp_state state)
1302 */ 1300 */
1303static int cpuhp_reserve_state(enum cpuhp_state state) 1301static int cpuhp_reserve_state(enum cpuhp_state state)
1304{ 1302{
1305 enum cpuhp_state i; 1303 enum cpuhp_state i, end;
1304 struct cpuhp_step *step;
1306 1305
1307 for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) { 1306 switch (state) {
1308 if (!cpuhp_ap_states[i].name) 1307 case CPUHP_AP_ONLINE_DYN:
1308 step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
1309 end = CPUHP_AP_ONLINE_DYN_END;
1310 break;
1311 case CPUHP_BP_PREPARE_DYN:
1312 step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
1313 end = CPUHP_BP_PREPARE_DYN_END;
1314 break;
1315 default:
1316 return -EINVAL;
1317 }
1318
1319 for (i = state; i <= end; i++, step++) {
1320 if (!step->name)
1309 return i; 1321 return i;
1310 } 1322 }
1311 WARN(1, "No more dynamic states available for CPU hotplug\n"); 1323 WARN(1, "No more dynamic states available for CPU hotplug\n");
@@ -1323,7 +1335,7 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1323 1335
1324 mutex_lock(&cpuhp_state_mutex); 1336 mutex_lock(&cpuhp_state_mutex);
1325 1337
1326 if (state == CPUHP_AP_ONLINE_DYN) { 1338 if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
1327 ret = cpuhp_reserve_state(state); 1339 ret = cpuhp_reserve_state(state);
1328 if (ret < 0) 1340 if (ret < 0)
1329 goto out; 1341 goto out;
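
With cpuhp_reserve_state() now scanning both dynamic ranges, a prepare-stage user can request a slot the same way AP-online users always have. A sketch under that assumption (callback names hypothetical; for dynamic states cpuhp_setup_state() returns the reserved state number on success):

#include <linux/cpuhotplug.h>

static int demo_prepare_cpu(unsigned int cpu)
{
        /* runs on the control CPU before @cpu comes up */
        return 0;
}

static int demo_dead_cpu(unsigned int cpu)
{
        /* runs after @cpu has gone offline */
        return 0;
}

static int __init demo_init(void)
{
        int ret;

        ret = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "demo:prepare",
                                demo_prepare_cpu, demo_dead_cpu);
        return ret < 0 ? ret : 0;       /* >= 0 is the allocated state */
}
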
diff --git a/kernel/module.c b/kernel/module.c
index 5088784c0cf9..38d4270925d4 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1145,7 +1145,7 @@ static size_t module_flags_taint(struct module *mod, char *buf)
1145 1145
1146 for (i = 0; i < TAINT_FLAGS_COUNT; i++) { 1146 for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
1147 if (taint_flags[i].module && test_bit(i, &mod->taints)) 1147 if (taint_flags[i].module && test_bit(i, &mod->taints))
1148 buf[l++] = taint_flags[i].true; 1148 buf[l++] = taint_flags[i].c_true;
1149 } 1149 }
1150 1150
1151 return l; 1151 return l;
diff --git a/kernel/panic.c b/kernel/panic.c
index c51edaa04fce..08aa88dde7de 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -249,7 +249,7 @@ void panic(const char *fmt, ...)
249 * Delay timeout seconds before rebooting the machine. 249 * Delay timeout seconds before rebooting the machine.
250 * We can't use the "normal" timers since we just panicked. 250 * We can't use the "normal" timers since we just panicked.
251 */ 251 */
252 pr_emerg("Rebooting in %d seconds..", panic_timeout); 252 pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
253 253
254 for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) { 254 for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
255 touch_nmi_watchdog(); 255 touch_nmi_watchdog();
@@ -355,7 +355,7 @@ const char *print_tainted(void)
355 for (i = 0; i < TAINT_FLAGS_COUNT; i++) { 355 for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
356 const struct taint_flag *t = &taint_flags[i]; 356 const struct taint_flag *t = &taint_flags[i];
357 *s++ = test_bit(i, &tainted_mask) ? 357 *s++ = test_bit(i, &tainted_mask) ?
358 t->true : t->false; 358 t->c_true : t->c_false;
359 } 359 }
360 *s = 0; 360 *s = 0;
361 } else 361 } else
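
The c_true/c_false renames in module.c and panic.c share one motivation: struct members literally named true and false stop compiling as soon as the stdbool.h macros (or an equivalent out-of-tree definition) are in scope, because the preprocessor rewrites them to 1 and 0. A sketch of the struct as the call sites above imply it (field layout assumed):

#include <linux/types.h>

struct taint_flag {
        char c_true;    /* character printed while the taint bit is set */
        char c_false;   /* character printed while it is clear */
        bool module;    /* whether the flag also applies to modules */
};

/* e.g. the proprietary-module flag would read something like: */
static const struct taint_flag demo_flag = { 'P', 'G', true };
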
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index f67ceb7768b8..15e6baef5c73 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -46,7 +46,7 @@ static const char * const mem_sleep_labels[] = {
46const char *mem_sleep_states[PM_SUSPEND_MAX]; 46const char *mem_sleep_states[PM_SUSPEND_MAX];
47 47
48suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE; 48suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE;
49suspend_state_t mem_sleep_default = PM_SUSPEND_MAX; 49static suspend_state_t mem_sleep_default = PM_SUSPEND_MEM;
50 50
51unsigned int pm_suspend_global_flags; 51unsigned int pm_suspend_global_flags;
52EXPORT_SYMBOL_GPL(pm_suspend_global_flags); 52EXPORT_SYMBOL_GPL(pm_suspend_global_flags);
@@ -168,7 +168,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops)
168 } 168 }
169 if (valid_state(PM_SUSPEND_MEM)) { 169 if (valid_state(PM_SUSPEND_MEM)) {
170 mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM]; 170 mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM];
171 if (mem_sleep_default >= PM_SUSPEND_MEM) 171 if (mem_sleep_default == PM_SUSPEND_MEM)
172 mem_sleep_current = PM_SUSPEND_MEM; 172 mem_sleep_current = PM_SUSPEND_MEM;
173 } 173 }
174 174
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 80adef7d4c3d..0d6ff3e471be 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -136,6 +136,7 @@ int rcu_jiffies_till_stall_check(void);
136#define TPS(x) tracepoint_string(x) 136#define TPS(x) tracepoint_string(x)
137 137
138void rcu_early_boot_tests(void); 138void rcu_early_boot_tests(void);
139void rcu_test_sync_prims(void);
139 140
140/* 141/*
141 * This function really isn't for public consumption, but RCU is special in 142 * This function really isn't for public consumption, but RCU is special in
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 1898559e6b60..b23a4d076f3d 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -185,9 +185,6 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
185 * benefits of doing might_sleep() to reduce latency.) 185 * benefits of doing might_sleep() to reduce latency.)
186 * 186 *
187 * Cool, huh? (Due to Josh Triplett.) 187 * Cool, huh? (Due to Josh Triplett.)
188 *
189 * But we want to make this a static inline later. The cond_resched()
190 * currently makes this problematic.
191 */ 188 */
192void synchronize_sched(void) 189void synchronize_sched(void)
193{ 190{
@@ -195,7 +192,6 @@ void synchronize_sched(void)
195 lock_is_held(&rcu_lock_map) || 192 lock_is_held(&rcu_lock_map) ||
196 lock_is_held(&rcu_sched_lock_map), 193 lock_is_held(&rcu_sched_lock_map),
197 "Illegal synchronize_sched() in RCU read-side critical section"); 194 "Illegal synchronize_sched() in RCU read-side critical section");
198 cond_resched();
199} 195}
200EXPORT_SYMBOL_GPL(synchronize_sched); 196EXPORT_SYMBOL_GPL(synchronize_sched);
201 197
diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
index 196f0302e2f4..c64b827ecbca 100644
--- a/kernel/rcu/tiny_plugin.h
+++ b/kernel/rcu/tiny_plugin.h
@@ -60,12 +60,17 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
60 60
61/* 61/*
62 * During boot, we forgive RCU lockdep issues. After this function is 62 * During boot, we forgive RCU lockdep issues. After this function is
63 * invoked, we start taking RCU lockdep issues seriously. 63 * invoked, we start taking RCU lockdep issues seriously. Note that unlike
64 * Tree RCU, Tiny RCU transitions directly from RCU_SCHEDULER_INACTIVE
65 * to RCU_SCHEDULER_RUNNING, skipping the RCU_SCHEDULER_INIT stage.
66 * The reason for this is that Tiny RCU does not need kthreads, so does
67 * not have to care about the fact that the scheduler is half-initialized
68 * at a certain phase of the boot process.
64 */ 69 */
65void __init rcu_scheduler_starting(void) 70void __init rcu_scheduler_starting(void)
66{ 71{
67 WARN_ON(nr_context_switches() > 0); 72 WARN_ON(nr_context_switches() > 0);
68 rcu_scheduler_active = 1; 73 rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
69} 74}
70 75
71#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 76#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 96c52e43f7ca..cb4e2056ccf3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -127,13 +127,16 @@ int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
127int sysctl_panic_on_rcu_stall __read_mostly; 127int sysctl_panic_on_rcu_stall __read_mostly;
128 128
129/* 129/*
130 * The rcu_scheduler_active variable transitions from zero to one just 130 * The rcu_scheduler_active variable is initialized to the value
131 * before the first task is spawned. So when this variable is zero, RCU 131 * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the
132 * can assume that there is but one task, allowing RCU to (for example) 132 * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
133 * RCU can assume that there is but one task, allowing RCU to (for example)
133 * optimize synchronize_rcu() to a simple barrier(). When this variable 134 * optimize synchronize_rcu() to a simple barrier(). When this variable
134 * is one, RCU must actually do all the hard work required to detect real 135 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
135 * grace periods. This variable is also used to suppress boot-time false 136 * to detect real grace periods. This variable is also used to suppress
136 * positives from lockdep-RCU error checking. 137 * boot-time false positives from lockdep-RCU error checking. Finally, it
138 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
139 * is fully initialized, including all of its kthreads having been spawned.
137 */ 140 */
138int rcu_scheduler_active __read_mostly; 141int rcu_scheduler_active __read_mostly;
139EXPORT_SYMBOL_GPL(rcu_scheduler_active); 142EXPORT_SYMBOL_GPL(rcu_scheduler_active);
@@ -3980,18 +3983,22 @@ static int __init rcu_spawn_gp_kthread(void)
3980early_initcall(rcu_spawn_gp_kthread); 3983early_initcall(rcu_spawn_gp_kthread);
3981 3984
3982/* 3985/*
3983 * This function is invoked towards the end of the scheduler's initialization 3986 * This function is invoked towards the end of the scheduler's
3984 * process. Before this is called, the idle task might contain 3987 * initialization process. Before this is called, the idle task might
3985 * RCU read-side critical sections (during which time, this idle 3988 * contain synchronous grace-period primitives (during which time, this idle
3986 * task is booting the system). After this function is called, the 3989 * task is booting the system, and such primitives are no-ops). After this
3987 * idle tasks are prohibited from containing RCU read-side critical 3990 * function is called, any synchronous grace-period primitives are run as
3988 * sections. This function also enables RCU lockdep checking. 3991 * expedited, with the requesting task driving the grace period forward.
3992 * A later core_initcall() rcu_exp_runtime_mode() will switch to full
3993 * runtime RCU functionality.
3989 */ 3994 */
3990void rcu_scheduler_starting(void) 3995void rcu_scheduler_starting(void)
3991{ 3996{
3992 WARN_ON(num_online_cpus() != 1); 3997 WARN_ON(num_online_cpus() != 1);
3993 WARN_ON(nr_context_switches() > 0); 3998 WARN_ON(nr_context_switches() > 0);
3994 rcu_scheduler_active = 1; 3999 rcu_test_sync_prims();
4000 rcu_scheduler_active = RCU_SCHEDULER_INIT;
4001 rcu_test_sync_prims();
3995} 4002}
3996 4003
3997/* 4004/*
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index d3053e99fdb6..e59e1849b89a 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -532,18 +532,28 @@ struct rcu_exp_work {
532}; 532};
533 533
534/* 534/*
535 * Common code to drive an expedited grace period forward, used by
536 * workqueues and mid-boot-time tasks.
537 */
538static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
539 smp_call_func_t func, unsigned long s)
540{
541 /* Initialize the rcu_node tree in preparation for the wait. */
542 sync_rcu_exp_select_cpus(rsp, func);
543
544 /* Wait and clean up, including waking everyone. */
545 rcu_exp_wait_wake(rsp, s);
546}
547
548/*
535 * Work-queue handler to drive an expedited grace period forward. 549 * Work-queue handler to drive an expedited grace period forward.
536 */ 550 */
537static void wait_rcu_exp_gp(struct work_struct *wp) 551static void wait_rcu_exp_gp(struct work_struct *wp)
538{ 552{
539 struct rcu_exp_work *rewp; 553 struct rcu_exp_work *rewp;
540 554
541 /* Initialize the rcu_node tree in preparation for the wait. */
542 rewp = container_of(wp, struct rcu_exp_work, rew_work); 555 rewp = container_of(wp, struct rcu_exp_work, rew_work);
543 sync_rcu_exp_select_cpus(rewp->rew_rsp, rewp->rew_func); 556 rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
544
545 /* Wait and clean up, including waking everyone. */
546 rcu_exp_wait_wake(rewp->rew_rsp, rewp->rew_s);
547} 557}
548 558
549/* 559/*
@@ -569,12 +579,18 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
569 if (exp_funnel_lock(rsp, s)) 579 if (exp_funnel_lock(rsp, s))
570 return; /* Someone else did our work for us. */ 580 return; /* Someone else did our work for us. */
571 581
572 /* Marshall arguments and schedule the expedited grace period. */ 582 /* Ensure that load happens before action based on it. */
573 rew.rew_func = func; 583 if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
574 rew.rew_rsp = rsp; 584 /* Direct call during scheduler init and early_initcalls(). */
575 rew.rew_s = s; 585 rcu_exp_sel_wait_wake(rsp, func, s);
576 INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp); 586 } else {
577 schedule_work(&rew.rew_work); 587 /* Marshal arguments & schedule the expedited grace period. */
588 rew.rew_func = func;
589 rew.rew_rsp = rsp;
590 rew.rew_s = s;
591 INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
592 schedule_work(&rew.rew_work);
593 }
578 594
579 /* Wait for expedited grace period to complete. */ 595 /* Wait for expedited grace period to complete. */
580 rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id()); 596 rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
@@ -676,6 +692,8 @@ void synchronize_rcu_expedited(void)
676{ 692{
677 struct rcu_state *rsp = rcu_state_p; 693 struct rcu_state *rsp = rcu_state_p;
678 694
695 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
696 return;
679 _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler); 697 _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
680} 698}
681EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); 699EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
@@ -693,3 +711,15 @@ void synchronize_rcu_expedited(void)
693EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); 711EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
694 712
695#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ 713#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
714
715/*
716 * Switch to run-time mode once Tree RCU has fully initialized.
717 */
718static int __init rcu_exp_runtime_mode(void)
719{
720 rcu_test_sync_prims();
721 rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
722 rcu_test_sync_prims();
723 return 0;
724}
725core_initcall(rcu_exp_runtime_mode);
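
These RCU hunks (continued below in tree_plugin.h and update.c) replace the old boolean rcu_scheduler_active with a three-phase boot sequence. A sketch of the constants and the timeline they imply (values assumed; the definitions live in an rcupdate header in this series):

#define RCU_SCHEDULER_INACTIVE  0       /* early boot: one task, sync GPs are no-ops */
#define RCU_SCHEDULER_INIT      1       /* scheduler up, RCU kthreads not yet spawned:
                                         * sync GPs run expedited, driven in-place by
                                         * the requesting task */
#define RCU_SCHEDULER_RUNNING   2       /* fully initialized: normal runtime behavior */

/*
 * Transitions:
 *   rcu_scheduler_starting():  INACTIVE -> INIT     (end of scheduler init)
 *   rcu_exp_runtime_mode():    INIT     -> RUNNING  (core_initcall)
 * rcu_test_sync_prims() brackets each transition so that a primitive broken
 * in the new mode trips immediately under CONFIG_PROVE_RCU.
 */
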
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 85c5a883c6e3..56583e764ebf 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -670,7 +670,7 @@ void synchronize_rcu(void)
670 lock_is_held(&rcu_lock_map) || 670 lock_is_held(&rcu_lock_map) ||
671 lock_is_held(&rcu_sched_lock_map), 671 lock_is_held(&rcu_sched_lock_map),
672 "Illegal synchronize_rcu() in RCU read-side critical section"); 672 "Illegal synchronize_rcu() in RCU read-side critical section");
673 if (!rcu_scheduler_active) 673 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
674 return; 674 return;
675 if (rcu_gp_is_expedited()) 675 if (rcu_gp_is_expedited())
676 synchronize_rcu_expedited(); 676 synchronize_rcu_expedited();
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index f19271dce0a9..4f6db7e6a117 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -121,11 +121,14 @@ EXPORT_SYMBOL(rcu_read_lock_sched_held);
121 * Should expedited grace-period primitives always fall back to their 121 * Should expedited grace-period primitives always fall back to their
122 * non-expedited counterparts? Intended for use within RCU. Note 122 * non-expedited counterparts? Intended for use within RCU. Note
123 * that if the user specifies both rcu_expedited and rcu_normal, then 123 * that if the user specifies both rcu_expedited and rcu_normal, then
124 * rcu_normal wins. 124 * rcu_normal wins. (Except during the time period during boot from
125 * when the first task is spawned until the rcu_exp_runtime_mode()
126 * core_initcall() is invoked, at which point everything is expedited.)
125 */ 127 */
126bool rcu_gp_is_normal(void) 128bool rcu_gp_is_normal(void)
127{ 129{
128 return READ_ONCE(rcu_normal); 130 return READ_ONCE(rcu_normal) &&
131 rcu_scheduler_active != RCU_SCHEDULER_INIT;
129} 132}
130EXPORT_SYMBOL_GPL(rcu_gp_is_normal); 133EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
131 134
@@ -135,13 +138,14 @@ static atomic_t rcu_expedited_nesting =
135/* 138/*
136 * Should normal grace-period primitives be expedited? Intended for 139 * Should normal grace-period primitives be expedited? Intended for
137 * use within RCU. Note that this function takes the rcu_expedited 140 * use within RCU. Note that this function takes the rcu_expedited
138 * sysfs/boot variable into account as well as the rcu_expedite_gp() 141 * sysfs/boot variable and rcu_scheduler_active into account as well
139 * nesting. So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited() 142 * as the rcu_expedite_gp() nesting. So looping on rcu_unexpedite_gp()
140 * returns false is a -really- bad idea. 143 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
141 */ 144 */
142bool rcu_gp_is_expedited(void) 145bool rcu_gp_is_expedited(void)
143{ 146{
144 return rcu_expedited || atomic_read(&rcu_expedited_nesting); 147 return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
148 rcu_scheduler_active == RCU_SCHEDULER_INIT;
145} 149}
146EXPORT_SYMBOL_GPL(rcu_gp_is_expedited); 150EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
147 151
@@ -257,7 +261,7 @@ EXPORT_SYMBOL_GPL(rcu_callback_map);
257 261
258int notrace debug_lockdep_rcu_enabled(void) 262int notrace debug_lockdep_rcu_enabled(void)
259{ 263{
260 return rcu_scheduler_active && debug_locks && 264 return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
261 current->lockdep_recursion == 0; 265 current->lockdep_recursion == 0;
262} 266}
263EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); 267EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
@@ -591,7 +595,7 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks);
591void synchronize_rcu_tasks(void) 595void synchronize_rcu_tasks(void)
592{ 596{
593 /* Complain if the scheduler has not started. */ 597 /* Complain if the scheduler has not started. */
594 RCU_LOCKDEP_WARN(!rcu_scheduler_active, 598 RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
595 "synchronize_rcu_tasks called too soon"); 599 "synchronize_rcu_tasks called too soon");
596 600
597 /* Wait for the grace period. */ 601 /* Wait for the grace period. */
@@ -813,6 +817,23 @@ static void rcu_spawn_tasks_kthread(void)
813 817
814#endif /* #ifdef CONFIG_TASKS_RCU */ 818#endif /* #ifdef CONFIG_TASKS_RCU */
815 819
820/*
821 * Test each non-SRCU synchronous grace-period wait API. This is
822 * useful just after a change in mode for these primitives, and
823 * during early boot.
824 */
825void rcu_test_sync_prims(void)
826{
827 if (!IS_ENABLED(CONFIG_PROVE_RCU))
828 return;
829 synchronize_rcu();
830 synchronize_rcu_bh();
831 synchronize_sched();
832 synchronize_rcu_expedited();
833 synchronize_rcu_bh_expedited();
834 synchronize_sched_expedited();
835}
836
816#ifdef CONFIG_PROVE_RCU 837#ifdef CONFIG_PROVE_RCU
817 838
818/* 839/*
@@ -865,6 +886,7 @@ void rcu_early_boot_tests(void)
865 early_boot_test_call_rcu_bh(); 886 early_boot_test_call_rcu_bh();
866 if (rcu_self_test_sched) 887 if (rcu_self_test_sched)
867 early_boot_test_call_rcu_sched(); 888 early_boot_test_call_rcu_sched();
889 rcu_test_sync_prims();
868} 890}
869 891
870static int rcu_verify_early_boot_tests(void) 892static int rcu_verify_early_boot_tests(void)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8dbaec0e4f7f..1aea594a54db 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2475,6 +2475,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
2475 break; 2475 break;
2476 if (neg) 2476 if (neg)
2477 continue; 2477 continue;
2478 val = convmul * val / convdiv;
2478 if ((min && val < *min) || (max && val > *max)) 2479 if ((min && val < *min) || (max && val > *max))
2479 continue; 2480 continue;
2480 *i = val; 2481 *i = val;
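
The one-line sysctl fix matters for the *_ms_jiffies_minmax handlers, where convmul/convdiv translate user-visible milliseconds into stored jiffies: previously the raw input was bounds-checked and stored without conversion. A worked sketch of the write path (assuming convmul = HZ and convdiv = 1000, as the ms_jiffies helper passes, and HZ = 250):

/* user writes "100" (milliseconds):
 *   fixed:  val = HZ * 100 / 1000 = 25 jiffies, compared against
 *           *min/*max (which are kept in jiffies), then stored
 *   broken: 100 stored verbatim, i.e. a 4x longer timeout, and
 *           milliseconds compared against jiffies bounds
 */
static unsigned long demo_ms_to_stored(unsigned long val)
{
        return HZ * val / 1000;         /* now applied before the bounds check */
}
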
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 9d20d5dd298a..4bbd38ec3788 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -128,10 +128,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
128 struct hlist_head *hashent = ucounts_hashentry(ns, uid); 128 struct hlist_head *hashent = ucounts_hashentry(ns, uid);
129 struct ucounts *ucounts, *new; 129 struct ucounts *ucounts, *new;
130 130
131 spin_lock(&ucounts_lock); 131 spin_lock_irq(&ucounts_lock);
132 ucounts = find_ucounts(ns, uid, hashent); 132 ucounts = find_ucounts(ns, uid, hashent);
133 if (!ucounts) { 133 if (!ucounts) {
134 spin_unlock(&ucounts_lock); 134 spin_unlock_irq(&ucounts_lock);
135 135
136 new = kzalloc(sizeof(*new), GFP_KERNEL); 136 new = kzalloc(sizeof(*new), GFP_KERNEL);
137 if (!new) 137 if (!new)
@@ -141,7 +141,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
141 new->uid = uid; 141 new->uid = uid;
142 atomic_set(&new->count, 0); 142 atomic_set(&new->count, 0);
143 143
144 spin_lock(&ucounts_lock); 144 spin_lock_irq(&ucounts_lock);
145 ucounts = find_ucounts(ns, uid, hashent); 145 ucounts = find_ucounts(ns, uid, hashent);
146 if (ucounts) { 146 if (ucounts) {
147 kfree(new); 147 kfree(new);
@@ -152,16 +152,18 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
152 } 152 }
153 if (!atomic_add_unless(&ucounts->count, 1, INT_MAX)) 153 if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
154 ucounts = NULL; 154 ucounts = NULL;
155 spin_unlock(&ucounts_lock); 155 spin_unlock_irq(&ucounts_lock);
156 return ucounts; 156 return ucounts;
157} 157}
158 158
159static void put_ucounts(struct ucounts *ucounts) 159static void put_ucounts(struct ucounts *ucounts)
160{ 160{
161 unsigned long flags;
162
161 if (atomic_dec_and_test(&ucounts->count)) { 163 if (atomic_dec_and_test(&ucounts->count)) {
162 spin_lock(&ucounts_lock); 164 spin_lock_irqsave(&ucounts_lock, flags);
163 hlist_del_init(&ucounts->node); 165 hlist_del_init(&ucounts->node);
164 spin_unlock(&ucounts_lock); 166 spin_unlock_irqrestore(&ucounts_lock, flags);
165 167
166 kfree(ucounts); 168 kfree(ucounts);
167 } 169 }
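
Switching every ucounts_lock acquisition to an IRQ-disabling variant follows the usual rule: once a lock can be taken with interrupts off (put_ucounts() uses the irqsave form precisely because its callers' IRQ state varies), every other holder must disable interrupts too, or an interrupt arriving on top of a plain spin_lock() holder can deadlock on the same lock. A sketch of the two variants in play:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_process_context(void)        /* IRQs known enabled */
{
        spin_lock_irq(&demo_lock);
        /* ... hash lookup / insert ... */
        spin_unlock_irq(&demo_lock);
}

static void demo_any_context(void)            /* IRQ state unknown */
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        /* ... hlist_del_init() / kfree() ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}
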
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index d4b0fa01cae3..63177be0159e 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -49,6 +49,8 @@ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
49#define for_each_watchdog_cpu(cpu) \ 49#define for_each_watchdog_cpu(cpu) \
50 for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask) 50 for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
51 51
52atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
53
52/* 54/*
53 * The 'watchdog_running' variable is set to 1 when the watchdog threads 55 * The 'watchdog_running' variable is set to 1 when the watchdog threads
54 * are registered/started and is set to 0 when the watchdog threads are 56 * are registered/started and is set to 0 when the watchdog threads are
@@ -260,6 +262,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
260 int duration; 262 int duration;
261 int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace; 263 int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
262 264
265 if (atomic_read(&watchdog_park_in_progress) != 0)
266 return HRTIMER_NORESTART;
267
263 /* kick the hardlockup detector */ 268 /* kick the hardlockup detector */
264 watchdog_interrupt_count(); 269 watchdog_interrupt_count();
265 270
@@ -467,12 +472,16 @@ static int watchdog_park_threads(void)
467{ 472{
468 int cpu, ret = 0; 473 int cpu, ret = 0;
469 474
475 atomic_set(&watchdog_park_in_progress, 1);
476
470 for_each_watchdog_cpu(cpu) { 477 for_each_watchdog_cpu(cpu) {
471 ret = kthread_park(per_cpu(softlockup_watchdog, cpu)); 478 ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
472 if (ret) 479 if (ret)
473 break; 480 break;
474 } 481 }
475 482
483 atomic_set(&watchdog_park_in_progress, 0);
484
476 return ret; 485 return ret;
477} 486}
478 487
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 84016c8aee6b..12b8dd640786 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -84,6 +84,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
84 /* Ensure the watchdog never gets throttled */ 84 /* Ensure the watchdog never gets throttled */
85 event->hw.interrupts = 0; 85 event->hw.interrupts = 0;
86 86
87 if (atomic_read(&watchdog_park_in_progress) != 0)
88 return;
89
87 if (__this_cpu_read(watchdog_nmi_touch) == true) { 90 if (__this_cpu_read(watchdog_nmi_touch) == true) {
88 __this_cpu_write(watchdog_nmi_touch, false); 91 __this_cpu_write(watchdog_nmi_touch, false);
89 return; 92 return;
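
Both detectors now consult the same flag: the softlockup hrtimer returns HRTIMER_NORESTART and the hardlockup NMI callback bails out while watchdog_park_threads() is mid-flight, so a slow kthread_park() cannot surface as a bogus lockup report. For watchdog_hld.c to see the flag it presumably lives in a shared header (an assumption; the declaration is not part of these hunks):

/* assumed declaration, e.g. in include/linux/nmi.h */
extern atomic_t watchdog_park_in_progress;

/* pattern used by both callbacks */
static inline bool demo_watchdog_parking(void)
{
        return atomic_read(&watchdog_park_in_progress) != 0;
}
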
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 86c8911b0e3a..a3e14ce92a56 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -144,4 +144,3 @@ int ioremap_page_range(unsigned long addr,
144 144
145 return err; 145 return err;
146} 146}
147EXPORT_SYMBOL_GPL(ioremap_page_range);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 0b92d605fb69..84812a9fb16f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -769,7 +769,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
769 struct radix_tree_node *old = child; 769 struct radix_tree_node *old = child;
770 offset = child->offset + 1; 770 offset = child->offset + 1;
771 child = child->parent; 771 child = child->parent;
772 WARN_ON_ONCE(!list_empty(&node->private_list)); 772 WARN_ON_ONCE(!list_empty(&old->private_list));
773 radix_tree_node_free(old); 773 radix_tree_node_free(old);
774 if (old == entry_to_node(node)) 774 if (old == entry_to_node(node))
775 return; 775 return;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9a6bd6c8d55a..5f3ad65c85de 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -783,6 +783,12 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
783 783
784 assert_spin_locked(pmd_lockptr(mm, pmd)); 784 assert_spin_locked(pmd_lockptr(mm, pmd));
785 785
786 /*
787 * When we COW a devmap PMD entry, we split it into PTEs, so we should
788 * not be in this function with `flags & FOLL_COW` set.
789 */
790 WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
791
786 if (flags & FOLL_WRITE && !pmd_write(*pmd)) 792 if (flags & FOLL_WRITE && !pmd_write(*pmd))
787 return NULL; 793 return NULL;
788 794
@@ -1128,6 +1134,16 @@ out_unlock:
1128 return ret; 1134 return ret;
1129} 1135}
1130 1136
1137/*
1138 * FOLL_FORCE can write to even unwritable pmd's, but only
1139 * after we've gone through a COW cycle and they are dirty.
1140 */
1141static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
1142{
1143 return pmd_write(pmd) ||
1144 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
1145}
1146
1131struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 1147struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1132 unsigned long addr, 1148 unsigned long addr,
1133 pmd_t *pmd, 1149 pmd_t *pmd,
@@ -1138,7 +1154,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1138 1154
1139 assert_spin_locked(pmd_lockptr(mm, pmd)); 1155 assert_spin_locked(pmd_lockptr(mm, pmd));
1140 1156
1141 if (flags & FOLL_WRITE && !pmd_write(*pmd)) 1157 if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
1142 goto out; 1158 goto out;
1143 1159
1144 /* Avoid dumping huge zero page */ 1160 /* Avoid dumping huge zero page */
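
can_follow_write_pmd() transplants to transparent huge pages the rule the Dirty-COW hardening established for ordinary pages: FOLL_FORCE may write through an unwritable entry only after a COW cycle has marked it dirty. The pte-level twin, sketched here for comparison (as in mm/gup.c; shown from memory):

static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
        return pte_write(pte) ||
                ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}
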
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a63a8f832664..b822e158b319 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4353,9 +4353,9 @@ static int mem_cgroup_do_precharge(unsigned long count)
4353 return ret; 4353 return ret;
4354 } 4354 }
4355 4355
4356 /* Try charges one by one with reclaim */ 4356 /* Try charges one by one with reclaim, but do not retry */
4357 while (count--) { 4357 while (count--) {
4358 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1); 4358 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
4359 if (ret) 4359 if (ret)
4360 return ret; 4360 return ret;
4361 mc.precharge++; 4361 mc.precharge++;
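
The memcontrol.c change fixes a bitwise no-op: __GFP_NORETRY is not part of GFP_KERNEL, so masking it off left the flags unchanged and try_charge() kept retrying with reclaim. Demonstrated directly (GFP_KERNEL is __GFP_RECLAIM | __GFP_IO | __GFP_FS):

#include <linux/gfp.h>

static const gfp_t demo_old = GFP_KERNEL & ~__GFP_NORETRY; /* == GFP_KERNEL */
static const gfp_t demo_new = GFP_KERNEL |  __GFP_NORETRY; /* one attempt, then fail */
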
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e43142c15631..ca2723d47338 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1033,36 +1033,39 @@ static void node_states_set_node(int node, struct memory_notify *arg)
1033 node_set_state(node, N_MEMORY); 1033 node_set_state(node, N_MEMORY);
1034} 1034}
1035 1035
1036int zone_can_shift(unsigned long pfn, unsigned long nr_pages, 1036bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
1037 enum zone_type target) 1037 enum zone_type target, int *zone_shift)
1038{ 1038{
1039 struct zone *zone = page_zone(pfn_to_page(pfn)); 1039 struct zone *zone = page_zone(pfn_to_page(pfn));
1040 enum zone_type idx = zone_idx(zone); 1040 enum zone_type idx = zone_idx(zone);
1041 int i; 1041 int i;
1042 1042
1043 *zone_shift = 0;
1044
1043 if (idx < target) { 1045 if (idx < target) {
1044 /* pages must be at end of current zone */ 1046 /* pages must be at end of current zone */
1045 if (pfn + nr_pages != zone_end_pfn(zone)) 1047 if (pfn + nr_pages != zone_end_pfn(zone))
1046 return 0; 1048 return false;
1047 1049
1048 /* no zones in use between current zone and target */ 1050 /* no zones in use between current zone and target */
1049 for (i = idx + 1; i < target; i++) 1051 for (i = idx + 1; i < target; i++)
1050 if (zone_is_initialized(zone - idx + i)) 1052 if (zone_is_initialized(zone - idx + i))
1051 return 0; 1053 return false;
1052 } 1054 }
1053 1055
1054 if (target < idx) { 1056 if (target < idx) {
1055 /* pages must be at beginning of current zone */ 1057 /* pages must be at beginning of current zone */
1056 if (pfn != zone->zone_start_pfn) 1058 if (pfn != zone->zone_start_pfn)
1057 return 0; 1059 return false;
1058 1060
1059 /* no zones in use between current zone and target */ 1061 /* no zones in use between current zone and target */
1060 for (i = target + 1; i < idx; i++) 1062 for (i = target + 1; i < idx; i++)
1061 if (zone_is_initialized(zone - idx + i)) 1063 if (zone_is_initialized(zone - idx + i))
1062 return 0; 1064 return false;
1063 } 1065 }
1064 1066
1065 return target - idx; 1067 *zone_shift = target - idx;
1068 return true;
1066} 1069}
1067 1070
1068/* Must be protected by mem_hotplug_begin() */ 1071/* Must be protected by mem_hotplug_begin() */
@@ -1089,10 +1092,13 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
1089 !can_online_high_movable(zone)) 1092 !can_online_high_movable(zone))
1090 return -EINVAL; 1093 return -EINVAL;
1091 1094
1092 if (online_type == MMOP_ONLINE_KERNEL) 1095 if (online_type == MMOP_ONLINE_KERNEL) {
1093 zone_shift = zone_can_shift(pfn, nr_pages, ZONE_NORMAL); 1096 if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
1094 else if (online_type == MMOP_ONLINE_MOVABLE) 1097 return -EINVAL;
1095 zone_shift = zone_can_shift(pfn, nr_pages, ZONE_MOVABLE); 1098 } else if (online_type == MMOP_ONLINE_MOVABLE) {
1099 if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
1100 return -EINVAL;
1101 }
1096 1102
1097 zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages); 1103 zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
1098 if (!zone) 1104 if (!zone)
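
Returning the shift directly made a return of 0 ambiguous: it stood both for "pages are already in the target zone" and for "the range cannot be moved at all". The bool-plus-out-parameter signature separates validity from magnitude, letting online_pages() refuse the request outright, as in this sketch of the new calling convention:

static int demo_online_kernel(unsigned long pfn, unsigned long nr_pages)
{
        int zone_shift;

        if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
                return -EINVAL;         /* invalid is now distinct from shift == 0 */
        /* ... move_pfn_range(zone_shift, pfn, pfn + nr_pages) ... */
        return 0;
}
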
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 2e346645eb80..1e7873e40c9a 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2017,8 +2017,8 @@ retry_cpuset:
2017 2017
2018 nmask = policy_nodemask(gfp, pol); 2018 nmask = policy_nodemask(gfp, pol);
2019 zl = policy_zonelist(gfp, pol, node); 2019 zl = policy_zonelist(gfp, pol, node);
2020 mpol_cond_put(pol);
2021 page = __alloc_pages_nodemask(gfp, order, zl, nmask); 2020 page = __alloc_pages_nodemask(gfp, order, zl, nmask);
2021 mpol_cond_put(pol);
2022out: 2022out:
2023 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) 2023 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2024 goto retry_cpuset; 2024 goto retry_cpuset;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d604d2596b7b..f3e0c69a97b7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3523,12 +3523,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3523 struct page *page = NULL; 3523 struct page *page = NULL;
3524 unsigned int alloc_flags; 3524 unsigned int alloc_flags;
3525 unsigned long did_some_progress; 3525 unsigned long did_some_progress;
3526 enum compact_priority compact_priority = DEF_COMPACT_PRIORITY; 3526 enum compact_priority compact_priority;
3527 enum compact_result compact_result; 3527 enum compact_result compact_result;
3528 int compaction_retries = 0; 3528 int compaction_retries;
3529 int no_progress_loops = 0; 3529 int no_progress_loops;
3530 unsigned long alloc_start = jiffies; 3530 unsigned long alloc_start = jiffies;
3531 unsigned int stall_timeout = 10 * HZ; 3531 unsigned int stall_timeout = 10 * HZ;
3532 unsigned int cpuset_mems_cookie;
3532 3533
3533 /* 3534 /*
3534 * In the slowpath, we sanity check order to avoid ever trying to 3535 * In the slowpath, we sanity check order to avoid ever trying to
@@ -3549,6 +3550,23 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3549 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) 3550 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
3550 gfp_mask &= ~__GFP_ATOMIC; 3551 gfp_mask &= ~__GFP_ATOMIC;
3551 3552
3553retry_cpuset:
3554 compaction_retries = 0;
3555 no_progress_loops = 0;
3556 compact_priority = DEF_COMPACT_PRIORITY;
3557 cpuset_mems_cookie = read_mems_allowed_begin();
3558 /*
3559 * We need to recalculate the starting point for the zonelist iterator
3560 * because we might have used different nodemask in the fast path, or
3561 * there was a cpuset modification and we are retrying - otherwise we
3562 * could end up iterating over non-eligible zones endlessly.
3563 */
3564 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3565 ac->high_zoneidx, ac->nodemask);
3566 if (!ac->preferred_zoneref->zone)
3567 goto nopage;
3568
3569
3552 /* 3570 /*
3553 * The fast path uses conservative alloc_flags to succeed only until 3571 * The fast path uses conservative alloc_flags to succeed only until
3554 * kswapd needs to be woken up, and to avoid the cost of setting up 3572 * kswapd needs to be woken up, and to avoid the cost of setting up
@@ -3708,6 +3726,13 @@ retry:
3708 &compaction_retries)) 3726 &compaction_retries))
3709 goto retry; 3727 goto retry;
3710 3728
3729 /*
3730 * It's possible we raced with cpuset update so the OOM would be
3731 * premature (see below the nopage: label for full explanation).
3732 */
3733 if (read_mems_allowed_retry(cpuset_mems_cookie))
3734 goto retry_cpuset;
3735
3711 /* Reclaim has failed us, start killing things */ 3736 /* Reclaim has failed us, start killing things */
3712 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 3737 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3713 if (page) 3738 if (page)
@@ -3720,6 +3745,16 @@ retry:
3720 } 3745 }
3721 3746
3722nopage: 3747nopage:
3748 /*
3749 * When updating a task's mems_allowed or mempolicy nodemask, it is
3750 * possible to race with parallel threads in such a way that our
3751 * allocation can fail while the mask is being updated. If we are about
3752 * to fail, check if the cpuset changed during allocation and if so,
3753 * retry.
3754 */
3755 if (read_mems_allowed_retry(cpuset_mems_cookie))
3756 goto retry_cpuset;
3757
3723 warn_alloc(gfp_mask, 3758 warn_alloc(gfp_mask,
3724 "page allocation failure: order:%u", order); 3759 "page allocation failure: order:%u", order);
3725got_pg: 3760got_pg:
@@ -3734,7 +3769,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
3734 struct zonelist *zonelist, nodemask_t *nodemask) 3769 struct zonelist *zonelist, nodemask_t *nodemask)
3735{ 3770{
3736 struct page *page; 3771 struct page *page;
3737 unsigned int cpuset_mems_cookie;
3738 unsigned int alloc_flags = ALLOC_WMARK_LOW; 3772 unsigned int alloc_flags = ALLOC_WMARK_LOW;
3739 gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */ 3773 gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
3740 struct alloc_context ac = { 3774 struct alloc_context ac = {
@@ -3771,9 +3805,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
3771 if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE) 3805 if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
3772 alloc_flags |= ALLOC_CMA; 3806 alloc_flags |= ALLOC_CMA;
3773 3807
3774retry_cpuset:
3775 cpuset_mems_cookie = read_mems_allowed_begin();
3776
3777 /* Dirty zone balancing only done in the fast path */ 3808 /* Dirty zone balancing only done in the fast path */
3778 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE); 3809 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
3779 3810
@@ -3784,8 +3815,13 @@ retry_cpuset:
3784 */ 3815 */
3785 ac.preferred_zoneref = first_zones_zonelist(ac.zonelist, 3816 ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
3786 ac.high_zoneidx, ac.nodemask); 3817 ac.high_zoneidx, ac.nodemask);
3787 if (!ac.preferred_zoneref) { 3818 if (!ac.preferred_zoneref->zone) {
3788 page = NULL; 3819 page = NULL;
3820 /*
3821 * This might be due to race with cpuset_current_mems_allowed
3822 * update, so make sure we retry with original nodemask in the
3823 * slow path.
3824 */
3789 goto no_zone; 3825 goto no_zone;
3790 } 3826 }
3791 3827
@@ -3794,6 +3830,7 @@ retry_cpuset:
3794 if (likely(page)) 3830 if (likely(page))
3795 goto out; 3831 goto out;
3796 3832
3833no_zone:
3797 /* 3834 /*
3798 * Runtime PM, block IO and its error handling path can deadlock 3835 * Runtime PM, block IO and its error handling path can deadlock
3799 * because I/O on the device might not complete. 3836 * because I/O on the device might not complete.
@@ -3805,21 +3842,10 @@ retry_cpuset:
3805 * Restore the original nodemask if it was potentially replaced with 3842 * Restore the original nodemask if it was potentially replaced with
3806 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 3843 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
3807 */ 3844 */
3808 if (cpusets_enabled()) 3845 if (unlikely(ac.nodemask != nodemask))
3809 ac.nodemask = nodemask; 3846 ac.nodemask = nodemask;
3810 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
3811 3847
3812no_zone: 3848 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
3813 /*
3814 * When updating a task's mems_allowed, it is possible to race with
3815 * parallel threads in such a way that an allocation can fail while
3816 * the mask is being updated. If a page allocation is about to fail,
3817 * check if the cpuset changed during allocation and if so, retry.
3818 */
3819 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) {
3820 alloc_mask = gfp_mask;
3821 goto retry_cpuset;
3822 }
3823 3849
3824out: 3850out:
3825 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page && 3851 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
@@ -7248,6 +7274,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
7248 .zone = page_zone(pfn_to_page(start)), 7274 .zone = page_zone(pfn_to_page(start)),
7249 .mode = MIGRATE_SYNC, 7275 .mode = MIGRATE_SYNC,
7250 .ignore_skip_hint = true, 7276 .ignore_skip_hint = true,
7277 .gfp_mask = GFP_KERNEL,
7251 }; 7278 };
7252 INIT_LIST_HEAD(&cc.migratepages); 7279 INIT_LIST_HEAD(&cc.migratepages);
7253 7280
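
The page_alloc.c rework moves the cpuset race detection from the fast-path wrapper into the slowpath itself, and resets the retry counters plus the zonelist iterator on every pass, since a concurrent mems_allowed update can invalidate both. The seqcount idiom underneath, sketched in isolation (demo_attempt() is a hypothetical stand-in for the actual allocation stages):

#include <linux/cpuset.h>

static struct page *demo_attempt(gfp_t gfp, unsigned int order); /* hypothetical */

static struct page *demo_alloc(gfp_t gfp, unsigned int order)
{
        struct page *page;
        unsigned int cookie;

retry_cpuset:
        cookie = read_mems_allowed_begin();     /* seqcount snapshot */
        /* recompute iterator state here: the nodemask may have changed */
        page = demo_attempt(gfp, order);
        if (!page && read_mems_allowed_retry(cookie))
                goto retry_cpuset;      /* raced with a cpuset update, not OOM */
        return page;
}
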
diff --git a/mm/slub.c b/mm/slub.c
index 067598a00849..7aa6f433f4de 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -496,10 +496,11 @@ static inline int check_valid_pointer(struct kmem_cache *s,
496 return 1; 496 return 1;
497} 497}
498 498
499static void print_section(char *text, u8 *addr, unsigned int length) 499static void print_section(char *level, char *text, u8 *addr,
500 unsigned int length)
500{ 501{
501 metadata_access_enable(); 502 metadata_access_enable();
502 print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr, 503 print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
503 length, 1); 504 length, 1);
504 metadata_access_disable(); 505 metadata_access_disable();
505} 506}
@@ -636,14 +637,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
636 p, p - addr, get_freepointer(s, p)); 637 p, p - addr, get_freepointer(s, p));
637 638
638 if (s->flags & SLAB_RED_ZONE) 639 if (s->flags & SLAB_RED_ZONE)
639 print_section("Redzone ", p - s->red_left_pad, s->red_left_pad); 640 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
641 s->red_left_pad);
640 else if (p > addr + 16) 642 else if (p > addr + 16)
641 print_section("Bytes b4 ", p - 16, 16); 643 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
642 644
643 print_section("Object ", p, min_t(unsigned long, s->object_size, 645 print_section(KERN_ERR, "Object ", p,
644 PAGE_SIZE)); 646 min_t(unsigned long, s->object_size, PAGE_SIZE));
645 if (s->flags & SLAB_RED_ZONE) 647 if (s->flags & SLAB_RED_ZONE)
646 print_section("Redzone ", p + s->object_size, 648 print_section(KERN_ERR, "Redzone ", p + s->object_size,
647 s->inuse - s->object_size); 649 s->inuse - s->object_size);
648 650
649 if (s->offset) 651 if (s->offset)
@@ -658,7 +660,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
658 660
659 if (off != size_from_object(s)) 661 if (off != size_from_object(s))
660 /* Beginning of the filler is the free pointer */ 662 /* Beginning of the filler is the free pointer */
661 print_section("Padding ", p + off, size_from_object(s) - off); 663 print_section(KERN_ERR, "Padding ", p + off,
664 size_from_object(s) - off);
662 665
663 dump_stack(); 666 dump_stack();
664} 667}
@@ -820,7 +823,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
820 end--; 823 end--;
821 824
822 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); 825 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
823 print_section("Padding ", end - remainder, remainder); 826 print_section(KERN_ERR, "Padding ", end - remainder, remainder);
824 827
825 restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end); 828 restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
826 return 0; 829 return 0;
@@ -973,7 +976,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
973 page->freelist); 976 page->freelist);
974 977
975 if (!alloc) 978 if (!alloc)
976 print_section("Object ", (void *)object, 979 print_section(KERN_INFO, "Object ", (void *)object,
977 s->object_size); 980 s->object_size);
978 981
979 dump_stack(); 982 dump_stack();
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 42bfbd801a1b..ead18ca836de 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -474,7 +474,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
474 primary_if = batadv_primary_if_get_selected(bat_priv); 474 primary_if = batadv_primary_if_get_selected(bat_priv);
475 if (!primary_if) { 475 if (!primary_if) {
476 ret = -EINVAL; 476 ret = -EINVAL;
477 goto put_primary_if; 477 goto free_skb;
478 } 478 }
479 479
480 /* Create one header to be copied to all fragments */ 480 /* Create one header to be copied to all fragments */
@@ -502,7 +502,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
502 skb_fragment = batadv_frag_create(skb, &frag_header, mtu); 502 skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
503 if (!skb_fragment) { 503 if (!skb_fragment) {
504 ret = -ENOMEM; 504 ret = -ENOMEM;
505 goto free_skb; 505 goto put_primary_if;
506 } 506 }
507 507
508 batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX); 508 batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
@@ -511,7 +511,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
511 ret = batadv_send_unicast_skb(skb_fragment, neigh_node); 511 ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
512 if (ret != NET_XMIT_SUCCESS) { 512 if (ret != NET_XMIT_SUCCESS) {
513 ret = NET_XMIT_DROP; 513 ret = NET_XMIT_DROP;
514 goto free_skb; 514 goto put_primary_if;
515 } 515 }
516 516
517 frag_header.no++; 517 frag_header.no++;
@@ -519,7 +519,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
519 /* The initial check in this function should cover this case */ 519 /* The initial check in this function should cover this case */
520 if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) { 520 if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
521 ret = -EINVAL; 521 ret = -EINVAL;
522 goto free_skb; 522 goto put_primary_if;
523 } 523 }
524 } 524 }
525 525
@@ -527,7 +527,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
527 if (batadv_skb_head_push(skb, header_size) < 0 || 527 if (batadv_skb_head_push(skb, header_size) < 0 ||
528 pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) { 528 pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
529 ret = -ENOMEM; 529 ret = -ENOMEM;
530 goto free_skb; 530 goto put_primary_if;
531 } 531 }
532 532
533 memcpy(skb->data, &frag_header, header_size); 533 memcpy(skb->data, &frag_header, header_size);
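
The relabeling restores the standard unwind discipline for these error paths: jump to the label that releases exactly what has been acquired so far, with cleanup labels stacked in reverse order of acquisition. Before the fix, the !primary_if path tried to drop a reference it never took, while later failures skipped the drop entirely and leaked it. The shape of the idiom, with hypothetical demo_* helpers (this sketch consumes the skb on all paths, as the fragmentation code does):

static int demo_send(struct sk_buff *skb)
{
        struct demo_hard_iface *primary_if;
        int ret = 0;

        primary_if = demo_primary_if_get();     /* takes a reference */
        if (!primary_if) {
                ret = -EINVAL;
                goto free_skb;          /* nothing to put yet */
        }

        if (demo_build_fragments(skb) < 0) {
                ret = -ENOMEM;
                goto put_primary_if;    /* reference held: drop it first */
        }

put_primary_if:
        demo_primary_if_put(primary_if);        /* reverse order of acquisition */
free_skb:
        kfree_skb(skb);
        return ret;
}
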
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 6c087cd049b9..1ca25498fe4d 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -786,20 +786,6 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
786 return 0; 786 return 0;
787} 787}
788 788
789static int br_dev_newlink(struct net *src_net, struct net_device *dev,
790 struct nlattr *tb[], struct nlattr *data[])
791{
792 struct net_bridge *br = netdev_priv(dev);
793
794 if (tb[IFLA_ADDRESS]) {
795 spin_lock_bh(&br->lock);
796 br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
797 spin_unlock_bh(&br->lock);
798 }
799
800 return register_netdevice(dev);
801}
802
803static int br_port_slave_changelink(struct net_device *brdev, 789static int br_port_slave_changelink(struct net_device *brdev,
804 struct net_device *dev, 790 struct net_device *dev,
805 struct nlattr *tb[], 791 struct nlattr *tb[],
@@ -1120,6 +1106,25 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
1120 return 0; 1106 return 0;
1121} 1107}
1122 1108
1109static int br_dev_newlink(struct net *src_net, struct net_device *dev,
1110 struct nlattr *tb[], struct nlattr *data[])
1111{
1112 struct net_bridge *br = netdev_priv(dev);
1113 int err;
1114
1115 if (tb[IFLA_ADDRESS]) {
1116 spin_lock_bh(&br->lock);
1117 br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
1118 spin_unlock_bh(&br->lock);
1119 }
1120
1121 err = br_changelink(dev, tb, data);
1122 if (err)
1123 return err;
1124
1125 return register_netdevice(dev);
1126}
1127
1123static size_t br_get_size(const struct net_device *brdev) 1128static size_t br_get_size(const struct net_device *brdev)
1124{ 1129{
1125 return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */ 1130 return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 3949ce70be07..292e33bd916e 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -214,7 +214,7 @@ static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
214 SKCIPHER_REQUEST_ON_STACK(req, key->tfm); 214 SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
215 struct sg_table sgt; 215 struct sg_table sgt;
216 struct scatterlist prealloc_sg; 216 struct scatterlist prealloc_sg;
217 char iv[AES_BLOCK_SIZE]; 217 char iv[AES_BLOCK_SIZE] __aligned(8);
218 int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1)); 218 int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
219 int crypt_len = encrypt ? in_len + pad_byte : in_len; 219 int crypt_len = encrypt ? in_len + pad_byte : in_len;
220 int ret; 220 int ret;
diff --git a/net/core/dev.c b/net/core/dev.c
index c8f1f67ff16c..be11abac89b3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2773,9 +2773,9 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
2773 if (skb->ip_summed != CHECKSUM_NONE && 2773 if (skb->ip_summed != CHECKSUM_NONE &&
2774 !can_checksum_protocol(features, type)) { 2774 !can_checksum_protocol(features, type)) {
2775 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 2775 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2776 } else if (illegal_highdma(skb->dev, skb)) {
2777 features &= ~NETIF_F_SG;
2778 } 2776 }
2777 if (illegal_highdma(skb->dev, skb))
2778 features &= ~NETIF_F_SG;
2779 2779
2780 return features; 2780 return features;
2781} 2781}
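
Unchaining the two tests matters because they gate independent capabilities: a device that cannot DMA from high memory must lose scatter-gather even when the checksum branch has already stripped features. Condensed sketch of the resulting logic (helper names as used above; this is not the full harmonize_features()):

static netdev_features_t demo_harmonize(struct sk_buff *skb,
                                        netdev_features_t features, __be16 type)
{
        if (skb->ip_summed != CHECKSUM_NONE &&
            !can_checksum_protocol(features, type))
                features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
        if (illegal_highdma(skb->dev, skb))     /* no longer an else-if */
                features &= ~NETIF_F_SG;
        return features;
}
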
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index e23766c7e3ba..236a21e3c878 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1712,7 +1712,7 @@ static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
1712static noinline_for_stack int ethtool_set_channels(struct net_device *dev, 1712static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
1713 void __user *useraddr) 1713 void __user *useraddr)
1714{ 1714{
1715 struct ethtool_channels channels, max; 1715 struct ethtool_channels channels, max = { .cmd = ETHTOOL_GCHANNELS };
1716 u32 max_rx_in_use = 0; 1716 u32 max_rx_in_use = 0;
1717 1717
1718 if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels) 1718 if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels)
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 40ef8ae8d93d..03600459bcfd 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -386,6 +386,7 @@ static const struct lwtunnel_encap_ops bpf_encap_ops = {
386 .fill_encap = bpf_fill_encap_info, 386 .fill_encap = bpf_fill_encap_info,
387 .get_encap_size = bpf_encap_nlsize, 387 .get_encap_size = bpf_encap_nlsize,
388 .cmp_encap = bpf_encap_cmp, 388 .cmp_encap = bpf_encap_cmp,
389 .owner = THIS_MODULE,
389}; 390};
390 391
391static int __init bpf_lwt_init(void) 392static int __init bpf_lwt_init(void)
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index a5d4e866ce88..c23465005f2f 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -26,6 +26,7 @@
26#include <net/lwtunnel.h> 26#include <net/lwtunnel.h>
27#include <net/rtnetlink.h> 27#include <net/rtnetlink.h>
28#include <net/ip6_fib.h> 28#include <net/ip6_fib.h>
29#include <net/nexthop.h>
29 30
30#ifdef CONFIG_MODULES 31#ifdef CONFIG_MODULES
31 32
@@ -114,25 +115,77 @@ int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
114 ret = -EOPNOTSUPP; 115 ret = -EOPNOTSUPP;
115 rcu_read_lock(); 116 rcu_read_lock();
116 ops = rcu_dereference(lwtun_encaps[encap_type]); 117 ops = rcu_dereference(lwtun_encaps[encap_type]);
118 if (likely(ops && ops->build_state && try_module_get(ops->owner))) {
119 ret = ops->build_state(dev, encap, family, cfg, lws);
120 if (ret)
121 module_put(ops->owner);
122 }
123 rcu_read_unlock();
124
125 return ret;
126}
127EXPORT_SYMBOL(lwtunnel_build_state);
128
129int lwtunnel_valid_encap_type(u16 encap_type)
130{
131 const struct lwtunnel_encap_ops *ops;
132 int ret = -EINVAL;
133
134 if (encap_type == LWTUNNEL_ENCAP_NONE ||
135 encap_type > LWTUNNEL_ENCAP_MAX)
136 return ret;
137
138 rcu_read_lock();
139 ops = rcu_dereference(lwtun_encaps[encap_type]);
140 rcu_read_unlock();
117#ifdef CONFIG_MODULES 141#ifdef CONFIG_MODULES
118 if (!ops) { 142 if (!ops) {
119 const char *encap_type_str = lwtunnel_encap_str(encap_type); 143 const char *encap_type_str = lwtunnel_encap_str(encap_type);
120 144
121 if (encap_type_str) { 145 if (encap_type_str) {
122 rcu_read_unlock(); 146 __rtnl_unlock();
123 request_module("rtnl-lwt-%s", encap_type_str); 147 request_module("rtnl-lwt-%s", encap_type_str);
148 rtnl_lock();
149
124 rcu_read_lock(); 150 rcu_read_lock();
125 ops = rcu_dereference(lwtun_encaps[encap_type]); 151 ops = rcu_dereference(lwtun_encaps[encap_type]);
152 rcu_read_unlock();
126 } 153 }
127 } 154 }
128#endif 155#endif
129 if (likely(ops && ops->build_state)) 156 return ops ? 0 : -EOPNOTSUPP;
130 ret = ops->build_state(dev, encap, family, cfg, lws); 157}
131 rcu_read_unlock(); 158EXPORT_SYMBOL(lwtunnel_valid_encap_type);
132 159
133 return ret; 160int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
161{
162 struct rtnexthop *rtnh = (struct rtnexthop *)attr;
163 struct nlattr *nla_entype;
164 struct nlattr *attrs;
165 struct nlattr *nla;
166 u16 encap_type;
167 int attrlen;
168
169 while (rtnh_ok(rtnh, remaining)) {
170 attrlen = rtnh_attrlen(rtnh);
171 if (attrlen > 0) {
172 attrs = rtnh_attrs(rtnh);
173 nla = nla_find(attrs, attrlen, RTA_ENCAP);
174 nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
175
176 if (nla_entype) {
177 encap_type = nla_get_u16(nla_entype);
178
179 if (lwtunnel_valid_encap_type(encap_type) != 0)
180 return -EOPNOTSUPP;
181 }
182 }
183 rtnh = rtnh_next(rtnh, &remaining);
184 }
185
186 return 0;
134} 187}
135EXPORT_SYMBOL(lwtunnel_build_state); 188EXPORT_SYMBOL(lwtunnel_valid_encap_type_attr);
136 189
137void lwtstate_free(struct lwtunnel_state *lws) 190void lwtstate_free(struct lwtunnel_state *lws)
138{ 191{
@@ -144,6 +197,7 @@ void lwtstate_free(struct lwtunnel_state *lws)
144 } else { 197 } else {
145 kfree(lws); 198 kfree(lws);
146 } 199 }
200 module_put(ops->owner);
147} 201}
148EXPORT_SYMBOL(lwtstate_free); 202EXPORT_SYMBOL(lwtstate_free);
149 203
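
The new owner field closes a module lifetime hole: lwtunnel_build_state() now pins the encap provider with try_module_get() while a state is built, drops the reference on build failure, and lwtstate_free() drops it when the last state dies, so the module cannot be unloaded under live tunnel state. Registration side, sketched (callback body and demo_* names hypothetical; the build_state signature follows the call above):

static int demo_build_state(struct net_device *dev, struct nlattr *encap,
                            unsigned int family, const void *cfg,
                            struct lwtunnel_state **ts)
{
        return -EOPNOTSUPP;             /* placeholder */
}

static const struct lwtunnel_encap_ops demo_encap_ops = {
        .build_state    = demo_build_state,
        .owner          = THIS_MODULE,  /* pins this module while any state
                                         * built here remains alive */
};
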
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 08bcdc3d1717..cef60a4a2803 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -227,7 +227,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
227 opt = ireq->ipv6_opt; 227 opt = ireq->ipv6_opt;
228 if (!opt) 228 if (!opt)
229 opt = rcu_dereference(np->opt); 229 opt = rcu_dereference(np->opt);
230 err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); 230 err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);
231 rcu_read_unlock(); 231 rcu_read_unlock();
232 err = net_xmit_eval(err); 232 err = net_xmit_eval(err);
233 } 233 }
@@ -281,7 +281,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
281 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); 281 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
282 if (!IS_ERR(dst)) { 282 if (!IS_ERR(dst)) {
283 skb_dst_set(skb, dst); 283 skb_dst_set(skb, dst);
284 ip6_xmit(ctl_sk, skb, &fl6, NULL, 0); 284 ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
285 DCCP_INC_STATS(DCCP_MIB_OUTSEGS); 285 DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
286 DCCP_INC_STATS(DCCP_MIB_OUTRSTS); 286 DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
287 return; 287 return;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index b8e58689a9a1..9750dd6f8c17 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1116,10 +1116,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
1116 /* Use already configured phy mode */ 1116 /* Use already configured phy mode */
1117 if (p->phy_interface == PHY_INTERFACE_MODE_NA) 1117 if (p->phy_interface == PHY_INTERFACE_MODE_NA)
1118 p->phy_interface = p->phy->interface; 1118 p->phy_interface = p->phy->interface;
1119 phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, 1119 return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
1120 p->phy_interface); 1120 p->phy_interface);
1121
1122 return 0;
1123} 1121}
1124 1122
1125static int dsa_slave_phy_setup(struct dsa_slave_priv *p, 1123static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
@@ -1214,6 +1212,8 @@ int dsa_slave_suspend(struct net_device *slave_dev)
1214{ 1212{
1215 struct dsa_slave_priv *p = netdev_priv(slave_dev); 1213 struct dsa_slave_priv *p = netdev_priv(slave_dev);
1216 1214
1215 netif_device_detach(slave_dev);
1216
1217 if (p->phy) { 1217 if (p->phy) {
1218 phy_stop(p->phy); 1218 phy_stop(p->phy);
1219 p->old_pause = -1; 1219 p->old_pause = -1;
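
Two independent DSA fixes: dsa_slave_phy_connect() now propagates the phy_connect_direct() return value instead of silently reporting success, and the suspend path detaches the net device before stopping the PHY, so the core stops calling into the driver before the link machinery goes away. A sketch of that ordering, assuming a hypothetical my_priv() accessor:

	static int my_slave_suspend(struct net_device *dev)
	{
		struct phy_device *phy = my_priv(dev)->phy;

		/* Mark the device absent first: this fences off further
		 * ndo callbacks and transmissions racing the PHY stop. */
		netif_device_detach(dev);

		if (phy)
			phy_stop(phy);
		return 0;
	}
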
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index eae0332b0e8c..7db2ad2e82d3 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -46,6 +46,7 @@
46#include <net/rtnetlink.h> 46#include <net/rtnetlink.h>
47#include <net/xfrm.h> 47#include <net/xfrm.h>
48#include <net/l3mdev.h> 48#include <net/l3mdev.h>
49#include <net/lwtunnel.h>
49#include <trace/events/fib.h> 50#include <trace/events/fib.h>
50 51
51#ifndef CONFIG_IP_MULTIPLE_TABLES 52#ifndef CONFIG_IP_MULTIPLE_TABLES
@@ -677,6 +678,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
677 cfg->fc_mx_len = nla_len(attr); 678 cfg->fc_mx_len = nla_len(attr);
678 break; 679 break;
679 case RTA_MULTIPATH: 680 case RTA_MULTIPATH:
681 err = lwtunnel_valid_encap_type_attr(nla_data(attr),
682 nla_len(attr));
683 if (err < 0)
684 goto errout;
680 cfg->fc_mp = nla_data(attr); 685 cfg->fc_mp = nla_data(attr);
681 cfg->fc_mp_len = nla_len(attr); 686 cfg->fc_mp_len = nla_len(attr);
682 break; 687 break;
@@ -691,6 +696,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
691 break; 696 break;
692 case RTA_ENCAP_TYPE: 697 case RTA_ENCAP_TYPE:
693 cfg->fc_encap_type = nla_get_u16(attr); 698 cfg->fc_encap_type = nla_get_u16(attr);
699 err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
700 if (err < 0)
701 goto errout;
694 break; 702 break;
695 } 703 }
696 } 704 }
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index fac275c48108..b67719f45953 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1629,6 +1629,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
1629 sk->sk_protocol = ip_hdr(skb)->protocol; 1629 sk->sk_protocol = ip_hdr(skb)->protocol;
1630 sk->sk_bound_dev_if = arg->bound_dev_if; 1630 sk->sk_bound_dev_if = arg->bound_dev_if;
1631 sk->sk_sndbuf = sysctl_wmem_default; 1631 sk->sk_sndbuf = sysctl_wmem_default;
1632 sk->sk_mark = fl4.flowi4_mark;
1632 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, 1633 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
1633 len, 0, &ipc, &rt, MSG_DONTWAIT); 1634 len, 0, &ipc, &rt, MSG_DONTWAIT);
1634 if (unlikely(err)) { 1635 if (unlikely(err)) {
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 5476110598f7..9d6c10096d44 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -311,6 +311,7 @@ static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
311 .fill_encap = ip_tun_fill_encap_info, 311 .fill_encap = ip_tun_fill_encap_info,
312 .get_encap_size = ip_tun_encap_nlsize, 312 .get_encap_size = ip_tun_encap_nlsize,
313 .cmp_encap = ip_tun_cmp_encap, 313 .cmp_encap = ip_tun_cmp_encap,
314 .owner = THIS_MODULE,
314}; 315};
315 316
316static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = { 317static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
@@ -401,6 +402,7 @@ static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
401 .fill_encap = ip6_tun_fill_encap_info, 402 .fill_encap = ip6_tun_fill_encap_info,
402 .get_encap_size = ip6_tun_encap_nlsize, 403 .get_encap_size = ip6_tun_encap_nlsize,
403 .cmp_encap = ip_tun_cmp_encap, 404 .cmp_encap = ip_tun_cmp_encap,
405 .owner = THIS_MODULE,
404}; 406};
405 407
406void __init ip_tunnel_core_init(void) 408void __init ip_tunnel_core_init(void)
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index a6b8c1a4102b..0a783cd73faf 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -144,7 +144,12 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
144 rcu_read_lock_bh(); 144 rcu_read_lock_bh();
145 c = __clusterip_config_find(net, clusterip); 145 c = __clusterip_config_find(net, clusterip);
146 if (c) { 146 if (c) {
147 if (!c->pde || unlikely(!atomic_inc_not_zero(&c->refcount))) 147#ifdef CONFIG_PROC_FS
148 if (!c->pde)
149 c = NULL;
150 else
151#endif
152 if (unlikely(!atomic_inc_not_zero(&c->refcount)))
148 c = NULL; 153 c = NULL;
149 else if (entry) 154 else if (entry)
150 atomic_inc(&c->entries); 155 atomic_inc(&c->entries);
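
The CLUSTERIP change keeps the lookup idiom but only insists on a proc entry when CONFIG_PROC_FS is built in. The idiom itself deserves a note: an object found under rcu_read_lock() may already be dying, so the lookup must refuse the 0 -> 1 refcount transition rather than resurrect it. A generic sketch; my_cfg is illustrative:

	struct my_cfg {
		atomic_t refcount;	/* 0 means teardown in progress */
	};

	static struct my_cfg *my_cfg_get(struct my_cfg *c)
	{
		/* atomic_inc_not_zero() fails on a zero count, so a
		 * concurrently freed entry cannot be handed out here. */
		if (c && unlikely(!atomic_inc_not_zero(&c->refcount)))
			c = NULL;
		return c;
	}
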
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index f273098e48fd..37fb9552e858 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -63,10 +63,10 @@ static bool rpfilter_lookup_reverse(struct net *net, struct flowi4 *fl4,
63 return dev_match || flags & XT_RPFILTER_LOOSE; 63 return dev_match || flags & XT_RPFILTER_LOOSE;
64} 64}
65 65
66static bool rpfilter_is_local(const struct sk_buff *skb) 66static bool
67rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
67{ 68{
68 const struct rtable *rt = skb_rtable(skb); 69 return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
69 return rt && (rt->rt_flags & RTCF_LOCAL);
70} 70}
71 71
72static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) 72static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
@@ -79,7 +79,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
79 info = par->matchinfo; 79 info = par->matchinfo;
80 invert = info->flags & XT_RPFILTER_INVERT; 80 invert = info->flags & XT_RPFILTER_INVERT;
81 81
82 if (rpfilter_is_local(skb)) 82 if (rpfilter_is_loopback(skb, xt_in(par)))
83 return true ^ invert; 83 return true ^ invert;
84 84
85 iph = ip_hdr(skb); 85 iph = ip_hdr(skb);
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index fd8220213afc..146d86105183 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -126,6 +126,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
126 /* ip_route_me_harder expects skb->dst to be set */ 126 /* ip_route_me_harder expects skb->dst to be set */
127 skb_dst_set_noref(nskb, skb_dst(oldskb)); 127 skb_dst_set_noref(nskb, skb_dst(oldskb));
128 128
129 nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
130
129 skb_reserve(nskb, LL_MAX_HEADER); 131 skb_reserve(nskb, LL_MAX_HEADER);
130 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, 132 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
131 ip4_dst_hoplimit(skb_dst(nskb))); 133 ip4_dst_hoplimit(skb_dst(nskb)));
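
nf_send_reset() now stamps the generated RST with IP4_REPLY_MARK(), which, per the existing fwmark_reflect sysctl semantics, reflects the original packet's mark onto the reply (or leaves it 0 when the sysctl is off) so policy routing sees both directions consistently. The IPv6 counterpart further down does the same via IP6_REPLY_MARK on both the flow and the reply skb.
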
diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c
index 965b1a161369..2981291910dd 100644
--- a/net/ipv4/netfilter/nft_fib_ipv4.c
+++ b/net/ipv4/netfilter/nft_fib_ipv4.c
@@ -26,13 +26,6 @@ static __be32 get_saddr(__be32 addr)
26 return addr; 26 return addr;
27} 27}
28 28
29static bool fib4_is_local(const struct sk_buff *skb)
30{
31 const struct rtable *rt = skb_rtable(skb);
32
33 return rt && (rt->rt_flags & RTCF_LOCAL);
34}
35
36#define DSCP_BITS 0xfc 29#define DSCP_BITS 0xfc
37 30
38void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs, 31void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
@@ -95,8 +88,10 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
95 else 88 else
96 oif = NULL; 89 oif = NULL;
97 90
98 if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib4_is_local(pkt->skb)) { 91 if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
99 nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX); 92 nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
93 nft_fib_store_result(dest, priv->result, pkt,
94 nft_in(pkt)->ifindex);
100 return; 95 return;
101 } 96 }
102 97
@@ -131,7 +126,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
131 switch (res.type) { 126 switch (res.type) {
132 case RTN_UNICAST: 127 case RTN_UNICAST:
133 break; 128 break;
134 case RTN_LOCAL: /* should not appear here, see fib4_is_local() above */ 129 case RTN_LOCAL: /* Should not see RTN_LOCAL here */
135 return; 130 return;
136 default: 131 default:
137 break; 132 break;
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 9674bec4a0f8..8ea4e9787f82 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -205,6 +205,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
205 * scaled. So correct it appropriately. 205 * scaled. So correct it appropriately.
206 */ 206 */
207 tp->snd_wnd = ntohs(tcp_hdr(skb)->window); 207 tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
208 tp->max_window = tp->snd_wnd;
208 209
209 /* Activate the retrans timer so that SYNACK can be retransmitted. 210 /* Activate the retrans timer so that SYNACK can be retransmitted.
210 * The request socket is not added to the ehash 211 * The request socket is not added to the ehash
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3de6eba378ad..27c95acbb52f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5029,7 +5029,7 @@ static void tcp_check_space(struct sock *sk)
5029 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { 5029 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
5030 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); 5030 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
5031 /* pairs with tcp_poll() */ 5031 /* pairs with tcp_poll() */
5032 smp_mb__after_atomic(); 5032 smp_mb();
5033 if (sk->sk_socket && 5033 if (sk->sk_socket &&
5034 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 5034 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
5035 tcp_new_space(sk); 5035 tcp_new_space(sk);
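
The one-line barrier change in tcp_check_space() is not cosmetic: smp_mb__after_atomic() is only defined directly after an atomic read-modify-write, and sock_reset_flag() is a plain non-atomic bit clear (sk_flags is protected by the socket lock), so a full smp_mb() is needed to order the flag update against the SOCK_NOSPACE test. A comment-level sketch of the pairing, reconstructed from the "pairs with tcp_poll()" comment rather than copied from tcp_poll():

	/*
	 * writer: tcp_check_space()        reader: tcp_poll()
	 * -------------------------        ---------------------------
	 * clear SOCK_QUEUE_SHRUNK          set_bit(SOCK_NOSPACE, ...)
	 * smp_mb();                        barrier on the reader side
	 * test_bit(SOCK_NOSPACE, ...)      re-check writeable space
	 *
	 * smp_mb__after_atomic() would only be valid if the preceding
	 * store were an atomic RMW; sock_reset_flag() is not one.
	 */
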
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 4c47656b9f09..156ed578d3c0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5568,8 +5568,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
5568 struct net_device *dev; 5568 struct net_device *dev;
5569 struct inet6_dev *idev; 5569 struct inet6_dev *idev;
5570 5570
5571 rcu_read_lock(); 5571 for_each_netdev(net, dev) {
5572 for_each_netdev_rcu(net, dev) {
5573 idev = __in6_dev_get(dev); 5572 idev = __in6_dev_get(dev);
5574 if (idev) { 5573 if (idev) {
5575 int changed = (!idev->cnf.disable_ipv6) ^ (!newf); 5574 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
@@ -5578,7 +5577,6 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
5578 dev_disable_change(idev); 5577 dev_disable_change(idev);
5579 } 5578 }
5580 } 5579 }
5581 rcu_read_unlock();
5582} 5580}
5583 5581
5584static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf) 5582static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
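
addrconf_disable_change() swaps the RCU device walk for the plain for_each_netdev() iterator: the loop body can end up in code that sleeps, which is illegal under rcu_read_lock(), and the sysctl path that calls it already holds RTNL, which keeps the device list stable on its own. The general rule, sketched (my_update stands in for the per-device work):

	ASSERT_RTNL();			/* list pinned by RTNL, not RCU */
	for_each_netdev(net, dev)
		my_update(dev);		/* may sleep; forbidden under RCU */
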
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index a7bc54ab46e2..13b5e85fe0d5 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -238,6 +238,7 @@ static const struct lwtunnel_encap_ops ila_encap_ops = {
238 .fill_encap = ila_fill_encap_info, 238 .fill_encap = ila_fill_encap_info,
239 .get_encap_size = ila_encap_nlsize, 239 .get_encap_size = ila_encap_nlsize,
240 .cmp_encap = ila_encap_cmp, 240 .cmp_encap = ila_encap_cmp,
241 .owner = THIS_MODULE,
241}; 242};
242 243
243int ila_lwt_init(void) 244int ila_lwt_init(void)
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 97074c459fe6..9a31d13bf180 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -136,7 +136,7 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
136 /* Restore final destination back after routing done */ 136 /* Restore final destination back after routing done */
137 fl6.daddr = sk->sk_v6_daddr; 137 fl6.daddr = sk->sk_v6_daddr;
138 138
139 res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt), 139 res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
140 np->tclass); 140 np->tclass);
141 rcu_read_unlock(); 141 rcu_read_unlock();
142 return res; 142 return res;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 1ba7567b4d8f..51b9835b3176 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -577,6 +577,9 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
577 return -1; 577 return -1;
578 578
579 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); 579 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
580 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
581 ipv6h = ipv6_hdr(skb);
582
580 if (offset > 0) { 583 if (offset > 0) {
581 struct ipv6_tlv_tnl_enc_lim *tel; 584 struct ipv6_tlv_tnl_enc_lim *tel;
582 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; 585 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
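
The ip6_gre hunk encodes a general skb rule: any helper that may call pskb_may_pull() can reallocate skb->head, after which every pointer previously derived from the packet (here ipv6h) dangles and must be recomputed. Sketch, with my_parse_headers() and my_use_option() as hypothetical helpers:

	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	__u16 offset;

	offset = my_parse_headers(skb);	/* may reallocate skb->head */
	ipv6h = ipv6_hdr(skb);		/* reload; old pointer may dangle */
	if (offset > 0)
		my_use_option(ipv6h, offset);

The same reload is added to ip6ip6_tnl_xmit() in the ip6_tunnel hunks below.
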
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 38122d04fadc..2c0df09e9036 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -172,7 +172,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
172 * which are using proper atomic operations or spinlocks. 172 * which are using proper atomic operations or spinlocks.
173 */ 173 */
174int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, 174int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
175 struct ipv6_txoptions *opt, int tclass) 175 __u32 mark, struct ipv6_txoptions *opt, int tclass)
176{ 176{
177 struct net *net = sock_net(sk); 177 struct net *net = sock_net(sk);
178 const struct ipv6_pinfo *np = inet6_sk(sk); 178 const struct ipv6_pinfo *np = inet6_sk(sk);
@@ -240,7 +240,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
240 240
241 skb->protocol = htons(ETH_P_IPV6); 241 skb->protocol = htons(ETH_P_IPV6);
242 skb->priority = sk->sk_priority; 242 skb->priority = sk->sk_priority;
243 skb->mark = sk->sk_mark; 243 skb->mark = mark;
244 244
245 mtu = dst_mtu(dst); 245 mtu = dst_mtu(dst);
246 if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) { 246 if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
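
With the extra __u32 mark argument, ip6_xmit() no longer assumes the mark comes from a full socket. The callers converted elsewhere in this merge show both conventions: real sockets pass sk->sk_mark, while the shared control sockets used for resets and replies pass the mark carried in the flow (or 0 where none applies):

	/* full socket (tcp_v6_send_synack, dccp, sctp): */
	err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);

	/* control-socket reply (tcp_v6_send_response): */
	ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
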
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 753d6d0860fb..ff8ee06491c3 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -400,18 +400,19 @@ ip6_tnl_dev_uninit(struct net_device *dev)
400 400
401__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) 401__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
402{ 402{
403 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw; 403 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
404 __u8 nexthdr = ipv6h->nexthdr; 404 unsigned int nhoff = raw - skb->data;
405 __u16 off = sizeof(*ipv6h); 405 unsigned int off = nhoff + sizeof(*ipv6h);
406 u8 next, nexthdr = ipv6h->nexthdr;
406 407
407 while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) { 408 while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
408 __u16 optlen = 0;
409 struct ipv6_opt_hdr *hdr; 409 struct ipv6_opt_hdr *hdr;
410 if (raw + off + sizeof(*hdr) > skb->data && 410 u16 optlen;
411 !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr))) 411
412 if (!pskb_may_pull(skb, off + sizeof(*hdr)))
412 break; 413 break;
413 414
414 hdr = (struct ipv6_opt_hdr *) (raw + off); 415 hdr = (struct ipv6_opt_hdr *)(skb->data + off);
415 if (nexthdr == NEXTHDR_FRAGMENT) { 416 if (nexthdr == NEXTHDR_FRAGMENT) {
416 struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr; 417 struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
417 if (frag_hdr->frag_off) 418 if (frag_hdr->frag_off)
@@ -422,20 +423,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
422 } else { 423 } else {
423 optlen = ipv6_optlen(hdr); 424 optlen = ipv6_optlen(hdr);
424 } 425 }
426 /* cache hdr->nexthdr, since pskb_may_pull() might
427 * invalidate hdr
428 */
429 next = hdr->nexthdr;
425 if (nexthdr == NEXTHDR_DEST) { 430 if (nexthdr == NEXTHDR_DEST) {
426 __u16 i = off + 2; 431 u16 i = 2;
432
433 /* Remember : hdr is no longer valid at this point. */
434 if (!pskb_may_pull(skb, off + optlen))
435 break;
436
427 while (1) { 437 while (1) {
428 struct ipv6_tlv_tnl_enc_lim *tel; 438 struct ipv6_tlv_tnl_enc_lim *tel;
429 439
430 /* No more room for encapsulation limit */ 440 /* No more room for encapsulation limit */
431 if (i + sizeof (*tel) > off + optlen) 441 if (i + sizeof(*tel) > optlen)
432 break; 442 break;
433 443
434 tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i]; 444 tel = (struct ipv6_tlv_tnl_enc_lim *) skb->data + off + i;
435 /* return index of option if found and valid */ 445 /* return index of option if found and valid */
436 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT && 446 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
437 tel->length == 1) 447 tel->length == 1)
438 return i; 448 return i + off - nhoff;
439 /* else jump to next option */ 449 /* else jump to next option */
440 if (tel->type) 450 if (tel->type)
441 i += tel->length + 2; 451 i += tel->length + 2;
@@ -443,7 +453,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
443 i++; 453 i++;
444 } 454 }
445 } 455 }
446 nexthdr = hdr->nexthdr; 456 nexthdr = next;
447 off += optlen; 457 off += optlen;
448 } 458 }
449 return 0; 459 return 0;
@@ -1303,6 +1313,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1303 fl6.flowlabel = key->label; 1313 fl6.flowlabel = key->label;
1304 } else { 1314 } else {
1305 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); 1315 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1316 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
1317 ipv6h = ipv6_hdr(skb);
1306 if (offset > 0) { 1318 if (offset > 0) {
1307 struct ipv6_tlv_tnl_enc_lim *tel; 1319 struct ipv6_tlv_tnl_enc_lim *tel;
1308 1320
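
The rewritten ip6_tnl_parse_tlv_enc_lim() switches its internal bookkeeping from raw-relative to skb->data-relative offsets, so every pskb_may_pull() bound is absolute, while preserving the raw-relative return value its callers index with. The layout, illustratively:

	/*
	 * skb->data            raw = skb_network_header(skb)
	 *     |<---- nhoff ---->|<- sizeof(ipv6hdr) ->|<- ext hdrs ->|
	 *     |<-------------- off (absolute) ------->|
	 *
	 * A TLV match inside a destination-options header sits at
	 * absolute offset off + i, so the function returns
	 * i + off - nhoff to stay relative to raw, as before.
	 */

One nit visible above: in "tel = (struct ipv6_tlv_tnl_enc_lim *) skb->data + off + i;" the cast binds before the addition, so off + i is scaled by the struct size; the surrounding arithmetic suggests the intended address is the byte offset skb->data + off + i, i.e. the cast should wrap the whole expression.
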
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index d5263dc364a9..b12e61b7b16c 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -72,10 +72,10 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
72 return ret; 72 return ret;
73} 73}
74 74
75static bool rpfilter_is_local(const struct sk_buff *skb) 75static bool
76rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
76{ 77{
77 const struct rt6_info *rt = (const void *) skb_dst(skb); 78 return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
78 return rt && (rt->rt6i_flags & RTF_LOCAL);
79} 79}
80 80
81static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) 81static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
@@ -85,7 +85,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
85 struct ipv6hdr *iph; 85 struct ipv6hdr *iph;
86 bool invert = info->flags & XT_RPFILTER_INVERT; 86 bool invert = info->flags & XT_RPFILTER_INVERT;
87 87
88 if (rpfilter_is_local(skb)) 88 if (rpfilter_is_loopback(skb, xt_in(par)))
89 return true ^ invert; 89 return true ^ invert;
90 90
91 iph = ipv6_hdr(skb); 91 iph = ipv6_hdr(skb);
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 10090400c72f..eedee5d108d9 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -157,6 +157,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
157 fl6.fl6_sport = otcph->dest; 157 fl6.fl6_sport = otcph->dest;
158 fl6.fl6_dport = otcph->source; 158 fl6.fl6_dport = otcph->source;
159 fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev); 159 fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
160 fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
160 security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); 161 security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
161 dst = ip6_route_output(net, NULL, &fl6); 162 dst = ip6_route_output(net, NULL, &fl6);
162 if (dst->error) { 163 if (dst->error) {
@@ -180,6 +181,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
180 181
181 skb_dst_set(nskb, dst); 182 skb_dst_set(nskb, dst);
182 183
184 nskb->mark = fl6.flowi6_mark;
185
183 skb_reserve(nskb, hh_len + dst->header_len); 186 skb_reserve(nskb, hh_len + dst->header_len);
184 ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, 187 ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
185 ip6_dst_hoplimit(dst)); 188 ip6_dst_hoplimit(dst));
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index c947aad8bcc6..765facf03d45 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -18,13 +18,6 @@
18#include <net/ip6_fib.h> 18#include <net/ip6_fib.h>
19#include <net/ip6_route.h> 19#include <net/ip6_route.h>
20 20
21static bool fib6_is_local(const struct sk_buff *skb)
22{
23 const struct rt6_info *rt = (const void *)skb_dst(skb);
24
25 return rt && (rt->rt6i_flags & RTF_LOCAL);
26}
27
28static int get_ifindex(const struct net_device *dev) 21static int get_ifindex(const struct net_device *dev)
29{ 22{
30 return dev ? dev->ifindex : 0; 23 return dev ? dev->ifindex : 0;
@@ -164,8 +157,10 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
164 157
165 lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif); 158 lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif);
166 159
167 if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib6_is_local(pkt->skb)) { 160 if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
168 nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX); 161 nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
162 nft_fib_store_result(dest, priv->result, pkt,
163 nft_in(pkt)->ifindex);
169 return; 164 return;
170 } 165 }
171 166
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 5046d2b24004..61d7006324ed 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2899,6 +2899,11 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2899 if (tb[RTA_MULTIPATH]) { 2899 if (tb[RTA_MULTIPATH]) {
2900 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]); 2900 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2901 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]); 2901 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2902
2903 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
2904 cfg->fc_mp_len);
2905 if (err < 0)
2906 goto errout;
2902 } 2907 }
2903 2908
2904 if (tb[RTA_PREF]) { 2909 if (tb[RTA_PREF]) {
@@ -2912,9 +2917,14 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2912 if (tb[RTA_ENCAP]) 2917 if (tb[RTA_ENCAP])
2913 cfg->fc_encap = tb[RTA_ENCAP]; 2918 cfg->fc_encap = tb[RTA_ENCAP];
2914 2919
2915 if (tb[RTA_ENCAP_TYPE]) 2920 if (tb[RTA_ENCAP_TYPE]) {
2916 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]); 2921 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
2917 2922
2923 err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
2924 if (err < 0)
2925 goto errout;
2926 }
2927
2918 if (tb[RTA_EXPIRES]) { 2928 if (tb[RTA_EXPIRES]) {
2919 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ); 2929 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
2920 2930
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index b172d85c650a..a855eb325b03 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -176,6 +176,8 @@ static int seg6_genl_set_tunsrc(struct sk_buff *skb, struct genl_info *info)
176 176
177 val = nla_data(info->attrs[SEG6_ATTR_DST]); 177 val = nla_data(info->attrs[SEG6_ATTR_DST]);
178 t_new = kmemdup(val, sizeof(*val), GFP_KERNEL); 178 t_new = kmemdup(val, sizeof(*val), GFP_KERNEL);
179 if (!t_new)
180 return -ENOMEM;
179 181
180 mutex_lock(&sdata->lock); 182 mutex_lock(&sdata->lock);
181 183
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 1d60cb132835..c46f8cbf5ab5 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -422,6 +422,7 @@ static const struct lwtunnel_encap_ops seg6_iptun_ops = {
422 .fill_encap = seg6_fill_encap_info, 422 .fill_encap = seg6_fill_encap_info,
423 .get_encap_size = seg6_encap_nlsize, 423 .get_encap_size = seg6_encap_nlsize,
424 .cmp_encap = seg6_encap_cmp, 424 .cmp_encap = seg6_encap_cmp,
425 .owner = THIS_MODULE,
425}; 426};
426 427
427int __init seg6_iptunnel_init(void) 428int __init seg6_iptunnel_init(void)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 95c05e5293b1..64834ec5ab73 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -474,7 +474,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
474 opt = ireq->ipv6_opt; 474 opt = ireq->ipv6_opt;
475 if (!opt) 475 if (!opt)
476 opt = rcu_dereference(np->opt); 476 opt = rcu_dereference(np->opt);
477 err = ip6_xmit(sk, skb, fl6, opt, np->tclass); 477 err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
478 rcu_read_unlock(); 478 rcu_read_unlock();
479 err = net_xmit_eval(err); 479 err = net_xmit_eval(err);
480 } 480 }
@@ -845,7 +845,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
845 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); 845 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
846 if (!IS_ERR(dst)) { 846 if (!IS_ERR(dst)) {
847 skb_dst_set(buff, dst); 847 skb_dst_set(buff, dst);
848 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass); 848 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
849 TCP_INC_STATS(net, TCP_MIB_OUTSEGS); 849 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
850 if (rst) 850 if (rst)
851 TCP_INC_STATS(net, TCP_MIB_OUTRSTS); 851 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 9e2641d45587..206698bc93f4 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -40,8 +40,6 @@ void rate_control_rate_init(struct sta_info *sta)
40 40
41 ieee80211_sta_set_rx_nss(sta); 41 ieee80211_sta_set_rx_nss(sta);
42 42
43 ieee80211_recalc_min_chandef(sta->sdata);
44
45 if (!ref) 43 if (!ref)
46 return; 44 return;
47 45
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 4dc81963af8f..64d3bf269a26 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -119,18 +119,19 @@ void mpls_stats_inc_outucastpkts(struct net_device *dev,
119} 119}
120EXPORT_SYMBOL_GPL(mpls_stats_inc_outucastpkts); 120EXPORT_SYMBOL_GPL(mpls_stats_inc_outucastpkts);
121 121
122static u32 mpls_multipath_hash(struct mpls_route *rt, 122static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
123 struct sk_buff *skb, bool bos)
124{ 123{
125 struct mpls_entry_decoded dec; 124 struct mpls_entry_decoded dec;
125 unsigned int mpls_hdr_len = 0;
126 struct mpls_shim_hdr *hdr; 126 struct mpls_shim_hdr *hdr;
127 bool eli_seen = false; 127 bool eli_seen = false;
128 int label_index; 128 int label_index;
129 u32 hash = 0; 129 u32 hash = 0;
130 130
131 for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos; 131 for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
132 label_index++) { 132 label_index++) {
133 if (!pskb_may_pull(skb, sizeof(*hdr) * label_index)) 133 mpls_hdr_len += sizeof(*hdr);
134 if (!pskb_may_pull(skb, mpls_hdr_len))
134 break; 135 break;
135 136
136 /* Read and decode the current label */ 137 /* Read and decode the current label */
@@ -155,37 +156,38 @@ static u32 mpls_multipath_hash(struct mpls_route *rt,
155 eli_seen = true; 156 eli_seen = true;
156 } 157 }
157 158
158 bos = dec.bos; 159 if (!dec.bos)
159 if (bos && pskb_may_pull(skb, sizeof(*hdr) * label_index + 160 continue;
160 sizeof(struct iphdr))) { 161
162 /* found bottom label; does skb have room for a header? */
163 if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
161 const struct iphdr *v4hdr; 164 const struct iphdr *v4hdr;
162 165
163 v4hdr = (const struct iphdr *)(mpls_hdr(skb) + 166 v4hdr = (const struct iphdr *)(hdr + 1);
164 label_index);
165 if (v4hdr->version == 4) { 167 if (v4hdr->version == 4) {
166 hash = jhash_3words(ntohl(v4hdr->saddr), 168 hash = jhash_3words(ntohl(v4hdr->saddr),
167 ntohl(v4hdr->daddr), 169 ntohl(v4hdr->daddr),
168 v4hdr->protocol, hash); 170 v4hdr->protocol, hash);
169 } else if (v4hdr->version == 6 && 171 } else if (v4hdr->version == 6 &&
170 pskb_may_pull(skb, sizeof(*hdr) * label_index + 172 pskb_may_pull(skb, mpls_hdr_len +
171 sizeof(struct ipv6hdr))) { 173 sizeof(struct ipv6hdr))) {
172 const struct ipv6hdr *v6hdr; 174 const struct ipv6hdr *v6hdr;
173 175
174 v6hdr = (const struct ipv6hdr *)(mpls_hdr(skb) + 176 v6hdr = (const struct ipv6hdr *)(hdr + 1);
175 label_index);
176
177 hash = __ipv6_addr_jhash(&v6hdr->saddr, hash); 177 hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
178 hash = __ipv6_addr_jhash(&v6hdr->daddr, hash); 178 hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
179 hash = jhash_1word(v6hdr->nexthdr, hash); 179 hash = jhash_1word(v6hdr->nexthdr, hash);
180 } 180 }
181 } 181 }
182
183 break;
182 } 184 }
183 185
184 return hash; 186 return hash;
185} 187}
186 188
187static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt, 189static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
188 struct sk_buff *skb, bool bos) 190 struct sk_buff *skb)
189{ 191{
190 int alive = ACCESS_ONCE(rt->rt_nhn_alive); 192 int alive = ACCESS_ONCE(rt->rt_nhn_alive);
191 u32 hash = 0; 193 u32 hash = 0;
@@ -201,7 +203,7 @@ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
201 if (alive <= 0) 203 if (alive <= 0)
202 return NULL; 204 return NULL;
203 205
204 hash = mpls_multipath_hash(rt, skb, bos); 206 hash = mpls_multipath_hash(rt, skb);
205 nh_index = hash % alive; 207 nh_index = hash % alive;
206 if (alive == rt->rt_nhn) 208 if (alive == rt->rt_nhn)
207 goto out; 209 goto out;
@@ -308,22 +310,22 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
308 hdr = mpls_hdr(skb); 310 hdr = mpls_hdr(skb);
309 dec = mpls_entry_decode(hdr); 311 dec = mpls_entry_decode(hdr);
310 312
311 /* Pop the label */
312 skb_pull(skb, sizeof(*hdr));
313 skb_reset_network_header(skb);
314
315 skb_orphan(skb);
316
317 rt = mpls_route_input_rcu(net, dec.label); 313 rt = mpls_route_input_rcu(net, dec.label);
318 if (!rt) { 314 if (!rt) {
319 MPLS_INC_STATS(mdev, rx_noroute); 315 MPLS_INC_STATS(mdev, rx_noroute);
320 goto drop; 316 goto drop;
321 } 317 }
322 318
323 nh = mpls_select_multipath(rt, skb, dec.bos); 319 nh = mpls_select_multipath(rt, skb);
324 if (!nh) 320 if (!nh)
325 goto err; 321 goto err;
326 322
323 /* Pop the label */
324 skb_pull(skb, sizeof(*hdr));
325 skb_reset_network_header(skb);
326
327 skb_orphan(skb);
328
327 if (skb_warn_if_lro(skb)) 329 if (skb_warn_if_lro(skb))
328 goto err; 330 goto err;
329 331
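
Two related MPLS changes: the multipath hash now extends the linear area cumulatively (mpls_hdr_len grows by one shim header per label) instead of trusting a caller-supplied bos flag, and the label pop in mpls_forward() is deferred until after route and nexthop selection, so the hash sees the full label stack and nothing is consumed on the drop or error paths. The walk, skeletonized; hash_payload_after() is a placeholder for the IPv4/IPv6 hashing shown above:

	unsigned int mpls_hdr_len = 0;

	for (i = 0; i < MAX_MP_SELECT_LABELS; i++) {
		mpls_hdr_len += sizeof(*hdr);
		if (!pskb_may_pull(skb, mpls_hdr_len))
			break;			/* stack ends early */
		hdr = mpls_hdr(skb) + i;	/* recompute after pull */
		dec = mpls_entry_decode(hdr);
		if (!dec.bos)
			continue;		/* keep walking */
		hash_payload_after(hdr + 1);	/* bottom of stack */
		break;
	}
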
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index 02531284bc49..67b7a955de65 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -222,6 +222,7 @@ static const struct lwtunnel_encap_ops mpls_iptun_ops = {
222 .fill_encap = mpls_fill_encap_info, 222 .fill_encap = mpls_fill_encap_info,
223 .get_encap_size = mpls_encap_nlsize, 223 .get_encap_size = mpls_encap_nlsize,
224 .cmp_encap = mpls_encap_cmp, 224 .cmp_encap = mpls_encap_cmp,
225 .owner = THIS_MODULE,
225}; 226};
226 227
227static int __init mpls_iptunnel_init(void) 228static int __init mpls_iptunnel_init(void)
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 63729b489c2c..bbc45f8a7b2d 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -494,7 +494,7 @@ config NFT_CT
494 depends on NF_CONNTRACK 494 depends on NF_CONNTRACK
495 tristate "Netfilter nf_tables conntrack module" 495 tristate "Netfilter nf_tables conntrack module"
496 help 496 help
497 This option adds the "meta" expression that you can use to match 497 This option adds the "ct" expression that you can use to match
498 connection tracking information such as the flow state. 498 connection tracking information such as the flow state.
499 499
500config NFT_SET_RBTREE 500config NFT_SET_RBTREE
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3a073cd9fcf4..4e8083c5e01d 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -85,11 +85,11 @@ static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
85static __read_mostly bool nf_conntrack_locks_all; 85static __read_mostly bool nf_conntrack_locks_all;
86 86
87/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */ 87/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
88#define GC_MAX_BUCKETS_DIV 64u 88#define GC_MAX_BUCKETS_DIV 128u
89/* upper bound of scan intervals */ 89/* upper bound of full table scan */
90#define GC_INTERVAL_MAX (2 * HZ) 90#define GC_MAX_SCAN_JIFFIES (16u * HZ)
91/* maximum conntracks to evict per gc run */ 91/* desired ratio of entries found to be expired */
92#define GC_MAX_EVICTS 256u 92#define GC_EVICT_RATIO 50u
93 93
94static struct conntrack_gc_work conntrack_gc_work; 94static struct conntrack_gc_work conntrack_gc_work;
95 95
@@ -938,6 +938,7 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
938 938
939static void gc_worker(struct work_struct *work) 939static void gc_worker(struct work_struct *work)
940{ 940{
941 unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
941 unsigned int i, goal, buckets = 0, expired_count = 0; 942 unsigned int i, goal, buckets = 0, expired_count = 0;
942 struct conntrack_gc_work *gc_work; 943 struct conntrack_gc_work *gc_work;
943 unsigned int ratio, scanned = 0; 944 unsigned int ratio, scanned = 0;
@@ -979,8 +980,7 @@ static void gc_worker(struct work_struct *work)
979 */ 980 */
980 rcu_read_unlock(); 981 rcu_read_unlock();
981 cond_resched_rcu_qs(); 982 cond_resched_rcu_qs();
982 } while (++buckets < goal && 983 } while (++buckets < goal);
983 expired_count < GC_MAX_EVICTS);
984 984
985 if (gc_work->exiting) 985 if (gc_work->exiting)
986 return; 986 return;
@@ -997,27 +997,25 @@ static void gc_worker(struct work_struct *work)
997 * 1. Minimize time until we notice a stale entry 997 * 1. Minimize time until we notice a stale entry
998 * 2. Maximize scan intervals to not waste cycles 998 * 2. Maximize scan intervals to not waste cycles
999 * 999 *
1000 * Normally, expired_count will be 0, this increases the next_run time 1000 * Normally, expire ratio will be close to 0.
1001 * to priorize 2) above.
1002 * 1001 *
1003 * As soon as a timed-out entry is found, move towards 1) and increase 1002 * As soon as a sizeable fraction of the entries have expired
1004 * the scan frequency. 1003 * increase scan frequency.
1005 * In case we have lots of evictions next scan is done immediately.
1006 */ 1004 */
1007 ratio = scanned ? expired_count * 100 / scanned : 0; 1005 ratio = scanned ? expired_count * 100 / scanned : 0;
1008 if (ratio >= 90 || expired_count == GC_MAX_EVICTS) { 1006 if (ratio > GC_EVICT_RATIO) {
1009 gc_work->next_gc_run = 0; 1007 gc_work->next_gc_run = min_interval;
1010 next_run = 0;
1011 } else if (expired_count) {
1012 gc_work->next_gc_run /= 2U;
1013 next_run = msecs_to_jiffies(1);
1014 } else { 1008 } else {
1015 if (gc_work->next_gc_run < GC_INTERVAL_MAX) 1009 unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
1016 gc_work->next_gc_run += msecs_to_jiffies(1);
1017 1010
1018 next_run = gc_work->next_gc_run; 1011 BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
1012
1013 gc_work->next_gc_run += min_interval;
1014 if (gc_work->next_gc_run > max)
1015 gc_work->next_gc_run = max;
1019 } 1016 }
1020 1017
1018 next_run = gc_work->next_gc_run;
1021 gc_work->last_bucket = i; 1019 gc_work->last_bucket = i;
1022 queue_delayed_work(system_long_wq, &gc_work->dwork, next_run); 1020 queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
1023} 1021}
@@ -1025,7 +1023,7 @@ static void gc_worker(struct work_struct *work)
1025static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) 1023static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
1026{ 1024{
1027 INIT_DELAYED_WORK(&gc_work->dwork, gc_worker); 1025 INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
1028 gc_work->next_gc_run = GC_INTERVAL_MAX; 1026 gc_work->next_gc_run = HZ;
1029 gc_work->exiting = false; 1027 gc_work->exiting = false;
1030} 1028}
1031 1029
@@ -1917,7 +1915,7 @@ int nf_conntrack_init_start(void)
1917 nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); 1915 nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
1918 1916
1919 conntrack_gc_work_init(&conntrack_gc_work); 1917 conntrack_gc_work_init(&conntrack_gc_work);
1920 queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX); 1918 queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ);
1921 1919
1922 return 0; 1920 return 0;
1923 1921
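
The reworked conntrack GC cadence is easiest to see with the constants plugged in. Each run scans at most 1/GC_MAX_BUCKETS_DIV of the table; on a quiet table the delay between runs grows by one minimum step per run up to a cap, and when more than GC_EVICT_RATIO percent of the scanned entries turn out to be expired the delay snaps back to the minimum:

	/* With GC_MAX_BUCKETS_DIV = 128 and GC_MAX_SCAN_JIFFIES = 16*HZ:
	 *
	 *   min_interval = max(HZ / 128, 1)         jiffies
	 *   max interval = 16*HZ / 128 = HZ / 8     jiffies
	 *
	 * Idle steady state: 128 chunks * HZ/8 = 16*HZ jiffies for a
	 * full table sweep, i.e. GC_MAX_SCAN_JIFFIES by construction.
	 * ratio > 50: next_gc_run resets to min_interval, ramping the
	 * scan rate back up while entries are expiring in bulk.
	 */
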
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 3dca90dc24ad..ffb9e8ada899 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -13,7 +13,6 @@
13/* Internal logging interface, which relies on the real 13/* Internal logging interface, which relies on the real
14 LOG target modules */ 14 LOG target modules */
15 15
16#define NF_LOG_PREFIXLEN 128
17#define NFLOGGER_NAME_LEN 64 16#define NFLOGGER_NAME_LEN 64
18 17
19static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly; 18static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 0db5f9782265..1b913760f205 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -928,7 +928,8 @@ static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table,
928} 928}
929 929
930static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = { 930static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
931 [NFTA_CHAIN_TABLE] = { .type = NLA_STRING }, 931 [NFTA_CHAIN_TABLE] = { .type = NLA_STRING,
932 .len = NFT_TABLE_MAXNAMELEN - 1 },
932 [NFTA_CHAIN_HANDLE] = { .type = NLA_U64 }, 933 [NFTA_CHAIN_HANDLE] = { .type = NLA_U64 },
933 [NFTA_CHAIN_NAME] = { .type = NLA_STRING, 934 [NFTA_CHAIN_NAME] = { .type = NLA_STRING,
934 .len = NFT_CHAIN_MAXNAMELEN - 1 }, 935 .len = NFT_CHAIN_MAXNAMELEN - 1 },
@@ -1854,7 +1855,8 @@ static struct nft_rule *nf_tables_rule_lookup(const struct nft_chain *chain,
1854} 1855}
1855 1856
1856static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = { 1857static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
1857 [NFTA_RULE_TABLE] = { .type = NLA_STRING }, 1858 [NFTA_RULE_TABLE] = { .type = NLA_STRING,
1859 .len = NFT_TABLE_MAXNAMELEN - 1 },
1858 [NFTA_RULE_CHAIN] = { .type = NLA_STRING, 1860 [NFTA_RULE_CHAIN] = { .type = NLA_STRING,
1859 .len = NFT_CHAIN_MAXNAMELEN - 1 }, 1861 .len = NFT_CHAIN_MAXNAMELEN - 1 },
1860 [NFTA_RULE_HANDLE] = { .type = NLA_U64 }, 1862 [NFTA_RULE_HANDLE] = { .type = NLA_U64 },
@@ -2443,7 +2445,8 @@ nft_select_set_ops(const struct nlattr * const nla[],
2443} 2445}
2444 2446
2445static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = { 2447static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
2446 [NFTA_SET_TABLE] = { .type = NLA_STRING }, 2448 [NFTA_SET_TABLE] = { .type = NLA_STRING,
2449 .len = NFT_TABLE_MAXNAMELEN - 1 },
2447 [NFTA_SET_NAME] = { .type = NLA_STRING, 2450 [NFTA_SET_NAME] = { .type = NLA_STRING,
2448 .len = NFT_SET_MAXNAMELEN - 1 }, 2451 .len = NFT_SET_MAXNAMELEN - 1 },
2449 [NFTA_SET_FLAGS] = { .type = NLA_U32 }, 2452 [NFTA_SET_FLAGS] = { .type = NLA_U32 },
@@ -3084,9 +3087,9 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
3084} 3087}
3085 3088
3086static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx, 3089static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
3087 const struct nft_set *set, 3090 struct nft_set *set,
3088 const struct nft_set_iter *iter, 3091 const struct nft_set_iter *iter,
3089 const struct nft_set_elem *elem) 3092 struct nft_set_elem *elem)
3090{ 3093{
3091 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); 3094 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
3092 enum nft_registers dreg; 3095 enum nft_registers dreg;
@@ -3192,8 +3195,10 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
3192}; 3195};
3193 3196
3194static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = { 3197static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
3195 [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING }, 3198 [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING,
3196 [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING }, 3199 .len = NFT_TABLE_MAXNAMELEN - 1 },
3200 [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING,
3201 .len = NFT_SET_MAXNAMELEN - 1 },
3197 [NFTA_SET_ELEM_LIST_ELEMENTS] = { .type = NLA_NESTED }, 3202 [NFTA_SET_ELEM_LIST_ELEMENTS] = { .type = NLA_NESTED },
3198 [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 }, 3203 [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 },
3199}; 3204};
@@ -3303,9 +3308,9 @@ struct nft_set_dump_args {
3303}; 3308};
3304 3309
3305static int nf_tables_dump_setelem(const struct nft_ctx *ctx, 3310static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
3306 const struct nft_set *set, 3311 struct nft_set *set,
3307 const struct nft_set_iter *iter, 3312 const struct nft_set_iter *iter,
3308 const struct nft_set_elem *elem) 3313 struct nft_set_elem *elem)
3309{ 3314{
3310 struct nft_set_dump_args *args; 3315 struct nft_set_dump_args *args;
3311 3316
@@ -3317,7 +3322,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3317{ 3322{
3318 struct net *net = sock_net(skb->sk); 3323 struct net *net = sock_net(skb->sk);
3319 u8 genmask = nft_genmask_cur(net); 3324 u8 genmask = nft_genmask_cur(net);
3320 const struct nft_set *set; 3325 struct nft_set *set;
3321 struct nft_set_dump_args args; 3326 struct nft_set_dump_args args;
3322 struct nft_ctx ctx; 3327 struct nft_ctx ctx;
3323 struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1]; 3328 struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1];
@@ -3740,10 +3745,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
3740 goto err5; 3745 goto err5;
3741 } 3746 }
3742 3747
3748 if (set->size &&
3749 !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) {
3750 err = -ENFILE;
3751 goto err6;
3752 }
3753
3743 nft_trans_elem(trans) = elem; 3754 nft_trans_elem(trans) = elem;
3744 list_add_tail(&trans->list, &ctx->net->nft.commit_list); 3755 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
3745 return 0; 3756 return 0;
3746 3757
3758err6:
3759 set->ops->remove(set, &elem);
3747err5: 3760err5:
3748 kfree(trans); 3761 kfree(trans);
3749err4: 3762err4:
@@ -3790,15 +3803,9 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
3790 return -EBUSY; 3803 return -EBUSY;
3791 3804
3792 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { 3805 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
3793 if (set->size &&
3794 !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact))
3795 return -ENFILE;
3796
3797 err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags); 3806 err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags);
3798 if (err < 0) { 3807 if (err < 0)
3799 atomic_dec(&set->nelems);
3800 break; 3808 break;
3801 }
3802 } 3809 }
3803 return err; 3810 return err;
3804} 3811}
@@ -3883,9 +3890,9 @@ err1:
3883} 3890}
3884 3891
3885static int nft_flush_set(const struct nft_ctx *ctx, 3892static int nft_flush_set(const struct nft_ctx *ctx,
3886 const struct nft_set *set, 3893 struct nft_set *set,
3887 const struct nft_set_iter *iter, 3894 const struct nft_set_iter *iter,
3888 const struct nft_set_elem *elem) 3895 struct nft_set_elem *elem)
3889{ 3896{
3890 struct nft_trans *trans; 3897 struct nft_trans *trans;
3891 int err; 3898 int err;
@@ -3899,9 +3906,10 @@ static int nft_flush_set(const struct nft_ctx *ctx,
3899 err = -ENOENT; 3906 err = -ENOENT;
3900 goto err1; 3907 goto err1;
3901 } 3908 }
3909 set->ndeact++;
3902 3910
3903 nft_trans_elem_set(trans) = (struct nft_set *)set; 3911 nft_trans_elem_set(trans) = set;
3904 nft_trans_elem(trans) = *((struct nft_set_elem *)elem); 3912 nft_trans_elem(trans) = *elem;
3905 list_add_tail(&trans->list, &ctx->net->nft.commit_list); 3913 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
3906 3914
3907 return 0; 3915 return 0;
@@ -4032,8 +4040,10 @@ struct nft_object *nf_tables_obj_lookup(const struct nft_table *table,
4032EXPORT_SYMBOL_GPL(nf_tables_obj_lookup); 4040EXPORT_SYMBOL_GPL(nf_tables_obj_lookup);
4033 4041
4034static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = { 4042static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = {
4035 [NFTA_OBJ_TABLE] = { .type = NLA_STRING }, 4043 [NFTA_OBJ_TABLE] = { .type = NLA_STRING,
4036 [NFTA_OBJ_NAME] = { .type = NLA_STRING }, 4044 .len = NFT_TABLE_MAXNAMELEN - 1 },
4045 [NFTA_OBJ_NAME] = { .type = NLA_STRING,
4046 .len = NFT_OBJ_MAXNAMELEN - 1 },
4037 [NFTA_OBJ_TYPE] = { .type = NLA_U32 }, 4047 [NFTA_OBJ_TYPE] = { .type = NLA_U32 },
4038 [NFTA_OBJ_DATA] = { .type = NLA_NESTED }, 4048 [NFTA_OBJ_DATA] = { .type = NLA_NESTED },
4039}; 4049};
@@ -4262,10 +4272,11 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
4262 if (idx > s_idx) 4272 if (idx > s_idx)
4263 memset(&cb->args[1], 0, 4273 memset(&cb->args[1], 0,
4264 sizeof(cb->args) - sizeof(cb->args[0])); 4274 sizeof(cb->args) - sizeof(cb->args[0]));
4265 if (filter->table[0] && 4275 if (filter && filter->table[0] &&
4266 strcmp(filter->table, table->name)) 4276 strcmp(filter->table, table->name))
4267 goto cont; 4277 goto cont;
4268 if (filter->type != NFT_OBJECT_UNSPEC && 4278 if (filter &&
4279 filter->type != NFT_OBJECT_UNSPEC &&
4269 obj->type->type != filter->type) 4280 obj->type->type != filter->type)
4270 goto cont; 4281 goto cont;
4271 4282
@@ -5009,9 +5020,9 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
5009 const struct nft_chain *chain); 5020 const struct nft_chain *chain);
5010 5021
5011static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx, 5022static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
5012 const struct nft_set *set, 5023 struct nft_set *set,
5013 const struct nft_set_iter *iter, 5024 const struct nft_set_iter *iter,
5014 const struct nft_set_elem *elem) 5025 struct nft_set_elem *elem)
5015{ 5026{
5016 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); 5027 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
5017 const struct nft_data *data; 5028 const struct nft_data *data;
@@ -5035,7 +5046,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
5035{ 5046{
5036 const struct nft_rule *rule; 5047 const struct nft_rule *rule;
5037 const struct nft_expr *expr, *last; 5048 const struct nft_expr *expr, *last;
5038 const struct nft_set *set; 5049 struct nft_set *set;
5039 struct nft_set_binding *binding; 5050 struct nft_set_binding *binding;
5040 struct nft_set_iter iter; 5051 struct nft_set_iter iter;
5041 5052
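
The nla_policy hunks in this file and in the nft_dynset/nft_log/nft_lookup/nft_objref files below are all the same hardening: an NLA_STRING entry without .len accepts attributes of any length, leaving each handler to bound the copy into its fixed-size name buffer. With .len set, the netlink validator rejects over-long strings before any handler runs (a trailing NUL within the limit is tolerated). The pattern, with hypothetical MY_* names:

	static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
		[MY_ATTR_NAME] = { .type = NLA_STRING,
				   .len  = MY_NAME_MAXLEN - 1 },
	};

Separately, the set-element count reservation moves from nf_tables_newsetelem() into nft_add_set_elem(), so the new err6 label can unwind both the count and the half-inserted element in one place.
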
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 7de2f46734a4..049ad2d9ee66 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -98,7 +98,8 @@ out:
98} 98}
99 99
100static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = { 100static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = {
101 [NFTA_DYNSET_SET_NAME] = { .type = NLA_STRING }, 101 [NFTA_DYNSET_SET_NAME] = { .type = NLA_STRING,
102 .len = NFT_SET_MAXNAMELEN - 1 },
102 [NFTA_DYNSET_SET_ID] = { .type = NLA_U32 }, 103 [NFTA_DYNSET_SET_ID] = { .type = NLA_U32 },
103 [NFTA_DYNSET_OP] = { .type = NLA_U32 }, 104 [NFTA_DYNSET_OP] = { .type = NLA_U32 },
104 [NFTA_DYNSET_SREG_KEY] = { .type = NLA_U32 }, 105 [NFTA_DYNSET_SREG_KEY] = { .type = NLA_U32 },
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 6271e40a3dd6..6f6e64423643 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -39,7 +39,8 @@ static void nft_log_eval(const struct nft_expr *expr,
39 39
40static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = { 40static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
41 [NFTA_LOG_GROUP] = { .type = NLA_U16 }, 41 [NFTA_LOG_GROUP] = { .type = NLA_U16 },
42 [NFTA_LOG_PREFIX] = { .type = NLA_STRING }, 42 [NFTA_LOG_PREFIX] = { .type = NLA_STRING,
43 .len = NF_LOG_PREFIXLEN - 1 },
43 [NFTA_LOG_SNAPLEN] = { .type = NLA_U32 }, 44 [NFTA_LOG_SNAPLEN] = { .type = NLA_U32 },
44 [NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 }, 45 [NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 },
45 [NFTA_LOG_LEVEL] = { .type = NLA_U32 }, 46 [NFTA_LOG_LEVEL] = { .type = NLA_U32 },
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index d4f97fa7e21d..e21aea7e5ec8 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -49,7 +49,8 @@ static void nft_lookup_eval(const struct nft_expr *expr,
49} 49}
50 50
51static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = { 51static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
52 [NFTA_LOOKUP_SET] = { .type = NLA_STRING }, 52 [NFTA_LOOKUP_SET] = { .type = NLA_STRING,
53 .len = NFT_SET_MAXNAMELEN - 1 },
53 [NFTA_LOOKUP_SET_ID] = { .type = NLA_U32 }, 54 [NFTA_LOOKUP_SET_ID] = { .type = NLA_U32 },
54 [NFTA_LOOKUP_SREG] = { .type = NLA_U32 }, 55 [NFTA_LOOKUP_SREG] = { .type = NLA_U32 },
55 [NFTA_LOOKUP_DREG] = { .type = NLA_U32 }, 56 [NFTA_LOOKUP_DREG] = { .type = NLA_U32 },
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 415a65ba2b85..1ae8c49ca4a1 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -193,10 +193,12 @@ nft_objref_select_ops(const struct nft_ctx *ctx,
193} 193}
194 194
195static const struct nla_policy nft_objref_policy[NFTA_OBJREF_MAX + 1] = { 195static const struct nla_policy nft_objref_policy[NFTA_OBJREF_MAX + 1] = {
196 [NFTA_OBJREF_IMM_NAME] = { .type = NLA_STRING }, 196 [NFTA_OBJREF_IMM_NAME] = { .type = NLA_STRING,
197 .len = NFT_OBJ_MAXNAMELEN - 1 },
197 [NFTA_OBJREF_IMM_TYPE] = { .type = NLA_U32 }, 198 [NFTA_OBJREF_IMM_TYPE] = { .type = NLA_U32 },
198 [NFTA_OBJREF_SET_SREG] = { .type = NLA_U32 }, 199 [NFTA_OBJREF_SET_SREG] = { .type = NLA_U32 },
199 [NFTA_OBJREF_SET_NAME] = { .type = NLA_STRING }, 200 [NFTA_OBJREF_SET_NAME] = { .type = NLA_STRING,
201 .len = NFT_SET_MAXNAMELEN - 1 },
200 [NFTA_OBJREF_SET_ID] = { .type = NLA_U32 }, 202 [NFTA_OBJREF_SET_ID] = { .type = NLA_U32 },
201}; 203};
202 204
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 1e20e2bbb6d9..e36069fb76ae 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -212,7 +212,7 @@ static void nft_hash_remove(const struct nft_set *set,
212 rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params); 212 rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
213} 213}
214 214
215static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set, 215static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
216 struct nft_set_iter *iter) 216 struct nft_set_iter *iter)
217{ 217{
218 struct nft_hash *priv = nft_set_priv(set); 218 struct nft_hash *priv = nft_set_priv(set);
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 08376e50f6cd..f06f55ee516d 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -221,7 +221,7 @@ static void *nft_rbtree_deactivate(const struct net *net,
221} 221}
222 222
223static void nft_rbtree_walk(const struct nft_ctx *ctx, 223static void nft_rbtree_walk(const struct nft_ctx *ctx,
224 const struct nft_set *set, 224 struct nft_set *set,
225 struct nft_set_iter *iter) 225 struct nft_set_iter *iter)
226{ 226{
227 const struct nft_rbtree *priv = nft_set_priv(set); 227 const struct nft_rbtree *priv = nft_set_priv(set);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ddbda255b6ae..9854baad66ab 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1984,7 +1984,7 @@ static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
1984 return -EINVAL; 1984 return -EINVAL;
1985 *len -= sizeof(vnet_hdr); 1985 *len -= sizeof(vnet_hdr);
1986 1986
1987 if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le())) 1987 if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
1988 return -EINVAL; 1988 return -EINVAL;
1989 1989
1990 return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr)); 1990 return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
@@ -2245,7 +2245,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2245 if (po->has_vnet_hdr) { 2245 if (po->has_vnet_hdr) {
2246 if (virtio_net_hdr_from_skb(skb, h.raw + macoff - 2246 if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
2247 sizeof(struct virtio_net_hdr), 2247 sizeof(struct virtio_net_hdr),
2248 vio_le())) { 2248 vio_le(), true)) {
2249 spin_lock(&sk->sk_receive_queue.lock); 2249 spin_lock(&sk->sk_receive_queue.lock);
2250 goto drop_n_account; 2250 goto drop_n_account;
2251 } 2251 }
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 6619367bb6ca..063baac5b9fe 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -222,7 +222,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
222 SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); 222 SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
223 223
224 rcu_read_lock(); 224 rcu_read_lock();
225 res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass); 225 res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
226 np->tclass);
226 rcu_read_unlock(); 227 rcu_read_unlock();
227 return res; 228 return res;
228} 229}
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 7e869d0cca69..4f5a2b580aa5 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -68,7 +68,7 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
68 goto out; 68 goto out;
69 } 69 }
70 70
71 segs = skb_segment(skb, features | NETIF_F_HW_CSUM); 71 segs = skb_segment(skb, features | NETIF_F_HW_CSUM | NETIF_F_SG);
72 if (IS_ERR(segs)) 72 if (IS_ERR(segs))
73 goto out; 73 goto out;
74 74
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index d699d2cbf275..5fc7122c76de 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -235,8 +235,12 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
235 sctp_assoc_t id) 235 sctp_assoc_t id)
236{ 236{
237 struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; 237 struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
238 struct sctp_transport *transport; 238 struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
239 union sctp_addr *laddr = (union sctp_addr *)addr; 239 union sctp_addr *laddr = (union sctp_addr *)addr;
240 struct sctp_transport *transport;
241
242 if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
243 return NULL;
240 244
241 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, 245 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
242 laddr, 246 laddr,
diff --git a/net/tipc/node.c b/net/tipc/node.c
index f96dacf173ab..e9295fa3a554 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -263,6 +263,11 @@ static void tipc_node_write_lock(struct tipc_node *n)
 	write_lock_bh(&n->lock);
 }
 
+static void tipc_node_write_unlock_fast(struct tipc_node *n)
+{
+	write_unlock_bh(&n->lock);
+}
+
 static void tipc_node_write_unlock(struct tipc_node *n)
 {
 	struct net *net = n->net;
@@ -417,7 +422,7 @@ void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
 	}
 	tipc_node_write_lock(n);
 	list_add_tail(subscr, &n->publ_list);
-	tipc_node_write_unlock(n);
+	tipc_node_write_unlock_fast(n);
 	tipc_node_put(n);
 }
 
@@ -435,7 +440,7 @@ void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
 	}
 	tipc_node_write_lock(n);
 	list_del_init(subscr);
-	tipc_node_write_unlock(n);
+	tipc_node_write_unlock_fast(n);
 	tipc_node_put(n);
 }
 
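tipc_node_write_unlock() does more than drop the rwlock: it also publishes node and link events, which can take further locks. Subscribe/unsubscribe only edit publ_list, so they appear to switch to the _fast variant to avoid re-entering that event machinery from a context that already holds other locks. A sketch of the pairing:

/* Sketch of the two-tier unlock used above. */
static void node_write_unlock_fast(struct tipc_node *n)
{
	write_unlock_bh(&n->lock);	/* drop the lock, nothing more */
}

static void node_write_unlock(struct tipc_node *n)
{
	/* snapshot state while still holding the lock ... */
	write_unlock_bh(&n->lock);
	/* ... then fire node/link events, which may take other locks */
}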
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 215849ce453d..3cd6402e812c 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -86,12 +86,12 @@ struct outqueue_entry {
 static void tipc_recv_work(struct work_struct *work);
 static void tipc_send_work(struct work_struct *work);
 static void tipc_clean_outqueues(struct tipc_conn *con);
-static void tipc_sock_release(struct tipc_conn *con);
 
 static void tipc_conn_kref_release(struct kref *kref)
 {
 	struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
-	struct sockaddr_tipc *saddr = con->server->saddr;
+	struct tipc_server *s = con->server;
+	struct sockaddr_tipc *saddr = s->saddr;
 	struct socket *sock = con->sock;
 	struct sock *sk;
 
@@ -103,9 +103,13 @@ static void tipc_conn_kref_release(struct kref *kref)
 		}
 		saddr->scope = -TIPC_NODE_SCOPE;
 		kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
-		tipc_sock_release(con);
 		sock_release(sock);
 		con->sock = NULL;
+
+		spin_lock_bh(&s->idr_lock);
+		idr_remove(&s->conn_idr, con->conid);
+		s->idr_in_use--;
+		spin_unlock_bh(&s->idr_lock);
 	}
 
 	tipc_clean_outqueues(con);
@@ -128,8 +132,10 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
 
 	spin_lock_bh(&s->idr_lock);
 	con = idr_find(&s->conn_idr, conid);
-	if (con)
+	if (con && test_bit(CF_CONNECTED, &con->flags))
 		conn_get(con);
+	else
+		con = NULL;
 	spin_unlock_bh(&s->idr_lock);
 	return con;
 }
@@ -186,26 +192,15 @@ static void tipc_unregister_callbacks(struct tipc_conn *con)
 	write_unlock_bh(&sk->sk_callback_lock);
 }
 
-static void tipc_sock_release(struct tipc_conn *con)
-{
-	struct tipc_server *s = con->server;
-
-	if (con->conid)
-		s->tipc_conn_release(con->conid, con->usr_data);
-
-	tipc_unregister_callbacks(con);
-}
-
 static void tipc_close_conn(struct tipc_conn *con)
 {
 	struct tipc_server *s = con->server;
 
 	if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
+		tipc_unregister_callbacks(con);
 
-		spin_lock_bh(&s->idr_lock);
-		idr_remove(&s->conn_idr, con->conid);
-		s->idr_in_use--;
-		spin_unlock_bh(&s->idr_lock);
+		if (con->conid)
+			s->tipc_conn_release(con->conid, con->usr_data);
 
 		/* We shouldn't flush pending works as we may be in the
 		 * thread. In fact the races with pending rx/tx work structs
@@ -458,6 +453,11 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
 	if (!con)
 		return -EINVAL;
 
+	if (!test_bit(CF_CONNECTED, &con->flags)) {
+		conn_put(con);
+		return 0;
+	}
+
 	e = tipc_alloc_entry(data, len);
 	if (!e) {
 		conn_put(con);
@@ -471,12 +471,8 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
 	list_add_tail(&e->list, &con->outqueue);
 	spin_unlock_bh(&con->outqueue_lock);
 
-	if (test_bit(CF_CONNECTED, &con->flags)) {
-		if (!queue_work(s->send_wq, &con->swork))
-			conn_put(con);
-	} else {
+	if (!queue_work(s->send_wq, &con->swork))
 		conn_put(con);
-	}
 	return 0;
 }
 
@@ -500,7 +496,7 @@ static void tipc_send_to_sock(struct tipc_conn *con)
 	int ret;
 
 	spin_lock_bh(&con->outqueue_lock);
-	while (1) {
+	while (test_bit(CF_CONNECTED, &con->flags)) {
 		e = list_entry(con->outqueue.next, struct outqueue_entry,
 			       list);
 		if ((struct list_head *) e == &con->outqueue)
@@ -623,14 +619,12 @@ int tipc_server_start(struct tipc_server *s)
 void tipc_server_stop(struct tipc_server *s)
 {
 	struct tipc_conn *con;
-	int total = 0;
 	int id;
 
 	spin_lock_bh(&s->idr_lock);
-	for (id = 0; total < s->idr_in_use; id++) {
+	for (id = 0; s->idr_in_use; id++) {
 		con = idr_find(&s->conn_idr, id);
 		if (con) {
-			total++;
 			spin_unlock_bh(&s->idr_lock);
 			tipc_close_conn(con);
 			spin_lock_bh(&s->idr_lock);
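The net effect is that a connection id can only be resolved to a reference while CF_CONNECTED is still set, and the idr entry is not dropped until the last reference goes away in the kref release. A compact sketch of that guarded-lookup pattern (generic names, mirroring tipc_conn_lookup() above):

struct conn *conn_lookup_live(struct server *s, int id)
{
	struct conn *c;

	spin_lock_bh(&s->idr_lock);
	c = idr_find(&s->conn_idr, id);
	if (c && test_bit(CF_CONNECTED, &c->flags))
		kref_get(&c->kref);	/* safe: release runs after the
					 * last put, never under us */
	else
		c = NULL;
	spin_unlock_bh(&s->idr_lock);
	return c;
}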
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0dd02244e21d..9d94e65d0894 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -54,6 +54,8 @@ struct tipc_subscriber {
 
 static void tipc_subscrp_delete(struct tipc_subscription *sub);
 static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
+static void tipc_subscrp_put(struct tipc_subscription *subscription);
+static void tipc_subscrp_get(struct tipc_subscription *subscription);
 
 /**
  * htohl - convert value to endianness used by destination
@@ -123,6 +125,7 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
 {
 	struct tipc_name_seq seq;
 
+	tipc_subscrp_get(sub);
 	tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
 	if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
 		return;
@@ -132,30 +135,23 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
 
 	tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
 				node);
+	tipc_subscrp_put(sub);
 }
 
 static void tipc_subscrp_timeout(unsigned long data)
 {
 	struct tipc_subscription *sub = (struct tipc_subscription *)data;
-	struct tipc_subscriber *subscriber = sub->subscriber;
 
 	/* Notify subscriber of timeout */
 	tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
 				TIPC_SUBSCR_TIMEOUT, 0, 0);
 
-	spin_lock_bh(&subscriber->lock);
-	tipc_subscrp_delete(sub);
-	spin_unlock_bh(&subscriber->lock);
-
-	tipc_subscrb_put(subscriber);
+	tipc_subscrp_put(sub);
 }
 
 static void tipc_subscrb_kref_release(struct kref *kref)
 {
-	struct tipc_subscriber *subcriber = container_of(kref,
-							 struct tipc_subscriber, kref);
-
-	kfree(subcriber);
+	kfree(container_of(kref,struct tipc_subscriber, kref));
 }
 
 static void tipc_subscrb_put(struct tipc_subscriber *subscriber)
@@ -168,6 +164,59 @@ static void tipc_subscrb_get(struct tipc_subscriber *subscriber)
 	kref_get(&subscriber->kref);
 }
 
+static void tipc_subscrp_kref_release(struct kref *kref)
+{
+	struct tipc_subscription *sub = container_of(kref,
+						     struct tipc_subscription,
+						     kref);
+	struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+	struct tipc_subscriber *subscriber = sub->subscriber;
+
+	spin_lock_bh(&subscriber->lock);
+	tipc_nametbl_unsubscribe(sub);
+	list_del(&sub->subscrp_list);
+	atomic_dec(&tn->subscription_count);
+	spin_unlock_bh(&subscriber->lock);
+	kfree(sub);
+	tipc_subscrb_put(subscriber);
+}
+
+static void tipc_subscrp_put(struct tipc_subscription *subscription)
+{
+	kref_put(&subscription->kref, tipc_subscrp_kref_release);
+}
+
+static void tipc_subscrp_get(struct tipc_subscription *subscription)
+{
+	kref_get(&subscription->kref);
+}
+
+/* tipc_subscrb_subscrp_delete - delete a specific subscription or all
+ * subscriptions for a given subscriber.
+ */
+static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
+					struct tipc_subscr *s)
+{
+	struct list_head *subscription_list = &subscriber->subscrp_list;
+	struct tipc_subscription *sub, *temp;
+
+	spin_lock_bh(&subscriber->lock);
+	list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) {
+		if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
+			continue;
+
+		tipc_subscrp_get(sub);
+		spin_unlock_bh(&subscriber->lock);
+		tipc_subscrp_delete(sub);
+		tipc_subscrp_put(sub);
+		spin_lock_bh(&subscriber->lock);
+
+		if (s)
+			break;
+	}
+	spin_unlock_bh(&subscriber->lock);
+}
+
 static struct tipc_subscriber *tipc_subscrb_create(int conid)
 {
 	struct tipc_subscriber *subscriber;
@@ -177,8 +226,8 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
 		pr_warn("Subscriber rejected, no memory\n");
 		return NULL;
 	}
-	kref_init(&subscriber->kref);
 	INIT_LIST_HEAD(&subscriber->subscrp_list);
+	kref_init(&subscriber->kref);
 	subscriber->conid = conid;
 	spin_lock_init(&subscriber->lock);
 
@@ -187,55 +236,22 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
 
 static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
 {
-	struct tipc_subscription *sub, *temp;
-	u32 timeout;
-
-	spin_lock_bh(&subscriber->lock);
-	/* Destroy any existing subscriptions for subscriber */
-	list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
-				 subscrp_list) {
-		timeout = htohl(sub->evt.s.timeout, sub->swap);
-		if ((timeout == TIPC_WAIT_FOREVER) || del_timer(&sub->timer)) {
-			tipc_subscrp_delete(sub);
-			tipc_subscrb_put(subscriber);
-		}
-	}
-	spin_unlock_bh(&subscriber->lock);
-
+	tipc_subscrb_subscrp_delete(subscriber, NULL);
 	tipc_subscrb_put(subscriber);
 }
 
 static void tipc_subscrp_delete(struct tipc_subscription *sub)
 {
-	struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+	u32 timeout = htohl(sub->evt.s.timeout, sub->swap);
 
-	tipc_nametbl_unsubscribe(sub);
-	list_del(&sub->subscrp_list);
-	kfree(sub);
-	atomic_dec(&tn->subscription_count);
+	if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer))
+		tipc_subscrp_put(sub);
 }
 
 static void tipc_subscrp_cancel(struct tipc_subscr *s,
 				struct tipc_subscriber *subscriber)
 {
-	struct tipc_subscription *sub, *temp;
-	u32 timeout;
-
-	spin_lock_bh(&subscriber->lock);
-	/* Find first matching subscription, exit if not found */
-	list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
-				 subscrp_list) {
-		if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
-			timeout = htohl(sub->evt.s.timeout, sub->swap);
-			if ((timeout == TIPC_WAIT_FOREVER) ||
-			    del_timer(&sub->timer)) {
-				tipc_subscrp_delete(sub);
-				tipc_subscrb_put(subscriber);
-			}
-			break;
-		}
-	}
-	spin_unlock_bh(&subscriber->lock);
+	tipc_subscrb_subscrp_delete(subscriber, s);
 }
 
 static struct tipc_subscription *tipc_subscrp_create(struct net *net,
@@ -272,6 +288,7 @@ static struct tipc_subscription *tipc_subscrp_create(struct net *net,
 	sub->swap = swap;
 	memcpy(&sub->evt.s, s, sizeof(*s));
 	atomic_inc(&tn->subscription_count);
+	kref_init(&sub->kref);
 	return sub;
 }
 
@@ -288,17 +305,16 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
 
 	spin_lock_bh(&subscriber->lock);
 	list_add(&sub->subscrp_list, &subscriber->subscrp_list);
-	tipc_subscrb_get(subscriber);
 	sub->subscriber = subscriber;
 	tipc_nametbl_subscribe(sub);
+	tipc_subscrb_get(subscriber);
 	spin_unlock_bh(&subscriber->lock);
 
+	setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
 	timeout = htohl(sub->evt.s.timeout, swap);
-	if (timeout == TIPC_WAIT_FOREVER)
-		return;
 
-	setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
-	mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
+	if (timeout != TIPC_WAIT_FOREVER)
+		mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
 }
 
 /* Handle one termination request for the subscriber */
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index be60103082c9..ffdc214c117a 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -57,6 +57,7 @@ struct tipc_subscriber;
  * @evt: template for events generated by subscription
  */
 struct tipc_subscription {
+	struct kref kref;
 	struct tipc_subscriber *subscriber;
 	struct net *net;
 	struct timer_list timer;
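With the kref embedded, each path that can race with teardown (timer expiry, overlap reporting, cancel) brackets its work with get/put, and the release callback performs the single teardown. A self-contained sketch of that lifecycle, with illustrative names:

#include <linux/kref.h>
#include <linux/slab.h>

struct subscription {
	struct kref kref;
	/* ... list linkage, timer, owning subscriber ... */
};

static void subscription_release(struct kref *kref)
{
	struct subscription *sub =
		container_of(kref, struct subscription, kref);

	/* unlink from the owner and drop accounting, then free */
	kfree(sub);
}

static void report_event(struct subscription *sub)
{
	kref_get(&sub->kref);	/* keep sub alive while we use it */
	/* ... build and send the event ... */
	kref_put(&sub->kref, subscription_release);
}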
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 127656ebe7be..cef79873b09d 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -995,6 +995,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	unsigned int hash;
 	struct unix_address *addr;
 	struct hlist_head *list;
+	struct path path = { NULL, NULL };
 
 	err = -EINVAL;
 	if (sunaddr->sun_family != AF_UNIX)
@@ -1010,9 +1011,20 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		goto out;
 	addr_len = err;
 
+	if (sun_path[0]) {
+		umode_t mode = S_IFSOCK |
+		       (SOCK_INODE(sock)->i_mode & ~current_umask());
+		err = unix_mknod(sun_path, mode, &path);
+		if (err) {
+			if (err == -EEXIST)
+				err = -EADDRINUSE;
+			goto out;
+		}
+	}
+
 	err = mutex_lock_interruptible(&u->bindlock);
 	if (err)
-		goto out;
+		goto out_put;
 
 	err = -EINVAL;
 	if (u->addr)
@@ -1029,16 +1041,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	atomic_set(&addr->refcnt, 1);
 
 	if (sun_path[0]) {
-		struct path path;
-		umode_t mode = S_IFSOCK |
-		       (SOCK_INODE(sock)->i_mode & ~current_umask());
-		err = unix_mknod(sun_path, mode, &path);
-		if (err) {
-			if (err == -EEXIST)
-				err = -EADDRINUSE;
-			unix_release_addr(addr);
-			goto out_up;
-		}
 		addr->hash = UNIX_HASH_SIZE;
 		hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
 		spin_lock(&unix_table_lock);
@@ -1065,6 +1067,9 @@ out_unlock:
 	spin_unlock(&unix_table_lock);
 out_up:
 	mutex_unlock(&u->bindlock);
+out_put:
+	if (err)
+		path_put(&path);
 out:
 	return err;
 }
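The filesystem node is now created before u->bindlock is taken, which removes the bindlock -> filesystem-lock ordering the old code imposed, and the error path drops the path reference only after the mutex is released. A sketch of the ordering, with hypothetical names around the real path_put():

int bind_with_fs_node(struct example_sock *u, const char *name)
{
	struct path path = { NULL, NULL };
	int err;

	err = create_fs_node(name, &path);	/* no bindlock held yet */
	if (err)
		return err == -EEXIST ? -EADDRINUSE : err;

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		goto out_put;
	/* ... publish the address under the lock ... */
	mutex_unlock(&u->bindlock);
out_put:
	if (err)
		path_put(&path);	/* undo the node we created */
	return err;
}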
diff --git a/samples/bpf/tc_l2_redirect_kern.c b/samples/bpf/tc_l2_redirect_kern.c
index 92a44729dbe4..7ef2a12b25b2 100644
--- a/samples/bpf/tc_l2_redirect_kern.c
+++ b/samples/bpf/tc_l2_redirect_kern.c
@@ -4,6 +4,7 @@
  * modify it under the terms of version 2 of the GNU General Public
  * License as published by the Free Software Foundation.
  */
+#define KBUILD_MODNAME "foo"
 #include <uapi/linux/bpf.h>
 #include <uapi/linux/if_ether.h>
 #include <uapi/linux/if_packet.h>
diff --git a/samples/bpf/xdp_tx_iptunnel_kern.c b/samples/bpf/xdp_tx_iptunnel_kern.c
index 85c38ecd3a2d..0f4f6e8c8611 100644
--- a/samples/bpf/xdp_tx_iptunnel_kern.c
+++ b/samples/bpf/xdp_tx_iptunnel_kern.c
@@ -8,6 +8,7 @@
  * encapsulating the incoming packet in an IPv4/v6 header
  * and then XDP_TX it out.
  */
+#define KBUILD_MODNAME "foo"
 #include <uapi/linux/bpf.h>
 #include <linux/in.h>
 #include <linux/if_ether.h>
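Both samples compile against kernel-internal headers that reference KBUILD_MODNAME, a macro normally injected by kbuild for in-tree objects; defining a dummy name first appears to be what keeps the standalone clang build of these files working. Any sample in the same situation can use the same guard (the name itself is arbitrary):

#define KBUILD_MODNAME "bpf_sample"	/* placeholder, required by headers */
#include <uapi/linux/bpf.h>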
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 4a57c8a60bd9..6a6f44dd594b 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -610,6 +610,33 @@ error:
 	return ret ? : -ENOENT;
 }
 
+/* Adjust symbol name and address */
+static int post_process_probe_trace_point(struct probe_trace_point *tp,
+					  struct map *map, unsigned long offs)
+{
+	struct symbol *sym;
+	u64 addr = tp->address + tp->offset - offs;
+
+	sym = map__find_symbol(map, addr);
+	if (!sym)
+		return -ENOENT;
+
+	if (strcmp(sym->name, tp->symbol)) {
+		/* If we have no realname, use symbol for it */
+		if (!tp->realname)
+			tp->realname = tp->symbol;
+		else
+			free(tp->symbol);
+		tp->symbol = strdup(sym->name);
+		if (!tp->symbol)
+			return -ENOMEM;
+	}
+	tp->offset = addr - sym->start;
+	tp->address -= offs;
+
+	return 0;
+}
+
 /*
  * Rename DWARF symbols to ELF symbols -- gcc sometimes optimizes functions
  * and generate new symbols with suffixes such as .constprop.N or .isra.N
@@ -622,11 +649,9 @@ static int
 post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
 					int ntevs, const char *pathname)
 {
-	struct symbol *sym;
 	struct map *map;
 	unsigned long stext = 0;
-	u64 addr;
-	int i;
+	int i, ret = 0;
 
 	/* Prepare a map for offline binary */
 	map = dso__new_map(pathname);
@@ -636,23 +661,14 @@ post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
 	}
 
 	for (i = 0; i < ntevs; i++) {
-		addr = tevs[i].point.address + tevs[i].point.offset - stext;
-		sym = map__find_symbol(map, addr);
-		if (!sym)
-			continue;
-		if (!strcmp(sym->name, tevs[i].point.symbol))
-			continue;
-		/* If we have no realname, use symbol for it */
-		if (!tevs[i].point.realname)
-			tevs[i].point.realname = tevs[i].point.symbol;
-		else
-			free(tevs[i].point.symbol);
-		tevs[i].point.symbol = strdup(sym->name);
-		tevs[i].point.offset = addr - sym->start;
+		ret = post_process_probe_trace_point(&tevs[i].point,
+						     map, stext);
+		if (ret < 0)
+			break;
 	}
 	map__put(map);
 
-	return 0;
+	return ret;
 }
 
 static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
@@ -682,18 +698,31 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
 	return ret;
 }
 
-static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
-					    int ntevs, const char *module)
+static int
+post_process_module_probe_trace_events(struct probe_trace_event *tevs,
+				       int ntevs, const char *module,
+				       struct debuginfo *dinfo)
 {
+	Dwarf_Addr text_offs = 0;
 	int i, ret = 0;
 	char *mod_name = NULL;
+	struct map *map;
 
 	if (!module)
 		return 0;
 
-	mod_name = find_module_name(module);
+	map = get_target_map(module, false);
+	if (!map || debuginfo__get_text_offset(dinfo, &text_offs, true) < 0) {
+		pr_warning("Failed to get ELF symbols for %s\n", module);
+		return -EINVAL;
+	}
 
+	mod_name = find_module_name(module);
 	for (i = 0; i < ntevs; i++) {
+		ret = post_process_probe_trace_point(&tevs[i].point,
+						map, (unsigned long)text_offs);
+		if (ret < 0)
+			break;
 		tevs[i].point.module =
 			strdup(mod_name ? mod_name : module);
 		if (!tevs[i].point.module) {
@@ -703,6 +732,8 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
 	}
 
 	free(mod_name);
+	map__put(map);
+
 	return ret;
 }
 
@@ -760,7 +791,7 @@ arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unuse
 static int post_process_probe_trace_events(struct perf_probe_event *pev,
 					   struct probe_trace_event *tevs,
 					   int ntevs, const char *module,
-					   bool uprobe)
+					   bool uprobe, struct debuginfo *dinfo)
 {
 	int ret;
 
@@ -768,7 +799,8 @@ static int post_process_probe_trace_events(struct perf_probe_event *pev,
 		ret = add_exec_to_probe_trace_events(tevs, ntevs, module);
 	else if (module)
 		/* Currently ref_reloc_sym based probe is not for drivers */
-		ret = add_module_to_probe_trace_events(tevs, ntevs, module);
+		ret = post_process_module_probe_trace_events(tevs, ntevs,
+							     module, dinfo);
 	else
 		ret = post_process_kernel_probe_trace_events(tevs, ntevs);
 
@@ -812,30 +844,27 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
 		}
 	}
 
-	debuginfo__delete(dinfo);
-
 	if (ntevs > 0) {	/* Succeeded to find trace events */
 		pr_debug("Found %d probe_trace_events.\n", ntevs);
 		ret = post_process_probe_trace_events(pev, *tevs, ntevs,
-						pev->target, pev->uprobes);
+						pev->target, pev->uprobes, dinfo);
 		if (ret < 0 || ret == ntevs) {
+			pr_debug("Post processing failed or all events are skipped. (%d)\n", ret);
 			clear_probe_trace_events(*tevs, ntevs);
 			zfree(tevs);
+			ntevs = 0;
 		}
-		if (ret != ntevs)
-			return ret < 0 ? ret : ntevs;
-		ntevs = 0;
-		/* Fall through */
 	}
 
+	debuginfo__delete(dinfo);
+
 	if (ntevs == 0) {	/* No error but failed to find probe point. */
 		pr_warning("Probe point '%s' not found.\n",
 			   synthesize_perf_probe_point(&pev->point));
 		return -ENOENT;
-	}
+	} else if (ntevs < 0) {
 	/* Error path : ntevs < 0 */
 	pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
-	if (ntevs < 0) {
 		if (ntevs == -EBADF)
 			pr_warning("Warning: No dwarf info found in the vmlinux - "
 				   "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n");
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index df4debe564da..0d9d6e0803b8 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -1501,7 +1501,8 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg,
 }
 
 /* For the kernel module, we need a special code to get a DIE */
-static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
+int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+			       bool adjust_offset)
 {
 	int n, i;
 	Elf32_Word shndx;
@@ -1530,6 +1531,8 @@ static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
 			if (!shdr)
 				return -ENOENT;
 			*offs = shdr->sh_addr;
+			if (adjust_offset)
+				*offs -= shdr->sh_offset;
 		}
 	}
 	return 0;
@@ -1543,16 +1546,12 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
 	Dwarf_Addr _addr = 0, baseaddr = 0;
 	const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
 	int baseline = 0, lineno = 0, ret = 0;
-	bool reloc = false;
 
-retry:
+	/* We always need to relocate the address for aranges */
+	if (debuginfo__get_text_offset(dbg, &baseaddr, false) == 0)
+		addr += baseaddr;
 	/* Find cu die */
 	if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
-		if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) {
-			addr += baseaddr;
-			reloc = true;
-			goto retry;
-		}
 		pr_warning("Failed to find debug information for address %lx\n",
 			   addr);
 		ret = -EINVAL;
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index f1d8558f498e..2956c5198652 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -46,6 +46,9 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
 int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
 				struct perf_probe_point *ppt);
 
+int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+			       bool adjust_offset);
+
 /* Find a line range */
 int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr);
 
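debuginfo__get_text_offset() is now exported, and when adjust_offset is true it subtracts the .text section's file offset from sh_addr, so callers can rebase file-relative addresses for modules. A hedged sketch of a caller, mirroring the probe-event usage above:

Dwarf_Addr text_offs = 0;
u64 addr;

if (debuginfo__get_text_offset(dinfo, &text_offs, true) < 0)
	return -EINVAL;		/* no usable .text section info */

/* a point recorded against the file can now be rebased: */
addr = tp->address + tp->offset - (unsigned long)text_offs;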
51 54
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
index b13fed534d76..9f7bd1915c21 100644
--- a/tools/testing/selftests/bpf/test_lru_map.c
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -67,21 +67,23 @@ static int map_equal(int lru_map, int expected)
 	return map_subset(lru_map, expected) && map_subset(expected, lru_map);
 }
 
-static int sched_next_online(int pid, int next_to_try)
+static int sched_next_online(int pid, int *next_to_try)
 {
 	cpu_set_t cpuset;
+	int next = *next_to_try;
+	int ret = -1;
 
-	if (next_to_try == nr_cpus)
-		return -1;
-
-	while (next_to_try < nr_cpus) {
+	while (next < nr_cpus) {
 		CPU_ZERO(&cpuset);
-		CPU_SET(next_to_try++, &cpuset);
-		if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset))
+		CPU_SET(next++, &cpuset);
+		if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
+			ret = 0;
 			break;
+		}
 	}
 
-	return next_to_try;
+	*next_to_try = next;
+	return ret;
 }
 
 /* Size of the LRU amp is 2
@@ -96,11 +98,12 @@ static void test_lru_sanity0(int map_type, int map_flags)
 {
 	unsigned long long key, value[nr_cpus];
 	int lru_map_fd, expected_map_fd;
+	int next_cpu = 0;
 
 	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
 	       map_flags);
 
-	assert(sched_next_online(0, 0) != -1);
+	assert(sched_next_online(0, &next_cpu) != -1);
 
 	if (map_flags & BPF_F_NO_COMMON_LRU)
 		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
@@ -183,6 +186,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
 	int lru_map_fd, expected_map_fd;
 	unsigned int batch_size;
 	unsigned int map_size;
+	int next_cpu = 0;
 
 	if (map_flags & BPF_F_NO_COMMON_LRU)
 		/* Ther percpu lru list (i.e each cpu has its own LRU
@@ -196,7 +200,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
 	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
 	       map_flags);
 
-	assert(sched_next_online(0, 0) != -1);
+	assert(sched_next_online(0, &next_cpu) != -1);
 
 	batch_size = tgt_free / 2;
 	assert(batch_size * 2 == tgt_free);
@@ -262,6 +266,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
 	int lru_map_fd, expected_map_fd;
 	unsigned int batch_size;
 	unsigned int map_size;
+	int next_cpu = 0;
 
 	if (map_flags & BPF_F_NO_COMMON_LRU)
 		/* Ther percpu lru list (i.e each cpu has its own LRU
@@ -275,7 +280,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
 	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
 	       map_flags);
 
-	assert(sched_next_online(0, 0) != -1);
+	assert(sched_next_online(0, &next_cpu) != -1);
 
 	batch_size = tgt_free / 2;
 	assert(batch_size * 2 == tgt_free);
@@ -370,11 +375,12 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
 	int lru_map_fd, expected_map_fd;
 	unsigned int batch_size;
 	unsigned int map_size;
+	int next_cpu = 0;
 
 	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
 	       map_flags);
 
-	assert(sched_next_online(0, 0) != -1);
+	assert(sched_next_online(0, &next_cpu) != -1);
 
 	batch_size = tgt_free / 2;
 	assert(batch_size * 2 == tgt_free);
@@ -430,11 +436,12 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
 	int lru_map_fd, expected_map_fd;
 	unsigned long long key, value[nr_cpus];
 	unsigned long long end_key;
+	int next_cpu = 0;
 
 	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
 	       map_flags);
 
-	assert(sched_next_online(0, 0) != -1);
+	assert(sched_next_online(0, &next_cpu) != -1);
 
 	if (map_flags & BPF_F_NO_COMMON_LRU)
 		lru_map_fd = create_map(map_type, map_flags,
@@ -502,9 +509,8 @@ static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
 static void test_lru_sanity5(int map_type, int map_flags)
 {
 	unsigned long long key, value[nr_cpus];
-	int next_sched_cpu = 0;
+	int next_cpu = 0;
 	int map_fd;
-	int i;
 
 	if (map_flags & BPF_F_NO_COMMON_LRU)
 		return;
@@ -519,27 +525,20 @@ static void test_lru_sanity5(int map_type, int map_flags)
 	key = 0;
 	assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));
 
-	for (i = 0; i < nr_cpus; i++) {
+	while (sched_next_online(0, &next_cpu) != -1) {
 		pid_t pid;
 
 		pid = fork();
 		if (pid == 0) {
-			next_sched_cpu = sched_next_online(0, next_sched_cpu);
-			if (next_sched_cpu != -1)
-				do_test_lru_sanity5(key, map_fd);
+			do_test_lru_sanity5(key, map_fd);
 			exit(0);
 		} else if (pid == -1) {
-			printf("couldn't spawn #%d process\n", i);
+			printf("couldn't spawn process to test key:%llu\n",
+			       key);
 			exit(1);
 		} else {
 			int status;
 
-			/* It is mostly redundant and just allow the parent
-			 * process to update next_shced_cpu for the next child
-			 * process
-			 */
-			next_sched_cpu = sched_next_online(pid, next_sched_cpu);
-
 			assert(waitpid(pid, &status, 0) == pid);
 			assert(status == 0);
 			key++;
@@ -547,6 +546,8 @@ static void test_lru_sanity5(int map_type, int map_flags)
 	}
 
 	close(map_fd);
+	/* At least one key should be tested */
+	assert(key > 0);
 
 	printf("Pass\n");
 }
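After the change, sched_next_online() reports success with 0/-1 and advances the caller's cursor through a pointer, so test_lru_sanity5() simply forks once per CPU it can actually bind to. A small userspace sketch of the same out-parameter iterator and its loop:

#include <sched.h>

/* 0 on success, -1 when CPUs are exhausted; *next advances either way. */
static int next_online(int pid, int *next, int nr_cpus)
{
	cpu_set_t set;
	int cpu = *next, ret = -1;

	while (cpu < nr_cpus) {
		CPU_ZERO(&set);
		CPU_SET(cpu++, &set);
		if (!sched_setaffinity(pid, sizeof(set), &set)) {
			ret = 0;
			break;
		}
	}
	*next = cpu;
	return ret;
}

/* usage: int next_cpu = 0; while (next_online(0, &next_cpu, n) != -1) ... */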
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
index c22860ab9733..30e1ac62e8cb 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
@@ -66,7 +66,7 @@ int pmc56_overflow(void)
 
 	FAIL_IF(ebb_event_enable(&event));
 
-	mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
+	mtspr(SPRN_PMC2, pmc_sample_period(sample_period));
 	mtspr(SPRN_PMC5, 0);
 	mtspr(SPRN_PMC6, 0);
 
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
index 34e63cc4c572..14142faf040b 100644
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -26,6 +26,16 @@ static inline void wait_cycles(unsigned long long cycles)
 #define VMEXIT_CYCLES 500
 #define VMENTRY_CYCLES 500
 
+#elif defined(__s390x__)
+static inline void wait_cycles(unsigned long long cycles)
+{
+	asm volatile("0: brctg %0,0b" : : "d" (cycles));
+}
+
+/* tweak me */
+#define VMEXIT_CYCLES 200
+#define VMENTRY_CYCLES 200
+
 #else
 static inline void wait_cycles(unsigned long long cycles)
 {
@@ -81,6 +91,8 @@ extern unsigned ring_size;
 /* Is there a portable way to do this? */
 #if defined(__x86_64__) || defined(__i386__)
 #define cpu_relax() asm ("rep; nop" ::: "memory")
+#elif defined(__s390x__)
+#define cpu_relax() barrier()
 #else
 #define cpu_relax() assert(0)
 #endif
diff --git a/tools/virtio/ringtest/run-on-all.sh b/tools/virtio/ringtest/run-on-all.sh
index 2e69ca812b4c..29b0d3920bfc 100755
--- a/tools/virtio/ringtest/run-on-all.sh
+++ b/tools/virtio/ringtest/run-on-all.sh
@@ -1,12 +1,13 @@
 #!/bin/sh
 
+CPUS_ONLINE=$(lscpu --online -p=cpu|grep -v -e '#')
 #use last CPU for host. Why not the first?
 #many devices tend to use cpu0 by default so
 #it tends to be busier
-HOST_AFFINITY=$(lscpu -p=cpu | tail -1)
+HOST_AFFINITY=$(echo "${CPUS_ONLINE}"|tail -n 1)
 
 #run command on all cpus
-for cpu in $(seq 0 $HOST_AFFINITY)
+for cpu in $CPUS_ONLINE
 do
 	#Don't run guest and host on same CPU
 	#It actually works ok if using signalling
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index a2dbbccbb6a3..6a084cd57b88 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -24,6 +24,7 @@
 
 #include <clocksource/arm_arch_timer.h>
 #include <asm/arch_timer.h>
+#include <asm/kvm_hyp.h>
 
 #include <kvm/arm_vgic.h>
 #include <kvm/arm_arch_timer.h>
@@ -89,9 +90,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
 	struct kvm_vcpu *vcpu;
 
 	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
-	vcpu->arch.timer_cpu.armed = false;
-
-	WARN_ON(!kvm_timer_should_fire(vcpu));
 
 	/*
 	 * If the vcpu is blocked we want to wake it up so that it will see
@@ -512,3 +510,25 @@ void kvm_timer_init(struct kvm *kvm)
 {
 	kvm->arch.timer.cntvoff = kvm_phys_timer_read();
 }
+
+/*
+ * On VHE system, we only need to configure trap on physical timer and counter
+ * accesses in EL0 and EL1 once, not for every world switch.
+ * The host kernel runs at EL2 with HCR_EL2.TGE == 1,
+ * and this makes those bits have no effect for the host kernel execution.
+ */
+void kvm_timer_init_vhe(void)
+{
+	/* When HCR_EL2.E2H ==1, EL1PCEN and EL1PCTEN are shifted by 10 */
+	u32 cnthctl_shift = 10;
+	u64 val;
+
+	/*
+	 * Disallow physical timer access for the guest.
+	 * Physical counter access is allowed.
+	 */
+	val = read_sysreg(cnthctl_el2);
+	val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift);
+	val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
+	write_sysreg(val, cnthctl_el2);
+}
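The shift encodes the VHE register layout: with HCR_EL2.E2H == 1 the EL1PCTEN/EL1PCEN controls move from bits 0/1 of CNTHCTL_EL2 up to bits 10/11. A standalone restatement of the bit arithmetic, assuming the usual non-VHE definitions of the two flags:

#define CNTHCTL_EL1PCTEN	(1 << 0)	/* physical counter */
#define CNTHCTL_EL1PCEN		(1 << 1)	/* physical timer */

static inline u64 vhe_trap_physical_timer(u64 cnthctl)
{
	const u32 shift = 10;	/* E2H == 1 repositions the bits */

	cnthctl &= ~((u64)CNTHCTL_EL1PCEN << shift);	/* trap the timer */
	cnthctl |= (u64)CNTHCTL_EL1PCTEN << shift;	/* allow the counter */
	return cnthctl;
}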
diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c
index 798866a8d875..63e28dd18bb0 100644
--- a/virt/kvm/arm/hyp/timer-sr.c
+++ b/virt/kvm/arm/hyp/timer-sr.c
@@ -35,10 +35,16 @@ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
 	/* Disable the virtual timer */
 	write_sysreg_el0(0, cntv_ctl);
 
-	/* Allow physical timer/counter access for the host */
-	val = read_sysreg(cnthctl_el2);
-	val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
-	write_sysreg(val, cnthctl_el2);
+	/*
+	 * We don't need to do this for VHE since the host kernel runs in EL2
+	 * with HCR_EL2.TGE ==1, which makes those bits have no impact.
+	 */
+	if (!has_vhe()) {
+		/* Allow physical timer/counter access for the host */
+		val = read_sysreg(cnthctl_el2);
+		val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+		write_sysreg(val, cnthctl_el2);
+	}
 
 	/* Clear cntvoff for the host */
 	write_sysreg(0, cntvoff_el2);
@@ -50,14 +56,17 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 	u64 val;
 
-	/*
-	 * Disallow physical timer access for the guest
-	 * Physical counter access is allowed
-	 */
-	val = read_sysreg(cnthctl_el2);
-	val &= ~CNTHCTL_EL1PCEN;
-	val |= CNTHCTL_EL1PCTEN;
-	write_sysreg(val, cnthctl_el2);
+	/* Those bits are already configured at boot on VHE-system */
+	if (!has_vhe()) {
+		/*
+		 * Disallow physical timer access for the guest
+		 * Physical counter access is allowed
+		 */
+		val = read_sysreg(cnthctl_el2);
+		val &= ~CNTHCTL_EL1PCEN;
+		val |= CNTHCTL_EL1PCTEN;
+		write_sysreg(val, cnthctl_el2);
+	}
 
 	if (timer->enabled) {
 		write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 5114391b7e5a..c737ea0a310a 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -268,15 +268,11 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 
-	mutex_lock(&kvm->lock);
-
 	dist->ready = false;
 	dist->initialized = false;
 
 	kfree(dist->spis);
 	dist->nr_spis = 0;
-
-	mutex_unlock(&kvm->lock);
 }
 
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -286,7 +282,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
 }
 
-void kvm_vgic_destroy(struct kvm *kvm)
+/* To be called with kvm->lock held */
+static void __kvm_vgic_destroy(struct kvm *kvm)
 {
 	struct kvm_vcpu *vcpu;
 	int i;
@@ -297,6 +294,13 @@ void kvm_vgic_destroy(struct kvm *kvm)
 		kvm_vgic_vcpu_destroy(vcpu);
 }
 
+void kvm_vgic_destroy(struct kvm *kvm)
+{
+	mutex_lock(&kvm->lock);
+	__kvm_vgic_destroy(kvm);
+	mutex_unlock(&kvm->lock);
+}
+
 /**
  * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
  * is a GICv2. A GICv3 must be explicitly initialized by the guest using the
@@ -348,6 +352,10 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 		ret = vgic_v2_map_resources(kvm);
 	else
 		ret = vgic_v3_map_resources(kvm);
+
+	if (ret)
+		__kvm_vgic_destroy(kvm);
+
 out:
 	mutex_unlock(&kvm->lock);
 	return ret;
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 9bab86757fa4..834137e7b83f 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -293,8 +293,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
 	dist->ready = true;
 
 out:
-	if (ret)
-		kvm_vgic_destroy(kvm);
 	return ret;
 }
 
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 5c9f9745e6ca..e6b03fd8c374 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -302,8 +302,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
 	dist->ready = true;
 
 out:
-	if (ret)
-		kvm_vgic_destroy(kvm);
 	return ret;
 }
 
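With the v2/v3 map_resources error paths gone, cleanup after a failed mapping happens once in kvm_vgic_map_resources(), which already holds kvm->lock, via the new lock-free __kvm_vgic_destroy(); the public kvm_vgic_destroy() takes the lock itself. A sketch of that locked/unlocked split (do_map and the teardown body are hypothetical):

static void __teardown(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->lock);
	/* ... free per-VM and per-vcpu interrupt state ... */
}

void teardown(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	__teardown(kvm);
	mutex_unlock(&kvm->lock);
}

int map_resources(struct kvm *kvm)
{
	int ret;

	mutex_lock(&kvm->lock);
	ret = do_map(kvm);		/* hypothetical mapping step */
	if (ret)
		__teardown(kvm);	/* lock already held here */
	mutex_unlock(&kvm->lock);
	return ret;
}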