aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt6
-rw-r--r--Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt1
-rw-r--r--Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt1
-rw-r--r--Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt2
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt39
-rw-r--r--Documentation/devicetree/bindings/rng/omap_rng.txt3
-rw-r--r--Documentation/extcon/intel-int3496.txt5
-rw-r--r--Documentation/gcc-plugins.txt4
-rw-r--r--Documentation/virtual/kvm/api.txt63
-rw-r--r--MAINTAINERS13
-rw-r--r--Makefile16
-rw-r--r--arch/arc/boot/dts/skeleton.dtsi1
-rw-r--r--arch/arc/boot/dts/skeleton_hs.dtsi1
-rw-r--r--arch/arc/boot/dts/skeleton_hs_idu.dtsi21
-rw-r--r--arch/arc/boot/dts/vdk_axs10x_mb.dtsi20
-rw-r--r--arch/arc/include/asm/kprobes.h4
-rw-r--r--arch/arc/kernel/entry-arcv2.S12
-rw-r--r--arch/arc/kernel/setup.c16
-rw-r--r--arch/arc/mm/cache.c3
-rw-r--r--arch/arm/boot/dts/am335x-pcm-953.dtsi4
-rw-r--r--arch/arm/boot/dts/am57xx-idk-common.dtsi14
-rw-r--r--arch/arm/boot/dts/bcm5301x.dtsi4
-rw-r--r--arch/arm/boot/dts/bcm953012k.dts5
-rw-r--r--arch/arm/boot/dts/bcm958522er.dts1
-rw-r--r--arch/arm/boot/dts/bcm958525er.dts1
-rw-r--r--arch/arm/boot/dts/bcm958525xmc.dts1
-rw-r--r--arch/arm/boot/dts/bcm958622hr.dts1
-rw-r--r--arch/arm/boot/dts/bcm958623hr.dts1
-rw-r--r--arch/arm/boot/dts/bcm958625hr.dts1
-rw-r--r--arch/arm/boot/dts/bcm988312hr.dts1
-rw-r--r--arch/arm/boot/dts/imx6sx-udoo-neo.dtsi5
-rw-r--r--arch/arm/boot/dts/sama5d2.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-dbx5x0.dtsi19
-rw-r--r--arch/arm/boot/dts/ste-href.dtsi9
-rw-r--r--arch/arm/boot/dts/ste-snowball.dts9
-rw-r--r--arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-a23-a33.dtsi2
-rw-r--r--arch/arm/boot/dts/sun8i-a33.dtsi4
-rw-r--r--arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi7
-rw-r--r--arch/arm/configs/omap2plus_defconfig1
-rw-r--r--arch/arm/mach-at91/pm.c18
-rw-r--r--arch/arm/mach-omap2/Makefile3
-rw-r--r--arch/arm/mach-omap2/gpmc-nand.c154
-rw-r--r--arch/arm/mach-omap2/gpmc-onenand.c10
-rw-r--r--arch/arm/mach-omap2/omap-headsmp.S3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c45
-rw-r--r--arch/arm64/boot/dts/broadcom/ns2.dtsi11
-rw-r--r--arch/arm64/include/asm/current.h2
-rw-r--r--arch/arm64/include/asm/unistd.h2
-rw-r--r--arch/arm64/include/asm/unistd32.h8
-rw-r--r--arch/arm64/kernel/kaslr.c10
-rw-r--r--arch/arm64/kernel/smp.c2
-rw-r--r--arch/arm64/kernel/vdso/.gitignore1
-rw-r--r--arch/c6x/kernel/ptrace.c41
-rw-r--r--arch/h8300/kernel/ptrace.c8
-rw-r--r--arch/m68k/configs/amiga_defconfig14
-rw-r--r--arch/m68k/configs/apollo_defconfig14
-rw-r--r--arch/m68k/configs/atari_defconfig14
-rw-r--r--arch/m68k/configs/bvme6000_defconfig14
-rw-r--r--arch/m68k/configs/hp300_defconfig14
-rw-r--r--arch/m68k/configs/mac_defconfig14
-rw-r--r--arch/m68k/configs/multi_defconfig14
-rw-r--r--arch/m68k/configs/mvme147_defconfig14
-rw-r--r--arch/m68k/configs/mvme16x_defconfig14
-rw-r--r--arch/m68k/configs/q40_defconfig14
-rw-r--r--arch/m68k/configs/sun3_defconfig14
-rw-r--r--arch/m68k/configs/sun3x_defconfig14
-rw-r--r--arch/m68k/include/asm/bitops.h2
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h1
-rw-r--r--arch/m68k/kernel/syscalltable.S1
-rw-r--r--arch/metag/kernel/ptrace.c19
-rw-r--r--arch/mips/kernel/ptrace.c3
-rw-r--r--arch/nios2/kernel/prom.c7
-rw-r--r--arch/nios2/kernel/setup.c3
-rw-r--r--arch/parisc/include/asm/uaccess.h59
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c10
-rw-r--r--arch/parisc/kernel/process.c2
-rw-r--r--arch/parisc/lib/Makefile2
-rw-r--r--arch/parisc/lib/fixup.S98
-rw-r--r--arch/parisc/lib/lusercopy.S318
-rw-r--r--arch/parisc/lib/memcpy.c461
-rw-r--r--arch/parisc/mm/fault.c17
-rw-r--r--arch/powerpc/kernel/idle_book3s.S20
-rw-r--r--arch/powerpc/mm/init_64.c3
-rw-r--r--arch/s390/boot/compressed/misc.c35
-rw-r--r--arch/s390/include/asm/sections.h1
-rw-r--r--arch/s390/include/asm/uaccess.h2
-rw-r--r--arch/s390/kernel/smp.c5
-rw-r--r--arch/s390/kernel/vmlinux.lds.S2
-rw-r--r--arch/sparc/kernel/ptrace_64.c2
-rw-r--r--arch/x86/Makefile35
-rw-r--r--arch/x86/Makefile_32.cpu18
-rw-r--r--arch/x86/boot/compressed/error.c1
-rw-r--r--arch/x86/events/core.c9
-rw-r--r--arch/x86/include/asm/kvm_page_track.h1
-rw-r--r--arch/x86/include/asm/timer.h2
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h8
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c3
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c7
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c2
-rw-r--r--arch/x86/kernel/ftrace.c6
-rw-r--r--arch/x86/kernel/tsc.c4
-rw-r--r--arch/x86/kvm/i8259.c3
-rw-r--r--arch/x86/kvm/ioapic.c3
-rw-r--r--arch/x86/kvm/page_track.c8
-rw-r--r--arch/x86/kvm/svm.c3
-rw-r--r--arch/x86/kvm/vmx.c44
-rw-r--r--arch/x86/kvm/x86.c7
-rw-r--r--arch/x86/lib/memcpy_64.S2
-rw-r--r--arch/x86/mm/kaslr.c4
-rw-r--r--arch/x86/purgatory/Makefile1
-rw-r--r--arch/xtensa/include/asm/page.h13
-rw-r--r--arch/xtensa/include/uapi/asm/unistd.h5
-rw-r--r--arch/xtensa/kernel/traps.c6
-rw-r--r--block/blk-mq.c18
-rw-r--r--block/blk-stat.c4
-rw-r--r--crypto/lrw.c7
-rw-r--r--crypto/xts.c7
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpi_platform.c8
-rw-r--r--drivers/acpi/apei/ghes.c1
-rw-r--r--drivers/acpi/ioapic.c6
-rw-r--r--drivers/acpi/spcr.c2
-rw-r--r--drivers/auxdisplay/img-ascii-lcd.c1
-rw-r--r--drivers/block/nbd.c136
-rw-r--r--drivers/char/hw_random/amd-rng.c42
-rw-r--r--drivers/char/hw_random/geode-rng.c50
-rw-r--r--drivers/char/ppdev.c11
-rw-r--r--drivers/clk/clk.c3
-rw-r--r--drivers/clk/rockchip/clk-rk3036.c9
-rw-r--r--drivers/clk/sunxi-ng/Kconfig1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.c8
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c2
-rw-r--r--drivers/clocksource/clkevt-probe.c2
-rw-r--r--drivers/cpufreq/cpufreq.c41
-rw-r--r--drivers/cpufreq/intel_pstate.c167
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c18
-rw-r--r--drivers/cpuidle/sysfs.c12
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c1
-rw-r--r--drivers/crypto/ccp/ccp-dev.c5
-rw-r--r--drivers/crypto/ccp/ccp-dev.h5
-rw-r--r--drivers/crypto/ccp/ccp-dmaengine.c42
-rw-r--r--drivers/dma/bcm2835-dma.c5
-rw-r--r--drivers/dma/dmaengine.c2
-rw-r--r--drivers/edac/Kconfig10
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/i5000_edac.c2
-rw-r--r--drivers/edac/i5400_edac.c5
-rw-r--r--drivers/edac/pnd2_edac.c1546
-rw-r--r--drivers/edac/pnd2_edac.h301
-rw-r--r--drivers/edac/xgene_edac.c2
-rw-r--r--drivers/extcon/Kconfig2
-rw-r--r--drivers/extcon/extcon-intel-int3496.c39
-rw-r--r--drivers/firmware/efi/efi.c1
-rw-r--r--drivers/firmware/efi/esrt.c2
-rw-r--r--drivers/gpio/gpiolib-acpi.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c10
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c87
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c29
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c24
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c109
-rw-r--r--drivers/gpu/drm/i915/gvt/debug.h8
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c16
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c29
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c82
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c55
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c39
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c38
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/render.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c113
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c7
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c24
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c52
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c83
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c1
-rw-r--r--drivers/gpu/drm/i915/intel_display.c9
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c2
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c5
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c24
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c14
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c3
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c5
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h3
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c29
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_audio.c7
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c6
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c4
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c10
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c14
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c79
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c31
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-xinmo.c1
-rw-r--r--drivers/hid/wacom_sys.c18
-rw-r--r--drivers/hv/channel.c25
-rw-r--r--drivers/hv/channel_mgmt.c27
-rw-r--r--drivers/hv/hv_fcopy.c4
-rw-r--r--drivers/hv/hv_kvp.c4
-rw-r--r--drivers/hv/hv_snapshot.c4
-rw-r--r--drivers/hv/hv_util.c2
-rw-r--r--drivers/hv/hv_utils_transport.c12
-rw-r--r--drivers/hv/hv_utils_transport.h1
-rw-r--r--drivers/hv/vmbus_drv.c6
-rw-r--r--drivers/hwmon/asus_atk0110.c3
-rw-r--r--drivers/hwmon/it87.c24
-rw-r--r--drivers/hwmon/max31790.c2
-rw-r--r--drivers/hwtracing/intel_th/core.c4
-rw-r--r--drivers/hwtracing/intel_th/pci.c10
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c34
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c13
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c6
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c4
-rw-r--r--drivers/iio/magnetometer/ak8974.c4
-rw-r--r--drivers/infiniband/core/cq.c10
-rw-r--r--drivers/infiniband/core/device.c29
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c8
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c17
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c42
-rw-r--r--drivers/infiniband/sw/rdmavt/mmap.c4
-rw-r--r--drivers/infiniband/sw/rxe/Kconfig2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mmap.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c9
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c8
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c3
-rw-r--r--drivers/input/misc/cm109.c4
-rw-r--r--drivers/input/misc/ims-pcu.c4
-rw-r--r--drivers/input/misc/yealink.c4
-rw-r--r--drivers/input/mouse/alps.c72
-rw-r--r--drivers/input/mouse/alps.h11
-rw-r--r--drivers/input/mouse/elan_i2c_core.c20
-rw-r--r--drivers/input/rmi4/rmi_f30.c4
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h14
-rw-r--r--drivers/input/tablet/hanwang.c3
-rw-r--r--drivers/input/tablet/kbtab.c3
-rw-r--r--drivers/input/touchscreen/sur40.c3
-rw-r--r--drivers/iommu/amd_iommu.c2
-rw-r--r--drivers/iommu/arm-smmu-v3.c2
-rw-r--r--drivers/iommu/arm-smmu.c2
-rw-r--r--drivers/iommu/exynos-iommu.c8
-rw-r--r--drivers/iommu/intel-iommu.c4
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c6
-rw-r--r--drivers/iommu/io-pgtable-arm.c6
-rw-r--r--drivers/iommu/iommu.c5
-rw-r--r--drivers/irqchip/Kconfig1
-rw-r--r--drivers/irqchip/irq-mips-gic.c4
-rw-r--r--drivers/isdn/capi/kcapi.c1
-rw-r--r--drivers/media/platform/coda/imx-vdoa.c2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c2
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-firmware.c22
-rw-r--r--drivers/memory/omap-gpmc.c4
-rw-r--r--drivers/misc/cxl/pci.c13
-rw-r--r--drivers/misc/mei/bus-fixup.c14
-rw-r--r--drivers/misc/mei/init.c8
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c4
-rw-r--r--drivers/mmc/core/block.c7
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/host/mtk-sd.c4
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c14
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c30
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c4
-rw-r--r--drivers/mmc/host/sdhci.c10
-rw-r--r--drivers/mmc/host/ushc.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c23
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c18
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c30
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c10
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c9
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c4
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c28
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c5
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c20
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c3
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c11
-rw-r--r--drivers/net/ethernet/ti/cpsw.c16
-rw-r--r--drivers/net/irda/vlsi_ir.c8
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/usb/cdc_ether.c15
-rw-r--r--drivers/net/usb/r8152.c19
-rw-r--r--drivers/nvme/host/rdma.c28
-rw-r--r--drivers/nvme/target/core.c11
-rw-r--r--drivers/nvme/target/loop.c90
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/nvme/target/rdma.c8
-rw-r--r--drivers/parport/share.c6
-rw-r--r--drivers/pci/host/pci-thunder-pem.c58
-rw-r--r--drivers/pci/host/pcie-iproc-bcma.c24
-rw-r--r--drivers/pci/host/pcie-iproc-platform.c19
-rw-r--r--drivers/pci/host/pcie-iproc.h1
-rw-r--r--drivers/phy/Kconfig9
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/phy-bcm-nsp-usb3.c177
-rw-r--r--drivers/phy/phy-exynos-pcie.c4
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c4
-rw-r--r--drivers/pinctrl/pinctrl-st.c30
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c30
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c4
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c13
-rw-r--r--drivers/pinctrl/ti/Kconfig2
-rw-r--r--drivers/ptp/ptp_kvm.c5
-rw-r--r--drivers/rapidio/devices/tsi721.c4
-rw-r--r--drivers/rapidio/devices/tsi721.h4
-rw-r--r--drivers/s390/crypto/pkey_api.c53
-rw-r--r--drivers/s390/net/qeth_core.h3
-rw-r--r--drivers/s390/net/qeth_core_main.c5
-rw-r--r--drivers/s390/net/qeth_l2_main.c5
-rw-r--r--drivers/s390/net/qeth_l3_main.c20
-rw-r--r--drivers/scsi/aacraid/commsup.c14
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c38
-rw-r--r--drivers/scsi/hpsa.c1
-rw-r--r--drivers/scsi/libsas/sas_ata.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h22
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c4
-rw-r--r--drivers/scsi/qedi/qedi_main.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c3
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c4
-rw-r--r--drivers/scsi/ufs/ufshcd.c2
-rw-r--r--drivers/thermal/cpu_cooling.c39
-rw-r--r--drivers/thermal/devfreq_cooling.c14
-rw-r--r--drivers/tty/serial/8250/8250_dw.c9
-rw-r--r--drivers/tty/serial/8250/Kconfig8
-rw-r--r--drivers/tty/serial/amba-pl011.c25
-rw-r--r--drivers/tty/serial/atmel_serial.c8
-rw-r--r--drivers/tty/serial/mxs-auart.c2
-rw-r--r--drivers/tty/tty_ldisc.c92
-rw-r--r--drivers/tty/vt/keyboard.c1
-rw-r--r--drivers/usb/class/usbtmc.c18
-rw-r--r--drivers/usb/core/config.c10
-rw-r--r--drivers/usb/core/hcd.c7
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/core/quirks.c8
-rw-r--r--drivers/usb/dwc3/gadget.c21
-rw-r--r--drivers/usb/gadget/function/f_acm.c4
-rw-r--r--drivers/usb/gadget/function/f_hid.c8
-rw-r--r--drivers/usb/gadget/function/f_uvc.c10
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c1
-rw-r--r--drivers/usb/host/xhci-plat.c1
-rw-r--r--drivers/usb/host/xhci-ring.c3
-rw-r--r--drivers/usb/host/xhci.c43
-rw-r--r--drivers/usb/misc/idmouse.c3
-rw-r--r--drivers/usb/misc/lvstest.c4
-rw-r--r--drivers/usb/misc/uss720.c5
-rw-r--r--drivers/usb/musb/musb_core.c2
-rw-r--r--drivers/usb/musb/musb_cppi41.c23
-rw-r--r--drivers/usb/musb/musb_dsps.c5
-rw-r--r--drivers/usb/phy/phy-isp1301.c2
-rw-r--r--drivers/usb/serial/option.c17
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/wusbcore/wa-hc.c3
-rw-r--r--drivers/uwb/hwa-rc.c3
-rw-r--r--drivers/uwb/i1480/dfu/usb.c3
-rw-r--r--drivers/vfio/vfio.c8
-rw-r--r--drivers/vfio/vfio_iommu_type1.c7
-rw-r--r--drivers/virtio/virtio_balloon.c19
-rw-r--r--drivers/virtio/virtio_pci_common.c9
-rw-r--r--drivers/xen/xen-acpi-processor.c36
-rw-r--r--fs/btrfs/ctree.h2
-rw-r--r--fs/btrfs/disk-io.c2
-rw-r--r--fs/btrfs/extent_io.c46
-rw-r--r--fs/btrfs/inode.c6
-rw-r--r--fs/btrfs/qgroup.c10
-rw-r--r--fs/btrfs/send.c7
-rw-r--r--fs/crypto/crypto.c10
-rw-r--r--fs/crypto/fname.c2
-rw-r--r--fs/crypto/fscrypt_private.h4
-rw-r--r--fs/crypto/keyinfo.c52
-rw-r--r--fs/crypto/policy.c7
-rw-r--r--fs/ext4/inline.c5
-rw-r--r--fs/ext4/inode.c2
-rw-r--r--fs/ext4/move_extent.c2
-rw-r--r--fs/ext4/super.c10
-rw-r--r--fs/ext4/xattr.c65
-rw-r--r--fs/hugetlbfs/inode.c25
-rw-r--r--fs/jbd2/journal.c22
-rw-r--r--fs/jbd2/revoke.c1
-rw-r--r--fs/kernfs/file.c3
-rw-r--r--fs/nfs/dir.c9
-rw-r--r--fs/nfs/filelayout/filelayout.c151
-rw-r--r--fs/nfs/filelayout/filelayout.h19
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c4
-rw-r--r--fs/nfs/nfs4proc.c9
-rw-r--r--fs/nfsd/nfsctl.c43
-rw-r--r--fs/nfsd/nfsproc.c1
-rw-r--r--fs/nfsd/nfssvc.c28
-rw-r--r--include/asm-generic/sections.h6
-rw-r--r--include/asm-generic/vmlinux.lds.h6
-rw-r--r--include/drm/ttm/ttm_object.h5
-rw-r--r--include/linux/ccp.h2
-rw-r--r--include/linux/clockchips.h2
-rw-r--r--include/linux/fscrypt_common.h1
-rw-r--r--include/linux/hwmon.h1
-rw-r--r--include/linux/hyperv.h10
-rw-r--r--include/linux/iio/sw_device.h2
-rw-r--r--include/linux/iommu.h18
-rw-r--r--include/linux/kasan.h3
-rw-r--r--include/linux/kvm_host.h4
-rw-r--r--include/linux/memcontrol.h6
-rw-r--r--include/linux/mfd/cros_ec.h3
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/omap-gpmc.h16
-rw-r--r--include/linux/reset.h9
-rw-r--r--include/linux/sched/clock.h13
-rw-r--r--include/linux/usb/quirks.h6
-rw-r--r--include/net/sctp/sctp.h22
-rw-r--r--include/net/sctp/structs.h11
-rw-r--r--include/rdma/ib_verbs.h30
-rw-r--r--include/uapi/asm-generic/unistd.h4
-rw-r--r--include/uapi/rdma/mlx5-abi.h3
-rw-r--r--include/video/exynos5433_decon.h12
-rw-r--r--init/main.c2
-rw-r--r--kernel/audit.c639
-rw-r--r--kernel/audit.h9
-rw-r--r--kernel/auditsc.c6
-rw-r--r--kernel/bpf/verifier.c64
-rw-r--r--kernel/padata.c5
-rw-r--r--kernel/sched/clock.c46
-rw-r--r--kernel/sched/cpufreq_schedutil.c20
-rw-r--r--lib/syscall.c1
-rw-r--r--lib/test_kasan.c10
-rw-r--r--mm/hugetlb.c10
-rw-r--r--mm/kasan/kasan.h5
-rw-r--r--mm/kasan/report.c36
-rw-r--r--mm/kmemleak.c2
-rw-r--r--mm/migrate.c7
-rw-r--r--mm/rmap.c4
-rw-r--r--mm/vmstat.c4
-rw-r--r--mm/workingset.c2
-rw-r--r--net/ceph/messenger.c6
-rw-r--r--net/core/flow_dissector.c2
-rw-r--r--net/core/neighbour.c3
-rw-r--r--net/core/secure_seq.c31
-rw-r--r--net/core/sysctl_net_core.c6
-rw-r--r--net/ipv4/ipconfig.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c20
-rw-r--r--net/ipv4/ping.c5
-rw-r--r--net/ipv4/tcp_input.c41
-rw-r--r--net/ipv4/tcp_recovery.c3
-rw-r--r--net/kcm/kcmsock.c6
-rw-r--r--net/l2tp/l2tp_core.c160
-rw-r--r--net/l2tp/l2tp_core.h9
-rw-r--r--net/l2tp/l2tp_debugfs.c10
-rw-r--r--net/l2tp/l2tp_eth.c10
-rw-r--r--net/l2tp/l2tp_ip.c22
-rw-r--r--net/l2tp/l2tp_ip6.c23
-rw-r--r--net/l2tp/l2tp_netlink.c52
-rw-r--r--net/l2tp/l2tp_ppp.c94
-rw-r--r--net/mac80211/iface.c3
-rw-r--r--net/netfilter/nf_conntrack_ecache.c2
-rw-r--r--net/netfilter/nf_conntrack_extend.c13
-rw-r--r--net/netfilter/nf_conntrack_netlink.c1
-rw-r--r--net/netfilter/nf_nat_core.c2
-rw-r--r--net/netfilter/nfnetlink_cthelper.c287
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c2
-rw-r--r--net/netfilter/nfnetlink_queue.c9
-rw-r--r--net/openvswitch/conntrack.c4
-rw-r--r--net/openvswitch/flow.c10
-rw-r--r--net/packet/af_packet.c8
-rw-r--r--net/sctp/associola.c13
-rw-r--r--net/sctp/input.c4
-rw-r--r--net/sctp/output.c69
-rw-r--r--net/sctp/outqueue.c3
-rw-r--r--net/sctp/proc.c4
-rw-r--r--net/sctp/sm_make_chunk.c9
-rw-r--r--net/sctp/sm_statefuns.c6
-rw-r--r--net/sctp/socket.c18
-rw-r--r--net/sctp/stream.c43
-rw-r--r--net/sctp/transport.c19
-rw-r--r--net/sunrpc/svcsock.c1
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c1
-rw-r--r--net/wireless/sysfs.c10
-rw-r--r--net/xfrm/xfrm_user.c9
-rw-r--r--scripts/Kbuild.include4
-rw-r--r--scripts/Makefile.lib2
-rw-r--r--scripts/kconfig/gconf.c2
-rw-r--r--sound/core/seq/seq_fifo.c4
-rw-r--r--sound/pci/hda/patch_realtek.c12
-rw-r--r--sound/soc/atmel/atmel-classd.c2
-rw-r--r--sound/soc/codecs/hdac_hdmi.c16
-rw-r--r--sound/soc/codecs/rt5665.c10
-rw-r--r--sound/soc/codecs/rt5665.h2
-rw-r--r--sound/soc/codecs/wm_adsp.c9
-rw-r--r--sound/soc/generic/simple-card-utils.c1
-rw-r--r--sound/soc/intel/skylake/skl-topology.c2
-rw-r--r--sound/soc/mediatek/Kconfig2
-rw-r--r--sound/soc/sh/rcar/cmd.c36
-rw-r--r--sound/soc/sh/rcar/dma.c18
-rw-r--r--sound/soc/sh/rcar/ssiu.c6
-rw-r--r--sound/soc/soc-core.c8
-rw-r--r--sound/soc/sti/uniperif_reader.c3
-rw-r--r--sound/soc/sunxi/sun8i-codec.c67
-rw-r--r--tools/include/linux/filter.h10
-rw-r--r--tools/testing/selftests/bpf/Makefile9
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c295
-rw-r--r--virt/kvm/eventfd.c3
-rw-r--r--virt/kvm/kvm_main.c44
551 files changed, 7439 insertions, 3798 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 2ba45caabada..facc20a3f962 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1725,6 +1725,12 @@
1725 kernel and module base offset ASLR (Address Space 1725 kernel and module base offset ASLR (Address Space
1726 Layout Randomization). 1726 Layout Randomization).
1727 1727
1728 kasan_multi_shot
1729 [KNL] Enforce KASAN (Kernel Address Sanitizer) to print
1730 report on every invalid memory access. Without this
1731 parameter KASAN will print report only for the first
1732 invalid access.
1733
1728 keepinitrd [HW,ARM] 1734 keepinitrd [HW,ARM]
1729 1735
1730 kernelcore= [KNL,X86,IA-64,PPC] 1736 kernelcore= [KNL,X86,IA-64,PPC]
diff --git a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
index 30c546900b60..07dbb358182c 100644
--- a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
@@ -45,7 +45,7 @@ The following clocks are available:
45 - 1 15 SATA 45 - 1 15 SATA
46 - 1 16 SATA USB 46 - 1 16 SATA USB
47 - 1 17 Main 47 - 1 17 Main
48 - 1 18 SD/MMC 48 - 1 18 SD/MMC/GOP
49 - 1 21 Slow IO (SPI, NOR, BootROM, I2C, UART) 49 - 1 21 Slow IO (SPI, NOR, BootROM, I2C, UART)
50 - 1 22 USB3H0 50 - 1 22 USB3H0
51 - 1 23 USB3H1 51 - 1 23 USB3H1
@@ -65,7 +65,7 @@ Required properties:
65 "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio", 65 "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
66 "cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none", 66 "cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
67 "cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata", 67 "cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
68 "cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io", 68 "cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
69 "cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197"; 69 "cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";
70 70
71Example: 71Example:
@@ -78,6 +78,6 @@ Example:
78 gate-clock-output-names = "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio", 78 gate-clock-output-names = "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
79 "cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none", 79 "cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
80 "cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata", 80 "cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
81 "cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io", 81 "cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
82 "cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197"; 82 "cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";
83 }; 83 };
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
index a78265993665..ca5204b3bc21 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
@@ -4,7 +4,6 @@ Required properties:
4 - compatible: value should be one of the following 4 - compatible: value should be one of the following
5 "samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */ 5 "samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */
6 "samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */ 6 "samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
7 "samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */
8 "samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */ 7 "samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
9 "samsung,exynos5422-mipi-dsi" /* for Exynos5422/5800 SoCs */ 8 "samsung,exynos5422-mipi-dsi" /* for Exynos5422/5800 SoCs */
10 "samsung,exynos5433-mipi-dsi" /* for Exynos5433 SoCs */ 9 "samsung,exynos5433-mipi-dsi" /* for Exynos5433 SoCs */
diff --git a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
index 18645e0228b0..5837402c3ade 100644
--- a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
+++ b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
@@ -11,7 +11,6 @@ Required properties:
11 "samsung,s5pv210-fimd"; /* for S5PV210 SoC */ 11 "samsung,s5pv210-fimd"; /* for S5PV210 SoC */
12 "samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */ 12 "samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */
13 "samsung,exynos4210-fimd"; /* for Exynos4 SoCs */ 13 "samsung,exynos4210-fimd"; /* for Exynos4 SoCs */
14 "samsung,exynos4415-fimd"; /* for Exynos4415 SoC */
15 "samsung,exynos5250-fimd"; /* for Exynos5250 SoCs */ 14 "samsung,exynos5250-fimd"; /* for Exynos5250 SoCs */
16 "samsung,exynos5420-fimd"; /* for Exynos5420/5422/5800 SoCs */ 15 "samsung,exynos5420-fimd"; /* for Exynos5420/5422/5800 SoCs */
17 16
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index ea9c1c9607f6..520d61dad6dd 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
@@ -13,7 +13,7 @@ Required Properties:
13 - "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following, 13 - "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following,
14 before RK3288 14 before RK3288
15 - "rockchip,rk3288-dw-mshc": for Rockchip RK3288 15 - "rockchip,rk3288-dw-mshc": for Rockchip RK3288
16 - "rockchip,rk1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK1108 16 - "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108
17 - "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036 17 - "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
18 - "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368 18 - "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
19 - "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399 19 - "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399
diff --git a/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt b/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt
deleted file mode 100644
index e68ae5dec9c9..000000000000
--- a/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt
+++ /dev/null
@@ -1,39 +0,0 @@
1Broadcom USB3 phy binding for northstar plus SoC
2The USB3 phy is internal to the SoC and is accessed using mdio interface.
3
4Required mdio bus properties:
5- reg: Should be 0x0 for SoC internal USB3 phy
6- #address-cells: must be 1
7- #size-cells: must be 0
8
9Required USB3 PHY properties:
10- compatible: should be "brcm,nsp-usb3-phy"
11- reg: USB3 Phy address on SoC internal MDIO bus and it should be 0x10.
12- usb3-ctrl-syscon: handler of syscon node defining physical address
13 of usb3 control register.
14- #phy-cells: must be 0
15
16Required usb3 control properties:
17- compatible: should be "brcm,nsp-usb3-ctrl"
18- reg: offset and length of the control registers
19
20Example:
21
22 mdio@0 {
23 reg = <0x0>;
24 #address-cells = <1>;
25 #size-cells = <0>;
26
27 usb3_phy: usb-phy@10 {
28 compatible = "brcm,nsp-usb3-phy";
29 reg = <0x10>;
30 usb3-ctrl-syscon = <&usb3_ctrl>;
31 #phy-cells = <0>;
32 status = "disabled";
33 };
34 };
35
36 usb3_ctrl: syscon@104408 {
37 compatible = "brcm,nsp-usb3-ctrl", "syscon";
38 reg = <0x104408 0x3fc>;
39 };
diff --git a/Documentation/devicetree/bindings/rng/omap_rng.txt b/Documentation/devicetree/bindings/rng/omap_rng.txt
index 471477299ece..9cf7876ab434 100644
--- a/Documentation/devicetree/bindings/rng/omap_rng.txt
+++ b/Documentation/devicetree/bindings/rng/omap_rng.txt
@@ -12,7 +12,8 @@ Required properties:
12- reg : Offset and length of the register set for the module 12- reg : Offset and length of the register set for the module
13- interrupts : the interrupt number for the RNG module. 13- interrupts : the interrupt number for the RNG module.
14 Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76" 14 Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76"
15- clocks: the trng clock source 15- clocks: the trng clock source. Only mandatory for the
16 "inside-secure,safexcel-eip76" compatible.
16 17
17Example: 18Example:
18/* AM335x */ 19/* AM335x */
diff --git a/Documentation/extcon/intel-int3496.txt b/Documentation/extcon/intel-int3496.txt
index af0b366c25b7..8155dbc7fad3 100644
--- a/Documentation/extcon/intel-int3496.txt
+++ b/Documentation/extcon/intel-int3496.txt
@@ -20,3 +20,8 @@ Index 1: The output gpio for enabling Vbus output from the device to the otg
20Index 2: The output gpio for muxing of the data pins between the USB host and 20Index 2: The output gpio for muxing of the data pins between the USB host and
21 the USB peripheral controller, write 1 to mux to the peripheral 21 the USB peripheral controller, write 1 to mux to the peripheral
22 controller 22 controller
23
24There is a mapping between indices and GPIO connection IDs as follows:
25 id index 0
26 vbus index 1
27 mux index 2
diff --git a/Documentation/gcc-plugins.txt b/Documentation/gcc-plugins.txt
index 891c69464434..433eaefb4aa1 100644
--- a/Documentation/gcc-plugins.txt
+++ b/Documentation/gcc-plugins.txt
@@ -18,8 +18,8 @@ because gcc versions 4.5 and 4.6 are compiled by a C compiler,
18gcc-4.7 can be compiled by a C or a C++ compiler, 18gcc-4.7 can be compiled by a C or a C++ compiler,
19and versions 4.8+ can only be compiled by a C++ compiler. 19and versions 4.8+ can only be compiled by a C++ compiler.
20 20
21Currently the GCC plugin infrastructure supports only the x86, arm and arm64 21Currently the GCC plugin infrastructure supports only the x86, arm, arm64 and
22architectures. 22powerpc architectures.
23 23
24This infrastructure was ported from grsecurity [6] and PaX [7]. 24This infrastructure was ported from grsecurity [6] and PaX [7].
25 25
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 3c248f772ae6..fd106899afd1 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -3377,6 +3377,69 @@ struct kvm_ppc_resize_hpt {
3377 __u32 pad; 3377 __u32 pad;
3378}; 3378};
3379 3379
33804.104 KVM_X86_GET_MCE_CAP_SUPPORTED
3381
3382Capability: KVM_CAP_MCE
3383Architectures: x86
3384Type: system ioctl
3385Parameters: u64 mce_cap (out)
3386Returns: 0 on success, -1 on error
3387
3388Returns supported MCE capabilities. The u64 mce_cap parameter
3389has the same format as the MSR_IA32_MCG_CAP register. Supported
3390capabilities will have the corresponding bits set.
3391
33924.105 KVM_X86_SETUP_MCE
3393
3394Capability: KVM_CAP_MCE
3395Architectures: x86
3396Type: vcpu ioctl
3397Parameters: u64 mcg_cap (in)
3398Returns: 0 on success,
3399 -EFAULT if u64 mcg_cap cannot be read,
3400 -EINVAL if the requested number of banks is invalid,
3401 -EINVAL if requested MCE capability is not supported.
3402
3403Initializes MCE support for use. The u64 mcg_cap parameter
3404has the same format as the MSR_IA32_MCG_CAP register and
3405specifies which capabilities should be enabled. The maximum
3406supported number of error-reporting banks can be retrieved when
3407checking for KVM_CAP_MCE. The supported capabilities can be
3408retrieved with KVM_X86_GET_MCE_CAP_SUPPORTED.
3409
34104.106 KVM_X86_SET_MCE
3411
3412Capability: KVM_CAP_MCE
3413Architectures: x86
3414Type: vcpu ioctl
3415Parameters: struct kvm_x86_mce (in)
3416Returns: 0 on success,
3417 -EFAULT if struct kvm_x86_mce cannot be read,
3418 -EINVAL if the bank number is invalid,
3419 -EINVAL if VAL bit is not set in status field.
3420
3421Inject a machine check error (MCE) into the guest. The input
3422parameter is:
3423
3424struct kvm_x86_mce {
3425 __u64 status;
3426 __u64 addr;
3427 __u64 misc;
3428 __u64 mcg_status;
3429 __u8 bank;
3430 __u8 pad1[7];
3431 __u64 pad2[3];
3432};
3433
3434If the MCE being reported is an uncorrected error, KVM will
3435inject it as an MCE exception into the guest. If the guest
3436MCG_STATUS register reports that an MCE is in progress, KVM
3437causes a KVM_EXIT_SHUTDOWN vmexit.
3438
3439Otherwise, if the MCE is a corrected error, KVM will just
3440store it in the corresponding bank (provided this bank is
3441not holding a previously reported uncorrected error).
3442
33805. The kvm_run structure 34435. The kvm_run structure
3381------------------------ 3444------------------------
3382 3445
diff --git a/MAINTAINERS b/MAINTAINERS
index a1ce88e91444..5397f54af5fc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4782,6 +4782,12 @@ L: linux-edac@vger.kernel.org
4782S: Maintained 4782S: Maintained
4783F: drivers/edac/mpc85xx_edac.[ch] 4783F: drivers/edac/mpc85xx_edac.[ch]
4784 4784
4785EDAC-PND2
4786M: Tony Luck <tony.luck@intel.com>
4787L: linux-edac@vger.kernel.org
4788S: Maintained
4789F: drivers/edac/pnd2_edac.[ch]
4790
4785EDAC-PASEMI 4791EDAC-PASEMI
4786M: Egor Martovetsky <egor@pasemi.com> 4792M: Egor Martovetsky <egor@pasemi.com>
4787L: linux-edac@vger.kernel.org 4793L: linux-edac@vger.kernel.org
@@ -4929,6 +4935,7 @@ F: include/linux/netfilter_bridge/
4929F: net/bridge/ 4935F: net/bridge/
4930 4936
4931ETHERNET PHY LIBRARY 4937ETHERNET PHY LIBRARY
4938M: Andrew Lunn <andrew@lunn.ch>
4932M: Florian Fainelli <f.fainelli@gmail.com> 4939M: Florian Fainelli <f.fainelli@gmail.com>
4933L: netdev@vger.kernel.org 4940L: netdev@vger.kernel.org
4934S: Maintained 4941S: Maintained
@@ -7090,9 +7097,9 @@ S: Maintained
7090F: fs/autofs4/ 7097F: fs/autofs4/
7091 7098
7092KERNEL BUILD + files below scripts/ (unless maintained elsewhere) 7099KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
7100M: Masahiro Yamada <yamada.masahiro@socionext.com>
7093M: Michal Marek <mmarek@suse.com> 7101M: Michal Marek <mmarek@suse.com>
7094T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next 7102T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git
7095T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes
7096L: linux-kbuild@vger.kernel.org 7103L: linux-kbuild@vger.kernel.org
7097S: Maintained 7104S: Maintained
7098F: Documentation/kbuild/ 7105F: Documentation/kbuild/
@@ -10815,6 +10822,7 @@ F: drivers/s390/block/dasd*
10815F: block/partitions/ibm.c 10822F: block/partitions/ibm.c
10816 10823
10817S390 NETWORK DRIVERS 10824S390 NETWORK DRIVERS
10825M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
10818M: Ursula Braun <ubraun@linux.vnet.ibm.com> 10826M: Ursula Braun <ubraun@linux.vnet.ibm.com>
10819L: linux-s390@vger.kernel.org 10827L: linux-s390@vger.kernel.org
10820W: http://www.ibm.com/developerworks/linux/linux390/ 10828W: http://www.ibm.com/developerworks/linux/linux390/
@@ -10845,6 +10853,7 @@ S: Supported
10845F: drivers/s390/scsi/zfcp_* 10853F: drivers/s390/scsi/zfcp_*
10846 10854
10847S390 IUCV NETWORK LAYER 10855S390 IUCV NETWORK LAYER
10856M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
10848M: Ursula Braun <ubraun@linux.vnet.ibm.com> 10857M: Ursula Braun <ubraun@linux.vnet.ibm.com>
10849L: linux-s390@vger.kernel.org 10858L: linux-s390@vger.kernel.org
10850W: http://www.ibm.com/developerworks/linux/linux390/ 10859W: http://www.ibm.com/developerworks/linux/linux390/
diff --git a/Makefile b/Makefile
index b2faa9319372..7acbcb324bae 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 11 2PATCHLEVEL = 11
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc3 4EXTRAVERSION = -rc5
5NAME = Fearless Coyote 5NAME = Fearless Coyote
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -372,7 +372,7 @@ LDFLAGS_MODULE =
372CFLAGS_KERNEL = 372CFLAGS_KERNEL =
373AFLAGS_KERNEL = 373AFLAGS_KERNEL =
374LDFLAGS_vmlinux = 374LDFLAGS_vmlinux =
375CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized 375CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
376CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,) 376CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
377 377
378 378
@@ -653,6 +653,12 @@ KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
653# Tell gcc to never replace conditional load with a non-conditional one 653# Tell gcc to never replace conditional load with a non-conditional one
654KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) 654KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
655 655
656# check for 'asm goto'
657ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
658 KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
659 KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
660endif
661
656include scripts/Makefile.gcc-plugins 662include scripts/Makefile.gcc-plugins
657 663
658ifdef CONFIG_READABLE_ASM 664ifdef CONFIG_READABLE_ASM
@@ -798,12 +804,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
798# use the deterministic mode of AR if available 804# use the deterministic mode of AR if available
799KBUILD_ARFLAGS := $(call ar-option,D) 805KBUILD_ARFLAGS := $(call ar-option,D)
800 806
801# check for 'asm goto'
802ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
803 KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
804 KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
805endif
806
807include scripts/Makefile.kasan 807include scripts/Makefile.kasan
808include scripts/Makefile.extrawarn 808include scripts/Makefile.extrawarn
809include scripts/Makefile.ubsan 809include scripts/Makefile.ubsan
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
index 65808fe0a290..2891cb266cf0 100644
--- a/arch/arc/boot/dts/skeleton.dtsi
+++ b/arch/arc/boot/dts/skeleton.dtsi
@@ -26,6 +26,7 @@
26 device_type = "cpu"; 26 device_type = "cpu";
27 compatible = "snps,arc770d"; 27 compatible = "snps,arc770d";
28 reg = <0>; 28 reg = <0>;
29 clocks = <&core_clk>;
29 }; 30 };
30 }; 31 };
31 32
diff --git a/arch/arc/boot/dts/skeleton_hs.dtsi b/arch/arc/boot/dts/skeleton_hs.dtsi
index 2dfe8037dfbb..5e944d3e5b74 100644
--- a/arch/arc/boot/dts/skeleton_hs.dtsi
+++ b/arch/arc/boot/dts/skeleton_hs.dtsi
@@ -21,6 +21,7 @@
21 device_type = "cpu"; 21 device_type = "cpu";
22 compatible = "snps,archs38"; 22 compatible = "snps,archs38";
23 reg = <0>; 23 reg = <0>;
24 clocks = <&core_clk>;
24 }; 25 };
25 }; 26 };
26 27
diff --git a/arch/arc/boot/dts/skeleton_hs_idu.dtsi b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
index 4c11079f3565..54b277d7dea0 100644
--- a/arch/arc/boot/dts/skeleton_hs_idu.dtsi
+++ b/arch/arc/boot/dts/skeleton_hs_idu.dtsi
@@ -19,8 +19,27 @@
19 19
20 cpu@0 { 20 cpu@0 {
21 device_type = "cpu"; 21 device_type = "cpu";
22 compatible = "snps,archs38xN"; 22 compatible = "snps,archs38";
23 reg = <0>; 23 reg = <0>;
24 clocks = <&core_clk>;
25 };
26 cpu@1 {
27 device_type = "cpu";
28 compatible = "snps,archs38";
29 reg = <1>;
30 clocks = <&core_clk>;
31 };
32 cpu@2 {
33 device_type = "cpu";
34 compatible = "snps,archs38";
35 reg = <2>;
36 clocks = <&core_clk>;
37 };
38 cpu@3 {
39 device_type = "cpu";
40 compatible = "snps,archs38";
41 reg = <3>;
42 clocks = <&core_clk>;
24 }; 43 };
25 }; 44 };
26 45
diff --git a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
index f0df59b23e21..459fc656b759 100644
--- a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
@@ -112,13 +112,19 @@
112 interrupts = <7>; 112 interrupts = <7>;
113 bus-width = <4>; 113 bus-width = <4>;
114 }; 114 };
115 };
115 116
116 /* Embedded Vision subsystem UIO mappings; only relevant for EV VDK */ 117 /*
117 uio_ev: uio@0xD0000000 { 118 * Embedded Vision subsystem UIO mappings; only relevant for EV VDK
118 compatible = "generic-uio"; 119 *
119 reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>; 120 * This node is intentionally put outside of MB above because
120 reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem"; 121 * it maps areas outside of MB's 0xEz-0xFz.
121 interrupts = <23>; 122 */
122 }; 123 uio_ev: uio@0xD0000000 {
124 compatible = "generic-uio";
125 reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>;
126 reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem";
127 interrupt-parent = <&mb_intc>;
128 interrupts = <23>;
123 }; 129 };
124}; 130};
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
index 00bdbe167615..2e52d18e6bc7 100644
--- a/arch/arc/include/asm/kprobes.h
+++ b/arch/arc/include/asm/kprobes.h
@@ -54,9 +54,7 @@ int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
54void kretprobe_trampoline(void); 54void kretprobe_trampoline(void);
55void trap_is_kprobe(unsigned long address, struct pt_regs *regs); 55void trap_is_kprobe(unsigned long address, struct pt_regs *regs);
56#else 56#else
57static void trap_is_kprobe(unsigned long address, struct pt_regs *regs) 57#define trap_is_kprobe(address, regs)
58{
59}
60#endif /* CONFIG_KPROBES */ 58#endif /* CONFIG_KPROBES */
61 59
62#endif /* _ARC_KPROBES_H */ 60#endif /* _ARC_KPROBES_H */
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index 2585632eaa68..cc558a25b8fa 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -100,15 +100,21 @@ END(handle_interrupt)
100;################### Non TLB Exception Handling ############################# 100;################### Non TLB Exception Handling #############################
101 101
102ENTRY(EV_SWI) 102ENTRY(EV_SWI)
103 flag 1 103 ; TODO: implement this
104 EXCEPTION_PROLOGUE
105 b ret_from_exception
104END(EV_SWI) 106END(EV_SWI)
105 107
106ENTRY(EV_DivZero) 108ENTRY(EV_DivZero)
107 flag 1 109 ; TODO: implement this
110 EXCEPTION_PROLOGUE
111 b ret_from_exception
108END(EV_DivZero) 112END(EV_DivZero)
109 113
110ENTRY(EV_DCError) 114ENTRY(EV_DCError)
111 flag 1 115 ; TODO: implement this
116 EXCEPTION_PROLOGUE
117 b ret_from_exception
112END(EV_DCError) 118END(EV_DCError)
113 119
114; --------------------------------------------- 120; ---------------------------------------------
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 3093fa898a23..fa62404ba58f 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -10,6 +10,7 @@
10#include <linux/fs.h> 10#include <linux/fs.h>
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/root_dev.h> 12#include <linux/root_dev.h>
13#include <linux/clk.h>
13#include <linux/clk-provider.h> 14#include <linux/clk-provider.h>
14#include <linux/clocksource.h> 15#include <linux/clocksource.h>
15#include <linux/console.h> 16#include <linux/console.h>
@@ -488,8 +489,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
488{ 489{
489 char *str; 490 char *str;
490 int cpu_id = ptr_to_cpu(v); 491 int cpu_id = ptr_to_cpu(v);
491 struct device_node *core_clk = of_find_node_by_name(NULL, "core_clk"); 492 struct device *cpu_dev = get_cpu_device(cpu_id);
492 u32 freq = 0; 493 struct clk *cpu_clk;
494 unsigned long freq = 0;
493 495
494 if (!cpu_online(cpu_id)) { 496 if (!cpu_online(cpu_id)) {
495 seq_printf(m, "processor [%d]\t: Offline\n", cpu_id); 497 seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
@@ -502,9 +504,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
502 504
503 seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE)); 505 seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
504 506
505 of_property_read_u32(core_clk, "clock-frequency", &freq); 507 cpu_clk = clk_get(cpu_dev, NULL);
508 if (IS_ERR(cpu_clk)) {
509 seq_printf(m, "CPU speed \t: Cannot get clock for processor [%d]\n",
510 cpu_id);
511 } else {
512 freq = clk_get_rate(cpu_clk);
513 }
506 if (freq) 514 if (freq)
507 seq_printf(m, "CPU speed\t: %u.%02u Mhz\n", 515 seq_printf(m, "CPU speed\t: %lu.%02lu Mhz\n",
508 freq / 1000000, (freq / 10000) % 100); 516 freq / 1000000, (freq / 10000) % 100);
509 517
510 seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n", 518 seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index d408fa21a07c..928562967f3c 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -633,6 +633,9 @@ noinline static void slc_entire_op(const int op)
633 633
634 write_aux_reg(ARC_REG_SLC_INVALIDATE, 1); 634 write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
635 635
 636 /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
637 read_aux_reg(r);
638
636 /* Important to wait for flush to complete */ 639 /* Important to wait for flush to complete */
637 while (read_aux_reg(r) & SLC_CTRL_BUSY); 640 while (read_aux_reg(r) & SLC_CTRL_BUSY);
638} 641}
diff --git a/arch/arm/boot/dts/am335x-pcm-953.dtsi b/arch/arm/boot/dts/am335x-pcm-953.dtsi
index 02981eae96b9..1ec8e0d80191 100644
--- a/arch/arm/boot/dts/am335x-pcm-953.dtsi
+++ b/arch/arm/boot/dts/am335x-pcm-953.dtsi
@@ -63,14 +63,14 @@
63 label = "home"; 63 label = "home";
64 linux,code = <KEY_HOME>; 64 linux,code = <KEY_HOME>;
65 gpios = <&gpio3 7 GPIO_ACTIVE_HIGH>; 65 gpios = <&gpio3 7 GPIO_ACTIVE_HIGH>;
66 gpio-key,wakeup; 66 wakeup-source;
67 }; 67 };
68 68
69 button@1 { 69 button@1 {
70 label = "menu"; 70 label = "menu";
71 linux,code = <KEY_MENU>; 71 linux,code = <KEY_MENU>;
72 gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>; 72 gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>;
73 gpio-key,wakeup; 73 wakeup-source;
74 }; 74 };
75 75
76 }; 76 };
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index 0d341c545b01..e5ac1d81d15c 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -315,6 +315,13 @@
315 /* ID & VBUS GPIOs provided in board dts */ 315 /* ID & VBUS GPIOs provided in board dts */
316 }; 316 };
317 }; 317 };
318
319 tpic2810: tpic2810@60 {
320 compatible = "ti,tpic2810";
321 reg = <0x60>;
322 gpio-controller;
323 #gpio-cells = <2>;
324 };
318}; 325};
319 326
320&mcspi3 { 327&mcspi3 {
@@ -330,13 +337,6 @@
330 spi-max-frequency = <1000000>; 337 spi-max-frequency = <1000000>;
331 spi-cpol; 338 spi-cpol;
332 }; 339 };
333
334 tpic2810: tpic2810@60 {
335 compatible = "ti,tpic2810";
336 reg = <0x60>;
337 gpio-controller;
338 #gpio-cells = <2>;
339 };
340}; 340};
341 341
342&uart3 { 342&uart3 {
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 4fbb089cf5ad..00de62dc0042 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -66,14 +66,14 @@
66 timer@20200 { 66 timer@20200 {
67 compatible = "arm,cortex-a9-global-timer"; 67 compatible = "arm,cortex-a9-global-timer";
68 reg = <0x20200 0x100>; 68 reg = <0x20200 0x100>;
69 interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>; 69 interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
70 clocks = <&periph_clk>; 70 clocks = <&periph_clk>;
71 }; 71 };
72 72
73 local-timer@20600 { 73 local-timer@20600 {
74 compatible = "arm,cortex-a9-twd-timer"; 74 compatible = "arm,cortex-a9-twd-timer";
75 reg = <0x20600 0x100>; 75 reg = <0x20600 0x100>;
76 interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>; 76 interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
77 clocks = <&periph_clk>; 77 clocks = <&periph_clk>;
78 }; 78 };
79 79
diff --git a/arch/arm/boot/dts/bcm953012k.dts b/arch/arm/boot/dts/bcm953012k.dts
index bfd923096a8c..ae31a5826e91 100644
--- a/arch/arm/boot/dts/bcm953012k.dts
+++ b/arch/arm/boot/dts/bcm953012k.dts
@@ -48,15 +48,14 @@
48 }; 48 };
49 49
50 memory { 50 memory {
51 reg = <0x00000000 0x10000000>; 51 reg = <0x80000000 0x10000000>;
52 }; 52 };
53}; 53};
54 54
55&uart0 { 55&uart0 {
56 clock-frequency = <62499840>; 56 status = "okay";
57}; 57};
58 58
59&uart1 { 59&uart1 {
60 clock-frequency = <62499840>;
61 status = "okay"; 60 status = "okay";
62}; 61};
diff --git a/arch/arm/boot/dts/bcm958522er.dts b/arch/arm/boot/dts/bcm958522er.dts
index 3f04a40eb90c..df05e7f568af 100644
--- a/arch/arm/boot/dts/bcm958522er.dts
+++ b/arch/arm/boot/dts/bcm958522er.dts
@@ -55,6 +55,7 @@
55 gpio-restart { 55 gpio-restart {
56 compatible = "gpio-restart"; 56 compatible = "gpio-restart";
57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; 57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
58 open-source;
58 priority = <200>; 59 priority = <200>;
59 }; 60 };
60}; 61};
diff --git a/arch/arm/boot/dts/bcm958525er.dts b/arch/arm/boot/dts/bcm958525er.dts
index 9fd542200d3d..4a3ab19c6281 100644
--- a/arch/arm/boot/dts/bcm958525er.dts
+++ b/arch/arm/boot/dts/bcm958525er.dts
@@ -55,6 +55,7 @@
55 gpio-restart { 55 gpio-restart {
56 compatible = "gpio-restart"; 56 compatible = "gpio-restart";
57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; 57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
58 open-source;
58 priority = <200>; 59 priority = <200>;
59 }; 60 };
60}; 61};
diff --git a/arch/arm/boot/dts/bcm958525xmc.dts b/arch/arm/boot/dts/bcm958525xmc.dts
index 41e7fd350fcd..81f78435d8c7 100644
--- a/arch/arm/boot/dts/bcm958525xmc.dts
+++ b/arch/arm/boot/dts/bcm958525xmc.dts
@@ -55,6 +55,7 @@
55 gpio-restart { 55 gpio-restart {
56 compatible = "gpio-restart"; 56 compatible = "gpio-restart";
57 gpios = <&gpioa 31 GPIO_ACTIVE_LOW>; 57 gpios = <&gpioa 31 GPIO_ACTIVE_LOW>;
58 open-source;
58 priority = <200>; 59 priority = <200>;
59 }; 60 };
60}; 61};
diff --git a/arch/arm/boot/dts/bcm958622hr.dts b/arch/arm/boot/dts/bcm958622hr.dts
index 477c4860db52..c88b8fefcb2f 100644
--- a/arch/arm/boot/dts/bcm958622hr.dts
+++ b/arch/arm/boot/dts/bcm958622hr.dts
@@ -55,6 +55,7 @@
55 gpio-restart { 55 gpio-restart {
56 compatible = "gpio-restart"; 56 compatible = "gpio-restart";
57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; 57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
58 open-source;
58 priority = <200>; 59 priority = <200>;
59 }; 60 };
60}; 61};
diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts
index c0a499d5ba44..d503fa0dde31 100644
--- a/arch/arm/boot/dts/bcm958623hr.dts
+++ b/arch/arm/boot/dts/bcm958623hr.dts
@@ -55,6 +55,7 @@
55 gpio-restart { 55 gpio-restart {
56 compatible = "gpio-restart"; 56 compatible = "gpio-restart";
57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; 57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
58 open-source;
58 priority = <200>; 59 priority = <200>;
59 }; 60 };
60}; 61};
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index f7eb5854a224..cc0363b843c1 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -55,6 +55,7 @@
55 gpio-restart { 55 gpio-restart {
56 compatible = "gpio-restart"; 56 compatible = "gpio-restart";
57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; 57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
58 open-source;
58 priority = <200>; 59 priority = <200>;
59 }; 60 };
60}; 61};
diff --git a/arch/arm/boot/dts/bcm988312hr.dts b/arch/arm/boot/dts/bcm988312hr.dts
index 16666324fda8..74e15a3cd9f8 100644
--- a/arch/arm/boot/dts/bcm988312hr.dts
+++ b/arch/arm/boot/dts/bcm988312hr.dts
@@ -55,6 +55,7 @@
55 gpio-restart { 55 gpio-restart {
56 compatible = "gpio-restart"; 56 compatible = "gpio-restart";
57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; 57 gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
58 open-source;
58 priority = <200>; 59 priority = <200>;
59 }; 60 };
60}; 61};
diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
index 49f466fe0b1d..dcfc97591433 100644
--- a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
+++ b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
@@ -121,11 +121,6 @@
121 }; 121 };
122}; 122};
123 123
124&cpu0 {
125 arm-supply = <&sw1a_reg>;
126 soc-supply = <&sw1c_reg>;
127};
128
129&fec1 { 124&fec1 {
130 pinctrl-names = "default"; 125 pinctrl-names = "default";
131 pinctrl-0 = <&pinctrl_enet1>; 126 pinctrl-0 = <&pinctrl_enet1>;
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 22332be72140..528b4e9c6d3d 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -266,7 +266,7 @@
266 }; 266 };
267 267
268 usb1: ohci@00400000 { 268 usb1: ohci@00400000 {
269 compatible = "atmel,sama5d2-ohci", "usb-ohci"; 269 compatible = "atmel,at91rm9200-ohci", "usb-ohci";
270 reg = <0x00400000 0x100000>; 270 reg = <0x00400000 0x100000>;
271 interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>; 271 interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
272 clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>; 272 clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index 82d8c4771293..162e1eb5373d 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -14,6 +14,7 @@
14#include <dt-bindings/mfd/dbx500-prcmu.h> 14#include <dt-bindings/mfd/dbx500-prcmu.h>
15#include <dt-bindings/arm/ux500_pm_domains.h> 15#include <dt-bindings/arm/ux500_pm_domains.h>
16#include <dt-bindings/gpio/gpio.h> 16#include <dt-bindings/gpio/gpio.h>
17#include <dt-bindings/clock/ste-ab8500.h>
17#include "skeleton.dtsi" 18#include "skeleton.dtsi"
18 19
19/ { 20/ {
@@ -603,6 +604,11 @@
603 interrupt-controller; 604 interrupt-controller;
604 #interrupt-cells = <2>; 605 #interrupt-cells = <2>;
605 606
607 ab8500_clock: clock-controller {
608 compatible = "stericsson,ab8500-clk";
609 #clock-cells = <1>;
610 };
611
606 ab8500_gpio: ab8500-gpio { 612 ab8500_gpio: ab8500-gpio {
607 compatible = "stericsson,ab8500-gpio"; 613 compatible = "stericsson,ab8500-gpio";
608 gpio-controller; 614 gpio-controller;
@@ -686,6 +692,8 @@
686 692
687 ab8500-pwm { 693 ab8500-pwm {
688 compatible = "stericsson,ab8500-pwm"; 694 compatible = "stericsson,ab8500-pwm";
695 clocks = <&ab8500_clock AB8500_SYSCLK_INT>;
696 clock-names = "intclk";
689 }; 697 };
690 698
691 ab8500-debugfs { 699 ab8500-debugfs {
@@ -700,6 +708,9 @@
700 V-AMIC2-supply = <&ab8500_ldo_anamic2_reg>; 708 V-AMIC2-supply = <&ab8500_ldo_anamic2_reg>;
701 V-DMIC-supply = <&ab8500_ldo_dmic_reg>; 709 V-DMIC-supply = <&ab8500_ldo_dmic_reg>;
702 710
711 clocks = <&ab8500_clock AB8500_SYSCLK_AUDIO>;
712 clock-names = "audioclk";
713
703 stericsson,earpeice-cmv = <950>; /* Units in mV. */ 714 stericsson,earpeice-cmv = <950>; /* Units in mV. */
704 }; 715 };
705 716
@@ -1095,6 +1106,14 @@
1095 status = "disabled"; 1106 status = "disabled";
1096 }; 1107 };
1097 1108
1109 sound {
1110 compatible = "stericsson,snd-soc-mop500";
1111 stericsson,cpu-dai = <&msp1 &msp3>;
1112 stericsson,audio-codec = <&codec>;
1113 clocks = <&prcmu_clk PRCMU_SYSCLK>, <&ab8500_clock AB8500_SYSCLK_ULP>, <&ab8500_clock AB8500_SYSCLK_INT>;
1114 clock-names = "sysclk", "ulpclk", "intclk";
1115 };
1116
1098 msp0: msp@80123000 { 1117 msp0: msp@80123000 {
1099 compatible = "stericsson,ux500-msp-i2s"; 1118 compatible = "stericsson,ux500-msp-i2s";
1100 reg = <0x80123000 0x1000>; 1119 reg = <0x80123000 0x1000>;
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index f37f9e10713c..9e359e4f342e 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -186,15 +186,6 @@
186 status = "okay"; 186 status = "okay";
187 }; 187 };
188 188
189 sound {
190 compatible = "stericsson,snd-soc-mop500";
191
192 stericsson,cpu-dai = <&msp1 &msp3>;
193 stericsson,audio-codec = <&codec>;
194 clocks = <&prcmu_clk PRCMU_SYSCLK>;
195 clock-names = "sysclk";
196 };
197
198 msp0: msp@80123000 { 189 msp0: msp@80123000 {
199 pinctrl-names = "default"; 190 pinctrl-names = "default";
200 pinctrl-0 = <&msp0_default_mode>; 191 pinctrl-0 = <&msp0_default_mode>;
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index dd5514def604..ade1d0d4e5f4 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -159,15 +159,6 @@
159 "", "", "", "", "", "", "", ""; 159 "", "", "", "", "", "", "", "";
160 }; 160 };
161 161
162 sound {
163 compatible = "stericsson,snd-soc-mop500";
164
165 stericsson,cpu-dai = <&msp1 &msp3>;
166 stericsson,audio-codec = <&codec>;
167 clocks = <&prcmu_clk PRCMU_SYSCLK>;
168 clock-names = "sysclk";
169 };
170
171 msp0: msp@80123000 { 162 msp0: msp@80123000 {
172 pinctrl-names = "default"; 163 pinctrl-names = "default";
173 pinctrl-0 = <&msp0_default_mode>; 164 pinctrl-0 = <&msp0_default_mode>;
diff --git a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
index 72ec0d5ae052..bbf1c8cbaac6 100644
--- a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
+++ b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
@@ -167,7 +167,7 @@
167 reg = <8>; 167 reg = <8>;
168 label = "cpu"; 168 label = "cpu";
169 ethernet = <&gmac>; 169 ethernet = <&gmac>;
170 phy-mode = "rgmii"; 170 phy-mode = "rgmii-txid";
171 fixed-link { 171 fixed-link {
172 speed = <1000>; 172 speed = <1000>;
173 full-duplex; 173 full-duplex;
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index a952cc0703cc..8a3ed21cb7bc 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -495,7 +495,7 @@
495 resets = <&ccu RST_BUS_GPU>; 495 resets = <&ccu RST_BUS_GPU>;
496 496
497 assigned-clocks = <&ccu CLK_GPU>; 497 assigned-clocks = <&ccu CLK_GPU>;
498 assigned-clock-rates = <408000000>; 498 assigned-clock-rates = <384000000>;
499 }; 499 };
500 500
501 gic: interrupt-controller@01c81000 { 501 gic: interrupt-controller@01c81000 {
diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi
index 18c174fef84f..0467fb365bfc 100644
--- a/arch/arm/boot/dts/sun8i-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a33.dtsi
@@ -113,8 +113,8 @@
113 simple-audio-card,mclk-fs = <512>; 113 simple-audio-card,mclk-fs = <512>;
114 simple-audio-card,aux-devs = <&codec_analog>; 114 simple-audio-card,aux-devs = <&codec_analog>;
115 simple-audio-card,routing = 115 simple-audio-card,routing =
116 "Left DAC", "Digital Left DAC", 116 "Left DAC", "AIF1 Slot 0 Left",
117 "Right DAC", "Digital Right DAC"; 117 "Right DAC", "AIF1 Slot 0 Right";
118 status = "disabled"; 118 status = "disabled";
119 119
120 simple-audio-card,cpu { 120 simple-audio-card,cpu {
diff --git a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
index 7097c18ff487..d6bd15898db6 100644
--- a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
@@ -50,8 +50,6 @@
50 50
51 backlight: backlight { 51 backlight: backlight {
52 compatible = "pwm-backlight"; 52 compatible = "pwm-backlight";
53 pinctrl-names = "default";
54 pinctrl-0 = <&bl_en_pin>;
55 pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>; 53 pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
56 brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>; 54 brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
57 default-brightness-level = <8>; 55 default-brightness-level = <8>;
@@ -93,11 +91,6 @@
93}; 91};
94 92
95&pio { 93&pio {
96 bl_en_pin: bl_en_pin@0 {
97 pins = "PH6";
98 function = "gpio_in";
99 };
100
101 mmc0_cd_pin: mmc0_cd_pin@0 { 94 mmc0_cd_pin: mmc0_cd_pin@0 {
102 pins = "PB4"; 95 pins = "PB4";
103 function = "gpio_in"; 96 function = "gpio_in";
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index f2462a6bdba6..decd388d613d 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -188,6 +188,7 @@ CONFIG_WL12XX=m
188CONFIG_WL18XX=m 188CONFIG_WL18XX=m
189CONFIG_WLCORE_SPI=m 189CONFIG_WLCORE_SPI=m
190CONFIG_WLCORE_SDIO=m 190CONFIG_WLCORE_SDIO=m
191CONFIG_INPUT_MOUSEDEV=m
191CONFIG_INPUT_JOYDEV=m 192CONFIG_INPUT_JOYDEV=m
192CONFIG_INPUT_EVDEV=m 193CONFIG_INPUT_EVDEV=m
193CONFIG_KEYBOARD_ATKBD=m 194CONFIG_KEYBOARD_ATKBD=m
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 3d89b7905bd9..a277981f414d 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -289,6 +289,22 @@ static void at91_ddr_standby(void)
289 at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1); 289 at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
290} 290}
291 291
292static void sama5d3_ddr_standby(void)
293{
294 u32 lpr0;
295 u32 saved_lpr0;
296
297 saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
298 lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
299 lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
300
301 at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
302
303 cpu_do_idle();
304
305 at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
306}
307
292/* We manage both DDRAM/SDRAM controllers, we need more than one value to 308/* We manage both DDRAM/SDRAM controllers, we need more than one value to
293 * remember. 309 * remember.
294 */ 310 */
@@ -323,7 +339,7 @@ static const struct of_device_id const ramc_ids[] __initconst = {
323 { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby }, 339 { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
324 { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby }, 340 { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
325 { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby }, 341 { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
326 { .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby }, 342 { .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby },
327 { /*sentinel*/ } 343 { /*sentinel*/ }
328}; 344};
329 345
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 093458b62c8d..c89757abb0ae 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -241,6 +241,3 @@ obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o
241 241
242onenand-$(CONFIG_MTD_ONENAND_OMAP2) := gpmc-onenand.o 242onenand-$(CONFIG_MTD_ONENAND_OMAP2) := gpmc-onenand.o
243obj-y += $(onenand-m) $(onenand-y) 243obj-y += $(onenand-m) $(onenand-y)
244
245nand-$(CONFIG_MTD_NAND_OMAP2) := gpmc-nand.o
246obj-y += $(nand-m) $(nand-y)
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
deleted file mode 100644
index f6ac027f3c3b..000000000000
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ /dev/null
@@ -1,154 +0,0 @@
1/*
2 * gpmc-nand.c
3 *
4 * Copyright (C) 2009 Texas Instruments
5 * Vimal Singh <vimalsingh@ti.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/kernel.h>
13#include <linux/platform_device.h>
14#include <linux/io.h>
15#include <linux/omap-gpmc.h>
16#include <linux/mtd/nand.h>
17#include <linux/platform_data/mtd-nand-omap2.h>
18
19#include <asm/mach/flash.h>
20
21#include "soc.h"
22
23/* minimum size for IO mapping */
24#define NAND_IO_SIZE 4
25
26static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
27{
28 /* platforms which support all ECC schemes */
29 if (soc_is_am33xx() || soc_is_am43xx() || cpu_is_omap44xx() ||
30 soc_is_omap54xx() || soc_is_dra7xx())
31 return 1;
32
33 if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
34 ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
35 if (cpu_is_omap24xx())
36 return 0;
37 else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
38 return 0;
39 else
40 return 1;
41 }
42
43 /* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
44 * which require H/W based ECC error detection */
45 if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
46 ((ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
47 (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
48 return 0;
49
50 /* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
51 if (ecc_opt == OMAP_ECC_HAM1_CODE_HW ||
52 ecc_opt == OMAP_ECC_HAM1_CODE_SW)
53 return 1;
54 else
55 return 0;
56}
57
58/* This function will go away once the device-tree convertion is complete */
59static void gpmc_set_legacy(struct omap_nand_platform_data *gpmc_nand_data,
60 struct gpmc_settings *s)
61{
62 /* Enable RD PIN Monitoring Reg */
63 if (gpmc_nand_data->dev_ready) {
64 s->wait_on_read = true;
65 s->wait_on_write = true;
66 }
67
68 if (gpmc_nand_data->devsize == NAND_BUSWIDTH_16)
69 s->device_width = GPMC_DEVWIDTH_16BIT;
70 else
71 s->device_width = GPMC_DEVWIDTH_8BIT;
72}
73
74int gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data,
75 struct gpmc_timings *gpmc_t)
76{
77 int err = 0;
78 struct gpmc_settings s;
79 struct platform_device *pdev;
80 struct resource gpmc_nand_res[] = {
81 { .flags = IORESOURCE_MEM, },
82 { .flags = IORESOURCE_IRQ, },
83 { .flags = IORESOURCE_IRQ, },
84 };
85
86 BUG_ON(gpmc_nand_data->cs >= GPMC_CS_NUM);
87
88 err = gpmc_cs_request(gpmc_nand_data->cs, NAND_IO_SIZE,
89 (unsigned long *)&gpmc_nand_res[0].start);
90 if (err < 0) {
91 pr_err("omap2-gpmc: Cannot request GPMC CS %d, error %d\n",
92 gpmc_nand_data->cs, err);
93 return err;
94 }
95 gpmc_nand_res[0].end = gpmc_nand_res[0].start + NAND_IO_SIZE - 1;
96 gpmc_nand_res[1].start = gpmc_get_client_irq(GPMC_IRQ_FIFOEVENTENABLE);
97 gpmc_nand_res[2].start = gpmc_get_client_irq(GPMC_IRQ_COUNT_EVENT);
98
99 memset(&s, 0, sizeof(struct gpmc_settings));
100 gpmc_set_legacy(gpmc_nand_data, &s);
101
102 s.device_nand = true;
103
104 if (gpmc_t) {
105 err = gpmc_cs_set_timings(gpmc_nand_data->cs, gpmc_t, &s);
106 if (err < 0) {
107 pr_err("omap2-gpmc: Unable to set gpmc timings: %d\n",
108 err);
109 return err;
110 }
111 }
112
113 err = gpmc_cs_program_settings(gpmc_nand_data->cs, &s);
114 if (err < 0)
115 goto out_free_cs;
116
117 err = gpmc_configure(GPMC_CONFIG_WP, 0);
118 if (err < 0)
119 goto out_free_cs;
120
121 if (!gpmc_hwecc_bch_capable(gpmc_nand_data->ecc_opt)) {
122 pr_err("omap2-nand: Unsupported NAND ECC scheme selected\n");
123 err = -EINVAL;
124 goto out_free_cs;
125 }
126
127
128 pdev = platform_device_alloc("omap2-nand", gpmc_nand_data->cs);
129 if (pdev) {
130 err = platform_device_add_resources(pdev, gpmc_nand_res,
131 ARRAY_SIZE(gpmc_nand_res));
132 if (!err)
133 pdev->dev.platform_data = gpmc_nand_data;
134 } else {
135 err = -ENOMEM;
136 }
137 if (err)
138 goto out_free_pdev;
139
140 err = platform_device_add(pdev);
141 if (err) {
142 dev_err(&pdev->dev, "Unable to register NAND device\n");
143 goto out_free_pdev;
144 }
145
146 return 0;
147
148out_free_pdev:
149 platform_device_put(pdev);
150out_free_cs:
151 gpmc_cs_free(gpmc_nand_data->cs);
152
153 return err;
154}
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 8633c703546a..2944af820558 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -367,7 +367,7 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
367 return ret; 367 return ret;
368} 368}
369 369
370void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data) 370int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
371{ 371{
372 int err; 372 int err;
373 struct device *dev = &gpmc_onenand_device.dev; 373 struct device *dev = &gpmc_onenand_device.dev;
@@ -393,15 +393,17 @@ void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
393 if (err < 0) { 393 if (err < 0) {
394 dev_err(dev, "Cannot request GPMC CS %d, error %d\n", 394 dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
395 gpmc_onenand_data->cs, err); 395 gpmc_onenand_data->cs, err);
396 return; 396 return err;
397 } 397 }
398 398
399 gpmc_onenand_resource.end = gpmc_onenand_resource.start + 399 gpmc_onenand_resource.end = gpmc_onenand_resource.start +
400 ONENAND_IO_SIZE - 1; 400 ONENAND_IO_SIZE - 1;
401 401
402 if (platform_device_register(&gpmc_onenand_device) < 0) { 402 err = platform_device_register(&gpmc_onenand_device);
403 if (err) {
403 dev_err(dev, "Unable to register OneNAND device\n"); 404 dev_err(dev, "Unable to register OneNAND device\n");
404 gpmc_cs_free(gpmc_onenand_data->cs); 405 gpmc_cs_free(gpmc_onenand_data->cs);
405 return;
406 } 406 }
407
408 return err;
407} 409}
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index fe36ce2734d4..4c6f14cf92a8 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -17,6 +17,7 @@
17 17
18#include <linux/linkage.h> 18#include <linux/linkage.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <asm/assembler.h>
20 21
21#include "omap44xx.h" 22#include "omap44xx.h"
22 23
@@ -66,7 +67,7 @@ wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
66 cmp r0, r4 67 cmp r0, r4
67 bne wait_2 68 bne wait_2
68 ldr r12, =API_HYP_ENTRY 69 ldr r12, =API_HYP_ENTRY
69 adr r0, hyp_boot 70 badr r0, hyp_boot
70 smc #0 71 smc #0
71hyp_boot: 72hyp_boot:
72 b omap_secondary_startup 73 b omap_secondary_startup
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 56f917ec8621..1435fee39a89 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -2112,11 +2112,20 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
2112}; 2112};
2113 2113
2114/* L4 CORE -> SR1 interface */ 2114/* L4 CORE -> SR1 interface */
2115static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
2116 {
2117 .pa_start = OMAP34XX_SR1_BASE,
2118 .pa_end = OMAP34XX_SR1_BASE + SZ_1K - 1,
2119 .flags = ADDR_TYPE_RT,
2120 },
2121 { },
2122};
2115 2123
2116static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = { 2124static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = {
2117 .master = &omap3xxx_l4_core_hwmod, 2125 .master = &omap3xxx_l4_core_hwmod,
2118 .slave = &omap34xx_sr1_hwmod, 2126 .slave = &omap34xx_sr1_hwmod,
2119 .clk = "sr_l4_ick", 2127 .clk = "sr_l4_ick",
2128 .addr = omap3_sr1_addr_space,
2120 .user = OCP_USER_MPU, 2129 .user = OCP_USER_MPU,
2121}; 2130};
2122 2131
@@ -2124,15 +2133,25 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr1 = {
2124 .master = &omap3xxx_l4_core_hwmod, 2133 .master = &omap3xxx_l4_core_hwmod,
2125 .slave = &omap36xx_sr1_hwmod, 2134 .slave = &omap36xx_sr1_hwmod,
2126 .clk = "sr_l4_ick", 2135 .clk = "sr_l4_ick",
2136 .addr = omap3_sr1_addr_space,
2127 .user = OCP_USER_MPU, 2137 .user = OCP_USER_MPU,
2128}; 2138};
2129 2139
2130/* L4 CORE -> SR1 interface */ 2140/* L4 CORE -> SR1 interface */
2141static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = {
2142 {
2143 .pa_start = OMAP34XX_SR2_BASE,
2144 .pa_end = OMAP34XX_SR2_BASE + SZ_1K - 1,
2145 .flags = ADDR_TYPE_RT,
2146 },
2147 { },
2148};
2131 2149
2132static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = { 2150static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = {
2133 .master = &omap3xxx_l4_core_hwmod, 2151 .master = &omap3xxx_l4_core_hwmod,
2134 .slave = &omap34xx_sr2_hwmod, 2152 .slave = &omap34xx_sr2_hwmod,
2135 .clk = "sr_l4_ick", 2153 .clk = "sr_l4_ick",
2154 .addr = omap3_sr2_addr_space,
2136 .user = OCP_USER_MPU, 2155 .user = OCP_USER_MPU,
2137}; 2156};
2138 2157
@@ -2140,6 +2159,7 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr2 = {
2140 .master = &omap3xxx_l4_core_hwmod, 2159 .master = &omap3xxx_l4_core_hwmod,
2141 .slave = &omap36xx_sr2_hwmod, 2160 .slave = &omap36xx_sr2_hwmod,
2142 .clk = "sr_l4_ick", 2161 .clk = "sr_l4_ick",
2162 .addr = omap3_sr2_addr_space,
2143 .user = OCP_USER_MPU, 2163 .user = OCP_USER_MPU,
2144}; 2164};
2145 2165
@@ -3111,16 +3131,20 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = {
3111 * Return: 0 if device named @dev_name is not likely to be accessible, 3131 * Return: 0 if device named @dev_name is not likely to be accessible,
3112 * or 1 if it is likely to be accessible. 3132 * or 1 if it is likely to be accessible.
3113 */ 3133 */
3114static int __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus, 3134static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
3115 const char *dev_name) 3135 const char *dev_name)
3116{ 3136{
3137 struct device_node *node;
3138 bool available;
3139
3117 if (!bus) 3140 if (!bus)
3118 return (omap_type() == OMAP2_DEVICE_TYPE_GP) ? 1 : 0; 3141 return omap_type() == OMAP2_DEVICE_TYPE_GP;
3119 3142
3120 if (of_device_is_available(of_find_node_by_name(bus, dev_name))) 3143 node = of_get_child_by_name(bus, dev_name);
3121 return 1; 3144 available = of_device_is_available(node);
3145 of_node_put(node);
3122 3146
3123 return 0; 3147 return available;
3124} 3148}
3125 3149
3126int __init omap3xxx_hwmod_init(void) 3150int __init omap3xxx_hwmod_init(void)
@@ -3189,15 +3213,20 @@ int __init omap3xxx_hwmod_init(void)
3189 3213
3190 if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) { 3214 if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) {
3191 r = omap_hwmod_register_links(h_sham); 3215 r = omap_hwmod_register_links(h_sham);
3192 if (r < 0) 3216 if (r < 0) {
3217 of_node_put(bus);
3193 return r; 3218 return r;
3219 }
3194 } 3220 }
3195 3221
3196 if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) { 3222 if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) {
3197 r = omap_hwmod_register_links(h_aes); 3223 r = omap_hwmod_register_links(h_aes);
3198 if (r < 0) 3224 if (r < 0) {
3225 of_node_put(bus);
3199 return r; 3226 return r;
3227 }
3200 } 3228 }
3229 of_node_put(bus);
3201 3230
3202 /* 3231 /*
3203 * Register hwmod links specific to certain ES levels of a 3232 * Register hwmod links specific to certain ES levels of a
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index 9f9e203c09c5..bcb03fc32665 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -114,6 +114,7 @@
114 pcie0: pcie@20020000 { 114 pcie0: pcie@20020000 {
115 compatible = "brcm,iproc-pcie"; 115 compatible = "brcm,iproc-pcie";
116 reg = <0 0x20020000 0 0x1000>; 116 reg = <0 0x20020000 0 0x1000>;
117 dma-coherent;
117 118
118 #interrupt-cells = <1>; 119 #interrupt-cells = <1>;
119 interrupt-map-mask = <0 0 0 0>; 120 interrupt-map-mask = <0 0 0 0>;
@@ -144,6 +145,7 @@
144 pcie4: pcie@50020000 { 145 pcie4: pcie@50020000 {
145 compatible = "brcm,iproc-pcie"; 146 compatible = "brcm,iproc-pcie";
146 reg = <0 0x50020000 0 0x1000>; 147 reg = <0 0x50020000 0 0x1000>;
148 dma-coherent;
147 149
148 #interrupt-cells = <1>; 150 #interrupt-cells = <1>;
149 interrupt-map-mask = <0 0 0 0>; 151 interrupt-map-mask = <0 0 0 0>;
@@ -174,6 +176,7 @@
174 pcie8: pcie@60c00000 { 176 pcie8: pcie@60c00000 {
175 compatible = "brcm,iproc-pcie-paxc"; 177 compatible = "brcm,iproc-pcie-paxc";
176 reg = <0 0x60c00000 0 0x1000>; 178 reg = <0 0x60c00000 0 0x1000>;
179 dma-coherent;
177 linux,pci-domain = <8>; 180 linux,pci-domain = <8>;
178 181
179 bus-range = <0x0 0x1>; 182 bus-range = <0x0 0x1>;
@@ -203,6 +206,7 @@
203 <0x61030000 0x100>; 206 <0x61030000 0x100>;
204 reg-names = "amac_base", "idm_base", "nicpm_base"; 207 reg-names = "amac_base", "idm_base", "nicpm_base";
205 interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>; 208 interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
209 dma-coherent;
206 phy-handle = <&gphy0>; 210 phy-handle = <&gphy0>;
207 phy-mode = "rgmii"; 211 phy-mode = "rgmii";
208 status = "disabled"; 212 status = "disabled";
@@ -213,6 +217,7 @@
213 reg = <0x612c0000 0x445>; /* PDC FS0 regs */ 217 reg = <0x612c0000 0x445>; /* PDC FS0 regs */
214 interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>; 218 interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
215 #mbox-cells = <1>; 219 #mbox-cells = <1>;
220 dma-coherent;
216 brcm,rx-status-len = <32>; 221 brcm,rx-status-len = <32>;
217 brcm,use-bcm-hdr; 222 brcm,use-bcm-hdr;
218 }; 223 };
@@ -222,6 +227,7 @@
222 reg = <0x612e0000 0x445>; /* PDC FS1 regs */ 227 reg = <0x612e0000 0x445>; /* PDC FS1 regs */
223 interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>; 228 interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
224 #mbox-cells = <1>; 229 #mbox-cells = <1>;
230 dma-coherent;
225 brcm,rx-status-len = <32>; 231 brcm,rx-status-len = <32>;
226 brcm,use-bcm-hdr; 232 brcm,use-bcm-hdr;
227 }; 233 };
@@ -231,6 +237,7 @@
231 reg = <0x61300000 0x445>; /* PDC FS2 regs */ 237 reg = <0x61300000 0x445>; /* PDC FS2 regs */
232 interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>; 238 interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
233 #mbox-cells = <1>; 239 #mbox-cells = <1>;
240 dma-coherent;
234 brcm,rx-status-len = <32>; 241 brcm,rx-status-len = <32>;
235 brcm,use-bcm-hdr; 242 brcm,use-bcm-hdr;
236 }; 243 };
@@ -240,6 +247,7 @@
240 reg = <0x61320000 0x445>; /* PDC FS3 regs */ 247 reg = <0x61320000 0x445>; /* PDC FS3 regs */
241 interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>; 248 interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
242 #mbox-cells = <1>; 249 #mbox-cells = <1>;
250 dma-coherent;
243 brcm,rx-status-len = <32>; 251 brcm,rx-status-len = <32>;
244 brcm,use-bcm-hdr; 252 brcm,use-bcm-hdr;
245 }; 253 };
@@ -644,6 +652,7 @@
644 sata: ahci@663f2000 { 652 sata: ahci@663f2000 {
645 compatible = "brcm,iproc-ahci", "generic-ahci"; 653 compatible = "brcm,iproc-ahci", "generic-ahci";
646 reg = <0x663f2000 0x1000>; 654 reg = <0x663f2000 0x1000>;
655 dma-coherent;
647 reg-names = "ahci"; 656 reg-names = "ahci";
648 interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>; 657 interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>;
649 #address-cells = <1>; 658 #address-cells = <1>;
@@ -667,6 +676,7 @@
667 compatible = "brcm,sdhci-iproc-cygnus"; 676 compatible = "brcm,sdhci-iproc-cygnus";
668 reg = <0x66420000 0x100>; 677 reg = <0x66420000 0x100>;
669 interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>; 678 interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
679 dma-coherent;
670 bus-width = <8>; 680 bus-width = <8>;
671 clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>; 681 clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
672 status = "disabled"; 682 status = "disabled";
@@ -676,6 +686,7 @@
676 compatible = "brcm,sdhci-iproc-cygnus"; 686 compatible = "brcm,sdhci-iproc-cygnus";
677 reg = <0x66430000 0x100>; 687 reg = <0x66430000 0x100>;
678 interrupts = <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>; 688 interrupts = <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>;
689 dma-coherent;
679 bus-width = <8>; 690 bus-width = <8>;
680 clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>; 691 clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
681 status = "disabled"; 692 status = "disabled";
diff --git a/arch/arm64/include/asm/current.h b/arch/arm64/include/asm/current.h
index 86c404171305..f6580d4afb0e 100644
--- a/arch/arm64/include/asm/current.h
+++ b/arch/arm64/include/asm/current.h
@@ -3,8 +3,6 @@
3 3
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5 5
6#include <asm/sysreg.h>
7
8#ifndef __ASSEMBLY__ 6#ifndef __ASSEMBLY__
9 7
10struct task_struct; 8struct task_struct;
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index e78ac26324bd..bdbeb06dc11e 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
44#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) 44#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
45#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) 45#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
46 46
47#define __NR_compat_syscalls 394 47#define __NR_compat_syscalls 398
48#endif 48#endif
49 49
50#define __ARCH_WANT_SYS_CLONE 50#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index b7e8ef16ff0d..c66b51aab195 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -809,6 +809,14 @@ __SYSCALL(__NR_copy_file_range, sys_copy_file_range)
809__SYSCALL(__NR_preadv2, compat_sys_preadv2) 809__SYSCALL(__NR_preadv2, compat_sys_preadv2)
810#define __NR_pwritev2 393 810#define __NR_pwritev2 393
811__SYSCALL(__NR_pwritev2, compat_sys_pwritev2) 811__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
812#define __NR_pkey_mprotect 394
813__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
814#define __NR_pkey_alloc 395
815__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
816#define __NR_pkey_free 396
817__SYSCALL(__NR_pkey_free, sys_pkey_free)
818#define __NR_statx 397
819__SYSCALL(__NR_statx, sys_statx)
812 820
813/* 821/*
814 * Please add new compat syscalls above this comment and update 822 * Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 769f24ef628c..d7e90d97f5c4 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -131,11 +131,15 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
131 /* 131 /*
132 * The kernel Image should not extend across a 1GB/32MB/512MB alignment 132 * The kernel Image should not extend across a 1GB/32MB/512MB alignment
133 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this 133 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
134 * happens, increase the KASLR offset by the size of the kernel image. 134 * happens, increase the KASLR offset by the size of the kernel image
135 * rounded up by SWAPPER_BLOCK_SIZE.
135 */ 136 */
136 if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) != 137 if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
137 (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) 138 (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
138 offset = (offset + (u64)(_end - _text)) & mask; 139 u64 kimg_sz = _end - _text;
140 offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
141 & mask;
142 }
139 143
140 if (IS_ENABLED(CONFIG_KASAN)) 144 if (IS_ENABLED(CONFIG_KASAN))
141 /* 145 /*
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ef1caae02110..9b1036570586 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -944,7 +944,7 @@ static bool have_cpu_die(void)
944#ifdef CONFIG_HOTPLUG_CPU 944#ifdef CONFIG_HOTPLUG_CPU
945 int any_cpu = raw_smp_processor_id(); 945 int any_cpu = raw_smp_processor_id();
946 946
947 if (cpu_ops[any_cpu]->cpu_die) 947 if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
948 return true; 948 return true;
949#endif 949#endif
950 return false; 950 return false;
diff --git a/arch/arm64/kernel/vdso/.gitignore b/arch/arm64/kernel/vdso/.gitignore
index b8cc94e9698b..f8b69d84238e 100644
--- a/arch/arm64/kernel/vdso/.gitignore
+++ b/arch/arm64/kernel/vdso/.gitignore
@@ -1,2 +1 @@
1vdso.lds vdso.lds
2vdso-offsets.h
diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c
index a27e1f02ce18..8801dc98fd44 100644
--- a/arch/c6x/kernel/ptrace.c
+++ b/arch/c6x/kernel/ptrace.c
@@ -70,46 +70,6 @@ static int gpr_get(struct task_struct *target,
70 0, sizeof(*regs)); 70 0, sizeof(*regs));
71} 71}
72 72
73static int gpr_set(struct task_struct *target,
74 const struct user_regset *regset,
75 unsigned int pos, unsigned int count,
76 const void *kbuf, const void __user *ubuf)
77{
78 int ret;
79 struct pt_regs *regs = task_pt_regs(target);
80
81 /* Don't copyin TSR or CSR */
82 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
83 &regs,
84 0, PT_TSR * sizeof(long));
85 if (ret)
86 return ret;
87
88 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
89 PT_TSR * sizeof(long),
90 (PT_TSR + 1) * sizeof(long));
91 if (ret)
92 return ret;
93
94 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
95 &regs,
96 (PT_TSR + 1) * sizeof(long),
97 PT_CSR * sizeof(long));
98 if (ret)
99 return ret;
100
101 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
102 PT_CSR * sizeof(long),
103 (PT_CSR + 1) * sizeof(long));
104 if (ret)
105 return ret;
106
107 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
108 &regs,
109 (PT_CSR + 1) * sizeof(long), -1);
110 return ret;
111}
112
113enum c6x_regset { 73enum c6x_regset {
114 REGSET_GPR, 74 REGSET_GPR,
115}; 75};
@@ -121,7 +81,6 @@ static const struct user_regset c6x_regsets[] = {
121 .size = sizeof(u32), 81 .size = sizeof(u32),
122 .align = sizeof(u32), 82 .align = sizeof(u32),
123 .get = gpr_get, 83 .get = gpr_get,
124 .set = gpr_set
125 }, 84 },
126}; 85};
127 86
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
index 92075544a19a..0dc1c8f622bc 100644
--- a/arch/h8300/kernel/ptrace.c
+++ b/arch/h8300/kernel/ptrace.c
@@ -95,7 +95,8 @@ static int regs_get(struct task_struct *target,
95 long *reg = (long *)&regs; 95 long *reg = (long *)&regs;
96 96
97 /* build user regs in buffer */ 97 /* build user regs in buffer */
98 for (r = 0; r < ARRAY_SIZE(register_offset); r++) 98 BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
99 for (r = 0; r < sizeof(regs) / sizeof(long); r++)
99 *reg++ = h8300_get_reg(target, r); 100 *reg++ = h8300_get_reg(target, r);
100 101
101 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 102 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -113,7 +114,8 @@ static int regs_set(struct task_struct *target,
113 long *reg; 114 long *reg;
114 115
115 /* build user regs in buffer */ 116 /* build user regs in buffer */
116 for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++) 117 BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
118 for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++)
117 *reg++ = h8300_get_reg(target, r); 119 *reg++ = h8300_get_reg(target, r);
118 120
119 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 121 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -122,7 +124,7 @@ static int regs_set(struct task_struct *target,
122 return ret; 124 return ret;
123 125
124 /* write back to pt_regs */ 126 /* write back to pt_regs */
125 for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++) 127 for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++)
126 h8300_put_reg(target, r, *reg++); 128 h8300_put_reg(target, r, *reg++);
127 return 0; 129 return 0;
128} 130}
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 048bf076f7df..531cb9eb3319 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
25# CONFIG_EFI_PARTITION is not set 25# CONFIG_EFI_PARTITION is not set
26CONFIG_SYSV68_PARTITION=y 26CONFIG_SYSV68_PARTITION=y
27CONFIG_IOSCHED_DEADLINE=m 27CONFIG_IOSCHED_DEADLINE=m
28CONFIG_MQ_IOSCHED_DEADLINE=m
28CONFIG_KEXEC=y 29CONFIG_KEXEC=y
29CONFIG_BOOTINFO_PROC=y 30CONFIG_BOOTINFO_PROC=y
30CONFIG_M68020=y 31CONFIG_M68020=y
@@ -60,6 +61,7 @@ CONFIG_NET_IPVTI=m
60CONFIG_NET_FOU_IP_TUNNELS=y 61CONFIG_NET_FOU_IP_TUNNELS=y
61CONFIG_INET_AH=m 62CONFIG_INET_AH=m
62CONFIG_INET_ESP=m 63CONFIG_INET_ESP=m
64CONFIG_INET_ESP_OFFLOAD=m
63CONFIG_INET_IPCOMP=m 65CONFIG_INET_IPCOMP=m
64CONFIG_INET_XFRM_MODE_TRANSPORT=m 66CONFIG_INET_XFRM_MODE_TRANSPORT=m
65CONFIG_INET_XFRM_MODE_TUNNEL=m 67CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -71,6 +73,7 @@ CONFIG_IPV6=m
71CONFIG_IPV6_ROUTER_PREF=y 73CONFIG_IPV6_ROUTER_PREF=y
72CONFIG_INET6_AH=m 74CONFIG_INET6_AH=m
73CONFIG_INET6_ESP=m 75CONFIG_INET6_ESP=m
76CONFIG_INET6_ESP_OFFLOAD=m
74CONFIG_INET6_IPCOMP=m 77CONFIG_INET6_IPCOMP=m
75CONFIG_IPV6_ILA=m 78CONFIG_IPV6_ILA=m
76CONFIG_IPV6_VTI=m 79CONFIG_IPV6_VTI=m
@@ -101,6 +104,7 @@ CONFIG_NFT_NUMGEN=m
101CONFIG_NFT_CT=m 104CONFIG_NFT_CT=m
102CONFIG_NFT_SET_RBTREE=m 105CONFIG_NFT_SET_RBTREE=m
103CONFIG_NFT_SET_HASH=m 106CONFIG_NFT_SET_HASH=m
107CONFIG_NFT_SET_BITMAP=m
104CONFIG_NFT_COUNTER=m 108CONFIG_NFT_COUNTER=m
105CONFIG_NFT_LOG=m 109CONFIG_NFT_LOG=m
106CONFIG_NFT_LIMIT=m 110CONFIG_NFT_LIMIT=m
@@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m
298CONFIG_NET_L3_MASTER_DEV=y 302CONFIG_NET_L3_MASTER_DEV=y
299CONFIG_AF_KCM=m 303CONFIG_AF_KCM=m
300# CONFIG_WIRELESS is not set 304# CONFIG_WIRELESS is not set
305CONFIG_PSAMPLE=m
306CONFIG_NET_IFE=m
301CONFIG_NET_DEVLINK=m 307CONFIG_NET_DEVLINK=m
302# CONFIG_UEVENT_HELPER is not set 308# CONFIG_UEVENT_HELPER is not set
303CONFIG_DEVTMPFS=y 309CONFIG_DEVTMPFS=y
@@ -371,6 +377,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
371CONFIG_MACVLAN=m 377CONFIG_MACVLAN=m
372CONFIG_MACVTAP=m 378CONFIG_MACVTAP=m
373CONFIG_IPVLAN=m 379CONFIG_IPVLAN=m
380CONFIG_IPVTAP=m
374CONFIG_VXLAN=m 381CONFIG_VXLAN=m
375CONFIG_GENEVE=m 382CONFIG_GENEVE=m
376CONFIG_GTP=m 383CONFIG_GTP=m
@@ -383,6 +390,7 @@ CONFIG_VETH=m
383# CONFIG_NET_VENDOR_AMAZON is not set 390# CONFIG_NET_VENDOR_AMAZON is not set
384CONFIG_A2065=y 391CONFIG_A2065=y
385CONFIG_ARIADNE=y 392CONFIG_ARIADNE=y
393# CONFIG_NET_VENDOR_AQUANTIA is not set
386# CONFIG_NET_VENDOR_ARC is not set 394# CONFIG_NET_VENDOR_ARC is not set
387# CONFIG_NET_CADENCE is not set 395# CONFIG_NET_CADENCE is not set
388# CONFIG_NET_VENDOR_BROADCOM is not set 396# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -404,7 +412,6 @@ CONFIG_ZORRO8390=y
404# CONFIG_NET_VENDOR_SOLARFLARE is not set 412# CONFIG_NET_VENDOR_SOLARFLARE is not set
405# CONFIG_NET_VENDOR_SMSC is not set 413# CONFIG_NET_VENDOR_SMSC is not set
406# CONFIG_NET_VENDOR_STMICRO is not set 414# CONFIG_NET_VENDOR_STMICRO is not set
407# CONFIG_NET_VENDOR_SYNOPSYS is not set
408# CONFIG_NET_VENDOR_VIA is not set 415# CONFIG_NET_VENDOR_VIA is not set
409# CONFIG_NET_VENDOR_WIZNET is not set 416# CONFIG_NET_VENDOR_WIZNET is not set
410CONFIG_PPP=m 417CONFIG_PPP=m
@@ -564,6 +571,8 @@ CONFIG_NLS_MAC_TURKISH=m
564CONFIG_DLM=m 571CONFIG_DLM=m
565# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 572# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
566CONFIG_MAGIC_SYSRQ=y 573CONFIG_MAGIC_SYSRQ=y
574CONFIG_WW_MUTEX_SELFTEST=m
575CONFIG_ATOMIC64_SELFTEST=m
567CONFIG_ASYNC_RAID6_TEST=m 576CONFIG_ASYNC_RAID6_TEST=m
568CONFIG_TEST_HEXDUMP=m 577CONFIG_TEST_HEXDUMP=m
569CONFIG_TEST_STRING_HELPERS=m 578CONFIG_TEST_STRING_HELPERS=m
@@ -594,6 +603,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
594CONFIG_CRYPTO_LRW=m 603CONFIG_CRYPTO_LRW=m
595CONFIG_CRYPTO_PCBC=m 604CONFIG_CRYPTO_PCBC=m
596CONFIG_CRYPTO_KEYWRAP=m 605CONFIG_CRYPTO_KEYWRAP=m
606CONFIG_CRYPTO_CMAC=m
597CONFIG_CRYPTO_XCBC=m 607CONFIG_CRYPTO_XCBC=m
598CONFIG_CRYPTO_VMAC=m 608CONFIG_CRYPTO_VMAC=m
599CONFIG_CRYPTO_MICHAEL_MIC=m 609CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -605,6 +615,7 @@ CONFIG_CRYPTO_SHA512=m
605CONFIG_CRYPTO_SHA3=m 615CONFIG_CRYPTO_SHA3=m
606CONFIG_CRYPTO_TGR192=m 616CONFIG_CRYPTO_TGR192=m
607CONFIG_CRYPTO_WP512=m 617CONFIG_CRYPTO_WP512=m
618CONFIG_CRYPTO_AES_TI=m
608CONFIG_CRYPTO_ANUBIS=m 619CONFIG_CRYPTO_ANUBIS=m
609CONFIG_CRYPTO_BLOWFISH=m 620CONFIG_CRYPTO_BLOWFISH=m
610CONFIG_CRYPTO_CAMELLIA=m 621CONFIG_CRYPTO_CAMELLIA=m
@@ -629,4 +640,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
629CONFIG_CRYPTO_USER_API_RNG=m 640CONFIG_CRYPTO_USER_API_RNG=m
630CONFIG_CRYPTO_USER_API_AEAD=m 641CONFIG_CRYPTO_USER_API_AEAD=m
631# CONFIG_CRYPTO_HW is not set 642# CONFIG_CRYPTO_HW is not set
643CONFIG_CRC32_SELFTEST=m
632CONFIG_XZ_DEC_TEST=m 644CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index d4de24963f5f..ca91d39555da 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
26# CONFIG_EFI_PARTITION is not set 26# CONFIG_EFI_PARTITION is not set
27CONFIG_SYSV68_PARTITION=y 27CONFIG_SYSV68_PARTITION=y
28CONFIG_IOSCHED_DEADLINE=m 28CONFIG_IOSCHED_DEADLINE=m
29CONFIG_MQ_IOSCHED_DEADLINE=m
29CONFIG_KEXEC=y 30CONFIG_KEXEC=y
30CONFIG_BOOTINFO_PROC=y 31CONFIG_BOOTINFO_PROC=y
31CONFIG_M68020=y 32CONFIG_M68020=y
@@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
58CONFIG_NET_FOU_IP_TUNNELS=y 59CONFIG_NET_FOU_IP_TUNNELS=y
59CONFIG_INET_AH=m 60CONFIG_INET_AH=m
60CONFIG_INET_ESP=m 61CONFIG_INET_ESP=m
62CONFIG_INET_ESP_OFFLOAD=m
61CONFIG_INET_IPCOMP=m 63CONFIG_INET_IPCOMP=m
62CONFIG_INET_XFRM_MODE_TRANSPORT=m 64CONFIG_INET_XFRM_MODE_TRANSPORT=m
63CONFIG_INET_XFRM_MODE_TUNNEL=m 65CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@ CONFIG_IPV6=m
69CONFIG_IPV6_ROUTER_PREF=y 71CONFIG_IPV6_ROUTER_PREF=y
70CONFIG_INET6_AH=m 72CONFIG_INET6_AH=m
71CONFIG_INET6_ESP=m 73CONFIG_INET6_ESP=m
74CONFIG_INET6_ESP_OFFLOAD=m
72CONFIG_INET6_IPCOMP=m 75CONFIG_INET6_IPCOMP=m
73CONFIG_IPV6_ILA=m 76CONFIG_IPV6_ILA=m
74CONFIG_IPV6_VTI=m 77CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
99CONFIG_NFT_CT=m 102CONFIG_NFT_CT=m
100CONFIG_NFT_SET_RBTREE=m 103CONFIG_NFT_SET_RBTREE=m
101CONFIG_NFT_SET_HASH=m 104CONFIG_NFT_SET_HASH=m
105CONFIG_NFT_SET_BITMAP=m
102CONFIG_NFT_COUNTER=m 106CONFIG_NFT_COUNTER=m
103CONFIG_NFT_LOG=m 107CONFIG_NFT_LOG=m
104CONFIG_NFT_LIMIT=m 108CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
296CONFIG_NET_L3_MASTER_DEV=y 300CONFIG_NET_L3_MASTER_DEV=y
297CONFIG_AF_KCM=m 301CONFIG_AF_KCM=m
298# CONFIG_WIRELESS is not set 302# CONFIG_WIRELESS is not set
303CONFIG_PSAMPLE=m
304CONFIG_NET_IFE=m
299CONFIG_NET_DEVLINK=m 305CONFIG_NET_DEVLINK=m
300# CONFIG_UEVENT_HELPER is not set 306# CONFIG_UEVENT_HELPER is not set
301CONFIG_DEVTMPFS=y 307CONFIG_DEVTMPFS=y
@@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
353CONFIG_MACVLAN=m 359CONFIG_MACVLAN=m
354CONFIG_MACVTAP=m 360CONFIG_MACVTAP=m
355CONFIG_IPVLAN=m 361CONFIG_IPVLAN=m
362CONFIG_IPVTAP=m
356CONFIG_VXLAN=m 363CONFIG_VXLAN=m
357CONFIG_GENEVE=m 364CONFIG_GENEVE=m
358CONFIG_GTP=m 365CONFIG_GTP=m
@@ -362,6 +369,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
362CONFIG_VETH=m 369CONFIG_VETH=m
363# CONFIG_NET_VENDOR_ALACRITECH is not set 370# CONFIG_NET_VENDOR_ALACRITECH is not set
364# CONFIG_NET_VENDOR_AMAZON is not set 371# CONFIG_NET_VENDOR_AMAZON is not set
372# CONFIG_NET_VENDOR_AQUANTIA is not set
365# CONFIG_NET_VENDOR_ARC is not set 373# CONFIG_NET_VENDOR_ARC is not set
366# CONFIG_NET_CADENCE is not set 374# CONFIG_NET_CADENCE is not set
367# CONFIG_NET_VENDOR_BROADCOM is not set 375# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -378,7 +386,6 @@ CONFIG_VETH=m
378# CONFIG_NET_VENDOR_SEEQ is not set 386# CONFIG_NET_VENDOR_SEEQ is not set
379# CONFIG_NET_VENDOR_SOLARFLARE is not set 387# CONFIG_NET_VENDOR_SOLARFLARE is not set
380# CONFIG_NET_VENDOR_STMICRO is not set 388# CONFIG_NET_VENDOR_STMICRO is not set
381# CONFIG_NET_VENDOR_SYNOPSYS is not set
382# CONFIG_NET_VENDOR_VIA is not set 389# CONFIG_NET_VENDOR_VIA is not set
383# CONFIG_NET_VENDOR_WIZNET is not set 390# CONFIG_NET_VENDOR_WIZNET is not set
384CONFIG_PPP=m 391CONFIG_PPP=m
@@ -523,6 +530,8 @@ CONFIG_NLS_MAC_TURKISH=m
523CONFIG_DLM=m 530CONFIG_DLM=m
524# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 531# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
525CONFIG_MAGIC_SYSRQ=y 532CONFIG_MAGIC_SYSRQ=y
533CONFIG_WW_MUTEX_SELFTEST=m
534CONFIG_ATOMIC64_SELFTEST=m
526CONFIG_ASYNC_RAID6_TEST=m 535CONFIG_ASYNC_RAID6_TEST=m
527CONFIG_TEST_HEXDUMP=m 536CONFIG_TEST_HEXDUMP=m
528CONFIG_TEST_STRING_HELPERS=m 537CONFIG_TEST_STRING_HELPERS=m
@@ -553,6 +562,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
553CONFIG_CRYPTO_LRW=m 562CONFIG_CRYPTO_LRW=m
554CONFIG_CRYPTO_PCBC=m 563CONFIG_CRYPTO_PCBC=m
555CONFIG_CRYPTO_KEYWRAP=m 564CONFIG_CRYPTO_KEYWRAP=m
565CONFIG_CRYPTO_CMAC=m
556CONFIG_CRYPTO_XCBC=m 566CONFIG_CRYPTO_XCBC=m
557CONFIG_CRYPTO_VMAC=m 567CONFIG_CRYPTO_VMAC=m
558CONFIG_CRYPTO_MICHAEL_MIC=m 568CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -564,6 +574,7 @@ CONFIG_CRYPTO_SHA512=m
564CONFIG_CRYPTO_SHA3=m 574CONFIG_CRYPTO_SHA3=m
565CONFIG_CRYPTO_TGR192=m 575CONFIG_CRYPTO_TGR192=m
566CONFIG_CRYPTO_WP512=m 576CONFIG_CRYPTO_WP512=m
577CONFIG_CRYPTO_AES_TI=m
567CONFIG_CRYPTO_ANUBIS=m 578CONFIG_CRYPTO_ANUBIS=m
568CONFIG_CRYPTO_BLOWFISH=m 579CONFIG_CRYPTO_BLOWFISH=m
569CONFIG_CRYPTO_CAMELLIA=m 580CONFIG_CRYPTO_CAMELLIA=m
@@ -588,4 +599,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
588CONFIG_CRYPTO_USER_API_RNG=m 599CONFIG_CRYPTO_USER_API_RNG=m
589CONFIG_CRYPTO_USER_API_AEAD=m 600CONFIG_CRYPTO_USER_API_AEAD=m
590# CONFIG_CRYPTO_HW is not set 601# CONFIG_CRYPTO_HW is not set
602CONFIG_CRC32_SELFTEST=m
591CONFIG_XZ_DEC_TEST=m 603CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index fc0fd3f871f3..23a3d8a691e2 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
25# CONFIG_EFI_PARTITION is not set 25# CONFIG_EFI_PARTITION is not set
26CONFIG_SYSV68_PARTITION=y 26CONFIG_SYSV68_PARTITION=y
27CONFIG_IOSCHED_DEADLINE=m 27CONFIG_IOSCHED_DEADLINE=m
28CONFIG_MQ_IOSCHED_DEADLINE=m
28CONFIG_KEXEC=y 29CONFIG_KEXEC=y
29CONFIG_BOOTINFO_PROC=y 30CONFIG_BOOTINFO_PROC=y
30CONFIG_M68020=y 31CONFIG_M68020=y
@@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
58CONFIG_NET_FOU_IP_TUNNELS=y 59CONFIG_NET_FOU_IP_TUNNELS=y
59CONFIG_INET_AH=m 60CONFIG_INET_AH=m
60CONFIG_INET_ESP=m 61CONFIG_INET_ESP=m
62CONFIG_INET_ESP_OFFLOAD=m
61CONFIG_INET_IPCOMP=m 63CONFIG_INET_IPCOMP=m
62CONFIG_INET_XFRM_MODE_TRANSPORT=m 64CONFIG_INET_XFRM_MODE_TRANSPORT=m
63CONFIG_INET_XFRM_MODE_TUNNEL=m 65CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@ CONFIG_IPV6=m
69CONFIG_IPV6_ROUTER_PREF=y 71CONFIG_IPV6_ROUTER_PREF=y
70CONFIG_INET6_AH=m 72CONFIG_INET6_AH=m
71CONFIG_INET6_ESP=m 73CONFIG_INET6_ESP=m
74CONFIG_INET6_ESP_OFFLOAD=m
72CONFIG_INET6_IPCOMP=m 75CONFIG_INET6_IPCOMP=m
73CONFIG_IPV6_ILA=m 76CONFIG_IPV6_ILA=m
74CONFIG_IPV6_VTI=m 77CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
99CONFIG_NFT_CT=m 102CONFIG_NFT_CT=m
100CONFIG_NFT_SET_RBTREE=m 103CONFIG_NFT_SET_RBTREE=m
101CONFIG_NFT_SET_HASH=m 104CONFIG_NFT_SET_HASH=m
105CONFIG_NFT_SET_BITMAP=m
102CONFIG_NFT_COUNTER=m 106CONFIG_NFT_COUNTER=m
103CONFIG_NFT_LOG=m 107CONFIG_NFT_LOG=m
104CONFIG_NFT_LIMIT=m 108CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
296CONFIG_NET_L3_MASTER_DEV=y 300CONFIG_NET_L3_MASTER_DEV=y
297CONFIG_AF_KCM=m 301CONFIG_AF_KCM=m
298# CONFIG_WIRELESS is not set 302# CONFIG_WIRELESS is not set
303CONFIG_PSAMPLE=m
304CONFIG_NET_IFE=m
299CONFIG_NET_DEVLINK=m 305CONFIG_NET_DEVLINK=m
300# CONFIG_UEVENT_HELPER is not set 306# CONFIG_UEVENT_HELPER is not set
301CONFIG_DEVTMPFS=y 307CONFIG_DEVTMPFS=y
@@ -362,6 +368,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
362CONFIG_MACVLAN=m 368CONFIG_MACVLAN=m
363CONFIG_MACVTAP=m 369CONFIG_MACVTAP=m
364CONFIG_IPVLAN=m 370CONFIG_IPVLAN=m
371CONFIG_IPVTAP=m
365CONFIG_VXLAN=m 372CONFIG_VXLAN=m
366CONFIG_GENEVE=m 373CONFIG_GENEVE=m
367CONFIG_GTP=m 374CONFIG_GTP=m
@@ -372,6 +379,7 @@ CONFIG_VETH=m
372# CONFIG_NET_VENDOR_ALACRITECH is not set 379# CONFIG_NET_VENDOR_ALACRITECH is not set
373# CONFIG_NET_VENDOR_AMAZON is not set 380# CONFIG_NET_VENDOR_AMAZON is not set
374CONFIG_ATARILANCE=y 381CONFIG_ATARILANCE=y
382# CONFIG_NET_VENDOR_AQUANTIA is not set
375# CONFIG_NET_VENDOR_ARC is not set 383# CONFIG_NET_VENDOR_ARC is not set
376# CONFIG_NET_CADENCE is not set 384# CONFIG_NET_CADENCE is not set
377# CONFIG_NET_VENDOR_BROADCOM is not set 385# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -389,7 +397,6 @@ CONFIG_NE2000=y
389# CONFIG_NET_VENDOR_SOLARFLARE is not set 397# CONFIG_NET_VENDOR_SOLARFLARE is not set
390CONFIG_SMC91X=y 398CONFIG_SMC91X=y
391# CONFIG_NET_VENDOR_STMICRO is not set 399# CONFIG_NET_VENDOR_STMICRO is not set
392# CONFIG_NET_VENDOR_SYNOPSYS is not set
393# CONFIG_NET_VENDOR_VIA is not set 400# CONFIG_NET_VENDOR_VIA is not set
394# CONFIG_NET_VENDOR_WIZNET is not set 401# CONFIG_NET_VENDOR_WIZNET is not set
395CONFIG_PPP=m 402CONFIG_PPP=m
@@ -544,6 +551,8 @@ CONFIG_NLS_MAC_TURKISH=m
544CONFIG_DLM=m 551CONFIG_DLM=m
545# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 552# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
546CONFIG_MAGIC_SYSRQ=y 553CONFIG_MAGIC_SYSRQ=y
554CONFIG_WW_MUTEX_SELFTEST=m
555CONFIG_ATOMIC64_SELFTEST=m
547CONFIG_ASYNC_RAID6_TEST=m 556CONFIG_ASYNC_RAID6_TEST=m
548CONFIG_TEST_HEXDUMP=m 557CONFIG_TEST_HEXDUMP=m
549CONFIG_TEST_STRING_HELPERS=m 558CONFIG_TEST_STRING_HELPERS=m
@@ -574,6 +583,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
574CONFIG_CRYPTO_LRW=m 583CONFIG_CRYPTO_LRW=m
575CONFIG_CRYPTO_PCBC=m 584CONFIG_CRYPTO_PCBC=m
576CONFIG_CRYPTO_KEYWRAP=m 585CONFIG_CRYPTO_KEYWRAP=m
586CONFIG_CRYPTO_CMAC=m
577CONFIG_CRYPTO_XCBC=m 587CONFIG_CRYPTO_XCBC=m
578CONFIG_CRYPTO_VMAC=m 588CONFIG_CRYPTO_VMAC=m
579CONFIG_CRYPTO_MICHAEL_MIC=m 589CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -585,6 +595,7 @@ CONFIG_CRYPTO_SHA512=m
585CONFIG_CRYPTO_SHA3=m 595CONFIG_CRYPTO_SHA3=m
586CONFIG_CRYPTO_TGR192=m 596CONFIG_CRYPTO_TGR192=m
587CONFIG_CRYPTO_WP512=m 597CONFIG_CRYPTO_WP512=m
598CONFIG_CRYPTO_AES_TI=m
588CONFIG_CRYPTO_ANUBIS=m 599CONFIG_CRYPTO_ANUBIS=m
589CONFIG_CRYPTO_BLOWFISH=m 600CONFIG_CRYPTO_BLOWFISH=m
590CONFIG_CRYPTO_CAMELLIA=m 601CONFIG_CRYPTO_CAMELLIA=m
@@ -609,4 +620,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
609CONFIG_CRYPTO_USER_API_RNG=m 620CONFIG_CRYPTO_USER_API_RNG=m
610CONFIG_CRYPTO_USER_API_AEAD=m 621CONFIG_CRYPTO_USER_API_AEAD=m
611# CONFIG_CRYPTO_HW is not set 622# CONFIG_CRYPTO_HW is not set
623CONFIG_CRC32_SELFTEST=m
612CONFIG_XZ_DEC_TEST=m 624CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 52e984a0aa69..95deb95140fe 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
25CONFIG_SUN_PARTITION=y 25CONFIG_SUN_PARTITION=y
26# CONFIG_EFI_PARTITION is not set 26# CONFIG_EFI_PARTITION is not set
27CONFIG_IOSCHED_DEADLINE=m 27CONFIG_IOSCHED_DEADLINE=m
28CONFIG_MQ_IOSCHED_DEADLINE=m
28CONFIG_KEXEC=y 29CONFIG_KEXEC=y
29CONFIG_BOOTINFO_PROC=y 30CONFIG_BOOTINFO_PROC=y
30CONFIG_M68040=y 31CONFIG_M68040=y
@@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
56CONFIG_NET_FOU_IP_TUNNELS=y 57CONFIG_NET_FOU_IP_TUNNELS=y
57CONFIG_INET_AH=m 58CONFIG_INET_AH=m
58CONFIG_INET_ESP=m 59CONFIG_INET_ESP=m
60CONFIG_INET_ESP_OFFLOAD=m
59CONFIG_INET_IPCOMP=m 61CONFIG_INET_IPCOMP=m
60CONFIG_INET_XFRM_MODE_TRANSPORT=m 62CONFIG_INET_XFRM_MODE_TRANSPORT=m
61CONFIG_INET_XFRM_MODE_TUNNEL=m 63CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@ CONFIG_IPV6=m
67CONFIG_IPV6_ROUTER_PREF=y 69CONFIG_IPV6_ROUTER_PREF=y
68CONFIG_INET6_AH=m 70CONFIG_INET6_AH=m
69CONFIG_INET6_ESP=m 71CONFIG_INET6_ESP=m
72CONFIG_INET6_ESP_OFFLOAD=m
70CONFIG_INET6_IPCOMP=m 73CONFIG_INET6_IPCOMP=m
71CONFIG_IPV6_ILA=m 74CONFIG_IPV6_ILA=m
72CONFIG_IPV6_VTI=m 75CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
97CONFIG_NFT_CT=m 100CONFIG_NFT_CT=m
98CONFIG_NFT_SET_RBTREE=m 101CONFIG_NFT_SET_RBTREE=m
99CONFIG_NFT_SET_HASH=m 102CONFIG_NFT_SET_HASH=m
103CONFIG_NFT_SET_BITMAP=m
100CONFIG_NFT_COUNTER=m 104CONFIG_NFT_COUNTER=m
101CONFIG_NFT_LOG=m 105CONFIG_NFT_LOG=m
102CONFIG_NFT_LIMIT=m 106CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
294CONFIG_NET_L3_MASTER_DEV=y 298CONFIG_NET_L3_MASTER_DEV=y
295CONFIG_AF_KCM=m 299CONFIG_AF_KCM=m
296# CONFIG_WIRELESS is not set 300# CONFIG_WIRELESS is not set
301CONFIG_PSAMPLE=m
302CONFIG_NET_IFE=m
297CONFIG_NET_DEVLINK=m 303CONFIG_NET_DEVLINK=m
298# CONFIG_UEVENT_HELPER is not set 304# CONFIG_UEVENT_HELPER is not set
299CONFIG_DEVTMPFS=y 305CONFIG_DEVTMPFS=y
@@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
352CONFIG_MACVLAN=m 358CONFIG_MACVLAN=m
353CONFIG_MACVTAP=m 359CONFIG_MACVTAP=m
354CONFIG_IPVLAN=m 360CONFIG_IPVLAN=m
361CONFIG_IPVTAP=m
355CONFIG_VXLAN=m 362CONFIG_VXLAN=m
356CONFIG_GENEVE=m 363CONFIG_GENEVE=m
357CONFIG_GTP=m 364CONFIG_GTP=m
@@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
361CONFIG_VETH=m 368CONFIG_VETH=m
362# CONFIG_NET_VENDOR_ALACRITECH is not set 369# CONFIG_NET_VENDOR_ALACRITECH is not set
363# CONFIG_NET_VENDOR_AMAZON is not set 370# CONFIG_NET_VENDOR_AMAZON is not set
371# CONFIG_NET_VENDOR_AQUANTIA is not set
364# CONFIG_NET_VENDOR_ARC is not set 372# CONFIG_NET_VENDOR_ARC is not set
365# CONFIG_NET_CADENCE is not set 373# CONFIG_NET_CADENCE is not set
366# CONFIG_NET_VENDOR_BROADCOM is not set 374# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@ CONFIG_BVME6000_NET=y
377# CONFIG_NET_VENDOR_SEEQ is not set 385# CONFIG_NET_VENDOR_SEEQ is not set
378# CONFIG_NET_VENDOR_SOLARFLARE is not set 386# CONFIG_NET_VENDOR_SOLARFLARE is not set
379# CONFIG_NET_VENDOR_STMICRO is not set 387# CONFIG_NET_VENDOR_STMICRO is not set
380# CONFIG_NET_VENDOR_SYNOPSYS is not set
381# CONFIG_NET_VENDOR_VIA is not set 388# CONFIG_NET_VENDOR_VIA is not set
382# CONFIG_NET_VENDOR_WIZNET is not set 389# CONFIG_NET_VENDOR_WIZNET is not set
383CONFIG_PPP=m 390CONFIG_PPP=m
@@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
515CONFIG_DLM=m 522CONFIG_DLM=m
516# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 523# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
517CONFIG_MAGIC_SYSRQ=y 524CONFIG_MAGIC_SYSRQ=y
525CONFIG_WW_MUTEX_SELFTEST=m
526CONFIG_ATOMIC64_SELFTEST=m
518CONFIG_ASYNC_RAID6_TEST=m 527CONFIG_ASYNC_RAID6_TEST=m
519CONFIG_TEST_HEXDUMP=m 528CONFIG_TEST_HEXDUMP=m
520CONFIG_TEST_STRING_HELPERS=m 529CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
545CONFIG_CRYPTO_LRW=m 554CONFIG_CRYPTO_LRW=m
546CONFIG_CRYPTO_PCBC=m 555CONFIG_CRYPTO_PCBC=m
547CONFIG_CRYPTO_KEYWRAP=m 556CONFIG_CRYPTO_KEYWRAP=m
557CONFIG_CRYPTO_CMAC=m
548CONFIG_CRYPTO_XCBC=m 558CONFIG_CRYPTO_XCBC=m
549CONFIG_CRYPTO_VMAC=m 559CONFIG_CRYPTO_VMAC=m
550CONFIG_CRYPTO_MICHAEL_MIC=m 560CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
556CONFIG_CRYPTO_SHA3=m 566CONFIG_CRYPTO_SHA3=m
557CONFIG_CRYPTO_TGR192=m 567CONFIG_CRYPTO_TGR192=m
558CONFIG_CRYPTO_WP512=m 568CONFIG_CRYPTO_WP512=m
569CONFIG_CRYPTO_AES_TI=m
559CONFIG_CRYPTO_ANUBIS=m 570CONFIG_CRYPTO_ANUBIS=m
560CONFIG_CRYPTO_BLOWFISH=m 571CONFIG_CRYPTO_BLOWFISH=m
561CONFIG_CRYPTO_CAMELLIA=m 572CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
580CONFIG_CRYPTO_USER_API_RNG=m 591CONFIG_CRYPTO_USER_API_RNG=m
581CONFIG_CRYPTO_USER_API_AEAD=m 592CONFIG_CRYPTO_USER_API_AEAD=m
582# CONFIG_CRYPTO_HW is not set 593# CONFIG_CRYPTO_HW is not set
594CONFIG_CRC32_SELFTEST=m
583CONFIG_XZ_DEC_TEST=m 595CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index aaeed4422cc9..afae6958db2d 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
26# CONFIG_EFI_PARTITION is not set 26# CONFIG_EFI_PARTITION is not set
27CONFIG_SYSV68_PARTITION=y 27CONFIG_SYSV68_PARTITION=y
28CONFIG_IOSCHED_DEADLINE=m 28CONFIG_IOSCHED_DEADLINE=m
29CONFIG_MQ_IOSCHED_DEADLINE=m
29CONFIG_KEXEC=y 30CONFIG_KEXEC=y
30CONFIG_BOOTINFO_PROC=y 31CONFIG_BOOTINFO_PROC=y
31CONFIG_M68020=y 32CONFIG_M68020=y
@@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
58CONFIG_NET_FOU_IP_TUNNELS=y 59CONFIG_NET_FOU_IP_TUNNELS=y
59CONFIG_INET_AH=m 60CONFIG_INET_AH=m
60CONFIG_INET_ESP=m 61CONFIG_INET_ESP=m
62CONFIG_INET_ESP_OFFLOAD=m
61CONFIG_INET_IPCOMP=m 63CONFIG_INET_IPCOMP=m
62CONFIG_INET_XFRM_MODE_TRANSPORT=m 64CONFIG_INET_XFRM_MODE_TRANSPORT=m
63CONFIG_INET_XFRM_MODE_TUNNEL=m 65CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@ CONFIG_IPV6=m
69CONFIG_IPV6_ROUTER_PREF=y 71CONFIG_IPV6_ROUTER_PREF=y
70CONFIG_INET6_AH=m 72CONFIG_INET6_AH=m
71CONFIG_INET6_ESP=m 73CONFIG_INET6_ESP=m
74CONFIG_INET6_ESP_OFFLOAD=m
72CONFIG_INET6_IPCOMP=m 75CONFIG_INET6_IPCOMP=m
73CONFIG_IPV6_ILA=m 76CONFIG_IPV6_ILA=m
74CONFIG_IPV6_VTI=m 77CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
99CONFIG_NFT_CT=m 102CONFIG_NFT_CT=m
100CONFIG_NFT_SET_RBTREE=m 103CONFIG_NFT_SET_RBTREE=m
101CONFIG_NFT_SET_HASH=m 104CONFIG_NFT_SET_HASH=m
105CONFIG_NFT_SET_BITMAP=m
102CONFIG_NFT_COUNTER=m 106CONFIG_NFT_COUNTER=m
103CONFIG_NFT_LOG=m 107CONFIG_NFT_LOG=m
104CONFIG_NFT_LIMIT=m 108CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
296CONFIG_NET_L3_MASTER_DEV=y 300CONFIG_NET_L3_MASTER_DEV=y
297CONFIG_AF_KCM=m 301CONFIG_AF_KCM=m
298# CONFIG_WIRELESS is not set 302# CONFIG_WIRELESS is not set
303CONFIG_PSAMPLE=m
304CONFIG_NET_IFE=m
299CONFIG_NET_DEVLINK=m 305CONFIG_NET_DEVLINK=m
300# CONFIG_UEVENT_HELPER is not set 306# CONFIG_UEVENT_HELPER is not set
301CONFIG_DEVTMPFS=y 307CONFIG_DEVTMPFS=y
@@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
353CONFIG_MACVLAN=m 359CONFIG_MACVLAN=m
354CONFIG_MACVTAP=m 360CONFIG_MACVTAP=m
355CONFIG_IPVLAN=m 361CONFIG_IPVLAN=m
362CONFIG_IPVTAP=m
356CONFIG_VXLAN=m 363CONFIG_VXLAN=m
357CONFIG_GENEVE=m 364CONFIG_GENEVE=m
358CONFIG_GTP=m 365CONFIG_GTP=m
@@ -363,6 +370,7 @@ CONFIG_VETH=m
363# CONFIG_NET_VENDOR_ALACRITECH is not set 370# CONFIG_NET_VENDOR_ALACRITECH is not set
364# CONFIG_NET_VENDOR_AMAZON is not set 371# CONFIG_NET_VENDOR_AMAZON is not set
365CONFIG_HPLANCE=y 372CONFIG_HPLANCE=y
373# CONFIG_NET_VENDOR_AQUANTIA is not set
366# CONFIG_NET_VENDOR_ARC is not set 374# CONFIG_NET_VENDOR_ARC is not set
367# CONFIG_NET_CADENCE is not set 375# CONFIG_NET_CADENCE is not set
368# CONFIG_NET_VENDOR_BROADCOM is not set 376# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -379,7 +387,6 @@ CONFIG_HPLANCE=y
379# CONFIG_NET_VENDOR_SEEQ is not set 387# CONFIG_NET_VENDOR_SEEQ is not set
380# CONFIG_NET_VENDOR_SOLARFLARE is not set 388# CONFIG_NET_VENDOR_SOLARFLARE is not set
381# CONFIG_NET_VENDOR_STMICRO is not set 389# CONFIG_NET_VENDOR_STMICRO is not set
382# CONFIG_NET_VENDOR_SYNOPSYS is not set
383# CONFIG_NET_VENDOR_VIA is not set 390# CONFIG_NET_VENDOR_VIA is not set
384# CONFIG_NET_VENDOR_WIZNET is not set 391# CONFIG_NET_VENDOR_WIZNET is not set
385CONFIG_PPP=m 392CONFIG_PPP=m
@@ -525,6 +532,8 @@ CONFIG_NLS_MAC_TURKISH=m
525CONFIG_DLM=m 532CONFIG_DLM=m
526# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 533# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
527CONFIG_MAGIC_SYSRQ=y 534CONFIG_MAGIC_SYSRQ=y
535CONFIG_WW_MUTEX_SELFTEST=m
536CONFIG_ATOMIC64_SELFTEST=m
528CONFIG_ASYNC_RAID6_TEST=m 537CONFIG_ASYNC_RAID6_TEST=m
529CONFIG_TEST_HEXDUMP=m 538CONFIG_TEST_HEXDUMP=m
530CONFIG_TEST_STRING_HELPERS=m 539CONFIG_TEST_STRING_HELPERS=m
@@ -555,6 +564,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
555CONFIG_CRYPTO_LRW=m 564CONFIG_CRYPTO_LRW=m
556CONFIG_CRYPTO_PCBC=m 565CONFIG_CRYPTO_PCBC=m
557CONFIG_CRYPTO_KEYWRAP=m 566CONFIG_CRYPTO_KEYWRAP=m
567CONFIG_CRYPTO_CMAC=m
558CONFIG_CRYPTO_XCBC=m 568CONFIG_CRYPTO_XCBC=m
559CONFIG_CRYPTO_VMAC=m 569CONFIG_CRYPTO_VMAC=m
560CONFIG_CRYPTO_MICHAEL_MIC=m 570CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -566,6 +576,7 @@ CONFIG_CRYPTO_SHA512=m
566CONFIG_CRYPTO_SHA3=m 576CONFIG_CRYPTO_SHA3=m
567CONFIG_CRYPTO_TGR192=m 577CONFIG_CRYPTO_TGR192=m
568CONFIG_CRYPTO_WP512=m 578CONFIG_CRYPTO_WP512=m
579CONFIG_CRYPTO_AES_TI=m
569CONFIG_CRYPTO_ANUBIS=m 580CONFIG_CRYPTO_ANUBIS=m
570CONFIG_CRYPTO_BLOWFISH=m 581CONFIG_CRYPTO_BLOWFISH=m
571CONFIG_CRYPTO_CAMELLIA=m 582CONFIG_CRYPTO_CAMELLIA=m
@@ -590,4 +601,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
590CONFIG_CRYPTO_USER_API_RNG=m 601CONFIG_CRYPTO_USER_API_RNG=m
591CONFIG_CRYPTO_USER_API_AEAD=m 602CONFIG_CRYPTO_USER_API_AEAD=m
592# CONFIG_CRYPTO_HW is not set 603# CONFIG_CRYPTO_HW is not set
604CONFIG_CRC32_SELFTEST=m
593CONFIG_XZ_DEC_TEST=m 605CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 3bbc9b2f0dac..b010734729a7 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
25# CONFIG_EFI_PARTITION is not set 25# CONFIG_EFI_PARTITION is not set
26CONFIG_SYSV68_PARTITION=y 26CONFIG_SYSV68_PARTITION=y
27CONFIG_IOSCHED_DEADLINE=m 27CONFIG_IOSCHED_DEADLINE=m
28CONFIG_MQ_IOSCHED_DEADLINE=m
28CONFIG_KEXEC=y 29CONFIG_KEXEC=y
29CONFIG_BOOTINFO_PROC=y 30CONFIG_BOOTINFO_PROC=y
30CONFIG_M68020=y 31CONFIG_M68020=y
@@ -57,6 +58,7 @@ CONFIG_NET_IPVTI=m
57CONFIG_NET_FOU_IP_TUNNELS=y 58CONFIG_NET_FOU_IP_TUNNELS=y
58CONFIG_INET_AH=m 59CONFIG_INET_AH=m
59CONFIG_INET_ESP=m 60CONFIG_INET_ESP=m
61CONFIG_INET_ESP_OFFLOAD=m
60CONFIG_INET_IPCOMP=m 62CONFIG_INET_IPCOMP=m
61CONFIG_INET_XFRM_MODE_TRANSPORT=m 63CONFIG_INET_XFRM_MODE_TRANSPORT=m
62CONFIG_INET_XFRM_MODE_TUNNEL=m 64CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -68,6 +70,7 @@ CONFIG_IPV6=m
68CONFIG_IPV6_ROUTER_PREF=y 70CONFIG_IPV6_ROUTER_PREF=y
69CONFIG_INET6_AH=m 71CONFIG_INET6_AH=m
70CONFIG_INET6_ESP=m 72CONFIG_INET6_ESP=m
73CONFIG_INET6_ESP_OFFLOAD=m
71CONFIG_INET6_IPCOMP=m 74CONFIG_INET6_IPCOMP=m
72CONFIG_IPV6_ILA=m 75CONFIG_IPV6_ILA=m
73CONFIG_IPV6_VTI=m 76CONFIG_IPV6_VTI=m
@@ -98,6 +101,7 @@ CONFIG_NFT_NUMGEN=m
98CONFIG_NFT_CT=m 101CONFIG_NFT_CT=m
99CONFIG_NFT_SET_RBTREE=m 102CONFIG_NFT_SET_RBTREE=m
100CONFIG_NFT_SET_HASH=m 103CONFIG_NFT_SET_HASH=m
104CONFIG_NFT_SET_BITMAP=m
101CONFIG_NFT_COUNTER=m 105CONFIG_NFT_COUNTER=m
102CONFIG_NFT_LOG=m 106CONFIG_NFT_LOG=m
103CONFIG_NFT_LIMIT=m 107CONFIG_NFT_LIMIT=m
@@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m
298CONFIG_NET_L3_MASTER_DEV=y 302CONFIG_NET_L3_MASTER_DEV=y
299CONFIG_AF_KCM=m 303CONFIG_AF_KCM=m
300# CONFIG_WIRELESS is not set 304# CONFIG_WIRELESS is not set
305CONFIG_PSAMPLE=m
306CONFIG_NET_IFE=m
301CONFIG_NET_DEVLINK=m 307CONFIG_NET_DEVLINK=m
302# CONFIG_UEVENT_HELPER is not set 308# CONFIG_UEVENT_HELPER is not set
303CONFIG_DEVTMPFS=y 309CONFIG_DEVTMPFS=y
@@ -369,6 +375,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
369CONFIG_MACVLAN=m 375CONFIG_MACVLAN=m
370CONFIG_MACVTAP=m 376CONFIG_MACVTAP=m
371CONFIG_IPVLAN=m 377CONFIG_IPVLAN=m
378CONFIG_IPVTAP=m
372CONFIG_VXLAN=m 379CONFIG_VXLAN=m
373CONFIG_GENEVE=m 380CONFIG_GENEVE=m
374CONFIG_GTP=m 381CONFIG_GTP=m
@@ -379,6 +386,7 @@ CONFIG_VETH=m
379# CONFIG_NET_VENDOR_ALACRITECH is not set 386# CONFIG_NET_VENDOR_ALACRITECH is not set
380# CONFIG_NET_VENDOR_AMAZON is not set 387# CONFIG_NET_VENDOR_AMAZON is not set
381CONFIG_MACMACE=y 388CONFIG_MACMACE=y
389# CONFIG_NET_VENDOR_AQUANTIA is not set
382# CONFIG_NET_VENDOR_ARC is not set 390# CONFIG_NET_VENDOR_ARC is not set
383# CONFIG_NET_CADENCE is not set 391# CONFIG_NET_CADENCE is not set
384# CONFIG_NET_VENDOR_BROADCOM is not set 392# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -398,7 +406,6 @@ CONFIG_MAC8390=y
398# CONFIG_NET_VENDOR_SOLARFLARE is not set 406# CONFIG_NET_VENDOR_SOLARFLARE is not set
399# CONFIG_NET_VENDOR_SMSC is not set 407# CONFIG_NET_VENDOR_SMSC is not set
400# CONFIG_NET_VENDOR_STMICRO is not set 408# CONFIG_NET_VENDOR_STMICRO is not set
401# CONFIG_NET_VENDOR_SYNOPSYS is not set
402# CONFIG_NET_VENDOR_VIA is not set 409# CONFIG_NET_VENDOR_VIA is not set
403# CONFIG_NET_VENDOR_WIZNET is not set 410# CONFIG_NET_VENDOR_WIZNET is not set
404CONFIG_PPP=m 411CONFIG_PPP=m
@@ -547,6 +554,8 @@ CONFIG_NLS_MAC_TURKISH=m
547CONFIG_DLM=m 554CONFIG_DLM=m
548# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 555# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
549CONFIG_MAGIC_SYSRQ=y 556CONFIG_MAGIC_SYSRQ=y
557CONFIG_WW_MUTEX_SELFTEST=m
558CONFIG_ATOMIC64_SELFTEST=m
550CONFIG_ASYNC_RAID6_TEST=m 559CONFIG_ASYNC_RAID6_TEST=m
551CONFIG_TEST_HEXDUMP=m 560CONFIG_TEST_HEXDUMP=m
552CONFIG_TEST_STRING_HELPERS=m 561CONFIG_TEST_STRING_HELPERS=m
@@ -577,6 +586,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
577CONFIG_CRYPTO_LRW=m 586CONFIG_CRYPTO_LRW=m
578CONFIG_CRYPTO_PCBC=m 587CONFIG_CRYPTO_PCBC=m
579CONFIG_CRYPTO_KEYWRAP=m 588CONFIG_CRYPTO_KEYWRAP=m
589CONFIG_CRYPTO_CMAC=m
580CONFIG_CRYPTO_XCBC=m 590CONFIG_CRYPTO_XCBC=m
581CONFIG_CRYPTO_VMAC=m 591CONFIG_CRYPTO_VMAC=m
582CONFIG_CRYPTO_MICHAEL_MIC=m 592CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -588,6 +598,7 @@ CONFIG_CRYPTO_SHA512=m
588CONFIG_CRYPTO_SHA3=m 598CONFIG_CRYPTO_SHA3=m
589CONFIG_CRYPTO_TGR192=m 599CONFIG_CRYPTO_TGR192=m
590CONFIG_CRYPTO_WP512=m 600CONFIG_CRYPTO_WP512=m
601CONFIG_CRYPTO_AES_TI=m
591CONFIG_CRYPTO_ANUBIS=m 602CONFIG_CRYPTO_ANUBIS=m
592CONFIG_CRYPTO_BLOWFISH=m 603CONFIG_CRYPTO_BLOWFISH=m
593CONFIG_CRYPTO_CAMELLIA=m 604CONFIG_CRYPTO_CAMELLIA=m
@@ -612,4 +623,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
612CONFIG_CRYPTO_USER_API_RNG=m 623CONFIG_CRYPTO_USER_API_RNG=m
613CONFIG_CRYPTO_USER_API_AEAD=m 624CONFIG_CRYPTO_USER_API_AEAD=m
614# CONFIG_CRYPTO_HW is not set 625# CONFIG_CRYPTO_HW is not set
626CONFIG_CRC32_SELFTEST=m
615CONFIG_XZ_DEC_TEST=m 627CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 8f2c0decb2f8..0e414549b235 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -21,6 +21,7 @@ CONFIG_SOLARIS_X86_PARTITION=y
21CONFIG_UNIXWARE_DISKLABEL=y 21CONFIG_UNIXWARE_DISKLABEL=y
22# CONFIG_EFI_PARTITION is not set 22# CONFIG_EFI_PARTITION is not set
23CONFIG_IOSCHED_DEADLINE=m 23CONFIG_IOSCHED_DEADLINE=m
24CONFIG_MQ_IOSCHED_DEADLINE=m
24CONFIG_KEXEC=y 25CONFIG_KEXEC=y
25CONFIG_BOOTINFO_PROC=y 26CONFIG_BOOTINFO_PROC=y
26CONFIG_M68020=y 27CONFIG_M68020=y
@@ -67,6 +68,7 @@ CONFIG_NET_IPVTI=m
67CONFIG_NET_FOU_IP_TUNNELS=y 68CONFIG_NET_FOU_IP_TUNNELS=y
68CONFIG_INET_AH=m 69CONFIG_INET_AH=m
69CONFIG_INET_ESP=m 70CONFIG_INET_ESP=m
71CONFIG_INET_ESP_OFFLOAD=m
70CONFIG_INET_IPCOMP=m 72CONFIG_INET_IPCOMP=m
71CONFIG_INET_XFRM_MODE_TRANSPORT=m 73CONFIG_INET_XFRM_MODE_TRANSPORT=m
72CONFIG_INET_XFRM_MODE_TUNNEL=m 74CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -78,6 +80,7 @@ CONFIG_IPV6=m
78CONFIG_IPV6_ROUTER_PREF=y 80CONFIG_IPV6_ROUTER_PREF=y
79CONFIG_INET6_AH=m 81CONFIG_INET6_AH=m
80CONFIG_INET6_ESP=m 82CONFIG_INET6_ESP=m
83CONFIG_INET6_ESP_OFFLOAD=m
81CONFIG_INET6_IPCOMP=m 84CONFIG_INET6_IPCOMP=m
82CONFIG_IPV6_ILA=m 85CONFIG_IPV6_ILA=m
83CONFIG_IPV6_VTI=m 86CONFIG_IPV6_VTI=m
@@ -108,6 +111,7 @@ CONFIG_NFT_NUMGEN=m
108CONFIG_NFT_CT=m 111CONFIG_NFT_CT=m
109CONFIG_NFT_SET_RBTREE=m 112CONFIG_NFT_SET_RBTREE=m
110CONFIG_NFT_SET_HASH=m 113CONFIG_NFT_SET_HASH=m
114CONFIG_NFT_SET_BITMAP=m
111CONFIG_NFT_COUNTER=m 115CONFIG_NFT_COUNTER=m
112CONFIG_NFT_LOG=m 116CONFIG_NFT_LOG=m
113CONFIG_NFT_LIMIT=m 117CONFIG_NFT_LIMIT=m
@@ -308,6 +312,8 @@ CONFIG_MPLS_IPTUNNEL=m
308CONFIG_NET_L3_MASTER_DEV=y 312CONFIG_NET_L3_MASTER_DEV=y
309CONFIG_AF_KCM=m 313CONFIG_AF_KCM=m
310# CONFIG_WIRELESS is not set 314# CONFIG_WIRELESS is not set
315CONFIG_PSAMPLE=m
316CONFIG_NET_IFE=m
311CONFIG_NET_DEVLINK=m 317CONFIG_NET_DEVLINK=m
312# CONFIG_UEVENT_HELPER is not set 318# CONFIG_UEVENT_HELPER is not set
313CONFIG_DEVTMPFS=y 319CONFIG_DEVTMPFS=y
@@ -402,6 +408,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
402CONFIG_MACVLAN=m 408CONFIG_MACVLAN=m
403CONFIG_MACVTAP=m 409CONFIG_MACVTAP=m
404CONFIG_IPVLAN=m 410CONFIG_IPVLAN=m
411CONFIG_IPVTAP=m
405CONFIG_VXLAN=m 412CONFIG_VXLAN=m
406CONFIG_GENEVE=m 413CONFIG_GENEVE=m
407CONFIG_GTP=m 414CONFIG_GTP=m
@@ -419,6 +426,7 @@ CONFIG_HPLANCE=y
419CONFIG_MVME147_NET=y 426CONFIG_MVME147_NET=y
420CONFIG_SUN3LANCE=y 427CONFIG_SUN3LANCE=y
421CONFIG_MACMACE=y 428CONFIG_MACMACE=y
429# CONFIG_NET_VENDOR_AQUANTIA is not set
422# CONFIG_NET_VENDOR_ARC is not set 430# CONFIG_NET_VENDOR_ARC is not set
423# CONFIG_NET_CADENCE is not set 431# CONFIG_NET_CADENCE is not set
424# CONFIG_NET_VENDOR_BROADCOM is not set 432# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -444,7 +452,6 @@ CONFIG_ZORRO8390=y
444# CONFIG_NET_VENDOR_SOLARFLARE is not set 452# CONFIG_NET_VENDOR_SOLARFLARE is not set
445CONFIG_SMC91X=y 453CONFIG_SMC91X=y
446# CONFIG_NET_VENDOR_STMICRO is not set 454# CONFIG_NET_VENDOR_STMICRO is not set
447# CONFIG_NET_VENDOR_SYNOPSYS is not set
448# CONFIG_NET_VENDOR_VIA is not set 455# CONFIG_NET_VENDOR_VIA is not set
449# CONFIG_NET_VENDOR_WIZNET is not set 456# CONFIG_NET_VENDOR_WIZNET is not set
450CONFIG_PLIP=m 457CONFIG_PLIP=m
@@ -627,6 +634,8 @@ CONFIG_NLS_MAC_TURKISH=m
627CONFIG_DLM=m 634CONFIG_DLM=m
628# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 635# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
629CONFIG_MAGIC_SYSRQ=y 636CONFIG_MAGIC_SYSRQ=y
637CONFIG_WW_MUTEX_SELFTEST=m
638CONFIG_ATOMIC64_SELFTEST=m
630CONFIG_ASYNC_RAID6_TEST=m 639CONFIG_ASYNC_RAID6_TEST=m
631CONFIG_TEST_HEXDUMP=m 640CONFIG_TEST_HEXDUMP=m
632CONFIG_TEST_STRING_HELPERS=m 641CONFIG_TEST_STRING_HELPERS=m
@@ -657,6 +666,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
657CONFIG_CRYPTO_LRW=m 666CONFIG_CRYPTO_LRW=m
658CONFIG_CRYPTO_PCBC=m 667CONFIG_CRYPTO_PCBC=m
659CONFIG_CRYPTO_KEYWRAP=m 668CONFIG_CRYPTO_KEYWRAP=m
669CONFIG_CRYPTO_CMAC=m
660CONFIG_CRYPTO_XCBC=m 670CONFIG_CRYPTO_XCBC=m
661CONFIG_CRYPTO_VMAC=m 671CONFIG_CRYPTO_VMAC=m
662CONFIG_CRYPTO_MICHAEL_MIC=m 672CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -668,6 +678,7 @@ CONFIG_CRYPTO_SHA512=m
668CONFIG_CRYPTO_SHA3=m 678CONFIG_CRYPTO_SHA3=m
669CONFIG_CRYPTO_TGR192=m 679CONFIG_CRYPTO_TGR192=m
670CONFIG_CRYPTO_WP512=m 680CONFIG_CRYPTO_WP512=m
681CONFIG_CRYPTO_AES_TI=m
671CONFIG_CRYPTO_ANUBIS=m 682CONFIG_CRYPTO_ANUBIS=m
672CONFIG_CRYPTO_BLOWFISH=m 683CONFIG_CRYPTO_BLOWFISH=m
673CONFIG_CRYPTO_CAMELLIA=m 684CONFIG_CRYPTO_CAMELLIA=m
@@ -692,4 +703,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
692CONFIG_CRYPTO_USER_API_RNG=m 703CONFIG_CRYPTO_USER_API_RNG=m
693CONFIG_CRYPTO_USER_API_AEAD=m 704CONFIG_CRYPTO_USER_API_AEAD=m
694# CONFIG_CRYPTO_HW is not set 705# CONFIG_CRYPTO_HW is not set
706CONFIG_CRC32_SELFTEST=m
695CONFIG_XZ_DEC_TEST=m 707CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index c743dd22e96f..b2e687a0ec3d 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
25CONFIG_SUN_PARTITION=y 25CONFIG_SUN_PARTITION=y
26# CONFIG_EFI_PARTITION is not set 26# CONFIG_EFI_PARTITION is not set
27CONFIG_IOSCHED_DEADLINE=m 27CONFIG_IOSCHED_DEADLINE=m
28CONFIG_MQ_IOSCHED_DEADLINE=m
28CONFIG_KEXEC=y 29CONFIG_KEXEC=y
29CONFIG_BOOTINFO_PROC=y 30CONFIG_BOOTINFO_PROC=y
30CONFIG_M68030=y 31CONFIG_M68030=y
@@ -55,6 +56,7 @@ CONFIG_NET_IPVTI=m
55CONFIG_NET_FOU_IP_TUNNELS=y 56CONFIG_NET_FOU_IP_TUNNELS=y
56CONFIG_INET_AH=m 57CONFIG_INET_AH=m
57CONFIG_INET_ESP=m 58CONFIG_INET_ESP=m
59CONFIG_INET_ESP_OFFLOAD=m
58CONFIG_INET_IPCOMP=m 60CONFIG_INET_IPCOMP=m
59CONFIG_INET_XFRM_MODE_TRANSPORT=m 61CONFIG_INET_XFRM_MODE_TRANSPORT=m
60CONFIG_INET_XFRM_MODE_TUNNEL=m 62CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -66,6 +68,7 @@ CONFIG_IPV6=m
66CONFIG_IPV6_ROUTER_PREF=y 68CONFIG_IPV6_ROUTER_PREF=y
67CONFIG_INET6_AH=m 69CONFIG_INET6_AH=m
68CONFIG_INET6_ESP=m 70CONFIG_INET6_ESP=m
71CONFIG_INET6_ESP_OFFLOAD=m
69CONFIG_INET6_IPCOMP=m 72CONFIG_INET6_IPCOMP=m
70CONFIG_IPV6_ILA=m 73CONFIG_IPV6_ILA=m
71CONFIG_IPV6_VTI=m 74CONFIG_IPV6_VTI=m
@@ -96,6 +99,7 @@ CONFIG_NFT_NUMGEN=m
96CONFIG_NFT_CT=m 99CONFIG_NFT_CT=m
97CONFIG_NFT_SET_RBTREE=m 100CONFIG_NFT_SET_RBTREE=m
98CONFIG_NFT_SET_HASH=m 101CONFIG_NFT_SET_HASH=m
102CONFIG_NFT_SET_BITMAP=m
99CONFIG_NFT_COUNTER=m 103CONFIG_NFT_COUNTER=m
100CONFIG_NFT_LOG=m 104CONFIG_NFT_LOG=m
101CONFIG_NFT_LIMIT=m 105CONFIG_NFT_LIMIT=m
@@ -293,6 +297,8 @@ CONFIG_MPLS_IPTUNNEL=m
293CONFIG_NET_L3_MASTER_DEV=y 297CONFIG_NET_L3_MASTER_DEV=y
294CONFIG_AF_KCM=m 298CONFIG_AF_KCM=m
295# CONFIG_WIRELESS is not set 299# CONFIG_WIRELESS is not set
300CONFIG_PSAMPLE=m
301CONFIG_NET_IFE=m
296CONFIG_NET_DEVLINK=m 302CONFIG_NET_DEVLINK=m
297# CONFIG_UEVENT_HELPER is not set 303# CONFIG_UEVENT_HELPER is not set
298CONFIG_DEVTMPFS=y 304CONFIG_DEVTMPFS=y
@@ -351,6 +357,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
351CONFIG_MACVLAN=m 357CONFIG_MACVLAN=m
352CONFIG_MACVTAP=m 358CONFIG_MACVTAP=m
353CONFIG_IPVLAN=m 359CONFIG_IPVLAN=m
360CONFIG_IPVTAP=m
354CONFIG_VXLAN=m 361CONFIG_VXLAN=m
355CONFIG_GENEVE=m 362CONFIG_GENEVE=m
356CONFIG_GTP=m 363CONFIG_GTP=m
@@ -361,6 +368,7 @@ CONFIG_VETH=m
361# CONFIG_NET_VENDOR_ALACRITECH is not set 368# CONFIG_NET_VENDOR_ALACRITECH is not set
362# CONFIG_NET_VENDOR_AMAZON is not set 369# CONFIG_NET_VENDOR_AMAZON is not set
363CONFIG_MVME147_NET=y 370CONFIG_MVME147_NET=y
371# CONFIG_NET_VENDOR_AQUANTIA is not set
364# CONFIG_NET_VENDOR_ARC is not set 372# CONFIG_NET_VENDOR_ARC is not set
365# CONFIG_NET_CADENCE is not set 373# CONFIG_NET_CADENCE is not set
366# CONFIG_NET_VENDOR_BROADCOM is not set 374# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@ CONFIG_MVME147_NET=y
377# CONFIG_NET_VENDOR_SEEQ is not set 385# CONFIG_NET_VENDOR_SEEQ is not set
378# CONFIG_NET_VENDOR_SOLARFLARE is not set 386# CONFIG_NET_VENDOR_SOLARFLARE is not set
379# CONFIG_NET_VENDOR_STMICRO is not set 387# CONFIG_NET_VENDOR_STMICRO is not set
380# CONFIG_NET_VENDOR_SYNOPSYS is not set
381# CONFIG_NET_VENDOR_VIA is not set 388# CONFIG_NET_VENDOR_VIA is not set
382# CONFIG_NET_VENDOR_WIZNET is not set 389# CONFIG_NET_VENDOR_WIZNET is not set
383CONFIG_PPP=m 390CONFIG_PPP=m
@@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
515CONFIG_DLM=m 522CONFIG_DLM=m
516# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 523# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
517CONFIG_MAGIC_SYSRQ=y 524CONFIG_MAGIC_SYSRQ=y
525CONFIG_WW_MUTEX_SELFTEST=m
526CONFIG_ATOMIC64_SELFTEST=m
518CONFIG_ASYNC_RAID6_TEST=m 527CONFIG_ASYNC_RAID6_TEST=m
519CONFIG_TEST_HEXDUMP=m 528CONFIG_TEST_HEXDUMP=m
520CONFIG_TEST_STRING_HELPERS=m 529CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
545CONFIG_CRYPTO_LRW=m 554CONFIG_CRYPTO_LRW=m
546CONFIG_CRYPTO_PCBC=m 555CONFIG_CRYPTO_PCBC=m
547CONFIG_CRYPTO_KEYWRAP=m 556CONFIG_CRYPTO_KEYWRAP=m
557CONFIG_CRYPTO_CMAC=m
548CONFIG_CRYPTO_XCBC=m 558CONFIG_CRYPTO_XCBC=m
549CONFIG_CRYPTO_VMAC=m 559CONFIG_CRYPTO_VMAC=m
550CONFIG_CRYPTO_MICHAEL_MIC=m 560CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
556CONFIG_CRYPTO_SHA3=m 566CONFIG_CRYPTO_SHA3=m
557CONFIG_CRYPTO_TGR192=m 567CONFIG_CRYPTO_TGR192=m
558CONFIG_CRYPTO_WP512=m 568CONFIG_CRYPTO_WP512=m
569CONFIG_CRYPTO_AES_TI=m
559CONFIG_CRYPTO_ANUBIS=m 570CONFIG_CRYPTO_ANUBIS=m
560CONFIG_CRYPTO_BLOWFISH=m 571CONFIG_CRYPTO_BLOWFISH=m
561CONFIG_CRYPTO_CAMELLIA=m 572CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
580CONFIG_CRYPTO_USER_API_RNG=m 591CONFIG_CRYPTO_USER_API_RNG=m
581CONFIG_CRYPTO_USER_API_AEAD=m 592CONFIG_CRYPTO_USER_API_AEAD=m
582# CONFIG_CRYPTO_HW is not set 593# CONFIG_CRYPTO_HW is not set
594CONFIG_CRC32_SELFTEST=m
583CONFIG_XZ_DEC_TEST=m 595CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 2ccaca858f05..cbd8ee24d1bc 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
25CONFIG_SUN_PARTITION=y 25CONFIG_SUN_PARTITION=y
26# CONFIG_EFI_PARTITION is not set 26# CONFIG_EFI_PARTITION is not set
27CONFIG_IOSCHED_DEADLINE=m 27CONFIG_IOSCHED_DEADLINE=m
28CONFIG_MQ_IOSCHED_DEADLINE=m
28CONFIG_KEXEC=y 29CONFIG_KEXEC=y
29CONFIG_BOOTINFO_PROC=y 30CONFIG_BOOTINFO_PROC=y
30CONFIG_M68040=y 31CONFIG_M68040=y
@@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
56CONFIG_NET_FOU_IP_TUNNELS=y 57CONFIG_NET_FOU_IP_TUNNELS=y
57CONFIG_INET_AH=m 58CONFIG_INET_AH=m
58CONFIG_INET_ESP=m 59CONFIG_INET_ESP=m
60CONFIG_INET_ESP_OFFLOAD=m
59CONFIG_INET_IPCOMP=m 61CONFIG_INET_IPCOMP=m
60CONFIG_INET_XFRM_MODE_TRANSPORT=m 62CONFIG_INET_XFRM_MODE_TRANSPORT=m
61CONFIG_INET_XFRM_MODE_TUNNEL=m 63CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@ CONFIG_IPV6=m
67CONFIG_IPV6_ROUTER_PREF=y 69CONFIG_IPV6_ROUTER_PREF=y
68CONFIG_INET6_AH=m 70CONFIG_INET6_AH=m
69CONFIG_INET6_ESP=m 71CONFIG_INET6_ESP=m
72CONFIG_INET6_ESP_OFFLOAD=m
70CONFIG_INET6_IPCOMP=m 73CONFIG_INET6_IPCOMP=m
71CONFIG_IPV6_ILA=m 74CONFIG_IPV6_ILA=m
72CONFIG_IPV6_VTI=m 75CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
97CONFIG_NFT_CT=m 100CONFIG_NFT_CT=m
98CONFIG_NFT_SET_RBTREE=m 101CONFIG_NFT_SET_RBTREE=m
99CONFIG_NFT_SET_HASH=m 102CONFIG_NFT_SET_HASH=m
103CONFIG_NFT_SET_BITMAP=m
100CONFIG_NFT_COUNTER=m 104CONFIG_NFT_COUNTER=m
101CONFIG_NFT_LOG=m 105CONFIG_NFT_LOG=m
102CONFIG_NFT_LIMIT=m 106CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
294CONFIG_NET_L3_MASTER_DEV=y 298CONFIG_NET_L3_MASTER_DEV=y
295CONFIG_AF_KCM=m 299CONFIG_AF_KCM=m
296# CONFIG_WIRELESS is not set 300# CONFIG_WIRELESS is not set
301CONFIG_PSAMPLE=m
302CONFIG_NET_IFE=m
297CONFIG_NET_DEVLINK=m 303CONFIG_NET_DEVLINK=m
298# CONFIG_UEVENT_HELPER is not set 304# CONFIG_UEVENT_HELPER is not set
299CONFIG_DEVTMPFS=y 305CONFIG_DEVTMPFS=y
@@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
352CONFIG_MACVLAN=m 358CONFIG_MACVLAN=m
353CONFIG_MACVTAP=m 359CONFIG_MACVTAP=m
354CONFIG_IPVLAN=m 360CONFIG_IPVLAN=m
361CONFIG_IPVTAP=m
355CONFIG_VXLAN=m 362CONFIG_VXLAN=m
356CONFIG_GENEVE=m 363CONFIG_GENEVE=m
357CONFIG_GTP=m 364CONFIG_GTP=m
@@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
361CONFIG_VETH=m 368CONFIG_VETH=m
362# CONFIG_NET_VENDOR_ALACRITECH is not set 369# CONFIG_NET_VENDOR_ALACRITECH is not set
363# CONFIG_NET_VENDOR_AMAZON is not set 370# CONFIG_NET_VENDOR_AMAZON is not set
371# CONFIG_NET_VENDOR_AQUANTIA is not set
364# CONFIG_NET_VENDOR_ARC is not set 372# CONFIG_NET_VENDOR_ARC is not set
365# CONFIG_NET_CADENCE is not set 373# CONFIG_NET_CADENCE is not set
366# CONFIG_NET_VENDOR_BROADCOM is not set 374# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@ CONFIG_MVME16x_NET=y
377# CONFIG_NET_VENDOR_SEEQ is not set 385# CONFIG_NET_VENDOR_SEEQ is not set
378# CONFIG_NET_VENDOR_SOLARFLARE is not set 386# CONFIG_NET_VENDOR_SOLARFLARE is not set
379# CONFIG_NET_VENDOR_STMICRO is not set 387# CONFIG_NET_VENDOR_STMICRO is not set
380# CONFIG_NET_VENDOR_SYNOPSYS is not set
381# CONFIG_NET_VENDOR_VIA is not set 388# CONFIG_NET_VENDOR_VIA is not set
382# CONFIG_NET_VENDOR_WIZNET is not set 389# CONFIG_NET_VENDOR_WIZNET is not set
383CONFIG_PPP=m 390CONFIG_PPP=m
@@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
515CONFIG_DLM=m 522CONFIG_DLM=m
516# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 523# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
517CONFIG_MAGIC_SYSRQ=y 524CONFIG_MAGIC_SYSRQ=y
525CONFIG_WW_MUTEX_SELFTEST=m
526CONFIG_ATOMIC64_SELFTEST=m
518CONFIG_ASYNC_RAID6_TEST=m 527CONFIG_ASYNC_RAID6_TEST=m
519CONFIG_TEST_HEXDUMP=m 528CONFIG_TEST_HEXDUMP=m
520CONFIG_TEST_STRING_HELPERS=m 529CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
545CONFIG_CRYPTO_LRW=m 554CONFIG_CRYPTO_LRW=m
546CONFIG_CRYPTO_PCBC=m 555CONFIG_CRYPTO_PCBC=m
547CONFIG_CRYPTO_KEYWRAP=m 556CONFIG_CRYPTO_KEYWRAP=m
557CONFIG_CRYPTO_CMAC=m
548CONFIG_CRYPTO_XCBC=m 558CONFIG_CRYPTO_XCBC=m
549CONFIG_CRYPTO_VMAC=m 559CONFIG_CRYPTO_VMAC=m
550CONFIG_CRYPTO_MICHAEL_MIC=m 560CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
556CONFIG_CRYPTO_SHA3=m 566CONFIG_CRYPTO_SHA3=m
557CONFIG_CRYPTO_TGR192=m 567CONFIG_CRYPTO_TGR192=m
558CONFIG_CRYPTO_WP512=m 568CONFIG_CRYPTO_WP512=m
569CONFIG_CRYPTO_AES_TI=m
559CONFIG_CRYPTO_ANUBIS=m 570CONFIG_CRYPTO_ANUBIS=m
560CONFIG_CRYPTO_BLOWFISH=m 571CONFIG_CRYPTO_BLOWFISH=m
561CONFIG_CRYPTO_CAMELLIA=m 572CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
580CONFIG_CRYPTO_USER_API_RNG=m 591CONFIG_CRYPTO_USER_API_RNG=m
581CONFIG_CRYPTO_USER_API_AEAD=m 592CONFIG_CRYPTO_USER_API_AEAD=m
582# CONFIG_CRYPTO_HW is not set 593# CONFIG_CRYPTO_HW is not set
594CONFIG_CRC32_SELFTEST=m
583CONFIG_XZ_DEC_TEST=m 595CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 5599f3fd5fcd..1e82cc944339 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
26# CONFIG_EFI_PARTITION is not set 26# CONFIG_EFI_PARTITION is not set
27CONFIG_SYSV68_PARTITION=y 27CONFIG_SYSV68_PARTITION=y
28CONFIG_IOSCHED_DEADLINE=m 28CONFIG_IOSCHED_DEADLINE=m
29CONFIG_MQ_IOSCHED_DEADLINE=m
29CONFIG_KEXEC=y 30CONFIG_KEXEC=y
30CONFIG_BOOTINFO_PROC=y 31CONFIG_BOOTINFO_PROC=y
31CONFIG_M68040=y 32CONFIG_M68040=y
@@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
56CONFIG_NET_FOU_IP_TUNNELS=y 57CONFIG_NET_FOU_IP_TUNNELS=y
57CONFIG_INET_AH=m 58CONFIG_INET_AH=m
58CONFIG_INET_ESP=m 59CONFIG_INET_ESP=m
60CONFIG_INET_ESP_OFFLOAD=m
59CONFIG_INET_IPCOMP=m 61CONFIG_INET_IPCOMP=m
60CONFIG_INET_XFRM_MODE_TRANSPORT=m 62CONFIG_INET_XFRM_MODE_TRANSPORT=m
61CONFIG_INET_XFRM_MODE_TUNNEL=m 63CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@ CONFIG_IPV6=m
67CONFIG_IPV6_ROUTER_PREF=y 69CONFIG_IPV6_ROUTER_PREF=y
68CONFIG_INET6_AH=m 70CONFIG_INET6_AH=m
69CONFIG_INET6_ESP=m 71CONFIG_INET6_ESP=m
72CONFIG_INET6_ESP_OFFLOAD=m
70CONFIG_INET6_IPCOMP=m 73CONFIG_INET6_IPCOMP=m
71CONFIG_IPV6_ILA=m 74CONFIG_IPV6_ILA=m
72CONFIG_IPV6_VTI=m 75CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
97CONFIG_NFT_CT=m 100CONFIG_NFT_CT=m
98CONFIG_NFT_SET_RBTREE=m 101CONFIG_NFT_SET_RBTREE=m
99CONFIG_NFT_SET_HASH=m 102CONFIG_NFT_SET_HASH=m
103CONFIG_NFT_SET_BITMAP=m
100CONFIG_NFT_COUNTER=m 104CONFIG_NFT_COUNTER=m
101CONFIG_NFT_LOG=m 105CONFIG_NFT_LOG=m
102CONFIG_NFT_LIMIT=m 106CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
294CONFIG_NET_L3_MASTER_DEV=y 298CONFIG_NET_L3_MASTER_DEV=y
295CONFIG_AF_KCM=m 299CONFIG_AF_KCM=m
296# CONFIG_WIRELESS is not set 300# CONFIG_WIRELESS is not set
301CONFIG_PSAMPLE=m
302CONFIG_NET_IFE=m
297CONFIG_NET_DEVLINK=m 303CONFIG_NET_DEVLINK=m
298# CONFIG_UEVENT_HELPER is not set 304# CONFIG_UEVENT_HELPER is not set
299CONFIG_DEVTMPFS=y 305CONFIG_DEVTMPFS=y
@@ -358,6 +364,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
358CONFIG_MACVLAN=m 364CONFIG_MACVLAN=m
359CONFIG_MACVTAP=m 365CONFIG_MACVTAP=m
360CONFIG_IPVLAN=m 366CONFIG_IPVLAN=m
367CONFIG_IPVTAP=m
361CONFIG_VXLAN=m 368CONFIG_VXLAN=m
362CONFIG_GENEVE=m 369CONFIG_GENEVE=m
363CONFIG_GTP=m 370CONFIG_GTP=m
@@ -369,6 +376,7 @@ CONFIG_VETH=m
369# CONFIG_NET_VENDOR_ALACRITECH is not set 376# CONFIG_NET_VENDOR_ALACRITECH is not set
370# CONFIG_NET_VENDOR_AMAZON is not set 377# CONFIG_NET_VENDOR_AMAZON is not set
371# CONFIG_NET_VENDOR_AMD is not set 378# CONFIG_NET_VENDOR_AMD is not set
379# CONFIG_NET_VENDOR_AQUANTIA is not set
372# CONFIG_NET_VENDOR_ARC is not set 380# CONFIG_NET_VENDOR_ARC is not set
373# CONFIG_NET_CADENCE is not set 381# CONFIG_NET_CADENCE is not set
374# CONFIG_NET_VENDOR_BROADCOM is not set 382# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -388,7 +396,6 @@ CONFIG_NE2000=y
388# CONFIG_NET_VENDOR_SOLARFLARE is not set 396# CONFIG_NET_VENDOR_SOLARFLARE is not set
389# CONFIG_NET_VENDOR_SMSC is not set 397# CONFIG_NET_VENDOR_SMSC is not set
390# CONFIG_NET_VENDOR_STMICRO is not set 398# CONFIG_NET_VENDOR_STMICRO is not set
391# CONFIG_NET_VENDOR_SYNOPSYS is not set
392# CONFIG_NET_VENDOR_VIA is not set 399# CONFIG_NET_VENDOR_VIA is not set
393# CONFIG_NET_VENDOR_WIZNET is not set 400# CONFIG_NET_VENDOR_WIZNET is not set
394CONFIG_PLIP=m 401CONFIG_PLIP=m
@@ -538,6 +545,8 @@ CONFIG_NLS_MAC_TURKISH=m
538CONFIG_DLM=m 545CONFIG_DLM=m
539# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 546# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
540CONFIG_MAGIC_SYSRQ=y 547CONFIG_MAGIC_SYSRQ=y
548CONFIG_WW_MUTEX_SELFTEST=m
549CONFIG_ATOMIC64_SELFTEST=m
541CONFIG_ASYNC_RAID6_TEST=m 550CONFIG_ASYNC_RAID6_TEST=m
542CONFIG_TEST_HEXDUMP=m 551CONFIG_TEST_HEXDUMP=m
543CONFIG_TEST_STRING_HELPERS=m 552CONFIG_TEST_STRING_HELPERS=m
@@ -568,6 +577,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
568CONFIG_CRYPTO_LRW=m 577CONFIG_CRYPTO_LRW=m
569CONFIG_CRYPTO_PCBC=m 578CONFIG_CRYPTO_PCBC=m
570CONFIG_CRYPTO_KEYWRAP=m 579CONFIG_CRYPTO_KEYWRAP=m
580CONFIG_CRYPTO_CMAC=m
571CONFIG_CRYPTO_XCBC=m 581CONFIG_CRYPTO_XCBC=m
572CONFIG_CRYPTO_VMAC=m 582CONFIG_CRYPTO_VMAC=m
573CONFIG_CRYPTO_MICHAEL_MIC=m 583CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -579,6 +589,7 @@ CONFIG_CRYPTO_SHA512=m
579CONFIG_CRYPTO_SHA3=m 589CONFIG_CRYPTO_SHA3=m
580CONFIG_CRYPTO_TGR192=m 590CONFIG_CRYPTO_TGR192=m
581CONFIG_CRYPTO_WP512=m 591CONFIG_CRYPTO_WP512=m
592CONFIG_CRYPTO_AES_TI=m
582CONFIG_CRYPTO_ANUBIS=m 593CONFIG_CRYPTO_ANUBIS=m
583CONFIG_CRYPTO_BLOWFISH=m 594CONFIG_CRYPTO_BLOWFISH=m
584CONFIG_CRYPTO_CAMELLIA=m 595CONFIG_CRYPTO_CAMELLIA=m
@@ -603,4 +614,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
603CONFIG_CRYPTO_USER_API_RNG=m 614CONFIG_CRYPTO_USER_API_RNG=m
604CONFIG_CRYPTO_USER_API_AEAD=m 615CONFIG_CRYPTO_USER_API_AEAD=m
605# CONFIG_CRYPTO_HW is not set 616# CONFIG_CRYPTO_HW is not set
617CONFIG_CRC32_SELFTEST=m
606CONFIG_XZ_DEC_TEST=m 618CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 313bf0a562ad..f9e77f57a972 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
25# CONFIG_EFI_PARTITION is not set 25# CONFIG_EFI_PARTITION is not set
26CONFIG_SYSV68_PARTITION=y 26CONFIG_SYSV68_PARTITION=y
27CONFIG_IOSCHED_DEADLINE=m 27CONFIG_IOSCHED_DEADLINE=m
28CONFIG_MQ_IOSCHED_DEADLINE=m
28CONFIG_KEXEC=y 29CONFIG_KEXEC=y
29CONFIG_BOOTINFO_PROC=y 30CONFIG_BOOTINFO_PROC=y
30CONFIG_SUN3=y 31CONFIG_SUN3=y
@@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m
53CONFIG_NET_FOU_IP_TUNNELS=y 54CONFIG_NET_FOU_IP_TUNNELS=y
54CONFIG_INET_AH=m 55CONFIG_INET_AH=m
55CONFIG_INET_ESP=m 56CONFIG_INET_ESP=m
57CONFIG_INET_ESP_OFFLOAD=m
56CONFIG_INET_IPCOMP=m 58CONFIG_INET_IPCOMP=m
57CONFIG_INET_XFRM_MODE_TRANSPORT=m 59CONFIG_INET_XFRM_MODE_TRANSPORT=m
58CONFIG_INET_XFRM_MODE_TUNNEL=m 60CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -64,6 +66,7 @@ CONFIG_IPV6=m
64CONFIG_IPV6_ROUTER_PREF=y 66CONFIG_IPV6_ROUTER_PREF=y
65CONFIG_INET6_AH=m 67CONFIG_INET6_AH=m
66CONFIG_INET6_ESP=m 68CONFIG_INET6_ESP=m
69CONFIG_INET6_ESP_OFFLOAD=m
67CONFIG_INET6_IPCOMP=m 70CONFIG_INET6_IPCOMP=m
68CONFIG_IPV6_ILA=m 71CONFIG_IPV6_ILA=m
69CONFIG_IPV6_VTI=m 72CONFIG_IPV6_VTI=m
@@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m
94CONFIG_NFT_CT=m 97CONFIG_NFT_CT=m
95CONFIG_NFT_SET_RBTREE=m 98CONFIG_NFT_SET_RBTREE=m
96CONFIG_NFT_SET_HASH=m 99CONFIG_NFT_SET_HASH=m
100CONFIG_NFT_SET_BITMAP=m
97CONFIG_NFT_COUNTER=m 101CONFIG_NFT_COUNTER=m
98CONFIG_NFT_LOG=m 102CONFIG_NFT_LOG=m
99CONFIG_NFT_LIMIT=m 103CONFIG_NFT_LIMIT=m
@@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m
291CONFIG_NET_L3_MASTER_DEV=y 295CONFIG_NET_L3_MASTER_DEV=y
292CONFIG_AF_KCM=m 296CONFIG_AF_KCM=m
293# CONFIG_WIRELESS is not set 297# CONFIG_WIRELESS is not set
298CONFIG_PSAMPLE=m
299CONFIG_NET_IFE=m
294CONFIG_NET_DEVLINK=m 300CONFIG_NET_DEVLINK=m
295# CONFIG_UEVENT_HELPER is not set 301# CONFIG_UEVENT_HELPER is not set
296CONFIG_DEVTMPFS=y 302CONFIG_DEVTMPFS=y
@@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
349CONFIG_MACVLAN=m 355CONFIG_MACVLAN=m
350CONFIG_MACVTAP=m 356CONFIG_MACVTAP=m
351CONFIG_IPVLAN=m 357CONFIG_IPVLAN=m
358CONFIG_IPVTAP=m
352CONFIG_VXLAN=m 359CONFIG_VXLAN=m
353CONFIG_GENEVE=m 360CONFIG_GENEVE=m
354CONFIG_GTP=m 361CONFIG_GTP=m
@@ -359,6 +366,7 @@ CONFIG_VETH=m
359# CONFIG_NET_VENDOR_ALACRITECH is not set 366# CONFIG_NET_VENDOR_ALACRITECH is not set
360# CONFIG_NET_VENDOR_AMAZON is not set 367# CONFIG_NET_VENDOR_AMAZON is not set
361CONFIG_SUN3LANCE=y 368CONFIG_SUN3LANCE=y
369# CONFIG_NET_VENDOR_AQUANTIA is not set
362# CONFIG_NET_VENDOR_ARC is not set 370# CONFIG_NET_VENDOR_ARC is not set
363# CONFIG_NET_CADENCE is not set 371# CONFIG_NET_CADENCE is not set
364# CONFIG_NET_VENDOR_EZCHIP is not set 372# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -375,7 +383,6 @@ CONFIG_SUN3_82586=y
375# CONFIG_NET_VENDOR_SOLARFLARE is not set 383# CONFIG_NET_VENDOR_SOLARFLARE is not set
376# CONFIG_NET_VENDOR_STMICRO is not set 384# CONFIG_NET_VENDOR_STMICRO is not set
377# CONFIG_NET_VENDOR_SUN is not set 385# CONFIG_NET_VENDOR_SUN is not set
378# CONFIG_NET_VENDOR_SYNOPSYS is not set
379# CONFIG_NET_VENDOR_VIA is not set 386# CONFIG_NET_VENDOR_VIA is not set
380# CONFIG_NET_VENDOR_WIZNET is not set 387# CONFIG_NET_VENDOR_WIZNET is not set
381CONFIG_PPP=m 388CONFIG_PPP=m
@@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m
517CONFIG_DLM=m 524CONFIG_DLM=m
518# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 525# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
519CONFIG_MAGIC_SYSRQ=y 526CONFIG_MAGIC_SYSRQ=y
527CONFIG_WW_MUTEX_SELFTEST=m
528CONFIG_ATOMIC64_SELFTEST=m
520CONFIG_ASYNC_RAID6_TEST=m 529CONFIG_ASYNC_RAID6_TEST=m
521CONFIG_TEST_HEXDUMP=m 530CONFIG_TEST_HEXDUMP=m
522CONFIG_TEST_STRING_HELPERS=m 531CONFIG_TEST_STRING_HELPERS=m
@@ -546,6 +555,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
546CONFIG_CRYPTO_LRW=m 555CONFIG_CRYPTO_LRW=m
547CONFIG_CRYPTO_PCBC=m 556CONFIG_CRYPTO_PCBC=m
548CONFIG_CRYPTO_KEYWRAP=m 557CONFIG_CRYPTO_KEYWRAP=m
558CONFIG_CRYPTO_CMAC=m
549CONFIG_CRYPTO_XCBC=m 559CONFIG_CRYPTO_XCBC=m
550CONFIG_CRYPTO_VMAC=m 560CONFIG_CRYPTO_VMAC=m
551CONFIG_CRYPTO_MICHAEL_MIC=m 561CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -557,6 +567,7 @@ CONFIG_CRYPTO_SHA512=m
557CONFIG_CRYPTO_SHA3=m 567CONFIG_CRYPTO_SHA3=m
558CONFIG_CRYPTO_TGR192=m 568CONFIG_CRYPTO_TGR192=m
559CONFIG_CRYPTO_WP512=m 569CONFIG_CRYPTO_WP512=m
570CONFIG_CRYPTO_AES_TI=m
560CONFIG_CRYPTO_ANUBIS=m 571CONFIG_CRYPTO_ANUBIS=m
561CONFIG_CRYPTO_BLOWFISH=m 572CONFIG_CRYPTO_BLOWFISH=m
562CONFIG_CRYPTO_CAMELLIA=m 573CONFIG_CRYPTO_CAMELLIA=m
@@ -581,4 +592,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
581CONFIG_CRYPTO_USER_API_RNG=m 592CONFIG_CRYPTO_USER_API_RNG=m
582CONFIG_CRYPTO_USER_API_AEAD=m 593CONFIG_CRYPTO_USER_API_AEAD=m
583# CONFIG_CRYPTO_HW is not set 594# CONFIG_CRYPTO_HW is not set
595CONFIG_CRC32_SELFTEST=m
584CONFIG_XZ_DEC_TEST=m 596CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 38b61365f769..3c394fcfb368 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
25# CONFIG_EFI_PARTITION is not set 25# CONFIG_EFI_PARTITION is not set
26CONFIG_SYSV68_PARTITION=y 26CONFIG_SYSV68_PARTITION=y
27CONFIG_IOSCHED_DEADLINE=m 27CONFIG_IOSCHED_DEADLINE=m
28CONFIG_MQ_IOSCHED_DEADLINE=m
28CONFIG_KEXEC=y 29CONFIG_KEXEC=y
29CONFIG_BOOTINFO_PROC=y 30CONFIG_BOOTINFO_PROC=y
30CONFIG_SUN3X=y 31CONFIG_SUN3X=y
@@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m
53CONFIG_NET_FOU_IP_TUNNELS=y 54CONFIG_NET_FOU_IP_TUNNELS=y
54CONFIG_INET_AH=m 55CONFIG_INET_AH=m
55CONFIG_INET_ESP=m 56CONFIG_INET_ESP=m
57CONFIG_INET_ESP_OFFLOAD=m
56CONFIG_INET_IPCOMP=m 58CONFIG_INET_IPCOMP=m
57CONFIG_INET_XFRM_MODE_TRANSPORT=m 59CONFIG_INET_XFRM_MODE_TRANSPORT=m
58CONFIG_INET_XFRM_MODE_TUNNEL=m 60CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -64,6 +66,7 @@ CONFIG_IPV6=m
64CONFIG_IPV6_ROUTER_PREF=y 66CONFIG_IPV6_ROUTER_PREF=y
65CONFIG_INET6_AH=m 67CONFIG_INET6_AH=m
66CONFIG_INET6_ESP=m 68CONFIG_INET6_ESP=m
69CONFIG_INET6_ESP_OFFLOAD=m
67CONFIG_INET6_IPCOMP=m 70CONFIG_INET6_IPCOMP=m
68CONFIG_IPV6_ILA=m 71CONFIG_IPV6_ILA=m
69CONFIG_IPV6_VTI=m 72CONFIG_IPV6_VTI=m
@@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m
94CONFIG_NFT_CT=m 97CONFIG_NFT_CT=m
95CONFIG_NFT_SET_RBTREE=m 98CONFIG_NFT_SET_RBTREE=m
96CONFIG_NFT_SET_HASH=m 99CONFIG_NFT_SET_HASH=m
100CONFIG_NFT_SET_BITMAP=m
97CONFIG_NFT_COUNTER=m 101CONFIG_NFT_COUNTER=m
98CONFIG_NFT_LOG=m 102CONFIG_NFT_LOG=m
99CONFIG_NFT_LIMIT=m 103CONFIG_NFT_LIMIT=m
@@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m
291CONFIG_NET_L3_MASTER_DEV=y 295CONFIG_NET_L3_MASTER_DEV=y
292CONFIG_AF_KCM=m 296CONFIG_AF_KCM=m
293# CONFIG_WIRELESS is not set 297# CONFIG_WIRELESS is not set
298CONFIG_PSAMPLE=m
299CONFIG_NET_IFE=m
294CONFIG_NET_DEVLINK=m 300CONFIG_NET_DEVLINK=m
295# CONFIG_UEVENT_HELPER is not set 301# CONFIG_UEVENT_HELPER is not set
296CONFIG_DEVTMPFS=y 302CONFIG_DEVTMPFS=y
@@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
349CONFIG_MACVLAN=m 355CONFIG_MACVLAN=m
350CONFIG_MACVTAP=m 356CONFIG_MACVTAP=m
351CONFIG_IPVLAN=m 357CONFIG_IPVLAN=m
358CONFIG_IPVTAP=m
352CONFIG_VXLAN=m 359CONFIG_VXLAN=m
353CONFIG_GENEVE=m 360CONFIG_GENEVE=m
354CONFIG_GTP=m 361CONFIG_GTP=m
@@ -359,6 +366,7 @@ CONFIG_VETH=m
359# CONFIG_NET_VENDOR_ALACRITECH is not set 366# CONFIG_NET_VENDOR_ALACRITECH is not set
360# CONFIG_NET_VENDOR_AMAZON is not set 367# CONFIG_NET_VENDOR_AMAZON is not set
361CONFIG_SUN3LANCE=y 368CONFIG_SUN3LANCE=y
369# CONFIG_NET_VENDOR_AQUANTIA is not set
362# CONFIG_NET_VENDOR_ARC is not set 370# CONFIG_NET_VENDOR_ARC is not set
363# CONFIG_NET_CADENCE is not set 371# CONFIG_NET_CADENCE is not set
364# CONFIG_NET_VENDOR_BROADCOM is not set 372# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -375,7 +383,6 @@ CONFIG_SUN3LANCE=y
375# CONFIG_NET_VENDOR_SEEQ is not set 383# CONFIG_NET_VENDOR_SEEQ is not set
376# CONFIG_NET_VENDOR_SOLARFLARE is not set 384# CONFIG_NET_VENDOR_SOLARFLARE is not set
377# CONFIG_NET_VENDOR_STMICRO is not set 385# CONFIG_NET_VENDOR_STMICRO is not set
378# CONFIG_NET_VENDOR_SYNOPSYS is not set
379# CONFIG_NET_VENDOR_VIA is not set 386# CONFIG_NET_VENDOR_VIA is not set
380# CONFIG_NET_VENDOR_WIZNET is not set 387# CONFIG_NET_VENDOR_WIZNET is not set
381CONFIG_PPP=m 388CONFIG_PPP=m
@@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m
517CONFIG_DLM=m 524CONFIG_DLM=m
518# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set 525# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
519CONFIG_MAGIC_SYSRQ=y 526CONFIG_MAGIC_SYSRQ=y
527CONFIG_WW_MUTEX_SELFTEST=m
528CONFIG_ATOMIC64_SELFTEST=m
520CONFIG_ASYNC_RAID6_TEST=m 529CONFIG_ASYNC_RAID6_TEST=m
521CONFIG_TEST_HEXDUMP=m 530CONFIG_TEST_HEXDUMP=m
522CONFIG_TEST_STRING_HELPERS=m 531CONFIG_TEST_STRING_HELPERS=m
@@ -547,6 +556,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
547CONFIG_CRYPTO_LRW=m 556CONFIG_CRYPTO_LRW=m
548CONFIG_CRYPTO_PCBC=m 557CONFIG_CRYPTO_PCBC=m
549CONFIG_CRYPTO_KEYWRAP=m 558CONFIG_CRYPTO_KEYWRAP=m
559CONFIG_CRYPTO_CMAC=m
550CONFIG_CRYPTO_XCBC=m 560CONFIG_CRYPTO_XCBC=m
551CONFIG_CRYPTO_VMAC=m 561CONFIG_CRYPTO_VMAC=m
552CONFIG_CRYPTO_MICHAEL_MIC=m 562CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -558,6 +568,7 @@ CONFIG_CRYPTO_SHA512=m
558CONFIG_CRYPTO_SHA3=m 568CONFIG_CRYPTO_SHA3=m
559CONFIG_CRYPTO_TGR192=m 569CONFIG_CRYPTO_TGR192=m
560CONFIG_CRYPTO_WP512=m 570CONFIG_CRYPTO_WP512=m
571CONFIG_CRYPTO_AES_TI=m
561CONFIG_CRYPTO_ANUBIS=m 572CONFIG_CRYPTO_ANUBIS=m
562CONFIG_CRYPTO_BLOWFISH=m 573CONFIG_CRYPTO_BLOWFISH=m
563CONFIG_CRYPTO_CAMELLIA=m 574CONFIG_CRYPTO_CAMELLIA=m
@@ -582,4 +593,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
582CONFIG_CRYPTO_USER_API_RNG=m 593CONFIG_CRYPTO_USER_API_RNG=m
583CONFIG_CRYPTO_USER_API_AEAD=m 594CONFIG_CRYPTO_USER_API_AEAD=m
584# CONFIG_CRYPTO_HW is not set 595# CONFIG_CRYPTO_HW is not set
596CONFIG_CRC32_SELFTEST=m
585CONFIG_XZ_DEC_TEST=m 597CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index b4a9b0d5928d..dda58cfe8c22 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -148,7 +148,7 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
148#define __change_bit(nr, vaddr) change_bit(nr, vaddr) 148#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
149 149
150 150
151static inline int test_bit(int nr, const unsigned long *vaddr) 151static inline int test_bit(int nr, const volatile unsigned long *vaddr)
152{ 152{
153 return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0; 153 return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
154} 154}
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index a857d82ec509..aab1edd0d4ba 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
4#include <uapi/asm/unistd.h> 4#include <uapi/asm/unistd.h>
5 5
6 6
7#define NR_syscalls 379 7#define NR_syscalls 380
8 8
9#define __ARCH_WANT_OLD_READDIR 9#define __ARCH_WANT_OLD_READDIR
10#define __ARCH_WANT_OLD_STAT 10#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 9fe674bf911f..25589f5b8669 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -384,5 +384,6 @@
384#define __NR_copy_file_range 376 384#define __NR_copy_file_range 376
385#define __NR_preadv2 377 385#define __NR_preadv2 377
386#define __NR_pwritev2 378 386#define __NR_pwritev2 378
387#define __NR_statx 379
387 388
388#endif /* _UAPI_ASM_M68K_UNISTD_H_ */ 389#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index d6fd6d9ced24..8c9fcfafe0dd 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -399,3 +399,4 @@ ENTRY(sys_call_table)
399 .long sys_copy_file_range 399 .long sys_copy_file_range
400 .long sys_preadv2 400 .long sys_preadv2
401 .long sys_pwritev2 401 .long sys_pwritev2
402 .long sys_statx
diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c
index 5fd16ee5280c..e615603a4b0a 100644
--- a/arch/metag/kernel/ptrace.c
+++ b/arch/metag/kernel/ptrace.c
@@ -26,6 +26,16 @@
26 * user_regset definitions. 26 * user_regset definitions.
27 */ 27 */
28 28
29static unsigned long user_txstatus(const struct pt_regs *regs)
30{
31 unsigned long data = (unsigned long)regs->ctx.Flags;
32
33 if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
34 data |= USER_GP_REGS_STATUS_CATCH_BIT;
35
36 return data;
37}
38
29int metag_gp_regs_copyout(const struct pt_regs *regs, 39int metag_gp_regs_copyout(const struct pt_regs *regs,
30 unsigned int pos, unsigned int count, 40 unsigned int pos, unsigned int count,
31 void *kbuf, void __user *ubuf) 41 void *kbuf, void __user *ubuf)
@@ -64,9 +74,7 @@ int metag_gp_regs_copyout(const struct pt_regs *regs,
64 if (ret) 74 if (ret)
65 goto out; 75 goto out;
66 /* TXSTATUS */ 76 /* TXSTATUS */
67 data = (unsigned long)regs->ctx.Flags; 77 data = user_txstatus(regs);
68 if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
69 data |= USER_GP_REGS_STATUS_CATCH_BIT;
70 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 78 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
71 &data, 4*25, 4*26); 79 &data, 4*25, 4*26);
72 if (ret) 80 if (ret)
@@ -121,6 +129,7 @@ int metag_gp_regs_copyin(struct pt_regs *regs,
121 if (ret) 129 if (ret)
122 goto out; 130 goto out;
123 /* TXSTATUS */ 131 /* TXSTATUS */
132 data = user_txstatus(regs);
124 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 133 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
125 &data, 4*25, 4*26); 134 &data, 4*25, 4*26);
126 if (ret) 135 if (ret)
@@ -246,6 +255,8 @@ int metag_rp_state_copyin(struct pt_regs *regs,
246 unsigned long long *ptr; 255 unsigned long long *ptr;
247 int ret, i; 256 int ret, i;
248 257
258 if (count < 4*13)
259 return -EINVAL;
249 /* Read the entire pipeline before making any changes */ 260 /* Read the entire pipeline before making any changes */
250 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 261 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
251 &rp, 0, 4*13); 262 &rp, 0, 4*13);
@@ -305,7 +316,7 @@ static int metag_tls_set(struct task_struct *target,
305 const void *kbuf, const void __user *ubuf) 316 const void *kbuf, const void __user *ubuf)
306{ 317{
307 int ret; 318 int ret;
308 void __user *tls; 319 void __user *tls = target->thread.tls_ptr;
309 320
310 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); 321 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
311 if (ret) 322 if (ret)
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 339601267265..6931fe722a0b 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -456,7 +456,8 @@ static int fpr_set(struct task_struct *target,
456 &target->thread.fpu, 456 &target->thread.fpu,
457 0, sizeof(elf_fpregset_t)); 457 0, sizeof(elf_fpregset_t));
458 458
459 for (i = 0; i < NUM_FPU_REGS; i++) { 459 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
460 for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
460 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 461 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
461 &fpr_val, i * sizeof(elf_fpreg_t), 462 &fpr_val, i * sizeof(elf_fpreg_t),
462 (i + 1) * sizeof(elf_fpreg_t)); 463 (i + 1) * sizeof(elf_fpreg_t));
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
index 367c5426157b..3901b80d4420 100644
--- a/arch/nios2/kernel/prom.c
+++ b/arch/nios2/kernel/prom.c
@@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
48 return alloc_bootmem_align(size, align); 48 return alloc_bootmem_align(size, align);
49} 49}
50 50
51int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
52 bool nomap)
53{
54 reserve_bootmem(base, size, BOOTMEM_DEFAULT);
55 return 0;
56}
57
51void __init early_init_devtree(void *params) 58void __init early_init_devtree(void *params)
52{ 59{
53 __be32 *dtb = (u32 *)__dtb_start; 60 __be32 *dtb = (u32 *)__dtb_start;
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index 6e57ffa5db27..6044d9be28b4 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -201,6 +201,9 @@ void __init setup_arch(char **cmdline_p)
201 } 201 }
202#endif /* CONFIG_BLK_DEV_INITRD */ 202#endif /* CONFIG_BLK_DEV_INITRD */
203 203
204 early_init_fdt_reserve_self();
205 early_init_fdt_scan_reserved_mem();
206
204 unflatten_and_copy_device_tree(); 207 unflatten_and_copy_device_tree();
205 208
206 setup_cpuinfo(); 209 setup_cpuinfo();
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index edfbf9d6a6dd..8442727f28d2 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -65,6 +65,15 @@ struct exception_table_entry {
65 ".previous\n" 65 ".previous\n"
66 66
67/* 67/*
68 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
69 * (with lowest bit set) for which the fault handler in fixup_exception() will
70 * load -EFAULT into %r8 for a read or write fault, and zeroes the target
71 * register in case of a read fault in get_user().
72 */
73#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
74 ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
75
76/*
68 * The page fault handler stores, in a per-cpu area, the following information 77 * The page fault handler stores, in a per-cpu area, the following information
69 * if a fixup routine is available. 78 * if a fixup routine is available.
70 */ 79 */
@@ -91,7 +100,7 @@ struct exception_data {
91#define __get_user(x, ptr) \ 100#define __get_user(x, ptr) \
92({ \ 101({ \
93 register long __gu_err __asm__ ("r8") = 0; \ 102 register long __gu_err __asm__ ("r8") = 0; \
94 register long __gu_val __asm__ ("r9") = 0; \ 103 register long __gu_val; \
95 \ 104 \
96 load_sr2(); \ 105 load_sr2(); \
97 switch (sizeof(*(ptr))) { \ 106 switch (sizeof(*(ptr))) { \
@@ -107,22 +116,23 @@ struct exception_data {
107}) 116})
108 117
109#define __get_user_asm(ldx, ptr) \ 118#define __get_user_asm(ldx, ptr) \
110 __asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t" \ 119 __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
111 ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\ 120 "9:\n" \
121 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
112 : "=r"(__gu_val), "=r"(__gu_err) \ 122 : "=r"(__gu_val), "=r"(__gu_err) \
113 : "r"(ptr), "1"(__gu_err) \ 123 : "r"(ptr), "1"(__gu_err));
114 : "r1");
115 124
116#if !defined(CONFIG_64BIT) 125#if !defined(CONFIG_64BIT)
117 126
118#define __get_user_asm64(ptr) \ 127#define __get_user_asm64(ptr) \
119 __asm__("\n1:\tldw 0(%%sr2,%2),%0" \ 128 __asm__(" copy %%r0,%R0\n" \
120 "\n2:\tldw 4(%%sr2,%2),%R0\n\t" \ 129 "1: ldw 0(%%sr2,%2),%0\n" \
121 ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\ 130 "2: ldw 4(%%sr2,%2),%R0\n" \
122 ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\ 131 "9:\n" \
132 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
133 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
123 : "=r"(__gu_val), "=r"(__gu_err) \ 134 : "=r"(__gu_val), "=r"(__gu_err) \
124 : "r"(ptr), "1"(__gu_err) \ 135 : "r"(ptr), "1"(__gu_err));
125 : "r1");
126 136
127#endif /* !defined(CONFIG_64BIT) */ 137#endif /* !defined(CONFIG_64BIT) */
128 138
@@ -148,32 +158,31 @@ struct exception_data {
148 * The "__put_user/kernel_asm()" macros tell gcc they read from memory 158 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
149 * instead of writing. This is because they do not write to any memory 159 * instead of writing. This is because they do not write to any memory
150 * gcc knows about, so there are no aliasing issues. These macros must 160 * gcc knows about, so there are no aliasing issues. These macros must
151 * also be aware that "fixup_put_user_skip_[12]" are executed in the 161 * also be aware that fixups are executed in the context of the fault,
152 * context of the fault, and any registers used there must be listed 162 * and any registers used there must be listed as clobbers.
153 * as clobbers. In this case only "r1" is used by the current routines. 163 * r8 is already listed as err.
154 * r8/r9 are already listed as err/val.
155 */ 164 */
156 165
157#define __put_user_asm(stx, x, ptr) \ 166#define __put_user_asm(stx, x, ptr) \
158 __asm__ __volatile__ ( \ 167 __asm__ __volatile__ ( \
159 "\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \ 168 "1: " stx " %2,0(%%sr2,%1)\n" \
160 ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\ 169 "9:\n" \
170 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
161 : "=r"(__pu_err) \ 171 : "=r"(__pu_err) \
162 : "r"(ptr), "r"(x), "0"(__pu_err) \ 172 : "r"(ptr), "r"(x), "0"(__pu_err))
163 : "r1")
164 173
165 174
166#if !defined(CONFIG_64BIT) 175#if !defined(CONFIG_64BIT)
167 176
168#define __put_user_asm64(__val, ptr) do { \ 177#define __put_user_asm64(__val, ptr) do { \
169 __asm__ __volatile__ ( \ 178 __asm__ __volatile__ ( \
170 "\n1:\tstw %2,0(%%sr2,%1)" \ 179 "1: stw %2,0(%%sr2,%1)\n" \
171 "\n2:\tstw %R2,4(%%sr2,%1)\n\t" \ 180 "2: stw %R2,4(%%sr2,%1)\n" \
172 ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\ 181 "9:\n" \
173 ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\ 182 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
183 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
174 : "=r"(__pu_err) \ 184 : "=r"(__pu_err) \
175 : "r"(ptr), "r"(__val), "0"(__pu_err) \ 185 : "r"(ptr), "r"(__val), "0"(__pu_err)); \
176 : "r1"); \
177} while (0) 186} while (0)
178 187
179#endif /* !defined(CONFIG_64BIT) */ 188#endif /* !defined(CONFIG_64BIT) */
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 7484b3d11e0d..c6d6272a934f 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -47,16 +47,6 @@ EXPORT_SYMBOL(__cmpxchg_u64);
47EXPORT_SYMBOL(lclear_user); 47EXPORT_SYMBOL(lclear_user);
48EXPORT_SYMBOL(lstrnlen_user); 48EXPORT_SYMBOL(lstrnlen_user);
49 49
50/* Global fixups - defined as int to avoid creation of function pointers */
51extern int fixup_get_user_skip_1;
52extern int fixup_get_user_skip_2;
53extern int fixup_put_user_skip_1;
54extern int fixup_put_user_skip_2;
55EXPORT_SYMBOL(fixup_get_user_skip_1);
56EXPORT_SYMBOL(fixup_get_user_skip_2);
57EXPORT_SYMBOL(fixup_put_user_skip_1);
58EXPORT_SYMBOL(fixup_put_user_skip_2);
59
60#ifndef CONFIG_64BIT 50#ifndef CONFIG_64BIT
61/* Needed so insmod can set dp value */ 51/* Needed so insmod can set dp value */
62extern int $global$; 52extern int $global$;
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index b76f503eee4a..4516a5b53f38 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -143,6 +143,8 @@ void machine_power_off(void)
143 printk(KERN_EMERG "System shut down completed.\n" 143 printk(KERN_EMERG "System shut down completed.\n"
144 "Please power this system off now."); 144 "Please power this system off now.");
145 145
146 /* prevent soft lockup/stalled CPU messages for endless loop. */
147 rcu_sysrq_start();
146 for (;;); 148 for (;;);
147} 149}
148 150
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile
index 8fa92b8d839a..f2dac4d73b1b 100644
--- a/arch/parisc/lib/Makefile
+++ b/arch/parisc/lib/Makefile
@@ -2,7 +2,7 @@
2# Makefile for parisc-specific library files 2# Makefile for parisc-specific library files
3# 3#
4 4
5lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \ 5lib-y := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
6 ucmpdi2.o delay.o 6 ucmpdi2.o delay.o
7 7
8obj-y := iomap.o 8obj-y := iomap.o
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
deleted file mode 100644
index a5b72f22c7a6..000000000000
--- a/arch/parisc/lib/fixup.S
+++ /dev/null
@@ -1,98 +0,0 @@
1/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * Copyright (C) 2004 Randolph Chung <tausq@debian.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 *
20 * Fixup routines for kernel exception handling.
21 */
22#include <asm/asm-offsets.h>
23#include <asm/assembly.h>
24#include <asm/errno.h>
25#include <linux/linkage.h>
26
27#ifdef CONFIG_SMP
28 .macro get_fault_ip t1 t2
29 loadgp
30 addil LT%__per_cpu_offset,%r27
31 LDREG RT%__per_cpu_offset(%r1),\t1
32 /* t2 = smp_processor_id() */
33 mfctl 30,\t2
34 ldw TI_CPU(\t2),\t2
35#ifdef CONFIG_64BIT
36 extrd,u \t2,63,32,\t2
37#endif
38 /* t2 = &__per_cpu_offset[smp_processor_id()]; */
39 LDREGX \t2(\t1),\t2
40 addil LT%exception_data,%r27
41 LDREG RT%exception_data(%r1),\t1
42 /* t1 = this_cpu_ptr(&exception_data) */
43 add,l \t1,\t2,\t1
44 /* %r27 = t1->fault_gp - restore gp */
45 LDREG EXCDATA_GP(\t1), %r27
46 /* t1 = t1->fault_ip */
47 LDREG EXCDATA_IP(\t1), \t1
48 .endm
49#else
50 .macro get_fault_ip t1 t2
51 loadgp
52 /* t1 = this_cpu_ptr(&exception_data) */
53 addil LT%exception_data,%r27
54 LDREG RT%exception_data(%r1),\t2
55 /* %r27 = t2->fault_gp - restore gp */
56 LDREG EXCDATA_GP(\t2), %r27
57 /* t1 = t2->fault_ip */
58 LDREG EXCDATA_IP(\t2), \t1
59 .endm
60#endif
61
62 .level LEVEL
63
64 .text
65 .section .fixup, "ax"
66
67 /* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
68ENTRY_CFI(fixup_get_user_skip_1)
69 get_fault_ip %r1,%r8
70 ldo 4(%r1), %r1
71 ldi -EFAULT, %r8
72 bv %r0(%r1)
73 copy %r0, %r9
74ENDPROC_CFI(fixup_get_user_skip_1)
75
76ENTRY_CFI(fixup_get_user_skip_2)
77 get_fault_ip %r1,%r8
78 ldo 8(%r1), %r1
79 ldi -EFAULT, %r8
80 bv %r0(%r1)
81 copy %r0, %r9
82ENDPROC_CFI(fixup_get_user_skip_2)
83
84 /* put_user() fixups, store -EFAULT in r8 */
85ENTRY_CFI(fixup_put_user_skip_1)
86 get_fault_ip %r1,%r8
87 ldo 4(%r1), %r1
88 bv %r0(%r1)
89 ldi -EFAULT, %r8
90ENDPROC_CFI(fixup_put_user_skip_1)
91
92ENTRY_CFI(fixup_put_user_skip_2)
93 get_fault_ip %r1,%r8
94 ldo 8(%r1), %r1
95 bv %r0(%r1)
96 ldi -EFAULT, %r8
97ENDPROC_CFI(fixup_put_user_skip_2)
98
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index 56845de6b5df..f01188c044ee 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -5,6 +5,8 @@
5 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org> 5 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
6 * Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr> 6 * Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr>
7 * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org> 7 * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
8 * Copyright (C) 2017 Helge Deller <deller@gmx.de>
9 * Copyright (C) 2017 John David Anglin <dave.anglin@bell.net>
8 * 10 *
9 * 11 *
10 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
@@ -132,4 +134,320 @@ ENDPROC_CFI(lstrnlen_user)
132 134
133 .procend 135 .procend
134 136
137
138
139/*
140 * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
141 *
142 * Inputs:
143 * - sr1 already contains space of source region
144 * - sr2 already contains space of destination region
145 *
146 * Returns:
147 * - number of bytes that could not be copied.
148 * On success, this will be zero.
149 *
150 * This code is based on a C-implementation of a copy routine written by
151 * Randolph Chung, which in turn was derived from the glibc.
152 *
153 * Several strategies are tried to try to get the best performance for various
154 * conditions. In the optimal case, we copy by loops that copy 32- or 16-bytes
155 * at a time using general registers. Unaligned copies are handled either by
156 * aligning the destination and then using shift-and-write method, or in a few
157 * cases by falling back to a byte-at-a-time copy.
158 *
159 * Testing with various alignments and buffer sizes shows that this code is
160 * often >10x faster than a simple byte-at-a-time copy, even for strangely
161 * aligned operands. It is interesting to note that the glibc version of memcpy
162 * (written in C) is actually quite fast already. This routine is able to beat
163 * it by 30-40% for aligned copies because of the loop unrolling, but in some
164 * cases the glibc version is still slightly faster. This lends more
165 * credibility that gcc can generate very good code as long as we are careful.
166 *
167 * Possible optimizations:
168 * - add cache prefetching
169 * - try not to use the post-increment address modifiers; they may create
170 * additional interlocks. Assumption is that those were only efficient on old
171 * machines (pre PA8000 processors)
172 */
173
174 dst = arg0
175 src = arg1
176 len = arg2
177 end = arg3
178 t1 = r19
179 t2 = r20
180 t3 = r21
181 t4 = r22
182 srcspc = sr1
183 dstspc = sr2
184
185 t0 = r1
186 a1 = t1
187 a2 = t2
188 a3 = t3
189 a0 = t4
190
191 save_src = ret0
192 save_dst = ret1
193 save_len = r31
194
195ENTRY_CFI(pa_memcpy)
196 .proc
197 .callinfo NO_CALLS
198 .entry
199
200 /* Last destination address */
201 add dst,len,end
202
203 /* short copy with less than 16 bytes? */
204 cmpib,>>=,n 15,len,.Lbyte_loop
205
206 /* same alignment? */
207 xor src,dst,t0
208 extru t0,31,2,t1
209 cmpib,<>,n 0,t1,.Lunaligned_copy
210
211#ifdef CONFIG_64BIT
212 /* only do 64-bit copies if we can get aligned. */
213 extru t0,31,3,t1
214 cmpib,<>,n 0,t1,.Lalign_loop32
215
216 /* loop until we are 64-bit aligned */
217.Lalign_loop64:
218 extru dst,31,3,t1
219 cmpib,=,n 0,t1,.Lcopy_loop_16
22020: ldb,ma 1(srcspc,src),t1
22121: stb,ma t1,1(dstspc,dst)
222 b .Lalign_loop64
223 ldo -1(len),len
224
225 ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
226 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
227
228 ldi 31,t0
229.Lcopy_loop_16:
230 cmpb,COND(>>=),n t0,len,.Lword_loop
231
23210: ldd 0(srcspc,src),t1
23311: ldd 8(srcspc,src),t2
234 ldo 16(src),src
23512: std,ma t1,8(dstspc,dst)
23613: std,ma t2,8(dstspc,dst)
23714: ldd 0(srcspc,src),t1
23815: ldd 8(srcspc,src),t2
239 ldo 16(src),src
24016: std,ma t1,8(dstspc,dst)
24117: std,ma t2,8(dstspc,dst)
242
243 ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
244 ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault)
245 ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
246 ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
247 ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
248 ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault)
249 ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
250 ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
251
252 b .Lcopy_loop_16
253 ldo -32(len),len
254
255.Lword_loop:
256 cmpib,COND(>>=),n 3,len,.Lbyte_loop
25720: ldw,ma 4(srcspc,src),t1
25821: stw,ma t1,4(dstspc,dst)
259 b .Lword_loop
260 ldo -4(len),len
261
262 ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
263 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
264
265#endif /* CONFIG_64BIT */
266
267 /* loop until we are 32-bit aligned */
268.Lalign_loop32:
269 extru dst,31,2,t1
270 cmpib,=,n 0,t1,.Lcopy_loop_4
27120: ldb,ma 1(srcspc,src),t1
27221: stb,ma t1,1(dstspc,dst)
273 b .Lalign_loop32
274 ldo -1(len),len
275
276 ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
277 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
278
279
280.Lcopy_loop_4:
281 cmpib,COND(>>=),n 15,len,.Lbyte_loop
282
28310: ldw 0(srcspc,src),t1
28411: ldw 4(srcspc,src),t2
28512: stw,ma t1,4(dstspc,dst)
28613: stw,ma t2,4(dstspc,dst)
28714: ldw 8(srcspc,src),t1
28815: ldw 12(srcspc,src),t2
289 ldo 16(src),src
29016: stw,ma t1,4(dstspc,dst)
29117: stw,ma t2,4(dstspc,dst)
292
293 ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
294 ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault)
295 ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
296 ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
297 ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
298 ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault)
299 ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
300 ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
301
302 b .Lcopy_loop_4
303 ldo -16(len),len
304
305.Lbyte_loop:
306 cmpclr,COND(<>) len,%r0,%r0
307 b,n .Lcopy_done
30820: ldb 0(srcspc,src),t1
309 ldo 1(src),src
31021: stb,ma t1,1(dstspc,dst)
311 b .Lbyte_loop
312 ldo -1(len),len
313
314 ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
315 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
316
317.Lcopy_done:
318 bv %r0(%r2)
319 sub end,dst,ret0
320
321
322 /* src and dst are not aligned the same way. */
323 /* need to go the hard way */
324.Lunaligned_copy:
325 /* align until dst is 32bit-word-aligned */
326 extru dst,31,2,t1
327 cmpib,COND(=),n 0,t1,.Lcopy_dstaligned
32820: ldb 0(srcspc,src),t1
329 ldo 1(src),src
33021: stb,ma t1,1(dstspc,dst)
331 b .Lunaligned_copy
332 ldo -1(len),len
333
334 ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
335 ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
336
337.Lcopy_dstaligned:
338
339 /* store src, dst and len in safe place */
340 copy src,save_src
341 copy dst,save_dst
342 copy len,save_len
343
344 /* len now needs give number of words to copy */
345 SHRREG len,2,len
346
347 /*
348 * Copy from a not-aligned src to an aligned dst using shifts.
349 * Handles 4 words per loop.
350 */
351
352 depw,z src,28,2,t0
353 subi 32,t0,t0
354 mtsar t0
355 extru len,31,2,t0
356 cmpib,= 2,t0,.Lcase2
357 /* Make src aligned by rounding it down. */
358 depi 0,31,2,src
359
360 cmpiclr,<> 3,t0,%r0
361 b,n .Lcase3
362 cmpiclr,<> 1,t0,%r0
363 b,n .Lcase1
364.Lcase0:
365 cmpb,= %r0,len,.Lcda_finish
366 nop
367
3681: ldw,ma 4(srcspc,src), a3
369 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
3701: ldw,ma 4(srcspc,src), a0
371 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
372 b,n .Ldo3
373.Lcase1:
3741: ldw,ma 4(srcspc,src), a2
375 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
3761: ldw,ma 4(srcspc,src), a3
377 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
378 ldo -1(len),len
379 cmpb,=,n %r0,len,.Ldo0
380.Ldo4:
3811: ldw,ma 4(srcspc,src), a0
382 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
383 shrpw a2, a3, %sar, t0
3841: stw,ma t0, 4(dstspc,dst)
385 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
386.Ldo3:
3871: ldw,ma 4(srcspc,src), a1
388 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
389 shrpw a3, a0, %sar, t0
3901: stw,ma t0, 4(dstspc,dst)
391 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
392.Ldo2:
3931: ldw,ma 4(srcspc,src), a2
394 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
395 shrpw a0, a1, %sar, t0
3961: stw,ma t0, 4(dstspc,dst)
397 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
398.Ldo1:
3991: ldw,ma 4(srcspc,src), a3
400 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
401 shrpw a1, a2, %sar, t0
4021: stw,ma t0, 4(dstspc,dst)
403 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
404 ldo -4(len),len
405 cmpb,<> %r0,len,.Ldo4
406 nop
407.Ldo0:
408 shrpw a2, a3, %sar, t0
4091: stw,ma t0, 4(dstspc,dst)
410 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
411
412.Lcda_rdfault:
413.Lcda_finish:
414 /* calculate new src, dst and len and jump to byte-copy loop */
415 sub dst,save_dst,t0
416 add save_src,t0,src
417 b .Lbyte_loop
418 sub save_len,t0,len
419
420.Lcase3:
4211: ldw,ma 4(srcspc,src), a0
422 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
4231: ldw,ma 4(srcspc,src), a1
424 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
425 b .Ldo2
426 ldo 1(len),len
427.Lcase2:
4281: ldw,ma 4(srcspc,src), a1
429 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
4301: ldw,ma 4(srcspc,src), a2
431 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
432 b .Ldo1
433 ldo 2(len),len
434
435
436 /* fault exception fixup handlers: */
437#ifdef CONFIG_64BIT
438.Lcopy16_fault:
43910: b .Lcopy_done
440 std,ma t1,8(dstspc,dst)
441 ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
442#endif
443
444.Lcopy8_fault:
44510: b .Lcopy_done
446 stw,ma t1,4(dstspc,dst)
447 ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
448
449 .exit
450ENDPROC_CFI(pa_memcpy)
451 .procend
452
135 .end 453 .end
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index f82ff10ed974..b3d47ec1d80a 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -2,7 +2,7 @@
2 * Optimized memory copy routines. 2 * Optimized memory copy routines.
3 * 3 *
4 * Copyright (C) 2004 Randolph Chung <tausq@debian.org> 4 * Copyright (C) 2004 Randolph Chung <tausq@debian.org>
5 * Copyright (C) 2013 Helge Deller <deller@gmx.de> 5 * Copyright (C) 2013-2017 Helge Deller <deller@gmx.de>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -21,474 +21,21 @@
21 * Portions derived from the GNU C Library 21 * Portions derived from the GNU C Library
22 * Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc. 22 * Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
23 * 23 *
24 * Several strategies are tried to try to get the best performance for various
25 * conditions. In the optimal case, we copy 64-bytes in an unrolled loop using
26 * fp regs. This is followed by loops that copy 32- or 16-bytes at a time using
27 * general registers. Unaligned copies are handled either by aligning the
28 * destination and then using shift-and-write method, or in a few cases by
29 * falling back to a byte-at-a-time copy.
30 *
31 * I chose to implement this in C because it is easier to maintain and debug,
32 * and in my experiments it appears that the C code generated by gcc (3.3/3.4
33 * at the time of writing) is fairly optimal. Unfortunately some of the
34 * semantics of the copy routine (exception handling) is difficult to express
35 * in C, so we have to play some tricks to get it to work.
36 *
37 * All the loads and stores are done via explicit asm() code in order to use
38 * the right space registers.
39 *
40 * Testing with various alignments and buffer sizes shows that this code is
41 * often >10x faster than a simple byte-at-a-time copy, even for strangely
42 * aligned operands. It is interesting to note that the glibc version
43 * of memcpy (written in C) is actually quite fast already. This routine is
44 * able to beat it by 30-40% for aligned copies because of the loop unrolling,
45 * but in some cases the glibc version is still slightly faster. This lends
46 * more credibility that gcc can generate very good code as long as we are
47 * careful.
48 *
49 * TODO:
50 * - cache prefetching needs more experimentation to get optimal settings
51 * - try not to use the post-increment address modifiers; they create additional
52 * interlocks
53 * - replace byte-copy loops with stybs sequences
54 */ 24 */
55 25
56#ifdef __KERNEL__
57#include <linux/module.h> 26#include <linux/module.h>
58#include <linux/compiler.h> 27#include <linux/compiler.h>
59#include <linux/uaccess.h> 28#include <linux/uaccess.h>
60#define s_space "%%sr1"
61#define d_space "%%sr2"
62#else
63#include "memcpy.h"
64#define s_space "%%sr0"
65#define d_space "%%sr0"
66#define pa_memcpy new2_copy
67#endif
68 29
69DECLARE_PER_CPU(struct exception_data, exception_data); 30DECLARE_PER_CPU(struct exception_data, exception_data);
70 31
71#define preserve_branch(label) do { \
72 volatile int dummy = 0; \
73 /* The following branch is never taken, it's just here to */ \
74 /* prevent gcc from optimizing away our exception code. */ \
75 if (unlikely(dummy != dummy)) \
76 goto label; \
77} while (0)
78
79#define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3)) 32#define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3))
80#define get_kernel_space() (0) 33#define get_kernel_space() (0)
81 34
82#define MERGE(w0, sh_1, w1, sh_2) ({ \
83 unsigned int _r; \
84 asm volatile ( \
85 "mtsar %3\n" \
86 "shrpw %1, %2, %%sar, %0\n" \
87 : "=r"(_r) \
88 : "r"(w0), "r"(w1), "r"(sh_2) \
89 ); \
90 _r; \
91})
92#define THRESHOLD 16
93
94#ifdef DEBUG_MEMCPY
95#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
96#else
97#define DPRINTF(fmt, args...)
98#endif
99
100#define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
101 __asm__ __volatile__ ( \
102 "1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t" \
103 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
104 : _tt(_t), "+r"(_a) \
105 : \
106 : "r8")
107
108#define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
109 __asm__ __volatile__ ( \
110 "1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t" \
111 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
112 : "+r"(_a) \
113 : _tt(_t) \
114 : "r8")
115
116#define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e)
117#define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e)
118#define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e)
119#define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e)
120#define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e)
121#define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e)
122
123#define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) \
124 __asm__ __volatile__ ( \
125 "1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t" \
126 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
127 : _tt(_t) \
128 : "r"(_a) \
129 : "r8")
130
131#define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) \
132 __asm__ __volatile__ ( \
133 "1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t" \
134 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
135 : \
136 : _tt(_t), "r"(_a) \
137 : "r8")
138
139#define ldw(_s,_o,_a,_t,_e) def_load_insn(ldw,"=r",_s,_o,_a,_t,_e)
140#define stw(_s,_t,_o,_a,_e) def_store_insn(stw,"r",_s,_t,_o,_a,_e)
141
142#ifdef CONFIG_PREFETCH
143static inline void prefetch_src(const void *addr)
144{
145 __asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr));
146}
147
148static inline void prefetch_dst(const void *addr)
149{
150 __asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr));
151}
152#else
153#define prefetch_src(addr) do { } while(0)
154#define prefetch_dst(addr) do { } while(0)
155#endif
156
157#define PA_MEMCPY_OK 0
158#define PA_MEMCPY_LOAD_ERROR 1
159#define PA_MEMCPY_STORE_ERROR 2
160
161/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
162 * per loop. This code is derived from glibc.
163 */
164static noinline unsigned long copy_dstaligned(unsigned long dst,
165 unsigned long src, unsigned long len)
166{
167 /* gcc complains that a2 and a3 may be uninitialized, but actually
168 * they cannot be. Initialize a2/a3 to shut gcc up.
169 */
170 register unsigned int a0, a1, a2 = 0, a3 = 0;
171 int sh_1, sh_2;
172
173 /* prefetch_src((const void *)src); */
174
175 /* Calculate how to shift a word read at the memory operation
176 aligned srcp to make it aligned for copy. */
177 sh_1 = 8 * (src % sizeof(unsigned int));
178 sh_2 = 8 * sizeof(unsigned int) - sh_1;
179
180 /* Make src aligned by rounding it down. */
181 src &= -sizeof(unsigned int);
182
183 switch (len % 4)
184 {
185 case 2:
186 /* a1 = ((unsigned int *) src)[0];
187 a2 = ((unsigned int *) src)[1]; */
188 ldw(s_space, 0, src, a1, cda_ldw_exc);
189 ldw(s_space, 4, src, a2, cda_ldw_exc);
190 src -= 1 * sizeof(unsigned int);
191 dst -= 3 * sizeof(unsigned int);
192 len += 2;
193 goto do1;
194 case 3:
195 /* a0 = ((unsigned int *) src)[0];
196 a1 = ((unsigned int *) src)[1]; */
197 ldw(s_space, 0, src, a0, cda_ldw_exc);
198 ldw(s_space, 4, src, a1, cda_ldw_exc);
199 src -= 0 * sizeof(unsigned int);
200 dst -= 2 * sizeof(unsigned int);
201 len += 1;
202 goto do2;
203 case 0:
204 if (len == 0)
205 return PA_MEMCPY_OK;
206 /* a3 = ((unsigned int *) src)[0];
207 a0 = ((unsigned int *) src)[1]; */
208 ldw(s_space, 0, src, a3, cda_ldw_exc);
209 ldw(s_space, 4, src, a0, cda_ldw_exc);
210 src -=-1 * sizeof(unsigned int);
211 dst -= 1 * sizeof(unsigned int);
212 len += 0;
213 goto do3;
214 case 1:
215 /* a2 = ((unsigned int *) src)[0];
216 a3 = ((unsigned int *) src)[1]; */
217 ldw(s_space, 0, src, a2, cda_ldw_exc);
218 ldw(s_space, 4, src, a3, cda_ldw_exc);
219 src -=-2 * sizeof(unsigned int);
220 dst -= 0 * sizeof(unsigned int);
221 len -= 1;
222 if (len == 0)
223 goto do0;
224 goto do4; /* No-op. */
225 }
226
227 do
228 {
229 /* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */
230do4:
231 /* a0 = ((unsigned int *) src)[0]; */
232 ldw(s_space, 0, src, a0, cda_ldw_exc);
233 /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
234 stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
235do3:
236 /* a1 = ((unsigned int *) src)[1]; */
237 ldw(s_space, 4, src, a1, cda_ldw_exc);
238 /* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */
239 stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc);
240do2:
241 /* a2 = ((unsigned int *) src)[2]; */
242 ldw(s_space, 8, src, a2, cda_ldw_exc);
243 /* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */
244 stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc);
245do1:
246 /* a3 = ((unsigned int *) src)[3]; */
247 ldw(s_space, 12, src, a3, cda_ldw_exc);
248 /* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */
249 stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc);
250
251 src += 4 * sizeof(unsigned int);
252 dst += 4 * sizeof(unsigned int);
253 len -= 4;
254 }
255 while (len != 0);
256
257do0:
258 /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
259 stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
260
261 preserve_branch(handle_load_error);
262 preserve_branch(handle_store_error);
263
264 return PA_MEMCPY_OK;
265
266handle_load_error:
267 __asm__ __volatile__ ("cda_ldw_exc:\n");
268 return PA_MEMCPY_LOAD_ERROR;
269
270handle_store_error:
271 __asm__ __volatile__ ("cda_stw_exc:\n");
272 return PA_MEMCPY_STORE_ERROR;
273}
274
275
276/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
277 * In case of an access fault the faulty address can be read from the per_cpu
278 * exception data struct. */
279static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
280 unsigned long len)
281{
282 register unsigned long src, dst, t1, t2, t3;
283 register unsigned char *pcs, *pcd;
284 register unsigned int *pws, *pwd;
285 register double *pds, *pdd;
286 unsigned long ret;
287
288 src = (unsigned long)srcp;
289 dst = (unsigned long)dstp;
290 pcs = (unsigned char *)srcp;
291 pcd = (unsigned char *)dstp;
292
293 /* prefetch_src((const void *)srcp); */
294
295 if (len < THRESHOLD)
296 goto byte_copy;
297
298 /* Check alignment */
299 t1 = (src ^ dst);
300 if (unlikely(t1 & (sizeof(double)-1)))
301 goto unaligned_copy;
302
303 /* src and dst have same alignment. */
304
305 /* Copy bytes till we are double-aligned. */
306 t2 = src & (sizeof(double) - 1);
307 if (unlikely(t2 != 0)) {
308 t2 = sizeof(double) - t2;
309 while (t2 && len) {
310 /* *pcd++ = *pcs++; */
311 ldbma(s_space, pcs, t3, pmc_load_exc);
312 len--;
313 stbma(d_space, t3, pcd, pmc_store_exc);
314 t2--;
315 }
316 }
317
318 pds = (double *)pcs;
319 pdd = (double *)pcd;
320
321#if 0
322 /* Copy 8 doubles at a time */
323 while (len >= 8*sizeof(double)) {
324 register double r1, r2, r3, r4, r5, r6, r7, r8;
325 /* prefetch_src((char *)pds + L1_CACHE_BYTES); */
326 flddma(s_space, pds, r1, pmc_load_exc);
327 flddma(s_space, pds, r2, pmc_load_exc);
328 flddma(s_space, pds, r3, pmc_load_exc);
329 flddma(s_space, pds, r4, pmc_load_exc);
330 fstdma(d_space, r1, pdd, pmc_store_exc);
331 fstdma(d_space, r2, pdd, pmc_store_exc);
332 fstdma(d_space, r3, pdd, pmc_store_exc);
333 fstdma(d_space, r4, pdd, pmc_store_exc);
334
335#if 0
336 if (L1_CACHE_BYTES <= 32)
337 prefetch_src((char *)pds + L1_CACHE_BYTES);
338#endif
339 flddma(s_space, pds, r5, pmc_load_exc);
340 flddma(s_space, pds, r6, pmc_load_exc);
341 flddma(s_space, pds, r7, pmc_load_exc);
342 flddma(s_space, pds, r8, pmc_load_exc);
343 fstdma(d_space, r5, pdd, pmc_store_exc);
344 fstdma(d_space, r6, pdd, pmc_store_exc);
345 fstdma(d_space, r7, pdd, pmc_store_exc);
346 fstdma(d_space, r8, pdd, pmc_store_exc);
347 len -= 8*sizeof(double);
348 }
349#endif
350
351 pws = (unsigned int *)pds;
352 pwd = (unsigned int *)pdd;
353
354word_copy:
355 while (len >= 8*sizeof(unsigned int)) {
356 register unsigned int r1,r2,r3,r4,r5,r6,r7,r8;
357 /* prefetch_src((char *)pws + L1_CACHE_BYTES); */
358 ldwma(s_space, pws, r1, pmc_load_exc);
359 ldwma(s_space, pws, r2, pmc_load_exc);
360 ldwma(s_space, pws, r3, pmc_load_exc);
361 ldwma(s_space, pws, r4, pmc_load_exc);
362 stwma(d_space, r1, pwd, pmc_store_exc);
363 stwma(d_space, r2, pwd, pmc_store_exc);
364 stwma(d_space, r3, pwd, pmc_store_exc);
365 stwma(d_space, r4, pwd, pmc_store_exc);
366
367 ldwma(s_space, pws, r5, pmc_load_exc);
368 ldwma(s_space, pws, r6, pmc_load_exc);
369 ldwma(s_space, pws, r7, pmc_load_exc);
370 ldwma(s_space, pws, r8, pmc_load_exc);
371 stwma(d_space, r5, pwd, pmc_store_exc);
372 stwma(d_space, r6, pwd, pmc_store_exc);
373 stwma(d_space, r7, pwd, pmc_store_exc);
374 stwma(d_space, r8, pwd, pmc_store_exc);
375 len -= 8*sizeof(unsigned int);
376 }
377
378 while (len >= 4*sizeof(unsigned int)) {
379 register unsigned int r1,r2,r3,r4;
380 ldwma(s_space, pws, r1, pmc_load_exc);
381 ldwma(s_space, pws, r2, pmc_load_exc);
382 ldwma(s_space, pws, r3, pmc_load_exc);
383 ldwma(s_space, pws, r4, pmc_load_exc);
384 stwma(d_space, r1, pwd, pmc_store_exc);
385 stwma(d_space, r2, pwd, pmc_store_exc);
386 stwma(d_space, r3, pwd, pmc_store_exc);
387 stwma(d_space, r4, pwd, pmc_store_exc);
388 len -= 4*sizeof(unsigned int);
389 }
390
391 pcs = (unsigned char *)pws;
392 pcd = (unsigned char *)pwd;
393
394byte_copy:
395 while (len) {
396 /* *pcd++ = *pcs++; */
397 ldbma(s_space, pcs, t3, pmc_load_exc);
398 stbma(d_space, t3, pcd, pmc_store_exc);
399 len--;
400 }
401
402 return PA_MEMCPY_OK;
403
404unaligned_copy:
405 /* possibly we are aligned on a word, but not on a double... */
406 if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) {
407 t2 = src & (sizeof(unsigned int) - 1);
408
409 if (unlikely(t2 != 0)) {
410 t2 = sizeof(unsigned int) - t2;
411 while (t2) {
412 /* *pcd++ = *pcs++; */
413 ldbma(s_space, pcs, t3, pmc_load_exc);
414 stbma(d_space, t3, pcd, pmc_store_exc);
415 len--;
416 t2--;
417 }
418 }
419
420 pws = (unsigned int *)pcs;
421 pwd = (unsigned int *)pcd;
422 goto word_copy;
423 }
424
425 /* Align the destination. */
426 if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) {
427 t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1));
428 while (t2) {
429 /* *pcd++ = *pcs++; */
430 ldbma(s_space, pcs, t3, pmc_load_exc);
431 stbma(d_space, t3, pcd, pmc_store_exc);
432 len--;
433 t2--;
434 }
435 dst = (unsigned long)pcd;
436 src = (unsigned long)pcs;
437 }
438
439 ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
440 if (ret)
441 return ret;
442
443 pcs += (len & -sizeof(unsigned int));
444 pcd += (len & -sizeof(unsigned int));
445 len %= sizeof(unsigned int);
446
447 preserve_branch(handle_load_error);
448 preserve_branch(handle_store_error);
449
450 goto byte_copy;
451
452handle_load_error:
453 __asm__ __volatile__ ("pmc_load_exc:\n");
454 return PA_MEMCPY_LOAD_ERROR;
455
456handle_store_error:
457 __asm__ __volatile__ ("pmc_store_exc:\n");
458 return PA_MEMCPY_STORE_ERROR;
459}
460
461
462/* Returns 0 for success, otherwise, returns number of bytes not transferred. */ 35/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
463static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) 36extern unsigned long pa_memcpy(void *dst, const void *src,
464{ 37 unsigned long len);
465 unsigned long ret, fault_addr, reference;
466 struct exception_data *d;
467
468 ret = pa_memcpy_internal(dstp, srcp, len);
469 if (likely(ret == PA_MEMCPY_OK))
470 return 0;
471
472 /* if a load or store fault occured we can get the faulty addr */
473 d = this_cpu_ptr(&exception_data);
474 fault_addr = d->fault_addr;
475
476 /* error in load or store? */
477 if (ret == PA_MEMCPY_LOAD_ERROR)
478 reference = (unsigned long) srcp;
479 else
480 reference = (unsigned long) dstp;
481 38
482 DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
483 ret, len, fault_addr, reference);
484
485 if (fault_addr >= reference)
486 return len - (fault_addr - reference);
487 else
488 return len;
489}
490
491#ifdef __KERNEL__
492unsigned long __copy_to_user(void __user *dst, const void *src, 39unsigned long __copy_to_user(void __user *dst, const void *src,
493 unsigned long len) 40 unsigned long len)
494{ 41{
@@ -537,5 +84,3 @@ long probe_kernel_read(void *dst, const void *src, size_t size)
537 84
538 return __probe_kernel_read(dst, src, size); 85 return __probe_kernel_read(dst, src, size);
539} 86}
540
541#endif
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index deab89a8915a..32ec22146141 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -150,6 +150,23 @@ int fixup_exception(struct pt_regs *regs)
150 d->fault_space = regs->isr; 150 d->fault_space = regs->isr;
151 d->fault_addr = regs->ior; 151 d->fault_addr = regs->ior;
152 152
153 /*
154 * Fix up get_user() and put_user().
155 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
156 * bit in the relative address of the fixup routine to indicate
157 * that %r8 should be loaded with -EFAULT to report a userspace
158 * access error.
159 */
160 if (fix->fixup & 1) {
161 regs->gr[8] = -EFAULT;
162
163 /* zero target register for get_user() */
164 if (parisc_acctyp(0, regs->iir) == VM_READ) {
165 int treg = regs->iir & 0x1f;
166 regs->gr[treg] = 0;
167 }
168 }
169
153 regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup; 170 regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup;
154 regs->iaoq[0] &= ~3; 171 regs->iaoq[0] &= ~3;
155 /* 172 /*
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 995728736677..6fd08219248d 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -449,9 +449,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
449_GLOBAL(pnv_wakeup_tb_loss) 449_GLOBAL(pnv_wakeup_tb_loss)
450 ld r1,PACAR1(r13) 450 ld r1,PACAR1(r13)
451 /* 451 /*
452 * Before entering any idle state, the NVGPRs are saved in the stack 452 * Before entering any idle state, the NVGPRs are saved in the stack.
453 * and they are restored before switching to the process context. Hence 453 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
454 * until they are restored, they are free to be used. 454 * NVGPRs are restored. If we are here, it is likely that state is lost,
455 * but not guaranteed -- neither ISA207 nor ISA300 tests to reach
456 * here are the same as the test to restore NVGPRS:
457 * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
458 * and SRR1 test for restoring NVGPRs.
459 *
460 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
461 * guarantee they will always be restored. This might be tightened
462 * with careful reading of specs (particularly for ISA300) but this
463 * is already a slow wakeup path and it's simpler to be safe.
464 */
465 li r0,1
466 stb r0,PACA_NAPSTATELOST(r13)
467
468 /*
455 * 469 *
456 * Save SRR1 and LR in NVGPRs as they might be clobbered in 470 * Save SRR1 and LR in NVGPRs as they might be clobbered in
457 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required 471 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 9be992083d2a..c22f207aa656 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -397,8 +397,7 @@ static void early_check_vec5(void)
397void __init mmu_early_init_devtree(void) 397void __init mmu_early_init_devtree(void)
398{ 398{
399 /* Disable radix mode based on kernel command line. */ 399 /* Disable radix mode based on kernel command line. */
400 /* We don't yet have the machinery to do radix as a guest. */ 400 if (disable_radix)
401 if (disable_radix || !(mfmsr() & MSR_HV))
402 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; 401 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
403 402
404 /* 403 /*
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index fa95041fa9f6..33ca29333e18 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *start, unsigned long size)
141 141
142unsigned long decompress_kernel(void) 142unsigned long decompress_kernel(void)
143{ 143{
144 unsigned long output_addr; 144 void *output, *kernel_end;
145 unsigned char *output;
146 145
147 output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL; 146 output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
148 check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start); 147 kernel_end = output + SZ__bss_start;
149 memset(&_bss, 0, &_ebss - &_bss); 148 check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
150 free_mem_ptr = (unsigned long)&_end;
151 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
152 output = (unsigned char *) output_addr;
153 149
154#ifdef CONFIG_BLK_DEV_INITRD 150#ifdef CONFIG_BLK_DEV_INITRD
155 /* 151 /*
156 * Move the initrd right behind the end of the decompressed 152 * Move the initrd right behind the end of the decompressed
157 * kernel image. 153 * kernel image. This also prevents initrd corruption caused by
154 * bss clearing since kernel_end will always be located behind the
155 * current bss section..
158 */ 156 */
159 if (INITRD_START && INITRD_SIZE && 157 if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
160 INITRD_START < (unsigned long) output + SZ__bss_start) { 158 check_ipl_parmblock(kernel_end, INITRD_SIZE);
161 check_ipl_parmblock(output + SZ__bss_start, 159 memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
162 INITRD_START + INITRD_SIZE); 160 INITRD_START = (unsigned long) kernel_end;
163 memmove(output + SZ__bss_start,
164 (void *) INITRD_START, INITRD_SIZE);
165 INITRD_START = (unsigned long) output + SZ__bss_start;
166 } 161 }
167#endif 162#endif
168 163
164 /*
165 * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
166 * initialized afterwards since they reside in bss.
167 */
168 memset(&_bss, 0, &_ebss - &_bss);
169 free_mem_ptr = (unsigned long) &_end;
170 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
171
169 puts("Uncompressing Linux... "); 172 puts("Uncompressing Linux... ");
170 __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error); 173 __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
171 puts("Ok, booting the kernel.\n"); 174 puts("Ok, booting the kernel.\n");
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index 5ce29fe100ba..fbd9116eb17b 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -4,6 +4,5 @@
4#include <asm-generic/sections.h> 4#include <asm-generic/sections.h>
5 5
6extern char _eshared[], _ehead[]; 6extern char _eshared[], _ehead[];
7extern char __start_ro_after_init[], __end_ro_after_init[];
8 7
9#endif 8#endif
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 136932ff4250..3ea1554d04b3 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -147,7 +147,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
147 " jg 2b\n" \ 147 " jg 2b\n" \
148 ".popsection\n" \ 148 ".popsection\n" \
149 EX_TABLE(0b,3b) EX_TABLE(1b,3b) \ 149 EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
150 : "=d" (__rc), "=Q" (*(to)) \ 150 : "=d" (__rc), "+Q" (*(to)) \
151 : "d" (size), "Q" (*(from)), \ 151 : "d" (size), "Q" (*(from)), \
152 "d" (__reg0), "K" (-EFAULT) \ 152 "d" (__reg0), "K" (-EFAULT) \
153 : "cc"); \ 153 : "cc"); \
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 47a973b5b4f1..5dab859b0d54 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -909,13 +909,11 @@ void __init smp_prepare_boot_cpu(void)
909{ 909{
910 struct pcpu *pcpu = pcpu_devices; 910 struct pcpu *pcpu = pcpu_devices;
911 911
912 WARN_ON(!cpu_present(0) || !cpu_online(0));
912 pcpu->state = CPU_STATE_CONFIGURED; 913 pcpu->state = CPU_STATE_CONFIGURED;
913 pcpu->address = stap();
914 pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix(); 914 pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
915 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 915 S390_lowcore.percpu_offset = __per_cpu_offset[0];
916 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); 916 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
917 set_cpu_present(0, true);
918 set_cpu_online(0, true);
919} 917}
920 918
921void __init smp_cpus_done(unsigned int max_cpus) 919void __init smp_cpus_done(unsigned int max_cpus)
@@ -924,6 +922,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
924 922
925void __init smp_setup_processor_id(void) 923void __init smp_setup_processor_id(void)
926{ 924{
925 pcpu_devices[0].address = stap();
927 S390_lowcore.cpu_nr = 0; 926 S390_lowcore.cpu_nr = 0;
928 S390_lowcore.spinlock_lockval = arch_spin_lockval(0); 927 S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
929} 928}
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 5ccf95396251..72307f108c40 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -63,11 +63,9 @@ SECTIONS
63 63
64 . = ALIGN(PAGE_SIZE); 64 . = ALIGN(PAGE_SIZE);
65 __start_ro_after_init = .; 65 __start_ro_after_init = .;
66 __start_data_ro_after_init = .;
67 .data..ro_after_init : { 66 .data..ro_after_init : {
68 *(.data..ro_after_init) 67 *(.data..ro_after_init)
69 } 68 }
70 __end_data_ro_after_init = .;
71 EXCEPTION_TABLE(16) 69 EXCEPTION_TABLE(16)
72 . = ALIGN(PAGE_SIZE); 70 . = ALIGN(PAGE_SIZE);
73 __end_ro_after_init = .; 71 __end_ro_after_init = .;
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index df9e731a76f5..fc5124ccdb53 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -351,7 +351,7 @@ static int genregs64_set(struct task_struct *target,
351 } 351 }
352 352
353 if (!ret) { 353 if (!ret) {
354 unsigned long y; 354 unsigned long y = regs->y;
355 355
356 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 356 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
357 &y, 357 &y,
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 2d449337a360..a94a4d10f2df 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -120,10 +120,6 @@ else
120 # -funit-at-a-time shrinks the kernel .text considerably 120 # -funit-at-a-time shrinks the kernel .text considerably
121 # unfortunately it makes reading oopses harder. 121 # unfortunately it makes reading oopses harder.
122 KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time) 122 KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
123
124 # this works around some issues with generating unwind tables in older gccs
125 # newer gccs do it by default
126 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
127endif 123endif
128 124
129ifdef CONFIG_X86_X32 125ifdef CONFIG_X86_X32
@@ -147,6 +143,37 @@ ifeq ($(CONFIG_KMEMCHECK),y)
147 KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy) 143 KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
148endif 144endif
149 145
146#
147# If the function graph tracer is used with mcount instead of fentry,
148# '-maccumulate-outgoing-args' is needed to prevent a GCC bug
149# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=42109)
150#
151ifdef CONFIG_FUNCTION_GRAPH_TRACER
152 ifndef CONFIG_HAVE_FENTRY
153 ACCUMULATE_OUTGOING_ARGS := 1
154 else
155 ifeq ($(call cc-option-yn, -mfentry), n)
156 ACCUMULATE_OUTGOING_ARGS := 1
157 endif
158 endif
159endif
160
161#
162# Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a
163# GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way
164# to test for this bug at compile-time because the test case needs to execute,
165# which is a no-go for cross compilers. So check the GCC version instead.
166#
167ifdef CONFIG_JUMP_LABEL
168 ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1)
169 ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1)
170 endif
171endif
172
173ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
174 KBUILD_CFLAGS += -maccumulate-outgoing-args
175endif
176
150# Stackpointer is addressed different for 32 bit and 64 bit x86 177# Stackpointer is addressed different for 32 bit and 64 bit x86
151sp-$(CONFIG_X86_32) := esp 178sp-$(CONFIG_X86_32) := esp
152sp-$(CONFIG_X86_64) := rsp 179sp-$(CONFIG_X86_64) := rsp
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index 6647ed49c66c..a45eb15b7cf2 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -45,24 +45,6 @@ cflags-$(CONFIG_MGEODE_LX) += $(call cc-option,-march=geode,-march=pentium-mmx)
45# cpu entries 45# cpu entries
46cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686)) 46cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686))
47 47
48# Work around the pentium-mmx code generator madness of gcc4.4.x which
49# does stack alignment by generating horrible code _before_ the mcount
50# prologue (push %ebp, mov %esp, %ebp) which breaks the function graph
51# tracer assumptions. For i686, generic, core2 this is set by the
52# compiler anyway
53ifeq ($(CONFIG_FUNCTION_GRAPH_TRACER), y)
54ADD_ACCUMULATE_OUTGOING_ARGS := y
55endif
56
57# Work around to a bug with asm goto with first implementations of it
58# in gcc causing gcc to mess up the push and pop of the stack in some
59# uses of asm goto.
60ifeq ($(CONFIG_JUMP_LABEL), y)
61ADD_ACCUMULATE_OUTGOING_ARGS := y
62endif
63
64cflags-$(ADD_ACCUMULATE_OUTGOING_ARGS) += $(call cc-option,-maccumulate-outgoing-args)
65
66# Bug fix for binutils: this option is required in order to keep 48# Bug fix for binutils: this option is required in order to keep
67# binutils from generating NOPL instructions against our will. 49# binutils from generating NOPL instructions against our will.
68ifneq ($(CONFIG_X86_P6_NOP),y) 50ifneq ($(CONFIG_X86_P6_NOP),y)
diff --git a/arch/x86/boot/compressed/error.c b/arch/x86/boot/compressed/error.c
index 6248740b68b5..31922023de49 100644
--- a/arch/x86/boot/compressed/error.c
+++ b/arch/x86/boot/compressed/error.c
@@ -4,6 +4,7 @@
4 * memcpy() and memmove() are defined for the compressed boot environment. 4 * memcpy() and memmove() are defined for the compressed boot environment.
5 */ 5 */
6#include "misc.h" 6#include "misc.h"
7#include "error.h"
7 8
8void warn(char *m) 9void warn(char *m)
9{ 10{
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 2aa1ad194db2..580b60f5ac83 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2256,6 +2256,7 @@ void arch_perf_update_userpage(struct perf_event *event,
2256 struct perf_event_mmap_page *userpg, u64 now) 2256 struct perf_event_mmap_page *userpg, u64 now)
2257{ 2257{
2258 struct cyc2ns_data *data; 2258 struct cyc2ns_data *data;
2259 u64 offset;
2259 2260
2260 userpg->cap_user_time = 0; 2261 userpg->cap_user_time = 0;
2261 userpg->cap_user_time_zero = 0; 2262 userpg->cap_user_time_zero = 0;
@@ -2263,11 +2264,13 @@ void arch_perf_update_userpage(struct perf_event *event,
2263 !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED); 2264 !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
2264 userpg->pmc_width = x86_pmu.cntval_bits; 2265 userpg->pmc_width = x86_pmu.cntval_bits;
2265 2266
2266 if (!sched_clock_stable()) 2267 if (!using_native_sched_clock() || !sched_clock_stable())
2267 return; 2268 return;
2268 2269
2269 data = cyc2ns_read_begin(); 2270 data = cyc2ns_read_begin();
2270 2271
2272 offset = data->cyc2ns_offset + __sched_clock_offset;
2273
2271 /* 2274 /*
2272 * Internal timekeeping for enabled/running/stopped times 2275 * Internal timekeeping for enabled/running/stopped times
2273 * is always in the local_clock domain. 2276 * is always in the local_clock domain.
@@ -2275,7 +2278,7 @@ void arch_perf_update_userpage(struct perf_event *event,
2275 userpg->cap_user_time = 1; 2278 userpg->cap_user_time = 1;
2276 userpg->time_mult = data->cyc2ns_mul; 2279 userpg->time_mult = data->cyc2ns_mul;
2277 userpg->time_shift = data->cyc2ns_shift; 2280 userpg->time_shift = data->cyc2ns_shift;
2278 userpg->time_offset = data->cyc2ns_offset - now; 2281 userpg->time_offset = offset - now;
2279 2282
2280 /* 2283 /*
2281 * cap_user_time_zero doesn't make sense when we're using a different 2284 * cap_user_time_zero doesn't make sense when we're using a different
@@ -2283,7 +2286,7 @@ void arch_perf_update_userpage(struct perf_event *event,
2283 */ 2286 */
2284 if (!event->attr.use_clockid) { 2287 if (!event->attr.use_clockid) {
2285 userpg->cap_user_time_zero = 1; 2288 userpg->cap_user_time_zero = 1;
2286 userpg->time_zero = data->cyc2ns_offset; 2289 userpg->time_zero = offset;
2287 } 2290 }
2288 2291
2289 cyc2ns_read_end(data); 2292 cyc2ns_read_end(data);
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index d74747b031ec..c4eda791f877 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -46,6 +46,7 @@ struct kvm_page_track_notifier_node {
46}; 46};
47 47
48void kvm_page_track_init(struct kvm *kvm); 48void kvm_page_track_init(struct kvm *kvm);
49void kvm_page_track_cleanup(struct kvm *kvm);
49 50
50void kvm_page_track_free_memslot(struct kvm_memory_slot *free, 51void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
51 struct kvm_memory_slot *dont); 52 struct kvm_memory_slot *dont);
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index a04eabd43d06..27e9f9d769b8 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -12,6 +12,8 @@ extern int recalibrate_cpu_khz(void);
12 12
13extern int no_timer_check; 13extern int no_timer_check;
14 14
15extern bool using_native_sched_clock(void);
16
15/* 17/*
16 * We use the full linear equation: f(x) = a + b*x, in order to allow 18 * We use the full linear equation: f(x) = a + b*x, in order to allow
17 * a continuous function in the face of dynamic freq changes. 19 * a continuous function in the face of dynamic freq changes.
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 72e8300b1e8a..9cffb44a3cf5 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -485,15 +485,17 @@ static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
485 485
486 if (paddr < uv_hub_info->lowmem_remap_top) 486 if (paddr < uv_hub_info->lowmem_remap_top)
487 paddr |= uv_hub_info->lowmem_remap_base; 487 paddr |= uv_hub_info->lowmem_remap_base;
488 paddr |= uv_hub_info->gnode_upper; 488
489 if (m_val) 489 if (m_val) {
490 paddr |= uv_hub_info->gnode_upper;
490 paddr = ((paddr << uv_hub_info->m_shift) 491 paddr = ((paddr << uv_hub_info->m_shift)
491 >> uv_hub_info->m_shift) | 492 >> uv_hub_info->m_shift) |
492 ((paddr >> uv_hub_info->m_val) 493 ((paddr >> uv_hub_info->m_val)
493 << uv_hub_info->n_lshift); 494 << uv_hub_info->n_lshift);
494 else 495 } else {
495 paddr |= uv_soc_phys_ram_to_nasid(paddr) 496 paddr |= uv_soc_phys_ram_to_nasid(paddr)
496 << uv_hub_info->gpa_shift; 497 << uv_hub_info->gpa_shift;
498 }
497 return paddr; 499 return paddr;
498} 500}
499 501
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index e9f8f8cdd570..86f20cc0a65e 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -1105,7 +1105,8 @@ void __init uv_init_hub_info(struct uv_hub_info_s *hi)
1105 node_id.v = uv_read_local_mmr(UVH_NODE_ID); 1105 node_id.v = uv_read_local_mmr(UVH_NODE_ID);
1106 uv_cpuid.gnode_shift = max_t(unsigned int, uv_cpuid.gnode_shift, mn.n_val); 1106 uv_cpuid.gnode_shift = max_t(unsigned int, uv_cpuid.gnode_shift, mn.n_val);
1107 hi->gnode_extra = (node_id.s.node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1; 1107 hi->gnode_extra = (node_id.s.node_id & ~((1 << uv_cpuid.gnode_shift) - 1)) >> 1;
1108 hi->gnode_upper = (unsigned long)hi->gnode_extra << mn.m_val; 1108 if (mn.m_val)
1109 hi->gnode_upper = (u64)hi->gnode_extra << mn.m_val;
1109 1110
1110 if (uv_gp_table) { 1111 if (uv_gp_table) {
1111 hi->global_mmr_base = uv_gp_table->mmr_base; 1112 hi->global_mmr_base = uv_gp_table->mmr_base;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8e9725c607ea..5accfbdee3f0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -54,6 +54,8 @@
54 54
55static DEFINE_MUTEX(mce_chrdev_read_mutex); 55static DEFINE_MUTEX(mce_chrdev_read_mutex);
56 56
57static int mce_chrdev_open_count; /* #times opened */
58
57#define mce_log_get_idx_check(p) \ 59#define mce_log_get_idx_check(p) \
58({ \ 60({ \
59 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ 61 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
@@ -598,6 +600,10 @@ static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
598 if (atomic_read(&num_notifiers) > 2) 600 if (atomic_read(&num_notifiers) > 2)
599 return NOTIFY_DONE; 601 return NOTIFY_DONE;
600 602
603 /* Don't print when mcelog is running */
604 if (mce_chrdev_open_count > 0)
605 return NOTIFY_DONE;
606
601 __print_mce(m); 607 __print_mce(m);
602 608
603 return NOTIFY_DONE; 609 return NOTIFY_DONE;
@@ -1828,7 +1834,6 @@ void mcheck_cpu_clear(struct cpuinfo_x86 *c)
1828 */ 1834 */
1829 1835
1830static DEFINE_SPINLOCK(mce_chrdev_state_lock); 1836static DEFINE_SPINLOCK(mce_chrdev_state_lock);
1831static int mce_chrdev_open_count; /* #times opened */
1832static int mce_chrdev_open_exclu; /* already open exclusive? */ 1837static int mce_chrdev_open_exclu; /* already open exclusive? */
1833 1838
1834static int mce_chrdev_open(struct inode *inode, struct file *file) 1839static int mce_chrdev_open(struct inode *inode, struct file *file)
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 524cc5780a77..6e4a047e4b68 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -60,7 +60,7 @@ static const char * const th_names[] = {
60 "load_store", 60 "load_store",
61 "insn_fetch", 61 "insn_fetch",
62 "combined_unit", 62 "combined_unit",
63 "", 63 "decode_unit",
64 "northbridge", 64 "northbridge",
65 "execution_unit", 65 "execution_unit",
66}; 66};
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 8f3d9cf26ff9..cbd73eb42170 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -29,6 +29,12 @@
29#include <asm/ftrace.h> 29#include <asm/ftrace.h>
30#include <asm/nops.h> 30#include <asm/nops.h>
31 31
32#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && \
33 !defined(CC_USING_FENTRY) && \
34 !defined(CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE)
35# error The following combination is not supported: ((compiler missing -mfentry) || (CONFIG_X86_32 and !CONFIG_DYNAMIC_FTRACE)) && CONFIG_FUNCTION_GRAPH_TRACER && CONFIG_CC_OPTIMIZE_FOR_SIZE
36#endif
37
32#ifdef CONFIG_DYNAMIC_FTRACE 38#ifdef CONFIG_DYNAMIC_FTRACE
33 39
34int ftrace_arch_code_modify_prepare(void) 40int ftrace_arch_code_modify_prepare(void)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index c73a7f9e881a..714dfba6a1e7 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -328,7 +328,7 @@ unsigned long long sched_clock(void)
328 return paravirt_sched_clock(); 328 return paravirt_sched_clock();
329} 329}
330 330
331static inline bool using_native_sched_clock(void) 331bool using_native_sched_clock(void)
332{ 332{
333 return pv_time_ops.sched_clock == native_sched_clock; 333 return pv_time_ops.sched_clock == native_sched_clock;
334} 334}
@@ -336,7 +336,7 @@ static inline bool using_native_sched_clock(void)
336unsigned long long 336unsigned long long
337sched_clock(void) __attribute__((alias("native_sched_clock"))); 337sched_clock(void) __attribute__((alias("native_sched_clock")));
338 338
339static inline bool using_native_sched_clock(void) { return true; } 339bool using_native_sched_clock(void) { return true; }
340#endif 340#endif
341 341
342int check_tsc_unstable(void) 342int check_tsc_unstable(void)
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 73ea24d4f119..047b17a26269 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -657,6 +657,9 @@ void kvm_pic_destroy(struct kvm *kvm)
657{ 657{
658 struct kvm_pic *vpic = kvm->arch.vpic; 658 struct kvm_pic *vpic = kvm->arch.vpic;
659 659
660 if (!vpic)
661 return;
662
660 kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master); 663 kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
661 kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave); 664 kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
662 kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr); 665 kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 6e219e5c07d2..289270a6aecb 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -635,6 +635,9 @@ void kvm_ioapic_destroy(struct kvm *kvm)
635{ 635{
636 struct kvm_ioapic *ioapic = kvm->arch.vioapic; 636 struct kvm_ioapic *ioapic = kvm->arch.vioapic;
637 637
638 if (!ioapic)
639 return;
640
638 cancel_delayed_work_sync(&ioapic->eoi_inject); 641 cancel_delayed_work_sync(&ioapic->eoi_inject);
639 kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); 642 kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
640 kvm->arch.vioapic = NULL; 643 kvm->arch.vioapic = NULL;
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index 37942e419c32..60168cdd0546 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -160,6 +160,14 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
160 return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]); 160 return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
161} 161}
162 162
163void kvm_page_track_cleanup(struct kvm *kvm)
164{
165 struct kvm_page_track_notifier_head *head;
166
167 head = &kvm->arch.track_notifier_head;
168 cleanup_srcu_struct(&head->track_srcu);
169}
170
163void kvm_page_track_init(struct kvm *kvm) 171void kvm_page_track_init(struct kvm *kvm)
164{ 172{
165 struct kvm_page_track_notifier_head *head; 173 struct kvm_page_track_notifier_head *head;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d1efe2c62b3f..5fba70646c32 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1379,6 +1379,9 @@ static void avic_vm_destroy(struct kvm *kvm)
1379 unsigned long flags; 1379 unsigned long flags;
1380 struct kvm_arch *vm_data = &kvm->arch; 1380 struct kvm_arch *vm_data = &kvm->arch;
1381 1381
1382 if (!avic)
1383 return;
1384
1382 avic_free_vm_id(vm_data->avic_vm_id); 1385 avic_free_vm_id(vm_data->avic_vm_id);
1383 1386
1384 if (vm_data->avic_logical_id_table_page) 1387 if (vm_data->avic_logical_id_table_page)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 98e82ee1e699..2ee00dbbbd51 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1239,6 +1239,11 @@ static inline bool cpu_has_vmx_invvpid_global(void)
1239 return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT; 1239 return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
1240} 1240}
1241 1241
1242static inline bool cpu_has_vmx_invvpid(void)
1243{
1244 return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
1245}
1246
1242static inline bool cpu_has_vmx_ept(void) 1247static inline bool cpu_has_vmx_ept(void)
1243{ 1248{
1244 return vmcs_config.cpu_based_2nd_exec_ctrl & 1249 return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -2753,7 +2758,6 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2753 SECONDARY_EXEC_RDTSCP | 2758 SECONDARY_EXEC_RDTSCP |
2754 SECONDARY_EXEC_DESC | 2759 SECONDARY_EXEC_DESC |
2755 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2760 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2756 SECONDARY_EXEC_ENABLE_VPID |
2757 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2761 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2758 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2762 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2759 SECONDARY_EXEC_WBINVD_EXITING | 2763 SECONDARY_EXEC_WBINVD_EXITING |
@@ -2781,10 +2785,12 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2781 * though it is treated as global context. The alternative is 2785 * though it is treated as global context. The alternative is
2782 * not failing the single-context invvpid, and it is worse. 2786 * not failing the single-context invvpid, and it is worse.
2783 */ 2787 */
2784 if (enable_vpid) 2788 if (enable_vpid) {
2789 vmx->nested.nested_vmx_secondary_ctls_high |=
2790 SECONDARY_EXEC_ENABLE_VPID;
2785 vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT | 2791 vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
2786 VMX_VPID_EXTENT_SUPPORTED_MASK; 2792 VMX_VPID_EXTENT_SUPPORTED_MASK;
2787 else 2793 } else
2788 vmx->nested.nested_vmx_vpid_caps = 0; 2794 vmx->nested.nested_vmx_vpid_caps = 0;
2789 2795
2790 if (enable_unrestricted_guest) 2796 if (enable_unrestricted_guest)
@@ -4024,6 +4030,12 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
4024 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid); 4030 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
4025} 4031}
4026 4032
4033static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
4034{
4035 if (enable_ept)
4036 vmx_flush_tlb(vcpu);
4037}
4038
4027static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) 4039static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
4028{ 4040{
4029 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; 4041 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -6517,8 +6529,10 @@ static __init int hardware_setup(void)
6517 if (boot_cpu_has(X86_FEATURE_NX)) 6529 if (boot_cpu_has(X86_FEATURE_NX))
6518 kvm_enable_efer_bits(EFER_NX); 6530 kvm_enable_efer_bits(EFER_NX);
6519 6531
6520 if (!cpu_has_vmx_vpid()) 6532 if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
6533 !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
6521 enable_vpid = 0; 6534 enable_vpid = 0;
6535
6522 if (!cpu_has_vmx_shadow_vmcs()) 6536 if (!cpu_has_vmx_shadow_vmcs())
6523 enable_shadow_vmcs = 0; 6537 enable_shadow_vmcs = 0;
6524 if (enable_shadow_vmcs) 6538 if (enable_shadow_vmcs)
@@ -8501,7 +8515,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
8501 && kvm_vmx_exit_handlers[exit_reason]) 8515 && kvm_vmx_exit_handlers[exit_reason])
8502 return kvm_vmx_exit_handlers[exit_reason](vcpu); 8516 return kvm_vmx_exit_handlers[exit_reason](vcpu);
8503 else { 8517 else {
8504 WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason); 8518 vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
8519 exit_reason);
8505 kvm_queue_exception(vcpu, UD_VECTOR); 8520 kvm_queue_exception(vcpu, UD_VECTOR);
8506 return 1; 8521 return 1;
8507 } 8522 }
@@ -8547,6 +8562,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
8547 } else { 8562 } else {
8548 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; 8563 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
8549 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 8564 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
8565 vmx_flush_tlb_ept_only(vcpu);
8550 } 8566 }
8551 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); 8567 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
8552 8568
@@ -8572,8 +8588,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
8572 */ 8588 */
8573 if (!is_guest_mode(vcpu) || 8589 if (!is_guest_mode(vcpu) ||
8574 !nested_cpu_has2(get_vmcs12(&vmx->vcpu), 8590 !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
8575 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) 8591 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
8576 vmcs_write64(APIC_ACCESS_ADDR, hpa); 8592 vmcs_write64(APIC_ACCESS_ADDR, hpa);
8593 vmx_flush_tlb_ept_only(vcpu);
8594 }
8577} 8595}
8578 8596
8579static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) 8597static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
@@ -9974,7 +9992,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
9974{ 9992{
9975 struct vcpu_vmx *vmx = to_vmx(vcpu); 9993 struct vcpu_vmx *vmx = to_vmx(vcpu);
9976 u32 exec_control; 9994 u32 exec_control;
9977 bool nested_ept_enabled = false;
9978 9995
9979 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); 9996 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
9980 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); 9997 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
@@ -10121,8 +10138,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
10121 vmcs12->guest_intr_status); 10138 vmcs12->guest_intr_status);
10122 } 10139 }
10123 10140
10124 nested_ept_enabled = (exec_control & SECONDARY_EXEC_ENABLE_EPT) != 0;
10125
10126 /* 10141 /*
10127 * Write an illegal value to APIC_ACCESS_ADDR. Later, 10142 * Write an illegal value to APIC_ACCESS_ADDR. Later,
10128 * nested_get_vmcs12_pages will either fix it up or 10143 * nested_get_vmcs12_pages will either fix it up or
@@ -10255,6 +10270,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
10255 if (nested_cpu_has_ept(vmcs12)) { 10270 if (nested_cpu_has_ept(vmcs12)) {
10256 kvm_mmu_unload(vcpu); 10271 kvm_mmu_unload(vcpu);
10257 nested_ept_init_mmu_context(vcpu); 10272 nested_ept_init_mmu_context(vcpu);
10273 } else if (nested_cpu_has2(vmcs12,
10274 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
10275 vmx_flush_tlb_ept_only(vcpu);
10258 } 10276 }
10259 10277
10260 /* 10278 /*
@@ -10282,12 +10300,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
10282 vmx_set_efer(vcpu, vcpu->arch.efer); 10300 vmx_set_efer(vcpu, vcpu->arch.efer);
10283 10301
10284 /* Shadow page tables on either EPT or shadow page tables. */ 10302 /* Shadow page tables on either EPT or shadow page tables. */
10285 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled, 10303 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
10286 entry_failure_code)) 10304 entry_failure_code))
10287 return 1; 10305 return 1;
10288 10306
10289 kvm_mmu_reset_context(vcpu);
10290
10291 if (!enable_ept) 10307 if (!enable_ept)
10292 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; 10308 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
10293 10309
@@ -11056,6 +11072,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
11056 vmx->nested.change_vmcs01_virtual_x2apic_mode = false; 11072 vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
11057 vmx_set_virtual_x2apic_mode(vcpu, 11073 vmx_set_virtual_x2apic_mode(vcpu,
11058 vcpu->arch.apic_base & X2APIC_ENABLE); 11074 vcpu->arch.apic_base & X2APIC_ENABLE);
11075 } else if (!nested_cpu_has_ept(vmcs12) &&
11076 nested_cpu_has2(vmcs12,
11077 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
11078 vmx_flush_tlb_ept_only(vcpu);
11059 } 11079 }
11060 11080
11061 /* This is needed for same reason as it was needed in prepare_vmcs02 */ 11081 /* This is needed for same reason as it was needed in prepare_vmcs02 */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1faf620a6fdc..ccbd45ecd41a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8153,11 +8153,12 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
8153 if (kvm_x86_ops->vm_destroy) 8153 if (kvm_x86_ops->vm_destroy)
8154 kvm_x86_ops->vm_destroy(kvm); 8154 kvm_x86_ops->vm_destroy(kvm);
8155 kvm_iommu_unmap_guest(kvm); 8155 kvm_iommu_unmap_guest(kvm);
8156 kfree(kvm->arch.vpic); 8156 kvm_pic_destroy(kvm);
8157 kfree(kvm->arch.vioapic); 8157 kvm_ioapic_destroy(kvm);
8158 kvm_free_vcpus(kvm); 8158 kvm_free_vcpus(kvm);
8159 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); 8159 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
8160 kvm_mmu_uninit_vm(kvm); 8160 kvm_mmu_uninit_vm(kvm);
8161 kvm_page_track_cleanup(kvm);
8161} 8162}
8162 8163
8163void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 8164void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
@@ -8566,11 +8567,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
8566{ 8567{
8567 struct x86_exception fault; 8568 struct x86_exception fault;
8568 8569
8569 trace_kvm_async_pf_ready(work->arch.token, work->gva);
8570 if (work->wakeup_all) 8570 if (work->wakeup_all)
8571 work->arch.token = ~0; /* broadcast wakeup */ 8571 work->arch.token = ~0; /* broadcast wakeup */
8572 else 8572 else
8573 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); 8573 kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
8574 trace_kvm_async_pf_ready(work->arch.token, work->gva);
8574 8575
8575 if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && 8576 if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
8576 !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { 8577 !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 779782f58324..9a53a06e5a3e 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled)
290 _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail) 290 _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
291 _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail) 291 _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
292 _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail) 292 _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
293 _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail) 293 _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
294 _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail) 294 _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
295 _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail) 295 _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
296 _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail) 296 _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 887e57182716..aed206475aa7 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -48,7 +48,7 @@ static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
48#if defined(CONFIG_X86_ESPFIX64) 48#if defined(CONFIG_X86_ESPFIX64)
49static const unsigned long vaddr_end = ESPFIX_BASE_ADDR; 49static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
50#elif defined(CONFIG_EFI) 50#elif defined(CONFIG_EFI)
51static const unsigned long vaddr_end = EFI_VA_START; 51static const unsigned long vaddr_end = EFI_VA_END;
52#else 52#else
53static const unsigned long vaddr_end = __START_KERNEL_map; 53static const unsigned long vaddr_end = __START_KERNEL_map;
54#endif 54#endif
@@ -105,7 +105,7 @@ void __init kernel_randomize_memory(void)
105 */ 105 */
106 BUILD_BUG_ON(vaddr_start >= vaddr_end); 106 BUILD_BUG_ON(vaddr_start >= vaddr_end);
107 BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) && 107 BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
108 vaddr_end >= EFI_VA_START); 108 vaddr_end >= EFI_VA_END);
109 BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) || 109 BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
110 IS_ENABLED(CONFIG_EFI)) && 110 IS_ENABLED(CONFIG_EFI)) &&
111 vaddr_end >= __START_KERNEL_map); 111 vaddr_end >= __START_KERNEL_map);
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 555b9fa0ad43..7dbdb780264d 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -8,6 +8,7 @@ PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
8LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib 8LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
9targets += purgatory.ro 9targets += purgatory.ro
10 10
11KASAN_SANITIZE := n
11KCOV_INSTRUMENT := n 12KCOV_INSTRUMENT := n
12 13
13# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That 14# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 976b1d70edbc..4ddbfd57a7c8 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -164,8 +164,21 @@ void copy_user_highpage(struct page *to, struct page *from,
164 164
165#define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) 165#define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
166 166
167#ifdef CONFIG_MMU
168static inline unsigned long ___pa(unsigned long va)
169{
170 unsigned long off = va - PAGE_OFFSET;
171
172 if (off >= XCHAL_KSEG_SIZE)
173 off -= XCHAL_KSEG_SIZE;
174
175 return off + PHYS_OFFSET;
176}
177#define __pa(x) ___pa((unsigned long)(x))
178#else
167#define __pa(x) \ 179#define __pa(x) \
168 ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET) 180 ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
181#endif
169#define __va(x) \ 182#define __va(x) \
170 ((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET)) 183 ((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
171#define pfn_valid(pfn) \ 184#define pfn_valid(pfn) \
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index cd400af4a6b2..6be7eb27fd29 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -774,7 +774,10 @@ __SYSCALL(349, sys_pkey_alloc, 2)
774#define __NR_pkey_free 350 774#define __NR_pkey_free 350
775__SYSCALL(350, sys_pkey_free, 1) 775__SYSCALL(350, sys_pkey_free, 1)
776 776
777#define __NR_syscall_count 351 777#define __NR_statx 351
778__SYSCALL(351, sys_statx, 5)
779
780#define __NR_syscall_count 352
778 781
779/* 782/*
780 * sysxtensa syscall handler 783 * sysxtensa syscall handler
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index c82c43bff296..bae697a06a98 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -483,10 +483,8 @@ void show_regs(struct pt_regs * regs)
483 483
484static int show_trace_cb(struct stackframe *frame, void *data) 484static int show_trace_cb(struct stackframe *frame, void *data)
485{ 485{
486 if (kernel_text_address(frame->pc)) { 486 if (kernel_text_address(frame->pc))
487 pr_cont(" [<%08lx>]", frame->pc); 487 pr_cont(" [<%08lx>] %pB\n", frame->pc, (void *)frame->pc);
488 print_symbol(" %s\n", frame->pc);
489 }
490 return 0; 488 return 0;
491} 489}
492 490
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a4546f060e80..6b6e7bc041db 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -697,17 +697,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
697{ 697{
698 struct blk_mq_timeout_data *data = priv; 698 struct blk_mq_timeout_data *data = priv;
699 699
700 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { 700 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
701 /*
702 * If a request wasn't started before the queue was
703 * marked dying, kill it here or it'll go unnoticed.
704 */
705 if (unlikely(blk_queue_dying(rq->q))) {
706 rq->errors = -EIO;
707 blk_mq_end_request(rq, rq->errors);
708 }
709 return; 701 return;
710 }
711 702
712 if (time_after_eq(jiffies, rq->deadline)) { 703 if (time_after_eq(jiffies, rq->deadline)) {
713 if (!blk_mark_rq_complete(rq)) 704 if (!blk_mark_rq_complete(rq))
@@ -978,7 +969,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
978 struct request *rq; 969 struct request *rq;
979 LIST_HEAD(driver_list); 970 LIST_HEAD(driver_list);
980 struct list_head *dptr; 971 struct list_head *dptr;
981 int queued, ret = BLK_MQ_RQ_QUEUE_OK; 972 int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
982 973
983 /* 974 /*
984 * Start off with dptr being NULL, so we start the first request 975 * Start off with dptr being NULL, so we start the first request
@@ -989,7 +980,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
989 /* 980 /*
990 * Now process all the entries, sending them to the driver. 981 * Now process all the entries, sending them to the driver.
991 */ 982 */
992 queued = 0; 983 errors = queued = 0;
993 while (!list_empty(list)) { 984 while (!list_empty(list)) {
994 struct blk_mq_queue_data bd; 985 struct blk_mq_queue_data bd;
995 986
@@ -1046,6 +1037,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1046 default: 1037 default:
1047 pr_err("blk-mq: bad return on queue: %d\n", ret); 1038 pr_err("blk-mq: bad return on queue: %d\n", ret);
1048 case BLK_MQ_RQ_QUEUE_ERROR: 1039 case BLK_MQ_RQ_QUEUE_ERROR:
1040 errors++;
1049 rq->errors = -EIO; 1041 rq->errors = -EIO;
1050 blk_mq_end_request(rq, rq->errors); 1042 blk_mq_end_request(rq, rq->errors);
1051 break; 1043 break;
@@ -1097,7 +1089,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1097 blk_mq_run_hw_queue(hctx, true); 1089 blk_mq_run_hw_queue(hctx, true);
1098 } 1090 }
1099 1091
1100 return queued != 0; 1092 return (queued + errors) != 0;
1101} 1093}
1102 1094
1103static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) 1095static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 9b43efb8933f..186fcb981e9b 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -30,11 +30,11 @@ static void blk_stat_flush_batch(struct blk_rq_stat *stat)
30 30
31static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src) 31static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
32{ 32{
33 blk_stat_flush_batch(src);
34
33 if (!src->nr_samples) 35 if (!src->nr_samples)
34 return; 36 return;
35 37
36 blk_stat_flush_batch(src);
37
38 dst->min = min(dst->min, src->min); 38 dst->min = min(dst->min, src->min);
39 dst->max = max(dst->max, src->max); 39 dst->max = max(dst->max, src->max);
40 40
diff --git a/crypto/lrw.c b/crypto/lrw.c
index ecd8474018e3..3ea095adafd9 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -286,8 +286,11 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
286 286
287 subreq->cryptlen = LRW_BUFFER_SIZE; 287 subreq->cryptlen = LRW_BUFFER_SIZE;
288 if (req->cryptlen > LRW_BUFFER_SIZE) { 288 if (req->cryptlen > LRW_BUFFER_SIZE) {
289 subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE); 289 unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
290 rctx->ext = kmalloc(subreq->cryptlen, gfp); 290
291 rctx->ext = kmalloc(n, gfp);
292 if (rctx->ext)
293 subreq->cryptlen = n;
291 } 294 }
292 295
293 rctx->src = req->src; 296 rctx->src = req->src;
diff --git a/crypto/xts.c b/crypto/xts.c
index baeb34dd8582..c976bfac29da 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -230,8 +230,11 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
230 230
231 subreq->cryptlen = XTS_BUFFER_SIZE; 231 subreq->cryptlen = XTS_BUFFER_SIZE;
232 if (req->cryptlen > XTS_BUFFER_SIZE) { 232 if (req->cryptlen > XTS_BUFFER_SIZE) {
233 subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE); 233 unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
234 rctx->ext = kmalloc(subreq->cryptlen, gfp); 234
235 rctx->ext = kmalloc(n, gfp);
236 if (rctx->ext)
237 subreq->cryptlen = n;
235 } 238 }
236 239
237 rctx->src = req->src; 240 rctx->src = req->src;
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index a391bbc48105..d94f92f88ca1 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -2,7 +2,6 @@
2# Makefile for the Linux ACPI interpreter 2# Makefile for the Linux ACPI interpreter
3# 3#
4 4
5ccflags-y := -Os
6ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT 5ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
7 6
8# 7#
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index b4c1a6a51da4..03250e1f1103 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -25,9 +25,11 @@
25ACPI_MODULE_NAME("platform"); 25ACPI_MODULE_NAME("platform");
26 26
27static const struct acpi_device_id forbidden_id_list[] = { 27static const struct acpi_device_id forbidden_id_list[] = {
28 {"PNP0000", 0}, /* PIC */ 28 {"PNP0000", 0}, /* PIC */
29 {"PNP0100", 0}, /* Timer */ 29 {"PNP0100", 0}, /* Timer */
30 {"PNP0200", 0}, /* AT DMA Controller */ 30 {"PNP0200", 0}, /* AT DMA Controller */
31 {"ACPI0009", 0}, /* IOxAPIC */
32 {"ACPI000A", 0}, /* IOAPIC */
31 {"", 0}, 33 {"", 0},
32}; 34};
33 35
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index b192b42a8351..79b3c9c5a3bc 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1073,6 +1073,7 @@ static int ghes_remove(struct platform_device *ghes_dev)
1073 if (list_empty(&ghes_sci)) 1073 if (list_empty(&ghes_sci))
1074 unregister_acpi_hed_notifier(&ghes_notifier_sci); 1074 unregister_acpi_hed_notifier(&ghes_notifier_sci);
1075 mutex_unlock(&ghes_list_mutex); 1075 mutex_unlock(&ghes_list_mutex);
1076 synchronize_rcu();
1076 break; 1077 break;
1077 case ACPI_HEST_NOTIFY_NMI: 1078 case ACPI_HEST_NOTIFY_NMI:
1078 ghes_nmi_remove(ghes); 1079 ghes_nmi_remove(ghes);
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
index 1120dfd625b8..7e4fbf9a53a3 100644
--- a/drivers/acpi/ioapic.c
+++ b/drivers/acpi/ioapic.c
@@ -45,6 +45,12 @@ static acpi_status setup_res(struct acpi_resource *acpi_res, void *data)
45 struct resource *res = data; 45 struct resource *res = data;
46 struct resource_win win; 46 struct resource_win win;
47 47
48 /*
49 * We might assign this to 'res' later, make sure all pointers are
50 * cleared before the resource is added to the global list
51 */
52 memset(&win, 0, sizeof(win));
53
48 res->flags = 0; 54 res->flags = 0;
49 if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM)) 55 if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM))
50 return AE_OK; 56 return AE_OK;
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 01c94669a2b0..3afa8c1fa127 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -30,7 +30,7 @@ static bool qdf2400_erratum_44_present(struct acpi_table_header *h)
30 return true; 30 return true;
31 31
32 if (!memcmp(h->oem_table_id, "QDF2400 ", ACPI_OEM_TABLE_ID_SIZE) && 32 if (!memcmp(h->oem_table_id, "QDF2400 ", ACPI_OEM_TABLE_ID_SIZE) &&
33 h->oem_revision == 0) 33 h->oem_revision == 1)
34 return true; 34 return true;
35 35
36 return false; 36 return false;
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index bf43b5d2aafc..83f1439e57fd 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -218,6 +218,7 @@ static const struct of_device_id img_ascii_lcd_matches[] = {
218 { .compatible = "img,boston-lcd", .data = &boston_config }, 218 { .compatible = "img,boston-lcd", .data = &boston_config },
219 { .compatible = "mti,malta-lcd", .data = &malta_config }, 219 { .compatible = "mti,malta-lcd", .data = &malta_config },
220 { .compatible = "mti,sead3-lcd", .data = &sead3_config }, 220 { .compatible = "mti,sead3-lcd", .data = &sead3_config },
221 { /* sentinel */ }
221}; 222};
222 223
223/** 224/**
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 7e4287bc19e5..d8a23561b4cb 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -47,6 +47,8 @@ static DEFINE_MUTEX(nbd_index_mutex);
47struct nbd_sock { 47struct nbd_sock {
48 struct socket *sock; 48 struct socket *sock;
49 struct mutex tx_lock; 49 struct mutex tx_lock;
50 struct request *pending;
51 int sent;
50}; 52};
51 53
52#define NBD_TIMEDOUT 0 54#define NBD_TIMEDOUT 0
@@ -124,7 +126,8 @@ static const char *nbdcmd_to_ascii(int cmd)
124 126
125static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev) 127static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
126{ 128{
127 bd_set_size(bdev, 0); 129 if (bdev->bd_openers <= 1)
130 bd_set_size(bdev, 0);
128 set_capacity(nbd->disk, 0); 131 set_capacity(nbd->disk, 0);
129 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); 132 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
130 133
@@ -190,7 +193,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
190 193
191 dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n"); 194 dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
192 set_bit(NBD_TIMEDOUT, &nbd->runtime_flags); 195 set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
193 req->errors++; 196 req->errors = -EIO;
194 197
195 mutex_lock(&nbd->config_lock); 198 mutex_lock(&nbd->config_lock);
196 sock_shutdown(nbd); 199 sock_shutdown(nbd);
@@ -202,7 +205,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
202 * Send or receive packet. 205 * Send or receive packet.
203 */ 206 */
204static int sock_xmit(struct nbd_device *nbd, int index, int send, 207static int sock_xmit(struct nbd_device *nbd, int index, int send,
205 struct iov_iter *iter, int msg_flags) 208 struct iov_iter *iter, int msg_flags, int *sent)
206{ 209{
207 struct socket *sock = nbd->socks[index]->sock; 210 struct socket *sock = nbd->socks[index]->sock;
208 int result; 211 int result;
@@ -237,6 +240,8 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
237 result = -EPIPE; /* short read */ 240 result = -EPIPE; /* short read */
238 break; 241 break;
239 } 242 }
243 if (sent)
244 *sent += result;
240 } while (msg_data_left(&msg)); 245 } while (msg_data_left(&msg));
241 246
242 tsk_restore_flags(current, pflags, PF_MEMALLOC); 247 tsk_restore_flags(current, pflags, PF_MEMALLOC);
@@ -248,6 +253,7 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
248static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) 253static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
249{ 254{
250 struct request *req = blk_mq_rq_from_pdu(cmd); 255 struct request *req = blk_mq_rq_from_pdu(cmd);
256 struct nbd_sock *nsock = nbd->socks[index];
251 int result; 257 int result;
252 struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)}; 258 struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
253 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; 259 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
@@ -256,6 +262,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
256 struct bio *bio; 262 struct bio *bio;
257 u32 type; 263 u32 type;
258 u32 tag = blk_mq_unique_tag(req); 264 u32 tag = blk_mq_unique_tag(req);
265 int sent = nsock->sent, skip = 0;
259 266
260 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); 267 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
261 268
@@ -283,6 +290,17 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
283 return -EIO; 290 return -EIO;
284 } 291 }
285 292
293 /* We did a partial send previously, and we at least sent the whole
294 * request struct, so just go and send the rest of the pages in the
295 * request.
296 */
297 if (sent) {
298 if (sent >= sizeof(request)) {
299 skip = sent - sizeof(request);
300 goto send_pages;
301 }
302 iov_iter_advance(&from, sent);
303 }
286 request.type = htonl(type); 304 request.type = htonl(type);
287 if (type != NBD_CMD_FLUSH) { 305 if (type != NBD_CMD_FLUSH) {
288 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); 306 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
@@ -294,15 +312,27 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
294 cmd, nbdcmd_to_ascii(type), 312 cmd, nbdcmd_to_ascii(type),
295 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); 313 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
296 result = sock_xmit(nbd, index, 1, &from, 314 result = sock_xmit(nbd, index, 1, &from,
297 (type == NBD_CMD_WRITE) ? MSG_MORE : 0); 315 (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
298 if (result <= 0) { 316 if (result <= 0) {
317 if (result == -ERESTARTSYS) {
318 /* If we havne't sent anything we can just return BUSY,
319 * however if we have sent something we need to make
320 * sure we only allow this req to be sent until we are
321 * completely done.
322 */
323 if (sent) {
324 nsock->pending = req;
325 nsock->sent = sent;
326 }
327 return BLK_MQ_RQ_QUEUE_BUSY;
328 }
299 dev_err_ratelimited(disk_to_dev(nbd->disk), 329 dev_err_ratelimited(disk_to_dev(nbd->disk),
300 "Send control failed (result %d)\n", result); 330 "Send control failed (result %d)\n", result);
301 return -EIO; 331 return -EIO;
302 } 332 }
303 333send_pages:
304 if (type != NBD_CMD_WRITE) 334 if (type != NBD_CMD_WRITE)
305 return 0; 335 goto out;
306 336
307 bio = req->bio; 337 bio = req->bio;
308 while (bio) { 338 while (bio) {
@@ -318,8 +348,25 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
318 cmd, bvec.bv_len); 348 cmd, bvec.bv_len);
319 iov_iter_bvec(&from, ITER_BVEC | WRITE, 349 iov_iter_bvec(&from, ITER_BVEC | WRITE,
320 &bvec, 1, bvec.bv_len); 350 &bvec, 1, bvec.bv_len);
321 result = sock_xmit(nbd, index, 1, &from, flags); 351 if (skip) {
352 if (skip >= iov_iter_count(&from)) {
353 skip -= iov_iter_count(&from);
354 continue;
355 }
356 iov_iter_advance(&from, skip);
357 skip = 0;
358 }
359 result = sock_xmit(nbd, index, 1, &from, flags, &sent);
322 if (result <= 0) { 360 if (result <= 0) {
361 if (result == -ERESTARTSYS) {
362 /* We've already sent the header, we
363 * have no choice but to set pending and
364 * return BUSY.
365 */
366 nsock->pending = req;
367 nsock->sent = sent;
368 return BLK_MQ_RQ_QUEUE_BUSY;
369 }
323 dev_err(disk_to_dev(nbd->disk), 370 dev_err(disk_to_dev(nbd->disk),
324 "Send data failed (result %d)\n", 371 "Send data failed (result %d)\n",
325 result); 372 result);
@@ -336,6 +383,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
336 } 383 }
337 bio = next; 384 bio = next;
338 } 385 }
386out:
387 nsock->pending = NULL;
388 nsock->sent = 0;
339 return 0; 389 return 0;
340} 390}
341 391
@@ -353,7 +403,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
353 403
354 reply.magic = 0; 404 reply.magic = 0;
355 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); 405 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
356 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL); 406 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
357 if (result <= 0) { 407 if (result <= 0) {
358 if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) && 408 if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
359 !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags)) 409 !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
@@ -383,7 +433,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
383 if (ntohl(reply.error)) { 433 if (ntohl(reply.error)) {
384 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", 434 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
385 ntohl(reply.error)); 435 ntohl(reply.error));
386 req->errors++; 436 req->errors = -EIO;
387 return cmd; 437 return cmd;
388 } 438 }
389 439
@@ -395,11 +445,11 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
395 rq_for_each_segment(bvec, req, iter) { 445 rq_for_each_segment(bvec, req, iter) {
396 iov_iter_bvec(&to, ITER_BVEC | READ, 446 iov_iter_bvec(&to, ITER_BVEC | READ,
397 &bvec, 1, bvec.bv_len); 447 &bvec, 1, bvec.bv_len);
398 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL); 448 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
399 if (result <= 0) { 449 if (result <= 0) {
400 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", 450 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
401 result); 451 result);
402 req->errors++; 452 req->errors = -EIO;
403 return cmd; 453 return cmd;
404 } 454 }
405 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", 455 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
@@ -469,7 +519,7 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved)
469 if (!blk_mq_request_started(req)) 519 if (!blk_mq_request_started(req))
470 return; 520 return;
471 cmd = blk_mq_rq_to_pdu(req); 521 cmd = blk_mq_rq_to_pdu(req);
472 req->errors++; 522 req->errors = -EIO;
473 nbd_end_request(cmd); 523 nbd_end_request(cmd);
474} 524}
475 525
@@ -482,22 +532,23 @@ static void nbd_clear_que(struct nbd_device *nbd)
482} 532}
483 533
484 534
485static void nbd_handle_cmd(struct nbd_cmd *cmd, int index) 535static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
486{ 536{
487 struct request *req = blk_mq_rq_from_pdu(cmd); 537 struct request *req = blk_mq_rq_from_pdu(cmd);
488 struct nbd_device *nbd = cmd->nbd; 538 struct nbd_device *nbd = cmd->nbd;
489 struct nbd_sock *nsock; 539 struct nbd_sock *nsock;
540 int ret;
490 541
491 if (index >= nbd->num_connections) { 542 if (index >= nbd->num_connections) {
492 dev_err_ratelimited(disk_to_dev(nbd->disk), 543 dev_err_ratelimited(disk_to_dev(nbd->disk),
493 "Attempted send on invalid socket\n"); 544 "Attempted send on invalid socket\n");
494 goto error_out; 545 return -EINVAL;
495 } 546 }
496 547
497 if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) { 548 if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
498 dev_err_ratelimited(disk_to_dev(nbd->disk), 549 dev_err_ratelimited(disk_to_dev(nbd->disk),
499 "Attempted send on closed socket\n"); 550 "Attempted send on closed socket\n");
500 goto error_out; 551 return -EINVAL;
501 } 552 }
502 553
503 req->errors = 0; 554 req->errors = 0;
@@ -508,29 +559,30 @@ static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
508 mutex_unlock(&nsock->tx_lock); 559 mutex_unlock(&nsock->tx_lock);
509 dev_err_ratelimited(disk_to_dev(nbd->disk), 560 dev_err_ratelimited(disk_to_dev(nbd->disk),
510 "Attempted send on closed socket\n"); 561 "Attempted send on closed socket\n");
511 goto error_out; 562 return -EINVAL;
512 } 563 }
513 564
514 if (nbd_send_cmd(nbd, cmd, index) != 0) { 565 /* Handle the case that we have a pending request that was partially
515 dev_err_ratelimited(disk_to_dev(nbd->disk), 566 * transmitted that _has_ to be serviced first. We need to call requeue
516 "Request send failed\n"); 567 * here so that it gets put _after_ the request that is already on the
517 req->errors++; 568 * dispatch list.
518 nbd_end_request(cmd); 569 */
570 if (unlikely(nsock->pending && nsock->pending != req)) {
571 blk_mq_requeue_request(req, true);
572 ret = 0;
573 goto out;
519 } 574 }
520 575 ret = nbd_send_cmd(nbd, cmd, index);
576out:
521 mutex_unlock(&nsock->tx_lock); 577 mutex_unlock(&nsock->tx_lock);
522 578 return ret;
523 return;
524
525error_out:
526 req->errors++;
527 nbd_end_request(cmd);
528} 579}
529 580
530static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx, 581static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
531 const struct blk_mq_queue_data *bd) 582 const struct blk_mq_queue_data *bd)
532{ 583{
533 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); 584 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
585 int ret;
534 586
535 /* 587 /*
536 * Since we look at the bio's to send the request over the network we 588 * Since we look at the bio's to send the request over the network we
@@ -543,10 +595,20 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
543 */ 595 */
544 init_completion(&cmd->send_complete); 596 init_completion(&cmd->send_complete);
545 blk_mq_start_request(bd->rq); 597 blk_mq_start_request(bd->rq);
546 nbd_handle_cmd(cmd, hctx->queue_num); 598
599 /* We can be called directly from the user space process, which means we
600 * could possibly have signals pending so our sendmsg will fail. In
601 * this case we need to return that we are busy, otherwise error out as
602 * appropriate.
603 */
604 ret = nbd_handle_cmd(cmd, hctx->queue_num);
605 if (ret < 0)
606 ret = BLK_MQ_RQ_QUEUE_ERROR;
607 if (!ret)
608 ret = BLK_MQ_RQ_QUEUE_OK;
547 complete(&cmd->send_complete); 609 complete(&cmd->send_complete);
548 610
549 return BLK_MQ_RQ_QUEUE_OK; 611 return ret;
550} 612}
551 613
552static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev, 614static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
@@ -581,6 +643,8 @@ static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
581 643
582 mutex_init(&nsock->tx_lock); 644 mutex_init(&nsock->tx_lock);
583 nsock->sock = sock; 645 nsock->sock = sock;
646 nsock->pending = NULL;
647 nsock->sent = 0;
584 socks[nbd->num_connections++] = nsock; 648 socks[nbd->num_connections++] = nsock;
585 649
586 if (max_part) 650 if (max_part)
@@ -602,6 +666,8 @@ static void nbd_reset(struct nbd_device *nbd)
602 666
603static void nbd_bdev_reset(struct block_device *bdev) 667static void nbd_bdev_reset(struct block_device *bdev)
604{ 668{
669 if (bdev->bd_openers > 1)
670 return;
605 set_device_ro(bdev, false); 671 set_device_ro(bdev, false);
606 bdev->bd_inode->i_size = 0; 672 bdev->bd_inode->i_size = 0;
607 if (max_part > 0) { 673 if (max_part > 0) {
@@ -634,7 +700,7 @@ static void send_disconnects(struct nbd_device *nbd)
634 700
635 for (i = 0; i < nbd->num_connections; i++) { 701 for (i = 0; i < nbd->num_connections; i++) {
636 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); 702 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
637 ret = sock_xmit(nbd, i, 1, &from, 0); 703 ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
638 if (ret <= 0) 704 if (ret <= 0)
639 dev_err(disk_to_dev(nbd->disk), 705 dev_err(disk_to_dev(nbd->disk),
640 "Send disconnect failed %d\n", ret); 706 "Send disconnect failed %d\n", ret);
@@ -665,7 +731,8 @@ static int nbd_clear_sock(struct nbd_device *nbd, struct block_device *bdev)
665{ 731{
666 sock_shutdown(nbd); 732 sock_shutdown(nbd);
667 nbd_clear_que(nbd); 733 nbd_clear_que(nbd);
668 kill_bdev(bdev); 734
735 __invalidate_device(bdev, true);
669 nbd_bdev_reset(bdev); 736 nbd_bdev_reset(bdev);
670 /* 737 /*
671 * We want to give the run thread a chance to wait for everybody 738 * We want to give the run thread a chance to wait for everybody
@@ -781,7 +848,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
781 nbd_size_set(nbd, bdev, nbd->blksize, arg); 848 nbd_size_set(nbd, bdev, nbd->blksize, arg);
782 return 0; 849 return 0;
783 case NBD_SET_TIMEOUT: 850 case NBD_SET_TIMEOUT:
784 nbd->tag_set.timeout = arg * HZ; 851 if (arg) {
852 nbd->tag_set.timeout = arg * HZ;
853 blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
854 }
785 return 0; 855 return 0;
786 856
787 case NBD_SET_FLAGS: 857 case NBD_SET_FLAGS:
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 4a99ac756f08..9959c762da2f 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -55,6 +55,7 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
55struct amd768_priv { 55struct amd768_priv {
56 void __iomem *iobase; 56 void __iomem *iobase;
57 struct pci_dev *pcidev; 57 struct pci_dev *pcidev;
58 u32 pmbase;
58}; 59};
59 60
60static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) 61static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -148,33 +149,58 @@ found:
148 if (pmbase == 0) 149 if (pmbase == 0)
149 return -EIO; 150 return -EIO;
150 151
151 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 152 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
152 if (!priv) 153 if (!priv)
153 return -ENOMEM; 154 return -ENOMEM;
154 155
155 if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET, 156 if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
156 PMBASE_SIZE, DRV_NAME)) {
157 dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", 157 dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
158 pmbase + 0xF0); 158 pmbase + 0xF0);
159 return -EBUSY; 159 err = -EBUSY;
160 goto out;
160 } 161 }
161 162
162 priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET, 163 priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
163 PMBASE_SIZE);
164 if (!priv->iobase) { 164 if (!priv->iobase) {
165 pr_err(DRV_NAME "Cannot map ioport\n"); 165 pr_err(DRV_NAME "Cannot map ioport\n");
166 return -ENOMEM; 166 err = -EINVAL;
167 goto err_iomap;
167 } 168 }
168 169
169 amd_rng.priv = (unsigned long)priv; 170 amd_rng.priv = (unsigned long)priv;
171 priv->pmbase = pmbase;
170 priv->pcidev = pdev; 172 priv->pcidev = pdev;
171 173
172 pr_info(DRV_NAME " detected\n"); 174 pr_info(DRV_NAME " detected\n");
173 return devm_hwrng_register(&pdev->dev, &amd_rng); 175 err = hwrng_register(&amd_rng);
176 if (err) {
177 pr_err(DRV_NAME " registering failed (%d)\n", err);
178 goto err_hwrng;
179 }
180 return 0;
181
182err_hwrng:
183 ioport_unmap(priv->iobase);
184err_iomap:
185 release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
186out:
187 kfree(priv);
188 return err;
174} 189}
175 190
176static void __exit mod_exit(void) 191static void __exit mod_exit(void)
177{ 192{
193 struct amd768_priv *priv;
194
195 priv = (struct amd768_priv *)amd_rng.priv;
196
197 hwrng_unregister(&amd_rng);
198
199 ioport_unmap(priv->iobase);
200
201 release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
202
203 kfree(priv);
178} 204}
179 205
180module_init(mod_init); 206module_init(mod_init);
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index e7a245942029..e1d421a36a13 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -31,6 +31,9 @@
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/pci.h> 32#include <linux/pci.h>
33 33
34
35#define PFX KBUILD_MODNAME ": "
36
34#define GEODE_RNG_DATA_REG 0x50 37#define GEODE_RNG_DATA_REG 0x50
35#define GEODE_RNG_STATUS_REG 0x54 38#define GEODE_RNG_STATUS_REG 0x54
36 39
@@ -82,6 +85,7 @@ static struct hwrng geode_rng = {
82 85
83static int __init mod_init(void) 86static int __init mod_init(void)
84{ 87{
88 int err = -ENODEV;
85 struct pci_dev *pdev = NULL; 89 struct pci_dev *pdev = NULL;
86 const struct pci_device_id *ent; 90 const struct pci_device_id *ent;
87 void __iomem *mem; 91 void __iomem *mem;
@@ -89,27 +93,43 @@ static int __init mod_init(void)
89 93
90 for_each_pci_dev(pdev) { 94 for_each_pci_dev(pdev) {
91 ent = pci_match_id(pci_tbl, pdev); 95 ent = pci_match_id(pci_tbl, pdev);
92 if (ent) { 96 if (ent)
93 rng_base = pci_resource_start(pdev, 0); 97 goto found;
94 if (rng_base == 0)
95 return -ENODEV;
96
97 mem = devm_ioremap(&pdev->dev, rng_base, 0x58);
98 if (!mem)
99 return -ENOMEM;
100 geode_rng.priv = (unsigned long)mem;
101
102 pr_info("AMD Geode RNG detected\n");
103 return devm_hwrng_register(&pdev->dev, &geode_rng);
104 }
105 } 98 }
106
107 /* Device not found. */ 99 /* Device not found. */
108 return -ENODEV; 100 goto out;
101
102found:
103 rng_base = pci_resource_start(pdev, 0);
104 if (rng_base == 0)
105 goto out;
106 err = -ENOMEM;
107 mem = ioremap(rng_base, 0x58);
108 if (!mem)
109 goto out;
110 geode_rng.priv = (unsigned long)mem;
111
112 pr_info("AMD Geode RNG detected\n");
113 err = hwrng_register(&geode_rng);
114 if (err) {
115 pr_err(PFX "RNG registering failed (%d)\n",
116 err);
117 goto err_unmap;
118 }
119out:
120 return err;
121
122err_unmap:
123 iounmap(mem);
124 goto out;
109} 125}
110 126
111static void __exit mod_exit(void) 127static void __exit mod_exit(void)
112{ 128{
129 void __iomem *mem = (void __iomem *)geode_rng.priv;
130
131 hwrng_unregister(&geode_rng);
132 iounmap(mem);
113} 133}
114 134
115module_init(mod_init); 135module_init(mod_init);
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 2a558c706581..3e73bcdf9e65 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -84,11 +84,14 @@ struct pp_struct {
84 struct ieee1284_info state; 84 struct ieee1284_info state;
85 struct ieee1284_info saved_state; 85 struct ieee1284_info saved_state;
86 long default_inactivity; 86 long default_inactivity;
87 int index;
87}; 88};
88 89
89/* should we use PARDEVICE_MAX here? */ 90/* should we use PARDEVICE_MAX here? */
90static struct device *devices[PARPORT_MAX]; 91static struct device *devices[PARPORT_MAX];
91 92
93static DEFINE_IDA(ida_index);
94
92/* pp_struct.flags bitfields */ 95/* pp_struct.flags bitfields */
93#define PP_CLAIMED (1<<0) 96#define PP_CLAIMED (1<<0)
94#define PP_EXCL (1<<1) 97#define PP_EXCL (1<<1)
@@ -290,7 +293,7 @@ static int register_device(int minor, struct pp_struct *pp)
290 struct pardevice *pdev = NULL; 293 struct pardevice *pdev = NULL;
291 char *name; 294 char *name;
292 struct pardev_cb ppdev_cb; 295 struct pardev_cb ppdev_cb;
293 int rc = 0; 296 int rc = 0, index;
294 297
295 name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); 298 name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
296 if (name == NULL) 299 if (name == NULL)
@@ -303,20 +306,23 @@ static int register_device(int minor, struct pp_struct *pp)
303 goto err; 306 goto err;
304 } 307 }
305 308
309 index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
306 memset(&ppdev_cb, 0, sizeof(ppdev_cb)); 310 memset(&ppdev_cb, 0, sizeof(ppdev_cb));
307 ppdev_cb.irq_func = pp_irq; 311 ppdev_cb.irq_func = pp_irq;
308 ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; 312 ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
309 ppdev_cb.private = pp; 313 ppdev_cb.private = pp;
310 pdev = parport_register_dev_model(port, name, &ppdev_cb, minor); 314 pdev = parport_register_dev_model(port, name, &ppdev_cb, index);
311 parport_put_port(port); 315 parport_put_port(port);
312 316
313 if (!pdev) { 317 if (!pdev) {
314 pr_warn("%s: failed to register device!\n", name); 318 pr_warn("%s: failed to register device!\n", name);
315 rc = -ENXIO; 319 rc = -ENXIO;
320 ida_simple_remove(&ida_index, index);
316 goto err; 321 goto err;
317 } 322 }
318 323
319 pp->pdev = pdev; 324 pp->pdev = pdev;
325 pp->index = index;
320 dev_dbg(&pdev->dev, "registered pardevice\n"); 326 dev_dbg(&pdev->dev, "registered pardevice\n");
321err: 327err:
322 kfree(name); 328 kfree(name);
@@ -755,6 +761,7 @@ static int pp_release(struct inode *inode, struct file *file)
755 761
756 if (pp->pdev) { 762 if (pp->pdev) {
757 parport_unregister_device(pp->pdev); 763 parport_unregister_device(pp->pdev);
764 ida_simple_remove(&ida_index, pp->index);
758 pp->pdev = NULL; 765 pp->pdev = NULL;
759 pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); 766 pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
760 } 767 }
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 0fb39fe217d1..67201f67a14a 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2502,7 +2502,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
2502 2502
2503 clk->core = hw->core; 2503 clk->core = hw->core;
2504 clk->dev_id = dev_id; 2504 clk->dev_id = dev_id;
2505 clk->con_id = con_id; 2505 clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
2506 clk->max_rate = ULONG_MAX; 2506 clk->max_rate = ULONG_MAX;
2507 2507
2508 clk_prepare_lock(); 2508 clk_prepare_lock();
@@ -2518,6 +2518,7 @@ void __clk_free_clk(struct clk *clk)
2518 hlist_del(&clk->clks_node); 2518 hlist_del(&clk->clks_node);
2519 clk_prepare_unlock(); 2519 clk_prepare_unlock();
2520 2520
2521 kfree_const(clk->con_id);
2521 kfree(clk); 2522 kfree(clk);
2522} 2523}
2523 2524
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index 924f560dcf80..00d4150e33c3 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -127,7 +127,7 @@ PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_ddr" };
127PNAME(mux_pll_src_3plls_p) = { "apll", "dpll", "gpll" }; 127PNAME(mux_pll_src_3plls_p) = { "apll", "dpll", "gpll" };
128PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" }; 128PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" };
129 129
130PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll" "usb480m" }; 130PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll", "usb480m" };
131 131
132PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" }; 132PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" };
133PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" }; 133PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
@@ -450,6 +450,13 @@ static void __init rk3036_clk_init(struct device_node *np)
450 return; 450 return;
451 } 451 }
452 452
453 /*
454 * Make uart_pll_clk a child of the gpll, as all other sources are
455 * not that usable / stable.
456 */
457 writel_relaxed(HIWORD_UPDATE(0x2, 0x3, 10),
458 reg_base + RK2928_CLKSEL_CON(13));
459
453 ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); 460 ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
454 if (IS_ERR(ctx)) { 461 if (IS_ERR(ctx)) {
455 pr_err("%s: rockchip clk init failed\n", __func__); 462 pr_err("%s: rockchip clk init failed\n", __func__);
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 695bbf9ef428..72109d2cf41b 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -80,6 +80,7 @@ config SUN6I_A31_CCU
80 select SUNXI_CCU_DIV 80 select SUNXI_CCU_DIV
81 select SUNXI_CCU_NK 81 select SUNXI_CCU_NK
82 select SUNXI_CCU_NKM 82 select SUNXI_CCU_NKM
83 select SUNXI_CCU_NKMP
83 select SUNXI_CCU_NM 84 select SUNXI_CCU_NM
84 select SUNXI_CCU_MP 85 select SUNXI_CCU_MP
85 select SUNXI_CCU_PHASE 86 select SUNXI_CCU_PHASE
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index e3c084cc6da5..f54114c607df 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -566,7 +566,7 @@ static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
566 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT); 566 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT);
567 567
568/* Fixed Factor clocks */ 568/* Fixed Factor clocks */
569static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 1, 2, 0); 569static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 2, 1, 0);
570 570
571/* We hardcode the divider to 4 for now */ 571/* We hardcode the divider to 4 for now */
572static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", 572static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 4c9a920ff4ab..89e68d29bf45 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -608,7 +608,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents,
608 0x150, 0, 4, 24, 2, BIT(31), 608 0x150, 0, 4, 24, 2, BIT(31),
609 CLK_SET_RATE_PARENT); 609 CLK_SET_RATE_PARENT);
610 610
611static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(31), 0); 611static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0);
612 612
613static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0); 613static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0);
614 614
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 22c2ca7a2a22..b583f186a804 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -85,6 +85,10 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
85 unsigned int m, p; 85 unsigned int m, p;
86 u32 reg; 86 u32 reg;
87 87
88 /* Adjust parent_rate according to pre-dividers */
89 ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
90 -1, &parent_rate);
91
88 reg = readl(cmp->common.base + cmp->common.reg); 92 reg = readl(cmp->common.base + cmp->common.reg);
89 93
90 m = reg >> cmp->m.shift; 94 m = reg >> cmp->m.shift;
@@ -117,6 +121,10 @@ static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
117 unsigned int m, p; 121 unsigned int m, p;
118 u32 reg; 122 u32 reg;
119 123
124 /* Adjust parent_rate according to pre-dividers */
125 ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
126 -1, &parent_rate);
127
120 max_m = cmp->m.max ?: 1 << cmp->m.width; 128 max_m = cmp->m.max ?: 1 << cmp->m.width;
121 max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1); 129 max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
122 130
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index a2b40a000157..488055ed944f 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -107,7 +107,7 @@ static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
107 p = reg >> nkmp->p.shift; 107 p = reg >> nkmp->p.shift;
108 p &= (1 << nkmp->p.width) - 1; 108 p &= (1 << nkmp->p.width) - 1;
109 109
110 return parent_rate * n * k >> p / m; 110 return (parent_rate * n * k >> p) / m;
111} 111}
112 112
113static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate, 113static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/clocksource/clkevt-probe.c b/drivers/clocksource/clkevt-probe.c
index 8c30fec86094..eb89b502acbd 100644
--- a/drivers/clocksource/clkevt-probe.c
+++ b/drivers/clocksource/clkevt-probe.c
@@ -17,7 +17,7 @@
17 17
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/of.h> 19#include <linux/of.h>
20#include <linux/clockchip.h> 20#include <linux/clockchips.h>
21 21
22extern struct of_device_id __clkevt_of_table[]; 22extern struct of_device_id __clkevt_of_table[];
23 23
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b8ff617d449d..bc96d423781a 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -918,11 +918,19 @@ static struct kobj_type ktype_cpufreq = {
918 .release = cpufreq_sysfs_release, 918 .release = cpufreq_sysfs_release,
919}; 919};
920 920
921static int add_cpu_dev_symlink(struct cpufreq_policy *policy, 921static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
922 struct device *dev)
923{ 922{
923 struct device *dev = get_cpu_device(cpu);
924
925 if (!dev)
926 return;
927
928 if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
929 return;
930
924 dev_dbg(dev, "%s: Adding symlink\n", __func__); 931 dev_dbg(dev, "%s: Adding symlink\n", __func__);
925 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); 932 if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
933 dev_err(dev, "cpufreq symlink creation failed\n");
926} 934}
927 935
928static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, 936static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
@@ -1180,10 +1188,13 @@ static int cpufreq_online(unsigned int cpu)
1180 policy->user_policy.min = policy->min; 1188 policy->user_policy.min = policy->min;
1181 policy->user_policy.max = policy->max; 1189 policy->user_policy.max = policy->max;
1182 1190
1183 write_lock_irqsave(&cpufreq_driver_lock, flags); 1191 for_each_cpu(j, policy->related_cpus) {
1184 for_each_cpu(j, policy->related_cpus)
1185 per_cpu(cpufreq_cpu_data, j) = policy; 1192 per_cpu(cpufreq_cpu_data, j) = policy;
1186 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1193 add_cpu_dev_symlink(policy, j);
1194 }
1195 } else {
1196 policy->min = policy->user_policy.min;
1197 policy->max = policy->user_policy.max;
1187 } 1198 }
1188 1199
1189 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 1200 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
@@ -1272,13 +1283,15 @@ out_exit_policy:
1272 1283
1273 if (cpufreq_driver->exit) 1284 if (cpufreq_driver->exit)
1274 cpufreq_driver->exit(policy); 1285 cpufreq_driver->exit(policy);
1286
1287 for_each_cpu(j, policy->real_cpus)
1288 remove_cpu_dev_symlink(policy, get_cpu_device(j));
1289
1275out_free_policy: 1290out_free_policy:
1276 cpufreq_policy_free(policy); 1291 cpufreq_policy_free(policy);
1277 return ret; 1292 return ret;
1278} 1293}
1279 1294
1280static int cpufreq_offline(unsigned int cpu);
1281
1282/** 1295/**
1283 * cpufreq_add_dev - the cpufreq interface for a CPU device. 1296 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1284 * @dev: CPU device. 1297 * @dev: CPU device.
@@ -1300,16 +1313,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1300 1313
1301 /* Create sysfs link on CPU registration */ 1314 /* Create sysfs link on CPU registration */
1302 policy = per_cpu(cpufreq_cpu_data, cpu); 1315 policy = per_cpu(cpufreq_cpu_data, cpu);
1303 if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus)) 1316 if (policy)
1304 return 0; 1317 add_cpu_dev_symlink(policy, cpu);
1305 1318
1306 ret = add_cpu_dev_symlink(policy, dev); 1319 return 0;
1307 if (ret) {
1308 cpumask_clear_cpu(cpu, policy->real_cpus);
1309 cpufreq_offline(cpu);
1310 }
1311
1312 return ret;
1313} 1320}
1314 1321
1315static int cpufreq_offline(unsigned int cpu) 1322static int cpufreq_offline(unsigned int cpu)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 08e134ffba68..283491f742d3 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -364,9 +364,7 @@ static bool driver_registered __read_mostly;
364static bool acpi_ppc; 364static bool acpi_ppc;
365#endif 365#endif
366 366
367static struct perf_limits performance_limits; 367static struct perf_limits global;
368static struct perf_limits powersave_limits;
369static struct perf_limits *limits;
370 368
371static void intel_pstate_init_limits(struct perf_limits *limits) 369static void intel_pstate_init_limits(struct perf_limits *limits)
372{ 370{
@@ -377,14 +375,6 @@ static void intel_pstate_init_limits(struct perf_limits *limits)
377 limits->max_sysfs_pct = 100; 375 limits->max_sysfs_pct = 100;
378} 376}
379 377
380static void intel_pstate_set_performance_limits(struct perf_limits *limits)
381{
382 intel_pstate_init_limits(limits);
383 limits->min_perf_pct = 100;
384 limits->min_perf = int_ext_tofp(1);
385 limits->min_sysfs_pct = 100;
386}
387
388static DEFINE_MUTEX(intel_pstate_driver_lock); 378static DEFINE_MUTEX(intel_pstate_driver_lock);
389static DEFINE_MUTEX(intel_pstate_limits_lock); 379static DEFINE_MUTEX(intel_pstate_limits_lock);
390 380
@@ -507,7 +497,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
507 * correct max turbo frequency based on the turbo state. 497 * correct max turbo frequency based on the turbo state.
508 * Also need to convert to MHz as _PSS freq is in MHz. 498 * Also need to convert to MHz as _PSS freq is in MHz.
509 */ 499 */
510 if (!limits->turbo_disabled) 500 if (!global.turbo_disabled)
511 cpu->acpi_perf_data.states[0].core_frequency = 501 cpu->acpi_perf_data.states[0].core_frequency =
512 policy->cpuinfo.max_freq / 1000; 502 policy->cpuinfo.max_freq / 1000;
513 cpu->valid_pss_table = true; 503 cpu->valid_pss_table = true;
@@ -626,7 +616,7 @@ static inline void update_turbo_state(void)
626 616
627 cpu = all_cpu_data[0]; 617 cpu = all_cpu_data[0];
628 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); 618 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
629 limits->turbo_disabled = 619 global.turbo_disabled =
630 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || 620 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
631 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 621 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
632} 622}
@@ -851,7 +841,7 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
851static void intel_pstate_hwp_set(struct cpufreq_policy *policy) 841static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
852{ 842{
853 int min, hw_min, max, hw_max, cpu; 843 int min, hw_min, max, hw_max, cpu;
854 struct perf_limits *perf_limits = limits; 844 struct perf_limits *perf_limits = &global;
855 u64 value, cap; 845 u64 value, cap;
856 846
857 for_each_cpu(cpu, policy->cpus) { 847 for_each_cpu(cpu, policy->cpus) {
@@ -863,19 +853,22 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
863 853
864 rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); 854 rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
865 hw_min = HWP_LOWEST_PERF(cap); 855 hw_min = HWP_LOWEST_PERF(cap);
866 if (limits->no_turbo) 856 if (global.no_turbo)
867 hw_max = HWP_GUARANTEED_PERF(cap); 857 hw_max = HWP_GUARANTEED_PERF(cap);
868 else 858 else
869 hw_max = HWP_HIGHEST_PERF(cap); 859 hw_max = HWP_HIGHEST_PERF(cap);
870 860
871 min = fp_ext_toint(hw_max * perf_limits->min_perf); 861 max = fp_ext_toint(hw_max * perf_limits->max_perf);
862 if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
863 min = max;
864 else
865 min = fp_ext_toint(hw_max * perf_limits->min_perf);
872 866
873 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); 867 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
874 868
875 value &= ~HWP_MIN_PERF(~0L); 869 value &= ~HWP_MIN_PERF(~0L);
876 value |= HWP_MIN_PERF(min); 870 value |= HWP_MIN_PERF(min);
877 871
878 max = fp_ext_toint(hw_max * perf_limits->max_perf);
879 value &= ~HWP_MAX_PERF(~0L); 872 value &= ~HWP_MAX_PERF(~0L);
880 value |= HWP_MAX_PERF(max); 873 value |= HWP_MAX_PERF(max);
881 874
@@ -968,20 +961,11 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
968} 961}
969 962
970static void intel_pstate_update_policies(void) 963static void intel_pstate_update_policies(void)
971 __releases(&intel_pstate_limits_lock)
972 __acquires(&intel_pstate_limits_lock)
973{ 964{
974 struct perf_limits *saved_limits = limits;
975 int cpu; 965 int cpu;
976 966
977 mutex_unlock(&intel_pstate_limits_lock);
978
979 for_each_possible_cpu(cpu) 967 for_each_possible_cpu(cpu)
980 cpufreq_update_policy(cpu); 968 cpufreq_update_policy(cpu);
981
982 mutex_lock(&intel_pstate_limits_lock);
983
984 limits = saved_limits;
985} 969}
986 970
987/************************** debugfs begin ************************/ 971/************************** debugfs begin ************************/
@@ -1060,7 +1044,7 @@ static void intel_pstate_debug_hide_params(void)
1060 static ssize_t show_##file_name \ 1044 static ssize_t show_##file_name \
1061 (struct kobject *kobj, struct attribute *attr, char *buf) \ 1045 (struct kobject *kobj, struct attribute *attr, char *buf) \
1062 { \ 1046 { \
1063 return sprintf(buf, "%u\n", limits->object); \ 1047 return sprintf(buf, "%u\n", global.object); \
1064 } 1048 }
1065 1049
1066static ssize_t intel_pstate_show_status(char *buf); 1050static ssize_t intel_pstate_show_status(char *buf);
@@ -1151,10 +1135,10 @@ static ssize_t show_no_turbo(struct kobject *kobj,
1151 } 1135 }
1152 1136
1153 update_turbo_state(); 1137 update_turbo_state();
1154 if (limits->turbo_disabled) 1138 if (global.turbo_disabled)
1155 ret = sprintf(buf, "%u\n", limits->turbo_disabled); 1139 ret = sprintf(buf, "%u\n", global.turbo_disabled);
1156 else 1140 else
1157 ret = sprintf(buf, "%u\n", limits->no_turbo); 1141 ret = sprintf(buf, "%u\n", global.no_turbo);
1158 1142
1159 mutex_unlock(&intel_pstate_driver_lock); 1143 mutex_unlock(&intel_pstate_driver_lock);
1160 1144
@@ -1181,19 +1165,19 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
1181 mutex_lock(&intel_pstate_limits_lock); 1165 mutex_lock(&intel_pstate_limits_lock);
1182 1166
1183 update_turbo_state(); 1167 update_turbo_state();
1184 if (limits->turbo_disabled) { 1168 if (global.turbo_disabled) {
1185 pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); 1169 pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
1186 mutex_unlock(&intel_pstate_limits_lock); 1170 mutex_unlock(&intel_pstate_limits_lock);
1187 mutex_unlock(&intel_pstate_driver_lock); 1171 mutex_unlock(&intel_pstate_driver_lock);
1188 return -EPERM; 1172 return -EPERM;
1189 } 1173 }
1190 1174
1191 limits->no_turbo = clamp_t(int, input, 0, 1); 1175 global.no_turbo = clamp_t(int, input, 0, 1);
1192
1193 intel_pstate_update_policies();
1194 1176
1195 mutex_unlock(&intel_pstate_limits_lock); 1177 mutex_unlock(&intel_pstate_limits_lock);
1196 1178
1179 intel_pstate_update_policies();
1180
1197 mutex_unlock(&intel_pstate_driver_lock); 1181 mutex_unlock(&intel_pstate_driver_lock);
1198 1182
1199 return count; 1183 return count;
@@ -1218,19 +1202,16 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
1218 1202
1219 mutex_lock(&intel_pstate_limits_lock); 1203 mutex_lock(&intel_pstate_limits_lock);
1220 1204
1221 limits->max_sysfs_pct = clamp_t(int, input, 0 , 100); 1205 global.max_sysfs_pct = clamp_t(int, input, 0 , 100);
1222 limits->max_perf_pct = min(limits->max_policy_pct, 1206 global.max_perf_pct = min(global.max_policy_pct, global.max_sysfs_pct);
1223 limits->max_sysfs_pct); 1207 global.max_perf_pct = max(global.min_policy_pct, global.max_perf_pct);
1224 limits->max_perf_pct = max(limits->min_policy_pct, 1208 global.max_perf_pct = max(global.min_perf_pct, global.max_perf_pct);
1225 limits->max_perf_pct); 1209 global.max_perf = percent_ext_fp(global.max_perf_pct);
1226 limits->max_perf_pct = max(limits->min_perf_pct,
1227 limits->max_perf_pct);
1228 limits->max_perf = percent_ext_fp(limits->max_perf_pct);
1229
1230 intel_pstate_update_policies();
1231 1210
1232 mutex_unlock(&intel_pstate_limits_lock); 1211 mutex_unlock(&intel_pstate_limits_lock);
1233 1212
1213 intel_pstate_update_policies();
1214
1234 mutex_unlock(&intel_pstate_driver_lock); 1215 mutex_unlock(&intel_pstate_driver_lock);
1235 1216
1236 return count; 1217 return count;
@@ -1255,19 +1236,16 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
1255 1236
1256 mutex_lock(&intel_pstate_limits_lock); 1237 mutex_lock(&intel_pstate_limits_lock);
1257 1238
1258 limits->min_sysfs_pct = clamp_t(int, input, 0 , 100); 1239 global.min_sysfs_pct = clamp_t(int, input, 0 , 100);
1259 limits->min_perf_pct = max(limits->min_policy_pct, 1240 global.min_perf_pct = max(global.min_policy_pct, global.min_sysfs_pct);
1260 limits->min_sysfs_pct); 1241 global.min_perf_pct = min(global.max_policy_pct, global.min_perf_pct);
1261 limits->min_perf_pct = min(limits->max_policy_pct, 1242 global.min_perf_pct = min(global.max_perf_pct, global.min_perf_pct);
1262 limits->min_perf_pct); 1243 global.min_perf = percent_ext_fp(global.min_perf_pct);
1263 limits->min_perf_pct = min(limits->max_perf_pct,
1264 limits->min_perf_pct);
1265 limits->min_perf = percent_ext_fp(limits->min_perf_pct);
1266
1267 intel_pstate_update_policies();
1268 1244
1269 mutex_unlock(&intel_pstate_limits_lock); 1245 mutex_unlock(&intel_pstate_limits_lock);
1270 1246
1247 intel_pstate_update_policies();
1248
1271 mutex_unlock(&intel_pstate_driver_lock); 1249 mutex_unlock(&intel_pstate_driver_lock);
1272 1250
1273 return count; 1251 return count;
@@ -1387,7 +1365,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
1387 u32 vid; 1365 u32 vid;
1388 1366
1389 val = (u64)pstate << 8; 1367 val = (u64)pstate << 8;
1390 if (limits->no_turbo && !limits->turbo_disabled) 1368 if (global.no_turbo && !global.turbo_disabled)
1391 val |= (u64)1 << 32; 1369 val |= (u64)1 << 32;
1392 1370
1393 vid_fp = cpudata->vid.min + mul_fp( 1371 vid_fp = cpudata->vid.min + mul_fp(
@@ -1557,7 +1535,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
1557 u64 val; 1535 u64 val;
1558 1536
1559 val = (u64)pstate << 8; 1537 val = (u64)pstate << 8;
1560 if (limits->no_turbo && !limits->turbo_disabled) 1538 if (global.no_turbo && !global.turbo_disabled)
1561 val |= (u64)1 << 32; 1539 val |= (u64)1 << 32;
1562 1540
1563 return val; 1541 return val;
@@ -1683,9 +1661,9 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
1683 int max_perf = cpu->pstate.turbo_pstate; 1661 int max_perf = cpu->pstate.turbo_pstate;
1684 int max_perf_adj; 1662 int max_perf_adj;
1685 int min_perf; 1663 int min_perf;
1686 struct perf_limits *perf_limits = limits; 1664 struct perf_limits *perf_limits = &global;
1687 1665
1688 if (limits->no_turbo || limits->turbo_disabled) 1666 if (global.no_turbo || global.turbo_disabled)
1689 max_perf = cpu->pstate.max_pstate; 1667 max_perf = cpu->pstate.max_pstate;
1690 1668
1691 if (per_cpu_limits) 1669 if (per_cpu_limits)
@@ -1820,7 +1798,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
1820 1798
1821 sample->busy_scaled = busy_frac * 100; 1799 sample->busy_scaled = busy_frac * 100;
1822 1800
1823 target = limits->no_turbo || limits->turbo_disabled ? 1801 target = global.no_turbo || global.turbo_disabled ?
1824 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 1802 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
1825 target += target >> 2; 1803 target += target >> 2;
1826 target = mul_fp(target, busy_frac); 1804 target = mul_fp(target, busy_frac);
@@ -2116,7 +2094,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
2116static int intel_pstate_set_policy(struct cpufreq_policy *policy) 2094static int intel_pstate_set_policy(struct cpufreq_policy *policy)
2117{ 2095{
2118 struct cpudata *cpu; 2096 struct cpudata *cpu;
2119 struct perf_limits *perf_limits = NULL; 2097 struct perf_limits *perf_limits = &global;
2120 2098
2121 if (!policy->cpuinfo.max_freq) 2099 if (!policy->cpuinfo.max_freq)
2122 return -ENODEV; 2100 return -ENODEV;
@@ -2139,21 +2117,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
2139 2117
2140 mutex_lock(&intel_pstate_limits_lock); 2118 mutex_lock(&intel_pstate_limits_lock);
2141 2119
2142 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
2143 pr_debug("set performance\n");
2144 if (!perf_limits) {
2145 limits = &performance_limits;
2146 perf_limits = limits;
2147 }
2148 } else {
2149 pr_debug("set powersave\n");
2150 if (!perf_limits) {
2151 limits = &powersave_limits;
2152 perf_limits = limits;
2153 }
2154
2155 }
2156
2157 intel_pstate_update_perf_limits(policy, perf_limits); 2120 intel_pstate_update_perf_limits(policy, perf_limits);
2158 2121
2159 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { 2122 if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
@@ -2177,16 +2140,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
2177static int intel_pstate_verify_policy(struct cpufreq_policy *policy) 2140static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
2178{ 2141{
2179 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2142 struct cpudata *cpu = all_cpu_data[policy->cpu];
2180 struct perf_limits *perf_limits;
2181
2182 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
2183 perf_limits = &performance_limits;
2184 else
2185 perf_limits = &powersave_limits;
2186 2143
2187 update_turbo_state(); 2144 update_turbo_state();
2188 policy->cpuinfo.max_freq = perf_limits->turbo_disabled || 2145 policy->cpuinfo.max_freq = global.turbo_disabled || global.no_turbo ?
2189 perf_limits->no_turbo ?
2190 cpu->pstate.max_freq : 2146 cpu->pstate.max_freq :
2191 cpu->pstate.turbo_freq; 2147 cpu->pstate.turbo_freq;
2192 2148
@@ -2201,9 +2157,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
2201 unsigned int max_freq, min_freq; 2157 unsigned int max_freq, min_freq;
2202 2158
2203 max_freq = policy->cpuinfo.max_freq * 2159 max_freq = policy->cpuinfo.max_freq *
2204 perf_limits->max_sysfs_pct / 100; 2160 global.max_sysfs_pct / 100;
2205 min_freq = policy->cpuinfo.max_freq * 2161 min_freq = policy->cpuinfo.max_freq *
2206 perf_limits->min_sysfs_pct / 100; 2162 global.min_sysfs_pct / 100;
2207 cpufreq_verify_within_limits(policy, min_freq, max_freq); 2163 cpufreq_verify_within_limits(policy, min_freq, max_freq);
2208 } 2164 }
2209 2165
@@ -2255,7 +2211,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
2255 /* cpuinfo and default policy values */ 2211 /* cpuinfo and default policy values */
2256 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; 2212 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
2257 update_turbo_state(); 2213 update_turbo_state();
2258 policy->cpuinfo.max_freq = limits->turbo_disabled ? 2214 policy->cpuinfo.max_freq = global.turbo_disabled ?
2259 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; 2215 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
2260 policy->cpuinfo.max_freq *= cpu->pstate.scaling; 2216 policy->cpuinfo.max_freq *= cpu->pstate.scaling;
2261 2217
@@ -2275,7 +2231,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
2275 return ret; 2231 return ret;
2276 2232
2277 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 2233 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
2278 if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100) 2234 if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
2279 policy->policy = CPUFREQ_POLICY_PERFORMANCE; 2235 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
2280 else 2236 else
2281 policy->policy = CPUFREQ_POLICY_POWERSAVE; 2237 policy->policy = CPUFREQ_POLICY_POWERSAVE;
@@ -2301,7 +2257,7 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
2301 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2257 struct cpudata *cpu = all_cpu_data[policy->cpu];
2302 2258
2303 update_turbo_state(); 2259 update_turbo_state();
2304 policy->cpuinfo.max_freq = limits->turbo_disabled ? 2260 policy->cpuinfo.max_freq = global.no_turbo || global.turbo_disabled ?
2305 cpu->pstate.max_freq : cpu->pstate.turbo_freq; 2261 cpu->pstate.max_freq : cpu->pstate.turbo_freq;
2306 2262
2307 cpufreq_verify_within_cpu_limits(policy); 2263 cpufreq_verify_within_cpu_limits(policy);
@@ -2309,26 +2265,6 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
2309 return 0; 2265 return 0;
2310} 2266}
2311 2267
2312static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
2313 struct cpufreq_policy *policy,
2314 unsigned int target_freq)
2315{
2316 unsigned int max_freq;
2317
2318 update_turbo_state();
2319
2320 max_freq = limits->no_turbo || limits->turbo_disabled ?
2321 cpu->pstate.max_freq : cpu->pstate.turbo_freq;
2322 policy->cpuinfo.max_freq = max_freq;
2323 if (policy->max > max_freq)
2324 policy->max = max_freq;
2325
2326 if (target_freq > max_freq)
2327 target_freq = max_freq;
2328
2329 return target_freq;
2330}
2331
2332static int intel_cpufreq_target(struct cpufreq_policy *policy, 2268static int intel_cpufreq_target(struct cpufreq_policy *policy,
2333 unsigned int target_freq, 2269 unsigned int target_freq,
2334 unsigned int relation) 2270 unsigned int relation)
@@ -2337,8 +2273,10 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
2337 struct cpufreq_freqs freqs; 2273 struct cpufreq_freqs freqs;
2338 int target_pstate; 2274 int target_pstate;
2339 2275
2276 update_turbo_state();
2277
2340 freqs.old = policy->cur; 2278 freqs.old = policy->cur;
2341 freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq); 2279 freqs.new = target_freq;
2342 2280
2343 cpufreq_freq_transition_begin(policy, &freqs); 2281 cpufreq_freq_transition_begin(policy, &freqs);
2344 switch (relation) { 2282 switch (relation) {
@@ -2370,7 +2308,8 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
2370 struct cpudata *cpu = all_cpu_data[policy->cpu]; 2308 struct cpudata *cpu = all_cpu_data[policy->cpu];
2371 int target_pstate; 2309 int target_pstate;
2372 2310
2373 target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq); 2311 update_turbo_state();
2312
2374 target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); 2313 target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
2375 target_pstate = intel_pstate_prepare_request(cpu, target_pstate); 2314 target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
2376 intel_pstate_update_pstate(cpu, target_pstate); 2315 intel_pstate_update_pstate(cpu, target_pstate);
@@ -2425,13 +2364,7 @@ static int intel_pstate_register_driver(void)
2425{ 2364{
2426 int ret; 2365 int ret;
2427 2366
2428 intel_pstate_init_limits(&powersave_limits); 2367 intel_pstate_init_limits(&global);
2429 intel_pstate_set_performance_limits(&performance_limits);
2430 if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) &&
2431 intel_pstate_driver == &intel_pstate)
2432 limits = &performance_limits;
2433 else
2434 limits = &powersave_limits;
2435 2368
2436 ret = cpufreq_register_driver(intel_pstate_driver); 2369 ret = cpufreq_register_driver(intel_pstate_driver);
2437 if (ret) { 2370 if (ret) {
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 370593006f5f..cda8f62d555b 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -175,6 +175,24 @@ static int powernv_cpuidle_driver_init(void)
175 drv->state_count += 1; 175 drv->state_count += 1;
176 } 176 }
177 177
178 /*
179 * On the PowerNV platform cpu_present may be less than cpu_possible in
180 * cases when firmware detects the CPU, but it is not available to the
181 * OS. If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hotplugable at
182 * run time and hence cpu_devices are not created for those CPUs by the
183 * generic topology_init().
184 *
185 * drv->cpumask defaults to cpu_possible_mask in
186 * __cpuidle_driver_init(). This breaks cpuidle on PowerNV where
187 * cpu_devices are not created for CPUs in cpu_possible_mask that
188 * cannot be hot-added later at run time.
189 *
190 * Trying cpuidle_register_device() on a CPU without a cpu_device is
191 * incorrect, so pass a correct CPU mask to the generic cpuidle driver.
192 */
193
194 drv->cpumask = (struct cpumask *)cpu_present_mask;
195
178 return 0; 196 return 0;
179} 197}
180 198
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index c5adc8c9ac43..ae948b1da93a 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -615,6 +615,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
615 struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); 615 struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
616 int error; 616 int error;
617 617
618 /*
619 * Return if cpu_device is not setup for this CPU.
620 *
621 * This could happen if the arch did not set up cpu_device
622 * since this CPU is not in cpu_present mask and the
623 * driver did not send a correct CPU mask during registration.
624 * Without this check we would end up passing bogus
625 * value for &cpu_dev->kobj in kobject_init_and_add()
626 */
627 if (!cpu_dev)
628 return -ENODEV;
629
618 kdev = kzalloc(sizeof(*kdev), GFP_KERNEL); 630 kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
619 if (!kdev) 631 if (!kdev)
620 return -ENOMEM; 632 return -ENOMEM;
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 41cc853f8569..fc08b4ed69d9 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -1015,6 +1015,7 @@ const struct ccp_vdata ccpv5a = {
1015 1015
1016const struct ccp_vdata ccpv5b = { 1016const struct ccp_vdata ccpv5b = {
1017 .version = CCP_VERSION(5, 0), 1017 .version = CCP_VERSION(5, 0),
1018 .dma_chan_attr = DMA_PRIVATE,
1018 .setup = ccp5other_config, 1019 .setup = ccp5other_config,
1019 .perform = &ccp5_actions, 1020 .perform = &ccp5_actions,
1020 .bar = 2, 1021 .bar = 2,
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 511ab042b5e7..92d1c6959f08 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -283,11 +283,14 @@ EXPORT_SYMBOL_GPL(ccp_version);
283 */ 283 */
284int ccp_enqueue_cmd(struct ccp_cmd *cmd) 284int ccp_enqueue_cmd(struct ccp_cmd *cmd)
285{ 285{
286 struct ccp_device *ccp = ccp_get_device(); 286 struct ccp_device *ccp;
287 unsigned long flags; 287 unsigned long flags;
288 unsigned int i; 288 unsigned int i;
289 int ret; 289 int ret;
290 290
291 /* Some commands might need to be sent to a specific device */
292 ccp = cmd->ccp ? cmd->ccp : ccp_get_device();
293
291 if (!ccp) 294 if (!ccp)
292 return -ENODEV; 295 return -ENODEV;
293 296
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 2b5c01fade05..aa36f3f81860 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -179,6 +179,10 @@
179 179
180/* ------------------------ General CCP Defines ------------------------ */ 180/* ------------------------ General CCP Defines ------------------------ */
181 181
182#define CCP_DMA_DFLT 0x0
183#define CCP_DMA_PRIV 0x1
184#define CCP_DMA_PUB 0x2
185
182#define CCP_DMAPOOL_MAX_SIZE 64 186#define CCP_DMAPOOL_MAX_SIZE 64
183#define CCP_DMAPOOL_ALIGN BIT(5) 187#define CCP_DMAPOOL_ALIGN BIT(5)
184 188
@@ -636,6 +640,7 @@ struct ccp_actions {
636/* Structure to hold CCP version-specific values */ 640/* Structure to hold CCP version-specific values */
637struct ccp_vdata { 641struct ccp_vdata {
638 const unsigned int version; 642 const unsigned int version;
643 const unsigned int dma_chan_attr;
639 void (*setup)(struct ccp_device *); 644 void (*setup)(struct ccp_device *);
640 const struct ccp_actions *perform; 645 const struct ccp_actions *perform;
641 const unsigned int bar; 646 const unsigned int bar;
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index e5d9278f4019..e00be01fbf5a 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/module.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/dmaengine.h> 15#include <linux/dmaengine.h>
15#include <linux/spinlock.h> 16#include <linux/spinlock.h>
@@ -25,6 +26,37 @@
25 (mask == 0) ? 64 : fls64(mask); \ 26 (mask == 0) ? 64 : fls64(mask); \
26}) 27})
27 28
29/* The CCP as a DMA provider can be configured for public or private
30 * channels. Default is specified in the vdata for the device (PCI ID).
31 * This module parameter will override for all channels on all devices:
32 * dma_chan_attr = 0x2 to force all channels public
33 * = 0x1 to force all channels private
34 * = 0x0 to defer to the vdata setting
35 * = any other value: warning, revert to 0x0
36 */
37static unsigned int dma_chan_attr = CCP_DMA_DFLT;
38module_param(dma_chan_attr, uint, 0444);
39MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");
40
41unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
42{
43 switch (dma_chan_attr) {
44 case CCP_DMA_DFLT:
45 return ccp->vdata->dma_chan_attr;
46
47 case CCP_DMA_PRIV:
48 return DMA_PRIVATE;
49
50 case CCP_DMA_PUB:
51 return 0;
52
53 default:
54 dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n",
55 dma_chan_attr);
56 return ccp->vdata->dma_chan_attr;
57 }
58}
59
28static void ccp_free_cmd_resources(struct ccp_device *ccp, 60static void ccp_free_cmd_resources(struct ccp_device *ccp,
29 struct list_head *list) 61 struct list_head *list)
30{ 62{
@@ -390,6 +422,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
390 goto err; 422 goto err;
391 423
392 ccp_cmd = &cmd->ccp_cmd; 424 ccp_cmd = &cmd->ccp_cmd;
425 ccp_cmd->ccp = chan->ccp;
393 ccp_pt = &ccp_cmd->u.passthru_nomap; 426 ccp_pt = &ccp_cmd->u.passthru_nomap;
394 ccp_cmd->flags = CCP_CMD_MAY_BACKLOG; 427 ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
395 ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP; 428 ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
@@ -674,6 +707,15 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
674 dma_cap_set(DMA_SG, dma_dev->cap_mask); 707 dma_cap_set(DMA_SG, dma_dev->cap_mask);
675 dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask); 708 dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
676 709
710 /* The DMA channels for this device can be set to public or private,
711 * and overridden by the module parameter dma_chan_attr.
712 * Default: according to the value in vdata (dma_chan_attr=0)
713 * dma_chan_attr=0x1: all channels private (override vdata)
714 * dma_chan_attr=0x2: all channels public (override vdata)
715 */
716 if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
717 dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
718
677 INIT_LIST_HEAD(&dma_dev->channels); 719 INIT_LIST_HEAD(&dma_dev->channels);
678 for (i = 0; i < ccp->cmd_q_count; i++) { 720 for (i = 0; i < ccp->cmd_q_count; i++) {
679 chan = ccp->ccp_dma_chan + i; 721 chan = ccp->ccp_dma_chan + i;
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index e18dc596cf24..6204cc32d09c 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -251,8 +251,11 @@ static void bcm2835_dma_create_cb_set_length(
251 */ 251 */
252 252
253 /* have we filled in period_length yet? */ 253 /* have we filled in period_length yet? */
254 if (*total_len + control_block->length < period_len) 254 if (*total_len + control_block->length < period_len) {
255 /* update number of bytes in this period so far */
256 *total_len += control_block->length;
255 return; 257 return;
258 }
256 259
257 /* calculate the length that remains to reach period_length */ 260 /* calculate the length that remains to reach period_length */
258 control_block->length = period_len - *total_len; 261 control_block->length = period_len - *total_len;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 24e0221fd66d..d9118ec23025 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1108,12 +1108,14 @@ static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
1108 switch (order) { 1108 switch (order) {
1109 case 0 ... 1: 1109 case 0 ... 1:
1110 return &unmap_pool[0]; 1110 return &unmap_pool[0];
1111#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1111 case 2 ... 4: 1112 case 2 ... 4:
1112 return &unmap_pool[1]; 1113 return &unmap_pool[1];
1113 case 5 ... 7: 1114 case 5 ... 7:
1114 return &unmap_pool[2]; 1115 return &unmap_pool[2];
1115 case 8: 1116 case 8:
1116 return &unmap_pool[3]; 1117 return &unmap_pool[3];
1118#endif
1117 default: 1119 default:
1118 BUG(); 1120 BUG();
1119 return NULL; 1121 return NULL;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 82d85cce81f8..4773f2867234 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -43,6 +43,7 @@ config EDAC_LEGACY_SYSFS
43 43
44config EDAC_DEBUG 44config EDAC_DEBUG
45 bool "Debugging" 45 bool "Debugging"
46 select DEBUG_FS
46 help 47 help
47 This turns on debugging information for the entire EDAC subsystem. 48 This turns on debugging information for the entire EDAC subsystem.
48 You do so by inserting edac_module with "edac_debug_level=x." Valid 49 You do so by inserting edac_module with "edac_debug_level=x." Valid
@@ -259,6 +260,15 @@ config EDAC_SKX
259 Support for error detection and correction the Intel 260 Support for error detection and correction the Intel
260 Skylake server Integrated Memory Controllers. 261 Skylake server Integrated Memory Controllers.
261 262
263config EDAC_PND2
264 tristate "Intel Pondicherry2"
265 depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
266 help
267 Support for error detection and correction on the Intel
268 Pondicherry2 Integrated Memory Controller. This SoC IP is
269 first used on the Apollo Lake platform and Denverton
270 micro-server but may appear on others in the future.
271
262config EDAC_MPC85XX 272config EDAC_MPC85XX
263 tristate "Freescale MPC83xx / MPC85xx" 273 tristate "Freescale MPC83xx / MPC85xx"
264 depends on EDAC_MM_EDAC && FSL_SOC 274 depends on EDAC_MM_EDAC && FSL_SOC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 88e472e8b9a9..587107e90996 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_EDAC_I7300) += i7300_edac.o
32obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o 32obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o
33obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o 33obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o
34obj-$(CONFIG_EDAC_SKX) += skx_edac.o 34obj-$(CONFIG_EDAC_SKX) += skx_edac.o
35obj-$(CONFIG_EDAC_PND2) += pnd2_edac.o
35obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o 36obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
36obj-$(CONFIG_EDAC_E752X) += e752x_edac.o 37obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
37obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o 38obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 1670d27bcac8..f683919981b0 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1293,7 +1293,7 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
1293 dimm->mtype = MEM_FB_DDR2; 1293 dimm->mtype = MEM_FB_DDR2;
1294 1294
1295 /* ask what device type on this row */ 1295 /* ask what device type on this row */
1296 if (MTR_DRAM_WIDTH(mtr)) 1296 if (MTR_DRAM_WIDTH(mtr) == 8)
1297 dimm->dtype = DEV_X8; 1297 dimm->dtype = DEV_X8;
1298 else 1298 else
1299 dimm->dtype = DEV_X4; 1299 dimm->dtype = DEV_X4;
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index abf6ef22e220..37a9ba71da44 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -1207,13 +1207,14 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
1207 1207
1208 dimm->nr_pages = size_mb << 8; 1208 dimm->nr_pages = size_mb << 8;
1209 dimm->grain = 8; 1209 dimm->grain = 8;
1210 dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4; 1210 dimm->dtype = MTR_DRAM_WIDTH(mtr) == 8 ?
1211 DEV_X8 : DEV_X4;
1211 dimm->mtype = MEM_FB_DDR2; 1212 dimm->mtype = MEM_FB_DDR2;
1212 /* 1213 /*
1213 * The eccc mechanism is SDDC (aka SECC), with 1214 * The eccc mechanism is SDDC (aka SECC), with
1214 * is similar to Chipkill. 1215 * is similar to Chipkill.
1215 */ 1216 */
1216 dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ? 1217 dimm->edac_mode = MTR_DRAM_WIDTH(mtr) == 8 ?
1217 EDAC_S8ECD8ED : EDAC_S4ECD4ED; 1218 EDAC_S8ECD8ED : EDAC_S4ECD4ED;
1218 ndimms++; 1219 ndimms++;
1219 } 1220 }
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
new file mode 100644
index 000000000000..928e0dba41fc
--- /dev/null
+++ b/drivers/edac/pnd2_edac.c
@@ -0,0 +1,1546 @@
1/*
2 * Driver for Pondicherry2 memory controller.
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * [Derived from sb_edac.c]
16 *
17 * Translation of system physical addresses to DIMM addresses
18 * is a two stage process:
19 *
20 * First the Pondicherry 2 memory controller handles slice and channel interleaving
21 * in "sys2pmi()". This is (almost) completley common between platforms.
22 *
23 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
24 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
25 */
26
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/pci.h>
30#include <linux/pci_ids.h>
31#include <linux/slab.h>
32#include <linux/delay.h>
33#include <linux/edac.h>
34#include <linux/mmzone.h>
35#include <linux/smp.h>
36#include <linux/bitmap.h>
37#include <linux/math64.h>
38#include <linux/mod_devicetable.h>
39#include <asm/cpu_device_id.h>
40#include <asm/intel-family.h>
41#include <asm/processor.h>
42#include <asm/mce.h>
43
44#include "edac_mc.h"
45#include "edac_module.h"
46#include "pnd2_edac.h"
47
48#define APL_NUM_CHANNELS 4
49#define DNV_NUM_CHANNELS 2
50#define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */
51
52enum type {
53 APL,
54 DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
55};
56
57struct dram_addr {
58 int chan;
59 int dimm;
60 int rank;
61 int bank;
62 int row;
63 int col;
64};
65
66struct pnd2_pvt {
67 int dimm_geom[APL_NUM_CHANNELS];
68 u64 tolm, tohm;
69};
70
71/*
72 * System address space is divided into multiple regions with
73 * different interleave rules in each. The as0/as1 regions
74 * have no interleaving at all. The as2 region is interleaved
75 * between two channels. The mot region is magic and may overlap
76 * other regions, with its interleave rules taking precedence.
77 * Addresses not in any of these regions are interleaved across
78 * all four channels.
79 */
80static struct region {
81 u64 base;
82 u64 limit;
83 u8 enabled;
84} mot, as0, as1, as2;
85
86static struct dunit_ops {
87 char *name;
88 enum type type;
89 int pmiaddr_shift;
90 int pmiidx_shift;
91 int channels;
92 int dimms_per_channel;
93 int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
94 int (*get_registers)(void);
95 int (*check_ecc)(void);
96 void (*mk_region)(char *name, struct region *rp, void *asym);
97 void (*get_dimm_config)(struct mem_ctl_info *mci);
98 int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
99 struct dram_addr *daddr, char *msg);
100} *ops;
101
102static struct mem_ctl_info *pnd2_mci;
103
104#define PND2_MSG_SIZE 256
105
106/* Debug macros */
107#define pnd2_printk(level, fmt, arg...) \
108 edac_printk(level, "pnd2", fmt, ##arg)
109
110#define pnd2_mc_printk(mci, level, fmt, arg...) \
111 edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
112
113#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
114#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
115#define SELECTOR_DISABLED (-1)
116#define _4GB (1ul << 32)
117
118#define PMI_ADDRESS_WIDTH 31
119#define PND_MAX_PHYS_BIT 39
120
121#define APL_ASYMSHIFT 28
122#define DNV_ASYMSHIFT 31
123#define CH_HASH_MASK_LSB 6
124#define SLICE_HASH_MASK_LSB 6
125#define MOT_SLC_INTLV_BIT 12
126#define LOG2_PMI_ADDR_GRANULARITY 5
127#define MOT_SHIFT 24
128
129#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
130#define U64_LSHIFT(val, s) ((u64)(val) << (s))
131
132#ifdef CONFIG_X86_INTEL_SBI_APL
133#include "linux/platform_data/sbi_apl.h"
134int sbi_send(int port, int off, int op, u32 *data)
135{
136 struct sbi_apl_message sbi_arg;
137 int ret, read = 0;
138
139 memset(&sbi_arg, 0, sizeof(sbi_arg));
140
141 if (op == 0 || op == 4 || op == 6)
142 read = 1;
143 else
144 sbi_arg.data = *data;
145
146 sbi_arg.opcode = op;
147 sbi_arg.port_address = port;
148 sbi_arg.register_offset = off;
149 ret = sbi_apl_commit(&sbi_arg);
150 if (ret || sbi_arg.status)
151 edac_dbg(2, "sbi_send status=%d ret=%d data=%x\n",
152 sbi_arg.status, ret, sbi_arg.data);
153
154 if (ret == 0)
155 ret = sbi_arg.status;
156
157 if (ret == 0 && read)
158 *data = sbi_arg.data;
159
160 return ret;
161}
162#else
163int sbi_send(int port, int off, int op, u32 *data)
164{
165 return -EUNATCH;
166}
167#endif
168
169static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
170{
171 int ret = 0;
172
173 edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
174 switch (sz) {
175 case 8:
176 ret = sbi_send(port, off + 4, op, (u32 *)(data + 4));
177 case 4:
178 ret = sbi_send(port, off, op, (u32 *)data);
179 pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
180 sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
181 break;
182 }
183
184 return ret;
185}
186
187static u64 get_mem_ctrl_hub_base_addr(void)
188{
189 struct b_cr_mchbar_lo_pci lo;
190 struct b_cr_mchbar_hi_pci hi;
191 struct pci_dev *pdev;
192
193 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
194 if (pdev) {
195 pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
196 pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
197 pci_dev_put(pdev);
198 } else {
199 return 0;
200 }
201
202 if (!lo.enable) {
203 edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
204 return 0;
205 }
206
207 return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
208}
209
210static u64 get_sideband_reg_base_addr(void)
211{
212 struct pci_dev *pdev;
213 u32 hi, lo;
214
215 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
216 if (pdev) {
217 pci_read_config_dword(pdev, 0x10, &lo);
218 pci_read_config_dword(pdev, 0x14, &hi);
219 pci_dev_put(pdev);
220 return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
221 } else {
222 return 0xfd000000;
223 }
224}
225
226static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
227{
228 struct pci_dev *pdev;
229 char *base;
230 u64 addr;
231
232 if (op == 4) {
233 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
234 if (!pdev)
235 return -ENODEV;
236
237 pci_read_config_dword(pdev, off, data);
238 pci_dev_put(pdev);
239 } else {
240 /* MMIO via memory controller hub base address */
241 if (op == 0 && port == 0x4c) {
242 addr = get_mem_ctrl_hub_base_addr();
243 if (!addr)
244 return -ENODEV;
245 } else {
246 /* MMIO via sideband register base address */
247 addr = get_sideband_reg_base_addr();
248 if (!addr)
249 return -ENODEV;
250 addr += (port << 16);
251 }
252
253 base = ioremap((resource_size_t)addr, 0x10000);
254 if (!base)
255 return -ENODEV;
256
257 if (sz == 8)
258 *(u32 *)(data + 4) = *(u32 *)(base + off + 4);
259 *(u32 *)data = *(u32 *)(base + off);
260
261 iounmap(base);
262 }
263
264 edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
265 (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);
266
267 return 0;
268}
269
270#define RD_REGP(regp, regname, port) \
271 ops->rd_reg(port, \
272 regname##_offset, \
273 regname##_r_opcode, \
274 regp, sizeof(struct regname), \
275 #regname)
276
277#define RD_REG(regp, regname) \
278 ops->rd_reg(regname ## _port, \
279 regname##_offset, \
280 regname##_r_opcode, \
281 regp, sizeof(struct regname), \
282 #regname)
283
284static u64 top_lm, top_hm;
285static bool two_slices;
286static bool two_channels; /* Both PMI channels in one slice enabled */
287
288static u8 sym_chan_mask;
289static u8 asym_chan_mask;
290static u8 chan_mask;
291
292static int slice_selector = -1;
293static int chan_selector = -1;
294static u64 slice_hash_mask;
295static u64 chan_hash_mask;
296
297static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
298{
299 rp->enabled = 1;
300 rp->base = base;
301 rp->limit = limit;
302 edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
303}
304
305static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
306{
307 if (mask == 0) {
308 pr_info(FW_BUG "MOT mask cannot be zero\n");
309 return;
310 }
311 if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
312 pr_info(FW_BUG "MOT mask not power of two\n");
313 return;
314 }
315 if (base & ~mask) {
316 pr_info(FW_BUG "MOT region base/mask alignment error\n");
317 return;
318 }
319 rp->base = base;
320 rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
321 rp->enabled = 1;
322 edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
323}
324
325static bool in_region(struct region *rp, u64 addr)
326{
327 if (!rp->enabled)
328 return false;
329
330 return rp->base <= addr && addr <= rp->limit;
331}
332
333static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
334{
335 int mask = 0;
336
337 if (!p->slice_0_mem_disabled)
338 mask |= p->sym_slice0_channel_enabled;
339
340 if (!p->slice_1_disabled)
341 mask |= p->sym_slice1_channel_enabled << 2;
342
343 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
344 mask &= 0x5;
345
346 return mask;
347}
348
349static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
350 struct b_cr_asym_mem_region0_mchbar *as0,
351 struct b_cr_asym_mem_region1_mchbar *as1,
352 struct b_cr_asym_2way_mem_region_mchbar *as2way)
353{
354 const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
355 int mask = 0;
356
357 if (as2way->asym_2way_interleave_enable)
358 mask = intlv[as2way->asym_2way_intlv_mode];
359 if (as0->slice0_asym_enable)
360 mask |= (1 << as0->slice0_asym_channel_select);
361 if (as1->slice1_asym_enable)
362 mask |= (4 << as1->slice1_asym_channel_select);
363 if (p->slice_0_mem_disabled)
364 mask &= 0xc;
365 if (p->slice_1_disabled)
366 mask &= 0x3;
367 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
368 mask &= 0x5;
369
370 return mask;
371}
372
373static struct b_cr_tolud_pci tolud;
374static struct b_cr_touud_lo_pci touud_lo;
375static struct b_cr_touud_hi_pci touud_hi;
376static struct b_cr_asym_mem_region0_mchbar asym0;
377static struct b_cr_asym_mem_region1_mchbar asym1;
378static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
379static struct b_cr_mot_out_base_mchbar mot_base;
380static struct b_cr_mot_out_mask_mchbar mot_mask;
381static struct b_cr_slice_channel_hash chash;
382
383/* Apollo Lake dunit */
384/*
385 * Validated on board with just two DIMMs in the [0] and [2] positions
386 * in this array. Other port number matches documentation, but caution
387 * advised.
388 */
389static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
390static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];
391
392/* Denverton dunit */
393static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
394static struct d_cr_dsch dsch;
395static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
396static struct d_cr_drp drp[DNV_NUM_CHANNELS];
397static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
398static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
399static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
400static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
401static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
402static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
403
404static void apl_mk_region(char *name, struct region *rp, void *asym)
405{
406 struct b_cr_asym_mem_region0_mchbar *a = asym;
407
408 mk_region(name, rp,
409 U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
410 U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
411 GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
412}
413
414static void dnv_mk_region(char *name, struct region *rp, void *asym)
415{
416 struct b_cr_asym_mem_region_denverton *a = asym;
417
418 mk_region(name, rp,
419 U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
420 U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
421 GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
422}
423
424static int apl_get_registers(void)
425{
426 int i;
427
428 if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
429 return -ENODEV;
430
431 for (i = 0; i < APL_NUM_CHANNELS; i++)
432 if (RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
433 return -ENODEV;
434
435 return 0;
436}
437
438static int dnv_get_registers(void)
439{
440 int i;
441
442 if (RD_REG(&dsch, d_cr_dsch))
443 return -ENODEV;
444
445 for (i = 0; i < DNV_NUM_CHANNELS; i++)
446 if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
447 RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
448 RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
449 RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
450 RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
451 RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
452 RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
453 RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
454 return -ENODEV;
455
456 return 0;
457}
458
459/*
460 * Read all the h/w config registers once here (they don't
461 * change at run time. Figure out which address ranges have
462 * which interleave characteristics.
463 */
464static int get_registers(void)
465{
466 const int intlv[] = { 10, 11, 12, 12 };
467
468 if (RD_REG(&tolud, b_cr_tolud_pci) ||
469 RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
470 RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
471 RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
472 RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
473 RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
474 RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
475 RD_REG(&chash, b_cr_slice_channel_hash))
476 return -ENODEV;
477
478 if (ops->get_registers())
479 return -ENODEV;
480
481 if (ops->type == DNV) {
482 /* PMI channel idx (always 0) for asymmetric region */
483 asym0.slice0_asym_channel_select = 0;
484 asym1.slice1_asym_channel_select = 0;
485 /* PMI channel bitmap (always 1) for symmetric region */
486 chash.sym_slice0_channel_enabled = 0x1;
487 chash.sym_slice1_channel_enabled = 0x1;
488 }
489
490 if (asym0.slice0_asym_enable)
491 ops->mk_region("as0", &as0, &asym0);
492
493 if (asym1.slice1_asym_enable)
494 ops->mk_region("as1", &as1, &asym1);
495
496 if (asym_2way.asym_2way_interleave_enable) {
497 mk_region("as2way", &as2,
498 U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
499 U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
500 GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
501 }
502
503 if (mot_base.imr_en) {
504 mk_region_mask("mot", &mot,
505 U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
506 U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
507 }
508
509 top_lm = U64_LSHIFT(tolud.tolud, 20);
510 top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);
511
512 two_slices = !chash.slice_1_disabled &&
513 !chash.slice_0_mem_disabled &&
514 (chash.sym_slice0_channel_enabled != 0) &&
515 (chash.sym_slice1_channel_enabled != 0);
516 two_channels = !chash.ch_1_disabled &&
517 !chash.enable_pmi_dual_data_mode &&
518 ((chash.sym_slice0_channel_enabled == 3) ||
519 (chash.sym_slice1_channel_enabled == 3));
520
521 sym_chan_mask = gen_sym_mask(&chash);
522 asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
523 chan_mask = sym_chan_mask | asym_chan_mask;
524
525 if (two_slices && !two_channels) {
526 if (chash.hvm_mode)
527 slice_selector = 29;
528 else
529 slice_selector = intlv[chash.interleave_mode];
530 } else if (!two_slices && two_channels) {
531 if (chash.hvm_mode)
532 chan_selector = 29;
533 else
534 chan_selector = intlv[chash.interleave_mode];
535 } else if (two_slices && two_channels) {
536 if (chash.hvm_mode) {
537 slice_selector = 29;
538 chan_selector = 30;
539 } else {
540 slice_selector = intlv[chash.interleave_mode];
541 chan_selector = intlv[chash.interleave_mode] + 1;
542 }
543 }
544
545 if (two_slices) {
546 if (!chash.hvm_mode)
547 slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
548 if (!two_channels)
549 slice_hash_mask |= BIT_ULL(slice_selector);
550 }
551
552 if (two_channels) {
553 if (!chash.hvm_mode)
554 chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
555 if (!two_slices)
556 chan_hash_mask |= BIT_ULL(chan_selector);
557 }
558
559 return 0;
560}
561
562/* Get a contiguous memory address (remove the MMIO gap) */
563static u64 remove_mmio_gap(u64 sys)
564{
565 return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
566}
567
568/* Squeeze out one address bit, shift upper part down to fill gap */
569static void remove_addr_bit(u64 *addr, int bitidx)
570{
571 u64 mask;
572
573 if (bitidx == -1)
574 return;
575
576 mask = (1ull << bitidx) - 1;
577 *addr = ((*addr >> 1) & ~mask) | (*addr & mask);
578}
579
580/* XOR all the bits from addr specified in mask */
581static int hash_by_mask(u64 addr, u64 mask)
582{
583 u64 result = addr & mask;
584
585 result = (result >> 32) ^ result;
586 result = (result >> 16) ^ result;
587 result = (result >> 8) ^ result;
588 result = (result >> 4) ^ result;
589 result = (result >> 2) ^ result;
590 result = (result >> 1) ^ result;
591
592 return (int)result & 1;
593}
594
595/*
596 * First stage decode. Take the system address and figure out which
597 * second stage will deal with it based on interleave modes.
598 */
599static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
600{
601 u64 contig_addr, contig_base, contig_offset, contig_base_adj;
602 int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
603 MOT_CHAN_INTLV_BIT_1SLC_2CH;
604 int slice_intlv_bit_rm = SELECTOR_DISABLED;
605 int chan_intlv_bit_rm = SELECTOR_DISABLED;
606 /* Determine if address is in the MOT region. */
607 bool mot_hit = in_region(&mot, addr);
608 /* Calculate the number of symmetric regions enabled. */
609 int sym_channels = hweight8(sym_chan_mask);
610
611 /*
612 * The amount we need to shift the asym base can be determined by the
613 * number of enabled symmetric channels.
614 * NOTE: This can only work because symmetric memory is not supposed
615 * to do a 3-way interleave.
616 */
617 int sym_chan_shift = sym_channels >> 1;
618
619 /* Give up if address is out of range, or in MMIO gap */
620 if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
621 (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
622 snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
623 return -EINVAL;
624 }
625
626 /* Get a contiguous memory address (remove the MMIO gap) */
627 contig_addr = remove_mmio_gap(addr);
628
629 if (in_region(&as0, addr)) {
630 *pmiidx = asym0.slice0_asym_channel_select;
631
632 contig_base = remove_mmio_gap(as0.base);
633 contig_offset = contig_addr - contig_base;
634 contig_base_adj = (contig_base >> sym_chan_shift) *
635 ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
636 contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
637 } else if (in_region(&as1, addr)) {
638 *pmiidx = 2u + asym1.slice1_asym_channel_select;
639
640 contig_base = remove_mmio_gap(as1.base);
641 contig_offset = contig_addr - contig_base;
642 contig_base_adj = (contig_base >> sym_chan_shift) *
643 ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
644 contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
645 } else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
646 bool channel1;
647
648 mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
649 *pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
650 channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
651 hash_by_mask(contig_addr, chan_hash_mask);
652 *pmiidx |= (u32)channel1;
653
654 contig_base = remove_mmio_gap(as2.base);
655 chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
656 contig_offset = contig_addr - contig_base;
657 remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
658 contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
659 } else {
660 /* Otherwise we're in normal, boring symmetric mode. */
661 *pmiidx = 0u;
662
663 if (two_slices) {
664 bool slice1;
665
666 if (mot_hit) {
667 slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
668 slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
669 } else {
670 slice_intlv_bit_rm = slice_selector;
671 slice1 = hash_by_mask(addr, slice_hash_mask);
672 }
673
674 *pmiidx = (u32)slice1 << 1;
675 }
676
677 if (two_channels) {
678 bool channel1;
679
680 mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
681 MOT_CHAN_INTLV_BIT_1SLC_2CH;
682
683 if (mot_hit) {
684 chan_intlv_bit_rm = mot_intlv_bit;
685 channel1 = (addr >> mot_intlv_bit) & 1;
686 } else {
687 chan_intlv_bit_rm = chan_selector;
688 channel1 = hash_by_mask(contig_addr, chan_hash_mask);
689 }
690
691 *pmiidx |= (u32)channel1;
692 }
693 }
694
695 /* Remove the chan_selector bit first */
696 remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
697 /* Remove the slice bit (we remove it second because it must be lower */
698 remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
699 *pmiaddr = contig_addr;
700
701 return 0;
702}
703
/* Translate PMI address to memory (rank, row, bank, column) */
#define C(n) (0x10 | (n)) /* column */
#define B(n) (0x20 | (n)) /* bank */
#define R(n) (0x40 | (n)) /* row */
#define RS (0x80) /* rank */

/* addrdec values */
#define AMAP_1KB 0
#define AMAP_2KB 1
#define AMAP_4KB 2
#define AMAP_RSVD 3

/* dden values */
#define DEN_4Gb 0
#define DEN_8Gb 2

/* dwid values */
#define X8 0
#define X16 1

/*
 * One entry per supported DIMM geometry. bits[i] says what DRAM address
 * component (column/bank/row/rank, encoded with the macros above) PMI
 * address bit i maps to; 0 terminates the used portion of the array.
 */
static struct dimm_geometry {
	u8 addrdec;
	u8 dden;
	u8 dwid;
	u8 rowbits, colbits;
	u16 bits[PMI_ADDRESS_WIDTH];
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	}
};
852
853static int bank_hash(u64 pmiaddr, int idx, int shft)
854{
855 int bhash = 0;
856
857 switch (idx) {
858 case 0:
859 bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
860 break;
861 case 1:
862 bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
863 bhash ^= ((pmiaddr >> 22) & 1) << 1;
864 break;
865 case 2:
866 bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
867 break;
868 }
869
870 return bhash;
871}
872
873static int rank_hash(u64 pmiaddr)
874{
875 return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
876}
877
/* Second stage decode. Compute rank, bank, row & column. */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
					   struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	/* Geometry for this channel, resolved in apl_get_dimm_config() */
	int g = pvt->dimm_geom[pmiidx];
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	/* Walk the PMI address bits, scattering each into its DRAM field */
	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int bit = (pmiaddr >> i) & 1;

		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;
		idx = d->bits[i + skiprs] & 0xf;

		/*
		 * On single rank DIMMs ignore the rank select bit
		 * and shift remainder of "bits[]" down one place.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			/* Hit the 0 terminator; any set bit up here is bogus */
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}
			goto done;
		}
	}

done:
	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;

	return 0;
}
945
/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))

/* Denverton second stage decode, driven by the dmap* mapping registers */
static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
					   struct dram_addr *daddr, char *msg)
{
	/* Rank 0 or 1 */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
	/* Rank 2 or 3 */
	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
	 * flip them if DIMM1 is larger than DIMM0.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	/* Bank address bits, plus bank group bit(s) on DDR4 */
	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	/* Optional bank address hashing (bxor) */
	if (dmap1[pmiidx].bxor) {
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	/* Row bits 0-13 always mapped; 14-17 only when the field isn't 31 */
	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	/* DDR3 with a 12-bit column has an extra column bit (ca11) */
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}
1024
1025static int check_channel(int ch)
1026{
1027 if (drp0[ch].dramtype != 0) {
1028 pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
1029 return 1;
1030 } else if (drp0[ch].eccen == 0) {
1031 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1032 return 1;
1033 }
1034 return 0;
1035}
1036
1037static int apl_check_ecc_active(void)
1038{
1039 int i, ret = 0;
1040
1041 /* Check dramtype and ECC mode for each present DIMM */
1042 for (i = 0; i < APL_NUM_CHANNELS; i++)
1043 if (chan_mask & BIT(i))
1044 ret += check_channel(i);
1045 return ret ? -EINVAL : 0;
1046}
1047
1048#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
1049
1050static int check_unit(int ch)
1051{
1052 struct d_cr_drp *d = &drp[ch];
1053
1054 if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
1055 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1056 return 1;
1057 }
1058 return 0;
1059}
1060
1061static int dnv_check_ecc_active(void)
1062{
1063 int i, ret = 0;
1064
1065 for (i = 0; i < DNV_NUM_CHANNELS; i++)
1066 ret += check_unit(i);
1067 return ret ? -EINVAL : 0;
1068}
1069
1070static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
1071 struct dram_addr *daddr, char *msg)
1072{
1073 u64 pmiaddr;
1074 u32 pmiidx;
1075 int ret;
1076
1077 ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
1078 if (ret)
1079 return ret;
1080
1081 pmiaddr >>= ops->pmiaddr_shift;
1082 /* pmi channel idx to dimm channel idx */
1083 pmiidx >>= ops->pmiidx_shift;
1084 daddr->chan = pmiidx;
1085
1086 ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
1087 if (ret)
1088 return ret;
1089
1090 edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1091 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);
1092
1093 return 0;
1094}
1095
/* Decode one machine-check record and report it through the EDAC core */
static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
								  struct dram_addr *daddr)
{
	enum hw_event_mc_err_type tp_event;
	char *optype, msg[PND2_MSG_SIZE];
	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
	bool overflow = m->status & MCI_STATUS_OVER;
	bool uc_err = m->status & MCI_STATUS_UC;
	bool recov = m->status & MCI_STATUS_S;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	int rc;

	/* Uncorrected + no valid return IP means we can't recover => fatal */
	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
						 HW_EVENT_ERR_CORRECTED;

	/*
	 * According with Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic
	 */
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Only decode errors with an valid address (ADDRV) */
	if (!(m->status & MCI_STATUS_ADDRV))
		return;

	rc = get_memory_error_data(mci, m->addr, daddr, msg);
	if (rc)
		goto address_error;

	snprintf(msg, sizeof(msg),
		"%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
		overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
		errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);

	edac_dbg(0, "%s\n", msg);

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
						 m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);

	return;

address_error:
	/* Decode failed: msg holds the reason from get_memory_error_data() */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}
1174
/* Populate the EDAC dimm_info entries for Apollo Lake from the drp0 registers */
static void apl_get_dimm_config(struct mem_ctl_info *mci)
{
	struct pnd2_pvt *pvt = mci->pvt_info;
	struct dimm_info *dimm;
	struct d_cr_drp0 *d;
	u64 capacity;
	int i, g;

	for (i = 0; i < APL_NUM_CHANNELS; i++) {
		if (!(chan_mask & BIT(i)))
			continue;

		dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
		if (!dimm) {
			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
			continue;
		}

		/* Match this channel's geometry against the dimms[] table */
		d = &drp0[i];
		for (g = 0; g < ARRAY_SIZE(dimms); g++)
			if (dimms[g].addrdec == d->addrdec &&
			    dimms[g].dden == d->dden &&
			    dimms[g].dwid == d->dwid)
				break;

		if (g == ARRAY_SIZE(dimms)) {
			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
			continue;
		}

		/* Remember geometry index for apl_pmi2mem() */
		pvt->dimm_geom[i] = g;
		/* capacity in 8-byte units: ranks * 8 banks * rows * cols */
		capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
				   (1ul << dimms[g].colbits);
		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
		dimm->grain = 32;
		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
		dimm->mtype = MEM_DDR3;
		dimm->edac_mode = EDAC_SECDED;
		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
	}
}
1217
/* Map the 2-bit dimmdwid register field to an EDAC device width */
static const int dnv_dtypes[] = {
	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};
1221
/* Populate the EDAC dimm_info entries for Denverton from dsch/drp/dmap* */
static void dnv_get_dimm_config(struct mem_ctl_info *mci)
{
	int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
	struct dimm_info *dimm;
	struct d_cr_drp *d;
	u64 capacity;

	if (dsch.ddr4en) {
		memtype = MEM_DDR4;
		banks = 16;
		colbits = 10;
	} else {
		memtype = MEM_DDR3;
		banks = 8;
	}

	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
		/* A row-map field of 31 means "bit not mapped" (see dnv_pmi2mem) */
		if (dmap4[i].row14 == 31)
			rowbits = 14;
		else if (dmap4[i].row15 == 31)
			rowbits = 15;
		else if (dmap4[i].row16 == 31)
			rowbits = 16;
		else if (dmap4[i].row17 == 31)
			rowbits = 17;
		else
			rowbits = 18;

		if (memtype == MEM_DDR3) {
			if (dmap1[i].ca11 != 0x3f)
				colbits = 12;
			else
				colbits = 10;
		}

		d = &drp[i];
		/* DIMM0 is present if rank0 and/or rank1 is enabled */
		ranks_of_dimm[0] = d->rken0 + d->rken1;
		/* DIMM1 is present if rank2 and/or rank3 is enabled */
		ranks_of_dimm[1] = d->rken2 + d->rken3;

		for (j = 0; j < DNV_MAX_DIMMS; j++) {
			if (!ranks_of_dimm[j])
				continue;

			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
			if (!dimm) {
				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
				continue;
			}

			/* capacity in 8-byte units: ranks * banks * rows * cols */
			capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
			dimm->grain = 32;
			/*
			 * NOTE(review): index looks swapped - DIMM1 (j == 1) reads
			 * dimmdwid0 and DIMM0 reads dimmdwid1. Confirm against the
			 * DRP register definition.
			 */
			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
			dimm->mtype = memtype;
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
		}
	}
}
1284
1285static int pnd2_register_mci(struct mem_ctl_info **ppmci)
1286{
1287 struct edac_mc_layer layers[2];
1288 struct mem_ctl_info *mci;
1289 struct pnd2_pvt *pvt;
1290 int rc;
1291
1292 rc = ops->check_ecc();
1293 if (rc < 0)
1294 return rc;
1295
1296 /* Allocate a new MC control structure */
1297 layers[0].type = EDAC_MC_LAYER_CHANNEL;
1298 layers[0].size = ops->channels;
1299 layers[0].is_virt_csrow = false;
1300 layers[1].type = EDAC_MC_LAYER_SLOT;
1301 layers[1].size = ops->dimms_per_channel;
1302 layers[1].is_virt_csrow = true;
1303 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1304 if (!mci)
1305 return -ENOMEM;
1306
1307 pvt = mci->pvt_info;
1308 memset(pvt, 0, sizeof(*pvt));
1309
1310 mci->mod_name = "pnd2_edac.c";
1311 mci->dev_name = ops->name;
1312 mci->ctl_name = "Pondicherry2";
1313
1314 /* Get dimm basic config and the memory layout */
1315 ops->get_dimm_config(mci);
1316
1317 if (edac_mc_add_mc(mci)) {
1318 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1319 edac_mc_free(mci);
1320 return -EINVAL;
1321 }
1322
1323 *ppmci = mci;
1324
1325 return 0;
1326}
1327
1328static void pnd2_unregister_mci(struct mem_ctl_info *mci)
1329{
1330 if (unlikely(!mci || !mci->pvt_info)) {
1331 pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
1332 return;
1333 }
1334
1335 /* Remove MC sysfs nodes */
1336 edac_mc_del_mc(NULL);
1337 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
1338 edac_mc_free(mci);
1339}
1340
1341/*
1342 * Callback function registered with core kernel mce code.
1343 * Called once for each logged error.
1344 */
1345static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
1346{
1347 struct mce *mce = (struct mce *)data;
1348 struct mem_ctl_info *mci;
1349 struct dram_addr daddr;
1350 char *type;
1351
1352 if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
1353 return NOTIFY_DONE;
1354
1355 mci = pnd2_mci;
1356 if (!mci)
1357 return NOTIFY_DONE;
1358
1359 /*
1360 * Just let mcelog handle it if the error is
1361 * outside the memory controller. A memory error
1362 * is indicated by bit 7 = 1 and bits = 8-11,13-15 = 0.
1363 * bit 12 has an special meaning.
1364 */
1365 if ((mce->status & 0xefff) >> 7 != 1)
1366 return NOTIFY_DONE;
1367
1368 if (mce->mcgstatus & MCG_STATUS_MCIP)
1369 type = "Exception";
1370 else
1371 type = "Event";
1372
1373 pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
1374 pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
1375 mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
1376 pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
1377 pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
1378 pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
1379 pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
1380 mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);
1381
1382 pnd2_mce_output_error(mci, mce, &daddr);
1383
1384 /* Advice mcelog that the error were handled */
1385 return NOTIFY_STOP;
1386}
1387
/* Hooked into the MCE decode chain in pnd2_init() */
static struct notifier_block pnd2_mce_dec = {
	.notifier_call = pnd2_mce_check_error,
};
1391
#ifdef CONFIG_EDAC_DEBUG
/*
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
/* Result of the last fake decode, exported read-only via debugfs */
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};
1405
1406static int debugfs_u64_set(void *data, u64 val)
1407{
1408 struct dram_addr daddr;
1409 struct mce m;
1410
1411 *(u64 *)data = val;
1412 m.mcgstatus = 0;
1413 /* ADDRV + MemRd + Unknown channel */
1414 m.status = MCI_STATUS_ADDRV + 0x9f;
1415 m.addr = val;
1416 pnd2_mce_output_error(pnd2_mci, &m, &daddr);
1417 snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
1418 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1419 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
1420 pnd2_blob.size = strlen(pnd2_blob.data);
1421
1422 return 0;
1423}
1424DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
1425
/* Create the debugfs address-injection file and result blob */
static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
							 &pnd2_fake_addr, &fops_u64_wo);
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}

/* Tear down everything created by setup_pnd2_debug() */
static void teardown_pnd2_debug(void)
{
	debugfs_remove_recursive(pnd2_test);
}
1438#else
1439static void setup_pnd2_debug(void) {}
1440static void teardown_pnd2_debug(void) {}
1441#endif /* CONFIG_EDAC_DEBUG */
1442
1443
1444static int pnd2_probe(void)
1445{
1446 int rc;
1447
1448 edac_dbg(2, "\n");
1449 rc = get_registers();
1450 if (rc)
1451 return rc;
1452
1453 return pnd2_register_mci(&pnd2_mci);
1454}
1455
/* Undo pnd2_probe(): unregister and free the controller */
static void pnd2_remove(void)
{
	edac_dbg(0, "\n");
	pnd2_unregister_mci(pnd2_mci);
}
1461
/* Apollo Lake d-unit operations and layout parameters */
static struct dunit_ops apl_ops = {
		.name			= "pnd2/apl",
		.type			= APL,
		.pmiaddr_shift		= LOG2_PMI_ADDR_GRANULARITY,
		.pmiidx_shift		= 0,
		.channels		= APL_NUM_CHANNELS,
		.dimms_per_channel	= 1,
		.rd_reg			= apl_rd_reg,
		.get_registers		= apl_get_registers,
		.check_ecc		= apl_check_ecc_active,
		.mk_region		= apl_mk_region,
		.get_dimm_config	= apl_get_dimm_config,
		.pmi2mem		= apl_pmi2mem,
};
1476
/* Denverton d-unit operations and layout parameters */
static struct dunit_ops dnv_ops = {
		.name			= "pnd2/dnv",
		.type			= DNV,
		.pmiaddr_shift		= 0,
		.pmiidx_shift		= 1,
		.channels		= DNV_NUM_CHANNELS,
		.dimms_per_channel	= 2,
		.rd_reg			= dnv_rd_reg,
		.get_registers		= dnv_get_registers,
		.check_ecc		= dnv_check_ecc_active,
		.mk_region		= dnv_mk_region,
		.get_dimm_config	= dnv_get_dimm_config,
		.pmi2mem		= dnv_pmi2mem,
};
1491
1492static const struct x86_cpu_id pnd2_cpuids[] = {
1493 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
1494 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
1495 { }
1496};
1497MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
1498
1499static int __init pnd2_init(void)
1500{
1501 const struct x86_cpu_id *id;
1502 int rc;
1503
1504 edac_dbg(2, "\n");
1505
1506 id = x86_match_cpu(pnd2_cpuids);
1507 if (!id)
1508 return -ENODEV;
1509
1510 ops = (struct dunit_ops *)id->driver_data;
1511
1512 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1513 opstate_init();
1514
1515 rc = pnd2_probe();
1516 if (rc < 0) {
1517 pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
1518 return rc;
1519 }
1520
1521 if (!pnd2_mci)
1522 return -ENODEV;
1523
1524 mce_register_decode_chain(&pnd2_mce_dec);
1525 setup_pnd2_debug();
1526
1527 return 0;
1528}
1529
/* Module exit: tear down in the reverse order of pnd2_init() */
static void __exit pnd2_exit(void)
{
	edac_dbg(2, "\n");
	teardown_pnd2_debug();
	mce_unregister_decode_chain(&pnd2_mce_dec);
	pnd2_remove();
}
1537
1538module_init(pnd2_init);
1539module_exit(pnd2_exit);
1540
1541module_param(edac_op_state, int, 0444);
1542MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1543
1544MODULE_LICENSE("GPL v2");
1545MODULE_AUTHOR("Tony Luck");
1546MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");
diff --git a/drivers/edac/pnd2_edac.h b/drivers/edac/pnd2_edac.h
new file mode 100644
index 000000000000..61b6e79492bb
--- /dev/null
+++ b/drivers/edac/pnd2_edac.h
@@ -0,0 +1,301 @@
1/*
2 * Register bitfield descriptions for Pondicherry2 memory controller.
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef _PND2_REGS_H
17#define _PND2_REGS_H
18
/*
 * TOUUD (presumably Top Of Upper Usable DRAM, the above-4GB DRAM
 * limit) — low 32 bits.  All structs in this file mirror hardware
 * registers as C bitfields; fields always cover the register's full
 * 32- or 64-bit width.
 *
 * The *_port / *_offset / *_r_opcode macros give the sideband address
 * used to read each register (consumed via the rd_reg dunit op in
 * pnd2_edac.c — confirm exact usage there).
 */
struct b_cr_touud_lo_pci {
	u32 lock : 1;
	u32 reserved_1 : 19;
	u32 touud : 12;		/* address bits 31:20 */
};

#define b_cr_touud_lo_pci_port 0x4c
#define b_cr_touud_lo_pci_offset 0xa8
#define b_cr_touud_lo_pci_r_opcode 0x04

/* TOUUD — high word; touud here holds address bits 38:32. */
struct b_cr_touud_hi_pci {
	u32 touud : 7;
	u32 reserved_0 : 25;
};

#define b_cr_touud_hi_pci_port 0x4c
#define b_cr_touud_hi_pci_offset 0xac
#define b_cr_touud_hi_pci_r_opcode 0x04

/* TOLUD (presumably Top Of Lower Usable DRAM, the below-4GB limit). */
struct b_cr_tolud_pci {
	u32 lock : 1;
	u32 reserved_0 : 19;
	u32 tolud : 12;		/* address bits 31:20 */
};

#define b_cr_tolud_pci_port 0x4c
#define b_cr_tolud_pci_offset 0xbc
#define b_cr_tolud_pci_r_opcode 0x04
47
/* MCHBAR (memory controller BAR) — low 32 bits; base is bits 31:15. */
struct b_cr_mchbar_lo_pci {
	u32 enable : 1;
	u32 pad_3_1 : 3;
	u32 pad_14_4: 11;
	u32 base: 17;
};

/* MCHBAR — high word; base here holds address bits 38:32. */
struct b_cr_mchbar_hi_pci {
	u32 base : 7;
	u32 pad_31_7 : 25;
};
59
/* Symmetric region */
/*
 * 64-bit register describing slice/channel topology: per-slice and
 * per-channel disable bits, the interleave mode, and the address hash
 * masks (slice_hash_mask / ch_hash_mask) used to pick a slice and
 * channel for symmetric interleaving.
 */
struct b_cr_slice_channel_hash {
	u64 slice_1_disabled : 1;
	u64 hvm_mode : 1;
	u64 interleave_mode : 2;
	u64 slice_0_mem_disabled : 1;
	u64 reserved_0 : 1;
	u64 slice_hash_mask : 14;
	u64 reserved_1 : 11;
	u64 enable_pmi_dual_data_mode : 1;
	u64 ch_1_disabled : 1;
	u64 reserved_2 : 1;
	u64 sym_slice0_channel_enabled : 2;
	u64 sym_slice1_channel_enabled : 2;
	u64 ch_hash_mask : 14;
	u64 reserved_3 : 11;
	u64 lock : 1;
};

#define b_cr_slice_channel_hash_port 0x4c
#define b_cr_slice_channel_hash_offset 0x4c58
#define b_cr_slice_channel_hash_r_opcode 0x06
82
/*
 * MOT (presumably Memory Opportunistic Transaction region) base —
 * paired with the mask register below to define an address window.
 */
struct b_cr_mot_out_base_mchbar {
	u32 reserved_0 : 14;
	u32 mot_out_base : 15;
	u32 reserved_1 : 1;
	u32 tr_en : 1;
	u32 imr_en : 1;
};

#define b_cr_mot_out_base_mchbar_port 0x4c
#define b_cr_mot_out_base_mchbar_offset 0x6af0
#define b_cr_mot_out_base_mchbar_r_opcode 0x00

/* MOT mask — mot_out_mask occupies the same bit span as mot_out_base. */
struct b_cr_mot_out_mask_mchbar {
	u32 reserved_0 : 14;
	u32 mot_out_mask : 15;
	u32 reserved_1 : 1;
	u32 ia_iwb_en : 1;
	u32 gt_iwb_en : 1;
};

#define b_cr_mot_out_mask_mchbar_port 0x4c
#define b_cr_mot_out_mask_mchbar_offset 0x6af4
#define b_cr_mot_out_mask_mchbar_r_opcode 0x00
106
/*
 * Asymmetric memory region for slice 0: base/limit window plus channel
 * select and enable.  Region 1 (below) is the slice-1 counterpart at
 * the next register offset.
 */
struct b_cr_asym_mem_region0_mchbar {
	u32 pad : 4;
	u32 slice0_asym_base : 11;
	u32 pad_18_15 : 4;
	u32 slice0_asym_limit : 11;
	u32 slice0_asym_channel_select : 1;
	u32 slice0_asym_enable : 1;
};

#define b_cr_asym_mem_region0_mchbar_port 0x4c
#define b_cr_asym_mem_region0_mchbar_offset 0x6e40
#define b_cr_asym_mem_region0_mchbar_r_opcode 0x00

struct b_cr_asym_mem_region1_mchbar {
	u32 pad : 4;
	u32 slice1_asym_base : 11;
	u32 pad_18_15 : 4;
	u32 slice1_asym_limit : 11;
	u32 slice1_asym_channel_select : 1;
	u32 slice1_asym_enable : 1;
};

#define b_cr_asym_mem_region1_mchbar_port 0x4c
#define b_cr_asym_mem_region1_mchbar_offset 0x6e44
#define b_cr_asym_mem_region1_mchbar_r_opcode 0x00

/* Some bit fields moved in above two structs on Denverton */
/* Denverton layout: 8-bit base/limit, no channel-select bit. */
struct b_cr_asym_mem_region_denverton {
	u32 pad : 4;
	u32 slice_asym_base : 8;
	u32 pad_19_12 : 8;
	u32 slice_asym_limit : 8;
	u32 pad_28_30 : 3;	/* NOTE(review): name inverted vs pad_19_12 convention; covers bits 30:28 */
	u32 slice_asym_enable : 1;
};

/*
 * 2-way asymmetric interleave window: base/limit plus the interleave
 * mode to apply inside the window.
 */
struct b_cr_asym_2way_mem_region_mchbar {
	u32 pad : 2;
	u32 asym_2way_intlv_mode : 2;
	u32 asym_2way_base : 11;
	u32 pad_16_15 : 2;
	u32 asym_2way_limit : 11;
	u32 pad_30_28 : 3;
	u32 asym_2way_interleave_enable : 1;
};

#define b_cr_asym_2way_mem_region_mchbar_port 0x4c
#define b_cr_asym_2way_mem_region_mchbar_offset 0x6e50
#define b_cr_asym_2way_mem_region_mchbar_r_opcode 0x00
156
/* Apollo Lake d-unit */

/*
 * DRAM rank/parameter register: rank enables (rken*), device width
 * (dwid) and density (dden), ECC enable (eccen — presumably what the
 * check_ecc dunit op inspects; confirm in pnd2_edac.c), DRAM type and
 * burst-length mode, and address-decode selection.
 */
struct d_cr_drp0 {
	u32 rken0 : 1;
	u32 rken1 : 1;
	u32 ddmen : 1;
	u32 rsvd3 : 1;
	u32 dwid : 2;
	u32 dden : 3;
	u32 rsvd13_9 : 5;
	u32 rsien : 1;
	u32 bahen : 1;
	u32 rsvd18_16 : 3;
	u32 caswizzle : 2;
	u32 eccen : 1;
	u32 dramtype : 3;
	u32 blmode : 3;
	u32 addrdec : 2;
	u32 dramdevice_pr : 2;
};

#define d_cr_drp0_offset 0x1400
#define d_cr_drp0_r_opcode 0x00
180
/* Denverton d-unit */

/*
 * Scheduler/config register: per-channel enables (ch0en/ch1en), DDR4
 * mode, channel width, and misc scheduler controls.
 */
struct d_cr_dsch {
	u32 ch0en : 1;
	u32 ch1en : 1;
	u32 ddr4en : 1;
	u32 coldwake : 1;
	u32 newbypdis : 1;
	u32 chan_width : 1;
	u32 rsvd6_6 : 1;
	u32 ooodis : 1;
	u32 rsvd18_8 : 11;
	u32 ic : 1;
	u32 rsvd31_20 : 12;
};

#define d_cr_dsch_port 0x16
#define d_cr_dsch_offset 0x0
#define d_cr_dsch_r_opcode 0x0

/* ECC control: single enable bit, rest reserved. */
struct d_cr_ecc_ctrl {
	u32 eccen : 1;
	u32 rsvd31_1 : 31;
};

#define d_cr_ecc_ctrl_offset 0x180
#define d_cr_ecc_ctrl_r_opcode 0x0
208
/*
 * Denverton DRAM parameters: four rank enables and per-DIMM width
 * (dimmdwid*) / density (dimmdden*) fields.
 */
struct d_cr_drp {
	u32 rken0 : 1;
	u32 rken1 : 1;
	u32 rken2 : 1;
	u32 rken3 : 1;
	u32 dimmdwid0 : 2;
	u32 dimmdden0 : 2;
	u32 dimmdwid1 : 2;
	u32 dimmdden1 : 2;
	u32 rsvd15_12 : 4;
	u32 dimmflip : 1;
	u32 rsvd31_17 : 15;
};

#define d_cr_drp_offset 0x158
#define d_cr_drp_r_opcode 0x0

/*
 * Address-map register: which system-address bit feeds each bank
 * address (ba*), bank group (bg*), and rank select (rs*) line.
 */
struct d_cr_dmap {
	u32 ba0 : 5;
	u32 ba1 : 5;
	u32 bg0 : 5; /* if ddr3, ba2 = bg0 */
	u32 bg1 : 5; /* if ddr3, ba3 = bg1 */
	u32 rs0 : 5;
	u32 rs1 : 5;
	u32 rsvd : 2;
};

#define d_cr_dmap_offset 0x174
#define d_cr_dmap_r_opcode 0x0
238
/* Address map, part 1: column address bit 11 mapping plus bank XOR. */
struct d_cr_dmap1 {
	u32 ca11 : 6;
	u32 bxor : 1;
	u32 rsvd : 25;
};

#define d_cr_dmap1_offset 0xb4
#define d_cr_dmap1_r_opcode 0x0

/* Address map, part 2: row address bits 0-5 (5-bit selector each). */
struct d_cr_dmap2 {
	u32 row0 : 5;
	u32 row1 : 5;
	u32 row2 : 5;
	u32 row3 : 5;
	u32 row4 : 5;
	u32 row5 : 5;
	u32 rsvd : 2;
};

#define d_cr_dmap2_offset 0x148
#define d_cr_dmap2_r_opcode 0x0

/* Address map, part 3: row address bits 6-11. */
struct d_cr_dmap3 {
	u32 row6 : 5;
	u32 row7 : 5;
	u32 row8 : 5;
	u32 row9 : 5;
	u32 row10 : 5;
	u32 row11 : 5;
	u32 rsvd : 2;
};

#define d_cr_dmap3_offset 0x14c
#define d_cr_dmap3_r_opcode 0x0

/* Address map, part 4: row address bits 12-17. */
struct d_cr_dmap4 {
	u32 row12 : 5;
	u32 row13 : 5;
	u32 row14 : 5;
	u32 row15 : 5;
	u32 row16 : 5;
	u32 row17 : 5;
	u32 rsvd : 2;
};

#define d_cr_dmap4_offset 0x150
#define d_cr_dmap4_r_opcode 0x0

/* Address map, part 5: column address bits 3-9 (4-bit selector each). */
struct d_cr_dmap5 {
	u32 ca3 : 4;
	u32 ca4 : 4;
	u32 ca5 : 4;
	u32 ca6 : 4;
	u32 ca7 : 4;
	u32 ca8 : 4;
	u32 ca9 : 4;
	u32 rsvd : 4;
};

#define d_cr_dmap5_offset 0x154
#define d_cr_dmap5_r_opcode 0x0
300
301#endif /* _PND2_REGS_H */
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 6c270d9d304a..669246056812 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1596,7 +1596,7 @@ static void xgene_edac_pa_report(struct edac_device_ctl_info *edac_dev)
1596 reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS); 1596 reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS);
1597 if (!reg) 1597 if (!reg)
1598 goto chk_iob_axi0; 1598 goto chk_iob_axi0;
1599 dev_err(edac_dev->dev, "IOB procesing agent (PA) transaction error\n"); 1599 dev_err(edac_dev->dev, "IOB processing agent (PA) transaction error\n");
1600 if (reg & IOBPA_RDATA_CORRUPT_MASK) 1600 if (reg & IOBPA_RDATA_CORRUPT_MASK)
1601 dev_err(edac_dev->dev, "IOB PA read data RAM error\n"); 1601 dev_err(edac_dev->dev, "IOB PA read data RAM error\n");
1602 if (reg & IOBPA_M_RDATA_CORRUPT_MASK) 1602 if (reg & IOBPA_M_RDATA_CORRUPT_MASK)
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 96bbae579c0b..fc09c76248b4 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -44,7 +44,7 @@ config EXTCON_GPIO
44 44
45config EXTCON_INTEL_INT3496 45config EXTCON_INTEL_INT3496
46 tristate "Intel INT3496 ACPI device extcon driver" 46 tristate "Intel INT3496 ACPI device extcon driver"
47 depends on GPIOLIB && ACPI 47 depends on GPIOLIB && ACPI && (X86 || COMPILE_TEST)
48 help 48 help
49 Say Y here to enable extcon support for USB OTG ports controlled by 49 Say Y here to enable extcon support for USB OTG ports controlled by
50 an Intel INT3496 ACPI device. 50 an Intel INT3496 ACPI device.
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index a3131b036de6..9d17984bbbd4 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -45,6 +45,17 @@ static const unsigned int int3496_cable[] = {
45 EXTCON_NONE, 45 EXTCON_NONE,
46}; 46};
47 47
48static const struct acpi_gpio_params id_gpios = { INT3496_GPIO_USB_ID, 0, false };
49static const struct acpi_gpio_params vbus_gpios = { INT3496_GPIO_VBUS_EN, 0, false };
50static const struct acpi_gpio_params mux_gpios = { INT3496_GPIO_USB_MUX, 0, false };
51
52static const struct acpi_gpio_mapping acpi_int3496_default_gpios[] = {
53 { "id-gpios", &id_gpios, 1 },
54 { "vbus-gpios", &vbus_gpios, 1 },
55 { "mux-gpios", &mux_gpios, 1 },
56 { },
57};
58
48static void int3496_do_usb_id(struct work_struct *work) 59static void int3496_do_usb_id(struct work_struct *work)
49{ 60{
50 struct int3496_data *data = 61 struct int3496_data *data =
@@ -83,6 +94,13 @@ static int int3496_probe(struct platform_device *pdev)
83 struct int3496_data *data; 94 struct int3496_data *data;
84 int ret; 95 int ret;
85 96
97 ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev),
98 acpi_int3496_default_gpios);
99 if (ret) {
100 dev_err(dev, "can't add GPIO ACPI mapping\n");
101 return ret;
102 }
103
86 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); 104 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
87 if (!data) 105 if (!data)
88 return -ENOMEM; 106 return -ENOMEM;
@@ -90,30 +108,27 @@ static int int3496_probe(struct platform_device *pdev)
90 data->dev = dev; 108 data->dev = dev;
91 INIT_DELAYED_WORK(&data->work, int3496_do_usb_id); 109 INIT_DELAYED_WORK(&data->work, int3496_do_usb_id);
92 110
93 data->gpio_usb_id = devm_gpiod_get_index(dev, "id", 111 data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN);
94 INT3496_GPIO_USB_ID,
95 GPIOD_IN);
96 if (IS_ERR(data->gpio_usb_id)) { 112 if (IS_ERR(data->gpio_usb_id)) {
97 ret = PTR_ERR(data->gpio_usb_id); 113 ret = PTR_ERR(data->gpio_usb_id);
98 dev_err(dev, "can't request USB ID GPIO: %d\n", ret); 114 dev_err(dev, "can't request USB ID GPIO: %d\n", ret);
99 return ret; 115 return ret;
116 } else if (gpiod_get_direction(data->gpio_usb_id) != GPIOF_DIR_IN) {
117 dev_warn(dev, FW_BUG "USB ID GPIO not in input mode, fixing\n");
118 gpiod_direction_input(data->gpio_usb_id);
100 } 119 }
101 120
102 data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id); 121 data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id);
103 if (data->usb_id_irq <= 0) { 122 if (data->usb_id_irq < 0) {
104 dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq); 123 dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq);
105 return -EINVAL; 124 return data->usb_id_irq;
106 } 125 }
107 126
108 data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus en", 127 data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS);
109 INT3496_GPIO_VBUS_EN,
110 GPIOD_ASIS);
111 if (IS_ERR(data->gpio_vbus_en)) 128 if (IS_ERR(data->gpio_vbus_en))
112 dev_info(dev, "can't request VBUS EN GPIO\n"); 129 dev_info(dev, "can't request VBUS EN GPIO\n");
113 130
114 data->gpio_usb_mux = devm_gpiod_get_index(dev, "usb mux", 131 data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS);
115 INT3496_GPIO_USB_MUX,
116 GPIOD_ASIS);
117 if (IS_ERR(data->gpio_usb_mux)) 132 if (IS_ERR(data->gpio_usb_mux))
118 dev_info(dev, "can't request USB MUX GPIO\n"); 133 dev_info(dev, "can't request USB MUX GPIO\n");
119 134
@@ -154,6 +169,8 @@ static int int3496_remove(struct platform_device *pdev)
154 devm_free_irq(&pdev->dev, data->usb_id_irq, data); 169 devm_free_irq(&pdev->dev, data->usb_id_irq, data);
155 cancel_delayed_work_sync(&data->work); 170 cancel_delayed_work_sync(&data->work);
156 171
172 acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
173
157 return 0; 174 return 0;
158} 175}
159 176
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index e7d404059b73..b372aad3b449 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -389,7 +389,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
389 return 0; 389 return 0;
390 } 390 }
391 } 391 }
392 pr_err_once("requested map not found.\n");
393 return -ENOENT; 392 return -ENOENT;
394} 393}
395 394
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 08b026864d4e..8554d7aec31c 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -254,7 +254,7 @@ void __init efi_esrt_init(void)
254 254
255 rc = efi_mem_desc_lookup(efi.esrt, &md); 255 rc = efi_mem_desc_lookup(efi.esrt, &md);
256 if (rc < 0) { 256 if (rc < 0) {
257 pr_err("ESRT header is not in the memory map.\n"); 257 pr_warn("ESRT header is not in the memory map.\n");
258 return; 258 return;
259 } 259 }
260 260
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 9b37a3692b3f..2bd683e2be02 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -266,6 +266,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
266 goto fail_free_event; 266 goto fail_free_event;
267 } 267 }
268 268
269 if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
270 enable_irq_wake(irq);
271
269 list_add_tail(&event->node, &acpi_gpio->events); 272 list_add_tail(&event->node, &acpi_gpio->events);
270 return AE_OK; 273 return AE_OK;
271 274
@@ -339,6 +342,9 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
339 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { 342 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
340 struct gpio_desc *desc; 343 struct gpio_desc *desc;
341 344
345 if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
346 disable_irq_wake(event->irq);
347
342 free_irq(event->irq, event); 348 free_irq(event->irq, event);
343 desc = event->desc; 349 desc = event->desc;
344 if (WARN_ON(IS_ERR(desc))) 350 if (WARN_ON(IS_ERR(desc)))
@@ -571,8 +577,10 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
571 } 577 }
572 578
573 desc = acpi_get_gpiod_by_index(adev, propname, idx, &info); 579 desc = acpi_get_gpiod_by_index(adev, propname, idx, &info);
574 if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER)) 580 if (!IS_ERR(desc))
575 break; 581 break;
582 if (PTR_ERR(desc) == -EPROBE_DEFER)
583 return ERR_CAST(desc);
576 } 584 }
577 585
578 /* Then from plain _CRS GPIOs */ 586 /* Then from plain _CRS GPIOs */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a3a105ec99e2..de0cf3315484 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -475,7 +475,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
475 int r; 475 int r;
476 476
477 if (adev->wb.wb_obj == NULL) { 477 if (adev->wb.wb_obj == NULL) {
478 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4, 478 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
479 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 479 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
480 &adev->wb.wb_obj, &adev->wb.gpu_addr, 480 &adev->wb.wb_obj, &adev->wb.gpu_addr,
481 (void **)&adev->wb.wb); 481 (void **)&adev->wb.wb);
@@ -488,7 +488,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
488 memset(&adev->wb.used, 0, sizeof(adev->wb.used)); 488 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
489 489
490 /* clear wb memory */ 490 /* clear wb memory */
491 memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE); 491 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
492 } 492 }
493 493
494 return 0; 494 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f7adbace428a..b76cd699eb0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -421,6 +421,7 @@ static const struct pci_device_id pciidlist[] = {
421 {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 421 {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
422 {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 422 {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
423 {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 423 {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
424 {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
424 {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 425 {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
425 426
426 {0, 0, 0} 427 {0, 0, 0}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 33b504bafb88..c5dec210d529 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3465,9 +3465,13 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
3465 max_sclk = 75000; 3465 max_sclk = 75000;
3466 } 3466 }
3467 } else if (adev->asic_type == CHIP_OLAND) { 3467 } else if (adev->asic_type == CHIP_OLAND) {
3468 if ((adev->pdev->device == 0x6604) && 3468 if ((adev->pdev->revision == 0xC7) ||
3469 (adev->pdev->subsystem_vendor == 0x1028) && 3469 (adev->pdev->revision == 0x80) ||
3470 (adev->pdev->subsystem_device == 0x066F)) { 3470 (adev->pdev->revision == 0x81) ||
3471 (adev->pdev->revision == 0x83) ||
3472 (adev->pdev->revision == 0x87) ||
3473 (adev->pdev->device == 0x6604) ||
3474 (adev->pdev->device == 0x6605)) {
3471 max_sclk = 75000; 3475 max_sclk = 75000;
3472 } 3476 }
3473 } 3477 }
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index f6d4d9700734..324a688b3f30 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1260,9 +1260,9 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
1260 * to KMS, hence fail if different settings are requested. 1260 * to KMS, hence fail if different settings are requested.
1261 */ 1261 */
1262 if (var->bits_per_pixel != fb->format->cpp[0] * 8 || 1262 if (var->bits_per_pixel != fb->format->cpp[0] * 8 ||
1263 var->xres != fb->width || var->yres != fb->height || 1263 var->xres > fb->width || var->yres > fb->height ||
1264 var->xres_virtual != fb->width || var->yres_virtual != fb->height) { 1264 var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
1265 DRM_DEBUG("fb userspace requested width/height/bpp different than current fb " 1265 DRM_DEBUG("fb requested width/height/bpp can't fit in current fb "
1266 "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n", 1266 "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
1267 var->xres, var->yres, var->bits_per_pixel, 1267 var->xres, var->yres, var->bits_per_pixel,
1268 var->xres_virtual, var->yres_virtual, 1268 var->xres_virtual, var->yres_virtual,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 130d7d517a19..da48819ff2e6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1311,6 +1311,8 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1311 goto out_pm_put; 1311 goto out_pm_put;
1312 } 1312 }
1313 1313
1314 mutex_lock(&gpu->lock);
1315
1314 fence = etnaviv_gpu_fence_alloc(gpu); 1316 fence = etnaviv_gpu_fence_alloc(gpu);
1315 if (!fence) { 1317 if (!fence) {
1316 event_free(gpu, event); 1318 event_free(gpu, event);
@@ -1318,8 +1320,6 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
1318 goto out_pm_put; 1320 goto out_pm_put;
1319 } 1321 }
1320 1322
1321 mutex_lock(&gpu->lock);
1322
1323 gpu->event[event].fence = fence; 1323 gpu->event[event].fence = fence;
1324 submit->fence = fence->seqno; 1324 submit->fence = fence->seqno;
1325 gpu->active_fence = submit->fence; 1325 gpu->active_fence = submit->fence;
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 0fd6f7a18364..c0e8d3302292 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -68,6 +68,8 @@ struct decon_context {
68 unsigned long flags; 68 unsigned long flags;
69 unsigned long out_type; 69 unsigned long out_type;
70 int first_win; 70 int first_win;
71 spinlock_t vblank_lock;
72 u32 frame_id;
71}; 73};
72 74
73static const uint32_t decon_formats[] = { 75static const uint32_t decon_formats[] = {
@@ -103,7 +105,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
103 if (ctx->out_type & IFTYPE_I80) 105 if (ctx->out_type & IFTYPE_I80)
104 val |= VIDINTCON0_FRAMEDONE; 106 val |= VIDINTCON0_FRAMEDONE;
105 else 107 else
106 val |= VIDINTCON0_INTFRMEN; 108 val |= VIDINTCON0_INTFRMEN | VIDINTCON0_FRAMESEL_FP;
107 109
108 writel(val, ctx->addr + DECON_VIDINTCON0); 110 writel(val, ctx->addr + DECON_VIDINTCON0);
109 } 111 }
@@ -122,14 +124,56 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
122 writel(0, ctx->addr + DECON_VIDINTCON0); 124 writel(0, ctx->addr + DECON_VIDINTCON0);
123} 125}
124 126
127/* return number of starts/ends of frame transmissions since reset */
128static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
129{
130 u32 frm, pfrm, status, cnt = 2;
131
132 /* To get consistent result repeat read until frame id is stable.
133 * Usually the loop will be executed once, in rare cases when the loop
134 * is executed at frame change time 2nd pass will be needed.
135 */
136 frm = readl(ctx->addr + DECON_CRFMID);
137 do {
138 status = readl(ctx->addr + DECON_VIDCON1);
139 pfrm = frm;
140 frm = readl(ctx->addr + DECON_CRFMID);
141 } while (frm != pfrm && --cnt);
142
143 /* CRFMID is incremented on BPORCH in case of I80 and on VSYNC in case
144 * of RGB, it should be taken into account.
145 */
146 if (!frm)
147 return 0;
148
149 switch (status & (VIDCON1_VSTATUS_MASK | VIDCON1_I80_ACTIVE)) {
150 case VIDCON1_VSTATUS_VS:
151 if (!(ctx->out_type & IFTYPE_I80))
152 --frm;
153 break;
154 case VIDCON1_VSTATUS_BP:
155 --frm;
156 break;
157 case VIDCON1_I80_ACTIVE:
158 case VIDCON1_VSTATUS_AC:
159 if (end)
160 --frm;
161 break;
162 default:
163 break;
164 }
165
166 return frm;
167}
168
125static void decon_setup_trigger(struct decon_context *ctx) 169static void decon_setup_trigger(struct decon_context *ctx)
126{ 170{
127 if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))) 171 if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)))
128 return; 172 return;
129 173
130 if (!(ctx->out_type & I80_HW_TRG)) { 174 if (!(ctx->out_type & I80_HW_TRG)) {
131 writel(TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN 175 writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
132 | TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN, 176 TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN,
133 ctx->addr + DECON_TRIGCON); 177 ctx->addr + DECON_TRIGCON);
134 return; 178 return;
135 } 179 }
@@ -365,11 +409,14 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
365static void decon_atomic_flush(struct exynos_drm_crtc *crtc) 409static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
366{ 410{
367 struct decon_context *ctx = crtc->ctx; 411 struct decon_context *ctx = crtc->ctx;
412 unsigned long flags;
368 int i; 413 int i;
369 414
370 if (test_bit(BIT_SUSPENDED, &ctx->flags)) 415 if (test_bit(BIT_SUSPENDED, &ctx->flags))
371 return; 416 return;
372 417
418 spin_lock_irqsave(&ctx->vblank_lock, flags);
419
373 for (i = ctx->first_win; i < WINDOWS_NR; i++) 420 for (i = ctx->first_win; i < WINDOWS_NR; i++)
374 decon_shadow_protect_win(ctx, i, false); 421 decon_shadow_protect_win(ctx, i, false);
375 422
@@ -378,11 +425,18 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
378 425
379 if (ctx->out_type & IFTYPE_I80) 426 if (ctx->out_type & IFTYPE_I80)
380 set_bit(BIT_WIN_UPDATED, &ctx->flags); 427 set_bit(BIT_WIN_UPDATED, &ctx->flags);
428
429 ctx->frame_id = decon_get_frame_count(ctx, true);
430
431 exynos_crtc_handle_event(crtc);
432
433 spin_unlock_irqrestore(&ctx->vblank_lock, flags);
381} 434}
382 435
383static void decon_swreset(struct decon_context *ctx) 436static void decon_swreset(struct decon_context *ctx)
384{ 437{
385 unsigned int tries; 438 unsigned int tries;
439 unsigned long flags;
386 440
387 writel(0, ctx->addr + DECON_VIDCON0); 441 writel(0, ctx->addr + DECON_VIDCON0);
388 for (tries = 2000; tries; --tries) { 442 for (tries = 2000; tries; --tries) {
@@ -400,6 +454,10 @@ static void decon_swreset(struct decon_context *ctx)
400 454
401 WARN(tries == 0, "failed to software reset DECON\n"); 455 WARN(tries == 0, "failed to software reset DECON\n");
402 456
457 spin_lock_irqsave(&ctx->vblank_lock, flags);
458 ctx->frame_id = 0;
459 spin_unlock_irqrestore(&ctx->vblank_lock, flags);
460
403 if (!(ctx->out_type & IFTYPE_HDMI)) 461 if (!(ctx->out_type & IFTYPE_HDMI))
404 return; 462 return;
405 463
@@ -578,6 +636,24 @@ static const struct component_ops decon_component_ops = {
578 .unbind = decon_unbind, 636 .unbind = decon_unbind,
579}; 637};
580 638
639static void decon_handle_vblank(struct decon_context *ctx)
640{
641 u32 frm;
642
643 spin_lock(&ctx->vblank_lock);
644
645 frm = decon_get_frame_count(ctx, true);
646
647 if (frm != ctx->frame_id) {
648 /* handle only if incremented, take care of wrap-around */
649 if ((s32)(frm - ctx->frame_id) > 0)
650 drm_crtc_handle_vblank(&ctx->crtc->base);
651 ctx->frame_id = frm;
652 }
653
654 spin_unlock(&ctx->vblank_lock);
655}
656
581static irqreturn_t decon_irq_handler(int irq, void *dev_id) 657static irqreturn_t decon_irq_handler(int irq, void *dev_id)
582{ 658{
583 struct decon_context *ctx = dev_id; 659 struct decon_context *ctx = dev_id;
@@ -598,7 +674,7 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
598 (VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F)) 674 (VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F))
599 return IRQ_HANDLED; 675 return IRQ_HANDLED;
600 } 676 }
601 drm_crtc_handle_vblank(&ctx->crtc->base); 677 decon_handle_vblank(ctx);
602 } 678 }
603 679
604out: 680out:
@@ -671,6 +747,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
671 __set_bit(BIT_SUSPENDED, &ctx->flags); 747 __set_bit(BIT_SUSPENDED, &ctx->flags);
672 ctx->dev = dev; 748 ctx->dev = dev;
673 ctx->out_type = (unsigned long)of_device_get_match_data(dev); 749 ctx->out_type = (unsigned long)of_device_get_match_data(dev);
750 spin_lock_init(&ctx->vblank_lock);
674 751
675 if (ctx->out_type & IFTYPE_HDMI) { 752 if (ctx->out_type & IFTYPE_HDMI) {
676 ctx->first_win = 1; 753 ctx->first_win = 1;
@@ -678,7 +755,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
678 ctx->out_type |= IFTYPE_I80; 755 ctx->out_type |= IFTYPE_I80;
679 } 756 }
680 757
681 if (ctx->out_type | I80_HW_TRG) { 758 if (ctx->out_type & I80_HW_TRG) {
682 ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, 759 ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
683 "samsung,disp-sysreg"); 760 "samsung,disp-sysreg");
684 if (IS_ERR(ctx->sysreg)) { 761 if (IS_ERR(ctx->sysreg)) {
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index f9ab19e205e2..48811806fa27 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -526,6 +526,7 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
526 526
527 for (i = 0; i < WINDOWS_NR; i++) 527 for (i = 0; i < WINDOWS_NR; i++)
528 decon_shadow_protect_win(ctx, i, false); 528 decon_shadow_protect_win(ctx, i, false);
529 exynos_crtc_handle_event(crtc);
529} 530}
530 531
531static void decon_init(struct decon_context *ctx) 532static void decon_init(struct decon_context *ctx)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 5367b6664fe3..c65f4509932c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -85,16 +85,28 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
85 struct drm_crtc_state *old_crtc_state) 85 struct drm_crtc_state *old_crtc_state)
86{ 86{
87 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 87 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
88 struct drm_pending_vblank_event *event;
89 unsigned long flags;
90 88
91 if (exynos_crtc->ops->atomic_flush) 89 if (exynos_crtc->ops->atomic_flush)
92 exynos_crtc->ops->atomic_flush(exynos_crtc); 90 exynos_crtc->ops->atomic_flush(exynos_crtc);
91}
92
93static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
94 .enable = exynos_drm_crtc_enable,
95 .disable = exynos_drm_crtc_disable,
96 .mode_set_nofb = exynos_drm_crtc_mode_set_nofb,
97 .atomic_check = exynos_crtc_atomic_check,
98 .atomic_begin = exynos_crtc_atomic_begin,
99 .atomic_flush = exynos_crtc_atomic_flush,
100};
101
102void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc)
103{
104 struct drm_crtc *crtc = &exynos_crtc->base;
105 struct drm_pending_vblank_event *event = crtc->state->event;
106 unsigned long flags;
93 107
94 event = crtc->state->event;
95 if (event) { 108 if (event) {
96 crtc->state->event = NULL; 109 crtc->state->event = NULL;
97
98 spin_lock_irqsave(&crtc->dev->event_lock, flags); 110 spin_lock_irqsave(&crtc->dev->event_lock, flags);
99 if (drm_crtc_vblank_get(crtc) == 0) 111 if (drm_crtc_vblank_get(crtc) == 0)
100 drm_crtc_arm_vblank_event(crtc, event); 112 drm_crtc_arm_vblank_event(crtc, event);
@@ -105,15 +117,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
105 117
106} 118}
107 119
108static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
109 .enable = exynos_drm_crtc_enable,
110 .disable = exynos_drm_crtc_disable,
111 .mode_set_nofb = exynos_drm_crtc_mode_set_nofb,
112 .atomic_check = exynos_crtc_atomic_check,
113 .atomic_begin = exynos_crtc_atomic_begin,
114 .atomic_flush = exynos_crtc_atomic_flush,
115};
116
117static void exynos_drm_crtc_destroy(struct drm_crtc *crtc) 120static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
118{ 121{
119 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 122 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 6a581a8af465..abd5d6ceac0c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -40,4 +40,6 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
40 */ 40 */
41void exynos_drm_crtc_te_handler(struct drm_crtc *crtc); 41void exynos_drm_crtc_te_handler(struct drm_crtc *crtc);
42 42
43void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc);
44
43#endif 45#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 812e2ec0761d..d7ef26370e67 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -86,7 +86,7 @@
86#define DSIM_SYNC_INFORM (1 << 27) 86#define DSIM_SYNC_INFORM (1 << 27)
87#define DSIM_EOT_DISABLE (1 << 28) 87#define DSIM_EOT_DISABLE (1 << 28)
88#define DSIM_MFLUSH_VS (1 << 29) 88#define DSIM_MFLUSH_VS (1 << 29)
89/* This flag is valid only for exynos3250/3472/4415/5260/5430 */ 89/* This flag is valid only for exynos3250/3472/5260/5430 */
90#define DSIM_CLKLANE_STOP (1 << 30) 90#define DSIM_CLKLANE_STOP (1 << 30)
91 91
92/* DSIM_ESCMODE */ 92/* DSIM_ESCMODE */
@@ -473,17 +473,6 @@ static const struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
473 .reg_values = reg_values, 473 .reg_values = reg_values,
474}; 474};
475 475
476static const struct exynos_dsi_driver_data exynos4415_dsi_driver_data = {
477 .reg_ofs = exynos_reg_ofs,
478 .plltmr_reg = 0x58,
479 .has_clklane_stop = 1,
480 .num_clks = 2,
481 .max_freq = 1000,
482 .wait_for_reset = 1,
483 .num_bits_resol = 11,
484 .reg_values = reg_values,
485};
486
487static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = { 476static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
488 .reg_ofs = exynos_reg_ofs, 477 .reg_ofs = exynos_reg_ofs,
489 .plltmr_reg = 0x58, 478 .plltmr_reg = 0x58,
@@ -521,8 +510,6 @@ static const struct of_device_id exynos_dsi_of_match[] = {
521 .data = &exynos3_dsi_driver_data }, 510 .data = &exynos3_dsi_driver_data },
522 { .compatible = "samsung,exynos4210-mipi-dsi", 511 { .compatible = "samsung,exynos4210-mipi-dsi",
523 .data = &exynos4_dsi_driver_data }, 512 .data = &exynos4_dsi_driver_data },
524 { .compatible = "samsung,exynos4415-mipi-dsi",
525 .data = &exynos4415_dsi_driver_data },
526 { .compatible = "samsung,exynos5410-mipi-dsi", 513 { .compatible = "samsung,exynos5410-mipi-dsi",
527 .data = &exynos5_dsi_driver_data }, 514 .data = &exynos5_dsi_driver_data },
528 { .compatible = "samsung,exynos5422-mipi-dsi", 515 { .compatible = "samsung,exynos5422-mipi-dsi",
@@ -979,7 +966,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
979 bool first = !xfer->tx_done; 966 bool first = !xfer->tx_done;
980 u32 reg; 967 u32 reg;
981 968
982 dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n", 969 dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n",
983 xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done); 970 xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done);
984 971
985 if (length > DSI_TX_FIFO_SIZE) 972 if (length > DSI_TX_FIFO_SIZE)
@@ -1177,7 +1164,7 @@ static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi)
1177 spin_unlock_irqrestore(&dsi->transfer_lock, flags); 1164 spin_unlock_irqrestore(&dsi->transfer_lock, flags);
1178 1165
1179 dev_dbg(dsi->dev, 1166 dev_dbg(dsi->dev,
1180 "> xfer %p, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n", 1167 "> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
1181 xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len, 1168 xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
1182 xfer->rx_done); 1169 xfer->rx_done);
1183 1170
@@ -1348,9 +1335,12 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
1348 int te_gpio_irq; 1335 int te_gpio_irq;
1349 1336
1350 dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0); 1337 dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
1338 if (dsi->te_gpio == -ENOENT)
1339 return 0;
1340
1351 if (!gpio_is_valid(dsi->te_gpio)) { 1341 if (!gpio_is_valid(dsi->te_gpio)) {
1352 dev_err(dsi->dev, "no te-gpios specified\n");
1353 ret = dsi->te_gpio; 1342 ret = dsi->te_gpio;
1343 dev_err(dsi->dev, "cannot get te-gpios, %d\n", ret);
1354 goto out; 1344 goto out;
1355 } 1345 }
1356 1346
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 95871577015d..5b18b5c5fdf2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1695,7 +1695,7 @@ static int fimc_probe(struct platform_device *pdev)
1695 goto err_put_clk; 1695 goto err_put_clk;
1696 } 1696 }
1697 1697
1698 DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv); 1698 DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv);
1699 1699
1700 spin_lock_init(&ctx->lock); 1700 spin_lock_init(&ctx->lock);
1701 platform_set_drvdata(pdev, ctx); 1701 platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index a9fa444c6053..3f04d72c448d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -71,10 +71,10 @@
71#define TRIGCON 0x1A4 71#define TRIGCON 0x1A4
72#define TRGMODE_ENABLE (1 << 0) 72#define TRGMODE_ENABLE (1 << 0)
73#define SWTRGCMD_ENABLE (1 << 1) 73#define SWTRGCMD_ENABLE (1 << 1)
74/* Exynos3250, 3472, 4415, 5260 5410, 5420 and 5422 only supported. */ 74/* Exynos3250, 3472, 5260 5410, 5420 and 5422 only supported. */
75#define HWTRGEN_ENABLE (1 << 3) 75#define HWTRGEN_ENABLE (1 << 3)
76#define HWTRGMASK_ENABLE (1 << 4) 76#define HWTRGMASK_ENABLE (1 << 4)
77/* Exynos3250, 3472, 4415, 5260, 5420 and 5422 only supported. */ 77/* Exynos3250, 3472, 5260, 5420 and 5422 only supported. */
78#define HWTRIGEN_PER_ENABLE (1 << 31) 78#define HWTRIGEN_PER_ENABLE (1 << 31)
79 79
80/* display mode change control register except exynos4 */ 80/* display mode change control register except exynos4 */
@@ -138,18 +138,6 @@ static struct fimd_driver_data exynos4_fimd_driver_data = {
138 .has_vtsel = 1, 138 .has_vtsel = 1,
139}; 139};
140 140
141static struct fimd_driver_data exynos4415_fimd_driver_data = {
142 .timing_base = 0x20000,
143 .lcdblk_offset = 0x210,
144 .lcdblk_vt_shift = 10,
145 .lcdblk_bypass_shift = 1,
146 .trg_type = I80_HW_TRG,
147 .has_shadowcon = 1,
148 .has_vidoutcon = 1,
149 .has_vtsel = 1,
150 .has_trigger_per_te = 1,
151};
152
153static struct fimd_driver_data exynos5_fimd_driver_data = { 141static struct fimd_driver_data exynos5_fimd_driver_data = {
154 .timing_base = 0x20000, 142 .timing_base = 0x20000,
155 .lcdblk_offset = 0x214, 143 .lcdblk_offset = 0x214,
@@ -210,8 +198,6 @@ static const struct of_device_id fimd_driver_dt_match[] = {
210 .data = &exynos3_fimd_driver_data }, 198 .data = &exynos3_fimd_driver_data },
211 { .compatible = "samsung,exynos4210-fimd", 199 { .compatible = "samsung,exynos4210-fimd",
212 .data = &exynos4_fimd_driver_data }, 200 .data = &exynos4_fimd_driver_data },
213 { .compatible = "samsung,exynos4415-fimd",
214 .data = &exynos4415_fimd_driver_data },
215 { .compatible = "samsung,exynos5250-fimd", 201 { .compatible = "samsung,exynos5250-fimd",
216 .data = &exynos5_fimd_driver_data }, 202 .data = &exynos5_fimd_driver_data },
217 { .compatible = "samsung,exynos5420-fimd", 203 { .compatible = "samsung,exynos5420-fimd",
@@ -257,7 +243,7 @@ static int fimd_enable_vblank(struct exynos_drm_crtc *crtc)
257 val |= VIDINTCON0_INT_FRAME; 243 val |= VIDINTCON0_INT_FRAME;
258 244
259 val &= ~VIDINTCON0_FRAMESEL0_MASK; 245 val &= ~VIDINTCON0_FRAMESEL0_MASK;
260 val |= VIDINTCON0_FRAMESEL0_VSYNC; 246 val |= VIDINTCON0_FRAMESEL0_FRONTPORCH;
261 val &= ~VIDINTCON0_FRAMESEL1_MASK; 247 val &= ~VIDINTCON0_FRAMESEL1_MASK;
262 val |= VIDINTCON0_FRAMESEL1_NONE; 248 val |= VIDINTCON0_FRAMESEL1_NONE;
263 } 249 }
@@ -723,6 +709,8 @@ static void fimd_atomic_flush(struct exynos_drm_crtc *crtc)
723 709
724 for (i = 0; i < WINDOWS_NR; i++) 710 for (i = 0; i < WINDOWS_NR; i++)
725 fimd_shadow_protect_win(ctx, i, false); 711 fimd_shadow_protect_win(ctx, i, false);
712
713 exynos_crtc_handle_event(crtc);
726} 714}
727 715
728static void fimd_update_plane(struct exynos_drm_crtc *crtc, 716static void fimd_update_plane(struct exynos_drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 4c28f7ffcc4d..55a1579d11b3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -218,7 +218,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
218 return ERR_PTR(ret); 218 return ERR_PTR(ret);
219 } 219 }
220 220
221 DRM_DEBUG_KMS("created file object = %p\n", obj->filp); 221 DRM_DEBUG_KMS("created file object = %pK\n", obj->filp);
222 222
223 return exynos_gem; 223 return exynos_gem;
224} 224}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index bef57987759d..0506b2b17ac1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1723,7 +1723,7 @@ static int gsc_probe(struct platform_device *pdev)
1723 return ret; 1723 return ret;
1724 } 1724 }
1725 1725
1726 DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv); 1726 DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv);
1727 1727
1728 mutex_init(&ctx->lock); 1728 mutex_init(&ctx->lock);
1729 platform_set_drvdata(pdev, ctx); 1729 platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 9c84ee76f18a..3edda18cc2d2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -208,7 +208,7 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
208 * e.g PAUSE state, queue buf, command control. 208 * e.g PAUSE state, queue buf, command control.
209 */ 209 */
210 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 210 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
211 DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv); 211 DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n", count++, ippdrv);
212 212
213 mutex_lock(&ippdrv->cmd_lock); 213 mutex_lock(&ippdrv->cmd_lock);
214 list_for_each_entry(c_node, &ippdrv->cmd_list, list) { 214 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
@@ -388,7 +388,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
388 } 388 }
389 property->prop_id = ret; 389 property->prop_id = ret;
390 390
391 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n", 391 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%pK]\n",
392 property->prop_id, property->cmd, ippdrv); 392 property->prop_id, property->cmd, ippdrv);
393 393
394 /* stored property information and ippdrv in private data */ 394 /* stored property information and ippdrv in private data */
@@ -518,7 +518,7 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
518{ 518{
519 int i; 519 int i;
520 520
521 DRM_DEBUG_KMS("node[%p]\n", m_node); 521 DRM_DEBUG_KMS("node[%pK]\n", m_node);
522 522
523 if (!m_node) { 523 if (!m_node) {
524 DRM_ERROR("invalid dequeue node.\n"); 524 DRM_ERROR("invalid dequeue node.\n");
@@ -562,7 +562,7 @@ static struct drm_exynos_ipp_mem_node
562 m_node->buf_id = qbuf->buf_id; 562 m_node->buf_id = qbuf->buf_id;
563 INIT_LIST_HEAD(&m_node->list); 563 INIT_LIST_HEAD(&m_node->list);
564 564
565 DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id); 565 DRM_DEBUG_KMS("m_node[%pK]ops_id[%d]\n", m_node, qbuf->ops_id);
566 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); 566 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
567 567
568 for_each_ipp_planar(i) { 568 for_each_ipp_planar(i) {
@@ -659,7 +659,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
659 659
660 mutex_lock(&c_node->event_lock); 660 mutex_lock(&c_node->event_lock);
661 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { 661 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
662 DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e); 662 DRM_DEBUG_KMS("count[%d]e[%pK]\n", count++, e);
663 663
664 /* 664 /*
665 * qbuf == NULL condition means all event deletion. 665 * qbuf == NULL condition means all event deletion.
@@ -750,7 +750,7 @@ static struct drm_exynos_ipp_mem_node
750 750
751 /* find memory node from memory list */ 751 /* find memory node from memory list */
752 list_for_each_entry(m_node, head, list) { 752 list_for_each_entry(m_node, head, list) {
753 DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node); 753 DRM_DEBUG_KMS("count[%d]m_node[%pK]\n", count++, m_node);
754 754
755 /* compare buffer id */ 755 /* compare buffer id */
756 if (m_node->buf_id == qbuf->buf_id) 756 if (m_node->buf_id == qbuf->buf_id)
@@ -767,7 +767,7 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
767 struct exynos_drm_ipp_ops *ops = NULL; 767 struct exynos_drm_ipp_ops *ops = NULL;
768 int ret = 0; 768 int ret = 0;
769 769
770 DRM_DEBUG_KMS("node[%p]\n", m_node); 770 DRM_DEBUG_KMS("node[%pK]\n", m_node);
771 771
772 if (!m_node) { 772 if (!m_node) {
773 DRM_ERROR("invalid queue node.\n"); 773 DRM_ERROR("invalid queue node.\n");
@@ -1232,7 +1232,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1232 m_node = list_first_entry(head, 1232 m_node = list_first_entry(head,
1233 struct drm_exynos_ipp_mem_node, list); 1233 struct drm_exynos_ipp_mem_node, list);
1234 1234
1235 DRM_DEBUG_KMS("m_node[%p]\n", m_node); 1235 DRM_DEBUG_KMS("m_node[%pK]\n", m_node);
1236 1236
1237 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 1237 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1238 if (ret) { 1238 if (ret) {
@@ -1601,7 +1601,7 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1601 } 1601 }
1602 ippdrv->prop_list.ipp_id = ret; 1602 ippdrv->prop_list.ipp_id = ret;
1603 1603
1604 DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n", 1604 DRM_DEBUG_KMS("count[%d]ippdrv[%pK]ipp_id[%d]\n",
1605 count++, ippdrv, ret); 1605 count++, ippdrv, ret);
1606 1606
1607 /* store parent device for node */ 1607 /* store parent device for node */
@@ -1659,7 +1659,7 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1659 1659
1660 file_priv->ipp_dev = dev; 1660 file_priv->ipp_dev = dev;
1661 1661
1662 DRM_DEBUG_KMS("done priv[%p]\n", dev); 1662 DRM_DEBUG_KMS("done priv[%pK]\n", dev);
1663 1663
1664 return 0; 1664 return 0;
1665} 1665}
@@ -1676,7 +1676,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1676 mutex_lock(&ippdrv->cmd_lock); 1676 mutex_lock(&ippdrv->cmd_lock);
1677 list_for_each_entry_safe(c_node, tc_node, 1677 list_for_each_entry_safe(c_node, tc_node,
1678 &ippdrv->cmd_list, list) { 1678 &ippdrv->cmd_list, list) {
1679 DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", 1679 DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n",
1680 count++, ippdrv); 1680 count++, ippdrv);
1681 1681
1682 if (c_node->filp == file) { 1682 if (c_node->filp == file) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 6591e406084c..79282a820ecc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -748,7 +748,7 @@ static int rotator_probe(struct platform_device *pdev)
748 goto err_ippdrv_register; 748 goto err_ippdrv_register;
749 } 749 }
750 750
751 DRM_DEBUG_KMS("ippdrv[%p]\n", ippdrv); 751 DRM_DEBUG_KMS("ippdrv[%pK]\n", ippdrv);
752 752
753 platform_set_drvdata(pdev, rot); 753 platform_set_drvdata(pdev, rot);
754 754
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 57fe514d5c5b..5d9a62a87eec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -170,6 +170,7 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
170 .enable_vblank = vidi_enable_vblank, 170 .enable_vblank = vidi_enable_vblank,
171 .disable_vblank = vidi_disable_vblank, 171 .disable_vblank = vidi_disable_vblank,
172 .update_plane = vidi_update_plane, 172 .update_plane = vidi_update_plane,
173 .atomic_flush = exynos_crtc_handle_event,
173}; 174};
174 175
175static void vidi_fake_vblank_timer(unsigned long arg) 176static void vidi_fake_vblank_timer(unsigned long arg)
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 72143ac10525..25edb635a197 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1012,6 +1012,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
1012 return; 1012 return;
1013 1013
1014 mixer_vsync_set_update(mixer_ctx, true); 1014 mixer_vsync_set_update(mixer_ctx, true);
1015 exynos_crtc_handle_event(crtc);
1015} 1016}
1016 1017
1017static void mixer_enable(struct exynos_drm_crtc *crtc) 1018static void mixer_enable(struct exynos_drm_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 3b6caaca9751..325618d969fe 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -242,7 +242,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
242 const char *item; 242 const char *item;
243 243
244 if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) { 244 if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
245 gvt_err("Invalid vGPU creation params\n"); 245 gvt_vgpu_err("Invalid vGPU creation params\n");
246 return -EINVAL; 246 return -EINVAL;
247 } 247 }
248 248
@@ -285,9 +285,9 @@ static int alloc_resource(struct intel_vgpu *vgpu,
285 return 0; 285 return 0;
286 286
287no_enough_resource: 287no_enough_resource:
288 gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item); 288 gvt_vgpu_err("fail to allocate resource %s\n", item);
289 gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n", 289 gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
290 vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail), 290 BYTES_TO_MB(request), BYTES_TO_MB(avail),
291 BYTES_TO_MB(max), BYTES_TO_MB(taken)); 291 BYTES_TO_MB(max), BYTES_TO_MB(taken));
292 return -ENOSPC; 292 return -ENOSPC;
293} 293}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 7ae6e2b241c8..2b92cc8a7d1a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -817,6 +817,25 @@ static bool is_shadowed_mmio(unsigned int offset)
817 return ret; 817 return ret;
818} 818}
819 819
820static inline bool is_force_nonpriv_mmio(unsigned int offset)
821{
822 return (offset >= 0x24d0 && offset < 0x2500);
823}
824
825static int force_nonpriv_reg_handler(struct parser_exec_state *s,
826 unsigned int offset, unsigned int index)
827{
828 struct intel_gvt *gvt = s->vgpu->gvt;
829 unsigned int data = cmd_val(s, index + 1);
830
831 if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
832 gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
833 offset, data);
834 return -EINVAL;
835 }
836 return 0;
837}
838
820static int cmd_reg_handler(struct parser_exec_state *s, 839static int cmd_reg_handler(struct parser_exec_state *s,
821 unsigned int offset, unsigned int index, char *cmd) 840 unsigned int offset, unsigned int index, char *cmd)
822{ 841{
@@ -824,23 +843,26 @@ static int cmd_reg_handler(struct parser_exec_state *s,
824 struct intel_gvt *gvt = vgpu->gvt; 843 struct intel_gvt *gvt = vgpu->gvt;
825 844
826 if (offset + 4 > gvt->device_info.mmio_size) { 845 if (offset + 4 > gvt->device_info.mmio_size) {
827 gvt_err("%s access to (%x) outside of MMIO range\n", 846 gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
828 cmd, offset); 847 cmd, offset);
829 return -EINVAL; 848 return -EINVAL;
830 } 849 }
831 850
832 if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) { 851 if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
833 gvt_err("vgpu%d: %s access to non-render register (%x)\n", 852 gvt_vgpu_err("%s access to non-render register (%x)\n",
834 s->vgpu->id, cmd, offset); 853 cmd, offset);
835 return 0; 854 return 0;
836 } 855 }
837 856
838 if (is_shadowed_mmio(offset)) { 857 if (is_shadowed_mmio(offset)) {
839 gvt_err("vgpu%d: found access of shadowed MMIO %x\n", 858 gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
840 s->vgpu->id, offset);
841 return 0; 859 return 0;
842 } 860 }
843 861
862 if (is_force_nonpriv_mmio(offset) &&
863 force_nonpriv_reg_handler(s, offset, index))
864 return -EINVAL;
865
844 if (offset == i915_mmio_reg_offset(DERRMR) || 866 if (offset == i915_mmio_reg_offset(DERRMR) ||
845 offset == i915_mmio_reg_offset(FORCEWAKE_MT)) { 867 offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
846 /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */ 868 /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
@@ -1008,7 +1030,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
1008 ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl"); 1030 ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
1009 else if (post_sync == 1) { 1031 else if (post_sync == 1) {
1010 /* check ggtt*/ 1032 /* check ggtt*/
1011 if ((cmd_val(s, 2) & (1 << 2))) { 1033 if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
1012 gma = cmd_val(s, 2) & GENMASK(31, 3); 1034 gma = cmd_val(s, 2) & GENMASK(31, 3);
1013 if (gmadr_bytes == 8) 1035 if (gmadr_bytes == 8)
1014 gma |= (cmd_gma_hi(s, 3)) << 32; 1036 gma |= (cmd_gma_hi(s, 3)) << 32;
@@ -1129,6 +1151,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
1129 struct mi_display_flip_command_info *info) 1151 struct mi_display_flip_command_info *info)
1130{ 1152{
1131 struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; 1153 struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1154 struct intel_vgpu *vgpu = s->vgpu;
1132 u32 dword0 = cmd_val(s, 0); 1155 u32 dword0 = cmd_val(s, 0);
1133 u32 dword1 = cmd_val(s, 1); 1156 u32 dword1 = cmd_val(s, 1);
1134 u32 dword2 = cmd_val(s, 2); 1157 u32 dword2 = cmd_val(s, 2);
@@ -1167,7 +1190,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
1167 break; 1190 break;
1168 1191
1169 default: 1192 default:
1170 gvt_err("unknown plane code %d\n", plane); 1193 gvt_vgpu_err("unknown plane code %d\n", plane);
1171 return -EINVAL; 1194 return -EINVAL;
1172 } 1195 }
1173 1196
@@ -1274,25 +1297,26 @@ static int update_plane_mmio_from_mi_display_flip(
1274static int cmd_handler_mi_display_flip(struct parser_exec_state *s) 1297static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
1275{ 1298{
1276 struct mi_display_flip_command_info info; 1299 struct mi_display_flip_command_info info;
1300 struct intel_vgpu *vgpu = s->vgpu;
1277 int ret; 1301 int ret;
1278 int i; 1302 int i;
1279 int len = cmd_length(s); 1303 int len = cmd_length(s);
1280 1304
1281 ret = decode_mi_display_flip(s, &info); 1305 ret = decode_mi_display_flip(s, &info);
1282 if (ret) { 1306 if (ret) {
1283 gvt_err("fail to decode MI display flip command\n"); 1307 gvt_vgpu_err("fail to decode MI display flip command\n");
1284 return ret; 1308 return ret;
1285 } 1309 }
1286 1310
1287 ret = check_mi_display_flip(s, &info); 1311 ret = check_mi_display_flip(s, &info);
1288 if (ret) { 1312 if (ret) {
1289 gvt_err("invalid MI display flip command\n"); 1313 gvt_vgpu_err("invalid MI display flip command\n");
1290 return ret; 1314 return ret;
1291 } 1315 }
1292 1316
1293 ret = update_plane_mmio_from_mi_display_flip(s, &info); 1317 ret = update_plane_mmio_from_mi_display_flip(s, &info);
1294 if (ret) { 1318 if (ret) {
1295 gvt_err("fail to update plane mmio\n"); 1319 gvt_vgpu_err("fail to update plane mmio\n");
1296 return ret; 1320 return ret;
1297 } 1321 }
1298 1322
@@ -1350,7 +1374,8 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
1350 int ret; 1374 int ret;
1351 1375
1352 if (op_size > max_surface_size) { 1376 if (op_size > max_surface_size) {
1353 gvt_err("command address audit fail name %s\n", s->info->name); 1377 gvt_vgpu_err("command address audit fail name %s\n",
1378 s->info->name);
1354 return -EINVAL; 1379 return -EINVAL;
1355 } 1380 }
1356 1381
@@ -1367,7 +1392,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
1367 } 1392 }
1368 return 0; 1393 return 0;
1369err: 1394err:
1370 gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n", 1395 gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
1371 s->info->name, guest_gma, op_size); 1396 s->info->name, guest_gma, op_size);
1372 1397
1373 pr_err("cmd dump: "); 1398 pr_err("cmd dump: ");
@@ -1412,8 +1437,10 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
1412 1437
1413static inline int unexpected_cmd(struct parser_exec_state *s) 1438static inline int unexpected_cmd(struct parser_exec_state *s)
1414{ 1439{
1415 gvt_err("vgpu%d: Unexpected %s in command buffer!\n", 1440 struct intel_vgpu *vgpu = s->vgpu;
1416 s->vgpu->id, s->info->name); 1441
1442 gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
1443
1417 return -EINVAL; 1444 return -EINVAL;
1418} 1445}
1419 1446
@@ -1516,7 +1543,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
1516 while (gma != end_gma) { 1543 while (gma != end_gma) {
1517 gpa = intel_vgpu_gma_to_gpa(mm, gma); 1544 gpa = intel_vgpu_gma_to_gpa(mm, gma);
1518 if (gpa == INTEL_GVT_INVALID_ADDR) { 1545 if (gpa == INTEL_GVT_INVALID_ADDR) {
1519 gvt_err("invalid gma address: %lx\n", gma); 1546 gvt_vgpu_err("invalid gma address: %lx\n", gma);
1520 return -EFAULT; 1547 return -EFAULT;
1521 } 1548 }
1522 1549
@@ -1557,6 +1584,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
1557 uint32_t bb_size = 0; 1584 uint32_t bb_size = 0;
1558 uint32_t cmd_len = 0; 1585 uint32_t cmd_len = 0;
1559 bool met_bb_end = false; 1586 bool met_bb_end = false;
1587 struct intel_vgpu *vgpu = s->vgpu;
1560 u32 cmd; 1588 u32 cmd;
1561 1589
1562 /* get the start gm address of the batch buffer */ 1590 /* get the start gm address of the batch buffer */
@@ -1565,7 +1593,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
1565 1593
1566 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); 1594 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1567 if (info == NULL) { 1595 if (info == NULL) {
1568 gvt_err("unknown cmd 0x%x, opcode=0x%x\n", 1596 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
1569 cmd, get_opcode(cmd, s->ring_id)); 1597 cmd, get_opcode(cmd, s->ring_id));
1570 return -EINVAL; 1598 return -EINVAL;
1571 } 1599 }
@@ -1574,7 +1602,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
1574 gma, gma + 4, &cmd); 1602 gma, gma + 4, &cmd);
1575 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); 1603 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1576 if (info == NULL) { 1604 if (info == NULL) {
1577 gvt_err("unknown cmd 0x%x, opcode=0x%x\n", 1605 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
1578 cmd, get_opcode(cmd, s->ring_id)); 1606 cmd, get_opcode(cmd, s->ring_id));
1579 return -EINVAL; 1607 return -EINVAL;
1580 } 1608 }
@@ -1599,6 +1627,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
1599static int perform_bb_shadow(struct parser_exec_state *s) 1627static int perform_bb_shadow(struct parser_exec_state *s)
1600{ 1628{
1601 struct intel_shadow_bb_entry *entry_obj; 1629 struct intel_shadow_bb_entry *entry_obj;
1630 struct intel_vgpu *vgpu = s->vgpu;
1602 unsigned long gma = 0; 1631 unsigned long gma = 0;
1603 uint32_t bb_size; 1632 uint32_t bb_size;
1604 void *dst = NULL; 1633 void *dst = NULL;
@@ -1633,7 +1662,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
1633 1662
1634 ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false); 1663 ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
1635 if (ret) { 1664 if (ret) {
1636 gvt_err("failed to set shadow batch to CPU\n"); 1665 gvt_vgpu_err("failed to set shadow batch to CPU\n");
1637 goto unmap_src; 1666 goto unmap_src;
1638 } 1667 }
1639 1668
@@ -1645,7 +1674,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
1645 gma, gma + bb_size, 1674 gma, gma + bb_size,
1646 dst); 1675 dst);
1647 if (ret) { 1676 if (ret) {
1648 gvt_err("fail to copy guest ring buffer\n"); 1677 gvt_vgpu_err("fail to copy guest ring buffer\n");
1649 goto unmap_src; 1678 goto unmap_src;
1650 } 1679 }
1651 1680
@@ -1676,15 +1705,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
1676{ 1705{
1677 bool second_level; 1706 bool second_level;
1678 int ret = 0; 1707 int ret = 0;
1708 struct intel_vgpu *vgpu = s->vgpu;
1679 1709
1680 if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { 1710 if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
1681 gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n"); 1711 gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
1682 return -EINVAL; 1712 return -EINVAL;
1683 } 1713 }
1684 1714
1685 second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1; 1715 second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
1686 if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) { 1716 if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
1687 gvt_err("Jumping to 2nd level BB from RB is not allowed\n"); 1717 gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
1688 return -EINVAL; 1718 return -EINVAL;
1689 } 1719 }
1690 1720
@@ -1702,7 +1732,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
1702 if (batch_buffer_needs_scan(s)) { 1732 if (batch_buffer_needs_scan(s)) {
1703 ret = perform_bb_shadow(s); 1733 ret = perform_bb_shadow(s);
1704 if (ret < 0) 1734 if (ret < 0)
1705 gvt_err("invalid shadow batch buffer\n"); 1735 gvt_vgpu_err("invalid shadow batch buffer\n");
1706 } else { 1736 } else {
1707 /* emulate a batch buffer end to do return right */ 1737 /* emulate a batch buffer end to do return right */
1708 ret = cmd_handler_mi_batch_buffer_end(s); 1738 ret = cmd_handler_mi_batch_buffer_end(s);
@@ -2429,6 +2459,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
2429 int ret = 0; 2459 int ret = 0;
2430 cycles_t t0, t1, t2; 2460 cycles_t t0, t1, t2;
2431 struct parser_exec_state s_before_advance_custom; 2461 struct parser_exec_state s_before_advance_custom;
2462 struct intel_vgpu *vgpu = s->vgpu;
2432 2463
2433 t0 = get_cycles(); 2464 t0 = get_cycles();
2434 2465
@@ -2436,7 +2467,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
2436 2467
2437 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); 2468 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
2438 if (info == NULL) { 2469 if (info == NULL) {
2439 gvt_err("unknown cmd 0x%x, opcode=0x%x\n", 2470 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
2440 cmd, get_opcode(cmd, s->ring_id)); 2471 cmd, get_opcode(cmd, s->ring_id));
2441 return -EINVAL; 2472 return -EINVAL;
2442 } 2473 }
@@ -2452,7 +2483,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
2452 if (info->handler) { 2483 if (info->handler) {
2453 ret = info->handler(s); 2484 ret = info->handler(s);
2454 if (ret < 0) { 2485 if (ret < 0) {
2455 gvt_err("%s handler error\n", info->name); 2486 gvt_vgpu_err("%s handler error\n", info->name);
2456 return ret; 2487 return ret;
2457 } 2488 }
2458 } 2489 }
@@ -2463,7 +2494,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
2463 if (!(info->flag & F_IP_ADVANCE_CUSTOM)) { 2494 if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
2464 ret = cmd_advance_default(s); 2495 ret = cmd_advance_default(s);
2465 if (ret) { 2496 if (ret) {
2466 gvt_err("%s IP advance error\n", info->name); 2497 gvt_vgpu_err("%s IP advance error\n", info->name);
2467 return ret; 2498 return ret;
2468 } 2499 }
2469 } 2500 }
@@ -2486,6 +2517,7 @@ static int command_scan(struct parser_exec_state *s,
2486 2517
2487 unsigned long gma_head, gma_tail, gma_bottom; 2518 unsigned long gma_head, gma_tail, gma_bottom;
2488 int ret = 0; 2519 int ret = 0;
2520 struct intel_vgpu *vgpu = s->vgpu;
2489 2521
2490 gma_head = rb_start + rb_head; 2522 gma_head = rb_start + rb_head;
2491 gma_tail = rb_start + rb_tail; 2523 gma_tail = rb_start + rb_tail;
@@ -2497,7 +2529,7 @@ static int command_scan(struct parser_exec_state *s,
2497 if (s->buf_type == RING_BUFFER_INSTRUCTION) { 2529 if (s->buf_type == RING_BUFFER_INSTRUCTION) {
2498 if (!(s->ip_gma >= rb_start) || 2530 if (!(s->ip_gma >= rb_start) ||
2499 !(s->ip_gma < gma_bottom)) { 2531 !(s->ip_gma < gma_bottom)) {
2500 gvt_err("ip_gma %lx out of ring scope." 2532 gvt_vgpu_err("ip_gma %lx out of ring scope."
2501 "(base:0x%lx, bottom: 0x%lx)\n", 2533 "(base:0x%lx, bottom: 0x%lx)\n",
2502 s->ip_gma, rb_start, 2534 s->ip_gma, rb_start,
2503 gma_bottom); 2535 gma_bottom);
@@ -2505,7 +2537,7 @@ static int command_scan(struct parser_exec_state *s,
2505 return -EINVAL; 2537 return -EINVAL;
2506 } 2538 }
2507 if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) { 2539 if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
2508 gvt_err("ip_gma %lx out of range." 2540 gvt_vgpu_err("ip_gma %lx out of range."
2509 "base 0x%lx head 0x%lx tail 0x%lx\n", 2541 "base 0x%lx head 0x%lx tail 0x%lx\n",
2510 s->ip_gma, rb_start, 2542 s->ip_gma, rb_start,
2511 rb_head, rb_tail); 2543 rb_head, rb_tail);
@@ -2515,7 +2547,7 @@ static int command_scan(struct parser_exec_state *s,
2515 } 2547 }
2516 ret = cmd_parser_exec(s); 2548 ret = cmd_parser_exec(s);
2517 if (ret) { 2549 if (ret) {
2518 gvt_err("cmd parser error\n"); 2550 gvt_vgpu_err("cmd parser error\n");
2519 parser_exec_state_dump(s); 2551 parser_exec_state_dump(s);
2520 break; 2552 break;
2521 } 2553 }
@@ -2639,7 +2671,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2639 gma_head, gma_top, 2671 gma_head, gma_top,
2640 workload->shadow_ring_buffer_va); 2672 workload->shadow_ring_buffer_va);
2641 if (ret) { 2673 if (ret) {
2642 gvt_err("fail to copy guest ring buffer\n"); 2674 gvt_vgpu_err("fail to copy guest ring buffer\n");
2643 return ret; 2675 return ret;
2644 } 2676 }
2645 copy_len = gma_top - gma_head; 2677 copy_len = gma_top - gma_head;
@@ -2651,7 +2683,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2651 gma_head, gma_tail, 2683 gma_head, gma_tail,
2652 workload->shadow_ring_buffer_va + copy_len); 2684 workload->shadow_ring_buffer_va + copy_len);
2653 if (ret) { 2685 if (ret) {
2654 gvt_err("fail to copy guest ring buffer\n"); 2686 gvt_vgpu_err("fail to copy guest ring buffer\n");
2655 return ret; 2687 return ret;
2656 } 2688 }
2657 ring->tail += workload->rb_len; 2689 ring->tail += workload->rb_len;
@@ -2662,16 +2694,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2662int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) 2694int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
2663{ 2695{
2664 int ret; 2696 int ret;
2697 struct intel_vgpu *vgpu = workload->vgpu;
2665 2698
2666 ret = shadow_workload_ring_buffer(workload); 2699 ret = shadow_workload_ring_buffer(workload);
2667 if (ret) { 2700 if (ret) {
2668 gvt_err("fail to shadow workload ring_buffer\n"); 2701 gvt_vgpu_err("fail to shadow workload ring_buffer\n");
2669 return ret; 2702 return ret;
2670 } 2703 }
2671 2704
2672 ret = scan_workload(workload); 2705 ret = scan_workload(workload);
2673 if (ret) { 2706 if (ret) {
2674 gvt_err("scan workload error\n"); 2707 gvt_vgpu_err("scan workload error\n");
2675 return ret; 2708 return ret;
2676 } 2709 }
2677 return 0; 2710 return 0;
@@ -2681,6 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2681{ 2714{
2682 int ctx_size = wa_ctx->indirect_ctx.size; 2715 int ctx_size = wa_ctx->indirect_ctx.size;
2683 unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma; 2716 unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
2717 struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
2684 struct drm_i915_gem_object *obj; 2718 struct drm_i915_gem_object *obj;
2685 int ret = 0; 2719 int ret = 0;
2686 void *map; 2720 void *map;
@@ -2694,14 +2728,14 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2694 /* get the va of the shadow batch buffer */ 2728 /* get the va of the shadow batch buffer */
2695 map = i915_gem_object_pin_map(obj, I915_MAP_WB); 2729 map = i915_gem_object_pin_map(obj, I915_MAP_WB);
2696 if (IS_ERR(map)) { 2730 if (IS_ERR(map)) {
2697 gvt_err("failed to vmap shadow indirect ctx\n"); 2731 gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
2698 ret = PTR_ERR(map); 2732 ret = PTR_ERR(map);
2699 goto put_obj; 2733 goto put_obj;
2700 } 2734 }
2701 2735
2702 ret = i915_gem_object_set_to_cpu_domain(obj, false); 2736 ret = i915_gem_object_set_to_cpu_domain(obj, false);
2703 if (ret) { 2737 if (ret) {
2704 gvt_err("failed to set shadow indirect ctx to CPU\n"); 2738 gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
2705 goto unmap_src; 2739 goto unmap_src;
2706 } 2740 }
2707 2741
@@ -2710,7 +2744,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2710 guest_gma, guest_gma + ctx_size, 2744 guest_gma, guest_gma + ctx_size,
2711 map); 2745 map);
2712 if (ret) { 2746 if (ret) {
2713 gvt_err("fail to copy guest indirect ctx\n"); 2747 gvt_vgpu_err("fail to copy guest indirect ctx\n");
2714 goto unmap_src; 2748 goto unmap_src;
2715 } 2749 }
2716 2750
@@ -2744,13 +2778,14 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2744int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) 2778int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2745{ 2779{
2746 int ret; 2780 int ret;
2781 struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
2747 2782
2748 if (wa_ctx->indirect_ctx.size == 0) 2783 if (wa_ctx->indirect_ctx.size == 0)
2749 return 0; 2784 return 0;
2750 2785
2751 ret = shadow_indirect_ctx(wa_ctx); 2786 ret = shadow_indirect_ctx(wa_ctx);
2752 if (ret) { 2787 if (ret) {
2753 gvt_err("fail to shadow indirect ctx\n"); 2788 gvt_vgpu_err("fail to shadow indirect ctx\n");
2754 return ret; 2789 return ret;
2755 } 2790 }
2756 2791
@@ -2758,7 +2793,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2758 2793
2759 ret = scan_wa_ctx(wa_ctx); 2794 ret = scan_wa_ctx(wa_ctx);
2760 if (ret) { 2795 if (ret) {
2761 gvt_err("scan wa ctx error\n"); 2796 gvt_vgpu_err("scan wa ctx error\n");
2762 return ret; 2797 return ret;
2763 } 2798 }
2764 2799
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
index 68cba7bd980a..b0cff4dc2684 100644
--- a/drivers/gpu/drm/i915/gvt/debug.h
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -27,6 +27,14 @@
27#define gvt_err(fmt, args...) \ 27#define gvt_err(fmt, args...) \
28 DRM_ERROR("gvt: "fmt, ##args) 28 DRM_ERROR("gvt: "fmt, ##args)
29 29
30#define gvt_vgpu_err(fmt, args...) \
31do { \
32 if (IS_ERR_OR_NULL(vgpu)) \
33 DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \
34 else \
35 DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
36} while (0)
37
30#define gvt_dbg_core(fmt, args...) \ 38#define gvt_dbg_core(fmt, args...) \
31 DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args) 39 DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
32 40
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index bda85dff7b2a..42cd09ec63fa 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -52,16 +52,16 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
52 unsigned char chr = 0; 52 unsigned char chr = 0;
53 53
54 if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) { 54 if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
55 gvt_err("Driver tries to read EDID without proper sequence!\n"); 55 gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n");
56 return 0; 56 return 0;
57 } 57 }
58 if (edid->current_edid_read >= EDID_SIZE) { 58 if (edid->current_edid_read >= EDID_SIZE) {
59 gvt_err("edid_get_byte() exceeds the size of EDID!\n"); 59 gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n");
60 return 0; 60 return 0;
61 } 61 }
62 62
63 if (!edid->edid_available) { 63 if (!edid->edid_available) {
64 gvt_err("Reading EDID but EDID is not available!\n"); 64 gvt_vgpu_err("Reading EDID but EDID is not available!\n");
65 return 0; 65 return 0;
66 } 66 }
67 67
@@ -72,7 +72,7 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
72 chr = edid_data->edid_block[edid->current_edid_read]; 72 chr = edid_data->edid_block[edid->current_edid_read];
73 edid->current_edid_read++; 73 edid->current_edid_read++;
74 } else { 74 } else {
75 gvt_err("No EDID available during the reading?\n"); 75 gvt_vgpu_err("No EDID available during the reading?\n");
76 } 76 }
77 return chr; 77 return chr;
78} 78}
@@ -223,7 +223,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
223 vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE; 223 vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
224 break; 224 break;
225 default: 225 default:
226 gvt_err("Unknown/reserved GMBUS cycle detected!\n"); 226 gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
227 break; 227 break;
228 } 228 }
229 /* 229 /*
@@ -292,8 +292,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
292 */ 292 */
293 } else { 293 } else {
294 memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes); 294 memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
295 gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n", 295 gvt_vgpu_err("warning: gmbus3 read with nothing returned\n");
296 vgpu->id);
297 } 296 }
298 return 0; 297 return 0;
299} 298}
@@ -496,7 +495,8 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
496 unsigned char val = edid_get_byte(vgpu); 495 unsigned char val = edid_get_byte(vgpu);
497 496
498 aux_data_for_write = (val << 16); 497 aux_data_for_write = (val << 16);
499 } 498 } else
499 aux_data_for_write = (0xff << 16);
500 } 500 }
501 /* write the return value in AUX_CH_DATA reg which includes: 501 /* write the return value in AUX_CH_DATA reg which includes:
502 * ACK of I2C_WRITE 502 * ACK of I2C_WRITE
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 46eb9fd3c03f..f1f426a97aa9 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -172,6 +172,7 @@ static int emulate_execlist_ctx_schedule_out(
172 struct intel_vgpu_execlist *execlist, 172 struct intel_vgpu_execlist *execlist,
173 struct execlist_ctx_descriptor_format *ctx) 173 struct execlist_ctx_descriptor_format *ctx)
174{ 174{
175 struct intel_vgpu *vgpu = execlist->vgpu;
175 struct intel_vgpu_execlist_slot *running = execlist->running_slot; 176 struct intel_vgpu_execlist_slot *running = execlist->running_slot;
176 struct intel_vgpu_execlist_slot *pending = execlist->pending_slot; 177 struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
177 struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0]; 178 struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
@@ -183,7 +184,7 @@ static int emulate_execlist_ctx_schedule_out(
183 gvt_dbg_el("schedule out context id %x\n", ctx->context_id); 184 gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
184 185
185 if (WARN_ON(!same_context(ctx, execlist->running_context))) { 186 if (WARN_ON(!same_context(ctx, execlist->running_context))) {
186 gvt_err("schedule out context is not running context," 187 gvt_vgpu_err("schedule out context is not running context,"
187 "ctx id %x running ctx id %x\n", 188 "ctx id %x running ctx id %x\n",
188 ctx->context_id, 189 ctx->context_id,
189 execlist->running_context->context_id); 190 execlist->running_context->context_id);
@@ -254,7 +255,7 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
254 status.udw = vgpu_vreg(vgpu, status_reg + 4); 255 status.udw = vgpu_vreg(vgpu, status_reg + 4);
255 256
256 if (status.execlist_queue_full) { 257 if (status.execlist_queue_full) {
257 gvt_err("virtual execlist slots are full\n"); 258 gvt_vgpu_err("virtual execlist slots are full\n");
258 return NULL; 259 return NULL;
259 } 260 }
260 261
@@ -270,11 +271,12 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
270 271
271 struct execlist_ctx_descriptor_format *ctx0, *ctx1; 272 struct execlist_ctx_descriptor_format *ctx0, *ctx1;
272 struct execlist_context_status_format status; 273 struct execlist_context_status_format status;
274 struct intel_vgpu *vgpu = execlist->vgpu;
273 275
274 gvt_dbg_el("emulate schedule-in\n"); 276 gvt_dbg_el("emulate schedule-in\n");
275 277
276 if (!slot) { 278 if (!slot) {
277 gvt_err("no available execlist slot\n"); 279 gvt_vgpu_err("no available execlist slot\n");
278 return -EINVAL; 280 return -EINVAL;
279 } 281 }
280 282
@@ -375,7 +377,6 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
375 377
376 vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); 378 vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
377 if (IS_ERR(vma)) { 379 if (IS_ERR(vma)) {
378 gvt_err("Cannot pin\n");
379 return; 380 return;
380 } 381 }
381 382
@@ -428,7 +429,6 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
428 vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, 429 vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
429 0, CACHELINE_BYTES, 0); 430 0, CACHELINE_BYTES, 0);
430 if (IS_ERR(vma)) { 431 if (IS_ERR(vma)) {
431 gvt_err("Cannot pin indirect ctx obj\n");
432 return; 432 return;
433 } 433 }
434 434
@@ -561,6 +561,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
561{ 561{
562 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc; 562 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
563 struct intel_vgpu_mm *mm; 563 struct intel_vgpu_mm *mm;
564 struct intel_vgpu *vgpu = workload->vgpu;
564 int page_table_level; 565 int page_table_level;
565 u32 pdp[8]; 566 u32 pdp[8];
566 567
@@ -569,7 +570,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
569 } else if (desc->addressing_mode == 3) { /* legacy 64 bit */ 570 } else if (desc->addressing_mode == 3) { /* legacy 64 bit */
570 page_table_level = 4; 571 page_table_level = 4;
571 } else { 572 } else {
572 gvt_err("Advanced Context mode(SVM) is not supported!\n"); 573 gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
573 return -EINVAL; 574 return -EINVAL;
574 } 575 }
575 576
@@ -583,7 +584,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
583 mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT, 584 mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
584 pdp, page_table_level, 0); 585 pdp, page_table_level, 0);
585 if (IS_ERR(mm)) { 586 if (IS_ERR(mm)) {
586 gvt_err("fail to create mm object.\n"); 587 gvt_vgpu_err("fail to create mm object.\n");
587 return PTR_ERR(mm); 588 return PTR_ERR(mm);
588 } 589 }
589 } 590 }
@@ -609,7 +610,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
609 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, 610 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
610 (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT)); 611 (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
611 if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) { 612 if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
612 gvt_err("invalid guest context LRCA: %x\n", desc->lrca); 613 gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
613 return -EINVAL; 614 return -EINVAL;
614 } 615 }
615 616
@@ -724,8 +725,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
724 continue; 725 continue;
725 726
726 if (!desc[i]->privilege_access) { 727 if (!desc[i]->privilege_access) {
727 gvt_err("vgpu%d: unexpected GGTT elsp submission\n", 728 gvt_vgpu_err("unexpected GGTT elsp submission\n");
728 vgpu->id);
729 return -EINVAL; 729 return -EINVAL;
730 } 730 }
731 731
@@ -735,15 +735,13 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
735 } 735 }
736 736
737 if (!valid_desc_bitmap) { 737 if (!valid_desc_bitmap) {
738 gvt_err("vgpu%d: no valid desc in a elsp submission\n", 738 gvt_vgpu_err("no valid desc in a elsp submission\n");
739 vgpu->id);
740 return -EINVAL; 739 return -EINVAL;
741 } 740 }
742 741
743 if (!test_bit(0, (void *)&valid_desc_bitmap) && 742 if (!test_bit(0, (void *)&valid_desc_bitmap) &&
744 test_bit(1, (void *)&valid_desc_bitmap)) { 743 test_bit(1, (void *)&valid_desc_bitmap)) {
745 gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n", 744 gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
746 vgpu->id);
747 return -EINVAL; 745 return -EINVAL;
748 } 746 }
749 747
@@ -752,8 +750,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
752 ret = submit_context(vgpu, ring_id, &valid_desc[i], 750 ret = submit_context(vgpu, ring_id, &valid_desc[i],
753 emulate_schedule_in); 751 emulate_schedule_in);
754 if (ret) { 752 if (ret) {
755 gvt_err("vgpu%d: fail to schedule workload\n", 753 gvt_vgpu_err("fail to schedule workload\n");
756 vgpu->id);
757 return ret; 754 return ret;
758 } 755 }
759 emulate_schedule_in = false; 756 emulate_schedule_in = false;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 6a5ff23ded90..b832bea64e03 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -49,8 +49,8 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
49{ 49{
50 if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size 50 if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
51 && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) { 51 && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
52 gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n", 52 gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
53 vgpu->id, addr, size); 53 addr, size);
54 return false; 54 return false;
55 } 55 }
56 return true; 56 return true;
@@ -430,7 +430,7 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
430 430
431 mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn); 431 mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
432 if (mfn == INTEL_GVT_INVALID_ADDR) { 432 if (mfn == INTEL_GVT_INVALID_ADDR) {
433 gvt_err("fail to translate gfn: 0x%lx\n", gfn); 433 gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
434 return -ENXIO; 434 return -ENXIO;
435 } 435 }
436 436
@@ -611,7 +611,7 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu,
611 611
612 daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL); 612 daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
613 if (dma_mapping_error(kdev, daddr)) { 613 if (dma_mapping_error(kdev, daddr)) {
614 gvt_err("fail to map dma addr\n"); 614 gvt_vgpu_err("fail to map dma addr\n");
615 return -EINVAL; 615 return -EINVAL;
616 } 616 }
617 617
@@ -735,7 +735,7 @@ retry:
735 if (reclaim_one_mm(vgpu->gvt)) 735 if (reclaim_one_mm(vgpu->gvt))
736 goto retry; 736 goto retry;
737 737
738 gvt_err("fail to allocate ppgtt shadow page\n"); 738 gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
739 return ERR_PTR(-ENOMEM); 739 return ERR_PTR(-ENOMEM);
740 } 740 }
741 741
@@ -750,14 +750,14 @@ retry:
750 */ 750 */
751 ret = init_shadow_page(vgpu, &spt->shadow_page, type); 751 ret = init_shadow_page(vgpu, &spt->shadow_page, type);
752 if (ret) { 752 if (ret) {
753 gvt_err("fail to initialize shadow page for spt\n"); 753 gvt_vgpu_err("fail to initialize shadow page for spt\n");
754 goto err; 754 goto err;
755 } 755 }
756 756
757 ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page, 757 ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
758 gfn, ppgtt_write_protection_handler, NULL); 758 gfn, ppgtt_write_protection_handler, NULL);
759 if (ret) { 759 if (ret) {
760 gvt_err("fail to initialize guest page for spt\n"); 760 gvt_vgpu_err("fail to initialize guest page for spt\n");
761 goto err; 761 goto err;
762 } 762 }
763 763
@@ -776,8 +776,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
776 if (p) 776 if (p)
777 return shadow_page_to_ppgtt_spt(p); 777 return shadow_page_to_ppgtt_spt(p);
778 778
779 gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n", 779 gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
780 vgpu->id, mfn);
781 return NULL; 780 return NULL;
782} 781}
783 782
@@ -827,8 +826,8 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
827 } 826 }
828 s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e)); 827 s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
829 if (!s) { 828 if (!s) {
830 gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n", 829 gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
831 vgpu->id, ops->get_pfn(e)); 830 ops->get_pfn(e));
832 return -ENXIO; 831 return -ENXIO;
833 } 832 }
834 return ppgtt_invalidate_shadow_page(s); 833 return ppgtt_invalidate_shadow_page(s);
@@ -836,6 +835,7 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
836 835
837static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) 836static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
838{ 837{
838 struct intel_vgpu *vgpu = spt->vgpu;
839 struct intel_gvt_gtt_entry e; 839 struct intel_gvt_gtt_entry e;
840 unsigned long index; 840 unsigned long index;
841 int ret; 841 int ret;
@@ -854,7 +854,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
854 854
855 for_each_present_shadow_entry(spt, &e, index) { 855 for_each_present_shadow_entry(spt, &e, index) {
856 if (!gtt_type_is_pt(get_next_pt_type(e.type))) { 856 if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
857 gvt_err("GVT doesn't support pse bit for now\n"); 857 gvt_vgpu_err("GVT doesn't support pse bit for now\n");
858 return -EINVAL; 858 return -EINVAL;
859 } 859 }
860 ret = ppgtt_invalidate_shadow_page_by_shadow_entry( 860 ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
@@ -868,8 +868,8 @@ release:
868 ppgtt_free_shadow_page(spt); 868 ppgtt_free_shadow_page(spt);
869 return 0; 869 return 0;
870fail: 870fail:
871 gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n", 871 gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
872 spt->vgpu->id, spt, e.val64, e.type); 872 spt, e.val64, e.type);
873 return ret; 873 return ret;
874} 874}
875 875
@@ -914,8 +914,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
914 } 914 }
915 return s; 915 return s;
916fail: 916fail:
917 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", 917 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
918 vgpu->id, s, we->val64, we->type); 918 s, we->val64, we->type);
919 return ERR_PTR(ret); 919 return ERR_PTR(ret);
920} 920}
921 921
@@ -953,7 +953,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
953 953
954 for_each_present_guest_entry(spt, &ge, i) { 954 for_each_present_guest_entry(spt, &ge, i) {
955 if (!gtt_type_is_pt(get_next_pt_type(ge.type))) { 955 if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
956 gvt_err("GVT doesn't support pse bit now\n"); 956 gvt_vgpu_err("GVT doesn't support pse bit now\n");
957 ret = -EINVAL; 957 ret = -EINVAL;
958 goto fail; 958 goto fail;
959 } 959 }
@@ -969,8 +969,8 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
969 } 969 }
970 return 0; 970 return 0;
971fail: 971fail:
972 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", 972 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
973 vgpu->id, spt, ge.val64, ge.type); 973 spt, ge.val64, ge.type);
974 return ret; 974 return ret;
975} 975}
976 976
@@ -999,7 +999,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
999 struct intel_vgpu_ppgtt_spt *s = 999 struct intel_vgpu_ppgtt_spt *s =
1000 ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e)); 1000 ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
1001 if (!s) { 1001 if (!s) {
1002 gvt_err("fail to find guest page\n"); 1002 gvt_vgpu_err("fail to find guest page\n");
1003 ret = -ENXIO; 1003 ret = -ENXIO;
1004 goto fail; 1004 goto fail;
1005 } 1005 }
@@ -1011,8 +1011,8 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
1011 ppgtt_set_shadow_entry(spt, &e, index); 1011 ppgtt_set_shadow_entry(spt, &e, index);
1012 return 0; 1012 return 0;
1013fail: 1013fail:
1014 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", 1014 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1015 vgpu->id, spt, e.val64, e.type); 1015 spt, e.val64, e.type);
1016 return ret; 1016 return ret;
1017} 1017}
1018 1018
@@ -1046,8 +1046,8 @@ static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
1046 } 1046 }
1047 return 0; 1047 return 0;
1048fail: 1048fail:
1049 gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id, 1049 gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
1050 spt, we->val64, we->type); 1050 spt, we->val64, we->type);
1051 return ret; 1051 return ret;
1052} 1052}
1053 1053
@@ -1250,8 +1250,8 @@ static int ppgtt_handle_guest_write_page_table(
1250 } 1250 }
1251 return 0; 1251 return 0;
1252fail: 1252fail:
1253 gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n", 1253 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
1254 vgpu->id, spt, we->val64, we->type); 1254 spt, we->val64, we->type);
1255 return ret; 1255 return ret;
1256} 1256}
1257 1257
@@ -1493,7 +1493,7 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
1493 1493
1494 spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge); 1494 spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
1495 if (IS_ERR(spt)) { 1495 if (IS_ERR(spt)) {
1496 gvt_err("fail to populate guest root pointer\n"); 1496 gvt_vgpu_err("fail to populate guest root pointer\n");
1497 ret = PTR_ERR(spt); 1497 ret = PTR_ERR(spt);
1498 goto fail; 1498 goto fail;
1499 } 1499 }
@@ -1566,7 +1566,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
1566 1566
1567 ret = gtt->mm_alloc_page_table(mm); 1567 ret = gtt->mm_alloc_page_table(mm);
1568 if (ret) { 1568 if (ret) {
1569 gvt_err("fail to allocate page table for mm\n"); 1569 gvt_vgpu_err("fail to allocate page table for mm\n");
1570 goto fail; 1570 goto fail;
1571 } 1571 }
1572 1572
@@ -1584,7 +1584,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
1584 } 1584 }
1585 return mm; 1585 return mm;
1586fail: 1586fail:
1587 gvt_err("fail to create mm\n"); 1587 gvt_vgpu_err("fail to create mm\n");
1588 if (mm) 1588 if (mm)
1589 intel_gvt_mm_unreference(mm); 1589 intel_gvt_mm_unreference(mm);
1590 return ERR_PTR(ret); 1590 return ERR_PTR(ret);
@@ -1760,7 +1760,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
1760 mm->page_table_level, gma, gpa); 1760 mm->page_table_level, gma, gpa);
1761 return gpa; 1761 return gpa;
1762err: 1762err:
1763 gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma); 1763 gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
1764 return INTEL_GVT_INVALID_ADDR; 1764 return INTEL_GVT_INVALID_ADDR;
1765} 1765}
1766 1766
@@ -1836,13 +1836,16 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1836 if (ops->test_present(&e)) { 1836 if (ops->test_present(&e)) {
1837 ret = gtt_entry_p2m(vgpu, &e, &m); 1837 ret = gtt_entry_p2m(vgpu, &e, &m);
1838 if (ret) { 1838 if (ret) {
1839 gvt_err("vgpu%d: fail to translate guest gtt entry\n", 1839 gvt_vgpu_err("fail to translate guest gtt entry\n");
1840 vgpu->id); 1840 /* guest driver may read/write the entry when partial
1841 return ret; 1841 * update the entry in this situation p2m will fail
1842 * settting the shadow entry to point to a scratch page
1843 */
1844 ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
1842 } 1845 }
1843 } else { 1846 } else {
1844 m = e; 1847 m = e;
1845 m.val64 = 0; 1848 ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
1846 } 1849 }
1847 1850
1848 ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index); 1851 ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
@@ -1893,14 +1896,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
1893 1896
1894 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); 1897 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
1895 if (!scratch_pt) { 1898 if (!scratch_pt) {
1896 gvt_err("fail to allocate scratch page\n"); 1899 gvt_vgpu_err("fail to allocate scratch page\n");
1897 return -ENOMEM; 1900 return -ENOMEM;
1898 } 1901 }
1899 1902
1900 daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 1903 daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
1901 4096, PCI_DMA_BIDIRECTIONAL); 1904 4096, PCI_DMA_BIDIRECTIONAL);
1902 if (dma_mapping_error(dev, daddr)) { 1905 if (dma_mapping_error(dev, daddr)) {
1903 gvt_err("fail to dmamap scratch_pt\n"); 1906 gvt_vgpu_err("fail to dmamap scratch_pt\n");
1904 __free_page(virt_to_page(scratch_pt)); 1907 __free_page(virt_to_page(scratch_pt));
1905 return -ENOMEM; 1908 return -ENOMEM;
1906 } 1909 }
@@ -2003,7 +2006,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2003 ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT, 2006 ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
2004 NULL, 1, 0); 2007 NULL, 1, 0);
2005 if (IS_ERR(ggtt_mm)) { 2008 if (IS_ERR(ggtt_mm)) {
2006 gvt_err("fail to create mm for ggtt.\n"); 2009 gvt_vgpu_err("fail to create mm for ggtt.\n");
2007 return PTR_ERR(ggtt_mm); 2010 return PTR_ERR(ggtt_mm);
2008 } 2011 }
2009 2012
@@ -2076,7 +2079,6 @@ static int setup_spt_oos(struct intel_gvt *gvt)
2076 for (i = 0; i < preallocated_oos_pages; i++) { 2079 for (i = 0; i < preallocated_oos_pages; i++) {
2077 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL); 2080 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
2078 if (!oos_page) { 2081 if (!oos_page) {
2079 gvt_err("fail to pre-allocate oos page\n");
2080 ret = -ENOMEM; 2082 ret = -ENOMEM;
2081 goto fail; 2083 goto fail;
2082 } 2084 }
@@ -2166,7 +2168,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
2166 mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT, 2168 mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
2167 pdp, page_table_level, 0); 2169 pdp, page_table_level, 0);
2168 if (IS_ERR(mm)) { 2170 if (IS_ERR(mm)) {
2169 gvt_err("fail to create mm\n"); 2171 gvt_vgpu_err("fail to create mm\n");
2170 return PTR_ERR(mm); 2172 return PTR_ERR(mm);
2171 } 2173 }
2172 } 2174 }
@@ -2196,7 +2198,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
2196 2198
2197 mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp); 2199 mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
2198 if (!mm) { 2200 if (!mm) {
2199 gvt_err("fail to find ppgtt instance.\n"); 2201 gvt_vgpu_err("fail to find ppgtt instance.\n");
2200 return -EINVAL; 2202 return -EINVAL;
2201 } 2203 }
2202 intel_gvt_mm_unreference(mm); 2204 intel_gvt_mm_unreference(mm);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 23791920ced1..6dfc48b63b71 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -162,7 +162,6 @@ struct intel_vgpu {
162 atomic_t running_workload_num; 162 atomic_t running_workload_num;
163 DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); 163 DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
164 struct i915_gem_context *shadow_ctx; 164 struct i915_gem_context *shadow_ctx;
165 struct notifier_block shadow_ctx_notifier_block;
166 165
167#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) 166#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
168 struct { 167 struct {
@@ -233,6 +232,7 @@ struct intel_gvt {
233 struct intel_gvt_gtt gtt; 232 struct intel_gvt_gtt gtt;
234 struct intel_gvt_opregion opregion; 233 struct intel_gvt_opregion opregion;
235 struct intel_gvt_workload_scheduler scheduler; 234 struct intel_gvt_workload_scheduler scheduler;
235 struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
236 DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS); 236 DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
237 struct intel_vgpu_type *types; 237 struct intel_vgpu_type *types;
238 unsigned int num_types; 238 unsigned int num_types;
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 8e43395c748a..6da9ae1618e3 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -181,11 +181,9 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
181 GVT_FAILSAFE_UNSUPPORTED_GUEST); 181 GVT_FAILSAFE_UNSUPPORTED_GUEST);
182 182
183 if (!vgpu->mmio.disable_warn_untrack) { 183 if (!vgpu->mmio.disable_warn_untrack) {
184 gvt_err("vgpu%d: found oob fence register access\n", 184 gvt_vgpu_err("found oob fence register access\n");
185 vgpu->id); 185 gvt_vgpu_err("total fence %d, access fence %d\n",
186 gvt_err("vgpu%d: total fence %d, access fence %d\n", 186 vgpu_fence_sz(vgpu), fence_num);
187 vgpu->id, vgpu_fence_sz(vgpu),
188 fence_num);
189 } 187 }
190 memset(p_data, 0, bytes); 188 memset(p_data, 0, bytes);
191 return -EINVAL; 189 return -EINVAL;
@@ -249,7 +247,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
249 break; 247 break;
250 default: 248 default:
251 /*should not hit here*/ 249 /*should not hit here*/
252 gvt_err("invalid forcewake offset 0x%x\n", offset); 250 gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
253 return -EINVAL; 251 return -EINVAL;
254 } 252 }
255 } else { 253 } else {
@@ -530,7 +528,7 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
530 fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2; 528 fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
531 fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK; 529 fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
532 } else { 530 } else {
533 gvt_err("Invalid train pattern %d\n", train_pattern); 531 gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
534 return -EINVAL; 532 return -EINVAL;
535 } 533 }
536 534
@@ -588,7 +586,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
588 else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX) 586 else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
589 index = FDI_RX_IMR_TO_PIPE(offset); 587 index = FDI_RX_IMR_TO_PIPE(offset);
590 else { 588 else {
591 gvt_err("Unsupport registers %x\n", offset); 589 gvt_vgpu_err("Unsupport registers %x\n", offset);
592 return -EINVAL; 590 return -EINVAL;
593 } 591 }
594 592
@@ -818,7 +816,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
818 u32 data; 816 u32 data;
819 817
820 if (!dpy_is_valid_port(port_index)) { 818 if (!dpy_is_valid_port(port_index)) {
821 gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id); 819 gvt_vgpu_err("Unsupported DP port access!\n");
822 return 0; 820 return 0;
823 } 821 }
824 822
@@ -972,6 +970,14 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
972 return 0; 970 return 0;
973} 971}
974 972
973static int mbctl_write(struct intel_vgpu *vgpu, unsigned int offset,
974 void *p_data, unsigned int bytes)
975{
976 *(u32 *)p_data &= (~GEN6_MBCTL_ENABLE_BOOT_FETCH);
977 write_vreg(vgpu, offset, p_data, bytes);
978 return 0;
979}
980
975static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 981static int vga_control_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
976 void *p_data, unsigned int bytes) 982 void *p_data, unsigned int bytes)
977{ 983{
@@ -1016,8 +1022,7 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
1016 1022
1017 if (i == num) { 1023 if (i == num) {
1018 if (num == SBI_REG_MAX) { 1024 if (num == SBI_REG_MAX) {
1019 gvt_err("vgpu%d: SBI caching meets maximum limits\n", 1025 gvt_vgpu_err("SBI caching meets maximum limits\n");
1020 vgpu->id);
1021 return; 1026 return;
1022 } 1027 }
1023 display->sbi.number++; 1028 display->sbi.number++;
@@ -1097,7 +1102,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
1097 break; 1102 break;
1098 } 1103 }
1099 if (invalid_read) 1104 if (invalid_read)
1100 gvt_err("invalid pvinfo read: [%x:%x] = %x\n", 1105 gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
1101 offset, bytes, *(u32 *)p_data); 1106 offset, bytes, *(u32 *)p_data);
1102 vgpu->pv_notified = true; 1107 vgpu->pv_notified = true;
1103 return 0; 1108 return 0;
@@ -1125,7 +1130,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
1125 case 1: /* Remove this in guest driver. */ 1130 case 1: /* Remove this in guest driver. */
1126 break; 1131 break;
1127 default: 1132 default:
1128 gvt_err("Invalid PV notification %d\n", notification); 1133 gvt_vgpu_err("Invalid PV notification %d\n", notification);
1129 } 1134 }
1130 return ret; 1135 return ret;
1131} 1136}
@@ -1181,7 +1186,7 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1181 enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE); 1186 enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
1182 break; 1187 break;
1183 default: 1188 default:
1184 gvt_err("invalid pvinfo write offset %x bytes %x data %x\n", 1189 gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
1185 offset, bytes, data); 1190 offset, bytes, data);
1186 break; 1191 break;
1187 } 1192 }
@@ -1415,7 +1420,8 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1415 if (execlist->elsp_dwords.index == 3) { 1420 if (execlist->elsp_dwords.index == 3) {
1416 ret = intel_vgpu_submit_execlist(vgpu, ring_id); 1421 ret = intel_vgpu_submit_execlist(vgpu, ring_id);
1417 if(ret) 1422 if(ret)
1418 gvt_err("fail submit workload on ring %d\n", ring_id); 1423 gvt_vgpu_err("fail submit workload on ring %d\n",
1424 ring_id);
1419 } 1425 }
1420 1426
1421 ++execlist->elsp_dwords.index; 1427 ++execlist->elsp_dwords.index;
@@ -2240,7 +2246,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
2240 MMIO_D(0x7180, D_ALL); 2246 MMIO_D(0x7180, D_ALL);
2241 MMIO_D(0x7408, D_ALL); 2247 MMIO_D(0x7408, D_ALL);
2242 MMIO_D(0x7c00, D_ALL); 2248 MMIO_D(0x7c00, D_ALL);
2243 MMIO_D(GEN6_MBCTL, D_ALL); 2249 MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
2244 MMIO_D(0x911c, D_ALL); 2250 MMIO_D(0x911c, D_ALL);
2245 MMIO_D(0x9120, D_ALL); 2251 MMIO_D(0x9120, D_ALL);
2246 MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL); 2252 MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2988,3 +2994,20 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
2988 write_vreg(vgpu, offset, p_data, bytes); 2994 write_vreg(vgpu, offset, p_data, bytes);
2989 return 0; 2995 return 0;
2990} 2996}
2997
2998/**
2999 * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
3000 * force-nopriv register
3001 *
3002 * @gvt: a GVT device
3003 * @offset: register offset
3004 *
3005 * Returns:
3006 * True if the register is in force-nonpriv whitelist;
3007 * False if outside;
3008 */
3009bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
3010 unsigned int offset)
3011{
3012 return in_whitelist(offset);
3013}
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 84d801638ede..d641214578a7 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -426,7 +426,7 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
426 426
427static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) 427static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
428{ 428{
429 struct intel_vgpu *vgpu; 429 struct intel_vgpu *vgpu = NULL;
430 struct intel_vgpu_type *type; 430 struct intel_vgpu_type *type;
431 struct device *pdev; 431 struct device *pdev;
432 void *gvt; 432 void *gvt;
@@ -437,7 +437,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
437 437
438 type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); 438 type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
439 if (!type) { 439 if (!type) {
440 gvt_err("failed to find type %s to create\n", 440 gvt_vgpu_err("failed to find type %s to create\n",
441 kobject_name(kobj)); 441 kobject_name(kobj));
442 ret = -EINVAL; 442 ret = -EINVAL;
443 goto out; 443 goto out;
@@ -446,7 +446,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
446 vgpu = intel_gvt_ops->vgpu_create(gvt, type); 446 vgpu = intel_gvt_ops->vgpu_create(gvt, type);
447 if (IS_ERR_OR_NULL(vgpu)) { 447 if (IS_ERR_OR_NULL(vgpu)) {
448 ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); 448 ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
449 gvt_err("failed to create intel vgpu: %d\n", ret); 449 gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
450 goto out; 450 goto out;
451 } 451 }
452 452
@@ -526,7 +526,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
526 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, 526 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
527 &vgpu->vdev.iommu_notifier); 527 &vgpu->vdev.iommu_notifier);
528 if (ret != 0) { 528 if (ret != 0) {
529 gvt_err("vfio_register_notifier for iommu failed: %d\n", ret); 529 gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
530 ret);
530 goto out; 531 goto out;
531 } 532 }
532 533
@@ -534,7 +535,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
534 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, 535 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
535 &vgpu->vdev.group_notifier); 536 &vgpu->vdev.group_notifier);
536 if (ret != 0) { 537 if (ret != 0) {
537 gvt_err("vfio_register_notifier for group failed: %d\n", ret); 538 gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
539 ret);
538 goto undo_iommu; 540 goto undo_iommu;
539 } 541 }
540 542
@@ -635,7 +637,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
635 637
636 638
637 if (index >= VFIO_PCI_NUM_REGIONS) { 639 if (index >= VFIO_PCI_NUM_REGIONS) {
638 gvt_err("invalid index: %u\n", index); 640 gvt_vgpu_err("invalid index: %u\n", index);
639 return -EINVAL; 641 return -EINVAL;
640 } 642 }
641 643
@@ -669,7 +671,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
669 case VFIO_PCI_VGA_REGION_INDEX: 671 case VFIO_PCI_VGA_REGION_INDEX:
670 case VFIO_PCI_ROM_REGION_INDEX: 672 case VFIO_PCI_ROM_REGION_INDEX:
671 default: 673 default:
672 gvt_err("unsupported region: %u\n", index); 674 gvt_vgpu_err("unsupported region: %u\n", index);
673 } 675 }
674 676
675 return ret == 0 ? count : ret; 677 return ret == 0 ? count : ret;
@@ -861,7 +863,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
861 863
862 trigger = eventfd_ctx_fdget(fd); 864 trigger = eventfd_ctx_fdget(fd);
863 if (IS_ERR(trigger)) { 865 if (IS_ERR(trigger)) {
864 gvt_err("eventfd_ctx_fdget failed\n"); 866 gvt_vgpu_err("eventfd_ctx_fdget failed\n");
865 return PTR_ERR(trigger); 867 return PTR_ERR(trigger);
866 } 868 }
867 vgpu->vdev.msi_trigger = trigger; 869 vgpu->vdev.msi_trigger = trigger;
@@ -1120,7 +1122,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
1120 ret = vfio_set_irqs_validate_and_prepare(&hdr, max, 1122 ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
1121 VFIO_PCI_NUM_IRQS, &data_size); 1123 VFIO_PCI_NUM_IRQS, &data_size);
1122 if (ret) { 1124 if (ret) {
1123 gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n"); 1125 gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
1124 return -EINVAL; 1126 return -EINVAL;
1125 } 1127 }
1126 if (data_size) { 1128 if (data_size) {
@@ -1310,7 +1312,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
1310 1312
1311 kvm = vgpu->vdev.kvm; 1313 kvm = vgpu->vdev.kvm;
1312 if (!kvm || kvm->mm != current->mm) { 1314 if (!kvm || kvm->mm != current->mm) {
1313 gvt_err("KVM is required to use Intel vGPU\n"); 1315 gvt_vgpu_err("KVM is required to use Intel vGPU\n");
1314 return -ESRCH; 1316 return -ESRCH;
1315 } 1317 }
1316 1318
@@ -1324,6 +1326,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
1324 vgpu->handle = (unsigned long)info; 1326 vgpu->handle = (unsigned long)info;
1325 info->vgpu = vgpu; 1327 info->vgpu = vgpu;
1326 info->kvm = kvm; 1328 info->kvm = kvm;
1329 kvm_get_kvm(info->kvm);
1327 1330
1328 kvmgt_protect_table_init(info); 1331 kvmgt_protect_table_init(info);
1329 gvt_cache_init(vgpu); 1332 gvt_cache_init(vgpu);
@@ -1337,12 +1340,15 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
1337 1340
1338static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) 1341static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
1339{ 1342{
1343 struct intel_vgpu *vgpu = info->vgpu;
1344
1340 if (!info) { 1345 if (!info) {
1341 gvt_err("kvmgt_guest_info invalid\n"); 1346 gvt_vgpu_err("kvmgt_guest_info invalid\n");
1342 return false; 1347 return false;
1343 } 1348 }
1344 1349
1345 kvm_page_track_unregister_notifier(info->kvm, &info->track_node); 1350 kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
1351 kvm_put_kvm(info->kvm);
1346 kvmgt_protect_table_destroy(info); 1352 kvmgt_protect_table_destroy(info);
1347 gvt_cache_destroy(info->vgpu); 1353 gvt_cache_destroy(info->vgpu);
1348 vfree(info); 1354 vfree(info);
@@ -1383,12 +1389,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
1383 unsigned long iova, pfn; 1389 unsigned long iova, pfn;
1384 struct kvmgt_guest_info *info; 1390 struct kvmgt_guest_info *info;
1385 struct device *dev; 1391 struct device *dev;
1392 struct intel_vgpu *vgpu;
1386 int rc; 1393 int rc;
1387 1394
1388 if (!handle_valid(handle)) 1395 if (!handle_valid(handle))
1389 return INTEL_GVT_INVALID_ADDR; 1396 return INTEL_GVT_INVALID_ADDR;
1390 1397
1391 info = (struct kvmgt_guest_info *)handle; 1398 info = (struct kvmgt_guest_info *)handle;
1399 vgpu = info->vgpu;
1392 iova = gvt_cache_find(info->vgpu, gfn); 1400 iova = gvt_cache_find(info->vgpu, gfn);
1393 if (iova != INTEL_GVT_INVALID_ADDR) 1401 if (iova != INTEL_GVT_INVALID_ADDR)
1394 return iova; 1402 return iova;
@@ -1397,13 +1405,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
1397 dev = mdev_dev(info->vgpu->vdev.mdev); 1405 dev = mdev_dev(info->vgpu->vdev.mdev);
1398 rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn); 1406 rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
1399 if (rc != 1) { 1407 if (rc != 1) {
1400 gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc); 1408 gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
1409 gfn, rc);
1401 return INTEL_GVT_INVALID_ADDR; 1410 return INTEL_GVT_INVALID_ADDR;
1402 } 1411 }
1403 /* transfer to host iova for GFX to use DMA */ 1412 /* transfer to host iova for GFX to use DMA */
1404 rc = gvt_dma_map_iova(info->vgpu, pfn, &iova); 1413 rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
1405 if (rc) { 1414 if (rc) {
1406 gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn); 1415 gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
1407 vfio_unpin_pages(dev, &gfn, 1); 1416 vfio_unpin_pages(dev, &gfn, 1);
1408 return INTEL_GVT_INVALID_ADDR; 1417 return INTEL_GVT_INVALID_ADDR;
1409 } 1418 }
@@ -1417,7 +1426,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
1417{ 1426{
1418 struct kvmgt_guest_info *info; 1427 struct kvmgt_guest_info *info;
1419 struct kvm *kvm; 1428 struct kvm *kvm;
1420 int ret; 1429 int idx, ret;
1421 bool kthread = current->mm == NULL; 1430 bool kthread = current->mm == NULL;
1422 1431
1423 if (!handle_valid(handle)) 1432 if (!handle_valid(handle))
@@ -1429,8 +1438,10 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
1429 if (kthread) 1438 if (kthread)
1430 use_mm(kvm->mm); 1439 use_mm(kvm->mm);
1431 1440
1441 idx = srcu_read_lock(&kvm->srcu);
1432 ret = write ? kvm_write_guest(kvm, gpa, buf, len) : 1442 ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
1433 kvm_read_guest(kvm, gpa, buf, len); 1443 kvm_read_guest(kvm, gpa, buf, len);
1444 srcu_read_unlock(&kvm->srcu, idx);
1434 1445
1435 if (kthread) 1446 if (kthread)
1436 unuse_mm(kvm->mm); 1447 unuse_mm(kvm->mm);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 60b698cb8365..1ba3bdb09341 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -142,10 +142,10 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
142 ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, 142 ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
143 p_data, bytes); 143 p_data, bytes);
144 if (ret) { 144 if (ret) {
145 gvt_err("vgpu%d: guest page read error %d, " 145 gvt_vgpu_err("guest page read error %d, "
146 "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", 146 "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
147 vgpu->id, ret, 147 ret, gp->gfn, pa, *(u32 *)p_data,
148 gp->gfn, pa, *(u32 *)p_data, bytes); 148 bytes);
149 } 149 }
150 mutex_unlock(&gvt->lock); 150 mutex_unlock(&gvt->lock);
151 return ret; 151 return ret;
@@ -200,14 +200,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
200 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); 200 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
201 201
202 if (!vgpu->mmio.disable_warn_untrack) { 202 if (!vgpu->mmio.disable_warn_untrack) {
203 gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", 203 gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
204 vgpu->id, offset, bytes, *(u32 *)p_data); 204 offset, bytes, *(u32 *)p_data);
205 205
206 if (offset == 0x206c) { 206 if (offset == 0x206c) {
207 gvt_err("------------------------------------------\n"); 207 gvt_vgpu_err("------------------------------------------\n");
208 gvt_err("vgpu%d: likely triggers a gfx reset\n", 208 gvt_vgpu_err("likely triggers a gfx reset\n");
209 vgpu->id); 209 gvt_vgpu_err("------------------------------------------\n");
210 gvt_err("------------------------------------------\n");
211 vgpu->mmio.disable_warn_untrack = true; 210 vgpu->mmio.disable_warn_untrack = true;
212 } 211 }
213 } 212 }
@@ -220,8 +219,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
220 mutex_unlock(&gvt->lock); 219 mutex_unlock(&gvt->lock);
221 return 0; 220 return 0;
222err: 221err:
223 gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n", 222 gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
224 vgpu->id, offset, bytes); 223 offset, bytes);
225 mutex_unlock(&gvt->lock); 224 mutex_unlock(&gvt->lock);
226 return ret; 225 return ret;
227} 226}
@@ -259,10 +258,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
259 if (gp) { 258 if (gp) {
260 ret = gp->handler(gp, pa, p_data, bytes); 259 ret = gp->handler(gp, pa, p_data, bytes);
261 if (ret) { 260 if (ret) {
262 gvt_err("vgpu%d: guest page write error %d, " 261 gvt_err("guest page write error %d, "
263 "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", 262 "gfn 0x%lx, pa 0x%llx, "
264 vgpu->id, ret, 263 "var 0x%x, len %d\n",
265 gp->gfn, pa, *(u32 *)p_data, bytes); 264 ret, gp->gfn, pa,
265 *(u32 *)p_data, bytes);
266 } 266 }
267 mutex_unlock(&gvt->lock); 267 mutex_unlock(&gvt->lock);
268 return ret; 268 return ret;
@@ -329,8 +329,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
329 329
330 /* all register bits are RO. */ 330 /* all register bits are RO. */
331 if (ro_mask == ~(u64)0) { 331 if (ro_mask == ~(u64)0) {
332 gvt_err("vgpu%d: try to write RO reg %x\n", 332 gvt_vgpu_err("try to write RO reg %x\n",
333 vgpu->id, offset); 333 offset);
334 ret = 0; 334 ret = 0;
335 goto out; 335 goto out;
336 } 336 }
@@ -360,8 +360,8 @@ out:
360 mutex_unlock(&gvt->lock); 360 mutex_unlock(&gvt->lock);
361 return 0; 361 return 0;
362err: 362err:
363 gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n", 363 gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
364 vgpu->id, offset, bytes); 364 bytes);
365 mutex_unlock(&gvt->lock); 365 mutex_unlock(&gvt->lock);
366 return ret; 366 return ret;
367} 367}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 3bc620f56f35..a3a027025cd0 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -107,4 +107,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
107 void *p_data, unsigned int bytes); 107 void *p_data, unsigned int bytes);
108int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 108int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
109 void *p_data, unsigned int bytes); 109 void *p_data, unsigned int bytes);
110
111bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
112 unsigned int offset);
110#endif 113#endif
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 5d1caf9daba9..311799136d7f 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -67,14 +67,15 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
67 mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va 67 mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
68 + i * PAGE_SIZE); 68 + i * PAGE_SIZE);
69 if (mfn == INTEL_GVT_INVALID_ADDR) { 69 if (mfn == INTEL_GVT_INVALID_ADDR) {
70 gvt_err("fail to get MFN from VA\n"); 70 gvt_vgpu_err("fail to get MFN from VA\n");
71 return -EINVAL; 71 return -EINVAL;
72 } 72 }
73 ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, 73 ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
74 vgpu_opregion(vgpu)->gfn[i], 74 vgpu_opregion(vgpu)->gfn[i],
75 mfn, 1, map); 75 mfn, 1, map);
76 if (ret) { 76 if (ret) {
77 gvt_err("fail to map GFN to MFN, errno: %d\n", ret); 77 gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
78 ret);
78 return ret; 79 return ret;
79 } 80 }
80 } 81 }
@@ -287,7 +288,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
287 parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM; 288 parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
288 289
289 if (!(swsci & SWSCI_SCI_SELECT)) { 290 if (!(swsci & SWSCI_SCI_SELECT)) {
290 gvt_err("vgpu%d: requesting SMI service\n", vgpu->id); 291 gvt_vgpu_err("requesting SMI service\n");
291 return 0; 292 return 0;
292 } 293 }
293 /* ignore non 0->1 trasitions */ 294 /* ignore non 0->1 trasitions */
@@ -300,9 +301,8 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
300 func = GVT_OPREGION_FUNC(*scic); 301 func = GVT_OPREGION_FUNC(*scic);
301 subfunc = GVT_OPREGION_SUBFUNC(*scic); 302 subfunc = GVT_OPREGION_SUBFUNC(*scic);
302 if (!querying_capabilities(*scic)) { 303 if (!querying_capabilities(*scic)) {
303 gvt_err("vgpu%d: requesting runtime service: func \"%s\"," 304 gvt_vgpu_err("requesting runtime service: func \"%s\","
304 " subfunc \"%s\"\n", 305 " subfunc \"%s\"\n",
305 vgpu->id,
306 opregion_func_name(func), 306 opregion_func_name(func),
307 opregion_subfunc_name(subfunc)); 307 opregion_subfunc_name(subfunc));
308 /* 308 /*
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 73f052a4f424..0beb83563b08 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -167,7 +167,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
167 I915_WRITE_FW(reg, 0x1); 167 I915_WRITE_FW(reg, 0x1);
168 168
169 if (wait_for_atomic((I915_READ_FW(reg) == 0), 50)) 169 if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
170 gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id); 170 gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
171 else 171 else
172 vgpu_vreg(vgpu, regs[ring_id]) = 0; 172 vgpu_vreg(vgpu, regs[ring_id]) = 0;
173 173
@@ -207,7 +207,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
207 l3_offset.reg = 0xb020; 207 l3_offset.reg = 0xb020;
208 for (i = 0; i < 32; i++) { 208 for (i = 0; i < 32; i++) {
209 gen9_render_mocs_L3[i] = I915_READ(l3_offset); 209 gen9_render_mocs_L3[i] = I915_READ(l3_offset);
210 I915_WRITE(l3_offset, vgpu_vreg(vgpu, offset)); 210 I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
211 POSTING_READ(l3_offset); 211 POSTING_READ(l3_offset);
212 l3_offset.reg += 4; 212 l3_offset.reg += 4;
213 } 213 }
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 06c9584ac5f0..34b9acdf3479 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -101,7 +101,7 @@ struct tbs_sched_data {
101 struct list_head runq_head; 101 struct list_head runq_head;
102}; 102};
103 103
104#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000) 104#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
105 105
106static void tbs_sched_func(struct work_struct *work) 106static void tbs_sched_func(struct work_struct *work)
107{ 107{
@@ -223,7 +223,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
223 return; 223 return;
224 224
225 list_add_tail(&vgpu_data->list, &sched_data->runq_head); 225 list_add_tail(&vgpu_data->list, &sched_data->runq_head);
226 schedule_delayed_work(&sched_data->work, sched_data->period); 226 schedule_delayed_work(&sched_data->work, 0);
227} 227}
228 228
229static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) 229static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d3a56c949025..a44782412f2c 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -84,7 +84,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
84 (u32)((workload->ctx_desc.lrca + i) << 84 (u32)((workload->ctx_desc.lrca + i) <<
85 GTT_PAGE_SHIFT)); 85 GTT_PAGE_SHIFT));
86 if (context_gpa == INTEL_GVT_INVALID_ADDR) { 86 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
87 gvt_err("Invalid guest context descriptor\n"); 87 gvt_vgpu_err("Invalid guest context descriptor\n");
88 return -EINVAL; 88 return -EINVAL;
89 } 89 }
90 90
@@ -127,19 +127,22 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
127 return 0; 127 return 0;
128} 128}
129 129
130static inline bool is_gvt_request(struct drm_i915_gem_request *req)
131{
132 return i915_gem_context_force_single_submission(req->ctx);
133}
134
130static int shadow_context_status_change(struct notifier_block *nb, 135static int shadow_context_status_change(struct notifier_block *nb,
131 unsigned long action, void *data) 136 unsigned long action, void *data)
132{ 137{
133 struct intel_vgpu *vgpu = container_of(nb, 138 struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
134 struct intel_vgpu, shadow_ctx_notifier_block); 139 struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
135 struct drm_i915_gem_request *req = 140 shadow_ctx_notifier_block[req->engine->id]);
136 (struct drm_i915_gem_request *)data; 141 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
137 struct intel_gvt_workload_scheduler *scheduler =
138 &vgpu->gvt->scheduler;
139 struct intel_vgpu_workload *workload = 142 struct intel_vgpu_workload *workload =
140 scheduler->current_workload[req->engine->id]; 143 scheduler->current_workload[req->engine->id];
141 144
142 if (unlikely(!workload)) 145 if (!is_gvt_request(req) || unlikely(!workload))
143 return NOTIFY_OK; 146 return NOTIFY_OK;
144 147
145 switch (action) { 148 switch (action) {
@@ -175,7 +178,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
175 int ring_id = workload->ring_id; 178 int ring_id = workload->ring_id;
176 struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; 179 struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
177 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; 180 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
181 struct intel_engine_cs *engine = dev_priv->engine[ring_id];
178 struct drm_i915_gem_request *rq; 182 struct drm_i915_gem_request *rq;
183 struct intel_vgpu *vgpu = workload->vgpu;
179 int ret; 184 int ret;
180 185
181 gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", 186 gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -187,9 +192,24 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
187 192
188 mutex_lock(&dev_priv->drm.struct_mutex); 193 mutex_lock(&dev_priv->drm.struct_mutex);
189 194
195 /* pin shadow context by gvt even the shadow context will be pinned
196 * when i915 alloc request. That is because gvt will update the guest
197 * context from shadow context when workload is completed, and at that
198 * moment, i915 may already unpined the shadow context to make the
199 * shadow_ctx pages invalid. So gvt need to pin itself. After update
200 * the guest context, gvt can unpin the shadow_ctx safely.
201 */
202 ret = engine->context_pin(engine, shadow_ctx);
203 if (ret) {
204 gvt_vgpu_err("fail to pin shadow context\n");
205 workload->status = ret;
206 mutex_unlock(&dev_priv->drm.struct_mutex);
207 return ret;
208 }
209
190 rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); 210 rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
191 if (IS_ERR(rq)) { 211 if (IS_ERR(rq)) {
192 gvt_err("fail to allocate gem request\n"); 212 gvt_vgpu_err("fail to allocate gem request\n");
193 ret = PTR_ERR(rq); 213 ret = PTR_ERR(rq);
194 goto out; 214 goto out;
195 } 215 }
@@ -202,9 +222,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
202 if (ret) 222 if (ret)
203 goto out; 223 goto out;
204 224
205 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); 225 if ((workload->ring_id == RCS) &&
206 if (ret) 226 (workload->wa_ctx.indirect_ctx.size != 0)) {
207 goto out; 227 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
228 if (ret)
229 goto out;
230 }
208 231
209 ret = populate_shadow_context(workload); 232 ret = populate_shadow_context(workload);
210 if (ret) 233 if (ret)
@@ -227,6 +250,9 @@ out:
227 250
228 if (!IS_ERR_OR_NULL(rq)) 251 if (!IS_ERR_OR_NULL(rq))
229 i915_add_request_no_flush(rq); 252 i915_add_request_no_flush(rq);
253 else
254 engine->context_unpin(engine, shadow_ctx);
255
230 mutex_unlock(&dev_priv->drm.struct_mutex); 256 mutex_unlock(&dev_priv->drm.struct_mutex);
231 return ret; 257 return ret;
232} 258}
@@ -322,7 +348,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
322 (u32)((workload->ctx_desc.lrca + i) << 348 (u32)((workload->ctx_desc.lrca + i) <<
323 GTT_PAGE_SHIFT)); 349 GTT_PAGE_SHIFT));
324 if (context_gpa == INTEL_GVT_INVALID_ADDR) { 350 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
325 gvt_err("invalid guest context descriptor\n"); 351 gvt_vgpu_err("invalid guest context descriptor\n");
326 return; 352 return;
327 } 353 }
328 354
@@ -376,6 +402,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
376 * For the workload w/o request, directly complete the workload. 402 * For the workload w/o request, directly complete the workload.
377 */ 403 */
378 if (workload->req) { 404 if (workload->req) {
405 struct drm_i915_private *dev_priv =
406 workload->vgpu->gvt->dev_priv;
407 struct intel_engine_cs *engine =
408 dev_priv->engine[workload->ring_id];
379 wait_event(workload->shadow_ctx_status_wq, 409 wait_event(workload->shadow_ctx_status_wq,
380 !atomic_read(&workload->shadow_ctx_active)); 410 !atomic_read(&workload->shadow_ctx_active));
381 411
@@ -388,6 +418,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
388 INTEL_GVT_EVENT_MAX) 418 INTEL_GVT_EVENT_MAX)
389 intel_vgpu_trigger_virtual_event(vgpu, event); 419 intel_vgpu_trigger_virtual_event(vgpu, event);
390 } 420 }
421 mutex_lock(&dev_priv->drm.struct_mutex);
422 /* unpin shadow ctx as the shadow_ctx update is done */
423 engine->context_unpin(engine, workload->vgpu->shadow_ctx);
424 mutex_unlock(&dev_priv->drm.struct_mutex);
391 } 425 }
392 426
393 gvt_dbg_sched("ring id %d complete workload %p status %d\n", 427 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -417,6 +451,7 @@ static int workload_thread(void *priv)
417 int ring_id = p->ring_id; 451 int ring_id = p->ring_id;
418 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 452 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
419 struct intel_vgpu_workload *workload = NULL; 453 struct intel_vgpu_workload *workload = NULL;
454 struct intel_vgpu *vgpu = NULL;
420 int ret; 455 int ret;
421 bool need_force_wake = IS_SKYLAKE(gvt->dev_priv); 456 bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
422 DEFINE_WAIT_FUNC(wait, woken_wake_function); 457 DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -459,25 +494,14 @@ static int workload_thread(void *priv)
459 mutex_unlock(&gvt->lock); 494 mutex_unlock(&gvt->lock);
460 495
461 if (ret) { 496 if (ret) {
462 gvt_err("fail to dispatch workload, skip\n"); 497 vgpu = workload->vgpu;
498 gvt_vgpu_err("fail to dispatch workload, skip\n");
463 goto complete; 499 goto complete;
464 } 500 }
465 501
466 gvt_dbg_sched("ring id %d wait workload %p\n", 502 gvt_dbg_sched("ring id %d wait workload %p\n",
467 workload->ring_id, workload); 503 workload->ring_id, workload);
468retry: 504 i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
469 i915_wait_request(workload->req,
470 0, MAX_SCHEDULE_TIMEOUT);
471 /* I915 has replay mechanism and a request will be replayed
472 * if there is i915 reset. So the seqno will be updated anyway.
473 * If the seqno is not updated yet after waiting, which means
474 * the replay may still be in progress and we can wait again.
475 */
476 if (!i915_gem_request_completed(workload->req)) {
477 gvt_dbg_sched("workload %p not completed, wait again\n",
478 workload);
479 goto retry;
480 }
481 505
482complete: 506complete:
483 gvt_dbg_sched("will complete workload %p, status: %d\n", 507 gvt_dbg_sched("will complete workload %p, status: %d\n",
@@ -513,15 +537,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
513void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt) 537void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
514{ 538{
515 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 539 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
516 int i; 540 struct intel_engine_cs *engine;
541 enum intel_engine_id i;
517 542
518 gvt_dbg_core("clean workload scheduler\n"); 543 gvt_dbg_core("clean workload scheduler\n");
519 544
520 for (i = 0; i < I915_NUM_ENGINES; i++) { 545 for_each_engine(engine, gvt->dev_priv, i) {
521 if (scheduler->thread[i]) { 546 atomic_notifier_chain_unregister(
522 kthread_stop(scheduler->thread[i]); 547 &engine->context_status_notifier,
523 scheduler->thread[i] = NULL; 548 &gvt->shadow_ctx_notifier_block[i]);
524 } 549 kthread_stop(scheduler->thread[i]);
525 } 550 }
526} 551}
527 552
@@ -529,18 +554,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
529{ 554{
530 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 555 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
531 struct workload_thread_param *param = NULL; 556 struct workload_thread_param *param = NULL;
557 struct intel_engine_cs *engine;
558 enum intel_engine_id i;
532 int ret; 559 int ret;
533 int i;
534 560
535 gvt_dbg_core("init workload scheduler\n"); 561 gvt_dbg_core("init workload scheduler\n");
536 562
537 init_waitqueue_head(&scheduler->workload_complete_wq); 563 init_waitqueue_head(&scheduler->workload_complete_wq);
538 564
539 for (i = 0; i < I915_NUM_ENGINES; i++) { 565 for_each_engine(engine, gvt->dev_priv, i) {
540 /* check ring mask at init time */
541 if (!HAS_ENGINE(gvt->dev_priv, i))
542 continue;
543
544 init_waitqueue_head(&scheduler->waitq[i]); 566 init_waitqueue_head(&scheduler->waitq[i]);
545 567
546 param = kzalloc(sizeof(*param), GFP_KERNEL); 568 param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -559,6 +581,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
559 ret = PTR_ERR(scheduler->thread[i]); 581 ret = PTR_ERR(scheduler->thread[i]);
560 goto err; 582 goto err;
561 } 583 }
584
585 gvt->shadow_ctx_notifier_block[i].notifier_call =
586 shadow_context_status_change;
587 atomic_notifier_chain_register(&engine->context_status_notifier,
588 &gvt->shadow_ctx_notifier_block[i]);
562 } 589 }
563 return 0; 590 return 0;
564err: 591err:
@@ -570,9 +597,6 @@ err:
570 597
571void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu) 598void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
572{ 599{
573 atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
574 &vgpu->shadow_ctx_notifier_block);
575
576 i915_gem_context_put_unlocked(vgpu->shadow_ctx); 600 i915_gem_context_put_unlocked(vgpu->shadow_ctx);
577} 601}
578 602
@@ -587,10 +611,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
587 611
588 vgpu->shadow_ctx->engine[RCS].initialised = true; 612 vgpu->shadow_ctx->engine[RCS].initialised = true;
589 613
590 vgpu->shadow_ctx_notifier_block.notifier_call =
591 shadow_context_status_change;
592
593 atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
594 &vgpu->shadow_ctx_notifier_block);
595 return 0; 614 return 0;
596} 615}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e703556eba99..1c75402a59c1 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -248,6 +248,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
248 case I915_PARAM_IRQ_ACTIVE: 248 case I915_PARAM_IRQ_ACTIVE:
249 case I915_PARAM_ALLOW_BATCHBUFFER: 249 case I915_PARAM_ALLOW_BATCHBUFFER:
250 case I915_PARAM_LAST_DISPATCH: 250 case I915_PARAM_LAST_DISPATCH:
251 case I915_PARAM_HAS_EXEC_CONSTANTS:
251 /* Reject all old ums/dri params. */ 252 /* Reject all old ums/dri params. */
252 return -ENODEV; 253 return -ENODEV;
253 case I915_PARAM_CHIPSET_ID: 254 case I915_PARAM_CHIPSET_ID:
@@ -274,9 +275,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
274 case I915_PARAM_HAS_BSD2: 275 case I915_PARAM_HAS_BSD2:
275 value = !!dev_priv->engine[VCS2]; 276 value = !!dev_priv->engine[VCS2];
276 break; 277 break;
277 case I915_PARAM_HAS_EXEC_CONSTANTS:
278 value = INTEL_GEN(dev_priv) >= 4;
279 break;
280 case I915_PARAM_HAS_LLC: 278 case I915_PARAM_HAS_LLC:
281 value = HAS_LLC(dev_priv); 279 value = HAS_LLC(dev_priv);
282 break; 280 break;
@@ -1788,7 +1786,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
1788 goto error; 1786 goto error;
1789 } 1787 }
1790 1788
1791 i915_gem_reset_finish(dev_priv); 1789 i915_gem_reset(dev_priv);
1792 intel_overlay_reset(dev_priv); 1790 intel_overlay_reset(dev_priv);
1793 1791
1794 /* Ok, now get things going again... */ 1792 /* Ok, now get things going again... */
@@ -1814,6 +1812,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
1814 i915_queue_hangcheck(dev_priv); 1812 i915_queue_hangcheck(dev_priv);
1815 1813
1816wakeup: 1814wakeup:
1815 i915_gem_reset_finish(dev_priv);
1817 enable_irq(dev_priv->drm.irq); 1816 enable_irq(dev_priv->drm.irq);
1818 wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS); 1817 wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
1819 return; 1818 return;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7febe6eecf72..1e53c31b6826 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1325,7 +1325,7 @@ struct intel_gen6_power_mgmt {
1325 unsigned boosts; 1325 unsigned boosts;
1326 1326
1327 /* manual wa residency calculations */ 1327 /* manual wa residency calculations */
1328 struct intel_rps_ei up_ei, down_ei; 1328 struct intel_rps_ei ei;
1329 1329
1330 /* 1330 /*
1331 * Protects RPS/RC6 register access and PCU communication. 1331 * Protects RPS/RC6 register access and PCU communication.
@@ -2064,8 +2064,6 @@ struct drm_i915_private {
2064 2064
2065 const struct intel_device_info info; 2065 const struct intel_device_info info;
2066 2066
2067 int relative_constants_mode;
2068
2069 void __iomem *regs; 2067 void __iomem *regs;
2070 2068
2071 struct intel_uncore uncore; 2069 struct intel_uncore uncore;
@@ -3342,6 +3340,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
3342} 3340}
3343 3341
3344int i915_gem_reset_prepare(struct drm_i915_private *dev_priv); 3342int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
3343void i915_gem_reset(struct drm_i915_private *dev_priv);
3345void i915_gem_reset_finish(struct drm_i915_private *dev_priv); 3344void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
3346void i915_gem_set_wedged(struct drm_i915_private *dev_priv); 3345void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
3347void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); 3346void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 10777da73039..67b1fc5a0331 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2719,7 +2719,16 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
2719 for_each_engine(engine, dev_priv, id) { 2719 for_each_engine(engine, dev_priv, id) {
2720 struct drm_i915_gem_request *request; 2720 struct drm_i915_gem_request *request;
2721 2721
2722 /* Prevent request submission to the hardware until we have
2723 * completed the reset in i915_gem_reset_finish(). If a request
2724 * is completed by one engine, it may then queue a request
2725 * to a second via its engine->irq_tasklet *just* as we are
2726 * calling engine->init_hw() and also writing the ELSP.
2727 * Turning off the engine->irq_tasklet until the reset is over
2728 * prevents the race.
2729 */
2722 tasklet_kill(&engine->irq_tasklet); 2730 tasklet_kill(&engine->irq_tasklet);
2731 tasklet_disable(&engine->irq_tasklet);
2723 2732
2724 if (engine_stalled(engine)) { 2733 if (engine_stalled(engine)) {
2725 request = i915_gem_find_active_request(engine); 2734 request = i915_gem_find_active_request(engine);
@@ -2834,7 +2843,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
2834 engine->reset_hw(engine, request); 2843 engine->reset_hw(engine, request);
2835} 2844}
2836 2845
2837void i915_gem_reset_finish(struct drm_i915_private *dev_priv) 2846void i915_gem_reset(struct drm_i915_private *dev_priv)
2838{ 2847{
2839 struct intel_engine_cs *engine; 2848 struct intel_engine_cs *engine;
2840 enum intel_engine_id id; 2849 enum intel_engine_id id;
@@ -2856,6 +2865,17 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
2856 } 2865 }
2857} 2866}
2858 2867
2868void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
2869{
2870 struct intel_engine_cs *engine;
2871 enum intel_engine_id id;
2872
2873 lockdep_assert_held(&dev_priv->drm.struct_mutex);
2874
2875 for_each_engine(engine, dev_priv, id)
2876 tasklet_enable(&engine->irq_tasklet);
2877}
2878
2859static void nop_submit_request(struct drm_i915_gem_request *request) 2879static void nop_submit_request(struct drm_i915_gem_request *request)
2860{ 2880{
2861 dma_fence_set_error(&request->fence, -EIO); 2881 dma_fence_set_error(&request->fence, -EIO);
@@ -4674,8 +4694,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
4674 init_waitqueue_head(&dev_priv->gpu_error.wait_queue); 4694 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
4675 init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 4695 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4676 4696
4677 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4678
4679 init_waitqueue_head(&dev_priv->pending_flip_queue); 4697 init_waitqueue_head(&dev_priv->pending_flip_queue);
4680 4698
4681 dev_priv->mm.interruptible = true; 4699 dev_priv->mm.interruptible = true;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 17f90c618208..e2d83b6d376b 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -311,7 +311,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
311 ctx->ring_size = 4 * PAGE_SIZE; 311 ctx->ring_size = 4 * PAGE_SIZE;
312 ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) << 312 ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
313 GEN8_CTX_ADDRESSING_MODE_SHIFT; 313 GEN8_CTX_ADDRESSING_MODE_SHIFT;
314 ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
315 314
316 /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not 315 /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
317 * present or not in use we still need a small bias as ring wraparound 316 * present or not in use we still need a small bias as ring wraparound
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 0ac750b90f3d..e9c008fe14b1 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -160,9 +160,6 @@ struct i915_gem_context {
160 /** desc_template: invariant fields for the HW context descriptor */ 160 /** desc_template: invariant fields for the HW context descriptor */
161 u32 desc_template; 161 u32 desc_template;
162 162
163 /** status_notifier: list of callbacks for context-switch changes */
164 struct atomic_notifier_head status_notifier;
165
166 /** guilty_count: How many times this context has caused a GPU hang. */ 163 /** guilty_count: How many times this context has caused a GPU hang. */
167 unsigned int guilty_count; 164 unsigned int guilty_count;
168 /** 165 /**
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d02cfaefe1c8..30e0675fd7da 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1408,10 +1408,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
1408 struct drm_i915_gem_execbuffer2 *args, 1408 struct drm_i915_gem_execbuffer2 *args,
1409 struct list_head *vmas) 1409 struct list_head *vmas)
1410{ 1410{
1411 struct drm_i915_private *dev_priv = params->request->i915;
1412 u64 exec_start, exec_len; 1411 u64 exec_start, exec_len;
1413 int instp_mode;
1414 u32 instp_mask;
1415 int ret; 1412 int ret;
1416 1413
1417 ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas); 1414 ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
@@ -1422,56 +1419,11 @@ execbuf_submit(struct i915_execbuffer_params *params,
1422 if (ret) 1419 if (ret)
1423 return ret; 1420 return ret;
1424 1421
1425 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; 1422 if (args->flags & I915_EXEC_CONSTANTS_MASK) {
1426 instp_mask = I915_EXEC_CONSTANTS_MASK; 1423 DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n");
1427 switch (instp_mode) {
1428 case I915_EXEC_CONSTANTS_REL_GENERAL:
1429 case I915_EXEC_CONSTANTS_ABSOLUTE:
1430 case I915_EXEC_CONSTANTS_REL_SURFACE:
1431 if (instp_mode != 0 && params->engine->id != RCS) {
1432 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1433 return -EINVAL;
1434 }
1435
1436 if (instp_mode != dev_priv->relative_constants_mode) {
1437 if (INTEL_INFO(dev_priv)->gen < 4) {
1438 DRM_DEBUG("no rel constants on pre-gen4\n");
1439 return -EINVAL;
1440 }
1441
1442 if (INTEL_INFO(dev_priv)->gen > 5 &&
1443 instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1444 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1445 return -EINVAL;
1446 }
1447
1448 /* The HW changed the meaning on this bit on gen6 */
1449 if (INTEL_INFO(dev_priv)->gen >= 6)
1450 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1451 }
1452 break;
1453 default:
1454 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
1455 return -EINVAL; 1424 return -EINVAL;
1456 } 1425 }
1457 1426
1458 if (params->engine->id == RCS &&
1459 instp_mode != dev_priv->relative_constants_mode) {
1460 struct intel_ring *ring = params->request->ring;
1461
1462 ret = intel_ring_begin(params->request, 4);
1463 if (ret)
1464 return ret;
1465
1466 intel_ring_emit(ring, MI_NOOP);
1467 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1468 intel_ring_emit_reg(ring, INSTPM);
1469 intel_ring_emit(ring, instp_mask << 16 | instp_mode);
1470 intel_ring_advance(ring);
1471
1472 dev_priv->relative_constants_mode = instp_mode;
1473 }
1474
1475 if (args->flags & I915_EXEC_GEN7_SOL_RESET) { 1427 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1476 ret = i915_reset_gen7_sol_offsets(params->request); 1428 ret = i915_reset_gen7_sol_offsets(params->request);
1477 if (ret) 1429 if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 401006b4c6a3..d5d2b4c6ed38 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -263,7 +263,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
263 I915_SHRINK_BOUND | 263 I915_SHRINK_BOUND |
264 I915_SHRINK_UNBOUND | 264 I915_SHRINK_UNBOUND |
265 I915_SHRINK_ACTIVE); 265 I915_SHRINK_ACTIVE);
266 rcu_barrier(); /* wait until our RCU delayed slab frees are completed */ 266 synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
267 267
268 return freed; 268 return freed;
269} 269}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e6ffef2f707a..b6c886ac901b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1046,68 +1046,51 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
1046 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 1046 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
1047} 1047}
1048 1048
1049static bool vlv_c0_above(struct drm_i915_private *dev_priv,
1050 const struct intel_rps_ei *old,
1051 const struct intel_rps_ei *now,
1052 int threshold)
1053{
1054 u64 time, c0;
1055 unsigned int mul = 100;
1056
1057 if (old->cz_clock == 0)
1058 return false;
1059
1060 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1061 mul <<= 8;
1062
1063 time = now->cz_clock - old->cz_clock;
1064 time *= threshold * dev_priv->czclk_freq;
1065
1066 /* Workload can be split between render + media, e.g. SwapBuffers
1067 * being blitted in X after being rendered in mesa. To account for
1068 * this we need to combine both engines into our activity counter.
1069 */
1070 c0 = now->render_c0 - old->render_c0;
1071 c0 += now->media_c0 - old->media_c0;
1072 c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
1073
1074 return c0 >= time;
1075}
1076
1077void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1049void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1078{ 1050{
1079 vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); 1051 memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
1080 dev_priv->rps.up_ei = dev_priv->rps.down_ei;
1081} 1052}
1082 1053
1083static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1054static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1084{ 1055{
1056 const struct intel_rps_ei *prev = &dev_priv->rps.ei;
1085 struct intel_rps_ei now; 1057 struct intel_rps_ei now;
1086 u32 events = 0; 1058 u32 events = 0;
1087 1059
1088 if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) 1060 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
1089 return 0; 1061 return 0;
1090 1062
1091 vlv_c0_read(dev_priv, &now); 1063 vlv_c0_read(dev_priv, &now);
1092 if (now.cz_clock == 0) 1064 if (now.cz_clock == 0)
1093 return 0; 1065 return 0;
1094 1066
1095 if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { 1067 if (prev->cz_clock) {
1096 if (!vlv_c0_above(dev_priv, 1068 u64 time, c0;
1097 &dev_priv->rps.down_ei, &now, 1069 unsigned int mul;
1098 dev_priv->rps.down_threshold)) 1070
1099 events |= GEN6_PM_RP_DOWN_THRESHOLD; 1071 mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
1100 dev_priv->rps.down_ei = now; 1072 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1101 } 1073 mul <<= 8;
1102 1074
1103 if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { 1075 time = now.cz_clock - prev->cz_clock;
1104 if (vlv_c0_above(dev_priv, 1076 time *= dev_priv->czclk_freq;
1105 &dev_priv->rps.up_ei, &now, 1077
1106 dev_priv->rps.up_threshold)) 1078 /* Workload can be split between render + media,
1107 events |= GEN6_PM_RP_UP_THRESHOLD; 1079 * e.g. SwapBuffers being blitted in X after being rendered in
1108 dev_priv->rps.up_ei = now; 1080 * mesa. To account for this we need to combine both engines
1081 * into our activity counter.
1082 */
1083 c0 = now.render_c0 - prev->render_c0;
1084 c0 += now.media_c0 - prev->media_c0;
1085 c0 *= mul;
1086
1087 if (c0 > time * dev_priv->rps.up_threshold)
1088 events = GEN6_PM_RP_UP_THRESHOLD;
1089 else if (c0 < time * dev_priv->rps.down_threshold)
1090 events = GEN6_PM_RP_DOWN_THRESHOLD;
1109 } 1091 }
1110 1092
1093 dev_priv->rps.ei = now;
1111 return events; 1094 return events;
1112} 1095}
1113 1096
@@ -4228,7 +4211,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4228 /* Let's track the enabled rps events */ 4211 /* Let's track the enabled rps events */
4229 if (IS_VALLEYVIEW(dev_priv)) 4212 if (IS_VALLEYVIEW(dev_priv))
4230 /* WaGsvRC0ResidencyMethod:vlv */ 4213 /* WaGsvRC0ResidencyMethod:vlv */
4231 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED; 4214 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4232 else 4215 else
4233 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4216 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4234 4217
@@ -4266,6 +4249,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4266 if (!IS_GEN2(dev_priv)) 4249 if (!IS_GEN2(dev_priv))
4267 dev->vblank_disable_immediate = true; 4250 dev->vblank_disable_immediate = true;
4268 4251
4252 /* Most platforms treat the display irq block as an always-on
4253 * power domain. vlv/chv can disable it at runtime and need
4254 * special care to avoid writing any of the display block registers
4255 * outside of the power domain. We defer setting up the display irqs
4256 * in this case to the runtime pm.
4257 */
4258 dev_priv->display_irqs_enabled = true;
4259 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4260 dev_priv->display_irqs_enabled = false;
4261
4269 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4262 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4270 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4263 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4271 4264
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 0085bc745f6a..de219b71fb76 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -35,7 +35,6 @@
35 */ 35 */
36 36
37#define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin" 37#define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin"
38MODULE_FIRMWARE(I915_CSR_GLK);
39#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 1) 38#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
40 39
41#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin" 40#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3282b0f4b134..ed1f4f272b4f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -16696,12 +16696,11 @@ int intel_modeset_init(struct drm_device *dev)
16696 } 16696 }
16697 } 16697 }
16698 16698
16699 intel_update_czclk(dev_priv);
16700 intel_update_cdclk(dev_priv);
16701 dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
16702
16703 intel_shared_dpll_init(dev); 16699 intel_shared_dpll_init(dev);
16704 16700
16701 intel_update_czclk(dev_priv);
16702 intel_modeset_init_hw(dev);
16703
16705 if (dev_priv->max_cdclk_freq == 0) 16704 if (dev_priv->max_cdclk_freq == 0)
16706 intel_update_max_cdclk(dev_priv); 16705 intel_update_max_cdclk(dev_priv);
16707 16706
@@ -17258,8 +17257,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
17258 17257
17259 intel_init_gt_powersave(dev_priv); 17258 intel_init_gt_powersave(dev_priv);
17260 17259
17261 intel_modeset_init_hw(dev);
17262
17263 intel_setup_overlay(dev_priv); 17260 intel_setup_overlay(dev_priv);
17264} 17261}
17265 17262
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 371acf109e34..ab1be5c80ea5 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -105,6 +105,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
105 /* Nothing to do here, execute in order of dependencies */ 105 /* Nothing to do here, execute in order of dependencies */
106 engine->schedule = NULL; 106 engine->schedule = NULL;
107 107
108 ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
109
108 dev_priv->engine[id] = engine; 110 dev_priv->engine[id] = engine;
109 return 0; 111 return 0;
110} 112}
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index d23c0fcff751..8c04eca84351 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -77,6 +77,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
77 goto bail; 77 goto bail;
78 } 78 }
79 79
80 if (!i915.enable_execlists) {
81 DRM_INFO("GPU guest virtualisation [GVT-g] disabled due to disabled execlist submission [i915.enable_execlists module parameter]\n");
82 goto bail;
83 }
84
80 /* 85 /*
81 * We're not in host or fail to find a MPT module, disable GVT-g 86 * We're not in host or fail to find a MPT module, disable GVT-g
82 */ 87 */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ebae2bd83918..24b2fa5b6282 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1298,16 +1298,34 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
1298 1298
1299static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state) 1299static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
1300{ 1300{
1301 struct drm_device *dev = crtc_state->base.crtc->dev; 1301 struct drm_i915_private *dev_priv =
1302 to_i915(crtc_state->base.crtc->dev);
1303 struct drm_atomic_state *state = crtc_state->base.state;
1304 struct drm_connector_state *connector_state;
1305 struct drm_connector *connector;
1306 int i;
1302 1307
1303 if (HAS_GMCH_DISPLAY(to_i915(dev))) 1308 if (HAS_GMCH_DISPLAY(dev_priv))
1304 return false; 1309 return false;
1305 1310
1306 /* 1311 /*
1307 * HDMI 12bpc affects the clocks, so it's only possible 1312 * HDMI 12bpc affects the clocks, so it's only possible
1308 * when not cloning with other encoder types. 1313 * when not cloning with other encoder types.
1309 */ 1314 */
1310 return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI; 1315 if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
1316 return false;
1317
1318 for_each_connector_in_state(state, connector, connector_state, i) {
1319 const struct drm_display_info *info = &connector->display_info;
1320
1321 if (connector_state->crtc != crtc_state->base.crtc)
1322 continue;
1323
1324 if ((info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36) == 0)
1325 return false;
1326 }
1327
1328 return true;
1311} 1329}
1312 1330
1313bool intel_hdmi_compute_config(struct intel_encoder *encoder, 1331bool intel_hdmi_compute_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index b62e3f8ad415..54208bef7a83 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -219,7 +219,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
219 } 219 }
220 } 220 }
221 } 221 }
222 if (dev_priv->display.hpd_irq_setup) 222 if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
223 dev_priv->display.hpd_irq_setup(dev_priv); 223 dev_priv->display.hpd_irq_setup(dev_priv);
224 spin_unlock_irq(&dev_priv->irq_lock); 224 spin_unlock_irq(&dev_priv->irq_lock);
225 225
@@ -425,7 +425,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
425 } 425 }
426 } 426 }
427 427
428 if (storm_detected) 428 if (storm_detected && dev_priv->display_irqs_enabled)
429 dev_priv->display.hpd_irq_setup(dev_priv); 429 dev_priv->display.hpd_irq_setup(dev_priv);
430 spin_unlock(&dev_priv->irq_lock); 430 spin_unlock(&dev_priv->irq_lock);
431 431
@@ -471,10 +471,12 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
471 * Interrupt setup is already guaranteed to be single-threaded, this is 471 * Interrupt setup is already guaranteed to be single-threaded, this is
472 * just to make the assert_spin_locked checks happy. 472 * just to make the assert_spin_locked checks happy.
473 */ 473 */
474 spin_lock_irq(&dev_priv->irq_lock); 474 if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
475 if (dev_priv->display.hpd_irq_setup) 475 spin_lock_irq(&dev_priv->irq_lock);
476 dev_priv->display.hpd_irq_setup(dev_priv); 476 if (dev_priv->display_irqs_enabled)
477 spin_unlock_irq(&dev_priv->irq_lock); 477 dev_priv->display.hpd_irq_setup(dev_priv);
478 spin_unlock_irq(&dev_priv->irq_lock);
479 }
478} 480}
479 481
480static void i915_hpd_poll_init_work(struct work_struct *work) 482static void i915_hpd_poll_init_work(struct work_struct *work)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ebf8023d21e6..471af3b480ad 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -345,7 +345,8 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
345 if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) 345 if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
346 return; 346 return;
347 347
348 atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq); 348 atomic_notifier_call_chain(&rq->engine->context_status_notifier,
349 status, rq);
349} 350}
350 351
351static void 352static void
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 940bab22d464..6a29784d2b41 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4928,8 +4928,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
4928{ 4928{
4929 u32 mask = 0; 4929 u32 mask = 0;
4930 4930
4931 /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */
4931 if (val > dev_priv->rps.min_freq_softlimit) 4932 if (val > dev_priv->rps.min_freq_softlimit)
4932 mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; 4933 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
4933 if (val < dev_priv->rps.max_freq_softlimit) 4934 if (val < dev_priv->rps.max_freq_softlimit)
4934 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; 4935 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
4935 4936
@@ -5039,7 +5040,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
5039{ 5040{
5040 mutex_lock(&dev_priv->rps.hw_lock); 5041 mutex_lock(&dev_priv->rps.hw_lock);
5041 if (dev_priv->rps.enabled) { 5042 if (dev_priv->rps.enabled) {
5042 if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) 5043 if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
5043 gen6_rps_reset_ei(dev_priv); 5044 gen6_rps_reset_ei(dev_priv);
5044 I915_WRITE(GEN6_PMINTRMSK, 5045 I915_WRITE(GEN6_PMINTRMSK,
5045 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); 5046 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 91bc4abf5d3e..6c5f9958197d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2024,6 +2024,8 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
2024 ret = context_pin(ctx, flags); 2024 ret = context_pin(ctx, flags);
2025 if (ret) 2025 if (ret)
2026 goto error; 2026 goto error;
2027
2028 ce->state->obj->mm.dirty = true;
2027 } 2029 }
2028 2030
2029 /* The kernel context is only used as a placeholder for flushing the 2031 /* The kernel context is only used as a placeholder for flushing the
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 79c2b8d72322..13dccb18cd43 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -403,6 +403,9 @@ struct intel_engine_cs {
403 */ 403 */
404 struct i915_gem_context *legacy_active_context; 404 struct i915_gem_context *legacy_active_context;
405 405
406 /* status_notifier: list of callbacks for context-switch changes */
407 struct atomic_notifier_head context_status_notifier;
408
406 struct intel_engine_hangcheck hangcheck; 409 struct intel_engine_hangcheck hangcheck;
407 410
408 bool needs_cmd_parser; 411 bool needs_cmd_parser;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 4414cf73735d..36602ac7e248 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1,4 +1,4 @@
1/* Copyright (c) 2016 The Linux Foundation. All rights reserved. 1/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
2 * 2 *
3 * This program is free software; you can redistribute it and/or modify 3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and 4 * it under the terms of the GNU General Public License version 2 and
@@ -534,7 +534,7 @@ static void a5xx_destroy(struct msm_gpu *gpu)
534 } 534 }
535 535
536 if (a5xx_gpu->gpmu_bo) { 536 if (a5xx_gpu->gpmu_bo) {
537 if (a5xx_gpu->gpmu_bo) 537 if (a5xx_gpu->gpmu_iova)
538 msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id); 538 msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
539 drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo); 539 drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
540 } 540 }
@@ -860,7 +860,9 @@ static const struct adreno_gpu_funcs funcs = {
860 .idle = a5xx_idle, 860 .idle = a5xx_idle,
861 .irq = a5xx_irq, 861 .irq = a5xx_irq,
862 .destroy = a5xx_destroy, 862 .destroy = a5xx_destroy,
863#ifdef CONFIG_DEBUG_FS
863 .show = a5xx_show, 864 .show = a5xx_show,
865#endif
864 }, 866 },
865 .get_timestamp = a5xx_get_timestamp, 867 .get_timestamp = a5xx_get_timestamp,
866}; 868};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index c9bd1e6225f4..5ae65426b4e5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -418,18 +418,27 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
418 return 0; 418 return 0;
419} 419}
420 420
421void adreno_gpu_cleanup(struct adreno_gpu *gpu) 421void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
422{ 422{
423 if (gpu->memptrs_bo) { 423 struct msm_gpu *gpu = &adreno_gpu->base;
424 if (gpu->memptrs) 424
425 msm_gem_put_vaddr(gpu->memptrs_bo); 425 if (adreno_gpu->memptrs_bo) {
426 if (adreno_gpu->memptrs)
427 msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
428
429 if (adreno_gpu->memptrs_iova)
430 msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id);
431
432 drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
433 }
434 release_firmware(adreno_gpu->pm4);
435 release_firmware(adreno_gpu->pfp);
426 436
427 if (gpu->memptrs_iova) 437 msm_gpu_cleanup(gpu);
428 msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
429 438
430 drm_gem_object_unreference_unlocked(gpu->memptrs_bo); 439 if (gpu->aspace) {
440 gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
441 iommu_ports, ARRAY_SIZE(iommu_ports));
442 msm_gem_address_space_destroy(gpu->aspace);
431 } 443 }
432 release_firmware(gpu->pm4);
433 release_firmware(gpu->pfp);
434 msm_gpu_cleanup(&gpu->base);
435} 444}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 921270ea6059..a879ffa534b4 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -171,7 +171,7 @@ dsi_mgr_phy_enable(int id,
171 } 171 }
172 } 172 }
173 } else { 173 } else {
174 msm_dsi_host_reset_phy(mdsi->host); 174 msm_dsi_host_reset_phy(msm_dsi->host);
175 ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]); 175 ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]);
176 if (ret) 176 if (ret)
177 return ret; 177 return ret;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index a54d3bb5baad..8177e8511afd 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -18,13 +18,6 @@
18#include <linux/hdmi.h> 18#include <linux/hdmi.h>
19#include "hdmi.h" 19#include "hdmi.h"
20 20
21
22/* Supported HDMI Audio channels */
23#define MSM_HDMI_AUDIO_CHANNEL_2 0
24#define MSM_HDMI_AUDIO_CHANNEL_4 1
25#define MSM_HDMI_AUDIO_CHANNEL_6 2
26#define MSM_HDMI_AUDIO_CHANNEL_8 3
27
28/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */ 21/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
29static int nchannels[] = { 2, 4, 6, 8 }; 22static int nchannels[] = { 2, 4, 6, 8 };
30 23
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
index 611da7a660c9..238901987e00 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
@@ -18,7 +18,8 @@
18#ifndef __MDP5_PIPE_H__ 18#ifndef __MDP5_PIPE_H__
19#define __MDP5_PIPE_H__ 19#define __MDP5_PIPE_H__
20 20
21#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */ 21/* TODO: Add SSPP_MAX in mdp5.xml.h */
22#define SSPP_MAX (SSPP_CURSOR1 + 1)
22 23
23/* represents a hw pipe, which is dynamically assigned to a plane */ 24/* represents a hw pipe, which is dynamically assigned to a plane */
24struct mdp5_hw_pipe { 25struct mdp5_hw_pipe {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 59811f29607d..68e509b3b9e4 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -812,6 +812,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
812 812
813 size = PAGE_ALIGN(size); 813 size = PAGE_ALIGN(size);
814 814
815 /* Disallow zero sized objects as they make the underlying
816 * infrastructure grumpy
817 */
818 if (size == 0)
819 return ERR_PTR(-EINVAL);
820
815 ret = msm_gem_new_impl(dev, size, flags, NULL, &obj); 821 ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
816 if (ret) 822 if (ret)
817 goto fail; 823 goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 99e05aacbee1..af5b6ba4095b 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -706,9 +706,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
706 msm_ringbuffer_destroy(gpu->rb); 706 msm_ringbuffer_destroy(gpu->rb);
707 } 707 }
708 708
709 if (gpu->aspace)
710 msm_gem_address_space_destroy(gpu->aspace);
711
712 if (gpu->fctx) 709 if (gpu->fctx)
713 msm_fence_context_free(gpu->fctx); 710 msm_fence_context_free(gpu->fctx);
714} 711}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 684f1703aa5c..aaa3e80fecb4 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -213,8 +213,8 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
213 rbo->placement.num_busy_placement = 0; 213 rbo->placement.num_busy_placement = 0;
214 for (i = 0; i < rbo->placement.num_placement; i++) { 214 for (i = 0; i < rbo->placement.num_placement; i++) {
215 if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) { 215 if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
216 if (rbo->placements[0].fpfn < fpfn) 216 if (rbo->placements[i].fpfn < fpfn)
217 rbo->placements[0].fpfn = fpfn; 217 rbo->placements[i].fpfn = fpfn;
218 } else { 218 } else {
219 rbo->placement.busy_placement = 219 rbo->placement.busy_placement =
220 &rbo->placements[i]; 220 &rbo->placements[i];
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 72e1588580a1..c7af9fdd20c7 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2985,9 +2985,13 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2985 max_sclk = 75000; 2985 max_sclk = 75000;
2986 } 2986 }
2987 } else if (rdev->family == CHIP_OLAND) { 2987 } else if (rdev->family == CHIP_OLAND) {
2988 if ((rdev->pdev->device == 0x6604) && 2988 if ((rdev->pdev->revision == 0xC7) ||
2989 (rdev->pdev->subsystem_vendor == 0x1028) && 2989 (rdev->pdev->revision == 0x80) ||
2990 (rdev->pdev->subsystem_device == 0x066F)) { 2990 (rdev->pdev->revision == 0x81) ||
2991 (rdev->pdev->revision == 0x83) ||
2992 (rdev->pdev->revision == 0x87) ||
2993 (rdev->pdev->device == 0x6604) ||
2994 (rdev->pdev->device == 0x6605)) {
2991 max_sclk = 75000; 2995 max_sclk = 75000;
2992 } 2996 }
2993 } 2997 }
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index fdb451e3ec01..26a7ad0f4789 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
179 if (unlikely(ret != 0)) 179 if (unlikely(ret != 0))
180 goto out_err0; 180 goto out_err0;
181 181
182 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); 182 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
183 if (unlikely(ret != 0)) 183 if (unlikely(ret != 0))
184 goto out_err1; 184 goto out_err1;
185 185
@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists);
318 318
319int ttm_ref_object_add(struct ttm_object_file *tfile, 319int ttm_ref_object_add(struct ttm_object_file *tfile,
320 struct ttm_base_object *base, 320 struct ttm_base_object *base,
321 enum ttm_ref_type ref_type, bool *existed) 321 enum ttm_ref_type ref_type, bool *existed,
322 bool require_existed)
322{ 323{
323 struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; 324 struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
324 struct ttm_ref_object *ref; 325 struct ttm_ref_object *ref;
@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
345 } 346 }
346 347
347 rcu_read_unlock(); 348 rcu_read_unlock();
349 if (require_existed)
350 return -EPERM;
351
348 ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), 352 ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
349 false, false); 353 false, false);
350 if (unlikely(ret != 0)) 354 if (unlikely(ret != 0))
@@ -449,10 +453,10 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
449 ttm_ref_object_release(&ref->kref); 453 ttm_ref_object_release(&ref->kref);
450 } 454 }
451 455
456 spin_unlock(&tfile->lock);
452 for (i = 0; i < TTM_REF_NUM; ++i) 457 for (i = 0; i < TTM_REF_NUM; ++i)
453 drm_ht_remove(&tfile->ref_hash[i]); 458 drm_ht_remove(&tfile->ref_hash[i]);
454 459
455 spin_unlock(&tfile->lock);
456 ttm_object_file_unref(&tfile); 460 ttm_object_file_unref(&tfile);
457} 461}
458EXPORT_SYMBOL(ttm_object_file_release); 462EXPORT_SYMBOL(ttm_object_file_release);
@@ -529,9 +533,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
529 533
530 *p_tdev = NULL; 534 *p_tdev = NULL;
531 535
532 spin_lock(&tdev->object_lock);
533 drm_ht_remove(&tdev->object_hash); 536 drm_ht_remove(&tdev->object_hash);
534 spin_unlock(&tdev->object_lock);
535 537
536 kfree(tdev); 538 kfree(tdev);
537} 539}
@@ -635,7 +637,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
635 prime = (struct ttm_prime_object *) dma_buf->priv; 637 prime = (struct ttm_prime_object *) dma_buf->priv;
636 base = &prime->base; 638 base = &prime->base;
637 *handle = base->hash.key; 639 *handle = base->hash.key;
638 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); 640 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
639 641
640 dma_buf_put(dma_buf); 642 dma_buf_put(dma_buf);
641 643
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 0c06844af445..9fcf05ca492b 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -846,6 +846,17 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
846 drm_atomic_helper_crtc_destroy_state(crtc, state); 846 drm_atomic_helper_crtc_destroy_state(crtc, state);
847} 847}
848 848
849static void
850vc4_crtc_reset(struct drm_crtc *crtc)
851{
852 if (crtc->state)
853 __drm_atomic_helper_crtc_destroy_state(crtc->state);
854
855 crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
856 if (crtc->state)
857 crtc->state->crtc = crtc;
858}
859
849static const struct drm_crtc_funcs vc4_crtc_funcs = { 860static const struct drm_crtc_funcs vc4_crtc_funcs = {
850 .set_config = drm_atomic_helper_set_config, 861 .set_config = drm_atomic_helper_set_config,
851 .destroy = vc4_crtc_destroy, 862 .destroy = vc4_crtc_destroy,
@@ -853,7 +864,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
853 .set_property = NULL, 864 .set_property = NULL,
854 .cursor_set = NULL, /* handled by drm_mode_cursor_universal */ 865 .cursor_set = NULL, /* handled by drm_mode_cursor_universal */
855 .cursor_move = NULL, /* handled by drm_mode_cursor_universal */ 866 .cursor_move = NULL, /* handled by drm_mode_cursor_universal */
856 .reset = drm_atomic_helper_crtc_reset, 867 .reset = vc4_crtc_reset,
857 .atomic_duplicate_state = vc4_crtc_duplicate_state, 868 .atomic_duplicate_state = vc4_crtc_duplicate_state,
858 .atomic_destroy_state = vc4_crtc_destroy_state, 869 .atomic_destroy_state = vc4_crtc_destroy_state,
859 .gamma_set = vc4_crtc_gamma_set, 870 .gamma_set = vc4_crtc_gamma_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 6541dd8b82dc..6b2708b4eafe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -538,7 +538,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
538 struct vmw_fence_obj **p_fence) 538 struct vmw_fence_obj **p_fence)
539{ 539{
540 struct vmw_fence_obj *fence; 540 struct vmw_fence_obj *fence;
541 int ret; 541 int ret;
542 542
543 fence = kzalloc(sizeof(*fence), GFP_KERNEL); 543 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
544 if (unlikely(fence == NULL)) 544 if (unlikely(fence == NULL))
@@ -701,6 +701,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
701} 701}
702 702
703 703
704/**
705 * vmw_fence_obj_lookup - Look up a user-space fence object
706 *
707 * @tfile: A struct ttm_object_file identifying the caller.
708 * @handle: A handle identifying the fence object.
709 * @return: A struct vmw_user_fence base ttm object on success or
710 * an error pointer on failure.
711 *
712 * The fence object is looked up and type-checked. The caller needs
713 * to have opened the fence object first, but since that happens on
714 * creation and fence objects aren't shareable, that's not an
715 * issue currently.
716 */
717static struct ttm_base_object *
718vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
719{
720 struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
721
722 if (!base) {
723 pr_err("Invalid fence object handle 0x%08lx.\n",
724 (unsigned long)handle);
725 return ERR_PTR(-EINVAL);
726 }
727
728 if (base->refcount_release != vmw_user_fence_base_release) {
729 pr_err("Invalid fence object handle 0x%08lx.\n",
730 (unsigned long)handle);
731 ttm_base_object_unref(&base);
732 return ERR_PTR(-EINVAL);
733 }
734
735 return base;
736}
737
738
704int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data, 739int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
705 struct drm_file *file_priv) 740 struct drm_file *file_priv)
706{ 741{
@@ -726,13 +761,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
726 arg->kernel_cookie = jiffies + wait_timeout; 761 arg->kernel_cookie = jiffies + wait_timeout;
727 } 762 }
728 763
729 base = ttm_base_object_lookup(tfile, arg->handle); 764 base = vmw_fence_obj_lookup(tfile, arg->handle);
730 if (unlikely(base == NULL)) { 765 if (IS_ERR(base))
731 printk(KERN_ERR "Wait invalid fence object handle " 766 return PTR_ERR(base);
732 "0x%08lx.\n",
733 (unsigned long)arg->handle);
734 return -EINVAL;
735 }
736 767
737 fence = &(container_of(base, struct vmw_user_fence, base)->fence); 768 fence = &(container_of(base, struct vmw_user_fence, base)->fence);
738 769
@@ -771,13 +802,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
771 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 802 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
772 struct vmw_private *dev_priv = vmw_priv(dev); 803 struct vmw_private *dev_priv = vmw_priv(dev);
773 804
774 base = ttm_base_object_lookup(tfile, arg->handle); 805 base = vmw_fence_obj_lookup(tfile, arg->handle);
775 if (unlikely(base == NULL)) { 806 if (IS_ERR(base))
776 printk(KERN_ERR "Fence signaled invalid fence object handle " 807 return PTR_ERR(base);
777 "0x%08lx.\n",
778 (unsigned long)arg->handle);
779 return -EINVAL;
780 }
781 808
782 fence = &(container_of(base, struct vmw_user_fence, base)->fence); 809 fence = &(container_of(base, struct vmw_user_fence, base)->fence);
783 fman = fman_from_fence(fence); 810 fman = fman_from_fence(fence);
@@ -1024,6 +1051,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1024 (struct drm_vmw_fence_event_arg *) data; 1051 (struct drm_vmw_fence_event_arg *) data;
1025 struct vmw_fence_obj *fence = NULL; 1052 struct vmw_fence_obj *fence = NULL;
1026 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1053 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1054 struct ttm_object_file *tfile = vmw_fp->tfile;
1027 struct drm_vmw_fence_rep __user *user_fence_rep = 1055 struct drm_vmw_fence_rep __user *user_fence_rep =
1028 (struct drm_vmw_fence_rep __user *)(unsigned long) 1056 (struct drm_vmw_fence_rep __user *)(unsigned long)
1029 arg->fence_rep; 1057 arg->fence_rep;
@@ -1037,24 +1065,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1037 */ 1065 */
1038 if (arg->handle) { 1066 if (arg->handle) {
1039 struct ttm_base_object *base = 1067 struct ttm_base_object *base =
1040 ttm_base_object_lookup_for_ref(dev_priv->tdev, 1068 vmw_fence_obj_lookup(tfile, arg->handle);
1041 arg->handle); 1069
1042 1070 if (IS_ERR(base))
1043 if (unlikely(base == NULL)) { 1071 return PTR_ERR(base);
1044 DRM_ERROR("Fence event invalid fence object handle " 1072
1045 "0x%08lx.\n",
1046 (unsigned long)arg->handle);
1047 return -EINVAL;
1048 }
1049 fence = &(container_of(base, struct vmw_user_fence, 1073 fence = &(container_of(base, struct vmw_user_fence,
1050 base)->fence); 1074 base)->fence);
1051 (void) vmw_fence_obj_reference(fence); 1075 (void) vmw_fence_obj_reference(fence);
1052 1076
1053 if (user_fence_rep != NULL) { 1077 if (user_fence_rep != NULL) {
1054 bool existed;
1055
1056 ret = ttm_ref_object_add(vmw_fp->tfile, base, 1078 ret = ttm_ref_object_add(vmw_fp->tfile, base,
1057 TTM_REF_USAGE, &existed); 1079 TTM_REF_USAGE, NULL, false);
1058 if (unlikely(ret != 0)) { 1080 if (unlikely(ret != 0)) {
1059 DRM_ERROR("Failed to reference a fence " 1081 DRM_ERROR("Failed to reference a fence "
1060 "object.\n"); 1082 "object.\n");
@@ -1097,8 +1119,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1097 return 0; 1119 return 0;
1098out_no_create: 1120out_no_create:
1099 if (user_fence_rep != NULL) 1121 if (user_fence_rep != NULL)
1100 ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, 1122 ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
1101 handle, TTM_REF_USAGE);
1102out_no_ref_obj: 1123out_no_ref_obj:
1103 vmw_fence_obj_unreference(&fence); 1124 vmw_fence_obj_unreference(&fence);
1104 return ret; 1125 return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index b8c6a03c8c54..5ec24fd801cd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
114 param->value = dev_priv->has_dx; 114 param->value = dev_priv->has_dx;
115 break; 115 break;
116 default: 116 default:
117 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
118 param->param);
119 return -EINVAL; 117 return -EINVAL;
120 } 118 }
121 119
@@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
186 bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); 184 bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
187 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 185 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
188 186
189 if (unlikely(arg->pad64 != 0)) { 187 if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
190 DRM_ERROR("Illegal GET_3D_CAP argument.\n"); 188 DRM_ERROR("Illegal GET_3D_CAP argument.\n");
191 return -EINVAL; 189 return -EINVAL;
192 } 190 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 65b3f0369636..bf23153d4f55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -589,7 +589,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
589 return ret; 589 return ret;
590 590
591 ret = ttm_ref_object_add(tfile, &user_bo->prime.base, 591 ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
592 TTM_REF_SYNCCPU_WRITE, &existed); 592 TTM_REF_SYNCCPU_WRITE, &existed, false);
593 if (ret != 0 || existed) 593 if (ret != 0 || existed)
594 ttm_bo_synccpu_write_release(&user_bo->dma.base); 594 ttm_bo_synccpu_write_release(&user_bo->dma.base);
595 595
@@ -773,7 +773,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
773 773
774 *handle = user_bo->prime.base.hash.key; 774 *handle = user_bo->prime.base.hash.key;
775 return ttm_ref_object_add(tfile, &user_bo->prime.base, 775 return ttm_ref_object_add(tfile, &user_bo->prime.base,
776 TTM_REF_USAGE, NULL); 776 TTM_REF_USAGE, NULL, false);
777} 777}
778 778
779/* 779/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index b445ce9b9757..05fa092c942b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -713,11 +713,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
713 128; 713 128;
714 714
715 num_sizes = 0; 715 num_sizes = 0;
716 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) 716 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
717 if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
718 return -EINVAL;
717 num_sizes += req->mip_levels[i]; 719 num_sizes += req->mip_levels[i];
720 }
718 721
719 if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * 722 if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
720 DRM_VMW_MAX_MIP_LEVELS) 723 num_sizes == 0)
721 return -EINVAL; 724 return -EINVAL;
722 725
723 size = vmw_user_surface_size + 128 + 726 size = vmw_user_surface_size + 128 +
@@ -891,17 +894,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
891 uint32_t handle; 894 uint32_t handle;
892 struct ttm_base_object *base; 895 struct ttm_base_object *base;
893 int ret; 896 int ret;
897 bool require_exist = false;
894 898
895 if (handle_type == DRM_VMW_HANDLE_PRIME) { 899 if (handle_type == DRM_VMW_HANDLE_PRIME) {
896 ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle); 900 ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
897 if (unlikely(ret != 0)) 901 if (unlikely(ret != 0))
898 return ret; 902 return ret;
899 } else { 903 } else {
900 if (unlikely(drm_is_render_client(file_priv))) { 904 if (unlikely(drm_is_render_client(file_priv)))
901 DRM_ERROR("Render client refused legacy " 905 require_exist = true;
902 "surface reference.\n"); 906
903 return -EACCES;
904 }
905 if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) { 907 if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
906 DRM_ERROR("Locked master refused legacy " 908 DRM_ERROR("Locked master refused legacy "
907 "surface reference.\n"); 909 "surface reference.\n");
@@ -929,17 +931,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
929 931
930 /* 932 /*
931 * Make sure the surface creator has the same 933 * Make sure the surface creator has the same
932 * authenticating master. 934 * authenticating master, or is already registered with us.
933 */ 935 */
934 if (drm_is_primary_client(file_priv) && 936 if (drm_is_primary_client(file_priv) &&
935 user_srf->master != file_priv->master) { 937 user_srf->master != file_priv->master)
936 DRM_ERROR("Trying to reference surface outside of" 938 require_exist = true;
937 " master domain.\n");
938 ret = -EACCES;
939 goto out_bad_resource;
940 }
941 939
942 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); 940 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
941 require_exist);
943 if (unlikely(ret != 0)) { 942 if (unlikely(ret != 0)) {
944 DRM_ERROR("Could not add a reference to a surface.\n"); 943 DRM_ERROR("Could not add a reference to a surface.\n");
945 goto out_bad_resource; 944 goto out_bad_resource;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3ceb4a2af381..63ec1993eaaa 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2112,6 +2112,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
2112 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) }, 2112 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
2113 { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, 2113 { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
2114 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, 2114 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
2115 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
2115 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, 2116 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
2116 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, 2117 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
2117 { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) }, 2118 { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0e2e7c571d22..4e2648c86c8c 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1082,6 +1082,7 @@
1082 1082
1083#define USB_VENDOR_ID_XIN_MO 0x16c0 1083#define USB_VENDOR_ID_XIN_MO 0x16c0
1084#define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE 0x05e1 1084#define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE 0x05e1
1085#define USB_DEVICE_ID_THT_2P_ARCADE 0x75e1
1085 1086
1086#define USB_VENDOR_ID_XIROKU 0x1477 1087#define USB_VENDOR_ID_XIROKU 0x1477
1087#define USB_DEVICE_ID_XIROKU_SPX 0x1006 1088#define USB_DEVICE_ID_XIROKU_SPX 0x1006
diff --git a/drivers/hid/hid-xinmo.c b/drivers/hid/hid-xinmo.c
index 7df5227a7e61..9ad7731d2e10 100644
--- a/drivers/hid/hid-xinmo.c
+++ b/drivers/hid/hid-xinmo.c
@@ -46,6 +46,7 @@ static int xinmo_event(struct hid_device *hdev, struct hid_field *field,
46 46
47static const struct hid_device_id xinmo_devices[] = { 47static const struct hid_device_id xinmo_devices[] = {
48 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, 48 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
49 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
49 { } 50 { }
50}; 51};
51 52
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 994bddc55b82..e2666ef84dc1 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -2165,6 +2165,14 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
2165 2165
2166 wacom_update_name(wacom, wireless ? " (WL)" : ""); 2166 wacom_update_name(wacom, wireless ? " (WL)" : "");
2167 2167
2168 /* pen only Bamboo neither support touch nor pad */
2169 if ((features->type == BAMBOO_PEN) &&
2170 ((features->device_type & WACOM_DEVICETYPE_TOUCH) ||
2171 (features->device_type & WACOM_DEVICETYPE_PAD))) {
2172 error = -ENODEV;
2173 goto fail;
2174 }
2175
2168 error = wacom_add_shared_data(hdev); 2176 error = wacom_add_shared_data(hdev);
2169 if (error) 2177 if (error)
2170 goto fail; 2178 goto fail;
@@ -2208,14 +2216,8 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
2208 /* touch only Bamboo doesn't support pen */ 2216 /* touch only Bamboo doesn't support pen */
2209 if ((features->type == BAMBOO_TOUCH) && 2217 if ((features->type == BAMBOO_TOUCH) &&
2210 (features->device_type & WACOM_DEVICETYPE_PEN)) { 2218 (features->device_type & WACOM_DEVICETYPE_PEN)) {
2211 error = -ENODEV; 2219 cancel_delayed_work_sync(&wacom->init_work);
2212 goto fail_quirks; 2220 _wacom_query_tablet_data(wacom);
2213 }
2214
2215 /* pen only Bamboo neither support touch nor pad */
2216 if ((features->type == BAMBOO_PEN) &&
2217 ((features->device_type & WACOM_DEVICETYPE_TOUCH) ||
2218 (features->device_type & WACOM_DEVICETYPE_PAD))) {
2219 error = -ENODEV; 2221 error = -ENODEV;
2220 goto fail_quirks; 2222 goto fail_quirks;
2221 } 2223 }
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index bd0d1988feb2..321b8833fa6f 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -502,12 +502,15 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
502 502
503 wait_for_completion(&info->waitevent); 503 wait_for_completion(&info->waitevent);
504 504
505 if (channel->rescind) {
506 ret = -ENODEV;
507 goto post_msg_err;
508 }
509
510post_msg_err: 505post_msg_err:
506 /*
507 * If the channel has been rescinded;
508 * we will be awakened by the rescind
509 * handler; set the error code to zero so we don't leak memory.
510 */
511 if (channel->rescind)
512 ret = 0;
513
511 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); 514 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
512 list_del(&info->msglistentry); 515 list_del(&info->msglistentry);
513 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); 516 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
@@ -530,15 +533,13 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
530 int ret; 533 int ret;
531 534
532 /* 535 /*
533 * vmbus_on_event(), running in the tasklet, can race 536 * vmbus_on_event(), running in the per-channel tasklet, can race
534 * with vmbus_close_internal() in the case of SMP guest, e.g., when 537 * with vmbus_close_internal() in the case of SMP guest, e.g., when
535 * the former is accessing channel->inbound.ring_buffer, the latter 538 * the former is accessing channel->inbound.ring_buffer, the latter
536 * could be freeing the ring_buffer pages. 539 * could be freeing the ring_buffer pages, so here we must stop it
537 * 540 * first.
538 * To resolve the race, we can serialize them by disabling the
539 * tasklet when the latter is running here.
540 */ 541 */
541 hv_event_tasklet_disable(channel); 542 tasklet_disable(&channel->callback_event);
542 543
543 /* 544 /*
544 * In case a device driver's probe() fails (e.g., 545 * In case a device driver's probe() fails (e.g.,
@@ -605,8 +606,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
605 get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); 606 get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
606 607
607out: 608out:
608 hv_event_tasklet_enable(channel);
609
610 return ret; 609 return ret;
611} 610}
612 611
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index f33465d78a02..fbcb06352308 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -350,7 +350,8 @@ static struct vmbus_channel *alloc_channel(void)
350static void free_channel(struct vmbus_channel *channel) 350static void free_channel(struct vmbus_channel *channel)
351{ 351{
352 tasklet_kill(&channel->callback_event); 352 tasklet_kill(&channel->callback_event);
353 kfree(channel); 353
354 kfree_rcu(channel, rcu);
354} 355}
355 356
356static void percpu_channel_enq(void *arg) 357static void percpu_channel_enq(void *arg)
@@ -359,14 +360,14 @@ static void percpu_channel_enq(void *arg)
359 struct hv_per_cpu_context *hv_cpu 360 struct hv_per_cpu_context *hv_cpu
360 = this_cpu_ptr(hv_context.cpu_context); 361 = this_cpu_ptr(hv_context.cpu_context);
361 362
362 list_add_tail(&channel->percpu_list, &hv_cpu->chan_list); 363 list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list);
363} 364}
364 365
365static void percpu_channel_deq(void *arg) 366static void percpu_channel_deq(void *arg)
366{ 367{
367 struct vmbus_channel *channel = arg; 368 struct vmbus_channel *channel = arg;
368 369
369 list_del(&channel->percpu_list); 370 list_del_rcu(&channel->percpu_list);
370} 371}
371 372
372 373
@@ -381,19 +382,6 @@ static void vmbus_release_relid(u32 relid)
381 true); 382 true);
382} 383}
383 384
384void hv_event_tasklet_disable(struct vmbus_channel *channel)
385{
386 tasklet_disable(&channel->callback_event);
387}
388
389void hv_event_tasklet_enable(struct vmbus_channel *channel)
390{
391 tasklet_enable(&channel->callback_event);
392
393 /* In case there is any pending event */
394 tasklet_schedule(&channel->callback_event);
395}
396
397void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) 385void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
398{ 386{
399 unsigned long flags; 387 unsigned long flags;
@@ -402,7 +390,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
402 BUG_ON(!channel->rescind); 390 BUG_ON(!channel->rescind);
403 BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); 391 BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
404 392
405 hv_event_tasklet_disable(channel);
406 if (channel->target_cpu != get_cpu()) { 393 if (channel->target_cpu != get_cpu()) {
407 put_cpu(); 394 put_cpu();
408 smp_call_function_single(channel->target_cpu, 395 smp_call_function_single(channel->target_cpu,
@@ -411,7 +398,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
411 percpu_channel_deq(channel); 398 percpu_channel_deq(channel);
412 put_cpu(); 399 put_cpu();
413 } 400 }
414 hv_event_tasklet_enable(channel);
415 401
416 if (channel->primary_channel == NULL) { 402 if (channel->primary_channel == NULL) {
417 list_del(&channel->listentry); 403 list_del(&channel->listentry);
@@ -505,7 +491,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
505 491
506 init_vp_index(newchannel, dev_type); 492 init_vp_index(newchannel, dev_type);
507 493
508 hv_event_tasklet_disable(newchannel);
509 if (newchannel->target_cpu != get_cpu()) { 494 if (newchannel->target_cpu != get_cpu()) {
510 put_cpu(); 495 put_cpu();
511 smp_call_function_single(newchannel->target_cpu, 496 smp_call_function_single(newchannel->target_cpu,
@@ -515,7 +500,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
515 percpu_channel_enq(newchannel); 500 percpu_channel_enq(newchannel);
516 put_cpu(); 501 put_cpu();
517 } 502 }
518 hv_event_tasklet_enable(newchannel);
519 503
520 /* 504 /*
521 * This state is used to indicate a successful open 505 * This state is used to indicate a successful open
@@ -565,7 +549,6 @@ err_deq_chan:
565 list_del(&newchannel->listentry); 549 list_del(&newchannel->listentry);
566 mutex_unlock(&vmbus_connection.channel_mutex); 550 mutex_unlock(&vmbus_connection.channel_mutex);
567 551
568 hv_event_tasklet_disable(newchannel);
569 if (newchannel->target_cpu != get_cpu()) { 552 if (newchannel->target_cpu != get_cpu()) {
570 put_cpu(); 553 put_cpu();
571 smp_call_function_single(newchannel->target_cpu, 554 smp_call_function_single(newchannel->target_cpu,
@@ -574,7 +557,6 @@ err_deq_chan:
574 percpu_channel_deq(newchannel); 557 percpu_channel_deq(newchannel);
575 put_cpu(); 558 put_cpu();
576 } 559 }
577 hv_event_tasklet_enable(newchannel);
578 560
579 vmbus_release_relid(newchannel->offermsg.child_relid); 561 vmbus_release_relid(newchannel->offermsg.child_relid);
580 562
@@ -814,6 +796,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
814 /* Allocate the channel object and save this offer. */ 796 /* Allocate the channel object and save this offer. */
815 newchannel = alloc_channel(); 797 newchannel = alloc_channel();
816 if (!newchannel) { 798 if (!newchannel) {
799 vmbus_release_relid(offer->child_relid);
817 pr_err("Unable to allocate channel object\n"); 800 pr_err("Unable to allocate channel object\n");
818 return; 801 return;
819 } 802 }
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 9aee6014339d..a5596a642ed0 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -71,7 +71,6 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
71static const char fcopy_devname[] = "vmbus/hv_fcopy"; 71static const char fcopy_devname[] = "vmbus/hv_fcopy";
72static u8 *recv_buffer; 72static u8 *recv_buffer;
73static struct hvutil_transport *hvt; 73static struct hvutil_transport *hvt;
74static struct completion release_event;
75/* 74/*
76 * This state maintains the version number registered by the daemon. 75 * This state maintains the version number registered by the daemon.
77 */ 76 */
@@ -331,7 +330,6 @@ static void fcopy_on_reset(void)
331 330
332 if (cancel_delayed_work_sync(&fcopy_timeout_work)) 331 if (cancel_delayed_work_sync(&fcopy_timeout_work))
333 fcopy_respond_to_host(HV_E_FAIL); 332 fcopy_respond_to_host(HV_E_FAIL);
334 complete(&release_event);
335} 333}
336 334
337int hv_fcopy_init(struct hv_util_service *srv) 335int hv_fcopy_init(struct hv_util_service *srv)
@@ -339,7 +337,6 @@ int hv_fcopy_init(struct hv_util_service *srv)
339 recv_buffer = srv->recv_buffer; 337 recv_buffer = srv->recv_buffer;
340 fcopy_transaction.recv_channel = srv->channel; 338 fcopy_transaction.recv_channel = srv->channel;
341 339
342 init_completion(&release_event);
343 /* 340 /*
344 * When this driver loads, the user level daemon that 341 * When this driver loads, the user level daemon that
345 * processes the host requests may not yet be running. 342 * processes the host requests may not yet be running.
@@ -361,5 +358,4 @@ void hv_fcopy_deinit(void)
361 fcopy_transaction.state = HVUTIL_DEVICE_DYING; 358 fcopy_transaction.state = HVUTIL_DEVICE_DYING;
362 cancel_delayed_work_sync(&fcopy_timeout_work); 359 cancel_delayed_work_sync(&fcopy_timeout_work);
363 hvutil_transport_destroy(hvt); 360 hvutil_transport_destroy(hvt);
364 wait_for_completion(&release_event);
365} 361}
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index de263712e247..a1adfe2cfb34 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -101,7 +101,6 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
101static const char kvp_devname[] = "vmbus/hv_kvp"; 101static const char kvp_devname[] = "vmbus/hv_kvp";
102static u8 *recv_buffer; 102static u8 *recv_buffer;
103static struct hvutil_transport *hvt; 103static struct hvutil_transport *hvt;
104static struct completion release_event;
105/* 104/*
106 * Register the kernel component with the user-level daemon. 105 * Register the kernel component with the user-level daemon.
107 * As part of this registration, pass the LIC version number. 106 * As part of this registration, pass the LIC version number.
@@ -714,7 +713,6 @@ static void kvp_on_reset(void)
714 if (cancel_delayed_work_sync(&kvp_timeout_work)) 713 if (cancel_delayed_work_sync(&kvp_timeout_work))
715 kvp_respond_to_host(NULL, HV_E_FAIL); 714 kvp_respond_to_host(NULL, HV_E_FAIL);
716 kvp_transaction.state = HVUTIL_DEVICE_INIT; 715 kvp_transaction.state = HVUTIL_DEVICE_INIT;
717 complete(&release_event);
718} 716}
719 717
720int 718int
@@ -723,7 +721,6 @@ hv_kvp_init(struct hv_util_service *srv)
723 recv_buffer = srv->recv_buffer; 721 recv_buffer = srv->recv_buffer;
724 kvp_transaction.recv_channel = srv->channel; 722 kvp_transaction.recv_channel = srv->channel;
725 723
726 init_completion(&release_event);
727 /* 724 /*
728 * When this driver loads, the user level daemon that 725 * When this driver loads, the user level daemon that
729 * processes the host requests may not yet be running. 726 * processes the host requests may not yet be running.
@@ -747,5 +744,4 @@ void hv_kvp_deinit(void)
747 cancel_delayed_work_sync(&kvp_timeout_work); 744 cancel_delayed_work_sync(&kvp_timeout_work);
748 cancel_work_sync(&kvp_sendkey_work); 745 cancel_work_sync(&kvp_sendkey_work);
749 hvutil_transport_destroy(hvt); 746 hvutil_transport_destroy(hvt);
750 wait_for_completion(&release_event);
751} 747}
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index bcc03f0748d6..e659d1b94a57 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -79,7 +79,6 @@ static int dm_reg_value;
79static const char vss_devname[] = "vmbus/hv_vss"; 79static const char vss_devname[] = "vmbus/hv_vss";
80static __u8 *recv_buffer; 80static __u8 *recv_buffer;
81static struct hvutil_transport *hvt; 81static struct hvutil_transport *hvt;
82static struct completion release_event;
83 82
84static void vss_timeout_func(struct work_struct *dummy); 83static void vss_timeout_func(struct work_struct *dummy);
85static void vss_handle_request(struct work_struct *dummy); 84static void vss_handle_request(struct work_struct *dummy);
@@ -361,13 +360,11 @@ static void vss_on_reset(void)
361 if (cancel_delayed_work_sync(&vss_timeout_work)) 360 if (cancel_delayed_work_sync(&vss_timeout_work))
362 vss_respond_to_host(HV_E_FAIL); 361 vss_respond_to_host(HV_E_FAIL);
363 vss_transaction.state = HVUTIL_DEVICE_INIT; 362 vss_transaction.state = HVUTIL_DEVICE_INIT;
364 complete(&release_event);
365} 363}
366 364
367int 365int
368hv_vss_init(struct hv_util_service *srv) 366hv_vss_init(struct hv_util_service *srv)
369{ 367{
370 init_completion(&release_event);
371 if (vmbus_proto_version < VERSION_WIN8_1) { 368 if (vmbus_proto_version < VERSION_WIN8_1) {
372 pr_warn("Integration service 'Backup (volume snapshot)'" 369 pr_warn("Integration service 'Backup (volume snapshot)'"
373 " not supported on this host version.\n"); 370 " not supported on this host version.\n");
@@ -400,5 +397,4 @@ void hv_vss_deinit(void)
400 cancel_delayed_work_sync(&vss_timeout_work); 397 cancel_delayed_work_sync(&vss_timeout_work);
401 cancel_work_sync(&vss_handle_request_work); 398 cancel_work_sync(&vss_handle_request_work);
402 hvutil_transport_destroy(hvt); 399 hvutil_transport_destroy(hvt);
403 wait_for_completion(&release_event);
404} 400}
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 3042eaa13062..186b10083c55 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -590,6 +590,8 @@ static int hv_timesync_init(struct hv_util_service *srv)
590 if (!hyperv_cs) 590 if (!hyperv_cs)
591 return -ENODEV; 591 return -ENODEV;
592 592
593 spin_lock_init(&host_ts.lock);
594
593 INIT_WORK(&wrk.work, hv_set_host_time); 595 INIT_WORK(&wrk.work, hv_set_host_time);
594 596
595 /* 597 /*
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index c235a9515267..4402a71e23f7 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -182,10 +182,11 @@ static int hvt_op_release(struct inode *inode, struct file *file)
182 * connects back. 182 * connects back.
183 */ 183 */
184 hvt_reset(hvt); 184 hvt_reset(hvt);
185 mutex_unlock(&hvt->lock);
186 185
187 if (mode_old == HVUTIL_TRANSPORT_DESTROY) 186 if (mode_old == HVUTIL_TRANSPORT_DESTROY)
188 hvt_transport_free(hvt); 187 complete(&hvt->release);
188
189 mutex_unlock(&hvt->lock);
189 190
190 return 0; 191 return 0;
191} 192}
@@ -304,6 +305,7 @@ struct hvutil_transport *hvutil_transport_init(const char *name,
304 305
305 init_waitqueue_head(&hvt->outmsg_q); 306 init_waitqueue_head(&hvt->outmsg_q);
306 mutex_init(&hvt->lock); 307 mutex_init(&hvt->lock);
308 init_completion(&hvt->release);
307 309
308 spin_lock(&hvt_list_lock); 310 spin_lock(&hvt_list_lock);
309 list_add(&hvt->list, &hvt_list); 311 list_add(&hvt->list, &hvt_list);
@@ -351,6 +353,8 @@ void hvutil_transport_destroy(struct hvutil_transport *hvt)
351 if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0) 353 if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0)
352 cn_del_callback(&hvt->cn_id); 354 cn_del_callback(&hvt->cn_id);
353 355
354 if (mode_old != HVUTIL_TRANSPORT_CHARDEV) 356 if (mode_old == HVUTIL_TRANSPORT_CHARDEV)
355 hvt_transport_free(hvt); 357 wait_for_completion(&hvt->release);
358
359 hvt_transport_free(hvt);
356} 360}
diff --git a/drivers/hv/hv_utils_transport.h b/drivers/hv/hv_utils_transport.h
index d98f5225c3e6..79afb626e166 100644
--- a/drivers/hv/hv_utils_transport.h
+++ b/drivers/hv/hv_utils_transport.h
@@ -41,6 +41,7 @@ struct hvutil_transport {
41 int outmsg_len; /* its length */ 41 int outmsg_len; /* its length */
42 wait_queue_head_t outmsg_q; /* poll/read wait queue */ 42 wait_queue_head_t outmsg_q; /* poll/read wait queue */
43 struct mutex lock; /* protects struct members */ 43 struct mutex lock; /* protects struct members */
44 struct completion release; /* synchronize with fd release */
44}; 45};
45 46
46struct hvutil_transport *hvutil_transport_init(const char *name, 47struct hvutil_transport *hvutil_transport_init(const char *name,
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index da6b59ba5940..8370b9dc6037 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -939,8 +939,10 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
939 if (relid == 0) 939 if (relid == 0)
940 continue; 940 continue;
941 941
942 rcu_read_lock();
943
942 /* Find channel based on relid */ 944 /* Find channel based on relid */
943 list_for_each_entry(channel, &hv_cpu->chan_list, percpu_list) { 945 list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) {
944 if (channel->offermsg.child_relid != relid) 946 if (channel->offermsg.child_relid != relid)
945 continue; 947 continue;
946 948
@@ -956,6 +958,8 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
956 tasklet_schedule(&channel->callback_event); 958 tasklet_schedule(&channel->callback_event);
957 } 959 }
958 } 960 }
961
962 rcu_read_unlock();
959 } 963 }
960} 964}
961 965
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index cccef87963e0..975c43d446f8 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -646,6 +646,9 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value)
646 else 646 else
647 err = atk_read_value_new(sensor, value); 647 err = atk_read_value_new(sensor, value);
648 648
649 if (err)
650 return err;
651
649 sensor->is_valid = true; 652 sensor->is_valid = true;
650 sensor->last_updated = jiffies; 653 sensor->last_updated = jiffies;
651 sensor->cached_value = *value; 654 sensor->cached_value = *value;
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index efb01c247e2d..4dfc7238313e 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -3198,7 +3198,7 @@ static int __init sm_it87_init(void)
3198{ 3198{
3199 int sioaddr[2] = { REG_2E, REG_4E }; 3199 int sioaddr[2] = { REG_2E, REG_4E };
3200 struct it87_sio_data sio_data; 3200 struct it87_sio_data sio_data;
3201 unsigned short isa_address; 3201 unsigned short isa_address[2];
3202 bool found = false; 3202 bool found = false;
3203 int i, err; 3203 int i, err;
3204 3204
@@ -3208,15 +3208,29 @@ static int __init sm_it87_init(void)
3208 3208
3209 for (i = 0; i < ARRAY_SIZE(sioaddr); i++) { 3209 for (i = 0; i < ARRAY_SIZE(sioaddr); i++) {
3210 memset(&sio_data, 0, sizeof(struct it87_sio_data)); 3210 memset(&sio_data, 0, sizeof(struct it87_sio_data));
3211 isa_address = 0; 3211 isa_address[i] = 0;
3212 err = it87_find(sioaddr[i], &isa_address, &sio_data); 3212 err = it87_find(sioaddr[i], &isa_address[i], &sio_data);
3213 if (err || isa_address == 0) 3213 if (err || isa_address[i] == 0)
3214 continue; 3214 continue;
3215 /*
3216 * Don't register second chip if its ISA address matches
3217 * the first chip's ISA address.
3218 */
3219 if (i && isa_address[i] == isa_address[0])
3220 break;
3215 3221
3216 err = it87_device_add(i, isa_address, &sio_data); 3222 err = it87_device_add(i, isa_address[i], &sio_data);
3217 if (err) 3223 if (err)
3218 goto exit_dev_unregister; 3224 goto exit_dev_unregister;
3225
3219 found = true; 3226 found = true;
3227
3228 /*
3229 * IT8705F may respond on both SIO addresses.
3230 * Stop probing after finding one.
3231 */
3232 if (sio_data.type == it87)
3233 break;
3220 } 3234 }
3221 3235
3222 if (!found) { 3236 if (!found) {
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index c1b9275978f9..281491cca510 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -311,7 +311,7 @@ static int max31790_write_pwm(struct device *dev, u32 attr, int channel,
311 data->pwm[channel] = val << 8; 311 data->pwm[channel] = val << 8;
312 err = i2c_smbus_write_word_swapped(client, 312 err = i2c_smbus_write_word_swapped(client,
313 MAX31790_REG_PWMOUT(channel), 313 MAX31790_REG_PWMOUT(channel),
314 val); 314 data->pwm[channel]);
315 break; 315 break;
316 case hwmon_pwm_enable: 316 case hwmon_pwm_enable:
317 fan_config = data->fan_config[channel]; 317 fan_config = data->fan_config[channel];
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index cdd9b3b26195..7563eceeaaea 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -221,8 +221,10 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
221 else 221 else
222 intel_th_trace_enable(thdev); 222 intel_th_trace_enable(thdev);
223 223
224 if (ret) 224 if (ret) {
225 pm_runtime_put(&thdev->dev); 225 pm_runtime_put(&thdev->dev);
226 module_put(thdrv->driver.owner);
227 }
226 228
227 return ret; 229 return ret;
228} 230}
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 0bba3842336e..590cf90dd21a 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -85,6 +85,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
85 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6), 85 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
86 .driver_data = (kernel_ulong_t)0, 86 .driver_data = (kernel_ulong_t)0,
87 }, 87 },
88 {
89 /* Denverton */
90 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x19e1),
91 .driver_data = (kernel_ulong_t)0,
92 },
93 {
94 /* Gemini Lake */
95 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
96 .driver_data = (kernel_ulong_t)0,
97 },
88 { 0 }, 98 { 0 },
89}; 99};
90 100
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index dfc1c0e37c40..ad31d21da316 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -35,7 +35,6 @@
35 * warranty of any kind, whether express or implied. 35 * warranty of any kind, whether express or implied.
36 */ 36 */
37 37
38#include <linux/acpi.h>
39#include <linux/device.h> 38#include <linux/device.h>
40#include <linux/gpio/consumer.h> 39#include <linux/gpio/consumer.h>
41#include <linux/i2c.h> 40#include <linux/i2c.h>
@@ -117,6 +116,10 @@ static const struct chip_desc chips[] = {
117 .has_irq = 1, 116 .has_irq = 1,
118 .muxtype = pca954x_isswi, 117 .muxtype = pca954x_isswi,
119 }, 118 },
119 [pca_9546] = {
120 .nchans = 4,
121 .muxtype = pca954x_isswi,
122 },
120 [pca_9547] = { 123 [pca_9547] = {
121 .nchans = 8, 124 .nchans = 8,
122 .enable = 0x8, 125 .enable = 0x8,
@@ -134,28 +137,13 @@ static const struct i2c_device_id pca954x_id[] = {
134 { "pca9543", pca_9543 }, 137 { "pca9543", pca_9543 },
135 { "pca9544", pca_9544 }, 138 { "pca9544", pca_9544 },
136 { "pca9545", pca_9545 }, 139 { "pca9545", pca_9545 },
137 { "pca9546", pca_9545 }, 140 { "pca9546", pca_9546 },
138 { "pca9547", pca_9547 }, 141 { "pca9547", pca_9547 },
139 { "pca9548", pca_9548 }, 142 { "pca9548", pca_9548 },
140 { } 143 { }
141}; 144};
142MODULE_DEVICE_TABLE(i2c, pca954x_id); 145MODULE_DEVICE_TABLE(i2c, pca954x_id);
143 146
144#ifdef CONFIG_ACPI
145static const struct acpi_device_id pca954x_acpi_ids[] = {
146 { .id = "PCA9540", .driver_data = pca_9540 },
147 { .id = "PCA9542", .driver_data = pca_9542 },
148 { .id = "PCA9543", .driver_data = pca_9543 },
149 { .id = "PCA9544", .driver_data = pca_9544 },
150 { .id = "PCA9545", .driver_data = pca_9545 },
151 { .id = "PCA9546", .driver_data = pca_9545 },
152 { .id = "PCA9547", .driver_data = pca_9547 },
153 { .id = "PCA9548", .driver_data = pca_9548 },
154 { }
155};
156MODULE_DEVICE_TABLE(acpi, pca954x_acpi_ids);
157#endif
158
159#ifdef CONFIG_OF 147#ifdef CONFIG_OF
160static const struct of_device_id pca954x_of_match[] = { 148static const struct of_device_id pca954x_of_match[] = {
161 { .compatible = "nxp,pca9540", .data = &chips[pca_9540] }, 149 { .compatible = "nxp,pca9540", .data = &chips[pca_9540] },
@@ -393,17 +381,8 @@ static int pca954x_probe(struct i2c_client *client,
393 match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev); 381 match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev);
394 if (match) 382 if (match)
395 data->chip = of_device_get_match_data(&client->dev); 383 data->chip = of_device_get_match_data(&client->dev);
396 else if (id) 384 else
397 data->chip = &chips[id->driver_data]; 385 data->chip = &chips[id->driver_data];
398 else {
399 const struct acpi_device_id *acpi_id;
400
401 acpi_id = acpi_match_device(ACPI_PTR(pca954x_acpi_ids),
402 &client->dev);
403 if (!acpi_id)
404 return -ENODEV;
405 data->chip = &chips[acpi_id->driver_data];
406 }
407 386
408 data->last_chan = 0; /* force the first selection */ 387 data->last_chan = 0; /* force the first selection */
409 388
@@ -492,7 +471,6 @@ static struct i2c_driver pca954x_driver = {
492 .name = "pca954x", 471 .name = "pca954x",
493 .pm = &pca954x_pm, 472 .pm = &pca954x_pm,
494 .of_match_table = of_match_ptr(pca954x_of_match), 473 .of_match_table = of_match_ptr(pca954x_of_match),
495 .acpi_match_table = ACPI_PTR(pca954x_acpi_ids),
496 }, 474 },
497 .probe = pca954x_probe, 475 .probe = pca954x_probe,
498 .remove = pca954x_remove, 476 .remove = pca954x_remove,
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index ad9dec30bb30..4282ceca3d8f 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -169,7 +169,9 @@ static irqreturn_t tiadc_irq_h(int irq, void *private)
169{ 169{
170 struct iio_dev *indio_dev = private; 170 struct iio_dev *indio_dev = private;
171 struct tiadc_device *adc_dev = iio_priv(indio_dev); 171 struct tiadc_device *adc_dev = iio_priv(indio_dev);
172 unsigned int status, config; 172 unsigned int status, config, adc_fsm;
173 unsigned short count = 0;
174
173 status = tiadc_readl(adc_dev, REG_IRQSTATUS); 175 status = tiadc_readl(adc_dev, REG_IRQSTATUS);
174 176
175 /* 177 /*
@@ -183,6 +185,15 @@ static irqreturn_t tiadc_irq_h(int irq, void *private)
183 tiadc_writel(adc_dev, REG_CTRL, config); 185 tiadc_writel(adc_dev, REG_CTRL, config);
184 tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN 186 tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN
185 | IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES); 187 | IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES);
188
189 /* wait for idle state.
190 * ADC needs to finish the current conversion
191 * before disabling the module
192 */
193 do {
194 adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM);
195 } while (adc_fsm != 0x10 && count++ < 100);
196
186 tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB)); 197 tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB));
187 return IRQ_HANDLED; 198 return IRQ_HANDLED;
188 } else if (status & IRQENB_FIFO1THRES) { 199 } else if (status & IRQENB_FIFO1THRES) {
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index a3cce3a38300..ecf592d69043 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -51,8 +51,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
51 st->report_state.report_id, 51 st->report_state.report_id,
52 st->report_state.index, 52 st->report_state.index,
53 HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM); 53 HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);
54
55 poll_value = hid_sensor_read_poll_value(st);
56 } else { 54 } else {
57 int val; 55 int val;
58 56
@@ -89,7 +87,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
89 sensor_hub_get_feature(st->hsdev, st->power_state.report_id, 87 sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
90 st->power_state.index, 88 st->power_state.index,
91 sizeof(state_val), &state_val); 89 sizeof(state_val), &state_val);
92 if (state && poll_value) 90 if (state)
91 poll_value = hid_sensor_read_poll_value(st);
92 if (poll_value > 0)
93 msleep_interruptible(poll_value * 2); 93 msleep_interruptible(poll_value * 2);
94 94
95 return 0; 95 return 0;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 78532ce07449..81b572d7699a 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -193,8 +193,8 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
193 if (err < 0) 193 if (err < 0)
194 goto out; 194 goto out;
195 195
196 fifo_watermark = ((data & ~ST_LSM6DSX_FIFO_TH_MASK) << 8) | 196 fifo_watermark = ((data << 8) & ~ST_LSM6DSX_FIFO_TH_MASK) |
197 (fifo_watermark & ST_LSM6DSX_FIFO_TH_MASK); 197 (fifo_watermark & ST_LSM6DSX_FIFO_TH_MASK);
198 198
199 wdata = cpu_to_le16(fifo_watermark); 199 wdata = cpu_to_le16(fifo_watermark);
200 err = hw->tf->write(hw->dev, ST_LSM6DSX_REG_FIFO_THL_ADDR, 200 err = hw->tf->write(hw->dev, ST_LSM6DSX_REG_FIFO_THL_ADDR,
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 6dd8cbd7ce95..e13370dc9b1c 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -763,7 +763,7 @@ power_off:
763 return ret; 763 return ret;
764} 764}
765 765
766static int __exit ak8974_remove(struct i2c_client *i2c) 766static int ak8974_remove(struct i2c_client *i2c)
767{ 767{
768 struct iio_dev *indio_dev = i2c_get_clientdata(i2c); 768 struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
769 struct ak8974 *ak8974 = iio_priv(indio_dev); 769 struct ak8974 *ak8974 = iio_priv(indio_dev);
@@ -845,7 +845,7 @@ static struct i2c_driver ak8974_driver = {
845 .of_match_table = of_match_ptr(ak8974_of_match), 845 .of_match_table = of_match_ptr(ak8974_of_match),
846 }, 846 },
847 .probe = ak8974_probe, 847 .probe = ak8974_probe,
848 .remove = __exit_p(ak8974_remove), 848 .remove = ak8974_remove,
849 .id_table = ak8974_id, 849 .id_table = ak8974_id,
850}; 850};
851module_i2c_driver(ak8974_driver); 851module_i2c_driver(ak8974_driver);
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index e95510117a6d..f2ae75fa3128 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -29,7 +29,13 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
29{ 29{
30 int i, n, completed = 0; 30 int i, n, completed = 0;
31 31
32 while ((n = ib_poll_cq(cq, IB_POLL_BATCH, cq->wc)) > 0) { 32 /*
33 * budget might be (-1) if the caller does not
34 * want to bound this call, thus we need unsigned
35 * minimum here.
36 */
37 while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
38 budget - completed), cq->wc)) > 0) {
33 for (i = 0; i < n; i++) { 39 for (i = 0; i < n; i++) {
34 struct ib_wc *wc = &cq->wc[i]; 40 struct ib_wc *wc = &cq->wc[i];
35 41
@@ -196,7 +202,7 @@ void ib_free_cq(struct ib_cq *cq)
196 irq_poll_disable(&cq->iop); 202 irq_poll_disable(&cq->iop);
197 break; 203 break;
198 case IB_POLL_WORKQUEUE: 204 case IB_POLL_WORKQUEUE:
199 flush_work(&cq->work); 205 cancel_work_sync(&cq->work);
200 break; 206 break;
201 default: 207 default:
202 WARN_ON_ONCE(1); 208 WARN_ON_ONCE(1);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 593d2ce6ec7c..7c9e34d679d3 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -336,12 +336,26 @@ int ib_register_device(struct ib_device *device,
336 struct device *parent = device->dev.parent; 336 struct device *parent = device->dev.parent;
337 337
338 WARN_ON_ONCE(!parent); 338 WARN_ON_ONCE(!parent);
339 if (!device->dev.dma_ops) 339 WARN_ON_ONCE(device->dma_device);
340 device->dev.dma_ops = parent->dma_ops; 340 if (device->dev.dma_ops) {
341 if (!device->dev.dma_mask) 341 /*
342 device->dev.dma_mask = parent->dma_mask; 342 * The caller provided custom DMA operations. Copy the
343 if (!device->dev.coherent_dma_mask) 343 * DMA-related fields that are used by e.g. dma_alloc_coherent()
344 device->dev.coherent_dma_mask = parent->coherent_dma_mask; 344 * into device->dev.
345 */
346 device->dma_device = &device->dev;
347 if (!device->dev.dma_mask)
348 device->dev.dma_mask = parent->dma_mask;
349 if (!device->dev.coherent_dma_mask)
350 device->dev.coherent_dma_mask =
351 parent->coherent_dma_mask;
352 } else {
353 /*
354 * The caller did not provide custom DMA operations. Use the
355 * DMA mapping operations of the parent device.
356 */
357 device->dma_device = parent;
358 }
345 359
346 mutex_lock(&device_mutex); 360 mutex_lock(&device_mutex);
347 361
@@ -1015,8 +1029,7 @@ static int __init ib_core_init(void)
1015 return -ENOMEM; 1029 return -ENOMEM;
1016 1030
1017 ib_comp_wq = alloc_workqueue("ib-comp-wq", 1031 ib_comp_wq = alloc_workqueue("ib-comp-wq",
1018 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, 1032 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
1019 WQ_UNBOUND_MAX_ACTIVE);
1020 if (!ib_comp_wq) { 1033 if (!ib_comp_wq) {
1021 ret = -ENOMEM; 1034 ret = -ENOMEM;
1022 goto err; 1035 goto err;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 0f5d43d1f5fc..70c3e9e79508 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -160,6 +160,9 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
160 return NOTIFY_DONE; 160 return NOTIFY_DONE;
161 161
162 iwdev = &hdl->device; 162 iwdev = &hdl->device;
163 if (iwdev->init_state < INET_NOTIFIER)
164 return NOTIFY_DONE;
165
163 netdev = iwdev->ldev->netdev; 166 netdev = iwdev->ldev->netdev;
164 upper_dev = netdev_master_upper_dev_get(netdev); 167 upper_dev = netdev_master_upper_dev_get(netdev);
165 if (netdev != event_netdev) 168 if (netdev != event_netdev)
@@ -214,6 +217,9 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
214 return NOTIFY_DONE; 217 return NOTIFY_DONE;
215 218
216 iwdev = &hdl->device; 219 iwdev = &hdl->device;
220 if (iwdev->init_state < INET_NOTIFIER)
221 return NOTIFY_DONE;
222
217 netdev = iwdev->ldev->netdev; 223 netdev = iwdev->ldev->netdev;
218 if (netdev != event_netdev) 224 if (netdev != event_netdev)
219 return NOTIFY_DONE; 225 return NOTIFY_DONE;
@@ -260,6 +266,8 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
260 if (!iwhdl) 266 if (!iwhdl)
261 return NOTIFY_DONE; 267 return NOTIFY_DONE;
262 iwdev = &iwhdl->device; 268 iwdev = &iwhdl->device;
269 if (iwdev->init_state < INET_NOTIFIER)
270 return NOTIFY_DONE;
263 p = (__be32 *)neigh->primary_key; 271 p = (__be32 *)neigh->primary_key;
264 i40iw_copy_ip_ntohl(local_ipaddr, p); 272 i40iw_copy_ip_ntohl(local_ipaddr, p);
265 if (neigh->nud_state & NUD_VALID) { 273 if (neigh->nud_state & NUD_VALID) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index bc9fb144e57b..c52edeafd616 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -372,7 +372,7 @@ static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
372 return 0; 372 return 0;
373} 373}
374 374
375static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id, 375static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
376 bool dpp_pool) 376 bool dpp_pool)
377{ 377{
378 int status; 378 int status;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 12c4208fd701..af9f596bb68b 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -7068,7 +7068,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
7068 unsigned long flags; 7068 unsigned long flags;
7069 7069
7070 while (wait) { 7070 while (wait) {
7071 unsigned long shadow; 7071 unsigned long shadow = 0;
7072 int cstart, previ = -1; 7072 int cstart, previ = -1;
7073 7073
7074 /* 7074 /*
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 3cd96c1b9502..9fbe22d3467b 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -69,6 +69,9 @@
69 */ 69 */
70#define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820 70#define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820
71 71
72#define PVRDMA_NUM_RING_PAGES 4
73#define PVRDMA_QP_NUM_HEADER_PAGES 1
74
72struct pvrdma_dev; 75struct pvrdma_dev;
73 76
74struct pvrdma_page_dir { 77struct pvrdma_page_dir {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
index e69d6f3cae32..09078ccfaec7 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
@@ -132,7 +132,7 @@ enum pvrdma_pci_resource {
132 132
133enum pvrdma_device_ctl { 133enum pvrdma_device_ctl {
134 PVRDMA_DEVICE_CTL_ACTIVATE, /* Activate device. */ 134 PVRDMA_DEVICE_CTL_ACTIVATE, /* Activate device. */
135 PVRDMA_DEVICE_CTL_QUIESCE, /* Quiesce device. */ 135 PVRDMA_DEVICE_CTL_UNQUIESCE, /* Unquiesce device. */
136 PVRDMA_DEVICE_CTL_RESET, /* Reset device. */ 136 PVRDMA_DEVICE_CTL_RESET, /* Reset device. */
137}; 137};
138 138
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 100bea5c42ff..34ebc7615411 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -56,7 +56,7 @@
56#include "pvrdma.h" 56#include "pvrdma.h"
57 57
58#define DRV_NAME "vmw_pvrdma" 58#define DRV_NAME "vmw_pvrdma"
59#define DRV_VERSION "1.0.0.0-k" 59#define DRV_VERSION "1.0.1.0-k"
60 60
61static DEFINE_MUTEX(pvrdma_device_list_lock); 61static DEFINE_MUTEX(pvrdma_device_list_lock);
62static LIST_HEAD(pvrdma_device_list); 62static LIST_HEAD(pvrdma_device_list);
@@ -660,7 +660,16 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
660 pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); 660 pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
661 break; 661 break;
662 case NETDEV_UP: 662 case NETDEV_UP:
663 pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); 663 pvrdma_write_reg(dev, PVRDMA_REG_CTL,
664 PVRDMA_DEVICE_CTL_UNQUIESCE);
665
666 mb();
667
668 if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
669 dev_err(&dev->pdev->dev,
670 "failed to activate device during link up\n");
671 else
672 pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
664 break; 673 break;
665 default: 674 default:
666 dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n", 675 dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
@@ -858,7 +867,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
858 dev->dsr->resp_slot_dma = (u64)slot_dma; 867 dev->dsr->resp_slot_dma = (u64)slot_dma;
859 868
860 /* Async event ring */ 869 /* Async event ring */
861 dev->dsr->async_ring_pages.num_pages = 4; 870 dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
862 ret = pvrdma_page_dir_init(dev, &dev->async_pdir, 871 ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
863 dev->dsr->async_ring_pages.num_pages, true); 872 dev->dsr->async_ring_pages.num_pages, true);
864 if (ret) 873 if (ret)
@@ -867,7 +876,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
867 dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma; 876 dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;
868 877
869 /* CQ notification ring */ 878 /* CQ notification ring */
870 dev->dsr->cq_ring_pages.num_pages = 4; 879 dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
871 ret = pvrdma_page_dir_init(dev, &dev->cq_pdir, 880 ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
872 dev->dsr->cq_ring_pages.num_pages, true); 881 dev->dsr->cq_ring_pages.num_pages, true);
873 if (ret) 882 if (ret)
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index dbbfd35e7da7..30062aad3af1 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -170,8 +170,9 @@ static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
170 sizeof(struct pvrdma_sge) * 170 sizeof(struct pvrdma_sge) *
171 qp->sq.max_sg); 171 qp->sq.max_sg);
172 /* Note: one extra page for the header. */ 172 /* Note: one extra page for the header. */
173 qp->npages_send = 1 + (qp->sq.wqe_cnt * qp->sq.wqe_size + 173 qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES +
174 PAGE_SIZE - 1) / PAGE_SIZE; 174 (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /
175 PAGE_SIZE;
175 176
176 return 0; 177 return 0;
177} 178}
@@ -288,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
288 qp->npages = qp->npages_send + qp->npages_recv; 289 qp->npages = qp->npages_send + qp->npages_recv;
289 290
290 /* Skip header page. */ 291 /* Skip header page. */
291 qp->sq.offset = PAGE_SIZE; 292 qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE;
292 293
293 /* Recv queue pages are after send pages. */ 294 /* Recv queue pages are after send pages. */
294 qp->rq.offset = qp->npages_send * PAGE_SIZE; 295 qp->rq.offset = qp->npages_send * PAGE_SIZE;
@@ -341,7 +342,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
341 cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type); 342 cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
342 cmd->access_flags = IB_ACCESS_LOCAL_WRITE; 343 cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
343 cmd->total_chunks = qp->npages; 344 cmd->total_chunks = qp->npages;
344 cmd->send_chunks = qp->npages_send - 1; 345 cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES;
345 cmd->pdir_dma = qp->pdir.dir_dma; 346 cmd->pdir_dma = qp->pdir.dir_dma;
346 347
347 dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n", 348 dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n",
@@ -554,13 +555,13 @@ out:
554 return ret; 555 return ret;
555} 556}
556 557
557static inline void *get_sq_wqe(struct pvrdma_qp *qp, int n) 558static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
558{ 559{
559 return pvrdma_page_dir_get_ptr(&qp->pdir, 560 return pvrdma_page_dir_get_ptr(&qp->pdir,
560 qp->sq.offset + n * qp->sq.wqe_size); 561 qp->sq.offset + n * qp->sq.wqe_size);
561} 562}
562 563
563static inline void *get_rq_wqe(struct pvrdma_qp *qp, int n) 564static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
564{ 565{
565 return pvrdma_page_dir_get_ptr(&qp->pdir, 566 return pvrdma_page_dir_get_ptr(&qp->pdir,
566 qp->rq.offset + n * qp->rq.wqe_size); 567 qp->rq.offset + n * qp->rq.wqe_size);
@@ -598,9 +599,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
598 unsigned long flags; 599 unsigned long flags;
599 struct pvrdma_sq_wqe_hdr *wqe_hdr; 600 struct pvrdma_sq_wqe_hdr *wqe_hdr;
600 struct pvrdma_sge *sge; 601 struct pvrdma_sge *sge;
601 int i, index; 602 int i, ret;
602 int nreq;
603 int ret;
604 603
605 /* 604 /*
606 * In states lower than RTS, we can fail immediately. In other states, 605 * In states lower than RTS, we can fail immediately. In other states,
@@ -613,9 +612,8 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
613 612
614 spin_lock_irqsave(&qp->sq.lock, flags); 613 spin_lock_irqsave(&qp->sq.lock, flags);
615 614
616 index = pvrdma_idx(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt); 615 while (wr) {
617 for (nreq = 0; wr; nreq++, wr = wr->next) { 616 unsigned int tail = 0;
618 unsigned int tail;
619 617
620 if (unlikely(!pvrdma_idx_ring_has_space( 618 if (unlikely(!pvrdma_idx_ring_has_space(
621 qp->sq.ring, qp->sq.wqe_cnt, &tail))) { 619 qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
@@ -680,7 +678,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
680 } 678 }
681 } 679 }
682 680
683 wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, index); 681 wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
684 memset(wqe_hdr, 0, sizeof(*wqe_hdr)); 682 memset(wqe_hdr, 0, sizeof(*wqe_hdr));
685 wqe_hdr->wr_id = wr->wr_id; 683 wqe_hdr->wr_id = wr->wr_id;
686 wqe_hdr->num_sge = wr->num_sge; 684 wqe_hdr->num_sge = wr->num_sge;
@@ -771,12 +769,11 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
771 /* Make sure wqe is written before index update */ 769 /* Make sure wqe is written before index update */
772 smp_wmb(); 770 smp_wmb();
773 771
774 index++;
775 if (unlikely(index >= qp->sq.wqe_cnt))
776 index = 0;
777 /* Update shared sq ring */ 772 /* Update shared sq ring */
778 pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail, 773 pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
779 qp->sq.wqe_cnt); 774 qp->sq.wqe_cnt);
775
776 wr = wr->next;
780 } 777 }
781 778
782 ret = 0; 779 ret = 0;
@@ -806,7 +803,6 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
806 struct pvrdma_qp *qp = to_vqp(ibqp); 803 struct pvrdma_qp *qp = to_vqp(ibqp);
807 struct pvrdma_rq_wqe_hdr *wqe_hdr; 804 struct pvrdma_rq_wqe_hdr *wqe_hdr;
808 struct pvrdma_sge *sge; 805 struct pvrdma_sge *sge;
809 int index, nreq;
810 int ret = 0; 806 int ret = 0;
811 int i; 807 int i;
812 808
@@ -821,9 +817,8 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
821 817
822 spin_lock_irqsave(&qp->rq.lock, flags); 818 spin_lock_irqsave(&qp->rq.lock, flags);
823 819
824 index = pvrdma_idx(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt); 820 while (wr) {
825 for (nreq = 0; wr; nreq++, wr = wr->next) { 821 unsigned int tail = 0;
826 unsigned int tail;
827 822
828 if (unlikely(wr->num_sge > qp->rq.max_sg || 823 if (unlikely(wr->num_sge > qp->rq.max_sg ||
829 wr->num_sge < 0)) { 824 wr->num_sge < 0)) {
@@ -843,7 +838,7 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
843 goto out; 838 goto out;
844 } 839 }
845 840
846 wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, index); 841 wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
847 wqe_hdr->wr_id = wr->wr_id; 842 wqe_hdr->wr_id = wr->wr_id;
848 wqe_hdr->num_sge = wr->num_sge; 843 wqe_hdr->num_sge = wr->num_sge;
849 wqe_hdr->total_len = 0; 844 wqe_hdr->total_len = 0;
@@ -859,12 +854,11 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
859 /* Make sure wqe is written before index update */ 854 /* Make sure wqe is written before index update */
860 smp_wmb(); 855 smp_wmb();
861 856
862 index++;
863 if (unlikely(index >= qp->rq.wqe_cnt))
864 index = 0;
865 /* Update shared rq ring */ 857 /* Update shared rq ring */
866 pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail, 858 pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
867 qp->rq.wqe_cnt); 859 qp->rq.wqe_cnt);
860
861 wr = wr->next;
868 } 862 }
869 863
870 spin_unlock_irqrestore(&qp->rq.lock, flags); 864 spin_unlock_irqrestore(&qp->rq.lock, flags);
diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c
index e202b8142759..6b712eecbd37 100644
--- a/drivers/infiniband/sw/rdmavt/mmap.c
+++ b/drivers/infiniband/sw/rdmavt/mmap.c
@@ -170,9 +170,9 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
170 170
171 spin_lock_irq(&rdi->mmap_offset_lock); 171 spin_lock_irq(&rdi->mmap_offset_lock);
172 if (rdi->mmap_offset == 0) 172 if (rdi->mmap_offset == 0)
173 rdi->mmap_offset = PAGE_SIZE; 173 rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
174 ip->offset = rdi->mmap_offset; 174 ip->offset = rdi->mmap_offset;
175 rdi->mmap_offset += size; 175 rdi->mmap_offset += ALIGN(size, SHMLBA);
176 spin_unlock_irq(&rdi->mmap_offset_lock); 176 spin_unlock_irq(&rdi->mmap_offset_lock);
177 177
178 INIT_LIST_HEAD(&ip->pending_mmaps); 178 INIT_LIST_HEAD(&ip->pending_mmaps);
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index 7d1ac27ed251..6332dedc11e8 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -22,4 +22,4 @@ config RDMA_RXE
22 To configure and work with soft-RoCE driver please use the 22 To configure and work with soft-RoCE driver please use the
23 following wiki page under "configure Soft-RoCE (RXE)" section: 23 following wiki page under "configure Soft-RoCE (RXE)" section:
24 24
25 https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home 25 https://github.com/linux-rdma/rdma-core/blob/master/Documentation/rxe.md
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index c572a4c09359..bd812e00988e 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -156,10 +156,10 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
156 spin_lock_bh(&rxe->mmap_offset_lock); 156 spin_lock_bh(&rxe->mmap_offset_lock);
157 157
158 if (rxe->mmap_offset == 0) 158 if (rxe->mmap_offset == 0)
159 rxe->mmap_offset = PAGE_SIZE; 159 rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
160 160
161 ip->info.offset = rxe->mmap_offset; 161 ip->info.offset = rxe->mmap_offset;
162 rxe->mmap_offset += size; 162 rxe->mmap_offset += ALIGN(size, SHMLBA);
163 163
164 spin_unlock_bh(&rxe->mmap_offset_lock); 164 spin_unlock_bh(&rxe->mmap_offset_lock);
165 165
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index dbfde0dc6ff7..9f95f50b2909 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -729,11 +729,11 @@ next_wqe:
729 ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb); 729 ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
730 if (ret) { 730 if (ret) {
731 qp->need_req_skb = 1; 731 qp->need_req_skb = 1;
732 kfree_skb(skb);
733 732
734 rollback_state(wqe, qp, &rollback_wqe, rollback_psn); 733 rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
735 734
736 if (ret == -EAGAIN) { 735 if (ret == -EAGAIN) {
736 kfree_skb(skb);
737 rxe_run_task(&qp->req.task, 1); 737 rxe_run_task(&qp->req.task, 1);
738 goto exit; 738 goto exit;
739 } 739 }
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index d404a8aba7af..c9dd385ce62e 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -813,18 +813,17 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
813 WARN_ON_ONCE(1); 813 WARN_ON_ONCE(1);
814 } 814 }
815 815
816 /* We successfully processed this new request. */
817 qp->resp.msn++;
818
819 /* next expected psn, read handles this separately */ 816 /* next expected psn, read handles this separately */
820 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; 817 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
821 818
822 qp->resp.opcode = pkt->opcode; 819 qp->resp.opcode = pkt->opcode;
823 qp->resp.status = IB_WC_SUCCESS; 820 qp->resp.status = IB_WC_SUCCESS;
824 821
825 if (pkt->mask & RXE_COMP_MASK) 822 if (pkt->mask & RXE_COMP_MASK) {
823 /* We successfully processed this new request. */
824 qp->resp.msn++;
826 return RESPST_COMPLETE; 825 return RESPST_COMPLETE;
827 else if (qp_type(qp) == IB_QPT_RC) 826 } else if (qp_type(qp) == IB_QPT_RC)
828 return RESPST_ACKNOWLEDGE; 827 return RESPST_ACKNOWLEDGE;
829 else 828 else
830 return RESPST_CLEANUP; 829 return RESPST_CLEANUP;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 9d0b22ad58c1..c1ae4aeae2f9 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -430,6 +430,7 @@ struct iser_fr_desc {
430 struct list_head list; 430 struct list_head list;
431 struct iser_reg_resources rsc; 431 struct iser_reg_resources rsc;
432 struct iser_pi_context *pi_ctx; 432 struct iser_pi_context *pi_ctx;
433 struct list_head all_list;
433}; 434};
434 435
435/** 436/**
@@ -443,6 +444,7 @@ struct iser_fr_pool {
443 struct list_head list; 444 struct list_head list;
444 spinlock_t lock; 445 spinlock_t lock;
445 int size; 446 int size;
447 struct list_head all_list;
446}; 448};
447 449
448/** 450/**
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 30b622f2ab73..c538a38c91ce 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -362,6 +362,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
362 int i, ret; 362 int i, ret;
363 363
364 INIT_LIST_HEAD(&fr_pool->list); 364 INIT_LIST_HEAD(&fr_pool->list);
365 INIT_LIST_HEAD(&fr_pool->all_list);
365 spin_lock_init(&fr_pool->lock); 366 spin_lock_init(&fr_pool->lock);
366 fr_pool->size = 0; 367 fr_pool->size = 0;
367 for (i = 0; i < cmds_max; i++) { 368 for (i = 0; i < cmds_max; i++) {
@@ -373,6 +374,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
373 } 374 }
374 375
375 list_add_tail(&desc->list, &fr_pool->list); 376 list_add_tail(&desc->list, &fr_pool->list);
377 list_add_tail(&desc->all_list, &fr_pool->all_list);
376 fr_pool->size++; 378 fr_pool->size++;
377 } 379 }
378 380
@@ -392,13 +394,13 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
392 struct iser_fr_desc *desc, *tmp; 394 struct iser_fr_desc *desc, *tmp;
393 int i = 0; 395 int i = 0;
394 396
395 if (list_empty(&fr_pool->list)) 397 if (list_empty(&fr_pool->all_list))
396 return; 398 return;
397 399
398 iser_info("freeing conn %p fr pool\n", ib_conn); 400 iser_info("freeing conn %p fr pool\n", ib_conn);
399 401
400 list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) { 402 list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
401 list_del(&desc->list); 403 list_del(&desc->all_list);
402 iser_free_reg_res(&desc->rsc); 404 iser_free_reg_res(&desc->rsc);
403 if (desc->pi_ctx) 405 if (desc->pi_ctx)
404 iser_free_pi_ctx(desc->pi_ctx); 406 iser_free_pi_ctx(desc->pi_ctx);
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index d96aa27dfcdc..db64adfbe1af 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -141,6 +141,9 @@ static int iforce_usb_probe(struct usb_interface *intf,
141 141
142 interface = intf->cur_altsetting; 142 interface = intf->cur_altsetting;
143 143
144 if (interface->desc.bNumEndpoints < 2)
145 return -ENODEV;
146
144 epirq = &interface->endpoint[0].desc; 147 epirq = &interface->endpoint[0].desc;
145 epout = &interface->endpoint[1].desc; 148 epout = &interface->endpoint[1].desc;
146 149
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 9cc6d057c302..23c191a2a071 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -700,6 +700,10 @@ static int cm109_usb_probe(struct usb_interface *intf,
700 int error = -ENOMEM; 700 int error = -ENOMEM;
701 701
702 interface = intf->cur_altsetting; 702 interface = intf->cur_altsetting;
703
704 if (interface->desc.bNumEndpoints < 1)
705 return -ENODEV;
706
703 endpoint = &interface->endpoint[0].desc; 707 endpoint = &interface->endpoint[0].desc;
704 708
705 if (!usb_endpoint_is_int_in(endpoint)) 709 if (!usb_endpoint_is_int_in(endpoint))
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 9c0ea36913b4..f4e8fbec6a94 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1667,6 +1667,10 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
1667 return -EINVAL; 1667 return -EINVAL;
1668 1668
1669 alt = pcu->ctrl_intf->cur_altsetting; 1669 alt = pcu->ctrl_intf->cur_altsetting;
1670
1671 if (alt->desc.bNumEndpoints < 1)
1672 return -ENODEV;
1673
1670 pcu->ep_ctrl = &alt->endpoint[0].desc; 1674 pcu->ep_ctrl = &alt->endpoint[0].desc;
1671 pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl); 1675 pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl);
1672 1676
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 79c964c075f1..6e7ff9561d92 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -875,6 +875,10 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
875 int ret, pipe, i; 875 int ret, pipe, i;
876 876
877 interface = intf->cur_altsetting; 877 interface = intf->cur_altsetting;
878
879 if (interface->desc.bNumEndpoints < 1)
880 return -ENODEV;
881
878 endpoint = &interface->endpoint[0].desc; 882 endpoint = &interface->endpoint[0].desc;
879 if (!usb_endpoint_is_int_in(endpoint)) 883 if (!usb_endpoint_is_int_in(endpoint))
880 return -ENODEV; 884 return -ENODEV;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 72b28ebfe360..f210e19ddba6 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1282,10 +1282,8 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
1282 /* handle buttons */ 1282 /* handle buttons */
1283 if (pkt_id == SS4_PACKET_ID_STICK) { 1283 if (pkt_id == SS4_PACKET_ID_STICK) {
1284 f->ts_left = !!(SS4_BTN_V2(p) & 0x01); 1284 f->ts_left = !!(SS4_BTN_V2(p) & 0x01);
1285 if (!(priv->flags & ALPS_BUTTONPAD)) { 1285 f->ts_right = !!(SS4_BTN_V2(p) & 0x02);
1286 f->ts_right = !!(SS4_BTN_V2(p) & 0x02); 1286 f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
1287 f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
1288 }
1289 } else { 1287 } else {
1290 f->left = !!(SS4_BTN_V2(p) & 0x01); 1288 f->left = !!(SS4_BTN_V2(p) & 0x01);
1291 if (!(priv->flags & ALPS_BUTTONPAD)) { 1289 if (!(priv->flags & ALPS_BUTTONPAD)) {
@@ -2462,14 +2460,34 @@ static int alps_update_device_area_ss4_v2(unsigned char otp[][4],
2462 int num_y_electrode; 2460 int num_y_electrode;
2463 int x_pitch, y_pitch, x_phys, y_phys; 2461 int x_pitch, y_pitch, x_phys, y_phys;
2464 2462
2465 num_x_electrode = SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F); 2463 if (IS_SS4PLUS_DEV(priv->dev_id)) {
2466 num_y_electrode = SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F); 2464 num_x_electrode =
2465 SS4PLUS_NUMSENSOR_XOFFSET + (otp[0][2] & 0x0F);
2466 num_y_electrode =
2467 SS4PLUS_NUMSENSOR_YOFFSET + ((otp[0][2] >> 4) & 0x0F);
2468
2469 priv->x_max =
2470 (num_x_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE;
2471 priv->y_max =
2472 (num_y_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE;
2467 2473
2468 priv->x_max = (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE; 2474 x_pitch = (otp[0][1] & 0x0F) + SS4PLUS_MIN_PITCH_MM;
2469 priv->y_max = (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE; 2475 y_pitch = ((otp[0][1] >> 4) & 0x0F) + SS4PLUS_MIN_PITCH_MM;
2470 2476
2471 x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM; 2477 } else {
2472 y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM; 2478 num_x_electrode =
2479 SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F);
2480 num_y_electrode =
2481 SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F);
2482
2483 priv->x_max =
2484 (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
2485 priv->y_max =
2486 (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
2487
2488 x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM;
2489 y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM;
2490 }
2473 2491
2474 x_phys = x_pitch * (num_x_electrode - 1); /* In 0.1 mm units */ 2492 x_phys = x_pitch * (num_x_electrode - 1); /* In 0.1 mm units */
2475 y_phys = y_pitch * (num_y_electrode - 1); /* In 0.1 mm units */ 2493 y_phys = y_pitch * (num_y_electrode - 1); /* In 0.1 mm units */
@@ -2485,7 +2503,10 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4],
2485{ 2503{
2486 unsigned char is_btnless; 2504 unsigned char is_btnless;
2487 2505
2488 is_btnless = (otp[1][1] >> 3) & 0x01; 2506 if (IS_SS4PLUS_DEV(priv->dev_id))
2507 is_btnless = (otp[1][0] >> 1) & 0x01;
2508 else
2509 is_btnless = (otp[1][1] >> 3) & 0x01;
2489 2510
2490 if (is_btnless) 2511 if (is_btnless)
2491 priv->flags |= ALPS_BUTTONPAD; 2512 priv->flags |= ALPS_BUTTONPAD;
@@ -2493,6 +2514,21 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4],
2493 return 0; 2514 return 0;
2494} 2515}
2495 2516
2517static int alps_update_dual_info_ss4_v2(unsigned char otp[][4],
2518 struct alps_data *priv)
2519{
2520 bool is_dual = false;
2521
2522 if (IS_SS4PLUS_DEV(priv->dev_id))
2523 is_dual = (otp[0][0] >> 4) & 0x01;
2524
2525 if (is_dual)
2526 priv->flags |= ALPS_DUALPOINT |
2527 ALPS_DUALPOINT_WITH_PRESSURE;
2528
2529 return 0;
2530}
2531
2496static int alps_set_defaults_ss4_v2(struct psmouse *psmouse, 2532static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
2497 struct alps_data *priv) 2533 struct alps_data *priv)
2498{ 2534{
@@ -2508,6 +2544,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
2508 2544
2509 alps_update_btn_info_ss4_v2(otp, priv); 2545 alps_update_btn_info_ss4_v2(otp, priv);
2510 2546
2547 alps_update_dual_info_ss4_v2(otp, priv);
2548
2511 return 0; 2549 return 0;
2512} 2550}
2513 2551
@@ -2753,10 +2791,6 @@ static int alps_set_protocol(struct psmouse *psmouse,
2753 if (alps_set_defaults_ss4_v2(psmouse, priv)) 2791 if (alps_set_defaults_ss4_v2(psmouse, priv))
2754 return -EIO; 2792 return -EIO;
2755 2793
2756 if (priv->fw_ver[1] == 0x1)
2757 priv->flags |= ALPS_DUALPOINT |
2758 ALPS_DUALPOINT_WITH_PRESSURE;
2759
2760 break; 2794 break;
2761 } 2795 }
2762 2796
@@ -2827,10 +2861,7 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
2827 ec[2] >= 0x90 && ec[2] <= 0x9d) { 2861 ec[2] >= 0x90 && ec[2] <= 0x9d) {
2828 protocol = &alps_v3_protocol_data; 2862 protocol = &alps_v3_protocol_data;
2829 } else if (e7[0] == 0x73 && e7[1] == 0x03 && 2863 } else if (e7[0] == 0x73 && e7[1] == 0x03 &&
2830 e7[2] == 0x14 && ec[1] == 0x02) { 2864 (e7[2] == 0x14 || e7[2] == 0x28)) {
2831 protocol = &alps_v8_protocol_data;
2832 } else if (e7[0] == 0x73 && e7[1] == 0x03 &&
2833 e7[2] == 0x28 && ec[1] == 0x01) {
2834 protocol = &alps_v8_protocol_data; 2865 protocol = &alps_v8_protocol_data;
2835 } else { 2866 } else {
2836 psmouse_dbg(psmouse, 2867 psmouse_dbg(psmouse,
@@ -2840,7 +2871,8 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
2840 } 2871 }
2841 2872
2842 if (priv) { 2873 if (priv) {
2843 /* Save the Firmware version */ 2874 /* Save Device ID and Firmware version */
2875 memcpy(priv->dev_id, e7, 3);
2844 memcpy(priv->fw_ver, ec, 3); 2876 memcpy(priv->fw_ver, ec, 3);
2845 error = alps_set_protocol(psmouse, priv, protocol); 2877 error = alps_set_protocol(psmouse, priv, protocol);
2846 if (error) 2878 if (error)
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index 6d279aa27cb9..4334f2805d93 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -54,6 +54,16 @@ enum SS4_PACKET_ID {
54 54
55#define SS4_MASK_NORMAL_BUTTONS 0x07 55#define SS4_MASK_NORMAL_BUTTONS 0x07
56 56
57#define SS4PLUS_COUNT_PER_ELECTRODE 128
58#define SS4PLUS_NUMSENSOR_XOFFSET 16
59#define SS4PLUS_NUMSENSOR_YOFFSET 5
60#define SS4PLUS_MIN_PITCH_MM 37
61
62#define IS_SS4PLUS_DEV(_b) (((_b[0]) == 0x73) && \
63 ((_b[1]) == 0x03) && \
64 ((_b[2]) == 0x28) \
65 )
66
57#define SS4_IS_IDLE_V2(_b) (((_b[0]) == 0x18) && \ 67#define SS4_IS_IDLE_V2(_b) (((_b[0]) == 0x18) && \
58 ((_b[1]) == 0x10) && \ 68 ((_b[1]) == 0x10) && \
59 ((_b[2]) == 0x00) && \ 69 ((_b[2]) == 0x00) && \
@@ -283,6 +293,7 @@ struct alps_data {
283 int addr_command; 293 int addr_command;
284 u16 proto_version; 294 u16 proto_version;
285 u8 byte0, mask0; 295 u8 byte0, mask0;
296 u8 dev_id[3];
286 u8 fw_ver[3]; 297 u8 fw_ver[3];
287 int flags; 298 int flags;
288 int x_max; 299 int x_max;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 352050e9031d..d5ab9ddef3e3 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -218,17 +218,19 @@ static int elan_query_product(struct elan_tp_data *data)
218 218
219static int elan_check_ASUS_special_fw(struct elan_tp_data *data) 219static int elan_check_ASUS_special_fw(struct elan_tp_data *data)
220{ 220{
221 if (data->ic_type != 0x0E) 221 if (data->ic_type == 0x0E) {
222 return false; 222 switch (data->product_id) {
223 223 case 0x05 ... 0x07:
224 switch (data->product_id) { 224 case 0x09:
225 case 0x05 ... 0x07: 225 case 0x13:
226 case 0x09: 226 return true;
227 case 0x13: 227 }
228 } else if (data->ic_type == 0x08 && data->product_id == 0x26) {
229 /* ASUS EeeBook X205TA */
228 return true; 230 return true;
229 default:
230 return false;
231 } 231 }
232
233 return false;
232} 234}
233 235
234static int __elan_initialize(struct elan_tp_data *data) 236static int __elan_initialize(struct elan_tp_data *data)
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c
index 198678613382..34dfee555b20 100644
--- a/drivers/input/rmi4/rmi_f30.c
+++ b/drivers/input/rmi4/rmi_f30.c
@@ -170,6 +170,10 @@ static int rmi_f30_config(struct rmi_function *fn)
170 rmi_get_platform_data(fn->rmi_dev); 170 rmi_get_platform_data(fn->rmi_dev);
171 int error; 171 int error;
172 172
173 /* can happen if f30_data.disable is set */
174 if (!f30)
175 return 0;
176
173 if (pdata->f30_data.trackstick_buttons) { 177 if (pdata->f30_data.trackstick_buttons) {
174 /* Try [re-]establish link to F03. */ 178 /* Try [re-]establish link to F03. */
175 f30->f03 = rmi_find_function(fn->rmi_dev, 0x03); 179 f30->f03 = rmi_find_function(fn->rmi_dev, 0x03);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 05afd16ea9c9..312bd6ca9198 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -120,6 +120,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
120 }, 120 },
121 }, 121 },
122 { 122 {
123 /* Dell Embedded Box PC 3000 */
124 .matches = {
125 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
126 DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
127 },
128 },
129 {
123 /* OQO Model 01 */ 130 /* OQO Model 01 */
124 .matches = { 131 .matches = {
125 DMI_MATCH(DMI_SYS_VENDOR, "OQO"), 132 DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
@@ -513,6 +520,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
513 DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"), 520 DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
514 }, 521 },
515 }, 522 },
523 {
524 /* TUXEDO BU1406 */
525 .matches = {
526 DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
527 DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
528 },
529 },
516 { } 530 { }
517}; 531};
518 532
diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c
index cd852059b99e..df4bea96d7ed 100644
--- a/drivers/input/tablet/hanwang.c
+++ b/drivers/input/tablet/hanwang.c
@@ -340,6 +340,9 @@ static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id
340 int error; 340 int error;
341 int i; 341 int i;
342 342
343 if (intf->cur_altsetting->desc.bNumEndpoints < 1)
344 return -ENODEV;
345
343 hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL); 346 hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL);
344 input_dev = input_allocate_device(); 347 input_dev = input_allocate_device();
345 if (!hanwang || !input_dev) { 348 if (!hanwang || !input_dev) {
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index e850d7e8afbc..4d9d64908b59 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -122,6 +122,9 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
122 struct input_dev *input_dev; 122 struct input_dev *input_dev;
123 int error = -ENOMEM; 123 int error = -ENOMEM;
124 124
125 if (intf->cur_altsetting->desc.bNumEndpoints < 1)
126 return -ENODEV;
127
125 kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL); 128 kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
126 input_dev = input_allocate_device(); 129 input_dev = input_allocate_device();
127 if (!kbtab || !input_dev) 130 if (!kbtab || !input_dev)
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index aefb6e11f88a..4c0eecae065c 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -527,6 +527,9 @@ static int sur40_probe(struct usb_interface *interface,
527 if (iface_desc->desc.bInterfaceClass != 0xFF) 527 if (iface_desc->desc.bInterfaceClass != 0xFF)
528 return -ENODEV; 528 return -ENODEV;
529 529
530 if (iface_desc->desc.bNumEndpoints < 5)
531 return -ENODEV;
532
530 /* Use endpoint #4 (0x86). */ 533 /* Use endpoint #4 (0x86). */
531 endpoint = &iface_desc->endpoint[4].desc; 534 endpoint = &iface_desc->endpoint[4].desc;
532 if (endpoint->bEndpointAddress != TOUCH_ENDPOINT) 535 if (endpoint->bEndpointAddress != TOUCH_ENDPOINT)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 98940d1392cb..b17536d6e69b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3202,7 +3202,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
3202 3202
3203 region = iommu_alloc_resv_region(MSI_RANGE_START, 3203 region = iommu_alloc_resv_region(MSI_RANGE_START,
3204 MSI_RANGE_END - MSI_RANGE_START + 1, 3204 MSI_RANGE_END - MSI_RANGE_START + 1,
3205 0, IOMMU_RESV_RESERVED); 3205 0, IOMMU_RESV_MSI);
3206 if (!region) 3206 if (!region)
3207 return; 3207 return;
3208 list_add_tail(&region->list, head); 3208 list_add_tail(&region->list, head);
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 5806a6acc94e..591bb96047c9 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1888,7 +1888,7 @@ static void arm_smmu_get_resv_regions(struct device *dev,
1888 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; 1888 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1889 1889
1890 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, 1890 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
1891 prot, IOMMU_RESV_MSI); 1891 prot, IOMMU_RESV_SW_MSI);
1892 if (!region) 1892 if (!region)
1893 return; 1893 return;
1894 1894
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index abf6496843a6..b493c99e17f7 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1608,7 +1608,7 @@ static void arm_smmu_get_resv_regions(struct device *dev,
1608 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; 1608 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1609 1609
1610 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, 1610 region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
1611 prot, IOMMU_RESV_MSI); 1611 prot, IOMMU_RESV_SW_MSI);
1612 if (!region) 1612 if (!region)
1613 return; 1613 return;
1614 1614
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index a7e0821c9967..c01bfcdb2383 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -512,7 +512,13 @@ static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
512 spin_lock_irqsave(&data->lock, flags); 512 spin_lock_irqsave(&data->lock, flags);
513 if (data->active && data->version >= MAKE_MMU_VER(3, 3)) { 513 if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
514 clk_enable(data->clk_master); 514 clk_enable(data->clk_master);
515 __sysmmu_tlb_invalidate_entry(data, iova, 1); 515 if (sysmmu_block(data)) {
516 if (data->version >= MAKE_MMU_VER(5, 0))
517 __sysmmu_tlb_invalidate(data);
518 else
519 __sysmmu_tlb_invalidate_entry(data, iova, 1);
520 sysmmu_unblock(data);
521 }
516 clk_disable(data->clk_master); 522 clk_disable(data->clk_master);
517 } 523 }
518 spin_unlock_irqrestore(&data->lock, flags); 524 spin_unlock_irqrestore(&data->lock, flags);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 238ad3447712..d412a313a372 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -916,7 +916,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
916 * which we used for the IOMMU lookup. Strictly speaking 916 * which we used for the IOMMU lookup. Strictly speaking
917 * we could do this for all PCI devices; we only need to 917 * we could do this for all PCI devices; we only need to
918 * get the BDF# from the scope table for ACPI matches. */ 918 * get the BDF# from the scope table for ACPI matches. */
919 if (pdev->is_virtfn) 919 if (pdev && pdev->is_virtfn)
920 goto got_pdev; 920 goto got_pdev;
921 921
922 *bus = drhd->devices[i].bus; 922 *bus = drhd->devices[i].bus;
@@ -5249,7 +5249,7 @@ static void intel_iommu_get_resv_regions(struct device *device,
5249 5249
5250 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START, 5250 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5251 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, 5251 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
5252 0, IOMMU_RESV_RESERVED); 5252 0, IOMMU_RESV_MSI);
5253 if (!reg) 5253 if (!reg)
5254 return; 5254 return;
5255 list_add_tail(&reg->list, head); 5255 list_add_tail(&reg->list, head);
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 1c049e2e12bf..8d6ca28c3e1f 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -422,8 +422,12 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
422 pte |= ARM_V7S_ATTR_NS_TABLE; 422 pte |= ARM_V7S_ATTR_NS_TABLE;
423 423
424 __arm_v7s_set_pte(ptep, pte, 1, cfg); 424 __arm_v7s_set_pte(ptep, pte, 1, cfg);
425 } else { 425 } else if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
426 cptep = iopte_deref(pte, lvl); 426 cptep = iopte_deref(pte, lvl);
427 } else {
428 /* We require an unmap first */
429 WARN_ON(!selftest_running);
430 return -EEXIST;
427 } 431 }
428 432
429 /* Rinse, repeat */ 433 /* Rinse, repeat */
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index feacc54bec68..f9bc6ebb8140 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -335,8 +335,12 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
335 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) 335 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
336 pte |= ARM_LPAE_PTE_NSTABLE; 336 pte |= ARM_LPAE_PTE_NSTABLE;
337 __arm_lpae_set_pte(ptep, pte, cfg); 337 __arm_lpae_set_pte(ptep, pte, cfg);
338 } else { 338 } else if (!iopte_leaf(pte, lvl)) {
339 cptep = iopte_deref(pte, data); 339 cptep = iopte_deref(pte, data);
340 } else {
341 /* We require an unmap first */
342 WARN_ON(!selftest_running);
343 return -EEXIST;
340 } 344 }
341 345
342 /* Rinse, repeat */ 346 /* Rinse, repeat */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 8ea14f41a979..3b67144dead2 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -72,6 +72,7 @@ static const char * const iommu_group_resv_type_string[] = {
72 [IOMMU_RESV_DIRECT] = "direct", 72 [IOMMU_RESV_DIRECT] = "direct",
73 [IOMMU_RESV_RESERVED] = "reserved", 73 [IOMMU_RESV_RESERVED] = "reserved",
74 [IOMMU_RESV_MSI] = "msi", 74 [IOMMU_RESV_MSI] = "msi",
75 [IOMMU_RESV_SW_MSI] = "msi",
75}; 76};
76 77
77#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ 78#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
@@ -1743,8 +1744,8 @@ void iommu_put_resv_regions(struct device *dev, struct list_head *list)
1743} 1744}
1744 1745
1745struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, 1746struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
1746 size_t length, 1747 size_t length, int prot,
1747 int prot, int type) 1748 enum iommu_resv_type type)
1748{ 1749{
1749 struct iommu_resv_region *region; 1750 struct iommu_resv_region *region;
1750 1751
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 125528f39e92..8162121bb1bc 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -262,6 +262,7 @@ config IRQ_MXS
262 262
263config MVEBU_ODMI 263config MVEBU_ODMI
264 bool 264 bool
265 select GENERIC_MSI_IRQ_DOMAIN
265 266
266config MVEBU_PIC 267config MVEBU_PIC
267 bool 268 bool
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 11d12bccc4e7..cd20df12d63d 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -991,8 +991,12 @@ static void __init gic_map_single_int(struct device_node *node,
991 991
992static void __init gic_map_interrupts(struct device_node *node) 992static void __init gic_map_interrupts(struct device_node *node)
993{ 993{
994 gic_map_single_int(node, GIC_LOCAL_INT_WD);
995 gic_map_single_int(node, GIC_LOCAL_INT_COMPARE);
994 gic_map_single_int(node, GIC_LOCAL_INT_TIMER); 996 gic_map_single_int(node, GIC_LOCAL_INT_TIMER);
995 gic_map_single_int(node, GIC_LOCAL_INT_PERFCTR); 997 gic_map_single_int(node, GIC_LOCAL_INT_PERFCTR);
998 gic_map_single_int(node, GIC_LOCAL_INT_SWINT0);
999 gic_map_single_int(node, GIC_LOCAL_INT_SWINT1);
996 gic_map_single_int(node, GIC_LOCAL_INT_FDC); 1000 gic_map_single_int(node, GIC_LOCAL_INT_FDC);
997} 1001}
998 1002
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 1dfd1085a04f..9ca691d6c13b 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -1032,6 +1032,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
1032 sizeof(avmb1_carddef)))) 1032 sizeof(avmb1_carddef))))
1033 return -EFAULT; 1033 return -EFAULT;
1034 cdef.cardtype = AVM_CARDTYPE_B1; 1034 cdef.cardtype = AVM_CARDTYPE_B1;
1035 cdef.cardnr = 0;
1035 } else { 1036 } else {
1036 if ((retval = copy_from_user(&cdef, data, 1037 if ((retval = copy_from_user(&cdef, data,
1037 sizeof(avmb1_extcarddef)))) 1038 sizeof(avmb1_extcarddef))))
diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c
index 67fd8ffa60a4..669a4c82f1ff 100644
--- a/drivers/media/platform/coda/imx-vdoa.c
+++ b/drivers/media/platform/coda/imx-vdoa.c
@@ -321,7 +321,7 @@ static const struct of_device_id vdoa_dt_ids[] = {
321}; 321};
322MODULE_DEVICE_TABLE(of, vdoa_dt_ids); 322MODULE_DEVICE_TABLE(of, vdoa_dt_ids);
323 323
324static const struct platform_driver vdoa_driver = { 324static struct platform_driver vdoa_driver = {
325 .probe = vdoa_probe, 325 .probe = vdoa_probe,
326 .remove = vdoa_remove, 326 .remove = vdoa_remove,
327 .driver = { 327 .driver = {
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index cbb03768f5d7..0f0c389f8897 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -861,9 +861,7 @@ int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb,
861 861
862 if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) || 862 if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) ||
863 (frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) || 863 (frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) ||
864 (frame->fmt->pixelformat == V4L2_PIX_FMT_NV61) ||
865 (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) || 864 (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) ||
866 (frame->fmt->pixelformat == V4L2_PIX_FMT_NV21) ||
867 (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M)) 865 (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M))
868 swap(addr->cb, addr->cr); 866 swap(addr->cb, addr->cr);
869 867
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 823608112d89..7918b928f058 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -632,8 +632,8 @@ static int bdisp_open(struct file *file)
632 632
633error_ctrls: 633error_ctrls:
634 bdisp_ctrls_delete(ctx); 634 bdisp_ctrls_delete(ctx);
635error_fh:
636 v4l2_fh_del(&ctx->fh); 635 v4l2_fh_del(&ctx->fh);
636error_fh:
637 v4l2_fh_exit(&ctx->fh); 637 v4l2_fh_exit(&ctx->fh);
638 bdisp_hw_free_nodes(ctx); 638 bdisp_hw_free_nodes(ctx);
639mem_ctx: 639mem_ctx:
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
index ab9866024ec7..04033efe7ad5 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
@@ -36,16 +36,18 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
36int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type) 36int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
37{ 37{
38 struct hexline *hx; 38 struct hexline *hx;
39 u8 reset; 39 u8 *buf;
40 int ret,pos=0; 40 int ret, pos = 0;
41 u16 cpu_cs_register = cypress[type].cpu_cs_register;
41 42
42 hx = kmalloc(sizeof(*hx), GFP_KERNEL); 43 buf = kmalloc(sizeof(*hx), GFP_KERNEL);
43 if (!hx) 44 if (!buf)
44 return -ENOMEM; 45 return -ENOMEM;
46 hx = (struct hexline *)buf;
45 47
46 /* stop the CPU */ 48 /* stop the CPU */
47 reset = 1; 49 buf[0] = 1;
48 if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1) 50 if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1)
49 err("could not stop the USB controller CPU."); 51 err("could not stop the USB controller CPU.");
50 52
51 while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) { 53 while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
@@ -61,21 +63,21 @@ int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw
61 } 63 }
62 if (ret < 0) { 64 if (ret < 0) {
63 err("firmware download failed at %d with %d",pos,ret); 65 err("firmware download failed at %d with %d",pos,ret);
64 kfree(hx); 66 kfree(buf);
65 return ret; 67 return ret;
66 } 68 }
67 69
68 if (ret == 0) { 70 if (ret == 0) {
69 /* restart the CPU */ 71 /* restart the CPU */
70 reset = 0; 72 buf[0] = 0;
71 if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) { 73 if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) {
72 err("could not restart the USB controller CPU."); 74 err("could not restart the USB controller CPU.");
73 ret = -EINVAL; 75 ret = -EINVAL;
74 } 76 }
75 } else 77 } else
76 ret = -EIO; 78 ret = -EIO;
77 79
78 kfree(hx); 80 kfree(buf);
79 81
80 return ret; 82 return ret;
81} 83}
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 5457c361ad58..bf0fe0137dfe 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -1947,9 +1947,7 @@ static int gpmc_probe_onenand_child(struct platform_device *pdev,
1947 if (!of_property_read_u32(child, "dma-channel", &val)) 1947 if (!of_property_read_u32(child, "dma-channel", &val))
1948 gpmc_onenand_data->dma_channel = val; 1948 gpmc_onenand_data->dma_channel = val;
1949 1949
1950 gpmc_onenand_init(gpmc_onenand_data); 1950 return gpmc_onenand_init(gpmc_onenand_data);
1951
1952 return 0;
1953} 1951}
1954#else 1952#else
1955static int gpmc_probe_onenand_child(struct platform_device *pdev, 1953static int gpmc_probe_onenand_child(struct platform_device *pdev,
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 91f645992c94..b27ea98b781f 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1792,15 +1792,14 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
1792 1792
1793 /* If we're permanently dead, give up. */ 1793 /* If we're permanently dead, give up. */
1794 if (state == pci_channel_io_perm_failure) { 1794 if (state == pci_channel_io_perm_failure) {
1795 /* Tell the AFU drivers; but we don't care what they
1796 * say, we're going away.
1797 */
1798 for (i = 0; i < adapter->slices; i++) { 1795 for (i = 0; i < adapter->slices; i++) {
1799 afu = adapter->afu[i]; 1796 afu = adapter->afu[i];
1800 /* Only participate in EEH if we are on a virtual PHB */ 1797 /*
1801 if (afu->phb == NULL) 1798 * Tell the AFU drivers; but we don't care what they
1802 return PCI_ERS_RESULT_NONE; 1799 * say, we're going away.
1803 cxl_vphb_error_detected(afu, state); 1800 */
1801 if (afu->phb != NULL)
1802 cxl_vphb_error_detected(afu, state);
1804 } 1803 }
1805 return PCI_ERS_RESULT_DISCONNECT; 1804 return PCI_ERS_RESULT_DISCONNECT;
1806 } 1805 }
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 3600c9993a98..29f2daed37e0 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -112,11 +112,9 @@ struct mkhi_msg {
112 112
113static int mei_osver(struct mei_cl_device *cldev) 113static int mei_osver(struct mei_cl_device *cldev)
114{ 114{
115 int ret;
116 const size_t size = sizeof(struct mkhi_msg_hdr) + 115 const size_t size = sizeof(struct mkhi_msg_hdr) +
117 sizeof(struct mkhi_fwcaps) + 116 sizeof(struct mkhi_fwcaps) +
118 sizeof(struct mei_os_ver); 117 sizeof(struct mei_os_ver);
119 size_t length = 8;
120 char buf[size]; 118 char buf[size];
121 struct mkhi_msg *req; 119 struct mkhi_msg *req;
122 struct mkhi_fwcaps *fwcaps; 120 struct mkhi_fwcaps *fwcaps;
@@ -137,15 +135,7 @@ static int mei_osver(struct mei_cl_device *cldev)
137 os_ver = (struct mei_os_ver *)fwcaps->data; 135 os_ver = (struct mei_os_ver *)fwcaps->data;
138 os_ver->os_type = OSTYPE_LINUX; 136 os_ver->os_type = OSTYPE_LINUX;
139 137
140 ret = __mei_cl_send(cldev->cl, buf, size, mode); 138 return __mei_cl_send(cldev->cl, buf, size, mode);
141 if (ret < 0)
142 return ret;
143
144 ret = __mei_cl_recv(cldev->cl, buf, length, 0);
145 if (ret < 0)
146 return ret;
147
148 return 0;
149} 139}
150 140
151static void mei_mkhi_fix(struct mei_cl_device *cldev) 141static void mei_mkhi_fix(struct mei_cl_device *cldev)
@@ -160,7 +150,7 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
160 return; 150 return;
161 151
162 ret = mei_osver(cldev); 152 ret = mei_osver(cldev);
163 if (ret) 153 if (ret < 0)
164 dev_err(&cldev->dev, "OS version command failed %d\n", ret); 154 dev_err(&cldev->dev, "OS version command failed %d\n", ret);
165 155
166 mei_cldev_disable(cldev); 156 mei_cldev_disable(cldev);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index cfb1cdf176fa..13c55b8f9261 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -124,8 +124,6 @@ int mei_reset(struct mei_device *dev)
124 124
125 mei_clear_interrupts(dev); 125 mei_clear_interrupts(dev);
126 126
127 mei_synchronize_irq(dev);
128
129 /* we're already in reset, cancel the init timer 127 /* we're already in reset, cancel the init timer
130 * if the reset was called due the hbm protocol error 128 * if the reset was called due the hbm protocol error
131 * we need to call it before hw start 129 * we need to call it before hw start
@@ -304,6 +302,9 @@ static void mei_reset_work(struct work_struct *work)
304 container_of(work, struct mei_device, reset_work); 302 container_of(work, struct mei_device, reset_work);
305 int ret; 303 int ret;
306 304
305 mei_clear_interrupts(dev);
306 mei_synchronize_irq(dev);
307
307 mutex_lock(&dev->device_lock); 308 mutex_lock(&dev->device_lock);
308 309
309 ret = mei_reset(dev); 310 ret = mei_reset(dev);
@@ -328,6 +329,9 @@ void mei_stop(struct mei_device *dev)
328 329
329 mei_cancel_work(dev); 330 mei_cancel_work(dev);
330 331
332 mei_clear_interrupts(dev);
333 mei_synchronize_irq(dev);
334
331 mutex_lock(&dev->device_lock); 335 mutex_lock(&dev->device_lock);
332 336
333 dev->dev_state = MEI_DEV_POWER_DOWN; 337 dev->dev_state = MEI_DEV_POWER_DOWN;
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 9d659542a335..dad5abee656e 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -566,10 +566,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
566 */ 566 */
567 error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS, 567 error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS,
568 PCI_IRQ_MSIX); 568 PCI_IRQ_MSIX);
569 if (error) { 569 if (error < 0) {
570 error = pci_alloc_irq_vectors(pdev, 1, 1, 570 error = pci_alloc_irq_vectors(pdev, 1, 1,
571 PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY); 571 PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
572 if (error) 572 if (error < 0)
573 goto err_remove_bitmap; 573 goto err_remove_bitmap;
574 } else { 574 } else {
575 vmci_dev->exclusive_vectors = true; 575 vmci_dev->exclusive_vectors = true;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 1621fa08e206..ff3da960c473 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1560,11 +1560,8 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1560 struct mmc_blk_request *brq, struct request *req, 1560 struct mmc_blk_request *brq, struct request *req,
1561 bool old_req_pending) 1561 bool old_req_pending)
1562{ 1562{
1563 struct mmc_queue_req *mq_rq;
1564 bool req_pending; 1563 bool req_pending;
1565 1564
1566 mq_rq = container_of(brq, struct mmc_queue_req, brq);
1567
1568 /* 1565 /*
1569 * If this is an SD card and we're writing, we can first 1566 * If this is an SD card and we're writing, we can first
1570 * mark the known good sectors as ok. 1567 * mark the known good sectors as ok.
@@ -1701,7 +1698,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
1701 case MMC_BLK_CMD_ERR: 1698 case MMC_BLK_CMD_ERR:
1702 req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending); 1699 req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
1703 if (mmc_blk_reset(md, card->host, type)) { 1700 if (mmc_blk_reset(md, card->host, type)) {
1704 mmc_blk_rw_cmd_abort(card, old_req); 1701 if (req_pending)
1702 mmc_blk_rw_cmd_abort(card, old_req);
1705 mmc_blk_rw_try_restart(mq, new_req); 1703 mmc_blk_rw_try_restart(mq, new_req);
1706 return; 1704 return;
1707 } 1705 }
@@ -1817,6 +1815,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1817 mmc_blk_issue_flush(mq, req); 1815 mmc_blk_issue_flush(mq, req);
1818 } else { 1816 } else {
1819 mmc_blk_issue_rw_rq(mq, req); 1817 mmc_blk_issue_rw_rq(mq, req);
1818 card->host->context_info.is_waiting_last_req = false;
1820 } 1819 }
1821 1820
1822out: 1821out:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 7fd722868875..b502601df228 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1730,7 +1730,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1730 err = mmc_select_hs400(card); 1730 err = mmc_select_hs400(card);
1731 if (err) 1731 if (err)
1732 goto free_card; 1732 goto free_card;
1733 } else { 1733 } else if (!mmc_card_hs400es(card)) {
1734 /* Select the desired bus width optionally */ 1734 /* Select the desired bus width optionally */
1735 err = mmc_select_bus_width(card); 1735 err = mmc_select_bus_width(card);
1736 if (err > 0 && mmc_card_hs(card)) { 1736 if (err > 0 && mmc_card_hs(card)) {
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 8e32580c12b5..b235d8da0602 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -580,7 +580,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
580 } 580 }
581 } 581 }
582 sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV, 582 sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
583 (mode << 8) | (div % 0xff)); 583 (mode << 8) | div);
584 sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); 584 sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
585 while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB)) 585 while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
586 cpu_relax(); 586 cpu_relax();
@@ -1559,7 +1559,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
1559 host->src_clk_freq = clk_get_rate(host->src_clk); 1559 host->src_clk_freq = clk_get_rate(host->src_clk);
1560 /* Set host parameters to mmc */ 1560 /* Set host parameters to mmc */
1561 mmc->ops = &mt_msdc_ops; 1561 mmc->ops = &mt_msdc_ops;
1562 mmc->f_min = host->src_clk_freq / (4 * 255); 1562 mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
1563 1563
1564 mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; 1564 mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
1565 /* MMC core transfer sizes tunable parameters */ 1565 /* MMC core transfer sizes tunable parameters */
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 410a55b1c25f..1cfd7f900339 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -28,13 +28,9 @@
28#include "sdhci-pltfm.h" 28#include "sdhci-pltfm.h"
29#include <linux/of.h> 29#include <linux/of.h>
30 30
31#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c
32#define SDHCI_ARASAN_VENDOR_REGISTER 0x78 31#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
33 32
34#define VENDOR_ENHANCED_STROBE BIT(0) 33#define VENDOR_ENHANCED_STROBE BIT(0)
35#define CLK_CTRL_TIMEOUT_SHIFT 16
36#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT)
37#define CLK_CTRL_TIMEOUT_MIN_EXP 13
38 34
39#define PHY_CLK_TOO_SLOW_HZ 400000 35#define PHY_CLK_TOO_SLOW_HZ 400000
40 36
@@ -163,15 +159,15 @@ static int sdhci_arasan_syscon_write(struct sdhci_host *host,
163 159
164static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host) 160static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
165{ 161{
166 u32 div;
167 unsigned long freq; 162 unsigned long freq;
168 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 163 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
169 164
170 div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET); 165 /* SDHCI timeout clock is in kHz */
171 div = (div & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT; 166 freq = DIV_ROUND_UP(clk_get_rate(pltfm_host->clk), 1000);
172 167
173 freq = clk_get_rate(pltfm_host->clk); 168 /* or in MHz */
174 freq /= 1 << (CLK_CTRL_TIMEOUT_MIN_EXP + div); 169 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
170 freq = DIV_ROUND_UP(freq, 1000);
175 171
176 return freq; 172 return freq;
177} 173}
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 2f9ad213377a..d5430ed02a67 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -29,6 +29,8 @@
29 29
30#include "sdhci-pltfm.h" 30#include "sdhci-pltfm.h"
31 31
32#define SDMMC_MC1R 0x204
33#define SDMMC_MC1R_DDR BIT(3)
32#define SDMMC_CACR 0x230 34#define SDMMC_CACR 0x230
33#define SDMMC_CACR_CAPWREN BIT(0) 35#define SDMMC_CACR_CAPWREN BIT(0)
34#define SDMMC_CACR_KEY (0x46 << 8) 36#define SDMMC_CACR_KEY (0x46 << 8)
@@ -85,11 +87,37 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
85 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 87 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
86} 88}
87 89
90/*
91 * In this specific implementation of the SDHCI controller, the power register
92 * needs to have a valid voltage set even when the power supply is managed by
93 * an external regulator.
94 */
95static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
96 unsigned short vdd)
97{
98 if (!IS_ERR(host->mmc->supply.vmmc)) {
99 struct mmc_host *mmc = host->mmc;
100
101 spin_unlock_irq(&host->lock);
102 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
103 spin_lock_irq(&host->lock);
104 }
105 sdhci_set_power_noreg(host, mode, vdd);
106}
107
108void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
109{
110 if (timing == MMC_TIMING_MMC_DDR52)
111 sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
112 sdhci_set_uhs_signaling(host, timing);
113}
114
88static const struct sdhci_ops sdhci_at91_sama5d2_ops = { 115static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
89 .set_clock = sdhci_at91_set_clock, 116 .set_clock = sdhci_at91_set_clock,
90 .set_bus_width = sdhci_set_bus_width, 117 .set_bus_width = sdhci_set_bus_width,
91 .reset = sdhci_reset, 118 .reset = sdhci_reset,
92 .set_uhs_signaling = sdhci_set_uhs_signaling, 119 .set_uhs_signaling = sdhci_at91_set_uhs_signaling,
120 .set_power = sdhci_at91_set_power,
93}; 121};
94 122
95static const struct sdhci_pltfm_data soc_data_sama5d2 = { 123static const struct sdhci_pltfm_data soc_data_sama5d2 = {
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 982b3e349426..86560d590786 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -451,6 +451,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
451 if (mode == MMC_POWER_OFF) 451 if (mode == MMC_POWER_OFF)
452 return; 452 return;
453 453
454 spin_unlock_irq(&host->lock);
455
454 /* 456 /*
455 * Bus power might not enable after D3 -> D0 transition due to the 457 * Bus power might not enable after D3 -> D0 transition due to the
456 * present state not yet having propagated. Retry for up to 2ms. 458 * present state not yet having propagated. Retry for up to 2ms.
@@ -463,6 +465,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
463 reg |= SDHCI_POWER_ON; 465 reg |= SDHCI_POWER_ON;
464 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); 466 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
465 } 467 }
468
469 spin_lock_irq(&host->lock);
466} 470}
467 471
468static const struct sdhci_ops sdhci_intel_byt_ops = { 472static const struct sdhci_ops sdhci_intel_byt_ops = {
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6fdd7a70f229..63bc33a54d0d 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1362,7 +1362,9 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1362 return; 1362 return;
1363 } 1363 }
1364 timeout--; 1364 timeout--;
1365 mdelay(1); 1365 spin_unlock_irq(&host->lock);
1366 usleep_range(900, 1100);
1367 spin_lock_irq(&host->lock);
1366 } 1368 }
1367 1369
1368 clk |= SDHCI_CLOCK_CARD_EN; 1370 clk |= SDHCI_CLOCK_CARD_EN;
@@ -1828,6 +1830,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1828 struct sdhci_host *host = mmc_priv(mmc); 1830 struct sdhci_host *host = mmc_priv(mmc);
1829 unsigned long flags; 1831 unsigned long flags;
1830 1832
1833 if (enable)
1834 pm_runtime_get_noresume(host->mmc->parent);
1835
1831 spin_lock_irqsave(&host->lock, flags); 1836 spin_lock_irqsave(&host->lock, flags);
1832 if (enable) 1837 if (enable)
1833 host->flags |= SDHCI_SDIO_IRQ_ENABLED; 1838 host->flags |= SDHCI_SDIO_IRQ_ENABLED;
@@ -1836,6 +1841,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1836 1841
1837 sdhci_enable_sdio_irq_nolock(host, enable); 1842 sdhci_enable_sdio_irq_nolock(host, enable);
1838 spin_unlock_irqrestore(&host->lock, flags); 1843 spin_unlock_irqrestore(&host->lock, flags);
1844
1845 if (!enable)
1846 pm_runtime_put_noidle(host->mmc->parent);
1839} 1847}
1840 1848
1841static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 1849static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index d2c386f09d69..1d843357422e 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -426,6 +426,9 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id
426 struct ushc_data *ushc; 426 struct ushc_data *ushc;
427 int ret; 427 int ret;
428 428
429 if (intf->cur_altsetting->desc.bNumEndpoints < 1)
430 return -ENODEV;
431
429 mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev); 432 mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
430 if (mmc == NULL) 433 if (mmc == NULL)
431 return -ENOMEM; 434 return -ENOMEM;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index d05fbfdce5e5..5d6c40d86775 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -100,11 +100,6 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
100 goto err_exit; 100 goto err_exit;
101 ndev->mtu = new_mtu; 101 ndev->mtu = new_mtu;
102 102
103 if (netif_running(ndev)) {
104 aq_ndev_close(ndev);
105 aq_ndev_open(ndev);
106 }
107
108err_exit: 103err_exit:
109 return err; 104 return err;
110} 105}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index ee78444bfb88..cdb02991f249 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -487,6 +487,9 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
487 dx_buff->mss = skb_shinfo(skb)->gso_size; 487 dx_buff->mss = skb_shinfo(skb)->gso_size;
488 dx_buff->is_txc = 1U; 488 dx_buff->is_txc = 1U;
489 489
490 dx_buff->is_ipv6 =
491 (ip_hdr(skb)->version == 6) ? 1U : 0U;
492
490 dx = aq_ring_next_dx(ring, dx); 493 dx = aq_ring_next_dx(ring, dx);
491 dx_buff = &ring->buff_ring[dx]; 494 dx_buff = &ring->buff_ring[dx];
492 ++ret; 495 ++ret;
@@ -510,10 +513,22 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
510 if (skb->ip_summed == CHECKSUM_PARTIAL) { 513 if (skb->ip_summed == CHECKSUM_PARTIAL) {
511 dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? 514 dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
512 1U : 0U; 515 1U : 0U;
513 dx_buff->is_tcp_cso = 516
514 (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U; 517 if (ip_hdr(skb)->version == 4) {
515 dx_buff->is_udp_cso = 518 dx_buff->is_tcp_cso =
516 (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U; 519 (ip_hdr(skb)->protocol == IPPROTO_TCP) ?
520 1U : 0U;
521 dx_buff->is_udp_cso =
522 (ip_hdr(skb)->protocol == IPPROTO_UDP) ?
523 1U : 0U;
524 } else if (ip_hdr(skb)->version == 6) {
525 dx_buff->is_tcp_cso =
526 (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
527 1U : 0U;
528 dx_buff->is_udp_cso =
529 (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
530 1U : 0U;
531 }
517 } 532 }
518 533
519 for (; nr_frags--; ++frag_count) { 534 for (; nr_frags--; ++frag_count) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 0358e6072d45..3a8a4aa13687 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -101,6 +101,7 @@ int aq_ring_init(struct aq_ring_s *self)
101 self->hw_head = 0; 101 self->hw_head = 0;
102 self->sw_head = 0; 102 self->sw_head = 0;
103 self->sw_tail = 0; 103 self->sw_tail = 0;
104 spin_lock_init(&self->header.lock);
104 return 0; 105 return 0;
105} 106}
106 107
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 257254645068..eecd6d1c4d73 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -58,7 +58,8 @@ struct __packed aq_ring_buff_s {
58 u8 len_l2; 58 u8 len_l2;
59 u8 len_l3; 59 u8 len_l3;
60 u8 len_l4; 60 u8 len_l4;
61 u8 rsvd2; 61 u8 is_ipv6:1;
62 u8 rsvd2:7;
62 u32 len_pkt; 63 u32 len_pkt;
63 }; 64 };
64 }; 65 };
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index a2b746a2dd50..4ee15ff06a44 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -433,6 +433,9 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
433 buff->len_l3 + 433 buff->len_l3 +
434 buff->len_l2); 434 buff->len_l2);
435 is_gso = true; 435 is_gso = true;
436
437 if (buff->is_ipv6)
438 txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPV6;
436 } else { 439 } else {
437 buff_pa_len = buff->len; 440 buff_pa_len = buff->len;
438 441
@@ -458,6 +461,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
458 if (unlikely(buff->is_eop)) { 461 if (unlikely(buff->is_eop)) {
459 txd->ctl |= HW_ATL_A0_TXD_CTL_EOP; 462 txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
460 txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB; 463 txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
464 is_gso = false;
461 } 465 }
462 } 466 }
463 467
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index cab2931dab9a..42150708191d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -471,6 +471,9 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
471 buff->len_l3 + 471 buff->len_l3 +
472 buff->len_l2); 472 buff->len_l2);
473 is_gso = true; 473 is_gso = true;
474
475 if (buff->is_ipv6)
476 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
474 } else { 477 } else {
475 buff_pa_len = buff->len; 478 buff_pa_len = buff->len;
476 479
@@ -496,6 +499,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
496 if (unlikely(buff->is_eop)) { 499 if (unlikely(buff->is_eop)) {
497 txd->ctl |= HW_ATL_B0_TXD_CTL_EOP; 500 txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
498 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB; 501 txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
502 is_gso = false;
499 } 503 }
500 } 504 }
501 505
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 0a23034bbe3f..352beff796ae 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -2277,7 +2277,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2277 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \ 2277 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
2278 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC)) 2278 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
2279 2279
2280#define HW_INTERRUT_ASSERT_SET_0 \ 2280#define HW_INTERRUPT_ASSERT_SET_0 \
2281 (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \ 2281 (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
2282 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \ 2282 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
2283 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \ 2283 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
@@ -2290,7 +2290,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2290 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\ 2290 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
2291 AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\ 2291 AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
2292 AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR) 2292 AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
2293#define HW_INTERRUT_ASSERT_SET_1 \ 2293#define HW_INTERRUPT_ASSERT_SET_1 \
2294 (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \ 2294 (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
2295 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \ 2295 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
2296 AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \ 2296 AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
@@ -2318,7 +2318,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2318 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \ 2318 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
2319 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\ 2319 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
2320 AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR) 2320 AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
2321#define HW_INTERRUT_ASSERT_SET_2 \ 2321#define HW_INTERRUPT_ASSERT_SET_2 \
2322 (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \ 2322 (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
2323 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \ 2323 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
2324 AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \ 2324 AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index ac76fc251d26..a851f95c307a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4166,14 +4166,14 @@ static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4166 bnx2x_release_phy_lock(bp); 4166 bnx2x_release_phy_lock(bp);
4167 } 4167 }
4168 4168
4169 if (attn & HW_INTERRUT_ASSERT_SET_0) { 4169 if (attn & HW_INTERRUPT_ASSERT_SET_0) {
4170 4170
4171 val = REG_RD(bp, reg_offset); 4171 val = REG_RD(bp, reg_offset);
4172 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 4172 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);
4173 REG_WR(bp, reg_offset, val); 4173 REG_WR(bp, reg_offset, val);
4174 4174
4175 BNX2X_ERR("FATAL HW block attention set0 0x%x\n", 4175 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
4176 (u32)(attn & HW_INTERRUT_ASSERT_SET_0)); 4176 (u32)(attn & HW_INTERRUPT_ASSERT_SET_0));
4177 bnx2x_panic(); 4177 bnx2x_panic();
4178 } 4178 }
4179} 4179}
@@ -4191,7 +4191,7 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4191 BNX2X_ERR("FATAL error from DORQ\n"); 4191 BNX2X_ERR("FATAL error from DORQ\n");
4192 } 4192 }
4193 4193
4194 if (attn & HW_INTERRUT_ASSERT_SET_1) { 4194 if (attn & HW_INTERRUPT_ASSERT_SET_1) {
4195 4195
4196 int port = BP_PORT(bp); 4196 int port = BP_PORT(bp);
4197 int reg_offset; 4197 int reg_offset;
@@ -4200,11 +4200,11 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4200 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 4200 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
4201 4201
4202 val = REG_RD(bp, reg_offset); 4202 val = REG_RD(bp, reg_offset);
4203 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 4203 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);
4204 REG_WR(bp, reg_offset, val); 4204 REG_WR(bp, reg_offset, val);
4205 4205
4206 BNX2X_ERR("FATAL HW block attention set1 0x%x\n", 4206 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
4207 (u32)(attn & HW_INTERRUT_ASSERT_SET_1)); 4207 (u32)(attn & HW_INTERRUPT_ASSERT_SET_1));
4208 bnx2x_panic(); 4208 bnx2x_panic();
4209 } 4209 }
4210} 4210}
@@ -4235,7 +4235,7 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4235 } 4235 }
4236 } 4236 }
4237 4237
4238 if (attn & HW_INTERRUT_ASSERT_SET_2) { 4238 if (attn & HW_INTERRUPT_ASSERT_SET_2) {
4239 4239
4240 int port = BP_PORT(bp); 4240 int port = BP_PORT(bp);
4241 int reg_offset; 4241 int reg_offset;
@@ -4244,11 +4244,11 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4244 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 4244 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
4245 4245
4246 val = REG_RD(bp, reg_offset); 4246 val = REG_RD(bp, reg_offset);
4247 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); 4247 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);
4248 REG_WR(bp, reg_offset, val); 4248 REG_WR(bp, reg_offset, val);
4249 4249
4250 BNX2X_ERR("FATAL HW block attention set2 0x%x\n", 4250 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
4251 (u32)(attn & HW_INTERRUT_ASSERT_SET_2)); 4251 (u32)(attn & HW_INTERRUPT_ASSERT_SET_2));
4252 bnx2x_panic(); 4252 bnx2x_panic();
4253 } 4253 }
4254} 4254}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 43b7342c6e82..129b8101b932 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1983,20 +1983,25 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
1983 1983
1984 for (j = 0; j < max_idx; j++) { 1984 for (j = 0; j < max_idx; j++) {
1985 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; 1985 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
1986 dma_addr_t mapping = rx_buf->mapping;
1986 void *data = rx_buf->data; 1987 void *data = rx_buf->data;
1987 1988
1988 if (!data) 1989 if (!data)
1989 continue; 1990 continue;
1990 1991
1991 dma_unmap_single(&pdev->dev, rx_buf->mapping,
1992 bp->rx_buf_use_size, bp->rx_dir);
1993
1994 rx_buf->data = NULL; 1992 rx_buf->data = NULL;
1995 1993
1996 if (BNXT_RX_PAGE_MODE(bp)) 1994 if (BNXT_RX_PAGE_MODE(bp)) {
1995 mapping -= bp->rx_dma_offset;
1996 dma_unmap_page(&pdev->dev, mapping,
1997 PAGE_SIZE, bp->rx_dir);
1997 __free_page(data); 1998 __free_page(data);
1998 else 1999 } else {
2000 dma_unmap_single(&pdev->dev, mapping,
2001 bp->rx_buf_use_size,
2002 bp->rx_dir);
1999 kfree(data); 2003 kfree(data);
2004 }
2000 } 2005 }
2001 2006
2002 for (j = 0; j < max_agg_idx; j++) { 2007 for (j = 0; j < max_agg_idx; j++) {
@@ -2455,6 +2460,18 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2455 return 0; 2460 return 0;
2456} 2461}
2457 2462
2463static void bnxt_init_cp_rings(struct bnxt *bp)
2464{
2465 int i;
2466
2467 for (i = 0; i < bp->cp_nr_rings; i++) {
2468 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
2469 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
2470
2471 ring->fw_ring_id = INVALID_HW_RING_ID;
2472 }
2473}
2474
2458static int bnxt_init_rx_rings(struct bnxt *bp) 2475static int bnxt_init_rx_rings(struct bnxt *bp)
2459{ 2476{
2460 int i, rc = 0; 2477 int i, rc = 0;
@@ -4735,7 +4752,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
4735 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); 4752 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
4736 if (rc) { 4753 if (rc) {
4737 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 4754 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
4738 rc, i); 4755 i, rc);
4739 return rc; 4756 return rc;
4740 } 4757 }
4741 } 4758 }
@@ -5009,6 +5026,7 @@ static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
5009 5026
5010static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 5027static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
5011{ 5028{
5029 bnxt_init_cp_rings(bp);
5012 bnxt_init_rx_rings(bp); 5030 bnxt_init_rx_rings(bp);
5013 bnxt_init_tx_rings(bp); 5031 bnxt_init_tx_rings(bp);
5014 bnxt_init_ring_grps(bp, irq_re_init); 5032 bnxt_init_ring_grps(bp, irq_re_init);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 9e59663a6ead..0f6811860ad5 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1930,13 +1930,13 @@ static void
1930bfa_ioc_send_enable(struct bfa_ioc *ioc) 1930bfa_ioc_send_enable(struct bfa_ioc *ioc)
1931{ 1931{
1932 struct bfi_ioc_ctrl_req enable_req; 1932 struct bfi_ioc_ctrl_req enable_req;
1933 struct timeval tv;
1934 1933
1935 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, 1934 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1936 bfa_ioc_portid(ioc)); 1935 bfa_ioc_portid(ioc));
1937 enable_req.clscode = htons(ioc->clscode); 1936 enable_req.clscode = htons(ioc->clscode);
1938 do_gettimeofday(&tv); 1937 enable_req.rsvd = htons(0);
1939 enable_req.tv_sec = ntohl(tv.tv_sec); 1938 /* overflow in 2106 */
1939 enable_req.tv_sec = ntohl(ktime_get_real_seconds());
1940 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req)); 1940 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
1941} 1941}
1942 1942
@@ -1947,6 +1947,10 @@ bfa_ioc_send_disable(struct bfa_ioc *ioc)
1947 1947
1948 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ, 1948 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1949 bfa_ioc_portid(ioc)); 1949 bfa_ioc_portid(ioc));
1950 disable_req.clscode = htons(ioc->clscode);
1951 disable_req.rsvd = htons(0);
1952 /* overflow in 2106 */
1953 disable_req.tv_sec = ntohl(ktime_get_real_seconds());
1950 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req)); 1954 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
1951} 1955}
1952 1956
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 30e855004c57..02dd5246dfae 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -4939,8 +4939,9 @@ static int
4939__be_cmd_set_logical_link_config(struct be_adapter *adapter, 4939__be_cmd_set_logical_link_config(struct be_adapter *adapter,
4940 int link_state, int version, u8 domain) 4940 int link_state, int version, u8 domain)
4941{ 4941{
4942 struct be_mcc_wrb *wrb;
4943 struct be_cmd_req_set_ll_link *req; 4942 struct be_cmd_req_set_ll_link *req;
4943 struct be_mcc_wrb *wrb;
4944 u32 link_config = 0;
4944 int status; 4945 int status;
4945 4946
4946 mutex_lock(&adapter->mcc_lock); 4947 mutex_lock(&adapter->mcc_lock);
@@ -4962,10 +4963,12 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
4962 4963
4963 if (link_state == IFLA_VF_LINK_STATE_ENABLE || 4964 if (link_state == IFLA_VF_LINK_STATE_ENABLE ||
4964 link_state == IFLA_VF_LINK_STATE_AUTO) 4965 link_state == IFLA_VF_LINK_STATE_AUTO)
4965 req->link_config |= PLINK_ENABLE; 4966 link_config |= PLINK_ENABLE;
4966 4967
4967 if (link_state == IFLA_VF_LINK_STATE_AUTO) 4968 if (link_state == IFLA_VF_LINK_STATE_AUTO)
4968 req->link_config |= PLINK_TRACK; 4969 link_config |= PLINK_TRACK;
4970
4971 req->link_config = cpu_to_le32(link_config);
4969 4972
4970 status = be_mcc_notify_wait(adapter); 4973 status = be_mcc_notify_wait(adapter);
4971err: 4974err:
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 70165fcbff9c..659f1ad37e96 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -190,11 +190,9 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
190 190
191 nps_enet_tx_handler(ndev); 191 nps_enet_tx_handler(ndev);
192 work_done = nps_enet_rx_handler(ndev); 192 work_done = nps_enet_rx_handler(ndev);
193 if (work_done < budget) { 193 if ((work_done < budget) && napi_complete_done(napi, work_done)) {
194 u32 buf_int_enable_value = 0; 194 u32 buf_int_enable_value = 0;
195 195
196 napi_complete_done(napi, work_done);
197
198 /* set tx_done and rx_rdy bits */ 196 /* set tx_done and rx_rdy bits */
199 buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT; 197 buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
200 buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT; 198 buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 333265060de1..ade6b3e4ed13 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -31,6 +31,7 @@
31#include <linux/of.h> 31#include <linux/of.h>
32#include <linux/phy.h> 32#include <linux/phy.h>
33#include <linux/platform_device.h> 33#include <linux/platform_device.h>
34#include <linux/property.h>
34#include <net/ip.h> 35#include <net/ip.h>
35#include <net/ncsi.h> 36#include <net/ncsi.h>
36 37
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 696f2ae8b075..0c1f56e58074 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -82,9 +82,12 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
82 else 82 else
83 *link_status = 0; 83 *link_status = 0;
84 84
85 ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb, &sfp_prsnt); 85 if (mac_cb->media_type == HNAE_MEDIA_TYPE_FIBER) {
86 if (!ret) 86 ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb,
87 *link_status = *link_status && sfp_prsnt; 87 &sfp_prsnt);
88 if (!ret)
89 *link_status = *link_status && sfp_prsnt;
90 }
88 91
89 mac_cb->link = *link_status; 92 mac_cb->link = *link_status;
90} 93}
@@ -816,7 +819,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
816 of_node_put(np); 819 of_node_put(np);
817 820
818 np = of_parse_phandle(to_of_node(mac_cb->fw_port), 821 np = of_parse_phandle(to_of_node(mac_cb->fw_port),
819 "serdes-syscon", 0); 822 "serdes-syscon", 0);
820 syscon = syscon_node_to_regmap(np); 823 syscon = syscon_node_to_regmap(np);
821 of_node_put(np); 824 of_node_put(np);
822 if (IS_ERR_OR_NULL(syscon)) { 825 if (IS_ERR_OR_NULL(syscon)) {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index d07b4fe45a44..e0bc79ea3d88 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1519,6 +1519,7 @@ static void hns_dsaf_set_mac_key(
1519 mac_key->high.bits.mac_3 = addr[3]; 1519 mac_key->high.bits.mac_3 = addr[3];
1520 mac_key->low.bits.mac_4 = addr[4]; 1520 mac_key->low.bits.mac_4 = addr[4];
1521 mac_key->low.bits.mac_5 = addr[5]; 1521 mac_key->low.bits.mac_5 = addr[5];
1522 mac_key->low.bits.port_vlan = 0;
1522 dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_VLAN_M, 1523 dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_VLAN_M,
1523 DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id); 1524 DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
1524 dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, 1525 dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
@@ -2683,10 +2684,11 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
2683 /* find the tcam entry index for promisc */ 2684 /* find the tcam entry index for promisc */
2684 entry_index = dsaf_promisc_tcam_entry(port); 2685 entry_index = dsaf_promisc_tcam_entry(port);
2685 2686
2687 memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
2688 memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
2689
2686 /* config key mask */ 2690 /* config key mask */
2687 if (enable) { 2691 if (enable) {
2688 memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
2689 memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
2690 dsaf_set_field(tbl_tcam_data.low.bits.port_vlan, 2692 dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
2691 DSAF_TBL_TCAM_KEY_PORT_M, 2693 DSAF_TBL_TCAM_KEY_PORT_M,
2692 DSAF_TBL_TCAM_KEY_PORT_S, port); 2694 DSAF_TBL_TCAM_KEY_PORT_S, port);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index a2c22d084ce9..e13aa064a8e9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -461,6 +461,32 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
461 return 0; 461 return 0;
462} 462}
463 463
464int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
465{
466 union acpi_object *obj;
467 union acpi_object obj_args, argv4;
468
469 obj_args.integer.type = ACPI_TYPE_INTEGER;
470 obj_args.integer.value = mac_cb->mac_id;
471
472 argv4.type = ACPI_TYPE_PACKAGE,
473 argv4.package.count = 1,
474 argv4.package.elements = &obj_args,
475
476 obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
477 hns_dsaf_acpi_dsm_uuid, 0,
478 HNS_OP_GET_SFP_STAT_FUNC, &argv4);
479
480 if (!obj || obj->type != ACPI_TYPE_INTEGER)
481 return -ENODEV;
482
483 *sfp_prsnt = obj->integer.value;
484
485 ACPI_FREE(obj);
486
487 return 0;
488}
489
464/** 490/**
465 * hns_mac_config_sds_loopback - set loop back for serdes 491 * hns_mac_config_sds_loopback - set loop back for serdes
466 * @mac_cb: mac control block 492 * @mac_cb: mac control block
@@ -592,7 +618,7 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
592 misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi; 618 misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
593 619
594 misc_op->get_phy_if = hns_mac_get_phy_if_acpi; 620 misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
595 misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt; 621 misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi;
596 622
597 misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi; 623 misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi;
598 } else { 624 } else {
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 2175cced402f..e9af89ad039c 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6274,8 +6274,8 @@ static int e1000e_pm_freeze(struct device *dev)
6274 /* Quiesce the device without resetting the hardware */ 6274 /* Quiesce the device without resetting the hardware */
6275 e1000e_down(adapter, false); 6275 e1000e_down(adapter, false);
6276 e1000_free_irq(adapter); 6276 e1000_free_irq(adapter);
6277 e1000e_reset_interrupt_capability(adapter);
6278 } 6277 }
6278 e1000e_reset_interrupt_capability(adapter);
6279 6279
6280 /* Allow time for pending master requests to run */ 6280 /* Allow time for pending master requests to run */
6281 e1000e_disable_pcie_master(&adapter->hw); 6281 e1000e_disable_pcie_master(&adapter->hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a0506e28d167..703444e92964 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4331,8 +4331,12 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4331 if (!vsi->netdev) 4331 if (!vsi->netdev)
4332 return; 4332 return;
4333 4333
4334 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 4334 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4335 napi_enable(&vsi->q_vectors[q_idx]->napi); 4335 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4336
4337 if (q_vector->rx.ring || q_vector->tx.ring)
4338 napi_enable(&q_vector->napi);
4339 }
4336} 4340}
4337 4341
4338/** 4342/**
@@ -4346,8 +4350,12 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4346 if (!vsi->netdev) 4350 if (!vsi->netdev)
4347 return; 4351 return;
4348 4352
4349 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) 4353 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4350 napi_disable(&vsi->q_vectors[q_idx]->napi); 4354 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4355
4356 if (q_vector->rx.ring || q_vector->tx.ring)
4357 napi_disable(&q_vector->napi);
4358 }
4351} 4359}
4352 4360
4353/** 4361/**
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 55957246c0e8..b5d5519542e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -294,7 +294,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
294 struct netdev_notifier_changeupper_info *info) 294 struct netdev_notifier_changeupper_info *info)
295{ 295{
296 struct net_device *upper = info->upper_dev, *ndev_tmp; 296 struct net_device *upper = info->upper_dev, *ndev_tmp;
297 struct netdev_lag_upper_info *lag_upper_info; 297 struct netdev_lag_upper_info *lag_upper_info = NULL;
298 bool is_bonded; 298 bool is_bonded;
299 int bond_status = 0; 299 int bond_status = 0;
300 int num_slaves = 0; 300 int num_slaves = 0;
@@ -303,7 +303,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
303 if (!netif_is_lag_master(upper)) 303 if (!netif_is_lag_master(upper))
304 return 0; 304 return 0;
305 305
306 lag_upper_info = info->upper_info; 306 if (info->linking)
307 lag_upper_info = info->upper_info;
307 308
308 /* The event may still be of interest if the slave does not belong to 309 /* The event may still be of interest if the slave does not belong to
309 * us, but is enslaved to a master which has one or more of our netdevs 310 * us, but is enslaved to a master which has one or more of our netdevs
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 06c9f4100cb9..6ad44be08b33 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -25,6 +25,7 @@
25#include <linux/of_irq.h> 25#include <linux/of_irq.h>
26#include <linux/crc32.h> 26#include <linux/crc32.h>
27#include <linux/crc32c.h> 27#include <linux/crc32c.h>
28#include <linux/circ_buf.h>
28 29
29#include "moxart_ether.h" 30#include "moxart_ether.h"
30 31
@@ -278,6 +279,13 @@ rx_next:
278 return rx; 279 return rx;
279} 280}
280 281
282static int moxart_tx_queue_space(struct net_device *ndev)
283{
284 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
285
286 return CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM);
287}
288
281static void moxart_tx_finished(struct net_device *ndev) 289static void moxart_tx_finished(struct net_device *ndev)
282{ 290{
283 struct moxart_mac_priv_t *priv = netdev_priv(ndev); 291 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
@@ -297,6 +305,9 @@ static void moxart_tx_finished(struct net_device *ndev)
297 tx_tail = TX_NEXT(tx_tail); 305 tx_tail = TX_NEXT(tx_tail);
298 } 306 }
299 priv->tx_tail = tx_tail; 307 priv->tx_tail = tx_tail;
308 if (netif_queue_stopped(ndev) &&
309 moxart_tx_queue_space(ndev) >= TX_WAKE_THRESHOLD)
310 netif_wake_queue(ndev);
300} 311}
301 312
302static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id) 313static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
@@ -324,13 +335,18 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
324 struct moxart_mac_priv_t *priv = netdev_priv(ndev); 335 struct moxart_mac_priv_t *priv = netdev_priv(ndev);
325 void *desc; 336 void *desc;
326 unsigned int len; 337 unsigned int len;
327 unsigned int tx_head = priv->tx_head; 338 unsigned int tx_head;
328 u32 txdes1; 339 u32 txdes1;
329 int ret = NETDEV_TX_BUSY; 340 int ret = NETDEV_TX_BUSY;
330 341
342 spin_lock_irq(&priv->txlock);
343
344 tx_head = priv->tx_head;
331 desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head); 345 desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
332 346
333 spin_lock_irq(&priv->txlock); 347 if (moxart_tx_queue_space(ndev) == 1)
348 netif_stop_queue(ndev);
349
334 if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) { 350 if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
335 net_dbg_ratelimited("no TX space for packet\n"); 351 net_dbg_ratelimited("no TX space for packet\n");
336 priv->stats.tx_dropped++; 352 priv->stats.tx_dropped++;
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
index 93a9563ac7c6..afc32ec998c0 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.h
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -59,6 +59,7 @@
59#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK)) 59#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK))
60#define TX_BUF_SIZE 1600 60#define TX_BUF_SIZE 1600
61#define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK+1) 61#define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK+1)
62#define TX_WAKE_THRESHOLD 16
62 63
63#define RX_DESC_NUM 64 64#define RX_DESC_NUM 64
64#define RX_DESC_NUM_MASK (RX_DESC_NUM-1) 65#define RX_DESC_NUM_MASK (RX_DESC_NUM-1)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 61e5741f935c..e2197160e4dc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -3295,9 +3295,10 @@ void nfp_net_netdev_clean(struct net_device *netdev)
3295{ 3295{
3296 struct nfp_net *nn = netdev_priv(netdev); 3296 struct nfp_net *nn = netdev_priv(netdev);
3297 3297
3298 unregister_netdev(nn->dp.netdev);
3299
3298 if (nn->dp.xdp_prog) 3300 if (nn->dp.xdp_prog)
3299 bpf_prog_put(nn->dp.xdp_prog); 3301 bpf_prog_put(nn->dp.xdp_prog);
3300 if (nn->dp.bpf_offload_xdp) 3302 if (nn->dp.bpf_offload_xdp)
3301 nfp_net_xdp_offload(nn, NULL); 3303 nfp_net_xdp_offload(nn, NULL);
3302 unregister_netdev(nn->dp.netdev);
3303} 3304}
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 7cd76b6b5cb9..2ae852454780 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -2216,18 +2216,15 @@ static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
2216{ 2216{
2217 bool want[OFDPA_CTRL_MAX] = { 0, }; 2217 bool want[OFDPA_CTRL_MAX] = { 0, };
2218 bool prev_ctrls[OFDPA_CTRL_MAX]; 2218 bool prev_ctrls[OFDPA_CTRL_MAX];
2219 u8 uninitialized_var(prev_state); 2219 u8 prev_state;
2220 int err; 2220 int err;
2221 int i; 2221 int i;
2222 2222
2223 if (switchdev_trans_ph_prepare(trans)) { 2223 prev_state = ofdpa_port->stp_state;
2224 memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls)); 2224 if (prev_state == state)
2225 prev_state = ofdpa_port->stp_state;
2226 }
2227
2228 if (ofdpa_port->stp_state == state)
2229 return 0; 2225 return 0;
2230 2226
2227 memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
2231 ofdpa_port->stp_state = state; 2228 ofdpa_port->stp_state = state;
2232 2229
2233 switch (state) { 2230 switch (state) {
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 9f3d9c67e3fe..fa674a8bda0c 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1267,6 +1267,7 @@ static void soft_reset_slave(struct cpsw_slave *slave)
1267static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) 1267static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
1268{ 1268{
1269 u32 slave_port; 1269 u32 slave_port;
1270 struct phy_device *phy;
1270 struct cpsw_common *cpsw = priv->cpsw; 1271 struct cpsw_common *cpsw = priv->cpsw;
1271 1272
1272 soft_reset_slave(slave); 1273 soft_reset_slave(slave);
@@ -1300,27 +1301,28 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
1300 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); 1301 1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
1301 1302
1302 if (slave->data->phy_node) { 1303 if (slave->data->phy_node) {
1303 slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node, 1304 phy = of_phy_connect(priv->ndev, slave->data->phy_node,
1304 &cpsw_adjust_link, 0, slave->data->phy_if); 1305 &cpsw_adjust_link, 0, slave->data->phy_if);
1305 if (!slave->phy) { 1306 if (!phy) {
1306 dev_err(priv->dev, "phy \"%s\" not found on slave %d\n", 1307 dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
1307 slave->data->phy_node->full_name, 1308 slave->data->phy_node->full_name,
1308 slave->slave_num); 1309 slave->slave_num);
1309 return; 1310 return;
1310 } 1311 }
1311 } else { 1312 } else {
1312 slave->phy = phy_connect(priv->ndev, slave->data->phy_id, 1313 phy = phy_connect(priv->ndev, slave->data->phy_id,
1313 &cpsw_adjust_link, slave->data->phy_if); 1314 &cpsw_adjust_link, slave->data->phy_if);
1314 if (IS_ERR(slave->phy)) { 1315 if (IS_ERR(phy)) {
1315 dev_err(priv->dev, 1316 dev_err(priv->dev,
1316 "phy \"%s\" not found on slave %d, err %ld\n", 1317 "phy \"%s\" not found on slave %d, err %ld\n",
1317 slave->data->phy_id, slave->slave_num, 1318 slave->data->phy_id, slave->slave_num,
1318 PTR_ERR(slave->phy)); 1319 PTR_ERR(phy));
1319 slave->phy = NULL;
1320 return; 1320 return;
1321 } 1321 }
1322 } 1322 }
1323 1323
1324 slave->phy = phy;
1325
1324 phy_attached_info(slave->phy); 1326 phy_attached_info(slave->phy);
1325 1327
1326 phy_start(slave->phy); 1328 phy_start(slave->phy);
@@ -1817,6 +1819,8 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
1817 } 1819 }
1818 1820
1819 cpsw_intr_enable(cpsw); 1821 cpsw_intr_enable(cpsw);
1822 netif_trans_update(ndev);
1823 netif_tx_wake_all_queues(ndev);
1820} 1824}
1821 1825
1822static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) 1826static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index ffedad2a360a..15b920086251 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -418,8 +418,9 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
418 memset(rd, 0, sizeof(*rd)); 418 memset(rd, 0, sizeof(*rd));
419 rd->hw = hwmap + i; 419 rd->hw = hwmap + i;
420 rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA); 420 rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
421 if (rd->buf == NULL || 421 if (rd->buf)
422 !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) { 422 busaddr = pci_map_single(pdev, rd->buf, len, dir);
423 if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) {
423 if (rd->buf) { 424 if (rd->buf) {
424 net_err_ratelimited("%s: failed to create PCI-MAP for %p\n", 425 net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
425 __func__, rd->buf); 426 __func__, rd->buf);
@@ -430,8 +431,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
430 rd = r->rd + j; 431 rd = r->rd + j;
431 busaddr = rd_get_addr(rd); 432 busaddr = rd_get_addr(rd);
432 rd_set_addr_status(rd, 0, 0); 433 rd_set_addr_status(rd, 0, 0);
433 if (busaddr) 434 pci_unmap_single(pdev, busaddr, len, dir);
434 pci_unmap_single(pdev, busaddr, len, dir);
435 kfree(rd->buf); 435 kfree(rd->buf);
436 rd->buf = NULL; 436 rd->buf = NULL;
437 } 437 }
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 6811d1ef4ef2..bf7d614ff18f 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -695,7 +695,7 @@ void phy_stop_machine(struct phy_device *phydev)
695 cancel_delayed_work_sync(&phydev->state_queue); 695 cancel_delayed_work_sync(&phydev->state_queue);
696 696
697 mutex_lock(&phydev->lock); 697 mutex_lock(&phydev->lock);
698 if (phydev->state > PHY_UP) 698 if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
699 phydev->state = PHY_UP; 699 phydev->state = PHY_UP;
700 mutex_unlock(&phydev->lock); 700 mutex_unlock(&phydev->lock);
701} 701}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index f5552aaaa77a..f3ae88fdf332 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -532,6 +532,7 @@ static const struct driver_info wwan_info = {
532#define LENOVO_VENDOR_ID 0x17ef 532#define LENOVO_VENDOR_ID 0x17ef
533#define NVIDIA_VENDOR_ID 0x0955 533#define NVIDIA_VENDOR_ID 0x0955
534#define HP_VENDOR_ID 0x03f0 534#define HP_VENDOR_ID 0x03f0
535#define MICROSOFT_VENDOR_ID 0x045e
535 536
536static const struct usb_device_id products[] = { 537static const struct usb_device_id products[] = {
537/* BLACKLIST !! 538/* BLACKLIST !!
@@ -761,6 +762,20 @@ static const struct usb_device_id products[] = {
761 .driver_info = 0, 762 .driver_info = 0,
762}, 763},
763 764
765/* Microsoft Surface 2 dock (based on Realtek RTL8152) */
766{
767 USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07ab, USB_CLASS_COMM,
768 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
769 .driver_info = 0,
770},
771
772/* Microsoft Surface 3 dock (based on Realtek RTL8153) */
773{
774 USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM,
775 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
776 .driver_info = 0,
777},
778
764/* WHITELIST!!! 779/* WHITELIST!!!
765 * 780 *
766 * CDC Ether uses two interfaces, not necessarily consecutive. 781 * CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 4deced102f72..ddc62cb69be8 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -517,6 +517,7 @@ enum rtl8152_flags {
517 517
518/* Define these values to match your device */ 518/* Define these values to match your device */
519#define VENDOR_ID_REALTEK 0x0bda 519#define VENDOR_ID_REALTEK 0x0bda
520#define VENDOR_ID_MICROSOFT 0x045e
520#define VENDOR_ID_SAMSUNG 0x04e8 521#define VENDOR_ID_SAMSUNG 0x04e8
521#define VENDOR_ID_LENOVO 0x17ef 522#define VENDOR_ID_LENOVO 0x17ef
522#define VENDOR_ID_NVIDIA 0x0955 523#define VENDOR_ID_NVIDIA 0x0955
@@ -1294,6 +1295,7 @@ static void intr_callback(struct urb *urb)
1294 } 1295 }
1295 } else { 1296 } else {
1296 if (netif_carrier_ok(tp->netdev)) { 1297 if (netif_carrier_ok(tp->netdev)) {
1298 netif_stop_queue(tp->netdev);
1297 set_bit(RTL8152_LINK_CHG, &tp->flags); 1299 set_bit(RTL8152_LINK_CHG, &tp->flags);
1298 schedule_delayed_work(&tp->schedule, 0); 1300 schedule_delayed_work(&tp->schedule, 0);
1299 } 1301 }
@@ -3171,6 +3173,9 @@ static void set_carrier(struct r8152 *tp)
3171 napi_enable(&tp->napi); 3173 napi_enable(&tp->napi);
3172 netif_wake_queue(netdev); 3174 netif_wake_queue(netdev);
3173 netif_info(tp, link, netdev, "carrier on\n"); 3175 netif_info(tp, link, netdev, "carrier on\n");
3176 } else if (netif_queue_stopped(netdev) &&
3177 skb_queue_len(&tp->tx_queue) < tp->tx_qlen) {
3178 netif_wake_queue(netdev);
3174 } 3179 }
3175 } else { 3180 } else {
3176 if (netif_carrier_ok(netdev)) { 3181 if (netif_carrier_ok(netdev)) {
@@ -3711,8 +3716,16 @@ static int rtl8152_resume(struct usb_interface *intf)
3711 tp->rtl_ops.autosuspend_en(tp, false); 3716 tp->rtl_ops.autosuspend_en(tp, false);
3712 napi_disable(napi); 3717 napi_disable(napi);
3713 set_bit(WORK_ENABLE, &tp->flags); 3718 set_bit(WORK_ENABLE, &tp->flags);
3714 if (netif_carrier_ok(netdev)) 3719 if (netif_carrier_ok(netdev)) {
3715 rtl_start_rx(tp); 3720 if (rtl8152_get_speed(tp) & LINK_STATUS) {
3721 rtl_start_rx(tp);
3722 } else {
3723 netif_carrier_off(netdev);
3724 tp->rtl_ops.disable(tp);
3725 netif_info(tp, link, netdev,
3726 "linking down\n");
3727 }
3728 }
3716 napi_enable(napi); 3729 napi_enable(napi);
3717 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3730 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3718 smp_mb__after_atomic(); 3731 smp_mb__after_atomic();
@@ -4543,6 +4556,8 @@ static void rtl8152_disconnect(struct usb_interface *intf)
4543static struct usb_device_id rtl8152_table[] = { 4556static struct usb_device_id rtl8152_table[] = {
4544 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, 4557 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
4545 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, 4558 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
4559 {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)},
4560 {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)},
4546 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, 4561 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
4547 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, 4562 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
4548 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)}, 4563 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 779f516e7a4e..47a479f26e5d 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -343,8 +343,6 @@ static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
343 struct ib_device *ibdev = dev->dev; 343 struct ib_device *ibdev = dev->dev;
344 int ret; 344 int ret;
345 345
346 BUG_ON(queue_idx >= ctrl->queue_count);
347
348 ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command), 346 ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
349 DMA_TO_DEVICE); 347 DMA_TO_DEVICE);
350 if (ret) 348 if (ret)
@@ -652,8 +650,22 @@ out_free_queues:
652 650
653static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl) 651static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
654{ 652{
653 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
654 unsigned int nr_io_queues;
655 int i, ret; 655 int i, ret;
656 656
657 nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
658 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
659 if (ret)
660 return ret;
661
662 ctrl->queue_count = nr_io_queues + 1;
663 if (ctrl->queue_count < 2)
664 return 0;
665
666 dev_info(ctrl->ctrl.device,
667 "creating %d I/O queues.\n", nr_io_queues);
668
657 for (i = 1; i < ctrl->queue_count; i++) { 669 for (i = 1; i < ctrl->queue_count; i++) {
658 ret = nvme_rdma_init_queue(ctrl, i, 670 ret = nvme_rdma_init_queue(ctrl, i,
659 ctrl->ctrl.opts->queue_size); 671 ctrl->ctrl.opts->queue_size);
@@ -1791,20 +1803,8 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
1791 1803
1792static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl) 1804static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
1793{ 1805{
1794 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
1795 int ret; 1806 int ret;
1796 1807
1797 ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
1798 if (ret)
1799 return ret;
1800
1801 ctrl->queue_count = opts->nr_io_queues + 1;
1802 if (ctrl->queue_count < 2)
1803 return 0;
1804
1805 dev_info(ctrl->ctrl.device,
1806 "creating %d I/O queues.\n", opts->nr_io_queues);
1807
1808 ret = nvme_rdma_init_io_queues(ctrl); 1808 ret = nvme_rdma_init_io_queues(ctrl);
1809 if (ret) 1809 if (ret)
1810 return ret; 1810 return ret;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 11b0a0a5f661..798653b329b2 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -425,6 +425,13 @@ void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
425 ctrl->sqs[qid] = sq; 425 ctrl->sqs[qid] = sq;
426} 426}
427 427
428static void nvmet_confirm_sq(struct percpu_ref *ref)
429{
430 struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
431
432 complete(&sq->confirm_done);
433}
434
428void nvmet_sq_destroy(struct nvmet_sq *sq) 435void nvmet_sq_destroy(struct nvmet_sq *sq)
429{ 436{
430 /* 437 /*
@@ -433,7 +440,8 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
433 */ 440 */
434 if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq) 441 if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
435 nvmet_async_events_free(sq->ctrl); 442 nvmet_async_events_free(sq->ctrl);
436 percpu_ref_kill(&sq->ref); 443 percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
444 wait_for_completion(&sq->confirm_done);
437 wait_for_completion(&sq->free_done); 445 wait_for_completion(&sq->free_done);
438 percpu_ref_exit(&sq->ref); 446 percpu_ref_exit(&sq->ref);
439 447
@@ -461,6 +469,7 @@ int nvmet_sq_init(struct nvmet_sq *sq)
461 return ret; 469 return ret;
462 } 470 }
463 init_completion(&sq->free_done); 471 init_completion(&sq->free_done);
472 init_completion(&sq->confirm_done);
464 473
465 return 0; 474 return 0;
466} 475}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d1f06e7768ff..22f7bc6bac7f 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -223,8 +223,6 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
223static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl, 223static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
224 struct nvme_loop_iod *iod, unsigned int queue_idx) 224 struct nvme_loop_iod *iod, unsigned int queue_idx)
225{ 225{
226 BUG_ON(queue_idx >= ctrl->queue_count);
227
228 iod->req.cmd = &iod->cmd; 226 iod->req.cmd = &iod->cmd;
229 iod->req.rsp = &iod->rsp; 227 iod->req.rsp = &iod->rsp;
230 iod->queue = &ctrl->queues[queue_idx]; 228 iod->queue = &ctrl->queues[queue_idx];
@@ -288,9 +286,9 @@ static struct blk_mq_ops nvme_loop_admin_mq_ops = {
288 286
289static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) 287static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
290{ 288{
289 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
291 blk_cleanup_queue(ctrl->ctrl.admin_q); 290 blk_cleanup_queue(ctrl->ctrl.admin_q);
292 blk_mq_free_tag_set(&ctrl->admin_tag_set); 291 blk_mq_free_tag_set(&ctrl->admin_tag_set);
293 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
294} 292}
295 293
296static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl) 294static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
@@ -314,6 +312,43 @@ free_ctrl:
314 kfree(ctrl); 312 kfree(ctrl);
315} 313}
316 314
315static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
316{
317 int i;
318
319 for (i = 1; i < ctrl->queue_count; i++)
320 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
321}
322
323static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
324{
325 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
326 unsigned int nr_io_queues;
327 int ret, i;
328
329 nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
330 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
331 if (ret || !nr_io_queues)
332 return ret;
333
334 dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
335
336 for (i = 1; i <= nr_io_queues; i++) {
337 ctrl->queues[i].ctrl = ctrl;
338 ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
339 if (ret)
340 goto out_destroy_queues;
341
342 ctrl->queue_count++;
343 }
344
345 return 0;
346
347out_destroy_queues:
348 nvme_loop_destroy_io_queues(ctrl);
349 return ret;
350}
351
317static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) 352static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
318{ 353{
319 int error; 354 int error;
@@ -385,17 +420,13 @@ out_free_sq:
385 420
386static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) 421static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
387{ 422{
388 int i;
389
390 nvme_stop_keep_alive(&ctrl->ctrl); 423 nvme_stop_keep_alive(&ctrl->ctrl);
391 424
392 if (ctrl->queue_count > 1) { 425 if (ctrl->queue_count > 1) {
393 nvme_stop_queues(&ctrl->ctrl); 426 nvme_stop_queues(&ctrl->ctrl);
394 blk_mq_tagset_busy_iter(&ctrl->tag_set, 427 blk_mq_tagset_busy_iter(&ctrl->tag_set,
395 nvme_cancel_request, &ctrl->ctrl); 428 nvme_cancel_request, &ctrl->ctrl);
396 429 nvme_loop_destroy_io_queues(ctrl);
397 for (i = 1; i < ctrl->queue_count; i++)
398 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
399 } 430 }
400 431
401 if (ctrl->ctrl.state == NVME_CTRL_LIVE) 432 if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -467,19 +498,14 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
467 if (ret) 498 if (ret)
468 goto out_disable; 499 goto out_disable;
469 500
470 for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) { 501 ret = nvme_loop_init_io_queues(ctrl);
471 ctrl->queues[i].ctrl = ctrl; 502 if (ret)
472 ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); 503 goto out_destroy_admin;
473 if (ret)
474 goto out_free_queues;
475
476 ctrl->queue_count++;
477 }
478 504
479 for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) { 505 for (i = 1; i < ctrl->queue_count; i++) {
480 ret = nvmf_connect_io_queue(&ctrl->ctrl, i); 506 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
481 if (ret) 507 if (ret)
482 goto out_free_queues; 508 goto out_destroy_io;
483 } 509 }
484 510
485 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 511 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -492,9 +518,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
492 518
493 return; 519 return;
494 520
495out_free_queues: 521out_destroy_io:
496 for (i = 1; i < ctrl->queue_count; i++) 522 nvme_loop_destroy_io_queues(ctrl);
497 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); 523out_destroy_admin:
498 nvme_loop_destroy_admin_queue(ctrl); 524 nvme_loop_destroy_admin_queue(ctrl);
499out_disable: 525out_disable:
500 dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); 526 dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
@@ -533,25 +559,12 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
533 559
534static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) 560static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
535{ 561{
536 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
537 int ret, i; 562 int ret, i;
538 563
539 ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues); 564 ret = nvme_loop_init_io_queues(ctrl);
540 if (ret || !opts->nr_io_queues) 565 if (ret)
541 return ret; 566 return ret;
542 567
543 dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
544 opts->nr_io_queues);
545
546 for (i = 1; i <= opts->nr_io_queues; i++) {
547 ctrl->queues[i].ctrl = ctrl;
548 ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
549 if (ret)
550 goto out_destroy_queues;
551
552 ctrl->queue_count++;
553 }
554
555 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); 568 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
556 ctrl->tag_set.ops = &nvme_loop_mq_ops; 569 ctrl->tag_set.ops = &nvme_loop_mq_ops;
557 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; 570 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
@@ -575,7 +588,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
575 goto out_free_tagset; 588 goto out_free_tagset;
576 } 589 }
577 590
578 for (i = 1; i <= opts->nr_io_queues; i++) { 591 for (i = 1; i < ctrl->queue_count; i++) {
579 ret = nvmf_connect_io_queue(&ctrl->ctrl, i); 592 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
580 if (ret) 593 if (ret)
581 goto out_cleanup_connect_q; 594 goto out_cleanup_connect_q;
@@ -588,8 +601,7 @@ out_cleanup_connect_q:
588out_free_tagset: 601out_free_tagset:
589 blk_mq_free_tag_set(&ctrl->tag_set); 602 blk_mq_free_tag_set(&ctrl->tag_set);
590out_destroy_queues: 603out_destroy_queues:
591 for (i = 1; i < ctrl->queue_count; i++) 604 nvme_loop_destroy_io_queues(ctrl);
592 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
593 return ret; 605 return ret;
594} 606}
595 607
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 1370eee0a3c0..f7ff15f17ca9 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -73,6 +73,7 @@ struct nvmet_sq {
73 u16 qid; 73 u16 qid;
74 u16 size; 74 u16 size;
75 struct completion free_done; 75 struct completion free_done;
76 struct completion confirm_done;
76}; 77};
77 78
78/** 79/**
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 9aa1da3778b3..ecc4fe862561 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -703,11 +703,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
703{ 703{
704 u16 status; 704 u16 status;
705 705
706 cmd->queue = queue;
707 cmd->n_rdma = 0;
708 cmd->req.port = queue->port;
709
710
711 ib_dma_sync_single_for_cpu(queue->dev->device, 706 ib_dma_sync_single_for_cpu(queue->dev->device,
712 cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length, 707 cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
713 DMA_FROM_DEVICE); 708 DMA_FROM_DEVICE);
@@ -760,9 +755,12 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
760 755
761 cmd->queue = queue; 756 cmd->queue = queue;
762 rsp = nvmet_rdma_get_rsp(queue); 757 rsp = nvmet_rdma_get_rsp(queue);
758 rsp->queue = queue;
763 rsp->cmd = cmd; 759 rsp->cmd = cmd;
764 rsp->flags = 0; 760 rsp->flags = 0;
765 rsp->req.cmd = cmd->nvme_cmd; 761 rsp->req.cmd = cmd->nvme_cmd;
762 rsp->req.port = queue->port;
763 rsp->n_rdma = 0;
766 764
767 if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { 765 if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
768 unsigned long flags; 766 unsigned long flags;
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index bc090daa850a..5dc53d420ca8 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -939,8 +939,10 @@ parport_register_dev_model(struct parport *port, const char *name,
939 * pardevice fields. -arca 939 * pardevice fields. -arca
940 */ 940 */
941 port->ops->init_state(par_dev, par_dev->state); 941 port->ops->init_state(par_dev, par_dev->state);
942 port->proc_device = par_dev; 942 if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
943 parport_device_proc_register(par_dev); 943 port->proc_device = par_dev;
944 parport_device_proc_register(par_dev);
945 }
944 946
945 return par_dev; 947 return par_dev;
946 948
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index 52b5bdccf5f0..b89c373555c5 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -14,6 +14,7 @@
14 * Copyright (C) 2015 - 2016 Cavium, Inc. 14 * Copyright (C) 2015 - 2016 Cavium, Inc.
15 */ 15 */
16 16
17#include <linux/bitfield.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/of_address.h> 20#include <linux/of_address.h>
@@ -334,6 +335,50 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
334 335
335#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) 336#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
336 337
338#define PEM_RES_BASE 0x87e0c0000000UL
339#define PEM_NODE_MASK GENMASK(45, 44)
340#define PEM_INDX_MASK GENMASK(26, 24)
341#define PEM_MIN_DOM_IN_NODE 4
342#define PEM_MAX_DOM_IN_NODE 10
343
344static void thunder_pem_reserve_range(struct device *dev, int seg,
345 struct resource *r)
346{
347 resource_size_t start = r->start, end = r->end;
348 struct resource *res;
349 const char *regionid;
350
351 regionid = kasprintf(GFP_KERNEL, "PEM RC:%d", seg);
352 if (!regionid)
353 return;
354
355 res = request_mem_region(start, end - start + 1, regionid);
356 if (res)
357 res->flags &= ~IORESOURCE_BUSY;
358 else
359 kfree(regionid);
360
361 dev_info(dev, "%pR %s reserved\n", r,
362 res ? "has been" : "could not be");
363}
364
365static void thunder_pem_legacy_fw(struct acpi_pci_root *root,
366 struct resource *res_pem)
367{
368 int node = acpi_get_node(root->device->handle);
369 int index;
370
371 if (node == NUMA_NO_NODE)
372 node = 0;
373
374 index = root->segment - PEM_MIN_DOM_IN_NODE;
375 index -= node * PEM_MAX_DOM_IN_NODE;
376 res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) |
377 FIELD_PREP(PEM_INDX_MASK, index);
378 res_pem->end = res_pem->start + SZ_16M - 1;
379 res_pem->flags = IORESOURCE_MEM;
380}
381
337static int thunder_pem_acpi_init(struct pci_config_window *cfg) 382static int thunder_pem_acpi_init(struct pci_config_window *cfg)
338{ 383{
339 struct device *dev = cfg->parent; 384 struct device *dev = cfg->parent;
@@ -346,10 +391,17 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg)
346 if (!res_pem) 391 if (!res_pem)
347 return -ENOMEM; 392 return -ENOMEM;
348 393
349 ret = acpi_get_rc_resources(dev, "THRX0002", root->segment, res_pem); 394 ret = acpi_get_rc_resources(dev, "CAVA02B", root->segment, res_pem);
395
396 /*
397 * If we fail to gather resources it means that we run with old
398 * FW where we need to calculate PEM-specific resources manually.
399 */
350 if (ret) { 400 if (ret) {
351 dev_err(dev, "can't get rc base address\n"); 401 thunder_pem_legacy_fw(root, res_pem);
352 return ret; 402 /* Reserve PEM-specific resources and PCI configuration space */
403 thunder_pem_reserve_range(dev, root->segment, res_pem);
404 thunder_pem_reserve_range(dev, root->segment, &cfg->res);
353 } 405 }
354 406
355 return thunder_pem_init(dev, cfg, res_pem); 407 return thunder_pem_init(dev, cfg, res_pem);
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c
index bd4c9ec25edc..384c27e664fe 100644
--- a/drivers/pci/host/pcie-iproc-bcma.c
+++ b/drivers/pci/host/pcie-iproc-bcma.c
@@ -44,8 +44,7 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
44{ 44{
45 struct device *dev = &bdev->dev; 45 struct device *dev = &bdev->dev;
46 struct iproc_pcie *pcie; 46 struct iproc_pcie *pcie;
47 LIST_HEAD(res); 47 LIST_HEAD(resources);
48 struct resource res_mem;
49 int ret; 48 int ret;
50 49
51 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); 50 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
@@ -63,22 +62,23 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
63 62
64 pcie->base_addr = bdev->addr; 63 pcie->base_addr = bdev->addr;
65 64
66 res_mem.start = bdev->addr_s[0]; 65 pcie->mem.start = bdev->addr_s[0];
67 res_mem.end = bdev->addr_s[0] + SZ_128M - 1; 66 pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
68 res_mem.name = "PCIe MEM space"; 67 pcie->mem.name = "PCIe MEM space";
69 res_mem.flags = IORESOURCE_MEM; 68 pcie->mem.flags = IORESOURCE_MEM;
70 pci_add_resource(&res, &res_mem); 69 pci_add_resource(&resources, &pcie->mem);
71 70
72 pcie->map_irq = iproc_pcie_bcma_map_irq; 71 pcie->map_irq = iproc_pcie_bcma_map_irq;
73 72
74 ret = iproc_pcie_setup(pcie, &res); 73 ret = iproc_pcie_setup(pcie, &resources);
75 if (ret) 74 if (ret) {
76 dev_err(dev, "PCIe controller setup failed\n"); 75 dev_err(dev, "PCIe controller setup failed\n");
77 76 pci_free_resource_list(&resources);
78 pci_free_resource_list(&res); 77 return ret;
78 }
79 79
80 bcma_set_drvdata(bdev, pcie); 80 bcma_set_drvdata(bdev, pcie);
81 return ret; 81 return 0;
82} 82}
83 83
84static void iproc_pcie_bcma_remove(struct bcma_device *bdev) 84static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index f4909bb0b2ad..8c6a327ca6cd 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -51,7 +51,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
51 struct device_node *np = dev->of_node; 51 struct device_node *np = dev->of_node;
52 struct resource reg; 52 struct resource reg;
53 resource_size_t iobase = 0; 53 resource_size_t iobase = 0;
54 LIST_HEAD(res); 54 LIST_HEAD(resources);
55 int ret; 55 int ret;
56 56
57 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); 57 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
@@ -96,10 +96,10 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
96 pcie->phy = NULL; 96 pcie->phy = NULL;
97 } 97 }
98 98
99 ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase); 99 ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &resources,
100 &iobase);
100 if (ret) { 101 if (ret) {
101 dev_err(dev, 102 dev_err(dev, "unable to get PCI host bridge resources\n");
102 "unable to get PCI host bridge resources\n");
103 return ret; 103 return ret;
104 } 104 }
105 105
@@ -112,14 +112,15 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
112 pcie->map_irq = of_irq_parse_and_map_pci; 112 pcie->map_irq = of_irq_parse_and_map_pci;
113 } 113 }
114 114
115 ret = iproc_pcie_setup(pcie, &res); 115 ret = iproc_pcie_setup(pcie, &resources);
116 if (ret) 116 if (ret) {
117 dev_err(dev, "PCIe controller setup failed\n"); 117 dev_err(dev, "PCIe controller setup failed\n");
118 118 pci_free_resource_list(&resources);
119 pci_free_resource_list(&res); 119 return ret;
120 }
120 121
121 platform_set_drvdata(pdev, pcie); 122 platform_set_drvdata(pdev, pcie);
122 return ret; 123 return 0;
123} 124}
124 125
125static int iproc_pcie_pltfm_remove(struct platform_device *pdev) 126static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
index 04fed8e907f1..0bbe2ea44f3e 100644
--- a/drivers/pci/host/pcie-iproc.h
+++ b/drivers/pci/host/pcie-iproc.h
@@ -90,6 +90,7 @@ struct iproc_pcie {
90#ifdef CONFIG_ARM 90#ifdef CONFIG_ARM
91 struct pci_sys_data sysdata; 91 struct pci_sys_data sysdata;
92#endif 92#endif
93 struct resource mem;
93 struct pci_bus *root_bus; 94 struct pci_bus *root_bus;
94 struct phy *phy; 95 struct phy *phy;
95 int (*map_irq)(const struct pci_dev *, u8, u8); 96 int (*map_irq)(const struct pci_dev *, u8, u8);
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index dc5277ad1b5a..005cadb7a3f8 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -449,6 +449,7 @@ config PHY_QCOM_UFS
449config PHY_QCOM_USB_HS 449config PHY_QCOM_USB_HS
450 tristate "Qualcomm USB HS PHY module" 450 tristate "Qualcomm USB HS PHY module"
451 depends on USB_ULPI_BUS 451 depends on USB_ULPI_BUS
452 depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
452 select GENERIC_PHY 453 select GENERIC_PHY
453 help 454 help
454 Support for the USB high-speed ULPI compliant phy on Qualcomm 455 Support for the USB high-speed ULPI compliant phy on Qualcomm
@@ -510,12 +511,4 @@ config PHY_MESON8B_USB2
510 and GXBB SoCs. 511 and GXBB SoCs.
511 If unsure, say N. 512 If unsure, say N.
512 513
513config PHY_NSP_USB3
514 tristate "Broadcom NorthStar plus USB3 PHY driver"
515 depends on OF && (ARCH_BCM_NSP || COMPILE_TEST)
516 select GENERIC_PHY
517 default ARCH_BCM_NSP
518 help
519 Enable this to support the Broadcom Northstar plus USB3 PHY.
520 If unsure, say N.
521endmenu 514endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index e7b0feb1e125..dd8f3b5d2918 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -62,4 +62,3 @@ obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o
62obj-$(CONFIG_ARCH_TEGRA) += tegra/ 62obj-$(CONFIG_ARCH_TEGRA) += tegra/
63obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o 63obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o
64obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o 64obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o
65obj-$(CONFIG_PHY_NSP_USB3) += phy-bcm-nsp-usb3.o
diff --git a/drivers/phy/phy-bcm-nsp-usb3.c b/drivers/phy/phy-bcm-nsp-usb3.c
deleted file mode 100644
index 49024eaa5545..000000000000
--- a/drivers/phy/phy-bcm-nsp-usb3.c
+++ /dev/null
@@ -1,177 +0,0 @@
1/*
2 * Copyright (C) 2016 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation version 2.
7 *
8 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
9 * kind, whether express or implied; without even the implied warranty
10 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/delay.h>
15#include <linux/io.h>
16#include <linux/kernel.h>
17#include <linux/mfd/syscon.h>
18#include <linux/mdio.h>
19#include <linux/module.h>
20#include <linux/of.h>
21#include <linux/of_address.h>
22#include <linux/phy/phy.h>
23#include <linux/regmap.h>
24
25#define NSP_USB3_RST_CTRL_OFFSET 0x3f8
26
27/* mdio reg access */
28#define NSP_USB3_PHY_BASE_ADDR_REG 0x1f
29
30#define NSP_USB3_PHY_PLL30_BLOCK 0x8000
31#define NSP_USB3_PLL_CONTROL 0x01
32#define NSP_USB3_PLLA_CONTROL0 0x0a
33#define NSP_USB3_PLLA_CONTROL1 0x0b
34
35#define NSP_USB3_PHY_TX_PMD_BLOCK 0x8040
36#define NSP_USB3_TX_PMD_CONTROL1 0x01
37
38#define NSP_USB3_PHY_PIPE_BLOCK 0x8060
39#define NSP_USB3_LFPS_CMP 0x02
40#define NSP_USB3_LFPS_DEGLITCH 0x03
41
42struct nsp_usb3_phy {
43 struct regmap *usb3_ctrl;
44 struct phy *phy;
45 struct mdio_device *mdiodev;
46};
47
48static int nsp_usb3_phy_init(struct phy *phy)
49{
50 struct nsp_usb3_phy *iphy = phy_get_drvdata(phy);
51 struct mii_bus *bus = iphy->mdiodev->bus;
52 int addr = iphy->mdiodev->addr;
53 u32 data;
54 int rc;
55
56 rc = regmap_read(iphy->usb3_ctrl, 0, &data);
57 if (rc)
58 return rc;
59 data |= 1;
60 rc = regmap_write(iphy->usb3_ctrl, 0, data);
61 if (rc)
62 return rc;
63
64 rc = regmap_write(iphy->usb3_ctrl, NSP_USB3_RST_CTRL_OFFSET, 1);
65 if (rc)
66 return rc;
67
68 rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG,
69 NSP_USB3_PHY_PLL30_BLOCK);
70 if (rc)
71 return rc;
72
73 rc = mdiobus_write(bus, addr, NSP_USB3_PLL_CONTROL, 0x1000);
74 if (rc)
75 return rc;
76
77 rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL0, 0x6400);
78 if (rc)
79 return rc;
80
81 rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL1, 0xc000);
82 if (rc)
83 return rc;
84
85 rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL1, 0x8000);
86 if (rc)
87 return rc;
88
89 rc = regmap_write(iphy->usb3_ctrl, NSP_USB3_RST_CTRL_OFFSET, 0);
90 if (rc)
91 return rc;
92
93 rc = mdiobus_write(bus, addr, NSP_USB3_PLL_CONTROL, 0x9000);
94 if (rc)
95 return rc;
96
97 rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG,
98 NSP_USB3_PHY_PIPE_BLOCK);
99 if (rc)
100 return rc;
101
102 rc = mdiobus_write(bus, addr, NSP_USB3_LFPS_CMP, 0xf30d);
103 if (rc)
104 return rc;
105
106 rc = mdiobus_write(bus, addr, NSP_USB3_LFPS_DEGLITCH, 0x6302);
107 if (rc)
108 return rc;
109
110 rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG,
111 NSP_USB3_PHY_TX_PMD_BLOCK);
112 if (rc)
113 return rc;
114
115 rc = mdiobus_write(bus, addr, NSP_USB3_TX_PMD_CONTROL1, 0x1003);
116
117 return rc;
118}
119
120static struct phy_ops nsp_usb3_phy_ops = {
121 .init = nsp_usb3_phy_init,
122 .owner = THIS_MODULE,
123};
124
125static int nsp_usb3_phy_probe(struct mdio_device *mdiodev)
126{
127 struct device *dev = &mdiodev->dev;
128 struct phy_provider *provider;
129 struct nsp_usb3_phy *iphy;
130
131 iphy = devm_kzalloc(dev, sizeof(*iphy), GFP_KERNEL);
132 if (!iphy)
133 return -ENOMEM;
134 iphy->mdiodev = mdiodev;
135
136 iphy->usb3_ctrl = syscon_regmap_lookup_by_phandle(dev->of_node,
137 "usb3-ctrl-syscon");
138 if (IS_ERR(iphy->usb3_ctrl))
139 return PTR_ERR(iphy->usb3_ctrl);
140
141 iphy->phy = devm_phy_create(dev, dev->of_node, &nsp_usb3_phy_ops);
142 if (IS_ERR(iphy->phy)) {
143 dev_err(dev, "failed to create PHY\n");
144 return PTR_ERR(iphy->phy);
145 }
146
147 phy_set_drvdata(iphy->phy, iphy);
148
149 provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
150 if (IS_ERR(provider)) {
151 dev_err(dev, "could not register PHY provider\n");
152 return PTR_ERR(provider);
153 }
154
155 return 0;
156}
157
158static const struct of_device_id nsp_usb3_phy_of_match[] = {
159 {.compatible = "brcm,nsp-usb3-phy",},
160 { /* sentinel */ }
161};
162
163static struct mdio_driver nsp_usb3_phy_driver = {
164 .mdiodrv = {
165 .driver = {
166 .name = "nsp-usb3-phy",
167 .of_match_table = nsp_usb3_phy_of_match,
168 },
169 },
170 .probe = nsp_usb3_phy_probe,
171};
172
173mdio_module_driver(nsp_usb3_phy_driver);
174
175MODULE_DESCRIPTION("Broadcom NSP USB3 PHY driver");
176MODULE_LICENSE("GPL v2");
177MODULE_AUTHOR("Yendapally Reddy Dhananjaya Reddy <yendapally.reddy@broadcom.com");
diff --git a/drivers/phy/phy-exynos-pcie.c b/drivers/phy/phy-exynos-pcie.c
index 4f60b83641d5..60baf25d98e2 100644
--- a/drivers/phy/phy-exynos-pcie.c
+++ b/drivers/phy/phy-exynos-pcie.c
@@ -254,8 +254,8 @@ static int exynos_pcie_phy_probe(struct platform_device *pdev)
254 254
255 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 255 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
256 exynos_phy->blk_base = devm_ioremap_resource(dev, res); 256 exynos_phy->blk_base = devm_ioremap_resource(dev, res);
257 if (IS_ERR(exynos_phy->phy_base)) 257 if (IS_ERR(exynos_phy->blk_base))
258 return PTR_ERR(exynos_phy->phy_base); 258 return PTR_ERR(exynos_phy->blk_base);
259 259
260 exynos_phy->drv_data = drv_data; 260 exynos_phy->drv_data = drv_data;
261 261
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 7671424d46cb..31a3a98d067c 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -667,11 +667,11 @@ static const char * const uart_ao_b_groups[] = {
667}; 667};
668 668
669static const char * const i2c_ao_groups[] = { 669static const char * const i2c_ao_groups[] = {
670 "i2c_sdk_ao", "i2c_sda_ao", 670 "i2c_sck_ao", "i2c_sda_ao",
671}; 671};
672 672
673static const char * const i2c_slave_ao_groups[] = { 673static const char * const i2c_slave_ao_groups[] = {
674 "i2c_slave_sdk_ao", "i2c_slave_sda_ao", 674 "i2c_slave_sck_ao", "i2c_slave_sda_ao",
675}; 675};
676 676
677static const char * const remote_input_ao_groups[] = { 677static const char * const remote_input_ao_groups[] = {
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 676efcc032d2..3ae8066bc127 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1285,6 +1285,22 @@ static void st_gpio_irq_unmask(struct irq_data *d)
1285 writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK); 1285 writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK);
1286} 1286}
1287 1287
1288static int st_gpio_irq_request_resources(struct irq_data *d)
1289{
1290 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
1291
1292 st_gpio_direction_input(gc, d->hwirq);
1293
1294 return gpiochip_lock_as_irq(gc, d->hwirq);
1295}
1296
1297static void st_gpio_irq_release_resources(struct irq_data *d)
1298{
1299 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
1300
1301 gpiochip_unlock_as_irq(gc, d->hwirq);
1302}
1303
1288static int st_gpio_irq_set_type(struct irq_data *d, unsigned type) 1304static int st_gpio_irq_set_type(struct irq_data *d, unsigned type)
1289{ 1305{
1290 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 1306 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1438,12 +1454,14 @@ static struct gpio_chip st_gpio_template = {
1438}; 1454};
1439 1455
1440static struct irq_chip st_gpio_irqchip = { 1456static struct irq_chip st_gpio_irqchip = {
1441 .name = "GPIO", 1457 .name = "GPIO",
1442 .irq_disable = st_gpio_irq_mask, 1458 .irq_request_resources = st_gpio_irq_request_resources,
1443 .irq_mask = st_gpio_irq_mask, 1459 .irq_release_resources = st_gpio_irq_release_resources,
1444 .irq_unmask = st_gpio_irq_unmask, 1460 .irq_disable = st_gpio_irq_mask,
1445 .irq_set_type = st_gpio_irq_set_type, 1461 .irq_mask = st_gpio_irq_mask,
1446 .flags = IRQCHIP_SKIP_SET_WAKE, 1462 .irq_unmask = st_gpio_irq_unmask,
1463 .irq_set_type = st_gpio_irq_set_type,
1464 .flags = IRQCHIP_SKIP_SET_WAKE,
1447}; 1465};
1448 1466
1449static int st_gpiolib_register_bank(struct st_pinctrl *info, 1467static int st_gpiolib_register_bank(struct st_pinctrl *info,
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
index b68ae424cee2..743d1f458205 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -405,6 +405,36 @@ static const struct msm_pingroup ipq4019_groups[] = {
405 PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 405 PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
406 PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 406 PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
407 PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), 407 PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
408 PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
409 PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
410 PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
411 PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
412 PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
413 PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
414 PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
415 PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
416 PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
417 PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
418 PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
419 PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
420 PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
421 PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
422 PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
423 PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
424 PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
425 PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
426 PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
427 PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
428 PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
429 PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
430 PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
431 PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
432 PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
433 PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
434 PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
435 PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
436 PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
437 PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
408}; 438};
409 439
410static const struct msm_pinctrl_soc_data ipq4019_pinctrl = { 440static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index c978be5eb9eb..273badd92561 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -609,10 +609,6 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
609 609
610 raw_spin_lock_irqsave(&pctrl->lock, flags); 610 raw_spin_lock_irqsave(&pctrl->lock, flags);
611 611
612 val = readl(pctrl->regs + g->intr_status_reg);
613 val &= ~BIT(g->intr_status_bit);
614 writel(val, pctrl->regs + g->intr_status_reg);
615
616 val = readl(pctrl->regs + g->intr_cfg_reg); 612 val = readl(pctrl->regs + g->intr_cfg_reg);
617 val |= BIT(g->intr_enable_bit); 613 val |= BIT(g->intr_enable_bit);
618 writel(val, pctrl->regs + g->intr_cfg_reg); 614 writel(val, pctrl->regs + g->intr_cfg_reg);
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index f9ddba7decc1..d7aa22cff480 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -988,9 +988,16 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
988 988
989 for (i = 0; i < ctrl->nr_ext_resources + 1; i++) { 989 for (i = 0; i < ctrl->nr_ext_resources + 1; i++) {
990 res = platform_get_resource(pdev, IORESOURCE_MEM, i); 990 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
991 virt_base[i] = devm_ioremap_resource(&pdev->dev, res); 991 if (!res) {
992 if (IS_ERR(virt_base[i])) 992 dev_err(&pdev->dev, "failed to get mem%d resource\n", i);
993 return ERR_CAST(virt_base[i]); 993 return ERR_PTR(-EINVAL);
994 }
995 virt_base[i] = devm_ioremap(&pdev->dev, res->start,
996 resource_size(res));
997 if (!virt_base[i]) {
998 dev_err(&pdev->dev, "failed to ioremap %pR\n", res);
999 return ERR_PTR(-EIO);
1000 }
994 } 1001 }
995 1002
996 bank = d->pin_banks; 1003 bank = d->pin_banks;
diff --git a/drivers/pinctrl/ti/Kconfig b/drivers/pinctrl/ti/Kconfig
index 815a88673d38..542077069391 100644
--- a/drivers/pinctrl/ti/Kconfig
+++ b/drivers/pinctrl/ti/Kconfig
@@ -1,6 +1,6 @@
1config PINCTRL_TI_IODELAY 1config PINCTRL_TI_IODELAY
2 tristate "TI IODelay Module pinconf driver" 2 tristate "TI IODelay Module pinconf driver"
3 depends on OF 3 depends on OF && (SOC_DRA7XX || COMPILE_TEST)
4 select GENERIC_PINCTRL_GROUPS 4 select GENERIC_PINCTRL_GROUPS
5 select GENERIC_PINMUX_FUNCTIONS 5 select GENERIC_PINMUX_FUNCTIONS
6 select GENERIC_PINCONF 6 select GENERIC_PINCONF
diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm.c
index 09b4df74291e..bb865695d7a6 100644
--- a/drivers/ptp/ptp_kvm.c
+++ b/drivers/ptp/ptp_kvm.c
@@ -193,10 +193,7 @@ static int __init ptp_kvm_init(void)
193 193
194 kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL); 194 kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL);
195 195
196 if (IS_ERR(kvm_ptp_clock.ptp_clock)) 196 return PTR_ERR_OR_ZERO(kvm_ptp_clock.ptp_clock);
197 return PTR_ERR(kvm_ptp_clock.ptp_clock);
198
199 return 0;
200} 197}
201 198
202module_init(ptp_kvm_init); 199module_init(ptp_kvm_init);
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 9d19b9a62011..315a4be8dc1e 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -37,8 +37,8 @@
37#include "tsi721.h" 37#include "tsi721.h"
38 38
39#ifdef DEBUG 39#ifdef DEBUG
40u32 dbg_level; 40u32 tsi_dbg_level;
41module_param(dbg_level, uint, S_IWUSR | S_IRUGO); 41module_param_named(dbg_level, tsi_dbg_level, uint, S_IWUSR | S_IRUGO);
42MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); 42MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
43#endif 43#endif
44 44
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 5941437cbdd1..957eadc58150 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -40,11 +40,11 @@ enum {
40}; 40};
41 41
42#ifdef DEBUG 42#ifdef DEBUG
43extern u32 dbg_level; 43extern u32 tsi_dbg_level;
44 44
45#define tsi_debug(level, dev, fmt, arg...) \ 45#define tsi_debug(level, dev, fmt, arg...) \
46 do { \ 46 do { \
47 if (DBG_##level & dbg_level) \ 47 if (DBG_##level & tsi_dbg_level) \
48 dev_dbg(dev, "%s: " fmt "\n", __func__, ##arg); \ 48 dev_dbg(dev, "%s: " fmt "\n", __func__, ##arg); \
49 } while (0) 49 } while (0)
50#else 50#else
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 40f1136f5568..058db724b5a2 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -572,6 +572,12 @@ int pkey_sec2protkey(u16 cardnr, u16 domain,
572 rc = -EIO; 572 rc = -EIO;
573 goto out; 573 goto out;
574 } 574 }
575 if (prepcblk->ccp_rscode != 0) {
576 DEBUG_WARN(
577 "pkey_sec2protkey unwrap secure key warning, card response %d/%d\n",
578 (int) prepcblk->ccp_rtcode,
579 (int) prepcblk->ccp_rscode);
580 }
575 581
576 /* process response cprb param block */ 582 /* process response cprb param block */
577 prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); 583 prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
@@ -761,9 +767,10 @@ out:
761} 767}
762 768
763/* 769/*
764 * Fetch just the mkvp value via query_crypto_facility from adapter. 770 * Fetch the current and old mkvp values via
771 * query_crypto_facility from adapter.
765 */ 772 */
766static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp) 773static int fetch_mkvp(u16 cardnr, u16 domain, u64 mkvp[2])
767{ 774{
768 int rc, found = 0; 775 int rc, found = 0;
769 size_t rlen, vlen; 776 size_t rlen, vlen;
@@ -779,9 +786,10 @@ static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp)
779 rc = query_crypto_facility(cardnr, domain, "STATICSA", 786 rc = query_crypto_facility(cardnr, domain, "STATICSA",
780 rarray, &rlen, varray, &vlen); 787 rarray, &rlen, varray, &vlen);
781 if (rc == 0 && rlen > 8*8 && vlen > 184+8) { 788 if (rc == 0 && rlen > 8*8 && vlen > 184+8) {
782 if (rarray[64] == '2') { 789 if (rarray[8*8] == '2') {
783 /* current master key state is valid */ 790 /* current master key state is valid */
784 *mkvp = *((u64 *)(varray + 184)); 791 mkvp[0] = *((u64 *)(varray + 184));
792 mkvp[1] = *((u64 *)(varray + 172));
785 found = 1; 793 found = 1;
786 } 794 }
787 } 795 }
@@ -796,14 +804,14 @@ struct mkvp_info {
796 struct list_head list; 804 struct list_head list;
797 u16 cardnr; 805 u16 cardnr;
798 u16 domain; 806 u16 domain;
799 u64 mkvp; 807 u64 mkvp[2];
800}; 808};
801 809
802/* a list with mkvp_info entries */ 810/* a list with mkvp_info entries */
803static LIST_HEAD(mkvp_list); 811static LIST_HEAD(mkvp_list);
804static DEFINE_SPINLOCK(mkvp_list_lock); 812static DEFINE_SPINLOCK(mkvp_list_lock);
805 813
806static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp) 814static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 mkvp[2])
807{ 815{
808 int rc = -ENOENT; 816 int rc = -ENOENT;
809 struct mkvp_info *ptr; 817 struct mkvp_info *ptr;
@@ -812,7 +820,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
812 list_for_each_entry(ptr, &mkvp_list, list) { 820 list_for_each_entry(ptr, &mkvp_list, list) {
813 if (ptr->cardnr == cardnr && 821 if (ptr->cardnr == cardnr &&
814 ptr->domain == domain) { 822 ptr->domain == domain) {
815 *mkvp = ptr->mkvp; 823 memcpy(mkvp, ptr->mkvp, 2 * sizeof(u64));
816 rc = 0; 824 rc = 0;
817 break; 825 break;
818 } 826 }
@@ -822,7 +830,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
822 return rc; 830 return rc;
823} 831}
824 832
825static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp) 833static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp[2])
826{ 834{
827 int found = 0; 835 int found = 0;
828 struct mkvp_info *ptr; 836 struct mkvp_info *ptr;
@@ -831,7 +839,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
831 list_for_each_entry(ptr, &mkvp_list, list) { 839 list_for_each_entry(ptr, &mkvp_list, list) {
832 if (ptr->cardnr == cardnr && 840 if (ptr->cardnr == cardnr &&
833 ptr->domain == domain) { 841 ptr->domain == domain) {
834 ptr->mkvp = mkvp; 842 memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
835 found = 1; 843 found = 1;
836 break; 844 break;
837 } 845 }
@@ -844,7 +852,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
844 } 852 }
845 ptr->cardnr = cardnr; 853 ptr->cardnr = cardnr;
846 ptr->domain = domain; 854 ptr->domain = domain;
847 ptr->mkvp = mkvp; 855 memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
848 list_add(&ptr->list, &mkvp_list); 856 list_add(&ptr->list, &mkvp_list);
849 } 857 }
850 spin_unlock_bh(&mkvp_list_lock); 858 spin_unlock_bh(&mkvp_list_lock);
@@ -888,8 +896,8 @@ int pkey_findcard(const struct pkey_seckey *seckey,
888 struct secaeskeytoken *t = (struct secaeskeytoken *) seckey; 896 struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
889 struct zcrypt_device_matrix *device_matrix; 897 struct zcrypt_device_matrix *device_matrix;
890 u16 card, dom; 898 u16 card, dom;
891 u64 mkvp; 899 u64 mkvp[2];
892 int i, rc; 900 int i, rc, oi = -1;
893 901
894 /* mkvp must not be zero */ 902 /* mkvp must not be zero */
895 if (t->mkvp == 0) 903 if (t->mkvp == 0)
@@ -910,14 +918,14 @@ int pkey_findcard(const struct pkey_seckey *seckey,
910 device_matrix->device[i].functions & 0x04) { 918 device_matrix->device[i].functions & 0x04) {
911 /* an enabled CCA Coprocessor card */ 919 /* an enabled CCA Coprocessor card */
912 /* try cached mkvp */ 920 /* try cached mkvp */
913 if (mkvp_cache_fetch(card, dom, &mkvp) == 0 && 921 if (mkvp_cache_fetch(card, dom, mkvp) == 0 &&
914 t->mkvp == mkvp) { 922 t->mkvp == mkvp[0]) {
915 if (!verify) 923 if (!verify)
916 break; 924 break;
917 /* verify: fetch mkvp from adapter */ 925 /* verify: fetch mkvp from adapter */
918 if (fetch_mkvp(card, dom, &mkvp) == 0) { 926 if (fetch_mkvp(card, dom, mkvp) == 0) {
919 mkvp_cache_update(card, dom, mkvp); 927 mkvp_cache_update(card, dom, mkvp);
920 if (t->mkvp == mkvp) 928 if (t->mkvp == mkvp[0])
921 break; 929 break;
922 } 930 }
923 } 931 }
@@ -936,14 +944,21 @@ int pkey_findcard(const struct pkey_seckey *seckey,
936 card = AP_QID_CARD(device_matrix->device[i].qid); 944 card = AP_QID_CARD(device_matrix->device[i].qid);
937 dom = AP_QID_QUEUE(device_matrix->device[i].qid); 945 dom = AP_QID_QUEUE(device_matrix->device[i].qid);
938 /* fresh fetch mkvp from adapter */ 946 /* fresh fetch mkvp from adapter */
939 if (fetch_mkvp(card, dom, &mkvp) == 0) { 947 if (fetch_mkvp(card, dom, mkvp) == 0) {
940 mkvp_cache_update(card, dom, mkvp); 948 mkvp_cache_update(card, dom, mkvp);
941 if (t->mkvp == mkvp) 949 if (t->mkvp == mkvp[0])
942 break; 950 break;
951 if (t->mkvp == mkvp[1] && oi < 0)
952 oi = i;
943 } 953 }
944 } 954 }
955 if (i >= MAX_ZDEV_ENTRIES && oi >= 0) {
956 /* old mkvp matched, use this card then */
957 card = AP_QID_CARD(device_matrix->device[oi].qid);
958 dom = AP_QID_QUEUE(device_matrix->device[oi].qid);
959 }
945 } 960 }
946 if (i < MAX_ZDEV_ENTRIES) { 961 if (i < MAX_ZDEV_ENTRIES || oi >= 0) {
947 if (pcardnr) 962 if (pcardnr)
948 *pcardnr = card; 963 *pcardnr = card;
949 if (pdomain) 964 if (pdomain)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index e7addea8741b..d9561e39c3b2 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -961,7 +961,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
961int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); 961int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
962int qeth_bridgeport_an_set(struct qeth_card *card, int enable); 962int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
963int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); 963int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
964int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int); 964int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
965 int extra_elems, int data_offset);
965int qeth_get_elements_for_frags(struct sk_buff *); 966int qeth_get_elements_for_frags(struct sk_buff *);
966int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, 967int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
967 struct sk_buff *, struct qeth_hdr *, int, int, int); 968 struct sk_buff *, struct qeth_hdr *, int, int, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 315d8a2db7c0..9a5f99ccb122 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3837,6 +3837,7 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
3837 * @card: qeth card structure, to check max. elems. 3837 * @card: qeth card structure, to check max. elems.
3838 * @skb: SKB address 3838 * @skb: SKB address
3839 * @extra_elems: extra elems needed, to check against max. 3839 * @extra_elems: extra elems needed, to check against max.
3840 * @data_offset: range starts at skb->data + data_offset
3840 * 3841 *
3841 * Returns the number of pages, and thus QDIO buffer elements, needed to cover 3842 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3842 * skb data, including linear part and fragments. Checks if the result plus 3843 * skb data, including linear part and fragments. Checks if the result plus
@@ -3844,10 +3845,10 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
3844 * Note: extra_elems is not included in the returned result. 3845 * Note: extra_elems is not included in the returned result.
3845 */ 3846 */
3846int qeth_get_elements_no(struct qeth_card *card, 3847int qeth_get_elements_no(struct qeth_card *card,
3847 struct sk_buff *skb, int extra_elems) 3848 struct sk_buff *skb, int extra_elems, int data_offset)
3848{ 3849{
3849 int elements = qeth_get_elements_for_range( 3850 int elements = qeth_get_elements_for_range(
3850 (addr_t)skb->data, 3851 (addr_t)skb->data + data_offset,
3851 (addr_t)skb->data + skb_headlen(skb)) + 3852 (addr_t)skb->data + skb_headlen(skb)) +
3852 qeth_get_elements_for_frags(skb); 3853 qeth_get_elements_for_frags(skb);
3853 3854
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index bea483307618..af4e6a639fec 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -849,7 +849,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
849 * chaining we can not send long frag lists 849 * chaining we can not send long frag lists
850 */ 850 */
851 if ((card->info.type != QETH_CARD_TYPE_IQD) && 851 if ((card->info.type != QETH_CARD_TYPE_IQD) &&
852 !qeth_get_elements_no(card, new_skb, 0)) { 852 !qeth_get_elements_no(card, new_skb, 0, 0)) {
853 int lin_rc = skb_linearize(new_skb); 853 int lin_rc = skb_linearize(new_skb);
854 854
855 if (card->options.performance_stats) { 855 if (card->options.performance_stats) {
@@ -894,7 +894,8 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
894 } 894 }
895 } 895 }
896 896
897 elements = qeth_get_elements_no(card, new_skb, elements_needed); 897 elements = qeth_get_elements_no(card, new_skb, elements_needed,
898 (data_offset > 0) ? data_offset : 0);
898 if (!elements) { 899 if (!elements) {
899 if (data_offset >= 0) 900 if (data_offset >= 0)
900 kmem_cache_free(qeth_core_header_cache, hdr); 901 kmem_cache_free(qeth_core_header_cache, hdr);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 06d0addcc058..653f0fb76573 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2609,17 +2609,13 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
2609 char daddr[16]; 2609 char daddr[16];
2610 struct af_iucv_trans_hdr *iucv_hdr; 2610 struct af_iucv_trans_hdr *iucv_hdr;
2611 2611
2612 skb_pull(skb, 14);
2613 card->dev->header_ops->create(skb, card->dev, 0,
2614 card->dev->dev_addr, card->dev->dev_addr,
2615 card->dev->addr_len);
2616 skb_pull(skb, 14);
2617 iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
2618 memset(hdr, 0, sizeof(struct qeth_hdr)); 2612 memset(hdr, 0, sizeof(struct qeth_hdr));
2619 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; 2613 hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
2620 hdr->hdr.l3.ext_flags = 0; 2614 hdr->hdr.l3.ext_flags = 0;
2621 hdr->hdr.l3.length = skb->len; 2615 hdr->hdr.l3.length = skb->len - ETH_HLEN;
2622 hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; 2616 hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
2617
2618 iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
2623 memset(daddr, 0, sizeof(daddr)); 2619 memset(daddr, 0, sizeof(daddr));
2624 daddr[0] = 0xfe; 2620 daddr[0] = 0xfe;
2625 daddr[1] = 0x80; 2621 daddr[1] = 0x80;
@@ -2823,10 +2819,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2823 if ((card->info.type == QETH_CARD_TYPE_IQD) && 2819 if ((card->info.type == QETH_CARD_TYPE_IQD) &&
2824 !skb_is_nonlinear(skb)) { 2820 !skb_is_nonlinear(skb)) {
2825 new_skb = skb; 2821 new_skb = skb;
2826 if (new_skb->protocol == ETH_P_AF_IUCV) 2822 data_offset = ETH_HLEN;
2827 data_offset = 0;
2828 else
2829 data_offset = ETH_HLEN;
2830 hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); 2823 hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
2831 if (!hdr) 2824 if (!hdr)
2832 goto tx_drop; 2825 goto tx_drop;
@@ -2867,7 +2860,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2867 */ 2860 */
2868 if ((card->info.type != QETH_CARD_TYPE_IQD) && 2861 if ((card->info.type != QETH_CARD_TYPE_IQD) &&
2869 ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) || 2862 ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
2870 (!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) { 2863 (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) {
2871 int lin_rc = skb_linearize(new_skb); 2864 int lin_rc = skb_linearize(new_skb);
2872 2865
2873 if (card->options.performance_stats) { 2866 if (card->options.performance_stats) {
@@ -2909,7 +2902,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2909 2902
2910 elements = use_tso ? 2903 elements = use_tso ?
2911 qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) : 2904 qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
2912 qeth_get_elements_no(card, new_skb, hdr_elements); 2905 qeth_get_elements_no(card, new_skb, hdr_elements,
2906 (data_offset > 0) ? data_offset : 0);
2913 if (!elements) { 2907 if (!elements) {
2914 if (data_offset >= 0) 2908 if (data_offset >= 0)
2915 kmem_cache_free(qeth_core_header_cache, hdr); 2909 kmem_cache_free(qeth_core_header_cache, hdr);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index a3ad04293487..c8172f16cf33 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2056,7 +2056,6 @@ static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
2056{ 2056{
2057 struct hw_fib **hw_fib_p; 2057 struct hw_fib **hw_fib_p;
2058 struct fib **fib_p; 2058 struct fib **fib_p;
2059 int rcode = 1;
2060 2059
2061 hw_fib_p = hw_fib_pool; 2060 hw_fib_p = hw_fib_pool;
2062 fib_p = fib_pool; 2061 fib_p = fib_pool;
@@ -2074,11 +2073,11 @@ static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
2074 } 2073 }
2075 } 2074 }
2076 2075
2076 /*
2077 * Get the actual number of allocated fibs
2078 */
2077 num = hw_fib_p - hw_fib_pool; 2079 num = hw_fib_p - hw_fib_pool;
2078 if (!num) 2080 return num;
2079 rcode = 0;
2080
2081 return rcode;
2082} 2081}
2083 2082
2084static void wakeup_fibctx_threads(struct aac_dev *dev, 2083static void wakeup_fibctx_threads(struct aac_dev *dev,
@@ -2186,7 +2185,6 @@ static void aac_process_events(struct aac_dev *dev)
2186 struct fib *fib; 2185 struct fib *fib;
2187 unsigned long flags; 2186 unsigned long flags;
2188 spinlock_t *t_lock; 2187 spinlock_t *t_lock;
2189 unsigned int rcode;
2190 2188
2191 t_lock = dev->queues->queue[HostNormCmdQueue].lock; 2189 t_lock = dev->queues->queue[HostNormCmdQueue].lock;
2192 spin_lock_irqsave(t_lock, flags); 2190 spin_lock_irqsave(t_lock, flags);
@@ -2269,8 +2267,8 @@ static void aac_process_events(struct aac_dev *dev)
2269 * Fill up fib pointer pools with actual fibs 2267 * Fill up fib pointer pools with actual fibs
2270 * and hw_fibs 2268 * and hw_fibs
2271 */ 2269 */
2272 rcode = fillup_pools(dev, hw_fib_pool, fib_pool, num); 2270 num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
2273 if (!rcode) 2271 if (!num)
2274 goto free_mem; 2272 goto free_mem;
2275 2273
2276 /* 2274 /*
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 48e200102221..c01b47e5b55a 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -113,7 +113,7 @@ struct alua_queue_data {
113#define ALUA_POLICY_SWITCH_ALL 1 113#define ALUA_POLICY_SWITCH_ALL 1
114 114
115static void alua_rtpg_work(struct work_struct *work); 115static void alua_rtpg_work(struct work_struct *work);
116static void alua_rtpg_queue(struct alua_port_group *pg, 116static bool alua_rtpg_queue(struct alua_port_group *pg,
117 struct scsi_device *sdev, 117 struct scsi_device *sdev,
118 struct alua_queue_data *qdata, bool force); 118 struct alua_queue_data *qdata, bool force);
119static void alua_check(struct scsi_device *sdev, bool force); 119static void alua_check(struct scsi_device *sdev, bool force);
@@ -862,7 +862,13 @@ static void alua_rtpg_work(struct work_struct *work)
862 kref_put(&pg->kref, release_port_group); 862 kref_put(&pg->kref, release_port_group);
863} 863}
864 864
865static void alua_rtpg_queue(struct alua_port_group *pg, 865/**
866 * alua_rtpg_queue() - cause RTPG to be submitted asynchronously
867 *
868 * Returns true if and only if alua_rtpg_work() will be called asynchronously.
869 * That function is responsible for calling @qdata->fn().
870 */
871static bool alua_rtpg_queue(struct alua_port_group *pg,
866 struct scsi_device *sdev, 872 struct scsi_device *sdev,
867 struct alua_queue_data *qdata, bool force) 873 struct alua_queue_data *qdata, bool force)
868{ 874{
@@ -870,8 +876,8 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
870 unsigned long flags; 876 unsigned long flags;
871 struct workqueue_struct *alua_wq = kaluad_wq; 877 struct workqueue_struct *alua_wq = kaluad_wq;
872 878
873 if (!pg) 879 if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
874 return; 880 return false;
875 881
876 spin_lock_irqsave(&pg->lock, flags); 882 spin_lock_irqsave(&pg->lock, flags);
877 if (qdata) { 883 if (qdata) {
@@ -884,14 +890,12 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
884 pg->flags |= ALUA_PG_RUN_RTPG; 890 pg->flags |= ALUA_PG_RUN_RTPG;
885 kref_get(&pg->kref); 891 kref_get(&pg->kref);
886 pg->rtpg_sdev = sdev; 892 pg->rtpg_sdev = sdev;
887 scsi_device_get(sdev);
888 start_queue = 1; 893 start_queue = 1;
889 } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) { 894 } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
890 pg->flags |= ALUA_PG_RUN_RTPG; 895 pg->flags |= ALUA_PG_RUN_RTPG;
891 /* Do not queue if the worker is already running */ 896 /* Do not queue if the worker is already running */
892 if (!(pg->flags & ALUA_PG_RUNNING)) { 897 if (!(pg->flags & ALUA_PG_RUNNING)) {
893 kref_get(&pg->kref); 898 kref_get(&pg->kref);
894 sdev = NULL;
895 start_queue = 1; 899 start_queue = 1;
896 } 900 }
897 } 901 }
@@ -900,13 +904,17 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
900 alua_wq = kaluad_sync_wq; 904 alua_wq = kaluad_sync_wq;
901 spin_unlock_irqrestore(&pg->lock, flags); 905 spin_unlock_irqrestore(&pg->lock, flags);
902 906
903 if (start_queue && 907 if (start_queue) {
904 !queue_delayed_work(alua_wq, &pg->rtpg_work, 908 if (queue_delayed_work(alua_wq, &pg->rtpg_work,
905 msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) { 909 msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
906 if (sdev) 910 sdev = NULL;
907 scsi_device_put(sdev); 911 else
908 kref_put(&pg->kref, release_port_group); 912 kref_put(&pg->kref, release_port_group);
909 } 913 }
914 if (sdev)
915 scsi_device_put(sdev);
916
917 return true;
910} 918}
911 919
912/* 920/*
@@ -1007,11 +1015,13 @@ static int alua_activate(struct scsi_device *sdev,
1007 mutex_unlock(&h->init_mutex); 1015 mutex_unlock(&h->init_mutex);
1008 goto out; 1016 goto out;
1009 } 1017 }
1010 fn = NULL;
1011 rcu_read_unlock(); 1018 rcu_read_unlock();
1012 mutex_unlock(&h->init_mutex); 1019 mutex_unlock(&h->init_mutex);
1013 1020
1014 alua_rtpg_queue(pg, sdev, qdata, true); 1021 if (alua_rtpg_queue(pg, sdev, qdata, true))
1022 fn = NULL;
1023 else
1024 err = SCSI_DH_DEV_OFFLINED;
1015 kref_put(&pg->kref, release_port_group); 1025 kref_put(&pg->kref, release_port_group);
1016out: 1026out:
1017 if (fn) 1027 if (fn)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 0d0be7754a65..9d659aaace15 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3885,6 +3885,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
3885 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) 3885 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3886 hpsa_get_ioaccel_status(h, scsi3addr, this_device); 3886 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3887 volume_offline = hpsa_volume_offline(h, scsi3addr); 3887 volume_offline = hpsa_volume_offline(h, scsi3addr);
3888 this_device->volume_offline = volume_offline;
3888 if (volume_offline == HPSA_LV_FAILED) { 3889 if (volume_offline == HPSA_LV_FAILED) {
3889 rc = HPSA_LV_FAILED; 3890 rc = HPSA_LV_FAILED;
3890 dev_err(&h->pdev->dev, 3891 dev_err(&h->pdev->dev,
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 763f012fdeca..87f5e694dbed 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -221,7 +221,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
221 task->num_scatter = qc->n_elem; 221 task->num_scatter = qc->n_elem;
222 } else { 222 } else {
223 for_each_sg(qc->sg, sg, qc->n_elem, si) 223 for_each_sg(qc->sg, sg, qc->n_elem, si)
224 xfer += sg->length; 224 xfer += sg_dma_len(sg);
225 225
226 task->total_xfer_len = xfer; 226 task->total_xfer_len = xfer;
227 task->num_scatter = si; 227 task->num_scatter = si;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index c05f56c3023f..7b7d314af0e0 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -44,14 +44,6 @@
44/* hbqinfo output buffer size */ 44/* hbqinfo output buffer size */
45#define LPFC_HBQINFO_SIZE 8192 45#define LPFC_HBQINFO_SIZE 8192
46 46
47enum {
48 DUMP_FCP,
49 DUMP_NVME,
50 DUMP_MBX,
51 DUMP_ELS,
52 DUMP_NVMELS,
53};
54
55/* nvmestat output buffer size */ 47/* nvmestat output buffer size */
56#define LPFC_NVMESTAT_SIZE 8192 48#define LPFC_NVMESTAT_SIZE 8192
57#define LPFC_NVMEKTIME_SIZE 8192 49#define LPFC_NVMEKTIME_SIZE 8192
@@ -283,8 +275,22 @@ struct lpfc_idiag {
283 struct lpfc_idiag_offset offset; 275 struct lpfc_idiag_offset offset;
284 void *ptr_private; 276 void *ptr_private;
285}; 277};
278
279#else
280
281#define lpfc_nvmeio_data(phba, fmt, arg...) \
282 no_printk(fmt, ##arg)
283
286#endif 284#endif
287 285
286enum {
287 DUMP_FCP,
288 DUMP_NVME,
289 DUMP_MBX,
290 DUMP_ELS,
291 DUMP_NVMELS,
292};
293
288/* Mask for discovery_trace */ 294/* Mask for discovery_trace */
289#define LPFC_DISC_TRC_ELS_CMD 0x1 /* Trace ELS commands */ 295#define LPFC_DISC_TRC_ELS_CMD 0x1 /* Trace ELS commands */
290#define LPFC_DISC_TRC_ELS_RSP 0x2 /* Trace ELS response */ 296#define LPFC_DISC_TRC_ELS_RSP 0x2 /* Trace ELS response */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index d9c61d030034..a5ca37e45fb6 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7968,7 +7968,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7968 did, vport->port_state, ndlp->nlp_flag); 7968 did, vport->port_state, ndlp->nlp_flag);
7969 7969
7970 phba->fc_stat.elsRcvPRLI++; 7970 phba->fc_stat.elsRcvPRLI++;
7971 if (vport->port_state < LPFC_DISC_AUTH) { 7971 if ((vport->port_state < LPFC_DISC_AUTH) &&
7972 (vport->fc_flag & FC_FABRIC)) {
7972 rjt_err = LSRJT_UNABLE_TPC; 7973 rjt_err = LSRJT_UNABLE_TPC;
7973 rjt_exp = LSEXP_NOTHING_MORE; 7974 rjt_exp = LSEXP_NOTHING_MORE;
7974 break; 7975 break;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 7ca868f394da..acba1b67e505 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -520,7 +520,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
520 struct lpfc_hba *phba = ctxp->phba; 520 struct lpfc_hba *phba = ctxp->phba;
521 struct lpfc_iocbq *nvmewqeq; 521 struct lpfc_iocbq *nvmewqeq;
522 unsigned long iflags; 522 unsigned long iflags;
523 int rc, id; 523 int rc;
524 524
525#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 525#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
526 if (phba->ktime_on) { 526 if (phba->ktime_on) {
@@ -530,7 +530,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
530 ctxp->ts_nvme_data = ktime_get_ns(); 530 ctxp->ts_nvme_data = ktime_get_ns();
531 } 531 }
532 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { 532 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
533 id = smp_processor_id(); 533 int id = smp_processor_id();
534 ctxp->cpu = id; 534 ctxp->cpu = id;
535 if (id < LPFC_CHECK_CPU_CNT) 535 if (id < LPFC_CHECK_CPU_CNT)
536 phba->cpucheck_xmt_io[id]++; 536 phba->cpucheck_xmt_io[id]++;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 8e3d92807cb8..92775a8b74b1 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2007,6 +2007,7 @@ static void qedi_remove(struct pci_dev *pdev)
2007 2007
2008static struct pci_device_id qedi_pci_tbl[] = { 2008static struct pci_device_id qedi_pci_tbl[] = {
2009 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) }, 2009 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
2010 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) },
2010 { 0 }, 2011 { 0 },
2011}; 2012};
2012MODULE_DEVICE_TABLE(pci, qedi_pci_tbl); 2013MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 41d5b09f7326..3e7011757c82 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1651,7 +1651,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1651 /* Don't abort commands in adapter during EEH 1651 /* Don't abort commands in adapter during EEH
1652 * recovery as it's not accessible/responding. 1652 * recovery as it's not accessible/responding.
1653 */ 1653 */
1654 if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) { 1654 if (GET_CMD_SP(sp) && !ha->flags.eeh_busy &&
1655 (sp->type == SRB_SCSI_CMD)) {
1655 /* Get a reference to the sp and drop the lock. 1656 /* Get a reference to the sp and drop the lock.
1656 * The reference ensures this sp->done() call 1657 * The reference ensures this sp->done() call
1657 * - and not the call in qla2xxx_eh_abort() - 1658 * - and not the call in qla2xxx_eh_abort() -
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 29b86505f796..225abaad4d1c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -996,6 +996,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
996 result = get_user(val, ip); 996 result = get_user(val, ip);
997 if (result) 997 if (result)
998 return result; 998 return result;
999 if (val > SG_MAX_CDB_SIZE)
1000 return -ENOMEM;
999 sfp->next_cmd_len = (val > 0) ? val : 0; 1001 sfp->next_cmd_len = (val > 0) ? val : 0;
1000 return 0; 1002 return 0;
1001 case SG_GET_VERSION_NUM: 1003 case SG_GET_VERSION_NUM:
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index a72a4ba78125..8e5e6c04c035 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -309,8 +309,8 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
309 309
310 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 310 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
311 mmio_base = devm_ioremap_resource(dev, mem_res); 311 mmio_base = devm_ioremap_resource(dev, mem_res);
312 if (IS_ERR(*(void **)&mmio_base)) { 312 if (IS_ERR(mmio_base)) {
313 err = PTR_ERR(*(void **)&mmio_base); 313 err = PTR_ERR(mmio_base);
314 goto out; 314 goto out;
315 } 315 }
316 316
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index e8c26e6e6237..096e95b911bd 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4662,8 +4662,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4662 } 4662 }
4663 if (ufshcd_is_clkscaling_supported(hba)) 4663 if (ufshcd_is_clkscaling_supported(hba))
4664 hba->clk_scaling.active_reqs--; 4664 hba->clk_scaling.active_reqs--;
4665 if (ufshcd_is_clkscaling_supported(hba))
4666 hba->clk_scaling.active_reqs--;
4667 } 4665 }
4668 4666
4669 /* clear corresponding bits of completed commands */ 4667 /* clear corresponding bits of completed commands */
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 91048eeca28b..69d0f430b2d1 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -107,8 +107,6 @@ struct cpufreq_cooling_device {
107}; 107};
108static DEFINE_IDA(cpufreq_ida); 108static DEFINE_IDA(cpufreq_ida);
109 109
110static unsigned int cpufreq_dev_count;
111
112static DEFINE_MUTEX(cooling_list_lock); 110static DEFINE_MUTEX(cooling_list_lock);
113static LIST_HEAD(cpufreq_dev_list); 111static LIST_HEAD(cpufreq_dev_list);
114 112
@@ -395,13 +393,20 @@ static int get_static_power(struct cpufreq_cooling_device *cpufreq_device,
395 393
396 opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz, 394 opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz,
397 true); 395 true);
396 if (IS_ERR(opp)) {
397 dev_warn_ratelimited(cpufreq_device->cpu_dev,
398 "Failed to find OPP for frequency %lu: %ld\n",
399 freq_hz, PTR_ERR(opp));
400 return -EINVAL;
401 }
402
398 voltage = dev_pm_opp_get_voltage(opp); 403 voltage = dev_pm_opp_get_voltage(opp);
399 dev_pm_opp_put(opp); 404 dev_pm_opp_put(opp);
400 405
401 if (voltage == 0) { 406 if (voltage == 0) {
402 dev_warn_ratelimited(cpufreq_device->cpu_dev, 407 dev_err_ratelimited(cpufreq_device->cpu_dev,
403 "Failed to get voltage for frequency %lu: %ld\n", 408 "Failed to get voltage for frequency %lu\n",
404 freq_hz, IS_ERR(opp) ? PTR_ERR(opp) : 0); 409 freq_hz);
405 return -EINVAL; 410 return -EINVAL;
406 } 411 }
407 412
@@ -693,9 +698,9 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
693 698
694 *state = cpufreq_cooling_get_level(cpu, target_freq); 699 *state = cpufreq_cooling_get_level(cpu, target_freq);
695 if (*state == THERMAL_CSTATE_INVALID) { 700 if (*state == THERMAL_CSTATE_INVALID) {
696 dev_warn_ratelimited(&cdev->device, 701 dev_err_ratelimited(&cdev->device,
697 "Failed to convert %dKHz for cpu %d into a cdev state\n", 702 "Failed to convert %dKHz for cpu %d into a cdev state\n",
698 target_freq, cpu); 703 target_freq, cpu);
699 return -EINVAL; 704 return -EINVAL;
700 } 705 }
701 706
@@ -771,6 +776,7 @@ __cpufreq_cooling_register(struct device_node *np,
771 unsigned int freq, i, num_cpus; 776 unsigned int freq, i, num_cpus;
772 int ret; 777 int ret;
773 struct thermal_cooling_device_ops *cooling_ops; 778 struct thermal_cooling_device_ops *cooling_ops;
779 bool first;
774 780
775 if (!alloc_cpumask_var(&temp_mask, GFP_KERNEL)) 781 if (!alloc_cpumask_var(&temp_mask, GFP_KERNEL))
776 return ERR_PTR(-ENOMEM); 782 return ERR_PTR(-ENOMEM);
@@ -874,13 +880,14 @@ __cpufreq_cooling_register(struct device_node *np,
874 cpufreq_dev->cool_dev = cool_dev; 880 cpufreq_dev->cool_dev = cool_dev;
875 881
876 mutex_lock(&cooling_list_lock); 882 mutex_lock(&cooling_list_lock);
883 /* Register the notifier for first cpufreq cooling device */
884 first = list_empty(&cpufreq_dev_list);
877 list_add(&cpufreq_dev->node, &cpufreq_dev_list); 885 list_add(&cpufreq_dev->node, &cpufreq_dev_list);
886 mutex_unlock(&cooling_list_lock);
878 887
879 /* Register the notifier for first cpufreq cooling device */ 888 if (first)
880 if (!cpufreq_dev_count++)
881 cpufreq_register_notifier(&thermal_cpufreq_notifier_block, 889 cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
882 CPUFREQ_POLICY_NOTIFIER); 890 CPUFREQ_POLICY_NOTIFIER);
883 mutex_unlock(&cooling_list_lock);
884 891
885 goto put_policy; 892 goto put_policy;
886 893
@@ -1021,6 +1028,7 @@ EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
1021void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) 1028void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
1022{ 1029{
1023 struct cpufreq_cooling_device *cpufreq_dev; 1030 struct cpufreq_cooling_device *cpufreq_dev;
1031 bool last;
1024 1032
1025 if (!cdev) 1033 if (!cdev)
1026 return; 1034 return;
@@ -1028,14 +1036,15 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
1028 cpufreq_dev = cdev->devdata; 1036 cpufreq_dev = cdev->devdata;
1029 1037
1030 mutex_lock(&cooling_list_lock); 1038 mutex_lock(&cooling_list_lock);
1039 list_del(&cpufreq_dev->node);
1031 /* Unregister the notifier for the last cpufreq cooling device */ 1040 /* Unregister the notifier for the last cpufreq cooling device */
1032 if (!--cpufreq_dev_count) 1041 last = list_empty(&cpufreq_dev_list);
1042 mutex_unlock(&cooling_list_lock);
1043
1044 if (last)
1033 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, 1045 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
1034 CPUFREQ_POLICY_NOTIFIER); 1046 CPUFREQ_POLICY_NOTIFIER);
1035 1047
1036 list_del(&cpufreq_dev->node);
1037 mutex_unlock(&cooling_list_lock);
1038
1039 thermal_cooling_device_unregister(cpufreq_dev->cool_dev); 1048 thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
1040 ida_simple_remove(&cpufreq_ida, cpufreq_dev->id); 1049 ida_simple_remove(&cpufreq_ida, cpufreq_dev->id);
1041 kfree(cpufreq_dev->dyn_power_table); 1050 kfree(cpufreq_dev->dyn_power_table);
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 7743a78d4723..4bf4ad58cffd 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -186,16 +186,22 @@ get_static_power(struct devfreq_cooling_device *dfc, unsigned long freq)
186 return 0; 186 return 0;
187 187
188 opp = dev_pm_opp_find_freq_exact(dev, freq, true); 188 opp = dev_pm_opp_find_freq_exact(dev, freq, true);
189 if (IS_ERR(opp) && (PTR_ERR(opp) == -ERANGE)) 189 if (PTR_ERR(opp) == -ERANGE)
190 opp = dev_pm_opp_find_freq_exact(dev, freq, false); 190 opp = dev_pm_opp_find_freq_exact(dev, freq, false);
191 191
192 if (IS_ERR(opp)) {
193 dev_err_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n",
194 freq, PTR_ERR(opp));
195 return 0;
196 }
197
192 voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */ 198 voltage = dev_pm_opp_get_voltage(opp) / 1000; /* mV */
193 dev_pm_opp_put(opp); 199 dev_pm_opp_put(opp);
194 200
195 if (voltage == 0) { 201 if (voltage == 0) {
196 dev_warn_ratelimited(dev, 202 dev_err_ratelimited(dev,
197 "Failed to get voltage for frequency %lu: %ld\n", 203 "Failed to get voltage for frequency %lu\n",
198 freq, IS_ERR(opp) ? PTR_ERR(opp) : 0); 204 freq);
199 return 0; 205 return 0;
200 } 206 }
201 207
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 6ee55a2d47bb..e65808c482f1 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -257,7 +257,7 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
257{ 257{
258 unsigned int baud = tty_termios_baud_rate(termios); 258 unsigned int baud = tty_termios_baud_rate(termios);
259 struct dw8250_data *d = p->private_data; 259 struct dw8250_data *d = p->private_data;
260 unsigned int rate; 260 long rate;
261 int ret; 261 int ret;
262 262
263 if (IS_ERR(d->clk) || !old) 263 if (IS_ERR(d->clk) || !old)
@@ -265,7 +265,12 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
265 265
266 clk_disable_unprepare(d->clk); 266 clk_disable_unprepare(d->clk);
267 rate = clk_round_rate(d->clk, baud * 16); 267 rate = clk_round_rate(d->clk, baud * 16);
268 ret = clk_set_rate(d->clk, rate); 268 if (rate < 0)
269 ret = rate;
270 else if (rate == 0)
271 ret = -ENOENT;
272 else
273 ret = clk_set_rate(d->clk, rate);
269 clk_prepare_enable(d->clk); 274 clk_prepare_enable(d->clk);
270 275
271 if (!ret) 276 if (!ret)
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index a65fb8197aec..0e3f529d50e9 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -128,9 +128,13 @@ config SERIAL_8250_PCI
128 by the parport_serial driver, enabled with CONFIG_PARPORT_SERIAL. 128 by the parport_serial driver, enabled with CONFIG_PARPORT_SERIAL.
129 129
130config SERIAL_8250_EXAR 130config SERIAL_8250_EXAR
131 tristate "8250/16550 PCI device support" 131 tristate "8250/16550 Exar/Commtech PCI/PCIe device support"
132 depends on SERIAL_8250_PCI 132 depends on SERIAL_8250_PCI
133 default SERIAL_8250 133 default SERIAL_8250
134 help
135 This builds support for XR17C1xx, XR17V3xx and some Commtech
136 422x PCIe serial cards that are not covered by the more generic
137 SERIAL_8250_PCI option.
134 138
135config SERIAL_8250_HP300 139config SERIAL_8250_HP300
136 tristate 140 tristate
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8789ea423ccf..b0a377725d63 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2373,7 +2373,7 @@ static int __init pl011_console_match(struct console *co, char *name, int idx,
2373 if (strcmp(name, "qdf2400_e44") == 0) { 2373 if (strcmp(name, "qdf2400_e44") == 0) {
2374 pr_info_once("UART: Working around QDF2400 SoC erratum 44"); 2374 pr_info_once("UART: Working around QDF2400 SoC erratum 44");
2375 qdf2400_e44_present = true; 2375 qdf2400_e44_present = true;
2376 } else if (strcmp(name, "pl011") != 0 || strcmp(name, "ttyAMA") != 0) { 2376 } else if (strcmp(name, "pl011") != 0) {
2377 return -ENODEV; 2377 return -ENODEV;
2378 } 2378 }
2379 2379
@@ -2452,18 +2452,37 @@ static void pl011_early_write(struct console *con, const char *s, unsigned n)
2452 uart_console_write(&dev->port, s, n, pl011_putc); 2452 uart_console_write(&dev->port, s, n, pl011_putc);
2453} 2453}
2454 2454
2455/*
2456 * On non-ACPI systems, earlycon is enabled by specifying
2457 * "earlycon=pl011,<address>" on the kernel command line.
2458 *
2459 * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
2460 * by specifying only "earlycon" on the command line. Because it requires
2461 * SPCR, the console starts after ACPI is parsed, which is later than a
2462 * traditional early console.
2463 *
2464 * To get the traditional early console that starts before ACPI is parsed,
2465 * specify the full "earlycon=pl011,<address>" option.
2466 */
2455static int __init pl011_early_console_setup(struct earlycon_device *device, 2467static int __init pl011_early_console_setup(struct earlycon_device *device,
2456 const char *opt) 2468 const char *opt)
2457{ 2469{
2458 if (!device->port.membase) 2470 if (!device->port.membase)
2459 return -ENODEV; 2471 return -ENODEV;
2460 2472
2461 device->con->write = qdf2400_e44_present ? 2473 /* On QDF2400 SOCs affected by Erratum 44, the "qdf2400_e44" must
2462 qdf2400_e44_early_write : pl011_early_write; 2474 * also be specified, e.g. "earlycon=pl011,<address>,qdf2400_e44".
2475 */
2476 if (!strcmp(device->options, "qdf2400_e44"))
2477 device->con->write = qdf2400_e44_early_write;
2478 else
2479 device->con->write = pl011_early_write;
2480
2463 return 0; 2481 return 0;
2464} 2482}
2465OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup); 2483OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
2466OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup); 2484OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
2485EARLYCON_DECLARE(qdf2400_e44, pl011_early_console_setup);
2467 2486
2468#else 2487#else
2469#define AMBA_CONSOLE NULL 2488#define AMBA_CONSOLE NULL
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index dcebb28ffbc4..1f50a83ef958 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1951,6 +1951,11 @@ static void atmel_flush_buffer(struct uart_port *port)
1951 atmel_uart_writel(port, ATMEL_PDC_TCR, 0); 1951 atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
1952 atmel_port->pdc_tx.ofs = 0; 1952 atmel_port->pdc_tx.ofs = 0;
1953 } 1953 }
1954 /*
1955 * in uart_flush_buffer(), the xmit circular buffer has just
1956 * been cleared, so we have to reset tx_len accordingly.
1957 */
1958 atmel_port->tx_len = 0;
1954} 1959}
1955 1960
1956/* 1961/*
@@ -2483,6 +2488,9 @@ static void atmel_console_write(struct console *co, const char *s, u_int count)
2483 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN; 2488 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
2484 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 2489 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
2485 2490
2491 /* Make sure that tx path is actually able to send characters */
2492 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
2493
2486 uart_console_write(port, s, count, atmel_console_putchar); 2494 uart_console_write(port, s, count, atmel_console_putchar);
2487 2495
2488 /* 2496 /*
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 6989b227d134..be94246b6fcc 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1088,7 +1088,7 @@ static void mxs_auart_settermios(struct uart_port *u,
1088 AUART_LINECTRL_BAUD_DIV_MAX); 1088 AUART_LINECTRL_BAUD_DIV_MAX);
1089 baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN; 1089 baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN;
1090 baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max); 1090 baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max);
1091 div = u->uartclk * 32 / baud; 1091 div = DIV_ROUND_CLOSEST(u->uartclk * 32, baud);
1092 } 1092 }
1093 1093
1094 ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F); 1094 ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 68947f6de5ad..b0500a0a87b8 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -271,10 +271,13 @@ const struct file_operations tty_ldiscs_proc_fops = {
271 271
272struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) 272struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
273{ 273{
274 struct tty_ldisc *ld;
275
274 ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT); 276 ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT);
275 if (!tty->ldisc) 277 ld = tty->ldisc;
278 if (!ld)
276 ldsem_up_read(&tty->ldisc_sem); 279 ldsem_up_read(&tty->ldisc_sem);
277 return tty->ldisc; 280 return ld;
278} 281}
279EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait); 282EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
280 283
@@ -489,41 +492,6 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
489} 492}
490 493
491/** 494/**
492 * tty_ldisc_restore - helper for tty ldisc change
493 * @tty: tty to recover
494 * @old: previous ldisc
495 *
496 * Restore the previous line discipline or N_TTY when a line discipline
497 * change fails due to an open error
498 */
499
500static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
501{
502 struct tty_ldisc *new_ldisc;
503 int r;
504
505 /* There is an outstanding reference here so this is safe */
506 old = tty_ldisc_get(tty, old->ops->num);
507 WARN_ON(IS_ERR(old));
508 tty->ldisc = old;
509 tty_set_termios_ldisc(tty, old->ops->num);
510 if (tty_ldisc_open(tty, old) < 0) {
511 tty_ldisc_put(old);
512 /* This driver is always present */
513 new_ldisc = tty_ldisc_get(tty, N_TTY);
514 if (IS_ERR(new_ldisc))
515 panic("n_tty: get");
516 tty->ldisc = new_ldisc;
517 tty_set_termios_ldisc(tty, N_TTY);
518 r = tty_ldisc_open(tty, new_ldisc);
519 if (r < 0)
520 panic("Couldn't open N_TTY ldisc for "
521 "%s --- error %d.",
522 tty_name(tty), r);
523 }
524}
525
526/**
527 * tty_set_ldisc - set line discipline 495 * tty_set_ldisc - set line discipline
528 * @tty: the terminal to set 496 * @tty: the terminal to set
529 * @ldisc: the line discipline 497 * @ldisc: the line discipline
@@ -536,12 +504,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
536 504
537int tty_set_ldisc(struct tty_struct *tty, int disc) 505int tty_set_ldisc(struct tty_struct *tty, int disc)
538{ 506{
539 int retval; 507 int retval, old_disc;
540 struct tty_ldisc *old_ldisc, *new_ldisc;
541
542 new_ldisc = tty_ldisc_get(tty, disc);
543 if (IS_ERR(new_ldisc))
544 return PTR_ERR(new_ldisc);
545 508
546 tty_lock(tty); 509 tty_lock(tty);
547 retval = tty_ldisc_lock(tty, 5 * HZ); 510 retval = tty_ldisc_lock(tty, 5 * HZ);
@@ -554,7 +517,8 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
554 } 517 }
555 518
556 /* Check the no-op case */ 519 /* Check the no-op case */
557 if (tty->ldisc->ops->num == disc) 520 old_disc = tty->ldisc->ops->num;
521 if (old_disc == disc)
558 goto out; 522 goto out;
559 523
560 if (test_bit(TTY_HUPPED, &tty->flags)) { 524 if (test_bit(TTY_HUPPED, &tty->flags)) {
@@ -563,34 +527,25 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
563 goto out; 527 goto out;
564 } 528 }
565 529
566 old_ldisc = tty->ldisc; 530 retval = tty_ldisc_reinit(tty, disc);
567
568 /* Shutdown the old discipline. */
569 tty_ldisc_close(tty, old_ldisc);
570
571 /* Now set up the new line discipline. */
572 tty->ldisc = new_ldisc;
573 tty_set_termios_ldisc(tty, disc);
574
575 retval = tty_ldisc_open(tty, new_ldisc);
576 if (retval < 0) { 531 if (retval < 0) {
577 /* Back to the old one or N_TTY if we can't */ 532 /* Back to the old one or N_TTY if we can't */
578 tty_ldisc_put(new_ldisc); 533 if (tty_ldisc_reinit(tty, old_disc) < 0) {
579 tty_ldisc_restore(tty, old_ldisc); 534 pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n");
535 if (tty_ldisc_reinit(tty, N_TTY) < 0) {
536 /* At this point we have tty->ldisc == NULL. */
537 pr_err("tty: reinitializing N_TTY failed\n");
538 }
539 }
580 } 540 }
581 541
582 if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) { 542 if (tty->ldisc && tty->ldisc->ops->num != old_disc &&
543 tty->ops->set_ldisc) {
583 down_read(&tty->termios_rwsem); 544 down_read(&tty->termios_rwsem);
584 tty->ops->set_ldisc(tty); 545 tty->ops->set_ldisc(tty);
585 up_read(&tty->termios_rwsem); 546 up_read(&tty->termios_rwsem);
586 } 547 }
587 548
588 /* At this point we hold a reference to the new ldisc and a
589 reference to the old ldisc, or we hold two references to
590 the old ldisc (if it was restored as part of error cleanup
591 above). In either case, releasing a single reference from
592 the old ldisc is correct. */
593 new_ldisc = old_ldisc;
594out: 549out:
595 tty_ldisc_unlock(tty); 550 tty_ldisc_unlock(tty);
596 551
@@ -598,7 +553,6 @@ out:
598 already running */ 553 already running */
599 tty_buffer_restart_work(tty->port); 554 tty_buffer_restart_work(tty->port);
600err: 555err:
601 tty_ldisc_put(new_ldisc); /* drop the extra reference */
602 tty_unlock(tty); 556 tty_unlock(tty);
603 return retval; 557 return retval;
604} 558}
@@ -659,10 +613,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
659 int retval; 613 int retval;
660 614
661 ld = tty_ldisc_get(tty, disc); 615 ld = tty_ldisc_get(tty, disc);
662 if (IS_ERR(ld)) { 616 if (IS_ERR(ld))
663 BUG_ON(disc == N_TTY);
664 return PTR_ERR(ld); 617 return PTR_ERR(ld);
665 }
666 618
667 if (tty->ldisc) { 619 if (tty->ldisc) {
668 tty_ldisc_close(tty, tty->ldisc); 620 tty_ldisc_close(tty, tty->ldisc);
@@ -674,10 +626,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
674 tty_set_termios_ldisc(tty, disc); 626 tty_set_termios_ldisc(tty, disc);
675 retval = tty_ldisc_open(tty, tty->ldisc); 627 retval = tty_ldisc_open(tty, tty->ldisc);
676 if (retval) { 628 if (retval) {
677 if (!WARN_ON(disc == N_TTY)) { 629 tty_ldisc_put(tty->ldisc);
678 tty_ldisc_put(tty->ldisc); 630 tty->ldisc = NULL;
679 tty->ldisc = NULL;
680 }
681 } 631 }
682 return retval; 632 return retval;
683} 633}
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index c5f0fc906136..8af8d9542663 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -28,7 +28,6 @@
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/sched/signal.h> 29#include <linux/sched/signal.h>
30#include <linux/sched/debug.h> 30#include <linux/sched/debug.h>
31#include <linux/sched/debug.h>
32#include <linux/tty.h> 31#include <linux/tty.h>
33#include <linux/tty_flip.h> 32#include <linux/tty_flip.h>
34#include <linux/mm.h> 33#include <linux/mm.h>
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index f03692ec5520..8fb309a0ff6b 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1381,7 +1381,7 @@ static int usbtmc_probe(struct usb_interface *intf,
1381 1381
1382 dev_dbg(&intf->dev, "%s called\n", __func__); 1382 dev_dbg(&intf->dev, "%s called\n", __func__);
1383 1383
1384 data = kmalloc(sizeof(*data), GFP_KERNEL); 1384 data = kzalloc(sizeof(*data), GFP_KERNEL);
1385 if (!data) 1385 if (!data)
1386 return -ENOMEM; 1386 return -ENOMEM;
1387 1387
@@ -1444,6 +1444,13 @@ static int usbtmc_probe(struct usb_interface *intf,
1444 break; 1444 break;
1445 } 1445 }
1446 } 1446 }
1447
1448 if (!data->bulk_out || !data->bulk_in) {
1449 dev_err(&intf->dev, "bulk endpoints not found\n");
1450 retcode = -ENODEV;
1451 goto err_put;
1452 }
1453
1447 /* Find int endpoint */ 1454 /* Find int endpoint */
1448 for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) { 1455 for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) {
1449 endpoint = &iface_desc->endpoint[n].desc; 1456 endpoint = &iface_desc->endpoint[n].desc;
@@ -1469,8 +1476,10 @@ static int usbtmc_probe(struct usb_interface *intf,
1469 if (data->iin_ep_present) { 1476 if (data->iin_ep_present) {
1470 /* allocate int urb */ 1477 /* allocate int urb */
1471 data->iin_urb = usb_alloc_urb(0, GFP_KERNEL); 1478 data->iin_urb = usb_alloc_urb(0, GFP_KERNEL);
1472 if (!data->iin_urb) 1479 if (!data->iin_urb) {
1480 retcode = -ENOMEM;
1473 goto error_register; 1481 goto error_register;
1482 }
1474 1483
1475 /* Protect interrupt in endpoint data until iin_urb is freed */ 1484 /* Protect interrupt in endpoint data until iin_urb is freed */
1476 kref_get(&data->kref); 1485 kref_get(&data->kref);
@@ -1478,8 +1487,10 @@ static int usbtmc_probe(struct usb_interface *intf,
1478 /* allocate buffer for interrupt in */ 1487 /* allocate buffer for interrupt in */
1479 data->iin_buffer = kmalloc(data->iin_wMaxPacketSize, 1488 data->iin_buffer = kmalloc(data->iin_wMaxPacketSize,
1480 GFP_KERNEL); 1489 GFP_KERNEL);
1481 if (!data->iin_buffer) 1490 if (!data->iin_buffer) {
1491 retcode = -ENOMEM;
1482 goto error_register; 1492 goto error_register;
1493 }
1483 1494
1484 /* fill interrupt urb */ 1495 /* fill interrupt urb */
1485 usb_fill_int_urb(data->iin_urb, data->usb_dev, 1496 usb_fill_int_urb(data->iin_urb, data->usb_dev,
@@ -1512,6 +1523,7 @@ error_register:
1512 sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp); 1523 sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
1513 sysfs_remove_group(&intf->dev.kobj, &data_attr_grp); 1524 sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
1514 usbtmc_free_int(data); 1525 usbtmc_free_int(data);
1526err_put:
1515 kref_put(&data->kref, usbtmc_delete); 1527 kref_put(&data->kref, usbtmc_delete);
1516 return retcode; 1528 return retcode;
1517} 1529}
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 25dbd8c7aec7..4be52c602e9b 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -280,6 +280,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
280 280
281 /* 281 /*
282 * Adjust bInterval for quirked devices. 282 * Adjust bInterval for quirked devices.
283 */
284 /*
285 * This quirk fixes bIntervals reported in ms.
286 */
287 if (to_usb_device(ddev)->quirks &
288 USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) {
289 n = clamp(fls(d->bInterval) + 3, i, j);
290 i = j = n;
291 }
292 /*
283 * This quirk fixes bIntervals reported in 293 * This quirk fixes bIntervals reported in
284 * linear microframes. 294 * linear microframes.
285 */ 295 */
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 612fab6e54fb..79bdca5cb9c7 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -520,8 +520,10 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
520 */ 520 */
521 tbuf_size = max_t(u16, sizeof(struct usb_hub_descriptor), wLength); 521 tbuf_size = max_t(u16, sizeof(struct usb_hub_descriptor), wLength);
522 tbuf = kzalloc(tbuf_size, GFP_KERNEL); 522 tbuf = kzalloc(tbuf_size, GFP_KERNEL);
523 if (!tbuf) 523 if (!tbuf) {
524 return -ENOMEM; 524 status = -ENOMEM;
525 goto err_alloc;
526 }
525 527
526 bufp = tbuf; 528 bufp = tbuf;
527 529
@@ -734,6 +736,7 @@ error:
734 } 736 }
735 737
736 kfree(tbuf); 738 kfree(tbuf);
739 err_alloc:
737 740
738 /* any errors get returned through the urb completion */ 741 /* any errors get returned through the urb completion */
739 spin_lock_irq(&hcd_root_hub_lock); 742 spin_lock_irq(&hcd_root_hub_lock);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index f0dd08198d74..5286bf67869a 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4275,7 +4275,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
4275 struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); 4275 struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
4276 int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN; 4276 int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN;
4277 4277
4278 if (!udev->usb2_hw_lpm_capable) 4278 if (!udev->usb2_hw_lpm_capable || !udev->bos)
4279 return; 4279 return;
4280 4280
4281 if (hub) 4281 if (hub)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 24f9f98968a5..96b21b0dac1e 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -170,6 +170,14 @@ static const struct usb_device_id usb_quirk_list[] = {
170 /* M-Systems Flash Disk Pioneers */ 170 /* M-Systems Flash Disk Pioneers */
171 { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, 171 { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
172 172
173 /* Baum Vario Ultra */
174 { USB_DEVICE(0x0904, 0x6101), .driver_info =
175 USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
176 { USB_DEVICE(0x0904, 0x6102), .driver_info =
177 USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
178 { USB_DEVICE(0x0904, 0x6103), .driver_info =
179 USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
180
173 /* Keytouch QWERTY Panel keyboard */ 181 /* Keytouch QWERTY Panel keyboard */
174 { USB_DEVICE(0x0926, 0x3333), .driver_info = 182 { USB_DEVICE(0x0926, 0x3333), .driver_info =
175 USB_QUIRK_CONFIG_INTF_STRINGS }, 183 USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0d75158e43fe..79e7a3480d51 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -171,6 +171,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
171 int status) 171 int status)
172{ 172{
173 struct dwc3 *dwc = dep->dwc; 173 struct dwc3 *dwc = dep->dwc;
174 unsigned int unmap_after_complete = false;
174 175
175 req->started = false; 176 req->started = false;
176 list_del(&req->list); 177 list_del(&req->list);
@@ -180,11 +181,19 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
180 if (req->request.status == -EINPROGRESS) 181 if (req->request.status == -EINPROGRESS)
181 req->request.status = status; 182 req->request.status = status;
182 183
183 if (dwc->ep0_bounced && dep->number <= 1) 184 /*
185 * NOTICE we don't want to unmap before calling ->complete() if we're
186 * dealing with a bounced ep0 request. If we unmap it here, we would end
187 * up overwritting the contents of req->buf and this could confuse the
188 * gadget driver.
189 */
190 if (dwc->ep0_bounced && dep->number <= 1) {
184 dwc->ep0_bounced = false; 191 dwc->ep0_bounced = false;
185 192 unmap_after_complete = true;
186 usb_gadget_unmap_request_by_dev(dwc->sysdev, 193 } else {
187 &req->request, req->direction); 194 usb_gadget_unmap_request_by_dev(dwc->sysdev,
195 &req->request, req->direction);
196 }
188 197
189 trace_dwc3_gadget_giveback(req); 198 trace_dwc3_gadget_giveback(req);
190 199
@@ -192,6 +201,10 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
192 usb_gadget_giveback_request(&dep->endpoint, &req->request); 201 usb_gadget_giveback_request(&dep->endpoint, &req->request);
193 spin_lock(&dwc->lock); 202 spin_lock(&dwc->lock);
194 203
204 if (unmap_after_complete)
205 usb_gadget_unmap_request_by_dev(dwc->sysdev,
206 &req->request, req->direction);
207
195 if (dep->number > 1) 208 if (dep->number > 1)
196 pm_runtime_put(dwc->dev); 209 pm_runtime_put(dwc->dev);
197} 210}
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index a30766ca4226..5e3828d9dac7 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -535,13 +535,15 @@ static int acm_notify_serial_state(struct f_acm *acm)
535{ 535{
536 struct usb_composite_dev *cdev = acm->port.func.config->cdev; 536 struct usb_composite_dev *cdev = acm->port.func.config->cdev;
537 int status; 537 int status;
538 __le16 serial_state;
538 539
539 spin_lock(&acm->lock); 540 spin_lock(&acm->lock);
540 if (acm->notify_req) { 541 if (acm->notify_req) {
541 dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n", 542 dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n",
542 acm->port_num, acm->serial_state); 543 acm->port_num, acm->serial_state);
544 serial_state = cpu_to_le16(acm->serial_state);
543 status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE, 545 status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
544 0, &acm->serial_state, sizeof(acm->serial_state)); 546 0, &serial_state, sizeof(acm->serial_state));
545 } else { 547 } else {
546 acm->pending = true; 548 acm->pending = true;
547 status = 0; 549 status = 0;
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 89b48bcc377a..5eea44823ca0 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -367,7 +367,7 @@ try_again:
367 count = min_t(unsigned, count, hidg->report_length); 367 count = min_t(unsigned, count, hidg->report_length);
368 368
369 spin_unlock_irqrestore(&hidg->write_spinlock, flags); 369 spin_unlock_irqrestore(&hidg->write_spinlock, flags);
370 status = copy_from_user(hidg->req->buf, buffer, count); 370 status = copy_from_user(req->buf, buffer, count);
371 371
372 if (status != 0) { 372 if (status != 0) {
373 ERROR(hidg->func.config->cdev, 373 ERROR(hidg->func.config->cdev,
@@ -378,9 +378,9 @@ try_again:
378 378
379 spin_lock_irqsave(&hidg->write_spinlock, flags); 379 spin_lock_irqsave(&hidg->write_spinlock, flags);
380 380
381 /* we our function has been disabled by host */ 381 /* when our function has been disabled by host */
382 if (!hidg->req) { 382 if (!hidg->req) {
383 free_ep_req(hidg->in_ep, hidg->req); 383 free_ep_req(hidg->in_ep, req);
384 /* 384 /*
385 * TODO 385 * TODO
386 * Should we fail with error here? 386 * Should we fail with error here?
@@ -394,7 +394,7 @@ try_again:
394 req->complete = f_hidg_req_complete; 394 req->complete = f_hidg_req_complete;
395 req->context = hidg; 395 req->context = hidg;
396 396
397 status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC); 397 status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
398 if (status < 0) { 398 if (status < 0) {
399 ERROR(hidg->func.config->cdev, 399 ERROR(hidg->func.config->cdev,
400 "usb_ep_queue error on int endpoint %zd\n", status); 400 "usb_ep_queue error on int endpoint %zd\n", status);
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 29b41b5dee04..f8a1881609a2 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -594,6 +594,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
594 opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U); 594 opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U);
595 opts->streaming_maxburst = min(opts->streaming_maxburst, 15U); 595 opts->streaming_maxburst = min(opts->streaming_maxburst, 15U);
596 596
597 /* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */
598 if (opts->streaming_maxburst &&
599 (opts->streaming_maxpacket % 1024) != 0) {
600 opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024);
601 INFO(cdev, "overriding streaming_maxpacket to %d\n",
602 opts->streaming_maxpacket);
603 }
604
597 /* Fill in the FS/HS/SS Video Streaming specific descriptors from the 605 /* Fill in the FS/HS/SS Video Streaming specific descriptors from the
598 * module parameters. 606 * module parameters.
599 * 607 *
@@ -625,7 +633,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
625 uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst; 633 uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst;
626 uvc_ss_streaming_comp.wBytesPerInterval = 634 uvc_ss_streaming_comp.wBytesPerInterval =
627 cpu_to_le16(max_packet_size * max_packet_mult * 635 cpu_to_le16(max_packet_size * max_packet_mult *
628 opts->streaming_maxburst); 636 (opts->streaming_maxburst + 1));
629 637
630 /* Allocate endpoints. */ 638 /* Allocate endpoints. */
631 ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep); 639 ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index a97da645c1b9..8a365aad66fe 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -1523,7 +1523,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1523 td = phys_to_virt(addr); 1523 td = phys_to_virt(addr);
1524 addr2 = (dma_addr_t)td->next; 1524 addr2 = (dma_addr_t)td->next;
1525 pci_pool_free(dev->data_requests, td, addr); 1525 pci_pool_free(dev->data_requests, td, addr);
1526 td->next = 0x00;
1527 addr = addr2; 1526 addr = addr2;
1528 } 1527 }
1529 req->chain_len = 1; 1528 req->chain_len = 1;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index bd02a6cd8e2c..6ed468fa7d5e 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -344,6 +344,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
344static struct platform_driver usb_xhci_driver = { 344static struct platform_driver usb_xhci_driver = {
345 .probe = xhci_plat_probe, 345 .probe = xhci_plat_probe,
346 .remove = xhci_plat_remove, 346 .remove = xhci_plat_remove,
347 .shutdown = usb_hcd_platform_shutdown,
347 .driver = { 348 .driver = {
348 .name = "xhci-hcd", 349 .name = "xhci-hcd",
349 .pm = DEV_PM_OPS, 350 .pm = DEV_PM_OPS,
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d9936c771fa0..a3309aa02993 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1989,6 +1989,9 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1989 case TRB_NORMAL: 1989 case TRB_NORMAL:
1990 td->urb->actual_length = requested - remaining; 1990 td->urb->actual_length = requested - remaining;
1991 goto finish_td; 1991 goto finish_td;
1992 case TRB_STATUS:
1993 td->urb->actual_length = requested;
1994 goto finish_td;
1992 default: 1995 default:
1993 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n", 1996 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
1994 trb_type); 1997 trb_type);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 50aee8b7718b..953fd8f62df0 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1477,6 +1477,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1477 struct xhci_ring *ep_ring; 1477 struct xhci_ring *ep_ring;
1478 struct xhci_virt_ep *ep; 1478 struct xhci_virt_ep *ep;
1479 struct xhci_command *command; 1479 struct xhci_command *command;
1480 struct xhci_virt_device *vdev;
1480 1481
1481 xhci = hcd_to_xhci(hcd); 1482 xhci = hcd_to_xhci(hcd);
1482 spin_lock_irqsave(&xhci->lock, flags); 1483 spin_lock_irqsave(&xhci->lock, flags);
@@ -1485,15 +1486,27 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1485 1486
1486 /* Make sure the URB hasn't completed or been unlinked already */ 1487 /* Make sure the URB hasn't completed or been unlinked already */
1487 ret = usb_hcd_check_unlink_urb(hcd, urb, status); 1488 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1488 if (ret || !urb->hcpriv) 1489 if (ret)
1489 goto done; 1490 goto done;
1491
1492 /* give back URB now if we can't queue it for cancel */
1493 vdev = xhci->devs[urb->dev->slot_id];
1494 urb_priv = urb->hcpriv;
1495 if (!vdev || !urb_priv)
1496 goto err_giveback;
1497
1498 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1499 ep = &vdev->eps[ep_index];
1500 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1501 if (!ep || !ep_ring)
1502 goto err_giveback;
1503
1490 temp = readl(&xhci->op_regs->status); 1504 temp = readl(&xhci->op_regs->status);
1491 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { 1505 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
1492 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 1506 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1493 "HW died, freeing TD."); 1507 "HW died, freeing TD.");
1494 urb_priv = urb->hcpriv;
1495 for (i = urb_priv->num_tds_done; 1508 for (i = urb_priv->num_tds_done;
1496 i < urb_priv->num_tds && xhci->devs[urb->dev->slot_id]; 1509 i < urb_priv->num_tds;
1497 i++) { 1510 i++) {
1498 td = &urb_priv->td[i]; 1511 td = &urb_priv->td[i];
1499 if (!list_empty(&td->td_list)) 1512 if (!list_empty(&td->td_list))
@@ -1501,23 +1514,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1501 if (!list_empty(&td->cancelled_td_list)) 1514 if (!list_empty(&td->cancelled_td_list))
1502 list_del_init(&td->cancelled_td_list); 1515 list_del_init(&td->cancelled_td_list);
1503 } 1516 }
1504 1517 goto err_giveback;
1505 usb_hcd_unlink_urb_from_ep(hcd, urb);
1506 spin_unlock_irqrestore(&xhci->lock, flags);
1507 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1508 xhci_urb_free_priv(urb_priv);
1509 return ret;
1510 } 1518 }
1511 1519
1512 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1513 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
1514 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1515 if (!ep_ring) {
1516 ret = -EINVAL;
1517 goto done;
1518 }
1519
1520 urb_priv = urb->hcpriv;
1521 i = urb_priv->num_tds_done; 1520 i = urb_priv->num_tds_done;
1522 if (i < urb_priv->num_tds) 1521 if (i < urb_priv->num_tds)
1523 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 1522 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -1554,6 +1553,14 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1554done: 1553done:
1555 spin_unlock_irqrestore(&xhci->lock, flags); 1554 spin_unlock_irqrestore(&xhci->lock, flags);
1556 return ret; 1555 return ret;
1556
1557err_giveback:
1558 if (urb_priv)
1559 xhci_urb_free_priv(urb_priv);
1560 usb_hcd_unlink_urb_from_ep(hcd, urb);
1561 spin_unlock_irqrestore(&xhci->lock, flags);
1562 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1563 return ret;
1557} 1564}
1558 1565
1559/* Drop an endpoint from a new bandwidth configuration for this device. 1566/* Drop an endpoint from a new bandwidth configuration for this device.
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 8b9fd7534f69..502bfe30a077 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -347,6 +347,9 @@ static int idmouse_probe(struct usb_interface *interface,
347 if (iface_desc->desc.bInterfaceClass != 0x0A) 347 if (iface_desc->desc.bInterfaceClass != 0x0A)
348 return -ENODEV; 348 return -ENODEV;
349 349
350 if (iface_desc->desc.bNumEndpoints < 1)
351 return -ENODEV;
352
350 /* allocate memory for our device state and initialize it */ 353 /* allocate memory for our device state and initialize it */
351 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 354 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
352 if (dev == NULL) 355 if (dev == NULL)
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index 77176511658f..d3d124753266 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -366,6 +366,10 @@ static int lvs_rh_probe(struct usb_interface *intf,
366 366
367 hdev = interface_to_usbdev(intf); 367 hdev = interface_to_usbdev(intf);
368 desc = intf->cur_altsetting; 368 desc = intf->cur_altsetting;
369
370 if (desc->desc.bNumEndpoints < 1)
371 return -ENODEV;
372
369 endpoint = &desc->endpoint[0].desc; 373 endpoint = &desc->endpoint[0].desc;
370 374
371 /* valid only for SS root hub */ 375 /* valid only for SS root hub */
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index e45a3a680db8..07014cad6dbe 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -709,6 +709,11 @@ static int uss720_probe(struct usb_interface *intf,
709 709
710 interface = intf->cur_altsetting; 710 interface = intf->cur_altsetting;
711 711
712 if (interface->desc.bNumEndpoints < 3) {
713 usb_put_dev(usbdev);
714 return -ENODEV;
715 }
716
712 /* 717 /*
713 * Allocate parport interface 718 * Allocate parport interface
714 */ 719 */
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index d8bae6ca8904..0c3664ab705e 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2490,8 +2490,8 @@ static int musb_remove(struct platform_device *pdev)
2490 musb_host_cleanup(musb); 2490 musb_host_cleanup(musb);
2491 musb_gadget_cleanup(musb); 2491 musb_gadget_cleanup(musb);
2492 2492
2493 spin_lock_irqsave(&musb->lock, flags);
2494 musb_platform_disable(musb); 2493 musb_platform_disable(musb);
2494 spin_lock_irqsave(&musb->lock, flags);
2495 musb_disable_interrupts(musb); 2495 musb_disable_interrupts(musb);
2496 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 2496 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2497 spin_unlock_irqrestore(&musb->lock, flags); 2497 spin_unlock_irqrestore(&musb->lock, flags);
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index 00e272bfee39..355655f8a3fb 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -238,8 +238,27 @@ static void cppi41_dma_callback(void *private_data,
238 transferred < cppi41_channel->packet_sz) 238 transferred < cppi41_channel->packet_sz)
239 cppi41_channel->prog_len = 0; 239 cppi41_channel->prog_len = 0;
240 240
241 if (cppi41_channel->is_tx) 241 if (cppi41_channel->is_tx) {
242 empty = musb_is_tx_fifo_empty(hw_ep); 242 u8 type;
243
244 if (is_host_active(musb))
245 type = hw_ep->out_qh->type;
246 else
247 type = hw_ep->ep_in.type;
248
249 if (type == USB_ENDPOINT_XFER_ISOC)
250 /*
251 * Don't use the early-TX-interrupt workaround below
252 * for Isoch transfter. Since Isoch are periodic
253 * transfer, by the time the next transfer is
254 * scheduled, the current one should be done already.
255 *
256 * This avoids audio playback underrun issue.
257 */
258 empty = true;
259 else
260 empty = musb_is_tx_fifo_empty(hw_ep);
261 }
243 262
244 if (!cppi41_channel->is_tx || empty) { 263 if (!cppi41_channel->is_tx || empty) {
245 cppi41_trans_done(cppi41_channel); 264 cppi41_trans_done(cppi41_channel);
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 7c047c4a2565..9c7ee26ef388 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -933,7 +933,7 @@ static int dsps_probe(struct platform_device *pdev)
933 if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) { 933 if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
934 ret = dsps_setup_optional_vbus_irq(pdev, glue); 934 ret = dsps_setup_optional_vbus_irq(pdev, glue);
935 if (ret) 935 if (ret)
936 return ret; 936 goto err_iounmap;
937 } 937 }
938 938
939 platform_set_drvdata(pdev, glue); 939 platform_set_drvdata(pdev, glue);
@@ -946,6 +946,8 @@ static int dsps_probe(struct platform_device *pdev)
946 946
947err: 947err:
948 pm_runtime_disable(&pdev->dev); 948 pm_runtime_disable(&pdev->dev);
949err_iounmap:
950 iounmap(glue->usbss_base);
949 return ret; 951 return ret;
950} 952}
951 953
@@ -956,6 +958,7 @@ static int dsps_remove(struct platform_device *pdev)
956 platform_device_unregister(glue->musb); 958 platform_device_unregister(glue->musb);
957 959
958 pm_runtime_disable(&pdev->dev); 960 pm_runtime_disable(&pdev->dev);
961 iounmap(glue->usbss_base);
959 962
960 return 0; 963 return 0;
961} 964}
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index b3b33cf7ddf6..f333024660b4 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -136,7 +136,7 @@ static int isp1301_remove(struct i2c_client *client)
136static struct i2c_driver isp1301_driver = { 136static struct i2c_driver isp1301_driver = {
137 .driver = { 137 .driver = {
138 .name = DRV_NAME, 138 .name = DRV_NAME,
139 .of_match_table = of_match_ptr(isp1301_of_match), 139 .of_match_table = isp1301_of_match,
140 }, 140 },
141 .probe = isp1301_probe, 141 .probe = isp1301_probe,
142 .remove = isp1301_remove, 142 .remove = isp1301_remove,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 42cc72e54c05..af67a0de6b5d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -233,6 +233,14 @@ static void option_instat_callback(struct urb *urb);
233#define BANDRICH_PRODUCT_1012 0x1012 233#define BANDRICH_PRODUCT_1012 0x1012
234 234
235#define QUALCOMM_VENDOR_ID 0x05C6 235#define QUALCOMM_VENDOR_ID 0x05C6
236/* These Quectel products use Qualcomm's vendor ID */
237#define QUECTEL_PRODUCT_UC20 0x9003
238#define QUECTEL_PRODUCT_UC15 0x9090
239
240#define QUECTEL_VENDOR_ID 0x2c7c
241/* These Quectel products use Quectel's vendor ID */
242#define QUECTEL_PRODUCT_EC21 0x0121
243#define QUECTEL_PRODUCT_EC25 0x0125
236 244
237#define CMOTECH_VENDOR_ID 0x16d8 245#define CMOTECH_VENDOR_ID 0x16d8
238#define CMOTECH_PRODUCT_6001 0x6001 246#define CMOTECH_PRODUCT_6001 0x6001
@@ -1161,7 +1169,14 @@ static const struct usb_device_id option_ids[] = {
1161 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 1169 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1162 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ 1170 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1163 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1171 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1164 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */ 1172 /* Quectel products using Qualcomm vendor ID */
1173 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
1174 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
1175 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1176 /* Quectel products using Quectel vendor ID */
1177 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
1178 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1179 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
1165 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1180 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1166 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1181 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1167 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1182 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 696458db7e3c..38b3f0d8cd58 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -169,6 +169,8 @@ static const struct usb_device_id id_table[] = {
169 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 169 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
170 {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ 170 {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
171 {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ 171 {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
172 {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
173 {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
172 174
173 /* Huawei devices */ 175 /* Huawei devices */
174 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ 176 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index 252c7bd9218a..d01496fd27fe 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -39,6 +39,9 @@ int wa_create(struct wahc *wa, struct usb_interface *iface,
39 int result; 39 int result;
40 struct device *dev = &iface->dev; 40 struct device *dev = &iface->dev;
41 41
42 if (iface->cur_altsetting->desc.bNumEndpoints < 3)
43 return -ENODEV;
44
42 result = wa_rpipes_create(wa); 45 result = wa_rpipes_create(wa);
43 if (result < 0) 46 if (result < 0)
44 goto error_rpipes_create; 47 goto error_rpipes_create;
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 0aa6c3c29d17..35a1e777b449 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -823,6 +823,9 @@ static int hwarc_probe(struct usb_interface *iface,
823 struct hwarc *hwarc; 823 struct hwarc *hwarc;
824 struct device *dev = &iface->dev; 824 struct device *dev = &iface->dev;
825 825
826 if (iface->cur_altsetting->desc.bNumEndpoints < 1)
827 return -ENODEV;
828
826 result = -ENOMEM; 829 result = -ENOMEM;
827 uwb_rc = uwb_rc_alloc(); 830 uwb_rc = uwb_rc_alloc();
828 if (uwb_rc == NULL) { 831 if (uwb_rc == NULL) {
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index 2bfc846ac071..6345e85822a4 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -362,6 +362,9 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
362 result); 362 result);
363 } 363 }
364 364
365 if (iface->cur_altsetting->desc.bNumEndpoints < 1)
366 return -ENODEV;
367
365 result = -ENOMEM; 368 result = -ENOMEM;
366 i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL); 369 i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
367 if (i1480_usb == NULL) { 370 if (i1480_usb == NULL) {
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 609f4f982c74..561084ab387f 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -403,6 +403,7 @@ static void vfio_group_release(struct kref *kref)
403 struct iommu_group *iommu_group = group->iommu_group; 403 struct iommu_group *iommu_group = group->iommu_group;
404 404
405 WARN_ON(!list_empty(&group->device_list)); 405 WARN_ON(!list_empty(&group->device_list));
406 WARN_ON(group->notifier.head);
406 407
407 list_for_each_entry_safe(unbound, tmp, 408 list_for_each_entry_safe(unbound, tmp,
408 &group->unbound_list, unbound_next) { 409 &group->unbound_list, unbound_next) {
@@ -1573,6 +1574,10 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
1573 return -EBUSY; 1574 return -EBUSY;
1574 } 1575 }
1575 1576
1577 /* Warn if previous user didn't cleanup and re-init to drop them */
1578 if (WARN_ON(group->notifier.head))
1579 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
1580
1576 filep->private_data = group; 1581 filep->private_data = group;
1577 1582
1578 return 0; 1583 return 0;
@@ -1584,9 +1589,6 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep)
1584 1589
1585 filep->private_data = NULL; 1590 filep->private_data = NULL;
1586 1591
1587 /* Any user didn't unregister? */
1588 WARN_ON(group->notifier.head);
1589
1590 vfio_group_try_dissolve_container(group); 1592 vfio_group_try_dissolve_container(group);
1591 1593
1592 atomic_dec(&group->opened); 1594 atomic_dec(&group->opened);
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index c26fa1f3ed86..32d2633092a3 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1182,8 +1182,7 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
1182 return NULL; 1182 return NULL;
1183} 1183}
1184 1184
1185static bool vfio_iommu_has_resv_msi(struct iommu_group *group, 1185static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
1186 phys_addr_t *base)
1187{ 1186{
1188 struct list_head group_resv_regions; 1187 struct list_head group_resv_regions;
1189 struct iommu_resv_region *region, *next; 1188 struct iommu_resv_region *region, *next;
@@ -1192,7 +1191,7 @@ static bool vfio_iommu_has_resv_msi(struct iommu_group *group,
1192 INIT_LIST_HEAD(&group_resv_regions); 1191 INIT_LIST_HEAD(&group_resv_regions);
1193 iommu_get_group_resv_regions(group, &group_resv_regions); 1192 iommu_get_group_resv_regions(group, &group_resv_regions);
1194 list_for_each_entry(region, &group_resv_regions, list) { 1193 list_for_each_entry(region, &group_resv_regions, list) {
1195 if (region->type & IOMMU_RESV_MSI) { 1194 if (region->type == IOMMU_RESV_SW_MSI) {
1196 *base = region->start; 1195 *base = region->start;
1197 ret = true; 1196 ret = true;
1198 goto out; 1197 goto out;
@@ -1283,7 +1282,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
1283 if (ret) 1282 if (ret)
1284 goto out_domain; 1283 goto out_domain;
1285 1284
1286 resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base); 1285 resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base);
1287 1286
1288 INIT_LIST_HEAD(&domain->group_list); 1287 INIT_LIST_HEAD(&domain->group_list);
1289 list_add(&group->next, &domain->group_list); 1288 list_add(&group->next, &domain->group_list);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 4e1191508228..34adf9b9c053 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -242,11 +242,11 @@ static inline void update_stat(struct virtio_balloon *vb, int idx,
242 242
243#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT) 243#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
244 244
245static void update_balloon_stats(struct virtio_balloon *vb) 245static unsigned int update_balloon_stats(struct virtio_balloon *vb)
246{ 246{
247 unsigned long events[NR_VM_EVENT_ITEMS]; 247 unsigned long events[NR_VM_EVENT_ITEMS];
248 struct sysinfo i; 248 struct sysinfo i;
249 int idx = 0; 249 unsigned int idx = 0;
250 long available; 250 long available;
251 251
252 all_vm_events(events); 252 all_vm_events(events);
@@ -254,18 +254,22 @@ static void update_balloon_stats(struct virtio_balloon *vb)
254 254
255 available = si_mem_available(); 255 available = si_mem_available();
256 256
257#ifdef CONFIG_VM_EVENT_COUNTERS
257 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN, 258 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
258 pages_to_bytes(events[PSWPIN])); 259 pages_to_bytes(events[PSWPIN]));
259 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT, 260 update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
260 pages_to_bytes(events[PSWPOUT])); 261 pages_to_bytes(events[PSWPOUT]));
261 update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]); 262 update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
262 update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]); 263 update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
264#endif
263 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE, 265 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
264 pages_to_bytes(i.freeram)); 266 pages_to_bytes(i.freeram));
265 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT, 267 update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
266 pages_to_bytes(i.totalram)); 268 pages_to_bytes(i.totalram));
267 update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL, 269 update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
268 pages_to_bytes(available)); 270 pages_to_bytes(available));
271
272 return idx;
269} 273}
270 274
271/* 275/*
@@ -291,14 +295,14 @@ static void stats_handle_request(struct virtio_balloon *vb)
291{ 295{
292 struct virtqueue *vq; 296 struct virtqueue *vq;
293 struct scatterlist sg; 297 struct scatterlist sg;
294 unsigned int len; 298 unsigned int len, num_stats;
295 299
296 update_balloon_stats(vb); 300 num_stats = update_balloon_stats(vb);
297 301
298 vq = vb->stats_vq; 302 vq = vb->stats_vq;
299 if (!virtqueue_get_buf(vq, &len)) 303 if (!virtqueue_get_buf(vq, &len))
300 return; 304 return;
301 sg_init_one(&sg, vb->stats, sizeof(vb->stats)); 305 sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
302 virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL); 306 virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
303 virtqueue_kick(vq); 307 virtqueue_kick(vq);
304} 308}
@@ -423,13 +427,16 @@ static int init_vqs(struct virtio_balloon *vb)
423 vb->deflate_vq = vqs[1]; 427 vb->deflate_vq = vqs[1];
424 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { 428 if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
425 struct scatterlist sg; 429 struct scatterlist sg;
430 unsigned int num_stats;
426 vb->stats_vq = vqs[2]; 431 vb->stats_vq = vqs[2];
427 432
428 /* 433 /*
429 * Prime this virtqueue with one buffer so the hypervisor can 434 * Prime this virtqueue with one buffer so the hypervisor can
430 * use it to signal us later (it can't be broken yet!). 435 * use it to signal us later (it can't be broken yet!).
431 */ 436 */
432 sg_init_one(&sg, vb->stats, sizeof vb->stats); 437 num_stats = update_balloon_stats(vb);
438
439 sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
433 if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL) 440 if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
434 < 0) 441 < 0)
435 BUG(); 442 BUG();
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index df548a6fb844..590534910dc6 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -147,7 +147,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
147{ 147{
148 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 148 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
149 const char *name = dev_name(&vp_dev->vdev.dev); 149 const char *name = dev_name(&vp_dev->vdev.dev);
150 int i, err = -ENOMEM, allocated_vectors, nvectors; 150 int i, j, err = -ENOMEM, allocated_vectors, nvectors;
151 unsigned flags = PCI_IRQ_MSIX; 151 unsigned flags = PCI_IRQ_MSIX;
152 bool shared = false; 152 bool shared = false;
153 u16 msix_vec; 153 u16 msix_vec;
@@ -212,7 +212,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
212 if (!vp_dev->msix_vector_map) 212 if (!vp_dev->msix_vector_map)
213 goto out_disable_config_irq; 213 goto out_disable_config_irq;
214 214
215 allocated_vectors = 1; /* vector 0 is the config interrupt */ 215 allocated_vectors = j = 1; /* vector 0 is the config interrupt */
216 for (i = 0; i < nvqs; ++i) { 216 for (i = 0; i < nvqs; ++i) {
217 if (!names[i]) { 217 if (!names[i]) {
218 vqs[i] = NULL; 218 vqs[i] = NULL;
@@ -236,18 +236,19 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
236 continue; 236 continue;
237 } 237 }
238 238
239 snprintf(vp_dev->msix_names[i + 1], 239 snprintf(vp_dev->msix_names[j],
240 sizeof(*vp_dev->msix_names), "%s-%s", 240 sizeof(*vp_dev->msix_names), "%s-%s",
241 dev_name(&vp_dev->vdev.dev), names[i]); 241 dev_name(&vp_dev->vdev.dev), names[i]);
242 err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec), 242 err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
243 vring_interrupt, IRQF_SHARED, 243 vring_interrupt, IRQF_SHARED,
244 vp_dev->msix_names[i + 1], vqs[i]); 244 vp_dev->msix_names[j], vqs[i]);
245 if (err) { 245 if (err) {
246 /* don't free this irq on error */ 246 /* don't free this irq on error */
247 vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; 247 vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
248 goto out_remove_vqs; 248 goto out_remove_vqs;
249 } 249 }
250 vp_dev->msix_vector_map[i] = msix_vec; 250 vp_dev->msix_vector_map[i] = msix_vec;
251 j++;
251 252
252 /* 253 /*
253 * Use a different vector for each queue if they are available, 254 * Use a different vector for each queue if they are available,
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 4ce10bcca18b..23e391d3ec01 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -27,10 +27,10 @@
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/syscore_ops.h>
30#include <linux/acpi.h> 31#include <linux/acpi.h>
31#include <acpi/processor.h> 32#include <acpi/processor.h>
32#include <xen/xen.h> 33#include <xen/xen.h>
33#include <xen/xen-ops.h>
34#include <xen/interface/platform.h> 34#include <xen/interface/platform.h>
35#include <asm/xen/hypercall.h> 35#include <asm/xen/hypercall.h>
36 36
@@ -408,7 +408,7 @@ static int check_acpi_ids(struct acpi_processor *pr_backup)
408 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, 408 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
409 ACPI_UINT32_MAX, 409 ACPI_UINT32_MAX,
410 read_acpi_id, NULL, NULL, NULL); 410 read_acpi_id, NULL, NULL, NULL);
411 acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL); 411 acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, read_acpi_id, NULL, NULL);
412 412
413upload: 413upload:
414 if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) { 414 if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) {
@@ -466,15 +466,33 @@ static int xen_upload_processor_pm_data(void)
466 return rc; 466 return rc;
467} 467}
468 468
469static int xen_acpi_processor_resume(struct notifier_block *nb, 469static void xen_acpi_processor_resume_worker(struct work_struct *dummy)
470 unsigned long action, void *data)
471{ 470{
471 int rc;
472
472 bitmap_zero(acpi_ids_done, nr_acpi_bits); 473 bitmap_zero(acpi_ids_done, nr_acpi_bits);
473 return xen_upload_processor_pm_data(); 474
475 rc = xen_upload_processor_pm_data();
476 if (rc != 0)
477 pr_info("ACPI data upload failed, error = %d\n", rc);
478}
479
480static void xen_acpi_processor_resume(void)
481{
482 static DECLARE_WORK(wq, xen_acpi_processor_resume_worker);
483
484 /*
485 * xen_upload_processor_pm_data() calls non-atomic code.
486 * However, the context for xen_acpi_processor_resume is syscore
487 * with only the boot CPU online and in an atomic context.
488 *
489 * So defer the upload for some point safer.
490 */
491 schedule_work(&wq);
474} 492}
475 493
476struct notifier_block xen_acpi_processor_resume_nb = { 494static struct syscore_ops xap_syscore_ops = {
477 .notifier_call = xen_acpi_processor_resume, 495 .resume = xen_acpi_processor_resume,
478}; 496};
479 497
480static int __init xen_acpi_processor_init(void) 498static int __init xen_acpi_processor_init(void)
@@ -527,7 +545,7 @@ static int __init xen_acpi_processor_init(void)
527 if (rc) 545 if (rc)
528 goto err_unregister; 546 goto err_unregister;
529 547
530 xen_resume_notifier_register(&xen_acpi_processor_resume_nb); 548 register_syscore_ops(&xap_syscore_ops);
531 549
532 return 0; 550 return 0;
533err_unregister: 551err_unregister:
@@ -544,7 +562,7 @@ static void __exit xen_acpi_processor_exit(void)
544{ 562{
545 int i; 563 int i;
546 564
547 xen_resume_notifier_unregister(&xen_acpi_processor_resume_nb); 565 unregister_syscore_ops(&xap_syscore_ops);
548 kfree(acpi_ids_done); 566 kfree(acpi_ids_done);
549 kfree(acpi_id_present); 567 kfree(acpi_id_present);
550 kfree(acpi_id_cst_present); 568 kfree(acpi_id_cst_present);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 29b7fc28c607..c4115901d906 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1259,7 +1259,7 @@ struct btrfs_root {
1259 atomic_t will_be_snapshoted; 1259 atomic_t will_be_snapshoted;
1260 1260
1261 /* For qgroup metadata space reserve */ 1261 /* For qgroup metadata space reserve */
1262 atomic_t qgroup_meta_rsv; 1262 atomic64_t qgroup_meta_rsv;
1263}; 1263};
1264static inline u32 btrfs_inode_sectorsize(const struct inode *inode) 1264static inline u32 btrfs_inode_sectorsize(const struct inode *inode)
1265{ 1265{
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 08b74daf35d0..eb1ee7b6f532 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1342,7 +1342,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
1342 atomic_set(&root->orphan_inodes, 0); 1342 atomic_set(&root->orphan_inodes, 0);
1343 atomic_set(&root->refs, 1); 1343 atomic_set(&root->refs, 1);
1344 atomic_set(&root->will_be_snapshoted, 0); 1344 atomic_set(&root->will_be_snapshoted, 0);
1345 atomic_set(&root->qgroup_meta_rsv, 0); 1345 atomic64_set(&root->qgroup_meta_rsv, 0);
1346 root->log_transid = 0; 1346 root->log_transid = 0;
1347 root->log_transid_committed = -1; 1347 root->log_transid_committed = -1;
1348 root->last_log_commit = 0; 1348 root->last_log_commit = 0;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8df797432740..27fdb250b446 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2584,26 +2584,36 @@ static void end_bio_extent_readpage(struct bio *bio)
2584 2584
2585 if (tree->ops) { 2585 if (tree->ops) {
2586 ret = tree->ops->readpage_io_failed_hook(page, mirror); 2586 ret = tree->ops->readpage_io_failed_hook(page, mirror);
2587 if (!ret && !bio->bi_error) 2587 if (ret == -EAGAIN) {
2588 uptodate = 1; 2588 /*
2589 } else { 2589 * Data inode's readpage_io_failed_hook() always
2590 * returns -EAGAIN.
2591 *
2592 * The generic bio_readpage_error handles errors
2593 * the following way: If possible, new read
2594 * requests are created and submitted and will
2595 * end up in end_bio_extent_readpage as well (if
2596 * we're lucky, not in the !uptodate case). In
2597 * that case it returns 0 and we just go on with
2598 * the next page in our bio. If it can't handle
2599 * the error it will return -EIO and we remain
2600 * responsible for that page.
2601 */
2602 ret = bio_readpage_error(bio, offset, page,
2603 start, end, mirror);
2604 if (ret == 0) {
2605 uptodate = !bio->bi_error;
2606 offset += len;
2607 continue;
2608 }
2609 }
2610
2590 /* 2611 /*
2591 * The generic bio_readpage_error handles errors the 2612 * metadata's readpage_io_failed_hook() always returns
2592 * following way: If possible, new read requests are 2613 * -EIO and fixes nothing. -EIO is also returned if
2593 * created and submitted and will end up in 2614 * data inode error could not be fixed.
2594 * end_bio_extent_readpage as well (if we're lucky, not
2595 * in the !uptodate case). In that case it returns 0 and
2596 * we just go on with the next page in our bio. If it
2597 * can't handle the error it will return -EIO and we
2598 * remain responsible for that page.
2599 */ 2615 */
2600 ret = bio_readpage_error(bio, offset, page, start, end, 2616 ASSERT(ret == -EIO);
2601 mirror);
2602 if (ret == 0) {
2603 uptodate = !bio->bi_error;
2604 offset += len;
2605 continue;
2606 }
2607 } 2617 }
2608readpage_ok: 2618readpage_ok:
2609 if (likely(uptodate)) { 2619 if (likely(uptodate)) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 231503935652..a18510be76c1 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -10523,9 +10523,9 @@ out_inode:
10523} 10523}
10524 10524
10525__attribute__((const)) 10525__attribute__((const))
10526static int dummy_readpage_io_failed_hook(struct page *page, int failed_mirror) 10526static int btrfs_readpage_io_failed_hook(struct page *page, int failed_mirror)
10527{ 10527{
10528 return 0; 10528 return -EAGAIN;
10529} 10529}
10530 10530
10531static const struct inode_operations btrfs_dir_inode_operations = { 10531static const struct inode_operations btrfs_dir_inode_operations = {
@@ -10570,7 +10570,7 @@ static const struct extent_io_ops btrfs_extent_io_ops = {
10570 .submit_bio_hook = btrfs_submit_bio_hook, 10570 .submit_bio_hook = btrfs_submit_bio_hook,
10571 .readpage_end_io_hook = btrfs_readpage_end_io_hook, 10571 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
10572 .merge_bio_hook = btrfs_merge_bio_hook, 10572 .merge_bio_hook = btrfs_merge_bio_hook,
10573 .readpage_io_failed_hook = dummy_readpage_io_failed_hook, 10573 .readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
10574 10574
10575 /* optional callbacks */ 10575 /* optional callbacks */
10576 .fill_delalloc = run_delalloc_range, 10576 .fill_delalloc = run_delalloc_range,
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index a5da750c1087..a59801dc2a34 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2948,20 +2948,20 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
2948 ret = qgroup_reserve(root, num_bytes, enforce); 2948 ret = qgroup_reserve(root, num_bytes, enforce);
2949 if (ret < 0) 2949 if (ret < 0)
2950 return ret; 2950 return ret;
2951 atomic_add(num_bytes, &root->qgroup_meta_rsv); 2951 atomic64_add(num_bytes, &root->qgroup_meta_rsv);
2952 return ret; 2952 return ret;
2953} 2953}
2954 2954
2955void btrfs_qgroup_free_meta_all(struct btrfs_root *root) 2955void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
2956{ 2956{
2957 struct btrfs_fs_info *fs_info = root->fs_info; 2957 struct btrfs_fs_info *fs_info = root->fs_info;
2958 int reserved; 2958 u64 reserved;
2959 2959
2960 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || 2960 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
2961 !is_fstree(root->objectid)) 2961 !is_fstree(root->objectid))
2962 return; 2962 return;
2963 2963
2964 reserved = atomic_xchg(&root->qgroup_meta_rsv, 0); 2964 reserved = atomic64_xchg(&root->qgroup_meta_rsv, 0);
2965 if (reserved == 0) 2965 if (reserved == 0)
2966 return; 2966 return;
2967 btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved); 2967 btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved);
@@ -2976,8 +2976,8 @@ void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
2976 return; 2976 return;
2977 2977
2978 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); 2978 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
2979 WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes); 2979 WARN_ON(atomic64_read(&root->qgroup_meta_rsv) < num_bytes);
2980 atomic_sub(num_bytes, &root->qgroup_meta_rsv); 2980 atomic64_sub(num_bytes, &root->qgroup_meta_rsv);
2981 btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes); 2981 btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes);
2982} 2982}
2983 2983
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 456c8901489b..a60d5bfb8a49 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -6305,8 +6305,13 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
6305 goto out; 6305 goto out;
6306 } 6306 }
6307 6307
6308 /*
6309 * Check that we don't overflow at later allocations, we request
6310 * clone_sources_count + 1 items, and compare to unsigned long inside
6311 * access_ok.
6312 */
6308 if (arg->clone_sources_count > 6313 if (arg->clone_sources_count >
6309 ULLONG_MAX / sizeof(*arg->clone_sources)) { 6314 ULONG_MAX / sizeof(struct clone_root) - 1) {
6310 ret = -EINVAL; 6315 ret = -EINVAL;
6311 goto out; 6316 goto out;
6312 } 6317 }
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 02a7a9286449..6d6eca394d4d 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -327,7 +327,6 @@ EXPORT_SYMBOL(fscrypt_decrypt_page);
327static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags) 327static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
328{ 328{
329 struct dentry *dir; 329 struct dentry *dir;
330 struct fscrypt_info *ci;
331 int dir_has_key, cached_with_key; 330 int dir_has_key, cached_with_key;
332 331
333 if (flags & LOOKUP_RCU) 332 if (flags & LOOKUP_RCU)
@@ -339,18 +338,11 @@ static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
339 return 0; 338 return 0;
340 } 339 }
341 340
342 ci = d_inode(dir)->i_crypt_info;
343 if (ci && ci->ci_keyring_key &&
344 (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
345 (1 << KEY_FLAG_REVOKED) |
346 (1 << KEY_FLAG_DEAD))))
347 ci = NULL;
348
349 /* this should eventually be an flag in d_flags */ 341 /* this should eventually be an flag in d_flags */
350 spin_lock(&dentry->d_lock); 342 spin_lock(&dentry->d_lock);
351 cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY; 343 cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
352 spin_unlock(&dentry->d_lock); 344 spin_unlock(&dentry->d_lock);
353 dir_has_key = (ci != NULL); 345 dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
354 dput(dir); 346 dput(dir);
355 347
356 /* 348 /*
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 13052b85c393..37b49894c762 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -350,7 +350,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
350 fname->disk_name.len = iname->len; 350 fname->disk_name.len = iname->len;
351 return 0; 351 return 0;
352 } 352 }
353 ret = fscrypt_get_crypt_info(dir); 353 ret = fscrypt_get_encryption_info(dir);
354 if (ret && ret != -EOPNOTSUPP) 354 if (ret && ret != -EOPNOTSUPP)
355 return ret; 355 return ret;
356 356
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index fdbb8af32eaf..e39696e64494 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -67,7 +67,6 @@ struct fscrypt_info {
67 u8 ci_filename_mode; 67 u8 ci_filename_mode;
68 u8 ci_flags; 68 u8 ci_flags;
69 struct crypto_skcipher *ci_ctfm; 69 struct crypto_skcipher *ci_ctfm;
70 struct key *ci_keyring_key;
71 u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE]; 70 u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
72}; 71};
73 72
@@ -101,7 +100,4 @@ extern int fscrypt_do_page_crypto(const struct inode *inode,
101extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx, 100extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
102 gfp_t gfp_flags); 101 gfp_t gfp_flags);
103 102
104/* keyinfo.c */
105extern int fscrypt_get_crypt_info(struct inode *);
106
107#endif /* _FSCRYPT_PRIVATE_H */ 103#endif /* _FSCRYPT_PRIVATE_H */
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index d5d896fa5a71..8cdfddce2b34 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -95,6 +95,7 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
95 kfree(description); 95 kfree(description);
96 if (IS_ERR(keyring_key)) 96 if (IS_ERR(keyring_key))
97 return PTR_ERR(keyring_key); 97 return PTR_ERR(keyring_key);
98 down_read(&keyring_key->sem);
98 99
99 if (keyring_key->type != &key_type_logon) { 100 if (keyring_key->type != &key_type_logon) {
100 printk_once(KERN_WARNING 101 printk_once(KERN_WARNING
@@ -102,11 +103,9 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
102 res = -ENOKEY; 103 res = -ENOKEY;
103 goto out; 104 goto out;
104 } 105 }
105 down_read(&keyring_key->sem);
106 ukp = user_key_payload_locked(keyring_key); 106 ukp = user_key_payload_locked(keyring_key);
107 if (ukp->datalen != sizeof(struct fscrypt_key)) { 107 if (ukp->datalen != sizeof(struct fscrypt_key)) {
108 res = -EINVAL; 108 res = -EINVAL;
109 up_read(&keyring_key->sem);
110 goto out; 109 goto out;
111 } 110 }
112 master_key = (struct fscrypt_key *)ukp->data; 111 master_key = (struct fscrypt_key *)ukp->data;
@@ -117,17 +116,11 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
117 "%s: key size incorrect: %d\n", 116 "%s: key size incorrect: %d\n",
118 __func__, master_key->size); 117 __func__, master_key->size);
119 res = -ENOKEY; 118 res = -ENOKEY;
120 up_read(&keyring_key->sem);
121 goto out; 119 goto out;
122 } 120 }
123 res = derive_key_aes(ctx->nonce, master_key->raw, raw_key); 121 res = derive_key_aes(ctx->nonce, master_key->raw, raw_key);
124 up_read(&keyring_key->sem);
125 if (res)
126 goto out;
127
128 crypt_info->ci_keyring_key = keyring_key;
129 return 0;
130out: 122out:
123 up_read(&keyring_key->sem);
131 key_put(keyring_key); 124 key_put(keyring_key);
132 return res; 125 return res;
133} 126}
@@ -169,12 +162,11 @@ static void put_crypt_info(struct fscrypt_info *ci)
169 if (!ci) 162 if (!ci)
170 return; 163 return;
171 164
172 key_put(ci->ci_keyring_key);
173 crypto_free_skcipher(ci->ci_ctfm); 165 crypto_free_skcipher(ci->ci_ctfm);
174 kmem_cache_free(fscrypt_info_cachep, ci); 166 kmem_cache_free(fscrypt_info_cachep, ci);
175} 167}
176 168
177int fscrypt_get_crypt_info(struct inode *inode) 169int fscrypt_get_encryption_info(struct inode *inode)
178{ 170{
179 struct fscrypt_info *crypt_info; 171 struct fscrypt_info *crypt_info;
180 struct fscrypt_context ctx; 172 struct fscrypt_context ctx;
@@ -184,21 +176,15 @@ int fscrypt_get_crypt_info(struct inode *inode)
184 u8 *raw_key = NULL; 176 u8 *raw_key = NULL;
185 int res; 177 int res;
186 178
179 if (inode->i_crypt_info)
180 return 0;
181
187 res = fscrypt_initialize(inode->i_sb->s_cop->flags); 182 res = fscrypt_initialize(inode->i_sb->s_cop->flags);
188 if (res) 183 if (res)
189 return res; 184 return res;
190 185
191 if (!inode->i_sb->s_cop->get_context) 186 if (!inode->i_sb->s_cop->get_context)
192 return -EOPNOTSUPP; 187 return -EOPNOTSUPP;
193retry:
194 crypt_info = ACCESS_ONCE(inode->i_crypt_info);
195 if (crypt_info) {
196 if (!crypt_info->ci_keyring_key ||
197 key_validate(crypt_info->ci_keyring_key) == 0)
198 return 0;
199 fscrypt_put_encryption_info(inode, crypt_info);
200 goto retry;
201 }
202 188
203 res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); 189 res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
204 if (res < 0) { 190 if (res < 0) {
@@ -229,7 +215,6 @@ retry:
229 crypt_info->ci_data_mode = ctx.contents_encryption_mode; 215 crypt_info->ci_data_mode = ctx.contents_encryption_mode;
230 crypt_info->ci_filename_mode = ctx.filenames_encryption_mode; 216 crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
231 crypt_info->ci_ctfm = NULL; 217 crypt_info->ci_ctfm = NULL;
232 crypt_info->ci_keyring_key = NULL;
233 memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor, 218 memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
234 sizeof(crypt_info->ci_master_key)); 219 sizeof(crypt_info->ci_master_key));
235 220
@@ -273,14 +258,8 @@ retry:
273 if (res) 258 if (res)
274 goto out; 259 goto out;
275 260
276 kzfree(raw_key); 261 if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
277 raw_key = NULL; 262 crypt_info = NULL;
278 if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
279 put_crypt_info(crypt_info);
280 goto retry;
281 }
282 return 0;
283
284out: 263out:
285 if (res == -ENOKEY) 264 if (res == -ENOKEY)
286 res = 0; 265 res = 0;
@@ -288,6 +267,7 @@ out:
288 kzfree(raw_key); 267 kzfree(raw_key);
289 return res; 268 return res;
290} 269}
270EXPORT_SYMBOL(fscrypt_get_encryption_info);
291 271
292void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci) 272void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
293{ 273{
@@ -305,17 +285,3 @@ void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
305 put_crypt_info(ci); 285 put_crypt_info(ci);
306} 286}
307EXPORT_SYMBOL(fscrypt_put_encryption_info); 287EXPORT_SYMBOL(fscrypt_put_encryption_info);
308
309int fscrypt_get_encryption_info(struct inode *inode)
310{
311 struct fscrypt_info *ci = inode->i_crypt_info;
312
313 if (!ci ||
314 (ci->ci_keyring_key &&
315 (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
316 (1 << KEY_FLAG_REVOKED) |
317 (1 << KEY_FLAG_DEAD)))))
318 return fscrypt_get_crypt_info(inode);
319 return 0;
320}
321EXPORT_SYMBOL(fscrypt_get_encryption_info);
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 14b76da71269..4908906d54d5 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -33,17 +33,10 @@ static int create_encryption_context_from_policy(struct inode *inode,
33 const struct fscrypt_policy *policy) 33 const struct fscrypt_policy *policy)
34{ 34{
35 struct fscrypt_context ctx; 35 struct fscrypt_context ctx;
36 int res;
37 36
38 if (!inode->i_sb->s_cop->set_context) 37 if (!inode->i_sb->s_cop->set_context)
39 return -EOPNOTSUPP; 38 return -EOPNOTSUPP;
40 39
41 if (inode->i_sb->s_cop->prepare_context) {
42 res = inode->i_sb->s_cop->prepare_context(inode);
43 if (res)
44 return res;
45 }
46
47 ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; 40 ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
48 memcpy(ctx.master_key_descriptor, policy->master_key_descriptor, 41 memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
49 FS_KEY_DESCRIPTOR_SIZE); 42 FS_KEY_DESCRIPTOR_SIZE);
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 30a9f210d1e3..375fb1c05d49 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1169,10 +1169,9 @@ static int ext4_finish_convert_inline_dir(handle_t *handle,
1169 set_buffer_uptodate(dir_block); 1169 set_buffer_uptodate(dir_block);
1170 err = ext4_handle_dirty_dirent_node(handle, inode, dir_block); 1170 err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
1171 if (err) 1171 if (err)
1172 goto out; 1172 return err;
1173 set_buffer_verified(dir_block); 1173 set_buffer_verified(dir_block);
1174out: 1174 return ext4_mark_inode_dirty(handle, inode);
1175 return err;
1176} 1175}
1177 1176
1178static int ext4_convert_inline_data_nolock(handle_t *handle, 1177static int ext4_convert_inline_data_nolock(handle_t *handle,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7385e6a6b6cb..4247d8d25687 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5400,7 +5400,7 @@ int ext4_getattr(const struct path *path, struct kstat *stat,
5400 * If there is inline data in the inode, the inode will normally not 5400 * If there is inline data in the inode, the inode will normally not
5401 * have data blocks allocated (it may have an external xattr block). 5401 * have data blocks allocated (it may have an external xattr block).
5402 * Report at least one sector for such files, so tools like tar, rsync, 5402 * Report at least one sector for such files, so tools like tar, rsync,
5403 * others doen't incorrectly think the file is completely sparse. 5403 * others don't incorrectly think the file is completely sparse.
5404 */ 5404 */
5405 if (unlikely(ext4_has_inline_data(inode))) 5405 if (unlikely(ext4_has_inline_data(inode)))
5406 stat->blocks += (stat->size + 511) >> 9; 5406 stat->blocks += (stat->size + 511) >> 9;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 578f8c33fb44..c992ef2c2f94 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -511,7 +511,7 @@ mext_check_arguments(struct inode *orig_inode,
511 if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) != 511 if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) !=
512 (donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) { 512 (donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) {
513 ext4_debug("ext4 move extent: orig and donor's start " 513 ext4_debug("ext4 move extent: orig and donor's start "
514 "offset are not alligned [ino:orig %lu, donor %lu]\n", 514 "offsets are not aligned [ino:orig %lu, donor %lu]\n",
515 orig_inode->i_ino, donor_inode->i_ino); 515 orig_inode->i_ino, donor_inode->i_ino);
516 return -EINVAL; 516 return -EINVAL;
517 } 517 }
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 2e03a0a88d92..a9448db1cf7e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1120,17 +1120,16 @@ static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
1120 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len); 1120 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
1121} 1121}
1122 1122
1123static int ext4_prepare_context(struct inode *inode)
1124{
1125 return ext4_convert_inline_data(inode);
1126}
1127
1128static int ext4_set_context(struct inode *inode, const void *ctx, size_t len, 1123static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
1129 void *fs_data) 1124 void *fs_data)
1130{ 1125{
1131 handle_t *handle = fs_data; 1126 handle_t *handle = fs_data;
1132 int res, res2, retries = 0; 1127 int res, res2, retries = 0;
1133 1128
1129 res = ext4_convert_inline_data(inode);
1130 if (res)
1131 return res;
1132
1134 /* 1133 /*
1135 * If a journal handle was specified, then the encryption context is 1134 * If a journal handle was specified, then the encryption context is
1136 * being set on a new inode via inheritance and is part of a larger 1135 * being set on a new inode via inheritance and is part of a larger
@@ -1196,7 +1195,6 @@ static unsigned ext4_max_namelen(struct inode *inode)
1196static const struct fscrypt_operations ext4_cryptops = { 1195static const struct fscrypt_operations ext4_cryptops = {
1197 .key_prefix = "ext4:", 1196 .key_prefix = "ext4:",
1198 .get_context = ext4_get_context, 1197 .get_context = ext4_get_context,
1199 .prepare_context = ext4_prepare_context,
1200 .set_context = ext4_set_context, 1198 .set_context = ext4_set_context,
1201 .dummy_context = ext4_dummy_context, 1199 .dummy_context = ext4_dummy_context,
1202 .is_encrypted = ext4_encrypted_inode, 1200 .is_encrypted = ext4_encrypted_inode,
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 67636acf7624..996e7900d4c8 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -131,31 +131,26 @@ static __le32 ext4_xattr_block_csum(struct inode *inode,
131} 131}
132 132
133static int ext4_xattr_block_csum_verify(struct inode *inode, 133static int ext4_xattr_block_csum_verify(struct inode *inode,
134 sector_t block_nr, 134 struct buffer_head *bh)
135 struct ext4_xattr_header *hdr)
136{ 135{
137 if (ext4_has_metadata_csum(inode->i_sb) && 136 struct ext4_xattr_header *hdr = BHDR(bh);
138 (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr))) 137 int ret = 1;
139 return 0;
140 return 1;
141}
142
143static void ext4_xattr_block_csum_set(struct inode *inode,
144 sector_t block_nr,
145 struct ext4_xattr_header *hdr)
146{
147 if (!ext4_has_metadata_csum(inode->i_sb))
148 return;
149 138
150 hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr); 139 if (ext4_has_metadata_csum(inode->i_sb)) {
140 lock_buffer(bh);
141 ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
142 bh->b_blocknr, hdr));
143 unlock_buffer(bh);
144 }
145 return ret;
151} 146}
152 147
153static inline int ext4_handle_dirty_xattr_block(handle_t *handle, 148static void ext4_xattr_block_csum_set(struct inode *inode,
154 struct inode *inode, 149 struct buffer_head *bh)
155 struct buffer_head *bh)
156{ 150{
157 ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh)); 151 if (ext4_has_metadata_csum(inode->i_sb))
158 return ext4_handle_dirty_metadata(handle, inode, bh); 152 BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
153 bh->b_blocknr, BHDR(bh));
159} 154}
160 155
161static inline const struct xattr_handler * 156static inline const struct xattr_handler *
@@ -233,7 +228,7 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
233 if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || 228 if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
234 BHDR(bh)->h_blocks != cpu_to_le32(1)) 229 BHDR(bh)->h_blocks != cpu_to_le32(1))
235 return -EFSCORRUPTED; 230 return -EFSCORRUPTED;
236 if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh))) 231 if (!ext4_xattr_block_csum_verify(inode, bh))
237 return -EFSBADCRC; 232 return -EFSBADCRC;
238 error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size, 233 error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
239 bh->b_data); 234 bh->b_data);
@@ -618,23 +613,22 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
618 } 613 }
619 } 614 }
620 615
616 ext4_xattr_block_csum_set(inode, bh);
621 /* 617 /*
622 * Beware of this ugliness: Releasing of xattr block references 618 * Beware of this ugliness: Releasing of xattr block references
623 * from different inodes can race and so we have to protect 619 * from different inodes can race and so we have to protect
624 * from a race where someone else frees the block (and releases 620 * from a race where someone else frees the block (and releases
625 * its journal_head) before we are done dirtying the buffer. In 621 * its journal_head) before we are done dirtying the buffer. In
626 * nojournal mode this race is harmless and we actually cannot 622 * nojournal mode this race is harmless and we actually cannot
627 * call ext4_handle_dirty_xattr_block() with locked buffer as 623 * call ext4_handle_dirty_metadata() with locked buffer as
628 * that function can call sync_dirty_buffer() so for that case 624 * that function can call sync_dirty_buffer() so for that case
629 * we handle the dirtying after unlocking the buffer. 625 * we handle the dirtying after unlocking the buffer.
630 */ 626 */
631 if (ext4_handle_valid(handle)) 627 if (ext4_handle_valid(handle))
632 error = ext4_handle_dirty_xattr_block(handle, inode, 628 error = ext4_handle_dirty_metadata(handle, inode, bh);
633 bh);
634 unlock_buffer(bh); 629 unlock_buffer(bh);
635 if (!ext4_handle_valid(handle)) 630 if (!ext4_handle_valid(handle))
636 error = ext4_handle_dirty_xattr_block(handle, inode, 631 error = ext4_handle_dirty_metadata(handle, inode, bh);
637 bh);
638 if (IS_SYNC(inode)) 632 if (IS_SYNC(inode))
639 ext4_handle_sync(handle); 633 ext4_handle_sync(handle);
640 dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1)); 634 dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
@@ -863,13 +857,14 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
863 ext4_xattr_cache_insert(ext4_mb_cache, 857 ext4_xattr_cache_insert(ext4_mb_cache,
864 bs->bh); 858 bs->bh);
865 } 859 }
860 ext4_xattr_block_csum_set(inode, bs->bh);
866 unlock_buffer(bs->bh); 861 unlock_buffer(bs->bh);
867 if (error == -EFSCORRUPTED) 862 if (error == -EFSCORRUPTED)
868 goto bad_block; 863 goto bad_block;
869 if (!error) 864 if (!error)
870 error = ext4_handle_dirty_xattr_block(handle, 865 error = ext4_handle_dirty_metadata(handle,
871 inode, 866 inode,
872 bs->bh); 867 bs->bh);
873 if (error) 868 if (error)
874 goto cleanup; 869 goto cleanup;
875 goto inserted; 870 goto inserted;
@@ -967,10 +962,11 @@ inserted:
967 ce->e_reusable = 0; 962 ce->e_reusable = 0;
968 ea_bdebug(new_bh, "reusing; refcount now=%d", 963 ea_bdebug(new_bh, "reusing; refcount now=%d",
969 ref); 964 ref);
965 ext4_xattr_block_csum_set(inode, new_bh);
970 unlock_buffer(new_bh); 966 unlock_buffer(new_bh);
971 error = ext4_handle_dirty_xattr_block(handle, 967 error = ext4_handle_dirty_metadata(handle,
972 inode, 968 inode,
973 new_bh); 969 new_bh);
974 if (error) 970 if (error)
975 goto cleanup_dquot; 971 goto cleanup_dquot;
976 } 972 }
@@ -1020,11 +1016,12 @@ getblk_failed:
1020 goto getblk_failed; 1016 goto getblk_failed;
1021 } 1017 }
1022 memcpy(new_bh->b_data, s->base, new_bh->b_size); 1018 memcpy(new_bh->b_data, s->base, new_bh->b_size);
1019 ext4_xattr_block_csum_set(inode, new_bh);
1023 set_buffer_uptodate(new_bh); 1020 set_buffer_uptodate(new_bh);
1024 unlock_buffer(new_bh); 1021 unlock_buffer(new_bh);
1025 ext4_xattr_cache_insert(ext4_mb_cache, new_bh); 1022 ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
1026 error = ext4_handle_dirty_xattr_block(handle, 1023 error = ext4_handle_dirty_metadata(handle, inode,
1027 inode, new_bh); 1024 new_bh);
1028 if (error) 1025 if (error)
1029 goto cleanup; 1026 goto cleanup;
1030 } 1027 }
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 8f96461236f6..7163fe014b57 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -695,14 +695,11 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
695 695
696 inode = new_inode(sb); 696 inode = new_inode(sb);
697 if (inode) { 697 if (inode) {
698 struct hugetlbfs_inode_info *info;
699 inode->i_ino = get_next_ino(); 698 inode->i_ino = get_next_ino();
700 inode->i_mode = S_IFDIR | config->mode; 699 inode->i_mode = S_IFDIR | config->mode;
701 inode->i_uid = config->uid; 700 inode->i_uid = config->uid;
702 inode->i_gid = config->gid; 701 inode->i_gid = config->gid;
703 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 702 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
704 info = HUGETLBFS_I(inode);
705 mpol_shared_policy_init(&info->policy, NULL);
706 inode->i_op = &hugetlbfs_dir_inode_operations; 703 inode->i_op = &hugetlbfs_dir_inode_operations;
707 inode->i_fop = &simple_dir_operations; 704 inode->i_fop = &simple_dir_operations;
708 /* directory inodes start off with i_nlink == 2 (for "." entry) */ 705 /* directory inodes start off with i_nlink == 2 (for "." entry) */
@@ -733,7 +730,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
733 730
734 inode = new_inode(sb); 731 inode = new_inode(sb);
735 if (inode) { 732 if (inode) {
736 struct hugetlbfs_inode_info *info;
737 inode->i_ino = get_next_ino(); 733 inode->i_ino = get_next_ino();
738 inode_init_owner(inode, dir, mode); 734 inode_init_owner(inode, dir, mode);
739 lockdep_set_class(&inode->i_mapping->i_mmap_rwsem, 735 lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
@@ -741,15 +737,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
741 inode->i_mapping->a_ops = &hugetlbfs_aops; 737 inode->i_mapping->a_ops = &hugetlbfs_aops;
742 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 738 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
743 inode->i_mapping->private_data = resv_map; 739 inode->i_mapping->private_data = resv_map;
744 info = HUGETLBFS_I(inode);
745 /*
746 * The policy is initialized here even if we are creating a
747 * private inode because initialization simply creates an
748 * an empty rb tree and calls rwlock_init(), later when we
749 * call mpol_free_shared_policy() it will just return because
750 * the rb tree will still be empty.
751 */
752 mpol_shared_policy_init(&info->policy, NULL);
753 switch (mode & S_IFMT) { 740 switch (mode & S_IFMT) {
754 default: 741 default:
755 init_special_inode(inode, mode, dev); 742 init_special_inode(inode, mode, dev);
@@ -937,6 +924,18 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
937 hugetlbfs_inc_free_inodes(sbinfo); 924 hugetlbfs_inc_free_inodes(sbinfo);
938 return NULL; 925 return NULL;
939 } 926 }
927
928 /*
929 * Any time after allocation, hugetlbfs_destroy_inode can be called
930 * for the inode. mpol_free_shared_policy is unconditionally called
931 * as part of hugetlbfs_destroy_inode. So, initialize policy here
932 * in case of a quick call to destroy.
933 *
934 * Note that the policy is initialized even if we are creating a
935 * private inode. This simplifies hugetlbfs_destroy_inode.
936 */
937 mpol_shared_policy_init(&p->policy, NULL);
938
940 return &p->vfs_inode; 939 return &p->vfs_inode;
941} 940}
942 941
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index a1a359bfcc9c..5adc2fb62b0f 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1125,10 +1125,8 @@ static journal_t *journal_init_common(struct block_device *bdev,
1125 1125
1126 /* Set up a default-sized revoke table for the new mount. */ 1126 /* Set up a default-sized revoke table for the new mount. */
1127 err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH); 1127 err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
1128 if (err) { 1128 if (err)
1129 kfree(journal); 1129 goto err_cleanup;
1130 return NULL;
1131 }
1132 1130
1133 spin_lock_init(&journal->j_history_lock); 1131 spin_lock_init(&journal->j_history_lock);
1134 1132
@@ -1145,23 +1143,25 @@ static journal_t *journal_init_common(struct block_device *bdev,
1145 journal->j_wbufsize = n; 1143 journal->j_wbufsize = n;
1146 journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *), 1144 journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *),
1147 GFP_KERNEL); 1145 GFP_KERNEL);
1148 if (!journal->j_wbuf) { 1146 if (!journal->j_wbuf)
1149 kfree(journal); 1147 goto err_cleanup;
1150 return NULL;
1151 }
1152 1148
1153 bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize); 1149 bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize);
1154 if (!bh) { 1150 if (!bh) {
1155 pr_err("%s: Cannot get buffer for journal superblock\n", 1151 pr_err("%s: Cannot get buffer for journal superblock\n",
1156 __func__); 1152 __func__);
1157 kfree(journal->j_wbuf); 1153 goto err_cleanup;
1158 kfree(journal);
1159 return NULL;
1160 } 1154 }
1161 journal->j_sb_buffer = bh; 1155 journal->j_sb_buffer = bh;
1162 journal->j_superblock = (journal_superblock_t *)bh->b_data; 1156 journal->j_superblock = (journal_superblock_t *)bh->b_data;
1163 1157
1164 return journal; 1158 return journal;
1159
1160err_cleanup:
1161 kfree(journal->j_wbuf);
1162 jbd2_journal_destroy_revoke(journal);
1163 kfree(journal);
1164 return NULL;
1165} 1165}
1166 1166
1167/* jbd2_journal_init_dev and jbd2_journal_init_inode: 1167/* jbd2_journal_init_dev and jbd2_journal_init_inode:
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index cfc38b552118..f9aefcda5854 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -280,6 +280,7 @@ int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
280 280
281fail1: 281fail1:
282 jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]); 282 jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
283 journal->j_revoke_table[0] = NULL;
283fail0: 284fail0:
284 return -ENOMEM; 285 return -ENOMEM;
285} 286}
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 8e4dc7ab584c..ac2dfe0c5a9c 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -809,7 +809,8 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
809 if (kn->flags & KERNFS_HAS_MMAP) 809 if (kn->flags & KERNFS_HAS_MMAP)
810 unmap_mapping_range(inode->i_mapping, 0, 0, 1); 810 unmap_mapping_range(inode->i_mapping, 0, 0, 1);
811 811
812 kernfs_release_file(kn, of); 812 if (kn->flags & KERNFS_HAS_RELEASE)
813 kernfs_release_file(kn, of);
813 } 814 }
814 815
815 mutex_unlock(&kernfs_open_file_mutex); 816 mutex_unlock(&kernfs_open_file_mutex);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index fb499a3f21b5..f92ba8d6c556 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2055,7 +2055,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2055{ 2055{
2056 struct inode *old_inode = d_inode(old_dentry); 2056 struct inode *old_inode = d_inode(old_dentry);
2057 struct inode *new_inode = d_inode(new_dentry); 2057 struct inode *new_inode = d_inode(new_dentry);
2058 struct dentry *dentry = NULL, *rehash = NULL; 2058 struct dentry *dentry = NULL;
2059 struct rpc_task *task; 2059 struct rpc_task *task;
2060 int error = -EBUSY; 2060 int error = -EBUSY;
2061 2061
@@ -2078,10 +2078,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2078 * To prevent any new references to the target during the 2078 * To prevent any new references to the target during the
2079 * rename, we unhash the dentry in advance. 2079 * rename, we unhash the dentry in advance.
2080 */ 2080 */
2081 if (!d_unhashed(new_dentry)) { 2081 if (!d_unhashed(new_dentry))
2082 d_drop(new_dentry); 2082 d_drop(new_dentry);
2083 rehash = new_dentry;
2084 }
2085 2083
2086 if (d_count(new_dentry) > 2) { 2084 if (d_count(new_dentry) > 2) {
2087 int err; 2085 int err;
@@ -2098,7 +2096,6 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2098 goto out; 2096 goto out;
2099 2097
2100 new_dentry = dentry; 2098 new_dentry = dentry;
2101 rehash = NULL;
2102 new_inode = NULL; 2099 new_inode = NULL;
2103 } 2100 }
2104 } 2101 }
@@ -2119,8 +2116,6 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2119 error = task->tk_status; 2116 error = task->tk_status;
2120 rpc_put_task(task); 2117 rpc_put_task(task);
2121out: 2118out:
2122 if (rehash)
2123 d_rehash(rehash);
2124 trace_nfs_rename_exit(old_dir, old_dentry, 2119 trace_nfs_rename_exit(old_dir, old_dentry,
2125 new_dir, new_dentry, error); 2120 new_dir, new_dentry, error);
2126 /* new dentry created? */ 2121 /* new dentry created? */
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 44347f4bdc15..acd30baca461 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -202,10 +202,10 @@ static int filelayout_async_handle_error(struct rpc_task *task,
202 task->tk_status); 202 task->tk_status);
203 nfs4_mark_deviceid_unavailable(devid); 203 nfs4_mark_deviceid_unavailable(devid);
204 pnfs_error_mark_layout_for_return(inode, lseg); 204 pnfs_error_mark_layout_for_return(inode, lseg);
205 pnfs_set_lo_fail(lseg);
206 rpc_wake_up(&tbl->slot_tbl_waitq); 205 rpc_wake_up(&tbl->slot_tbl_waitq);
207 /* fall through */ 206 /* fall through */
208 default: 207 default:
208 pnfs_set_lo_fail(lseg);
209reset: 209reset:
210 dprintk("%s Retry through MDS. Error %d\n", __func__, 210 dprintk("%s Retry through MDS. Error %d\n", __func__,
211 task->tk_status); 211 task->tk_status);
@@ -560,6 +560,50 @@ filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
560 return PNFS_ATTEMPTED; 560 return PNFS_ATTEMPTED;
561} 561}
562 562
563static int
564filelayout_check_deviceid(struct pnfs_layout_hdr *lo,
565 struct nfs4_filelayout_segment *fl,
566 gfp_t gfp_flags)
567{
568 struct nfs4_deviceid_node *d;
569 struct nfs4_file_layout_dsaddr *dsaddr;
570 int status = -EINVAL;
571
572 /* find and reference the deviceid */
573 d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &fl->deviceid,
574 lo->plh_lc_cred, gfp_flags);
575 if (d == NULL)
576 goto out;
577
578 dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
579 /* Found deviceid is unavailable */
580 if (filelayout_test_devid_unavailable(&dsaddr->id_node))
581 goto out_put;
582
583 fl->dsaddr = dsaddr;
584
585 if (fl->first_stripe_index >= dsaddr->stripe_count) {
586 dprintk("%s Bad first_stripe_index %u\n",
587 __func__, fl->first_stripe_index);
588 goto out_put;
589 }
590
591 if ((fl->stripe_type == STRIPE_SPARSE &&
592 fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
593 (fl->stripe_type == STRIPE_DENSE &&
594 fl->num_fh != dsaddr->stripe_count)) {
595 dprintk("%s num_fh %u not valid for given packing\n",
596 __func__, fl->num_fh);
597 goto out_put;
598 }
599 status = 0;
600out:
601 return status;
602out_put:
603 nfs4_fl_put_deviceid(dsaddr);
604 goto out;
605}
606
563/* 607/*
564 * filelayout_check_layout() 608 * filelayout_check_layout()
565 * 609 *
@@ -572,11 +616,8 @@ static int
572filelayout_check_layout(struct pnfs_layout_hdr *lo, 616filelayout_check_layout(struct pnfs_layout_hdr *lo,
573 struct nfs4_filelayout_segment *fl, 617 struct nfs4_filelayout_segment *fl,
574 struct nfs4_layoutget_res *lgr, 618 struct nfs4_layoutget_res *lgr,
575 struct nfs4_deviceid *id,
576 gfp_t gfp_flags) 619 gfp_t gfp_flags)
577{ 620{
578 struct nfs4_deviceid_node *d;
579 struct nfs4_file_layout_dsaddr *dsaddr;
580 int status = -EINVAL; 621 int status = -EINVAL;
581 622
582 dprintk("--> %s\n", __func__); 623 dprintk("--> %s\n", __func__);
@@ -601,41 +642,10 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
601 goto out; 642 goto out;
602 } 643 }
603 644
604 /* find and reference the deviceid */
605 d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), id,
606 lo->plh_lc_cred, gfp_flags);
607 if (d == NULL)
608 goto out;
609
610 dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
611 /* Found deviceid is unavailable */
612 if (filelayout_test_devid_unavailable(&dsaddr->id_node))
613 goto out_put;
614
615 fl->dsaddr = dsaddr;
616
617 if (fl->first_stripe_index >= dsaddr->stripe_count) {
618 dprintk("%s Bad first_stripe_index %u\n",
619 __func__, fl->first_stripe_index);
620 goto out_put;
621 }
622
623 if ((fl->stripe_type == STRIPE_SPARSE &&
624 fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
625 (fl->stripe_type == STRIPE_DENSE &&
626 fl->num_fh != dsaddr->stripe_count)) {
627 dprintk("%s num_fh %u not valid for given packing\n",
628 __func__, fl->num_fh);
629 goto out_put;
630 }
631
632 status = 0; 645 status = 0;
633out: 646out:
634 dprintk("--> %s returns %d\n", __func__, status); 647 dprintk("--> %s returns %d\n", __func__, status);
635 return status; 648 return status;
636out_put:
637 nfs4_fl_put_deviceid(dsaddr);
638 goto out;
639} 649}
640 650
641static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl) 651static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
@@ -657,7 +667,6 @@ static int
657filelayout_decode_layout(struct pnfs_layout_hdr *flo, 667filelayout_decode_layout(struct pnfs_layout_hdr *flo,
658 struct nfs4_filelayout_segment *fl, 668 struct nfs4_filelayout_segment *fl,
659 struct nfs4_layoutget_res *lgr, 669 struct nfs4_layoutget_res *lgr,
660 struct nfs4_deviceid *id,
661 gfp_t gfp_flags) 670 gfp_t gfp_flags)
662{ 671{
663 struct xdr_stream stream; 672 struct xdr_stream stream;
@@ -682,9 +691,9 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
682 if (unlikely(!p)) 691 if (unlikely(!p))
683 goto out_err; 692 goto out_err;
684 693
685 memcpy(id, p, sizeof(*id)); 694 memcpy(&fl->deviceid, p, sizeof(fl->deviceid));
686 p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE); 695 p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
687 nfs4_print_deviceid(id); 696 nfs4_print_deviceid(&fl->deviceid);
688 697
689 nfl_util = be32_to_cpup(p++); 698 nfl_util = be32_to_cpup(p++);
690 if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS) 699 if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS)
@@ -831,15 +840,14 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
831{ 840{
832 struct nfs4_filelayout_segment *fl; 841 struct nfs4_filelayout_segment *fl;
833 int rc; 842 int rc;
834 struct nfs4_deviceid id;
835 843
836 dprintk("--> %s\n", __func__); 844 dprintk("--> %s\n", __func__);
837 fl = kzalloc(sizeof(*fl), gfp_flags); 845 fl = kzalloc(sizeof(*fl), gfp_flags);
838 if (!fl) 846 if (!fl)
839 return NULL; 847 return NULL;
840 848
841 rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags); 849 rc = filelayout_decode_layout(layoutid, fl, lgr, gfp_flags);
842 if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) { 850 if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, gfp_flags)) {
843 _filelayout_free_lseg(fl); 851 _filelayout_free_lseg(fl);
844 return NULL; 852 return NULL;
845 } 853 }
@@ -888,18 +896,51 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
888 return min(stripe_unit - (unsigned int)stripe_offset, size); 896 return min(stripe_unit - (unsigned int)stripe_offset, size);
889} 897}
890 898
899static struct pnfs_layout_segment *
900fl_pnfs_update_layout(struct inode *ino,
901 struct nfs_open_context *ctx,
902 loff_t pos,
903 u64 count,
904 enum pnfs_iomode iomode,
905 bool strict_iomode,
906 gfp_t gfp_flags)
907{
908 struct pnfs_layout_segment *lseg = NULL;
909 struct pnfs_layout_hdr *lo;
910 struct nfs4_filelayout_segment *fl;
911 int status;
912
913 lseg = pnfs_update_layout(ino, ctx, pos, count, iomode, strict_iomode,
914 gfp_flags);
915 if (!lseg)
916 lseg = ERR_PTR(-ENOMEM);
917 if (IS_ERR(lseg))
918 goto out;
919
920 lo = NFS_I(ino)->layout;
921 fl = FILELAYOUT_LSEG(lseg);
922
923 status = filelayout_check_deviceid(lo, fl, gfp_flags);
924 if (status)
925 lseg = ERR_PTR(status);
926out:
927 if (IS_ERR(lseg))
928 pnfs_put_lseg(lseg);
929 return lseg;
930}
931
891static void 932static void
892filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio, 933filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
893 struct nfs_page *req) 934 struct nfs_page *req)
894{ 935{
895 if (!pgio->pg_lseg) { 936 if (!pgio->pg_lseg) {
896 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 937 pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode,
897 req->wb_context, 938 req->wb_context,
898 0, 939 0,
899 NFS4_MAX_UINT64, 940 NFS4_MAX_UINT64,
900 IOMODE_READ, 941 IOMODE_READ,
901 false, 942 false,
902 GFP_KERNEL); 943 GFP_KERNEL);
903 if (IS_ERR(pgio->pg_lseg)) { 944 if (IS_ERR(pgio->pg_lseg)) {
904 pgio->pg_error = PTR_ERR(pgio->pg_lseg); 945 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
905 pgio->pg_lseg = NULL; 946 pgio->pg_lseg = NULL;
@@ -919,13 +960,13 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
919 int status; 960 int status;
920 961
921 if (!pgio->pg_lseg) { 962 if (!pgio->pg_lseg) {
922 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 963 pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode,
923 req->wb_context, 964 req->wb_context,
924 0, 965 0,
925 NFS4_MAX_UINT64, 966 NFS4_MAX_UINT64,
926 IOMODE_RW, 967 IOMODE_RW,
927 false, 968 false,
928 GFP_NOFS); 969 GFP_NOFS);
929 if (IS_ERR(pgio->pg_lseg)) { 970 if (IS_ERR(pgio->pg_lseg)) {
930 pgio->pg_error = PTR_ERR(pgio->pg_lseg); 971 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
931 pgio->pg_lseg = NULL; 972 pgio->pg_lseg = NULL;
diff --git a/fs/nfs/filelayout/filelayout.h b/fs/nfs/filelayout/filelayout.h
index 2896cb833a11..79323b5dab0c 100644
--- a/fs/nfs/filelayout/filelayout.h
+++ b/fs/nfs/filelayout/filelayout.h
@@ -55,15 +55,16 @@ struct nfs4_file_layout_dsaddr {
55}; 55};
56 56
57struct nfs4_filelayout_segment { 57struct nfs4_filelayout_segment {
58 struct pnfs_layout_segment generic_hdr; 58 struct pnfs_layout_segment generic_hdr;
59 u32 stripe_type; 59 u32 stripe_type;
60 u32 commit_through_mds; 60 u32 commit_through_mds;
61 u32 stripe_unit; 61 u32 stripe_unit;
62 u32 first_stripe_index; 62 u32 first_stripe_index;
63 u64 pattern_offset; 63 u64 pattern_offset;
64 struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */ 64 struct nfs4_deviceid deviceid;
65 unsigned int num_fh; 65 struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */
66 struct nfs_fh **fh_array; 66 unsigned int num_fh;
67 struct nfs_fh **fh_array;
67}; 68};
68 69
69struct nfs4_filelayout { 70struct nfs4_filelayout {
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index 85fde93dff77..457cfeb1d5c1 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -208,6 +208,10 @@ static bool ff_layout_mirror_valid(struct pnfs_layout_segment *lseg,
208 } else 208 } else
209 goto outerr; 209 goto outerr;
210 } 210 }
211
212 if (IS_ERR(mirror->mirror_ds))
213 goto outerr;
214
211 if (mirror->mirror_ds->ds == NULL) { 215 if (mirror->mirror_ds->ds == NULL) {
212 struct nfs4_deviceid_node *devid; 216 struct nfs4_deviceid_node *devid;
213 devid = &mirror->mirror_ds->id_node; 217 devid = &mirror->mirror_ds->id_node;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index c780d98035cc..201ca3f2c4ba 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2442,17 +2442,14 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2442 } 2442 }
2443 2443
2444 nfs4_stateid_copy(&stateid, &delegation->stateid); 2444 nfs4_stateid_copy(&stateid, &delegation->stateid);
2445 if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) { 2445 if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) ||
2446 !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
2447 &delegation->flags)) {
2446 rcu_read_unlock(); 2448 rcu_read_unlock();
2447 nfs_finish_clear_delegation_stateid(state, &stateid); 2449 nfs_finish_clear_delegation_stateid(state, &stateid);
2448 return; 2450 return;
2449 } 2451 }
2450 2452
2451 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags)) {
2452 rcu_read_unlock();
2453 return;
2454 }
2455
2456 cred = get_rpccred(delegation->cred); 2453 cred = get_rpccred(delegation->cred);
2457 rcu_read_unlock(); 2454 rcu_read_unlock();
2458 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred); 2455 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 73e75ac90525..8bf8f667a8cf 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -538,13 +538,21 @@ out_free:
538 538
539static ssize_t 539static ssize_t
540nfsd_print_version_support(char *buf, int remaining, const char *sep, 540nfsd_print_version_support(char *buf, int remaining, const char *sep,
541 unsigned vers, unsigned minor) 541 unsigned vers, int minor)
542{ 542{
543 const char *format = (minor == 0) ? "%s%c%u" : "%s%c%u.%u"; 543 const char *format = minor < 0 ? "%s%c%u" : "%s%c%u.%u";
544 bool supported = !!nfsd_vers(vers, NFSD_TEST); 544 bool supported = !!nfsd_vers(vers, NFSD_TEST);
545 545
546 if (vers == 4 && !nfsd_minorversion(minor, NFSD_TEST)) 546 if (vers == 4 && minor >= 0 &&
547 !nfsd_minorversion(minor, NFSD_TEST))
547 supported = false; 548 supported = false;
549 if (minor == 0 && supported)
550 /*
551 * special case for backward compatability.
552 * +4.0 is never reported, it is implied by
553 * +4, unless -4.0 is present.
554 */
555 return 0;
548 return snprintf(buf, remaining, format, sep, 556 return snprintf(buf, remaining, format, sep,
549 supported ? '+' : '-', vers, minor); 557 supported ? '+' : '-', vers, minor);
550} 558}
@@ -554,7 +562,6 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
554 char *mesg = buf; 562 char *mesg = buf;
555 char *vers, *minorp, sign; 563 char *vers, *minorp, sign;
556 int len, num, remaining; 564 int len, num, remaining;
557 unsigned minor;
558 ssize_t tlen = 0; 565 ssize_t tlen = 0;
559 char *sep; 566 char *sep;
560 struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id); 567 struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id);
@@ -575,6 +582,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
575 if (len <= 0) return -EINVAL; 582 if (len <= 0) return -EINVAL;
576 do { 583 do {
577 enum vers_op cmd; 584 enum vers_op cmd;
585 unsigned minor;
578 sign = *vers; 586 sign = *vers;
579 if (sign == '+' || sign == '-') 587 if (sign == '+' || sign == '-')
580 num = simple_strtol((vers+1), &minorp, 0); 588 num = simple_strtol((vers+1), &minorp, 0);
@@ -585,8 +593,8 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
585 return -EINVAL; 593 return -EINVAL;
586 if (kstrtouint(minorp+1, 0, &minor) < 0) 594 if (kstrtouint(minorp+1, 0, &minor) < 0)
587 return -EINVAL; 595 return -EINVAL;
588 } else 596 }
589 minor = 0; 597
590 cmd = sign == '-' ? NFSD_CLEAR : NFSD_SET; 598 cmd = sign == '-' ? NFSD_CLEAR : NFSD_SET;
591 switch(num) { 599 switch(num) {
592 case 2: 600 case 2:
@@ -594,8 +602,20 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
594 nfsd_vers(num, cmd); 602 nfsd_vers(num, cmd);
595 break; 603 break;
596 case 4: 604 case 4:
597 if (nfsd_minorversion(minor, cmd) >= 0) 605 if (*minorp == '.') {
598 break; 606 if (nfsd_minorversion(minor, cmd) < 0)
607 return -EINVAL;
608 } else if ((cmd == NFSD_SET) != nfsd_vers(num, NFSD_TEST)) {
609 /*
610 * Either we have +4 and no minors are enabled,
611 * or we have -4 and at least one minor is enabled.
612 * In either case, propagate 'cmd' to all minors.
613 */
614 minor = 0;
615 while (nfsd_minorversion(minor, cmd) >= 0)
616 minor++;
617 }
618 break;
599 default: 619 default:
600 return -EINVAL; 620 return -EINVAL;
601 } 621 }
@@ -612,9 +632,11 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
612 sep = ""; 632 sep = "";
613 remaining = SIMPLE_TRANSACTION_LIMIT; 633 remaining = SIMPLE_TRANSACTION_LIMIT;
614 for (num=2 ; num <= 4 ; num++) { 634 for (num=2 ; num <= 4 ; num++) {
635 int minor;
615 if (!nfsd_vers(num, NFSD_AVAIL)) 636 if (!nfsd_vers(num, NFSD_AVAIL))
616 continue; 637 continue;
617 minor = 0; 638
639 minor = -1;
618 do { 640 do {
619 len = nfsd_print_version_support(buf, remaining, 641 len = nfsd_print_version_support(buf, remaining,
620 sep, num, minor); 642 sep, num, minor);
@@ -624,7 +646,8 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
624 buf += len; 646 buf += len;
625 tlen += len; 647 tlen += len;
626 minor++; 648 minor++;
627 sep = " "; 649 if (len)
650 sep = " ";
628 } while (num == 4 && minor <= NFSD_SUPPORTED_MINOR_VERSION); 651 } while (num == 4 && minor <= NFSD_SUPPORTED_MINOR_VERSION);
629 } 652 }
630out: 653out:
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index fa82b7707e85..03a7e9da4da0 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -786,6 +786,7 @@ nfserrno (int errno)
786 { nfserr_serverfault, -ESERVERFAULT }, 786 { nfserr_serverfault, -ESERVERFAULT },
787 { nfserr_serverfault, -ENFILE }, 787 { nfserr_serverfault, -ENFILE },
788 { nfserr_io, -EUCLEAN }, 788 { nfserr_io, -EUCLEAN },
789 { nfserr_perm, -ENOKEY },
789 }; 790 };
790 int i; 791 int i;
791 792
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 786a4a2cb2d7..31e1f9593457 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -167,7 +167,8 @@ nfsd_adjust_nfsd_versions4(void)
167 167
168int nfsd_minorversion(u32 minorversion, enum vers_op change) 168int nfsd_minorversion(u32 minorversion, enum vers_op change)
169{ 169{
170 if (minorversion > NFSD_SUPPORTED_MINOR_VERSION) 170 if (minorversion > NFSD_SUPPORTED_MINOR_VERSION &&
171 change != NFSD_AVAIL)
171 return -1; 172 return -1;
172 switch(change) { 173 switch(change) {
173 case NFSD_SET: 174 case NFSD_SET:
@@ -415,23 +416,20 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
415 416
416void nfsd_reset_versions(void) 417void nfsd_reset_versions(void)
417{ 418{
418 int found_one = 0;
419 int i; 419 int i;
420 420
421 for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) { 421 for (i = 0; i < NFSD_NRVERS; i++)
422 if (nfsd_program.pg_vers[i]) 422 if (nfsd_vers(i, NFSD_TEST))
423 found_one = 1; 423 return;
424 }
425 424
426 if (!found_one) { 425 for (i = 0; i < NFSD_NRVERS; i++)
427 for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) 426 if (i != 4)
428 nfsd_program.pg_vers[i] = nfsd_version[i]; 427 nfsd_vers(i, NFSD_SET);
429#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) 428 else {
430 for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++) 429 int minor = 0;
431 nfsd_acl_program.pg_vers[i] = 430 while (nfsd_minorversion(minor, NFSD_SET) >= 0)
432 nfsd_acl_version[i]; 431 minor++;
433#endif 432 }
434 }
435} 433}
436 434
437/* 435/*
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 4df64a1fc09e..532372c6cf15 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -14,8 +14,8 @@
14 * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.* 14 * [_sdata, _edata]: contains .data.* sections, may also contain .rodata.*
15 * and/or .init.* sections. 15 * and/or .init.* sections.
16 * [__start_rodata, __end_rodata]: contains .rodata.* sections 16 * [__start_rodata, __end_rodata]: contains .rodata.* sections
17 * [__start_data_ro_after_init, __end_data_ro_after_init]: 17 * [__start_ro_after_init, __end_ro_after_init]:
18 * contains data.ro_after_init section 18 * contains .data..ro_after_init section
19 * [__init_begin, __init_end]: contains .init.* sections, but .init.text.* 19 * [__init_begin, __init_end]: contains .init.* sections, but .init.text.*
20 * may be out of this range on some architectures. 20 * may be out of this range on some architectures.
21 * [_sinittext, _einittext]: contains .init.text.* sections 21 * [_sinittext, _einittext]: contains .init.text.* sections
@@ -33,7 +33,7 @@ extern char _data[], _sdata[], _edata[];
33extern char __bss_start[], __bss_stop[]; 33extern char __bss_start[], __bss_stop[];
34extern char __init_begin[], __init_end[]; 34extern char __init_begin[], __init_end[];
35extern char _sinittext[], _einittext[]; 35extern char _sinittext[], _einittext[];
36extern char __start_data_ro_after_init[], __end_data_ro_after_init[]; 36extern char __start_ro_after_init[], __end_ro_after_init[];
37extern char _end[]; 37extern char _end[];
38extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; 38extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
39extern char __kprobes_text_start[], __kprobes_text_end[]; 39extern char __kprobes_text_start[], __kprobes_text_end[];
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 0968d13b3885..7cdfe167074f 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -173,6 +173,7 @@
173 KEEP(*(__##name##_of_table_end)) 173 KEEP(*(__##name##_of_table_end))
174 174
175#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc) 175#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
176#define CLKEVT_OF_TABLES() OF_TABLE(CONFIG_CLKEVT_OF, clkevt)
176#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip) 177#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
177#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk) 178#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
178#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu) 179#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu)
@@ -260,9 +261,9 @@
260 */ 261 */
261#ifndef RO_AFTER_INIT_DATA 262#ifndef RO_AFTER_INIT_DATA
262#define RO_AFTER_INIT_DATA \ 263#define RO_AFTER_INIT_DATA \
263 __start_data_ro_after_init = .; \ 264 __start_ro_after_init = .; \
264 *(.data..ro_after_init) \ 265 *(.data..ro_after_init) \
265 __end_data_ro_after_init = .; 266 __end_ro_after_init = .;
266#endif 267#endif
267 268
268/* 269/*
@@ -559,6 +560,7 @@
559 CLK_OF_TABLES() \ 560 CLK_OF_TABLES() \
560 RESERVEDMEM_OF_TABLES() \ 561 RESERVEDMEM_OF_TABLES() \
561 CLKSRC_OF_TABLES() \ 562 CLKSRC_OF_TABLES() \
563 CLKEVT_OF_TABLES() \
562 IOMMU_OF_TABLES() \ 564 IOMMU_OF_TABLES() \
563 CPU_METHOD_OF_TABLES() \ 565 CPU_METHOD_OF_TABLES() \
564 CPUIDLE_METHOD_OF_TABLES() \ 566 CPUIDLE_METHOD_OF_TABLES() \
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index ed953f98f0e1..1487011fe057 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -229,6 +229,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
229 * @ref_type: The type of reference. 229 * @ref_type: The type of reference.
230 * @existed: Upon completion, indicates that an identical reference object 230 * @existed: Upon completion, indicates that an identical reference object
231 * already existed, and the refcount was upped on that object instead. 231 * already existed, and the refcount was upped on that object instead.
232 * @require_existed: Fail with -EPERM if an identical ref object didn't
233 * already exist.
232 * 234 *
233 * Checks that the base object is shareable and adds a ref object to it. 235 * Checks that the base object is shareable and adds a ref object to it.
234 * 236 *
@@ -243,7 +245,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
243 */ 245 */
244extern int ttm_ref_object_add(struct ttm_object_file *tfile, 246extern int ttm_ref_object_add(struct ttm_object_file *tfile,
245 struct ttm_base_object *base, 247 struct ttm_base_object *base,
246 enum ttm_ref_type ref_type, bool *existed); 248 enum ttm_ref_type ref_type, bool *existed,
249 bool require_existed);
247 250
248extern bool ttm_ref_object_exists(struct ttm_object_file *tfile, 251extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
249 struct ttm_base_object *base); 252 struct ttm_base_object *base);
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index c71dd8fa5764..c41b8d99dd0e 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -556,7 +556,7 @@ enum ccp_engine {
556 * struct ccp_cmd - CCP operation request 556 * struct ccp_cmd - CCP operation request
557 * @entry: list element (ccp driver use only) 557 * @entry: list element (ccp driver use only)
558 * @work: work element used for callbacks (ccp driver use only) 558 * @work: work element used for callbacks (ccp driver use only)
559 * @ccp: CCP device to be run on (ccp driver use only) 559 * @ccp: CCP device to be run on
560 * @ret: operation return code (ccp driver use only) 560 * @ret: operation return code (ccp driver use only)
561 * @flags: cmd processing flags 561 * @flags: cmd processing flags
562 * @engine: CCP operation to perform 562 * @engine: CCP operation to perform
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 5d3053c34fb3..6d7edc3082f9 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -229,7 +229,7 @@ static inline void tick_setup_hrtimer_broadcast(void) { }
229 229
230#ifdef CONFIG_CLKEVT_PROBE 230#ifdef CONFIG_CLKEVT_PROBE
231extern int clockevent_probe(void); 231extern int clockevent_probe(void);
232#els 232#else
233static inline int clockevent_probe(void) { return 0; } 233static inline int clockevent_probe(void) { return 0; }
234#endif 234#endif
235 235
diff --git a/include/linux/fscrypt_common.h b/include/linux/fscrypt_common.h
index 547f81592ba1..10c1abfbac6c 100644
--- a/include/linux/fscrypt_common.h
+++ b/include/linux/fscrypt_common.h
@@ -87,7 +87,6 @@ struct fscrypt_operations {
87 unsigned int flags; 87 unsigned int flags;
88 const char *key_prefix; 88 const char *key_prefix;
89 int (*get_context)(struct inode *, void *, size_t); 89 int (*get_context)(struct inode *, void *, size_t);
90 int (*prepare_context)(struct inode *);
91 int (*set_context)(struct inode *, const void *, size_t, void *); 90 int (*set_context)(struct inode *, const void *, size_t, void *);
92 int (*dummy_context)(struct inode *); 91 int (*dummy_context)(struct inode *);
93 bool (*is_encrypted)(struct inode *); 92 bool (*is_encrypted)(struct inode *);
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 78d59dba563e..88b673749121 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -88,6 +88,7 @@ enum hwmon_temp_attributes {
88#define HWMON_T_CRIT_HYST BIT(hwmon_temp_crit_hyst) 88#define HWMON_T_CRIT_HYST BIT(hwmon_temp_crit_hyst)
89#define HWMON_T_EMERGENCY BIT(hwmon_temp_emergency) 89#define HWMON_T_EMERGENCY BIT(hwmon_temp_emergency)
90#define HWMON_T_EMERGENCY_HYST BIT(hwmon_temp_emergency_hyst) 90#define HWMON_T_EMERGENCY_HYST BIT(hwmon_temp_emergency_hyst)
91#define HWMON_T_ALARM BIT(hwmon_temp_alarm)
91#define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm) 92#define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm)
92#define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm) 93#define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm)
93#define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm) 94#define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm)
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 36162485d663..0c170a3f0d8b 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -845,6 +845,13 @@ struct vmbus_channel {
845 * link up channels based on their CPU affinity. 845 * link up channels based on their CPU affinity.
846 */ 846 */
847 struct list_head percpu_list; 847 struct list_head percpu_list;
848
849 /*
850 * Defer freeing channel until after all cpu's have
851 * gone through grace period.
852 */
853 struct rcu_head rcu;
854
848 /* 855 /*
849 * For performance critical channels (storage, networking 856 * For performance critical channels (storage, networking
850 * etc,), Hyper-V has a mechanism to enhance the throughput 857 * etc,), Hyper-V has a mechanism to enhance the throughput
@@ -1430,9 +1437,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
1430 const int *srv_version, int srv_vercnt, 1437 const int *srv_version, int srv_vercnt,
1431 int *nego_fw_version, int *nego_srv_version); 1438 int *nego_fw_version, int *nego_srv_version);
1432 1439
1433void hv_event_tasklet_disable(struct vmbus_channel *channel);
1434void hv_event_tasklet_enable(struct vmbus_channel *channel);
1435
1436void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); 1440void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
1437 1441
1438void vmbus_setevent(struct vmbus_channel *channel); 1442void vmbus_setevent(struct vmbus_channel *channel);
diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h
index 23ca41515527..fa7931933067 100644
--- a/include/linux/iio/sw_device.h
+++ b/include/linux/iio/sw_device.h
@@ -62,7 +62,7 @@ void iio_swd_group_init_type_name(struct iio_sw_device *d,
62 const char *name, 62 const char *name,
63 struct config_item_type *type) 63 struct config_item_type *type)
64{ 64{
65#ifdef CONFIG_CONFIGFS_FS 65#if IS_ENABLED(CONFIG_CONFIGFS_FS)
66 config_group_init_type_name(&d->group, name, type); 66 config_group_init_type_name(&d->group, name, type);
67#endif 67#endif
68} 68}
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 6a6de187ddc0..2e4de0deee53 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -125,9 +125,16 @@ enum iommu_attr {
125}; 125};
126 126
127/* These are the possible reserved region types */ 127/* These are the possible reserved region types */
128#define IOMMU_RESV_DIRECT (1 << 0) 128enum iommu_resv_type {
129#define IOMMU_RESV_RESERVED (1 << 1) 129 /* Memory regions which must be mapped 1:1 at all times */
130#define IOMMU_RESV_MSI (1 << 2) 130 IOMMU_RESV_DIRECT,
131 /* Arbitrary "never map this or give it to a device" address ranges */
132 IOMMU_RESV_RESERVED,
133 /* Hardware MSI region (untranslated) */
134 IOMMU_RESV_MSI,
135 /* Software-managed MSI translation window */
136 IOMMU_RESV_SW_MSI,
137};
131 138
132/** 139/**
133 * struct iommu_resv_region - descriptor for a reserved memory region 140 * struct iommu_resv_region - descriptor for a reserved memory region
@@ -142,7 +149,7 @@ struct iommu_resv_region {
142 phys_addr_t start; 149 phys_addr_t start;
143 size_t length; 150 size_t length;
144 int prot; 151 int prot;
145 int type; 152 enum iommu_resv_type type;
146}; 153};
147 154
148#ifdef CONFIG_IOMMU_API 155#ifdef CONFIG_IOMMU_API
@@ -288,7 +295,8 @@ extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
288extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); 295extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
289extern int iommu_request_dm_for_dev(struct device *dev); 296extern int iommu_request_dm_for_dev(struct device *dev);
290extern struct iommu_resv_region * 297extern struct iommu_resv_region *
291iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type); 298iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
299 enum iommu_resv_type type);
292extern int iommu_get_group_resv_regions(struct iommu_group *group, 300extern int iommu_get_group_resv_regions(struct iommu_group *group,
293 struct list_head *head); 301 struct list_head *head);
294 302
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5734480c9590..a5c7046f26b4 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -76,6 +76,9 @@ size_t ksize(const void *);
76static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); } 76static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
77size_t kasan_metadata_size(struct kmem_cache *cache); 77size_t kasan_metadata_size(struct kmem_cache *cache);
78 78
79bool kasan_save_enable_multi_shot(void);
80void kasan_restore_multi_shot(bool enabled);
81
79#else /* CONFIG_KASAN */ 82#else /* CONFIG_KASAN */
80 83
81static inline void kasan_unpoison_shadow(const void *address, size_t size) {} 84static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 2c14ad9809da..d0250744507a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -162,8 +162,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
162 int len, void *val); 162 int len, void *val);
163int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 163int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
164 int len, struct kvm_io_device *dev); 164 int len, struct kvm_io_device *dev);
165int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 165void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
166 struct kvm_io_device *dev); 166 struct kvm_io_device *dev);
167struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 167struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
168 gpa_t addr); 168 gpa_t addr);
169 169
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 5af377303880..bb7250c45cb8 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -740,6 +740,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
740 return false; 740 return false;
741} 741}
742 742
743static inline void mem_cgroup_update_page_stat(struct page *page,
744 enum mem_cgroup_stat_index idx,
745 int nr)
746{
747}
748
743static inline void mem_cgroup_inc_page_stat(struct page *page, 749static inline void mem_cgroup_inc_page_stat(struct page *page,
744 enum mem_cgroup_stat_index idx) 750 enum mem_cgroup_stat_index idx)
745{ 751{
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 7a01c94496f1..3eef9fb9968a 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -35,10 +35,11 @@
35 * Max bus-specific overhead incurred by request/responses. 35 * Max bus-specific overhead incurred by request/responses.
36 * I2C requires 1 additional byte for requests. 36 * I2C requires 1 additional byte for requests.
37 * I2C requires 2 additional bytes for responses. 37 * I2C requires 2 additional bytes for responses.
38 * SPI requires up to 32 additional bytes for responses.
38 * */ 39 * */
39#define EC_PROTO_VERSION_UNKNOWN 0 40#define EC_PROTO_VERSION_UNKNOWN 0
40#define EC_MAX_REQUEST_OVERHEAD 1 41#define EC_MAX_REQUEST_OVERHEAD 1
41#define EC_MAX_RESPONSE_OVERHEAD 2 42#define EC_MAX_RESPONSE_OVERHEAD 32
42 43
43/* 44/*
44 * Command interface between EC and AP, for LPC, I2C and SPI interfaces. 45 * Command interface between EC and AP, for LPC, I2C and SPI interfaces.
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5f01c88f0800..00a8fa7e366a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -32,6 +32,8 @@ struct user_struct;
32struct writeback_control; 32struct writeback_control;
33struct bdi_writeback; 33struct bdi_writeback;
34 34
35void init_mm_internals(void);
36
35#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */ 37#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
36extern unsigned long max_mapnr; 38extern unsigned long max_mapnr;
37 39
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index 35d0fd7a4948..fd0de00c0d77 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -76,22 +76,12 @@ struct gpmc_timings;
76struct omap_nand_platform_data; 76struct omap_nand_platform_data;
77struct omap_onenand_platform_data; 77struct omap_onenand_platform_data;
78 78
79#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2)
80extern int gpmc_nand_init(struct omap_nand_platform_data *d,
81 struct gpmc_timings *gpmc_t);
82#else
83static inline int gpmc_nand_init(struct omap_nand_platform_data *d,
84 struct gpmc_timings *gpmc_t)
85{
86 return 0;
87}
88#endif
89
90#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2) 79#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
91extern void gpmc_onenand_init(struct omap_onenand_platform_data *d); 80extern int gpmc_onenand_init(struct omap_onenand_platform_data *d);
92#else 81#else
93#define board_onenand_data NULL 82#define board_onenand_data NULL
94static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d) 83static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d)
95{ 84{
85 return 0;
96} 86}
97#endif 87#endif
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 86b4ed75359e..96fb139bdd08 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -31,31 +31,26 @@ static inline int device_reset_optional(struct device *dev)
31 31
32static inline int reset_control_reset(struct reset_control *rstc) 32static inline int reset_control_reset(struct reset_control *rstc)
33{ 33{
34 WARN_ON(1);
35 return 0; 34 return 0;
36} 35}
37 36
38static inline int reset_control_assert(struct reset_control *rstc) 37static inline int reset_control_assert(struct reset_control *rstc)
39{ 38{
40 WARN_ON(1);
41 return 0; 39 return 0;
42} 40}
43 41
44static inline int reset_control_deassert(struct reset_control *rstc) 42static inline int reset_control_deassert(struct reset_control *rstc)
45{ 43{
46 WARN_ON(1);
47 return 0; 44 return 0;
48} 45}
49 46
50static inline int reset_control_status(struct reset_control *rstc) 47static inline int reset_control_status(struct reset_control *rstc)
51{ 48{
52 WARN_ON(1);
53 return 0; 49 return 0;
54} 50}
55 51
56static inline void reset_control_put(struct reset_control *rstc) 52static inline void reset_control_put(struct reset_control *rstc)
57{ 53{
58 WARN_ON(1);
59} 54}
60 55
61static inline int __must_check device_reset(struct device *dev) 56static inline int __must_check device_reset(struct device *dev)
@@ -74,14 +69,14 @@ static inline struct reset_control *__of_reset_control_get(
74 const char *id, int index, bool shared, 69 const char *id, int index, bool shared,
75 bool optional) 70 bool optional)
76{ 71{
77 return ERR_PTR(-ENOTSUPP); 72 return optional ? NULL : ERR_PTR(-ENOTSUPP);
78} 73}
79 74
80static inline struct reset_control *__devm_reset_control_get( 75static inline struct reset_control *__devm_reset_control_get(
81 struct device *dev, const char *id, 76 struct device *dev, const char *id,
82 int index, bool shared, bool optional) 77 int index, bool shared, bool optional)
83{ 78{
84 return ERR_PTR(-ENOTSUPP); 79 return optional ? NULL : ERR_PTR(-ENOTSUPP);
85} 80}
86 81
87#endif /* CONFIG_RESET_CONTROLLER */ 82#endif /* CONFIG_RESET_CONTROLLER */
diff --git a/include/linux/sched/clock.h b/include/linux/sched/clock.h
index 4a68c6791207..34fe92ce1ebd 100644
--- a/include/linux/sched/clock.h
+++ b/include/linux/sched/clock.h
@@ -54,15 +54,16 @@ static inline u64 local_clock(void)
54} 54}
55#else 55#else
56extern void sched_clock_init_late(void); 56extern void sched_clock_init_late(void);
57/*
58 * Architectures can set this to 1 if they have specified
59 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
60 * but then during bootup it turns out that sched_clock()
61 * is reliable after all:
62 */
63extern int sched_clock_stable(void); 57extern int sched_clock_stable(void);
64extern void clear_sched_clock_stable(void); 58extern void clear_sched_clock_stable(void);
65 59
60/*
61 * When sched_clock_stable(), __sched_clock_offset provides the offset
62 * between local_clock() and sched_clock().
63 */
64extern u64 __sched_clock_offset;
65
66
66extern void sched_clock_tick(void); 67extern void sched_clock_tick(void);
67extern void sched_clock_idle_sleep_event(void); 68extern void sched_clock_idle_sleep_event(void);
68extern void sched_clock_idle_wakeup_event(u64 delta_ns); 69extern void sched_clock_idle_wakeup_event(u64 delta_ns);
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 1d0043dc34e4..de2a722fe3cf 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -50,4 +50,10 @@
50/* device can't handle Link Power Management */ 50/* device can't handle Link Power Management */
51#define USB_QUIRK_NO_LPM BIT(10) 51#define USB_QUIRK_NO_LPM BIT(10)
52 52
53/*
54 * Device reports its bInterval as linear frames instead of the
55 * USB 2.0 calculation.
56 */
57#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11)
58
53#endif /* __LINUX_USB_QUIRKS_H */ 59#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 1f71ee5ab518..069582ee5d7f 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -448,10 +448,9 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
448 return frag; 448 return frag;
449} 449}
450 450
451static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_association *asoc) 451static inline void sctp_assoc_pending_pmtu(struct sctp_association *asoc)
452{ 452{
453 453 sctp_assoc_sync_pmtu(asoc);
454 sctp_assoc_sync_pmtu(sk, asoc);
455 asoc->pmtu_pending = 0; 454 asoc->pmtu_pending = 0;
456} 455}
457 456
@@ -596,12 +595,23 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr)
596 */ 595 */
597static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) 596static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
598{ 597{
599 if (t->dst && (!dst_check(t->dst, t->dst_cookie) || 598 if (t->dst && !dst_check(t->dst, t->dst_cookie))
600 t->pathmtu != max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
601 SCTP_DEFAULT_MINSEGMENT)))
602 sctp_transport_dst_release(t); 599 sctp_transport_dst_release(t);
603 600
604 return t->dst; 601 return t->dst;
605} 602}
606 603
604static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
605{
606 __u32 pmtu = max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
607 SCTP_DEFAULT_MINSEGMENT);
608
609 if (t->pathmtu == pmtu)
610 return true;
611
612 t->pathmtu = pmtu;
613
614 return false;
615}
616
607#endif /* __net_sctp_h__ */ 617#endif /* __net_sctp_h__ */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 3e61a54424a1..b751399aa6b7 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -377,7 +377,8 @@ typedef struct sctp_sender_hb_info {
377 __u64 hb_nonce; 377 __u64 hb_nonce;
378} sctp_sender_hb_info_t; 378} sctp_sender_hb_info_t;
379 379
380struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp); 380int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp);
381int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp);
381void sctp_stream_free(struct sctp_stream *stream); 382void sctp_stream_free(struct sctp_stream *stream);
382void sctp_stream_clear(struct sctp_stream *stream); 383void sctp_stream_clear(struct sctp_stream *stream);
383 384
@@ -499,7 +500,6 @@ struct sctp_datamsg {
499 /* Did the messenge fail to send? */ 500 /* Did the messenge fail to send? */
500 int send_error; 501 int send_error;
501 u8 send_failed:1, 502 u8 send_failed:1,
502 force_delay:1,
503 can_delay; /* should this message be Nagle delayed */ 503 can_delay; /* should this message be Nagle delayed */
504}; 504};
505 505
@@ -952,8 +952,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *, sctp_lower_cwnd_t);
952void sctp_transport_burst_limited(struct sctp_transport *); 952void sctp_transport_burst_limited(struct sctp_transport *);
953void sctp_transport_burst_reset(struct sctp_transport *); 953void sctp_transport_burst_reset(struct sctp_transport *);
954unsigned long sctp_transport_timeout(struct sctp_transport *); 954unsigned long sctp_transport_timeout(struct sctp_transport *);
955void sctp_transport_reset(struct sctp_transport *); 955void sctp_transport_reset(struct sctp_transport *t);
956void sctp_transport_update_pmtu(struct sock *, struct sctp_transport *, u32); 956void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
957void sctp_transport_immediate_rtx(struct sctp_transport *); 957void sctp_transport_immediate_rtx(struct sctp_transport *);
958void sctp_transport_dst_release(struct sctp_transport *t); 958void sctp_transport_dst_release(struct sctp_transport *t);
959void sctp_transport_dst_confirm(struct sctp_transport *t); 959void sctp_transport_dst_confirm(struct sctp_transport *t);
@@ -1880,6 +1880,7 @@ struct sctp_association {
1880 1880
1881 __u8 need_ecne:1, /* Need to send an ECNE Chunk? */ 1881 __u8 need_ecne:1, /* Need to send an ECNE Chunk? */
1882 temp:1, /* Is it a temporary association? */ 1882 temp:1, /* Is it a temporary association? */
1883 force_delay:1,
1883 prsctp_enable:1, 1884 prsctp_enable:1,
1884 reconf_enable:1; 1885 reconf_enable:1;
1885 1886
@@ -1955,7 +1956,7 @@ void sctp_assoc_update(struct sctp_association *old,
1955 1956
1956__u32 sctp_association_get_next_tsn(struct sctp_association *); 1957__u32 sctp_association_get_next_tsn(struct sctp_association *);
1957 1958
1958void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *); 1959void sctp_assoc_sync_pmtu(struct sctp_association *asoc);
1959void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int); 1960void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
1960void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int); 1961void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
1961void sctp_assoc_set_primary(struct sctp_association *, 1962void sctp_assoc_set_primary(struct sctp_association *,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 0f1813c13687..99e4423eb2b8 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1863,6 +1863,9 @@ struct ib_port_immutable {
1863}; 1863};
1864 1864
1865struct ib_device { 1865struct ib_device {
1866 /* Do not access @dma_device directly from ULP nor from HW drivers. */
1867 struct device *dma_device;
1868
1866 char name[IB_DEVICE_NAME_MAX]; 1869 char name[IB_DEVICE_NAME_MAX];
1867 1870
1868 struct list_head event_handler_list; 1871 struct list_head event_handler_list;
@@ -3007,7 +3010,7 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3007 */ 3010 */
3008static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 3011static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3009{ 3012{
3010 return dma_mapping_error(&dev->dev, dma_addr); 3013 return dma_mapping_error(dev->dma_device, dma_addr);
3011} 3014}
3012 3015
3013/** 3016/**
@@ -3021,7 +3024,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
3021 void *cpu_addr, size_t size, 3024 void *cpu_addr, size_t size,
3022 enum dma_data_direction direction) 3025 enum dma_data_direction direction)
3023{ 3026{
3024 return dma_map_single(&dev->dev, cpu_addr, size, direction); 3027 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3025} 3028}
3026 3029
3027/** 3030/**
@@ -3035,7 +3038,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
3035 u64 addr, size_t size, 3038 u64 addr, size_t size,
3036 enum dma_data_direction direction) 3039 enum dma_data_direction direction)
3037{ 3040{
3038 dma_unmap_single(&dev->dev, addr, size, direction); 3041 dma_unmap_single(dev->dma_device, addr, size, direction);
3039} 3042}
3040 3043
3041/** 3044/**
@@ -3052,7 +3055,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
3052 size_t size, 3055 size_t size,
3053 enum dma_data_direction direction) 3056 enum dma_data_direction direction)
3054{ 3057{
3055 return dma_map_page(&dev->dev, page, offset, size, direction); 3058 return dma_map_page(dev->dma_device, page, offset, size, direction);
3056} 3059}
3057 3060
3058/** 3061/**
@@ -3066,7 +3069,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
3066 u64 addr, size_t size, 3069 u64 addr, size_t size,
3067 enum dma_data_direction direction) 3070 enum dma_data_direction direction)
3068{ 3071{
3069 dma_unmap_page(&dev->dev, addr, size, direction); 3072 dma_unmap_page(dev->dma_device, addr, size, direction);
3070} 3073}
3071 3074
3072/** 3075/**
@@ -3080,7 +3083,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
3080 struct scatterlist *sg, int nents, 3083 struct scatterlist *sg, int nents,
3081 enum dma_data_direction direction) 3084 enum dma_data_direction direction)
3082{ 3085{
3083 return dma_map_sg(&dev->dev, sg, nents, direction); 3086 return dma_map_sg(dev->dma_device, sg, nents, direction);
3084} 3087}
3085 3088
3086/** 3089/**
@@ -3094,7 +3097,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
3094 struct scatterlist *sg, int nents, 3097 struct scatterlist *sg, int nents,
3095 enum dma_data_direction direction) 3098 enum dma_data_direction direction)
3096{ 3099{
3097 dma_unmap_sg(&dev->dev, sg, nents, direction); 3100 dma_unmap_sg(dev->dma_device, sg, nents, direction);
3098} 3101}
3099 3102
3100static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 3103static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
@@ -3102,7 +3105,8 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
3102 enum dma_data_direction direction, 3105 enum dma_data_direction direction,
3103 unsigned long dma_attrs) 3106 unsigned long dma_attrs)
3104{ 3107{
3105 return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); 3108 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
3109 dma_attrs);
3106} 3110}
3107 3111
3108static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 3112static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
@@ -3110,7 +3114,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
3110 enum dma_data_direction direction, 3114 enum dma_data_direction direction,
3111 unsigned long dma_attrs) 3115 unsigned long dma_attrs)
3112{ 3116{
3113 dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); 3117 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
3114} 3118}
3115/** 3119/**
3116 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 3120 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
@@ -3152,7 +3156,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
3152 size_t size, 3156 size_t size,
3153 enum dma_data_direction dir) 3157 enum dma_data_direction dir)
3154{ 3158{
3155 dma_sync_single_for_cpu(&dev->dev, addr, size, dir); 3159 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
3156} 3160}
3157 3161
3158/** 3162/**
@@ -3167,7 +3171,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
3167 size_t size, 3171 size_t size,
3168 enum dma_data_direction dir) 3172 enum dma_data_direction dir)
3169{ 3173{
3170 dma_sync_single_for_device(&dev->dev, addr, size, dir); 3174 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
3171} 3175}
3172 3176
3173/** 3177/**
@@ -3182,7 +3186,7 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
3182 dma_addr_t *dma_handle, 3186 dma_addr_t *dma_handle,
3183 gfp_t flag) 3187 gfp_t flag)
3184{ 3188{
3185 return dma_alloc_coherent(&dev->dev, size, dma_handle, flag); 3189 return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
3186} 3190}
3187 3191
3188/** 3192/**
@@ -3196,7 +3200,7 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
3196 size_t size, void *cpu_addr, 3200 size_t size, void *cpu_addr,
3197 dma_addr_t dma_handle) 3201 dma_addr_t dma_handle)
3198{ 3202{
3199 dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle); 3203 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
3200} 3204}
3201 3205
3202/** 3206/**
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 9b1462e38b82..a076cf1a3a23 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -730,9 +730,11 @@ __SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
730__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc) 730__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
731#define __NR_pkey_free 290 731#define __NR_pkey_free 290
732__SYSCALL(__NR_pkey_free, sys_pkey_free) 732__SYSCALL(__NR_pkey_free, sys_pkey_free)
733#define __NR_statx 291
734__SYSCALL(__NR_statx, sys_statx)
733 735
734#undef __NR_syscalls 736#undef __NR_syscalls
735#define __NR_syscalls 291 737#define __NR_syscalls 292
736 738
737/* 739/*
738 * All syscalls below here should go away really, 740 * All syscalls below here should go away really,
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index da7cd62bace7..0b3d30837a9f 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -34,6 +34,7 @@
34#define MLX5_ABI_USER_H 34#define MLX5_ABI_USER_H
35 35
36#include <linux/types.h> 36#include <linux/types.h>
37#include <linux/if_ether.h> /* For ETH_ALEN. */
37 38
38enum { 39enum {
39 MLX5_QP_FLAG_SIGNATURE = 1 << 0, 40 MLX5_QP_FLAG_SIGNATURE = 1 << 0,
@@ -66,7 +67,7 @@ struct mlx5_ib_alloc_ucontext_req {
66}; 67};
67 68
68enum mlx5_lib_caps { 69enum mlx5_lib_caps {
69 MLX5_LIB_CAP_4K_UAR = (u64)1 << 0, 70 MLX5_LIB_CAP_4K_UAR = (__u64)1 << 0,
70}; 71};
71 72
72struct mlx5_ib_alloc_ucontext_req_v2 { 73struct mlx5_ib_alloc_ucontext_req_v2 {
diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h
index ef8e2a8ad0af..6b083d327e98 100644
--- a/include/video/exynos5433_decon.h
+++ b/include/video/exynos5433_decon.h
@@ -46,6 +46,7 @@
46#define DECON_FRAMEFIFO_STATUS 0x0524 46#define DECON_FRAMEFIFO_STATUS 0x0524
47#define DECON_CMU 0x1404 47#define DECON_CMU 0x1404
48#define DECON_UPDATE 0x1410 48#define DECON_UPDATE 0x1410
49#define DECON_CRFMID 0x1414
49#define DECON_UPDATE_SCHEME 0x1438 50#define DECON_UPDATE_SCHEME 0x1438
50#define DECON_VIDCON1 0x2000 51#define DECON_VIDCON1 0x2000
51#define DECON_VIDCON2 0x2004 52#define DECON_VIDCON2 0x2004
@@ -126,6 +127,10 @@
126 127
127/* VIDINTCON0 */ 128/* VIDINTCON0 */
128#define VIDINTCON0_FRAMEDONE (1 << 17) 129#define VIDINTCON0_FRAMEDONE (1 << 17)
130#define VIDINTCON0_FRAMESEL_BP (0 << 15)
131#define VIDINTCON0_FRAMESEL_VS (1 << 15)
132#define VIDINTCON0_FRAMESEL_AC (2 << 15)
133#define VIDINTCON0_FRAMESEL_FP (3 << 15)
129#define VIDINTCON0_INTFRMEN (1 << 12) 134#define VIDINTCON0_INTFRMEN (1 << 12)
130#define VIDINTCON0_INTEN (1 << 0) 135#define VIDINTCON0_INTEN (1 << 0)
131 136
@@ -142,6 +147,13 @@
142#define STANDALONE_UPDATE_F (1 << 0) 147#define STANDALONE_UPDATE_F (1 << 0)
143 148
144/* DECON_VIDCON1 */ 149/* DECON_VIDCON1 */
150#define VIDCON1_LINECNT_MASK (0x0fff << 16)
151#define VIDCON1_I80_ACTIVE (1 << 15)
152#define VIDCON1_VSTATUS_MASK (0x3 << 13)
153#define VIDCON1_VSTATUS_VS (0 << 13)
154#define VIDCON1_VSTATUS_BP (1 << 13)
155#define VIDCON1_VSTATUS_AC (2 << 13)
156#define VIDCON1_VSTATUS_FP (3 << 13)
145#define VIDCON1_VCLK_MASK (0x3 << 9) 157#define VIDCON1_VCLK_MASK (0x3 << 9)
146#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9) 158#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9)
147#define VIDCON1_VCLK_HOLD (0x0 << 9) 159#define VIDCON1_VCLK_HOLD (0x0 << 9)
diff --git a/init/main.c b/init/main.c
index f9c9d9948203..b0c11cbf5ddf 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1022,6 +1022,8 @@ static noinline void __init kernel_init_freeable(void)
1022 1022
1023 workqueue_init(); 1023 workqueue_init();
1024 1024
1025 init_mm_internals();
1026
1025 do_pre_smp_initcalls(); 1027 do_pre_smp_initcalls();
1026 lockup_detector_init(); 1028 lockup_detector_init();
1027 1029
diff --git a/kernel/audit.c b/kernel/audit.c
index e794544f5e63..2f4964cfde0b 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -54,6 +54,10 @@
54#include <linux/kthread.h> 54#include <linux/kthread.h>
55#include <linux/kernel.h> 55#include <linux/kernel.h>
56#include <linux/syscalls.h> 56#include <linux/syscalls.h>
57#include <linux/spinlock.h>
58#include <linux/rcupdate.h>
59#include <linux/mutex.h>
60#include <linux/gfp.h>
57 61
58#include <linux/audit.h> 62#include <linux/audit.h>
59 63
@@ -90,13 +94,34 @@ static u32 audit_default;
90/* If auditing cannot proceed, audit_failure selects what happens. */ 94/* If auditing cannot proceed, audit_failure selects what happens. */
91static u32 audit_failure = AUDIT_FAIL_PRINTK; 95static u32 audit_failure = AUDIT_FAIL_PRINTK;
92 96
93/* 97/* private audit network namespace index */
94 * If audit records are to be written to the netlink socket, audit_pid 98static unsigned int audit_net_id;
95 * contains the pid of the auditd process and audit_nlk_portid contains 99
96 * the portid to use to send netlink messages to that process. 100/**
101 * struct audit_net - audit private network namespace data
102 * @sk: communication socket
103 */
104struct audit_net {
105 struct sock *sk;
106};
107
108/**
109 * struct auditd_connection - kernel/auditd connection state
110 * @pid: auditd PID
111 * @portid: netlink portid
112 * @net: the associated network namespace
113 * @lock: spinlock to protect write access
114 *
115 * Description:
116 * This struct is RCU protected; you must either hold the RCU lock for reading
117 * or the included spinlock for writing.
97 */ 118 */
98int audit_pid; 119static struct auditd_connection {
99static __u32 audit_nlk_portid; 120 int pid;
121 u32 portid;
122 struct net *net;
123 spinlock_t lock;
124} auditd_conn;
100 125
101/* If audit_rate_limit is non-zero, limit the rate of sending audit records 126/* If audit_rate_limit is non-zero, limit the rate of sending audit records
102 * to that number per second. This prevents DoS attacks, but results in 127 * to that number per second. This prevents DoS attacks, but results in
@@ -123,10 +148,6 @@ u32 audit_sig_sid = 0;
123*/ 148*/
124static atomic_t audit_lost = ATOMIC_INIT(0); 149static atomic_t audit_lost = ATOMIC_INIT(0);
125 150
126/* The netlink socket. */
127static struct sock *audit_sock;
128static unsigned int audit_net_id;
129
130/* Hash for inode-based rules */ 151/* Hash for inode-based rules */
131struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; 152struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS];
132 153
@@ -139,6 +160,7 @@ static LIST_HEAD(audit_freelist);
139 160
140/* queue msgs to send via kauditd_task */ 161/* queue msgs to send via kauditd_task */
141static struct sk_buff_head audit_queue; 162static struct sk_buff_head audit_queue;
163static void kauditd_hold_skb(struct sk_buff *skb);
142/* queue msgs due to temporary unicast send problems */ 164/* queue msgs due to temporary unicast send problems */
143static struct sk_buff_head audit_retry_queue; 165static struct sk_buff_head audit_retry_queue;
144/* queue msgs waiting for new auditd connection */ 166/* queue msgs waiting for new auditd connection */
@@ -192,6 +214,43 @@ struct audit_reply {
192 struct sk_buff *skb; 214 struct sk_buff *skb;
193}; 215};
194 216
217/**
218 * auditd_test_task - Check to see if a given task is an audit daemon
219 * @task: the task to check
220 *
221 * Description:
222 * Return 1 if the task is a registered audit daemon, 0 otherwise.
223 */
224int auditd_test_task(const struct task_struct *task)
225{
226 int rc;
227
228 rcu_read_lock();
229 rc = (auditd_conn.pid && task->tgid == auditd_conn.pid ? 1 : 0);
230 rcu_read_unlock();
231
232 return rc;
233}
234
235/**
236 * audit_get_sk - Return the audit socket for the given network namespace
237 * @net: the destination network namespace
238 *
239 * Description:
240 * Returns the sock pointer if valid, NULL otherwise. The caller must ensure
241 * that a reference is held for the network namespace while the sock is in use.
242 */
243static struct sock *audit_get_sk(const struct net *net)
244{
245 struct audit_net *aunet;
246
247 if (!net)
248 return NULL;
249
250 aunet = net_generic(net, audit_net_id);
251 return aunet->sk;
252}
253
195static void audit_set_portid(struct audit_buffer *ab, __u32 portid) 254static void audit_set_portid(struct audit_buffer *ab, __u32 portid)
196{ 255{
197 if (ab) { 256 if (ab) {
@@ -210,9 +269,7 @@ void audit_panic(const char *message)
210 pr_err("%s\n", message); 269 pr_err("%s\n", message);
211 break; 270 break;
212 case AUDIT_FAIL_PANIC: 271 case AUDIT_FAIL_PANIC:
213 /* test audit_pid since printk is always losey, why bother? */ 272 panic("audit: %s\n", message);
214 if (audit_pid)
215 panic("audit: %s\n", message);
216 break; 273 break;
217 } 274 }
218} 275}
@@ -370,21 +427,87 @@ static int audit_set_failure(u32 state)
370 return audit_do_config_change("audit_failure", &audit_failure, state); 427 return audit_do_config_change("audit_failure", &audit_failure, state);
371} 428}
372 429
373/* 430/**
374 * For one reason or another this nlh isn't getting delivered to the userspace 431 * auditd_set - Set/Reset the auditd connection state
375 * audit daemon, just send it to printk. 432 * @pid: auditd PID
433 * @portid: auditd netlink portid
434 * @net: auditd network namespace pointer
435 *
436 * Description:
437 * This function will obtain and drop network namespace references as
438 * necessary.
439 */
440static void auditd_set(int pid, u32 portid, struct net *net)
441{
442 unsigned long flags;
443
444 spin_lock_irqsave(&auditd_conn.lock, flags);
445 auditd_conn.pid = pid;
446 auditd_conn.portid = portid;
447 if (auditd_conn.net)
448 put_net(auditd_conn.net);
449 if (net)
450 auditd_conn.net = get_net(net);
451 else
452 auditd_conn.net = NULL;
453 spin_unlock_irqrestore(&auditd_conn.lock, flags);
454}
455
456/**
457 * auditd_reset - Disconnect the auditd connection
458 *
459 * Description:
460 * Break the auditd/kauditd connection and move all the queued records into the
461 * hold queue in case auditd reconnects.
462 */
463static void auditd_reset(void)
464{
465 struct sk_buff *skb;
466
467 /* if it isn't already broken, break the connection */
468 rcu_read_lock();
469 if (auditd_conn.pid)
470 auditd_set(0, 0, NULL);
471 rcu_read_unlock();
472
473 /* flush all of the main and retry queues to the hold queue */
474 while ((skb = skb_dequeue(&audit_retry_queue)))
475 kauditd_hold_skb(skb);
476 while ((skb = skb_dequeue(&audit_queue)))
477 kauditd_hold_skb(skb);
478}
479
480/**
481 * kauditd_print_skb - Print the audit record to the ring buffer
482 * @skb: audit record
483 *
484 * Whatever the reason, this packet may not make it to the auditd connection
485 * so write it via printk so the information isn't completely lost.
376 */ 486 */
377static void kauditd_printk_skb(struct sk_buff *skb) 487static void kauditd_printk_skb(struct sk_buff *skb)
378{ 488{
379 struct nlmsghdr *nlh = nlmsg_hdr(skb); 489 struct nlmsghdr *nlh = nlmsg_hdr(skb);
380 char *data = nlmsg_data(nlh); 490 char *data = nlmsg_data(nlh);
381 491
382 if (nlh->nlmsg_type != AUDIT_EOE) { 492 if (nlh->nlmsg_type != AUDIT_EOE && printk_ratelimit())
383 if (printk_ratelimit()) 493 pr_notice("type=%d %s\n", nlh->nlmsg_type, data);
384 pr_notice("type=%d %s\n", nlh->nlmsg_type, data); 494}
385 else 495
386 audit_log_lost("printk limit exceeded"); 496/**
387 } 497 * kauditd_rehold_skb - Handle a audit record send failure in the hold queue
498 * @skb: audit record
499 *
500 * Description:
501 * This should only be used by the kauditd_thread when it fails to flush the
502 * hold queue.
503 */
504static void kauditd_rehold_skb(struct sk_buff *skb)
505{
506 /* put the record back in the queue at the same place */
507 skb_queue_head(&audit_hold_queue, skb);
508
509 /* fail the auditd connection */
510 auditd_reset();
388} 511}
389 512
390/** 513/**
@@ -421,6 +544,9 @@ static void kauditd_hold_skb(struct sk_buff *skb)
421 /* we have no other options - drop the message */ 544 /* we have no other options - drop the message */
422 audit_log_lost("kauditd hold queue overflow"); 545 audit_log_lost("kauditd hold queue overflow");
423 kfree_skb(skb); 546 kfree_skb(skb);
547
548 /* fail the auditd connection */
549 auditd_reset();
424} 550}
425 551
426/** 552/**
@@ -441,51 +567,122 @@ static void kauditd_retry_skb(struct sk_buff *skb)
441} 567}
442 568
443/** 569/**
444 * auditd_reset - Disconnect the auditd connection 570 * auditd_send_unicast_skb - Send a record via unicast to auditd
571 * @skb: audit record
445 * 572 *
446 * Description: 573 * Description:
447 * Break the auditd/kauditd connection and move all the records in the retry 574 * Send a skb to the audit daemon, returns positive/zero values on success and
448 * queue into the hold queue in case auditd reconnects. The audit_cmd_mutex 575 * negative values on failure; in all cases the skb will be consumed by this
449 * must be held when calling this function. 576 * function. If the send results in -ECONNREFUSED the connection with auditd
577 * will be reset. This function may sleep so callers should not hold any locks
578 * where this would cause a problem.
450 */ 579 */
451static void auditd_reset(void) 580static int auditd_send_unicast_skb(struct sk_buff *skb)
452{ 581{
453 struct sk_buff *skb; 582 int rc;
454 583 u32 portid;
455 /* break the connection */ 584 struct net *net;
456 if (audit_sock) { 585 struct sock *sk;
457 sock_put(audit_sock); 586
458 audit_sock = NULL; 587 /* NOTE: we can't call netlink_unicast while in the RCU section so
588 * take a reference to the network namespace and grab local
589 * copies of the namespace, the sock, and the portid; the
590 * namespace and sock aren't going to go away while we hold a
591 * reference and if the portid does become invalid after the RCU
592 * section netlink_unicast() should safely return an error */
593
594 rcu_read_lock();
595 if (!auditd_conn.pid) {
596 rcu_read_unlock();
597 rc = -ECONNREFUSED;
598 goto err;
459 } 599 }
460 audit_pid = 0; 600 net = auditd_conn.net;
461 audit_nlk_portid = 0; 601 get_net(net);
602 sk = audit_get_sk(net);
603 portid = auditd_conn.portid;
604 rcu_read_unlock();
462 605
463 /* flush all of the retry queue to the hold queue */ 606 rc = netlink_unicast(sk, skb, portid, 0);
464 while ((skb = skb_dequeue(&audit_retry_queue))) 607 put_net(net);
465 kauditd_hold_skb(skb); 608 if (rc < 0)
609 goto err;
610
611 return rc;
612
613err:
614 if (rc == -ECONNREFUSED)
615 auditd_reset();
616 return rc;
466} 617}
467 618
468/** 619/**
469 * kauditd_send_unicast_skb - Send a record via unicast to auditd 620 * kauditd_send_queue - Helper for kauditd_thread to flush skb queues
470 * @skb: audit record 621 * @sk: the sending sock
622 * @portid: the netlink destination
623 * @queue: the skb queue to process
624 * @retry_limit: limit on number of netlink unicast failures
625 * @skb_hook: per-skb hook for additional processing
626 * @err_hook: hook called if the skb fails the netlink unicast send
627 *
628 * Description:
629 * Run through the given queue and attempt to send the audit records to auditd,
630 * returns zero on success, negative values on failure. It is up to the caller
631 * to ensure that the @sk is valid for the duration of this function.
632 *
471 */ 633 */
472static int kauditd_send_unicast_skb(struct sk_buff *skb) 634static int kauditd_send_queue(struct sock *sk, u32 portid,
635 struct sk_buff_head *queue,
636 unsigned int retry_limit,
637 void (*skb_hook)(struct sk_buff *skb),
638 void (*err_hook)(struct sk_buff *skb))
473{ 639{
474 int rc; 640 int rc = 0;
641 struct sk_buff *skb;
642 static unsigned int failed = 0;
475 643
476 /* if we know nothing is connected, don't even try the netlink call */ 644 /* NOTE: kauditd_thread takes care of all our locking, we just use
477 if (!audit_pid) 645 * the netlink info passed to us (e.g. sk and portid) */
478 return -ECONNREFUSED; 646
647 while ((skb = skb_dequeue(queue))) {
648 /* call the skb_hook for each skb we touch */
649 if (skb_hook)
650 (*skb_hook)(skb);
651
652 /* can we send to anyone via unicast? */
653 if (!sk) {
654 if (err_hook)
655 (*err_hook)(skb);
656 continue;
657 }
479 658
480 /* get an extra skb reference in case we fail to send */ 659 /* grab an extra skb reference in case of error */
481 skb_get(skb); 660 skb_get(skb);
482 rc = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0); 661 rc = netlink_unicast(sk, skb, portid, 0);
483 if (rc >= 0) { 662 if (rc < 0) {
484 consume_skb(skb); 663 /* fatal failure for our queue flush attempt? */
485 rc = 0; 664 if (++failed >= retry_limit ||
665 rc == -ECONNREFUSED || rc == -EPERM) {
666 /* yes - error processing for the queue */
667 sk = NULL;
668 if (err_hook)
669 (*err_hook)(skb);
670 if (!skb_hook)
671 goto out;
672 /* keep processing with the skb_hook */
673 continue;
674 } else
675 /* no - requeue to preserve ordering */
676 skb_queue_head(queue, skb);
677 } else {
678 /* it worked - drop the extra reference and continue */
679 consume_skb(skb);
680 failed = 0;
681 }
486 } 682 }
487 683
488 return rc; 684out:
685 return (rc >= 0 ? 0 : rc);
489} 686}
490 687
491/* 688/*
@@ -493,16 +690,19 @@ static int kauditd_send_unicast_skb(struct sk_buff *skb)
493 * @skb: audit record 690 * @skb: audit record
494 * 691 *
495 * Description: 692 * Description:
496 * This function doesn't consume an skb as might be expected since it has to 693 * Write a multicast message to anyone listening in the initial network
497 * copy it anyways. 694 * namespace. This function doesn't consume an skb as might be expected since
695 * it has to copy it anyways.
498 */ 696 */
499static void kauditd_send_multicast_skb(struct sk_buff *skb) 697static void kauditd_send_multicast_skb(struct sk_buff *skb)
500{ 698{
501 struct sk_buff *copy; 699 struct sk_buff *copy;
502 struct audit_net *aunet = net_generic(&init_net, audit_net_id); 700 struct sock *sock = audit_get_sk(&init_net);
503 struct sock *sock = aunet->nlsk;
504 struct nlmsghdr *nlh; 701 struct nlmsghdr *nlh;
505 702
703 /* NOTE: we are not taking an additional reference for init_net since
704 * we don't have to worry about it going away */
705
506 if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG)) 706 if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG))
507 return; 707 return;
508 708
@@ -526,149 +726,75 @@ static void kauditd_send_multicast_skb(struct sk_buff *skb)
526} 726}
527 727
528/** 728/**
529 * kauditd_wake_condition - Return true when it is time to wake kauditd_thread 729 * kauditd_thread - Worker thread to send audit records to userspace
530 * 730 * @dummy: unused
531 * Description:
532 * This function is for use by the wait_event_freezable() call in
533 * kauditd_thread().
534 */ 731 */
535static int kauditd_wake_condition(void)
536{
537 static int pid_last = 0;
538 int rc;
539 int pid = audit_pid;
540
541 /* wake on new messages or a change in the connected auditd */
542 rc = skb_queue_len(&audit_queue) || (pid && pid != pid_last);
543 if (rc)
544 pid_last = pid;
545
546 return rc;
547}
548
549static int kauditd_thread(void *dummy) 732static int kauditd_thread(void *dummy)
550{ 733{
551 int rc; 734 int rc;
552 int auditd = 0; 735 u32 portid = 0;
553 int reschedule = 0; 736 struct net *net = NULL;
554 struct sk_buff *skb; 737 struct sock *sk = NULL;
555 struct nlmsghdr *nlh;
556 738
557#define UNICAST_RETRIES 5 739#define UNICAST_RETRIES 5
558#define AUDITD_BAD(x,y) \
559 ((x) == -ECONNREFUSED || (x) == -EPERM || ++(y) >= UNICAST_RETRIES)
560
561 /* NOTE: we do invalidate the auditd connection flag on any sending
562 * errors, but we only "restore" the connection flag at specific places
563 * in the loop in order to help ensure proper ordering of audit
564 * records */
565 740
566 set_freezable(); 741 set_freezable();
567 while (!kthread_should_stop()) { 742 while (!kthread_should_stop()) {
568 /* NOTE: possible area for future improvement is to look at 743 /* NOTE: see the lock comments in auditd_send_unicast_skb() */
569 * the hold and retry queues, since only this thread 744 rcu_read_lock();
570 * has access to these queues we might be able to do 745 if (!auditd_conn.pid) {
571 * our own queuing and skip some/all of the locking */ 746 rcu_read_unlock();
572 747 goto main_queue;
573 /* NOTE: it might be a fun experiment to split the hold and 748 }
574 * retry queue handling to another thread, but the 749 net = auditd_conn.net;
575 * synchronization issues and other overhead might kill 750 get_net(net);
576 * any performance gains */ 751 sk = audit_get_sk(net);
752 portid = auditd_conn.portid;
753 rcu_read_unlock();
577 754
578 /* attempt to flush the hold queue */ 755 /* attempt to flush the hold queue */
579 while (auditd && (skb = skb_dequeue(&audit_hold_queue))) { 756 rc = kauditd_send_queue(sk, portid,
580 rc = kauditd_send_unicast_skb(skb); 757 &audit_hold_queue, UNICAST_RETRIES,
581 if (rc) { 758 NULL, kauditd_rehold_skb);
582 /* requeue to the same spot */ 759 if (rc < 0) {
583 skb_queue_head(&audit_hold_queue, skb); 760 sk = NULL;
584 761 goto main_queue;
585 auditd = 0;
586 if (AUDITD_BAD(rc, reschedule)) {
587 mutex_lock(&audit_cmd_mutex);
588 auditd_reset();
589 mutex_unlock(&audit_cmd_mutex);
590 reschedule = 0;
591 }
592 } else
593 /* we were able to send successfully */
594 reschedule = 0;
595 } 762 }
596 763
597 /* attempt to flush the retry queue */ 764 /* attempt to flush the retry queue */
598 while (auditd && (skb = skb_dequeue(&audit_retry_queue))) { 765 rc = kauditd_send_queue(sk, portid,
599 rc = kauditd_send_unicast_skb(skb); 766 &audit_retry_queue, UNICAST_RETRIES,
600 if (rc) { 767 NULL, kauditd_hold_skb);
601 auditd = 0; 768 if (rc < 0) {
602 if (AUDITD_BAD(rc, reschedule)) { 769 sk = NULL;
603 kauditd_hold_skb(skb); 770 goto main_queue;
604 mutex_lock(&audit_cmd_mutex);
605 auditd_reset();
606 mutex_unlock(&audit_cmd_mutex);
607 reschedule = 0;
608 } else
609 /* temporary problem (we hope), queue
610 * to the same spot and retry */
611 skb_queue_head(&audit_retry_queue, skb);
612 } else
613 /* we were able to send successfully */
614 reschedule = 0;
615 } 771 }
616 772
617 /* standard queue processing, try to be as quick as possible */ 773main_queue:
618quick_loop: 774 /* process the main queue - do the multicast send and attempt
619 skb = skb_dequeue(&audit_queue); 775 * unicast, dump failed record sends to the retry queue; if
620 if (skb) { 776 * sk == NULL due to previous failures we will just do the
621 /* setup the netlink header, see the comments in 777 * multicast send and move the record to the retry queue */
622 * kauditd_send_multicast_skb() for length quirks */ 778 kauditd_send_queue(sk, portid, &audit_queue, 1,
623 nlh = nlmsg_hdr(skb); 779 kauditd_send_multicast_skb,
624 nlh->nlmsg_len = skb->len - NLMSG_HDRLEN; 780 kauditd_retry_skb);
625 781
626 /* attempt to send to any multicast listeners */ 782 /* drop our netns reference, no auditd sends past this line */
627 kauditd_send_multicast_skb(skb); 783 if (net) {
628 784 put_net(net);
629 /* attempt to send to auditd, queue on failure */ 785 net = NULL;
630 if (auditd) {
631 rc = kauditd_send_unicast_skb(skb);
632 if (rc) {
633 auditd = 0;
634 if (AUDITD_BAD(rc, reschedule)) {
635 mutex_lock(&audit_cmd_mutex);
636 auditd_reset();
637 mutex_unlock(&audit_cmd_mutex);
638 reschedule = 0;
639 }
640
641 /* move to the retry queue */
642 kauditd_retry_skb(skb);
643 } else
644 /* everything is working so go fast! */
645 goto quick_loop;
646 } else if (reschedule)
647 /* we are currently having problems, move to
648 * the retry queue */
649 kauditd_retry_skb(skb);
650 else
651 /* dump the message via printk and hold it */
652 kauditd_hold_skb(skb);
653 } else {
654 /* we have flushed the backlog so wake everyone */
655 wake_up(&audit_backlog_wait);
656
657 /* if everything is okay with auditd (if present), go
658 * to sleep until there is something new in the queue
659 * or we have a change in the connected auditd;
660 * otherwise simply reschedule to give things a chance
661 * to recover */
662 if (reschedule) {
663 set_current_state(TASK_INTERRUPTIBLE);
664 schedule();
665 } else
666 wait_event_freezable(kauditd_wait,
667 kauditd_wake_condition());
668
669 /* update the auditd connection status */
670 auditd = (audit_pid ? 1 : 0);
671 } 786 }
787 sk = NULL;
788
789 /* we have processed all the queues so wake everyone */
790 wake_up(&audit_backlog_wait);
791
792 /* NOTE: we want to wake up if there is anything on the queue,
793 * regardless of if an auditd is connected, as we need to
794 * do the multicast send and rotate records from the
795 * main queue to the retry/hold queues */
796 wait_event_freezable(kauditd_wait,
797 (skb_queue_len(&audit_queue) ? 1 : 0));
672 } 798 }
673 799
674 return 0; 800 return 0;
@@ -678,17 +804,16 @@ int audit_send_list(void *_dest)
678{ 804{
679 struct audit_netlink_list *dest = _dest; 805 struct audit_netlink_list *dest = _dest;
680 struct sk_buff *skb; 806 struct sk_buff *skb;
681 struct net *net = dest->net; 807 struct sock *sk = audit_get_sk(dest->net);
682 struct audit_net *aunet = net_generic(net, audit_net_id);
683 808
684 /* wait for parent to finish and send an ACK */ 809 /* wait for parent to finish and send an ACK */
685 mutex_lock(&audit_cmd_mutex); 810 mutex_lock(&audit_cmd_mutex);
686 mutex_unlock(&audit_cmd_mutex); 811 mutex_unlock(&audit_cmd_mutex);
687 812
688 while ((skb = __skb_dequeue(&dest->q)) != NULL) 813 while ((skb = __skb_dequeue(&dest->q)) != NULL)
689 netlink_unicast(aunet->nlsk, skb, dest->portid, 0); 814 netlink_unicast(sk, skb, dest->portid, 0);
690 815
691 put_net(net); 816 put_net(dest->net);
692 kfree(dest); 817 kfree(dest);
693 818
694 return 0; 819 return 0;
@@ -722,16 +847,15 @@ out_kfree_skb:
722static int audit_send_reply_thread(void *arg) 847static int audit_send_reply_thread(void *arg)
723{ 848{
724 struct audit_reply *reply = (struct audit_reply *)arg; 849 struct audit_reply *reply = (struct audit_reply *)arg;
725 struct net *net = reply->net; 850 struct sock *sk = audit_get_sk(reply->net);
726 struct audit_net *aunet = net_generic(net, audit_net_id);
727 851
728 mutex_lock(&audit_cmd_mutex); 852 mutex_lock(&audit_cmd_mutex);
729 mutex_unlock(&audit_cmd_mutex); 853 mutex_unlock(&audit_cmd_mutex);
730 854
731 /* Ignore failure. It'll only happen if the sender goes away, 855 /* Ignore failure. It'll only happen if the sender goes away,
732 because our timeout is set to infinite. */ 856 because our timeout is set to infinite. */
733 netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0); 857 netlink_unicast(sk, reply->skb, reply->portid, 0);
734 put_net(net); 858 put_net(reply->net);
735 kfree(reply); 859 kfree(reply);
736 return 0; 860 return 0;
737} 861}
@@ -949,12 +1073,12 @@ static int audit_set_feature(struct sk_buff *skb)
949 1073
950static int audit_replace(pid_t pid) 1074static int audit_replace(pid_t pid)
951{ 1075{
952 struct sk_buff *skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0, 1076 struct sk_buff *skb;
953 &pid, sizeof(pid));
954 1077
1078 skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0, &pid, sizeof(pid));
955 if (!skb) 1079 if (!skb)
956 return -ENOMEM; 1080 return -ENOMEM;
957 return netlink_unicast(audit_sock, skb, audit_nlk_portid, 0); 1081 return auditd_send_unicast_skb(skb);
958} 1082}
959 1083
960static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 1084static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
@@ -981,7 +1105,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
981 memset(&s, 0, sizeof(s)); 1105 memset(&s, 0, sizeof(s));
982 s.enabled = audit_enabled; 1106 s.enabled = audit_enabled;
983 s.failure = audit_failure; 1107 s.failure = audit_failure;
984 s.pid = audit_pid; 1108 rcu_read_lock();
1109 s.pid = auditd_conn.pid;
1110 rcu_read_unlock();
985 s.rate_limit = audit_rate_limit; 1111 s.rate_limit = audit_rate_limit;
986 s.backlog_limit = audit_backlog_limit; 1112 s.backlog_limit = audit_backlog_limit;
987 s.lost = atomic_read(&audit_lost); 1113 s.lost = atomic_read(&audit_lost);
@@ -1014,30 +1140,44 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1014 * from the initial pid namespace, but something 1140 * from the initial pid namespace, but something
1015 * to keep in mind if this changes */ 1141 * to keep in mind if this changes */
1016 int new_pid = s.pid; 1142 int new_pid = s.pid;
1143 pid_t auditd_pid;
1017 pid_t requesting_pid = task_tgid_vnr(current); 1144 pid_t requesting_pid = task_tgid_vnr(current);
1018 1145
1019 if ((!new_pid) && (requesting_pid != audit_pid)) { 1146 /* test the auditd connection */
1020 audit_log_config_change("audit_pid", new_pid, audit_pid, 0); 1147 audit_replace(requesting_pid);
1148
1149 rcu_read_lock();
1150 auditd_pid = auditd_conn.pid;
1151 /* only the current auditd can unregister itself */
1152 if ((!new_pid) && (requesting_pid != auditd_pid)) {
1153 rcu_read_unlock();
1154 audit_log_config_change("audit_pid", new_pid,
1155 auditd_pid, 0);
1021 return -EACCES; 1156 return -EACCES;
1022 } 1157 }
1023 if (audit_pid && new_pid && 1158 /* replacing a healthy auditd is not allowed */
1024 audit_replace(requesting_pid) != -ECONNREFUSED) { 1159 if (auditd_pid && new_pid) {
1025 audit_log_config_change("audit_pid", new_pid, audit_pid, 0); 1160 rcu_read_unlock();
1161 audit_log_config_change("audit_pid", new_pid,
1162 auditd_pid, 0);
1026 return -EEXIST; 1163 return -EEXIST;
1027 } 1164 }
1165 rcu_read_unlock();
1166
1028 if (audit_enabled != AUDIT_OFF) 1167 if (audit_enabled != AUDIT_OFF)
1029 audit_log_config_change("audit_pid", new_pid, audit_pid, 1); 1168 audit_log_config_change("audit_pid", new_pid,
1169 auditd_pid, 1);
1170
1030 if (new_pid) { 1171 if (new_pid) {
1031 if (audit_sock) 1172 /* register a new auditd connection */
1032 sock_put(audit_sock); 1173 auditd_set(new_pid,
1033 audit_pid = new_pid; 1174 NETLINK_CB(skb).portid,
1034 audit_nlk_portid = NETLINK_CB(skb).portid; 1175 sock_net(NETLINK_CB(skb).sk));
1035 sock_hold(skb->sk); 1176 /* try to process any backlog */
1036 audit_sock = skb->sk; 1177 wake_up_interruptible(&kauditd_wait);
1037 } else { 1178 } else
1179 /* unregister the auditd connection */
1038 auditd_reset(); 1180 auditd_reset();
1039 }
1040 wake_up_interruptible(&kauditd_wait);
1041 } 1181 }
1042 if (s.mask & AUDIT_STATUS_RATE_LIMIT) { 1182 if (s.mask & AUDIT_STATUS_RATE_LIMIT) {
1043 err = audit_set_rate_limit(s.rate_limit); 1183 err = audit_set_rate_limit(s.rate_limit);
@@ -1090,7 +1230,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1090 if (err) 1230 if (err)
1091 break; 1231 break;
1092 } 1232 }
1093 mutex_unlock(&audit_cmd_mutex);
1094 audit_log_common_recv_msg(&ab, msg_type); 1233 audit_log_common_recv_msg(&ab, msg_type);
1095 if (msg_type != AUDIT_USER_TTY) 1234 if (msg_type != AUDIT_USER_TTY)
1096 audit_log_format(ab, " msg='%.*s'", 1235 audit_log_format(ab, " msg='%.*s'",
@@ -1108,7 +1247,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1108 } 1247 }
1109 audit_set_portid(ab, NETLINK_CB(skb).portid); 1248 audit_set_portid(ab, NETLINK_CB(skb).portid);
1110 audit_log_end(ab); 1249 audit_log_end(ab);
1111 mutex_lock(&audit_cmd_mutex);
1112 } 1250 }
1113 break; 1251 break;
1114 case AUDIT_ADD_RULE: 1252 case AUDIT_ADD_RULE:
@@ -1298,26 +1436,26 @@ static int __net_init audit_net_init(struct net *net)
1298 1436
1299 struct audit_net *aunet = net_generic(net, audit_net_id); 1437 struct audit_net *aunet = net_generic(net, audit_net_id);
1300 1438
1301 aunet->nlsk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg); 1439 aunet->sk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg);
1302 if (aunet->nlsk == NULL) { 1440 if (aunet->sk == NULL) {
1303 audit_panic("cannot initialize netlink socket in namespace"); 1441 audit_panic("cannot initialize netlink socket in namespace");
1304 return -ENOMEM; 1442 return -ENOMEM;
1305 } 1443 }
1306 aunet->nlsk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; 1444 aunet->sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
1445
1307 return 0; 1446 return 0;
1308} 1447}
1309 1448
1310static void __net_exit audit_net_exit(struct net *net) 1449static void __net_exit audit_net_exit(struct net *net)
1311{ 1450{
1312 struct audit_net *aunet = net_generic(net, audit_net_id); 1451 struct audit_net *aunet = net_generic(net, audit_net_id);
1313 struct sock *sock = aunet->nlsk; 1452
1314 mutex_lock(&audit_cmd_mutex); 1453 rcu_read_lock();
1315 if (sock == audit_sock) 1454 if (net == auditd_conn.net)
1316 auditd_reset(); 1455 auditd_reset();
1317 mutex_unlock(&audit_cmd_mutex); 1456 rcu_read_unlock();
1318 1457
1319 netlink_kernel_release(sock); 1458 netlink_kernel_release(aunet->sk);
1320 aunet->nlsk = NULL;
1321} 1459}
1322 1460
1323static struct pernet_operations audit_net_ops __net_initdata = { 1461static struct pernet_operations audit_net_ops __net_initdata = {
@@ -1335,20 +1473,24 @@ static int __init audit_init(void)
1335 if (audit_initialized == AUDIT_DISABLED) 1473 if (audit_initialized == AUDIT_DISABLED)
1336 return 0; 1474 return 0;
1337 1475
1338 pr_info("initializing netlink subsys (%s)\n", 1476 memset(&auditd_conn, 0, sizeof(auditd_conn));
1339 audit_default ? "enabled" : "disabled"); 1477 spin_lock_init(&auditd_conn.lock);
1340 register_pernet_subsys(&audit_net_ops);
1341 1478
1342 skb_queue_head_init(&audit_queue); 1479 skb_queue_head_init(&audit_queue);
1343 skb_queue_head_init(&audit_retry_queue); 1480 skb_queue_head_init(&audit_retry_queue);
1344 skb_queue_head_init(&audit_hold_queue); 1481 skb_queue_head_init(&audit_hold_queue);
1345 audit_initialized = AUDIT_INITIALIZED;
1346 audit_enabled = audit_default;
1347 audit_ever_enabled |= !!audit_default;
1348 1482
1349 for (i = 0; i < AUDIT_INODE_BUCKETS; i++) 1483 for (i = 0; i < AUDIT_INODE_BUCKETS; i++)
1350 INIT_LIST_HEAD(&audit_inode_hash[i]); 1484 INIT_LIST_HEAD(&audit_inode_hash[i]);
1351 1485
1486 pr_info("initializing netlink subsys (%s)\n",
1487 audit_default ? "enabled" : "disabled");
1488 register_pernet_subsys(&audit_net_ops);
1489
1490 audit_initialized = AUDIT_INITIALIZED;
1491 audit_enabled = audit_default;
1492 audit_ever_enabled |= !!audit_default;
1493
1352 kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd"); 1494 kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd");
1353 if (IS_ERR(kauditd_task)) { 1495 if (IS_ERR(kauditd_task)) {
1354 int err = PTR_ERR(kauditd_task); 1496 int err = PTR_ERR(kauditd_task);
@@ -1519,20 +1661,16 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
1519 if (unlikely(!audit_filter(type, AUDIT_FILTER_TYPE))) 1661 if (unlikely(!audit_filter(type, AUDIT_FILTER_TYPE)))
1520 return NULL; 1662 return NULL;
1521 1663
1522 /* don't ever fail/sleep on these two conditions: 1664 /* NOTE: don't ever fail/sleep on these two conditions:
1523 * 1. auditd generated record - since we need auditd to drain the 1665 * 1. auditd generated record - since we need auditd to drain the
1524 * queue; also, when we are checking for auditd, compare PIDs using 1666 * queue; also, when we are checking for auditd, compare PIDs using
1525 * task_tgid_vnr() since auditd_pid is set in audit_receive_msg() 1667 * task_tgid_vnr() since auditd_pid is set in audit_receive_msg()
1526 * using a PID anchored in the caller's namespace 1668 * using a PID anchored in the caller's namespace
1527 * 2. audit command message - record types 1000 through 1099 inclusive 1669 * 2. generator holding the audit_cmd_mutex - we don't want to block
1528 * are command messages/records used to manage the kernel subsystem 1670 * while holding the mutex */
1529 * and the audit userspace, blocking on these messages could cause 1671 if (!(auditd_test_task(current) ||
1530 * problems under load so don't do it (note: not all of these 1672 (current == __mutex_owner(&audit_cmd_mutex)))) {
1531 * command types are valid as record types, but it is quicker to 1673 long stime = audit_backlog_wait_time;
1532 * just check two ints than a series of ints in a if/switch stmt) */
1533 if (!((audit_pid && audit_pid == task_tgid_vnr(current)) ||
1534 (type >= 1000 && type <= 1099))) {
1535 long sleep_time = audit_backlog_wait_time;
1536 1674
1537 while (audit_backlog_limit && 1675 while (audit_backlog_limit &&
1538 (skb_queue_len(&audit_queue) > audit_backlog_limit)) { 1676 (skb_queue_len(&audit_queue) > audit_backlog_limit)) {
@@ -1541,14 +1679,13 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
1541 1679
1542 /* sleep if we are allowed and we haven't exhausted our 1680 /* sleep if we are allowed and we haven't exhausted our
1543 * backlog wait limit */ 1681 * backlog wait limit */
1544 if ((gfp_mask & __GFP_DIRECT_RECLAIM) && 1682 if (gfpflags_allow_blocking(gfp_mask) && (stime > 0)) {
1545 (sleep_time > 0)) {
1546 DECLARE_WAITQUEUE(wait, current); 1683 DECLARE_WAITQUEUE(wait, current);
1547 1684
1548 add_wait_queue_exclusive(&audit_backlog_wait, 1685 add_wait_queue_exclusive(&audit_backlog_wait,
1549 &wait); 1686 &wait);
1550 set_current_state(TASK_UNINTERRUPTIBLE); 1687 set_current_state(TASK_UNINTERRUPTIBLE);
1551 sleep_time = schedule_timeout(sleep_time); 1688 stime = schedule_timeout(stime);
1552 remove_wait_queue(&audit_backlog_wait, &wait); 1689 remove_wait_queue(&audit_backlog_wait, &wait);
1553 } else { 1690 } else {
1554 if (audit_rate_check() && printk_ratelimit()) 1691 if (audit_rate_check() && printk_ratelimit())
@@ -2127,15 +2264,27 @@ out:
2127 */ 2264 */
2128void audit_log_end(struct audit_buffer *ab) 2265void audit_log_end(struct audit_buffer *ab)
2129{ 2266{
2267 struct sk_buff *skb;
2268 struct nlmsghdr *nlh;
2269
2130 if (!ab) 2270 if (!ab)
2131 return; 2271 return;
2132 if (!audit_rate_check()) { 2272
2133 audit_log_lost("rate limit exceeded"); 2273 if (audit_rate_check()) {
2134 } else { 2274 skb = ab->skb;
2135 skb_queue_tail(&audit_queue, ab->skb);
2136 wake_up_interruptible(&kauditd_wait);
2137 ab->skb = NULL; 2275 ab->skb = NULL;
2138 } 2276
2277 /* setup the netlink header, see the comments in
2278 * kauditd_send_multicast_skb() for length quirks */
2279 nlh = nlmsg_hdr(skb);
2280 nlh->nlmsg_len = skb->len - NLMSG_HDRLEN;
2281
2282 /* queue the netlink packet and poke the kauditd thread */
2283 skb_queue_tail(&audit_queue, skb);
2284 wake_up_interruptible(&kauditd_wait);
2285 } else
2286 audit_log_lost("rate limit exceeded");
2287
2139 audit_buffer_free(ab); 2288 audit_buffer_free(ab);
2140} 2289}
2141 2290
diff --git a/kernel/audit.h b/kernel/audit.h
index ca579880303a..0f1cf6d1878a 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -218,7 +218,7 @@ extern void audit_log_name(struct audit_context *context,
218 struct audit_names *n, const struct path *path, 218 struct audit_names *n, const struct path *path,
219 int record_num, int *call_panic); 219 int record_num, int *call_panic);
220 220
221extern int audit_pid; 221extern int auditd_test_task(const struct task_struct *task);
222 222
223#define AUDIT_INODE_BUCKETS 32 223#define AUDIT_INODE_BUCKETS 32
224extern struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; 224extern struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS];
@@ -250,10 +250,6 @@ struct audit_netlink_list {
250 250
251int audit_send_list(void *); 251int audit_send_list(void *);
252 252
253struct audit_net {
254 struct sock *nlsk;
255};
256
257extern int selinux_audit_rule_update(void); 253extern int selinux_audit_rule_update(void);
258 254
259extern struct mutex audit_filter_mutex; 255extern struct mutex audit_filter_mutex;
@@ -340,8 +336,7 @@ extern int audit_filter(int msgtype, unsigned int listtype);
340extern int __audit_signal_info(int sig, struct task_struct *t); 336extern int __audit_signal_info(int sig, struct task_struct *t);
341static inline int audit_signal_info(int sig, struct task_struct *t) 337static inline int audit_signal_info(int sig, struct task_struct *t)
342{ 338{
343 if (unlikely((audit_pid && t->tgid == audit_pid) || 339 if (auditd_test_task(t) || (audit_signals && !audit_dummy_context()))
344 (audit_signals && !audit_dummy_context())))
345 return __audit_signal_info(sig, t); 340 return __audit_signal_info(sig, t);
346 return 0; 341 return 0;
347} 342}
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index d6a8de5f8fa3..e59ffc7fc522 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -762,7 +762,7 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk,
762 struct audit_entry *e; 762 struct audit_entry *e;
763 enum audit_state state; 763 enum audit_state state;
764 764
765 if (audit_pid && tsk->tgid == audit_pid) 765 if (auditd_test_task(tsk))
766 return AUDIT_DISABLED; 766 return AUDIT_DISABLED;
767 767
768 rcu_read_lock(); 768 rcu_read_lock();
@@ -816,7 +816,7 @@ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
816{ 816{
817 struct audit_names *n; 817 struct audit_names *n;
818 818
819 if (audit_pid && tsk->tgid == audit_pid) 819 if (auditd_test_task(tsk))
820 return; 820 return;
821 821
822 rcu_read_lock(); 822 rcu_read_lock();
@@ -2256,7 +2256,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
2256 struct audit_context *ctx = tsk->audit_context; 2256 struct audit_context *ctx = tsk->audit_context;
2257 kuid_t uid = current_uid(), t_uid = task_uid(t); 2257 kuid_t uid = current_uid(), t_uid = task_uid(t);
2258 2258
2259 if (audit_pid && t->tgid == audit_pid) { 2259 if (auditd_test_task(t)) {
2260 if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { 2260 if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
2261 audit_sig_pid = task_tgid_nr(tsk); 2261 audit_sig_pid = task_tgid_nr(tsk);
2262 if (uid_valid(tsk->loginuid)) 2262 if (uid_valid(tsk->loginuid))
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 09923cc5c7c7..62e1e447ded9 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -767,38 +767,56 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
767 } 767 }
768} 768}
769 769
770static int check_ptr_alignment(struct bpf_verifier_env *env, 770static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
771 struct bpf_reg_state *reg, int off, int size) 771 int off, int size)
772{ 772{
773 if (reg->type != PTR_TO_PACKET && reg->type != PTR_TO_MAP_VALUE_ADJ) {
774 if (off % size != 0) {
775 verbose("misaligned access off %d size %d\n",
776 off, size);
777 return -EACCES;
778 } else {
779 return 0;
780 }
781 }
782
783 if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
784 /* misaligned access to packet is ok on x86,arm,arm64 */
785 return 0;
786
787 if (reg->id && size != 1) { 773 if (reg->id && size != 1) {
788 verbose("Unknown packet alignment. Only byte-sized access allowed\n"); 774 verbose("Unknown alignment. Only byte-sized access allowed in packet access.\n");
789 return -EACCES; 775 return -EACCES;
790 } 776 }
791 777
792 /* skb->data is NET_IP_ALIGN-ed */ 778 /* skb->data is NET_IP_ALIGN-ed */
793 if (reg->type == PTR_TO_PACKET && 779 if ((NET_IP_ALIGN + reg->off + off) % size != 0) {
794 (NET_IP_ALIGN + reg->off + off) % size != 0) {
795 verbose("misaligned packet access off %d+%d+%d size %d\n", 780 verbose("misaligned packet access off %d+%d+%d size %d\n",
796 NET_IP_ALIGN, reg->off, off, size); 781 NET_IP_ALIGN, reg->off, off, size);
797 return -EACCES; 782 return -EACCES;
798 } 783 }
784
799 return 0; 785 return 0;
800} 786}
801 787
788static int check_val_ptr_alignment(const struct bpf_reg_state *reg,
789 int size)
790{
791 if (size != 1) {
792 verbose("Unknown alignment. Only byte-sized access allowed in value access.\n");
793 return -EACCES;
794 }
795
796 return 0;
797}
798
799static int check_ptr_alignment(const struct bpf_reg_state *reg,
800 int off, int size)
801{
802 switch (reg->type) {
803 case PTR_TO_PACKET:
804 return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
805 check_pkt_ptr_alignment(reg, off, size);
806 case PTR_TO_MAP_VALUE_ADJ:
807 return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
808 check_val_ptr_alignment(reg, size);
809 default:
810 if (off % size != 0) {
811 verbose("misaligned access off %d size %d\n",
812 off, size);
813 return -EACCES;
814 }
815
816 return 0;
817 }
818}
819
802/* check whether memory at (regno + off) is accessible for t = (read | write) 820/* check whether memory at (regno + off) is accessible for t = (read | write)
803 * if t==write, value_regno is a register which value is stored into memory 821 * if t==write, value_regno is a register which value is stored into memory
804 * if t==read, value_regno is a register which will receive the value from memory 822 * if t==read, value_regno is a register which will receive the value from memory
@@ -820,7 +838,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
820 if (size < 0) 838 if (size < 0)
821 return size; 839 return size;
822 840
823 err = check_ptr_alignment(env, reg, off, size); 841 err = check_ptr_alignment(reg, off, size);
824 if (err) 842 if (err)
825 return err; 843 return err;
826 844
@@ -1938,6 +1956,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
1938 * register as unknown. 1956 * register as unknown.
1939 */ 1957 */
1940 if (env->allow_ptr_leaks && 1958 if (env->allow_ptr_leaks &&
1959 BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD &&
1941 (dst_reg->type == PTR_TO_MAP_VALUE || 1960 (dst_reg->type == PTR_TO_MAP_VALUE ||
1942 dst_reg->type == PTR_TO_MAP_VALUE_ADJ)) 1961 dst_reg->type == PTR_TO_MAP_VALUE_ADJ))
1943 dst_reg->type = PTR_TO_MAP_VALUE_ADJ; 1962 dst_reg->type = PTR_TO_MAP_VALUE_ADJ;
@@ -1986,14 +2005,15 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
1986 2005
1987 for (i = 0; i < MAX_BPF_REG; i++) 2006 for (i = 0; i < MAX_BPF_REG; i++)
1988 if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id) 2007 if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
1989 regs[i].range = dst_reg->off; 2008 /* keep the maximum range already checked */
2009 regs[i].range = max(regs[i].range, dst_reg->off);
1990 2010
1991 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 2011 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
1992 if (state->stack_slot_type[i] != STACK_SPILL) 2012 if (state->stack_slot_type[i] != STACK_SPILL)
1993 continue; 2013 continue;
1994 reg = &state->spilled_regs[i / BPF_REG_SIZE]; 2014 reg = &state->spilled_regs[i / BPF_REG_SIZE];
1995 if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id) 2015 if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
1996 reg->range = dst_reg->off; 2016 reg->range = max(reg->range, dst_reg->off);
1997 } 2017 }
1998} 2018}
1999 2019
diff --git a/kernel/padata.c b/kernel/padata.c
index 05316c9f32da..3202aa17492c 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -186,19 +186,20 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
186 186
187 reorder = &next_queue->reorder; 187 reorder = &next_queue->reorder;
188 188
189 spin_lock(&reorder->lock);
189 if (!list_empty(&reorder->list)) { 190 if (!list_empty(&reorder->list)) {
190 padata = list_entry(reorder->list.next, 191 padata = list_entry(reorder->list.next,
191 struct padata_priv, list); 192 struct padata_priv, list);
192 193
193 spin_lock(&reorder->lock);
194 list_del_init(&padata->list); 194 list_del_init(&padata->list);
195 atomic_dec(&pd->reorder_objects); 195 atomic_dec(&pd->reorder_objects);
196 spin_unlock(&reorder->lock);
197 196
198 pd->processed++; 197 pd->processed++;
199 198
199 spin_unlock(&reorder->lock);
200 goto out; 200 goto out;
201 } 201 }
202 spin_unlock(&reorder->lock);
202 203
203 if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) { 204 if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
204 padata = ERR_PTR(-ENODATA); 205 padata = ERR_PTR(-ENODATA);
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index a08795e21628..00a45c45beca 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -96,10 +96,10 @@ static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
96static int __sched_clock_stable_early = 1; 96static int __sched_clock_stable_early = 1;
97 97
98/* 98/*
99 * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset 99 * We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
100 */ 100 */
101static __read_mostly u64 raw_offset; 101__read_mostly u64 __sched_clock_offset;
102static __read_mostly u64 gtod_offset; 102static __read_mostly u64 __gtod_offset;
103 103
104struct sched_clock_data { 104struct sched_clock_data {
105 u64 tick_raw; 105 u64 tick_raw;
@@ -131,17 +131,24 @@ static void __set_sched_clock_stable(void)
131 /* 131 /*
132 * Attempt to make the (initial) unstable->stable transition continuous. 132 * Attempt to make the (initial) unstable->stable transition continuous.
133 */ 133 */
134 raw_offset = (scd->tick_gtod + gtod_offset) - (scd->tick_raw); 134 __sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
135 135
136 printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n", 136 printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
137 scd->tick_gtod, gtod_offset, 137 scd->tick_gtod, __gtod_offset,
138 scd->tick_raw, raw_offset); 138 scd->tick_raw, __sched_clock_offset);
139 139
140 static_branch_enable(&__sched_clock_stable); 140 static_branch_enable(&__sched_clock_stable);
141 tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE); 141 tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
142} 142}
143 143
144static void __clear_sched_clock_stable(struct work_struct *work) 144static void __sched_clock_work(struct work_struct *work)
145{
146 static_branch_disable(&__sched_clock_stable);
147}
148
149static DECLARE_WORK(sched_clock_work, __sched_clock_work);
150
151static void __clear_sched_clock_stable(void)
145{ 152{
146 struct sched_clock_data *scd = this_scd(); 153 struct sched_clock_data *scd = this_scd();
147 154
@@ -154,17 +161,17 @@ static void __clear_sched_clock_stable(struct work_struct *work)
154 * 161 *
155 * Still do what we can. 162 * Still do what we can.
156 */ 163 */
157 gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod); 164 __gtod_offset = (scd->tick_raw + __sched_clock_offset) - (scd->tick_gtod);
158 165
159 printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n", 166 printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
160 scd->tick_gtod, gtod_offset, 167 scd->tick_gtod, __gtod_offset,
161 scd->tick_raw, raw_offset); 168 scd->tick_raw, __sched_clock_offset);
162 169
163 static_branch_disable(&__sched_clock_stable);
164 tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE); 170 tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
165}
166 171
167static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable); 172 if (sched_clock_stable())
173 schedule_work(&sched_clock_work);
174}
168 175
169void clear_sched_clock_stable(void) 176void clear_sched_clock_stable(void)
170{ 177{
@@ -173,7 +180,7 @@ void clear_sched_clock_stable(void)
173 smp_mb(); /* matches sched_clock_init_late() */ 180 smp_mb(); /* matches sched_clock_init_late() */
174 181
175 if (sched_clock_running == 2) 182 if (sched_clock_running == 2)
176 schedule_work(&sched_clock_work); 183 __clear_sched_clock_stable();
177} 184}
178 185
179void sched_clock_init_late(void) 186void sched_clock_init_late(void)
@@ -214,7 +221,7 @@ static inline u64 wrap_max(u64 x, u64 y)
214 */ 221 */
215static u64 sched_clock_local(struct sched_clock_data *scd) 222static u64 sched_clock_local(struct sched_clock_data *scd)
216{ 223{
217 u64 now, clock, old_clock, min_clock, max_clock; 224 u64 now, clock, old_clock, min_clock, max_clock, gtod;
218 s64 delta; 225 s64 delta;
219 226
220again: 227again:
@@ -231,9 +238,10 @@ again:
231 * scd->tick_gtod + TICK_NSEC); 238 * scd->tick_gtod + TICK_NSEC);
232 */ 239 */
233 240
234 clock = scd->tick_gtod + gtod_offset + delta; 241 gtod = scd->tick_gtod + __gtod_offset;
235 min_clock = wrap_max(scd->tick_gtod, old_clock); 242 clock = gtod + delta;
236 max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC); 243 min_clock = wrap_max(gtod, old_clock);
244 max_clock = wrap_max(old_clock, gtod + TICK_NSEC);
237 245
238 clock = wrap_max(clock, min_clock); 246 clock = wrap_max(clock, min_clock);
239 clock = wrap_min(clock, max_clock); 247 clock = wrap_min(clock, max_clock);
@@ -317,7 +325,7 @@ u64 sched_clock_cpu(int cpu)
317 u64 clock; 325 u64 clock;
318 326
319 if (sched_clock_stable()) 327 if (sched_clock_stable())
320 return sched_clock() + raw_offset; 328 return sched_clock() + __sched_clock_offset;
321 329
322 if (unlikely(!sched_clock_running)) 330 if (unlikely(!sched_clock_running))
323 return 0ull; 331 return 0ull;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index cd7cd489f739..54c577578da6 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -584,20 +584,14 @@ static int sugov_start(struct cpufreq_policy *policy)
584 for_each_cpu(cpu, policy->cpus) { 584 for_each_cpu(cpu, policy->cpus) {
585 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); 585 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
586 586
587 memset(sg_cpu, 0, sizeof(*sg_cpu));
587 sg_cpu->sg_policy = sg_policy; 588 sg_cpu->sg_policy = sg_policy;
588 if (policy_is_shared(policy)) { 589 sg_cpu->flags = SCHED_CPUFREQ_RT;
589 sg_cpu->util = 0; 590 sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
590 sg_cpu->max = 0; 591 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
591 sg_cpu->flags = SCHED_CPUFREQ_RT; 592 policy_is_shared(policy) ?
592 sg_cpu->last_update = 0; 593 sugov_update_shared :
593 sg_cpu->iowait_boost = 0; 594 sugov_update_single);
594 sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
595 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
596 sugov_update_shared);
597 } else {
598 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
599 sugov_update_single);
600 }
601 } 595 }
602 return 0; 596 return 0;
603} 597}
diff --git a/lib/syscall.c b/lib/syscall.c
index 17d5ff5fa6a3..2c6cd1b5c3ea 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -12,6 +12,7 @@ static int collect_syscall(struct task_struct *target, long *callno,
12 12
13 if (!try_get_task_stack(target)) { 13 if (!try_get_task_stack(target)) {
14 /* Task has no stack, so the task isn't in a syscall. */ 14 /* Task has no stack, so the task isn't in a syscall. */
15 *sp = *pc = 0;
15 *callno = -1; 16 *callno = -1;
16 return 0; 17 return 0;
17 } 18 }
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 0b1d3140fbb8..a25c9763fce1 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -20,6 +20,7 @@
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/kasan.h>
23 24
24/* 25/*
25 * Note: test functions are marked noinline so that their names appear in 26 * Note: test functions are marked noinline so that their names appear in
@@ -474,6 +475,12 @@ static noinline void __init use_after_scope_test(void)
474 475
475static int __init kmalloc_tests_init(void) 476static int __init kmalloc_tests_init(void)
476{ 477{
478 /*
479 * Temporarily enable multi-shot mode. Otherwise, we'd only get a
480 * report for the first case.
481 */
482 bool multishot = kasan_save_enable_multi_shot();
483
477 kmalloc_oob_right(); 484 kmalloc_oob_right();
478 kmalloc_oob_left(); 485 kmalloc_oob_left();
479 kmalloc_node_oob_right(); 486 kmalloc_node_oob_right();
@@ -499,6 +506,9 @@ static int __init kmalloc_tests_init(void)
499 ksize_unpoisons_memory(); 506 ksize_unpoisons_memory();
500 copy_user_test(); 507 copy_user_test();
501 use_after_scope_test(); 508 use_after_scope_test();
509
510 kasan_restore_multi_shot(multishot);
511
502 return -EAGAIN; 512 return -EAGAIN;
503} 513}
504 514
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3d0aab9ee80d..e5828875f7bb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4403,7 +4403,9 @@ int hugetlb_reserve_pages(struct inode *inode,
4403 return 0; 4403 return 0;
4404out_err: 4404out_err:
4405 if (!vma || vma->vm_flags & VM_MAYSHARE) 4405 if (!vma || vma->vm_flags & VM_MAYSHARE)
4406 region_abort(resv_map, from, to); 4406 /* Don't call region_abort if region_chg failed */
4407 if (chg >= 0)
4408 region_abort(resv_map, from, to);
4407 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 4409 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4408 kref_put(&resv_map->refs, resv_map_release); 4410 kref_put(&resv_map->refs, resv_map_release);
4409 return ret; 4411 return ret;
@@ -4651,6 +4653,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4651{ 4653{
4652 struct page *page = NULL; 4654 struct page *page = NULL;
4653 spinlock_t *ptl; 4655 spinlock_t *ptl;
4656 pte_t pte;
4654retry: 4657retry:
4655 ptl = pmd_lockptr(mm, pmd); 4658 ptl = pmd_lockptr(mm, pmd);
4656 spin_lock(ptl); 4659 spin_lock(ptl);
@@ -4660,12 +4663,13 @@ retry:
4660 */ 4663 */
4661 if (!pmd_huge(*pmd)) 4664 if (!pmd_huge(*pmd))
4662 goto out; 4665 goto out;
4663 if (pmd_present(*pmd)) { 4666 pte = huge_ptep_get((pte_t *)pmd);
4667 if (pte_present(pte)) {
4664 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); 4668 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4665 if (flags & FOLL_GET) 4669 if (flags & FOLL_GET)
4666 get_page(page); 4670 get_page(page);
4667 } else { 4671 } else {
4668 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) { 4672 if (is_hugetlb_entry_migration(pte)) {
4669 spin_unlock(ptl); 4673 spin_unlock(ptl);
4670 __migration_entry_wait(mm, (pte_t *)pmd, ptl); 4674 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4671 goto retry; 4675 goto retry;
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 1c260e6b3b3c..dd2dea8eb077 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -96,11 +96,6 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
96 << KASAN_SHADOW_SCALE_SHIFT); 96 << KASAN_SHADOW_SCALE_SHIFT);
97} 97}
98 98
99static inline bool kasan_report_enabled(void)
100{
101 return !current->kasan_depth;
102}
103
104void kasan_report(unsigned long addr, size_t size, 99void kasan_report(unsigned long addr, size_t size,
105 bool is_write, unsigned long ip); 100 bool is_write, unsigned long ip);
106void kasan_report_double_free(struct kmem_cache *cache, void *object, 101void kasan_report_double_free(struct kmem_cache *cache, void *object,
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index f479365530b6..ab42a0803f16 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -13,7 +13,9 @@
13 * 13 *
14 */ 14 */
15 15
16#include <linux/bitops.h>
16#include <linux/ftrace.h> 17#include <linux/ftrace.h>
18#include <linux/init.h>
17#include <linux/kernel.h> 19#include <linux/kernel.h>
18#include <linux/mm.h> 20#include <linux/mm.h>
19#include <linux/printk.h> 21#include <linux/printk.h>
@@ -293,6 +295,40 @@ static void kasan_report_error(struct kasan_access_info *info)
293 kasan_end_report(&flags); 295 kasan_end_report(&flags);
294} 296}
295 297
298static unsigned long kasan_flags;
299
300#define KASAN_BIT_REPORTED 0
301#define KASAN_BIT_MULTI_SHOT 1
302
303bool kasan_save_enable_multi_shot(void)
304{
305 return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
306}
307EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
308
309void kasan_restore_multi_shot(bool enabled)
310{
311 if (!enabled)
312 clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
313}
314EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
315
316static int __init kasan_set_multi_shot(char *str)
317{
318 set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
319 return 1;
320}
321__setup("kasan_multi_shot", kasan_set_multi_shot);
322
323static inline bool kasan_report_enabled(void)
324{
325 if (current->kasan_depth)
326 return false;
327 if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
328 return true;
329 return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
330}
331
296void kasan_report(unsigned long addr, size_t size, 332void kasan_report(unsigned long addr, size_t size,
297 bool is_write, unsigned long ip) 333 bool is_write, unsigned long ip)
298{ 334{
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 26c874e90b12..20036d4f9f13 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1416,7 +1416,7 @@ static void kmemleak_scan(void)
1416 /* data/bss scanning */ 1416 /* data/bss scanning */
1417 scan_large_block(_sdata, _edata); 1417 scan_large_block(_sdata, _edata);
1418 scan_large_block(__bss_start, __bss_stop); 1418 scan_large_block(__bss_start, __bss_stop);
1419 scan_large_block(__start_data_ro_after_init, __end_data_ro_after_init); 1419 scan_large_block(__start_ro_after_init, __end_ro_after_init);
1420 1420
1421#ifdef CONFIG_SMP 1421#ifdef CONFIG_SMP
1422 /* per-cpu sections scanning */ 1422 /* per-cpu sections scanning */
diff --git a/mm/migrate.c b/mm/migrate.c
index 9a0897a14d37..ed97c2c14fa8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -209,8 +209,11 @@ static int remove_migration_pte(struct page *page, struct vm_area_struct *vma,
209 209
210 VM_BUG_ON_PAGE(PageTail(page), page); 210 VM_BUG_ON_PAGE(PageTail(page), page);
211 while (page_vma_mapped_walk(&pvmw)) { 211 while (page_vma_mapped_walk(&pvmw)) {
212 new = page - pvmw.page->index + 212 if (PageKsm(page))
213 linear_page_index(vma, pvmw.address); 213 new = page;
214 else
215 new = page - pvmw.page->index +
216 linear_page_index(vma, pvmw.address);
214 217
215 get_page(new); 218 get_page(new);
216 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot))); 219 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
diff --git a/mm/rmap.c b/mm/rmap.c
index 49ed681ccc7b..f6838015810f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1159,7 +1159,7 @@ void page_add_file_rmap(struct page *page, bool compound)
1159 goto out; 1159 goto out;
1160 } 1160 }
1161 __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr); 1161 __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
1162 mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); 1162 mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, nr);
1163out: 1163out:
1164 unlock_page_memcg(page); 1164 unlock_page_memcg(page);
1165} 1165}
@@ -1199,7 +1199,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
1199 * pte lock(a spinlock) is held, which implies preemption disabled. 1199 * pte lock(a spinlock) is held, which implies preemption disabled.
1200 */ 1200 */
1201 __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr); 1201 __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
1202 mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); 1202 mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, -nr);
1203 1203
1204 if (unlikely(PageMlocked(page))) 1204 if (unlikely(PageMlocked(page)))
1205 clear_page_mlock(page); 1205 clear_page_mlock(page);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index b1947f0cbee2..89f95396ec46 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1764,7 +1764,7 @@ static int vmstat_cpu_dead(unsigned int cpu)
1764 1764
1765#endif 1765#endif
1766 1766
1767static int __init setup_vmstat(void) 1767void __init init_mm_internals(void)
1768{ 1768{
1769#ifdef CONFIG_SMP 1769#ifdef CONFIG_SMP
1770 int ret; 1770 int ret;
@@ -1792,9 +1792,7 @@ static int __init setup_vmstat(void)
1792 proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations); 1792 proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
1793 proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations); 1793 proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
1794#endif 1794#endif
1795 return 0;
1796} 1795}
1797module_init(setup_vmstat)
1798 1796
1799#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION) 1797#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1800 1798
diff --git a/mm/workingset.c b/mm/workingset.c
index ac839fca0e76..eda05c71fa49 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -532,7 +532,7 @@ static int __init workingset_init(void)
532 pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n", 532 pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
533 timestamp_bits, max_order, bucket_order); 533 timestamp_bits, max_order, bucket_order);
534 534
535 ret = list_lru_init_key(&shadow_nodes, &shadow_nodes_key); 535 ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key);
536 if (ret) 536 if (ret)
537 goto err; 537 goto err;
538 ret = register_shrinker(&workingset_shadow_shrinker); 538 ret = register_shrinker(&workingset_shadow_shrinker);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 38dcf1eb427d..f76bb3332613 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -7,6 +7,7 @@
7#include <linux/kthread.h> 7#include <linux/kthread.h>
8#include <linux/net.h> 8#include <linux/net.h>
9#include <linux/nsproxy.h> 9#include <linux/nsproxy.h>
10#include <linux/sched/mm.h>
10#include <linux/slab.h> 11#include <linux/slab.h>
11#include <linux/socket.h> 12#include <linux/socket.h>
12#include <linux/string.h> 13#include <linux/string.h>
@@ -469,11 +470,16 @@ static int ceph_tcp_connect(struct ceph_connection *con)
469{ 470{
470 struct sockaddr_storage *paddr = &con->peer_addr.in_addr; 471 struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
471 struct socket *sock; 472 struct socket *sock;
473 unsigned int noio_flag;
472 int ret; 474 int ret;
473 475
474 BUG_ON(con->sock); 476 BUG_ON(con->sock);
477
478 /* sock_create_kern() allocates with GFP_KERNEL */
479 noio_flag = memalloc_noio_save();
475 ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family, 480 ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
476 SOCK_STREAM, IPPROTO_TCP, &sock); 481 SOCK_STREAM, IPPROTO_TCP, &sock);
482 memalloc_noio_restore(noio_flag);
477 if (ret) 483 if (ret)
478 return ret; 484 return ret;
479 sock->sk->sk_allocation = GFP_NOFS; 485 sock->sk->sk_allocation = GFP_NOFS;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 5f3ae922fcd1..c9cf425303f8 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -159,7 +159,7 @@ __skb_flow_dissect_arp(const struct sk_buff *skb,
159 unsigned char ar_tip[4]; 159 unsigned char ar_tip[4];
160 } *arp_eth, _arp_eth; 160 } *arp_eth, _arp_eth;
161 const struct arphdr *arp; 161 const struct arphdr *arp;
162 struct arphdr *_arp; 162 struct arphdr _arp;
163 163
164 if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP)) 164 if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
165 return FLOW_DISSECT_RET_OUT_GOOD; 165 return FLOW_DISSECT_RET_OUT_GOOD;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 7069f5e4a361..8ae87c591c8e 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -861,7 +861,8 @@ static void neigh_probe(struct neighbour *neigh)
861 if (skb) 861 if (skb)
862 skb = skb_clone(skb, GFP_ATOMIC); 862 skb = skb_clone(skb, GFP_ATOMIC);
863 write_unlock(&neigh->lock); 863 write_unlock(&neigh->lock);
864 neigh->ops->solicit(neigh, skb); 864 if (neigh->ops->solicit)
865 neigh->ops->solicit(neigh, skb);
865 atomic_inc(&neigh->probes); 866 atomic_inc(&neigh->probes);
866 kfree_skb(skb); 867 kfree_skb(skb);
867} 868}
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index fb87e78a2cc7..6bd2f8fb0476 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -20,9 +20,11 @@
20#include <net/tcp.h> 20#include <net/tcp.h>
21 21
22static siphash_key_t net_secret __read_mostly; 22static siphash_key_t net_secret __read_mostly;
23static siphash_key_t ts_secret __read_mostly;
23 24
24static __always_inline void net_secret_init(void) 25static __always_inline void net_secret_init(void)
25{ 26{
27 net_get_random_once(&ts_secret, sizeof(ts_secret));
26 net_get_random_once(&net_secret, sizeof(net_secret)); 28 net_get_random_once(&net_secret, sizeof(net_secret));
27} 29}
28#endif 30#endif
@@ -45,6 +47,23 @@ static u32 seq_scale(u32 seq)
45#endif 47#endif
46 48
47#if IS_ENABLED(CONFIG_IPV6) 49#if IS_ENABLED(CONFIG_IPV6)
50static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
51{
52 const struct {
53 struct in6_addr saddr;
54 struct in6_addr daddr;
55 } __aligned(SIPHASH_ALIGNMENT) combined = {
56 .saddr = *(struct in6_addr *)saddr,
57 .daddr = *(struct in6_addr *)daddr,
58 };
59
60 if (sysctl_tcp_timestamps != 1)
61 return 0;
62
63 return siphash(&combined, offsetofend(typeof(combined), daddr),
64 &ts_secret);
65}
66
48u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr, 67u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr,
49 __be16 sport, __be16 dport, u32 *tsoff) 68 __be16 sport, __be16 dport, u32 *tsoff)
50{ 69{
@@ -63,7 +82,7 @@ u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr,
63 net_secret_init(); 82 net_secret_init();
64 hash = siphash(&combined, offsetofend(typeof(combined), dport), 83 hash = siphash(&combined, offsetofend(typeof(combined), dport),
65 &net_secret); 84 &net_secret);
66 *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0; 85 *tsoff = secure_tcpv6_ts_off(saddr, daddr);
67 return seq_scale(hash); 86 return seq_scale(hash);
68} 87}
69EXPORT_SYMBOL(secure_tcpv6_seq_and_tsoff); 88EXPORT_SYMBOL(secure_tcpv6_seq_and_tsoff);
@@ -88,6 +107,14 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
88#endif 107#endif
89 108
90#ifdef CONFIG_INET 109#ifdef CONFIG_INET
110static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
111{
112 if (sysctl_tcp_timestamps != 1)
113 return 0;
114
115 return siphash_2u32((__force u32)saddr, (__force u32)daddr,
116 &ts_secret);
117}
91 118
92/* secure_tcp_seq_and_tsoff(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d), 119/* secure_tcp_seq_and_tsoff(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
93 * but fortunately, `sport' cannot be 0 in any circumstances. If this changes, 120 * but fortunately, `sport' cannot be 0 in any circumstances. If this changes,
@@ -102,7 +129,7 @@ u32 secure_tcp_seq_and_tsoff(__be32 saddr, __be32 daddr,
102 hash = siphash_3u32((__force u32)saddr, (__force u32)daddr, 129 hash = siphash_3u32((__force u32)saddr, (__force u32)daddr,
103 (__force u32)sport << 16 | (__force u32)dport, 130 (__force u32)sport << 16 | (__force u32)dport,
104 &net_secret); 131 &net_secret);
105 *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0; 132 *tsoff = secure_tcp_ts_off(saddr, daddr);
106 return seq_scale(hash); 133 return seq_scale(hash);
107} 134}
108 135
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 4ead336e14ea..7f9cc400eca0 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -408,14 +408,16 @@ static struct ctl_table net_core_table[] = {
408 .data = &sysctl_net_busy_poll, 408 .data = &sysctl_net_busy_poll,
409 .maxlen = sizeof(unsigned int), 409 .maxlen = sizeof(unsigned int),
410 .mode = 0644, 410 .mode = 0644,
411 .proc_handler = proc_dointvec 411 .proc_handler = proc_dointvec_minmax,
412 .extra1 = &zero,
412 }, 413 },
413 { 414 {
414 .procname = "busy_read", 415 .procname = "busy_read",
415 .data = &sysctl_net_busy_read, 416 .data = &sysctl_net_busy_read,
416 .maxlen = sizeof(unsigned int), 417 .maxlen = sizeof(unsigned int),
417 .mode = 0644, 418 .mode = 0644,
418 .proc_handler = proc_dointvec 419 .proc_handler = proc_dointvec_minmax,
420 .extra1 = &zero,
419 }, 421 },
420#endif 422#endif
421#ifdef CONFIG_NET_SCHED 423#ifdef CONFIG_NET_SCHED
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 9def8ed31c76..c3b12b1c7162 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -307,7 +307,7 @@ static void __init ic_close_devs(void)
307 while ((d = next)) { 307 while ((d = next)) {
308 next = d->next; 308 next = d->next;
309 dev = d->dev; 309 dev = d->dev;
310 if ((!ic_dev || dev != ic_dev->dev) && !netdev_uses_dsa(dev)) { 310 if (d != ic_dev && !netdev_uses_dsa(dev)) {
311 pr_debug("IP-Config: Downing %s\n", dev->name); 311 pr_debug("IP-Config: Downing %s\n", dev->name);
312 dev_change_flags(dev, d->flags); 312 dev_change_flags(dev, d->flags);
313 } 313 }
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index ef49989c93b1..da04b9c33ef3 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1249,16 +1249,6 @@ static const struct nf_conntrack_expect_policy snmp_exp_policy = {
1249 .timeout = 180, 1249 .timeout = 180,
1250}; 1250};
1251 1251
1252static struct nf_conntrack_helper snmp_helper __read_mostly = {
1253 .me = THIS_MODULE,
1254 .help = help,
1255 .expect_policy = &snmp_exp_policy,
1256 .name = "snmp",
1257 .tuple.src.l3num = AF_INET,
1258 .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT),
1259 .tuple.dst.protonum = IPPROTO_UDP,
1260};
1261
1262static struct nf_conntrack_helper snmp_trap_helper __read_mostly = { 1252static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
1263 .me = THIS_MODULE, 1253 .me = THIS_MODULE,
1264 .help = help, 1254 .help = help,
@@ -1277,22 +1267,16 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
1277 1267
1278static int __init nf_nat_snmp_basic_init(void) 1268static int __init nf_nat_snmp_basic_init(void)
1279{ 1269{
1280 int ret = 0;
1281
1282 BUG_ON(nf_nat_snmp_hook != NULL); 1270 BUG_ON(nf_nat_snmp_hook != NULL);
1283 RCU_INIT_POINTER(nf_nat_snmp_hook, help); 1271 RCU_INIT_POINTER(nf_nat_snmp_hook, help);
1284 1272
1285 ret = nf_conntrack_helper_register(&snmp_trap_helper); 1273 return nf_conntrack_helper_register(&snmp_trap_helper);
1286 if (ret < 0) {
1287 nf_conntrack_helper_unregister(&snmp_helper);
1288 return ret;
1289 }
1290 return ret;
1291} 1274}
1292 1275
1293static void __exit nf_nat_snmp_basic_fini(void) 1276static void __exit nf_nat_snmp_basic_fini(void)
1294{ 1277{
1295 RCU_INIT_POINTER(nf_nat_snmp_hook, NULL); 1278 RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
1279 synchronize_rcu();
1296 nf_conntrack_helper_unregister(&snmp_trap_helper); 1280 nf_conntrack_helper_unregister(&snmp_trap_helper);
1297} 1281}
1298 1282
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 2af6244b83e2..ccfbce13a633 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -156,17 +156,18 @@ int ping_hash(struct sock *sk)
156void ping_unhash(struct sock *sk) 156void ping_unhash(struct sock *sk)
157{ 157{
158 struct inet_sock *isk = inet_sk(sk); 158 struct inet_sock *isk = inet_sk(sk);
159
159 pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); 160 pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
161 write_lock_bh(&ping_table.lock);
160 if (sk_hashed(sk)) { 162 if (sk_hashed(sk)) {
161 write_lock_bh(&ping_table.lock);
162 hlist_nulls_del(&sk->sk_nulls_node); 163 hlist_nulls_del(&sk->sk_nulls_node);
163 sk_nulls_node_init(&sk->sk_nulls_node); 164 sk_nulls_node_init(&sk->sk_nulls_node);
164 sock_put(sk); 165 sock_put(sk);
165 isk->inet_num = 0; 166 isk->inet_num = 0;
166 isk->inet_sport = 0; 167 isk->inet_sport = 0;
167 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 168 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
168 write_unlock_bh(&ping_table.lock);
169 } 169 }
170 write_unlock_bh(&ping_table.lock);
170} 171}
171EXPORT_SYMBOL_GPL(ping_unhash); 172EXPORT_SYMBOL_GPL(ping_unhash);
172 173
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ed6606cf5b3e..31f2765ef851 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -126,7 +126,8 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
126#define REXMIT_LOST 1 /* retransmit packets marked lost */ 126#define REXMIT_LOST 1 /* retransmit packets marked lost */
127#define REXMIT_NEW 2 /* FRTO-style transmit of unsent/new packets */ 127#define REXMIT_NEW 2 /* FRTO-style transmit of unsent/new packets */
128 128
129static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb) 129static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,
130 unsigned int len)
130{ 131{
131 static bool __once __read_mostly; 132 static bool __once __read_mostly;
132 133
@@ -137,8 +138,9 @@ static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb)
137 138
138 rcu_read_lock(); 139 rcu_read_lock();
139 dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif); 140 dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
140 pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n", 141 if (!dev || len >= dev->mtu)
141 dev ? dev->name : "Unknown driver"); 142 pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
143 dev ? dev->name : "Unknown driver");
142 rcu_read_unlock(); 144 rcu_read_unlock();
143 } 145 }
144} 146}
@@ -161,8 +163,10 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
161 if (len >= icsk->icsk_ack.rcv_mss) { 163 if (len >= icsk->icsk_ack.rcv_mss) {
162 icsk->icsk_ack.rcv_mss = min_t(unsigned int, len, 164 icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
163 tcp_sk(sk)->advmss); 165 tcp_sk(sk)->advmss);
164 if (unlikely(icsk->icsk_ack.rcv_mss != len)) 166 /* Account for possibly-removed options */
165 tcp_gro_dev_warn(sk, skb); 167 if (unlikely(len > icsk->icsk_ack.rcv_mss +
168 MAX_TCP_OPTION_SPACE))
169 tcp_gro_dev_warn(sk, skb, len);
166 } else { 170 } else {
167 /* Otherwise, we make more careful check taking into account, 171 /* Otherwise, we make more careful check taking into account,
168 * that SACKs block is variable. 172 * that SACKs block is variable.
@@ -874,22 +878,11 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
874 const int ts) 878 const int ts)
875{ 879{
876 struct tcp_sock *tp = tcp_sk(sk); 880 struct tcp_sock *tp = tcp_sk(sk);
877 if (metric > tp->reordering) { 881 int mib_idx;
878 int mib_idx;
879 882
883 if (metric > tp->reordering) {
880 tp->reordering = min(sysctl_tcp_max_reordering, metric); 884 tp->reordering = min(sysctl_tcp_max_reordering, metric);
881 885
882 /* This exciting event is worth to be remembered. 8) */
883 if (ts)
884 mib_idx = LINUX_MIB_TCPTSREORDER;
885 else if (tcp_is_reno(tp))
886 mib_idx = LINUX_MIB_TCPRENOREORDER;
887 else if (tcp_is_fack(tp))
888 mib_idx = LINUX_MIB_TCPFACKREORDER;
889 else
890 mib_idx = LINUX_MIB_TCPSACKREORDER;
891
892 NET_INC_STATS(sock_net(sk), mib_idx);
893#if FASTRETRANS_DEBUG > 1 886#if FASTRETRANS_DEBUG > 1
894 pr_debug("Disorder%d %d %u f%u s%u rr%d\n", 887 pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
895 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, 888 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -902,6 +895,18 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
902 } 895 }
903 896
904 tp->rack.reord = 1; 897 tp->rack.reord = 1;
898
899 /* This exciting event is worth to be remembered. 8) */
900 if (ts)
901 mib_idx = LINUX_MIB_TCPTSREORDER;
902 else if (tcp_is_reno(tp))
903 mib_idx = LINUX_MIB_TCPRENOREORDER;
904 else if (tcp_is_fack(tp))
905 mib_idx = LINUX_MIB_TCPFACKREORDER;
906 else
907 mib_idx = LINUX_MIB_TCPSACKREORDER;
908
909 NET_INC_STATS(sock_net(sk), mib_idx);
905} 910}
906 911
907/* This must be called before lost_out is incremented */ 912/* This must be called before lost_out is incremented */
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index 4ecb38ae8504..d8acbd9f477a 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -12,7 +12,8 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
12 /* Account for retransmits that are lost again */ 12 /* Account for retransmits that are lost again */
13 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 13 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
14 tp->retrans_out -= tcp_skb_pcount(skb); 14 tp->retrans_out -= tcp_skb_pcount(skb);
15 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT); 15 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
16 tcp_skb_pcount(skb));
16 } 17 }
17} 18}
18 19
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 309062f3debe..31762f76cdb5 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1687,7 +1687,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1687 struct kcm_attach info; 1687 struct kcm_attach info;
1688 1688
1689 if (copy_from_user(&info, (void __user *)arg, sizeof(info))) 1689 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1690 err = -EFAULT; 1690 return -EFAULT;
1691 1691
1692 err = kcm_attach_ioctl(sock, &info); 1692 err = kcm_attach_ioctl(sock, &info);
1693 1693
@@ -1697,7 +1697,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1697 struct kcm_unattach info; 1697 struct kcm_unattach info;
1698 1698
1699 if (copy_from_user(&info, (void __user *)arg, sizeof(info))) 1699 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1700 err = -EFAULT; 1700 return -EFAULT;
1701 1701
1702 err = kcm_unattach_ioctl(sock, &info); 1702 err = kcm_unattach_ioctl(sock, &info);
1703 1703
@@ -1708,7 +1708,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1708 struct socket *newsock = NULL; 1708 struct socket *newsock = NULL;
1709 1709
1710 if (copy_from_user(&info, (void __user *)arg, sizeof(info))) 1710 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1711 err = -EFAULT; 1711 return -EFAULT;
1712 1712
1713 err = kcm_clone(sock, &info, &newsock); 1713 err = kcm_clone(sock, &info, &newsock);
1714 1714
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 8adab6335ced..e37d9554da7b 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -278,7 +278,57 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
278} 278}
279EXPORT_SYMBOL_GPL(l2tp_session_find); 279EXPORT_SYMBOL_GPL(l2tp_session_find);
280 280
281struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) 281/* Like l2tp_session_find() but takes a reference on the returned session.
282 * Optionally calls session->ref() too if do_ref is true.
283 */
284struct l2tp_session *l2tp_session_get(struct net *net,
285 struct l2tp_tunnel *tunnel,
286 u32 session_id, bool do_ref)
287{
288 struct hlist_head *session_list;
289 struct l2tp_session *session;
290
291 if (!tunnel) {
292 struct l2tp_net *pn = l2tp_pernet(net);
293
294 session_list = l2tp_session_id_hash_2(pn, session_id);
295
296 rcu_read_lock_bh();
297 hlist_for_each_entry_rcu(session, session_list, global_hlist) {
298 if (session->session_id == session_id) {
299 l2tp_session_inc_refcount(session);
300 if (do_ref && session->ref)
301 session->ref(session);
302 rcu_read_unlock_bh();
303
304 return session;
305 }
306 }
307 rcu_read_unlock_bh();
308
309 return NULL;
310 }
311
312 session_list = l2tp_session_id_hash(tunnel, session_id);
313 read_lock_bh(&tunnel->hlist_lock);
314 hlist_for_each_entry(session, session_list, hlist) {
315 if (session->session_id == session_id) {
316 l2tp_session_inc_refcount(session);
317 if (do_ref && session->ref)
318 session->ref(session);
319 read_unlock_bh(&tunnel->hlist_lock);
320
321 return session;
322 }
323 }
324 read_unlock_bh(&tunnel->hlist_lock);
325
326 return NULL;
327}
328EXPORT_SYMBOL_GPL(l2tp_session_get);
329
330struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
331 bool do_ref)
282{ 332{
283 int hash; 333 int hash;
284 struct l2tp_session *session; 334 struct l2tp_session *session;
@@ -288,6 +338,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
288 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { 338 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
289 hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) { 339 hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
290 if (++count > nth) { 340 if (++count > nth) {
341 l2tp_session_inc_refcount(session);
342 if (do_ref && session->ref)
343 session->ref(session);
291 read_unlock_bh(&tunnel->hlist_lock); 344 read_unlock_bh(&tunnel->hlist_lock);
292 return session; 345 return session;
293 } 346 }
@@ -298,12 +351,13 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
298 351
299 return NULL; 352 return NULL;
300} 353}
301EXPORT_SYMBOL_GPL(l2tp_session_find_nth); 354EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
302 355
303/* Lookup a session by interface name. 356/* Lookup a session by interface name.
304 * This is very inefficient but is only used by management interfaces. 357 * This is very inefficient but is only used by management interfaces.
305 */ 358 */
306struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) 359struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
360 bool do_ref)
307{ 361{
308 struct l2tp_net *pn = l2tp_pernet(net); 362 struct l2tp_net *pn = l2tp_pernet(net);
309 int hash; 363 int hash;
@@ -313,7 +367,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
313 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) { 367 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
314 hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) { 368 hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
315 if (!strcmp(session->ifname, ifname)) { 369 if (!strcmp(session->ifname, ifname)) {
370 l2tp_session_inc_refcount(session);
371 if (do_ref && session->ref)
372 session->ref(session);
316 rcu_read_unlock_bh(); 373 rcu_read_unlock_bh();
374
317 return session; 375 return session;
318 } 376 }
319 } 377 }
@@ -323,7 +381,49 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
323 381
324 return NULL; 382 return NULL;
325} 383}
326EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname); 384EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
385
386static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
387 struct l2tp_session *session)
388{
389 struct l2tp_session *session_walk;
390 struct hlist_head *g_head;
391 struct hlist_head *head;
392 struct l2tp_net *pn;
393
394 head = l2tp_session_id_hash(tunnel, session->session_id);
395
396 write_lock_bh(&tunnel->hlist_lock);
397 hlist_for_each_entry(session_walk, head, hlist)
398 if (session_walk->session_id == session->session_id)
399 goto exist;
400
401 if (tunnel->version == L2TP_HDR_VER_3) {
402 pn = l2tp_pernet(tunnel->l2tp_net);
403 g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net),
404 session->session_id);
405
406 spin_lock_bh(&pn->l2tp_session_hlist_lock);
407 hlist_for_each_entry(session_walk, g_head, global_hlist)
408 if (session_walk->session_id == session->session_id)
409 goto exist_glob;
410
411 hlist_add_head_rcu(&session->global_hlist, g_head);
412 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
413 }
414
415 hlist_add_head(&session->hlist, head);
416 write_unlock_bh(&tunnel->hlist_lock);
417
418 return 0;
419
420exist_glob:
421 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
422exist:
423 write_unlock_bh(&tunnel->hlist_lock);
424
425 return -EEXIST;
426}
327 427
328/* Lookup a tunnel by id 428/* Lookup a tunnel by id
329 */ 429 */
@@ -633,6 +733,9 @@ discard:
633 * a data (not control) frame before coming here. Fields up to the 733 * a data (not control) frame before coming here. Fields up to the
634 * session-id have already been parsed and ptr points to the data 734 * session-id have already been parsed and ptr points to the data
635 * after the session-id. 735 * after the session-id.
736 *
737 * session->ref() must have been called prior to l2tp_recv_common().
738 * session->deref() will be called automatically after skb is processed.
636 */ 739 */
637void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, 740void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
638 unsigned char *ptr, unsigned char *optr, u16 hdrflags, 741 unsigned char *ptr, unsigned char *optr, u16 hdrflags,
@@ -642,14 +745,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
642 int offset; 745 int offset;
643 u32 ns, nr; 746 u32 ns, nr;
644 747
645 /* The ref count is increased since we now hold a pointer to
646 * the session. Take care to decrement the refcnt when exiting
647 * this function from now on...
648 */
649 l2tp_session_inc_refcount(session);
650 if (session->ref)
651 (*session->ref)(session);
652
653 /* Parse and check optional cookie */ 748 /* Parse and check optional cookie */
654 if (session->peer_cookie_len > 0) { 749 if (session->peer_cookie_len > 0) {
655 if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) { 750 if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
@@ -802,8 +897,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
802 /* Try to dequeue as many skbs from reorder_q as we can. */ 897 /* Try to dequeue as many skbs from reorder_q as we can. */
803 l2tp_recv_dequeue(session); 898 l2tp_recv_dequeue(session);
804 899
805 l2tp_session_dec_refcount(session);
806
807 return; 900 return;
808 901
809discard: 902discard:
@@ -812,8 +905,6 @@ discard:
812 905
813 if (session->deref) 906 if (session->deref)
814 (*session->deref)(session); 907 (*session->deref)(session);
815
816 l2tp_session_dec_refcount(session);
817} 908}
818EXPORT_SYMBOL(l2tp_recv_common); 909EXPORT_SYMBOL(l2tp_recv_common);
819 910
@@ -920,8 +1011,14 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
920 } 1011 }
921 1012
922 /* Find the session context */ 1013 /* Find the session context */
923 session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id); 1014 session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true);
924 if (!session || !session->recv_skb) { 1015 if (!session || !session->recv_skb) {
1016 if (session) {
1017 if (session->deref)
1018 session->deref(session);
1019 l2tp_session_dec_refcount(session);
1020 }
1021
925 /* Not found? Pass to userspace to deal with */ 1022 /* Not found? Pass to userspace to deal with */
926 l2tp_info(tunnel, L2TP_MSG_DATA, 1023 l2tp_info(tunnel, L2TP_MSG_DATA,
927 "%s: no session found (%u/%u). Passing up.\n", 1024 "%s: no session found (%u/%u). Passing up.\n",
@@ -930,6 +1027,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
930 } 1027 }
931 1028
932 l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook); 1029 l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
1030 l2tp_session_dec_refcount(session);
933 1031
934 return 0; 1032 return 0;
935 1033
@@ -1738,6 +1836,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
1738struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) 1836struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
1739{ 1837{
1740 struct l2tp_session *session; 1838 struct l2tp_session *session;
1839 int err;
1741 1840
1742 session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL); 1841 session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
1743 if (session != NULL) { 1842 if (session != NULL) {
@@ -1793,6 +1892,13 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
1793 1892
1794 l2tp_session_set_header_len(session, tunnel->version); 1893 l2tp_session_set_header_len(session, tunnel->version);
1795 1894
1895 err = l2tp_session_add_to_tunnel(tunnel, session);
1896 if (err) {
1897 kfree(session);
1898
1899 return ERR_PTR(err);
1900 }
1901
1796 /* Bump the reference count. The session context is deleted 1902 /* Bump the reference count. The session context is deleted
1797 * only when this drops to zero. 1903 * only when this drops to zero.
1798 */ 1904 */
@@ -1802,28 +1908,14 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
1802 /* Ensure tunnel socket isn't deleted */ 1908 /* Ensure tunnel socket isn't deleted */
1803 sock_hold(tunnel->sock); 1909 sock_hold(tunnel->sock);
1804 1910
1805 /* Add session to the tunnel's hash list */
1806 write_lock_bh(&tunnel->hlist_lock);
1807 hlist_add_head(&session->hlist,
1808 l2tp_session_id_hash(tunnel, session_id));
1809 write_unlock_bh(&tunnel->hlist_lock);
1810
1811 /* And to the global session list if L2TPv3 */
1812 if (tunnel->version != L2TP_HDR_VER_2) {
1813 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1814
1815 spin_lock_bh(&pn->l2tp_session_hlist_lock);
1816 hlist_add_head_rcu(&session->global_hlist,
1817 l2tp_session_id_hash_2(pn, session_id));
1818 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1819 }
1820
1821 /* Ignore management session in session count value */ 1911 /* Ignore management session in session count value */
1822 if (session->session_id != 0) 1912 if (session->session_id != 0)
1823 atomic_inc(&l2tp_session_count); 1913 atomic_inc(&l2tp_session_count);
1914
1915 return session;
1824 } 1916 }
1825 1917
1826 return session; 1918 return ERR_PTR(-ENOMEM);
1827} 1919}
1828EXPORT_SYMBOL_GPL(l2tp_session_create); 1920EXPORT_SYMBOL_GPL(l2tp_session_create);
1829 1921
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index aebf281d09ee..8ce7818c7a9d 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -230,11 +230,16 @@ out:
230 return tunnel; 230 return tunnel;
231} 231}
232 232
233struct l2tp_session *l2tp_session_get(struct net *net,
234 struct l2tp_tunnel *tunnel,
235 u32 session_id, bool do_ref);
233struct l2tp_session *l2tp_session_find(struct net *net, 236struct l2tp_session *l2tp_session_find(struct net *net,
234 struct l2tp_tunnel *tunnel, 237 struct l2tp_tunnel *tunnel,
235 u32 session_id); 238 u32 session_id);
236struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); 239struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
237struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); 240 bool do_ref);
241struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
242 bool do_ref);
238struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id); 243struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
239struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); 244struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
240 245
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 2d6760a2ae34..d100aed3d06f 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -53,7 +53,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
53 53
54static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd) 54static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
55{ 55{
56 pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); 56 pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
57 pd->session_idx++; 57 pd->session_idx++;
58 58
59 if (pd->session == NULL) { 59 if (pd->session == NULL) {
@@ -238,10 +238,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
238 } 238 }
239 239
240 /* Show the tunnel or session context */ 240 /* Show the tunnel or session context */
241 if (pd->session == NULL) 241 if (!pd->session) {
242 l2tp_dfs_seq_tunnel_show(m, pd->tunnel); 242 l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
243 else 243 } else {
244 l2tp_dfs_seq_session_show(m, pd->session); 244 l2tp_dfs_seq_session_show(m, pd->session);
245 if (pd->session->deref)
246 pd->session->deref(pd->session);
247 l2tp_session_dec_refcount(pd->session);
248 }
245 249
246out: 250out:
247 return 0; 251 return 0;
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 8bf18a5f66e0..6fd41d7afe1e 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -221,12 +221,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
221 goto out; 221 goto out;
222 } 222 }
223 223
224 session = l2tp_session_find(net, tunnel, session_id);
225 if (session) {
226 rc = -EEXIST;
227 goto out;
228 }
229
230 if (cfg->ifname) { 224 if (cfg->ifname) {
231 dev = dev_get_by_name(net, cfg->ifname); 225 dev = dev_get_by_name(net, cfg->ifname);
232 if (dev) { 226 if (dev) {
@@ -240,8 +234,8 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
240 234
241 session = l2tp_session_create(sizeof(*spriv), tunnel, session_id, 235 session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
242 peer_session_id, cfg); 236 peer_session_id, cfg);
243 if (!session) { 237 if (IS_ERR(session)) {
244 rc = -ENOMEM; 238 rc = PTR_ERR(session);
245 goto out; 239 goto out;
246 } 240 }
247 241
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index d25038cfd64e..4d322c1b7233 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -143,19 +143,19 @@ static int l2tp_ip_recv(struct sk_buff *skb)
143 } 143 }
144 144
145 /* Ok, this is a data packet. Lookup the session. */ 145 /* Ok, this is a data packet. Lookup the session. */
146 session = l2tp_session_find(net, NULL, session_id); 146 session = l2tp_session_get(net, NULL, session_id, true);
147 if (session == NULL) 147 if (!session)
148 goto discard; 148 goto discard;
149 149
150 tunnel = session->tunnel; 150 tunnel = session->tunnel;
151 if (tunnel == NULL) 151 if (!tunnel)
152 goto discard; 152 goto discard_sess;
153 153
154 /* Trace packet contents, if enabled */ 154 /* Trace packet contents, if enabled */
155 if (tunnel->debug & L2TP_MSG_DATA) { 155 if (tunnel->debug & L2TP_MSG_DATA) {
156 length = min(32u, skb->len); 156 length = min(32u, skb->len);
157 if (!pskb_may_pull(skb, length)) 157 if (!pskb_may_pull(skb, length))
158 goto discard; 158 goto discard_sess;
159 159
160 /* Point to L2TP header */ 160 /* Point to L2TP header */
161 optr = ptr = skb->data; 161 optr = ptr = skb->data;
@@ -165,6 +165,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
165 } 165 }
166 166
167 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); 167 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
168 l2tp_session_dec_refcount(session);
168 169
169 return 0; 170 return 0;
170 171
@@ -178,9 +179,10 @@ pass_up:
178 179
179 tunnel_id = ntohl(*(__be32 *) &skb->data[4]); 180 tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
180 tunnel = l2tp_tunnel_find(net, tunnel_id); 181 tunnel = l2tp_tunnel_find(net, tunnel_id);
181 if (tunnel != NULL) 182 if (tunnel) {
182 sk = tunnel->sock; 183 sk = tunnel->sock;
183 else { 184 sock_hold(sk);
185 } else {
184 struct iphdr *iph = (struct iphdr *) skb_network_header(skb); 186 struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
185 187
186 read_lock_bh(&l2tp_ip_lock); 188 read_lock_bh(&l2tp_ip_lock);
@@ -202,6 +204,12 @@ pass_up:
202 204
203 return sk_receive_skb(sk, skb, 1); 205 return sk_receive_skb(sk, skb, 1);
204 206
207discard_sess:
208 if (session->deref)
209 session->deref(session);
210 l2tp_session_dec_refcount(session);
211 goto discard;
212
205discard_put: 213discard_put:
206 sock_put(sk); 214 sock_put(sk);
207 215
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index a4abcbc4c09a..88b397c30d86 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -156,19 +156,19 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
156 } 156 }
157 157
158 /* Ok, this is a data packet. Lookup the session. */ 158 /* Ok, this is a data packet. Lookup the session. */
159 session = l2tp_session_find(net, NULL, session_id); 159 session = l2tp_session_get(net, NULL, session_id, true);
160 if (session == NULL) 160 if (!session)
161 goto discard; 161 goto discard;
162 162
163 tunnel = session->tunnel; 163 tunnel = session->tunnel;
164 if (tunnel == NULL) 164 if (!tunnel)
165 goto discard; 165 goto discard_sess;
166 166
167 /* Trace packet contents, if enabled */ 167 /* Trace packet contents, if enabled */
168 if (tunnel->debug & L2TP_MSG_DATA) { 168 if (tunnel->debug & L2TP_MSG_DATA) {
169 length = min(32u, skb->len); 169 length = min(32u, skb->len);
170 if (!pskb_may_pull(skb, length)) 170 if (!pskb_may_pull(skb, length))
171 goto discard; 171 goto discard_sess;
172 172
173 /* Point to L2TP header */ 173 /* Point to L2TP header */
174 optr = ptr = skb->data; 174 optr = ptr = skb->data;
@@ -179,6 +179,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
179 179
180 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, 180 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
181 tunnel->recv_payload_hook); 181 tunnel->recv_payload_hook);
182 l2tp_session_dec_refcount(session);
183
182 return 0; 184 return 0;
183 185
184pass_up: 186pass_up:
@@ -191,9 +193,10 @@ pass_up:
191 193
192 tunnel_id = ntohl(*(__be32 *) &skb->data[4]); 194 tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
193 tunnel = l2tp_tunnel_find(net, tunnel_id); 195 tunnel = l2tp_tunnel_find(net, tunnel_id);
194 if (tunnel != NULL) 196 if (tunnel) {
195 sk = tunnel->sock; 197 sk = tunnel->sock;
196 else { 198 sock_hold(sk);
199 } else {
197 struct ipv6hdr *iph = ipv6_hdr(skb); 200 struct ipv6hdr *iph = ipv6_hdr(skb);
198 201
199 read_lock_bh(&l2tp_ip6_lock); 202 read_lock_bh(&l2tp_ip6_lock);
@@ -215,6 +218,12 @@ pass_up:
215 218
216 return sk_receive_skb(sk, skb, 1); 219 return sk_receive_skb(sk, skb, 1);
217 220
221discard_sess:
222 if (session->deref)
223 session->deref(session);
224 l2tp_session_dec_refcount(session);
225 goto discard;
226
218discard_put: 227discard_put:
219 sock_put(sk); 228 sock_put(sk);
220 229
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 3620fba31786..7e3e669baac4 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -48,7 +48,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq,
48/* Accessed under genl lock */ 48/* Accessed under genl lock */
49static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX]; 49static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
50 50
51static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info) 51static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
52 bool do_ref)
52{ 53{
53 u32 tunnel_id; 54 u32 tunnel_id;
54 u32 session_id; 55 u32 session_id;
@@ -59,14 +60,15 @@ static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
59 60
60 if (info->attrs[L2TP_ATTR_IFNAME]) { 61 if (info->attrs[L2TP_ATTR_IFNAME]) {
61 ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]); 62 ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
62 session = l2tp_session_find_by_ifname(net, ifname); 63 session = l2tp_session_get_by_ifname(net, ifname, do_ref);
63 } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) && 64 } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
64 (info->attrs[L2TP_ATTR_CONN_ID])) { 65 (info->attrs[L2TP_ATTR_CONN_ID])) {
65 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); 66 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
66 session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); 67 session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
67 tunnel = l2tp_tunnel_find(net, tunnel_id); 68 tunnel = l2tp_tunnel_find(net, tunnel_id);
68 if (tunnel) 69 if (tunnel)
69 session = l2tp_session_find(net, tunnel, session_id); 70 session = l2tp_session_get(net, tunnel, session_id,
71 do_ref);
70 } 72 }
71 73
72 return session; 74 return session;
@@ -642,10 +644,12 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
642 session_id, peer_session_id, &cfg); 644 session_id, peer_session_id, &cfg);
643 645
644 if (ret >= 0) { 646 if (ret >= 0) {
645 session = l2tp_session_find(net, tunnel, session_id); 647 session = l2tp_session_get(net, tunnel, session_id, false);
646 if (session) 648 if (session) {
647 ret = l2tp_session_notify(&l2tp_nl_family, info, session, 649 ret = l2tp_session_notify(&l2tp_nl_family, info, session,
648 L2TP_CMD_SESSION_CREATE); 650 L2TP_CMD_SESSION_CREATE);
651 l2tp_session_dec_refcount(session);
652 }
649 } 653 }
650 654
651out: 655out:
@@ -658,7 +662,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
658 struct l2tp_session *session; 662 struct l2tp_session *session;
659 u16 pw_type; 663 u16 pw_type;
660 664
661 session = l2tp_nl_session_find(info); 665 session = l2tp_nl_session_get(info, true);
662 if (session == NULL) { 666 if (session == NULL) {
663 ret = -ENODEV; 667 ret = -ENODEV;
664 goto out; 668 goto out;
@@ -672,6 +676,10 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
672 if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete) 676 if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
673 ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session); 677 ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);
674 678
679 if (session->deref)
680 session->deref(session);
681 l2tp_session_dec_refcount(session);
682
675out: 683out:
676 return ret; 684 return ret;
677} 685}
@@ -681,7 +689,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
681 int ret = 0; 689 int ret = 0;
682 struct l2tp_session *session; 690 struct l2tp_session *session;
683 691
684 session = l2tp_nl_session_find(info); 692 session = l2tp_nl_session_get(info, false);
685 if (session == NULL) { 693 if (session == NULL) {
686 ret = -ENODEV; 694 ret = -ENODEV;
687 goto out; 695 goto out;
@@ -716,6 +724,8 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
716 ret = l2tp_session_notify(&l2tp_nl_family, info, 724 ret = l2tp_session_notify(&l2tp_nl_family, info,
717 session, L2TP_CMD_SESSION_MODIFY); 725 session, L2TP_CMD_SESSION_MODIFY);
718 726
727 l2tp_session_dec_refcount(session);
728
719out: 729out:
720 return ret; 730 return ret;
721} 731}
@@ -811,29 +821,34 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
811 struct sk_buff *msg; 821 struct sk_buff *msg;
812 int ret; 822 int ret;
813 823
814 session = l2tp_nl_session_find(info); 824 session = l2tp_nl_session_get(info, false);
815 if (session == NULL) { 825 if (session == NULL) {
816 ret = -ENODEV; 826 ret = -ENODEV;
817 goto out; 827 goto err;
818 } 828 }
819 829
820 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 830 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
821 if (!msg) { 831 if (!msg) {
822 ret = -ENOMEM; 832 ret = -ENOMEM;
823 goto out; 833 goto err_ref;
824 } 834 }
825 835
826 ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq, 836 ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
827 0, session, L2TP_CMD_SESSION_GET); 837 0, session, L2TP_CMD_SESSION_GET);
828 if (ret < 0) 838 if (ret < 0)
829 goto err_out; 839 goto err_ref_msg;
830 840
831 return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid); 841 ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
832 842
833err_out: 843 l2tp_session_dec_refcount(session);
834 nlmsg_free(msg);
835 844
836out: 845 return ret;
846
847err_ref_msg:
848 nlmsg_free(msg);
849err_ref:
850 l2tp_session_dec_refcount(session);
851err:
837 return ret; 852 return ret;
838} 853}
839 854
@@ -852,7 +867,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
852 goto out; 867 goto out;
853 } 868 }
854 869
855 session = l2tp_session_find_nth(tunnel, si); 870 session = l2tp_session_get_nth(tunnel, si, false);
856 if (session == NULL) { 871 if (session == NULL) {
857 ti++; 872 ti++;
858 tunnel = NULL; 873 tunnel = NULL;
@@ -862,8 +877,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
862 877
863 if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid, 878 if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
864 cb->nlh->nlmsg_seq, NLM_F_MULTI, 879 cb->nlh->nlmsg_seq, NLM_F_MULTI,
865 session, L2TP_CMD_SESSION_GET) < 0) 880 session, L2TP_CMD_SESSION_GET) < 0) {
881 l2tp_session_dec_refcount(session);
866 break; 882 break;
883 }
884 l2tp_session_dec_refcount(session);
867 885
868 si++; 886 si++;
869 } 887 }
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 36cc56fd0418..861b255a2d51 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -450,6 +450,10 @@ static void pppol2tp_session_close(struct l2tp_session *session)
450static void pppol2tp_session_destruct(struct sock *sk) 450static void pppol2tp_session_destruct(struct sock *sk)
451{ 451{
452 struct l2tp_session *session = sk->sk_user_data; 452 struct l2tp_session *session = sk->sk_user_data;
453
454 skb_queue_purge(&sk->sk_receive_queue);
455 skb_queue_purge(&sk->sk_write_queue);
456
453 if (session) { 457 if (session) {
454 sk->sk_user_data = NULL; 458 sk->sk_user_data = NULL;
455 BUG_ON(session->magic != L2TP_SESSION_MAGIC); 459 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
@@ -488,9 +492,6 @@ static int pppol2tp_release(struct socket *sock)
488 l2tp_session_queue_purge(session); 492 l2tp_session_queue_purge(session);
489 sock_put(sk); 493 sock_put(sk);
490 } 494 }
491 skb_queue_purge(&sk->sk_receive_queue);
492 skb_queue_purge(&sk->sk_write_queue);
493
494 release_sock(sk); 495 release_sock(sk);
495 496
496 /* This will delete the session context via 497 /* This will delete the session context via
@@ -582,6 +583,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
582 int error = 0; 583 int error = 0;
583 u32 tunnel_id, peer_tunnel_id; 584 u32 tunnel_id, peer_tunnel_id;
584 u32 session_id, peer_session_id; 585 u32 session_id, peer_session_id;
586 bool drop_refcnt = false;
585 int ver = 2; 587 int ver = 2;
586 int fd; 588 int fd;
587 589
@@ -683,36 +685,36 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
683 if (tunnel->peer_tunnel_id == 0) 685 if (tunnel->peer_tunnel_id == 0)
684 tunnel->peer_tunnel_id = peer_tunnel_id; 686 tunnel->peer_tunnel_id = peer_tunnel_id;
685 687
686 /* Create session if it doesn't already exist. We handle the 688 session = l2tp_session_get(sock_net(sk), tunnel, session_id, false);
687 * case where a session was previously created by the netlink 689 if (session) {
688 * interface by checking that the session doesn't already have 690 drop_refcnt = true;
689 * a socket and its tunnel socket are what we expect. If any 691 ps = l2tp_session_priv(session);
690 * of those checks fail, return EEXIST to the caller. 692
691 */ 693 /* Using a pre-existing session is fine as long as it hasn't
692 session = l2tp_session_find(sock_net(sk), tunnel, session_id); 694 * been connected yet.
693 if (session == NULL) {
694 /* Default MTU must allow space for UDP/L2TP/PPP
695 * headers.
696 */ 695 */
697 cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD; 696 if (ps->sock) {
697 error = -EEXIST;
698 goto end;
699 }
698 700
699 /* Allocate and initialize a new session context. */ 701 /* consistency checks */
700 session = l2tp_session_create(sizeof(struct pppol2tp_session), 702 if (ps->tunnel_sock != tunnel->sock) {
701 tunnel, session_id, 703 error = -EEXIST;
702 peer_session_id, &cfg);
703 if (session == NULL) {
704 error = -ENOMEM;
705 goto end; 704 goto end;
706 } 705 }
707 } else { 706 } else {
708 ps = l2tp_session_priv(session); 707 /* Default MTU must allow space for UDP/L2TP/PPP headers */
709 error = -EEXIST; 708 cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
710 if (ps->sock != NULL) 709 cfg.mru = cfg.mtu;
711 goto end;
712 710
713 /* consistency checks */ 711 session = l2tp_session_create(sizeof(struct pppol2tp_session),
714 if (ps->tunnel_sock != tunnel->sock) 712 tunnel, session_id,
713 peer_session_id, &cfg);
714 if (IS_ERR(session)) {
715 error = PTR_ERR(session);
715 goto end; 716 goto end;
717 }
716 } 718 }
717 719
718 /* Associate session with its PPPoL2TP socket */ 720 /* Associate session with its PPPoL2TP socket */
@@ -777,6 +779,8 @@ out_no_ppp:
777 session->name); 779 session->name);
778 780
779end: 781end:
782 if (drop_refcnt)
783 l2tp_session_dec_refcount(session);
780 release_sock(sk); 784 release_sock(sk);
781 785
782 return error; 786 return error;
@@ -804,12 +808,6 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
804 if (tunnel->sock == NULL) 808 if (tunnel->sock == NULL)
805 goto out; 809 goto out;
806 810
807 /* Check that this session doesn't already exist */
808 error = -EEXIST;
809 session = l2tp_session_find(net, tunnel, session_id);
810 if (session != NULL)
811 goto out;
812
813 /* Default MTU values. */ 811 /* Default MTU values. */
814 if (cfg->mtu == 0) 812 if (cfg->mtu == 0)
815 cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; 813 cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
@@ -817,12 +815,13 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
817 cfg->mru = cfg->mtu; 815 cfg->mru = cfg->mtu;
818 816
819 /* Allocate and initialize a new session context. */ 817 /* Allocate and initialize a new session context. */
820 error = -ENOMEM;
821 session = l2tp_session_create(sizeof(struct pppol2tp_session), 818 session = l2tp_session_create(sizeof(struct pppol2tp_session),
822 tunnel, session_id, 819 tunnel, session_id,
823 peer_session_id, cfg); 820 peer_session_id, cfg);
824 if (session == NULL) 821 if (IS_ERR(session)) {
822 error = PTR_ERR(session);
825 goto out; 823 goto out;
824 }
826 825
827 ps = l2tp_session_priv(session); 826 ps = l2tp_session_priv(session);
828 ps->tunnel_sock = tunnel->sock; 827 ps->tunnel_sock = tunnel->sock;
@@ -1140,11 +1139,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
1140 if (stats.session_id != 0) { 1139 if (stats.session_id != 0) {
1141 /* resend to session ioctl handler */ 1140 /* resend to session ioctl handler */
1142 struct l2tp_session *session = 1141 struct l2tp_session *session =
1143 l2tp_session_find(sock_net(sk), tunnel, stats.session_id); 1142 l2tp_session_get(sock_net(sk), tunnel,
1144 if (session != NULL) 1143 stats.session_id, true);
1145 err = pppol2tp_session_ioctl(session, cmd, arg); 1144
1146 else 1145 if (session) {
1146 err = pppol2tp_session_ioctl(session, cmd,
1147 arg);
1148 if (session->deref)
1149 session->deref(session);
1150 l2tp_session_dec_refcount(session);
1151 } else {
1147 err = -EBADR; 1152 err = -EBADR;
1153 }
1148 break; 1154 break;
1149 } 1155 }
1150#ifdef CONFIG_XFRM 1156#ifdef CONFIG_XFRM
@@ -1554,7 +1560,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
1554 1560
1555static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd) 1561static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
1556{ 1562{
1557 pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); 1563 pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
1558 pd->session_idx++; 1564 pd->session_idx++;
1559 1565
1560 if (pd->session == NULL) { 1566 if (pd->session == NULL) {
@@ -1681,10 +1687,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v)
1681 1687
1682 /* Show the tunnel or session context. 1688 /* Show the tunnel or session context.
1683 */ 1689 */
1684 if (pd->session == NULL) 1690 if (!pd->session) {
1685 pppol2tp_seq_tunnel_show(m, pd->tunnel); 1691 pppol2tp_seq_tunnel_show(m, pd->tunnel);
1686 else 1692 } else {
1687 pppol2tp_seq_session_show(m, pd->session); 1693 pppol2tp_seq_session_show(m, pd->session);
1694 if (pd->session->deref)
1695 pd->session->deref(pd->session);
1696 l2tp_session_dec_refcount(pd->session);
1697 }
1688 1698
1689out: 1699out:
1690 return 0; 1700 return 0;
@@ -1843,4 +1853,4 @@ MODULE_DESCRIPTION("PPP over L2TP over UDP");
1843MODULE_LICENSE("GPL"); 1853MODULE_LICENSE("GPL");
1844MODULE_VERSION(PPPOL2TP_DRV_VERSION); 1854MODULE_VERSION(PPPOL2TP_DRV_VERSION);
1845MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP); 1855MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP);
1846MODULE_ALIAS_L2TP_PWTYPE(11); 1856MODULE_ALIAS_L2TP_PWTYPE(7);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 40813dd3301c..5bb0c5012819 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -718,7 +718,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
718 ieee80211_recalc_ps(local); 718 ieee80211_recalc_ps(local);
719 719
720 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 720 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
721 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { 721 sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
722 local->ops->wake_tx_queue) {
722 /* XXX: for AP_VLAN, actually track AP queues */ 723 /* XXX: for AP_VLAN, actually track AP queues */
723 netif_tx_start_all_queues(dev); 724 netif_tx_start_all_queues(dev);
724 } else if (dev) { 725 } else if (dev) {
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index da9df2d56e66..22fc32143e9c 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -290,6 +290,7 @@ void nf_conntrack_unregister_notifier(struct net *net,
290 BUG_ON(notify != new); 290 BUG_ON(notify != new);
291 RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL); 291 RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
292 mutex_unlock(&nf_ct_ecache_mutex); 292 mutex_unlock(&nf_ct_ecache_mutex);
293 /* synchronize_rcu() is called from ctnetlink_exit. */
293} 294}
294EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); 295EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
295 296
@@ -326,6 +327,7 @@ void nf_ct_expect_unregister_notifier(struct net *net,
326 BUG_ON(notify != new); 327 BUG_ON(notify != new);
327 RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL); 328 RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
328 mutex_unlock(&nf_ct_ecache_mutex); 329 mutex_unlock(&nf_ct_ecache_mutex);
330 /* synchronize_rcu() is called from ctnetlink_exit. */
329} 331}
330EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier); 332EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
331 333
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 02bcf00c2492..008299b7f78f 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -53,7 +53,11 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
53 53
54 rcu_read_lock(); 54 rcu_read_lock();
55 t = rcu_dereference(nf_ct_ext_types[id]); 55 t = rcu_dereference(nf_ct_ext_types[id]);
56 BUG_ON(t == NULL); 56 if (!t) {
57 rcu_read_unlock();
58 return NULL;
59 }
60
57 off = ALIGN(sizeof(struct nf_ct_ext), t->align); 61 off = ALIGN(sizeof(struct nf_ct_ext), t->align);
58 len = off + t->len + var_alloc_len; 62 len = off + t->len + var_alloc_len;
59 alloc_size = t->alloc_size + var_alloc_len; 63 alloc_size = t->alloc_size + var_alloc_len;
@@ -88,7 +92,10 @@ void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
88 92
89 rcu_read_lock(); 93 rcu_read_lock();
90 t = rcu_dereference(nf_ct_ext_types[id]); 94 t = rcu_dereference(nf_ct_ext_types[id]);
91 BUG_ON(t == NULL); 95 if (!t) {
96 rcu_read_unlock();
97 return NULL;
98 }
92 99
93 newoff = ALIGN(old->len, t->align); 100 newoff = ALIGN(old->len, t->align);
94 newlen = newoff + t->len + var_alloc_len; 101 newlen = newoff + t->len + var_alloc_len;
@@ -175,6 +182,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
175 RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL); 182 RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
176 update_alloc_size(type); 183 update_alloc_size(type);
177 mutex_unlock(&nf_ct_ext_type_mutex); 184 mutex_unlock(&nf_ct_ext_type_mutex);
178 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 185 synchronize_rcu();
179} 186}
180EXPORT_SYMBOL_GPL(nf_ct_extend_unregister); 187EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index d49cc1e03c5b..ecdc324c7785 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -3442,6 +3442,7 @@ static void __exit ctnetlink_exit(void)
3442#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT 3442#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
3443 RCU_INIT_POINTER(nfnl_ct_hook, NULL); 3443 RCU_INIT_POINTER(nfnl_ct_hook, NULL);
3444#endif 3444#endif
3445 synchronize_rcu();
3445} 3446}
3446 3447
3447module_init(ctnetlink_init); 3448module_init(ctnetlink_init);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 94b14c5a8b17..82802e4a6640 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -903,6 +903,8 @@ static void __exit nf_nat_cleanup(void)
903#ifdef CONFIG_XFRM 903#ifdef CONFIG_XFRM
904 RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL); 904 RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
905#endif 905#endif
906 synchronize_rcu();
907
906 for (i = 0; i < NFPROTO_NUMPROTO; i++) 908 for (i = 0; i < NFPROTO_NUMPROTO; i++)
907 kfree(nf_nat_l4protos[i]); 909 kfree(nf_nat_l4protos[i]);
908 910
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index de8782345c86..d45558178da5 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -32,6 +32,13 @@ MODULE_LICENSE("GPL");
32MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); 32MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
33MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers"); 33MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
34 34
35struct nfnl_cthelper {
36 struct list_head list;
37 struct nf_conntrack_helper helper;
38};
39
40static LIST_HEAD(nfnl_cthelper_list);
41
35static int 42static int
36nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff, 43nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
37 struct nf_conn *ct, enum ip_conntrack_info ctinfo) 44 struct nf_conn *ct, enum ip_conntrack_info ctinfo)
@@ -161,6 +168,7 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
161 int i, ret; 168 int i, ret;
162 struct nf_conntrack_expect_policy *expect_policy; 169 struct nf_conntrack_expect_policy *expect_policy;
163 struct nlattr *tb[NFCTH_POLICY_SET_MAX+1]; 170 struct nlattr *tb[NFCTH_POLICY_SET_MAX+1];
171 unsigned int class_max;
164 172
165 ret = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr, 173 ret = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
166 nfnl_cthelper_expect_policy_set); 174 nfnl_cthelper_expect_policy_set);
@@ -170,19 +178,18 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
170 if (!tb[NFCTH_POLICY_SET_NUM]) 178 if (!tb[NFCTH_POLICY_SET_NUM])
171 return -EINVAL; 179 return -EINVAL;
172 180
173 helper->expect_class_max = 181 class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
174 ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM])); 182 if (class_max == 0)
175 183 return -EINVAL;
176 if (helper->expect_class_max != 0 && 184 if (class_max > NF_CT_MAX_EXPECT_CLASSES)
177 helper->expect_class_max > NF_CT_MAX_EXPECT_CLASSES)
178 return -EOVERFLOW; 185 return -EOVERFLOW;
179 186
180 expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) * 187 expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) *
181 helper->expect_class_max, GFP_KERNEL); 188 class_max, GFP_KERNEL);
182 if (expect_policy == NULL) 189 if (expect_policy == NULL)
183 return -ENOMEM; 190 return -ENOMEM;
184 191
185 for (i=0; i<helper->expect_class_max; i++) { 192 for (i = 0; i < class_max; i++) {
186 if (!tb[NFCTH_POLICY_SET+i]) 193 if (!tb[NFCTH_POLICY_SET+i])
187 goto err; 194 goto err;
188 195
@@ -191,6 +198,8 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
191 if (ret < 0) 198 if (ret < 0)
192 goto err; 199 goto err;
193 } 200 }
201
202 helper->expect_class_max = class_max - 1;
194 helper->expect_policy = expect_policy; 203 helper->expect_policy = expect_policy;
195 return 0; 204 return 0;
196err: 205err:
@@ -203,18 +212,20 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
203 struct nf_conntrack_tuple *tuple) 212 struct nf_conntrack_tuple *tuple)
204{ 213{
205 struct nf_conntrack_helper *helper; 214 struct nf_conntrack_helper *helper;
215 struct nfnl_cthelper *nfcth;
206 int ret; 216 int ret;
207 217
208 if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN]) 218 if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
209 return -EINVAL; 219 return -EINVAL;
210 220
211 helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL); 221 nfcth = kzalloc(sizeof(*nfcth), GFP_KERNEL);
212 if (helper == NULL) 222 if (nfcth == NULL)
213 return -ENOMEM; 223 return -ENOMEM;
224 helper = &nfcth->helper;
214 225
215 ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]); 226 ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
216 if (ret < 0) 227 if (ret < 0)
217 goto err; 228 goto err1;
218 229
219 strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN); 230 strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
220 helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); 231 helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
@@ -245,15 +256,101 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
245 256
246 ret = nf_conntrack_helper_register(helper); 257 ret = nf_conntrack_helper_register(helper);
247 if (ret < 0) 258 if (ret < 0)
248 goto err; 259 goto err2;
249 260
261 list_add_tail(&nfcth->list, &nfnl_cthelper_list);
250 return 0; 262 return 0;
251err: 263err2:
252 kfree(helper); 264 kfree(helper->expect_policy);
265err1:
266 kfree(nfcth);
253 return ret; 267 return ret;
254} 268}
255 269
256static int 270static int
271nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy,
272 struct nf_conntrack_expect_policy *new_policy,
273 const struct nlattr *attr)
274{
275 struct nlattr *tb[NFCTH_POLICY_MAX + 1];
276 int err;
277
278 err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr,
279 nfnl_cthelper_expect_pol);
280 if (err < 0)
281 return err;
282
283 if (!tb[NFCTH_POLICY_NAME] ||
284 !tb[NFCTH_POLICY_EXPECT_MAX] ||
285 !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
286 return -EINVAL;
287
288 if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name))
289 return -EBUSY;
290
291 new_policy->max_expected =
292 ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
293 new_policy->timeout =
294 ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
295
296 return 0;
297}
298
299static int nfnl_cthelper_update_policy_all(struct nlattr *tb[],
300 struct nf_conntrack_helper *helper)
301{
302 struct nf_conntrack_expect_policy new_policy[helper->expect_class_max + 1];
303 struct nf_conntrack_expect_policy *policy;
304 int i, err;
305
306 /* Check first that all policy attributes are well-formed, so we don't
307 * leave things in inconsistent state on errors.
308 */
309 for (i = 0; i < helper->expect_class_max + 1; i++) {
310
311 if (!tb[NFCTH_POLICY_SET + i])
312 return -EINVAL;
313
314 err = nfnl_cthelper_update_policy_one(&helper->expect_policy[i],
315 &new_policy[i],
316 tb[NFCTH_POLICY_SET + i]);
317 if (err < 0)
318 return err;
319 }
320 /* Now we can safely update them. */
321 for (i = 0; i < helper->expect_class_max + 1; i++) {
322 policy = (struct nf_conntrack_expect_policy *)
323 &helper->expect_policy[i];
324 policy->max_expected = new_policy->max_expected;
325 policy->timeout = new_policy->timeout;
326 }
327
328 return 0;
329}
330
331static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper,
332 const struct nlattr *attr)
333{
334 struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1];
335 unsigned int class_max;
336 int err;
337
338 err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
339 nfnl_cthelper_expect_policy_set);
340 if (err < 0)
341 return err;
342
343 if (!tb[NFCTH_POLICY_SET_NUM])
344 return -EINVAL;
345
346 class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
347 if (helper->expect_class_max + 1 != class_max)
348 return -EBUSY;
349
350 return nfnl_cthelper_update_policy_all(tb, helper);
351}
352
353static int
257nfnl_cthelper_update(const struct nlattr * const tb[], 354nfnl_cthelper_update(const struct nlattr * const tb[],
258 struct nf_conntrack_helper *helper) 355 struct nf_conntrack_helper *helper)
259{ 356{
@@ -263,8 +360,7 @@ nfnl_cthelper_update(const struct nlattr * const tb[],
263 return -EBUSY; 360 return -EBUSY;
264 361
265 if (tb[NFCTH_POLICY]) { 362 if (tb[NFCTH_POLICY]) {
266 ret = nfnl_cthelper_parse_expect_policy(helper, 363 ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
267 tb[NFCTH_POLICY]);
268 if (ret < 0) 364 if (ret < 0)
269 return ret; 365 return ret;
270 } 366 }
@@ -293,7 +389,8 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
293 const char *helper_name; 389 const char *helper_name;
294 struct nf_conntrack_helper *cur, *helper = NULL; 390 struct nf_conntrack_helper *cur, *helper = NULL;
295 struct nf_conntrack_tuple tuple; 391 struct nf_conntrack_tuple tuple;
296 int ret = 0, i; 392 struct nfnl_cthelper *nlcth;
393 int ret = 0;
297 394
298 if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE]) 395 if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
299 return -EINVAL; 396 return -EINVAL;
@@ -304,31 +401,22 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
304 if (ret < 0) 401 if (ret < 0)
305 return ret; 402 return ret;
306 403
307 rcu_read_lock(); 404 list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
308 for (i = 0; i < nf_ct_helper_hsize && !helper; i++) { 405 cur = &nlcth->helper;
309 hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
310 406
311 /* skip non-userspace conntrack helpers. */ 407 if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
312 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) 408 continue;
313 continue;
314 409
315 if (strncmp(cur->name, helper_name, 410 if ((tuple.src.l3num != cur->tuple.src.l3num ||
316 NF_CT_HELPER_NAME_LEN) != 0) 411 tuple.dst.protonum != cur->tuple.dst.protonum))
317 continue; 412 continue;
318 413
319 if ((tuple.src.l3num != cur->tuple.src.l3num || 414 if (nlh->nlmsg_flags & NLM_F_EXCL)
320 tuple.dst.protonum != cur->tuple.dst.protonum)) 415 return -EEXIST;
321 continue;
322 416
323 if (nlh->nlmsg_flags & NLM_F_EXCL) { 417 helper = cur;
324 ret = -EEXIST; 418 break;
325 goto err;
326 }
327 helper = cur;
328 break;
329 }
330 } 419 }
331 rcu_read_unlock();
332 420
333 if (helper == NULL) 421 if (helper == NULL)
334 ret = nfnl_cthelper_create(tb, &tuple); 422 ret = nfnl_cthelper_create(tb, &tuple);
@@ -336,9 +424,6 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
336 ret = nfnl_cthelper_update(tb, helper); 424 ret = nfnl_cthelper_update(tb, helper);
337 425
338 return ret; 426 return ret;
339err:
340 rcu_read_unlock();
341 return ret;
342} 427}
343 428
344static int 429static int
@@ -377,10 +462,10 @@ nfnl_cthelper_dump_policy(struct sk_buff *skb,
377 goto nla_put_failure; 462 goto nla_put_failure;
378 463
379 if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM, 464 if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM,
380 htonl(helper->expect_class_max))) 465 htonl(helper->expect_class_max + 1)))
381 goto nla_put_failure; 466 goto nla_put_failure;
382 467
383 for (i=0; i<helper->expect_class_max; i++) { 468 for (i = 0; i < helper->expect_class_max + 1; i++) {
384 nest_parms2 = nla_nest_start(skb, 469 nest_parms2 = nla_nest_start(skb,
385 (NFCTH_POLICY_SET+i) | NLA_F_NESTED); 470 (NFCTH_POLICY_SET+i) | NLA_F_NESTED);
386 if (nest_parms2 == NULL) 471 if (nest_parms2 == NULL)
@@ -502,11 +587,12 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
502 struct sk_buff *skb, const struct nlmsghdr *nlh, 587 struct sk_buff *skb, const struct nlmsghdr *nlh,
503 const struct nlattr * const tb[]) 588 const struct nlattr * const tb[])
504{ 589{
505 int ret = -ENOENT, i; 590 int ret = -ENOENT;
506 struct nf_conntrack_helper *cur; 591 struct nf_conntrack_helper *cur;
507 struct sk_buff *skb2; 592 struct sk_buff *skb2;
508 char *helper_name = NULL; 593 char *helper_name = NULL;
509 struct nf_conntrack_tuple tuple; 594 struct nf_conntrack_tuple tuple;
595 struct nfnl_cthelper *nlcth;
510 bool tuple_set = false; 596 bool tuple_set = false;
511 597
512 if (nlh->nlmsg_flags & NLM_F_DUMP) { 598 if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -527,45 +613,39 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
527 tuple_set = true; 613 tuple_set = true;
528 } 614 }
529 615
530 for (i = 0; i < nf_ct_helper_hsize; i++) { 616 list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
531 hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) { 617 cur = &nlcth->helper;
618 if (helper_name &&
619 strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
620 continue;
532 621
533 /* skip non-userspace conntrack helpers. */ 622 if (tuple_set &&
534 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) 623 (tuple.src.l3num != cur->tuple.src.l3num ||
535 continue; 624 tuple.dst.protonum != cur->tuple.dst.protonum))
625 continue;
536 626
537 if (helper_name && strncmp(cur->name, helper_name, 627 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
538 NF_CT_HELPER_NAME_LEN) != 0) { 628 if (skb2 == NULL) {
539 continue; 629 ret = -ENOMEM;
540 } 630 break;
541 if (tuple_set && 631 }
542 (tuple.src.l3num != cur->tuple.src.l3num ||
543 tuple.dst.protonum != cur->tuple.dst.protonum))
544 continue;
545
546 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
547 if (skb2 == NULL) {
548 ret = -ENOMEM;
549 break;
550 }
551 632
552 ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid, 633 ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
553 nlh->nlmsg_seq, 634 nlh->nlmsg_seq,
554 NFNL_MSG_TYPE(nlh->nlmsg_type), 635 NFNL_MSG_TYPE(nlh->nlmsg_type),
555 NFNL_MSG_CTHELPER_NEW, cur); 636 NFNL_MSG_CTHELPER_NEW, cur);
556 if (ret <= 0) { 637 if (ret <= 0) {
557 kfree_skb(skb2); 638 kfree_skb(skb2);
558 break; 639 break;
559 } 640 }
560 641
561 ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid, 642 ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
562 MSG_DONTWAIT); 643 MSG_DONTWAIT);
563 if (ret > 0) 644 if (ret > 0)
564 ret = 0; 645 ret = 0;
565 646
566 /* this avoids a loop in nfnetlink. */ 647 /* this avoids a loop in nfnetlink. */
567 return ret == -EAGAIN ? -ENOBUFS : ret; 648 return ret == -EAGAIN ? -ENOBUFS : ret;
568 }
569 } 649 }
570 return ret; 650 return ret;
571} 651}
@@ -576,10 +656,10 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
576{ 656{
577 char *helper_name = NULL; 657 char *helper_name = NULL;
578 struct nf_conntrack_helper *cur; 658 struct nf_conntrack_helper *cur;
579 struct hlist_node *tmp;
580 struct nf_conntrack_tuple tuple; 659 struct nf_conntrack_tuple tuple;
581 bool tuple_set = false, found = false; 660 bool tuple_set = false, found = false;
582 int i, j = 0, ret; 661 struct nfnl_cthelper *nlcth, *n;
662 int j = 0, ret;
583 663
584 if (tb[NFCTH_NAME]) 664 if (tb[NFCTH_NAME])
585 helper_name = nla_data(tb[NFCTH_NAME]); 665 helper_name = nla_data(tb[NFCTH_NAME]);
@@ -592,28 +672,27 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
592 tuple_set = true; 672 tuple_set = true;
593 } 673 }
594 674
595 for (i = 0; i < nf_ct_helper_hsize; i++) { 675 list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
596 hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i], 676 cur = &nlcth->helper;
597 hnode) { 677 j++;
598 /* skip non-userspace conntrack helpers. */
599 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
600 continue;
601 678
602 j++; 679 if (helper_name &&
680 strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
681 continue;
603 682
604 if (helper_name && strncmp(cur->name, helper_name, 683 if (tuple_set &&
605 NF_CT_HELPER_NAME_LEN) != 0) { 684 (tuple.src.l3num != cur->tuple.src.l3num ||
606 continue; 685 tuple.dst.protonum != cur->tuple.dst.protonum))
607 } 686 continue;
608 if (tuple_set &&
609 (tuple.src.l3num != cur->tuple.src.l3num ||
610 tuple.dst.protonum != cur->tuple.dst.protonum))
611 continue;
612 687
613 found = true; 688 found = true;
614 nf_conntrack_helper_unregister(cur); 689 nf_conntrack_helper_unregister(cur);
615 } 690 kfree(cur->expect_policy);
691
692 list_del(&nlcth->list);
693 kfree(nlcth);
616 } 694 }
695
617 /* Make sure we return success if we flush and there is no helpers */ 696 /* Make sure we return success if we flush and there is no helpers */
618 return (found || j == 0) ? 0 : -ENOENT; 697 return (found || j == 0) ? 0 : -ENOENT;
619} 698}
@@ -662,20 +741,16 @@ err_out:
662static void __exit nfnl_cthelper_exit(void) 741static void __exit nfnl_cthelper_exit(void)
663{ 742{
664 struct nf_conntrack_helper *cur; 743 struct nf_conntrack_helper *cur;
665 struct hlist_node *tmp; 744 struct nfnl_cthelper *nlcth, *n;
666 int i;
667 745
668 nfnetlink_subsys_unregister(&nfnl_cthelper_subsys); 746 nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
669 747
670 for (i=0; i<nf_ct_helper_hsize; i++) { 748 list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
671 hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i], 749 cur = &nlcth->helper;
672 hnode) {
673 /* skip non-userspace conntrack helpers. */
674 if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
675 continue;
676 750
677 nf_conntrack_helper_unregister(cur); 751 nf_conntrack_helper_unregister(cur);
678 } 752 kfree(cur->expect_policy);
753 kfree(nlcth);
679 } 754 }
680} 755}
681 756
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index baa75f3ab7e7..57c2cdf7b691 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -646,8 +646,8 @@ static void __exit cttimeout_exit(void)
646#ifdef CONFIG_NF_CONNTRACK_TIMEOUT 646#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
647 RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL); 647 RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
648 RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL); 648 RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
649 synchronize_rcu();
649#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ 650#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
650 rcu_barrier();
651} 651}
652 652
653module_init(cttimeout_init); 653module_init(cttimeout_init);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 3ee0b8a000a4..933509ebf3d3 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -443,7 +443,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
443 skb = alloc_skb(size, GFP_ATOMIC); 443 skb = alloc_skb(size, GFP_ATOMIC);
444 if (!skb) { 444 if (!skb) {
445 skb_tx_error(entskb); 445 skb_tx_error(entskb);
446 return NULL; 446 goto nlmsg_failure;
447 } 447 }
448 448
449 nlh = nlmsg_put(skb, 0, 0, 449 nlh = nlmsg_put(skb, 0, 0,
@@ -452,7 +452,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
452 if (!nlh) { 452 if (!nlh) {
453 skb_tx_error(entskb); 453 skb_tx_error(entskb);
454 kfree_skb(skb); 454 kfree_skb(skb);
455 return NULL; 455 goto nlmsg_failure;
456 } 456 }
457 nfmsg = nlmsg_data(nlh); 457 nfmsg = nlmsg_data(nlh);
458 nfmsg->nfgen_family = entry->state.pf; 458 nfmsg->nfgen_family = entry->state.pf;
@@ -598,12 +598,17 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
598 } 598 }
599 599
600 nlh->nlmsg_len = skb->len; 600 nlh->nlmsg_len = skb->len;
601 if (seclen)
602 security_release_secctx(secdata, seclen);
601 return skb; 603 return skb;
602 604
603nla_put_failure: 605nla_put_failure:
604 skb_tx_error(entskb); 606 skb_tx_error(entskb);
605 kfree_skb(skb); 607 kfree_skb(skb);
606 net_err_ratelimited("nf_queue: error creating packet message\n"); 608 net_err_ratelimited("nf_queue: error creating packet message\n");
609nlmsg_failure:
610 if (seclen)
611 security_release_secctx(secdata, seclen);
607 return NULL; 612 return NULL;
608} 613}
609 614
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index e0a87776a010..7b2c2fce408a 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -643,8 +643,8 @@ static bool skb_nfct_cached(struct net *net,
643 */ 643 */
644 if (nf_ct_is_confirmed(ct)) 644 if (nf_ct_is_confirmed(ct))
645 nf_ct_delete(ct, 0, 0); 645 nf_ct_delete(ct, 0, 0);
646 else 646
647 nf_conntrack_put(&ct->ct_general); 647 nf_conntrack_put(&ct->ct_general);
648 nf_ct_set(skb, NULL, 0); 648 nf_ct_set(skb, NULL, 0);
649 return false; 649 return false;
650 } 650 }
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 9d4bb8eb63f2..3f76cb765e5b 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -527,7 +527,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
527 527
528 /* Link layer. */ 528 /* Link layer. */
529 clear_vlan(key); 529 clear_vlan(key);
530 if (key->mac_proto == MAC_PROTO_NONE) { 530 if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
531 if (unlikely(eth_type_vlan(skb->protocol))) 531 if (unlikely(eth_type_vlan(skb->protocol)))
532 return -EINVAL; 532 return -EINVAL;
533 533
@@ -745,7 +745,13 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
745 745
746int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key) 746int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
747{ 747{
748 return key_extract(skb, key); 748 int res;
749
750 res = key_extract(skb, key);
751 if (!res)
752 key->mac_proto &= ~SW_FLOW_KEY_INVALID;
753
754 return res;
749} 755}
750 756
751static int key_extract_mac_proto(struct sk_buff *skb) 757static int key_extract_mac_proto(struct sk_buff *skb)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index a0dbe7ca8f72..8489beff5c25 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3665,6 +3665,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3665 return -EBUSY; 3665 return -EBUSY;
3666 if (copy_from_user(&val, optval, sizeof(val))) 3666 if (copy_from_user(&val, optval, sizeof(val)))
3667 return -EFAULT; 3667 return -EFAULT;
3668 if (val > INT_MAX)
3669 return -EINVAL;
3668 po->tp_reserve = val; 3670 po->tp_reserve = val;
3669 return 0; 3671 return 0;
3670 } 3672 }
@@ -4193,8 +4195,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4193 if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) 4195 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4194 goto out; 4196 goto out;
4195 if (po->tp_version >= TPACKET_V3 && 4197 if (po->tp_version >= TPACKET_V3 &&
4196 (int)(req->tp_block_size - 4198 req->tp_block_size <=
4197 BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) 4199 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
4198 goto out; 4200 goto out;
4199 if (unlikely(req->tp_frame_size < po->tp_hdrlen + 4201 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
4200 po->tp_reserve)) 4202 po->tp_reserve))
@@ -4205,6 +4207,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4205 rb->frames_per_block = req->tp_block_size / req->tp_frame_size; 4207 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4206 if (unlikely(rb->frames_per_block == 0)) 4208 if (unlikely(rb->frames_per_block == 0))
4207 goto out; 4209 goto out;
4210 if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
4211 goto out;
4208 if (unlikely((rb->frames_per_block * req->tp_block_nr) != 4212 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4209 req->tp_frame_nr)) 4213 req->tp_frame_nr))
4210 goto out; 4214 goto out;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 0439a1a68367..a9708da28eb5 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -246,6 +246,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
246 if (!sctp_ulpq_init(&asoc->ulpq, asoc)) 246 if (!sctp_ulpq_init(&asoc->ulpq, asoc))
247 goto fail_init; 247 goto fail_init;
248 248
249 if (sctp_stream_new(asoc, gfp))
250 goto fail_init;
251
249 /* Assume that peer would support both address types unless we are 252 /* Assume that peer would support both address types unless we are
250 * told otherwise. 253 * told otherwise.
251 */ 254 */
@@ -264,7 +267,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
264 /* AUTH related initializations */ 267 /* AUTH related initializations */
265 INIT_LIST_HEAD(&asoc->endpoint_shared_keys); 268 INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
266 if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp)) 269 if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
267 goto fail_init; 270 goto stream_free;
268 271
269 asoc->active_key_id = ep->active_key_id; 272 asoc->active_key_id = ep->active_key_id;
270 asoc->prsctp_enable = ep->prsctp_enable; 273 asoc->prsctp_enable = ep->prsctp_enable;
@@ -287,6 +290,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
287 290
288 return asoc; 291 return asoc;
289 292
293stream_free:
294 sctp_stream_free(asoc->stream);
290fail_init: 295fail_init:
291 sock_put(asoc->base.sk); 296 sock_put(asoc->base.sk);
292 sctp_endpoint_put(asoc->ep); 297 sctp_endpoint_put(asoc->ep);
@@ -1407,7 +1412,7 @@ sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
1407/* Update the association's pmtu and frag_point by going through all the 1412/* Update the association's pmtu and frag_point by going through all the
1408 * transports. This routine is called when a transport's PMTU has changed. 1413 * transports. This routine is called when a transport's PMTU has changed.
1409 */ 1414 */
1410void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc) 1415void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
1411{ 1416{
1412 struct sctp_transport *t; 1417 struct sctp_transport *t;
1413 __u32 pmtu = 0; 1418 __u32 pmtu = 0;
@@ -1419,8 +1424,8 @@ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
1419 list_for_each_entry(t, &asoc->peer.transport_addr_list, 1424 list_for_each_entry(t, &asoc->peer.transport_addr_list,
1420 transports) { 1425 transports) {
1421 if (t->pmtu_pending && t->dst) { 1426 if (t->pmtu_pending && t->dst) {
1422 sctp_transport_update_pmtu(sk, t, 1427 sctp_transport_update_pmtu(
1423 SCTP_TRUNC4(dst_mtu(t->dst))); 1428 t, SCTP_TRUNC4(dst_mtu(t->dst)));
1424 t->pmtu_pending = 0; 1429 t->pmtu_pending = 0;
1425 } 1430 }
1426 if (!pmtu || (t->pathmtu < pmtu)) 1431 if (!pmtu || (t->pathmtu < pmtu))
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 2a28ab20487f..0e06a278d2a9 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -401,10 +401,10 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
401 401
402 if (t->param_flags & SPP_PMTUD_ENABLE) { 402 if (t->param_flags & SPP_PMTUD_ENABLE) {
403 /* Update transports view of the MTU */ 403 /* Update transports view of the MTU */
404 sctp_transport_update_pmtu(sk, t, pmtu); 404 sctp_transport_update_pmtu(t, pmtu);
405 405
406 /* Update association pmtu. */ 406 /* Update association pmtu. */
407 sctp_assoc_sync_pmtu(sk, asoc); 407 sctp_assoc_sync_pmtu(asoc);
408 } 408 }
409 409
410 /* Retransmit with the new pmtu setting. 410 /* Retransmit with the new pmtu setting.
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 1224421036b3..1409a875ad8e 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -86,43 +86,53 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
86{ 86{
87 struct sctp_transport *tp = packet->transport; 87 struct sctp_transport *tp = packet->transport;
88 struct sctp_association *asoc = tp->asoc; 88 struct sctp_association *asoc = tp->asoc;
89 struct sock *sk;
89 90
90 pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag); 91 pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
91
92 packet->vtag = vtag; 92 packet->vtag = vtag;
93 93
94 if (asoc && tp->dst) { 94 /* do the following jobs only once for a flush schedule */
95 struct sock *sk = asoc->base.sk; 95 if (!sctp_packet_empty(packet))
96 96 return;
97 rcu_read_lock();
98 if (__sk_dst_get(sk) != tp->dst) {
99 dst_hold(tp->dst);
100 sk_setup_caps(sk, tp->dst);
101 }
102
103 if (sk_can_gso(sk)) {
104 struct net_device *dev = tp->dst->dev;
105 97
106 packet->max_size = dev->gso_max_size; 98 /* set packet max_size with pathmtu */
107 } else { 99 packet->max_size = tp->pathmtu;
108 packet->max_size = asoc->pathmtu; 100 if (!asoc)
109 } 101 return;
110 rcu_read_unlock();
111 102
112 } else { 103 /* update dst or transport pathmtu if in need */
113 packet->max_size = tp->pathmtu; 104 sk = asoc->base.sk;
105 if (!sctp_transport_dst_check(tp)) {
106 sctp_transport_route(tp, NULL, sctp_sk(sk));
107 if (asoc->param_flags & SPP_PMTUD_ENABLE)
108 sctp_assoc_sync_pmtu(asoc);
109 } else if (!sctp_transport_pmtu_check(tp)) {
110 if (asoc->param_flags & SPP_PMTUD_ENABLE)
111 sctp_assoc_sync_pmtu(asoc);
114 } 112 }
115 113
116 if (ecn_capable && sctp_packet_empty(packet)) { 114 /* If there a is a prepend chunk stick it on the list before
117 struct sctp_chunk *chunk; 115 * any other chunks get appended.
116 */
117 if (ecn_capable) {
118 struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);
118 119
119 /* If there a is a prepend chunk stick it on the list before
120 * any other chunks get appended.
121 */
122 chunk = sctp_get_ecne_prepend(asoc);
123 if (chunk) 120 if (chunk)
124 sctp_packet_append_chunk(packet, chunk); 121 sctp_packet_append_chunk(packet, chunk);
125 } 122 }
123
124 if (!tp->dst)
125 return;
126
127 /* set packet max_size with gso_max_size if gso is enabled*/
128 rcu_read_lock();
129 if (__sk_dst_get(sk) != tp->dst) {
130 dst_hold(tp->dst);
131 sk_setup_caps(sk, tp->dst);
132 }
133 packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
134 : asoc->pathmtu;
135 rcu_read_unlock();
126} 136}
127 137
128/* Initialize the packet structure. */ 138/* Initialize the packet structure. */
@@ -582,12 +592,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
582 sh->vtag = htonl(packet->vtag); 592 sh->vtag = htonl(packet->vtag);
583 sh->checksum = 0; 593 sh->checksum = 0;
584 594
585 /* update dst if in need */ 595 /* drop packet if no dst */
586 if (!sctp_transport_dst_check(tp)) {
587 sctp_transport_route(tp, NULL, sctp_sk(sk));
588 if (asoc && asoc->param_flags & SPP_PMTUD_ENABLE)
589 sctp_assoc_sync_pmtu(sk, asoc);
590 }
591 dst = dst_clone(tp->dst); 596 dst = dst_clone(tp->dst);
592 if (!dst) { 597 if (!dst) {
593 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); 598 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
@@ -704,7 +709,7 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
704 */ 709 */
705 710
706 if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) && 711 if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
707 !chunk->msg->force_delay) 712 !asoc->force_delay)
708 /* Nothing unacked */ 713 /* Nothing unacked */
709 return SCTP_XMIT_OK; 714 return SCTP_XMIT_OK;
710 715
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 3f78d7f06e14..fe4c3d462f6e 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1036,8 +1036,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1036 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid 1036 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid
1037 * stream identifier. 1037 * stream identifier.
1038 */ 1038 */
1039 if (chunk->sinfo.sinfo_stream >= 1039 if (chunk->sinfo.sinfo_stream >= asoc->stream->outcnt) {
1040 asoc->c.sinit_num_ostreams) {
1041 1040
1042 /* Mark as failed send. */ 1041 /* Mark as failed send. */
1043 sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM); 1042 sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 206377fe91ec..a0b29d43627f 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -361,8 +361,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
361 sctp_seq_dump_remote_addrs(seq, assoc); 361 sctp_seq_dump_remote_addrs(seq, assoc);
362 seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d " 362 seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d "
363 "%8d %8d %8d %8d", 363 "%8d %8d %8d %8d",
364 assoc->hbinterval, assoc->c.sinit_max_instreams, 364 assoc->hbinterval, assoc->stream->incnt,
365 assoc->c.sinit_num_ostreams, assoc->max_retrans, 365 assoc->stream->outcnt, assoc->max_retrans,
366 assoc->init_retries, assoc->shutdown_retries, 366 assoc->init_retries, assoc->shutdown_retries,
367 assoc->rtx_data_chunks, 367 assoc->rtx_data_chunks,
368 atomic_read(&sk->sk_wmem_alloc), 368 atomic_read(&sk->sk_wmem_alloc),
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 969a30c7bb54..118faff6a332 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2460,15 +2460,10 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
2460 * association. 2460 * association.
2461 */ 2461 */
2462 if (!asoc->temp) { 2462 if (!asoc->temp) {
2463 int error; 2463 if (sctp_stream_init(asoc, gfp))
2464
2465 asoc->stream = sctp_stream_new(asoc->c.sinit_max_instreams,
2466 asoc->c.sinit_num_ostreams, gfp);
2467 if (!asoc->stream)
2468 goto clean_up; 2464 goto clean_up;
2469 2465
2470 error = sctp_assoc_set_id(asoc, gfp); 2466 if (sctp_assoc_set_id(asoc, gfp))
2471 if (error)
2472 goto clean_up; 2467 goto clean_up;
2473 } 2468 }
2474 2469
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index ab1374fa5ab0..4f5e6cfc7f60 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3955,7 +3955,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
3955 3955
3956 /* Silently discard the chunk if stream-id is not valid */ 3956 /* Silently discard the chunk if stream-id is not valid */
3957 sctp_walk_fwdtsn(skip, chunk) { 3957 sctp_walk_fwdtsn(skip, chunk) {
3958 if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams) 3958 if (ntohs(skip->stream) >= asoc->stream->incnt)
3959 goto discard_noforce; 3959 goto discard_noforce;
3960 } 3960 }
3961 3961
@@ -4026,7 +4026,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
4026 4026
4027 /* Silently discard the chunk if stream-id is not valid */ 4027 /* Silently discard the chunk if stream-id is not valid */
4028 sctp_walk_fwdtsn(skip, chunk) { 4028 sctp_walk_fwdtsn(skip, chunk) {
4029 if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams) 4029 if (ntohs(skip->stream) >= asoc->stream->incnt)
4030 goto gen_shutdown; 4030 goto gen_shutdown;
4031 } 4031 }
4032 4032
@@ -6362,7 +6362,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
6362 * and discard the DATA chunk. 6362 * and discard the DATA chunk.
6363 */ 6363 */
6364 sid = ntohs(data_hdr->stream); 6364 sid = ntohs(data_hdr->stream);
6365 if (sid >= asoc->c.sinit_max_instreams) { 6365 if (sid >= asoc->stream->incnt) {
6366 /* Mark tsn as received even though we drop it */ 6366 /* Mark tsn as received even though we drop it */
6367 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); 6367 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
6368 6368
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6489446925e6..8e56df8d175d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1907,7 +1907,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1907 } 1907 }
1908 1908
1909 if (asoc->pmtu_pending) 1909 if (asoc->pmtu_pending)
1910 sctp_assoc_pending_pmtu(sk, asoc); 1910 sctp_assoc_pending_pmtu(asoc);
1911 1911
1912 /* If fragmentation is disabled and the message length exceeds the 1912 /* If fragmentation is disabled and the message length exceeds the
1913 * association fragmentation point, return EMSGSIZE. The I-D 1913 * association fragmentation point, return EMSGSIZE. The I-D
@@ -1920,7 +1920,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1920 } 1920 }
1921 1921
1922 /* Check for invalid stream. */ 1922 /* Check for invalid stream. */
1923 if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { 1923 if (sinfo->sinfo_stream >= asoc->stream->outcnt) {
1924 err = -EINVAL; 1924 err = -EINVAL;
1925 goto out_free; 1925 goto out_free;
1926 } 1926 }
@@ -1965,7 +1965,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1965 err = PTR_ERR(datamsg); 1965 err = PTR_ERR(datamsg);
1966 goto out_free; 1966 goto out_free;
1967 } 1967 }
1968 datamsg->force_delay = !!(msg->msg_flags & MSG_MORE); 1968 asoc->force_delay = !!(msg->msg_flags & MSG_MORE);
1969 1969
1970 /* Now send the (possibly) fragmented message. */ 1970 /* Now send the (possibly) fragmented message. */
1971 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { 1971 list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
@@ -2435,7 +2435,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2435 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { 2435 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
2436 if (trans) { 2436 if (trans) {
2437 trans->pathmtu = params->spp_pathmtu; 2437 trans->pathmtu = params->spp_pathmtu;
2438 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2438 sctp_assoc_sync_pmtu(asoc);
2439 } else if (asoc) { 2439 } else if (asoc) {
2440 asoc->pathmtu = params->spp_pathmtu; 2440 asoc->pathmtu = params->spp_pathmtu;
2441 } else { 2441 } else {
@@ -2451,7 +2451,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2451 (trans->param_flags & ~SPP_PMTUD) | pmtud_change; 2451 (trans->param_flags & ~SPP_PMTUD) | pmtud_change;
2452 if (update) { 2452 if (update) {
2453 sctp_transport_pmtu(trans, sctp_opt2sk(sp)); 2453 sctp_transport_pmtu(trans, sctp_opt2sk(sp));
2454 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); 2454 sctp_assoc_sync_pmtu(asoc);
2455 } 2455 }
2456 } else if (asoc) { 2456 } else if (asoc) {
2457 asoc->param_flags = 2457 asoc->param_flags =
@@ -4497,8 +4497,8 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
4497 info->sctpi_rwnd = asoc->a_rwnd; 4497 info->sctpi_rwnd = asoc->a_rwnd;
4498 info->sctpi_unackdata = asoc->unack_data; 4498 info->sctpi_unackdata = asoc->unack_data;
4499 info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4499 info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
4500 info->sctpi_instrms = asoc->c.sinit_max_instreams; 4500 info->sctpi_instrms = asoc->stream->incnt;
4501 info->sctpi_outstrms = asoc->c.sinit_num_ostreams; 4501 info->sctpi_outstrms = asoc->stream->outcnt;
4502 list_for_each(pos, &asoc->base.inqueue.in_chunk_list) 4502 list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
4503 info->sctpi_inqueue++; 4503 info->sctpi_inqueue++;
4504 list_for_each(pos, &asoc->outqueue.out_chunk_list) 4504 list_for_each(pos, &asoc->outqueue.out_chunk_list)
@@ -4727,8 +4727,8 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
4727 status.sstat_unackdata = asoc->unack_data; 4727 status.sstat_unackdata = asoc->unack_data;
4728 4728
4729 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); 4729 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
4730 status.sstat_instrms = asoc->c.sinit_max_instreams; 4730 status.sstat_instrms = asoc->stream->incnt;
4731 status.sstat_outstrms = asoc->c.sinit_num_ostreams; 4731 status.sstat_outstrms = asoc->stream->outcnt;
4732 status.sstat_fragmentation_point = asoc->frag_point; 4732 status.sstat_fragmentation_point = asoc->frag_point;
4733 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); 4733 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
4734 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, 4734 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 961d0a1e99d1..eff6008a32ba 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -35,33 +35,60 @@
35#include <net/sctp/sctp.h> 35#include <net/sctp/sctp.h>
36#include <net/sctp/sm.h> 36#include <net/sctp/sm.h>
37 37
38struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp) 38int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp)
39{ 39{
40 struct sctp_stream *stream; 40 struct sctp_stream *stream;
41 int i; 41 int i;
42 42
43 stream = kzalloc(sizeof(*stream), gfp); 43 stream = kzalloc(sizeof(*stream), gfp);
44 if (!stream) 44 if (!stream)
45 return NULL; 45 return -ENOMEM;
46 46
47 stream->outcnt = outcnt; 47 stream->outcnt = asoc->c.sinit_num_ostreams;
48 stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp); 48 stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
49 if (!stream->out) { 49 if (!stream->out) {
50 kfree(stream); 50 kfree(stream);
51 return NULL; 51 return -ENOMEM;
52 } 52 }
53 for (i = 0; i < stream->outcnt; i++) 53 for (i = 0; i < stream->outcnt; i++)
54 stream->out[i].state = SCTP_STREAM_OPEN; 54 stream->out[i].state = SCTP_STREAM_OPEN;
55 55
56 stream->incnt = incnt; 56 asoc->stream = stream;
57
58 return 0;
59}
60
61int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp)
62{
63 struct sctp_stream *stream = asoc->stream;
64 int i;
65
66 /* Initial stream->out size may be very big, so free it and alloc
67 * a new one with new outcnt to save memory.
68 */
69 kfree(stream->out);
70 stream->outcnt = asoc->c.sinit_num_ostreams;
71 stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
72 if (!stream->out)
73 goto nomem;
74
75 for (i = 0; i < stream->outcnt; i++)
76 stream->out[i].state = SCTP_STREAM_OPEN;
77
78 stream->incnt = asoc->c.sinit_max_instreams;
57 stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp); 79 stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp);
58 if (!stream->in) { 80 if (!stream->in) {
59 kfree(stream->out); 81 kfree(stream->out);
60 kfree(stream); 82 goto nomem;
61 return NULL;
62 } 83 }
63 84
64 return stream; 85 return 0;
86
87nomem:
88 asoc->stream = NULL;
89 kfree(stream);
90
91 return -ENOMEM;
65} 92}
66 93
67void sctp_stream_free(struct sctp_stream *stream) 94void sctp_stream_free(struct sctp_stream *stream)
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 3379668af368..721eeebfcd8a 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -251,14 +251,13 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
251 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 251 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
252} 252}
253 253
254void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu) 254void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
255{ 255{
256 struct dst_entry *dst; 256 struct dst_entry *dst = sctp_transport_dst_check(t);
257 257
258 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { 258 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
259 pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n", 259 pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
260 __func__, pmtu, 260 __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
261 SCTP_DEFAULT_MINSEGMENT);
262 /* Use default minimum segment size and disable 261 /* Use default minimum segment size and disable
263 * pmtu discovery on this transport. 262 * pmtu discovery on this transport.
264 */ 263 */
@@ -267,17 +266,13 @@ void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 p
267 t->pathmtu = pmtu; 266 t->pathmtu = pmtu;
268 } 267 }
269 268
270 dst = sctp_transport_dst_check(t);
271 if (!dst)
272 t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
273
274 if (dst) { 269 if (dst) {
275 dst->ops->update_pmtu(dst, sk, NULL, pmtu); 270 dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
276
277 dst = sctp_transport_dst_check(t); 271 dst = sctp_transport_dst_check(t);
278 if (!dst)
279 t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
280 } 272 }
273
274 if (!dst)
275 t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
281} 276}
282 277
283/* Caches the dst entry and source address for a transport's destination 278/* Caches the dst entry and source address for a transport's destination
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 8931e33b6541..2b720fa35c4f 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1635,6 +1635,7 @@ static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv,
1635 1635
1636 xprt = &svsk->sk_xprt; 1636 xprt = &svsk->sk_xprt;
1637 svc_xprt_init(net, &svc_tcp_bc_class, xprt, serv); 1637 svc_xprt_init(net, &svc_tcp_bc_class, xprt, serv);
1638 set_bit(XPT_CONG_CTRL, &svsk->sk_xprt.xpt_flags);
1638 1639
1639 serv->sv_bc_xprt = xprt; 1640 serv->sv_bc_xprt = xprt;
1640 1641
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index c13a5c35ce14..fc8f14c7bfec 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -127,6 +127,7 @@ static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *serv,
127 xprt = &cma_xprt->sc_xprt; 127 xprt = &cma_xprt->sc_xprt;
128 128
129 svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv); 129 svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv);
130 set_bit(XPT_CONG_CTRL, &xprt->xpt_flags);
130 serv->sv_bc_xprt = xprt; 131 serv->sv_bc_xprt = xprt;
131 132
132 dprintk("svcrdma: %s(%p)\n", __func__, xprt); 133 dprintk("svcrdma: %s(%p)\n", __func__, xprt);
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 16b6b5988be9..570a2b67ca10 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -132,12 +132,10 @@ static int wiphy_resume(struct device *dev)
132 /* Age scan results with time spent in suspend */ 132 /* Age scan results with time spent in suspend */
133 cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at); 133 cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at);
134 134
135 if (rdev->ops->resume) { 135 rtnl_lock();
136 rtnl_lock(); 136 if (rdev->wiphy.registered && rdev->ops->resume)
137 if (rdev->wiphy.registered) 137 ret = rdev_resume(rdev);
138 ret = rdev_resume(rdev); 138 rtnl_unlock();
139 rtnl_unlock();
140 }
141 139
142 return ret; 140 return ret;
143} 141}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 9705c279494b..40a8aa39220d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -412,7 +412,14 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es
412 up = nla_data(rp); 412 up = nla_data(rp);
413 ulen = xfrm_replay_state_esn_len(up); 413 ulen = xfrm_replay_state_esn_len(up);
414 414
415 if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen) 415 /* Check the overall length and the internal bitmap length to avoid
416 * potential overflow. */
417 if (nla_len(rp) < ulen ||
418 xfrm_replay_state_esn_len(replay_esn) != ulen ||
419 replay_esn->bmp_len != up->bmp_len)
420 return -EINVAL;
421
422 if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
416 return -EINVAL; 423 return -EINVAL;
417 424
418 return 0; 425 return 0;
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index d6ca649cb0e9..afe3fd3af1e4 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -148,6 +148,10 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \
148# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) 148# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
149cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4)) 149cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4))
150 150
151# cc-if-fullversion
152# Usage: EXTRA_CFLAGS += $(call cc-if-fullversion, -lt, 040502, -O1)
153cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo $(4))
154
151# cc-ldoption 155# cc-ldoption
152# Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) 156# Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
153cc-ldoption = $(call try-run,\ 157cc-ldoption = $(call try-run,\
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 0a07f9014944..7234e61e7ce3 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -155,7 +155,7 @@ else
155# $(call addtree,-I$(obj)) locates .h files in srctree, from generated .c files 155# $(call addtree,-I$(obj)) locates .h files in srctree, from generated .c files
156# and locates generated .h files 156# and locates generated .h files
157# FIXME: Replace both with specific CFLAGS* statements in the makefiles 157# FIXME: Replace both with specific CFLAGS* statements in the makefiles
158__c_flags = $(if $(obj),-I$(srctree)/$(src) -I$(obj)) \ 158__c_flags = $(if $(obj),$(call addtree,-I$(src)) -I$(obj)) \
159 $(call flags,_c_flags) 159 $(call flags,_c_flags)
160__a_flags = $(call flags,_a_flags) 160__a_flags = $(call flags,_a_flags)
161__cpp_flags = $(call flags,_cpp_flags) 161__cpp_flags = $(call flags,_cpp_flags)
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index 26d208b435a0..cfddddb9c9d7 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -914,7 +914,7 @@ on_treeview2_button_press_event(GtkWidget * widget,
914 current = menu; 914 current = menu;
915 display_tree_part(); 915 display_tree_part();
916 gtk_widget_set_sensitive(back_btn, TRUE); 916 gtk_widget_set_sensitive(back_btn, TRUE);
917 } else if ((col == COL_OPTION)) { 917 } else if (col == COL_OPTION) {
918 toggle_sym_value(menu); 918 toggle_sym_value(menu);
919 gtk_tree_view_expand_row(view, path, TRUE); 919 gtk_tree_view_expand_row(view, path, TRUE);
920 } 920 }
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 33980d1c8037..01c4cfe30c9f 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -267,6 +267,10 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
267 /* NOTE: overflow flag is not cleared */ 267 /* NOTE: overflow flag is not cleared */
268 spin_unlock_irqrestore(&f->lock, flags); 268 spin_unlock_irqrestore(&f->lock, flags);
269 269
270 /* close the old pool and wait until all users are gone */
271 snd_seq_pool_mark_closing(oldpool);
272 snd_use_lock_sync(&f->use_lock);
273
270 /* release cells in old pool */ 274 /* release cells in old pool */
271 for (cell = oldhead; cell; cell = next) { 275 for (cell = oldhead; cell; cell = next) {
272 next = cell->next; 276 next = cell->next;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 7f989898cbd9..299835d1fbaa 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4858,6 +4858,7 @@ enum {
4858 ALC292_FIXUP_DISABLE_AAMIX, 4858 ALC292_FIXUP_DISABLE_AAMIX,
4859 ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK, 4859 ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK,
4860 ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 4860 ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
4861 ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
4861 ALC275_FIXUP_DELL_XPS, 4862 ALC275_FIXUP_DELL_XPS,
4862 ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, 4863 ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
4863 ALC293_FIXUP_LENOVO_SPK_NOISE, 4864 ALC293_FIXUP_LENOVO_SPK_NOISE,
@@ -5470,6 +5471,15 @@ static const struct hda_fixup alc269_fixups[] = {
5470 .chained = true, 5471 .chained = true,
5471 .chain_id = ALC269_FIXUP_HEADSET_MODE 5472 .chain_id = ALC269_FIXUP_HEADSET_MODE
5472 }, 5473 },
5474 [ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE] = {
5475 .type = HDA_FIXUP_PINS,
5476 .v.pins = (const struct hda_pintbl[]) {
5477 { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
5478 { }
5479 },
5480 .chained = true,
5481 .chain_id = ALC269_FIXUP_HEADSET_MODE
5482 },
5473 [ALC275_FIXUP_DELL_XPS] = { 5483 [ALC275_FIXUP_DELL_XPS] = {
5474 .type = HDA_FIXUP_VERBS, 5484 .type = HDA_FIXUP_VERBS,
5475 .v.verbs = (const struct hda_verb[]) { 5485 .v.verbs = (const struct hda_verb[]) {
@@ -5542,7 +5552,7 @@ static const struct hda_fixup alc269_fixups[] = {
5542 .type = HDA_FIXUP_FUNC, 5552 .type = HDA_FIXUP_FUNC,
5543 .v.func = alc298_fixup_speaker_volume, 5553 .v.func = alc298_fixup_speaker_volume,
5544 .chained = true, 5554 .chained = true,
5545 .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 5555 .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
5546 }, 5556 },
5547 [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = { 5557 [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
5548 .type = HDA_FIXUP_PINS, 5558 .type = HDA_FIXUP_PINS,
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 89ac5f5a93eb..7ae46c2647d4 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -349,7 +349,7 @@ static int atmel_classd_codec_dai_digital_mute(struct snd_soc_dai *codec_dai,
349} 349}
350 350
351#define CLASSD_ACLK_RATE_11M2896_MPY_8 (112896 * 100 * 8) 351#define CLASSD_ACLK_RATE_11M2896_MPY_8 (112896 * 100 * 8)
352#define CLASSD_ACLK_RATE_12M288_MPY_8 (12228 * 1000 * 8) 352#define CLASSD_ACLK_RATE_12M288_MPY_8 (12288 * 1000 * 8)
353 353
354static struct { 354static struct {
355 int rate; 355 int rate;
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 78fca8acd3ec..fd272a40485b 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1534,21 +1534,20 @@ static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
1534 pin->mst_capable = false; 1534 pin->mst_capable = false;
1535 /* if not MST, default is port[0] */ 1535 /* if not MST, default is port[0] */
1536 hport = &pin->ports[0]; 1536 hport = &pin->ports[0];
1537 goto out;
1538 } else { 1537 } else {
1539 for (i = 0; i < pin->num_ports; i++) { 1538 for (i = 0; i < pin->num_ports; i++) {
1540 pin->mst_capable = true; 1539 pin->mst_capable = true;
1541 if (pin->ports[i].id == pipe) { 1540 if (pin->ports[i].id == pipe) {
1542 hport = &pin->ports[i]; 1541 hport = &pin->ports[i];
1543 goto out; 1542 break;
1544 } 1543 }
1545 } 1544 }
1546 } 1545 }
1546
1547 if (hport)
1548 hdac_hdmi_present_sense(pin, hport);
1547 } 1549 }
1548 1550
1549out:
1550 if (pin && hport)
1551 hdac_hdmi_present_sense(pin, hport);
1552} 1551}
1553 1552
1554static struct i915_audio_component_audio_ops aops = { 1553static struct i915_audio_component_audio_ops aops = {
@@ -1998,7 +1997,7 @@ static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
1998 struct hdac_hdmi_pin *pin, *pin_next; 1997 struct hdac_hdmi_pin *pin, *pin_next;
1999 struct hdac_hdmi_cvt *cvt, *cvt_next; 1998 struct hdac_hdmi_cvt *cvt, *cvt_next;
2000 struct hdac_hdmi_pcm *pcm, *pcm_next; 1999 struct hdac_hdmi_pcm *pcm, *pcm_next;
2001 struct hdac_hdmi_port *port; 2000 struct hdac_hdmi_port *port, *port_next;
2002 int i; 2001 int i;
2003 2002
2004 snd_soc_unregister_codec(&edev->hdac.dev); 2003 snd_soc_unregister_codec(&edev->hdac.dev);
@@ -2008,8 +2007,9 @@ static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
2008 if (list_empty(&pcm->port_list)) 2007 if (list_empty(&pcm->port_list))
2009 continue; 2008 continue;
2010 2009
2011 list_for_each_entry(port, &pcm->port_list, head) 2010 list_for_each_entry_safe(port, port_next,
2012 port = NULL; 2011 &pcm->port_list, head)
2012 list_del(&port->head);
2013 2013
2014 list_del(&pcm->head); 2014 list_del(&pcm->head);
2015 kfree(pcm); 2015 kfree(pcm);
diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
index 324461e985b3..476135ec5726 100644
--- a/sound/soc/codecs/rt5665.c
+++ b/sound/soc/codecs/rt5665.c
@@ -1241,7 +1241,7 @@ static irqreturn_t rt5665_irq(int irq, void *data)
1241static void rt5665_jd_check_handler(struct work_struct *work) 1241static void rt5665_jd_check_handler(struct work_struct *work)
1242{ 1242{
1243 struct rt5665_priv *rt5665 = container_of(work, struct rt5665_priv, 1243 struct rt5665_priv *rt5665 = container_of(work, struct rt5665_priv,
1244 calibrate_work.work); 1244 jd_check_work.work);
1245 1245
1246 if (snd_soc_read(rt5665->codec, RT5665_AJD1_CTRL) & 0x0010) { 1246 if (snd_soc_read(rt5665->codec, RT5665_AJD1_CTRL) & 0x0010) {
1247 /* jack out */ 1247 /* jack out */
@@ -2252,7 +2252,7 @@ static const char * const rt5665_if2_1_adc_in_src[] = {
2252 2252
2253static const SOC_ENUM_SINGLE_DECL( 2253static const SOC_ENUM_SINGLE_DECL(
2254 rt5665_if2_1_adc_in_enum, RT5665_DIG_INF2_DATA, 2254 rt5665_if2_1_adc_in_enum, RT5665_DIG_INF2_DATA,
2255 RT5665_IF3_ADC_IN_SFT, rt5665_if2_1_adc_in_src); 2255 RT5665_IF2_1_ADC_IN_SFT, rt5665_if2_1_adc_in_src);
2256 2256
2257static const struct snd_kcontrol_new rt5665_if2_1_adc_in_mux = 2257static const struct snd_kcontrol_new rt5665_if2_1_adc_in_mux =
2258 SOC_DAPM_ENUM("IF2_1 ADC IN Source", rt5665_if2_1_adc_in_enum); 2258 SOC_DAPM_ENUM("IF2_1 ADC IN Source", rt5665_if2_1_adc_in_enum);
@@ -3178,6 +3178,9 @@ static const struct snd_soc_dapm_route rt5665_dapm_routes[] = {
3178 {"DAC Mono Right Filter", NULL, "DAC Mono R ASRC", is_using_asrc}, 3178 {"DAC Mono Right Filter", NULL, "DAC Mono R ASRC", is_using_asrc},
3179 {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc}, 3179 {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc},
3180 {"DAC Stereo2 Filter", NULL, "DAC STO2 ASRC", is_using_asrc}, 3180 {"DAC Stereo2 Filter", NULL, "DAC STO2 ASRC", is_using_asrc},
3181 {"I2S1 ASRC", NULL, "CLKDET"},
3182 {"I2S2 ASRC", NULL, "CLKDET"},
3183 {"I2S3 ASRC", NULL, "CLKDET"},
3181 3184
3182 /*Vref*/ 3185 /*Vref*/
3183 {"Mic Det Power", NULL, "Vref2"}, 3186 {"Mic Det Power", NULL, "Vref2"},
@@ -3912,6 +3915,7 @@ static const struct snd_soc_dapm_route rt5665_dapm_routes[] = {
3912 {"Mono MIX", "MONOVOL Switch", "MONOVOL"}, 3915 {"Mono MIX", "MONOVOL Switch", "MONOVOL"},
3913 {"Mono Amp", NULL, "Mono MIX"}, 3916 {"Mono Amp", NULL, "Mono MIX"},
3914 {"Mono Amp", NULL, "Vref2"}, 3917 {"Mono Amp", NULL, "Vref2"},
3918 {"Mono Amp", NULL, "Vref3"},
3915 {"Mono Amp", NULL, "CLKDET SYS"}, 3919 {"Mono Amp", NULL, "CLKDET SYS"},
3916 {"Mono Amp", NULL, "CLKDET MONO"}, 3920 {"Mono Amp", NULL, "CLKDET MONO"},
3917 {"Mono Playback", "Switch", "Mono Amp"}, 3921 {"Mono Playback", "Switch", "Mono Amp"},
@@ -4798,7 +4802,7 @@ static int rt5665_i2c_probe(struct i2c_client *i2c,
4798 /* Enhance performance*/ 4802 /* Enhance performance*/
4799 regmap_update_bits(rt5665->regmap, RT5665_PWR_ANLG_1, 4803 regmap_update_bits(rt5665->regmap, RT5665_PWR_ANLG_1,
4800 RT5665_HP_DRIVER_MASK | RT5665_LDO1_DVO_MASK, 4804 RT5665_HP_DRIVER_MASK | RT5665_LDO1_DVO_MASK,
4801 RT5665_HP_DRIVER_5X | RT5665_LDO1_DVO_09); 4805 RT5665_HP_DRIVER_5X | RT5665_LDO1_DVO_12);
4802 4806
4803 INIT_DELAYED_WORK(&rt5665->jack_detect_work, 4807 INIT_DELAYED_WORK(&rt5665->jack_detect_work,
4804 rt5665_jack_detect_handler); 4808 rt5665_jack_detect_handler);
diff --git a/sound/soc/codecs/rt5665.h b/sound/soc/codecs/rt5665.h
index 12f7080a0d3c..a30f5e6d0628 100644
--- a/sound/soc/codecs/rt5665.h
+++ b/sound/soc/codecs/rt5665.h
@@ -1106,7 +1106,7 @@
1106#define RT5665_HP_DRIVER_MASK (0x3 << 2) 1106#define RT5665_HP_DRIVER_MASK (0x3 << 2)
1107#define RT5665_HP_DRIVER_1X (0x0 << 2) 1107#define RT5665_HP_DRIVER_1X (0x0 << 2)
1108#define RT5665_HP_DRIVER_3X (0x1 << 2) 1108#define RT5665_HP_DRIVER_3X (0x1 << 2)
1109#define RT5665_HP_DRIVER_5X (0x2 << 2) 1109#define RT5665_HP_DRIVER_5X (0x3 << 2)
1110#define RT5665_LDO1_DVO_MASK (0x3) 1110#define RT5665_LDO1_DVO_MASK (0x3)
1111#define RT5665_LDO1_DVO_09 (0x0) 1111#define RT5665_LDO1_DVO_09 (0x0)
1112#define RT5665_LDO1_DVO_10 (0x1) 1112#define RT5665_LDO1_DVO_10 (0x1)
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index d151224ffcca..bbdb72f73df1 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -899,7 +899,10 @@ static int wm_coeff_put(struct snd_kcontrol *kctl,
899 899
900 mutex_lock(&ctl->dsp->pwr_lock); 900 mutex_lock(&ctl->dsp->pwr_lock);
901 901
902 memcpy(ctl->cache, p, ctl->len); 902 if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
903 ret = -EPERM;
904 else
905 memcpy(ctl->cache, p, ctl->len);
903 906
904 ctl->set = 1; 907 ctl->set = 1;
905 if (ctl->enabled && ctl->dsp->running) 908 if (ctl->enabled && ctl->dsp->running)
@@ -926,6 +929,8 @@ static int wm_coeff_tlv_put(struct snd_kcontrol *kctl,
926 ctl->set = 1; 929 ctl->set = 1;
927 if (ctl->enabled && ctl->dsp->running) 930 if (ctl->enabled && ctl->dsp->running)
928 ret = wm_coeff_write_control(ctl, ctl->cache, size); 931 ret = wm_coeff_write_control(ctl, ctl->cache, size);
932 else if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
933 ret = -EPERM;
929 } 934 }
930 935
931 mutex_unlock(&ctl->dsp->pwr_lock); 936 mutex_unlock(&ctl->dsp->pwr_lock);
@@ -947,7 +952,7 @@ static int wm_coeff_put_acked(struct snd_kcontrol *kctl,
947 952
948 mutex_lock(&ctl->dsp->pwr_lock); 953 mutex_lock(&ctl->dsp->pwr_lock);
949 954
950 if (ctl->enabled) 955 if (ctl->enabled && ctl->dsp->running)
951 ret = wm_coeff_write_acked_control(ctl, val); 956 ret = wm_coeff_write_acked_control(ctl, val);
952 else 957 else
953 ret = -EPERM; 958 ret = -EPERM;
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index 4924575d2e95..343b291fc372 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -115,6 +115,7 @@ int asoc_simple_card_parse_clk(struct device *dev,
115 clk = devm_get_clk_from_child(dev, node, NULL); 115 clk = devm_get_clk_from_child(dev, node, NULL);
116 if (!IS_ERR(clk)) { 116 if (!IS_ERR(clk)) {
117 simple_dai->sysclk = clk_get_rate(clk); 117 simple_dai->sysclk = clk_get_rate(clk);
118 simple_dai->clk = clk;
118 } else if (!of_property_read_u32(node, "system-clock-frequency", &val)) { 119 } else if (!of_property_read_u32(node, "system-clock-frequency", &val)) {
119 simple_dai->sysclk = val; 120 simple_dai->sysclk = val;
120 } else { 121 } else {
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index ed58b5b3555a..2dbfb1b24ef4 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -512,7 +512,7 @@ static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
512 if (bc->set_params != SKL_PARAM_INIT) 512 if (bc->set_params != SKL_PARAM_INIT)
513 continue; 513 continue;
514 514
515 mconfig->formats_config.caps = (u32 *)&bc->params; 515 mconfig->formats_config.caps = (u32 *)bc->params;
516 mconfig->formats_config.caps_size = bc->size; 516 mconfig->formats_config.caps_size = bc->size;
517 517
518 break; 518 break;
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index 05cf809cf9e1..d7013bde6f45 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -13,7 +13,7 @@ config SND_SOC_MT2701
13 13
14config SND_SOC_MT2701_CS42448 14config SND_SOC_MT2701_CS42448
15 tristate "ASoc Audio driver for MT2701 with CS42448 codec" 15 tristate "ASoc Audio driver for MT2701 with CS42448 codec"
16 depends on SND_SOC_MT2701 16 depends on SND_SOC_MT2701 && I2C
17 select SND_SOC_CS42XX8_I2C 17 select SND_SOC_CS42XX8_I2C
18 select SND_SOC_BT_SCO 18 select SND_SOC_BT_SCO
19 help 19 help
diff --git a/sound/soc/sh/rcar/cmd.c b/sound/soc/sh/rcar/cmd.c
index abb5eaac854a..7d92a24b7cfa 100644
--- a/sound/soc/sh/rcar/cmd.c
+++ b/sound/soc/sh/rcar/cmd.c
@@ -31,23 +31,24 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
31 struct rsnd_mod *mix = rsnd_io_to_mod_mix(io); 31 struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
32 struct device *dev = rsnd_priv_to_dev(priv); 32 struct device *dev = rsnd_priv_to_dev(priv);
33 u32 data; 33 u32 data;
34 u32 path[] = {
35 [1] = 1 << 0,
36 [5] = 1 << 8,
37 [6] = 1 << 12,
38 [9] = 1 << 15,
39 };
34 40
35 if (!mix && !dvc) 41 if (!mix && !dvc)
36 return 0; 42 return 0;
37 43
44 if (ARRAY_SIZE(path) < rsnd_mod_id(mod) + 1)
45 return -ENXIO;
46
38 if (mix) { 47 if (mix) {
39 struct rsnd_dai *rdai; 48 struct rsnd_dai *rdai;
40 struct rsnd_mod *src; 49 struct rsnd_mod *src;
41 struct rsnd_dai_stream *tio; 50 struct rsnd_dai_stream *tio;
42 int i; 51 int i;
43 u32 path[] = {
44 [0] = 0,
45 [1] = 1 << 0,
46 [2] = 0,
47 [3] = 0,
48 [4] = 0,
49 [5] = 1 << 8
50 };
51 52
52 /* 53 /*
53 * it is assuming that integrater is well understanding about 54 * it is assuming that integrater is well understanding about
@@ -70,16 +71,19 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
70 } else { 71 } else {
71 struct rsnd_mod *src = rsnd_io_to_mod_src(io); 72 struct rsnd_mod *src = rsnd_io_to_mod_src(io);
72 73
73 u32 path[] = { 74 u8 cmd_case[] = {
74 [0] = 0x30000, 75 [0] = 0x3,
75 [1] = 0x30001, 76 [1] = 0x3,
76 [2] = 0x40000, 77 [2] = 0x4,
77 [3] = 0x10000, 78 [3] = 0x1,
78 [4] = 0x20000, 79 [4] = 0x2,
79 [5] = 0x40100 80 [5] = 0x4,
81 [6] = 0x1,
82 [9] = 0x2,
80 }; 83 };
81 84
82 data = path[rsnd_mod_id(src)]; 85 data = path[rsnd_mod_id(src)] |
86 cmd_case[rsnd_mod_id(src)] << 16;
83 } 87 }
84 88
85 dev_dbg(dev, "ctu/mix path = 0x%08x", data); 89 dev_dbg(dev, "ctu/mix path = 0x%08x", data);
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 1f405c833867..241cb3b08a07 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -454,6 +454,20 @@ static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
454 return ioread32(rsnd_dmapp_addr(dmac, dma, reg)); 454 return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
455} 455}
456 456
457static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
458{
459 struct rsnd_mod *mod = rsnd_mod_get(dma);
460 struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
461 struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
462 void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
463 u32 val = ioread32(addr);
464
465 val &= ~mask;
466 val |= (data & mask);
467
468 iowrite32(val, addr);
469}
470
457static int rsnd_dmapp_stop(struct rsnd_mod *mod, 471static int rsnd_dmapp_stop(struct rsnd_mod *mod,
458 struct rsnd_dai_stream *io, 472 struct rsnd_dai_stream *io,
459 struct rsnd_priv *priv) 473 struct rsnd_priv *priv)
@@ -461,10 +475,10 @@ static int rsnd_dmapp_stop(struct rsnd_mod *mod,
461 struct rsnd_dma *dma = rsnd_mod_to_dma(mod); 475 struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
462 int i; 476 int i;
463 477
464 rsnd_dmapp_write(dma, 0, PDMACHCR); 478 rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);
465 479
466 for (i = 0; i < 1024; i++) { 480 for (i = 0; i < 1024; i++) {
467 if (0 == rsnd_dmapp_read(dma, PDMACHCR)) 481 if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
468 return 0; 482 return 0;
469 udelay(1); 483 udelay(1);
470 } 484 }
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 4e817c8a18c0..14fafdaf1395 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -64,7 +64,11 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod,
64 mask1 = (1 << 4) | (1 << 20); /* mask sync bit */ 64 mask1 = (1 << 4) | (1 << 20); /* mask sync bit */
65 mask2 = (1 << 4); /* mask sync bit */ 65 mask2 = (1 << 4); /* mask sync bit */
66 val1 = val2 = 0; 66 val1 = val2 = 0;
67 if (rsnd_ssi_is_pin_sharing(io)) { 67 if (id == 8) {
68 /*
69 * SSI8 pin is sharing with SSI7, nothing to do.
70 */
71 } else if (rsnd_ssi_is_pin_sharing(io)) {
68 int shift = -1; 72 int shift = -1;
69 73
70 switch (id) { 74 switch (id) {
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 6dca408faae3..2722bb0c5573 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3326,7 +3326,10 @@ static int snd_soc_platform_drv_pcm_new(struct snd_soc_pcm_runtime *rtd)
3326{ 3326{
3327 struct snd_soc_platform *platform = rtd->platform; 3327 struct snd_soc_platform *platform = rtd->platform;
3328 3328
3329 return platform->driver->pcm_new(rtd); 3329 if (platform->driver->pcm_new)
3330 return platform->driver->pcm_new(rtd);
3331 else
3332 return 0;
3330} 3333}
3331 3334
3332static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm) 3335static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm)
@@ -3334,7 +3337,8 @@ static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm)
3334 struct snd_soc_pcm_runtime *rtd = pcm->private_data; 3337 struct snd_soc_pcm_runtime *rtd = pcm->private_data;
3335 struct snd_soc_platform *platform = rtd->platform; 3338 struct snd_soc_platform *platform = rtd->platform;
3336 3339
3337 platform->driver->pcm_free(pcm); 3340 if (platform->driver->pcm_free)
3341 platform->driver->pcm_free(pcm);
3338} 3342}
3339 3343
3340/** 3344/**
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index 5992c6ab3833..93a8df6ed880 100644
--- a/sound/soc/sti/uniperif_reader.c
+++ b/sound/soc/sti/uniperif_reader.c
@@ -349,6 +349,8 @@ static int uni_reader_startup(struct snd_pcm_substream *substream,
349 struct uniperif *reader = priv->dai_data.uni; 349 struct uniperif *reader = priv->dai_data.uni;
350 int ret; 350 int ret;
351 351
352 reader->substream = substream;
353
352 if (!UNIPERIF_TYPE_IS_TDM(reader)) 354 if (!UNIPERIF_TYPE_IS_TDM(reader))
353 return 0; 355 return 0;
354 356
@@ -378,6 +380,7 @@ static void uni_reader_shutdown(struct snd_pcm_substream *substream,
378 /* Stop the reader */ 380 /* Stop the reader */
379 uni_reader_stop(reader); 381 uni_reader_stop(reader);
380 } 382 }
383 reader->substream = NULL;
381} 384}
382 385
383static const struct snd_soc_dai_ops uni_reader_dai_ops = { 386static const struct snd_soc_dai_ops uni_reader_dai_ops = {
diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
index b92bdc8361af..7527ba29a5a0 100644
--- a/sound/soc/sunxi/sun8i-codec.c
+++ b/sound/soc/sunxi/sun8i-codec.c
@@ -259,25 +259,20 @@ static int sun8i_codec_hw_params(struct snd_pcm_substream *substream,
259 return 0; 259 return 0;
260} 260}
261 261
262static const struct snd_kcontrol_new sun8i_output_left_mixer_controls[] = { 262static const struct snd_kcontrol_new sun8i_dac_mixer_controls[] = {
263 SOC_DAPM_SINGLE("LSlot 0", SUN8I_DAC_MXR_SRC, 263 SOC_DAPM_DOUBLE("AIF1 Slot 0 Digital DAC Playback Switch",
264 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA0L, 1, 0), 264 SUN8I_DAC_MXR_SRC,
265 SOC_DAPM_SINGLE("LSlot 1", SUN8I_DAC_MXR_SRC, 265 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA0L,
266 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA1L, 1, 0),
267 SOC_DAPM_SINGLE("DACL", SUN8I_DAC_MXR_SRC,
268 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF2DACL, 1, 0),
269 SOC_DAPM_SINGLE("ADCL", SUN8I_DAC_MXR_SRC,
270 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_ADCL, 1, 0),
271};
272
273static const struct snd_kcontrol_new sun8i_output_right_mixer_controls[] = {
274 SOC_DAPM_SINGLE("RSlot 0", SUN8I_DAC_MXR_SRC,
275 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA0R, 1, 0), 266 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA0R, 1, 0),
276 SOC_DAPM_SINGLE("RSlot 1", SUN8I_DAC_MXR_SRC, 267 SOC_DAPM_DOUBLE("AIF1 Slot 1 Digital DAC Playback Switch",
268 SUN8I_DAC_MXR_SRC,
269 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF1DA1L,
277 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA1R, 1, 0), 270 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF1DA1R, 1, 0),
278 SOC_DAPM_SINGLE("DACR", SUN8I_DAC_MXR_SRC, 271 SOC_DAPM_DOUBLE("AIF2 Digital DAC Playback Switch", SUN8I_DAC_MXR_SRC,
272 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_AIF2DACL,
279 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF2DACR, 1, 0), 273 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_AIF2DACR, 1, 0),
280 SOC_DAPM_SINGLE("ADCR", SUN8I_DAC_MXR_SRC, 274 SOC_DAPM_DOUBLE("ADC Digital DAC Playback Switch", SUN8I_DAC_MXR_SRC,
275 SUN8I_DAC_MXR_SRC_DACL_MXR_SRC_ADCL,
281 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_ADCR, 1, 0), 276 SUN8I_DAC_MXR_SRC_DACR_MXR_SRC_ADCR, 1, 0),
282}; 277};
283 278
@@ -286,19 +281,21 @@ static const struct snd_soc_dapm_widget sun8i_codec_dapm_widgets[] = {
286 SND_SOC_DAPM_SUPPLY("DAC", SUN8I_DAC_DIG_CTRL, SUN8I_DAC_DIG_CTRL_ENDA, 281 SND_SOC_DAPM_SUPPLY("DAC", SUN8I_DAC_DIG_CTRL, SUN8I_DAC_DIG_CTRL_ENDA,
287 0, NULL, 0), 282 0, NULL, 0),
288 283
289 /* Analog DAC */ 284 /* Analog DAC AIF */
290 SND_SOC_DAPM_DAC("Digital Left DAC", "Playback", SUN8I_AIF1_DACDAT_CTRL, 285 SND_SOC_DAPM_AIF_IN("AIF1 Slot 0 Left", "Playback", 0,
291 SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0L_ENA, 0), 286 SUN8I_AIF1_DACDAT_CTRL,
292 SND_SOC_DAPM_DAC("Digital Right DAC", "Playback", SUN8I_AIF1_DACDAT_CTRL, 287 SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0L_ENA, 0),
293 SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0R_ENA, 0), 288 SND_SOC_DAPM_AIF_IN("AIF1 Slot 0 Right", "Playback", 0,
289 SUN8I_AIF1_DACDAT_CTRL,
290 SUN8I_AIF1_DACDAT_CTRL_AIF1_DA0R_ENA, 0),
294 291
295 /* DAC Mixers */ 292 /* DAC Mixers */
296 SND_SOC_DAPM_MIXER("Left DAC Mixer", SND_SOC_NOPM, 0, 0, 293 SND_SOC_DAPM_MIXER("Left Digital DAC Mixer", SND_SOC_NOPM, 0, 0,
297 sun8i_output_left_mixer_controls, 294 sun8i_dac_mixer_controls,
298 ARRAY_SIZE(sun8i_output_left_mixer_controls)), 295 ARRAY_SIZE(sun8i_dac_mixer_controls)),
299 SND_SOC_DAPM_MIXER("Right DAC Mixer", SND_SOC_NOPM, 0, 0, 296 SND_SOC_DAPM_MIXER("Right Digital DAC Mixer", SND_SOC_NOPM, 0, 0,
300 sun8i_output_right_mixer_controls, 297 sun8i_dac_mixer_controls,
301 ARRAY_SIZE(sun8i_output_right_mixer_controls)), 298 ARRAY_SIZE(sun8i_dac_mixer_controls)),
302 299
303 /* Clocks */ 300 /* Clocks */
304 SND_SOC_DAPM_SUPPLY("MODCLK AFI1", SUN8I_MOD_CLK_ENA, 301 SND_SOC_DAPM_SUPPLY("MODCLK AFI1", SUN8I_MOD_CLK_ENA,
@@ -321,8 +318,6 @@ static const struct snd_soc_dapm_widget sun8i_codec_dapm_widgets[] = {
321 SUN8I_MOD_RST_CTL_AIF1, 0, NULL, 0), 318 SUN8I_MOD_RST_CTL_AIF1, 0, NULL, 0),
322 SND_SOC_DAPM_SUPPLY("RST DAC", SUN8I_MOD_RST_CTL, 319 SND_SOC_DAPM_SUPPLY("RST DAC", SUN8I_MOD_RST_CTL,
323 SUN8I_MOD_RST_CTL_DAC, 0, NULL, 0), 320 SUN8I_MOD_RST_CTL_DAC, 0, NULL, 0),
324
325 SND_SOC_DAPM_OUTPUT("HP"),
326}; 321};
327 322
328static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = { 323static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = {
@@ -338,16 +333,14 @@ static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = {
338 { "DAC", NULL, "MODCLK DAC" }, 333 { "DAC", NULL, "MODCLK DAC" },
339 334
340 /* DAC Routes */ 335 /* DAC Routes */
341 { "Digital Left DAC", NULL, "DAC" }, 336 { "AIF1 Slot 0 Right", NULL, "DAC" },
342 { "Digital Right DAC", NULL, "DAC" }, 337 { "AIF1 Slot 0 Left", NULL, "DAC" },
343 338
344 /* DAC Mixer Routes */ 339 /* DAC Mixer Routes */
345 { "Left DAC Mixer", "LSlot 0", "Digital Left DAC"}, 340 { "Left Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch",
346 { "Right DAC Mixer", "RSlot 0", "Digital Right DAC"}, 341 "AIF1 Slot 0 Left"},
347 342 { "Right Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch",
348 /* End of route : HP out */ 343 "AIF1 Slot 0 Right"},
349 { "HP", NULL, "Left DAC Mixer" },
350 { "HP", NULL, "Right DAC Mixer" },
351}; 344};
352 345
353static struct snd_soc_dai_ops sun8i_codec_dai_ops = { 346static struct snd_soc_dai_ops sun8i_codec_dai_ops = {
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
index 122153b16ea4..390d7c9685fd 100644
--- a/tools/include/linux/filter.h
+++ b/tools/include/linux/filter.h
@@ -168,6 +168,16 @@
168 .off = OFF, \ 168 .off = OFF, \
169 .imm = 0 }) 169 .imm = 0 })
170 170
171/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
172
173#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
174 ((struct bpf_insn) { \
175 .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
176 .dst_reg = DST, \
177 .src_reg = SRC, \
178 .off = OFF, \
179 .imm = 0 })
180
171/* Memory store, *(uint *) (dst_reg + off16) = imm32 */ 181/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
172 182
173#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ 183#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 32fb7a294f0f..d8d94b9bd76c 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,7 +1,14 @@
1LIBDIR := ../../../lib 1LIBDIR := ../../../lib
2BPFDIR := $(LIBDIR)/bpf 2BPFDIR := $(LIBDIR)/bpf
3APIDIR := ../../../include/uapi
4GENDIR := ../../../../include/generated
5GENHDR := $(GENDIR)/autoconf.h
3 6
4CFLAGS += -Wall -O2 -I../../../include/uapi -I$(LIBDIR) -I../../../include 7ifneq ($(wildcard $(GENHDR)),)
8 GENFLAGS := -DHAVE_GENHDR
9endif
10
11CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
5LDLIBS += -lcap -lelf 12LDLIBS += -lcap -lelf
6 13
7TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs 14TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index f4f43c98cf7f..0963f8ffd25c 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -30,6 +30,14 @@
30 30
31#include <bpf/bpf.h> 31#include <bpf/bpf.h>
32 32
33#ifdef HAVE_GENHDR
34# include "autoconf.h"
35#else
36# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
37# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
38# endif
39#endif
40
33#include "../../../include/linux/filter.h" 41#include "../../../include/linux/filter.h"
34 42
35#ifndef ARRAY_SIZE 43#ifndef ARRAY_SIZE
@@ -40,6 +48,8 @@
40#define MAX_FIXUPS 8 48#define MAX_FIXUPS 8
41#define MAX_NR_MAPS 4 49#define MAX_NR_MAPS 4
42 50
51#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
52
43struct bpf_test { 53struct bpf_test {
44 const char *descr; 54 const char *descr;
45 struct bpf_insn insns[MAX_INSNS]; 55 struct bpf_insn insns[MAX_INSNS];
@@ -55,6 +65,7 @@ struct bpf_test {
55 REJECT 65 REJECT
56 } result, result_unpriv; 66 } result, result_unpriv;
57 enum bpf_prog_type prog_type; 67 enum bpf_prog_type prog_type;
68 uint8_t flags;
58}; 69};
59 70
60/* Note we want this to be 64 bit aligned so that the end of our array is 71/* Note we want this to be 64 bit aligned so that the end of our array is
@@ -2434,6 +2445,30 @@ static struct bpf_test tests[] = {
2434 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2445 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2435 }, 2446 },
2436 { 2447 {
2448 "direct packet access: test15 (spill with xadd)",
2449 .insns = {
2450 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2451 offsetof(struct __sk_buff, data)),
2452 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2453 offsetof(struct __sk_buff, data_end)),
2454 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2455 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2456 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2457 BPF_MOV64_IMM(BPF_REG_5, 4096),
2458 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2459 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2460 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2461 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2462 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2463 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2464 BPF_MOV64_IMM(BPF_REG_0, 0),
2465 BPF_EXIT_INSN(),
2466 },
2467 .errstr = "R2 invalid mem access 'inv'",
2468 .result = REJECT,
2469 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2470 },
2471 {
2437 "helper access to packet: test1, valid packet_ptr range", 2472 "helper access to packet: test1, valid packet_ptr range",
2438 .insns = { 2473 .insns = {
2439 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 2474 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -2936,6 +2971,7 @@ static struct bpf_test tests[] = {
2936 .errstr_unpriv = "R0 pointer arithmetic prohibited", 2971 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2937 .result_unpriv = REJECT, 2972 .result_unpriv = REJECT,
2938 .result = ACCEPT, 2973 .result = ACCEPT,
2974 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2939 }, 2975 },
2940 { 2976 {
2941 "valid map access into an array with a variable", 2977 "valid map access into an array with a variable",
@@ -2959,6 +2995,7 @@ static struct bpf_test tests[] = {
2959 .errstr_unpriv = "R0 pointer arithmetic prohibited", 2995 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2960 .result_unpriv = REJECT, 2996 .result_unpriv = REJECT,
2961 .result = ACCEPT, 2997 .result = ACCEPT,
2998 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2962 }, 2999 },
2963 { 3000 {
2964 "valid map access into an array with a signed variable", 3001 "valid map access into an array with a signed variable",
@@ -2986,6 +3023,7 @@ static struct bpf_test tests[] = {
2986 .errstr_unpriv = "R0 pointer arithmetic prohibited", 3023 .errstr_unpriv = "R0 pointer arithmetic prohibited",
2987 .result_unpriv = REJECT, 3024 .result_unpriv = REJECT,
2988 .result = ACCEPT, 3025 .result = ACCEPT,
3026 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2989 }, 3027 },
2990 { 3028 {
2991 "invalid map access into an array with a constant", 3029 "invalid map access into an array with a constant",
@@ -3027,6 +3065,7 @@ static struct bpf_test tests[] = {
3027 .errstr = "R0 min value is outside of the array range", 3065 .errstr = "R0 min value is outside of the array range",
3028 .result_unpriv = REJECT, 3066 .result_unpriv = REJECT,
3029 .result = REJECT, 3067 .result = REJECT,
3068 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3030 }, 3069 },
3031 { 3070 {
3032 "invalid map access into an array with a variable", 3071 "invalid map access into an array with a variable",
@@ -3050,6 +3089,7 @@ static struct bpf_test tests[] = {
3050 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 3089 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3051 .result_unpriv = REJECT, 3090 .result_unpriv = REJECT,
3052 .result = REJECT, 3091 .result = REJECT,
3092 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3053 }, 3093 },
3054 { 3094 {
3055 "invalid map access into an array with no floor check", 3095 "invalid map access into an array with no floor check",
@@ -3076,6 +3116,7 @@ static struct bpf_test tests[] = {
3076 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 3116 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3077 .result_unpriv = REJECT, 3117 .result_unpriv = REJECT,
3078 .result = REJECT, 3118 .result = REJECT,
3119 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3079 }, 3120 },
3080 { 3121 {
3081 "invalid map access into an array with a invalid max check", 3122 "invalid map access into an array with a invalid max check",
@@ -3102,6 +3143,7 @@ static struct bpf_test tests[] = {
3102 .errstr = "invalid access to map value, value_size=48 off=44 size=8", 3143 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
3103 .result_unpriv = REJECT, 3144 .result_unpriv = REJECT,
3104 .result = REJECT, 3145 .result = REJECT,
3146 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3105 }, 3147 },
3106 { 3148 {
3107 "invalid map access into an array with a invalid max check", 3149 "invalid map access into an array with a invalid max check",
@@ -3131,6 +3173,7 @@ static struct bpf_test tests[] = {
3131 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 3173 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
3132 .result_unpriv = REJECT, 3174 .result_unpriv = REJECT,
3133 .result = REJECT, 3175 .result = REJECT,
3176 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3134 }, 3177 },
3135 { 3178 {
3136 "multiple registers share map_lookup_elem result", 3179 "multiple registers share map_lookup_elem result",
@@ -3254,6 +3297,7 @@ static struct bpf_test tests[] = {
3254 .result = REJECT, 3297 .result = REJECT,
3255 .errstr_unpriv = "R0 pointer arithmetic prohibited", 3298 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3256 .result_unpriv = REJECT, 3299 .result_unpriv = REJECT,
3300 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3257 }, 3301 },
3258 { 3302 {
3259 "constant register |= constant should keep constant type", 3303 "constant register |= constant should keep constant type",
@@ -3420,6 +3464,26 @@ static struct bpf_test tests[] = {
3420 .prog_type = BPF_PROG_TYPE_LWT_XMIT, 3464 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3421 }, 3465 },
3422 { 3466 {
3467 "overlapping checks for direct packet access",
3468 .insns = {
3469 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3470 offsetof(struct __sk_buff, data)),
3471 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3472 offsetof(struct __sk_buff, data_end)),
3473 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3474 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3475 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
3476 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3477 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
3478 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
3479 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
3480 BPF_MOV64_IMM(BPF_REG_0, 0),
3481 BPF_EXIT_INSN(),
3482 },
3483 .result = ACCEPT,
3484 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3485 },
3486 {
3423 "invalid access of tc_classid for LWT_IN", 3487 "invalid access of tc_classid for LWT_IN",
3424 .insns = { 3488 .insns = {
3425 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 3489 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
@@ -3963,7 +4027,208 @@ static struct bpf_test tests[] = {
3963 .result_unpriv = REJECT, 4027 .result_unpriv = REJECT,
3964 }, 4028 },
3965 { 4029 {
3966 "map element value (adjusted) is preserved across register spilling", 4030 "map element value or null is marked on register spilling",
4031 .insns = {
4032 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4033 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4034 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4035 BPF_LD_MAP_FD(BPF_REG_1, 0),
4036 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4037 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4038 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
4039 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4040 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4041 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4042 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4043 BPF_EXIT_INSN(),
4044 },
4045 .fixup_map2 = { 3 },
4046 .errstr_unpriv = "R0 leaks addr",
4047 .result = ACCEPT,
4048 .result_unpriv = REJECT,
4049 },
4050 {
4051 "map element value store of cleared call register",
4052 .insns = {
4053 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4054 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4055 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4056 BPF_LD_MAP_FD(BPF_REG_1, 0),
4057 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4058 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4059 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
4060 BPF_EXIT_INSN(),
4061 },
4062 .fixup_map2 = { 3 },
4063 .errstr_unpriv = "R1 !read_ok",
4064 .errstr = "R1 !read_ok",
4065 .result = REJECT,
4066 .result_unpriv = REJECT,
4067 },
4068 {
4069 "map element value with unaligned store",
4070 .insns = {
4071 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4072 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4073 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4074 BPF_LD_MAP_FD(BPF_REG_1, 0),
4075 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4076 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
4077 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4078 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4079 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
4080 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
4081 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4082 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
4083 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
4084 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
4085 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
4086 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
4087 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
4088 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
4089 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
4090 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
4091 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
4092 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
4093 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
4094 BPF_EXIT_INSN(),
4095 },
4096 .fixup_map2 = { 3 },
4097 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4098 .result = ACCEPT,
4099 .result_unpriv = REJECT,
4100 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4101 },
4102 {
4103 "map element value with unaligned load",
4104 .insns = {
4105 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4106 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4107 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4108 BPF_LD_MAP_FD(BPF_REG_1, 0),
4109 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4110 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4111 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4112 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
4113 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4114 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4115 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
4116 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4117 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
4118 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
4119 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
4120 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4121 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
4122 BPF_EXIT_INSN(),
4123 },
4124 .fixup_map2 = { 3 },
4125 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4126 .result = ACCEPT,
4127 .result_unpriv = REJECT,
4128 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4129 },
4130 {
4131 "map element value illegal alu op, 1",
4132 .insns = {
4133 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4134 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4135 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4136 BPF_LD_MAP_FD(BPF_REG_1, 0),
4137 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4138 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4139 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
4140 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4141 BPF_EXIT_INSN(),
4142 },
4143 .fixup_map2 = { 3 },
4144 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4145 .errstr = "invalid mem access 'inv'",
4146 .result = REJECT,
4147 .result_unpriv = REJECT,
4148 },
4149 {
4150 "map element value illegal alu op, 2",
4151 .insns = {
4152 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4153 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4154 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4155 BPF_LD_MAP_FD(BPF_REG_1, 0),
4156 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4157 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4158 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
4159 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4160 BPF_EXIT_INSN(),
4161 },
4162 .fixup_map2 = { 3 },
4163 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4164 .errstr = "invalid mem access 'inv'",
4165 .result = REJECT,
4166 .result_unpriv = REJECT,
4167 },
4168 {
4169 "map element value illegal alu op, 3",
4170 .insns = {
4171 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4173 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4174 BPF_LD_MAP_FD(BPF_REG_1, 0),
4175 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4176 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4177 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
4178 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4179 BPF_EXIT_INSN(),
4180 },
4181 .fixup_map2 = { 3 },
4182 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4183 .errstr = "invalid mem access 'inv'",
4184 .result = REJECT,
4185 .result_unpriv = REJECT,
4186 },
4187 {
4188 "map element value illegal alu op, 4",
4189 .insns = {
4190 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4191 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4192 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4193 BPF_LD_MAP_FD(BPF_REG_1, 0),
4194 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4195 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4196 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
4197 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4198 BPF_EXIT_INSN(),
4199 },
4200 .fixup_map2 = { 3 },
4201 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4202 .errstr = "invalid mem access 'inv'",
4203 .result = REJECT,
4204 .result_unpriv = REJECT,
4205 },
4206 {
4207 "map element value illegal alu op, 5",
4208 .insns = {
4209 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4211 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4212 BPF_LD_MAP_FD(BPF_REG_1, 0),
4213 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4214 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4215 BPF_MOV64_IMM(BPF_REG_3, 4096),
4216 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4217 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4218 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
4219 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
4220 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
4221 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4222 BPF_EXIT_INSN(),
4223 },
4224 .fixup_map2 = { 3 },
4225 .errstr_unpriv = "R0 invalid mem access 'inv'",
4226 .errstr = "R0 invalid mem access 'inv'",
4227 .result = REJECT,
4228 .result_unpriv = REJECT,
4229 },
4230 {
4231 "map element value is preserved across register spilling",
3967 .insns = { 4232 .insns = {
3968 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 4233 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3969 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 4234 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
@@ -3985,6 +4250,7 @@ static struct bpf_test tests[] = {
3985 .errstr_unpriv = "R0 pointer arithmetic prohibited", 4250 .errstr_unpriv = "R0 pointer arithmetic prohibited",
3986 .result = ACCEPT, 4251 .result = ACCEPT,
3987 .result_unpriv = REJECT, 4252 .result_unpriv = REJECT,
4253 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3988 }, 4254 },
3989 { 4255 {
3990 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds", 4256 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
@@ -4423,6 +4689,7 @@ static struct bpf_test tests[] = {
4423 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 4689 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4424 .result = REJECT, 4690 .result = REJECT,
4425 .result_unpriv = REJECT, 4691 .result_unpriv = REJECT,
4692 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4426 }, 4693 },
4427 { 4694 {
4428 "invalid range check", 4695 "invalid range check",
@@ -4454,6 +4721,7 @@ static struct bpf_test tests[] = {
4454 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 4721 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
4455 .result = REJECT, 4722 .result = REJECT,
4456 .result_unpriv = REJECT, 4723 .result_unpriv = REJECT,
4724 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4457 }, 4725 },
4458 { 4726 {
4459 "map in map access", 4727 "map in map access",
@@ -4523,7 +4791,7 @@ static struct bpf_test tests[] = {
4523 .fixup_map_in_map = { 3 }, 4791 .fixup_map_in_map = { 3 },
4524 .errstr = "R1 type=map_value_or_null expected=map_ptr", 4792 .errstr = "R1 type=map_value_or_null expected=map_ptr",
4525 .result = REJECT, 4793 .result = REJECT,
4526 }, 4794 }
4527}; 4795};
4528 4796
4529static int probe_filter_length(const struct bpf_insn *fp) 4797static int probe_filter_length(const struct bpf_insn *fp)
@@ -4632,11 +4900,16 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
4632static void do_test_single(struct bpf_test *test, bool unpriv, 4900static void do_test_single(struct bpf_test *test, bool unpriv,
4633 int *passes, int *errors) 4901 int *passes, int *errors)
4634{ 4902{
4903 int fd_prog, expected_ret, reject_from_alignment;
4635 struct bpf_insn *prog = test->insns; 4904 struct bpf_insn *prog = test->insns;
4636 int prog_len = probe_filter_length(prog); 4905 int prog_len = probe_filter_length(prog);
4637 int prog_type = test->prog_type; 4906 int prog_type = test->prog_type;
4638 int map_fds[MAX_NR_MAPS]; 4907 int map_fds[MAX_NR_MAPS];
4639 int fd_prog, expected_ret;
4640 const char *expected_err; 4913 const char *expected_err;
4641 int i; 4914 int i;
4642 4915
@@ -4653,8 +4926,19 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
4653 test->result_unpriv : test->result; 4926 test->result_unpriv : test->result;
4654 expected_err = unpriv && test->errstr_unpriv ? 4927 expected_err = unpriv && test->errstr_unpriv ?
4655 test->errstr_unpriv : test->errstr; 4928 test->errstr_unpriv : test->errstr;
4929
4930 reject_from_alignment = fd_prog < 0 &&
4931 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
4932 strstr(bpf_vlog, "Unknown alignment.");
4933#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
4934 if (reject_from_alignment) {
4935 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
4936 strerror(errno));
4937 goto fail_log;
4938 }
4939#endif
4656 if (expected_ret == ACCEPT) { 4940 if (expected_ret == ACCEPT) {
4657 if (fd_prog < 0) { 4941 if (fd_prog < 0 && !reject_from_alignment) {
4658 printf("FAIL\nFailed to load prog '%s'!\n", 4942 printf("FAIL\nFailed to load prog '%s'!\n",
4659 strerror(errno)); 4943 strerror(errno));
4660 goto fail_log; 4944 goto fail_log;
@@ -4664,14 +4948,15 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
4664 printf("FAIL\nUnexpected success to load!\n"); 4948 printf("FAIL\nUnexpected success to load!\n");
4665 goto fail_log; 4949 goto fail_log;
4666 } 4950 }
4667 if (!strstr(bpf_vlog, expected_err)) { 4951 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
4668 printf("FAIL\nUnexpected error message!\n"); 4952 printf("FAIL\nUnexpected error message!\n");
4669 goto fail_log; 4953 goto fail_log;
4670 } 4954 }
4671 } 4955 }
4672 4956
4673 (*passes)++; 4957 (*passes)++;
4674 printf("OK\n"); 4958 printf("OK%s\n", reject_from_alignment ?
4959 " (NOTE: reject due to unknown alignment)" : "");
4675close_fds: 4960close_fds:
4676 close(fd_prog); 4961 close(fd_prog);
4677 for (i = 0; i < MAX_NR_MAPS; i++) 4962 for (i = 0; i < MAX_NR_MAPS; i++)
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index a29786dd9522..4d28a9ddbee0 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -870,7 +870,8 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
870 continue; 870 continue;
871 871
872 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); 872 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
873 kvm->buses[bus_idx]->ioeventfd_count--; 873 if (kvm->buses[bus_idx])
874 kvm->buses[bus_idx]->ioeventfd_count--;
874 ioeventfd_release(p); 875 ioeventfd_release(p);
875 ret = 0; 876 ret = 0;
876 break; 877 break;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a17d78759727..88257b311cb5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -727,8 +727,11 @@ static void kvm_destroy_vm(struct kvm *kvm)
727 list_del(&kvm->vm_list); 727 list_del(&kvm->vm_list);
728 spin_unlock(&kvm_lock); 728 spin_unlock(&kvm_lock);
729 kvm_free_irq_routing(kvm); 729 kvm_free_irq_routing(kvm);
730 for (i = 0; i < KVM_NR_BUSES; i++) 730 for (i = 0; i < KVM_NR_BUSES; i++) {
731 kvm_io_bus_destroy(kvm->buses[i]); 731 if (kvm->buses[i])
732 kvm_io_bus_destroy(kvm->buses[i]);
733 kvm->buses[i] = NULL;
734 }
732 kvm_coalesced_mmio_free(kvm); 735 kvm_coalesced_mmio_free(kvm);
733#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 736#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
734 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 737 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
@@ -1062,7 +1065,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
1062 * changes) is disallowed above, so any other attribute changes getting 1065 * changes) is disallowed above, so any other attribute changes getting
1063 * here can be skipped. 1066 * here can be skipped.
1064 */ 1067 */
1065 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 1068 if (as_id == 0 && (change == KVM_MR_CREATE || change == KVM_MR_MOVE)) {
1066 r = kvm_iommu_map_pages(kvm, &new); 1069 r = kvm_iommu_map_pages(kvm, &new);
1067 return r; 1070 return r;
1068 } 1071 }
@@ -3474,6 +3477,8 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
3474 }; 3477 };
3475 3478
3476 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3479 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3480 if (!bus)
3481 return -ENOMEM;
3477 r = __kvm_io_bus_write(vcpu, bus, &range, val); 3482 r = __kvm_io_bus_write(vcpu, bus, &range, val);
3478 return r < 0 ? r : 0; 3483 return r < 0 ? r : 0;
3479} 3484}
@@ -3491,6 +3496,8 @@ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
3491 }; 3496 };
3492 3497
3493 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3498 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3499 if (!bus)
3500 return -ENOMEM;
3494 3501
3495 /* First try the device referenced by cookie. */ 3502 /* First try the device referenced by cookie. */
3496 if ((cookie >= 0) && (cookie < bus->dev_count) && 3503 if ((cookie >= 0) && (cookie < bus->dev_count) &&
@@ -3541,6 +3548,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
3541 }; 3548 };
3542 3549
3543 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3550 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3551 if (!bus)
3552 return -ENOMEM;
3544 r = __kvm_io_bus_read(vcpu, bus, &range, val); 3553 r = __kvm_io_bus_read(vcpu, bus, &range, val);
3545 return r < 0 ? r : 0; 3554 return r < 0 ? r : 0;
3546} 3555}
@@ -3553,6 +3562,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
3553 struct kvm_io_bus *new_bus, *bus; 3562 struct kvm_io_bus *new_bus, *bus;
3554 3563
3555 bus = kvm->buses[bus_idx]; 3564 bus = kvm->buses[bus_idx];
3565 if (!bus)
3566 return -ENOMEM;
3567
3556 /* exclude ioeventfd which is limited by maximum fd */ 3568 /* exclude ioeventfd which is limited by maximum fd */
3557 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 3569 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
3558 return -ENOSPC; 3570 return -ENOSPC;
@@ -3572,37 +3584,41 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
3572} 3584}
3573 3585
3574/* Caller must hold slots_lock. */ 3586/* Caller must hold slots_lock. */
3575int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3587void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
3576 struct kvm_io_device *dev) 3588 struct kvm_io_device *dev)
3577{ 3589{
3578 int i, r; 3590 int i;
3579 struct kvm_io_bus *new_bus, *bus; 3591 struct kvm_io_bus *new_bus, *bus;
3580 3592
3581 bus = kvm->buses[bus_idx]; 3593 bus = kvm->buses[bus_idx];
3582 r = -ENOENT; 3594 if (!bus)
3595 return;
3596
3583 for (i = 0; i < bus->dev_count; i++) 3597 for (i = 0; i < bus->dev_count; i++)
3584 if (bus->range[i].dev == dev) { 3598 if (bus->range[i].dev == dev) {
3585 r = 0;
3586 break; 3599 break;
3587 } 3600 }
3588 3601
3589 if (r) 3602 if (i == bus->dev_count)
3590 return r; 3603 return;
3591 3604
3592 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) * 3605 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
3593 sizeof(struct kvm_io_range)), GFP_KERNEL); 3606 sizeof(struct kvm_io_range)), GFP_KERNEL);
3594 if (!new_bus) 3607 if (!new_bus) {
3595 return -ENOMEM; 3608 pr_err("kvm: failed to shrink bus, removing it completely\n");
3609 goto broken;
3610 }
3596 3611
3597 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 3612 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
3598 new_bus->dev_count--; 3613 new_bus->dev_count--;
3599 memcpy(new_bus->range + i, bus->range + i + 1, 3614 memcpy(new_bus->range + i, bus->range + i + 1,
3600 (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); 3615 (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
3601 3616
3617broken:
3602 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3618 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
3603 synchronize_srcu_expedited(&kvm->srcu); 3619 synchronize_srcu_expedited(&kvm->srcu);
3604 kfree(bus); 3620 kfree(bus);
3605 return r; 3621 return;
3606} 3622}
3607 3623
3608struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3624struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
@@ -3615,6 +3631,8 @@ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
3615 srcu_idx = srcu_read_lock(&kvm->srcu); 3631 srcu_idx = srcu_read_lock(&kvm->srcu);
3616 3632
3617 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 3633 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
3634 if (!bus)
3635 goto out_unlock;
3618 3636
3619 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); 3637 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
3620 if (dev_idx < 0) 3638 if (dev_idx < 0)